diff --git a/experiments/README b/experiments/README deleted file mode 100644 index 2f07a1cd1f..0000000000 --- a/experiments/README +++ /dev/null @@ -1,85 +0,0 @@ -This directory archives Fast Downward experiments. - -Experiments using Downward Lab -============================== - -All experiments use their own common_setup.py module that simplifies the -creation of experiments. We recommend using common_setup.py (and related -*.py files) from issue891 for new experiments since it is the most -recent version. - -To isolate the Downward Lab versions used for different experiments and -papers, we recommend using Python virtual environments. To create and -activate a virtual environment for an issue experiment, use the -following steps: - - unset PYTHONPATH # If PYTHONPATH is set, unset it for a clean environment. - cd issue891 - python3 -m venv --prompt=issue891 .venv - source .venv/bin/activate # enter the virtual environment - # list exact versions of your dependencies in a file (for example, lab==4.2) - pip install -r requirements.txt # install dependencies - ./v1-opt.py 1 2 3 4 # run your experiment - deactivate # leave the virtual environment - -Below we list some experiments that show how certain tasks can be -performed. Please note that some of these experiments use outdated Lab and -Python versions. - -- Test changes that affect the whole planner: - - issue481/v1-*.py show the general setup. You will need different - experiments for satisficing, optimal and anytime configurations. 
- -- Add a custom log parser: - - issue735/v1.py - -- Compare all attributes for tasks where we lose coverage: - - issue439/regressions.py - -- Use custom time limit: - - issue439/issue439.py - -- Add scatter plots for custom attributes: - - issue214/issue214.py - -- Run configurations on the same tasks multiple times to reduce noise: - - issue420/issue420-v1-regressions.py - -- Independent CompareRevisionReports for portfolio configs and core solvers: - - issue462/issue462-opt.py - -- RelativeScatterPlotReport: - - issue77/issue77-v7-opt.py - -- M&S: configs and additional parsing: - - issue914/v1.py and issue914/ms-parser.py - -- Run experiment in debug mode (with assertions): - - issue650/v2.py - -- Translator: - - issue862/v5.py is a translator-only experiment, including a report - that only shows results where the translator output changed - - issue887/v1.py is essentially the same, but with a before/after - comparison rather than a "plain" multiple-configuration report. - - issue862/v5-planner.py is a follow-up experiment to v5.py to see - the impact of the translator changes on the overall planner; it is - essentially a standard planner experiment though, not very - translator-specific; the subset of domains to evaluate is - specified manually - - -Microbenchmarks -=============== - -Some experiments don't run the whole planner or planner components, but -just contain small microbenchmarks for particular functionality. These -may be a good starting point for similar microbenchmarks. Examples: - -- Benchmarking of random number generation: - - issue269/rng-microbenchmark - -If you add your own microbenchmark, it is recommended to start from a -copy of an existing example and follow the naming convention -issue[...]/[...]-microbenchmark for the code. This way, .hgignore -should be set up correctly out of the box. 
diff --git a/experiments/issue1000/common_setup.py b/experiments/issue1000/common_setup.py deleted file mode 100644 index f2bbda8569..0000000000 --- a/experiments/issue1000/common_setup.py +++ /dev/null @@ -1,398 +0,0 @@ -# -*- coding: utf-8 -*- - -import itertools -import os -import platform -import subprocess -import sys - -from lab.experiment import ARGPARSER -from lab import tools - -from downward.experiment import FastDownwardExperiment -from downward.reports.absolute import AbsoluteReport -from downward.reports.compare import ComparativeReport -from downward.reports.scatter import ScatterPlotReport - - -def parse_args(): - ARGPARSER.add_argument( - "--test", - choices=["yes", "no", "auto"], - default="auto", - dest="test_run", - help="test experiment locally on a small suite if --test=yes or " - "--test=auto and we are not on a cluster") - return ARGPARSER.parse_args() - -ARGS = parse_args() - - -DEFAULT_OPTIMAL_SUITE = [ - 'agricola-opt18-strips', 'airport', 'barman-opt11-strips', - 'barman-opt14-strips', 'blocks', 'childsnack-opt14-strips', - 'data-network-opt18-strips', 'depot', 'driverlog', - 'elevators-opt08-strips', 'elevators-opt11-strips', - 'floortile-opt11-strips', 'floortile-opt14-strips', 'freecell', - 'ged-opt14-strips', 'grid', 'gripper', 'hiking-opt14-strips', - 'logistics00', 'logistics98', 'miconic', 'movie', 'mprime', - 'mystery', 'nomystery-opt11-strips', 'openstacks-opt08-strips', - 'openstacks-opt11-strips', 'openstacks-opt14-strips', - 'openstacks-strips', 'organic-synthesis-opt18-strips', - 'organic-synthesis-split-opt18-strips', 'parcprinter-08-strips', - 'parcprinter-opt11-strips', 'parking-opt11-strips', - 'parking-opt14-strips', 'pathways-noneg', 'pegsol-08-strips', - 'pegsol-opt11-strips', 'petri-net-alignment-opt18-strips', - 'pipesworld-notankage', 'pipesworld-tankage', 'psr-small', 'rovers', - 'satellite', 'scanalyzer-08-strips', 'scanalyzer-opt11-strips', - 'snake-opt18-strips', 'sokoban-opt08-strips', - 'sokoban-opt11-strips', 
'spider-opt18-strips', 'storage', - 'termes-opt18-strips', 'tetris-opt14-strips', - 'tidybot-opt11-strips', 'tidybot-opt14-strips', 'tpp', - 'transport-opt08-strips', 'transport-opt11-strips', - 'transport-opt14-strips', 'trucks-strips', 'visitall-opt11-strips', - 'visitall-opt14-strips', 'woodworking-opt08-strips', - 'woodworking-opt11-strips', 'zenotravel'] - -DEFAULT_SATISFICING_SUITE = [ - 'agricola-sat18-strips', 'airport', 'assembly', - 'barman-sat11-strips', 'barman-sat14-strips', 'blocks', - 'caldera-sat18-adl', 'caldera-split-sat18-adl', 'cavediving-14-adl', - 'childsnack-sat14-strips', 'citycar-sat14-adl', - 'data-network-sat18-strips', 'depot', 'driverlog', - 'elevators-sat08-strips', 'elevators-sat11-strips', - 'flashfill-sat18-adl', 'floortile-sat11-strips', - 'floortile-sat14-strips', 'freecell', 'ged-sat14-strips', 'grid', - 'gripper', 'hiking-sat14-strips', 'logistics00', 'logistics98', - 'maintenance-sat14-adl', 'miconic', 'miconic-fulladl', - 'miconic-simpleadl', 'movie', 'mprime', 'mystery', - 'nomystery-sat11-strips', 'nurikabe-sat18-adl', 'openstacks', - 'openstacks-sat08-adl', 'openstacks-sat08-strips', - 'openstacks-sat11-strips', 'openstacks-sat14-strips', - 'openstacks-strips', 'optical-telegraphs', - 'organic-synthesis-sat18-strips', - 'organic-synthesis-split-sat18-strips', 'parcprinter-08-strips', - 'parcprinter-sat11-strips', 'parking-sat11-strips', - 'parking-sat14-strips', 'pathways', 'pathways-noneg', - 'pegsol-08-strips', 'pegsol-sat11-strips', 'philosophers', - 'pipesworld-notankage', 'pipesworld-tankage', 'psr-large', - 'psr-middle', 'psr-small', 'rovers', 'satellite', - 'scanalyzer-08-strips', 'scanalyzer-sat11-strips', 'schedule', - 'settlers-sat18-adl', 'snake-sat18-strips', 'sokoban-sat08-strips', - 'sokoban-sat11-strips', 'spider-sat18-strips', 'storage', - 'termes-sat18-strips', 'tetris-sat14-strips', - 'thoughtful-sat14-strips', 'tidybot-sat11-strips', 'tpp', - 'transport-sat08-strips', 'transport-sat11-strips', - 
'transport-sat14-strips', 'trucks', 'trucks-strips', - 'visitall-sat11-strips', 'visitall-sat14-strips', - 'woodworking-sat08-strips', 'woodworking-sat11-strips', - 'zenotravel'] - - -def get_script(): - """Get file name of main script.""" - return tools.get_script_path() - - -def get_script_dir(): - """Get directory of main script. - - Usually a relative directory (depends on how it was called by the user.)""" - return os.path.dirname(get_script()) - - -def get_experiment_name(): - """Get name for experiment. - - Derived from the absolute filename of the main script, e.g. - "/ham/spam/eggs.py" => "spam-eggs".""" - script = os.path.abspath(get_script()) - script_dir = os.path.basename(os.path.dirname(script)) - script_base = os.path.splitext(os.path.basename(script))[0] - return "%s-%s" % (script_dir, script_base) - - -def get_data_dir(): - """Get data dir for the experiment. - - This is the subdirectory "data" of the directory containing - the main script.""" - return os.path.join(get_script_dir(), "data", get_experiment_name()) - - -def get_repo_base(): - """Get base directory of the repository, as an absolute path. - - Search upwards in the directory tree from the main script until a - directory with a subdirectory named ".git" is found. - - Abort if the repo base cannot be found.""" - path = os.path.abspath(get_script_dir()) - while os.path.dirname(path) != path: - if os.path.exists(os.path.join(path, ".git")): - return path - path = os.path.dirname(path) - sys.exit("repo base could not be found") - - -def is_running_on_cluster(): - node = platform.node() - return node.endswith(".scicore.unibas.ch") or node.endswith(".cluster.bc2.ch") - - -def is_test_run(): - return ARGS.test_run == "yes" or ( - ARGS.test_run == "auto" and not is_running_on_cluster()) - - -def get_algo_nick(revision, config_nick): - return "{revision}-{config_nick}".format(**locals()) - - -class IssueConfig(object): - """Hold information about a planner configuration. 
- - See FastDownwardExperiment.add_algorithm() for documentation of the - constructor's options. - - """ - def __init__(self, nick, component_options, - build_options=None, driver_options=None): - self.nick = nick - self.component_options = component_options - self.build_options = build_options - self.driver_options = driver_options - - -class IssueExperiment(FastDownwardExperiment): - """Subclass of FastDownwardExperiment with some convenience features.""" - - DEFAULT_TEST_SUITE = ["depot:p01.pddl", "gripper:prob01.pddl"] - - DEFAULT_TABLE_ATTRIBUTES = [ - "cost", - "coverage", - "error", - "evaluations", - "expansions", - "expansions_until_last_jump", - "initial_h_value", - "generated", - "memory", - "planner_memory", - "planner_time", - "quality", - "run_dir", - "score_evaluations", - "score_expansions", - "score_generated", - "score_memory", - "score_search_time", - "score_total_time", - "search_time", - "total_time", - ] - - DEFAULT_SCATTER_PLOT_ATTRIBUTES = [ - "evaluations", - "expansions", - "expansions_until_last_jump", - "initial_h_value", - "memory", - "search_time", - "total_time", - ] - - PORTFOLIO_ATTRIBUTES = [ - "cost", - "coverage", - "error", - "plan_length", - "run_dir", - ] - - def __init__(self, revisions=None, configs=None, path=None, **kwargs): - """ - - You can either specify both *revisions* and *configs* or none - of them. If they are omitted, you will need to call - exp.add_algorithm() manually. - - If *revisions* is given, it must be a non-empty list of - revision identifiers, which specify which planner versions to - use in the experiment. The same versions are used for - translator, preprocessor and search. :: - - IssueExperiment(revisions=["issue123", "4b3d581643"], ...) - - If *configs* is given, it must be a non-empty list of - IssueConfig objects. 
:: - - IssueExperiment(..., configs=[ - IssueConfig("ff", ["--search", "eager_greedy(ff())"]), - IssueConfig( - "lama", [], - driver_options=["--alias", "seq-sat-lama-2011"]), - ]) - - If *path* is specified, it must be the path to where the - experiment should be built (e.g. - /home/john/experiments/issue123/exp01/). If omitted, the - experiment path is derived automatically from the main - script's filename. Example:: - - script = experiments/issue123/exp01.py --> - path = experiments/issue123/data/issue123-exp01/ - - """ - - path = path or get_data_dir() - - FastDownwardExperiment.__init__(self, path=path, **kwargs) - - if (revisions and not configs) or (not revisions and configs): - raise ValueError( - "please provide either both or none of revisions and configs") - - for rev in revisions: - for config in configs: - self.add_algorithm( - get_algo_nick(rev, config.nick), - get_repo_base(), - rev, - config.component_options, - build_options=config.build_options, - driver_options=config.driver_options) - - self._revisions = revisions - self._configs = configs - - @classmethod - def _is_portfolio(cls, config_nick): - return "fdss" in config_nick - - @classmethod - def get_supported_attributes(cls, config_nick, attributes): - if cls._is_portfolio(config_nick): - return [attr for attr in attributes - if attr in cls.PORTFOLIO_ATTRIBUTES] - return attributes - - def add_absolute_report_step(self, **kwargs): - """Add step that makes an absolute report. - - Absolute reports are useful for experiments that don't compare - revisions. - - The report is written to the experiment evaluation directory. - - All *kwargs* will be passed to the AbsoluteReport class. If the - keyword argument *attributes* is not specified, a default list - of attributes is used. 
:: - - exp.add_absolute_report_step(attributes=["coverage"]) - - """ - kwargs.setdefault("attributes", self.DEFAULT_TABLE_ATTRIBUTES) - report = AbsoluteReport(**kwargs) - outfile = os.path.join( - self.eval_dir, - get_experiment_name() + "." + report.output_format) - self.add_report(report, outfile=outfile) - self.add_step( - 'publish-absolute-report', subprocess.call, ['publish', outfile]) - - def add_comparison_table_step(self, **kwargs): - """Add a step that makes pairwise revision comparisons. - - Create comparative reports for all pairs of Fast Downward - revisions. Each report pairs up the runs of the same config and - lists the two absolute attribute values and their difference - for all attributes in kwargs["attributes"]. - - All *kwargs* will be passed to the CompareConfigsReport class. - If the keyword argument *attributes* is not specified, a - default list of attributes is used. :: - - exp.add_comparison_table_step(attributes=["coverage"]) - - """ - kwargs.setdefault("attributes", self.DEFAULT_TABLE_ATTRIBUTES) - - def make_comparison_tables(): - for rev1, rev2 in itertools.combinations(self._revisions, 2): - compared_configs = [] - for config in self._configs: - config_nick = config.nick - compared_configs.append( - ("%s-%s" % (rev1, config_nick), - "%s-%s" % (rev2, config_nick), - "Diff (%s)" % config_nick)) - report = ComparativeReport(compared_configs, **kwargs) - outfile = os.path.join( - self.eval_dir, - "%s-%s-%s-compare.%s" % ( - self.name, rev1, rev2, report.output_format)) - report(self.eval_dir, outfile) - - def publish_comparison_tables(): - for rev1, rev2 in itertools.combinations(self._revisions, 2): - outfile = os.path.join( - self.eval_dir, - "%s-%s-%s-compare.html" % (self.name, rev1, rev2)) - subprocess.call(["publish", outfile]) - - self.add_step("make-comparison-tables", make_comparison_tables) - self.add_step( - "publish-comparison-tables", publish_comparison_tables) - - def add_scatter_plot_step(self, relative=False, 
attributes=None, additional=[]): - """Add step creating (relative) scatter plots for all revision pairs. - - Create a scatter plot for each combination of attribute, - configuration and revisions pair. If *attributes* is not - specified, a list of common scatter plot attributes is used. - For portfolios all attributes except "cost", "coverage" and - "plan_length" will be ignored. :: - - exp.add_scatter_plot_step(attributes=["expansions"]) - - """ - if relative: - scatter_dir = os.path.join(self.eval_dir, "scatter-relative") - step_name = "make-relative-scatter-plots" - else: - scatter_dir = os.path.join(self.eval_dir, "scatter-absolute") - step_name = "make-absolute-scatter-plots" - if attributes is None: - attributes = self.DEFAULT_SCATTER_PLOT_ATTRIBUTES - - def make_scatter_plot(config_nick, rev1, rev2, attribute, config_nick2=None): - name = "-".join([self.name, rev1, rev2, attribute, config_nick]) - if config_nick2 is not None: - name += "-" + config_nick2 - print("Make scatter plot for", name) - algo1 = get_algo_nick(rev1, config_nick) - algo2 = get_algo_nick(rev2, config_nick if config_nick2 is None else config_nick2) - report = ScatterPlotReport( - filter_algorithm=[algo1, algo2], - attributes=[attribute], - relative=relative, - get_category=lambda run1, run2: run1["domain"]) - report( - self.eval_dir, - os.path.join(scatter_dir, rev1 + "-" + rev2, name)) - - def make_scatter_plots(): - for config in self._configs: - print(config) - for rev1, rev2 in itertools.combinations(self._revisions, 2): - print(rev1, rev2) - for attribute in self.get_supported_attributes( - config.nick, attributes): - print(attribute) - make_scatter_plot(config.nick, rev1, rev2, attribute) - for nick1, nick2, rev1, rev2, attribute in additional: - make_scatter_plot(nick1, rev1, rev2, attribute, config_nick2=nick2) - - self.add_step(step_name, make_scatter_plots) diff --git a/experiments/issue1000/optimal.py b/experiments/issue1000/optimal.py deleted file mode 100755 index 
b0e1f9d702..0000000000 --- a/experiments/issue1000/optimal.py +++ /dev/null @@ -1,69 +0,0 @@ -#! /usr/bin/env python -# -*- coding: utf-8 -*- - - -import os - -from lab.environments import LocalEnvironment, BaselSlurmEnvironment - -import common_setup -from common_setup import IssueConfig, IssueExperiment - - -def make_comparison_table(): - report = common_setup.ComparativeReport( - algorithm_pairs=[ - ("issue1000-base-seq-opt-bjolp", "issue1000-v11-seq-opt-bjolp"), - ("issue1000-base-seq-opt-bjolp", "issue1000-v12-seq-opt-bjolp"), - ("issue1000-base-seq-opt-bjolp", "issue1000-v13-seq-opt-bjolp"), - ], attributes=IssueExperiment.DEFAULT_TABLE_ATTRIBUTES, - ) - outfile = os.path.join( - exp.eval_dir, "%s-compare.%s" % (exp.name, report.output_format) - ) - report(exp.eval_dir, outfile) - - exp.add_report(report) - - -DIR = os.path.dirname(os.path.abspath(__file__)) -SCRIPT_NAME = os.path.splitext(os.path.basename(__file__))[0] -BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"] -REVISIONS = ["issue1000-base", "issue1000-v11", "issue1000-v12", - "issue1000-v13"] - -CONFIGS = [ - IssueConfig("seq-opt-bjolp", [], - driver_options=["--alias", "seq-opt-bjolp"]), -] - -SUITE = common_setup.DEFAULT_OPTIMAL_SUITE -ENVIRONMENT = BaselSlurmEnvironment( - partition="infai_2", - email="clemens.buechner@unibas.ch", - export=["PATH", "DOWNWARD_BENCHMARKS"], -) - -if common_setup.is_test_run(): - SUITE = IssueExperiment.DEFAULT_TEST_SUITE - ENVIRONMENT = LocalEnvironment(processes=2) - -exp = common_setup.IssueExperiment( - revisions=REVISIONS, - configs=CONFIGS, - environment=ENVIRONMENT, -) - -exp.add_suite(BENCHMARKS_DIR, SUITE) - -exp.add_parser(exp.EXITCODE_PARSER) -exp.add_parser(exp.PLANNER_PARSER) -exp.add_parser(exp.SINGLE_SEARCH_PARSER) - -exp.add_step("build", exp.build) -exp.add_step("start", exp.start_runs) -exp.add_fetcher(name="fetch") -exp.add_step("comparison table", make_comparison_table) - -exp.run_steps() - diff --git a/experiments/issue1000/requirements.txt 
b/experiments/issue1000/requirements.txt deleted file mode 100644 index d4330da5d4..0000000000 --- a/experiments/issue1000/requirements.txt +++ /dev/null @@ -1,11 +0,0 @@ -cycler==0.10.0 -kiwisolver==1.3.1 -lab==6.2 -matplotlib==3.3.3 -numpy==1.22.2 -Pillow==9.0.1 -pyparsing==2.4.7 -python-dateutil==2.8.1 -simplejson==3.17.2 -six==1.15.0 -txt2tags==3.7 diff --git a/experiments/issue1000/satisficing.py b/experiments/issue1000/satisficing.py deleted file mode 100755 index dc64a5349f..0000000000 --- a/experiments/issue1000/satisficing.py +++ /dev/null @@ -1,70 +0,0 @@ -#! /usr/bin/env python -# -*- coding: utf-8 -*- - - -import os - -from lab.environments import LocalEnvironment, BaselSlurmEnvironment - -import common_setup -from common_setup import IssueConfig, IssueExperiment - - -def make_comparison_table(): - report = common_setup.ComparativeReport( - algorithm_pairs=[ - ("issue1000-base-lama-first", "issue1000-v11-lama-first"), - ("issue1000-base-lama-first", "issue1000-v12-lama-first"), - ("issue1000-base-lama-first", "issue1000-v13-lama-first"), - ], attributes=IssueExperiment.DEFAULT_TABLE_ATTRIBUTES, - ) - outfile = os.path.join( - exp.eval_dir, "%s-compare.%s" % (exp.name, report.output_format) - ) - report(exp.eval_dir, outfile) - - exp.add_report(report) - - -DIR = os.path.dirname(os.path.abspath(__file__)) -SCRIPT_NAME = os.path.splitext(os.path.basename(__file__))[0] -BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"] -REVISIONS = ["issue1000-base", "issue1000-v11", "issue1000-v12", - "issue1000-v13"] - -CONFIGS = [ - IssueConfig("lama-first", [], - driver_options=["--alias", "lama-first"]), -] - -SUITE = common_setup.DEFAULT_SATISFICING_SUITE -ENVIRONMENT = BaselSlurmEnvironment( - partition="infai_2", - email="clemens.buechner@unibas.ch", - export=["PATH", "DOWNWARD_BENCHMARKS"], -) - -if common_setup.is_test_run(): - SUITE = IssueExperiment.DEFAULT_TEST_SUITE - ENVIRONMENT = LocalEnvironment(processes=2) - -exp = common_setup.IssueExperiment( - 
revisions=REVISIONS, - configs=CONFIGS, - environment=ENVIRONMENT, -) - -exp.add_suite(BENCHMARKS_DIR, SUITE) - -exp.add_parser(exp.ANYTIME_SEARCH_PARSER) -exp.add_parser(exp.EXITCODE_PARSER) -exp.add_parser(exp.PLANNER_PARSER) -exp.add_parser(exp.SINGLE_SEARCH_PARSER) - -exp.add_step("build", exp.build) -exp.add_step("start", exp.start_runs) -exp.add_fetcher(name="fetch") -exp.add_step("comparison table", make_comparison_table) - -exp.run_steps() - diff --git a/experiments/issue1000/v14-optimal.py b/experiments/issue1000/v14-optimal.py deleted file mode 100755 index 8eea775de3..0000000000 --- a/experiments/issue1000/v14-optimal.py +++ /dev/null @@ -1,52 +0,0 @@ -#! /usr/bin/env python -# -*- coding: utf-8 -*- - - -import os - -from lab.environments import LocalEnvironment, BaselSlurmEnvironment - -import common_setup -from common_setup import IssueConfig, IssueExperiment - - -DIR = os.path.dirname(os.path.abspath(__file__)) -SCRIPT_NAME = os.path.splitext(os.path.basename(__file__))[0] -BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"] -REVISIONS = ["issue1000-base", "issue1000-v14"] - -CONFIGS = [ - IssueConfig("seq-opt-bjolp", [], - driver_options=["--alias", "seq-opt-bjolp"]), -] - -SUITE = common_setup.DEFAULT_OPTIMAL_SUITE -ENVIRONMENT = BaselSlurmEnvironment( - partition="infai_2", - email="clemens.buechner@unibas.ch", - export=["PATH", "DOWNWARD_BENCHMARKS"], -) - -if common_setup.is_test_run(): - SUITE = IssueExperiment.DEFAULT_TEST_SUITE - ENVIRONMENT = LocalEnvironment(processes=2) - -exp = common_setup.IssueExperiment( - revisions=REVISIONS, - configs=CONFIGS, - environment=ENVIRONMENT, -) - -exp.add_suite(BENCHMARKS_DIR, SUITE) - -exp.add_parser(exp.EXITCODE_PARSER) -exp.add_parser(exp.PLANNER_PARSER) -exp.add_parser(exp.SINGLE_SEARCH_PARSER) - -exp.add_step("build", exp.build) -exp.add_step("start", exp.start_runs) -exp.add_fetcher(name="fetch") -exp.add_comparison_table_step() - -exp.run_steps() - diff --git 
a/experiments/issue1000/v14-satisficing.py b/experiments/issue1000/v14-satisficing.py deleted file mode 100755 index 2caee9d733..0000000000 --- a/experiments/issue1000/v14-satisficing.py +++ /dev/null @@ -1,53 +0,0 @@ -#! /usr/bin/env python -# -*- coding: utf-8 -*- - - -import os - -from lab.environments import LocalEnvironment, BaselSlurmEnvironment - -import common_setup -from common_setup import IssueConfig, IssueExperiment - - -DIR = os.path.dirname(os.path.abspath(__file__)) -SCRIPT_NAME = os.path.splitext(os.path.basename(__file__))[0] -BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"] -REVISIONS = ["issue1000-base", "issue1000-v14"] - -CONFIGS = [ - IssueConfig("lama-first", [], - driver_options=["--alias", "lama-first"]), -] - -SUITE = common_setup.DEFAULT_SATISFICING_SUITE -ENVIRONMENT = BaselSlurmEnvironment( - partition="infai_2", - email="clemens.buechner@unibas.ch", - export=["PATH", "DOWNWARD_BENCHMARKS"], -) - -if common_setup.is_test_run(): - SUITE = IssueExperiment.DEFAULT_TEST_SUITE - ENVIRONMENT = LocalEnvironment(processes=2) - -exp = common_setup.IssueExperiment( - revisions=REVISIONS, - configs=CONFIGS, - environment=ENVIRONMENT, -) - -exp.add_suite(BENCHMARKS_DIR, SUITE) - -exp.add_parser(exp.ANYTIME_SEARCH_PARSER) -exp.add_parser(exp.EXITCODE_PARSER) -exp.add_parser(exp.PLANNER_PARSER) -exp.add_parser(exp.SINGLE_SEARCH_PARSER) - -exp.add_step("build", exp.build) -exp.add_step("start", exp.start_runs) -exp.add_fetcher(name="fetch") -exp.add_comparison_table_step() - -exp.run_steps() - diff --git a/experiments/issue1004/common_setup.py b/experiments/issue1004/common_setup.py deleted file mode 100644 index eeca3aadb5..0000000000 --- a/experiments/issue1004/common_setup.py +++ /dev/null @@ -1,395 +0,0 @@ -# -*- coding: utf-8 -*- - -import itertools -import os -import platform -import subprocess -import sys - -from lab.experiment import ARGPARSER -from lab import tools - -from downward.experiment import FastDownwardExperiment -from 
downward.reports.absolute import AbsoluteReport -from downward.reports.compare import ComparativeReport -from downward.reports.scatter import ScatterPlotReport - - -def parse_args(): - ARGPARSER.add_argument( - "--test", - choices=["yes", "no", "auto"], - default="auto", - dest="test_run", - help="test experiment locally on a small suite if --test=yes or " - "--test=auto and we are not on a cluster") - return ARGPARSER.parse_args() - -ARGS = parse_args() - - -DEFAULT_OPTIMAL_SUITE = [ - 'agricola-opt18-strips', 'airport', 'barman-opt11-strips', - 'barman-opt14-strips', 'blocks', 'childsnack-opt14-strips', - 'data-network-opt18-strips', 'depot', 'driverlog', - 'elevators-opt08-strips', 'elevators-opt11-strips', - 'floortile-opt11-strips', 'floortile-opt14-strips', 'freecell', - 'ged-opt14-strips', 'grid', 'gripper', 'hiking-opt14-strips', - 'logistics00', 'logistics98', 'miconic', 'movie', 'mprime', - 'mystery', 'nomystery-opt11-strips', 'openstacks-opt08-strips', - 'openstacks-opt11-strips', 'openstacks-opt14-strips', - 'openstacks-strips', 'organic-synthesis-opt18-strips', - 'organic-synthesis-split-opt18-strips', 'parcprinter-08-strips', - 'parcprinter-opt11-strips', 'parking-opt11-strips', - 'parking-opt14-strips', 'pathways-noneg', 'pegsol-08-strips', - 'pegsol-opt11-strips', 'petri-net-alignment-opt18-strips', - 'pipesworld-notankage', 'pipesworld-tankage', 'psr-small', 'rovers', - 'satellite', 'scanalyzer-08-strips', 'scanalyzer-opt11-strips', - 'snake-opt18-strips', 'sokoban-opt08-strips', - 'sokoban-opt11-strips', 'spider-opt18-strips', 'storage', - 'termes-opt18-strips', 'tetris-opt14-strips', - 'tidybot-opt11-strips', 'tidybot-opt14-strips', 'tpp', - 'transport-opt08-strips', 'transport-opt11-strips', - 'transport-opt14-strips', 'trucks-strips', 'visitall-opt11-strips', - 'visitall-opt14-strips', 'woodworking-opt08-strips', - 'woodworking-opt11-strips', 'zenotravel'] - -DEFAULT_SATISFICING_SUITE = [ - 'agricola-sat18-strips', 'airport', 'assembly', - 
'barman-sat11-strips', 'barman-sat14-strips', 'blocks', - 'caldera-sat18-adl', 'caldera-split-sat18-adl', 'cavediving-14-adl', - 'childsnack-sat14-strips', 'citycar-sat14-adl', - 'data-network-sat18-strips', 'depot', 'driverlog', - 'elevators-sat08-strips', 'elevators-sat11-strips', - 'flashfill-sat18-adl', 'floortile-sat11-strips', - 'floortile-sat14-strips', 'freecell', 'ged-sat14-strips', 'grid', - 'gripper', 'hiking-sat14-strips', 'logistics00', 'logistics98', - 'maintenance-sat14-adl', 'miconic', 'miconic-fulladl', - 'miconic-simpleadl', 'movie', 'mprime', 'mystery', - 'nomystery-sat11-strips', 'nurikabe-sat18-adl', 'openstacks', - 'openstacks-sat08-adl', 'openstacks-sat08-strips', - 'openstacks-sat11-strips', 'openstacks-sat14-strips', - 'openstacks-strips', 'optical-telegraphs', - 'organic-synthesis-sat18-strips', - 'organic-synthesis-split-sat18-strips', 'parcprinter-08-strips', - 'parcprinter-sat11-strips', 'parking-sat11-strips', - 'parking-sat14-strips', 'pathways', 'pathways-noneg', - 'pegsol-08-strips', 'pegsol-sat11-strips', 'philosophers', - 'pipesworld-notankage', 'pipesworld-tankage', 'psr-large', - 'psr-middle', 'psr-small', 'rovers', 'satellite', - 'scanalyzer-08-strips', 'scanalyzer-sat11-strips', 'schedule', - 'settlers-sat18-adl', 'snake-sat18-strips', 'sokoban-sat08-strips', - 'sokoban-sat11-strips', 'spider-sat18-strips', 'storage', - 'termes-sat18-strips', 'tetris-sat14-strips', - 'thoughtful-sat14-strips', 'tidybot-sat11-strips', 'tpp', - 'transport-sat08-strips', 'transport-sat11-strips', - 'transport-sat14-strips', 'trucks', 'trucks-strips', - 'visitall-sat11-strips', 'visitall-sat14-strips', - 'woodworking-sat08-strips', 'woodworking-sat11-strips', - 'zenotravel'] - - -def get_script(): - """Get file name of main script.""" - return tools.get_script_path() - - -def get_script_dir(): - """Get directory of main script. 
- - Usually a relative directory (depends on how it was called by the user.)""" - return os.path.dirname(get_script()) - - -def get_experiment_name(): - """Get name for experiment. - - Derived from the absolute filename of the main script, e.g. - "/ham/spam/eggs.py" => "spam-eggs".""" - script = os.path.abspath(get_script()) - script_dir = os.path.basename(os.path.dirname(script)) - script_base = os.path.splitext(os.path.basename(script))[0] - return "%s-%s" % (script_dir, script_base) - - -def get_data_dir(): - """Get data dir for the experiment. - - This is the subdirectory "data" of the directory containing - the main script.""" - return os.path.join(get_script_dir(), "data", get_experiment_name()) - - -def get_repo_base(): - """Get base directory of the repository, as an absolute path. - - Search upwards in the directory tree from the main script until a - directory with a subdirectory named ".git" is found. - - Abort if the repo base cannot be found.""" - path = os.path.abspath(get_script_dir()) - while os.path.dirname(path) != path: - if os.path.exists(os.path.join(path, ".git")): - return path - path = os.path.dirname(path) - sys.exit("repo base could not be found") - - -def is_running_on_cluster(): - node = platform.node() - return node.endswith(".scicore.unibas.ch") or node.endswith(".cluster.bc2.ch") - - -def is_test_run(): - return ARGS.test_run == "yes" or ( - ARGS.test_run == "auto" and not is_running_on_cluster()) - - -def get_algo_nick(revision, config_nick): - return "{revision}-{config_nick}".format(**locals()) - - -class IssueConfig(object): - """Hold information about a planner configuration. - - See FastDownwardExperiment.add_algorithm() for documentation of the - constructor's options. 
- - """ - def __init__(self, nick, component_options, - build_options=None, driver_options=None): - self.nick = nick - self.component_options = component_options - self.build_options = build_options - self.driver_options = driver_options - - -class IssueExperiment(FastDownwardExperiment): - """Subclass of FastDownwardExperiment with some convenience features.""" - - DEFAULT_TEST_SUITE = ["depot:p01.pddl", "gripper:prob01.pddl"] - - DEFAULT_TABLE_ATTRIBUTES = [ - "cost", - "coverage", - "error", - "evaluations", - "expansions", - "expansions_until_last_jump", - "initial_h_value", - "generated", - "memory", - "planner_memory", - "planner_time", - "quality", - "run_dir", - "score_evaluations", - "score_expansions", - "score_generated", - "score_memory", - "score_search_time", - "score_total_time", - "search_time", - "total_time", - ] - - DEFAULT_SCATTER_PLOT_ATTRIBUTES = [ - "evaluations", - "expansions", - "expansions_until_last_jump", - "initial_h_value", - "memory", - "search_time", - "total_time", - ] - - PORTFOLIO_ATTRIBUTES = [ - "cost", - "coverage", - "error", - "plan_length", - "run_dir", - ] - - def __init__(self, revisions=None, configs=None, path=None, **kwargs): - """ - - You can either specify both *revisions* and *configs* or none - of them. If they are omitted, you will need to call - exp.add_algorithm() manually. - - If *revisions* is given, it must be a non-empty list of - revision identifiers, which specify which planner versions to - use in the experiment. The same versions are used for - translator, preprocessor and search. :: - - IssueExperiment(revisions=["issue123", "4b3d581643"], ...) - - If *configs* is given, it must be a non-empty list of - IssueConfig objects. :: - - IssueExperiment(..., configs=[ - IssueConfig("ff", ["--search", "eager_greedy(ff())"]), - IssueConfig( - "lama", [], - driver_options=["--alias", "seq-sat-lama-2011"]), - ]) - - If *path* is specified, it must be the path to where the - experiment should be built (e.g. 
- /home/john/experiments/issue123/exp01/). If omitted, the - experiment path is derived automatically from the main - script's filename. Example:: - - script = experiments/issue123/exp01.py --> - path = experiments/issue123/data/issue123-exp01/ - - """ - - path = path or get_data_dir() - - FastDownwardExperiment.__init__(self, path=path, **kwargs) - - if (revisions and not configs) or (not revisions and configs): - raise ValueError( - "please provide either both or none of revisions and configs") - - for rev in revisions: - for config in configs: - self.add_algorithm( - get_algo_nick(rev, config.nick), - get_repo_base(), - rev, - config.component_options, - build_options=config.build_options, - driver_options=config.driver_options) - - self._revisions = revisions - self._configs = configs - - @classmethod - def _is_portfolio(cls, config_nick): - return "fdss" in config_nick - - @classmethod - def get_supported_attributes(cls, config_nick, attributes): - if cls._is_portfolio(config_nick): - return [attr for attr in attributes - if attr in cls.PORTFOLIO_ATTRIBUTES] - return attributes - - def add_absolute_report_step(self, **kwargs): - """Add step that makes an absolute report. - - Absolute reports are useful for experiments that don't compare - revisions. - - The report is written to the experiment evaluation directory. - - All *kwargs* will be passed to the AbsoluteReport class. If the - keyword argument *attributes* is not specified, a default list - of attributes is used. :: - - exp.add_absolute_report_step(attributes=["coverage"]) - - """ - kwargs.setdefault("attributes", self.DEFAULT_TABLE_ATTRIBUTES) - report = AbsoluteReport(**kwargs) - outfile = os.path.join( - self.eval_dir, - get_experiment_name() + "." 
+ report.output_format) - self.add_report(report, outfile=outfile) - self.add_step( - 'publish-absolute-report', subprocess.call, ['publish', outfile]) - - def add_comparison_table_step(self, **kwargs): - """Add a step that makes pairwise revision comparisons. - - Create comparative reports for all pairs of Fast Downward - revisions. Each report pairs up the runs of the same config and - lists the two absolute attribute values and their difference - for all attributes in kwargs["attributes"]. - - All *kwargs* will be passed to the CompareConfigsReport class. - If the keyword argument *attributes* is not specified, a - default list of attributes is used. :: - - exp.add_comparison_table_step(attributes=["coverage"]) - - """ - kwargs.setdefault("attributes", self.DEFAULT_TABLE_ATTRIBUTES) - - def make_comparison_tables(): - for rev1, rev2 in itertools.combinations(self._revisions, 2): - compared_configs = [] - for config in self._configs: - config_nick = config.nick - compared_configs.append( - ("%s-%s" % (rev1, config_nick), - "%s-%s" % (rev2, config_nick), - "Diff (%s)" % config_nick)) - report = ComparativeReport(compared_configs, **kwargs) - outfile = os.path.join( - self.eval_dir, - "%s-%s-%s-compare.%s" % ( - self.name, rev1, rev2, report.output_format)) - report(self.eval_dir, outfile) - - def publish_comparison_tables(): - for rev1, rev2 in itertools.combinations(self._revisions, 2): - outfile = os.path.join( - self.eval_dir, - "%s-%s-%s-compare.html" % (self.name, rev1, rev2)) - subprocess.call(["publish", outfile]) - - self.add_step("make-comparison-tables", make_comparison_tables) - self.add_step( - "publish-comparison-tables", publish_comparison_tables) - - def add_scatter_plot_step(self, relative=False, attributes=None, additional=[]): - """Add step creating (relative) scatter plots for all revision pairs. - - Create a scatter plot for each combination of attribute, - configuration and revisions pair. 
If *attributes* is not - specified, a list of common scatter plot attributes is used. - For portfolios all attributes except "cost", "coverage" and - "plan_length" will be ignored. :: - - exp.add_scatter_plot_step(attributes=["expansions"]) - - """ - if relative: - scatter_dir = os.path.join(self.eval_dir, "scatter-relative") - step_name = "make-relative-scatter-plots" - else: - scatter_dir = os.path.join(self.eval_dir, "scatter-absolute") - step_name = "make-absolute-scatter-plots" - if attributes is None: - attributes = self.DEFAULT_SCATTER_PLOT_ATTRIBUTES - - def make_scatter_plot(config_nick, rev1, rev2, attribute, config_nick2=None): - name = "-".join([self.name, rev1, rev2, attribute, config_nick]) - if config_nick2 is not None: - name += "-" + config_nick2 - print("Make scatter plot for", name) - algo1 = get_algo_nick(rev1, config_nick) - algo2 = get_algo_nick(rev2, config_nick if config_nick2 is None else config_nick2) - report = ScatterPlotReport( - filter_algorithm=[algo1, algo2], - attributes=[attribute], - relative=relative, - get_category=lambda run1, run2: run1["domain"]) - report( - self.eval_dir, - os.path.join(scatter_dir, rev1 + "-" + rev2, name)) - - def make_scatter_plots(): - for config in self._configs: - for rev1, rev2 in itertools.combinations(self._revisions, 2): - for attribute in self.get_supported_attributes( - config.nick, attributes): - make_scatter_plot(config.nick, rev1, rev2, attribute) - for nick1, nick2, rev1, rev2, attribute in additional: - make_scatter_plot(nick1, rev1, rev2, attribute, config_nick2=nick2) - - self.add_step(step_name, lambda: make_scatter_plots) diff --git a/experiments/issue1004/landmark_parser.py b/experiments/issue1004/landmark_parser.py deleted file mode 100755 index 943492471b..0000000000 --- a/experiments/issue1004/landmark_parser.py +++ /dev/null @@ -1,29 +0,0 @@ -#! 
#! /usr/bin/env python

import re

from lab.parser import Parser

# (attribute, regex, type) triples for the landmark statistics that the
# landmark graph construction code prints to the log. The three counts
# (total / disjunctive / conjunctive) all come from the same log line;
# each pattern captures a different group of that line.
_PATTERNS = [
    ("lmgraph_generation_time",
     r"Landmark graph generation time: (.+)s",
     float),
    ("landmarks",
     r"Landmark graph contains (\d+) landmarks, of which \d+ are disjunctive and \d+ are conjunctive.",
     int),
    ("landmarks_disjunctive",
     r"Landmark graph contains \d+ landmarks, of which (\d+) are disjunctive and \d+ are conjunctive.",
     int),
    ("landmarks_conjunctive",
     r"Landmark graph contains \d+ landmarks, of which \d+ are disjunctive and (\d+) are conjunctive.",
     int),
    ("orderings",
     r"Landmark graph contains (\d+) orderings.",
     int),
]

parser = Parser()
for attribute, regex, attribute_type in _PATTERNS:
    parser.add_pattern(attribute, regex, type=attribute_type)

parser.parse()
/usr/bin/env python -# -*- coding: utf-8 -*- - -import common_setup -from common_setup import IssueConfig, IssueExperiment - -import os - -from lab.reports import Attribute - -from lab.environments import LocalEnvironment, BaselSlurmEnvironment - -def make_comparison_table(): - report = common_setup.ComparativeReport( - algorithm_pairs=[ - ("issue1004-base-seq-opt-bjolp", "issue1004-v2-seq-opt-bjolp"), - ("issue1004-base-seq-opt-bjolp-opt", "issue1004-v2-seq-opt-bjolp-opt"), - ], attributes=ATTRIBUTES, - ) - outfile = os.path.join( - exp.eval_dir, "%s-compare.%s" % (exp.name, report.output_format) - ) - report(exp.eval_dir, outfile) - - exp.add_report(report) - - -REVISIONS = [ - "issue1004-base", - "issue1004-v1", - "issue1004-v2", -] - -CONFIGS = [ - common_setup.IssueConfig("seq-opt-bjolp", [], - driver_options=["--alias", "seq-opt-bjolp"]), - common_setup.IssueConfig("lm-exhaust", ["--evaluator", "lmc=lmcount(lm_exhaust(),admissible=true)", "--search", "astar(lmc,lazy_evaluator=lmc)"]), - common_setup.IssueConfig("lm-hm2", ["--evaluator", "lmc=lmcount(lm_hm(m=2),admissible=true)", "--search", "astar(lmc,lazy_evaluator=lmc)"]), - common_setup.IssueConfig("seq-opt-bjolp-opt", ["--evaluator", "lmc=lmcount(lm_merged([lm_rhw(),lm_hm(m=1)]),admissible=true, optimal=true)", "--search", "astar(lmc,lazy_evaluator=lmc)"]), -] - -BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"] -REPO = os.environ["DOWNWARD_REPO"] - -if common_setup.is_running_on_cluster(): - SUITE = common_setup.DEFAULT_OPTIMAL_SUITE - ENVIRONMENT = BaselSlurmEnvironment( - partition="infai_1", - email="tho.keller@unibas.ch", - export=["PATH", "DOWNWARD_BENCHMARKS"], - ) -else: - SUITE = common_setup.IssueExperiment.DEFAULT_TEST_SUITE - ENVIRONMENT = LocalEnvironment(processes=2) - -exp = common_setup.IssueExperiment( - revisions=REVISIONS, - configs=CONFIGS, - environment=ENVIRONMENT, -) - -exp.add_suite(BENCHMARKS_DIR, SUITE) - -exp.add_parser(exp.EXITCODE_PARSER) -exp.add_parser(exp.PLANNER_PARSER) 
-exp.add_parser(exp.SINGLE_SEARCH_PARSER) -exp.add_parser("landmark_parser.py") - -ATTRIBUTES = IssueExperiment.DEFAULT_TABLE_ATTRIBUTES + [ - Attribute("landmarks", min_wins=False), - Attribute("disjunctive_landmarks", min_wins=False), - Attribute("conjunctive_landmarks", min_wins=False), - Attribute("orderings", min_wins=False), -] - -exp.add_step("build", exp.build) -exp.add_step("start", exp.start_runs) -exp.add_fetcher(name="fetch") -exp.add_step("comparison table", make_comparison_table) -exp.add_parse_again_step() - -exp.run_steps() - diff --git a/experiments/issue1004/v1-v2-satisficing.py b/experiments/issue1004/v1-v2-satisficing.py deleted file mode 100755 index f82bd9dea3..0000000000 --- a/experiments/issue1004/v1-v2-satisficing.py +++ /dev/null @@ -1,82 +0,0 @@ -#! /usr/bin/env python -# -*- coding: utf-8 -*- - -import common_setup - -from common_setup import IssueConfig, IssueExperiment - -import os - -from lab.reports import Attribute - -from lab.environments import LocalEnvironment, BaselSlurmEnvironment - -def make_comparison_table(): - report = common_setup.ComparativeReport( - algorithm_pairs=[ - ("issue1004-base-lama-first", "issue1004-v2-lama-first"), - ("issue1004-base-lama-first-pref", "issue1004-v2-lama-first-pref"), - ], attributes=ATTRIBUTES, - ) - outfile = os.path.join( - exp.eval_dir, "%s-compare.%s" % (exp.name, report.output_format) - ) - report(exp.eval_dir, outfile) - exp.add_report(report) - -REVISIONS = [ - "issue1004-base", - "issue1004-v1", - "issue1004-v2", -] - -CONFIGS = [ - common_setup.IssueConfig("lama-first", [], - driver_options=["--alias", "lama-first"]), - common_setup.IssueConfig("lama-first-pref", ["--evaluator", "hlm=lmcount(lm_factory=lm_rhw(reasonable_orders=true),transform=adapt_costs(one),pref=true)", "--evaluator", "hff=ff(transform=adapt_costs(one))", "--search", """lazy_greedy([hff,hlm],preferred=[hff,hlm], cost_type=one,reopen_closed=false)"""]), - common_setup.IssueConfig("lm-zg", ["--search", 
"eager_greedy([lmcount(lm_zg(reasonable_orders=false))])"]), -] - -BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"] -REPO = os.environ["DOWNWARD_REPO"] - -if common_setup.is_running_on_cluster(): - SUITE = common_setup.DEFAULT_SATISFICING_SUITE - ENVIRONMENT = BaselSlurmEnvironment( - partition="infai_1", - email="tho.keller@unibas.ch", - export=["PATH", "DOWNWARD_BENCHMARKS"], - ) -else: - SUITE = common_setup.IssueExperiment.DEFAULT_TEST_SUITE - ENVIRONMENT = LocalEnvironment(processes=2) - -exp = common_setup.IssueExperiment( - revisions=REVISIONS, - configs=CONFIGS, - environment=ENVIRONMENT, -) - -exp.add_suite(BENCHMARKS_DIR, SUITE) - -exp.add_parser(exp.ANYTIME_SEARCH_PARSER) -exp.add_parser(exp.EXITCODE_PARSER) -exp.add_parser(exp.PLANNER_PARSER) -exp.add_parser(exp.SINGLE_SEARCH_PARSER) -exp.add_parser("landmark_parser.py") - -ATTRIBUTES = IssueExperiment.DEFAULT_TABLE_ATTRIBUTES + [ - Attribute("landmarks", min_wins=False), - Attribute("disjunctive_landmarks", min_wins=False), - Attribute("conjunctive_landmarks", min_wins=False), - Attribute("orderings", min_wins=False), -] - -exp.add_step("build", exp.build) -exp.add_step("start", exp.start_runs) -exp.add_fetcher(name="fetch") -exp.add_step("comparison table", make_comparison_table) -exp.add_parse_again_step() - -exp.run_steps() - diff --git a/experiments/issue1004/v3-optimal.py b/experiments/issue1004/v3-optimal.py deleted file mode 100755 index b2da1ec5ab..0000000000 --- a/experiments/issue1004/v3-optimal.py +++ /dev/null @@ -1,81 +0,0 @@ -#! 
/usr/bin/env python -# -*- coding: utf-8 -*- - -import common_setup -from common_setup import IssueConfig, IssueExperiment - -import os - -from lab.reports import Attribute - -from lab.environments import LocalEnvironment, BaselSlurmEnvironment - -def make_comparison_table(): - report = common_setup.ComparativeReport( - algorithm_pairs=[ - ("issue1004-base-seq-opt-bjolp", "issue1004-v2-seq-opt-bjolp"), - ("issue1004-base-seq-opt-bjolp-opt", "issue1004-v2-seq-opt-bjolp-opt"), - ], attributes=ATTRIBUTES, - ) - outfile = os.path.join( - exp.eval_dir, "%s-compare.%s" % (exp.name, report.output_format) - ) - report(exp.eval_dir, outfile) - - exp.add_report(report) - - -REVISIONS = [ - "issue1004-base", - "issue1004-v3", -] - -CONFIGS = [ - common_setup.IssueConfig("seq-opt-bjolp", [], - driver_options=["--alias", "seq-opt-bjolp"]), - common_setup.IssueConfig("seq-opt-bjolp-opt", ["--evaluator", "lmc=lmcount(lm_merged([lm_rhw(),lm_hm(m=1)]),admissible=true, optimal=true)", "--search", "astar(lmc,lazy_evaluator=lmc)"]), -] - -BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"] -REPO = os.environ["DOWNWARD_REPO"] - -if common_setup.is_running_on_cluster(): - SUITE = common_setup.DEFAULT_OPTIMAL_SUITE - ENVIRONMENT = BaselSlurmEnvironment( - partition="infai_1", - email="tho.keller@unibas.ch", - export=["PATH", "DOWNWARD_BENCHMARKS"], - ) -else: - SUITE = common_setup.IssueExperiment.DEFAULT_TEST_SUITE - ENVIRONMENT = LocalEnvironment(processes=2) - -exp = common_setup.IssueExperiment( - revisions=REVISIONS, - configs=CONFIGS, - environment=ENVIRONMENT, -) - -exp.add_suite(BENCHMARKS_DIR, SUITE) - -exp.add_parser(exp.EXITCODE_PARSER) -exp.add_parser(exp.PLANNER_PARSER) -exp.add_parser(exp.SINGLE_SEARCH_PARSER) -exp.add_parser("landmark_parser.py") - -ATTRIBUTES = IssueExperiment.DEFAULT_TABLE_ATTRIBUTES + [ - Attribute("landmarks", min_wins=False), - Attribute("landmarks_disjunctive", min_wins=False), - Attribute("landmarks_conjunctiv", min_wins=False), - 
Attribute("orderings", min_wins=False), - Attribute("lmgraph_generation_time"), -] - -exp.add_step("build", exp.build) -exp.add_step("start", exp.start_runs) -exp.add_fetcher(name="fetch") -exp.add_comparison_table_step(attributes=ATTRIBUTES) -exp.add_parse_again_step() - -exp.run_steps() - diff --git a/experiments/issue1004/v3-satisficing.py b/experiments/issue1004/v3-satisficing.py deleted file mode 100755 index ac75be6dad..0000000000 --- a/experiments/issue1004/v3-satisficing.py +++ /dev/null @@ -1,68 +0,0 @@ -#! /usr/bin/env python -# -*- coding: utf-8 -*- - -import common_setup - -from common_setup import IssueConfig, IssueExperiment - -import os - -from lab.reports import Attribute - -from lab.environments import LocalEnvironment, BaselSlurmEnvironment - -REVISIONS = [ - "issue1004-base", - "issue1004-v3", -] - -CONFIGS = [ - common_setup.IssueConfig("lama-first", [], - driver_options=["--alias", "lama-first"]), - common_setup.IssueConfig("lama-first-pref", ["--evaluator", "hlm=lmcount(lm_factory=lm_rhw(reasonable_orders=true),transform=adapt_costs(one),pref=true)", "--evaluator", "hff=ff(transform=adapt_costs(one))", "--search", """lazy_greedy([hff,hlm],preferred=[hff,hlm], cost_type=one,reopen_closed=false)"""]), -] - -BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"] -REPO = os.environ["DOWNWARD_REPO"] - -if common_setup.is_running_on_cluster(): - SUITE = common_setup.DEFAULT_SATISFICING_SUITE - ENVIRONMENT = BaselSlurmEnvironment( - partition="infai_1", - email="tho.keller@unibas.ch", - export=["PATH", "DOWNWARD_BENCHMARKS"], - ) -else: - SUITE = common_setup.IssueExperiment.DEFAULT_TEST_SUITE - ENVIRONMENT = LocalEnvironment(processes=2) - -exp = common_setup.IssueExperiment( - revisions=REVISIONS, - configs=CONFIGS, - environment=ENVIRONMENT, -) - -exp.add_suite(BENCHMARKS_DIR, SUITE) - -exp.add_parser(exp.ANYTIME_SEARCH_PARSER) -exp.add_parser(exp.EXITCODE_PARSER) -exp.add_parser(exp.PLANNER_PARSER) -exp.add_parser(exp.SINGLE_SEARCH_PARSER) 
-exp.add_parser("landmark_parser.py") - -ATTRIBUTES = IssueExperiment.DEFAULT_TABLE_ATTRIBUTES + [ - Attribute("landmarks", min_wins=False), - Attribute("landmarks_disjunctive", min_wins=False), - Attribute("landmarks_conjunctiv", min_wins=False), - Attribute("orderings", min_wins=False), - Attribute("lmgraph_generation_time"), -] - -exp.add_step("build", exp.build) -exp.add_step("start", exp.start_runs) -exp.add_fetcher(name="fetch") -exp.add_comparison_table_step(attributes=ATTRIBUTES) -exp.add_parse_again_step() - -exp.run_steps() - diff --git a/experiments/issue1004/v3-v4-optimal.py b/experiments/issue1004/v3-v4-optimal.py deleted file mode 100755 index 69ecc2f05e..0000000000 --- a/experiments/issue1004/v3-v4-optimal.py +++ /dev/null @@ -1,85 +0,0 @@ -#! /usr/bin/env python -# -*- coding: utf-8 -*- - -import common_setup -from common_setup import IssueConfig, IssueExperiment - -import os - -from lab.reports import Attribute - -from lab.environments import LocalEnvironment, BaselSlurmEnvironment - -def make_comparison_table(): - report = common_setup.ComparativeReport( - algorithm_pairs=[ - ("issue1004-base-with-stats-seq-opt-bjolp", "issue1004-v3-seq-opt-bjolp"), - ("issue1004-base-with-stats-seq-opt-bjolp", "issue1004-v4-seq-opt-bjolp"), - ("issue1004-v3-seq-opt-bjolp", "issue1004-v4-seq-opt-bjolp"), - ("issue1004-base-with-stats-seq-opt-bjolp-opt", "issue1004-v3-seq-opt-bjolp-opt"), - ("issue1004-base-with-stats-seq-opt-bjolp-opt", "issue1004-v4-seq-opt-bjolp-opt"), - ("issue1004-v3-seq-opt-bjolp-opt", "issue1004-v4-seq-opt-bjolp-opt"), - ], attributes=ATTRIBUTES, - ) - outfile = os.path.join( - exp.eval_dir, "%s-compare.%s" % (exp.name, report.output_format) - ) - report(exp.eval_dir, outfile) - - exp.add_report(report) - -REVISIONS = [ - "issue1004-base-with-stats", - "issue1004-v3", - "issue1004-v4", -] - -CONFIGS = [ - common_setup.IssueConfig("seq-opt-bjolp", [], - driver_options=["--alias", "seq-opt-bjolp"]), - 
common_setup.IssueConfig("seq-opt-bjolp-opt", ["--evaluator", "lmc=lmcount(lm_merged([lm_rhw(),lm_hm(m=1)]),admissible=true, optimal=true)", "--search", "astar(lmc,lazy_evaluator=lmc)"]), -] - -BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"] -REPO = os.environ["DOWNWARD_REPO"] - -if common_setup.is_running_on_cluster(): - SUITE = common_setup.DEFAULT_OPTIMAL_SUITE - ENVIRONMENT = BaselSlurmEnvironment( - partition="infai_2", - email="tho.keller@unibas.ch", - export=["PATH", "DOWNWARD_BENCHMARKS"], - ) -else: - SUITE = common_setup.IssueExperiment.DEFAULT_TEST_SUITE - ENVIRONMENT = LocalEnvironment(processes=2) - -exp = common_setup.IssueExperiment( - revisions=REVISIONS, - configs=CONFIGS, - environment=ENVIRONMENT, -) - -exp.add_suite(BENCHMARKS_DIR, SUITE) - -exp.add_parser(exp.EXITCODE_PARSER) -exp.add_parser(exp.PLANNER_PARSER) -exp.add_parser(exp.SINGLE_SEARCH_PARSER) -exp.add_parser("landmark_parser.py") - -ATTRIBUTES = IssueExperiment.DEFAULT_TABLE_ATTRIBUTES + [ - Attribute("landmarks", min_wins=False), - Attribute("landmarks_disjunctive", min_wins=False), - Attribute("landmarks_conjunctive", min_wins=False), - Attribute("orderings", min_wins=False), - Attribute("lmgraph_generation_time"), -] - -exp.add_step("build", exp.build) -exp.add_step("start", exp.start_runs) -exp.add_fetcher(name="fetch") -exp.add_step("comparison table", make_comparison_table) -exp.add_parse_again_step() - -exp.run_steps() - diff --git a/experiments/issue1004/v3-v4-satisficing.py b/experiments/issue1004/v3-v4-satisficing.py deleted file mode 100755 index 900f7a1479..0000000000 --- a/experiments/issue1004/v3-v4-satisficing.py +++ /dev/null @@ -1,87 +0,0 @@ -#! 
/usr/bin/env python -# -*- coding: utf-8 -*- - -import common_setup - -from common_setup import IssueConfig, IssueExperiment - -import os - -from lab.reports import Attribute - -from lab.environments import LocalEnvironment, BaselSlurmEnvironment - -def make_comparison_table(): - report = common_setup.ComparativeReport( - algorithm_pairs=[ - ("issue1004-base-with-stats-lama-first", "issue1004-v3-lama-first"), - ("issue1004-base-with-stats-lama-first", "issue1004-v4-lama-first"), - ("issue1004-v3-lama-first", "issue1004-v4-lama-first"), - ("issue1004-base-with-stats-lama-first-pref", "issue1004-v3-lama-first-pref"), - ("issue1004-base-with-stats-lama-first-pref", "issue1004-v4-lama-first-pref"), - ("issue1004-v3-lama-first-pref", "issue1004-v4-lama-first-pref"), - ], attributes=ATTRIBUTES, - ) - outfile = os.path.join( - exp.eval_dir, "%s-compare.%s" % (exp.name, report.output_format) - ) - report(exp.eval_dir, outfile) - - exp.add_report(report) - -REVISIONS = [ - "issue1004-base-with-stats", - "issue1004-v3", - "issue1004-v4", -] - -CONFIGS = [ - common_setup.IssueConfig("lama-first", [], - driver_options=["--alias", "lama-first"]), - common_setup.IssueConfig("lama-first-pref", ["--evaluator", "hlm=lmcount(lm_factory=lm_rhw(reasonable_orders=true),transform=adapt_costs(one),pref=true)", "--evaluator", "hff=ff(transform=adapt_costs(one))", "--search", """lazy_greedy([hff,hlm],preferred=[hff,hlm], cost_type=one,reopen_closed=false)"""]), -] - -BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"] -REPO = os.environ["DOWNWARD_REPO"] - -if common_setup.is_running_on_cluster(): - SUITE = common_setup.DEFAULT_SATISFICING_SUITE - ENVIRONMENT = BaselSlurmEnvironment( - partition="infai_2", - email="tho.keller@unibas.ch", - export=["PATH", "DOWNWARD_BENCHMARKS"], - ) -else: - SUITE = common_setup.IssueExperiment.DEFAULT_TEST_SUITE - ENVIRONMENT = LocalEnvironment(processes=2) - -exp = common_setup.IssueExperiment( - revisions=REVISIONS, - configs=CONFIGS, - 
environment=ENVIRONMENT, -) - -exp.add_suite(BENCHMARKS_DIR, SUITE) - -exp.add_parser(exp.ANYTIME_SEARCH_PARSER) -exp.add_parser(exp.EXITCODE_PARSER) -exp.add_parser(exp.PLANNER_PARSER) -exp.add_parser(exp.SINGLE_SEARCH_PARSER) -exp.add_parser("landmark_parser.py") - -ATTRIBUTES = IssueExperiment.DEFAULT_TABLE_ATTRIBUTES + [ - Attribute("landmarks", min_wins=False), - Attribute("landmarks_disjunctive", min_wins=False), - Attribute("landmarks_conjunctive", min_wins=False), - Attribute("orderings", min_wins=False), - Attribute("lmgraph_generation_time"), -] - -exp.add_step("build", exp.build) -exp.add_step("start", exp.start_runs) -exp.add_fetcher(name="fetch") -exp.add_step("comparison table", make_comparison_table) -exp.add_parse_again_step() - -exp.run_steps() - diff --git a/experiments/issue1007/average_report.py b/experiments/issue1007/average_report.py deleted file mode 100644 index 0fe072f9a3..0000000000 --- a/experiments/issue1007/average_report.py +++ /dev/null @@ -1,78 +0,0 @@ -# -*- coding: utf-8 -*- - -from downward.reports import PlanningReport -from lab import tools -from lab.reports import geometric_mean - -import os - -DEBUG=False - -class AverageAlgorithmReport(PlanningReport): - """ - This currently only works for some hard-coded attributes. 
- """ - def __init__(self, algo_name_suffixes, **kwargs): - PlanningReport.__init__(self, **kwargs) - self.algo_name_suffixes=algo_name_suffixes - - def get_text(self): - if not self.outfile.endswith("properties"): - raise ValueError("outfile must be a path to a properties file") - algo_infixes = set() - for algo in self.algorithms: - for suffix in self.algo_name_suffixes: - if suffix in algo: - algo_infixes.add(algo.replace(suffix, '')) - break - # print(algo_infixes) - # print(self.algo_name_suffixes) - props = tools.Properties(self.outfile) - for domain, problem in self.problem_runs.keys(): - if DEBUG: - print(domain, problem) - for algo in algo_infixes: - if DEBUG: - print("Consider ", algo) - properties_key = algo + '-' + domain + '-' + problem - average_algo_dict = {} - average_algo_dict['algorithm'] = algo - average_algo_dict['domain'] = domain - average_algo_dict['problem'] = problem - average_algo_dict['id'] = [algo, domain, problem] - for attribute in self.attributes: - if DEBUG: - print("Consider ", attribute) - values = [] - for suffix in self.algo_name_suffixes: - real_algo = algo + suffix - # if DEBUG: - # print("Composed algo ", real_algo) - real_algo_run = self.runs[(domain, problem, real_algo)] - values.append(real_algo_run.get(attribute)) - if DEBUG: - print(values) - values_without_none = [value for value in values if value is not None] - if attribute in [ - 'coverage', 'cegar_num_iterations', - 'cegar_num_patterns', - 'cegar_total_pdb_size', 'initial_h_value' - 'coverage', 'initial_h_value', - 'cpdbs_num_patterns', 'cpdbs_total_pdb_size', - 'cegar_num_iterations', 'cegar_num_patterns', - 'cegar_total_pdb_size', - ] or 'score' in attribute: - # if 'score' not in attribute: - # assert len(values_without_none) == 10 # does not hold for scores - average_value = sum(values_without_none)/float(len(values)) - elif 'time' in attribute or 'expansions' in attribute: - if len(values_without_none) == 10: - average_value = geometric_mean(values_without_none) 
- else: - average_value = None - else: - print("Don't know how to handle {}".format(attribute)) - exit(1) - average_algo_dict[attribute] = average_value - props[properties_key] = average_algo_dict - return str(props) diff --git a/experiments/issue1007/cegar-parser.py b/experiments/issue1007/cegar-parser.py deleted file mode 100755 index 9c809452bb..0000000000 --- a/experiments/issue1007/cegar-parser.py +++ /dev/null @@ -1,11 +0,0 @@ -#! /usr/bin/env python - -from lab.parser import Parser - -parser = Parser() -parser.add_pattern('cegar_num_iterations', 'CEGAR number of iterations: (\d+)', required=False, type=int) -parser.add_pattern('cegar_num_patterns', 'CEGAR number of patterns: (\d+)', required=False, type=int) -parser.add_pattern('cegar_total_pdb_size', 'CEGAR total PDB size: (\d+)', required=False, type=int) -parser.add_pattern('cegar_computation_time', 'CEGAR computation time: (.+)s', required=False, type=float) - -parser.parse() diff --git a/experiments/issue1007/common_setup.py b/experiments/issue1007/common_setup.py deleted file mode 100644 index eecf49e971..0000000000 --- a/experiments/issue1007/common_setup.py +++ /dev/null @@ -1,427 +0,0 @@ -# -*- coding: utf-8 -*- - -import itertools -import os -import platform -import subprocess -import sys - -from lab.experiment import ARGPARSER -from lab import tools - -from downward.experiment import FastDownwardExperiment -from downward.reports.absolute import AbsoluteReport -from downward.reports.compare import ComparativeReport -from downward.reports.scatter import ScatterPlotReport - - -def parse_args(): - ARGPARSER.add_argument( - "--test", - choices=["yes", "no", "auto"], - default="auto", - dest="test_run", - help="test experiment locally on a small suite if --test=yes or " - "--test=auto and we are not on a cluster") - return ARGPARSER.parse_args() - -ARGS = parse_args() - - -DEFAULT_OPTIMAL_SUITE = [ - 'agricola-opt18-strips', 'airport', 'barman-opt11-strips', - 'barman-opt14-strips', 'blocks', 
'childsnack-opt14-strips', - 'data-network-opt18-strips', 'depot', 'driverlog', - 'elevators-opt08-strips', 'elevators-opt11-strips', - 'floortile-opt11-strips', 'floortile-opt14-strips', 'freecell', - 'ged-opt14-strips', 'grid', 'gripper', 'hiking-opt14-strips', - 'logistics00', 'logistics98', 'miconic', 'movie', 'mprime', - 'mystery', 'nomystery-opt11-strips', 'openstacks-opt08-strips', - 'openstacks-opt11-strips', 'openstacks-opt14-strips', - 'openstacks-strips', 'organic-synthesis-opt18-strips', - 'organic-synthesis-split-opt18-strips', 'parcprinter-08-strips', - 'parcprinter-opt11-strips', 'parking-opt11-strips', - 'parking-opt14-strips', 'pathways', 'pegsol-08-strips', - 'pegsol-opt11-strips', 'petri-net-alignment-opt18-strips', - 'pipesworld-notankage', 'pipesworld-tankage', 'psr-small', 'rovers', - 'satellite', 'scanalyzer-08-strips', 'scanalyzer-opt11-strips', - 'snake-opt18-strips', 'sokoban-opt08-strips', - 'sokoban-opt11-strips', 'spider-opt18-strips', 'storage', - 'termes-opt18-strips', 'tetris-opt14-strips', - 'tidybot-opt11-strips', 'tidybot-opt14-strips', 'tpp', - 'transport-opt08-strips', 'transport-opt11-strips', - 'transport-opt14-strips', 'trucks-strips', 'visitall-opt11-strips', - 'visitall-opt14-strips', 'woodworking-opt08-strips', - 'woodworking-opt11-strips', 'zenotravel'] - -DEFAULT_SATISFICING_SUITE = [ - 'agricola-sat18-strips', 'airport', 'assembly', - 'barman-sat11-strips', 'barman-sat14-strips', 'blocks', - 'caldera-sat18-adl', 'caldera-split-sat18-adl', 'cavediving-14-adl', - 'childsnack-sat14-strips', 'citycar-sat14-adl', - 'data-network-sat18-strips', 'depot', 'driverlog', - 'elevators-sat08-strips', 'elevators-sat11-strips', - 'flashfill-sat18-adl', 'floortile-sat11-strips', - 'floortile-sat14-strips', 'freecell', 'ged-sat14-strips', 'grid', - 'gripper', 'hiking-sat14-strips', 'logistics00', 'logistics98', - 'maintenance-sat14-adl', 'miconic', 'miconic-fulladl', - 'miconic-simpleadl', 'movie', 'mprime', 'mystery', - 
'nomystery-sat11-strips', 'nurikabe-sat18-adl', 'openstacks', - 'openstacks-sat08-adl', 'openstacks-sat08-strips', - 'openstacks-sat11-strips', 'openstacks-sat14-strips', - 'openstacks-strips', 'optical-telegraphs', - 'organic-synthesis-sat18-strips', - 'organic-synthesis-split-sat18-strips', 'parcprinter-08-strips', - 'parcprinter-sat11-strips', 'parking-sat11-strips', - 'parking-sat14-strips', 'pathways', - 'pegsol-08-strips', 'pegsol-sat11-strips', 'philosophers', - 'pipesworld-notankage', 'pipesworld-tankage', 'psr-large', - 'psr-middle', 'psr-small', 'rovers', 'satellite', - 'scanalyzer-08-strips', 'scanalyzer-sat11-strips', 'schedule', - 'settlers-sat18-adl', 'snake-sat18-strips', 'sokoban-sat08-strips', - 'sokoban-sat11-strips', 'spider-sat18-strips', 'storage', - 'termes-sat18-strips', 'tetris-sat14-strips', - 'thoughtful-sat14-strips', 'tidybot-sat11-strips', 'tpp', - 'transport-sat08-strips', 'transport-sat11-strips', - 'transport-sat14-strips', 'trucks', 'trucks-strips', - 'visitall-sat11-strips', 'visitall-sat14-strips', - 'woodworking-sat08-strips', 'woodworking-sat11-strips', - 'zenotravel'] - - -def get_script(): - """Get file name of main script.""" - return tools.get_script_path() - - -def get_script_dir(): - """Get directory of main script. - - Usually a relative directory (depends on how it was called by the user.)""" - return os.path.dirname(get_script()) - - -def get_experiment_name(): - """Get name for experiment. - - Derived from the absolute filename of the main script, e.g. - "/ham/spam/eggs.py" => "spam-eggs".""" - script = os.path.abspath(get_script()) - script_dir = os.path.basename(os.path.dirname(script)) - script_base = os.path.splitext(os.path.basename(script))[0] - return "%s-%s" % (script_dir, script_base) - - -def get_data_dir(): - """Get data dir for the experiment. 
- - This is the subdirectory "data" of the directory containing - the main script.""" - return os.path.join(get_script_dir(), "data", get_experiment_name()) - - -def get_repo_base(): - """Get base directory of the repository, as an absolute path. - - Search upwards in the directory tree from the main script until a - directory with a subdirectory named ".git" is found. - - Abort if the repo base cannot be found.""" - path = os.path.abspath(get_script_dir()) - while os.path.dirname(path) != path: - if os.path.exists(os.path.join(path, ".git")): - return path - path = os.path.dirname(path) - sys.exit("repo base could not be found") - - -def is_running_on_cluster(): - node = platform.node() - return node.endswith(".scicore.unibas.ch") or node.endswith(".cluster.bc2.ch") - - -def is_test_run(): - return ARGS.test_run == "yes" or ( - ARGS.test_run == "auto" and not is_running_on_cluster()) - - -def get_algo_nick(revision, config_nick): - return "{revision}-{config_nick}".format(**locals()) - - -class IssueConfig(object): - """Hold information about a planner configuration. - - See FastDownwardExperiment.add_algorithm() for documentation of the - constructor's options. 
- - """ - def __init__(self, nick, component_options, - build_options=None, driver_options=None): - self.nick = nick - self.component_options = component_options - self.build_options = build_options - self.driver_options = driver_options - - -class IssueExperiment(FastDownwardExperiment): - """Subclass of FastDownwardExperiment with some convenience features.""" - - DEFAULT_TEST_SUITE = ["depot:p01.pddl", "gripper:prob01.pddl"] - - DEFAULT_TABLE_ATTRIBUTES = [ - "cost", - "coverage", - "error", - "evaluations", - "expansions", - "expansions_until_last_jump", - "generated", - "memory", - "planner_memory", - "planner_time", - "quality", - "run_dir", - "score_evaluations", - "score_expansions", - "score_generated", - "score_memory", - "score_search_time", - "score_total_time", - "search_time", - "total_time", - ] - - DEFAULT_SCATTER_PLOT_ATTRIBUTES = [ - "evaluations", - "expansions", - "expansions_until_last_jump", - "initial_h_value", - "memory", - "search_time", - "total_time", - ] - - PORTFOLIO_ATTRIBUTES = [ - "cost", - "coverage", - "error", - "plan_length", - "run_dir", - ] - - def __init__(self, revisions=None, configs=None, path=None, **kwargs): - """ - - You can either specify both *revisions* and *configs* or none - of them. If they are omitted, you will need to call - exp.add_algorithm() manually. - - If *revisions* is given, it must be a non-empty list of - revision identifiers, which specify which planner versions to - use in the experiment. The same versions are used for - translator, preprocessor and search. :: - - IssueExperiment(revisions=["issue123", "4b3d581643"], ...) - - If *configs* is given, it must be a non-empty list of - IssueConfig objects. :: - - IssueExperiment(..., configs=[ - IssueConfig("ff", ["--search", "eager_greedy(ff())"]), - IssueConfig( - "lama", [], - driver_options=["--alias", "seq-sat-lama-2011"]), - ]) - - If *path* is specified, it must be the path to where the - experiment should be built (e.g. 
- /home/john/experiments/issue123/exp01/). If omitted, the - experiment path is derived automatically from the main - script's filename. Example:: - - script = experiments/issue123/exp01.py --> - path = experiments/issue123/data/issue123-exp01/ - - """ - - path = path or get_data_dir() - - FastDownwardExperiment.__init__(self, path=path, **kwargs) - - if (revisions and not configs) or (not revisions and configs): - raise ValueError( - "please provide either both or none of revisions and configs") - - for rev in revisions: - for config in configs: - self.add_algorithm( - get_algo_nick(rev, config.nick), - get_repo_base(), - rev, - config.component_options, - build_options=config.build_options, - driver_options=config.driver_options) - - self._revisions = revisions - self._configs = configs - - @classmethod - def _is_portfolio(cls, config_nick): - return "fdss" in config_nick - - @classmethod - def get_supported_attributes(cls, config_nick, attributes): - if cls._is_portfolio(config_nick): - return [attr for attr in attributes - if attr in cls.PORTFOLIO_ATTRIBUTES] - return attributes - - def add_absolute_report_step(self, **kwargs): - """Add step that makes an absolute report. - - Absolute reports are useful for experiments that don't compare - revisions. - - The report is written to the experiment evaluation directory. - - All *kwargs* will be passed to the AbsoluteReport class. If the - keyword argument *attributes* is not specified, a default list - of attributes is used. :: - - exp.add_absolute_report_step(attributes=["coverage"]) - - """ - kwargs.setdefault("attributes", self.DEFAULT_TABLE_ATTRIBUTES) - report = AbsoluteReport(**kwargs) - outfile = os.path.join( - self.eval_dir, - get_experiment_name() + "." 
+ report.output_format) - self.add_report(report, outfile=outfile) - self.add_step( - 'publish-absolute-report', subprocess.call, ['publish', outfile]) - - def add_comparison_table_step(self, name="make-comparison-tables", revisions=[], **kwargs): - """Add a step that makes pairwise revision comparisons. - - Create comparative reports for all pairs of Fast Downward - revisions. Each report pairs up the runs of the same config and - lists the two absolute attribute values and their difference - for all attributes in kwargs["attributes"]. - - All *kwargs* will be passed to the CompareConfigsReport class. - If the keyword argument *attributes* is not specified, a - default list of attributes is used. :: - - exp.add_comparison_table_step(attributes=["coverage"]) - - """ - kwargs.setdefault("attributes", self.DEFAULT_TABLE_ATTRIBUTES) - if not revisions: - revisions = self._revisions - - def make_comparison_tables(): - for rev1, rev2 in itertools.combinations(revisions, 2): - compared_configs = [] - for config in self._configs: - config_nick = config.nick - compared_configs.append( - ("%s-%s" % (rev1, config_nick), - "%s-%s" % (rev2, config_nick), - "Diff (%s)" % config_nick)) - report = ComparativeReport(compared_configs, **kwargs) - outfile = os.path.join( - self.eval_dir, - "%s-%s-%s-compare.%s" % ( - self.name, rev1, rev2, report.output_format)) - report(self.eval_dir, outfile) - - def publish_comparison_tables(): - for rev1, rev2 in itertools.combinations(revisions, 2): - outfile = os.path.join( - self.eval_dir, - "%s-%s-%s-compare.html" % (self.name, rev1, rev2)) - subprocess.call(["publish", outfile]) - - self.add_step(name, make_comparison_tables) - self.add_step( - f"publish-{name}", publish_comparison_tables) - - def add_comparison_table_step_for_revision_pairs( - self, revision_pairs, name="make-comparison-tables-for-revision-pairs", **kwargs): - kwargs.setdefault("attributes", self.DEFAULT_TABLE_ATTRIBUTES) - - def make_comparison_tables(): - for rev1, rev2 
in revision_pairs: - compared_configs = [] - for config in self._configs: - config_nick = config.nick - compared_configs.append( - ("%s-%s" % (rev1, config_nick), - "%s-%s" % (rev2, config_nick), - "Diff (%s)" % config_nick)) - report = ComparativeReport(compared_configs, **kwargs) - outfile = os.path.join( - self.eval_dir, - "%s-%s-%s-compare.%s" % ( - self.name, rev1, rev2, report.output_format)) - report(self.eval_dir, outfile) - - def publish_comparison_tables(): - for rev1, rev2 in revision_pairs: - outfile = os.path.join( - self.eval_dir, - "%s-%s-%s-compare.html" % (self.name, rev1, rev2)) - subprocess.call(["publish", outfile]) - - self.add_step(name, make_comparison_tables) - self.add_step( - f"publish-{name}", publish_comparison_tables) - - def add_scatter_plot_step(self, relative=False, attributes=None, additional=[]): - """Add step creating (relative) scatter plots for all revision pairs. - - Create a scatter plot for each combination of attribute, - configuration and revisions pair. If *attributes* is not - specified, a list of common scatter plot attributes is used. - For portfolios all attributes except "cost", "coverage" and - "plan_length" will be ignored. 
:: - - exp.add_scatter_plot_step(attributes=["expansions"]) - - """ - if relative: - scatter_dir = os.path.join(self.eval_dir, "scatter-relative") - step_name = "make-relative-scatter-plots" - else: - scatter_dir = os.path.join(self.eval_dir, "scatter-absolute") - step_name = "make-absolute-scatter-plots" - if attributes is None: - attributes = self.DEFAULT_SCATTER_PLOT_ATTRIBUTES - - def make_scatter_plot(config_nick, rev1, rev2, attribute, config_nick2=None): - name = "-".join([self.name, rev1, rev2, attribute, config_nick]) - if config_nick2 is not None: - name += "-" + config_nick2 - print("Make scatter plot for", name) - algo1 = get_algo_nick(rev1, config_nick) - algo2 = get_algo_nick(rev2, config_nick if config_nick2 is None else config_nick2) - report = ScatterPlotReport( - filter_algorithm=[algo1, algo2], - attributes=[attribute], - relative=relative, - get_category=lambda run1, run2: run1["domain"]) - report( - self.eval_dir, - os.path.join(scatter_dir, rev1 + "-" + rev2, name)) - - def make_scatter_plots(): - for config in self._configs: - for rev1, rev2 in itertools.combinations(self._revisions, 2): - for attribute in self.get_supported_attributes( - config.nick, attributes): - make_scatter_plot(config.nick, rev1, rev2, attribute) - for nick1, nick2, rev1, rev2, attribute in additional: - make_scatter_plot(nick1, rev1, rev2, attribute, config_nick2=nick2) - - self.add_step(step_name, make_scatter_plots) diff --git a/experiments/issue1007/cpdbs-parser.py b/experiments/issue1007/cpdbs-parser.py deleted file mode 100755 index 7e4154cb57..0000000000 --- a/experiments/issue1007/cpdbs-parser.py +++ /dev/null @@ -1,10 +0,0 @@ -#! 
/usr/bin/env python - -from lab.parser import Parser - -parser = Parser() -parser.add_pattern('cpdbs_num_patterns', 'Canonical PDB heuristic number of patterns: (\d+)', required=False, type=int) -parser.add_pattern('cpdbs_total_pdb_size', 'Canonical PDB heuristic total PDB size: (\d+)', required=False, type=int) -parser.add_pattern('cpdbs_computation_time', 'Canonical PDB heuristic computation time: (.+)s', required=False, type=float) - -parser.parse() diff --git a/experiments/issue1007/requirements.txt b/experiments/issue1007/requirements.txt deleted file mode 100644 index 5cc94ec468..0000000000 --- a/experiments/issue1007/requirements.txt +++ /dev/null @@ -1,11 +0,0 @@ -cycler==0.10.0 -kiwisolver==1.3.1 -lab==6.2 -matplotlib==3.3.4 -numpy==1.22.2 -Pillow==9.0.1 -pyparsing==2.4.7 -python-dateutil==2.8.1 -simplejson==3.17.2 -six==1.15.0 -txt2tags==3.7 diff --git a/experiments/issue1007/v1.py b/experiments/issue1007/v1.py deleted file mode 100755 index 6d86b363dc..0000000000 --- a/experiments/issue1007/v1.py +++ /dev/null @@ -1,65 +0,0 @@ -#! 
/usr/bin/env python3 - -import itertools -import os - -from lab.environments import LocalEnvironment, BaselSlurmEnvironment - -from downward.reports.compare import ComparativeReport - -import common_setup -from common_setup import IssueConfig, IssueExperiment - -DIR = os.path.dirname(os.path.abspath(__file__)) -SCRIPT_NAME = os.path.splitext(os.path.basename(__file__))[0] -BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"] -REVISIONS = ["issue1007-base", "issue1007-v1"] -random_seed=2018 -CONFIGS = [ - ### single cegar - IssueConfig('cpdbs-single-cegar-allgoals-regularplans-pdb1m-pdbs10m-t100-s{}'.format(random_seed), ['--search', 'astar(cpdbs(single_cegar(max_refinements=infinity,ignore_goal_violations=false,wildcard_plans=false,initial=all_goals,max_time=100,max_pdb_size=1000000,max_collection_size=10000000,random_seed={},verbosity=silent)))'.format(random_seed)]), - IssueConfig('cpdbs-single-cegar-allgoals-wildcardplans-pdb1m-pdbs10m-t100-s{}'.format(random_seed), ['--search', 'astar(cpdbs(single_cegar(max_refinements=infinity,ignore_goal_violations=false,wildcard_plans=true,initial=all_goals,max_time=100,max_pdb_size=1000000,max_collection_size=10000000,random_seed={},verbosity=silent)))'.format(random_seed)]), - - ### multiple cegar - IssueConfig('cpdbs-multiple-cegar-regularplans-pdb1m-pdbs10m-t100-s{}'.format(random_seed), ['--search', "astar(cpdbs(multiple_cegar(max_refinements=infinity,wildcard_plans=false,max_time=100,max_pdb_size=1000000,max_collection_size=10000000,initial_random_seed={},total_collection_max_size=10000000,total_time_limit=100,stagnation_limit=infinity,blacklist_trigger_time=1,blacklist_on_stagnation=false,verbosity=silent)))".format(random_seed)]), - IssueConfig('cpdbs-multiple-cegar-regularplans-pdb1m-pdbs10m-t100-stag20-s{}'.format(random_seed), ['--search', 
"astar(cpdbs(multiple_cegar(max_refinements=infinity,wildcard_plans=false,max_time=100,max_pdb_size=1000000,max_collection_size=10000000,initial_random_seed={},total_collection_max_size=10000000,total_time_limit=100,stagnation_limit=20,blacklist_trigger_time=1,blacklist_on_stagnation=false,verbosity=silent)))".format(random_seed)]), - IssueConfig('cpdbs-multiple-cegar-regularplans-pdb1m-pdbs10m-t100-blacklist0.75-s{}'.format(random_seed), ['--search', "astar(cpdbs(multiple_cegar(max_refinements=infinity,wildcard_plans=false,max_time=100,max_pdb_size=1000000,max_collection_size=10000000,initial_random_seed={},total_collection_max_size=10000000,total_time_limit=100,stagnation_limit=infinity,blacklist_trigger_time=0.75,blacklist_on_stagnation=true,verbosity=silent)))".format(random_seed)]), - IssueConfig('cpdbs-multiple-cegar-regularplans-pdb1m-pdbs10m-t100-blacklist0.75-stag20-s{}'.format(random_seed), ['--search', "astar(cpdbs(multiple_cegar(max_refinements=infinity,wildcard_plans=false,max_time=100,max_pdb_size=1000000,max_collection_size=10000000,initial_random_seed={},total_collection_max_size=10000000,total_time_limit=100,stagnation_limit=20,blacklist_trigger_time=0.75,blacklist_on_stagnation=true,verbosity=silent)))".format(random_seed)]), - - IssueConfig('cpdbs-multiple-cegar-wildcardplans-pdb1m-pdbs10m-t100-s{}'.format(random_seed), ['--search', "astar(cpdbs(multiple_cegar(max_refinements=infinity,wildcard_plans=true,max_time=100,max_pdb_size=1000000,max_collection_size=10000000,initial_random_seed={},total_collection_max_size=10000000,total_time_limit=100,stagnation_limit=infinity,blacklist_trigger_time=1,blacklist_on_stagnation=false,verbosity=silent)))".format(random_seed)]), - IssueConfig('cpdbs-multiple-cegar-wildcardplans-pdb1m-pdbs10m-t100-stag20-s{}'.format(random_seed), ['--search', 
"astar(cpdbs(multiple_cegar(max_refinements=infinity,wildcard_plans=true,max_time=100,max_pdb_size=1000000,max_collection_size=10000000,initial_random_seed={},total_collection_max_size=10000000,total_time_limit=100,stagnation_limit=20,blacklist_trigger_time=1,blacklist_on_stagnation=false,verbosity=silent)))".format(random_seed)]), - IssueConfig('cpdbs-multiple-cegar-wildcardplans-pdb1m-pdbs10m-t100-blacklist0.75-s{}'.format(random_seed), ['--search', "astar(cpdbs(multiple_cegar(max_refinements=infinity,wildcard_plans=true,max_time=100,max_pdb_size=1000000,max_collection_size=10000000,initial_random_seed={},total_collection_max_size=10000000,total_time_limit=100,stagnation_limit=infinity,blacklist_trigger_time=0.75,blacklist_on_stagnation=true,verbosity=silent)))".format(random_seed)]), - IssueConfig('cpdbs-multiple-cegar-wildcardplans-pdb1m-pdbs10m-t100-blacklist0.75-stag20-s{}'.format(random_seed), ['--search', "astar(cpdbs(multiple_cegar(max_refinements=infinity,wildcard_plans=true,max_time=100,max_pdb_size=1000000,max_collection_size=10000000,initial_random_seed={},total_collection_max_size=10000000,total_time_limit=100,stagnation_limit=20,blacklist_trigger_time=0.75,blacklist_on_stagnation=true,verbosity=silent)))".format(random_seed)]), -] - -SUITE = common_setup.DEFAULT_OPTIMAL_SUITE -ENVIRONMENT = BaselSlurmEnvironment( - email="silvan.sievers@unibas.ch", - partition="infai_2", - export=[], - setup='export PATH=/scicore/soft/apps/binutils/2.32-GCCcore-8.3.0/bin:/scicore/soft/apps/GCCcore/8.3.0/bin:/infai/sieverss/repos/bin:/infai/sieverss/local:/export/soft/lua_lmod/centos7/lmod/lmod/libexec:$PATH\nexport LD_LIBRARY_PATH=/scicore/soft/apps/binutils/2.32-GCCcore-8.3.0/lib:/scicore/soft/apps/zlib/1.2.11-GCCcore-8.3.0/lib:/scicore/soft/apps/GCCcore/8.3.0/lib64:/scicore/soft/apps/GCCcore/8.3.0/lib') - -if common_setup.is_test_run(): - SUITE = IssueExperiment.DEFAULT_TEST_SUITE - ENVIRONMENT = LocalEnvironment(processes=4) - -exp = IssueExperiment( - 
revisions=REVISIONS, - configs=CONFIGS, - environment=ENVIRONMENT, -) -exp.add_suite(BENCHMARKS_DIR, SUITE) - -exp.add_parser(exp.EXITCODE_PARSER) -exp.add_parser(exp.TRANSLATOR_PARSER) -exp.add_parser(exp.SINGLE_SEARCH_PARSER) -exp.add_parser(exp.PLANNER_PARSER) - -exp.add_step('build', exp.build) -exp.add_step('start', exp.start_runs) -exp.add_fetcher(name='fetch') - -#exp.add_absolute_report_step() -exp.add_comparison_table_step() - -exp.run_steps() diff --git a/experiments/issue1007/v10-fixed-seed.py b/experiments/issue1007/v10-fixed-seed.py deleted file mode 100755 index 4a70e52d4e..0000000000 --- a/experiments/issue1007/v10-fixed-seed.py +++ /dev/null @@ -1,73 +0,0 @@ -#! /usr/bin/env python3 - -import itertools -import os - -from lab.environments import LocalEnvironment, BaselSlurmEnvironment - -from downward.reports.compare import ComparativeReport - -import common_setup -from common_setup import IssueConfig, IssueExperiment - -DIR = os.path.dirname(os.path.abspath(__file__)) -SCRIPT_NAME = os.path.splitext(os.path.basename(__file__))[0] -BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"] -REVISIONS = ["issue1007-v8c", "issue1007-v10"] -random_seed=2018 -MAX_TIME=100 -if common_setup.is_test_run(): - MAX_TIME=2 -CONFIGS = [ - ### ipdb - IssueConfig(f'cpdbs-hillclimbing-pdb1m-pdbs10m-t100-s{random_seed}', ['--search', f'astar(cpdbs(hillclimbing(max_time={MAX_TIME},pdb_max_size=1000000,collection_max_size=10000000,random_seed={random_seed})),verbosity=silent)']), - - ### single cegar - IssueConfig(f'cpdbs-singlecegar-regularplans-pdb1m-pdbs10m-t100-s{random_seed}', ['--search', f'astar(cpdbs(single_cegar(wildcard_plans=false,max_time={MAX_TIME},max_pdb_size=1000000,max_collection_size=10000000,random_seed={random_seed},verbosity=normal)),verbosity=silent)']), - IssueConfig(f'cpdbs-singlecegar-wildcardplans-pdb1m-pdbs10m-t100-s{random_seed}', ['--search', 
f'astar(cpdbs(single_cegar(wildcard_plans=true,max_time={MAX_TIME},max_pdb_size=1000000,max_collection_size=10000000,random_seed={random_seed},verbosity=normal)),verbosity=silent)']), - - ### multiple cegar - IssueConfig(f'cpdbs-multiplecegar-regularplans-pdb1m-pdbs10m-t100-s{random_seed}', ['--search', f"astar(cpdbs(multiple_cegar(wildcard_plans=false,max_time=100,max_pdb_size=1000000,max_collection_size=10000000,random_seed={random_seed},max_collection_size=10000000,total_max_time={MAX_TIME},stagnation_limit=infinity,blacklist_trigger_percentage=1,blacklist_on_stagnation=false,verbosity=normal)),verbosity=silent)"]), - IssueConfig(f'cpdbs-multiplecegar-regularplans-pdb1m-pdbs10m-t100-blacklist0.75-stag20-s{random_seed}', ['--search', f"astar(cpdbs(multiple_cegar(wildcard_plans=false,max_time=100,max_pdb_size=1000000,max_collection_size=10000000,random_seed={random_seed},max_collection_size=10000000,total_max_time={MAX_TIME},stagnation_limit=20,blacklist_trigger_percentage=0.75,blacklist_on_stagnation=true,verbosity=normal)),verbosity=silent)"]), - - IssueConfig(f'cpdbs-multiplecegar-wildcardplans-pdb1m-pdbs10m-t100-s{random_seed}', ['--search', f"astar(cpdbs(multiple_cegar(wildcard_plans=true,max_time=100,max_pdb_size=1000000,max_collection_size=10000000,random_seed={random_seed},max_collection_size=10000000,total_max_time={MAX_TIME},stagnation_limit=infinity,blacklist_trigger_percentage=1,blacklist_on_stagnation=false,verbosity=normal)),verbosity=silent)"]), - IssueConfig(f'cpdbs-multiplecegar-wildcardplans-pdb1m-pdbs10m-t100-blacklist0.75-stag20-s{random_seed}', ['--search', f"astar(cpdbs(multiple_cegar(wildcard_plans=true,max_time=100,max_pdb_size=1000000,max_collection_size=10000000,random_seed={random_seed},max_collection_size=10000000,total_max_time={MAX_TIME},stagnation_limit=20,blacklist_trigger_percentage=0.75,blacklist_on_stagnation=true,verbosity=normal)),verbosity=silent)"]), -] - -SUITE = common_setup.DEFAULT_OPTIMAL_SUITE -ENVIRONMENT = 
BaselSlurmEnvironment( - email="silvan.sievers@unibas.ch", - partition="infai_2", - export=[], - # paths obtained via: - # module purge - # module -q load CMake/3.15.3-GCCcore-8.3.0 - # module -q load GCC/8.3.0 - # echo $PATH - # echo $LD_LIBRARY_PATH - setup='export PATH=/scicore/soft/apps/binutils/2.32-GCCcore-8.3.0/bin:/scicore/soft/apps/CMake/3.15.3-GCCcore-8.3.0/bin:/scicore/soft/apps/cURL/7.66.0-GCCcore-8.3.0/bin:/scicore/soft/apps/bzip2/1.0.8-GCCcore-8.3.0/bin:/scicore/soft/apps/ncurses/6.1-GCCcore-8.3.0/bin:/scicore/soft/apps/GCCcore/8.3.0/bin:/infai/sieverss/repos/bin:/infai/sieverss/local:/export/soft/lua_lmod/centos7/lmod/lmod/libexec:/usr/local/bin:/usr/bin:/usr/local/sbin:/usr/sbin:$PATH\nexport LD_LIBRARY_PATH=/scicore/soft/apps/binutils/2.32-GCCcore-8.3.0/lib:/scicore/soft/apps/cURL/7.66.0-GCCcore-8.3.0/lib:/scicore/soft/apps/bzip2/1.0.8-GCCcore-8.3.0/lib:/scicore/soft/apps/zlib/1.2.11-GCCcore-8.3.0/lib:/scicore/soft/apps/ncurses/6.1-GCCcore-8.3.0/lib:/scicore/soft/apps/GCCcore/8.3.0/lib64:/scicore/soft/apps/GCCcore/8.3.0/lib') - -if common_setup.is_test_run(): - SUITE = IssueExperiment.DEFAULT_TEST_SUITE - ENVIRONMENT = LocalEnvironment(processes=4) - -exp = IssueExperiment( - revisions=REVISIONS, - configs=CONFIGS, - environment=ENVIRONMENT, -) -exp.add_suite(BENCHMARKS_DIR, SUITE) - -exp.add_parser(exp.EXITCODE_PARSER) -exp.add_parser(exp.TRANSLATOR_PARSER) -exp.add_parser(exp.SINGLE_SEARCH_PARSER) -exp.add_parser(exp.PLANNER_PARSER) - -exp.add_step('build', exp.build) -exp.add_step('start', exp.start_runs) -exp.add_fetcher(name='fetch') - -exp.add_absolute_report_step() -exp.add_comparison_table_step() - -exp.run_steps() diff --git a/experiments/issue1007/v10-multiple-seeds.py b/experiments/issue1007/v10-multiple-seeds.py deleted file mode 100755 index 48e868dfee..0000000000 --- a/experiments/issue1007/v10-multiple-seeds.py +++ /dev/null @@ -1,93 +0,0 @@ -#! 
/usr/bin/env python3 - -import itertools -import os - -from lab.environments import LocalEnvironment, BaselSlurmEnvironment - -from downward.reports.compare import ComparativeReport - -import common_setup -from common_setup import IssueConfig, IssueExperiment -from average_report import AverageAlgorithmReport - -DIR = os.path.dirname(os.path.abspath(__file__)) -SCRIPT_NAME = os.path.splitext(os.path.basename(__file__))[0] -BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"] -REVISIONS = ["issue1007-v8c", "issue1007-v10"] -MAX_TIME=20 -if common_setup.is_test_run(): - MAX_TIME=1 -CONFIGS = [] -for random_seed in range(2018, 2028): - ### ipdb - CONFIGS.append(IssueConfig(f'cpdbs-hillclimbing-pdb1m-pdbs10m-t20-s{random_seed}', ['--search', f'astar(cpdbs(hillclimbing(max_time={MAX_TIME},pdb_max_size=1000000,collection_max_size=10000000,random_seed={random_seed})),verbosity=silent)'], driver_options=['--search-time-limit', '5m'])), - - ### single cegar - CONFIGS.append(IssueConfig(f'cpdbs-singlecegar-regularplans-pdb1m-pdbs10m-t20-s{random_seed}', ['--search', f'astar(cpdbs(single_cegar(wildcard_plans=false,max_time={MAX_TIME},max_pdb_size=1000000,max_collection_size=10000000,random_seed={random_seed},verbosity=normal)),verbosity=silent)'], driver_options=['--search-time-limit', '5m'])) - - ### multiple cegar - CONFIGS.append(IssueConfig(f'cpdbs-multiplecegar-wildcardplans-pdb1m-pdbs10m-t20-blacklist0.75-stag4-s{random_seed}', ['--search', f'astar(cpdbs(multiple_cegar(wildcard_plans=true,max_time=100,max_pdb_size=1000000,max_collection_size=10000000,random_seed={random_seed},total_max_time={MAX_TIME},stagnation_limit=4,blacklist_trigger_percentage=0.75,blacklist_on_stagnation=true,verbosity=normal)),verbosity=silent)'], driver_options=['--search-time-limit', '5m'])) - -SUITE = common_setup.DEFAULT_OPTIMAL_SUITE -ENVIRONMENT = BaselSlurmEnvironment( - email="silvan.sievers@unibas.ch", - partition="infai_2", - export=[], - # paths obtained via: - # module purge - # module 
-q load CMake/3.15.3-GCCcore-8.3.0 - # module -q load GCC/8.3.0 - # echo $PATH - # echo $LD_LIBRARY_PATH - setup='export PATH=/scicore/soft/apps/binutils/2.32-GCCcore-8.3.0/bin:/scicore/soft/apps/CMake/3.15.3-GCCcore-8.3.0/bin:/scicore/soft/apps/cURL/7.66.0-GCCcore-8.3.0/bin:/scicore/soft/apps/bzip2/1.0.8-GCCcore-8.3.0/bin:/scicore/soft/apps/ncurses/6.1-GCCcore-8.3.0/bin:/scicore/soft/apps/GCCcore/8.3.0/bin:/infai/sieverss/repos/bin:/infai/sieverss/local:/export/soft/lua_lmod/centos7/lmod/lmod/libexec:/usr/local/bin:/usr/bin:/usr/local/sbin:/usr/sbin:$PATH\nexport LD_LIBRARY_PATH=/scicore/soft/apps/binutils/2.32-GCCcore-8.3.0/lib:/scicore/soft/apps/cURL/7.66.0-GCCcore-8.3.0/lib:/scicore/soft/apps/bzip2/1.0.8-GCCcore-8.3.0/lib:/scicore/soft/apps/zlib/1.2.11-GCCcore-8.3.0/lib:/scicore/soft/apps/ncurses/6.1-GCCcore-8.3.0/lib:/scicore/soft/apps/GCCcore/8.3.0/lib64:/scicore/soft/apps/GCCcore/8.3.0/lib') - -if common_setup.is_test_run(): - SUITE = IssueExperiment.DEFAULT_TEST_SUITE - ENVIRONMENT = LocalEnvironment(processes=4) - -exp = IssueExperiment( - revisions=REVISIONS, - configs=CONFIGS, - environment=ENVIRONMENT, -) -exp.add_suite(BENCHMARKS_DIR, SUITE) - -exp.add_parser(exp.EXITCODE_PARSER) -exp.add_parser(exp.TRANSLATOR_PARSER) -exp.add_parser(exp.SINGLE_SEARCH_PARSER) -exp.add_parser(exp.PLANNER_PARSER) - -exp.add_step('build', exp.build) -exp.add_step('start', exp.start_runs) -exp.add_fetcher(name='fetch') - -exp.add_absolute_report_step(attributes=['coverage']) - -exp.add_report( - AverageAlgorithmReport( - algo_name_suffixes=['-s{}'.format(seed) for seed in range(2018,2028)], - attributes=['coverage', 'search_time', 'total_time', - 'expansions_until_last_jump', 'score_search_time', - 'score_total_time', 'score_memory'], - ), - outfile=os.path.join(exp.eval_dir, "average", "properties"), - name="report-average" -) - -exp.add_fetcher(os.path.join(exp.eval_dir, 'average'), merge=True) -exp._configs = [ - IssueConfig('cpdbs-hillclimbing-pdb1m-pdbs10m-t20', []), 
- IssueConfig('cpdbs-singlecegar-regularplans-pdb1m-pdbs10m-t20', []), - IssueConfig('cpdbs-multiplecegar-wildcardplans-pdb1m-pdbs10m-t20-blacklist0.75-stag4', []), -] -exp.add_comparison_table_step_for_revision_pairs( - revision_pairs=[ - ("issue1007-v8c", "issue1007-v10"), - ], - attributes=['coverage', 'search_time', 'total_time', - 'expansions_until_last_jump', 'score_search_time', - 'score_total_time', 'score_memory'], -) - -exp.run_steps() diff --git a/experiments/issue1007/v11-v12-fixed-seed.py b/experiments/issue1007/v11-v12-fixed-seed.py deleted file mode 100755 index 4b52ae2e1d..0000000000 --- a/experiments/issue1007/v11-v12-fixed-seed.py +++ /dev/null @@ -1,79 +0,0 @@ -#! /usr/bin/env python3 - -import itertools -import os - -from lab.environments import LocalEnvironment, BaselSlurmEnvironment - -from downward.reports.compare import ComparativeReport - -import common_setup -from common_setup import IssueConfig, IssueExperiment - -DIR = os.path.dirname(os.path.abspath(__file__)) -SCRIPT_NAME = os.path.splitext(os.path.basename(__file__))[0] -BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"] -REVISIONS = ["issue1007-v8c", "issue1007-v10", "issue1007-v11", "issue1007-v12"] -random_seed=2018 -MAX_TIME=100 -if common_setup.is_test_run(): - MAX_TIME=2 -CONFIGS = [ - ### ipdb - IssueConfig(f'cpdbs-hillclimbing-pdb1m-pdbs10m-t100-s{random_seed}', ['--search', f'astar(cpdbs(hillclimbing(max_time={MAX_TIME},pdb_max_size=1000000,collection_max_size=10000000,random_seed={random_seed})),verbosity=silent)']), - - ### single cegar - IssueConfig(f'cpdbs-singlecegar-regularplans-pdb1m-pdbs10m-t100-s{random_seed}', ['--search', f'astar(cpdbs(single_cegar(wildcard_plans=false,max_time={MAX_TIME},max_pdb_size=1000000,max_collection_size=10000000,random_seed={random_seed},verbosity=normal)),verbosity=silent)']), - IssueConfig(f'cpdbs-singlecegar-wildcardplans-pdb1m-pdbs10m-t100-s{random_seed}', ['--search', 
f'astar(cpdbs(single_cegar(wildcard_plans=true,max_time={MAX_TIME},max_pdb_size=1000000,max_collection_size=10000000,random_seed={random_seed},verbosity=normal)),verbosity=silent)']), - - ### multiple cegar - IssueConfig(f'cpdbs-multiplecegar-regularplans-pdb1m-pdbs10m-t100-s{random_seed}', ['--search', f"astar(cpdbs(multiple_cegar(wildcard_plans=false,max_time=100,max_pdb_size=1000000,max_collection_size=10000000,random_seed={random_seed},max_collection_size=10000000,total_max_time={MAX_TIME},stagnation_limit=infinity,blacklist_trigger_percentage=1,blacklist_on_stagnation=false,verbosity=normal)),verbosity=silent)"]), - IssueConfig(f'cpdbs-multiplecegar-regularplans-pdb1m-pdbs10m-t100-blacklist0.75-stag20-s{random_seed}', ['--search', f"astar(cpdbs(multiple_cegar(wildcard_plans=false,max_time=100,max_pdb_size=1000000,max_collection_size=10000000,random_seed={random_seed},max_collection_size=10000000,total_max_time={MAX_TIME},stagnation_limit=20,blacklist_trigger_percentage=0.75,blacklist_on_stagnation=true,verbosity=normal)),verbosity=silent)"]), - - IssueConfig(f'cpdbs-multiplecegar-wildcardplans-pdb1m-pdbs10m-t100-s{random_seed}', ['--search', f"astar(cpdbs(multiple_cegar(wildcard_plans=true,max_time=100,max_pdb_size=1000000,max_collection_size=10000000,random_seed={random_seed},max_collection_size=10000000,total_max_time={MAX_TIME},stagnation_limit=infinity,blacklist_trigger_percentage=1,blacklist_on_stagnation=false,verbosity=normal)),verbosity=silent)"]), - IssueConfig(f'cpdbs-multiplecegar-wildcardplans-pdb1m-pdbs10m-t100-blacklist0.75-stag20-s{random_seed}', ['--search', f"astar(cpdbs(multiple_cegar(wildcard_plans=true,max_time=100,max_pdb_size=1000000,max_collection_size=10000000,random_seed={random_seed},max_collection_size=10000000,total_max_time={MAX_TIME},stagnation_limit=20,blacklist_trigger_percentage=0.75,blacklist_on_stagnation=true,verbosity=normal)),verbosity=silent)"]), -] - -SUITE = common_setup.DEFAULT_OPTIMAL_SUITE -ENVIRONMENT = 
BaselSlurmEnvironment( - email="silvan.sievers@unibas.ch", - partition="infai_2", - export=[], - # paths obtained via: - # module purge - # module -q load CMake/3.15.3-GCCcore-8.3.0 - # module -q load GCC/8.3.0 - # echo $PATH - # echo $LD_LIBRARY_PATH - setup='export PATH=/scicore/soft/apps/binutils/2.32-GCCcore-8.3.0/bin:/scicore/soft/apps/CMake/3.15.3-GCCcore-8.3.0/bin:/scicore/soft/apps/cURL/7.66.0-GCCcore-8.3.0/bin:/scicore/soft/apps/bzip2/1.0.8-GCCcore-8.3.0/bin:/scicore/soft/apps/ncurses/6.1-GCCcore-8.3.0/bin:/scicore/soft/apps/GCCcore/8.3.0/bin:/infai/sieverss/repos/bin:/infai/sieverss/local:/export/soft/lua_lmod/centos7/lmod/lmod/libexec:/usr/local/bin:/usr/bin:/usr/local/sbin:/usr/sbin:$PATH\nexport LD_LIBRARY_PATH=/scicore/soft/apps/binutils/2.32-GCCcore-8.3.0/lib:/scicore/soft/apps/cURL/7.66.0-GCCcore-8.3.0/lib:/scicore/soft/apps/bzip2/1.0.8-GCCcore-8.3.0/lib:/scicore/soft/apps/zlib/1.2.11-GCCcore-8.3.0/lib:/scicore/soft/apps/ncurses/6.1-GCCcore-8.3.0/lib:/scicore/soft/apps/GCCcore/8.3.0/lib64:/scicore/soft/apps/GCCcore/8.3.0/lib') - -if common_setup.is_test_run(): - SUITE = IssueExperiment.DEFAULT_TEST_SUITE - ENVIRONMENT = LocalEnvironment(processes=4) - -exp = IssueExperiment( - revisions=REVISIONS, - configs=CONFIGS, - environment=ENVIRONMENT, -) -exp.add_suite(BENCHMARKS_DIR, SUITE) - -exp.add_parser(exp.EXITCODE_PARSER) -exp.add_parser(exp.TRANSLATOR_PARSER) -exp.add_parser(exp.SINGLE_SEARCH_PARSER) -exp.add_parser(exp.PLANNER_PARSER) - -exp.add_step('build', exp.build) -exp.add_step('start', exp.start_runs) -exp.add_fetcher(name='fetch') - -exp.add_absolute_report_step() -exp.add_comparison_table_step_for_revision_pairs( - revision_pairs=[ - ("issue1007-v10", "issue1007-v11"), - ("issue1007-v11", "issue1007-v12"), - ("issue1007-v8c", "issue1007-v12"), - ], -) - -exp.run_steps() diff --git a/experiments/issue1007/v11-v12-multiple-seeds.py b/experiments/issue1007/v11-v12-multiple-seeds.py deleted file mode 100755 index fd1de866a9..0000000000 --- 
a/experiments/issue1007/v11-v12-multiple-seeds.py +++ /dev/null @@ -1,93 +0,0 @@ -#! /usr/bin/env python3 - -import itertools -import os - -from lab.environments import LocalEnvironment, BaselSlurmEnvironment - -from downward.reports.compare import ComparativeReport - -import common_setup -from common_setup import IssueConfig, IssueExperiment -from average_report import AverageAlgorithmReport - -DIR = os.path.dirname(os.path.abspath(__file__)) -SCRIPT_NAME = os.path.splitext(os.path.basename(__file__))[0] -BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"] -REVISIONS = ["issue1007-v11", "issue1007-v12"] -MAX_TIME=20 -if common_setup.is_test_run(): - MAX_TIME=1 -CONFIGS = [] -for random_seed in range(2018, 2028): - ### ipdb - CONFIGS.append(IssueConfig(f'cpdbs-hillclimbing-pdb1m-pdbs10m-t20-s{random_seed}', ['--search', f'astar(cpdbs(hillclimbing(max_time={MAX_TIME},pdb_max_size=1000000,collection_max_size=10000000,random_seed={random_seed})),verbosity=silent)'], driver_options=['--search-time-limit', '5m'])), - - ### single cegar - CONFIGS.append(IssueConfig(f'cpdbs-singlecegar-regularplans-pdb1m-pdbs10m-t20-s{random_seed}', ['--search', f'astar(cpdbs(single_cegar(wildcard_plans=false,max_time={MAX_TIME},max_pdb_size=1000000,max_collection_size=10000000,random_seed={random_seed},verbosity=normal)),verbosity=silent)'], driver_options=['--search-time-limit', '5m'])) - - ### multiple cegar - CONFIGS.append(IssueConfig(f'cpdbs-multiplecegar-wildcardplans-pdb1m-pdbs10m-t20-blacklist0.75-stag4-s{random_seed}', ['--search', f'astar(cpdbs(multiple_cegar(wildcard_plans=true,max_time=100,max_pdb_size=1000000,max_collection_size=10000000,random_seed={random_seed},total_max_time={MAX_TIME},stagnation_limit=4,blacklist_trigger_percentage=0.75,blacklist_on_stagnation=true,verbosity=normal)),verbosity=silent)'], driver_options=['--search-time-limit', '5m'])) - -SUITE = common_setup.DEFAULT_OPTIMAL_SUITE -ENVIRONMENT = BaselSlurmEnvironment( - email="silvan.sievers@unibas.ch", - 
partition="infai_2", - export=[], - # paths obtained via: - # module purge - # module -q load CMake/3.15.3-GCCcore-8.3.0 - # module -q load GCC/8.3.0 - # echo $PATH - # echo $LD_LIBRARY_PATH - setup='export PATH=/scicore/soft/apps/binutils/2.32-GCCcore-8.3.0/bin:/scicore/soft/apps/CMake/3.15.3-GCCcore-8.3.0/bin:/scicore/soft/apps/cURL/7.66.0-GCCcore-8.3.0/bin:/scicore/soft/apps/bzip2/1.0.8-GCCcore-8.3.0/bin:/scicore/soft/apps/ncurses/6.1-GCCcore-8.3.0/bin:/scicore/soft/apps/GCCcore/8.3.0/bin:/infai/sieverss/repos/bin:/infai/sieverss/local:/export/soft/lua_lmod/centos7/lmod/lmod/libexec:/usr/local/bin:/usr/bin:/usr/local/sbin:/usr/sbin:$PATH\nexport LD_LIBRARY_PATH=/scicore/soft/apps/binutils/2.32-GCCcore-8.3.0/lib:/scicore/soft/apps/cURL/7.66.0-GCCcore-8.3.0/lib:/scicore/soft/apps/bzip2/1.0.8-GCCcore-8.3.0/lib:/scicore/soft/apps/zlib/1.2.11-GCCcore-8.3.0/lib:/scicore/soft/apps/ncurses/6.1-GCCcore-8.3.0/lib:/scicore/soft/apps/GCCcore/8.3.0/lib64:/scicore/soft/apps/GCCcore/8.3.0/lib') - -if common_setup.is_test_run(): - SUITE = IssueExperiment.DEFAULT_TEST_SUITE - ENVIRONMENT = LocalEnvironment(processes=4) - -exp = IssueExperiment( - revisions=REVISIONS, - configs=CONFIGS, - environment=ENVIRONMENT, -) -exp.add_suite(BENCHMARKS_DIR, SUITE) - -exp.add_parser(exp.EXITCODE_PARSER) -exp.add_parser(exp.TRANSLATOR_PARSER) -exp.add_parser(exp.SINGLE_SEARCH_PARSER) -exp.add_parser(exp.PLANNER_PARSER) - -exp.add_step('build', exp.build) -exp.add_step('start', exp.start_runs) -exp.add_fetcher(name='fetch') - -exp.add_absolute_report_step(attributes=['coverage']) - -exp.add_report( - AverageAlgorithmReport( - algo_name_suffixes=['-s{}'.format(seed) for seed in range(2018,2028)], - attributes=['coverage', 'search_time', 'total_time', - 'expansions_until_last_jump', 'score_search_time', - 'score_total_time', 'score_memory'], - ), - outfile=os.path.join(exp.eval_dir, "average", "properties"), - name="report-average" -) - -exp.add_fetcher(os.path.join(exp.eval_dir, 'average'), 
merge=True) -exp._configs = [ - IssueConfig('cpdbs-hillclimbing-pdb1m-pdbs10m-t20', []), - IssueConfig('cpdbs-singlecegar-regularplans-pdb1m-pdbs10m-t20', []), - IssueConfig('cpdbs-multiplecegar-wildcardplans-pdb1m-pdbs10m-t20-blacklist0.75-stag4', []), -] -exp.add_comparison_table_step_for_revision_pairs( - revision_pairs=[ - ("issue1007-v11", "issue1007-v12"), - ], - attributes=['coverage', 'search_time', 'total_time', - 'expansions_until_last_jump', 'score_search_time', - 'score_total_time', 'score_memory'], -) - -exp.run_steps() diff --git a/experiments/issue1007/v13-fixed-seed.py b/experiments/issue1007/v13-fixed-seed.py deleted file mode 100755 index 04990e49ce..0000000000 --- a/experiments/issue1007/v13-fixed-seed.py +++ /dev/null @@ -1,143 +0,0 @@ -#! /usr/bin/env python3 - -import itertools -import os -import subprocess - -from lab.environments import LocalEnvironment, BaselSlurmEnvironment -from lab.reports import Attribute - -from downward.reports.compare import ComparativeReport - -import common_setup -from common_setup import IssueConfig, IssueExperiment - -DIR = os.path.dirname(os.path.abspath(__file__)) -SCRIPT_NAME = os.path.splitext(os.path.basename(__file__))[0] -BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"] -REVISIONS = ["issue1007-v8c", "issue1007-v13"] -random_seed=2018 -MAX_TIME=100 -if common_setup.is_test_run(): - MAX_TIME=2 -CONFIGS = [ - ### ipdb - IssueConfig(f'cpdbs-hillclimbing-pdb1m-pdbs10m-t100-s{random_seed}', ['--search', f'astar(cpdbs(hillclimbing(max_time={MAX_TIME},pdb_max_size=1000000,collection_max_size=10000000,random_seed={random_seed})),verbosity=silent)']), - - ### single cegar - IssueConfig(f'cpdbs-singlecegar-regularplans-pdb1m-pdbs10m-t100-s{random_seed}', ['--search', f'astar(cpdbs(single_cegar(wildcard_plans=false,max_time={MAX_TIME},max_pdb_size=1000000,max_collection_size=10000000,random_seed={random_seed},verbosity=normal)),verbosity=silent)']), - 
IssueConfig(f'cpdbs-singlecegar-wildcardplans-pdb1m-pdbs10m-t100-s{random_seed}', ['--search', f'astar(cpdbs(single_cegar(wildcard_plans=true,max_time={MAX_TIME},max_pdb_size=1000000,max_collection_size=10000000,random_seed={random_seed},verbosity=normal)),verbosity=silent)']), - - ### multiple cegar - IssueConfig(f'cpdbs-multiplecegar-regularplans-pdb1m-pdbs10m-t100-s{random_seed}', ['--search', f"astar(cpdbs(multiple_cegar(wildcard_plans=false,max_time=100,max_pdb_size=1000000,max_collection_size=10000000,random_seed={random_seed},max_collection_size=10000000,total_max_time={MAX_TIME},stagnation_limit=infinity,blacklist_trigger_percentage=1,blacklist_on_stagnation=false,verbosity=normal)),verbosity=silent)"]), - IssueConfig(f'cpdbs-multiplecegar-regularplans-pdb1m-pdbs10m-t100-blacklist0.75-stag20-s{random_seed}', ['--search', f"astar(cpdbs(multiple_cegar(wildcard_plans=false,max_time=100,max_pdb_size=1000000,max_collection_size=10000000,random_seed={random_seed},max_collection_size=10000000,total_max_time={MAX_TIME},stagnation_limit=20,blacklist_trigger_percentage=0.75,blacklist_on_stagnation=true,verbosity=normal)),verbosity=silent)"]), - - IssueConfig(f'cpdbs-multiplecegar-wildcardplans-pdb1m-pdbs10m-t100-s{random_seed}', ['--search', f"astar(cpdbs(multiple_cegar(wildcard_plans=true,max_time=100,max_pdb_size=1000000,max_collection_size=10000000,random_seed={random_seed},max_collection_size=10000000,total_max_time={MAX_TIME},stagnation_limit=infinity,blacklist_trigger_percentage=1,blacklist_on_stagnation=false,verbosity=normal)),verbosity=silent)"]), - IssueConfig(f'cpdbs-multiplecegar-wildcardplans-pdb1m-pdbs10m-t100-blacklist0.75-stag20-s{random_seed}', ['--search', 
f"astar(cpdbs(multiple_cegar(wildcard_plans=true,max_time=100,max_pdb_size=1000000,max_collection_size=10000000,random_seed={random_seed},max_collection_size=10000000,total_max_time={MAX_TIME},stagnation_limit=20,blacklist_trigger_percentage=0.75,blacklist_on_stagnation=true,verbosity=normal)),verbosity=silent)"]), -] - -SUITE = common_setup.DEFAULT_OPTIMAL_SUITE -ENVIRONMENT = BaselSlurmEnvironment( - email="silvan.sievers@unibas.ch", - partition="infai_2", - export=[], - # paths obtained via: - # module purge - # module -q load CMake/3.15.3-GCCcore-8.3.0 - # module -q load GCC/8.3.0 - # echo $PATH - # echo $LD_LIBRARY_PATH - setup='export PATH=/scicore/soft/apps/binutils/2.32-GCCcore-8.3.0/bin:/scicore/soft/apps/CMake/3.15.3-GCCcore-8.3.0/bin:/scicore/soft/apps/cURL/7.66.0-GCCcore-8.3.0/bin:/scicore/soft/apps/bzip2/1.0.8-GCCcore-8.3.0/bin:/scicore/soft/apps/ncurses/6.1-GCCcore-8.3.0/bin:/scicore/soft/apps/GCCcore/8.3.0/bin:/infai/sieverss/repos/bin:/infai/sieverss/local:/export/soft/lua_lmod/centos7/lmod/lmod/libexec:/usr/local/bin:/usr/bin:/usr/local/sbin:/usr/sbin:$PATH\nexport LD_LIBRARY_PATH=/scicore/soft/apps/binutils/2.32-GCCcore-8.3.0/lib:/scicore/soft/apps/cURL/7.66.0-GCCcore-8.3.0/lib:/scicore/soft/apps/bzip2/1.0.8-GCCcore-8.3.0/lib:/scicore/soft/apps/zlib/1.2.11-GCCcore-8.3.0/lib:/scicore/soft/apps/ncurses/6.1-GCCcore-8.3.0/lib:/scicore/soft/apps/GCCcore/8.3.0/lib64:/scicore/soft/apps/GCCcore/8.3.0/lib') - -if common_setup.is_test_run(): - SUITE = IssueExperiment.DEFAULT_TEST_SUITE - ENVIRONMENT = LocalEnvironment(processes=4) - -exp = IssueExperiment( - revisions=REVISIONS, - configs=CONFIGS, - environment=ENVIRONMENT, -) -exp.add_suite(BENCHMARKS_DIR, SUITE) - -exp.add_parser(exp.EXITCODE_PARSER) -exp.add_parser(exp.TRANSLATOR_PARSER) -exp.add_parser(exp.SINGLE_SEARCH_PARSER) -exp.add_parser(exp.PLANNER_PARSER) - -exp.add_step('build', exp.build) -exp.add_step('start', exp.start_runs) -exp.add_fetcher(name='fetch') -exp.add_parser('cpdbs-parser.py') 
-exp.add_parser('cegar-parser.py') - -cpdbs_num_patterns = Attribute('cpdbs_num_patterns', absolute=False, min_wins=True) -cpdbs_total_pdb_size = Attribute('cpdbs_total_pdb_size', absolute=False, min_wins=True) -cpdbs_computation_time = Attribute('cpdbs_computation_time', absolute=False, min_wins=True) -cegar_num_iterations = Attribute('cegar_num_iterations', absolute=False, min_wins=True) -cegar_num_patterns = Attribute('cegar_num_patterns', absolute=False, min_wins=True) -cegar_total_pdb_size = Attribute('cegar_total_pdb_size', absolute=False, min_wins=True) -cegar_computation_time = Attribute('cegar_computation_time', absolute=False, min_wins=True) - -attributes = [ - cpdbs_num_patterns, - cpdbs_total_pdb_size, - cpdbs_computation_time, - cegar_num_iterations, - cegar_num_patterns, - cegar_total_pdb_size, - cegar_computation_time, -] -attributes.extend(exp.DEFAULT_TABLE_ATTRIBUTES) -attributes.append('initial_h_value') - -exp.add_parse_again_step() - -exp.add_absolute_report_step(attributes=attributes) - -outfile = os.path.join( - exp.eval_dir, - f"{exp.name}-{REVISIONS[0]}-{REVISIONS[1]}-compare-hillclimbing.html") -name="make-comparison-tables-hillclimbing" -exp.add_report( - ComparativeReport( - [ - (f'{REVISIONS[0]}-cpdbs-hillclimbing-pdb1m-pdbs10m-t100-s{random_seed}', f'{REVISIONS[1]}-cpdbs-hillclimbing-pdb1m-pdbs10m-t100-s{random_seed}'), - ], - attributes=attributes, - ), - name=name, - outfile=outfile, -) -exp.add_step( - f"publish-{name}", - subprocess.call, - ["publish", outfile], -) - -outfile = os.path.join( - exp.eval_dir, - f"{exp.name}-{REVISIONS[0]}-{REVISIONS[1]}-compare-cegar.html") -name="make-comparison-tables-cegar" -exp.add_report( - ComparativeReport( - [ - (f'{REVISIONS[0]}-cpdbs-singlecegar-regularplans-pdb1m-pdbs10m-t100-s{random_seed}', f'{REVISIONS[1]}-cpdbs-singlecegar-regularplans-pdb1m-pdbs10m-t100-s{random_seed}'), - (f'{REVISIONS[0]}-cpdbs-singlecegar-wildcardplans-pdb1m-pdbs10m-t100-s{random_seed}', 
f'{REVISIONS[1]}-cpdbs-singlecegar-wildcardplans-pdb1m-pdbs10m-t100-s{random_seed}'), - (f'{REVISIONS[0]}-cpdbs-multiplecegar-regularplans-pdb1m-pdbs10m-t100-s{random_seed}', f'{REVISIONS[1]}-cpdbs-multiplecegar-regularplans-pdb1m-pdbs10m-t100-s{random_seed}'), - (f'{REVISIONS[0]}-cpdbs-multiplecegar-regularplans-pdb1m-pdbs10m-t100-blacklist0.75-stag20-s{random_seed}', f'{REVISIONS[1]}-cpdbs-multiplecegar-regularplans-pdb1m-pdbs10m-t100-blacklist0.75-stag20-s{random_seed}'), - (f'{REVISIONS[0]}-cpdbs-multiplecegar-wildcardplans-pdb1m-pdbs10m-t100-s{random_seed}', f'{REVISIONS[1]}-cpdbs-multiplecegar-wildcardplans-pdb1m-pdbs10m-t100-s{random_seed}'), - (f'{REVISIONS[0]}-cpdbs-multiplecegar-wildcardplans-pdb1m-pdbs10m-t100-blacklist0.75-stag20-s{random_seed}', f'{REVISIONS[1]}-cpdbs-multiplecegar-wildcardplans-pdb1m-pdbs10m-t100-blacklist0.75-stag20-s{random_seed}'), - ], - attributes=attributes, - ), - name=name, - outfile=outfile, -) -exp.add_step( - f"publish-{name}", - subprocess.call, - ["publish", outfile], -) - -exp.run_steps() diff --git a/experiments/issue1007/v13-multiple-seeds.py b/experiments/issue1007/v13-multiple-seeds.py deleted file mode 100755 index 5dfeb9b38b..0000000000 --- a/experiments/issue1007/v13-multiple-seeds.py +++ /dev/null @@ -1,105 +0,0 @@ -#! 
/usr/bin/env python3 - -import itertools -import os - -from lab.environments import LocalEnvironment, BaselSlurmEnvironment -from lab.reports import Attribute - -from downward.reports.compare import ComparativeReport - -import common_setup -from common_setup import IssueConfig, IssueExperiment -from average_report import AverageAlgorithmReport - -DIR = os.path.dirname(os.path.abspath(__file__)) -SCRIPT_NAME = os.path.splitext(os.path.basename(__file__))[0] -BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"] -REVISIONS = ["issue1007-v8c", "issue1007-v13"] -MAX_TIME=20 -if common_setup.is_test_run(): - MAX_TIME=1 -CONFIGS = [] -for random_seed in range(2018, 2028): - ### ipdb - CONFIGS.append(IssueConfig(f'cpdbs-hillclimbing-pdb1m-pdbs10m-t20-s{random_seed}', ['--search', f'astar(cpdbs(hillclimbing(max_time={MAX_TIME},pdb_max_size=1000000,collection_max_size=10000000,random_seed={random_seed})),verbosity=silent)'], driver_options=['--search-time-limit', '5m'])), - - ### single cegar - CONFIGS.append(IssueConfig(f'cpdbs-singlecegar-regularplans-pdb1m-pdbs10m-t20-s{random_seed}', ['--search', f'astar(cpdbs(single_cegar(wildcard_plans=false,max_time={MAX_TIME},max_pdb_size=1000000,max_collection_size=10000000,random_seed={random_seed},verbosity=normal)),verbosity=silent)'], driver_options=['--search-time-limit', '5m'])) - - ### multiple cegar - CONFIGS.append(IssueConfig(f'cpdbs-multiplecegar-wildcardplans-pdb1m-pdbs10m-t20-blacklist0.75-stag4-s{random_seed}', ['--search', f'astar(cpdbs(multiple_cegar(wildcard_plans=true,max_time=100,max_pdb_size=1000000,max_collection_size=10000000,random_seed={random_seed},total_max_time={MAX_TIME},stagnation_limit=4,blacklist_trigger_percentage=0.75,blacklist_on_stagnation=true,verbosity=normal)),verbosity=silent)'], driver_options=['--search-time-limit', '5m'])) - -SUITE = common_setup.DEFAULT_OPTIMAL_SUITE -ENVIRONMENT = BaselSlurmEnvironment( - email="silvan.sievers@unibas.ch", - partition="infai_2", - export=[], - # paths 
obtained via: - # module purge - # module -q load CMake/3.15.3-GCCcore-8.3.0 - # module -q load GCC/8.3.0 - # echo $PATH - # echo $LD_LIBRARY_PATH - setup='export PATH=/scicore/soft/apps/binutils/2.32-GCCcore-8.3.0/bin:/scicore/soft/apps/CMake/3.15.3-GCCcore-8.3.0/bin:/scicore/soft/apps/cURL/7.66.0-GCCcore-8.3.0/bin:/scicore/soft/apps/bzip2/1.0.8-GCCcore-8.3.0/bin:/scicore/soft/apps/ncurses/6.1-GCCcore-8.3.0/bin:/scicore/soft/apps/GCCcore/8.3.0/bin:/infai/sieverss/repos/bin:/infai/sieverss/local:/export/soft/lua_lmod/centos7/lmod/lmod/libexec:/usr/local/bin:/usr/bin:/usr/local/sbin:/usr/sbin:$PATH\nexport LD_LIBRARY_PATH=/scicore/soft/apps/binutils/2.32-GCCcore-8.3.0/lib:/scicore/soft/apps/cURL/7.66.0-GCCcore-8.3.0/lib:/scicore/soft/apps/bzip2/1.0.8-GCCcore-8.3.0/lib:/scicore/soft/apps/zlib/1.2.11-GCCcore-8.3.0/lib:/scicore/soft/apps/ncurses/6.1-GCCcore-8.3.0/lib:/scicore/soft/apps/GCCcore/8.3.0/lib64:/scicore/soft/apps/GCCcore/8.3.0/lib') - -if common_setup.is_test_run(): - SUITE = IssueExperiment.DEFAULT_TEST_SUITE - ENVIRONMENT = LocalEnvironment(processes=4) - -exp = IssueExperiment( - revisions=REVISIONS, - configs=CONFIGS, - environment=ENVIRONMENT, -) -exp.add_suite(BENCHMARKS_DIR, SUITE) - -exp.add_parser(exp.EXITCODE_PARSER) -exp.add_parser(exp.TRANSLATOR_PARSER) -exp.add_parser(exp.SINGLE_SEARCH_PARSER) -exp.add_parser(exp.PLANNER_PARSER) - -exp.add_step('build', exp.build) -exp.add_step('start', exp.start_runs) -exp.add_fetcher(name='fetch') -exp.add_parser('cpdbs-parser.py') -exp.add_parser('cegar-parser.py') - -cegar_num_iterations = Attribute('cegar_num_iterations', absolute=False, min_wins=True) -cegar_num_patterns = Attribute('cegar_num_patterns', absolute=False, min_wins=True) -cegar_total_pdb_size = Attribute('cegar_total_pdb_size', absolute=False, min_wins=True) -cegar_computation_time = Attribute('cegar_computation_time', absolute=False, min_wins=True) - -exp.add_parse_again_step() - -exp.add_absolute_report_step(attributes=['coverage']) - 
-exp.add_report( - AverageAlgorithmReport( - algo_name_suffixes=['-s{}'.format(seed) for seed in range(2018,2028)], - attributes=['coverage', 'search_time', 'total_time', - 'expansions_until_last_jump', 'score_search_time', - 'score_total_time', 'score_memory', cegar_num_iterations, - cegar_num_patterns, cegar_total_pdb_size, cegar_computation_time], - ), - outfile=os.path.join(exp.eval_dir, "average", "properties"), - name="report-average" -) - -exp.add_fetcher(os.path.join(exp.eval_dir, 'average'), merge=True) -exp._configs = [ - IssueConfig('cpdbs-hillclimbing-pdb1m-pdbs10m-t20', []), - IssueConfig('cpdbs-singlecegar-regularplans-pdb1m-pdbs10m-t20', []), - IssueConfig('cpdbs-multiplecegar-wildcardplans-pdb1m-pdbs10m-t20-blacklist0.75-stag4', []), -] -exp.add_comparison_table_step_for_revision_pairs( - revision_pairs=[ - ("issue1007-v8c", "issue1007-v13"), - ], - attributes=['coverage', 'search_time', 'total_time', - 'expansions_until_last_jump', 'score_search_time', - 'score_total_time', 'score_memory', cegar_num_iterations, - cegar_num_patterns, cegar_total_pdb_size, cegar_computation_time], -) - -exp.run_steps() diff --git a/experiments/issue1007/v14-fixed-seed.py b/experiments/issue1007/v14-fixed-seed.py deleted file mode 100755 index ee910080f4..0000000000 --- a/experiments/issue1007/v14-fixed-seed.py +++ /dev/null @@ -1,170 +0,0 @@ -#! 
/usr/bin/env python3 - -import itertools -import math -import os -import subprocess - -from lab.environments import LocalEnvironment, BaselSlurmEnvironment -from lab.reports import Attribute - -from downward.reports.compare import ComparativeReport - -import common_setup -from common_setup import IssueConfig, IssueExperiment - -DIR = os.path.dirname(os.path.abspath(__file__)) -SCRIPT_NAME = os.path.splitext(os.path.basename(__file__))[0] -BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"] -REVISIONS = ["issue1007-v8c", "issue1007-v14"] -random_seed=2018 -MAX_TIME=100 -if common_setup.is_test_run(): - MAX_TIME=2 -CONFIGS = [ - ### ipdb - IssueConfig(f'cpdbs-hillclimbing-pdb1m-pdbs10m-t100-s{random_seed}', ['--search', f'astar(cpdbs(hillclimbing(max_time={MAX_TIME},pdb_max_size=1000000,collection_max_size=10000000,random_seed={random_seed})),verbosity=silent)']), - - ### single cegar - IssueConfig(f'cpdbs-singlecegar-regularplans-pdb1m-pdbs10m-t100-s{random_seed}', ['--search', f'astar(cpdbs(single_cegar(wildcard_plans=false,max_time={MAX_TIME},max_pdb_size=1000000,max_collection_size=10000000,random_seed={random_seed},verbosity=normal)),verbosity=silent)']), - IssueConfig(f'cpdbs-singlecegar-wildcardplans-pdb1m-pdbs10m-t100-s{random_seed}', ['--search', f'astar(cpdbs(single_cegar(wildcard_plans=true,max_time={MAX_TIME},max_pdb_size=1000000,max_collection_size=10000000,random_seed={random_seed},verbosity=normal)),verbosity=silent)']), - - ### multiple cegar - IssueConfig(f'cpdbs-multiplecegar-regularplans-pdb1m-pdbs10m-t100-s{random_seed}', ['--search', f"astar(cpdbs(multiple_cegar(wildcard_plans=false,max_time=100,max_pdb_size=1000000,max_collection_size=10000000,random_seed={random_seed},max_collection_size=10000000,total_max_time={MAX_TIME},stagnation_limit=infinity,blacklist_trigger_percentage=1,blacklist_on_stagnation=false,verbosity=normal)),verbosity=silent)"]), - 
IssueConfig(f'cpdbs-multiplecegar-regularplans-pdb1m-pdbs10m-t100-blacklist0.75-stag20-s{random_seed}', ['--search', f"astar(cpdbs(multiple_cegar(wildcard_plans=false,max_time=100,max_pdb_size=1000000,max_collection_size=10000000,random_seed={random_seed},max_collection_size=10000000,total_max_time={MAX_TIME},stagnation_limit=20,blacklist_trigger_percentage=0.75,blacklist_on_stagnation=true,verbosity=normal)),verbosity=silent)"]), - - IssueConfig(f'cpdbs-multiplecegar-wildcardplans-pdb1m-pdbs10m-t100-s{random_seed}', ['--search', f"astar(cpdbs(multiple_cegar(wildcard_plans=true,max_time=100,max_pdb_size=1000000,max_collection_size=10000000,random_seed={random_seed},max_collection_size=10000000,total_max_time={MAX_TIME},stagnation_limit=infinity,blacklist_trigger_percentage=1,blacklist_on_stagnation=false,verbosity=normal)),verbosity=silent)"]), - IssueConfig(f'cpdbs-multiplecegar-wildcardplans-pdb1m-pdbs10m-t100-blacklist0.75-stag20-s{random_seed}', ['--search', f"astar(cpdbs(multiple_cegar(wildcard_plans=true,max_time=100,max_pdb_size=1000000,max_collection_size=10000000,random_seed={random_seed},max_collection_size=10000000,total_max_time={MAX_TIME},stagnation_limit=20,blacklist_trigger_percentage=0.75,blacklist_on_stagnation=true,verbosity=normal)),verbosity=silent)"]), -] - -SUITE = common_setup.DEFAULT_OPTIMAL_SUITE -ENVIRONMENT = BaselSlurmEnvironment( - email="silvan.sievers@unibas.ch", - partition="infai_2", - export=[], - # paths obtained via: - # module purge - # module -q load CMake/3.15.3-GCCcore-8.3.0 - # module -q load GCC/8.3.0 - # echo $PATH - # echo $LD_LIBRARY_PATH - setup='export 
PATH=/scicore/soft/apps/binutils/2.32-GCCcore-8.3.0/bin:/scicore/soft/apps/CMake/3.15.3-GCCcore-8.3.0/bin:/scicore/soft/apps/cURL/7.66.0-GCCcore-8.3.0/bin:/scicore/soft/apps/bzip2/1.0.8-GCCcore-8.3.0/bin:/scicore/soft/apps/ncurses/6.1-GCCcore-8.3.0/bin:/scicore/soft/apps/GCCcore/8.3.0/bin:/infai/sieverss/repos/bin:/infai/sieverss/local:/export/soft/lua_lmod/centos7/lmod/lmod/libexec:/usr/local/bin:/usr/bin:/usr/local/sbin:/usr/sbin:$PATH\nexport LD_LIBRARY_PATH=/scicore/soft/apps/binutils/2.32-GCCcore-8.3.0/lib:/scicore/soft/apps/cURL/7.66.0-GCCcore-8.3.0/lib:/scicore/soft/apps/bzip2/1.0.8-GCCcore-8.3.0/lib:/scicore/soft/apps/zlib/1.2.11-GCCcore-8.3.0/lib:/scicore/soft/apps/ncurses/6.1-GCCcore-8.3.0/lib:/scicore/soft/apps/GCCcore/8.3.0/lib64:/scicore/soft/apps/GCCcore/8.3.0/lib') - -if common_setup.is_test_run(): - SUITE = IssueExperiment.DEFAULT_TEST_SUITE - ENVIRONMENT = LocalEnvironment(processes=4) - -exp = IssueExperiment( - revisions=REVISIONS, - configs=CONFIGS, - environment=ENVIRONMENT, -) -exp.add_suite(BENCHMARKS_DIR, SUITE) - -exp.add_parser(exp.EXITCODE_PARSER) -exp.add_parser(exp.TRANSLATOR_PARSER) -exp.add_parser(exp.SINGLE_SEARCH_PARSER) -exp.add_parser(exp.PLANNER_PARSER) - -exp.add_step('build', exp.build) -exp.add_step('start', exp.start_runs) -exp.add_fetcher(name='fetch') -exp.add_parser('cpdbs-parser.py') -exp.add_parser('cegar-parser.py') - -cpdbs_num_patterns = Attribute('cpdbs_num_patterns', absolute=False, min_wins=True) -cpdbs_total_pdb_size = Attribute('cpdbs_total_pdb_size', absolute=False, min_wins=True) -cpdbs_computation_time = Attribute('cpdbs_computation_time', absolute=False, min_wins=True) -score_cpdbs_computation_time = Attribute('score_cpdbs_computation_time', absolute=True, min_wins=False) -cegar_num_iterations = Attribute('cegar_num_iterations', absolute=False, min_wins=True) -cegar_num_patterns = Attribute('cegar_num_patterns', absolute=False, min_wins=True) -cegar_total_pdb_size = Attribute('cegar_total_pdb_size', 
absolute=False, min_wins=True) -cegar_computation_time = Attribute('cegar_computation_time', absolute=False, min_wins=True) -score_cegar_computation_time = Attribute('score_cegar_computation_time', absolute=True, min_wins=False) - -attributes = [ - cpdbs_num_patterns, - cpdbs_total_pdb_size, - cpdbs_computation_time, - score_cpdbs_computation_time, - cegar_num_iterations, - cegar_num_patterns, - cegar_total_pdb_size, - cegar_computation_time, - score_cegar_computation_time, -] -attributes.extend(exp.DEFAULT_TABLE_ATTRIBUTES) -attributes.append('initial_h_value') - -def add_computation_time_score(run): - """ - Convert cegar/cpdbs computation time into scores in the range [0, 1]. - - Best possible performance in a task is counted as 1, while failure - to construct the heuristic and worst performance are counted as 0. - - """ - def log_score(value, min_bound, max_bound): - assert min_bound < max_bound - if value is None: - return 0 - value = max(value, min_bound) - value = min(value, max_bound) - raw_score = math.log(value) - math.log(max_bound) - best_raw_score = math.log(min_bound) - math.log(max_bound) - return raw_score / best_raw_score - - run['score_cegar_computation_time'] = log_score(run.get('cegar_computation_time'), min_bound=1.0, max_bound=MAX_TIME) - run['score_cpdbs_computation_time'] = log_score(run.get('cpdbs_computation_time'), min_bound=1.0, max_bound=MAX_TIME) - return run - -exp.add_absolute_report_step(attributes=attributes) - -outfile = os.path.join( - exp.eval_dir, - f"{exp.name}-{REVISIONS[0]}-{REVISIONS[1]}-compare-hillclimbing.html") -name="make-comparison-tables-hillclimbing" -exp.add_report( - ComparativeReport( - [ - (f'{REVISIONS[0]}-cpdbs-hillclimbing-pdb1m-pdbs10m-t100-s{random_seed}', f'{REVISIONS[1]}-cpdbs-hillclimbing-pdb1m-pdbs10m-t100-s{random_seed}'), - ], - attributes=attributes, - filter=[add_computation_time_score], - ), - name=name, - outfile=outfile, -) -exp.add_step( - f"publish-{name}", - subprocess.call, - ["publish", 
outfile], -) - -outfile = os.path.join( - exp.eval_dir, - f"{exp.name}-{REVISIONS[0]}-{REVISIONS[1]}-compare-cegar.html") -name="make-comparison-tables-cegar" -exp.add_report( - ComparativeReport( - [ - (f'{REVISIONS[0]}-cpdbs-singlecegar-regularplans-pdb1m-pdbs10m-t100-s{random_seed}', f'{REVISIONS[1]}-cpdbs-singlecegar-regularplans-pdb1m-pdbs10m-t100-s{random_seed}'), - (f'{REVISIONS[0]}-cpdbs-singlecegar-wildcardplans-pdb1m-pdbs10m-t100-s{random_seed}', f'{REVISIONS[1]}-cpdbs-singlecegar-wildcardplans-pdb1m-pdbs10m-t100-s{random_seed}'), - (f'{REVISIONS[0]}-cpdbs-multiplecegar-regularplans-pdb1m-pdbs10m-t100-s{random_seed}', f'{REVISIONS[1]}-cpdbs-multiplecegar-regularplans-pdb1m-pdbs10m-t100-s{random_seed}'), - (f'{REVISIONS[0]}-cpdbs-multiplecegar-regularplans-pdb1m-pdbs10m-t100-blacklist0.75-stag20-s{random_seed}', f'{REVISIONS[1]}-cpdbs-multiplecegar-regularplans-pdb1m-pdbs10m-t100-blacklist0.75-stag20-s{random_seed}'), - (f'{REVISIONS[0]}-cpdbs-multiplecegar-wildcardplans-pdb1m-pdbs10m-t100-s{random_seed}', f'{REVISIONS[1]}-cpdbs-multiplecegar-wildcardplans-pdb1m-pdbs10m-t100-s{random_seed}'), - (f'{REVISIONS[0]}-cpdbs-multiplecegar-wildcardplans-pdb1m-pdbs10m-t100-blacklist0.75-stag20-s{random_seed}', f'{REVISIONS[1]}-cpdbs-multiplecegar-wildcardplans-pdb1m-pdbs10m-t100-blacklist0.75-stag20-s{random_seed}'), - ], - attributes=attributes, - filter=[add_computation_time_score], - ), - name=name, - outfile=outfile, -) -exp.add_step( - f"publish-{name}", - subprocess.call, - ["publish", outfile], -) - -exp.run_steps() diff --git a/experiments/issue1007/v14-multiple-seeds.py b/experiments/issue1007/v14-multiple-seeds.py deleted file mode 100755 index 7f1a103c96..0000000000 --- a/experiments/issue1007/v14-multiple-seeds.py +++ /dev/null @@ -1,143 +0,0 @@ -#! 
/usr/bin/env python3 - -import itertools -import math -import os - -from lab.environments import LocalEnvironment, BaselSlurmEnvironment -from lab.reports import Attribute - -from downward.reports.compare import ComparativeReport - -import common_setup -from common_setup import IssueConfig, IssueExperiment -from average_report import AverageAlgorithmReport - -DIR = os.path.dirname(os.path.abspath(__file__)) -SCRIPT_NAME = os.path.splitext(os.path.basename(__file__))[0] -BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"] -REVISIONS = ["issue1007-v8c", "issue1007-v14"] -MAX_TIME=20 -if common_setup.is_test_run(): - MAX_TIME=1 -CONFIGS = [] -for random_seed in range(2018, 2028): - ### ipdb - CONFIGS.append(IssueConfig(f'cpdbs-hillclimbing-pdb1m-pdbs10m-t20-s{random_seed}', ['--search', f'astar(cpdbs(hillclimbing(max_time={MAX_TIME},pdb_max_size=1000000,collection_max_size=10000000,random_seed={random_seed})),verbosity=silent)'], driver_options=['--search-time-limit', '5m'])), - - ### single cegar - CONFIGS.append(IssueConfig(f'cpdbs-singlecegar-regularplans-pdb1m-pdbs10m-t20-s{random_seed}', ['--search', f'astar(cpdbs(single_cegar(wildcard_plans=false,max_time={MAX_TIME},max_pdb_size=1000000,max_collection_size=10000000,random_seed={random_seed},verbosity=normal)),verbosity=silent)'], driver_options=['--search-time-limit', '5m'])) - - ### multiple cegar - CONFIGS.append(IssueConfig(f'cpdbs-multiplecegar-wildcardplans-pdb1m-pdbs10m-t20-blacklist0.75-stag4-s{random_seed}', ['--search', f'astar(cpdbs(multiple_cegar(wildcard_plans=true,max_time=100,max_pdb_size=1000000,max_collection_size=10000000,random_seed={random_seed},total_max_time={MAX_TIME},stagnation_limit=4,blacklist_trigger_percentage=0.75,blacklist_on_stagnation=true,verbosity=normal)),verbosity=silent)'], driver_options=['--search-time-limit', '5m'])) - -SUITE = common_setup.DEFAULT_OPTIMAL_SUITE -ENVIRONMENT = BaselSlurmEnvironment( - email="silvan.sievers@unibas.ch", - partition="infai_2", - export=[], - # 
paths obtained via: - # module purge - # module -q load CMake/3.15.3-GCCcore-8.3.0 - # module -q load GCC/8.3.0 - # echo $PATH - # echo $LD_LIBRARY_PATH - setup='export PATH=/scicore/soft/apps/binutils/2.32-GCCcore-8.3.0/bin:/scicore/soft/apps/CMake/3.15.3-GCCcore-8.3.0/bin:/scicore/soft/apps/cURL/7.66.0-GCCcore-8.3.0/bin:/scicore/soft/apps/bzip2/1.0.8-GCCcore-8.3.0/bin:/scicore/soft/apps/ncurses/6.1-GCCcore-8.3.0/bin:/scicore/soft/apps/GCCcore/8.3.0/bin:/infai/sieverss/repos/bin:/infai/sieverss/local:/export/soft/lua_lmod/centos7/lmod/lmod/libexec:/usr/local/bin:/usr/bin:/usr/local/sbin:/usr/sbin:$PATH\nexport LD_LIBRARY_PATH=/scicore/soft/apps/binutils/2.32-GCCcore-8.3.0/lib:/scicore/soft/apps/cURL/7.66.0-GCCcore-8.3.0/lib:/scicore/soft/apps/bzip2/1.0.8-GCCcore-8.3.0/lib:/scicore/soft/apps/zlib/1.2.11-GCCcore-8.3.0/lib:/scicore/soft/apps/ncurses/6.1-GCCcore-8.3.0/lib:/scicore/soft/apps/GCCcore/8.3.0/lib64:/scicore/soft/apps/GCCcore/8.3.0/lib') - -if common_setup.is_test_run(): - SUITE = IssueExperiment.DEFAULT_TEST_SUITE - ENVIRONMENT = LocalEnvironment(processes=4) - -exp = IssueExperiment( - revisions=REVISIONS, - configs=CONFIGS, - environment=ENVIRONMENT, -) -exp.add_suite(BENCHMARKS_DIR, SUITE) - -exp.add_parser(exp.EXITCODE_PARSER) -exp.add_parser(exp.TRANSLATOR_PARSER) -exp.add_parser(exp.SINGLE_SEARCH_PARSER) -exp.add_parser(exp.PLANNER_PARSER) - -exp.add_step('build', exp.build) -exp.add_step('start', exp.start_runs) -exp.add_fetcher(name='fetch') -exp.add_parser('cpdbs-parser.py') -exp.add_parser('cegar-parser.py') - -cpdbs_num_patterns = Attribute('cpdbs_num_patterns', absolute=False, min_wins=True) -cpdbs_total_pdb_size = Attribute('cpdbs_total_pdb_size', absolute=False, min_wins=True) -cpdbs_computation_time = Attribute('cpdbs_computation_time', absolute=False, min_wins=True) -score_cpdbs_computation_time = Attribute('score_cpdbs_computation_time', absolute=True, min_wins=False) -cegar_num_iterations = Attribute('cegar_num_iterations', 
absolute=False, min_wins=True) -cegar_num_patterns = Attribute('cegar_num_patterns', absolute=False, min_wins=True) -cegar_total_pdb_size = Attribute('cegar_total_pdb_size', absolute=False, min_wins=True) -cegar_computation_time = Attribute('cegar_computation_time', absolute=False, min_wins=True) -score_cegar_computation_time = Attribute('score_cegar_computation_time', absolute=True, min_wins=False) - -exp.add_absolute_report_step(attributes=['coverage']) - -def add_computation_time_score(run): - """ - Convert cegar/cpdbs computation time into scores in the range [0, 1]. - - Best possible performance in a task is counted as 1, while failure - to construct the heuristic and worst performance are counted as 0. - - """ - def log_score(value, min_bound, max_bound): - assert min_bound < max_bound - if value is None: - return 0 - value = max(value, min_bound) - value = min(value, max_bound) - raw_score = math.log(value) - math.log(max_bound) - best_raw_score = math.log(min_bound) - math.log(max_bound) - return raw_score / best_raw_score - - run['score_cegar_computation_time'] = log_score(run.get('cegar_computation_time'), min_bound=1.0, max_bound=MAX_TIME) - run['score_cpdbs_computation_time'] = log_score(run.get('cpdbs_computation_time'), min_bound=1.0, max_bound=MAX_TIME) - return run - -exp.add_report( - AverageAlgorithmReport( - algo_name_suffixes=['-s{}'.format(seed) for seed in range(2018,2028)], - attributes=[ - 'coverage', 'search_time', 'total_time', - 'expansions_until_last_jump', 'score_search_time', - 'score_total_time', 'score_memory', 'score_expansions', - 'initial_h_value', cpdbs_num_patterns, - cpdbs_total_pdb_size, cpdbs_computation_time, - score_cpdbs_computation_time, cegar_num_iterations, - cegar_num_patterns, cegar_total_pdb_size, - cegar_computation_time, score_cegar_computation_time, - ], - filter=[add_computation_time_score], - ), - outfile=os.path.join(exp.eval_dir, "average", "properties"), - name="report-average" -) - 
-exp.add_fetcher(os.path.join(exp.eval_dir, 'average'), merge=True) -exp._configs = [ - IssueConfig('cpdbs-hillclimbing-pdb1m-pdbs10m-t20', []), - IssueConfig('cpdbs-singlecegar-regularplans-pdb1m-pdbs10m-t20', []), - IssueConfig('cpdbs-multiplecegar-wildcardplans-pdb1m-pdbs10m-t20-blacklist0.75-stag4', []), -] -exp.add_comparison_table_step_for_revision_pairs( - revision_pairs=[ - (f"{REVISIONS[0]}", f"{REVISIONS[1]}"), - ], - attributes=[ - 'coverage', 'search_time', 'total_time', - 'expansions_until_last_jump', 'score_search_time', - 'score_total_time', 'score_memory', 'score_expansions', - 'initial_h_value', cpdbs_num_patterns, cpdbs_total_pdb_size, - cpdbs_computation_time, score_cpdbs_computation_time, - cegar_num_iterations, cegar_num_patterns, cegar_total_pdb_size, - cegar_computation_time, score_cegar_computation_time, - ], -) - -exp.run_steps() diff --git a/experiments/issue1007/v15-fixed-seed.py b/experiments/issue1007/v15-fixed-seed.py deleted file mode 100755 index 3eda2858cb..0000000000 --- a/experiments/issue1007/v15-fixed-seed.py +++ /dev/null @@ -1,137 +0,0 @@ -#! 
/usr/bin/env python3 - -import itertools -import math -import os -import subprocess - -from lab.environments import LocalEnvironment, BaselSlurmEnvironment -from lab.reports import Attribute - -from downward.reports.compare import ComparativeReport - -import common_setup -from common_setup import IssueConfig, IssueExperiment - -DIR = os.path.dirname(os.path.abspath(__file__)) -SCRIPT_NAME = os.path.splitext(os.path.basename(__file__))[0] -BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"] -REVISIONS = ["issue1007-v15"] -random_seed=2018 -MAX_TIME=100 -if common_setup.is_test_run(): - MAX_TIME=2 -CONFIGS = [ - ### single cegar - IssueConfig(f'cpdbs-singlecegar-regularplans-pdb1m-pdbs10m-t100-s{random_seed}', ['--search', f'astar(cpdbs(single_cegar(use_wildcard_plans=false,max_time={MAX_TIME},max_pdb_size=1000000,max_collection_size=10000000,random_seed={random_seed},verbosity=normal)),verbosity=silent)']), - IssueConfig(f'cpdbs-singlecegar-wildcardplans-pdb1m-pdbs10m-t100-s{random_seed}', ['--search', f'astar(cpdbs(single_cegar(use_wildcard_plans=true,max_time={MAX_TIME},max_pdb_size=1000000,max_collection_size=10000000,random_seed={random_seed},verbosity=normal)),verbosity=silent)']), - - ### multiple cegar - IssueConfig(f'cpdbs-multiplecegar-regularplans-pdb1m-pdbs10m-t100-s{random_seed}', ['--search', f"astar(cpdbs(multiple_cegar(use_wildcard_plans=false,max_time=100,max_pdb_size=1000000,max_collection_size=10000000,random_seed={random_seed},max_collection_size=10000000,total_max_time={MAX_TIME},stagnation_limit=infinity,blacklist_trigger_percentage=1,enable_blacklist_on_stagnation=false,verbosity=normal)),verbosity=silent)"]), - IssueConfig(f'cpdbs-multiplecegar-regularplans-pdb1m-pdbs10m-t100-blacklist0.75-stag20-s{random_seed}', ['--search', 
f"astar(cpdbs(multiple_cegar(use_wildcard_plans=false,max_time=100,max_pdb_size=1000000,max_collection_size=10000000,random_seed={random_seed},max_collection_size=10000000,total_max_time={MAX_TIME},stagnation_limit=20,blacklist_trigger_percentage=0.75,enable_blacklist_on_stagnation=true,verbosity=normal)),verbosity=silent)"]), - - IssueConfig(f'cpdbs-multiplecegar-wildcardplans-pdb1m-pdbs10m-t100-s{random_seed}', ['--search', f"astar(cpdbs(multiple_cegar(use_wildcard_plans=true,max_time=100,max_pdb_size=1000000,max_collection_size=10000000,random_seed={random_seed},max_collection_size=10000000,total_max_time={MAX_TIME},stagnation_limit=infinity,blacklist_trigger_percentage=1,enable_blacklist_on_stagnation=false,verbosity=normal)),verbosity=silent)"]), - IssueConfig(f'cpdbs-multiplecegar-wildcardplans-pdb1m-pdbs10m-t100-blacklist0.75-stag20-s{random_seed}', ['--search', f"astar(cpdbs(multiple_cegar(use_wildcard_plans=true,max_time=100,max_pdb_size=1000000,max_collection_size=10000000,random_seed={random_seed},max_collection_size=10000000,total_max_time={MAX_TIME},stagnation_limit=20,blacklist_trigger_percentage=0.75,enable_blacklist_on_stagnation=true,verbosity=normal)),verbosity=silent)"]), -] - -SUITE = common_setup.DEFAULT_OPTIMAL_SUITE -ENVIRONMENT = BaselSlurmEnvironment( - email="silvan.sievers@unibas.ch", - partition="infai_2", - export=[], - # paths obtained via: - # module purge - # module -q load CMake/3.15.3-GCCcore-8.3.0 - # module -q load GCC/8.3.0 - # echo $PATH - # echo $LD_LIBRARY_PATH - setup='export PATH=/scicore/soft/apps/binutils/2.32-GCCcore-8.3.0/bin:/scicore/soft/apps/CMake/3.15.3-GCCcore-8.3.0/bin:/scicore/soft/apps/cURL/7.66.0-GCCcore-8.3.0/bin:/scicore/soft/apps/bzip2/1.0.8-GCCcore-8.3.0/bin:/scicore/soft/apps/ncurses/6.1-GCCcore-8.3.0/bin:/scicore/soft/apps/GCCcore/8.3.0/bin:/infai/sieverss/repos/bin:/infai/sieverss/local:/export/soft/lua_lmod/centos7/lmod/lmod/libexec:/usr/local/bin:/usr/bin:/usr/local/sbin:/usr/sbin:$PATH\nexport 
LD_LIBRARY_PATH=/scicore/soft/apps/binutils/2.32-GCCcore-8.3.0/lib:/scicore/soft/apps/cURL/7.66.0-GCCcore-8.3.0/lib:/scicore/soft/apps/bzip2/1.0.8-GCCcore-8.3.0/lib:/scicore/soft/apps/zlib/1.2.11-GCCcore-8.3.0/lib:/scicore/soft/apps/ncurses/6.1-GCCcore-8.3.0/lib:/scicore/soft/apps/GCCcore/8.3.0/lib64:/scicore/soft/apps/GCCcore/8.3.0/lib') - -if common_setup.is_test_run(): - SUITE = IssueExperiment.DEFAULT_TEST_SUITE - ENVIRONMENT = LocalEnvironment(processes=4) - -exp = IssueExperiment( - revisions=REVISIONS, - configs=CONFIGS, - environment=ENVIRONMENT, -) -exp.add_suite(BENCHMARKS_DIR, SUITE) - -exp.add_parser(exp.EXITCODE_PARSER) -exp.add_parser(exp.TRANSLATOR_PARSER) -exp.add_parser(exp.SINGLE_SEARCH_PARSER) -exp.add_parser(exp.PLANNER_PARSER) - -exp.add_step('build', exp.build) -exp.add_step('start', exp.start_runs) -exp.add_fetcher(name='fetch') -exp.add_parser('cpdbs-parser.py') -exp.add_parser('cegar-parser.py') - -cpdbs_num_patterns = Attribute('cpdbs_num_patterns', absolute=False, min_wins=True) -cpdbs_total_pdb_size = Attribute('cpdbs_total_pdb_size', absolute=False, min_wins=True) -cpdbs_computation_time = Attribute('cpdbs_computation_time', absolute=False, min_wins=True) -score_cpdbs_computation_time = Attribute('score_cpdbs_computation_time', absolute=True, min_wins=False) -cegar_num_iterations = Attribute('cegar_num_iterations', absolute=False, min_wins=True) -cegar_num_patterns = Attribute('cegar_num_patterns', absolute=False, min_wins=True) -cegar_total_pdb_size = Attribute('cegar_total_pdb_size', absolute=False, min_wins=True) -cegar_computation_time = Attribute('cegar_computation_time', absolute=False, min_wins=True) -score_cegar_computation_time = Attribute('score_cegar_computation_time', absolute=True, min_wins=False) - -attributes = [ - cpdbs_num_patterns, - cpdbs_total_pdb_size, - cpdbs_computation_time, - score_cpdbs_computation_time, - cegar_num_iterations, - cegar_num_patterns, - cegar_total_pdb_size, - cegar_computation_time, - 
score_cegar_computation_time, -] -attributes.extend(exp.DEFAULT_TABLE_ATTRIBUTES) -attributes.append('initial_h_value') - -def add_computation_time_score(run): - """ - Convert cegar/cpdbs computation time into scores in the range [0, 1]. - - Best possible performance in a task is counted as 1, while failure - to construct the heuristic and worst performance are counted as 0. - - """ - def log_score(value, min_bound, max_bound): - assert min_bound < max_bound - if value is None: - return 0 - value = max(value, min_bound) - value = min(value, max_bound) - raw_score = math.log(value) - math.log(max_bound) - best_raw_score = math.log(min_bound) - math.log(max_bound) - return raw_score / best_raw_score - - run['score_cegar_computation_time'] = log_score(run.get('cegar_computation_time'), min_bound=1.0, max_bound=MAX_TIME) - run['score_cpdbs_computation_time'] = log_score(run.get('cpdbs_computation_time'), min_bound=1.0, max_bound=MAX_TIME) - return run - -exp.add_absolute_report_step(attributes=attributes,filter=[add_computation_time_score]) - -exp.add_fetcher('data/issue1007-v14-fixed-seed-eval', filter_algorithm=[ - f'issue1007-v14-cpdbs-singlecegar-regularplans-pdb1m-pdbs10m-t100-s{random_seed}', - f'issue1007-v14-cpdbs-singlecegar-wildcardplans-pdb1m-pdbs10m-t100-s{random_seed}', - f'issue1007-v14-cpdbs-multiplecegar-regularplans-pdb1m-pdbs10m-t100-s{random_seed}', - f'issue1007-v14-cpdbs-multiplecegar-wildcardplans-pdb1m-pdbs10m-t100-s{random_seed}', - f'issue1007-v14-cpdbs-multiplecegar-regularplans-pdb1m-pdbs10m-t100-blacklist0.75-stag20-s{random_seed}', - f'issue1007-v14-cpdbs-multiplecegar-wildcardplans-pdb1m-pdbs10m-t100-blacklist0.75-stag20-s{random_seed}', -],merge=True) - -exp.add_comparison_table_step_for_revision_pairs( - revision_pairs=[ - ("issue1007-v14", "issue1007-v15"), - ], - attributes=attributes, - filter=[add_computation_time_score], -) - -exp.run_steps() diff --git a/experiments/issue1007/v15-ipdb-sys.py b/experiments/issue1007/v15-ipdb-sys.py 
deleted file mode 100755 index 5b7d37763f..0000000000 --- a/experiments/issue1007/v15-ipdb-sys.py +++ /dev/null @@ -1,103 +0,0 @@ -#! /usr/bin/env python3 - -import itertools -import math -import os -import subprocess - -from lab.environments import LocalEnvironment, BaselSlurmEnvironment -from lab.reports import Attribute - -from downward.reports.compare import ComparativeReport - -import common_setup -from common_setup import IssueConfig, IssueExperiment - -DIR = os.path.dirname(os.path.abspath(__file__)) -SCRIPT_NAME = os.path.splitext(os.path.basename(__file__))[0] -BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"] -REVISIONS = ["issue1007-base-v2", "issue1007-v15"] -random_seed=2018 -MAX_TIME=900 -if common_setup.is_test_run(): - MAX_TIME=2 -CONFIGS = [ - IssueConfig(f'cpdbs-hillclimbing-pdb1m-pdbs10m-t100-s{random_seed}', ['--search', f'astar(cpdbs(hillclimbing(max_time={MAX_TIME},pdb_max_size=1000000,collection_max_size=10000000,random_seed={random_seed})),verbosity=silent)']), - IssueConfig('cpdbs-sys2', ['--search', 'astar(cpdbs(systematic(pattern_max_size=2)),verbosity=silent)']), - IssueConfig('cpdbs-sys3', ['--search', 'astar(cpdbs(systematic(pattern_max_size=3)),verbosity=silent)']), - -] - -SUITE = common_setup.DEFAULT_OPTIMAL_SUITE -ENVIRONMENT = BaselSlurmEnvironment( - email="silvan.sievers@unibas.ch", - partition="infai_2", - export=[], - # paths obtained via: - # module purge - # module -q load CMake/3.15.3-GCCcore-8.3.0 - # module -q load GCC/8.3.0 - # echo $PATH - # echo $LD_LIBRARY_PATH - setup='export 
PATH=/scicore/soft/apps/binutils/2.32-GCCcore-8.3.0/bin:/scicore/soft/apps/CMake/3.15.3-GCCcore-8.3.0/bin:/scicore/soft/apps/cURL/7.66.0-GCCcore-8.3.0/bin:/scicore/soft/apps/bzip2/1.0.8-GCCcore-8.3.0/bin:/scicore/soft/apps/ncurses/6.1-GCCcore-8.3.0/bin:/scicore/soft/apps/GCCcore/8.3.0/bin:/infai/sieverss/repos/bin:/infai/sieverss/local:/export/soft/lua_lmod/centos7/lmod/lmod/libexec:/usr/local/bin:/usr/bin:/usr/local/sbin:/usr/sbin:$PATH\nexport LD_LIBRARY_PATH=/scicore/soft/apps/binutils/2.32-GCCcore-8.3.0/lib:/scicore/soft/apps/cURL/7.66.0-GCCcore-8.3.0/lib:/scicore/soft/apps/bzip2/1.0.8-GCCcore-8.3.0/lib:/scicore/soft/apps/zlib/1.2.11-GCCcore-8.3.0/lib:/scicore/soft/apps/ncurses/6.1-GCCcore-8.3.0/lib:/scicore/soft/apps/GCCcore/8.3.0/lib64:/scicore/soft/apps/GCCcore/8.3.0/lib') - -if common_setup.is_test_run(): - SUITE = IssueExperiment.DEFAULT_TEST_SUITE - ENVIRONMENT = LocalEnvironment(processes=4) - -exp = IssueExperiment( - revisions=REVISIONS, - configs=CONFIGS, - environment=ENVIRONMENT, -) -exp.add_suite(BENCHMARKS_DIR, SUITE) - -exp.add_parser(exp.EXITCODE_PARSER) -exp.add_parser(exp.TRANSLATOR_PARSER) -exp.add_parser(exp.SINGLE_SEARCH_PARSER) -exp.add_parser(exp.PLANNER_PARSER) - -exp.add_step('build', exp.build) -exp.add_step('start', exp.start_runs) -exp.add_fetcher(name='fetch') -exp.add_parser('cpdbs-parser.py') - -cpdbs_num_patterns = Attribute('cpdbs_num_patterns', absolute=False, min_wins=True) -cpdbs_total_pdb_size = Attribute('cpdbs_total_pdb_size', absolute=False, min_wins=True) -cpdbs_computation_time = Attribute('cpdbs_computation_time', absolute=False, min_wins=True) -score_cpdbs_computation_time = Attribute('score_cpdbs_computation_time', absolute=True, min_wins=False) - -attributes = [ - cpdbs_num_patterns, - cpdbs_total_pdb_size, - cpdbs_computation_time, - score_cpdbs_computation_time, -] -attributes.extend(exp.DEFAULT_TABLE_ATTRIBUTES) -attributes.append('initial_h_value') - -def add_computation_time_score(run): - """ - Convert 
cegar/cpdbs computation time into scores in the range [0, 1]. - - Best possible performance in a task is counted as 1, while failure - to construct the heuristic and worst performance are counted as 0. - - """ - def log_score(value, min_bound, max_bound): - assert min_bound < max_bound - if value is None: - return 0 - value = max(value, min_bound) - value = min(value, max_bound) - raw_score = math.log(value) - math.log(max_bound) - best_raw_score = math.log(min_bound) - math.log(max_bound) - return raw_score / best_raw_score - - run['score_cpdbs_computation_time'] = log_score(run.get('cpdbs_computation_time'), min_bound=1.0, max_bound=MAX_TIME) - return run - -exp.add_absolute_report_step(attributes=attributes,filter=[add_computation_time_score]) -exp.add_comparison_table_step(attributes=attributes,filter=[add_computation_time_score]) - -exp.run_steps() diff --git a/experiments/issue1007/v15-multiple-seeds.py b/experiments/issue1007/v15-multiple-seeds.py deleted file mode 100755 index 90a26db4ed..0000000000 --- a/experiments/issue1007/v15-multiple-seeds.py +++ /dev/null @@ -1,147 +0,0 @@ -#! 
/usr/bin/env python3 - -import itertools -import math -import os - -from lab.environments import LocalEnvironment, BaselSlurmEnvironment -from lab.reports import Attribute - -from downward.reports.compare import ComparativeReport - -import common_setup -from common_setup import IssueConfig, IssueExperiment -from average_report import AverageAlgorithmReport - -DIR = os.path.dirname(os.path.abspath(__file__)) -SCRIPT_NAME = os.path.splitext(os.path.basename(__file__))[0] -BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"] -REVISIONS = ["issue1007-v15"] -MAX_TIME=20 -if common_setup.is_test_run(): - MAX_TIME=1 -CONFIGS = [] -for random_seed in range(2018, 2028): - ### single cegar - CONFIGS.append(IssueConfig(f'cpdbs-singlecegar-regularplans-pdb1m-pdbs10m-t20-s{random_seed}', ['--search', f'astar(cpdbs(single_cegar(use_wildcard_plans=false,max_time={MAX_TIME},max_pdb_size=1000000,max_collection_size=10000000,random_seed={random_seed},verbosity=normal)),verbosity=silent)'], driver_options=['--search-time-limit', '5m'])) - - ### multiple cegar - CONFIGS.append(IssueConfig(f'cpdbs-multiplecegar-wildcardplans-pdb1m-pdbs10m-t20-blacklist0.75-stag4-s{random_seed}', ['--search', f'astar(cpdbs(multiple_cegar(use_wildcard_plans=true,max_time=100,max_pdb_size=1000000,max_collection_size=10000000,random_seed={random_seed},total_max_time={MAX_TIME},stagnation_limit=4,blacklist_trigger_percentage=0.75,enable_blacklist_on_stagnation=true,verbosity=normal)),verbosity=silent)'], driver_options=['--search-time-limit', '5m'])) - -SUITE = common_setup.DEFAULT_OPTIMAL_SUITE -ENVIRONMENT = BaselSlurmEnvironment( - email="silvan.sievers@unibas.ch", - partition="infai_2", - export=[], - # paths obtained via: - # module purge - # module -q load CMake/3.15.3-GCCcore-8.3.0 - # module -q load GCC/8.3.0 - # echo $PATH - # echo $LD_LIBRARY_PATH - setup='export 
PATH=/scicore/soft/apps/binutils/2.32-GCCcore-8.3.0/bin:/scicore/soft/apps/CMake/3.15.3-GCCcore-8.3.0/bin:/scicore/soft/apps/cURL/7.66.0-GCCcore-8.3.0/bin:/scicore/soft/apps/bzip2/1.0.8-GCCcore-8.3.0/bin:/scicore/soft/apps/ncurses/6.1-GCCcore-8.3.0/bin:/scicore/soft/apps/GCCcore/8.3.0/bin:/infai/sieverss/repos/bin:/infai/sieverss/local:/export/soft/lua_lmod/centos7/lmod/lmod/libexec:/usr/local/bin:/usr/bin:/usr/local/sbin:/usr/sbin:$PATH\nexport LD_LIBRARY_PATH=/scicore/soft/apps/binutils/2.32-GCCcore-8.3.0/lib:/scicore/soft/apps/cURL/7.66.0-GCCcore-8.3.0/lib:/scicore/soft/apps/bzip2/1.0.8-GCCcore-8.3.0/lib:/scicore/soft/apps/zlib/1.2.11-GCCcore-8.3.0/lib:/scicore/soft/apps/ncurses/6.1-GCCcore-8.3.0/lib:/scicore/soft/apps/GCCcore/8.3.0/lib64:/scicore/soft/apps/GCCcore/8.3.0/lib') - -if common_setup.is_test_run(): - SUITE = IssueExperiment.DEFAULT_TEST_SUITE - ENVIRONMENT = LocalEnvironment(processes=4) - -exp = IssueExperiment( - revisions=REVISIONS, - configs=CONFIGS, - environment=ENVIRONMENT, -) -exp.add_suite(BENCHMARKS_DIR, SUITE) - -exp.add_parser(exp.EXITCODE_PARSER) -exp.add_parser(exp.TRANSLATOR_PARSER) -exp.add_parser(exp.SINGLE_SEARCH_PARSER) -exp.add_parser(exp.PLANNER_PARSER) - -exp.add_step('build', exp.build) -exp.add_step('start', exp.start_runs) -exp.add_fetcher(name='fetch') -exp.add_parser('cpdbs-parser.py') -exp.add_parser('cegar-parser.py') - -cpdbs_num_patterns = Attribute('cpdbs_num_patterns', absolute=False, min_wins=True) -cpdbs_total_pdb_size = Attribute('cpdbs_total_pdb_size', absolute=False, min_wins=True) -cpdbs_computation_time = Attribute('cpdbs_computation_time', absolute=False, min_wins=True) -score_cpdbs_computation_time = Attribute('score_cpdbs_computation_time', absolute=True, min_wins=False) -cegar_num_iterations = Attribute('cegar_num_iterations', absolute=False, min_wins=True) -cegar_num_patterns = Attribute('cegar_num_patterns', absolute=False, min_wins=True) -cegar_total_pdb_size = Attribute('cegar_total_pdb_size', 
absolute=False, min_wins=True) -cegar_computation_time = Attribute('cegar_computation_time', absolute=False, min_wins=True) -score_cegar_computation_time = Attribute('score_cegar_computation_time', absolute=True, min_wins=False) - -exp.add_absolute_report_step(attributes=['coverage']) - -### compare against v14 -exp.add_fetcher('data/issue1007-v14-multiple-seeds-eval',merge=True,filter_algorithm=[ - 'issue1007-v14-cpdbs-singlecegar-regularplans-pdb1m-pdbs10m-t20-s{}'.format(random_seed) for random_seed in range(2018, 2028) -] + [ - 'issue1007-v14-cpdbs-multiplecegar-wildcardplans-pdb1m-pdbs10m-t20-blacklist0.75-stag4-s{}'.format(random_seed) for random_seed in range(2018, 2028) -]) - -def add_computation_time_score(run): - """ - Convert cegar/cpdbs computation time into scores in the range [0, 1]. - - Best possible performance in a task is counted as 1, while failure - to construct the heuristic and worst performance are counted as 0. - - """ - def log_score(value, min_bound, max_bound): - assert min_bound < max_bound - if value is None: - return 0 - value = max(value, min_bound) - value = min(value, max_bound) - raw_score = math.log(value) - math.log(max_bound) - best_raw_score = math.log(min_bound) - math.log(max_bound) - return raw_score / best_raw_score - - run['score_cegar_computation_time'] = log_score(run.get('cegar_computation_time'), min_bound=1.0, max_bound=MAX_TIME) - run['score_cpdbs_computation_time'] = log_score(run.get('cpdbs_computation_time'), min_bound=1.0, max_bound=MAX_TIME) - return run - -exp.add_report( - AverageAlgorithmReport( - algo_name_suffixes=['-s{}'.format(seed) for seed in range(2018,2028)], - attributes=[ - 'coverage', 'search_time', 'total_time', - 'expansions_until_last_jump', 'score_search_time', - 'score_total_time', 'score_memory', 'score_expansions', - 'initial_h_value', cpdbs_num_patterns, - cpdbs_total_pdb_size, cpdbs_computation_time, - score_cpdbs_computation_time, cegar_num_iterations, - cegar_num_patterns, 
cegar_total_pdb_size, - cegar_computation_time, score_cegar_computation_time, - ], - filter=[add_computation_time_score], - ), - outfile=os.path.join(exp.eval_dir, "average", "properties"), - name="report-average" -) - -exp.add_fetcher(os.path.join(exp.eval_dir, 'average'), merge=True) -exp._configs = [ - IssueConfig('cpdbs-singlecegar-regularplans-pdb1m-pdbs10m-t20', []), - IssueConfig('cpdbs-multiplecegar-wildcardplans-pdb1m-pdbs10m-t20-blacklist0.75-stag4', []), -] -exp.add_comparison_table_step_for_revision_pairs( - revision_pairs=[ - ("issue1007-v14", "issue1007-v15"), - ], - attributes=[ - 'coverage', 'search_time', 'total_time', - 'expansions_until_last_jump', 'score_search_time', - 'score_total_time', 'score_memory', 'score_expansions', - 'initial_h_value', cpdbs_num_patterns, cpdbs_total_pdb_size, - cpdbs_computation_time, score_cpdbs_computation_time, - cegar_num_iterations, cegar_num_patterns, cegar_total_pdb_size, - cegar_computation_time, score_cegar_computation_time, - ], - filter=[add_computation_time_score], -) - -exp.run_steps() diff --git a/experiments/issue1007/v16-v17-fixed-seed.py b/experiments/issue1007/v16-v17-fixed-seed.py deleted file mode 100755 index 3c85cebf84..0000000000 --- a/experiments/issue1007/v16-v17-fixed-seed.py +++ /dev/null @@ -1,139 +0,0 @@ -#! 
/usr/bin/env python3 - -import itertools -import math -import os -import subprocess - -from lab.environments import LocalEnvironment, BaselSlurmEnvironment -from lab.reports import Attribute - -from downward.reports.compare import ComparativeReport - -import common_setup -from common_setup import IssueConfig, IssueExperiment - -DIR = os.path.dirname(os.path.abspath(__file__)) -SCRIPT_NAME = os.path.splitext(os.path.basename(__file__))[0] -BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"] -REVISIONS = ["issue1007-v16", "issue1007-v17"] -random_seed=2018 -MAX_TIME=100 -if common_setup.is_test_run(): - MAX_TIME=2 -CONFIGS = [ - ### single cegar - IssueConfig(f'cpdbs-singlecegar-regularplans-pdb1m-pdbs10m-t100-s{random_seed}', ['--search', f'astar(cpdbs(single_cegar(use_wildcard_plans=false,max_time={MAX_TIME},max_pdb_size=1000000,max_collection_size=10000000,random_seed={random_seed},verbosity=normal)),verbosity=silent)']), - IssueConfig(f'cpdbs-singlecegar-wildcardplans-pdb1m-pdbs10m-t100-s{random_seed}', ['--search', f'astar(cpdbs(single_cegar(use_wildcard_plans=true,max_time={MAX_TIME},max_pdb_size=1000000,max_collection_size=10000000,random_seed={random_seed},verbosity=normal)),verbosity=silent)']), - - ### multiple cegar - IssueConfig(f'cpdbs-multiplecegar-regularplans-pdb1m-pdbs10m-t100-s{random_seed}', ['--search', f"astar(cpdbs(multiple_cegar(use_wildcard_plans=false,max_time=100,max_pdb_size=1000000,max_collection_size=10000000,random_seed={random_seed},max_collection_size=10000000,total_max_time={MAX_TIME},stagnation_limit=infinity,blacklist_trigger_percentage=1,enable_blacklist_on_stagnation=false,verbosity=normal)),verbosity=silent)"]), - IssueConfig(f'cpdbs-multiplecegar-regularplans-pdb1m-pdbs10m-t100-blacklist0.75-stag20-s{random_seed}', ['--search', 
f"astar(cpdbs(multiple_cegar(use_wildcard_plans=false,max_time=100,max_pdb_size=1000000,max_collection_size=10000000,random_seed={random_seed},max_collection_size=10000000,total_max_time={MAX_TIME},stagnation_limit=20,blacklist_trigger_percentage=0.75,enable_blacklist_on_stagnation=true,verbosity=normal)),verbosity=silent)"]), - - IssueConfig(f'cpdbs-multiplecegar-wildcardplans-pdb1m-pdbs10m-t100-s{random_seed}', ['--search', f"astar(cpdbs(multiple_cegar(use_wildcard_plans=true,max_time=100,max_pdb_size=1000000,max_collection_size=10000000,random_seed={random_seed},max_collection_size=10000000,total_max_time={MAX_TIME},stagnation_limit=infinity,blacklist_trigger_percentage=1,enable_blacklist_on_stagnation=false,verbosity=normal)),verbosity=silent)"]), - IssueConfig(f'cpdbs-multiplecegar-wildcardplans-pdb1m-pdbs10m-t100-blacklist0.75-stag20-s{random_seed}', ['--search', f"astar(cpdbs(multiple_cegar(use_wildcard_plans=true,max_time=100,max_pdb_size=1000000,max_collection_size=10000000,random_seed={random_seed},max_collection_size=10000000,total_max_time={MAX_TIME},stagnation_limit=20,blacklist_trigger_percentage=0.75,enable_blacklist_on_stagnation=true,verbosity=normal)),verbosity=silent)"]), -] - -SUITE = common_setup.DEFAULT_OPTIMAL_SUITE -ENVIRONMENT = BaselSlurmEnvironment( - email="silvan.sievers@unibas.ch", - partition="infai_2", - export=[], - # paths obtained via: - # module purge - # module -q load CMake/3.15.3-GCCcore-8.3.0 - # module -q load GCC/8.3.0 - # echo $PATH - # echo $LD_LIBRARY_PATH - setup='export PATH=/scicore/soft/apps/binutils/2.32-GCCcore-8.3.0/bin:/scicore/soft/apps/CMake/3.15.3-GCCcore-8.3.0/bin:/scicore/soft/apps/cURL/7.66.0-GCCcore-8.3.0/bin:/scicore/soft/apps/bzip2/1.0.8-GCCcore-8.3.0/bin:/scicore/soft/apps/ncurses/6.1-GCCcore-8.3.0/bin:/scicore/soft/apps/GCCcore/8.3.0/bin:/infai/sieverss/repos/bin:/infai/sieverss/local:/export/soft/lua_lmod/centos7/lmod/lmod/libexec:/usr/local/bin:/usr/bin:/usr/local/sbin:/usr/sbin:$PATH\nexport 
LD_LIBRARY_PATH=/scicore/soft/apps/binutils/2.32-GCCcore-8.3.0/lib:/scicore/soft/apps/cURL/7.66.0-GCCcore-8.3.0/lib:/scicore/soft/apps/bzip2/1.0.8-GCCcore-8.3.0/lib:/scicore/soft/apps/zlib/1.2.11-GCCcore-8.3.0/lib:/scicore/soft/apps/ncurses/6.1-GCCcore-8.3.0/lib:/scicore/soft/apps/GCCcore/8.3.0/lib64:/scicore/soft/apps/GCCcore/8.3.0/lib') - -if common_setup.is_test_run(): - SUITE = IssueExperiment.DEFAULT_TEST_SUITE - ENVIRONMENT = LocalEnvironment(processes=4) - -exp = IssueExperiment( - revisions=REVISIONS, - configs=CONFIGS, - environment=ENVIRONMENT, -) -exp.add_suite(BENCHMARKS_DIR, SUITE) - -exp.add_parser(exp.EXITCODE_PARSER) -exp.add_parser(exp.TRANSLATOR_PARSER) -exp.add_parser(exp.SINGLE_SEARCH_PARSER) -exp.add_parser(exp.PLANNER_PARSER) - -exp.add_step('build', exp.build) -exp.add_step('start', exp.start_runs) -exp.add_fetcher(name='fetch') -exp.add_parser('cpdbs-parser.py') -exp.add_parser('cegar-parser.py') - -cpdbs_num_patterns = Attribute('cpdbs_num_patterns', absolute=False, min_wins=True) -cpdbs_total_pdb_size = Attribute('cpdbs_total_pdb_size', absolute=False, min_wins=True) -cpdbs_computation_time = Attribute('cpdbs_computation_time', absolute=False, min_wins=True) -score_cpdbs_computation_time = Attribute('score_cpdbs_computation_time', absolute=True, min_wins=False) -cegar_num_iterations = Attribute('cegar_num_iterations', absolute=False, min_wins=True) -cegar_num_patterns = Attribute('cegar_num_patterns', absolute=False, min_wins=True) -cegar_total_pdb_size = Attribute('cegar_total_pdb_size', absolute=False, min_wins=True) -cegar_computation_time = Attribute('cegar_computation_time', absolute=False, min_wins=True) -score_cegar_computation_time = Attribute('score_cegar_computation_time', absolute=True, min_wins=False) - -attributes = [ - cpdbs_num_patterns, - cpdbs_total_pdb_size, - cpdbs_computation_time, - score_cpdbs_computation_time, - cegar_num_iterations, - cegar_num_patterns, - cegar_total_pdb_size, - cegar_computation_time, - 
score_cegar_computation_time, -] -attributes.extend(exp.DEFAULT_TABLE_ATTRIBUTES) -attributes.append('initial_h_value') - -def add_computation_time_score(run): - """ - Convert cegar/cpdbs computation time into scores in the range [0, 1]. - - Best possible performance in a task is counted as 1, while failure - to construct the heuristic and worst performance are counted as 0. - - """ - def log_score(value, min_bound, max_bound): - assert min_bound < max_bound - if value is None: - return 0 - value = max(value, min_bound) - value = min(value, max_bound) - raw_score = math.log(value) - math.log(max_bound) - best_raw_score = math.log(min_bound) - math.log(max_bound) - return raw_score / best_raw_score - - run['score_cegar_computation_time'] = log_score(run.get('cegar_computation_time'), min_bound=1.0, max_bound=MAX_TIME) - run['score_cpdbs_computation_time'] = log_score(run.get('cpdbs_computation_time'), min_bound=1.0, max_bound=MAX_TIME) - return run - -exp.add_absolute_report_step(attributes=attributes,filter=[add_computation_time_score]) - -exp.add_fetcher('data/issue1007-v15-fixed-seed-eval', filter_algorithm=[ - f'issue1007-v15-cpdbs-singlecegar-regularplans-pdb1m-pdbs10m-t100-s{random_seed}', - f'issue1007-v15-cpdbs-singlecegar-wildcardplans-pdb1m-pdbs10m-t100-s{random_seed}', - f'issue1007-v15-cpdbs-multiplecegar-regularplans-pdb1m-pdbs10m-t100-s{random_seed}', - f'issue1007-v15-cpdbs-multiplecegar-wildcardplans-pdb1m-pdbs10m-t100-s{random_seed}', - f'issue1007-v15-cpdbs-multiplecegar-regularplans-pdb1m-pdbs10m-t100-blacklist0.75-stag20-s{random_seed}', - f'issue1007-v15-cpdbs-multiplecegar-wildcardplans-pdb1m-pdbs10m-t100-blacklist0.75-stag20-s{random_seed}', -],merge=True) - -exp.add_comparison_table_step_for_revision_pairs( - revision_pairs=[ - ("issue1007-v15", "issue1007-v16"), - ("issue1007-v15", "issue1007-v17"), - ("issue1007-v16", "issue1007-v17"), - ], - attributes=attributes, - filter=[add_computation_time_score], -) - -exp.run_steps() diff --git 
a/experiments/issue1007/v16-v17-ipdb-sys.py b/experiments/issue1007/v16-v17-ipdb-sys.py deleted file mode 100755 index 562e4a79f6..0000000000 --- a/experiments/issue1007/v16-v17-ipdb-sys.py +++ /dev/null @@ -1,120 +0,0 @@ -#! /usr/bin/env python3 - -import itertools -import math -import os -import subprocess - -from lab.environments import LocalEnvironment, BaselSlurmEnvironment -from lab.reports import Attribute - -from downward.reports.compare import ComparativeReport - -import common_setup -from common_setup import IssueConfig, IssueExperiment - -DIR = os.path.dirname(os.path.abspath(__file__)) -SCRIPT_NAME = os.path.splitext(os.path.basename(__file__))[0] -BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"] -REVISIONS = ["issue1007-base-v2", "issue1007-v16", "issue1007-v17"] -random_seed=2018 -MAX_TIME=900 -if common_setup.is_test_run(): - MAX_TIME=2 -CONFIGS = [ - IssueConfig(f'cpdbs-hillclimbing-pdb1m-pdbs10m-t100-s{random_seed}', ['--search', f'astar(cpdbs(hillclimbing(max_time={MAX_TIME},pdb_max_size=1000000,collection_max_size=10000000,random_seed={random_seed})),verbosity=silent)']), - IssueConfig('cpdbs-sys2', ['--search', 'astar(cpdbs(systematic(pattern_max_size=2)),verbosity=silent)']), - IssueConfig('cpdbs-sys3', ['--search', 'astar(cpdbs(systematic(pattern_max_size=3)),verbosity=silent)']), - -] - -SUITE = common_setup.DEFAULT_OPTIMAL_SUITE -ENVIRONMENT = BaselSlurmEnvironment( - email="silvan.sievers@unibas.ch", - partition="infai_2", - export=[], - # paths obtained via: - # module purge - # module -q load CMake/3.15.3-GCCcore-8.3.0 - # module -q load GCC/8.3.0 - # echo $PATH - # echo $LD_LIBRARY_PATH - setup='export 
PATH=/scicore/soft/apps/binutils/2.32-GCCcore-8.3.0/bin:/scicore/soft/apps/CMake/3.15.3-GCCcore-8.3.0/bin:/scicore/soft/apps/cURL/7.66.0-GCCcore-8.3.0/bin:/scicore/soft/apps/bzip2/1.0.8-GCCcore-8.3.0/bin:/scicore/soft/apps/ncurses/6.1-GCCcore-8.3.0/bin:/scicore/soft/apps/GCCcore/8.3.0/bin:/infai/sieverss/repos/bin:/infai/sieverss/local:/export/soft/lua_lmod/centos7/lmod/lmod/libexec:/usr/local/bin:/usr/bin:/usr/local/sbin:/usr/sbin:$PATH\nexport LD_LIBRARY_PATH=/scicore/soft/apps/binutils/2.32-GCCcore-8.3.0/lib:/scicore/soft/apps/cURL/7.66.0-GCCcore-8.3.0/lib:/scicore/soft/apps/bzip2/1.0.8-GCCcore-8.3.0/lib:/scicore/soft/apps/zlib/1.2.11-GCCcore-8.3.0/lib:/scicore/soft/apps/ncurses/6.1-GCCcore-8.3.0/lib:/scicore/soft/apps/GCCcore/8.3.0/lib64:/scicore/soft/apps/GCCcore/8.3.0/lib') - -if common_setup.is_test_run(): - SUITE = IssueExperiment.DEFAULT_TEST_SUITE - ENVIRONMENT = LocalEnvironment(processes=4) - -exp = IssueExperiment( - revisions=REVISIONS, - configs=CONFIGS, - environment=ENVIRONMENT, -) -exp.add_suite(BENCHMARKS_DIR, SUITE) - -exp.add_parser(exp.EXITCODE_PARSER) -exp.add_parser(exp.TRANSLATOR_PARSER) -exp.add_parser(exp.SINGLE_SEARCH_PARSER) -exp.add_parser(exp.PLANNER_PARSER) - -exp.add_step('build', exp.build) -exp.add_step('start', exp.start_runs) -exp.add_fetcher(name='fetch') -exp.add_parser('cpdbs-parser.py') - -cpdbs_num_patterns = Attribute('cpdbs_num_patterns', absolute=False, min_wins=True) -cpdbs_total_pdb_size = Attribute('cpdbs_total_pdb_size', absolute=False, min_wins=True) -cpdbs_computation_time = Attribute('cpdbs_computation_time', absolute=False, min_wins=True) -score_cpdbs_computation_time = Attribute('score_cpdbs_computation_time', absolute=True, min_wins=False) - -attributes = [ - cpdbs_num_patterns, - cpdbs_total_pdb_size, - cpdbs_computation_time, - score_cpdbs_computation_time, -] -attributes.extend(exp.DEFAULT_TABLE_ATTRIBUTES) -attributes.append('initial_h_value') - -def add_computation_time_score(run): - """ - Convert 
cegar/cpdbs computation time into scores in the range [0, 1]. - - Best possible performance in a task is counted as 1, while failure - to construct the heuristic and worst performance are counted as 0. - - """ - def log_score(value, min_bound, max_bound): - assert min_bound < max_bound - if value is None: - return 0 - value = max(value, min_bound) - value = min(value, max_bound) - raw_score = math.log(value) - math.log(max_bound) - best_raw_score = math.log(min_bound) - math.log(max_bound) - return raw_score / best_raw_score - - run['score_cpdbs_computation_time'] = log_score(run.get('cpdbs_computation_time'), min_bound=1.0, max_bound=MAX_TIME) - return run - -exp.add_absolute_report_step(attributes=attributes,filter=[add_computation_time_score]) - -exp.add_fetcher('data/issue1007-v15-ipdb-sys-eval', filter_algorithm=[ - f'issue1007-v15-cpdbs-hillclimbing-pdb1m-pdbs10m-t100-s{random_seed}', - f'issue1007-v15-cpdbs-sys2', - f'issue1007-v15-cpdbs-sys3', -],merge=True) - -exp.add_comparison_table_step_for_revision_pairs( - revision_pairs=[ - ("issue1007-v15", "issue1007-v16"), - ("issue1007-v16", "issue1007-v17"), - ("issue1007-v15", "issue1007-v17"), - ("issue1007-base-v2", "issue1007-v16"), - ("issue1007-base-v2", "issue1007-v17"), - ], - attributes=attributes, - filter=[add_computation_time_score], -) - -exp.run_steps() diff --git a/experiments/issue1007/v16-v17-multiple-seeds.py b/experiments/issue1007/v16-v17-multiple-seeds.py deleted file mode 100755 index 80aecc2260..0000000000 --- a/experiments/issue1007/v16-v17-multiple-seeds.py +++ /dev/null @@ -1,149 +0,0 @@ -#! 
/usr/bin/env python3 - -import itertools -import math -import os - -from lab.environments import LocalEnvironment, BaselSlurmEnvironment -from lab.reports import Attribute - -from downward.reports.compare import ComparativeReport - -import common_setup -from common_setup import IssueConfig, IssueExperiment -from average_report import AverageAlgorithmReport - -DIR = os.path.dirname(os.path.abspath(__file__)) -SCRIPT_NAME = os.path.splitext(os.path.basename(__file__))[0] -BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"] -REVISIONS = ["issue1007-v16", "issue1007-v17"] -MAX_TIME=20 -if common_setup.is_test_run(): - MAX_TIME=1 -CONFIGS = [] -for random_seed in range(2018, 2028): - ### single cegar - CONFIGS.append(IssueConfig(f'cpdbs-singlecegar-regularplans-pdb1m-pdbs10m-t20-s{random_seed}', ['--search', f'astar(cpdbs(single_cegar(use_wildcard_plans=false,max_time={MAX_TIME},max_pdb_size=1000000,max_collection_size=10000000,random_seed={random_seed},verbosity=normal)),verbosity=silent)'], driver_options=['--search-time-limit', '5m'])) - - ### multiple cegar - CONFIGS.append(IssueConfig(f'cpdbs-multiplecegar-wildcardplans-pdb1m-pdbs10m-t20-blacklist0.75-stag4-s{random_seed}', ['--search', f'astar(cpdbs(multiple_cegar(use_wildcard_plans=true,max_time=100,max_pdb_size=1000000,max_collection_size=10000000,random_seed={random_seed},total_max_time={MAX_TIME},stagnation_limit=4,blacklist_trigger_percentage=0.75,enable_blacklist_on_stagnation=true,verbosity=normal)),verbosity=silent)'], driver_options=['--search-time-limit', '5m'])) - -SUITE = common_setup.DEFAULT_OPTIMAL_SUITE -ENVIRONMENT = BaselSlurmEnvironment( - email="silvan.sievers@unibas.ch", - partition="infai_2", - export=[], - # paths obtained via: - # module purge - # module -q load CMake/3.15.3-GCCcore-8.3.0 - # module -q load GCC/8.3.0 - # echo $PATH - # echo $LD_LIBRARY_PATH - setup='export 
PATH=/scicore/soft/apps/binutils/2.32-GCCcore-8.3.0/bin:/scicore/soft/apps/CMake/3.15.3-GCCcore-8.3.0/bin:/scicore/soft/apps/cURL/7.66.0-GCCcore-8.3.0/bin:/scicore/soft/apps/bzip2/1.0.8-GCCcore-8.3.0/bin:/scicore/soft/apps/ncurses/6.1-GCCcore-8.3.0/bin:/scicore/soft/apps/GCCcore/8.3.0/bin:/infai/sieverss/repos/bin:/infai/sieverss/local:/export/soft/lua_lmod/centos7/lmod/lmod/libexec:/usr/local/bin:/usr/bin:/usr/local/sbin:/usr/sbin:$PATH\nexport LD_LIBRARY_PATH=/scicore/soft/apps/binutils/2.32-GCCcore-8.3.0/lib:/scicore/soft/apps/cURL/7.66.0-GCCcore-8.3.0/lib:/scicore/soft/apps/bzip2/1.0.8-GCCcore-8.3.0/lib:/scicore/soft/apps/zlib/1.2.11-GCCcore-8.3.0/lib:/scicore/soft/apps/ncurses/6.1-GCCcore-8.3.0/lib:/scicore/soft/apps/GCCcore/8.3.0/lib64:/scicore/soft/apps/GCCcore/8.3.0/lib') - -if common_setup.is_test_run(): - SUITE = IssueExperiment.DEFAULT_TEST_SUITE - ENVIRONMENT = LocalEnvironment(processes=4) - -exp = IssueExperiment( - revisions=REVISIONS, - configs=CONFIGS, - environment=ENVIRONMENT, -) -exp.add_suite(BENCHMARKS_DIR, SUITE) - -exp.add_parser(exp.EXITCODE_PARSER) -exp.add_parser(exp.TRANSLATOR_PARSER) -exp.add_parser(exp.SINGLE_SEARCH_PARSER) -exp.add_parser(exp.PLANNER_PARSER) - -exp.add_step('build', exp.build) -exp.add_step('start', exp.start_runs) -exp.add_fetcher(name='fetch') -exp.add_parser('cpdbs-parser.py') -exp.add_parser('cegar-parser.py') - -cpdbs_num_patterns = Attribute('cpdbs_num_patterns', absolute=False, min_wins=True) -cpdbs_total_pdb_size = Attribute('cpdbs_total_pdb_size', absolute=False, min_wins=True) -cpdbs_computation_time = Attribute('cpdbs_computation_time', absolute=False, min_wins=True) -score_cpdbs_computation_time = Attribute('score_cpdbs_computation_time', absolute=True, min_wins=False) -cegar_num_iterations = Attribute('cegar_num_iterations', absolute=False, min_wins=True) -cegar_num_patterns = Attribute('cegar_num_patterns', absolute=False, min_wins=True) -cegar_total_pdb_size = Attribute('cegar_total_pdb_size', 
absolute=False, min_wins=True) -cegar_computation_time = Attribute('cegar_computation_time', absolute=False, min_wins=True) -score_cegar_computation_time = Attribute('score_cegar_computation_time', absolute=True, min_wins=False) - -exp.add_absolute_report_step(attributes=['coverage']) - -### compare against v15 -exp.add_fetcher('data/issue1007-v15-multiple-seeds-eval',merge=True,filter_algorithm=[ - 'issue1007-v15-cpdbs-singlecegar-regularplans-pdb1m-pdbs10m-t20-s{}'.format(random_seed) for random_seed in range(2018, 2028) -] + [ - 'issue1007-v15-cpdbs-multiplecegar-wildcardplans-pdb1m-pdbs10m-t20-blacklist0.75-stag4-s{}'.format(random_seed) for random_seed in range(2018, 2028) -]) - -def add_computation_time_score(run): - """ - Convert cegar/cpdbs computation time into scores in the range [0, 1]. - - Best possible performance in a task is counted as 1, while failure - to construct the heuristic and worst performance are counted as 0. - - """ - def log_score(value, min_bound, max_bound): - assert min_bound < max_bound - if value is None: - return 0 - value = max(value, min_bound) - value = min(value, max_bound) - raw_score = math.log(value) - math.log(max_bound) - best_raw_score = math.log(min_bound) - math.log(max_bound) - return raw_score / best_raw_score - - run['score_cegar_computation_time'] = log_score(run.get('cegar_computation_time'), min_bound=1.0, max_bound=MAX_TIME) - run['score_cpdbs_computation_time'] = log_score(run.get('cpdbs_computation_time'), min_bound=1.0, max_bound=MAX_TIME) - return run - -exp.add_report( - AverageAlgorithmReport( - algo_name_suffixes=['-s{}'.format(seed) for seed in range(2018,2028)], - attributes=[ - 'coverage', 'search_time', 'total_time', - 'expansions_until_last_jump', 'score_search_time', - 'score_total_time', 'score_memory', 'score_expansions', - 'initial_h_value', cpdbs_num_patterns, - cpdbs_total_pdb_size, cpdbs_computation_time, - score_cpdbs_computation_time, cegar_num_iterations, - cegar_num_patterns, 
cegar_total_pdb_size, - cegar_computation_time, score_cegar_computation_time, - ], - filter=[add_computation_time_score], - ), - outfile=os.path.join(exp.eval_dir, "average", "properties"), - name="report-average" -) - -exp.add_fetcher(os.path.join(exp.eval_dir, 'average'), merge=True) -exp._configs = [ - IssueConfig('cpdbs-singlecegar-regularplans-pdb1m-pdbs10m-t20', []), - IssueConfig('cpdbs-multiplecegar-wildcardplans-pdb1m-pdbs10m-t20-blacklist0.75-stag4', []), -] -exp.add_comparison_table_step_for_revision_pairs( - revision_pairs=[ - ("issue1007-v15", "issue1007-v16"), - ("issue1007-v15", "issue1007-v17"), - ("issue1007-v16", "issue1007-v17"), - ], - attributes=[ - 'coverage', 'search_time', 'total_time', - 'expansions_until_last_jump', 'score_search_time', - 'score_total_time', 'score_memory', 'score_expansions', - 'initial_h_value', cpdbs_num_patterns, cpdbs_total_pdb_size, - cpdbs_computation_time, score_cpdbs_computation_time, - cegar_num_iterations, cegar_num_patterns, cegar_total_pdb_size, - cegar_computation_time, score_cegar_computation_time, - ], - filter=[add_computation_time_score], -) - -exp.run_steps() diff --git a/experiments/issue1007/v18-fixed-seed.py b/experiments/issue1007/v18-fixed-seed.py deleted file mode 100755 index a28051aa4d..0000000000 --- a/experiments/issue1007/v18-fixed-seed.py +++ /dev/null @@ -1,125 +0,0 @@ -#! 
/usr/bin/env python3 - -import itertools -import math -import os -import subprocess - -from lab.environments import LocalEnvironment, BaselSlurmEnvironment -from lab.reports import Attribute - -from downward.reports.compare import ComparativeReport - -import common_setup -from common_setup import IssueConfig, IssueExperiment - -DIR = os.path.dirname(os.path.abspath(__file__)) -SCRIPT_NAME = os.path.splitext(os.path.basename(__file__))[0] -BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"] -REVISIONS = ["issue1007-v17", "issue1007-v18"] -random_seed=2018 -MAX_TIME=100 -if common_setup.is_test_run(): - MAX_TIME=2 -CONFIGS = [ - ### single cegar - IssueConfig(f'cpdbs-singlecegar-regularplans-pdb1m-pdbs10m-t100-s{random_seed}', ['--search', f'astar(cpdbs(single_cegar(use_wildcard_plans=false,max_time={MAX_TIME},max_pdb_size=1000000,max_collection_size=10000000,random_seed={random_seed},verbosity=normal)),verbosity=silent)']), - IssueConfig(f'cpdbs-singlecegar-wildcardplans-pdb1m-pdbs10m-t100-s{random_seed}', ['--search', f'astar(cpdbs(single_cegar(use_wildcard_plans=true,max_time={MAX_TIME},max_pdb_size=1000000,max_collection_size=10000000,random_seed={random_seed},verbosity=normal)),verbosity=silent)']), - - ### multiple cegar - IssueConfig(f'cpdbs-multiplecegar-regularplans-pdb1m-pdbs10m-t100-s{random_seed}', ['--search', f"astar(cpdbs(multiple_cegar(use_wildcard_plans=false,max_time=100,max_pdb_size=1000000,max_collection_size=10000000,random_seed={random_seed},max_collection_size=10000000,total_max_time={MAX_TIME},stagnation_limit=infinity,blacklist_trigger_percentage=1,enable_blacklist_on_stagnation=false,verbosity=normal)),verbosity=silent)"]), - IssueConfig(f'cpdbs-multiplecegar-regularplans-pdb1m-pdbs10m-t100-blacklist0.75-stag20-s{random_seed}', ['--search', 
f"astar(cpdbs(multiple_cegar(use_wildcard_plans=false,max_time=100,max_pdb_size=1000000,max_collection_size=10000000,random_seed={random_seed},max_collection_size=10000000,total_max_time={MAX_TIME},stagnation_limit=20,blacklist_trigger_percentage=0.75,enable_blacklist_on_stagnation=true,verbosity=normal)),verbosity=silent)"]), - - IssueConfig(f'cpdbs-multiplecegar-wildcardplans-pdb1m-pdbs10m-t100-s{random_seed}', ['--search', f"astar(cpdbs(multiple_cegar(use_wildcard_plans=true,max_time=100,max_pdb_size=1000000,max_collection_size=10000000,random_seed={random_seed},max_collection_size=10000000,total_max_time={MAX_TIME},stagnation_limit=infinity,blacklist_trigger_percentage=1,enable_blacklist_on_stagnation=false,verbosity=normal)),verbosity=silent)"]), - IssueConfig(f'cpdbs-multiplecegar-wildcardplans-pdb1m-pdbs10m-t100-blacklist0.75-stag20-s{random_seed}', ['--search', f"astar(cpdbs(multiple_cegar(use_wildcard_plans=true,max_time=100,max_pdb_size=1000000,max_collection_size=10000000,random_seed={random_seed},max_collection_size=10000000,total_max_time={MAX_TIME},stagnation_limit=20,blacklist_trigger_percentage=0.75,enable_blacklist_on_stagnation=true,verbosity=normal)),verbosity=silent)"]), -] - -SUITE = common_setup.DEFAULT_OPTIMAL_SUITE -ENVIRONMENT = BaselSlurmEnvironment( - email="silvan.sievers@unibas.ch", - partition="infai_2", - export=[], - # paths obtained via: - # module purge - # module -q load CMake/3.15.3-GCCcore-8.3.0 - # module -q load GCC/8.3.0 - # echo $PATH - # echo $LD_LIBRARY_PATH - setup='export PATH=/scicore/soft/apps/binutils/2.32-GCCcore-8.3.0/bin:/scicore/soft/apps/CMake/3.15.3-GCCcore-8.3.0/bin:/scicore/soft/apps/cURL/7.66.0-GCCcore-8.3.0/bin:/scicore/soft/apps/bzip2/1.0.8-GCCcore-8.3.0/bin:/scicore/soft/apps/ncurses/6.1-GCCcore-8.3.0/bin:/scicore/soft/apps/GCCcore/8.3.0/bin:/infai/sieverss/repos/bin:/infai/sieverss/local:/export/soft/lua_lmod/centos7/lmod/lmod/libexec:/usr/local/bin:/usr/bin:/usr/local/sbin:/usr/sbin:$PATH\nexport 
LD_LIBRARY_PATH=/scicore/soft/apps/binutils/2.32-GCCcore-8.3.0/lib:/scicore/soft/apps/cURL/7.66.0-GCCcore-8.3.0/lib:/scicore/soft/apps/bzip2/1.0.8-GCCcore-8.3.0/lib:/scicore/soft/apps/zlib/1.2.11-GCCcore-8.3.0/lib:/scicore/soft/apps/ncurses/6.1-GCCcore-8.3.0/lib:/scicore/soft/apps/GCCcore/8.3.0/lib64:/scicore/soft/apps/GCCcore/8.3.0/lib') - -if common_setup.is_test_run(): - SUITE = IssueExperiment.DEFAULT_TEST_SUITE - ENVIRONMENT = LocalEnvironment(processes=4) - -exp = IssueExperiment( - revisions=REVISIONS, - configs=CONFIGS, - environment=ENVIRONMENT, -) -exp.add_suite(BENCHMARKS_DIR, SUITE) - -exp.add_parser(exp.EXITCODE_PARSER) -exp.add_parser(exp.TRANSLATOR_PARSER) -exp.add_parser(exp.SINGLE_SEARCH_PARSER) -exp.add_parser(exp.PLANNER_PARSER) - -exp.add_step('build', exp.build) -exp.add_step('start', exp.start_runs) -exp.add_fetcher(name='fetch') -exp.add_parser('cpdbs-parser.py') -exp.add_parser('cegar-parser.py') - -cpdbs_num_patterns = Attribute('cpdbs_num_patterns', absolute=False, min_wins=True) -cpdbs_total_pdb_size = Attribute('cpdbs_total_pdb_size', absolute=False, min_wins=True) -cpdbs_computation_time = Attribute('cpdbs_computation_time', absolute=False, min_wins=True) -score_cpdbs_computation_time = Attribute('score_cpdbs_computation_time', absolute=True, min_wins=False) -cegar_num_iterations = Attribute('cegar_num_iterations', absolute=False, min_wins=True) -cegar_num_patterns = Attribute('cegar_num_patterns', absolute=False, min_wins=True) -cegar_total_pdb_size = Attribute('cegar_total_pdb_size', absolute=False, min_wins=True) -cegar_computation_time = Attribute('cegar_computation_time', absolute=False, min_wins=True) -score_cegar_computation_time = Attribute('score_cegar_computation_time', absolute=True, min_wins=False) - -attributes = [ - cpdbs_num_patterns, - cpdbs_total_pdb_size, - cpdbs_computation_time, - score_cpdbs_computation_time, - cegar_num_iterations, - cegar_num_patterns, - cegar_total_pdb_size, - cegar_computation_time, - 
score_cegar_computation_time, -] -attributes.extend(exp.DEFAULT_TABLE_ATTRIBUTES) -attributes.append('initial_h_value') - -def add_computation_time_score(run): - """ - Convert cegar/cpdbs computation time into scores in the range [0, 1]. - - Best possible performance in a task is counted as 1, while failure - to construct the heuristic and worst performance are counted as 0. - - """ - def log_score(value, min_bound, max_bound): - assert min_bound < max_bound - if value is None: - return 0 - value = max(value, min_bound) - value = min(value, max_bound) - raw_score = math.log(value) - math.log(max_bound) - best_raw_score = math.log(min_bound) - math.log(max_bound) - return raw_score / best_raw_score - - run['score_cegar_computation_time'] = log_score(run.get('cegar_computation_time'), min_bound=1.0, max_bound=MAX_TIME) - run['score_cpdbs_computation_time'] = log_score(run.get('cpdbs_computation_time'), min_bound=1.0, max_bound=MAX_TIME) - return run - -exp.add_absolute_report_step(attributes=attributes,filter=[add_computation_time_score]) - -exp.add_comparison_table_step( - attributes=attributes, - filter=[add_computation_time_score], -) - -exp.run_steps() diff --git a/experiments/issue1007/v18-multiple-seeds.py b/experiments/issue1007/v18-multiple-seeds.py deleted file mode 100755 index 42d93ab5fc..0000000000 --- a/experiments/issue1007/v18-multiple-seeds.py +++ /dev/null @@ -1,137 +0,0 @@ -#! 
/usr/bin/env python3 - -import itertools -import math -import os - -from lab.environments import LocalEnvironment, BaselSlurmEnvironment -from lab.reports import Attribute - -from downward.reports.compare import ComparativeReport - -import common_setup -from common_setup import IssueConfig, IssueExperiment -from average_report import AverageAlgorithmReport - -DIR = os.path.dirname(os.path.abspath(__file__)) -SCRIPT_NAME = os.path.splitext(os.path.basename(__file__))[0] -BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"] -REVISIONS = ["issue1007-v17", "issue1007-v18"] -MAX_TIME=20 -if common_setup.is_test_run(): - MAX_TIME=1 -CONFIGS = [] -for random_seed in range(2018, 2028): - ### single cegar - CONFIGS.append(IssueConfig(f'cpdbs-singlecegar-regularplans-pdb1m-pdbs10m-t20-s{random_seed}', ['--search', f'astar(cpdbs(single_cegar(use_wildcard_plans=false,max_time={MAX_TIME},max_pdb_size=1000000,max_collection_size=10000000,random_seed={random_seed},verbosity=normal)),verbosity=silent)'], driver_options=['--search-time-limit', '5m'])) - - ### multiple cegar - CONFIGS.append(IssueConfig(f'cpdbs-multiplecegar-wildcardplans-pdb1m-pdbs10m-t20-blacklist0.75-stag4-s{random_seed}', ['--search', f'astar(cpdbs(multiple_cegar(use_wildcard_plans=true,max_time=100,max_pdb_size=1000000,max_collection_size=10000000,random_seed={random_seed},total_max_time={MAX_TIME},stagnation_limit=4,blacklist_trigger_percentage=0.75,enable_blacklist_on_stagnation=true,verbosity=normal)),verbosity=silent)'], driver_options=['--search-time-limit', '5m'])) - -SUITE = common_setup.DEFAULT_OPTIMAL_SUITE -ENVIRONMENT = BaselSlurmEnvironment( - email="silvan.sievers@unibas.ch", - partition="infai_2", - export=[], - # paths obtained via: - # module purge - # module -q load CMake/3.15.3-GCCcore-8.3.0 - # module -q load GCC/8.3.0 - # echo $PATH - # echo $LD_LIBRARY_PATH - setup='export 
PATH=/scicore/soft/apps/binutils/2.32-GCCcore-8.3.0/bin:/scicore/soft/apps/CMake/3.15.3-GCCcore-8.3.0/bin:/scicore/soft/apps/cURL/7.66.0-GCCcore-8.3.0/bin:/scicore/soft/apps/bzip2/1.0.8-GCCcore-8.3.0/bin:/scicore/soft/apps/ncurses/6.1-GCCcore-8.3.0/bin:/scicore/soft/apps/GCCcore/8.3.0/bin:/infai/sieverss/repos/bin:/infai/sieverss/local:/export/soft/lua_lmod/centos7/lmod/lmod/libexec:/usr/local/bin:/usr/bin:/usr/local/sbin:/usr/sbin:$PATH\nexport LD_LIBRARY_PATH=/scicore/soft/apps/binutils/2.32-GCCcore-8.3.0/lib:/scicore/soft/apps/cURL/7.66.0-GCCcore-8.3.0/lib:/scicore/soft/apps/bzip2/1.0.8-GCCcore-8.3.0/lib:/scicore/soft/apps/zlib/1.2.11-GCCcore-8.3.0/lib:/scicore/soft/apps/ncurses/6.1-GCCcore-8.3.0/lib:/scicore/soft/apps/GCCcore/8.3.0/lib64:/scicore/soft/apps/GCCcore/8.3.0/lib') - -if common_setup.is_test_run(): - SUITE = IssueExperiment.DEFAULT_TEST_SUITE - ENVIRONMENT = LocalEnvironment(processes=4) - -exp = IssueExperiment( - revisions=REVISIONS, - configs=CONFIGS, - environment=ENVIRONMENT, -) -exp.add_suite(BENCHMARKS_DIR, SUITE) - -exp.add_parser(exp.EXITCODE_PARSER) -exp.add_parser(exp.TRANSLATOR_PARSER) -exp.add_parser(exp.SINGLE_SEARCH_PARSER) -exp.add_parser(exp.PLANNER_PARSER) - -exp.add_step('build', exp.build) -exp.add_step('start', exp.start_runs) -exp.add_fetcher(name='fetch') -exp.add_parser('cpdbs-parser.py') -exp.add_parser('cegar-parser.py') - -cpdbs_num_patterns = Attribute('cpdbs_num_patterns', absolute=False, min_wins=True) -cpdbs_total_pdb_size = Attribute('cpdbs_total_pdb_size', absolute=False, min_wins=True) -cpdbs_computation_time = Attribute('cpdbs_computation_time', absolute=False, min_wins=True) -score_cpdbs_computation_time = Attribute('score_cpdbs_computation_time', absolute=True, min_wins=False) -cegar_num_iterations = Attribute('cegar_num_iterations', absolute=False, min_wins=True) -cegar_num_patterns = Attribute('cegar_num_patterns', absolute=False, min_wins=True) -cegar_total_pdb_size = Attribute('cegar_total_pdb_size', 
absolute=False, min_wins=True) -cegar_computation_time = Attribute('cegar_computation_time', absolute=False, min_wins=True) -score_cegar_computation_time = Attribute('score_cegar_computation_time', absolute=True, min_wins=False) - -exp.add_absolute_report_step(attributes=['coverage']) - -def add_computation_time_score(run): - """ - Convert cegar/cpdbs computation time into scores in the range [0, 1]. - - Best possible performance in a task is counted as 1, while failure - to construct the heuristic and worst performance are counted as 0. - - """ - def log_score(value, min_bound, max_bound): - assert min_bound < max_bound - if value is None: - return 0 - value = max(value, min_bound) - value = min(value, max_bound) - raw_score = math.log(value) - math.log(max_bound) - best_raw_score = math.log(min_bound) - math.log(max_bound) - return raw_score / best_raw_score - - run['score_cegar_computation_time'] = log_score(run.get('cegar_computation_time'), min_bound=1.0, max_bound=MAX_TIME) - run['score_cpdbs_computation_time'] = log_score(run.get('cpdbs_computation_time'), min_bound=1.0, max_bound=MAX_TIME) - return run - -exp.add_report( - AverageAlgorithmReport( - algo_name_suffixes=['-s{}'.format(seed) for seed in range(2018,2028)], - attributes=[ - 'coverage', 'search_time', 'total_time', - 'expansions_until_last_jump', 'score_search_time', - 'score_total_time', 'score_memory', 'score_expansions', - 'initial_h_value', cpdbs_num_patterns, - cpdbs_total_pdb_size, cpdbs_computation_time, - score_cpdbs_computation_time, cegar_num_iterations, - cegar_num_patterns, cegar_total_pdb_size, - cegar_computation_time, score_cegar_computation_time, - ], - filter=[add_computation_time_score], - ), - outfile=os.path.join(exp.eval_dir, "average", "properties"), - name="report-average" -) - -exp.add_fetcher(os.path.join(exp.eval_dir, 'average'), merge=True) -exp._configs = [ - IssueConfig('cpdbs-singlecegar-regularplans-pdb1m-pdbs10m-t20', []), - 
IssueConfig('cpdbs-multiplecegar-wildcardplans-pdb1m-pdbs10m-t20-blacklist0.75-stag4', []), -] -exp.add_comparison_table_step( - attributes=[ - 'coverage', 'search_time', 'total_time', - 'expansions_until_last_jump', 'score_search_time', - 'score_total_time', 'score_memory', 'score_expansions', - 'initial_h_value', cpdbs_num_patterns, cpdbs_total_pdb_size, - cpdbs_computation_time, score_cpdbs_computation_time, - cegar_num_iterations, cegar_num_patterns, cegar_total_pdb_size, - cegar_computation_time, score_cegar_computation_time, - ], - filter=[add_computation_time_score], -) - -exp.run_steps() diff --git a/experiments/issue1007/v2-best-average.py b/experiments/issue1007/v2-best-average.py deleted file mode 100755 index 4b5af25b80..0000000000 --- a/experiments/issue1007/v2-best-average.py +++ /dev/null @@ -1,30 +0,0 @@ -#! /usr/bin/env python3 - -import itertools -import os - -from lab.environments import LocalEnvironment, BaselSlurmEnvironment - -from downward.reports.compare import ComparativeReport - -import common_setup -from common_setup import IssueConfig, IssueExperiment -from average_report import AverageAlgorithmReport - -DIR = os.path.dirname(os.path.abspath(__file__)) -SCRIPT_NAME = os.path.splitext(os.path.basename(__file__))[0] -REVISIONS = ["issue1007-v1", "issue1007-v2"] -CONFIGS = [ - IssueConfig('cpdbs-multiple-cegar-wildcardplans-pdb1m-pdbs10m-t100-blacklist0.75-stag20', []), -] - -exp = IssueExperiment( - revisions=REVISIONS, - configs=CONFIGS, -) - -exp.add_comparison_table_step( - attributes=['coverage', 'search_time', 'total_time', 'expansions_until_last_jump'] -) - -exp.run_steps() diff --git a/experiments/issue1007/v2-best.py b/experiments/issue1007/v2-best.py deleted file mode 100755 index d3fc373be3..0000000000 --- a/experiments/issue1007/v2-best.py +++ /dev/null @@ -1,58 +0,0 @@ -#! 
/usr/bin/env python3 - -import itertools -import os - -from lab.environments import LocalEnvironment, BaselSlurmEnvironment - -from downward.reports.compare import ComparativeReport - -import common_setup -from common_setup import IssueConfig, IssueExperiment -from average_report import AverageAlgorithmReport - -DIR = os.path.dirname(os.path.abspath(__file__)) -SCRIPT_NAME = os.path.splitext(os.path.basename(__file__))[0] -BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"] -REVISIONS = ["issue1007-v1", "issue1007-v2"] -CONFIGS = [] -for random_seed in range(2018, 2028): - CONFIGS.append(IssueConfig('cpdbs-multiple-cegar-wildcardplans-pdb1m-pdbs10m-t100-blacklist0.75-stag20-s{}'.format(random_seed), ['--search', "astar(cpdbs(multiple_cegar(max_refinements=infinity,wildcard_plans=true,max_time=100,max_pdb_size=1000000,max_collection_size=10000000,initial_random_seed={},total_collection_max_size=10000000,total_time_limit=100,stagnation_limit=20,blacklist_trigger_time=0.75,blacklist_on_stagnation=true,verbosity=silent)),verbosity=silent)".format(random_seed)])) - -SUITE = common_setup.DEFAULT_OPTIMAL_SUITE -ENVIRONMENT = BaselSlurmEnvironment( - email="silvan.sievers@unibas.ch", - partition="infai_2", - export=[], - setup='export PATH=/scicore/soft/apps/binutils/2.32-GCCcore-8.3.0/bin:/scicore/soft/apps/GCCcore/8.3.0/bin:/infai/sieverss/repos/bin:/infai/sieverss/local:/export/soft/lua_lmod/centos7/lmod/lmod/libexec:$PATH\nexport LD_LIBRARY_PATH=/scicore/soft/apps/binutils/2.32-GCCcore-8.3.0/lib:/scicore/soft/apps/zlib/1.2.11-GCCcore-8.3.0/lib:/scicore/soft/apps/GCCcore/8.3.0/lib64:/scicore/soft/apps/GCCcore/8.3.0/lib') - -if common_setup.is_test_run(): - SUITE = IssueExperiment.DEFAULT_TEST_SUITE - ENVIRONMENT = LocalEnvironment(processes=4) - -exp = IssueExperiment( - revisions=REVISIONS, - configs=CONFIGS, - environment=ENVIRONMENT, -) -exp.add_suite(BENCHMARKS_DIR, SUITE) - -exp.add_parser(exp.EXITCODE_PARSER) -exp.add_parser(exp.TRANSLATOR_PARSER) 
-exp.add_parser(exp.SINGLE_SEARCH_PARSER) -exp.add_parser(exp.PLANNER_PARSER) - -exp.add_step('build', exp.build) -exp.add_step('start', exp.start_runs) -exp.add_fetcher(name='fetch') - -exp.add_absolute_report_step() - -report = AverageAlgorithmReport( - algo_name_suffixes=['-s{}'.format(seed) for seed in range(2018,2028)], - directory=os.path.join('data', exp.name + '-average-eval'), - attributes=['coverage', 'search_time', 'total_time', 'expansions_until_last_jump']) -outfile = os.path.join(exp.eval_dir, "dummy.txt") -exp.add_report(report, outfile=outfile, name="report-average") - -exp.run_steps() diff --git a/experiments/issue1007/v3-parser.py b/experiments/issue1007/v3-parser.py deleted file mode 100755 index 2524e884bb..0000000000 --- a/experiments/issue1007/v3-parser.py +++ /dev/null @@ -1,24 +0,0 @@ -#! /usr/bin/env python - -from lab.parser import Parser - -parser = Parser() -parser.add_pattern('single_cegar_pdbs_computation_time', 'CEGAR_PDBs: computation time: (.+)s', required=False, type=float) -parser.add_pattern('single_cegar_pdbs_num_iterations', 'CEGAR_PDBs: number of iterations: (\d+)', required=False, type=int) -parser.add_pattern('single_cegar_pdbs_collection_num_patterns', 'CEGAR_PDBs: final collection number of patterns: (.+)', required=False, type=int) -parser.add_pattern('single_cegar_pdbs_collection_summed_pdb_size', 'CEGAR_PDBs: final collection summed PDB sizes: (.+)', required=False, type=int) - -def parse_lines(content, props): - single_cegar_pdbs_timed_out = False - single_cegar_pdbs_solved_without_search = False - for line in content.split('\n'): - if line == 'CEGAR_PDBs: time limit reached': - single_cegar_pdbs_timed_out = True - if line == 'CEGAR_PDBs: task solved during computation of abstract solutions': - single_cegar_pdbs_solved_without_search = True - props['single_cegar_pdbs_timed_out'] = single_cegar_pdbs_timed_out - props['single_cegar_pdbs_solved_without_search'] = single_cegar_pdbs_solved_without_search - 
-parser.add_function(parse_lines) - -parser.parse() diff --git a/experiments/issue1007/v3-single-cegar-wildcard-average.py b/experiments/issue1007/v3-single-cegar-wildcard-average.py deleted file mode 100755 index a6e057c740..0000000000 --- a/experiments/issue1007/v3-single-cegar-wildcard-average.py +++ /dev/null @@ -1,32 +0,0 @@ -#! /usr/bin/env python3 - -import itertools -import os - -from lab.environments import LocalEnvironment, BaselSlurmEnvironment - -from downward.reports.compare import ComparativeReport - -import common_setup -from common_setup import IssueConfig, IssueExperiment -from average_report import AverageAlgorithmReport - -DIR = os.path.dirname(os.path.abspath(__file__)) -SCRIPT_NAME = os.path.splitext(os.path.basename(__file__))[0] -REVISIONS = ["issue1007-v2", "issue1007-v3"] -CONFIGS = [ - IssueConfig('cpdbs-single-cegar-allgoals-wildcardplans-pdb1m-pdbs10m-t100', []), -] - -exp = IssueExperiment( - revisions=REVISIONS, - configs=CONFIGS, -) - -exp.add_comparison_table_step( - attributes=['coverage', 'single_cegar_pdbs_solved_without_search', - 'single_cegar_pdbs_computation_time', 'search_time', 'total_time', - 'expansions_until_last_jump'] -) - -exp.run_steps() diff --git a/experiments/issue1007/v3-single-cegar-wildcard.py b/experiments/issue1007/v3-single-cegar-wildcard.py deleted file mode 100755 index e761691c35..0000000000 --- a/experiments/issue1007/v3-single-cegar-wildcard.py +++ /dev/null @@ -1,71 +0,0 @@ -#! 
/usr/bin/env python3 - -import itertools -import os - -from lab.environments import LocalEnvironment, BaselSlurmEnvironment - -from downward.reports.compare import ComparativeReport - -import common_setup -from common_setup import IssueConfig, IssueExperiment -from average_report import AverageAlgorithmReport - -DIR = os.path.dirname(os.path.abspath(__file__)) -SCRIPT_NAME = os.path.splitext(os.path.basename(__file__))[0] -BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"] -REVISIONS = ["issue1007-v2", "issue1007-v3"] -CONFIGS = [] -for random_seed in range(2018, 2028): - CONFIGS.append(IssueConfig('cpdbs-single-cegar-allgoals-wildcardplans-pdb1m-pdbs10m-t100-s{}'.format(random_seed), ['--search', 'astar(cpdbs(single_cegar(max_refinements=infinity,ignore_goal_violations=false,wildcard_plans=true,max_time=100,max_pdb_size=1000000,max_collection_size=10000000,random_seed={},verbosity=normal)),verbosity=silent)'.format(random_seed)])) - -SUITE = common_setup.DEFAULT_OPTIMAL_SUITE -ENVIRONMENT = BaselSlurmEnvironment( - email="silvan.sievers@unibas.ch", - partition="infai_2", - export=[], - setup='export PATH=/scicore/soft/apps/binutils/2.32-GCCcore-8.3.0/bin:/scicore/soft/apps/GCCcore/8.3.0/bin:/infai/sieverss/repos/bin:/infai/sieverss/local:/export/soft/lua_lmod/centos7/lmod/lmod/libexec:$PATH\nexport LD_LIBRARY_PATH=/scicore/soft/apps/binutils/2.32-GCCcore-8.3.0/lib:/scicore/soft/apps/zlib/1.2.11-GCCcore-8.3.0/lib:/scicore/soft/apps/GCCcore/8.3.0/lib64:/scicore/soft/apps/GCCcore/8.3.0/lib') - -if common_setup.is_test_run(): - SUITE = IssueExperiment.DEFAULT_TEST_SUITE - ENVIRONMENT = LocalEnvironment(processes=4) - -exp = IssueExperiment( - revisions=REVISIONS, - configs=CONFIGS, - environment=ENVIRONMENT, -) -exp.add_suite(BENCHMARKS_DIR, SUITE) - -exp.add_parser(exp.EXITCODE_PARSER) -exp.add_parser(exp.TRANSLATOR_PARSER) -exp.add_parser(exp.SINGLE_SEARCH_PARSER) -exp.add_parser(exp.PLANNER_PARSER) - -exp.add_step('build', exp.build) -exp.add_step('start', 
exp.start_runs) -exp.add_fetcher(name='fetch') -exp.add_parser('v3-parser.py') - -attributes=exp.DEFAULT_TABLE_ATTRIBUTES -attributes.extend([ - 'single_cegar_pdbs_solved_without_search', - 'single_cegar_pdbs_computation_time', - 'single_cegar_pdbs_timed_out', - 'single_cegar_pdbs_num_iterations', - 'single_cegar_pdbs_collection_num_patterns', - 'single_cegar_pdbs_collection_summed_pdb_size', -]) -exp.add_absolute_report_step() - -report = AverageAlgorithmReport( - algo_name_suffixes=['-s{}'.format(seed) for seed in range(2018,2028)], - directory=os.path.join('data', exp.name + '-average-eval'), - attributes=['coverage', 'single_cegar_pdbs_solved_without_search', - 'single_cegar_pdbs_computation_time', 'search_time', 'total_time', - 'expansions_until_last_jump'] -) -outfile = os.path.join(exp.eval_dir, "dummy.txt") -exp.add_report(report, outfile=outfile, name="report-average") - -exp.run_steps() diff --git a/experiments/issue1007/v3.py b/experiments/issue1007/v3.py deleted file mode 100755 index fe1a03accd..0000000000 --- a/experiments/issue1007/v3.py +++ /dev/null @@ -1,54 +0,0 @@ -#! 
/usr/bin/env python3 - -import itertools -import os - -from lab.environments import LocalEnvironment, BaselSlurmEnvironment - -from downward.reports.compare import ComparativeReport - -import common_setup -from common_setup import IssueConfig, IssueExperiment - -DIR = os.path.dirname(os.path.abspath(__file__)) -SCRIPT_NAME = os.path.splitext(os.path.basename(__file__))[0] -BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"] -REVISIONS = ["issue1007-v2", "issue1007-v3"] -random_seed=2018 -CONFIGS = [ - ### single cegar - IssueConfig('cpdbs-single-cegar-allgoals-regularplans-pdb1m-pdbs10m-t100-s{}'.format(random_seed), ['--search', 'astar(cpdbs(single_cegar(max_refinements=infinity,ignore_goal_violations=false,wildcard_plans=false,max_time=100,max_pdb_size=1000000,max_collection_size=10000000,random_seed={},verbosity=silent)),verbosity=silent)'.format(random_seed)]), - IssueConfig('cpdbs-single-cegar-allgoals-wildcardplans-pdb1m-pdbs10m-t100-s{}'.format(random_seed), ['--search', 'astar(cpdbs(single_cegar(max_refinements=infinity,ignore_goal_violations=false,wildcard_plans=true,max_time=100,max_pdb_size=1000000,max_collection_size=10000000,random_seed={},verbosity=silent)),verbosity=silent)'.format(random_seed)]), -] - -SUITE = common_setup.DEFAULT_OPTIMAL_SUITE -ENVIRONMENT = BaselSlurmEnvironment( - email="silvan.sievers@unibas.ch", - partition="infai_2", - export=[], - setup='export PATH=/scicore/soft/apps/binutils/2.32-GCCcore-8.3.0/bin:/scicore/soft/apps/GCCcore/8.3.0/bin:/infai/sieverss/repos/bin:/infai/sieverss/local:/export/soft/lua_lmod/centos7/lmod/lmod/libexec:$PATH\nexport LD_LIBRARY_PATH=/scicore/soft/apps/binutils/2.32-GCCcore-8.3.0/lib:/scicore/soft/apps/zlib/1.2.11-GCCcore-8.3.0/lib:/scicore/soft/apps/GCCcore/8.3.0/lib64:/scicore/soft/apps/GCCcore/8.3.0/lib') - -if common_setup.is_test_run(): - SUITE = IssueExperiment.DEFAULT_TEST_SUITE - ENVIRONMENT = LocalEnvironment(processes=4) - -exp = IssueExperiment( - revisions=REVISIONS, - configs=CONFIGS, - 
environment=ENVIRONMENT, -) -exp.add_suite(BENCHMARKS_DIR, SUITE) - -exp.add_parser(exp.EXITCODE_PARSER) -exp.add_parser(exp.TRANSLATOR_PARSER) -exp.add_parser(exp.SINGLE_SEARCH_PARSER) -exp.add_parser(exp.PLANNER_PARSER) - -exp.add_step('build', exp.build) -exp.add_step('start', exp.start_runs) -exp.add_fetcher(name='fetch') - -#exp.add_absolute_report_step() -exp.add_comparison_table_step() - -exp.run_steps() diff --git a/experiments/issue1007/v4-a-multiple-seeds.py b/experiments/issue1007/v4-a-multiple-seeds.py deleted file mode 100755 index 83e4e4a2ee..0000000000 --- a/experiments/issue1007/v4-a-multiple-seeds.py +++ /dev/null @@ -1,88 +0,0 @@ -#! /usr/bin/env python3 - -import itertools -import os - -from lab.environments import LocalEnvironment, BaselSlurmEnvironment - -from downward.reports.compare import ComparativeReport - -import common_setup -from common_setup import IssueConfig, IssueExperiment -from average_report import AverageAlgorithmReport - -DIR = os.path.dirname(os.path.abspath(__file__)) -SCRIPT_NAME = os.path.splitext(os.path.basename(__file__))[0] -BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"] -REVISIONS = ["issue1007-v4-a"] -CONFIGS = [] -for random_seed in range(2018, 2028): - ### single cegar - CONFIGS.append(IssueConfig('cpdbs-singlecegar-regularplans-pdb1m-pdbs10m-t100-s{}'.format(random_seed), ['--search', 'astar(cpdbs(single_cegar(max_refinements=infinity,allow_merging=all_flaws,wildcard_plans=false,max_time=20,max_pdb_size=1000000,max_collection_size=10000000,random_seed={},verbosity=normal)),verbosity=silent)'.format(random_seed)], driver_options=['--search-time-limit', '5m'])) - - ### multiple cegar - CONFIGS.append(IssueConfig('cpdbs-multiplecegar-wildcardplans-pdb1m-pdbs10m-t100-blacklist0.75-stag4-s{}'.format(random_seed), ['--search', 
"astar(cpdbs(multiple_cegar(max_refinements=infinity,allow_merging=all_flaws,wildcard_plans=true,max_time=100,max_pdb_size=1000000,max_collection_size=10000000,initial_random_seed={},total_collection_max_size=10000000,total_time_limit=20,stagnation_limit=4,blacklist_trigger_time=0.75,blacklist_on_stagnation=true,verbosity=silent)),verbosity=silent)".format(random_seed)], driver_options=['--search-time-limit', '5m'])) - -SUITE = common_setup.DEFAULT_OPTIMAL_SUITE -ENVIRONMENT = BaselSlurmEnvironment( - email="silvan.sievers@unibas.ch", - partition="infai_2", - export=[], - # paths obtained via: - # module purge - # module -q load CMake/3.15.3-GCCcore-8.3.0 - # module -q load GCC/8.3.0 - # echo $PATH - # echo $LD_LIBRARY_PATH - setup='export PATH=/scicore/soft/apps/binutils/2.32-GCCcore-8.3.0/bin:/scicore/soft/apps/CMake/3.15.3-GCCcore-8.3.0/bin:/scicore/soft/apps/cURL/7.66.0-GCCcore-8.3.0/bin:/scicore/soft/apps/bzip2/1.0.8-GCCcore-8.3.0/bin:/scicore/soft/apps/ncurses/6.1-GCCcore-8.3.0/bin:/scicore/soft/apps/GCCcore/8.3.0/bin:/infai/sieverss/repos/bin:/infai/sieverss/local:/export/soft/lua_lmod/centos7/lmod/lmod/libexec:/usr/local/bin:/usr/bin:/usr/local/sbin:/usr/sbin:$PATH\nexport LD_LIBRARY_PATH=/scicore/soft/apps/binutils/2.32-GCCcore-8.3.0/lib:/scicore/soft/apps/cURL/7.66.0-GCCcore-8.3.0/lib:/scicore/soft/apps/bzip2/1.0.8-GCCcore-8.3.0/lib:/scicore/soft/apps/zlib/1.2.11-GCCcore-8.3.0/lib:/scicore/soft/apps/ncurses/6.1-GCCcore-8.3.0/lib:/scicore/soft/apps/GCCcore/8.3.0/lib64:/scicore/soft/apps/GCCcore/8.3.0/lib') - -if common_setup.is_test_run(): - SUITE = IssueExperiment.DEFAULT_TEST_SUITE - ENVIRONMENT = LocalEnvironment(processes=4) - -exp = IssueExperiment( - revisions=REVISIONS, - configs=CONFIGS, - environment=ENVIRONMENT, -) -exp.add_suite(BENCHMARKS_DIR, SUITE) - -exp.add_parser(exp.EXITCODE_PARSER) -exp.add_parser(exp.TRANSLATOR_PARSER) -exp.add_parser(exp.SINGLE_SEARCH_PARSER) -exp.add_parser(exp.PLANNER_PARSER) - -exp.add_step('build', exp.build) 
-exp.add_step('start', exp.start_runs) -exp.add_fetcher(name='fetch') - -exp.add_fetcher('data/issue1007-v4-multiple-seeds-eval',merge=True,filter_algorithm=[ - 'issue1007-v4-cpdbs-singlecegar-regularplans-pdb1m-pdbs10m-t100-s{}'.format(random_seed) for random_seed in range(2018, 2028) -] + [ - 'issue1007-v4-cpdbs-multiplecegar-wildcardplans-pdb1m-pdbs10m-t100-blacklist0.75-stag4-s{}'.format(random_seed) for random_seed in range(2018, 2028) -]) -exp._revisions=["issue1007-v4", "issue1007-v4-a"] -exp.add_absolute_report_step(attributes=['coverage']) - -exp.add_report( - AverageAlgorithmReport( - algo_name_suffixes=['-s{}'.format(seed) for seed in range(2018,2028)], - attributes=['coverage', 'search_time', 'total_time', - 'expansions_until_last_jump'] - ), - outfile=os.path.join(exp.eval_dir, "average", "properties"), - name="report-average" -) - -exp.add_fetcher('data/issue1007-v4-a-multiple-seeds-eval/average', merge=True) -exp._configs = [ - IssueConfig('cpdbs-singlecegar-regularplans-pdb1m-pdbs10m-t100', []), - IssueConfig('cpdbs-multiplecegar-wildcardplans-pdb1m-pdbs10m-t100-blacklist0.75-stag4', []), -] -exp.add_comparison_table_step( - attributes=['coverage', 'search_time', 'total_time', - 'expansions_until_last_jump'] -) - - -exp.run_steps() diff --git a/experiments/issue1007/v4-b-multiple-seeds.py b/experiments/issue1007/v4-b-multiple-seeds.py deleted file mode 100755 index df1116cf9a..0000000000 --- a/experiments/issue1007/v4-b-multiple-seeds.py +++ /dev/null @@ -1,88 +0,0 @@ -#! 
/usr/bin/env python3 - -import itertools -import os - -from lab.environments import LocalEnvironment, BaselSlurmEnvironment - -from downward.reports.compare import ComparativeReport - -import common_setup -from common_setup import IssueConfig, IssueExperiment -from average_report import AverageAlgorithmReport - -DIR = os.path.dirname(os.path.abspath(__file__)) -SCRIPT_NAME = os.path.splitext(os.path.basename(__file__))[0] -BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"] -REVISIONS = ["issue1007-v4-b"] -CONFIGS = [] -for random_seed in range(2018, 2028): - ### single cegar - CONFIGS.append(IssueConfig('cpdbs-singlecegar-regularplans-pdb1m-pdbs10m-t100-s{}'.format(random_seed), ['--search', 'astar(cpdbs(single_cegar(max_refinements=infinity,wildcard_plans=false,max_time=20,max_pdb_size=1000000,max_collection_size=10000000,random_seed={},verbosity=normal)),verbosity=silent)'.format(random_seed)], driver_options=['--search-time-limit', '5m'])) - - ### multiple cegar - CONFIGS.append(IssueConfig('cpdbs-multiplecegar-wildcardplans-pdb1m-pdbs10m-t100-blacklist0.75-stag4-s{}'.format(random_seed), ['--search', "astar(cpdbs(multiple_cegar(max_refinements=infinity,wildcard_plans=true,max_time=100,max_pdb_size=1000000,max_collection_size=10000000,initial_random_seed={},total_collection_max_size=10000000,total_time_limit=20,stagnation_limit=4,blacklist_trigger_time=0.75,blacklist_on_stagnation=true,verbosity=normal)),verbosity=silent)".format(random_seed)], driver_options=['--search-time-limit', '5m'])) - -SUITE = common_setup.DEFAULT_OPTIMAL_SUITE -ENVIRONMENT = BaselSlurmEnvironment( - email="silvan.sievers@unibas.ch", - partition="infai_2", - export=[], - # paths obtained via: - # module purge - # module -q load CMake/3.15.3-GCCcore-8.3.0 - # module -q load GCC/8.3.0 - # echo $PATH - # echo $LD_LIBRARY_PATH - setup='export 
PATH=/scicore/soft/apps/binutils/2.32-GCCcore-8.3.0/bin:/scicore/soft/apps/CMake/3.15.3-GCCcore-8.3.0/bin:/scicore/soft/apps/cURL/7.66.0-GCCcore-8.3.0/bin:/scicore/soft/apps/bzip2/1.0.8-GCCcore-8.3.0/bin:/scicore/soft/apps/ncurses/6.1-GCCcore-8.3.0/bin:/scicore/soft/apps/GCCcore/8.3.0/bin:/infai/sieverss/repos/bin:/infai/sieverss/local:/export/soft/lua_lmod/centos7/lmod/lmod/libexec:/usr/local/bin:/usr/bin:/usr/local/sbin:/usr/sbin:$PATH\nexport LD_LIBRARY_PATH=/scicore/soft/apps/binutils/2.32-GCCcore-8.3.0/lib:/scicore/soft/apps/cURL/7.66.0-GCCcore-8.3.0/lib:/scicore/soft/apps/bzip2/1.0.8-GCCcore-8.3.0/lib:/scicore/soft/apps/zlib/1.2.11-GCCcore-8.3.0/lib:/scicore/soft/apps/ncurses/6.1-GCCcore-8.3.0/lib:/scicore/soft/apps/GCCcore/8.3.0/lib64:/scicore/soft/apps/GCCcore/8.3.0/lib') - -if common_setup.is_test_run(): - SUITE = IssueExperiment.DEFAULT_TEST_SUITE - ENVIRONMENT = LocalEnvironment(processes=4) - -exp = IssueExperiment( - revisions=REVISIONS, - configs=CONFIGS, - environment=ENVIRONMENT, -) -exp.add_suite(BENCHMARKS_DIR, SUITE) - -exp.add_parser(exp.EXITCODE_PARSER) -exp.add_parser(exp.TRANSLATOR_PARSER) -exp.add_parser(exp.SINGLE_SEARCH_PARSER) -exp.add_parser(exp.PLANNER_PARSER) - -exp.add_step('build', exp.build) -exp.add_step('start', exp.start_runs) -exp.add_fetcher(name='fetch') - -exp.add_fetcher('data/issue1007-v4-a-multiple-seeds-eval',merge=True,filter_algorithm=[ - 'issue1007-v4-a-cpdbs-singlecegar-regularplans-pdb1m-pdbs10m-t100-s{}'.format(random_seed) for random_seed in range(2018, 2028) -] + [ - 'issue1007-v4-a-cpdbs-multiplecegar-wildcardplans-pdb1m-pdbs10m-t100-blacklist0.75-stag4-s{}'.format(random_seed) for random_seed in range(2018, 2028) -]) -exp._revisions=["issue1007-v4-a", "issue1007-v4-b"] -exp.add_absolute_report_step(attributes=['coverage']) - -exp.add_report( - AverageAlgorithmReport( - algo_name_suffixes=['-s{}'.format(seed) for seed in range(2018,2028)], - attributes=['coverage', 'search_time', 'total_time', - 
'expansions_until_last_jump'] - ), - outfile=os.path.join(exp.eval_dir, "average", "properties"), - name="report-average" -) - -exp.add_fetcher('data/issue1007-v4-b-multiple-seeds-eval/average', merge=True) -exp._configs = [ - IssueConfig('cpdbs-singlecegar-regularplans-pdb1m-pdbs10m-t100', []), - IssueConfig('cpdbs-multiplecegar-wildcardplans-pdb1m-pdbs10m-t100-blacklist0.75-stag4', []), -] -exp.add_comparison_table_step( - attributes=['coverage', 'search_time', 'total_time', - 'expansions_until_last_jump'] -) - - -exp.run_steps() diff --git a/experiments/issue1007/v4-multiple-seeds.py b/experiments/issue1007/v4-multiple-seeds.py deleted file mode 100755 index 685a8f77a8..0000000000 --- a/experiments/issue1007/v4-multiple-seeds.py +++ /dev/null @@ -1,61 +0,0 @@ -#! /usr/bin/env python3 - -import itertools -import os - -from lab.environments import LocalEnvironment, BaselSlurmEnvironment - -from downward.reports.compare import ComparativeReport - -import common_setup -from common_setup import IssueConfig, IssueExperiment -from average_report import AverageAlgorithmReport - -DIR = os.path.dirname(os.path.abspath(__file__)) -SCRIPT_NAME = os.path.splitext(os.path.basename(__file__))[0] -BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"] -REVISIONS = ["issue1007-v4"] -CONFIGS = [] -for random_seed in range(2018, 2028): - ### single cegar - CONFIGS.append(IssueConfig('cpdbs-singlecegar-regularplans-pdb1m-pdbs10m-t100-s{}'.format(random_seed), ['--search', 'astar(cpdbs(single_cegar(max_refinements=infinity,allow_merging=all_flaws,wildcard_plans=false,max_time=20,max_pdb_size=1000000,max_collection_size=10000000,random_seed={},verbosity=normal)),verbosity=silent)'.format(random_seed)], driver_options=['--search-time-limit', '5m'])) - - ### multiple cegar - CONFIGS.append(IssueConfig('cpdbs-multiplecegar-wildcardplans-pdb1m-pdbs10m-t100-blacklist0.75-stag4-s{}'.format(random_seed), ['--search', 
"astar(cpdbs(multiple_cegar(max_refinements=infinity,allow_merging=all_flaws,wildcard_plans=true,max_time=100,max_pdb_size=1000000,max_collection_size=10000000,initial_random_seed={},total_collection_max_size=10000000,total_time_limit=20,stagnation_limit=4,blacklist_trigger_time=0.75,blacklist_on_stagnation=true,verbosity=silent)),verbosity=silent)".format(random_seed)], driver_options=['--search-time-limit', '5m'])) - -SUITE = common_setup.DEFAULT_OPTIMAL_SUITE -ENVIRONMENT = BaselSlurmEnvironment( - email="silvan.sievers@unibas.ch", - partition="infai_2", - export=[], - # paths obtained via: - # module purge - # module -q load CMake/3.15.3-GCCcore-8.3.0 - # module -q load GCC/8.3.0 - # echo $PATH - # echo $LD_LIBRARY_PATH - setup='export PATH=/scicore/soft/apps/binutils/2.32-GCCcore-8.3.0/bin:/scicore/soft/apps/CMake/3.15.3-GCCcore-8.3.0/bin:/scicore/soft/apps/cURL/7.66.0-GCCcore-8.3.0/bin:/scicore/soft/apps/bzip2/1.0.8-GCCcore-8.3.0/bin:/scicore/soft/apps/ncurses/6.1-GCCcore-8.3.0/bin:/scicore/soft/apps/GCCcore/8.3.0/bin:/infai/sieverss/repos/bin:/infai/sieverss/local:/export/soft/lua_lmod/centos7/lmod/lmod/libexec:/usr/local/bin:/usr/bin:/usr/local/sbin:/usr/sbin:$PATH\nexport LD_LIBRARY_PATH=/scicore/soft/apps/binutils/2.32-GCCcore-8.3.0/lib:/scicore/soft/apps/cURL/7.66.0-GCCcore-8.3.0/lib:/scicore/soft/apps/bzip2/1.0.8-GCCcore-8.3.0/lib:/scicore/soft/apps/zlib/1.2.11-GCCcore-8.3.0/lib:/scicore/soft/apps/ncurses/6.1-GCCcore-8.3.0/lib:/scicore/soft/apps/GCCcore/8.3.0/lib64:/scicore/soft/apps/GCCcore/8.3.0/lib') - -if common_setup.is_test_run(): - SUITE = IssueExperiment.DEFAULT_TEST_SUITE - ENVIRONMENT = LocalEnvironment(processes=4) - -exp = IssueExperiment( - revisions=REVISIONS, - configs=CONFIGS, - environment=ENVIRONMENT, -) -exp.add_suite(BENCHMARKS_DIR, SUITE) - -exp.add_parser(exp.EXITCODE_PARSER) -exp.add_parser(exp.TRANSLATOR_PARSER) -exp.add_parser(exp.SINGLE_SEARCH_PARSER) -exp.add_parser(exp.PLANNER_PARSER) - -exp.add_step('build', exp.build) 
-exp.add_step('start', exp.start_runs) -exp.add_fetcher(name='fetch') - -exp.add_absolute_report_step(attributes=['coverage']) - -exp.run_steps() diff --git a/experiments/issue1007/v4-single-cegar-allow-merging-options.py b/experiments/issue1007/v4-single-cegar-allow-merging-options.py deleted file mode 100755 index 16199aee9d..0000000000 --- a/experiments/issue1007/v4-single-cegar-allow-merging-options.py +++ /dev/null @@ -1,70 +0,0 @@ -#! /usr/bin/env python3 - -import itertools -import os - -from lab.environments import LocalEnvironment, BaselSlurmEnvironment - -from downward.reports.compare import ComparativeReport - -import common_setup -from common_setup import IssueConfig, IssueExperiment -from average_report import AverageAlgorithmReport - -DIR = os.path.dirname(os.path.abspath(__file__)) -SCRIPT_NAME = os.path.splitext(os.path.basename(__file__))[0] -BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"] -REVISIONS = ["issue1007-v4"] -random_seed=2018 -CONFIGS = [ - ### cpdbs - IssueConfig('cpdbs-singlecegar-wildcardplans-allowmergingall-pdb1m-pdbs10m-t100-s{}'.format(random_seed), ['--search', 'astar(cpdbs(single_cegar(max_refinements=infinity,allow_merging=all_flaws,wildcard_plans=true,max_time=100,max_pdb_size=1000000,max_collection_size=10000000,random_seed={},verbosity=normal)),verbosity=silent)'.format(random_seed)]), - IssueConfig('cpdbs-singlecegar-wildcardplans-allowmergingprec-pdb1m-pdbs10m-t100-s{}'.format(random_seed), ['--search', 'astar(cpdbs(single_cegar(max_refinements=infinity,allow_merging=precondition_flaws,wildcard_plans=true,max_time=100,max_pdb_size=1000000,max_collection_size=10000000,random_seed={},verbosity=normal)),verbosity=silent)'.format(random_seed)]), - IssueConfig('cpdbs-singlecegar-wildcardplans-forbidmerging-pdb1m-pdbs10m-t100-s{}'.format(random_seed), ['--search', 
'astar(cpdbs(single_cegar(max_refinements=infinity,allow_merging=never,wildcard_plans=true,max_time=100,max_pdb_size=1000000,max_collection_size=10000000,random_seed={},verbosity=normal)),verbosity=silent)'.format(random_seed)]), - ### pho - IssueConfig('pho-singlecegar-wildcardplans-allowmergingall-pdb1m-pdbs10m-t100-s{}'.format(random_seed), ['--search', 'astar(operatorcounting(constraint_generators=[pho_constraints(patterns=single_cegar(max_refinements=infinity,allow_merging=all_flaws,wildcard_plans=true,max_time=100,max_pdb_size=1000000,max_collection_size=10000000,random_seed={},verbosity=normal))]),verbosity=silent)'.format(random_seed)]), - IssueConfig('pho-singlecegar-wildcardplans-allowmergingprec-pdb1m-pdbs10m-t100-s{}'.format(random_seed), ['--search', 'astar(operatorcounting(constraint_generators=[pho_constraints(patterns=single_cegar(max_refinements=infinity,allow_merging=precondition_flaws,wildcard_plans=true,max_time=100,max_pdb_size=1000000,max_collection_size=10000000,random_seed={},verbosity=normal))]),verbosity=silent)'.format(random_seed)]), - IssueConfig('pho-singlecegar-wildcardplans-forbidmerging-pdb1m-pdbs10m-t100-s{}'.format(random_seed), ['--search', 'astar(operatorcounting(constraint_generators=[pho_constraints(patterns=single_cegar(max_refinements=infinity,allow_merging=never,wildcard_plans=true,max_time=100,max_pdb_size=1000000,max_collection_size=10000000,random_seed={},verbosity=normal))]),verbosity=silent)'.format(random_seed)]), -] - -SUITE = common_setup.DEFAULT_OPTIMAL_SUITE -ENVIRONMENT = BaselSlurmEnvironment( - email="silvan.sievers@unibas.ch", - partition="infai_2", - export=[], - setup='export PATH=/scicore/soft/apps/binutils/2.32-GCCcore-8.3.0/bin:/scicore/soft/apps/GCCcore/8.3.0/bin:/infai/sieverss/repos/bin:/infai/sieverss/local:/export/soft/lua_lmod/centos7/lmod/lmod/libexec:$PATH\nexport 
LD_LIBRARY_PATH=/scicore/soft/apps/binutils/2.32-GCCcore-8.3.0/lib:/scicore/soft/apps/zlib/1.2.11-GCCcore-8.3.0/lib:/scicore/soft/apps/GCCcore/8.3.0/lib64:/scicore/soft/apps/GCCcore/8.3.0/lib') - -if common_setup.is_test_run(): - SUITE = IssueExperiment.DEFAULT_TEST_SUITE - ENVIRONMENT = LocalEnvironment(processes=4) - -exp = IssueExperiment( - revisions=REVISIONS, - configs=CONFIGS, - environment=ENVIRONMENT, -) -exp.add_suite(BENCHMARKS_DIR, SUITE) - -exp.add_parser(exp.EXITCODE_PARSER) -exp.add_parser(exp.TRANSLATOR_PARSER) -exp.add_parser(exp.SINGLE_SEARCH_PARSER) -exp.add_parser(exp.PLANNER_PARSER) - -exp.add_step('build', exp.build) -exp.add_step('start', exp.start_runs) -exp.add_fetcher(name='fetch') -exp.add_parser('v3-parser.py') - -attributes=exp.DEFAULT_TABLE_ATTRIBUTES -attributes.extend([ - 'single_cegar_pdbs_solved_without_search', - 'single_cegar_pdbs_computation_time', - 'single_cegar_pdbs_timed_out', - 'single_cegar_pdbs_num_iterations', - 'single_cegar_pdbs_collection_num_patterns', - 'single_cegar_pdbs_collection_summed_pdb_size', -]) -exp.add_absolute_report_step(attributes=attributes) -exp.add_comparison_table_step(attributes=attributes) - -exp.run_steps() diff --git a/experiments/issue1007/v4.py b/experiments/issue1007/v4.py deleted file mode 100755 index 71baf4b514..0000000000 --- a/experiments/issue1007/v4.py +++ /dev/null @@ -1,66 +0,0 @@ -#! 
/usr/bin/env python3 - -import itertools -import os - -from lab.environments import LocalEnvironment, BaselSlurmEnvironment - -from downward.reports.compare import ComparativeReport - -import common_setup -from common_setup import IssueConfig, IssueExperiment - -DIR = os.path.dirname(os.path.abspath(__file__)) -SCRIPT_NAME = os.path.splitext(os.path.basename(__file__))[0] -BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"] -REVISIONS = ["issue1007-v4"] -random_seed=2018 -CONFIGS = [ - ### single cegar - IssueConfig('cpdbs-singlecegar-regularplans-pdb1m-pdbs10m-t100-s{}'.format(random_seed), ['--search', 'astar(cpdbs(single_cegar(max_refinements=infinity,allow_merging=all_flaws,wildcard_plans=false,max_time=100,max_pdb_size=1000000,max_collection_size=10000000,random_seed={},verbosity=normal)),verbosity=silent)'.format(random_seed)]), - IssueConfig('cpdbs-singlecegar-wildcardplans-pdb1m-pdbs10m-t100-s{}'.format(random_seed), ['--search', 'astar(cpdbs(single_cegar(max_refinements=infinity,allow_merging=all_flaws,wildcard_plans=true,max_time=100,max_pdb_size=1000000,max_collection_size=10000000,random_seed={},verbosity=normal)),verbosity=silent)'.format(random_seed)]), - - ### multiple cegar - IssueConfig('cpdbs-multiplecegar-regularplans-pdb1m-pdbs10m-t100-s{}'.format(random_seed), ['--search', "astar(cpdbs(multiple_cegar(max_refinements=infinity,allow_merging=all_flaws,wildcard_plans=false,max_time=100,max_pdb_size=1000000,max_collection_size=10000000,initial_random_seed={},total_collection_max_size=10000000,total_time_limit=100,stagnation_limit=infinity,blacklist_trigger_time=1,blacklist_on_stagnation=false,verbosity=silent)),verbosity=silent)".format(random_seed)]), - IssueConfig('cpdbs-multiplecegar-regularplans-pdb1m-pdbs10m-t100-blacklist0.75-stag20-s{}'.format(random_seed), ['--search', 
"astar(cpdbs(multiple_cegar(max_refinements=infinity,allow_merging=all_flaws,wildcard_plans=false,max_time=100,max_pdb_size=1000000,max_collection_size=10000000,initial_random_seed={},total_collection_max_size=10000000,total_time_limit=100,stagnation_limit=20,blacklist_trigger_time=0.75,blacklist_on_stagnation=true,verbosity=silent)),verbosity=silent)".format(random_seed)]), - - IssueConfig('cpdbs-multiplecegar-wildcardplans-pdb1m-pdbs10m-t100-s{}'.format(random_seed), ['--search', "astar(cpdbs(multiple_cegar(max_refinements=infinity,allow_merging=all_flaws,wildcard_plans=true,max_time=100,max_pdb_size=1000000,max_collection_size=10000000,initial_random_seed={},total_collection_max_size=10000000,total_time_limit=100,stagnation_limit=infinity,blacklist_trigger_time=1,blacklist_on_stagnation=false,verbosity=silent)),verbosity=silent)".format(random_seed)]), - IssueConfig('cpdbs-multiplecegar-wildcardplans-pdb1m-pdbs10m-t100-blacklist0.75-stag20-s{}'.format(random_seed), ['--search', "astar(cpdbs(multiple_cegar(max_refinements=infinity,allow_merging=all_flaws,wildcard_plans=true,max_time=100,max_pdb_size=1000000,max_collection_size=10000000,initial_random_seed={},total_collection_max_size=10000000,total_time_limit=100,stagnation_limit=20,blacklist_trigger_time=0.75,blacklist_on_stagnation=true,verbosity=silent)),verbosity=silent)".format(random_seed)]), -] - -SUITE = common_setup.DEFAULT_OPTIMAL_SUITE -ENVIRONMENT = BaselSlurmEnvironment( - email="silvan.sievers@unibas.ch", - partition="infai_2", - export=[], - # paths obtained via: - # module purge - # module -q load CMake/3.15.3-GCCcore-8.3.0 - # module -q load GCC/8.3.0 - # echo $PATH - # echo $LD_LIBRARY_PATH - setup='export 
PATH=/scicore/soft/apps/binutils/2.32-GCCcore-8.3.0/bin:/scicore/soft/apps/CMake/3.15.3-GCCcore-8.3.0/bin:/scicore/soft/apps/cURL/7.66.0-GCCcore-8.3.0/bin:/scicore/soft/apps/bzip2/1.0.8-GCCcore-8.3.0/bin:/scicore/soft/apps/ncurses/6.1-GCCcore-8.3.0/bin:/scicore/soft/apps/GCCcore/8.3.0/bin:/infai/sieverss/repos/bin:/infai/sieverss/local:/export/soft/lua_lmod/centos7/lmod/lmod/libexec:/usr/local/bin:/usr/bin:/usr/local/sbin:/usr/sbin:$PATH\nexport LD_LIBRARY_PATH=/scicore/soft/apps/binutils/2.32-GCCcore-8.3.0/lib:/scicore/soft/apps/cURL/7.66.0-GCCcore-8.3.0/lib:/scicore/soft/apps/bzip2/1.0.8-GCCcore-8.3.0/lib:/scicore/soft/apps/zlib/1.2.11-GCCcore-8.3.0/lib:/scicore/soft/apps/ncurses/6.1-GCCcore-8.3.0/lib:/scicore/soft/apps/GCCcore/8.3.0/lib64:/scicore/soft/apps/GCCcore/8.3.0/lib') - -if common_setup.is_test_run(): - SUITE = IssueExperiment.DEFAULT_TEST_SUITE - ENVIRONMENT = LocalEnvironment(processes=4) - -exp = IssueExperiment( - revisions=REVISIONS, - configs=CONFIGS, - environment=ENVIRONMENT, -) -exp.add_suite(BENCHMARKS_DIR, SUITE) - -exp.add_parser(exp.EXITCODE_PARSER) -exp.add_parser(exp.TRANSLATOR_PARSER) -exp.add_parser(exp.SINGLE_SEARCH_PARSER) -exp.add_parser(exp.PLANNER_PARSER) - -exp.add_step('build', exp.build) -exp.add_step('start', exp.start_runs) -exp.add_fetcher(name='fetch') - -exp.add_absolute_report_step() - -exp.run_steps() diff --git a/experiments/issue1007/v5-multiple-seeds.py b/experiments/issue1007/v5-multiple-seeds.py deleted file mode 100755 index 21f50c723b..0000000000 --- a/experiments/issue1007/v5-multiple-seeds.py +++ /dev/null @@ -1,137 +0,0 @@ -#! 
/usr/bin/env python3 - -import itertools -import os - -from lab.environments import LocalEnvironment, BaselSlurmEnvironment - -from downward.reports.compare import ComparativeReport - -import common_setup -from common_setup import IssueConfig, IssueExperiment -from average_report import AverageAlgorithmReport - -DIR = os.path.dirname(os.path.abspath(__file__)) -SCRIPT_NAME = os.path.splitext(os.path.basename(__file__))[0] -BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"] -REVISIONS = ["issue1007-v5"] -CONFIGS = [] -for random_seed in range(2018, 2028): - ### single cegar - CONFIGS.append(IssueConfig('cpdbs-singlecegar-regularplans-pdb1m-pdbs10m-t100-s{}'.format(random_seed), ['--search', 'astar(cpdbs(single_cegar(max_refinements=infinity,wildcard_plans=false,max_time=20,max_pdb_size=1000000,max_collection_size=10000000,random_seed={},verbosity=normal)),verbosity=silent)'.format(random_seed)], driver_options=['--search-time-limit', '5m'])) - - ### multiple cegar - CONFIGS.append(IssueConfig('cpdbs-multiplecegar-wildcardplans-pdb1m-pdbs10m-t100-blacklist0.75-stag4-s{}'.format(random_seed), ['--search', "astar(cpdbs(multiple_cegar(max_refinements=infinity,wildcard_plans=true,max_time=100,max_pdb_size=1000000,max_collection_size=10000000,initial_random_seed={},total_collection_max_size=10000000,total_time_limit=20,stagnation_limit=4,blacklist_trigger_time=0.75,blacklist_on_stagnation=true,verbosity=normal)),verbosity=silent)".format(random_seed)], driver_options=['--search-time-limit', '5m'])) - -SUITE = common_setup.DEFAULT_OPTIMAL_SUITE -ENVIRONMENT = BaselSlurmEnvironment( - email="silvan.sievers@unibas.ch", - partition="infai_2", - export=[], - # paths obtained via: - # module purge - # module -q load CMake/3.15.3-GCCcore-8.3.0 - # module -q load GCC/8.3.0 - # echo $PATH - # echo $LD_LIBRARY_PATH - setup='export 
PATH=/scicore/soft/apps/binutils/2.32-GCCcore-8.3.0/bin:/scicore/soft/apps/CMake/3.15.3-GCCcore-8.3.0/bin:/scicore/soft/apps/cURL/7.66.0-GCCcore-8.3.0/bin:/scicore/soft/apps/bzip2/1.0.8-GCCcore-8.3.0/bin:/scicore/soft/apps/ncurses/6.1-GCCcore-8.3.0/bin:/scicore/soft/apps/GCCcore/8.3.0/bin:/infai/sieverss/repos/bin:/infai/sieverss/local:/export/soft/lua_lmod/centos7/lmod/lmod/libexec:/usr/local/bin:/usr/bin:/usr/local/sbin:/usr/sbin:$PATH\nexport LD_LIBRARY_PATH=/scicore/soft/apps/binutils/2.32-GCCcore-8.3.0/lib:/scicore/soft/apps/cURL/7.66.0-GCCcore-8.3.0/lib:/scicore/soft/apps/bzip2/1.0.8-GCCcore-8.3.0/lib:/scicore/soft/apps/zlib/1.2.11-GCCcore-8.3.0/lib:/scicore/soft/apps/ncurses/6.1-GCCcore-8.3.0/lib:/scicore/soft/apps/GCCcore/8.3.0/lib64:/scicore/soft/apps/GCCcore/8.3.0/lib') - -if common_setup.is_test_run(): - SUITE = IssueExperiment.DEFAULT_TEST_SUITE - ENVIRONMENT = LocalEnvironment(processes=4) - -exp = IssueExperiment( - revisions=REVISIONS, - configs=CONFIGS, - environment=ENVIRONMENT, -) -exp.add_suite(BENCHMARKS_DIR, SUITE) - -exp.add_parser(exp.EXITCODE_PARSER) -exp.add_parser(exp.TRANSLATOR_PARSER) -exp.add_parser(exp.SINGLE_SEARCH_PARSER) -exp.add_parser(exp.PLANNER_PARSER) - -exp.add_step('build', exp.build) -exp.add_step('start', exp.start_runs) -exp.add_fetcher(name='fetch') - -exp.add_absolute_report_step(attributes=['coverage']) - -### compare against v4 -exp.add_fetcher('data/issue1007-v4-multiple-seeds-eval',merge=True,filter_algorithm=[ - 'issue1007-v4-cpdbs-singlecegar-regularplans-pdb1m-pdbs10m-t100-s{}'.format(random_seed) for random_seed in range(2018, 2028) -] + [ - 'issue1007-v4-cpdbs-multiplecegar-wildcardplans-pdb1m-pdbs10m-t100-blacklist0.75-stag4-s{}'.format(random_seed) for random_seed in range(2018, 2028) -]) - -exp.add_report( - AverageAlgorithmReport( - algo_name_suffixes=['-s{}'.format(seed) for seed in range(2018,2028)], - attributes=['coverage', 'search_time', 'total_time', - 'expansions_until_last_jump'], - 
filter_algorithm=[ - 'issue1007-v4-cpdbs-singlecegar-regularplans-pdb1m-pdbs10m-t100-s{}'.format(random_seed) for random_seed in range(2018, 2028) - ] + [ - 'issue1007-v4-cpdbs-multiplecegar-wildcardplans-pdb1m-pdbs10m-t100-blacklist0.75-stag4-s{}'.format(random_seed) for random_seed in range(2018, 2028) - ] + [ - 'issue1007-v5-cpdbs-singlecegar-regularplans-pdb1m-pdbs10m-t100-s{}'.format(random_seed) for random_seed in range(2018, 2028) - ] + [ - 'issue1007-v5-cpdbs-multiplecegar-wildcardplans-pdb1m-pdbs10m-t100-blacklist0.75-stag4-s{}'.format(random_seed) for random_seed in range(2018, 2028) - ], - ), - outfile=os.path.join(exp.eval_dir, "average-v4-v5", "properties"), - name="report-average-v4-v5" -) - -exp.add_fetcher('data/issue1007-v5-multiple-seeds-eval/average-v4-v5', merge=True) -exp._configs = [ - IssueConfig('cpdbs-singlecegar-regularplans-pdb1m-pdbs10m-t100', []), - IssueConfig('cpdbs-multiplecegar-wildcardplans-pdb1m-pdbs10m-t100-blacklist0.75-stag4', []), -] -exp.add_comparison_table_step( - attributes=['coverage', 'search_time', 'total_time', - 'expansions_until_last_jump'], - name="compare-v4-v5", - revisions=["issue1007-v4", "issue1007-v5"], -) - -### compare against v4-b -exp.add_fetcher('data/issue1007-v4-b-multiple-seeds-eval',merge=True,filter_algorithm=[ - 'issue1007-v4-b-cpdbs-singlecegar-regularplans-pdb1m-pdbs10m-t100-s{}'.format(random_seed) for random_seed in range(2018, 2028) -] + [ - 'issue1007-v4-b-cpdbs-multiplecegar-wildcardplans-pdb1m-pdbs10m-t100-blacklist0.75-stag4-s{}'.format(random_seed) for random_seed in range(2018, 2028) -]) - -exp.add_report( - AverageAlgorithmReport( - algo_name_suffixes=['-s{}'.format(seed) for seed in range(2018,2028)], - attributes=['coverage', 'search_time', 'total_time', - 'expansions_until_last_jump'], - filter_algorithm=[ - 'issue1007-v4-b-cpdbs-singlecegar-regularplans-pdb1m-pdbs10m-t100-s{}'.format(random_seed) for random_seed in range(2018, 2028) - ] + [ - 
'issue1007-v4-b-cpdbs-multiplecegar-wildcardplans-pdb1m-pdbs10m-t100-blacklist0.75-stag4-s{}'.format(random_seed) for random_seed in range(2018, 2028) - ] + [ - 'issue1007-v5-cpdbs-singlecegar-regularplans-pdb1m-pdbs10m-t100-s{}'.format(random_seed) for random_seed in range(2018, 2028) - ] + [ - 'issue1007-v5-cpdbs-multiplecegar-wildcardplans-pdb1m-pdbs10m-t100-blacklist0.75-stag4-s{}'.format(random_seed) for random_seed in range(2018, 2028) - ], - ), - outfile=os.path.join(exp.eval_dir, "average-v4-b-v5", "properties"), - name="report-average-v4-b-v5" -) - -exp.add_fetcher('data/issue1007-v5-multiple-seeds-eval/average-v4-b-v5', merge=True) -exp._configs = [ - IssueConfig('cpdbs-singlecegar-regularplans-pdb1m-pdbs10m-t100', []), - IssueConfig('cpdbs-multiplecegar-wildcardplans-pdb1m-pdbs10m-t100-blacklist0.75-stag4', []), -] -exp.add_comparison_table_step( - attributes=['coverage', 'search_time', 'total_time', - 'expansions_until_last_jump'], - name="compare-v4-b-v5", - revisions=["issue1007-v4-b", "issue1007-v5"], -) - -exp.run_steps() diff --git a/experiments/issue1007/v5.py b/experiments/issue1007/v5.py deleted file mode 100755 index b1d1be2f64..0000000000 --- a/experiments/issue1007/v5.py +++ /dev/null @@ -1,70 +0,0 @@ -#! 
/usr/bin/env python3 - -import itertools -import os - -from lab.environments import LocalEnvironment, BaselSlurmEnvironment - -from downward.reports.compare import ComparativeReport - -import common_setup -from common_setup import IssueConfig, IssueExperiment - -DIR = os.path.dirname(os.path.abspath(__file__)) -SCRIPT_NAME = os.path.splitext(os.path.basename(__file__))[0] -BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"] -REVISIONS = ["issue1007-v5"] -random_seed=2018 -CONFIGS = [ - ### single cegar - IssueConfig('cpdbs-singlecegar-regularplans-pdb1m-pdbs10m-t100-s{}'.format(random_seed), ['--search', 'astar(cpdbs(single_cegar(max_refinements=infinity,wildcard_plans=false,max_time=100,max_pdb_size=1000000,max_collection_size=10000000,random_seed={},verbosity=normal)),verbosity=silent)'.format(random_seed)]), - IssueConfig('cpdbs-singlecegar-wildcardplans-pdb1m-pdbs10m-t100-s{}'.format(random_seed), ['--search', 'astar(cpdbs(single_cegar(max_refinements=infinity,wildcard_plans=true,max_time=100,max_pdb_size=1000000,max_collection_size=10000000,random_seed={},verbosity=normal)),verbosity=silent)'.format(random_seed)]), - - ### multiple cegar - IssueConfig('cpdbs-multiplecegar-regularplans-pdb1m-pdbs10m-t100-s{}'.format(random_seed), ['--search', "astar(cpdbs(multiple_cegar(max_refinements=infinity,wildcard_plans=false,max_time=100,max_pdb_size=1000000,max_collection_size=10000000,initial_random_seed={},total_collection_max_size=10000000,total_time_limit=100,stagnation_limit=infinity,blacklist_trigger_time=1,blacklist_on_stagnation=false,verbosity=normal)),verbosity=silent)".format(random_seed)]), - IssueConfig('cpdbs-multiplecegar-regularplans-pdb1m-pdbs10m-t100-blacklist0.75-stag20-s{}'.format(random_seed), ['--search', 
"astar(cpdbs(multiple_cegar(max_refinements=infinity,wildcard_plans=false,max_time=100,max_pdb_size=1000000,max_collection_size=10000000,initial_random_seed={},total_collection_max_size=10000000,total_time_limit=100,stagnation_limit=20,blacklist_trigger_time=0.75,blacklist_on_stagnation=true,verbosity=normal)),verbosity=silent)".format(random_seed)]), - - IssueConfig('cpdbs-multiplecegar-wildcardplans-pdb1m-pdbs10m-t100-s{}'.format(random_seed), ['--search', "astar(cpdbs(multiple_cegar(max_refinements=infinity,wildcard_plans=true,max_time=100,max_pdb_size=1000000,max_collection_size=10000000,initial_random_seed={},total_collection_max_size=10000000,total_time_limit=100,stagnation_limit=infinity,blacklist_trigger_time=1,blacklist_on_stagnation=false,verbosity=normal)),verbosity=silent)".format(random_seed)]), - IssueConfig('cpdbs-multiplecegar-wildcardplans-pdb1m-pdbs10m-t100-blacklist0.75-stag20-s{}'.format(random_seed), ['--search', "astar(cpdbs(multiple_cegar(max_refinements=infinity,wildcard_plans=true,max_time=100,max_pdb_size=1000000,max_collection_size=10000000,initial_random_seed={},total_collection_max_size=10000000,total_time_limit=100,stagnation_limit=20,blacklist_trigger_time=0.75,blacklist_on_stagnation=true,verbosity=normal)),verbosity=silent)".format(random_seed)]), -] - -SUITE = common_setup.DEFAULT_OPTIMAL_SUITE -ENVIRONMENT = BaselSlurmEnvironment( - email="silvan.sievers@unibas.ch", - partition="infai_2", - export=[], - # paths obtained via: - # module purge - # module -q load CMake/3.15.3-GCCcore-8.3.0 - # module -q load GCC/8.3.0 - # echo $PATH - # echo $LD_LIBRARY_PATH - setup='export 
PATH=/scicore/soft/apps/binutils/2.32-GCCcore-8.3.0/bin:/scicore/soft/apps/CMake/3.15.3-GCCcore-8.3.0/bin:/scicore/soft/apps/cURL/7.66.0-GCCcore-8.3.0/bin:/scicore/soft/apps/bzip2/1.0.8-GCCcore-8.3.0/bin:/scicore/soft/apps/ncurses/6.1-GCCcore-8.3.0/bin:/scicore/soft/apps/GCCcore/8.3.0/bin:/infai/sieverss/repos/bin:/infai/sieverss/local:/export/soft/lua_lmod/centos7/lmod/lmod/libexec:/usr/local/bin:/usr/bin:/usr/local/sbin:/usr/sbin:$PATH\nexport LD_LIBRARY_PATH=/scicore/soft/apps/binutils/2.32-GCCcore-8.3.0/lib:/scicore/soft/apps/cURL/7.66.0-GCCcore-8.3.0/lib:/scicore/soft/apps/bzip2/1.0.8-GCCcore-8.3.0/lib:/scicore/soft/apps/zlib/1.2.11-GCCcore-8.3.0/lib:/scicore/soft/apps/ncurses/6.1-GCCcore-8.3.0/lib:/scicore/soft/apps/GCCcore/8.3.0/lib64:/scicore/soft/apps/GCCcore/8.3.0/lib') - -if common_setup.is_test_run(): - SUITE = IssueExperiment.DEFAULT_TEST_SUITE - ENVIRONMENT = LocalEnvironment(processes=4) - -exp = IssueExperiment( - revisions=REVISIONS, - configs=CONFIGS, - environment=ENVIRONMENT, -) -exp.add_suite(BENCHMARKS_DIR, SUITE) - -exp.add_parser(exp.EXITCODE_PARSER) -exp.add_parser(exp.TRANSLATOR_PARSER) -exp.add_parser(exp.SINGLE_SEARCH_PARSER) -exp.add_parser(exp.PLANNER_PARSER) - -exp.add_step('build', exp.build) -exp.add_step('start', exp.start_runs) -exp.add_fetcher(name='fetch') - -exp.add_absolute_report_step() - -exp.add_fetcher('data/issue1007-v4-eval', merge=True) -exp._revisions = ["issue1007-v4", "issue1007-v5"] -exp.add_comparison_table_step() - -exp.run_steps() diff --git a/experiments/issue1007/v6-multiple-seeds.py b/experiments/issue1007/v6-multiple-seeds.py deleted file mode 100755 index 05341f426d..0000000000 --- a/experiments/issue1007/v6-multiple-seeds.py +++ /dev/null @@ -1,81 +0,0 @@ -#! 
/usr/bin/env python3 - -import itertools -import os - -from lab.environments import LocalEnvironment, BaselSlurmEnvironment - -from downward.reports.compare import ComparativeReport - -import common_setup -from common_setup import IssueConfig, IssueExperiment -from average_report import AverageAlgorithmReport - -DIR = os.path.dirname(os.path.abspath(__file__)) -SCRIPT_NAME = os.path.splitext(os.path.basename(__file__))[0] -BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"] -REVISIONS = ["issue1007-v5", "issue1007-v6"] -CONFIGS = [] -for random_seed in range(2018, 2028): - ### single cegar - CONFIGS.append(IssueConfig('cpdbs-singlecegar-regularplans-pdb1m-pdbs10m-t100-s{}'.format(random_seed), ['--search', 'astar(cpdbs(single_cegar(max_refinements=infinity,wildcard_plans=false,max_time=20,max_pdb_size=1000000,max_collection_size=10000000,random_seed={},verbosity=normal)),verbosity=silent)'.format(random_seed)], driver_options=['--search-time-limit', '5m'])) - - ### multiple cegar - CONFIGS.append(IssueConfig('cpdbs-multiplecegar-wildcardplans-pdb1m-pdbs10m-t100-blacklist0.75-stag4-s{}'.format(random_seed), ['--search', "astar(cpdbs(multiple_cegar(max_refinements=infinity,wildcard_plans=true,max_time=100,max_pdb_size=1000000,max_collection_size=10000000,initial_random_seed={},total_collection_max_size=10000000,total_time_limit=20,stagnation_limit=4,blacklist_trigger_time=0.75,blacklist_on_stagnation=true,verbosity=normal)),verbosity=silent)".format(random_seed)], driver_options=['--search-time-limit', '5m'])) - -SUITE = common_setup.DEFAULT_OPTIMAL_SUITE -ENVIRONMENT = BaselSlurmEnvironment( - email="silvan.sievers@unibas.ch", - partition="infai_2", - export=[], - # paths obtained via: - # module purge - # module -q load CMake/3.15.3-GCCcore-8.3.0 - # module -q load GCC/8.3.0 - # echo $PATH - # echo $LD_LIBRARY_PATH - setup='export 
PATH=/scicore/soft/apps/binutils/2.32-GCCcore-8.3.0/bin:/scicore/soft/apps/CMake/3.15.3-GCCcore-8.3.0/bin:/scicore/soft/apps/cURL/7.66.0-GCCcore-8.3.0/bin:/scicore/soft/apps/bzip2/1.0.8-GCCcore-8.3.0/bin:/scicore/soft/apps/ncurses/6.1-GCCcore-8.3.0/bin:/scicore/soft/apps/GCCcore/8.3.0/bin:/infai/sieverss/repos/bin:/infai/sieverss/local:/export/soft/lua_lmod/centos7/lmod/lmod/libexec:/usr/local/bin:/usr/bin:/usr/local/sbin:/usr/sbin:$PATH\nexport LD_LIBRARY_PATH=/scicore/soft/apps/binutils/2.32-GCCcore-8.3.0/lib:/scicore/soft/apps/cURL/7.66.0-GCCcore-8.3.0/lib:/scicore/soft/apps/bzip2/1.0.8-GCCcore-8.3.0/lib:/scicore/soft/apps/zlib/1.2.11-GCCcore-8.3.0/lib:/scicore/soft/apps/ncurses/6.1-GCCcore-8.3.0/lib:/scicore/soft/apps/GCCcore/8.3.0/lib64:/scicore/soft/apps/GCCcore/8.3.0/lib') - -if common_setup.is_test_run(): - SUITE = IssueExperiment.DEFAULT_TEST_SUITE - ENVIRONMENT = LocalEnvironment(processes=4) - -exp = IssueExperiment( - revisions=REVISIONS, - configs=CONFIGS, - environment=ENVIRONMENT, -) -exp.add_suite(BENCHMARKS_DIR, SUITE) - -exp.add_parser(exp.EXITCODE_PARSER) -exp.add_parser(exp.TRANSLATOR_PARSER) -exp.add_parser(exp.SINGLE_SEARCH_PARSER) -exp.add_parser(exp.PLANNER_PARSER) - -exp.add_step('build', exp.build) -exp.add_step('start', exp.start_runs) -exp.add_fetcher(name='fetch') - -exp.add_absolute_report_step(attributes=['coverage']) - -exp.add_report( - AverageAlgorithmReport( - algo_name_suffixes=['-s{}'.format(seed) for seed in range(2018,2028)], - attributes=['coverage', 'search_time', 'total_time', - 'expansions_until_last_jump'] - ), - outfile=os.path.join(exp.eval_dir, "average", "properties"), - name="report-average" -) - -exp.add_fetcher('data/issue1007-v6-multiple-seeds-eval/average', merge=True) -exp._configs = [ - IssueConfig('cpdbs-singlecegar-regularplans-pdb1m-pdbs10m-t100', []), - IssueConfig('cpdbs-multiplecegar-wildcardplans-pdb1m-pdbs10m-t100-blacklist0.75-stag4', []), -] -exp.add_comparison_table_step( - attributes=['coverage', 
'search_time', 'total_time', - 'expansions_until_last_jump'] -) - -exp.run_steps() diff --git a/experiments/issue1007/v6.py b/experiments/issue1007/v6.py deleted file mode 100755 index 9d84938bff..0000000000 --- a/experiments/issue1007/v6.py +++ /dev/null @@ -1,67 +0,0 @@ -#! /usr/bin/env python3 - -import itertools -import os - -from lab.environments import LocalEnvironment, BaselSlurmEnvironment - -from downward.reports.compare import ComparativeReport - -import common_setup -from common_setup import IssueConfig, IssueExperiment - -DIR = os.path.dirname(os.path.abspath(__file__)) -SCRIPT_NAME = os.path.splitext(os.path.basename(__file__))[0] -BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"] -REVISIONS = ["issue1007-v5", "issue1007-v6"] -random_seed=2018 -CONFIGS = [ - ### single cegar - IssueConfig('cpdbs-singlecegar-regularplans-pdb1m-pdbs10m-t100-s{}'.format(random_seed), ['--search', 'astar(cpdbs(single_cegar(max_refinements=infinity,wildcard_plans=false,max_time=100,max_pdb_size=1000000,max_collection_size=10000000,random_seed={},verbosity=normal)),verbosity=silent)'.format(random_seed)]), - IssueConfig('cpdbs-singlecegar-wildcardplans-pdb1m-pdbs10m-t100-s{}'.format(random_seed), ['--search', 'astar(cpdbs(single_cegar(max_refinements=infinity,wildcard_plans=true,max_time=100,max_pdb_size=1000000,max_collection_size=10000000,random_seed={},verbosity=normal)),verbosity=silent)'.format(random_seed)]), - - ### multiple cegar - IssueConfig('cpdbs-multiplecegar-regularplans-pdb1m-pdbs10m-t100-s{}'.format(random_seed), ['--search', "astar(cpdbs(multiple_cegar(max_refinements=infinity,wildcard_plans=false,max_time=100,max_pdb_size=1000000,max_collection_size=10000000,initial_random_seed={},total_collection_max_size=10000000,total_time_limit=100,stagnation_limit=infinity,blacklist_trigger_time=1,blacklist_on_stagnation=false,verbosity=normal)),verbosity=silent)".format(random_seed)]), - 
IssueConfig('cpdbs-multiplecegar-regularplans-pdb1m-pdbs10m-t100-blacklist0.75-stag20-s{}'.format(random_seed), ['--search', "astar(cpdbs(multiple_cegar(max_refinements=infinity,wildcard_plans=false,max_time=100,max_pdb_size=1000000,max_collection_size=10000000,initial_random_seed={},total_collection_max_size=10000000,total_time_limit=100,stagnation_limit=20,blacklist_trigger_time=0.75,blacklist_on_stagnation=true,verbosity=normal)),verbosity=silent)".format(random_seed)]), - - IssueConfig('cpdbs-multiplecegar-wildcardplans-pdb1m-pdbs10m-t100-s{}'.format(random_seed), ['--search', "astar(cpdbs(multiple_cegar(max_refinements=infinity,wildcard_plans=true,max_time=100,max_pdb_size=1000000,max_collection_size=10000000,initial_random_seed={},total_collection_max_size=10000000,total_time_limit=100,stagnation_limit=infinity,blacklist_trigger_time=1,blacklist_on_stagnation=false,verbosity=normal)),verbosity=silent)".format(random_seed)]), - IssueConfig('cpdbs-multiplecegar-wildcardplans-pdb1m-pdbs10m-t100-blacklist0.75-stag20-s{}'.format(random_seed), ['--search', "astar(cpdbs(multiple_cegar(max_refinements=infinity,wildcard_plans=true,max_time=100,max_pdb_size=1000000,max_collection_size=10000000,initial_random_seed={},total_collection_max_size=10000000,total_time_limit=100,stagnation_limit=20,blacklist_trigger_time=0.75,blacklist_on_stagnation=true,verbosity=normal)),verbosity=silent)".format(random_seed)]), -] - -SUITE = common_setup.DEFAULT_OPTIMAL_SUITE -ENVIRONMENT = BaselSlurmEnvironment( - email="silvan.sievers@unibas.ch", - partition="infai_2", - export=[], - # paths obtained via: - # module purge - # module -q load CMake/3.15.3-GCCcore-8.3.0 - # module -q load GCC/8.3.0 - # echo $PATH - # echo $LD_LIBRARY_PATH - setup='export 
PATH=/scicore/soft/apps/binutils/2.32-GCCcore-8.3.0/bin:/scicore/soft/apps/CMake/3.15.3-GCCcore-8.3.0/bin:/scicore/soft/apps/cURL/7.66.0-GCCcore-8.3.0/bin:/scicore/soft/apps/bzip2/1.0.8-GCCcore-8.3.0/bin:/scicore/soft/apps/ncurses/6.1-GCCcore-8.3.0/bin:/scicore/soft/apps/GCCcore/8.3.0/bin:/infai/sieverss/repos/bin:/infai/sieverss/local:/export/soft/lua_lmod/centos7/lmod/lmod/libexec:/usr/local/bin:/usr/bin:/usr/local/sbin:/usr/sbin:$PATH\nexport LD_LIBRARY_PATH=/scicore/soft/apps/binutils/2.32-GCCcore-8.3.0/lib:/scicore/soft/apps/cURL/7.66.0-GCCcore-8.3.0/lib:/scicore/soft/apps/bzip2/1.0.8-GCCcore-8.3.0/lib:/scicore/soft/apps/zlib/1.2.11-GCCcore-8.3.0/lib:/scicore/soft/apps/ncurses/6.1-GCCcore-8.3.0/lib:/scicore/soft/apps/GCCcore/8.3.0/lib64:/scicore/soft/apps/GCCcore/8.3.0/lib') - -if common_setup.is_test_run(): - SUITE = IssueExperiment.DEFAULT_TEST_SUITE - ENVIRONMENT = LocalEnvironment(processes=4) - -exp = IssueExperiment( - revisions=REVISIONS, - configs=CONFIGS, - environment=ENVIRONMENT, -) -exp.add_suite(BENCHMARKS_DIR, SUITE) - -exp.add_parser(exp.EXITCODE_PARSER) -exp.add_parser(exp.TRANSLATOR_PARSER) -exp.add_parser(exp.SINGLE_SEARCH_PARSER) -exp.add_parser(exp.PLANNER_PARSER) - -exp.add_step('build', exp.build) -exp.add_step('start', exp.start_runs) -exp.add_fetcher(name='fetch') - -exp.add_absolute_report_step() -exp.add_comparison_table_step() - -exp.run_steps() diff --git a/experiments/issue1007/v7-multiple-seeds.py b/experiments/issue1007/v7-multiple-seeds.py deleted file mode 100755 index 6807c9b59f..0000000000 --- a/experiments/issue1007/v7-multiple-seeds.py +++ /dev/null @@ -1,99 +0,0 @@ -#! 
/usr/bin/env python3 - -import itertools -import os - -from lab.environments import LocalEnvironment, BaselSlurmEnvironment - -from downward.reports.compare import ComparativeReport - -import common_setup -from common_setup import IssueConfig, IssueExperiment -from average_report import AverageAlgorithmReport - -DIR = os.path.dirname(os.path.abspath(__file__)) -SCRIPT_NAME = os.path.splitext(os.path.basename(__file__))[0] -BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"] -REVISIONS = ["issue1007-v7"] -CONFIGS = [] -for random_seed in range(2018, 2028): - ### single cegar - CONFIGS.append(IssueConfig('cpdbs-singlecegar-regularplans-pdb1m-pdbs10m-t100-s{}'.format(random_seed), ['--search', 'astar(cpdbs(single_cegar(max_refinements=infinity,wildcard_plans=false,max_time=20,max_pdb_size=1000000,max_collection_size=10000000,random_seed={},verbosity=normal)),verbosity=silent)'.format(random_seed)], driver_options=['--search-time-limit', '5m'])) - - ### multiple cegar - CONFIGS.append(IssueConfig('cpdbs-multiplecegar-wildcardplans-pdb1m-pdbs10m-t100-blacklist0.75-stag4-s{}'.format(random_seed), ['--search', "astar(cpdbs(multiple_cegar(max_refinements=infinity,wildcard_plans=true,max_time=100,max_pdb_size=1000000,max_collection_size=10000000,random_seed={},total_max_time=20,stagnation_limit=4,blacklist_trigger_percentage=0.75,blacklist_on_stagnation=true,verbosity=normal)),verbosity=silent)".format(random_seed)], driver_options=['--search-time-limit', '5m'])) - -SUITE = common_setup.DEFAULT_OPTIMAL_SUITE -ENVIRONMENT = BaselSlurmEnvironment( - email="silvan.sievers@unibas.ch", - partition="infai_2", - export=[], - # paths obtained via: - # module purge - # module -q load CMake/3.15.3-GCCcore-8.3.0 - # module -q load GCC/8.3.0 - # echo $PATH - # echo $LD_LIBRARY_PATH - setup='export 
PATH=/scicore/soft/apps/binutils/2.32-GCCcore-8.3.0/bin:/scicore/soft/apps/CMake/3.15.3-GCCcore-8.3.0/bin:/scicore/soft/apps/cURL/7.66.0-GCCcore-8.3.0/bin:/scicore/soft/apps/bzip2/1.0.8-GCCcore-8.3.0/bin:/scicore/soft/apps/ncurses/6.1-GCCcore-8.3.0/bin:/scicore/soft/apps/GCCcore/8.3.0/bin:/infai/sieverss/repos/bin:/infai/sieverss/local:/export/soft/lua_lmod/centos7/lmod/lmod/libexec:/usr/local/bin:/usr/bin:/usr/local/sbin:/usr/sbin:$PATH\nexport LD_LIBRARY_PATH=/scicore/soft/apps/binutils/2.32-GCCcore-8.3.0/lib:/scicore/soft/apps/cURL/7.66.0-GCCcore-8.3.0/lib:/scicore/soft/apps/bzip2/1.0.8-GCCcore-8.3.0/lib:/scicore/soft/apps/zlib/1.2.11-GCCcore-8.3.0/lib:/scicore/soft/apps/ncurses/6.1-GCCcore-8.3.0/lib:/scicore/soft/apps/GCCcore/8.3.0/lib64:/scicore/soft/apps/GCCcore/8.3.0/lib') - -if common_setup.is_test_run(): - SUITE = IssueExperiment.DEFAULT_TEST_SUITE - ENVIRONMENT = LocalEnvironment(processes=4) - -exp = IssueExperiment( - revisions=REVISIONS, - configs=CONFIGS, - environment=ENVIRONMENT, -) -exp.add_suite(BENCHMARKS_DIR, SUITE) - -exp.add_parser(exp.EXITCODE_PARSER) -exp.add_parser(exp.TRANSLATOR_PARSER) -exp.add_parser(exp.SINGLE_SEARCH_PARSER) -exp.add_parser(exp.PLANNER_PARSER) - -exp.add_step('build', exp.build) -exp.add_step('start', exp.start_runs) -exp.add_fetcher(name='fetch') - -exp.add_absolute_report_step(attributes=['coverage']) - -### compare against v6 -exp.add_fetcher('data/issue1007-v6-multiple-seeds-eval',merge=True,filter_algorithm=[ - 'issue1007-v6-cpdbs-singlecegar-regularplans-pdb1m-pdbs10m-t100-s{}'.format(random_seed) for random_seed in range(2018, 2028) -] + [ - 'issue1007-v6-cpdbs-multiplecegar-wildcardplans-pdb1m-pdbs10m-t100-blacklist0.75-stag4-s{}'.format(random_seed) for random_seed in range(2018, 2028) -]) - -exp.add_report( - AverageAlgorithmReport( - algo_name_suffixes=['-s{}'.format(seed) for seed in range(2018,2028)], - attributes=['coverage', 'search_time', 'total_time', - 'expansions_until_last_jump'], - 
filter_algorithm=[ - 'issue1007-v6-cpdbs-singlecegar-regularplans-pdb1m-pdbs10m-t100-s{}'.format(random_seed) for random_seed in range(2018, 2028) - ] + [ - 'issue1007-v6-cpdbs-multiplecegar-wildcardplans-pdb1m-pdbs10m-t100-blacklist0.75-stag4-s{}'.format(random_seed) for random_seed in range(2018, 2028) - ] + [ - 'issue1007-v7-cpdbs-singlecegar-regularplans-pdb1m-pdbs10m-t100-s{}'.format(random_seed) for random_seed in range(2018, 2028) - ] + [ - 'issue1007-v7-cpdbs-multiplecegar-wildcardplans-pdb1m-pdbs10m-t100-blacklist0.75-stag4-s{}'.format(random_seed) for random_seed in range(2018, 2028) - ], - ), - outfile=os.path.join(exp.eval_dir, "average-v6-v7", "properties"), - name="report-average-v6-v7" -) - -exp.add_fetcher('data/issue1007-v7-multiple-seeds-eval/average-v6-v7', merge=True) -exp._configs = [ - IssueConfig('cpdbs-singlecegar-regularplans-pdb1m-pdbs10m-t100', []), - IssueConfig('cpdbs-multiplecegar-wildcardplans-pdb1m-pdbs10m-t100-blacklist0.75-stag4', []), -] -exp.add_comparison_table_step( - attributes=['coverage', 'search_time', 'total_time', - 'expansions_until_last_jump'], - name="compare-v6-v7", - revisions=["issue1007-v6", "issue1007-v7"], -) - -exp.run_steps() diff --git a/experiments/issue1007/v7-v7b-v7c-fixed-seed.py b/experiments/issue1007/v7-v7b-v7c-fixed-seed.py deleted file mode 100755 index cf5d261c11..0000000000 --- a/experiments/issue1007/v7-v7b-v7c-fixed-seed.py +++ /dev/null @@ -1,70 +0,0 @@ -#! 
/usr/bin/env python3 - -import itertools -import os - -from lab.environments import LocalEnvironment, BaselSlurmEnvironment - -from downward.reports.compare import ComparativeReport - -import common_setup -from common_setup import IssueConfig, IssueExperiment - -DIR = os.path.dirname(os.path.abspath(__file__)) -SCRIPT_NAME = os.path.splitext(os.path.basename(__file__))[0] -BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"] -REVISIONS = ["issue1007-v7", "issue1007-v7b", "issue1007-v7c"] -random_seed=2018 -MAX_TIME=100 -if common_setup.is_test_run(): - MAX_TIME=2 -CONFIGS = [ - ### single cegar - IssueConfig(f'cpdbs-singlecegar-regularplans-pdb1m-pdbs10m-t100-s{random_seed}', ['--search', f'astar(cpdbs(single_cegar(wildcard_plans=false,max_time={MAX_TIME},max_pdb_size=1000000,max_collection_size=10000000,random_seed={random_seed},verbosity=normal)),verbosity=silent)']), - IssueConfig(f'cpdbs-singlecegar-wildcardplans-pdb1m-pdbs10m-t100-s{random_seed}', ['--search', f'astar(cpdbs(single_cegar(wildcard_plans=true,max_time={MAX_TIME},max_pdb_size=1000000,max_collection_size=10000000,random_seed={random_seed},verbosity=normal)),verbosity=silent)']), - - ### multiple cegar - IssueConfig(f'cpdbs-multiplecegar-regularplans-pdb1m-pdbs10m-t100-s{random_seed}', ['--search', f"astar(cpdbs(multiple_cegar(wildcard_plans=false,max_time=100,max_pdb_size=1000000,max_collection_size=10000000,random_seed={random_seed},max_collection_size=10000000,total_max_time={MAX_TIME},stagnation_limit=infinity,blacklist_trigger_percentage=1,blacklist_on_stagnation=false,verbosity=normal)),verbosity=silent)"]), - IssueConfig(f'cpdbs-multiplecegar-regularplans-pdb1m-pdbs10m-t100-blacklist0.75-stag20-s{random_seed}', ['--search', 
f"astar(cpdbs(multiple_cegar(wildcard_plans=false,max_time=100,max_pdb_size=1000000,max_collection_size=10000000,random_seed={random_seed},max_collection_size=10000000,total_max_time={MAX_TIME},stagnation_limit=20,blacklist_trigger_percentage=0.75,blacklist_on_stagnation=true,verbosity=normal)),verbosity=silent)"]), - - IssueConfig(f'cpdbs-multiplecegar-wildcardplans-pdb1m-pdbs10m-t100-s{random_seed}', ['--search', f"astar(cpdbs(multiple_cegar(wildcard_plans=true,max_time=100,max_pdb_size=1000000,max_collection_size=10000000,random_seed={random_seed},max_collection_size=10000000,total_max_time={MAX_TIME},stagnation_limit=infinity,blacklist_trigger_percentage=1,blacklist_on_stagnation=false,verbosity=normal)),verbosity=silent)"]), - IssueConfig(f'cpdbs-multiplecegar-wildcardplans-pdb1m-pdbs10m-t100-blacklist0.75-stag20-s{random_seed}', ['--search', f"astar(cpdbs(multiple_cegar(wildcard_plans=true,max_time=100,max_pdb_size=1000000,max_collection_size=10000000,random_seed={random_seed},max_collection_size=10000000,total_max_time={MAX_TIME},stagnation_limit=20,blacklist_trigger_percentage=0.75,blacklist_on_stagnation=true,verbosity=normal)),verbosity=silent)"]), -] - -SUITE = common_setup.DEFAULT_OPTIMAL_SUITE -ENVIRONMENT = BaselSlurmEnvironment( - email="silvan.sievers@unibas.ch", - partition="infai_2", - export=[], - # paths obtained via: - # module purge - # module -q load CMake/3.15.3-GCCcore-8.3.0 - # module -q load GCC/8.3.0 - # echo $PATH - # echo $LD_LIBRARY_PATH - setup='export PATH=/scicore/soft/apps/binutils/2.32-GCCcore-8.3.0/bin:/scicore/soft/apps/CMake/3.15.3-GCCcore-8.3.0/bin:/scicore/soft/apps/cURL/7.66.0-GCCcore-8.3.0/bin:/scicore/soft/apps/bzip2/1.0.8-GCCcore-8.3.0/bin:/scicore/soft/apps/ncurses/6.1-GCCcore-8.3.0/bin:/scicore/soft/apps/GCCcore/8.3.0/bin:/infai/sieverss/repos/bin:/infai/sieverss/local:/export/soft/lua_lmod/centos7/lmod/lmod/libexec:/usr/local/bin:/usr/bin:/usr/local/sbin:/usr/sbin:$PATH\nexport 
LD_LIBRARY_PATH=/scicore/soft/apps/binutils/2.32-GCCcore-8.3.0/lib:/scicore/soft/apps/cURL/7.66.0-GCCcore-8.3.0/lib:/scicore/soft/apps/bzip2/1.0.8-GCCcore-8.3.0/lib:/scicore/soft/apps/zlib/1.2.11-GCCcore-8.3.0/lib:/scicore/soft/apps/ncurses/6.1-GCCcore-8.3.0/lib:/scicore/soft/apps/GCCcore/8.3.0/lib64:/scicore/soft/apps/GCCcore/8.3.0/lib') - -if common_setup.is_test_run(): - SUITE = IssueExperiment.DEFAULT_TEST_SUITE - ENVIRONMENT = LocalEnvironment(processes=4) - -exp = IssueExperiment( - revisions=REVISIONS, - configs=CONFIGS, - environment=ENVIRONMENT, -) -exp.add_suite(BENCHMARKS_DIR, SUITE) - -exp.add_parser(exp.EXITCODE_PARSER) -exp.add_parser(exp.TRANSLATOR_PARSER) -exp.add_parser(exp.SINGLE_SEARCH_PARSER) -exp.add_parser(exp.PLANNER_PARSER) - -exp.add_step('build', exp.build) -exp.add_step('start', exp.start_runs) -exp.add_fetcher(name='fetch') - -exp.add_absolute_report_step() -exp.add_comparison_table_step() - -exp.run_steps() diff --git a/experiments/issue1007/v7-v7b-v7c.py b/experiments/issue1007/v7-v7b-v7c.py deleted file mode 100755 index 1d64d76ecf..0000000000 --- a/experiments/issue1007/v7-v7b-v7c.py +++ /dev/null @@ -1,90 +0,0 @@ -#! 
/usr/bin/env python3 - -import itertools -import os - -from lab.environments import LocalEnvironment, BaselSlurmEnvironment - -from downward.reports.compare import ComparativeReport - -import common_setup -from common_setup import IssueConfig, IssueExperiment -from average_report import AverageAlgorithmReport - -DIR = os.path.dirname(os.path.abspath(__file__)) -SCRIPT_NAME = os.path.splitext(os.path.basename(__file__))[0] -BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"] -REVISIONS = ["issue1007-v7", "issue1007-v7b", "issue1007-v7c"] -MAX_TIME=20 -if common_setup.is_test_run(): - MAX_TIME=2 -CONFIGS = [] -for random_seed in range(2018, 2028): - ### single cegar - CONFIGS.append(IssueConfig(f'cpdbs-singlecegar-regularplans-pdb1m-pdbs10m-t20-s{random_seed}', ['--search', f'astar(cpdbs(single_cegar(wildcard_plans=false,max_time={MAX_TIME},max_pdb_size=1000000,max_collection_size=10000000,random_seed={random_seed},verbosity=normal)),verbosity=silent)'], driver_options=['--search-time-limit', '5m'])) - - ### multiple cegar - CONFIGS.append(IssueConfig(f'cpdbs-multiplecegar-wildcardplans-pdb1m-pdbs10m-t20-blacklist0.75-stag4-s{random_seed}', ['--search', f'astar(cpdbs(multiple_cegar(wildcard_plans=true,max_time=100,max_pdb_size=1000000,max_collection_size=10000000,random_seed={random_seed},total_max_time={MAX_TIME},stagnation_limit=4,blacklist_trigger_percentage=0.75,blacklist_on_stagnation=true,verbosity=normal)),verbosity=silent)'], driver_options=['--search-time-limit', '5m'])) - -SUITE = common_setup.DEFAULT_OPTIMAL_SUITE -ENVIRONMENT = BaselSlurmEnvironment( - email="silvan.sievers@unibas.ch", - partition="infai_2", - export=[], - # paths obtained via: - # module purge - # module -q load CMake/3.15.3-GCCcore-8.3.0 - # module -q load GCC/8.3.0 - # echo $PATH - # echo $LD_LIBRARY_PATH - setup='export 
PATH=/scicore/soft/apps/binutils/2.32-GCCcore-8.3.0/bin:/scicore/soft/apps/CMake/3.15.3-GCCcore-8.3.0/bin:/scicore/soft/apps/cURL/7.66.0-GCCcore-8.3.0/bin:/scicore/soft/apps/bzip2/1.0.8-GCCcore-8.3.0/bin:/scicore/soft/apps/ncurses/6.1-GCCcore-8.3.0/bin:/scicore/soft/apps/GCCcore/8.3.0/bin:/infai/sieverss/repos/bin:/infai/sieverss/local:/export/soft/lua_lmod/centos7/lmod/lmod/libexec:/usr/local/bin:/usr/bin:/usr/local/sbin:/usr/sbin:$PATH\nexport LD_LIBRARY_PATH=/scicore/soft/apps/binutils/2.32-GCCcore-8.3.0/lib:/scicore/soft/apps/cURL/7.66.0-GCCcore-8.3.0/lib:/scicore/soft/apps/bzip2/1.0.8-GCCcore-8.3.0/lib:/scicore/soft/apps/zlib/1.2.11-GCCcore-8.3.0/lib:/scicore/soft/apps/ncurses/6.1-GCCcore-8.3.0/lib:/scicore/soft/apps/GCCcore/8.3.0/lib64:/scicore/soft/apps/GCCcore/8.3.0/lib') - -if common_setup.is_test_run(): - SUITE = IssueExperiment.DEFAULT_TEST_SUITE - ENVIRONMENT = LocalEnvironment(processes=4) - -exp = IssueExperiment( - revisions=REVISIONS, - configs=CONFIGS, - environment=ENVIRONMENT, -) -exp.add_suite(BENCHMARKS_DIR, SUITE) - -exp.add_parser(exp.EXITCODE_PARSER) -exp.add_parser(exp.TRANSLATOR_PARSER) -exp.add_parser(exp.SINGLE_SEARCH_PARSER) -exp.add_parser(exp.PLANNER_PARSER) - -exp.add_step('build', exp.build) -exp.add_step('start', exp.start_runs) -exp.add_fetcher(name='fetch') - -exp.add_absolute_report_step(attributes=['coverage']) - -exp.add_report( - AverageAlgorithmReport( - algo_name_suffixes=['-s{}'.format(seed) for seed in range(2018,2028)], - attributes=['coverage', 'search_time', 'total_time', - 'expansions_until_last_jump', 'score_search_time', - 'score_total_time', 'score_memory'], - ), - outfile=os.path.join(exp.eval_dir, "average", "properties"), - name="report-average" -) - -exp.add_fetcher(os.path.join(exp.eval_dir, 'average'), merge=True) -exp._configs = [ - IssueConfig('cpdbs-singlecegar-regularplans-pdb1m-pdbs10m-t20', []), - IssueConfig('cpdbs-multiplecegar-wildcardplans-pdb1m-pdbs10m-t20-blacklist0.75-stag4', []), -] 
-exp.add_comparison_table_step_for_revision_pairs( - revision_pairs=[ - ("issue1007-v7", "issue1007-v7b"), - ("issue1007-v7b", "issue1007-v7c"), - ], - attributes=['coverage', 'search_time', 'total_time', - 'expansions_until_last_jump', 'score_search_time', - 'score_total_time', 'score_memory'], -) - -exp.run_steps() diff --git a/experiments/issue1007/v7-v8-v8b-fixed-seed.py b/experiments/issue1007/v7-v8-v8b-fixed-seed.py deleted file mode 100755 index 3074019fae..0000000000 --- a/experiments/issue1007/v7-v8-v8b-fixed-seed.py +++ /dev/null @@ -1,70 +0,0 @@ -#! /usr/bin/env python3 - -import itertools -import os - -from lab.environments import LocalEnvironment, BaselSlurmEnvironment - -from downward.reports.compare import ComparativeReport - -import common_setup -from common_setup import IssueConfig, IssueExperiment - -DIR = os.path.dirname(os.path.abspath(__file__)) -SCRIPT_NAME = os.path.splitext(os.path.basename(__file__))[0] -BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"] -REVISIONS = ["issue1007-v7", "issue1007-v8", "issue1007-v8b"] -random_seed=2018 -MAX_TIME=100 -if common_setup.is_test_run(): - MAX_TIME=2 -CONFIGS = [ - ### single cegar - IssueConfig(f'cpdbs-singlecegar-regularplans-pdb1m-pdbs10m-t100-s{random_seed}', ['--search', f'astar(cpdbs(single_cegar(wildcard_plans=false,max_time={MAX_TIME},max_pdb_size=1000000,max_collection_size=10000000,random_seed={random_seed},verbosity=normal)),verbosity=silent)']), - IssueConfig(f'cpdbs-singlecegar-wildcardplans-pdb1m-pdbs10m-t100-s{random_seed}', ['--search', f'astar(cpdbs(single_cegar(wildcard_plans=true,max_time={MAX_TIME},max_pdb_size=1000000,max_collection_size=10000000,random_seed={random_seed},verbosity=normal)),verbosity=silent)']), - - ### multiple cegar - IssueConfig(f'cpdbs-multiplecegar-regularplans-pdb1m-pdbs10m-t100-s{random_seed}', ['--search', 
f"astar(cpdbs(multiple_cegar(wildcard_plans=false,max_time=100,max_pdb_size=1000000,max_collection_size=10000000,random_seed={random_seed},max_collection_size=10000000,total_max_time={MAX_TIME},stagnation_limit=infinity,blacklist_trigger_percentage=1,blacklist_on_stagnation=false,verbosity=normal)),verbosity=silent)"]), - IssueConfig(f'cpdbs-multiplecegar-regularplans-pdb1m-pdbs10m-t100-blacklist0.75-stag20-s{random_seed}', ['--search', f"astar(cpdbs(multiple_cegar(wildcard_plans=false,max_time=100,max_pdb_size=1000000,max_collection_size=10000000,random_seed={random_seed},max_collection_size=10000000,total_max_time={MAX_TIME},stagnation_limit=20,blacklist_trigger_percentage=0.75,blacklist_on_stagnation=true,verbosity=normal)),verbosity=silent)"]), - - IssueConfig(f'cpdbs-multiplecegar-wildcardplans-pdb1m-pdbs10m-t100-s{random_seed}', ['--search', f"astar(cpdbs(multiple_cegar(wildcard_plans=true,max_time=100,max_pdb_size=1000000,max_collection_size=10000000,random_seed={random_seed},max_collection_size=10000000,total_max_time={MAX_TIME},stagnation_limit=infinity,blacklist_trigger_percentage=1,blacklist_on_stagnation=false,verbosity=normal)),verbosity=silent)"]), - IssueConfig(f'cpdbs-multiplecegar-wildcardplans-pdb1m-pdbs10m-t100-blacklist0.75-stag20-s{random_seed}', ['--search', f"astar(cpdbs(multiple_cegar(wildcard_plans=true,max_time=100,max_pdb_size=1000000,max_collection_size=10000000,random_seed={random_seed},max_collection_size=10000000,total_max_time={MAX_TIME},stagnation_limit=20,blacklist_trigger_percentage=0.75,blacklist_on_stagnation=true,verbosity=normal)),verbosity=silent)"]), -] - -SUITE = common_setup.DEFAULT_OPTIMAL_SUITE -ENVIRONMENT = BaselSlurmEnvironment( - email="silvan.sievers@unibas.ch", - partition="infai_2", - export=[], - # paths obtained via: - # module purge - # module -q load CMake/3.15.3-GCCcore-8.3.0 - # module -q load GCC/8.3.0 - # echo $PATH - # echo $LD_LIBRARY_PATH - setup='export 
PATH=/scicore/soft/apps/binutils/2.32-GCCcore-8.3.0/bin:/scicore/soft/apps/CMake/3.15.3-GCCcore-8.3.0/bin:/scicore/soft/apps/cURL/7.66.0-GCCcore-8.3.0/bin:/scicore/soft/apps/bzip2/1.0.8-GCCcore-8.3.0/bin:/scicore/soft/apps/ncurses/6.1-GCCcore-8.3.0/bin:/scicore/soft/apps/GCCcore/8.3.0/bin:/infai/sieverss/repos/bin:/infai/sieverss/local:/export/soft/lua_lmod/centos7/lmod/lmod/libexec:/usr/local/bin:/usr/bin:/usr/local/sbin:/usr/sbin:$PATH\nexport LD_LIBRARY_PATH=/scicore/soft/apps/binutils/2.32-GCCcore-8.3.0/lib:/scicore/soft/apps/cURL/7.66.0-GCCcore-8.3.0/lib:/scicore/soft/apps/bzip2/1.0.8-GCCcore-8.3.0/lib:/scicore/soft/apps/zlib/1.2.11-GCCcore-8.3.0/lib:/scicore/soft/apps/ncurses/6.1-GCCcore-8.3.0/lib:/scicore/soft/apps/GCCcore/8.3.0/lib64:/scicore/soft/apps/GCCcore/8.3.0/lib') - -if common_setup.is_test_run(): - SUITE = IssueExperiment.DEFAULT_TEST_SUITE - ENVIRONMENT = LocalEnvironment(processes=4) - -exp = IssueExperiment( - revisions=REVISIONS, - configs=CONFIGS, - environment=ENVIRONMENT, -) -exp.add_suite(BENCHMARKS_DIR, SUITE) - -exp.add_parser(exp.EXITCODE_PARSER) -exp.add_parser(exp.TRANSLATOR_PARSER) -exp.add_parser(exp.SINGLE_SEARCH_PARSER) -exp.add_parser(exp.PLANNER_PARSER) - -exp.add_step('build', exp.build) -exp.add_step('start', exp.start_runs) -exp.add_fetcher(name='fetch') - -exp.add_absolute_report_step() -exp.add_comparison_table_step() - -exp.run_steps() diff --git a/experiments/issue1007/v7-v8-v8b.py b/experiments/issue1007/v7-v8-v8b.py deleted file mode 100755 index c3c0eed76c..0000000000 --- a/experiments/issue1007/v7-v8-v8b.py +++ /dev/null @@ -1,90 +0,0 @@ -#! 
/usr/bin/env python3 - -import itertools -import os - -from lab.environments import LocalEnvironment, BaselSlurmEnvironment - -from downward.reports.compare import ComparativeReport - -import common_setup -from common_setup import IssueConfig, IssueExperiment -from average_report import AverageAlgorithmReport - -DIR = os.path.dirname(os.path.abspath(__file__)) -SCRIPT_NAME = os.path.splitext(os.path.basename(__file__))[0] -BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"] -REVISIONS = ["issue1007-v7", "issue1007-v8", "issue1007-v8b"] -MAX_TIME=20 -if common_setup.is_test_run(): - MAX_TIME=2 -CONFIGS = [] -for random_seed in range(2018, 2028): - ### single cegar - CONFIGS.append(IssueConfig(f'cpdbs-singlecegar-regularplans-pdb1m-pdbs10m-t20-s{random_seed}', ['--search', f'astar(cpdbs(single_cegar(wildcard_plans=false,max_time={MAX_TIME},max_pdb_size=1000000,max_collection_size=10000000,random_seed={random_seed},verbosity=normal)),verbosity=silent)'], driver_options=['--search-time-limit', '5m'])) - - ### multiple cegar - CONFIGS.append(IssueConfig(f'cpdbs-multiplecegar-wildcardplans-pdb1m-pdbs10m-t20-blacklist0.75-stag4-s{random_seed}', ['--search', f'astar(cpdbs(multiple_cegar(wildcard_plans=true,max_time=100,max_pdb_size=1000000,max_collection_size=10000000,random_seed={random_seed},total_max_time={MAX_TIME},stagnation_limit=4,blacklist_trigger_percentage=0.75,blacklist_on_stagnation=true,verbosity=normal)),verbosity=silent)'], driver_options=['--search-time-limit', '5m'])) - -SUITE = common_setup.DEFAULT_OPTIMAL_SUITE -ENVIRONMENT = BaselSlurmEnvironment( - email="silvan.sievers@unibas.ch", - partition="infai_2", - export=[], - # paths obtained via: - # module purge - # module -q load CMake/3.15.3-GCCcore-8.3.0 - # module -q load GCC/8.3.0 - # echo $PATH - # echo $LD_LIBRARY_PATH - setup='export 
PATH=/scicore/soft/apps/binutils/2.32-GCCcore-8.3.0/bin:/scicore/soft/apps/CMake/3.15.3-GCCcore-8.3.0/bin:/scicore/soft/apps/cURL/7.66.0-GCCcore-8.3.0/bin:/scicore/soft/apps/bzip2/1.0.8-GCCcore-8.3.0/bin:/scicore/soft/apps/ncurses/6.1-GCCcore-8.3.0/bin:/scicore/soft/apps/GCCcore/8.3.0/bin:/infai/sieverss/repos/bin:/infai/sieverss/local:/export/soft/lua_lmod/centos7/lmod/lmod/libexec:/usr/local/bin:/usr/bin:/usr/local/sbin:/usr/sbin:$PATH\nexport LD_LIBRARY_PATH=/scicore/soft/apps/binutils/2.32-GCCcore-8.3.0/lib:/scicore/soft/apps/cURL/7.66.0-GCCcore-8.3.0/lib:/scicore/soft/apps/bzip2/1.0.8-GCCcore-8.3.0/lib:/scicore/soft/apps/zlib/1.2.11-GCCcore-8.3.0/lib:/scicore/soft/apps/ncurses/6.1-GCCcore-8.3.0/lib:/scicore/soft/apps/GCCcore/8.3.0/lib64:/scicore/soft/apps/GCCcore/8.3.0/lib') - -if common_setup.is_test_run(): - SUITE = IssueExperiment.DEFAULT_TEST_SUITE - ENVIRONMENT = LocalEnvironment(processes=4) - -exp = IssueExperiment( - revisions=REVISIONS, - configs=CONFIGS, - environment=ENVIRONMENT, -) -exp.add_suite(BENCHMARKS_DIR, SUITE) - -exp.add_parser(exp.EXITCODE_PARSER) -exp.add_parser(exp.TRANSLATOR_PARSER) -exp.add_parser(exp.SINGLE_SEARCH_PARSER) -exp.add_parser(exp.PLANNER_PARSER) - -exp.add_step('build', exp.build) -exp.add_step('start', exp.start_runs) -exp.add_fetcher(name='fetch') - -exp.add_absolute_report_step(attributes=['coverage']) - -exp.add_report( - AverageAlgorithmReport( - algo_name_suffixes=['-s{}'.format(seed) for seed in range(2018,2028)], - attributes=['coverage', 'search_time', 'total_time', - 'expansions_until_last_jump', 'score_search_time', - 'score_total_time', 'score_memory'], - ), - outfile=os.path.join(exp.eval_dir, "average", "properties"), - name="report-average" -) - -exp.add_fetcher(os.path.join(exp.eval_dir, 'average'), merge=True) -exp._configs = [ - IssueConfig('cpdbs-singlecegar-regularplans-pdb1m-pdbs10m-t20', []), - IssueConfig('cpdbs-multiplecegar-wildcardplans-pdb1m-pdbs10m-t20-blacklist0.75-stag4', []), -] 
-exp.add_comparison_table_step_for_revision_pairs( - revision_pairs=[ - ("issue1007-v7", "issue1007-v8"), - ("issue1007-v8", "issue1007-v8b"), - ], - attributes=['coverage', 'search_time', 'total_time', - 'expansions_until_last_jump', 'score_search_time', - 'score_total_time', 'score_memory'], -) - -exp.run_steps() diff --git a/experiments/issue1007/v7c-v8-fixed-seed.py b/experiments/issue1007/v7c-v8-fixed-seed.py deleted file mode 100755 index 6678d57ed9..0000000000 --- a/experiments/issue1007/v7c-v8-fixed-seed.py +++ /dev/null @@ -1,70 +0,0 @@ -#! /usr/bin/env python3 - -import itertools -import os - -from lab.environments import LocalEnvironment, BaselSlurmEnvironment - -from downward.reports.compare import ComparativeReport - -import common_setup -from common_setup import IssueConfig, IssueExperiment - -DIR = os.path.dirname(os.path.abspath(__file__)) -SCRIPT_NAME = os.path.splitext(os.path.basename(__file__))[0] -BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"] -REVISIONS = ["issue1007-v7c", "issue1007-v8"] -random_seed=2018 -MAX_TIME=100 -if common_setup.is_test_run(): - MAX_TIME=2 -CONFIGS = [ - ### single cegar - IssueConfig(f'cpdbs-singlecegar-regularplans-pdb1m-pdbs10m-t100-s{random_seed}', ['--search', f'astar(cpdbs(single_cegar(wildcard_plans=false,max_time={MAX_TIME},max_pdb_size=1000000,max_collection_size=10000000,random_seed={random_seed},verbosity=normal)),verbosity=silent)']), - IssueConfig(f'cpdbs-singlecegar-wildcardplans-pdb1m-pdbs10m-t100-s{random_seed}', ['--search', f'astar(cpdbs(single_cegar(wildcard_plans=true,max_time={MAX_TIME},max_pdb_size=1000000,max_collection_size=10000000,random_seed={random_seed},verbosity=normal)),verbosity=silent)']), - - ### multiple cegar - IssueConfig(f'cpdbs-multiplecegar-regularplans-pdb1m-pdbs10m-t100-s{random_seed}', ['--search', 
f"astar(cpdbs(multiple_cegar(wildcard_plans=false,max_time=100,max_pdb_size=1000000,max_collection_size=10000000,random_seed={random_seed},max_collection_size=10000000,total_max_time={MAX_TIME},stagnation_limit=infinity,blacklist_trigger_percentage=1,blacklist_on_stagnation=false,verbosity=normal)),verbosity=silent)"]), - IssueConfig(f'cpdbs-multiplecegar-regularplans-pdb1m-pdbs10m-t100-blacklist0.75-stag20-s{random_seed}', ['--search', f"astar(cpdbs(multiple_cegar(wildcard_plans=false,max_time=100,max_pdb_size=1000000,max_collection_size=10000000,random_seed={random_seed},max_collection_size=10000000,total_max_time={MAX_TIME},stagnation_limit=20,blacklist_trigger_percentage=0.75,blacklist_on_stagnation=true,verbosity=normal)),verbosity=silent)"]), - - IssueConfig(f'cpdbs-multiplecegar-wildcardplans-pdb1m-pdbs10m-t100-s{random_seed}', ['--search', f"astar(cpdbs(multiple_cegar(wildcard_plans=true,max_time=100,max_pdb_size=1000000,max_collection_size=10000000,random_seed={random_seed},max_collection_size=10000000,total_max_time={MAX_TIME},stagnation_limit=infinity,blacklist_trigger_percentage=1,blacklist_on_stagnation=false,verbosity=normal)),verbosity=silent)"]), - IssueConfig(f'cpdbs-multiplecegar-wildcardplans-pdb1m-pdbs10m-t100-blacklist0.75-stag20-s{random_seed}', ['--search', f"astar(cpdbs(multiple_cegar(wildcard_plans=true,max_time=100,max_pdb_size=1000000,max_collection_size=10000000,random_seed={random_seed},max_collection_size=10000000,total_max_time={MAX_TIME},stagnation_limit=20,blacklist_trigger_percentage=0.75,blacklist_on_stagnation=true,verbosity=normal)),verbosity=silent)"]), -] - -SUITE = common_setup.DEFAULT_OPTIMAL_SUITE -ENVIRONMENT = BaselSlurmEnvironment( - email="silvan.sievers@unibas.ch", - partition="infai_2", - export=[], - # paths obtained via: - # module purge - # module -q load CMake/3.15.3-GCCcore-8.3.0 - # module -q load GCC/8.3.0 - # echo $PATH - # echo $LD_LIBRARY_PATH - setup='export 
PATH=/scicore/soft/apps/binutils/2.32-GCCcore-8.3.0/bin:/scicore/soft/apps/CMake/3.15.3-GCCcore-8.3.0/bin:/scicore/soft/apps/cURL/7.66.0-GCCcore-8.3.0/bin:/scicore/soft/apps/bzip2/1.0.8-GCCcore-8.3.0/bin:/scicore/soft/apps/ncurses/6.1-GCCcore-8.3.0/bin:/scicore/soft/apps/GCCcore/8.3.0/bin:/infai/sieverss/repos/bin:/infai/sieverss/local:/export/soft/lua_lmod/centos7/lmod/lmod/libexec:/usr/local/bin:/usr/bin:/usr/local/sbin:/usr/sbin:$PATH\nexport LD_LIBRARY_PATH=/scicore/soft/apps/binutils/2.32-GCCcore-8.3.0/lib:/scicore/soft/apps/cURL/7.66.0-GCCcore-8.3.0/lib:/scicore/soft/apps/bzip2/1.0.8-GCCcore-8.3.0/lib:/scicore/soft/apps/zlib/1.2.11-GCCcore-8.3.0/lib:/scicore/soft/apps/ncurses/6.1-GCCcore-8.3.0/lib:/scicore/soft/apps/GCCcore/8.3.0/lib64:/scicore/soft/apps/GCCcore/8.3.0/lib') - -if common_setup.is_test_run(): - SUITE = IssueExperiment.DEFAULT_TEST_SUITE - ENVIRONMENT = LocalEnvironment(processes=4) - -exp = IssueExperiment( - revisions=REVISIONS, - configs=CONFIGS, - environment=ENVIRONMENT, -) -exp.add_suite(BENCHMARKS_DIR, SUITE) - -exp.add_parser(exp.EXITCODE_PARSER) -exp.add_parser(exp.TRANSLATOR_PARSER) -exp.add_parser(exp.SINGLE_SEARCH_PARSER) -exp.add_parser(exp.PLANNER_PARSER) - -exp.add_step('build', exp.build) -exp.add_step('start', exp.start_runs) -exp.add_fetcher(name='fetch') - -exp.add_absolute_report_step() -exp.add_comparison_table_step() - -exp.run_steps() diff --git a/experiments/issue1007/v7c-v8.py b/experiments/issue1007/v7c-v8.py deleted file mode 100755 index 09e318e44a..0000000000 --- a/experiments/issue1007/v7c-v8.py +++ /dev/null @@ -1,89 +0,0 @@ -#! 
/usr/bin/env python3 - -import itertools -import os - -from lab.environments import LocalEnvironment, BaselSlurmEnvironment - -from downward.reports.compare import ComparativeReport - -import common_setup -from common_setup import IssueConfig, IssueExperiment -from average_report import AverageAlgorithmReport - -DIR = os.path.dirname(os.path.abspath(__file__)) -SCRIPT_NAME = os.path.splitext(os.path.basename(__file__))[0] -BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"] -REVISIONS = ["issue1007-v7c", "issue1007-v8"] -MAX_TIME=20 -if common_setup.is_test_run(): - MAX_TIME=2 -CONFIGS = [] -for random_seed in range(2018, 2028): - ### single cegar - CONFIGS.append(IssueConfig(f'cpdbs-singlecegar-regularplans-pdb1m-pdbs10m-t20-s{random_seed}', ['--search', f'astar(cpdbs(single_cegar(wildcard_plans=false,max_time={MAX_TIME},max_pdb_size=1000000,max_collection_size=10000000,random_seed={random_seed},verbosity=normal)),verbosity=silent)'], driver_options=['--search-time-limit', '5m'])) - - ### multiple cegar - CONFIGS.append(IssueConfig(f'cpdbs-multiplecegar-wildcardplans-pdb1m-pdbs10m-t20-blacklist0.75-stag4-s{random_seed}', ['--search', f'astar(cpdbs(multiple_cegar(wildcard_plans=true,max_time=100,max_pdb_size=1000000,max_collection_size=10000000,random_seed={random_seed},total_max_time={MAX_TIME},stagnation_limit=4,blacklist_trigger_percentage=0.75,blacklist_on_stagnation=true,verbosity=normal)),verbosity=silent)'], driver_options=['--search-time-limit', '5m'])) - -SUITE = common_setup.DEFAULT_OPTIMAL_SUITE -ENVIRONMENT = BaselSlurmEnvironment( - email="silvan.sievers@unibas.ch", - partition="infai_2", - export=[], - # paths obtained via: - # module purge - # module -q load CMake/3.15.3-GCCcore-8.3.0 - # module -q load GCC/8.3.0 - # echo $PATH - # echo $LD_LIBRARY_PATH - setup='export 
PATH=/scicore/soft/apps/binutils/2.32-GCCcore-8.3.0/bin:/scicore/soft/apps/CMake/3.15.3-GCCcore-8.3.0/bin:/scicore/soft/apps/cURL/7.66.0-GCCcore-8.3.0/bin:/scicore/soft/apps/bzip2/1.0.8-GCCcore-8.3.0/bin:/scicore/soft/apps/ncurses/6.1-GCCcore-8.3.0/bin:/scicore/soft/apps/GCCcore/8.3.0/bin:/infai/sieverss/repos/bin:/infai/sieverss/local:/export/soft/lua_lmod/centos7/lmod/lmod/libexec:/usr/local/bin:/usr/bin:/usr/local/sbin:/usr/sbin:$PATH\nexport LD_LIBRARY_PATH=/scicore/soft/apps/binutils/2.32-GCCcore-8.3.0/lib:/scicore/soft/apps/cURL/7.66.0-GCCcore-8.3.0/lib:/scicore/soft/apps/bzip2/1.0.8-GCCcore-8.3.0/lib:/scicore/soft/apps/zlib/1.2.11-GCCcore-8.3.0/lib:/scicore/soft/apps/ncurses/6.1-GCCcore-8.3.0/lib:/scicore/soft/apps/GCCcore/8.3.0/lib64:/scicore/soft/apps/GCCcore/8.3.0/lib') - -if common_setup.is_test_run(): - SUITE = IssueExperiment.DEFAULT_TEST_SUITE - ENVIRONMENT = LocalEnvironment(processes=4) - -exp = IssueExperiment( - revisions=REVISIONS, - configs=CONFIGS, - environment=ENVIRONMENT, -) -exp.add_suite(BENCHMARKS_DIR, SUITE) - -exp.add_parser(exp.EXITCODE_PARSER) -exp.add_parser(exp.TRANSLATOR_PARSER) -exp.add_parser(exp.SINGLE_SEARCH_PARSER) -exp.add_parser(exp.PLANNER_PARSER) - -exp.add_step('build', exp.build) -exp.add_step('start', exp.start_runs) -exp.add_fetcher(name='fetch') - -exp.add_absolute_report_step(attributes=['coverage']) - -exp.add_report( - AverageAlgorithmReport( - algo_name_suffixes=['-s{}'.format(seed) for seed in range(2018,2028)], - attributes=['coverage', 'search_time', 'total_time', - 'expansions_until_last_jump', 'score_search_time', - 'score_total_time', 'score_memory'], - ), - outfile=os.path.join(exp.eval_dir, "average", "properties"), - name="report-average" -) - -exp.add_fetcher(os.path.join(exp.eval_dir, 'average'), merge=True) -exp._configs = [ - IssueConfig('cpdbs-singlecegar-regularplans-pdb1m-pdbs10m-t20', []), - IssueConfig('cpdbs-multiplecegar-wildcardplans-pdb1m-pdbs10m-t20-blacklist0.75-stag4', []), -] 
-exp.add_comparison_table_step_for_revision_pairs( - revision_pairs=[ - ("issue1007-v7c", "issue1007-v8"), - ], - attributes=['coverage', 'search_time', 'total_time', - 'expansions_until_last_jump', 'score_search_time', - 'score_total_time', 'score_memory'], -) - -exp.run_steps() diff --git a/experiments/issue1007/v8b-v8c-v8d-fixed-seed.py b/experiments/issue1007/v8b-v8c-v8d-fixed-seed.py deleted file mode 100755 index ba0a280aef..0000000000 --- a/experiments/issue1007/v8b-v8c-v8d-fixed-seed.py +++ /dev/null @@ -1,70 +0,0 @@ -#! /usr/bin/env python3 - -import itertools -import os - -from lab.environments import LocalEnvironment, BaselSlurmEnvironment - -from downward.reports.compare import ComparativeReport - -import common_setup -from common_setup import IssueConfig, IssueExperiment - -DIR = os.path.dirname(os.path.abspath(__file__)) -SCRIPT_NAME = os.path.splitext(os.path.basename(__file__))[0] -BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"] -REVISIONS = ["issue1007-v8b", "issue1007-v8c", "issue1007-v8d"] -random_seed=2018 -MAX_TIME=100 -if common_setup.is_test_run(): - MAX_TIME=2 -CONFIGS = [ - ### single cegar - IssueConfig(f'cpdbs-singlecegar-regularplans-pdb1m-pdbs10m-t100-s{random_seed}', ['--search', f'astar(cpdbs(single_cegar(wildcard_plans=false,max_time={MAX_TIME},max_pdb_size=1000000,max_collection_size=10000000,random_seed={random_seed},verbosity=normal)),verbosity=silent)']), - IssueConfig(f'cpdbs-singlecegar-wildcardplans-pdb1m-pdbs10m-t100-s{random_seed}', ['--search', f'astar(cpdbs(single_cegar(wildcard_plans=true,max_time={MAX_TIME},max_pdb_size=1000000,max_collection_size=10000000,random_seed={random_seed},verbosity=normal)),verbosity=silent)']), - - ### multiple cegar - IssueConfig(f'cpdbs-multiplecegar-regularplans-pdb1m-pdbs10m-t100-s{random_seed}', ['--search', 
f"astar(cpdbs(multiple_cegar(wildcard_plans=false,max_time=100,max_pdb_size=1000000,max_collection_size=10000000,random_seed={random_seed},max_collection_size=10000000,total_max_time={MAX_TIME},stagnation_limit=infinity,blacklist_trigger_percentage=1,blacklist_on_stagnation=false,verbosity=normal)),verbosity=silent)"]), - IssueConfig(f'cpdbs-multiplecegar-regularplans-pdb1m-pdbs10m-t100-blacklist0.75-stag20-s{random_seed}', ['--search', f"astar(cpdbs(multiple_cegar(wildcard_plans=false,max_time=100,max_pdb_size=1000000,max_collection_size=10000000,random_seed={random_seed},max_collection_size=10000000,total_max_time={MAX_TIME},stagnation_limit=20,blacklist_trigger_percentage=0.75,blacklist_on_stagnation=true,verbosity=normal)),verbosity=silent)"]), - - IssueConfig(f'cpdbs-multiplecegar-wildcardplans-pdb1m-pdbs10m-t100-s{random_seed}', ['--search', f"astar(cpdbs(multiple_cegar(wildcard_plans=true,max_time=100,max_pdb_size=1000000,max_collection_size=10000000,random_seed={random_seed},max_collection_size=10000000,total_max_time={MAX_TIME},stagnation_limit=infinity,blacklist_trigger_percentage=1,blacklist_on_stagnation=false,verbosity=normal)),verbosity=silent)"]), - IssueConfig(f'cpdbs-multiplecegar-wildcardplans-pdb1m-pdbs10m-t100-blacklist0.75-stag20-s{random_seed}', ['--search', f"astar(cpdbs(multiple_cegar(wildcard_plans=true,max_time=100,max_pdb_size=1000000,max_collection_size=10000000,random_seed={random_seed},max_collection_size=10000000,total_max_time={MAX_TIME},stagnation_limit=20,blacklist_trigger_percentage=0.75,blacklist_on_stagnation=true,verbosity=normal)),verbosity=silent)"]), -] - -SUITE = common_setup.DEFAULT_OPTIMAL_SUITE -ENVIRONMENT = BaselSlurmEnvironment( - email="silvan.sievers@unibas.ch", - partition="infai_2", - export=[], - # paths obtained via: - # module purge - # module -q load CMake/3.15.3-GCCcore-8.3.0 - # module -q load GCC/8.3.0 - # echo $PATH - # echo $LD_LIBRARY_PATH - setup='export 
PATH=/scicore/soft/apps/binutils/2.32-GCCcore-8.3.0/bin:/scicore/soft/apps/CMake/3.15.3-GCCcore-8.3.0/bin:/scicore/soft/apps/cURL/7.66.0-GCCcore-8.3.0/bin:/scicore/soft/apps/bzip2/1.0.8-GCCcore-8.3.0/bin:/scicore/soft/apps/ncurses/6.1-GCCcore-8.3.0/bin:/scicore/soft/apps/GCCcore/8.3.0/bin:/infai/sieverss/repos/bin:/infai/sieverss/local:/export/soft/lua_lmod/centos7/lmod/lmod/libexec:/usr/local/bin:/usr/bin:/usr/local/sbin:/usr/sbin:$PATH\nexport LD_LIBRARY_PATH=/scicore/soft/apps/binutils/2.32-GCCcore-8.3.0/lib:/scicore/soft/apps/cURL/7.66.0-GCCcore-8.3.0/lib:/scicore/soft/apps/bzip2/1.0.8-GCCcore-8.3.0/lib:/scicore/soft/apps/zlib/1.2.11-GCCcore-8.3.0/lib:/scicore/soft/apps/ncurses/6.1-GCCcore-8.3.0/lib:/scicore/soft/apps/GCCcore/8.3.0/lib64:/scicore/soft/apps/GCCcore/8.3.0/lib') - -if common_setup.is_test_run(): - SUITE = IssueExperiment.DEFAULT_TEST_SUITE - ENVIRONMENT = LocalEnvironment(processes=4) - -exp = IssueExperiment( - revisions=REVISIONS, - configs=CONFIGS, - environment=ENVIRONMENT, -) -exp.add_suite(BENCHMARKS_DIR, SUITE) - -exp.add_parser(exp.EXITCODE_PARSER) -exp.add_parser(exp.TRANSLATOR_PARSER) -exp.add_parser(exp.SINGLE_SEARCH_PARSER) -exp.add_parser(exp.PLANNER_PARSER) - -exp.add_step('build', exp.build) -exp.add_step('start', exp.start_runs) -exp.add_fetcher(name='fetch') - -exp.add_absolute_report_step() -exp.add_comparison_table_step() - -exp.run_steps() diff --git a/experiments/issue1007/v8b-v8c-v8d.py b/experiments/issue1007/v8b-v8c-v8d.py deleted file mode 100755 index d8ae174dc8..0000000000 --- a/experiments/issue1007/v8b-v8c-v8d.py +++ /dev/null @@ -1,90 +0,0 @@ -#! 
/usr/bin/env python3 - -import itertools -import os - -from lab.environments import LocalEnvironment, BaselSlurmEnvironment - -from downward.reports.compare import ComparativeReport - -import common_setup -from common_setup import IssueConfig, IssueExperiment -from average_report import AverageAlgorithmReport - -DIR = os.path.dirname(os.path.abspath(__file__)) -SCRIPT_NAME = os.path.splitext(os.path.basename(__file__))[0] -BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"] -REVISIONS = ["issue1007-v8b", "issue1007-v8c", "issue1007-v8d"] -MAX_TIME=20 -if common_setup.is_test_run(): - MAX_TIME=2 -CONFIGS = [] -for random_seed in range(2018, 2028): - ### single cegar - CONFIGS.append(IssueConfig(f'cpdbs-singlecegar-regularplans-pdb1m-pdbs10m-t20-s{random_seed}', ['--search', f'astar(cpdbs(single_cegar(wildcard_plans=false,max_time={MAX_TIME},max_pdb_size=1000000,max_collection_size=10000000,random_seed={random_seed},verbosity=normal)),verbosity=silent)'], driver_options=['--search-time-limit', '5m'])) - - ### multiple cegar - CONFIGS.append(IssueConfig(f'cpdbs-multiplecegar-wildcardplans-pdb1m-pdbs10m-t20-blacklist0.75-stag4-s{random_seed}', ['--search', f'astar(cpdbs(multiple_cegar(wildcard_plans=true,max_time=100,max_pdb_size=1000000,max_collection_size=10000000,random_seed={random_seed},total_max_time={MAX_TIME},stagnation_limit=4,blacklist_trigger_percentage=0.75,blacklist_on_stagnation=true,verbosity=normal)),verbosity=silent)'], driver_options=['--search-time-limit', '5m'])) - -SUITE = common_setup.DEFAULT_OPTIMAL_SUITE -ENVIRONMENT = BaselSlurmEnvironment( - email="silvan.sievers@unibas.ch", - partition="infai_2", - export=[], - # paths obtained via: - # module purge - # module -q load CMake/3.15.3-GCCcore-8.3.0 - # module -q load GCC/8.3.0 - # echo $PATH - # echo $LD_LIBRARY_PATH - setup='export 
PATH=/scicore/soft/apps/binutils/2.32-GCCcore-8.3.0/bin:/scicore/soft/apps/CMake/3.15.3-GCCcore-8.3.0/bin:/scicore/soft/apps/cURL/7.66.0-GCCcore-8.3.0/bin:/scicore/soft/apps/bzip2/1.0.8-GCCcore-8.3.0/bin:/scicore/soft/apps/ncurses/6.1-GCCcore-8.3.0/bin:/scicore/soft/apps/GCCcore/8.3.0/bin:/infai/sieverss/repos/bin:/infai/sieverss/local:/export/soft/lua_lmod/centos7/lmod/lmod/libexec:/usr/local/bin:/usr/bin:/usr/local/sbin:/usr/sbin:$PATH\nexport LD_LIBRARY_PATH=/scicore/soft/apps/binutils/2.32-GCCcore-8.3.0/lib:/scicore/soft/apps/cURL/7.66.0-GCCcore-8.3.0/lib:/scicore/soft/apps/bzip2/1.0.8-GCCcore-8.3.0/lib:/scicore/soft/apps/zlib/1.2.11-GCCcore-8.3.0/lib:/scicore/soft/apps/ncurses/6.1-GCCcore-8.3.0/lib:/scicore/soft/apps/GCCcore/8.3.0/lib64:/scicore/soft/apps/GCCcore/8.3.0/lib') - -if common_setup.is_test_run(): - SUITE = IssueExperiment.DEFAULT_TEST_SUITE - ENVIRONMENT = LocalEnvironment(processes=4) - -exp = IssueExperiment( - revisions=REVISIONS, - configs=CONFIGS, - environment=ENVIRONMENT, -) -exp.add_suite(BENCHMARKS_DIR, SUITE) - -exp.add_parser(exp.EXITCODE_PARSER) -exp.add_parser(exp.TRANSLATOR_PARSER) -exp.add_parser(exp.SINGLE_SEARCH_PARSER) -exp.add_parser(exp.PLANNER_PARSER) - -exp.add_step('build', exp.build) -exp.add_step('start', exp.start_runs) -exp.add_fetcher(name='fetch') - -exp.add_absolute_report_step(attributes=['coverage']) - -exp.add_report( - AverageAlgorithmReport( - algo_name_suffixes=['-s{}'.format(seed) for seed in range(2018,2028)], - attributes=['coverage', 'search_time', 'total_time', - 'expansions_until_last_jump', 'score_search_time', - 'score_total_time', 'score_memory'], - ), - outfile=os.path.join(exp.eval_dir, "average", "properties"), - name="report-average" -) - -exp.add_fetcher(os.path.join(exp.eval_dir, 'average'), merge=True) -exp._configs = [ - IssueConfig('cpdbs-singlecegar-regularplans-pdb1m-pdbs10m-t20', []), - IssueConfig('cpdbs-multiplecegar-wildcardplans-pdb1m-pdbs10m-t20-blacklist0.75-stag4', []), -] 
-exp.add_comparison_table_step_for_revision_pairs( - revision_pairs=[ - ("issue1007-v8b", "issue1007-v8c"), - ("issue1007-v8c", "issue1007-v8d"), - ], - attributes=['coverage', 'search_time', 'total_time', - 'expansions_until_last_jump', 'score_search_time', - 'score_total_time', 'score_memory'], -) - -exp.run_steps() diff --git a/experiments/issue1007/v9-fixed-seed.py b/experiments/issue1007/v9-fixed-seed.py deleted file mode 100755 index 38753a03ef..0000000000 --- a/experiments/issue1007/v9-fixed-seed.py +++ /dev/null @@ -1,73 +0,0 @@ -#! /usr/bin/env python3 - -import itertools -import os - -from lab.environments import LocalEnvironment, BaselSlurmEnvironment - -from downward.reports.compare import ComparativeReport - -import common_setup -from common_setup import IssueConfig, IssueExperiment - -DIR = os.path.dirname(os.path.abspath(__file__)) -SCRIPT_NAME = os.path.splitext(os.path.basename(__file__))[0] -BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"] -REVISIONS = ["issue1007-v8c", "issue1007-v9"] -random_seed=2018 -MAX_TIME=100 -if common_setup.is_test_run(): - MAX_TIME=2 -CONFIGS = [ - ### ipdb - IssueConfig(f'cpdbs-hillclimbing-pdb1m-pdbs10m-t100-s{random_seed}', ['--search', f'astar(cpdbs(hillclimbing(max_time={MAX_TIME},pdb_max_size=1000000,collection_max_size=10000000,random_seed={random_seed})),verbosity=silent)']), - - ### single cegar - IssueConfig(f'cpdbs-singlecegar-regularplans-pdb1m-pdbs10m-t100-s{random_seed}', ['--search', f'astar(cpdbs(single_cegar(wildcard_plans=false,max_time={MAX_TIME},max_pdb_size=1000000,max_collection_size=10000000,random_seed={random_seed},verbosity=normal)),verbosity=silent)']), - IssueConfig(f'cpdbs-singlecegar-wildcardplans-pdb1m-pdbs10m-t100-s{random_seed}', ['--search', f'astar(cpdbs(single_cegar(wildcard_plans=true,max_time={MAX_TIME},max_pdb_size=1000000,max_collection_size=10000000,random_seed={random_seed},verbosity=normal)),verbosity=silent)']), - - ### multiple cegar - 
IssueConfig(f'cpdbs-multiplecegar-regularplans-pdb1m-pdbs10m-t100-s{random_seed}', ['--search', f"astar(cpdbs(multiple_cegar(wildcard_plans=false,max_time=100,max_pdb_size=1000000,max_collection_size=10000000,random_seed={random_seed},max_collection_size=10000000,total_max_time={MAX_TIME},stagnation_limit=infinity,blacklist_trigger_percentage=1,blacklist_on_stagnation=false,verbosity=normal)),verbosity=silent)"]), - IssueConfig(f'cpdbs-multiplecegar-regularplans-pdb1m-pdbs10m-t100-blacklist0.75-stag20-s{random_seed}', ['--search', f"astar(cpdbs(multiple_cegar(wildcard_plans=false,max_time=100,max_pdb_size=1000000,max_collection_size=10000000,random_seed={random_seed},max_collection_size=10000000,total_max_time={MAX_TIME},stagnation_limit=20,blacklist_trigger_percentage=0.75,blacklist_on_stagnation=true,verbosity=normal)),verbosity=silent)"]), - - IssueConfig(f'cpdbs-multiplecegar-wildcardplans-pdb1m-pdbs10m-t100-s{random_seed}', ['--search', f"astar(cpdbs(multiple_cegar(wildcard_plans=true,max_time=100,max_pdb_size=1000000,max_collection_size=10000000,random_seed={random_seed},max_collection_size=10000000,total_max_time={MAX_TIME},stagnation_limit=infinity,blacklist_trigger_percentage=1,blacklist_on_stagnation=false,verbosity=normal)),verbosity=silent)"]), - IssueConfig(f'cpdbs-multiplecegar-wildcardplans-pdb1m-pdbs10m-t100-blacklist0.75-stag20-s{random_seed}', ['--search', f"astar(cpdbs(multiple_cegar(wildcard_plans=true,max_time=100,max_pdb_size=1000000,max_collection_size=10000000,random_seed={random_seed},max_collection_size=10000000,total_max_time={MAX_TIME},stagnation_limit=20,blacklist_trigger_percentage=0.75,blacklist_on_stagnation=true,verbosity=normal)),verbosity=silent)"]), -] - -SUITE = common_setup.DEFAULT_OPTIMAL_SUITE -ENVIRONMENT = BaselSlurmEnvironment( - email="silvan.sievers@unibas.ch", - partition="infai_2", - export=[], - # paths obtained via: - # module purge - # module -q load CMake/3.15.3-GCCcore-8.3.0 - # module -q load GCC/8.3.0 - # echo 
$PATH - # echo $LD_LIBRARY_PATH - setup='export PATH=/scicore/soft/apps/binutils/2.32-GCCcore-8.3.0/bin:/scicore/soft/apps/CMake/3.15.3-GCCcore-8.3.0/bin:/scicore/soft/apps/cURL/7.66.0-GCCcore-8.3.0/bin:/scicore/soft/apps/bzip2/1.0.8-GCCcore-8.3.0/bin:/scicore/soft/apps/ncurses/6.1-GCCcore-8.3.0/bin:/scicore/soft/apps/GCCcore/8.3.0/bin:/infai/sieverss/repos/bin:/infai/sieverss/local:/export/soft/lua_lmod/centos7/lmod/lmod/libexec:/usr/local/bin:/usr/bin:/usr/local/sbin:/usr/sbin:$PATH\nexport LD_LIBRARY_PATH=/scicore/soft/apps/binutils/2.32-GCCcore-8.3.0/lib:/scicore/soft/apps/cURL/7.66.0-GCCcore-8.3.0/lib:/scicore/soft/apps/bzip2/1.0.8-GCCcore-8.3.0/lib:/scicore/soft/apps/zlib/1.2.11-GCCcore-8.3.0/lib:/scicore/soft/apps/ncurses/6.1-GCCcore-8.3.0/lib:/scicore/soft/apps/GCCcore/8.3.0/lib64:/scicore/soft/apps/GCCcore/8.3.0/lib') - -if common_setup.is_test_run(): - SUITE = IssueExperiment.DEFAULT_TEST_SUITE - ENVIRONMENT = LocalEnvironment(processes=4) - -exp = IssueExperiment( - revisions=REVISIONS, - configs=CONFIGS, - environment=ENVIRONMENT, -) -exp.add_suite(BENCHMARKS_DIR, SUITE) - -exp.add_parser(exp.EXITCODE_PARSER) -exp.add_parser(exp.TRANSLATOR_PARSER) -exp.add_parser(exp.SINGLE_SEARCH_PARSER) -exp.add_parser(exp.PLANNER_PARSER) - -exp.add_step('build', exp.build) -exp.add_step('start', exp.start_runs) -exp.add_fetcher(name='fetch') - -exp.add_absolute_report_step() -exp.add_comparison_table_step() - -exp.run_steps() diff --git a/experiments/issue1007/v9-multiple-seeds.py b/experiments/issue1007/v9-multiple-seeds.py deleted file mode 100755 index e121cfd48c..0000000000 --- a/experiments/issue1007/v9-multiple-seeds.py +++ /dev/null @@ -1,93 +0,0 @@ -#! 
/usr/bin/env python3 - -import itertools -import os - -from lab.environments import LocalEnvironment, BaselSlurmEnvironment - -from downward.reports.compare import ComparativeReport - -import common_setup -from common_setup import IssueConfig, IssueExperiment -from average_report import AverageAlgorithmReport - -DIR = os.path.dirname(os.path.abspath(__file__)) -SCRIPT_NAME = os.path.splitext(os.path.basename(__file__))[0] -BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"] -REVISIONS = ["issue1007-v8c", "issue1007-v9"] -MAX_TIME=20 -if common_setup.is_test_run(): - MAX_TIME=1 -CONFIGS = [] -for random_seed in range(2018, 2028): - ### ipdb - CONFIGS.append(IssueConfig(f'cpdbs-hillclimbing-pdb1m-pdbs10m-t20-s{random_seed}', ['--search', f'astar(cpdbs(hillclimbing(max_time={MAX_TIME},pdb_max_size=1000000,collection_max_size=10000000,random_seed={random_seed})),verbosity=silent)'], driver_options=['--search-time-limit', '5m'])), - - ### single cegar - CONFIGS.append(IssueConfig(f'cpdbs-singlecegar-regularplans-pdb1m-pdbs10m-t20-s{random_seed}', ['--search', f'astar(cpdbs(single_cegar(wildcard_plans=false,max_time={MAX_TIME},max_pdb_size=1000000,max_collection_size=10000000,random_seed={random_seed},verbosity=normal)),verbosity=silent)'], driver_options=['--search-time-limit', '5m'])) - - ### multiple cegar - CONFIGS.append(IssueConfig(f'cpdbs-multiplecegar-wildcardplans-pdb1m-pdbs10m-t20-blacklist0.75-stag4-s{random_seed}', ['--search', f'astar(cpdbs(multiple_cegar(wildcard_plans=true,max_time=100,max_pdb_size=1000000,max_collection_size=10000000,random_seed={random_seed},total_max_time={MAX_TIME},stagnation_limit=4,blacklist_trigger_percentage=0.75,blacklist_on_stagnation=true,verbosity=normal)),verbosity=silent)'], driver_options=['--search-time-limit', '5m'])) - -SUITE = common_setup.DEFAULT_OPTIMAL_SUITE -ENVIRONMENT = BaselSlurmEnvironment( - email="silvan.sievers@unibas.ch", - partition="infai_2", - export=[], - # paths obtained via: - # module purge - # module 
-q load CMake/3.15.3-GCCcore-8.3.0 - # module -q load GCC/8.3.0 - # echo $PATH - # echo $LD_LIBRARY_PATH - setup='export PATH=/scicore/soft/apps/binutils/2.32-GCCcore-8.3.0/bin:/scicore/soft/apps/CMake/3.15.3-GCCcore-8.3.0/bin:/scicore/soft/apps/cURL/7.66.0-GCCcore-8.3.0/bin:/scicore/soft/apps/bzip2/1.0.8-GCCcore-8.3.0/bin:/scicore/soft/apps/ncurses/6.1-GCCcore-8.3.0/bin:/scicore/soft/apps/GCCcore/8.3.0/bin:/infai/sieverss/repos/bin:/infai/sieverss/local:/export/soft/lua_lmod/centos7/lmod/lmod/libexec:/usr/local/bin:/usr/bin:/usr/local/sbin:/usr/sbin:$PATH\nexport LD_LIBRARY_PATH=/scicore/soft/apps/binutils/2.32-GCCcore-8.3.0/lib:/scicore/soft/apps/cURL/7.66.0-GCCcore-8.3.0/lib:/scicore/soft/apps/bzip2/1.0.8-GCCcore-8.3.0/lib:/scicore/soft/apps/zlib/1.2.11-GCCcore-8.3.0/lib:/scicore/soft/apps/ncurses/6.1-GCCcore-8.3.0/lib:/scicore/soft/apps/GCCcore/8.3.0/lib64:/scicore/soft/apps/GCCcore/8.3.0/lib') - -if common_setup.is_test_run(): - SUITE = IssueExperiment.DEFAULT_TEST_SUITE - ENVIRONMENT = LocalEnvironment(processes=4) - -exp = IssueExperiment( - revisions=REVISIONS, - configs=CONFIGS, - environment=ENVIRONMENT, -) -exp.add_suite(BENCHMARKS_DIR, SUITE) - -exp.add_parser(exp.EXITCODE_PARSER) -exp.add_parser(exp.TRANSLATOR_PARSER) -exp.add_parser(exp.SINGLE_SEARCH_PARSER) -exp.add_parser(exp.PLANNER_PARSER) - -exp.add_step('build', exp.build) -exp.add_step('start', exp.start_runs) -exp.add_fetcher(name='fetch') - -exp.add_absolute_report_step(attributes=['coverage']) - -exp.add_report( - AverageAlgorithmReport( - algo_name_suffixes=['-s{}'.format(seed) for seed in range(2018,2028)], - attributes=['coverage', 'search_time', 'total_time', - 'expansions_until_last_jump', 'score_search_time', - 'score_total_time', 'score_memory'], - ), - outfile=os.path.join(exp.eval_dir, "average", "properties"), - name="report-average" -) - -exp.add_fetcher(os.path.join(exp.eval_dir, 'average'), merge=True) -exp._configs = [ - IssueConfig('cpdbs-hillclimbing-pdb1m-pdbs10m-t20', []), 
- IssueConfig('cpdbs-singlecegar-regularplans-pdb1m-pdbs10m-t20', []), - IssueConfig('cpdbs-multiplecegar-wildcardplans-pdb1m-pdbs10m-t20-blacklist0.75-stag4', []), -] -exp.add_comparison_table_step_for_revision_pairs( - revision_pairs=[ - ("issue1007-v8c", "issue1007-v9"), - ], - attributes=['coverage', 'search_time', 'total_time', - 'expansions_until_last_jump', 'score_search_time', - 'score_total_time', 'score_memory'], -) - -exp.run_steps() diff --git a/experiments/issue1008/average_report.py b/experiments/issue1008/average_report.py deleted file mode 100644 index 5ab3a221d7..0000000000 --- a/experiments/issue1008/average_report.py +++ /dev/null @@ -1,82 +0,0 @@ -# -*- coding: utf-8 -*- - -from downward.reports import PlanningReport -from lab import tools -from lab.reports import geometric_mean - -import os - -DEBUG=False - -class AverageAlgorithmReport(PlanningReport): - """ - This currently only works for some hard-coded attributes. - """ - def __init__(self, algo_name_suffixes, **kwargs): - PlanningReport.__init__(self, **kwargs) - self.algo_name_suffixes=algo_name_suffixes - - def get_text(self): - if not self.outfile.endswith("properties"): - raise ValueError("outfile must be a path to a properties file") - algo_infixes = set() - for algo in self.algorithms: - for suffix in self.algo_name_suffixes: - if suffix in algo: - algo_infixes.add(algo.replace(suffix, '')) - break - # print(algo_infixes) - # print(self.algo_name_suffixes) - props = tools.Properties(self.outfile) - for domain, problem in self.problem_runs.keys(): - if DEBUG: - print(domain, problem) - for algo in algo_infixes: - if DEBUG: - print("Consider ", algo) - properties_key = algo + '-' + domain + '-' + problem - average_algo_dict = {} - average_algo_dict['algorithm'] = algo - average_algo_dict['domain'] = domain - average_algo_dict['problem'] = problem - average_algo_dict['id'] = [algo, domain, problem] - for attribute in self.attributes: - if DEBUG: - print("Consider ", attribute) - values 
= [] - for suffix in self.algo_name_suffixes: - real_algo = algo + suffix - # if DEBUG: - # print("Composed algo ", real_algo) - real_algo_run = self.runs[(domain, problem, real_algo)] - values.append(real_algo_run.get(attribute)) - if DEBUG: - print(values) - values_without_none = [value for value in values if value is not None] - if attribute in [ - 'coverage', - 'initial_h_value', - 'cegar_num_iterations', - 'cegar_num_patterns', - 'cegar_total_pdb_size', - 'cpdbs_num_iterations', - 'cpdbs_num_patterns', - 'cpdbs_total_pdb_size', - 'random_pattern_num_iterations', - 'random_pattern_num_patterns', - 'random_pattern_total_pdb_size', - ] or 'score' in attribute: - # if 'score' not in attribute: - # assert len(values_without_none) == 10 # does not hold for scores - average_value = sum(values_without_none)/float(len(values)) - elif 'time' in attribute or 'expansions' in attribute: - if len(values_without_none) == 10: - average_value = geometric_mean(values_without_none) - else: - average_value = None - else: - print("Don't know how to handle {}".format(attribute)) - exit(1) - average_algo_dict[attribute] = average_value - props[properties_key] = average_algo_dict - return str(props) diff --git a/experiments/issue1008/base-cegar-fixed-seed.py b/experiments/issue1008/base-cegar-fixed-seed.py deleted file mode 100755 index 9ff4aaa5c7..0000000000 --- a/experiments/issue1008/base-cegar-fixed-seed.py +++ /dev/null @@ -1,120 +0,0 @@ -#! 
/usr/bin/env python3 - -import itertools -import math -import os -import subprocess - -from lab.environments import LocalEnvironment, BaselSlurmEnvironment -from lab.reports import Attribute - -from downward.reports.compare import ComparativeReport - -import common_setup -from common_setup import IssueConfig, IssueExperiment - -DIR = os.path.dirname(os.path.abspath(__file__)) -SCRIPT_NAME = os.path.splitext(os.path.basename(__file__))[0] -BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"] -REVISIONS = ["issue1008-base"] -random_seed=2018 -MAX_TIME=100 -if common_setup.is_test_run(): - MAX_TIME=2 -CONFIGS = [ - ### single cegar - IssueConfig(f'cpdbs-singlecegar-regularplans-pdb1m-pdbs10m-t100-s{random_seed}', ['--search', f'astar(cpdbs(single_cegar(max_pdb_size=1000000,max_collection_size=10000000,max_time={MAX_TIME},verbosity=normal,random_seed={random_seed},use_wildcard_plans=false)),verbosity=silent)']), - IssueConfig(f'cpdbs-singlecegar-wildcardplans-pdb1m-pdbs10m-t100-s{random_seed}', ['--search', f'astar(cpdbs(single_cegar(max_pdb_size=1000000,max_collection_size=10000000,max_time={MAX_TIME},verbosity=normal,random_seed={random_seed},use_wildcard_plans=true)),verbosity=silent)']), - - ### multiple cegar - IssueConfig(f'cpdbs-multiplecegar-regularplans-pdb1m-pdbs10m-t100-s{random_seed}', ['--search', f"astar(cpdbs(multiple_cegar(max_pdb_size=1000000,max_collection_size=10000000,max_time=infinity,random_seed={random_seed},max_collection_size=10000000,total_max_time={MAX_TIME},stagnation_limit=infinity,blacklist_trigger_percentage=1,enable_blacklist_on_stagnation=false,verbosity=normal,use_wildcard_plans=false)),verbosity=silent)"]), - IssueConfig(f'cpdbs-multiplecegar-regularplans-pdb1m-pdbs10m-t100-blacklist0.75-stag20-s{random_seed}', ['--search', 
f"astar(cpdbs(multiple_cegar(max_pdb_size=1000000,max_collection_size=10000000,max_time=infinity,random_seed={random_seed},max_collection_size=10000000,total_max_time={MAX_TIME},stagnation_limit=20,blacklist_trigger_percentage=0.75,enable_blacklist_on_stagnation=true,verbosity=normal,use_wildcard_plans=false)),verbosity=silent)"]), - - IssueConfig(f'cpdbs-multiplecegar-wildcardplans-pdb1m-pdbs10m-t100-s{random_seed}', ['--search', f"astar(cpdbs(multiple_cegar(max_pdb_size=1000000,max_collection_size=10000000,max_time=infinity,random_seed={random_seed},max_collection_size=10000000,total_max_time={MAX_TIME},stagnation_limit=infinity,blacklist_trigger_percentage=1,enable_blacklist_on_stagnation=false,verbosity=normal,use_wildcard_plans=true)),verbosity=silent)"]), - IssueConfig(f'cpdbs-multiplecegar-wildcardplans-pdb1m-pdbs10m-t100-blacklist0.75-stag20-s{random_seed}', ['--search', f"astar(cpdbs(multiple_cegar(max_pdb_size=1000000,max_collection_size=10000000,max_time=infinity,random_seed={random_seed},max_collection_size=10000000,total_max_time={MAX_TIME},stagnation_limit=20,blacklist_trigger_percentage=0.75,enable_blacklist_on_stagnation=true,verbosity=normal,use_wildcard_plans=true)),verbosity=silent)"]), -] - -SUITE = common_setup.DEFAULT_OPTIMAL_SUITE -ENVIRONMENT = BaselSlurmEnvironment( - email="silvan.sievers@unibas.ch", - partition="infai_2", - export=[], - # paths obtained via: - # module purge - # module -q load CMake/3.15.3-GCCcore-8.3.0 - # module -q load GCC/8.3.0 - # echo $PATH - # echo $LD_LIBRARY_PATH - setup='export 
PATH=/scicore/soft/apps/binutils/2.32-GCCcore-8.3.0/bin:/scicore/soft/apps/CMake/3.15.3-GCCcore-8.3.0/bin:/scicore/soft/apps/cURL/7.66.0-GCCcore-8.3.0/bin:/scicore/soft/apps/bzip2/1.0.8-GCCcore-8.3.0/bin:/scicore/soft/apps/ncurses/6.1-GCCcore-8.3.0/bin:/scicore/soft/apps/GCCcore/8.3.0/bin:/infai/sieverss/repos/bin:/infai/sieverss/local:/export/soft/lua_lmod/centos7/lmod/lmod/libexec:/usr/local/bin:/usr/bin:/usr/local/sbin:/usr/sbin:$PATH\nexport LD_LIBRARY_PATH=/scicore/soft/apps/binutils/2.32-GCCcore-8.3.0/lib:/scicore/soft/apps/cURL/7.66.0-GCCcore-8.3.0/lib:/scicore/soft/apps/bzip2/1.0.8-GCCcore-8.3.0/lib:/scicore/soft/apps/zlib/1.2.11-GCCcore-8.3.0/lib:/scicore/soft/apps/ncurses/6.1-GCCcore-8.3.0/lib:/scicore/soft/apps/GCCcore/8.3.0/lib64:/scicore/soft/apps/GCCcore/8.3.0/lib') - -if common_setup.is_test_run(): - SUITE = IssueExperiment.DEFAULT_TEST_SUITE - ENVIRONMENT = LocalEnvironment(processes=4) - -exp = IssueExperiment( - revisions=REVISIONS, - configs=CONFIGS, - environment=ENVIRONMENT, -) -exp.add_suite(BENCHMARKS_DIR, SUITE) - -exp.add_parser(exp.EXITCODE_PARSER) -exp.add_parser(exp.TRANSLATOR_PARSER) -exp.add_parser(exp.SINGLE_SEARCH_PARSER) -exp.add_parser(exp.PLANNER_PARSER) - -exp.add_step('build', exp.build) -exp.add_step('start', exp.start_runs) -exp.add_fetcher(name='fetch') -exp.add_parser('cpdbs-parser.py') -exp.add_parser('cegar-parser.py') - -cpdbs_num_patterns = Attribute('cpdbs_num_patterns', absolute=False, min_wins=True) -cpdbs_total_pdb_size = Attribute('cpdbs_total_pdb_size', absolute=False, min_wins=True) -cpdbs_computation_time = Attribute('cpdbs_computation_time', absolute=False, min_wins=True) -score_cpdbs_computation_time = Attribute('score_cpdbs_computation_time', absolute=True, min_wins=False) -cegar_num_iterations = Attribute('cegar_num_iterations', absolute=False, min_wins=True) -cegar_num_patterns = Attribute('cegar_num_patterns', absolute=False, min_wins=True) -cegar_total_pdb_size = Attribute('cegar_total_pdb_size', 
absolute=False, min_wins=True) -cegar_computation_time = Attribute('cegar_computation_time', absolute=False, min_wins=True) -score_cegar_computation_time = Attribute('score_cegar_computation_time', absolute=True, min_wins=False) - -attributes = [ - cpdbs_num_patterns, - cpdbs_total_pdb_size, - cpdbs_computation_time, - score_cpdbs_computation_time, - cegar_num_iterations, - cegar_num_patterns, - cegar_total_pdb_size, - cegar_computation_time, - score_cegar_computation_time, -] -attributes.extend(exp.DEFAULT_TABLE_ATTRIBUTES) -attributes.append('initial_h_value') - -def add_computation_time_score(run): - """ - Convert cegar/cpdbs computation time into scores in the range [0, 1]. - - Best possible performance in a task is counted as 1, while failure - to construct the heuristic and worst performance are counted as 0. - - """ - def log_score(value, min_bound, max_bound): - assert min_bound < max_bound - if value is None: - return 0 - value = max(value, min_bound) - value = min(value, max_bound) - raw_score = math.log(value) - math.log(max_bound) - best_raw_score = math.log(min_bound) - math.log(max_bound) - return raw_score / best_raw_score - - run['score_cegar_computation_time'] = log_score(run.get('cegar_computation_time'), min_bound=1.0, max_bound=MAX_TIME) - run['score_cpdbs_computation_time'] = log_score(run.get('cpdbs_computation_time'), min_bound=1.0, max_bound=MAX_TIME) - return run - -exp.add_absolute_report_step(attributes=attributes,filter=[add_computation_time_score]) - -exp.run_steps() diff --git a/experiments/issue1008/cegar-parser.py b/experiments/issue1008/cegar-parser.py deleted file mode 100755 index d196f4992c..0000000000 --- a/experiments/issue1008/cegar-parser.py +++ /dev/null @@ -1,12 +0,0 @@ -#! /usr/bin/env python - -from lab.parser import Parser - -parser = Parser() -# This matches output of both single and multiple CEGAR algorithms. 
-parser.add_pattern('cegar_num_iterations', 'CEGAR number of iterations: (\d+)', required=False, type=int) -parser.add_pattern('cegar_num_patterns', 'CEGAR number of patterns: (\d+)', required=False, type=int) -parser.add_pattern('cegar_total_pdb_size', 'CEGAR total PDB size: (\d+)', required=False, type=int) -parser.add_pattern('cegar_computation_time', 'CEGAR computation time: (.+)s', required=False, type=float) - -parser.parse() diff --git a/experiments/issue1008/common_setup.py b/experiments/issue1008/common_setup.py deleted file mode 100644 index eecf49e971..0000000000 --- a/experiments/issue1008/common_setup.py +++ /dev/null @@ -1,427 +0,0 @@ -# -*- coding: utf-8 -*- - -import itertools -import os -import platform -import subprocess -import sys - -from lab.experiment import ARGPARSER -from lab import tools - -from downward.experiment import FastDownwardExperiment -from downward.reports.absolute import AbsoluteReport -from downward.reports.compare import ComparativeReport -from downward.reports.scatter import ScatterPlotReport - - -def parse_args(): - ARGPARSER.add_argument( - "--test", - choices=["yes", "no", "auto"], - default="auto", - dest="test_run", - help="test experiment locally on a small suite if --test=yes or " - "--test=auto and we are not on a cluster") - return ARGPARSER.parse_args() - -ARGS = parse_args() - - -DEFAULT_OPTIMAL_SUITE = [ - 'agricola-opt18-strips', 'airport', 'barman-opt11-strips', - 'barman-opt14-strips', 'blocks', 'childsnack-opt14-strips', - 'data-network-opt18-strips', 'depot', 'driverlog', - 'elevators-opt08-strips', 'elevators-opt11-strips', - 'floortile-opt11-strips', 'floortile-opt14-strips', 'freecell', - 'ged-opt14-strips', 'grid', 'gripper', 'hiking-opt14-strips', - 'logistics00', 'logistics98', 'miconic', 'movie', 'mprime', - 'mystery', 'nomystery-opt11-strips', 'openstacks-opt08-strips', - 'openstacks-opt11-strips', 'openstacks-opt14-strips', - 'openstacks-strips', 'organic-synthesis-opt18-strips', - 
'organic-synthesis-split-opt18-strips', 'parcprinter-08-strips', - 'parcprinter-opt11-strips', 'parking-opt11-strips', - 'parking-opt14-strips', 'pathways', 'pegsol-08-strips', - 'pegsol-opt11-strips', 'petri-net-alignment-opt18-strips', - 'pipesworld-notankage', 'pipesworld-tankage', 'psr-small', 'rovers', - 'satellite', 'scanalyzer-08-strips', 'scanalyzer-opt11-strips', - 'snake-opt18-strips', 'sokoban-opt08-strips', - 'sokoban-opt11-strips', 'spider-opt18-strips', 'storage', - 'termes-opt18-strips', 'tetris-opt14-strips', - 'tidybot-opt11-strips', 'tidybot-opt14-strips', 'tpp', - 'transport-opt08-strips', 'transport-opt11-strips', - 'transport-opt14-strips', 'trucks-strips', 'visitall-opt11-strips', - 'visitall-opt14-strips', 'woodworking-opt08-strips', - 'woodworking-opt11-strips', 'zenotravel'] - -DEFAULT_SATISFICING_SUITE = [ - 'agricola-sat18-strips', 'airport', 'assembly', - 'barman-sat11-strips', 'barman-sat14-strips', 'blocks', - 'caldera-sat18-adl', 'caldera-split-sat18-adl', 'cavediving-14-adl', - 'childsnack-sat14-strips', 'citycar-sat14-adl', - 'data-network-sat18-strips', 'depot', 'driverlog', - 'elevators-sat08-strips', 'elevators-sat11-strips', - 'flashfill-sat18-adl', 'floortile-sat11-strips', - 'floortile-sat14-strips', 'freecell', 'ged-sat14-strips', 'grid', - 'gripper', 'hiking-sat14-strips', 'logistics00', 'logistics98', - 'maintenance-sat14-adl', 'miconic', 'miconic-fulladl', - 'miconic-simpleadl', 'movie', 'mprime', 'mystery', - 'nomystery-sat11-strips', 'nurikabe-sat18-adl', 'openstacks', - 'openstacks-sat08-adl', 'openstacks-sat08-strips', - 'openstacks-sat11-strips', 'openstacks-sat14-strips', - 'openstacks-strips', 'optical-telegraphs', - 'organic-synthesis-sat18-strips', - 'organic-synthesis-split-sat18-strips', 'parcprinter-08-strips', - 'parcprinter-sat11-strips', 'parking-sat11-strips', - 'parking-sat14-strips', 'pathways', - 'pegsol-08-strips', 'pegsol-sat11-strips', 'philosophers', - 'pipesworld-notankage', 'pipesworld-tankage', 
'psr-large', - 'psr-middle', 'psr-small', 'rovers', 'satellite', - 'scanalyzer-08-strips', 'scanalyzer-sat11-strips', 'schedule', - 'settlers-sat18-adl', 'snake-sat18-strips', 'sokoban-sat08-strips', - 'sokoban-sat11-strips', 'spider-sat18-strips', 'storage', - 'termes-sat18-strips', 'tetris-sat14-strips', - 'thoughtful-sat14-strips', 'tidybot-sat11-strips', 'tpp', - 'transport-sat08-strips', 'transport-sat11-strips', - 'transport-sat14-strips', 'trucks', 'trucks-strips', - 'visitall-sat11-strips', 'visitall-sat14-strips', - 'woodworking-sat08-strips', 'woodworking-sat11-strips', - 'zenotravel'] - - -def get_script(): - """Get file name of main script.""" - return tools.get_script_path() - - -def get_script_dir(): - """Get directory of main script. - - Usually a relative directory (depends on how it was called by the user.)""" - return os.path.dirname(get_script()) - - -def get_experiment_name(): - """Get name for experiment. - - Derived from the absolute filename of the main script, e.g. - "/ham/spam/eggs.py" => "spam-eggs".""" - script = os.path.abspath(get_script()) - script_dir = os.path.basename(os.path.dirname(script)) - script_base = os.path.splitext(os.path.basename(script))[0] - return "%s-%s" % (script_dir, script_base) - - -def get_data_dir(): - """Get data dir for the experiment. - - This is the subdirectory "data" of the directory containing - the main script.""" - return os.path.join(get_script_dir(), "data", get_experiment_name()) - - -def get_repo_base(): - """Get base directory of the repository, as an absolute path. - - Search upwards in the directory tree from the main script until a - directory with a subdirectory named ".git" is found. 
- - Abort if the repo base cannot be found.""" - path = os.path.abspath(get_script_dir()) - while os.path.dirname(path) != path: - if os.path.exists(os.path.join(path, ".git")): - return path - path = os.path.dirname(path) - sys.exit("repo base could not be found") - - -def is_running_on_cluster(): - node = platform.node() - return node.endswith(".scicore.unibas.ch") or node.endswith(".cluster.bc2.ch") - - -def is_test_run(): - return ARGS.test_run == "yes" or ( - ARGS.test_run == "auto" and not is_running_on_cluster()) - - -def get_algo_nick(revision, config_nick): - return "{revision}-{config_nick}".format(**locals()) - - -class IssueConfig(object): - """Hold information about a planner configuration. - - See FastDownwardExperiment.add_algorithm() for documentation of the - constructor's options. - - """ - def __init__(self, nick, component_options, - build_options=None, driver_options=None): - self.nick = nick - self.component_options = component_options - self.build_options = build_options - self.driver_options = driver_options - - -class IssueExperiment(FastDownwardExperiment): - """Subclass of FastDownwardExperiment with some convenience features.""" - - DEFAULT_TEST_SUITE = ["depot:p01.pddl", "gripper:prob01.pddl"] - - DEFAULT_TABLE_ATTRIBUTES = [ - "cost", - "coverage", - "error", - "evaluations", - "expansions", - "expansions_until_last_jump", - "generated", - "memory", - "planner_memory", - "planner_time", - "quality", - "run_dir", - "score_evaluations", - "score_expansions", - "score_generated", - "score_memory", - "score_search_time", - "score_total_time", - "search_time", - "total_time", - ] - - DEFAULT_SCATTER_PLOT_ATTRIBUTES = [ - "evaluations", - "expansions", - "expansions_until_last_jump", - "initial_h_value", - "memory", - "search_time", - "total_time", - ] - - PORTFOLIO_ATTRIBUTES = [ - "cost", - "coverage", - "error", - "plan_length", - "run_dir", - ] - - def __init__(self, revisions=None, configs=None, path=None, **kwargs): - """ - - You can 
either specify both *revisions* and *configs* or none - of them. If they are omitted, you will need to call - exp.add_algorithm() manually. - - If *revisions* is given, it must be a non-empty list of - revision identifiers, which specify which planner versions to - use in the experiment. The same versions are used for - translator, preprocessor and search. :: - - IssueExperiment(revisions=["issue123", "4b3d581643"], ...) - - If *configs* is given, it must be a non-empty list of - IssueConfig objects. :: - - IssueExperiment(..., configs=[ - IssueConfig("ff", ["--search", "eager_greedy(ff())"]), - IssueConfig( - "lama", [], - driver_options=["--alias", "seq-sat-lama-2011"]), - ]) - - If *path* is specified, it must be the path to where the - experiment should be built (e.g. - /home/john/experiments/issue123/exp01/). If omitted, the - experiment path is derived automatically from the main - script's filename. Example:: - - script = experiments/issue123/exp01.py --> - path = experiments/issue123/data/issue123-exp01/ - - """ - - path = path or get_data_dir() - - FastDownwardExperiment.__init__(self, path=path, **kwargs) - - if (revisions and not configs) or (not revisions and configs): - raise ValueError( - "please provide either both or none of revisions and configs") - - for rev in revisions: - for config in configs: - self.add_algorithm( - get_algo_nick(rev, config.nick), - get_repo_base(), - rev, - config.component_options, - build_options=config.build_options, - driver_options=config.driver_options) - - self._revisions = revisions - self._configs = configs - - @classmethod - def _is_portfolio(cls, config_nick): - return "fdss" in config_nick - - @classmethod - def get_supported_attributes(cls, config_nick, attributes): - if cls._is_portfolio(config_nick): - return [attr for attr in attributes - if attr in cls.PORTFOLIO_ATTRIBUTES] - return attributes - - def add_absolute_report_step(self, **kwargs): - """Add step that makes an absolute report. 
- - Absolute reports are useful for experiments that don't compare - revisions. - - The report is written to the experiment evaluation directory. - - All *kwargs* will be passed to the AbsoluteReport class. If the - keyword argument *attributes* is not specified, a default list - of attributes is used. :: - - exp.add_absolute_report_step(attributes=["coverage"]) - - """ - kwargs.setdefault("attributes", self.DEFAULT_TABLE_ATTRIBUTES) - report = AbsoluteReport(**kwargs) - outfile = os.path.join( - self.eval_dir, - get_experiment_name() + "." + report.output_format) - self.add_report(report, outfile=outfile) - self.add_step( - 'publish-absolute-report', subprocess.call, ['publish', outfile]) - - def add_comparison_table_step(self, name="make-comparison-tables", revisions=[], **kwargs): - """Add a step that makes pairwise revision comparisons. - - Create comparative reports for all pairs of Fast Downward - revisions. Each report pairs up the runs of the same config and - lists the two absolute attribute values and their difference - for all attributes in kwargs["attributes"]. - - All *kwargs* will be passed to the CompareConfigsReport class. - If the keyword argument *attributes* is not specified, a - default list of attributes is used. 
:: - - exp.add_comparison_table_step(attributes=["coverage"]) - - """ - kwargs.setdefault("attributes", self.DEFAULT_TABLE_ATTRIBUTES) - if not revisions: - revisions = self._revisions - - def make_comparison_tables(): - for rev1, rev2 in itertools.combinations(revisions, 2): - compared_configs = [] - for config in self._configs: - config_nick = config.nick - compared_configs.append( - ("%s-%s" % (rev1, config_nick), - "%s-%s" % (rev2, config_nick), - "Diff (%s)" % config_nick)) - report = ComparativeReport(compared_configs, **kwargs) - outfile = os.path.join( - self.eval_dir, - "%s-%s-%s-compare.%s" % ( - self.name, rev1, rev2, report.output_format)) - report(self.eval_dir, outfile) - - def publish_comparison_tables(): - for rev1, rev2 in itertools.combinations(revisions, 2): - outfile = os.path.join( - self.eval_dir, - "%s-%s-%s-compare.html" % (self.name, rev1, rev2)) - subprocess.call(["publish", outfile]) - - self.add_step(name, make_comparison_tables) - self.add_step( - f"publish-{name}", publish_comparison_tables) - - def add_comparison_table_step_for_revision_pairs( - self, revision_pairs, name="make-comparison-tables-for-revision-pairs", **kwargs): - kwargs.setdefault("attributes", self.DEFAULT_TABLE_ATTRIBUTES) - - def make_comparison_tables(): - for rev1, rev2 in revision_pairs: - compared_configs = [] - for config in self._configs: - config_nick = config.nick - compared_configs.append( - ("%s-%s" % (rev1, config_nick), - "%s-%s" % (rev2, config_nick), - "Diff (%s)" % config_nick)) - report = ComparativeReport(compared_configs, **kwargs) - outfile = os.path.join( - self.eval_dir, - "%s-%s-%s-compare.%s" % ( - self.name, rev1, rev2, report.output_format)) - report(self.eval_dir, outfile) - - def publish_comparison_tables(): - for rev1, rev2 in revision_pairs: - outfile = os.path.join( - self.eval_dir, - "%s-%s-%s-compare.html" % (self.name, rev1, rev2)) - subprocess.call(["publish", outfile]) - - self.add_step(name, make_comparison_tables) - 
self.add_step( - f"publish-{name}", publish_comparison_tables) - - def add_scatter_plot_step(self, relative=False, attributes=None, additional=[]): - """Add step creating (relative) scatter plots for all revision pairs. - - Create a scatter plot for each combination of attribute, - configuration and revisions pair. If *attributes* is not - specified, a list of common scatter plot attributes is used. - For portfolios all attributes except "cost", "coverage" and - "plan_length" will be ignored. :: - - exp.add_scatter_plot_step(attributes=["expansions"]) - - """ - if relative: - scatter_dir = os.path.join(self.eval_dir, "scatter-relative") - step_name = "make-relative-scatter-plots" - else: - scatter_dir = os.path.join(self.eval_dir, "scatter-absolute") - step_name = "make-absolute-scatter-plots" - if attributes is None: - attributes = self.DEFAULT_SCATTER_PLOT_ATTRIBUTES - - def make_scatter_plot(config_nick, rev1, rev2, attribute, config_nick2=None): - name = "-".join([self.name, rev1, rev2, attribute, config_nick]) - if config_nick2 is not None: - name += "-" + config_nick2 - print("Make scatter plot for", name) - algo1 = get_algo_nick(rev1, config_nick) - algo2 = get_algo_nick(rev2, config_nick if config_nick2 is None else config_nick2) - report = ScatterPlotReport( - filter_algorithm=[algo1, algo2], - attributes=[attribute], - relative=relative, - get_category=lambda run1, run2: run1["domain"]) - report( - self.eval_dir, - os.path.join(scatter_dir, rev1 + "-" + rev2, name)) - - def make_scatter_plots(): - for config in self._configs: - for rev1, rev2 in itertools.combinations(self._revisions, 2): - for attribute in self.get_supported_attributes( - config.nick, attributes): - make_scatter_plot(config.nick, rev1, rev2, attribute) - for nick1, nick2, rev1, rev2, attribute in additional: - make_scatter_plot(nick1, rev1, rev2, attribute, config_nick2=nick2) - - self.add_step(step_name, make_scatter_plots) diff --git a/experiments/issue1008/cpdbs-parser.py 
b/experiments/issue1008/cpdbs-parser.py deleted file mode 100755 index 7e4154cb57..0000000000 --- a/experiments/issue1008/cpdbs-parser.py +++ /dev/null @@ -1,10 +0,0 @@ -#! /usr/bin/env python - -from lab.parser import Parser - -parser = Parser() -parser.add_pattern('cpdbs_num_patterns', 'Canonical PDB heuristic number of patterns: (\d+)', required=False, type=int) -parser.add_pattern('cpdbs_total_pdb_size', 'Canonical PDB heuristic total PDB size: (\d+)', required=False, type=int) -parser.add_pattern('cpdbs_computation_time', 'Canonical PDB heuristic computation time: (.+)s', required=False, type=float) - -parser.parse() diff --git a/experiments/issue1008/random-pattern-parser.py b/experiments/issue1008/random-pattern-parser.py deleted file mode 100755 index 8c5c297637..0000000000 --- a/experiments/issue1008/random-pattern-parser.py +++ /dev/null @@ -1,12 +0,0 @@ -#! /usr/bin/env python - -from lab.parser import Parser - -parser = Parser() -# This matches output of both single and multiple random pattern algorithms. 
-parser.add_pattern('random_pattern_num_iterations', 'Random Pattern.* number of iterations: (\d+)', required=False, type=int) -parser.add_pattern('random_pattern_num_patterns', 'Random Pattern.* number of patterns: (\d+)', required=False, type=int) -parser.add_pattern('random_pattern_total_pdb_size', 'Random Pattern.* total PDB size: (\d+)', required=False, type=int) -parser.add_pattern('random_pattern_computation_time', 'Random Pattern.* computation time: (.+)s', required=False, type=float) - -parser.parse() diff --git a/experiments/issue1008/requirements.txt b/experiments/issue1008/requirements.txt deleted file mode 100644 index 6205d66ac6..0000000000 --- a/experiments/issue1008/requirements.txt +++ /dev/null @@ -1,11 +0,0 @@ -cycler==0.10.0 -kiwisolver==1.3.1 -lab==6.3 -matplotlib==3.3.4 -numpy==1.22.2 -Pillow==9.0.1 -pyparsing==2.4.7 -python-dateutil==2.8.1 -simplejson==3.17.2 -six==1.16.0 -txt2tags==3.7 diff --git a/experiments/issue1008/v1-cegar-fixed-seed.py b/experiments/issue1008/v1-cegar-fixed-seed.py deleted file mode 100755 index 22605d604f..0000000000 --- a/experiments/issue1008/v1-cegar-fixed-seed.py +++ /dev/null @@ -1,137 +0,0 @@ -#! 
/usr/bin/env python3 - -import itertools -import math -import os -import subprocess - -from lab.environments import LocalEnvironment, BaselSlurmEnvironment -from lab.reports import Attribute - -from downward.reports.compare import ComparativeReport - -import common_setup -from common_setup import IssueConfig, IssueExperiment - -DIR = os.path.dirname(os.path.abspath(__file__)) -SCRIPT_NAME = os.path.splitext(os.path.basename(__file__))[0] -BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"] -REVISIONS = ["issue1008-v1"] -random_seed=2018 -MAX_TIME=100 -if common_setup.is_test_run(): - MAX_TIME=2 -CONFIGS = [ - ### single cegar - IssueConfig(f'cpdbs-singlecegar-regularplans-pdb1m-pdbs10m-t100-s{random_seed}', ['--search', f'astar(cpdbs(single_cegar(max_pdb_size=1000000,max_collection_size=10000000,max_time={MAX_TIME},verbosity=normal,random_seed={random_seed},use_wildcard_plans=false)),verbosity=silent)']), - IssueConfig(f'cpdbs-singlecegar-wildcardplans-pdb1m-pdbs10m-t100-s{random_seed}', ['--search', f'astar(cpdbs(single_cegar(max_pdb_size=1000000,max_collection_size=10000000,max_time={MAX_TIME},verbosity=normal,random_seed={random_seed},use_wildcard_plans=true)),verbosity=silent)']), - - ### multiple cegar - IssueConfig(f'cpdbs-multiplecegar-regularplans-pdb1m-pdbs10m-t100-s{random_seed}', ['--search', f"astar(cpdbs(multiple_cegar(max_pdb_size=1000000,max_collection_size=10000000,pattern_generation_max_time=infinity,random_seed={random_seed},max_collection_size=10000000,total_max_time={MAX_TIME},stagnation_limit=infinity,blacklist_trigger_percentage=1,enable_blacklist_on_stagnation=false,verbosity=normal,use_wildcard_plans=false)),verbosity=silent)"]), - IssueConfig(f'cpdbs-multiplecegar-regularplans-pdb1m-pdbs10m-t100-blacklist0.75-stag20-s{random_seed}', ['--search', 
f"astar(cpdbs(multiple_cegar(max_pdb_size=1000000,max_collection_size=10000000,pattern_generation_max_time=infinity,random_seed={random_seed},max_collection_size=10000000,total_max_time={MAX_TIME},stagnation_limit=20,blacklist_trigger_percentage=0.75,enable_blacklist_on_stagnation=true,verbosity=normal,use_wildcard_plans=false)),verbosity=silent)"]), - - IssueConfig(f'cpdbs-multiplecegar-wildcardplans-pdb1m-pdbs10m-t100-s{random_seed}', ['--search', f"astar(cpdbs(multiple_cegar(max_pdb_size=1000000,max_collection_size=10000000,pattern_generation_max_time=infinity,random_seed={random_seed},max_collection_size=10000000,total_max_time={MAX_TIME},stagnation_limit=infinity,blacklist_trigger_percentage=1,enable_blacklist_on_stagnation=false,verbosity=normal,use_wildcard_plans=true)),verbosity=silent)"]), - IssueConfig(f'cpdbs-multiplecegar-wildcardplans-pdb1m-pdbs10m-t100-blacklist0.75-stag20-s{random_seed}', ['--search', f"astar(cpdbs(multiple_cegar(max_pdb_size=1000000,max_collection_size=10000000,pattern_generation_max_time=infinity,random_seed={random_seed},max_collection_size=10000000,total_max_time={MAX_TIME},stagnation_limit=20,blacklist_trigger_percentage=0.75,enable_blacklist_on_stagnation=true,verbosity=normal,use_wildcard_plans=true)),verbosity=silent)"]), -] - -SUITE = common_setup.DEFAULT_OPTIMAL_SUITE -ENVIRONMENT = BaselSlurmEnvironment( - email="silvan.sievers@unibas.ch", - partition="infai_2", - export=[], - # paths obtained via: - # module purge - # module -q load CMake/3.15.3-GCCcore-8.3.0 - # module -q load GCC/8.3.0 - # echo $PATH - # echo $LD_LIBRARY_PATH - setup='export 
PATH=/scicore/soft/apps/binutils/2.32-GCCcore-8.3.0/bin:/scicore/soft/apps/CMake/3.15.3-GCCcore-8.3.0/bin:/scicore/soft/apps/cURL/7.66.0-GCCcore-8.3.0/bin:/scicore/soft/apps/bzip2/1.0.8-GCCcore-8.3.0/bin:/scicore/soft/apps/ncurses/6.1-GCCcore-8.3.0/bin:/scicore/soft/apps/GCCcore/8.3.0/bin:/infai/sieverss/repos/bin:/infai/sieverss/local:/export/soft/lua_lmod/centos7/lmod/lmod/libexec:/usr/local/bin:/usr/bin:/usr/local/sbin:/usr/sbin:$PATH\nexport LD_LIBRARY_PATH=/scicore/soft/apps/binutils/2.32-GCCcore-8.3.0/lib:/scicore/soft/apps/cURL/7.66.0-GCCcore-8.3.0/lib:/scicore/soft/apps/bzip2/1.0.8-GCCcore-8.3.0/lib:/scicore/soft/apps/zlib/1.2.11-GCCcore-8.3.0/lib:/scicore/soft/apps/ncurses/6.1-GCCcore-8.3.0/lib:/scicore/soft/apps/GCCcore/8.3.0/lib64:/scicore/soft/apps/GCCcore/8.3.0/lib') - -if common_setup.is_test_run(): - SUITE = IssueExperiment.DEFAULT_TEST_SUITE - ENVIRONMENT = LocalEnvironment(processes=4) - -exp = IssueExperiment( - revisions=REVISIONS, - configs=CONFIGS, - environment=ENVIRONMENT, -) -exp.add_suite(BENCHMARKS_DIR, SUITE) - -exp.add_parser(exp.EXITCODE_PARSER) -exp.add_parser(exp.TRANSLATOR_PARSER) -exp.add_parser(exp.SINGLE_SEARCH_PARSER) -exp.add_parser(exp.PLANNER_PARSER) - -exp.add_step('build', exp.build) -exp.add_step('start', exp.start_runs) -exp.add_fetcher(name='fetch') -exp.add_parser('cpdbs-parser.py') -exp.add_parser('cegar-parser.py') - -cpdbs_num_patterns = Attribute('cpdbs_num_patterns', absolute=False, min_wins=True) -cpdbs_total_pdb_size = Attribute('cpdbs_total_pdb_size', absolute=False, min_wins=True) -cpdbs_computation_time = Attribute('cpdbs_computation_time', absolute=False, min_wins=True) -score_cpdbs_computation_time = Attribute('score_cpdbs_computation_time', absolute=True, min_wins=False) -cegar_num_iterations = Attribute('cegar_num_iterations', absolute=False, min_wins=True) -cegar_num_patterns = Attribute('cegar_num_patterns', absolute=False, min_wins=True) -cegar_total_pdb_size = Attribute('cegar_total_pdb_size', 
absolute=False, min_wins=True) -cegar_computation_time = Attribute('cegar_computation_time', absolute=False, min_wins=True) -score_cegar_computation_time = Attribute('score_cegar_computation_time', absolute=True, min_wins=False) - -attributes = [ - cpdbs_num_patterns, - cpdbs_total_pdb_size, - cpdbs_computation_time, - score_cpdbs_computation_time, - cegar_num_iterations, - cegar_num_patterns, - cegar_total_pdb_size, - cegar_computation_time, - score_cegar_computation_time, -] -attributes.extend(exp.DEFAULT_TABLE_ATTRIBUTES) -attributes.append('initial_h_value') - -def add_computation_time_score(run): - """ - Convert cegar/cpdbs computation time into scores in the range [0, 1]. - - Best possible performance in a task is counted as 1, while failure - to construct the heuristic and worst performance are counted as 0. - - """ - def log_score(value, min_bound, max_bound): - assert min_bound < max_bound - if value is None: - return 0 - value = max(value, min_bound) - value = min(value, max_bound) - raw_score = math.log(value) - math.log(max_bound) - best_raw_score = math.log(min_bound) - math.log(max_bound) - return raw_score / best_raw_score - - run['score_cegar_computation_time'] = log_score(run.get('cegar_computation_time'), min_bound=1.0, max_bound=MAX_TIME) - run['score_cpdbs_computation_time'] = log_score(run.get('cpdbs_computation_time'), min_bound=1.0, max_bound=MAX_TIME) - return run - -exp.add_absolute_report_step(attributes=attributes,filter=[add_computation_time_score]) - -exp.add_fetcher('data/issue1008-base-cegar-fixed-seed-eval', filter_algorithm=[ - f'issue1008-base-cpdbs-singlecegar-regularplans-pdb1m-pdbs10m-t100-s{random_seed}', - f'issue1008-base-cpdbs-singlecegar-wildcardplans-pdb1m-pdbs10m-t100-s{random_seed}', - f'issue1008-base-cpdbs-multiplecegar-regularplans-pdb1m-pdbs10m-t100-s{random_seed}', - f'issue1008-base-cpdbs-multiplecegar-wildcardplans-pdb1m-pdbs10m-t100-s{random_seed}', - 
f'issue1008-base-cpdbs-multiplecegar-regularplans-pdb1m-pdbs10m-t100-blacklist0.75-stag20-s{random_seed}', - f'issue1008-base-cpdbs-multiplecegar-wildcardplans-pdb1m-pdbs10m-t100-blacklist0.75-stag20-s{random_seed}', -],merge=True) - -exp.add_comparison_table_step_for_revision_pairs( - revision_pairs=[ - ("issue1008-base", "issue1008-v1"), - ], - attributes=attributes, - filter=[add_computation_time_score], -) - -exp.run_steps() diff --git a/experiments/issue1008/v1-random-patterns-multiple-seeds-30m.py b/experiments/issue1008/v1-random-patterns-multiple-seeds-30m.py deleted file mode 100755 index 508e73e12a..0000000000 --- a/experiments/issue1008/v1-random-patterns-multiple-seeds-30m.py +++ /dev/null @@ -1,140 +0,0 @@ -#! /usr/bin/env python3 - -import itertools -import math -import os - -from lab.environments import LocalEnvironment, BaselSlurmEnvironment -from lab.reports import Attribute - -from downward.reports.compare import ComparativeReport - -import common_setup -from common_setup import IssueConfig, IssueExperiment -from average_report import AverageAlgorithmReport - -DIR = os.path.dirname(os.path.abspath(__file__)) -SCRIPT_NAME = os.path.splitext(os.path.basename(__file__))[0] -BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"] -REVISIONS = ["issue1008-v1"] -MAX_TIME=100 -if common_setup.is_test_run(): - MAX_TIME=2 -CONFIGS = [] -for random_seed in range(2018, 2028): - ### single random pattern - CONFIGS.append(IssueConfig(f'srnd-bidi-pdb1m-pdbs10m-t100-s{random_seed}', ['--search', f'astar(pdb(random_pattern(max_pdb_size=1000000,max_time={MAX_TIME},verbosity=normal,random_seed={random_seed},bidirectional=true)),verbosity=silent)'])) - - ### multiple random patterns - CONFIGS.append(IssueConfig(f'mrnd-bidi-pdb1m-pdbs10m-t100-stag5-s{random_seed}', ['--search', 
f'astar(cpdbs(random_patterns(max_pdb_size=1000000,max_collection_size=10000000,pattern_generation_max_time=infinity,total_max_time={MAX_TIME},stagnation_limit=5,random_seed={random_seed},verbosity=normal,bidirectional=true)),verbosity=silent)'], driver_options=['--search-time-limit', '5m'])) - -SUITE = common_setup.DEFAULT_OPTIMAL_SUITE -ENVIRONMENT = BaselSlurmEnvironment( - email="silvan.sievers@unibas.ch", - partition="infai_2", - export=[], - # paths obtained via: - # module purge - # module -q load CMake/3.15.3-GCCcore-8.3.0 - # module -q load GCC/8.3.0 - # echo $PATH - # echo $LD_LIBRARY_PATH - setup='export PATH=/scicore/soft/apps/binutils/2.32-GCCcore-8.3.0/bin:/scicore/soft/apps/CMake/3.15.3-GCCcore-8.3.0/bin:/scicore/soft/apps/cURL/7.66.0-GCCcore-8.3.0/bin:/scicore/soft/apps/bzip2/1.0.8-GCCcore-8.3.0/bin:/scicore/soft/apps/ncurses/6.1-GCCcore-8.3.0/bin:/scicore/soft/apps/GCCcore/8.3.0/bin:/infai/sieverss/repos/bin:/infai/sieverss/local:/export/soft/lua_lmod/centos7/lmod/lmod/libexec:/usr/local/bin:/usr/bin:/usr/local/sbin:/usr/sbin:$PATH\nexport LD_LIBRARY_PATH=/scicore/soft/apps/binutils/2.32-GCCcore-8.3.0/lib:/scicore/soft/apps/cURL/7.66.0-GCCcore-8.3.0/lib:/scicore/soft/apps/bzip2/1.0.8-GCCcore-8.3.0/lib:/scicore/soft/apps/zlib/1.2.11-GCCcore-8.3.0/lib:/scicore/soft/apps/ncurses/6.1-GCCcore-8.3.0/lib:/scicore/soft/apps/GCCcore/8.3.0/lib64:/scicore/soft/apps/GCCcore/8.3.0/lib') - -if common_setup.is_test_run(): - SUITE = IssueExperiment.DEFAULT_TEST_SUITE - ENVIRONMENT = LocalEnvironment(processes=4) - -exp = IssueExperiment( - revisions=REVISIONS, - configs=CONFIGS, - environment=ENVIRONMENT, -) -exp.add_suite(BENCHMARKS_DIR, SUITE) - -exp.add_parser(exp.EXITCODE_PARSER) -exp.add_parser(exp.TRANSLATOR_PARSER) -exp.add_parser(exp.SINGLE_SEARCH_PARSER) -exp.add_parser(exp.PLANNER_PARSER) - -exp.add_step('build', exp.build) -exp.add_step('start', exp.start_runs) -exp.add_fetcher(name='fetch') -exp.add_parser('cpdbs-parser.py') 
-exp.add_parser('random-pattern-parser.py') - -cpdbs_num_patterns = Attribute('cpdbs_num_patterns', absolute=False, min_wins=True) -cpdbs_total_pdb_size = Attribute('cpdbs_total_pdb_size', absolute=False, min_wins=True) -cpdbs_computation_time = Attribute('cpdbs_computation_time', absolute=False, min_wins=True) -score_cpdbs_computation_time = Attribute('score_cpdbs_computation_time', absolute=True, min_wins=False) -random_pattern_num_iterations = Attribute('random_pattern_num_iterations', absolute=False, min_wins=True) -random_pattern_num_patterns = Attribute('random_pattern_num_patterns', absolute=False, min_wins=True) -random_pattern_total_pdb_size = Attribute('random_pattern_total_pdb_size', absolute=False, min_wins=True) -random_pattern_computation_time = Attribute('random_pattern_computation_time', absolute=False, min_wins=True) -score_random_pattern_computation_time = Attribute('score_random_pattern_computation_time', absolute=True, min_wins=False) - -# exp.add_absolute_report_step(attributes=['coverage']) - -def add_computation_time_score(run): - """ - Convert cegar/cpdbs computation time into scores in the range [0, 1]. - - Best possible performance in a task is counted as 1, while failure - to construct the heuristic and worst performance are counted as 0. 
- - """ - def log_score(value, min_bound, max_bound): - assert min_bound < max_bound - if value is None: - return 0 - value = max(value, min_bound) - value = min(value, max_bound) - raw_score = math.log(value) - math.log(max_bound) - best_raw_score = math.log(min_bound) - math.log(max_bound) - return raw_score / best_raw_score - - run['score_random_pattern_computation_time'] = log_score(run.get('random_pattern_computation_time'), min_bound=1.0, max_bound=MAX_TIME) - run['score_cpdbs_computation_time'] = log_score(run.get('cpdbs_computation_time'), min_bound=1.0, max_bound=MAX_TIME) - return run - -exp.add_report( - AverageAlgorithmReport( - algo_name_suffixes=['-s{}'.format(seed) for seed in range(2018,2028)], - attributes=[ - 'coverage', 'search_time', 'total_time', - 'expansions_until_last_jump', 'score_search_time', - 'score_total_time', 'score_memory', 'score_expansions', - 'initial_h_value', cpdbs_num_patterns, - cpdbs_total_pdb_size, cpdbs_computation_time, - score_cpdbs_computation_time, - random_pattern_num_iterations, random_pattern_num_patterns, - random_pattern_total_pdb_size, - random_pattern_computation_time, - score_random_pattern_computation_time, - ], - filter=[add_computation_time_score], - ), - outfile=os.path.join(exp.eval_dir, "average", "properties"), - name="report-average" -) - -exp.add_fetcher(os.path.join(exp.eval_dir, 'average'), merge=True) -exp.add_absolute_report_step( - attributes=[ - 'coverage', 'search_time', 'total_time', - 'expansions_until_last_jump', 'score_search_time', - 'score_total_time', 'score_memory', 'score_expansions', - 'initial_h_value', cpdbs_num_patterns, cpdbs_total_pdb_size, - cpdbs_computation_time, score_cpdbs_computation_time, - random_pattern_num_iterations, random_pattern_num_patterns, - random_pattern_total_pdb_size, random_pattern_computation_time, - score_random_pattern_computation_time, - ], - filter=[add_computation_time_score], - filter_algorithm=[ - f'{REVISIONS[0]}-srnd-bidi-pdb1m-pdbs10m-t100', - 
f'{REVISIONS[0]}-mrnd-bidi-pdb1m-pdbs10m-t100-stag5', - ], -) - -exp.run_steps() diff --git a/experiments/issue1008/v1-random-patterns-multiple-seeds.py b/experiments/issue1008/v1-random-patterns-multiple-seeds.py deleted file mode 100755 index 02d67458b2..0000000000 --- a/experiments/issue1008/v1-random-patterns-multiple-seeds.py +++ /dev/null @@ -1,140 +0,0 @@ -#! /usr/bin/env python3 - -import itertools -import math -import os - -from lab.environments import LocalEnvironment, BaselSlurmEnvironment -from lab.reports import Attribute - -from downward.reports.compare import ComparativeReport - -import common_setup -from common_setup import IssueConfig, IssueExperiment -from average_report import AverageAlgorithmReport - -DIR = os.path.dirname(os.path.abspath(__file__)) -SCRIPT_NAME = os.path.splitext(os.path.basename(__file__))[0] -BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"] -REVISIONS = ["issue1008-v1"] -MAX_TIME=20 -if common_setup.is_test_run(): - MAX_TIME=2 -CONFIGS = [] -for random_seed in range(2018, 2028): - ### single random pattern - CONFIGS.append(IssueConfig(f'srnd-bidi-pdb1m-pdbs10m-t20-s{random_seed}', ['--search', f'astar(pdb(random_pattern(max_pdb_size=1000000,max_time={MAX_TIME},verbosity=normal,random_seed={random_seed},bidirectional=true)),verbosity=silent)'], driver_options=['--search-time-limit', '5m'])) - - ### multiple random patterns - CONFIGS.append(IssueConfig(f'mrnd-bidi-pdb1m-pdbs10m-t20-stag1-s{random_seed}', ['--search', f'astar(cpdbs(random_patterns(max_pdb_size=1000000,max_collection_size=10000000,pattern_generation_max_time=infinity,total_max_time={MAX_TIME},stagnation_limit=1,random_seed={random_seed},verbosity=normal,bidirectional=true)),verbosity=silent)'], driver_options=['--search-time-limit', '5m'])) - -SUITE = common_setup.DEFAULT_OPTIMAL_SUITE -ENVIRONMENT = BaselSlurmEnvironment( - email="silvan.sievers@unibas.ch", - partition="infai_2", - export=[], - # paths obtained via: - # module purge - # module -q load 
CMake/3.15.3-GCCcore-8.3.0 - # module -q load GCC/8.3.0 - # echo $PATH - # echo $LD_LIBRARY_PATH - setup='export PATH=/scicore/soft/apps/binutils/2.32-GCCcore-8.3.0/bin:/scicore/soft/apps/CMake/3.15.3-GCCcore-8.3.0/bin:/scicore/soft/apps/cURL/7.66.0-GCCcore-8.3.0/bin:/scicore/soft/apps/bzip2/1.0.8-GCCcore-8.3.0/bin:/scicore/soft/apps/ncurses/6.1-GCCcore-8.3.0/bin:/scicore/soft/apps/GCCcore/8.3.0/bin:/infai/sieverss/repos/bin:/infai/sieverss/local:/export/soft/lua_lmod/centos7/lmod/lmod/libexec:/usr/local/bin:/usr/bin:/usr/local/sbin:/usr/sbin:$PATH\nexport LD_LIBRARY_PATH=/scicore/soft/apps/binutils/2.32-GCCcore-8.3.0/lib:/scicore/soft/apps/cURL/7.66.0-GCCcore-8.3.0/lib:/scicore/soft/apps/bzip2/1.0.8-GCCcore-8.3.0/lib:/scicore/soft/apps/zlib/1.2.11-GCCcore-8.3.0/lib:/scicore/soft/apps/ncurses/6.1-GCCcore-8.3.0/lib:/scicore/soft/apps/GCCcore/8.3.0/lib64:/scicore/soft/apps/GCCcore/8.3.0/lib') - -if common_setup.is_test_run(): - SUITE = IssueExperiment.DEFAULT_TEST_SUITE - ENVIRONMENT = LocalEnvironment(processes=4) - -exp = IssueExperiment( - revisions=REVISIONS, - configs=CONFIGS, - environment=ENVIRONMENT, -) -exp.add_suite(BENCHMARKS_DIR, SUITE) - -exp.add_parser(exp.EXITCODE_PARSER) -exp.add_parser(exp.TRANSLATOR_PARSER) -exp.add_parser(exp.SINGLE_SEARCH_PARSER) -exp.add_parser(exp.PLANNER_PARSER) - -exp.add_step('build', exp.build) -exp.add_step('start', exp.start_runs) -exp.add_fetcher(name='fetch') -exp.add_parser('cpdbs-parser.py') -exp.add_parser('random-pattern-parser.py') - -cpdbs_num_patterns = Attribute('cpdbs_num_patterns', absolute=False, min_wins=True) -cpdbs_total_pdb_size = Attribute('cpdbs_total_pdb_size', absolute=False, min_wins=True) -cpdbs_computation_time = Attribute('cpdbs_computation_time', absolute=False, min_wins=True) -score_cpdbs_computation_time = Attribute('score_cpdbs_computation_time', absolute=True, min_wins=False) -random_pattern_num_iterations = Attribute('random_pattern_num_iterations', absolute=False, min_wins=True) 
-random_pattern_num_patterns = Attribute('random_pattern_num_patterns', absolute=False, min_wins=True) -random_pattern_total_pdb_size = Attribute('random_pattern_total_pdb_size', absolute=False, min_wins=True) -random_pattern_computation_time = Attribute('random_pattern_computation_time', absolute=False, min_wins=True) -score_random_pattern_computation_time = Attribute('score_random_pattern_computation_time', absolute=True, min_wins=False) - -# exp.add_absolute_report_step(attributes=['coverage']) - -def add_computation_time_score(run): - """ - Convert cegar/cpdbs computation time into scores in the range [0, 1]. - - Best possible performance in a task is counted as 1, while failure - to construct the heuristic and worst performance are counted as 0. - - """ - def log_score(value, min_bound, max_bound): - assert min_bound < max_bound - if value is None: - return 0 - value = max(value, min_bound) - value = min(value, max_bound) - raw_score = math.log(value) - math.log(max_bound) - best_raw_score = math.log(min_bound) - math.log(max_bound) - return raw_score / best_raw_score - - run['score_random_pattern_computation_time'] = log_score(run.get('random_pattern_computation_time'), min_bound=1.0, max_bound=MAX_TIME) - run['score_cpdbs_computation_time'] = log_score(run.get('cpdbs_computation_time'), min_bound=1.0, max_bound=MAX_TIME) - return run - -exp.add_report( - AverageAlgorithmReport( - algo_name_suffixes=['-s{}'.format(seed) for seed in range(2018,2028)], - attributes=[ - 'coverage', 'search_time', 'total_time', - 'expansions_until_last_jump', 'score_search_time', - 'score_total_time', 'score_memory', 'score_expansions', - 'initial_h_value', cpdbs_num_patterns, - cpdbs_total_pdb_size, cpdbs_computation_time, - score_cpdbs_computation_time, - random_pattern_num_iterations, random_pattern_num_patterns, - random_pattern_total_pdb_size, - random_pattern_computation_time, - score_random_pattern_computation_time, - ], - filter=[add_computation_time_score], - ), - 
outfile=os.path.join(exp.eval_dir, "average", "properties"), - name="report-average" -) - -exp.add_fetcher(os.path.join(exp.eval_dir, 'average'), merge=True) -exp.add_absolute_report_step( - attributes=[ - 'coverage', 'search_time', 'total_time', - 'expansions_until_last_jump', 'score_search_time', - 'score_total_time', 'score_memory', 'score_expansions', - 'initial_h_value', cpdbs_num_patterns, cpdbs_total_pdb_size, - cpdbs_computation_time, score_cpdbs_computation_time, - random_pattern_num_iterations, random_pattern_num_patterns, - random_pattern_total_pdb_size, random_pattern_computation_time, - score_random_pattern_computation_time, - ], - filter=[add_computation_time_score], - filter_algorithm=[ - f'{REVISIONS[0]}-srnd-bidi-pdb1m-pdbs10m-t20', - f'{REVISIONS[0]}-mrnd-bidi-pdb1m-pdbs10m-t20-stag1', - ], -) - -exp.run_steps() diff --git a/experiments/issue1008/v2-cegar-fixed-seed.py b/experiments/issue1008/v2-cegar-fixed-seed.py deleted file mode 100755 index 56a9e9c18d..0000000000 --- a/experiments/issue1008/v2-cegar-fixed-seed.py +++ /dev/null @@ -1,121 +0,0 @@ -#! 
/usr/bin/env python3 - -import itertools -import math -import os -import subprocess - -from lab.environments import LocalEnvironment, BaselSlurmEnvironment -from lab.reports import Attribute - -from downward.reports.compare import ComparativeReport - -import common_setup -from common_setup import IssueConfig, IssueExperiment - -DIR = os.path.dirname(os.path.abspath(__file__)) -SCRIPT_NAME = os.path.splitext(os.path.basename(__file__))[0] -BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"] -REVISIONS = ["issue1008-v1", "issue1008-v2"] -random_seed=2018 -MAX_TIME=100 -if common_setup.is_test_run(): - MAX_TIME=2 -CONFIGS = [ - ### multiple cegar - IssueConfig(f'cpdbs-multiplecegar-wildcardplans-pdb1m-pdbs10m-t100-s{random_seed}', ['--search', f"astar(cpdbs(multiple_cegar(max_pdb_size=1000000,max_collection_size=10000000,pattern_generation_max_time=infinity,random_seed={random_seed},max_collection_size=10000000,total_max_time={MAX_TIME},stagnation_limit=infinity,blacklist_trigger_percentage=1,enable_blacklist_on_stagnation=false,verbosity=normal,use_wildcard_plans=true)),verbosity=silent)"]), - IssueConfig(f'cpdbs-multiplecegar-wildcardplans-pdb1m-pdbs10m-t100-stag20-s{random_seed}', ['--search', f"astar(cpdbs(multiple_cegar(max_pdb_size=1000000,max_collection_size=10000000,pattern_generation_max_time=infinity,random_seed={random_seed},max_collection_size=10000000,total_max_time={MAX_TIME},stagnation_limit=20,blacklist_trigger_percentage=1,enable_blacklist_on_stagnation=false,verbosity=normal,use_wildcard_plans=true)),verbosity=silent)"]), - IssueConfig(f'cpdbs-multiplecegar-wildcardplans-pdb1m-pdbs10m-t100-blacklist0.75-s{random_seed}', ['--search', 
f"astar(cpdbs(multiple_cegar(max_pdb_size=1000000,max_collection_size=10000000,pattern_generation_max_time=infinity,random_seed={random_seed},max_collection_size=10000000,total_max_time={MAX_TIME},stagnation_limit=infinity,blacklist_trigger_percentage=0.75,enable_blacklist_on_stagnation=false,verbosity=normal,use_wildcard_plans=true)),verbosity=silent)"]), - IssueConfig(f'cpdbs-multiplecegar-wildcardplans-pdb1m-pdbs10m-t100-blacklist0.75-stag20-s{random_seed}', ['--search', f"astar(cpdbs(multiple_cegar(max_pdb_size=1000000,max_collection_size=10000000,pattern_generation_max_time=infinity,random_seed={random_seed},max_collection_size=10000000,total_max_time={MAX_TIME},stagnation_limit=20,blacklist_trigger_percentage=0.75,enable_blacklist_on_stagnation=true,verbosity=normal,use_wildcard_plans=true)),verbosity=silent)"]), -] - -SUITE = common_setup.DEFAULT_OPTIMAL_SUITE -ENVIRONMENT = BaselSlurmEnvironment( - email="silvan.sievers@unibas.ch", - partition="infai_2", - export=[], - # paths obtained via: - # module purge - # module -q load CMake/3.15.3-GCCcore-8.3.0 - # module -q load GCC/8.3.0 - # echo $PATH - # echo $LD_LIBRARY_PATH - setup='export PATH=/scicore/soft/apps/binutils/2.32-GCCcore-8.3.0/bin:/scicore/soft/apps/CMake/3.15.3-GCCcore-8.3.0/bin:/scicore/soft/apps/cURL/7.66.0-GCCcore-8.3.0/bin:/scicore/soft/apps/bzip2/1.0.8-GCCcore-8.3.0/bin:/scicore/soft/apps/ncurses/6.1-GCCcore-8.3.0/bin:/scicore/soft/apps/GCCcore/8.3.0/bin:/infai/sieverss/repos/bin:/infai/sieverss/local:/export/soft/lua_lmod/centos7/lmod/lmod/libexec:/usr/local/bin:/usr/bin:/usr/local/sbin:/usr/sbin:$PATH\nexport LD_LIBRARY_PATH=/scicore/soft/apps/binutils/2.32-GCCcore-8.3.0/lib:/scicore/soft/apps/cURL/7.66.0-GCCcore-8.3.0/lib:/scicore/soft/apps/bzip2/1.0.8-GCCcore-8.3.0/lib:/scicore/soft/apps/zlib/1.2.11-GCCcore-8.3.0/lib:/scicore/soft/apps/ncurses/6.1-GCCcore-8.3.0/lib:/scicore/soft/apps/GCCcore/8.3.0/lib64:/scicore/soft/apps/GCCcore/8.3.0/lib') - -if common_setup.is_test_run(): - SUITE = 
IssueExperiment.DEFAULT_TEST_SUITE - ENVIRONMENT = LocalEnvironment(processes=4) - -exp = IssueExperiment( - revisions=REVISIONS, - configs=CONFIGS, - environment=ENVIRONMENT, -) -exp.add_suite(BENCHMARKS_DIR, SUITE) - -exp.add_parser(exp.EXITCODE_PARSER) -exp.add_parser(exp.TRANSLATOR_PARSER) -exp.add_parser(exp.SINGLE_SEARCH_PARSER) -exp.add_parser(exp.PLANNER_PARSER) - -exp.add_step('build', exp.build) -exp.add_step('start', exp.start_runs) -exp.add_fetcher(name='fetch') -exp.add_parser('cpdbs-parser.py') -exp.add_parser('cegar-parser.py') - -cpdbs_num_patterns = Attribute('cpdbs_num_patterns', absolute=False, min_wins=True) -cpdbs_total_pdb_size = Attribute('cpdbs_total_pdb_size', absolute=False, min_wins=True) -cpdbs_computation_time = Attribute('cpdbs_computation_time', absolute=False, min_wins=True) -score_cpdbs_computation_time = Attribute('score_cpdbs_computation_time', absolute=True, min_wins=False) -cegar_num_iterations = Attribute('cegar_num_iterations', absolute=False, min_wins=True) -cegar_num_patterns = Attribute('cegar_num_patterns', absolute=False, min_wins=True) -cegar_total_pdb_size = Attribute('cegar_total_pdb_size', absolute=False, min_wins=True) -cegar_computation_time = Attribute('cegar_computation_time', absolute=False, min_wins=True) -score_cegar_computation_time = Attribute('score_cegar_computation_time', absolute=True, min_wins=False) - -attributes = [ - cpdbs_num_patterns, - cpdbs_total_pdb_size, - cpdbs_computation_time, - score_cpdbs_computation_time, - cegar_num_iterations, - cegar_num_patterns, - cegar_total_pdb_size, - cegar_computation_time, - score_cegar_computation_time, -] -attributes.extend(exp.DEFAULT_TABLE_ATTRIBUTES) -attributes.append('initial_h_value') - -def add_computation_time_score(run): - """ - Convert cegar/cpdbs computation time into scores in the range [0, 1]. - - Best possible performance in a task is counted as 1, while failure - to construct the heuristic and worst performance are counted as 0. 
- - """ - def log_score(value, min_bound, max_bound): - assert min_bound < max_bound - if value is None: - return 0 - value = max(value, min_bound) - value = min(value, max_bound) - raw_score = math.log(value) - math.log(max_bound) - best_raw_score = math.log(min_bound) - math.log(max_bound) - return raw_score / best_raw_score - - run['score_cegar_computation_time'] = log_score(run.get('cegar_computation_time'), min_bound=1.0, max_bound=MAX_TIME) - run['score_cpdbs_computation_time'] = log_score(run.get('cpdbs_computation_time'), min_bound=1.0, max_bound=MAX_TIME) - return run - -exp.add_absolute_report_step(attributes=attributes,filter=[add_computation_time_score]) - - -exp.add_comparison_table_step( - attributes=attributes, - filter=[add_computation_time_score], -) - -exp.run_steps() diff --git a/experiments/issue1008/v2-cegar-multiple-seeds.py b/experiments/issue1008/v2-cegar-multiple-seeds.py deleted file mode 100755 index 42431dae32..0000000000 --- a/experiments/issue1008/v2-cegar-multiple-seeds.py +++ /dev/null @@ -1,140 +0,0 @@ -#! 
/usr/bin/env python3 - -import itertools -import math -import os - -from lab.environments import LocalEnvironment, BaselSlurmEnvironment -from lab.reports import Attribute - -from downward.reports.compare import ComparativeReport - -import common_setup -from common_setup import IssueConfig, IssueExperiment -from average_report import AverageAlgorithmReport - -DIR = os.path.dirname(os.path.abspath(__file__)) -SCRIPT_NAME = os.path.splitext(os.path.basename(__file__))[0] -BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"] -REVISIONS = ["issue1008-v1", "issue1008-v2"] -MAX_TIME=20 -if common_setup.is_test_run(): - MAX_TIME=2 -CONFIGS = [] -for random_seed in range(2018, 2028): - ### multiple cegar - # CONFIGS.append(IssueConfig(f'cpdbs-multiplecegar-wildcardplans-pdb1m-pdbs10m-t100-s{random_seed}', ['--search', f"astar(cpdbs(multiple_cegar(max_pdb_size=1000000,max_collection_size=10000000,pattern_generation_max_time=infinity,random_seed={random_seed},max_collection_size=10000000,total_max_time={MAX_TIME},stagnation_limit=infinity,blacklist_trigger_percentage=1,enable_blacklist_on_stagnation=false,verbosity=normal,use_wildcard_plans=true)),verbosity=silent)"], driver_options=['--search-time-limit', '5m'])) - CONFIGS.append(IssueConfig(f'cpdbs-multiplecegar-wildcardplans-pdb1m-pdbs10m-t100-stag4-s{random_seed}', ['--search', f"astar(cpdbs(multiple_cegar(max_pdb_size=1000000,max_collection_size=10000000,pattern_generation_max_time=infinity,random_seed={random_seed},max_collection_size=10000000,total_max_time={MAX_TIME},stagnation_limit=4,blacklist_trigger_percentage=1,enable_blacklist_on_stagnation=false,verbosity=normal,use_wildcard_plans=true)),verbosity=silent)"], driver_options=['--search-time-limit', '5m'])) - # CONFIGS.append(IssueConfig(f'cpdbs-multiplecegar-wildcardplans-pdb1m-pdbs10m-t100-blacklist0.75-s{random_seed}', ['--search', 
f"astar(cpdbs(multiple_cegar(max_pdb_size=1000000,max_collection_size=10000000,pattern_generation_max_time=infinity,random_seed={random_seed},max_collection_size=10000000,total_max_time={MAX_TIME},stagnation_limit=infinity,blacklist_trigger_percentage=0.75,enable_blacklist_on_stagnation=false,verbosity=normal,use_wildcard_plans=true)),verbosity=silent)"], driver_options=['--search-time-limit', '5m'])) - CONFIGS.append(IssueConfig(f'cpdbs-multiplecegar-wildcardplans-pdb1m-pdbs10m-t100-blacklist0.75-stag4-s{random_seed}', ['--search', f"astar(cpdbs(multiple_cegar(max_pdb_size=1000000,max_collection_size=10000000,pattern_generation_max_time=infinity,random_seed={random_seed},max_collection_size=10000000,total_max_time={MAX_TIME},stagnation_limit=4,blacklist_trigger_percentage=0.75,enable_blacklist_on_stagnation=true,verbosity=normal,use_wildcard_plans=true)),verbosity=silent)"], driver_options=['--search-time-limit', '5m'])) - -SUITE = common_setup.DEFAULT_OPTIMAL_SUITE -ENVIRONMENT = BaselSlurmEnvironment( - email="silvan.sievers@unibas.ch", - partition="infai_2", - export=[], - # paths obtained via: - # module purge - # module -q load CMake/3.15.3-GCCcore-8.3.0 - # module -q load GCC/8.3.0 - # echo $PATH - # echo $LD_LIBRARY_PATH - setup='export PATH=/scicore/soft/apps/binutils/2.32-GCCcore-8.3.0/bin:/scicore/soft/apps/CMake/3.15.3-GCCcore-8.3.0/bin:/scicore/soft/apps/cURL/7.66.0-GCCcore-8.3.0/bin:/scicore/soft/apps/bzip2/1.0.8-GCCcore-8.3.0/bin:/scicore/soft/apps/ncurses/6.1-GCCcore-8.3.0/bin:/scicore/soft/apps/GCCcore/8.3.0/bin:/infai/sieverss/repos/bin:/infai/sieverss/local:/export/soft/lua_lmod/centos7/lmod/lmod/libexec:/usr/local/bin:/usr/bin:/usr/local/sbin:/usr/sbin:$PATH\nexport 
LD_LIBRARY_PATH=/scicore/soft/apps/binutils/2.32-GCCcore-8.3.0/lib:/scicore/soft/apps/cURL/7.66.0-GCCcore-8.3.0/lib:/scicore/soft/apps/bzip2/1.0.8-GCCcore-8.3.0/lib:/scicore/soft/apps/zlib/1.2.11-GCCcore-8.3.0/lib:/scicore/soft/apps/ncurses/6.1-GCCcore-8.3.0/lib:/scicore/soft/apps/GCCcore/8.3.0/lib64:/scicore/soft/apps/GCCcore/8.3.0/lib') - -if common_setup.is_test_run(): - SUITE = IssueExperiment.DEFAULT_TEST_SUITE - ENVIRONMENT = LocalEnvironment(processes=4) - -exp = IssueExperiment( - revisions=REVISIONS, - configs=CONFIGS, - environment=ENVIRONMENT, -) -exp.add_suite(BENCHMARKS_DIR, SUITE) - -exp.add_parser(exp.EXITCODE_PARSER) -exp.add_parser(exp.TRANSLATOR_PARSER) -exp.add_parser(exp.SINGLE_SEARCH_PARSER) -exp.add_parser(exp.PLANNER_PARSER) - -exp.add_step('build', exp.build) -exp.add_step('start', exp.start_runs) -exp.add_fetcher(name='fetch') -exp.add_parser('cpdbs-parser.py') -exp.add_parser('cegar-parser.py') - -cpdbs_num_patterns = Attribute('cpdbs_num_patterns', absolute=False, min_wins=True) -cpdbs_total_pdb_size = Attribute('cpdbs_total_pdb_size', absolute=False, min_wins=True) -cpdbs_computation_time = Attribute('cpdbs_computation_time', absolute=False, min_wins=True) -score_cpdbs_computation_time = Attribute('score_cpdbs_computation_time', absolute=True, min_wins=False) -cegar_num_iterations = Attribute('cegar_num_iterations', absolute=False, min_wins=True) -cegar_num_patterns = Attribute('cegar_num_patterns', absolute=False, min_wins=True) -cegar_total_pdb_size = Attribute('cegar_total_pdb_size', absolute=False, min_wins=True) -cegar_computation_time = Attribute('cegar_computation_time', absolute=False, min_wins=True) -score_cegar_computation_time = Attribute('score_cegar_computation_time', absolute=True, min_wins=False) - -attributes = [ - cpdbs_num_patterns, - cpdbs_total_pdb_size, - cpdbs_computation_time, - score_cpdbs_computation_time, - cegar_num_iterations, - cegar_num_patterns, - cegar_total_pdb_size, - cegar_computation_time, - 
score_cegar_computation_time, -] - -attributes=[ - 'coverage', 'search_time', 'total_time', - 'expansions_until_last_jump', 'score_search_time', - 'score_total_time', 'score_memory', 'score_expansions', - 'initial_h_value', cpdbs_num_patterns, cpdbs_total_pdb_size, - cpdbs_computation_time, score_cpdbs_computation_time, - cegar_num_iterations, cegar_num_patterns, cegar_total_pdb_size, - cegar_computation_time, score_cegar_computation_time, -] - -def add_computation_time_score(run): - """ - Convert cegar/cpdbs computation time into scores in the range [0, 1]. - - Best possible performance in a task is counted as 1, while failure - to construct the heuristic and worst performance are counted as 0. - - """ - def log_score(value, min_bound, max_bound): - assert min_bound < max_bound - if value is None: - return 0 - value = max(value, min_bound) - value = min(value, max_bound) - raw_score = math.log(value) - math.log(max_bound) - best_raw_score = math.log(min_bound) - math.log(max_bound) - return raw_score / best_raw_score - - run['score_random_pattern_computation_time'] = log_score(run.get('random_pattern_computation_time'), min_bound=1.0, max_bound=MAX_TIME) - run['score_cpdbs_computation_time'] = log_score(run.get('cpdbs_computation_time'), min_bound=1.0, max_bound=MAX_TIME) - return run - -exp.add_report( - AverageAlgorithmReport( - algo_name_suffixes=['-s{}'.format(seed) for seed in range(2018,2028)], - attributes=attributes, - filter=[add_computation_time_score], - ), - outfile=os.path.join(exp.eval_dir, "average", "properties"), - name="report-average" -) - -exp.add_fetcher(os.path.join(exp.eval_dir, 'average'), merge=True) -exp._configs = [ - IssueConfig('cpdbs-multiplecegar-wildcardplans-pdb1m-pdbs10m-t100-stag4', []), - IssueConfig('cpdbs-multiplecegar-wildcardplans-pdb1m-pdbs10m-t100-blacklist0.75-stag4', []), -] -exp.add_comparison_table_step( - attributes=attributes, - filter=[add_computation_time_score], -) - -exp.run_steps() diff --git 
a/experiments/issue1008/v3-cegar-multiple-seeds.py b/experiments/issue1008/v3-cegar-multiple-seeds.py deleted file mode 100755 index e02f4c534e..0000000000 --- a/experiments/issue1008/v3-cegar-multiple-seeds.py +++ /dev/null @@ -1,128 +0,0 @@ -#! /usr/bin/env python3 - -import itertools -import math -import os - -from lab.environments import LocalEnvironment, BaselSlurmEnvironment -from lab.reports import Attribute - -from downward.reports.compare import ComparativeReport - -import common_setup -from common_setup import IssueConfig, IssueExperiment -from average_report import AverageAlgorithmReport - -DIR = os.path.dirname(os.path.abspath(__file__)) -SCRIPT_NAME = os.path.splitext(os.path.basename(__file__))[0] -BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"] -REVISIONS = ["issue1008-v2", "issue1008-v3"] -MAX_TIME=20 -if common_setup.is_test_run(): - MAX_TIME=2 -CONFIGS = [] -for random_seed in range(2018, 2028): - ### multiple cegar - CONFIGS.append(IssueConfig(f'cpdbs-multiplecegar-wildcardplans-pdb1m-pdbs10m-t100-s{random_seed}', ['--search', f"astar(cpdbs(multiple_cegar(max_pdb_size=1000000,max_collection_size=10000000,pattern_generation_max_time=infinity,random_seed={random_seed},max_collection_size=10000000,total_max_time={MAX_TIME},stagnation_limit=infinity,blacklist_trigger_percentage=1,enable_blacklist_on_stagnation=false,verbosity=normal,use_wildcard_plans=true)),verbosity=silent)"], driver_options=['--search-time-limit', '5m'])) - # CONFIGS.append(IssueConfig(f'cpdbs-multiplecegar-wildcardplans-pdb1m-pdbs10m-t100-stag4-s{random_seed}', ['--search', f"astar(cpdbs(multiple_cegar(max_pdb_size=1000000,max_collection_size=10000000,pattern_generation_max_time=infinity,random_seed={random_seed},max_collection_size=10000000,total_max_time={MAX_TIME},stagnation_limit=4,blacklist_trigger_percentage=1,enable_blacklist_on_stagnation=false,verbosity=normal,use_wildcard_plans=true)),verbosity=silent)"], driver_options=['--search-time-limit', '5m'])) - # 
CONFIGS.append(IssueConfig(f'cpdbs-multiplecegar-wildcardplans-pdb1m-pdbs10m-t100-blacklist0.75-s{random_seed}', ['--search', f"astar(cpdbs(multiple_cegar(max_pdb_size=1000000,max_collection_size=10000000,pattern_generation_max_time=infinity,random_seed={random_seed},max_collection_size=10000000,total_max_time={MAX_TIME},stagnation_limit=infinity,blacklist_trigger_percentage=0.75,enable_blacklist_on_stagnation=false,verbosity=normal,use_wildcard_plans=true)),verbosity=silent)"], driver_options=['--search-time-limit', '5m'])) - CONFIGS.append(IssueConfig(f'cpdbs-multiplecegar-wildcardplans-pdb1m-pdbs10m-t100-blacklist0.75-stag4-s{random_seed}', ['--search', f"astar(cpdbs(multiple_cegar(max_pdb_size=1000000,max_collection_size=10000000,pattern_generation_max_time=infinity,random_seed={random_seed},max_collection_size=10000000,total_max_time={MAX_TIME},stagnation_limit=4,blacklist_trigger_percentage=0.75,enable_blacklist_on_stagnation=true,verbosity=normal,use_wildcard_plans=true)),verbosity=silent)"], driver_options=['--search-time-limit', '5m'])) - -SUITE = common_setup.DEFAULT_OPTIMAL_SUITE -ENVIRONMENT = BaselSlurmEnvironment( - email="silvan.sievers@unibas.ch", - partition="infai_2", - export=[], - # paths obtained via: - # module purge - # module -q load CMake/3.15.3-GCCcore-8.3.0 - # module -q load GCC/8.3.0 - # echo $PATH - # echo $LD_LIBRARY_PATH - setup='export PATH=/scicore/soft/apps/binutils/2.32-GCCcore-8.3.0/bin:/scicore/soft/apps/CMake/3.15.3-GCCcore-8.3.0/bin:/scicore/soft/apps/cURL/7.66.0-GCCcore-8.3.0/bin:/scicore/soft/apps/bzip2/1.0.8-GCCcore-8.3.0/bin:/scicore/soft/apps/ncurses/6.1-GCCcore-8.3.0/bin:/scicore/soft/apps/GCCcore/8.3.0/bin:/infai/sieverss/repos/bin:/infai/sieverss/local:/export/soft/lua_lmod/centos7/lmod/lmod/libexec:/usr/local/bin:/usr/bin:/usr/local/sbin:/usr/sbin:$PATH\nexport 
LD_LIBRARY_PATH=/scicore/soft/apps/binutils/2.32-GCCcore-8.3.0/lib:/scicore/soft/apps/cURL/7.66.0-GCCcore-8.3.0/lib:/scicore/soft/apps/bzip2/1.0.8-GCCcore-8.3.0/lib:/scicore/soft/apps/zlib/1.2.11-GCCcore-8.3.0/lib:/scicore/soft/apps/ncurses/6.1-GCCcore-8.3.0/lib:/scicore/soft/apps/GCCcore/8.3.0/lib64:/scicore/soft/apps/GCCcore/8.3.0/lib') - -if common_setup.is_test_run(): - SUITE = IssueExperiment.DEFAULT_TEST_SUITE - ENVIRONMENT = LocalEnvironment(processes=4) - -exp = IssueExperiment( - revisions=REVISIONS, - configs=CONFIGS, - environment=ENVIRONMENT, -) -exp.add_suite(BENCHMARKS_DIR, SUITE) - -exp.add_parser(exp.EXITCODE_PARSER) -exp.add_parser(exp.TRANSLATOR_PARSER) -exp.add_parser(exp.SINGLE_SEARCH_PARSER) -exp.add_parser(exp.PLANNER_PARSER) - -exp.add_step('build', exp.build) -exp.add_step('start', exp.start_runs) -exp.add_fetcher(name='fetch') -exp.add_parser('cpdbs-parser.py') -exp.add_parser('cegar-parser.py') - -cpdbs_num_patterns = Attribute('cpdbs_num_patterns', absolute=False, min_wins=True) -cpdbs_total_pdb_size = Attribute('cpdbs_total_pdb_size', absolute=False, min_wins=True) -cpdbs_computation_time = Attribute('cpdbs_computation_time', absolute=False, min_wins=True) -score_cpdbs_computation_time = Attribute('score_cpdbs_computation_time', absolute=True, min_wins=False) -cegar_num_iterations = Attribute('cegar_num_iterations', absolute=False, min_wins=True) -cegar_num_patterns = Attribute('cegar_num_patterns', absolute=False, min_wins=True) -cegar_total_pdb_size = Attribute('cegar_total_pdb_size', absolute=False, min_wins=True) -cegar_computation_time = Attribute('cegar_computation_time', absolute=False, min_wins=True) -score_cegar_computation_time = Attribute('score_cegar_computation_time', absolute=True, min_wins=False) - -attributes=[ - 'coverage', 'search_time', 'total_time', - 'expansions_until_last_jump', 'score_search_time', - 'score_total_time', 'score_memory', 'score_expansions', - 'initial_h_value', cpdbs_num_patterns, 
cpdbs_total_pdb_size, - cpdbs_computation_time, score_cpdbs_computation_time, - cegar_num_iterations, cegar_num_patterns, cegar_total_pdb_size, - cegar_computation_time, score_cegar_computation_time, -] - -def add_computation_time_score(run): - """ - Convert cegar/cpdbs computation time into scores in the range [0, 1]. - - Best possible performance in a task is counted as 1, while failure - to construct the heuristic and worst performance are counted as 0. - - """ - def log_score(value, min_bound, max_bound): - assert min_bound < max_bound - if value is None: - return 0 - value = max(value, min_bound) - value = min(value, max_bound) - raw_score = math.log(value) - math.log(max_bound) - best_raw_score = math.log(min_bound) - math.log(max_bound) - return raw_score / best_raw_score - - run['score_random_pattern_computation_time'] = log_score(run.get('random_pattern_computation_time'), min_bound=1.0, max_bound=MAX_TIME) - run['score_cpdbs_computation_time'] = log_score(run.get('cpdbs_computation_time'), min_bound=1.0, max_bound=MAX_TIME) - return run - -exp.add_report( - AverageAlgorithmReport( - algo_name_suffixes=['-s{}'.format(seed) for seed in range(2018,2028)], - attributes=attributes, - filter=[add_computation_time_score], - ), - outfile=os.path.join(exp.eval_dir, "average", "properties"), - name="report-average" -) - -exp.add_fetcher(os.path.join(exp.eval_dir, 'average'), merge=True) -exp._configs = [ - IssueConfig('cpdbs-multiplecegar-wildcardplans-pdb1m-pdbs10m-t100', []), - IssueConfig('cpdbs-multiplecegar-wildcardplans-pdb1m-pdbs10m-t100-blacklist0.75-stag4', []), -] -exp.add_comparison_table_step( - attributes=attributes, - filter=[add_computation_time_score], -) - -exp.run_steps() diff --git a/experiments/issue1008/v4-random-patterns-multiple-seeds.py b/experiments/issue1008/v4-random-patterns-multiple-seeds.py deleted file mode 100755 index d532b723e4..0000000000 --- a/experiments/issue1008/v4-random-patterns-multiple-seeds.py +++ /dev/null @@ -1,130 +0,0 
@@ -#! /usr/bin/env python3 - -import itertools -import math -import os - -from lab.environments import LocalEnvironment, BaselSlurmEnvironment -from lab.reports import Attribute - -from downward.reports.compare import ComparativeReport - -import common_setup -from common_setup import IssueConfig, IssueExperiment -from average_report import AverageAlgorithmReport - -DIR = os.path.dirname(os.path.abspath(__file__)) -SCRIPT_NAME = os.path.splitext(os.path.basename(__file__))[0] -BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"] -REVISIONS = ["issue1008-v3", "issue1008-v4"] -MAX_TIME=20 -if common_setup.is_test_run(): - MAX_TIME=2 -CONFIGS = [] -for random_seed in range(2018, 2028): - ### single random pattern - CONFIGS.append(IssueConfig(f'srnd-bidi-pdb1m-pdbs10m-t20-s{random_seed}', ['--search', f'astar(pdb(random_pattern(max_pdb_size=1000000,max_time={MAX_TIME},verbosity=normal,random_seed={random_seed},bidirectional=true)),verbosity=silent)'], driver_options=['--search-time-limit', '5m'])) - - ### multiple random patterns - CONFIGS.append(IssueConfig(f'mrnd-bidi-pdb1m-pdbs10m-t20-stag1-s{random_seed}', ['--search', f'astar(cpdbs(random_patterns(max_pdb_size=1000000,max_collection_size=10000000,pattern_generation_max_time=infinity,total_max_time={MAX_TIME},stagnation_limit=1,random_seed={random_seed},verbosity=normal,bidirectional=true)),verbosity=silent)'], driver_options=['--search-time-limit', '5m'])) - -SUITE = common_setup.DEFAULT_OPTIMAL_SUITE -ENVIRONMENT = BaselSlurmEnvironment( - email="silvan.sievers@unibas.ch", - partition="infai_2", - export=[], - # paths obtained via: - # module purge - # module -q load CMake/3.15.3-GCCcore-8.3.0 - # module -q load GCC/8.3.0 - # echo $PATH - # echo $LD_LIBRARY_PATH - setup='export 
PATH=/scicore/soft/apps/binutils/2.32-GCCcore-8.3.0/bin:/scicore/soft/apps/CMake/3.15.3-GCCcore-8.3.0/bin:/scicore/soft/apps/cURL/7.66.0-GCCcore-8.3.0/bin:/scicore/soft/apps/bzip2/1.0.8-GCCcore-8.3.0/bin:/scicore/soft/apps/ncurses/6.1-GCCcore-8.3.0/bin:/scicore/soft/apps/GCCcore/8.3.0/bin:/infai/sieverss/repos/bin:/infai/sieverss/local:/export/soft/lua_lmod/centos7/lmod/lmod/libexec:/usr/local/bin:/usr/bin:/usr/local/sbin:/usr/sbin:$PATH\nexport LD_LIBRARY_PATH=/scicore/soft/apps/binutils/2.32-GCCcore-8.3.0/lib:/scicore/soft/apps/cURL/7.66.0-GCCcore-8.3.0/lib:/scicore/soft/apps/bzip2/1.0.8-GCCcore-8.3.0/lib:/scicore/soft/apps/zlib/1.2.11-GCCcore-8.3.0/lib:/scicore/soft/apps/ncurses/6.1-GCCcore-8.3.0/lib:/scicore/soft/apps/GCCcore/8.3.0/lib64:/scicore/soft/apps/GCCcore/8.3.0/lib') - -if common_setup.is_test_run(): - SUITE = IssueExperiment.DEFAULT_TEST_SUITE - ENVIRONMENT = LocalEnvironment(processes=4) - -exp = IssueExperiment( - revisions=REVISIONS, - configs=CONFIGS, - environment=ENVIRONMENT, -) -exp.add_suite(BENCHMARKS_DIR, SUITE) - -exp.add_parser(exp.EXITCODE_PARSER) -exp.add_parser(exp.TRANSLATOR_PARSER) -exp.add_parser(exp.SINGLE_SEARCH_PARSER) -exp.add_parser(exp.PLANNER_PARSER) - -exp.add_step('build', exp.build) -exp.add_step('start', exp.start_runs) -exp.add_fetcher(name='fetch') -exp.add_parser('cpdbs-parser.py') -exp.add_parser('random-pattern-parser.py') - -cpdbs_num_patterns = Attribute('cpdbs_num_patterns', absolute=False, min_wins=True) -cpdbs_total_pdb_size = Attribute('cpdbs_total_pdb_size', absolute=False, min_wins=True) -cpdbs_computation_time = Attribute('cpdbs_computation_time', absolute=False, min_wins=True) -score_cpdbs_computation_time = Attribute('score_cpdbs_computation_time', absolute=True, min_wins=False) -random_pattern_num_iterations = Attribute('random_pattern_num_iterations', absolute=False, min_wins=True) -random_pattern_num_patterns = Attribute('random_pattern_num_patterns', absolute=False, min_wins=True) 
-random_pattern_total_pdb_size = Attribute('random_pattern_total_pdb_size', absolute=False, min_wins=True) -random_pattern_computation_time = Attribute('random_pattern_computation_time', absolute=False, min_wins=True) -score_random_pattern_computation_time = Attribute('score_random_pattern_computation_time', absolute=True, min_wins=False) - -attributes=[ - 'coverage', 'search_time', 'total_time', - 'expansions_until_last_jump', 'score_search_time', - 'score_total_time', 'score_memory', 'score_expansions', - 'initial_h_value', cpdbs_num_patterns, cpdbs_total_pdb_size, - cpdbs_computation_time, score_cpdbs_computation_time, - random_pattern_num_iterations, random_pattern_num_patterns, random_pattern_total_pdb_size, - random_pattern_computation_time, score_random_pattern_computation_time, -] - -exp.add_absolute_report_step(attributes=['coverage']) - -def add_computation_time_score(run): - """ - Convert random_pattern/cpdbs computation time into scores in the range [0, 1]. - - Best possible performance in a task is counted as 1, while failure - to construct the heuristic and worst performance are counted as 0. 
- - """ - def log_score(value, min_bound, max_bound): - assert min_bound < max_bound - if value is None: - return 0 - value = max(value, min_bound) - value = min(value, max_bound) - raw_score = math.log(value) - math.log(max_bound) - best_raw_score = math.log(min_bound) - math.log(max_bound) - return raw_score / best_raw_score - - run['score_random_pattern_computation_time'] = log_score(run.get('random_pattern_computation_time'), min_bound=1.0, max_bound=MAX_TIME) - run['score_cpdbs_computation_time'] = log_score(run.get('cpdbs_computation_time'), min_bound=1.0, max_bound=MAX_TIME) - return run - -exp.add_report( - AverageAlgorithmReport( - algo_name_suffixes=['-s{}'.format(seed) for seed in range(2018,2028)], - attributes=attributes, - filter=[add_computation_time_score], - ), - outfile=os.path.join(exp.eval_dir, "average", "properties"), - name="report-average" -) - -exp.add_fetcher(os.path.join(exp.eval_dir, 'average'), merge=True) -exp._configs = [ - IssueConfig('srnd-bidi-pdb1m-pdbs10m-t20', []), - IssueConfig('mrnd-bidi-pdb1m-pdbs10m-t20-stag1', []), -] -exp.add_comparison_table_step( - attributes=attributes, - filter=[add_computation_time_score], -) - -exp.run_steps() diff --git a/experiments/issue1009/common_setup.py b/experiments/issue1009/common_setup.py deleted file mode 100644 index ae9af26890..0000000000 --- a/experiments/issue1009/common_setup.py +++ /dev/null @@ -1,395 +0,0 @@ -# -*- coding: utf-8 -*- - -import itertools -import os -import platform -import subprocess -import sys - -from lab.experiment import ARGPARSER -from lab import tools - -from downward.experiment import FastDownwardExperiment -from downward.reports.absolute import AbsoluteReport -from downward.reports.compare import ComparativeReport -from downward.reports.scatter import ScatterPlotReport - - -def parse_args(): - ARGPARSER.add_argument( - "--test", - choices=["yes", "no", "auto"], - default="auto", - dest="test_run", - help="test experiment locally on a small suite if --test=yes 
or " - "--test=auto and we are not on a cluster") - return ARGPARSER.parse_args() - -ARGS = parse_args() - - -DEFAULT_OPTIMAL_SUITE = [ - 'agricola-opt18-strips', 'airport', 'barman-opt11-strips', - 'barman-opt14-strips', 'blocks', 'childsnack-opt14-strips', - 'data-network-opt18-strips', 'depot', 'driverlog', - 'elevators-opt08-strips', 'elevators-opt11-strips', - 'floortile-opt11-strips', 'floortile-opt14-strips', 'freecell', - 'ged-opt14-strips', 'grid', 'gripper', 'hiking-opt14-strips', - 'logistics00', 'logistics98', 'miconic', 'movie', 'mprime', - 'mystery', 'nomystery-opt11-strips', 'openstacks-opt08-strips', - 'openstacks-opt11-strips', 'openstacks-opt14-strips', - 'openstacks-strips', 'organic-synthesis-opt18-strips', - 'organic-synthesis-split-opt18-strips', 'parcprinter-08-strips', - 'parcprinter-opt11-strips', 'parking-opt11-strips', - 'parking-opt14-strips', 'pegsol-08-strips', - 'pegsol-opt11-strips', 'petri-net-alignment-opt18-strips', - 'pipesworld-notankage', 'pipesworld-tankage', 'psr-small', 'rovers', - 'satellite', 'scanalyzer-08-strips', 'scanalyzer-opt11-strips', - 'snake-opt18-strips', 'sokoban-opt08-strips', - 'sokoban-opt11-strips', 'spider-opt18-strips', 'storage', - 'termes-opt18-strips', 'tetris-opt14-strips', - 'tidybot-opt11-strips', 'tidybot-opt14-strips', 'tpp', - 'transport-opt08-strips', 'transport-opt11-strips', - 'transport-opt14-strips', 'trucks-strips', 'visitall-opt11-strips', - 'visitall-opt14-strips', 'woodworking-opt08-strips', - 'woodworking-opt11-strips', 'zenotravel'] - -DEFAULT_SATISFICING_SUITE = [ - 'agricola-sat18-strips', 'airport', 'assembly', - 'barman-sat11-strips', 'barman-sat14-strips', 'blocks', - 'caldera-sat18-adl', 'caldera-split-sat18-adl', 'cavediving-14-adl', - 'childsnack-sat14-strips', 'citycar-sat14-adl', - 'data-network-sat18-strips', 'depot', 'driverlog', - 'elevators-sat08-strips', 'elevators-sat11-strips', - 'flashfill-sat18-adl', 'floortile-sat11-strips', - 'floortile-sat14-strips', 'freecell', 
'ged-sat14-strips', 'grid', - 'gripper', 'hiking-sat14-strips', 'logistics00', 'logistics98', - 'maintenance-sat14-adl', 'miconic', 'miconic-fulladl', - 'miconic-simpleadl', 'movie', 'mprime', 'mystery', - 'nomystery-sat11-strips', 'nurikabe-sat18-adl', 'openstacks', - 'openstacks-sat08-adl', 'openstacks-sat08-strips', - 'openstacks-sat11-strips', 'openstacks-sat14-strips', - 'openstacks-strips', 'optical-telegraphs', - 'organic-synthesis-sat18-strips', - 'organic-synthesis-split-sat18-strips', 'parcprinter-08-strips', - 'parcprinter-sat11-strips', 'parking-sat11-strips', - 'parking-sat14-strips', 'pathways', - 'pegsol-08-strips', 'pegsol-sat11-strips', 'philosophers', - 'pipesworld-notankage', 'pipesworld-tankage', 'psr-large', - 'psr-middle', 'psr-small', 'rovers', 'satellite', - 'scanalyzer-08-strips', 'scanalyzer-sat11-strips', 'schedule', - 'settlers-sat18-adl', 'snake-sat18-strips', 'sokoban-sat08-strips', - 'sokoban-sat11-strips', 'spider-sat18-strips', 'storage', - 'termes-sat18-strips', 'tetris-sat14-strips', - 'thoughtful-sat14-strips', 'tidybot-sat11-strips', 'tpp', - 'transport-sat08-strips', 'transport-sat11-strips', - 'transport-sat14-strips', 'trucks', 'trucks-strips', - 'visitall-sat11-strips', 'visitall-sat14-strips', - 'woodworking-sat08-strips', 'woodworking-sat11-strips', - 'zenotravel'] - - -def get_script(): - """Get file name of main script.""" - return tools.get_script_path() - - -def get_script_dir(): - """Get directory of main script. - - Usually a relative directory (depends on how it was called by the user.)""" - return os.path.dirname(get_script()) - - -def get_experiment_name(): - """Get name for experiment. - - Derived from the absolute filename of the main script, e.g. 
- "/ham/spam/eggs.py" => "spam-eggs".""" - script = os.path.abspath(get_script()) - script_dir = os.path.basename(os.path.dirname(script)) - script_base = os.path.splitext(os.path.basename(script))[0] - return "%s-%s" % (script_dir, script_base) - - -def get_data_dir(): - """Get data dir for the experiment. - - This is the subdirectory "data" of the directory containing - the main script.""" - return os.path.join(get_script_dir(), "data", get_experiment_name()) - - -def get_repo_base(): - """Get base directory of the repository, as an absolute path. - - Search upwards in the directory tree from the main script until a - directory with a subdirectory named ".git" is found. - - Abort if the repo base cannot be found.""" - path = os.path.abspath(get_script_dir()) - while os.path.dirname(path) != path: - if os.path.exists(os.path.join(path, ".git")): - return path - path = os.path.dirname(path) - sys.exit("repo base could not be found") - - -def is_running_on_cluster(): - node = platform.node() - return node.endswith(".scicore.unibas.ch") or node.endswith(".cluster.bc2.ch") - - -def is_test_run(): - return ARGS.test_run == "yes" or ( - ARGS.test_run == "auto" and not is_running_on_cluster()) - - -def get_algo_nick(revision, config_nick): - return "{revision}-{config_nick}".format(**locals()) - - -class IssueConfig(object): - """Hold information about a planner configuration. - - See FastDownwardExperiment.add_algorithm() for documentation of the - constructor's options. 
- - """ - def __init__(self, nick, component_options, - build_options=None, driver_options=None): - self.nick = nick - self.component_options = component_options - self.build_options = build_options - self.driver_options = driver_options - - -class IssueExperiment(FastDownwardExperiment): - """Subclass of FastDownwardExperiment with some convenience features.""" - - DEFAULT_TEST_SUITE = ["depot:p01.pddl", "gripper:prob01.pddl"] - - DEFAULT_TABLE_ATTRIBUTES = [ - "cost", - "coverage", - "error", - "evaluations", - "expansions", - "expansions_until_last_jump", - "initial_h_value", - "generated", - "memory", - "planner_memory", - "planner_time", - "quality", - "run_dir", - "score_evaluations", - "score_expansions", - "score_generated", - "score_memory", - "score_search_time", - "score_total_time", - "search_time", - "total_time", - ] - - DEFAULT_SCATTER_PLOT_ATTRIBUTES = [ - "evaluations", - "expansions", - "expansions_until_last_jump", - "initial_h_value", - "memory", - "search_time", - "total_time", - ] - - PORTFOLIO_ATTRIBUTES = [ - "cost", - "coverage", - "error", - "plan_length", - "run_dir", - ] - - def __init__(self, revisions=None, configs=None, path=None, **kwargs): - """ - - You can either specify both *revisions* and *configs* or none - of them. If they are omitted, you will need to call - exp.add_algorithm() manually. - - If *revisions* is given, it must be a non-empty list of - revision identifiers, which specify which planner versions to - use in the experiment. The same versions are used for - translator, preprocessor and search. :: - - IssueExperiment(revisions=["issue123", "4b3d581643"], ...) - - If *configs* is given, it must be a non-empty list of - IssueConfig objects. :: - - IssueExperiment(..., configs=[ - IssueConfig("ff", ["--search", "eager_greedy(ff())"]), - IssueConfig( - "lama", [], - driver_options=["--alias", "seq-sat-lama-2011"]), - ]) - - If *path* is specified, it must be the path to where the - experiment should be built (e.g. 
- /home/john/experiments/issue123/exp01/). If omitted, the - experiment path is derived automatically from the main - script's filename. Example:: - - script = experiments/issue123/exp01.py --> - path = experiments/issue123/data/issue123-exp01/ - - """ - - path = path or get_data_dir() - - FastDownwardExperiment.__init__(self, path=path, **kwargs) - - if (revisions and not configs) or (not revisions and configs): - raise ValueError( - "please provide either both or none of revisions and configs") - - for rev in revisions: - for config in configs: - self.add_algorithm( - get_algo_nick(rev, config.nick), - get_repo_base(), - rev, - config.component_options, - build_options=config.build_options, - driver_options=config.driver_options) - - self._revisions = revisions - self._configs = configs - - @classmethod - def _is_portfolio(cls, config_nick): - return "fdss" in config_nick - - @classmethod - def get_supported_attributes(cls, config_nick, attributes): - if cls._is_portfolio(config_nick): - return [attr for attr in attributes - if attr in cls.PORTFOLIO_ATTRIBUTES] - return attributes - - def add_absolute_report_step(self, **kwargs): - """Add step that makes an absolute report. - - Absolute reports are useful for experiments that don't compare - revisions. - - The report is written to the experiment evaluation directory. - - All *kwargs* will be passed to the AbsoluteReport class. If the - keyword argument *attributes* is not specified, a default list - of attributes is used. :: - - exp.add_absolute_report_step(attributes=["coverage"]) - - """ - kwargs.setdefault("attributes", self.DEFAULT_TABLE_ATTRIBUTES) - report = AbsoluteReport(**kwargs) - outfile = os.path.join( - self.eval_dir, - get_experiment_name() + "." 
+ report.output_format) - self.add_report(report, outfile=outfile) - self.add_step( - 'publish-absolute-report', subprocess.call, ['publish', outfile]) - - def add_comparison_table_step(self, **kwargs): - """Add a step that makes pairwise revision comparisons. - - Create comparative reports for all pairs of Fast Downward - revisions. Each report pairs up the runs of the same config and - lists the two absolute attribute values and their difference - for all attributes in kwargs["attributes"]. - - All *kwargs* will be passed to the CompareConfigsReport class. - If the keyword argument *attributes* is not specified, a - default list of attributes is used. :: - - exp.add_comparison_table_step(attributes=["coverage"]) - - """ - kwargs.setdefault("attributes", self.DEFAULT_TABLE_ATTRIBUTES) - - def make_comparison_tables(): - for rev1, rev2 in itertools.combinations(self._revisions, 2): - compared_configs = [] - for config in self._configs: - config_nick = config.nick - compared_configs.append( - ("%s-%s" % (rev1, config_nick), - "%s-%s" % (rev2, config_nick), - "Diff (%s)" % config_nick)) - report = ComparativeReport(compared_configs, **kwargs) - outfile = os.path.join( - self.eval_dir, - "%s-%s-%s-compare.%s" % ( - self.name, rev1, rev2, report.output_format)) - report(self.eval_dir, outfile) - - def publish_comparison_tables(): - for rev1, rev2 in itertools.combinations(self._revisions, 2): - outfile = os.path.join( - self.eval_dir, - "%s-%s-%s-compare.html" % (self.name, rev1, rev2)) - subprocess.call(["publish", outfile]) - - self.add_step("make-comparison-tables", make_comparison_tables) - self.add_step( - "publish-comparison-tables", publish_comparison_tables) - - def add_scatter_plot_step(self, relative=False, attributes=None, additional=[]): - """Add step creating (relative) scatter plots for all revision pairs. - - Create a scatter plot for each combination of attribute, - configuration and revisions pair. 
If *attributes* is not - specified, a list of common scatter plot attributes is used. - For portfolios all attributes except "cost", "coverage" and - "plan_length" will be ignored. :: - - exp.add_scatter_plot_step(attributes=["expansions"]) - - """ - if relative: - scatter_dir = os.path.join(self.eval_dir, "scatter-relative") - step_name = "make-relative-scatter-plots" - else: - scatter_dir = os.path.join(self.eval_dir, "scatter-absolute") - step_name = "make-absolute-scatter-plots" - if attributes is None: - attributes = self.DEFAULT_SCATTER_PLOT_ATTRIBUTES - - def make_scatter_plot(config_nick, rev1, rev2, attribute, config_nick2=None): - name = "-".join([self.name, rev1, rev2, attribute, config_nick]) - if config_nick2 is not None: - name += "-" + config_nick2 - print("Make scatter plot for", name) - algo1 = get_algo_nick(rev1, config_nick) - algo2 = get_algo_nick(rev2, config_nick if config_nick2 is None else config_nick2) - report = ScatterPlotReport( - filter_algorithm=[algo1, algo2], - attributes=[attribute], - relative=relative, - get_category=lambda run1, run2: run1["domain"]) - report( - self.eval_dir, - os.path.join(scatter_dir, rev1 + "-" + rev2, name)) - - def make_scatter_plots(): - for config in self._configs: - for rev1, rev2 in itertools.combinations(self._revisions, 2): - for attribute in self.get_supported_attributes( - config.nick, attributes): - make_scatter_plot(config.nick, rev1, rev2, attribute) - for nick1, nick2, rev1, rev2, attribute in additional: - make_scatter_plot(nick1, rev1, rev2, attribute, config_nick2=nick2) - - self.add_step(step_name, lambda: make_scatter_plots) diff --git a/experiments/issue1009/landmark_parser.py b/experiments/issue1009/landmark_parser.py deleted file mode 100755 index 943492471b..0000000000 --- a/experiments/issue1009/landmark_parser.py +++ /dev/null @@ -1,29 +0,0 @@ -#! 
/usr/bin/env python - -import re - -from lab.parser import Parser - -parser = Parser() -parser.add_pattern( - "lmgraph_generation_time", - r"Landmark graph generation time: (.+)s", - type=float) -parser.add_pattern( - "landmarks", - r"Landmark graph contains (\d+) landmarks, of which \d+ are disjunctive and \d+ are conjunctive.", - type=int) -parser.add_pattern( - "landmarks_disjunctive", - r"Landmark graph contains \d+ landmarks, of which (\d+) are disjunctive and \d+ are conjunctive.", - type=int) -parser.add_pattern( - "landmarks_conjunctive", - r"Landmark graph contains \d+ landmarks, of which \d+ are disjunctive and (\d+) are conjunctive.", - type=int) -parser.add_pattern( - "orderings", - r"Landmark graph contains (\d+) orderings.", - type=int) - -parser.parse() diff --git a/experiments/issue1009/requirements.txt b/experiments/issue1009/requirements.txt deleted file mode 100644 index 07c765f578..0000000000 --- a/experiments/issue1009/requirements.txt +++ /dev/null @@ -1,14 +0,0 @@ -cycler==0.11.0 -fonttools==4.34.4 -kiwisolver==1.4.3 -lab==7.1 -matplotlib==3.5.2 -numpy==1.22.0 -packaging==21.3 -Pillow==9.2.0 -pyparsing==3.0.9 -python-dateutil==2.8.2 -simplejson==3.17.6 -six==1.16.0 -txt2tags==3.7 -typing_extensions==4.3.0 diff --git a/experiments/issue1009/v1-optimal.py b/experiments/issue1009/v1-optimal.py deleted file mode 100755 index a7d4f8822a..0000000000 --- a/experiments/issue1009/v1-optimal.py +++ /dev/null @@ -1,72 +0,0 @@ -#! 
/usr/bin/env python -# -*- coding: utf-8 -*- - -import common_setup -from common_setup import IssueConfig, IssueExperiment - -import os - -from lab.reports import Attribute - -from lab.environments import LocalEnvironment, BaselSlurmEnvironment - -REVISIONS = [ - "issue1009-base", - "issue1009-v1", - "issue1009-v2", -] - -CONFIGS = [ - common_setup.IssueConfig("seq-opt-bjolp", [], - driver_options=["--alias", "seq-opt-bjolp"]), - common_setup.IssueConfig( - "lm-exhaust", ["--evaluator", - "lmc=lmcount(lm_exhaust(),admissible=true)", - "--search", "astar(lmc,lazy_evaluator=lmc)"]), - common_setup.IssueConfig( - "lm-hm2", ["--evaluator", - "lmc=lmcount(lm_hm(m=2),admissible=true)", - "--search", "astar(lmc,lazy_evaluator=lmc)"]), -] - -BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"] -REPO = os.environ["DOWNWARD_REPO"] - -if common_setup.is_running_on_cluster(): - SUITE = common_setup.DEFAULT_OPTIMAL_SUITE - ENVIRONMENT = BaselSlurmEnvironment( - partition="infai_2", - email="remo.christen@unibas.ch", - export=["PATH", "DOWNWARD_BENCHMARKS"], - ) -else: - SUITE = common_setup.IssueExperiment.DEFAULT_TEST_SUITE - ENVIRONMENT = LocalEnvironment(processes=2) - -exp = common_setup.IssueExperiment( - revisions=REVISIONS, - configs=CONFIGS, - environment=ENVIRONMENT, -) - -exp.add_suite(BENCHMARKS_DIR, SUITE) - -exp.add_parser(exp.EXITCODE_PARSER) -exp.add_parser(exp.SINGLE_SEARCH_PARSER) -exp.add_parser(exp.PLANNER_PARSER) -exp.add_parser("landmark_parser.py") - -ATTRIBUTES = IssueExperiment.DEFAULT_TABLE_ATTRIBUTES + [ - Attribute("landmarks", min_wins=False), - Attribute("disjunctive_landmarks", min_wins=False), - Attribute("conjunctive_landmarks", min_wins=False), - Attribute("orderings", min_wins=False), -] - -exp.add_step("build", exp.build) -exp.add_step("start", exp.start_runs) -exp.add_fetcher(name="fetch") -exp.add_comparison_table_step(attributes=ATTRIBUTES) -exp.add_parse_again_step() - -exp.run_steps() diff --git a/experiments/issue1009/v1-satisficing.py 
b/experiments/issue1009/v1-satisficing.py deleted file mode 100755 index 4541bbb5ec..0000000000 --- a/experiments/issue1009/v1-satisficing.py +++ /dev/null @@ -1,130 +0,0 @@ -#! /usr/bin/env python -# -*- coding: utf-8 -*- - -import common_setup -import itertools - -from common_setup import IssueConfig, IssueExperiment - -import os - -from lab.reports import Attribute - -from lab.environments import LocalEnvironment, BaselSlurmEnvironment - -CONFIGS_COMMON = [ - common_setup.IssueConfig( - "lama-first", [], driver_options=["--alias", "lama-first"]), - common_setup.IssueConfig( - "lama-first-pref", ["--evaluator", - "hlm=lmcount(lm_factory=lm_reasonable_orders_hps(" - "lm_rhw()),transform=adapt_costs(one),pref=true)", - "--evaluator", "hff=ff(transform=adapt_costs(one))", - "--search", - """lazy_greedy([hff,hlm],preferred=[hff,hlm], cost_type=one,reopen_closed=false)"""]), - common_setup.IssueConfig( - "lama-second", ["--evaluator", - "hlm=lmcount(lm_factory=lm_reasonable_orders_hps(" - "lm_rhw()),transform=adapt_costs(plusone))", - "--evaluator", "hff=ff(transform=adapt_costs(plusone))", - "--search", - """lazy_greedy([hff,hlm],preferred=[hff,hlm], reopen_closed=false)"""]), -] -CONFIGS_OLD = [ - common_setup.IssueConfig( - "lm-merged", ["--search", - "eager_greedy([lmcount(lm_merged([lm_exhaust(), lm_zg()]))])"]), - common_setup.IssueConfig( - "lm-exhaust", ["--search", - "eager_greedy([lmcount(lm_exhaust())])"]), - common_setup.IssueConfig( - "lm-zg", ["--search", - "eager_greedy([lmcount(lm_zg())])"]), - common_setup.IssueConfig( - "lm-hm", ["--search", - "eager_greedy([lmcount(lm_hm(m=2))])"]), -] -CONFIGS_NEW = [ - common_setup.IssueConfig( - "lm-merged", ["--search", - "eager_greedy([lmcount(lm_merged([lm_exhaust(), lm_zg()]), transform=adapt_costs(one))])"]), - common_setup.IssueConfig( - "lm-exhaust", ["--search", - "eager_greedy([lmcount(lm_exhaust(), transform=adapt_costs(one))])"]), - common_setup.IssueConfig( - "lm-zg", ["--search", - 
"eager_greedy([lmcount(lm_zg(), transform=adapt_costs(one))])"]), - common_setup.IssueConfig( - "lm-hm", ["--search", - "eager_greedy([lmcount(lm_hm(m=2), transform=adapt_costs(one))])"]), -] - -revconf_base = ("issue1009-base", CONFIGS_COMMON + CONFIGS_OLD) -revconf_v1 = ("issue1009-v1", CONFIGS_COMMON + CONFIGS_NEW) -revconf_v2 = ("issue1009-v2", CONFIGS_COMMON + CONFIGS_NEW) -REVCONFS = [revconf_base, revconf_v1, revconf_v2] - - -BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"] -REPO = os.environ["DOWNWARD_REPO"] - -if common_setup.is_running_on_cluster(): - SUITE = common_setup.DEFAULT_SATISFICING_SUITE - ENVIRONMENT = BaselSlurmEnvironment( - partition="infai_2", - email="remo.christen@unibas.ch", - export=["PATH", "DOWNWARD_BENCHMARKS"], - ) -else: - SUITE = common_setup.IssueExperiment.DEFAULT_TEST_SUITE - ENVIRONMENT = LocalEnvironment(processes=2) - -exp = common_setup.IssueExperiment(revisions=[], configs=[], environment=ENVIRONMENT) - -# Manually add relevant revision/config combinations to algorithms -for revconv in REVCONFS: - for config in revconv[1]: - exp.add_algorithm( - common_setup.get_algo_nick(revconv[0], config.nick), - common_setup.get_repo_base(), - revconv[0], - config.component_options, - build_options=config.build_options, - driver_options=config.driver_options) - -exp.add_suite(BENCHMARKS_DIR, SUITE) - -exp.add_parser(exp.ANYTIME_SEARCH_PARSER) -exp.add_parser(exp.EXITCODE_PARSER) -exp.add_parser(exp.SINGLE_SEARCH_PARSER) -exp.add_parser(exp.PLANNER_PARSER) -exp.add_parser("landmark_parser.py") - -ATTRIBUTES = IssueExperiment.DEFAULT_TABLE_ATTRIBUTES + [ - Attribute("landmarks", min_wins=False), - Attribute("disjunctive_landmarks", min_wins=False), - Attribute("conjunctive_landmarks", min_wins=False), - Attribute("orderings", min_wins=False), -] - -exp.add_step("build", exp.build) -exp.add_step("start", exp.start_runs) -exp.add_fetcher(name="fetch") - -def make_comparison_tables(): - for revconf1, revconf2 in 
itertools.combinations(REVCONFS, 2): - compared_configs = [] - for config1, config2 in zip(revconf1[1], revconf2[1]): - compared_configs.append( - ("{0}-{1}".format(revconf1[0], config1.nick), - "{0}-{1}".format(revconf2[0], config2.nick))) - report = common_setup.ComparativeReport(compared_configs, attributes=ATTRIBUTES) - outfile = os.path.join( - exp.eval_dir, - "{0}-{1}-{2}-compare.{3}".format(exp.name, revconf1[0], revconf2[0], report.output_format)) - report(exp.eval_dir, outfile) - -exp.add_step("make-comparison-tables", make_comparison_tables) -exp.add_parse_again_step() - -exp.run_steps() diff --git a/experiments/issue1009/v2-satisficing.py b/experiments/issue1009/v2-satisficing.py deleted file mode 100755 index e141da4ad4..0000000000 --- a/experiments/issue1009/v2-satisficing.py +++ /dev/null @@ -1,129 +0,0 @@ -#! /usr/bin/env python -# -*- coding: utf-8 -*- - -import common_setup -import itertools - -from common_setup import IssueConfig, IssueExperiment - -import os - -from lab.reports import Attribute - -from lab.environments import LocalEnvironment, BaselSlurmEnvironment - -CONFIGS_COMMON = [ - common_setup.IssueConfig( - "lama-first", [], driver_options=["--alias", "lama-first"]), - common_setup.IssueConfig( - "lama-first-pref", ["--evaluator", - "hlm=lmcount(lm_factory=lm_reasonable_orders_hps(" - "lm_rhw()),transform=adapt_costs(one),pref=true)", - "--evaluator", "hff=ff(transform=adapt_costs(one))", - "--search", - """lazy_greedy([hff,hlm],preferred=[hff,hlm], cost_type=one,reopen_closed=false)"""]), - common_setup.IssueConfig( - "lama-second", ["--evaluator", - "hlm=lmcount(lm_factory=lm_reasonable_orders_hps(" - "lm_rhw()),transform=adapt_costs(plusone))", - "--evaluator", "hff=ff(transform=adapt_costs(plusone))", - "--search", - """lazy_greedy([hff,hlm],preferred=[hff,hlm], reopen_closed=false)"""]), -] -CONFIGS_OLD = [ - common_setup.IssueConfig( - "lm-merged", ["--search", - "eager_greedy([lmcount(lm_merged([lm_exhaust(), lm_zg()]))])"]), - 
common_setup.IssueConfig( - "lm-exhaust", ["--search", - "eager_greedy([lmcount(lm_exhaust())])"]), - common_setup.IssueConfig( - "lm-zg", ["--search", - "eager_greedy([lmcount(lm_zg())])"]), - common_setup.IssueConfig( - "lm-hm", ["--search", - "eager_greedy([lmcount(lm_hm(m=2))])"]), -] -CONFIGS_NEW = [ - common_setup.IssueConfig( - "lm-merged", ["--search", - "eager_greedy([lmcount(lm_merged([lm_exhaust(), lm_zg()]), transform=adapt_costs(one))])"]), - common_setup.IssueConfig( - "lm-exhaust", ["--search", - "eager_greedy([lmcount(lm_exhaust(), transform=adapt_costs(one))])"]), - common_setup.IssueConfig( - "lm-zg", ["--search", - "eager_greedy([lmcount(lm_zg(), transform=adapt_costs(one))])"]), - common_setup.IssueConfig( - "lm-hm", ["--search", - "eager_greedy([lmcount(lm_hm(m=2), transform=adapt_costs(one))])"]), -] - -revconf_base = ("issue1009-base", CONFIGS_COMMON + CONFIGS_OLD) -revconf_v3 = ("issue1009-v3", CONFIGS_COMMON + CONFIGS_NEW) -REVCONFS = [revconf_base, revconf_v3] - - -BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"] -REPO = os.environ["DOWNWARD_REPO"] - -if common_setup.is_running_on_cluster(): - SUITE = common_setup.DEFAULT_SATISFICING_SUITE - ENVIRONMENT = BaselSlurmEnvironment( - partition="infai_2", - email="remo.christen@unibas.ch", - export=["PATH", "DOWNWARD_BENCHMARKS"], - ) -else: - SUITE = common_setup.IssueExperiment.DEFAULT_TEST_SUITE - ENVIRONMENT = LocalEnvironment(processes=2) - -exp = common_setup.IssueExperiment(revisions=[], configs=[], environment=ENVIRONMENT) - -# Manually add relevant revision/config combinations to algorithms -for revconv in REVCONFS: - for config in revconv[1]: - exp.add_algorithm( - common_setup.get_algo_nick(revconv[0], config.nick), - common_setup.get_repo_base(), - revconv[0], - config.component_options, - build_options=config.build_options, - driver_options=config.driver_options) - -exp.add_suite(BENCHMARKS_DIR, SUITE) - -exp.add_parser(exp.ANYTIME_SEARCH_PARSER) 
-exp.add_parser(exp.EXITCODE_PARSER) -exp.add_parser(exp.SINGLE_SEARCH_PARSER) -exp.add_parser(exp.PLANNER_PARSER) -exp.add_parser("landmark_parser.py") - -ATTRIBUTES = IssueExperiment.DEFAULT_TABLE_ATTRIBUTES + [ - Attribute("landmarks", min_wins=False), - Attribute("disjunctive_landmarks", min_wins=False), - Attribute("conjunctive_landmarks", min_wins=False), - Attribute("orderings", min_wins=False), -] - -exp.add_step("build", exp.build) -exp.add_step("start", exp.start_runs) -exp.add_fetcher(name="fetch") - -def make_comparison_tables(): - for revconf1, revconf2 in itertools.combinations(REVCONFS, 2): - compared_configs = [] - for config1, config2 in zip(revconf1[1], revconf2[1]): - compared_configs.append( - ("{0}-{1}".format(revconf1[0], config1.nick), - "{0}-{1}".format(revconf2[0], config2.nick))) - report = common_setup.ComparativeReport(compared_configs, attributes=ATTRIBUTES) - outfile = os.path.join( - exp.eval_dir, - "{0}-{1}-{2}-compare.{3}".format(exp.name, revconf1[0], revconf2[0], report.output_format)) - report(exp.eval_dir, outfile) - -exp.add_step("make-comparison-tables", make_comparison_tables) -exp.add_parse_again_step() - -exp.run_steps() diff --git a/experiments/issue1018/common_setup.py b/experiments/issue1018/common_setup.py deleted file mode 100644 index f5be072ae3..0000000000 --- a/experiments/issue1018/common_setup.py +++ /dev/null @@ -1,394 +0,0 @@ -# -*- coding: utf-8 -*- - -import itertools -import os -import platform -import subprocess -import sys - -from lab.experiment import ARGPARSER -from lab import tools - -from downward.experiment import FastDownwardExperiment -from downward.reports.absolute import AbsoluteReport -from downward.reports.compare import ComparativeReport -from downward.reports.scatter import ScatterPlotReport - - -def parse_args(): - ARGPARSER.add_argument( - "--test", - choices=["yes", "no", "auto"], - default="auto", - dest="test_run", - help="test experiment locally on a small suite if --test=yes or " - 
"--test=auto and we are not on a cluster") - return ARGPARSER.parse_args() - -ARGS = parse_args() - - -DEFAULT_OPTIMAL_SUITE = [ - 'agricola-opt18-strips', 'airport', 'barman-opt11-strips', - 'barman-opt14-strips', 'blocks', 'childsnack-opt14-strips', - 'data-network-opt18-strips', 'depot', 'driverlog', - 'elevators-opt08-strips', 'elevators-opt11-strips', - 'floortile-opt11-strips', 'floortile-opt14-strips', 'freecell', - 'ged-opt14-strips', 'grid', 'gripper', 'hiking-opt14-strips', - 'logistics00', 'logistics98', 'miconic', 'movie', 'mprime', - 'mystery', 'nomystery-opt11-strips', 'openstacks-opt08-strips', - 'openstacks-opt11-strips', 'openstacks-opt14-strips', - 'openstacks-strips', 'organic-synthesis-opt18-strips', - 'organic-synthesis-split-opt18-strips', 'parcprinter-08-strips', - 'parcprinter-opt11-strips', 'parking-opt11-strips', - 'parking-opt14-strips', 'pathways', 'pegsol-08-strips', - 'pegsol-opt11-strips', 'petri-net-alignment-opt18-strips', - 'pipesworld-notankage', 'pipesworld-tankage', 'psr-small', 'rovers', - 'satellite', 'scanalyzer-08-strips', 'scanalyzer-opt11-strips', - 'snake-opt18-strips', 'sokoban-opt08-strips', - 'sokoban-opt11-strips', 'spider-opt18-strips', 'storage', - 'termes-opt18-strips', 'tetris-opt14-strips', - 'tidybot-opt11-strips', 'tidybot-opt14-strips', 'tpp', - 'transport-opt08-strips', 'transport-opt11-strips', - 'transport-opt14-strips', 'trucks-strips', 'visitall-opt11-strips', - 'visitall-opt14-strips', 'woodworking-opt08-strips', - 'woodworking-opt11-strips', 'zenotravel'] - -DEFAULT_SATISFICING_SUITE = [ - 'agricola-sat18-strips', 'airport', 'assembly', - 'barman-sat11-strips', 'barman-sat14-strips', 'blocks', - 'caldera-sat18-adl', 'caldera-split-sat18-adl', 'cavediving-14-adl', - 'childsnack-sat14-strips', 'citycar-sat14-adl', - 'data-network-sat18-strips', 'depot', 'driverlog', - 'elevators-sat08-strips', 'elevators-sat11-strips', - 'flashfill-sat18-adl', 'floortile-sat11-strips', - 'floortile-sat14-strips', 
'freecell', 'ged-sat14-strips', 'grid', - 'gripper', 'hiking-sat14-strips', 'logistics00', 'logistics98', - 'maintenance-sat14-adl', 'miconic', 'miconic-fulladl', - 'miconic-simpleadl', 'movie', 'mprime', 'mystery', - 'nomystery-sat11-strips', 'nurikabe-sat18-adl', 'openstacks', - 'openstacks-sat08-adl', 'openstacks-sat08-strips', - 'openstacks-sat11-strips', 'openstacks-sat14-strips', - 'openstacks-strips', 'optical-telegraphs', - 'organic-synthesis-sat18-strips', - 'organic-synthesis-split-sat18-strips', 'parcprinter-08-strips', - 'parcprinter-sat11-strips', 'parking-sat11-strips', - 'parking-sat14-strips', 'pathways', - 'pegsol-08-strips', 'pegsol-sat11-strips', 'philosophers', - 'pipesworld-notankage', 'pipesworld-tankage', 'psr-large', - 'psr-middle', 'psr-small', 'rovers', 'satellite', - 'scanalyzer-08-strips', 'scanalyzer-sat11-strips', 'schedule', - 'settlers-sat18-adl', 'snake-sat18-strips', 'sokoban-sat08-strips', - 'sokoban-sat11-strips', 'spider-sat18-strips', 'storage', - 'termes-sat18-strips', 'tetris-sat14-strips', - 'thoughtful-sat14-strips', 'tidybot-sat11-strips', 'tpp', - 'transport-sat08-strips', 'transport-sat11-strips', - 'transport-sat14-strips', 'trucks', 'trucks-strips', - 'visitall-sat11-strips', 'visitall-sat14-strips', - 'woodworking-sat08-strips', 'woodworking-sat11-strips', - 'zenotravel'] - - -def get_script(): - """Get file name of main script.""" - return tools.get_script_path() - - -def get_script_dir(): - """Get directory of main script. - - Usually a relative directory (depends on how it was called by the user.)""" - return os.path.dirname(get_script()) - - -def get_experiment_name(): - """Get name for experiment. - - Derived from the absolute filename of the main script, e.g. 
- "/ham/spam/eggs.py" => "spam-eggs".""" - script = os.path.abspath(get_script()) - script_dir = os.path.basename(os.path.dirname(script)) - script_base = os.path.splitext(os.path.basename(script))[0] - return "%s-%s" % (script_dir, script_base) - - -def get_data_dir(): - """Get data dir for the experiment. - - This is the subdirectory "data" of the directory containing - the main script.""" - return os.path.join(get_script_dir(), "data", get_experiment_name()) - - -def get_repo_base(): - """Get base directory of the repository, as an absolute path. - - Search upwards in the directory tree from the main script until a - directory with a subdirectory named ".git" is found. - - Abort if the repo base cannot be found.""" - path = os.path.abspath(get_script_dir()) - while os.path.dirname(path) != path: - if os.path.exists(os.path.join(path, ".git")): - return path - path = os.path.dirname(path) - sys.exit("repo base could not be found") - - -def is_running_on_cluster(): - node = platform.node() - return node.endswith(".scicore.unibas.ch") or node.endswith(".cluster.bc2.ch") - - -def is_test_run(): - return ARGS.test_run == "yes" or ( - ARGS.test_run == "auto" and not is_running_on_cluster()) - - -def get_algo_nick(revision, config_nick): - return "{revision}-{config_nick}".format(**locals()) - - -class IssueConfig(object): - """Hold information about a planner configuration. - - See FastDownwardExperiment.add_algorithm() for documentation of the - constructor's options. 
- - """ - def __init__(self, nick, component_options, - build_options=None, driver_options=None): - self.nick = nick - self.component_options = component_options - self.build_options = build_options - self.driver_options = driver_options - - -class IssueExperiment(FastDownwardExperiment): - """Subclass of FastDownwardExperiment with some convenience features.""" - - DEFAULT_TEST_SUITE = ["depot:p01.pddl", "gripper:prob01.pddl"] - - DEFAULT_TABLE_ATTRIBUTES = [ - "cost", - "coverage", - "error", - "evaluations", - "expansions", - "expansions_until_last_jump", - "generated", - "memory", - "planner_memory", - "planner_time", - "quality", - "run_dir", - "score_evaluations", - "score_expansions", - "score_generated", - "score_memory", - "score_search_time", - "score_total_time", - "search_time", - "total_time", - ] - - DEFAULT_SCATTER_PLOT_ATTRIBUTES = [ - "evaluations", - "expansions", - "expansions_until_last_jump", - "initial_h_value", - "memory", - "search_time", - "total_time", - ] - - PORTFOLIO_ATTRIBUTES = [ - "cost", - "coverage", - "error", - "plan_length", - "run_dir", - ] - - def __init__(self, revisions=None, configs=None, path=None, **kwargs): - """ - - You can either specify both *revisions* and *configs* or none - of them. If they are omitted, you will need to call - exp.add_algorithm() manually. - - If *revisions* is given, it must be a non-empty list of - revision identifiers, which specify which planner versions to - use in the experiment. The same versions are used for - translator, preprocessor and search. :: - - IssueExperiment(revisions=["issue123", "4b3d581643"], ...) - - If *configs* is given, it must be a non-empty list of - IssueConfig objects. :: - - IssueExperiment(..., configs=[ - IssueConfig("ff", ["--search", "eager_greedy(ff())"]), - IssueConfig( - "lama", [], - driver_options=["--alias", "seq-sat-lama-2011"]), - ]) - - If *path* is specified, it must be the path to where the - experiment should be built (e.g. 
- /home/john/experiments/issue123/exp01/). If omitted, the - experiment path is derived automatically from the main - script's filename. Example:: - - script = experiments/issue123/exp01.py --> - path = experiments/issue123/data/issue123-exp01/ - - """ - - path = path or get_data_dir() - - FastDownwardExperiment.__init__(self, path=path, **kwargs) - - if (revisions and not configs) or (not revisions and configs): - raise ValueError( - "please provide either both or none of revisions and configs") - - for rev in revisions: - for config in configs: - self.add_algorithm( - get_algo_nick(rev, config.nick), - get_repo_base(), - rev, - config.component_options, - build_options=config.build_options, - driver_options=config.driver_options) - - self._revisions = revisions - self._configs = configs - - @classmethod - def _is_portfolio(cls, config_nick): - return "fdss" in config_nick - - @classmethod - def get_supported_attributes(cls, config_nick, attributes): - if cls._is_portfolio(config_nick): - return [attr for attr in attributes - if attr in cls.PORTFOLIO_ATTRIBUTES] - return attributes - - def add_absolute_report_step(self, **kwargs): - """Add step that makes an absolute report. - - Absolute reports are useful for experiments that don't compare - revisions. - - The report is written to the experiment evaluation directory. - - All *kwargs* will be passed to the AbsoluteReport class. If the - keyword argument *attributes* is not specified, a default list - of attributes is used. :: - - exp.add_absolute_report_step(attributes=["coverage"]) - - """ - kwargs.setdefault("attributes", self.DEFAULT_TABLE_ATTRIBUTES) - report = AbsoluteReport(**kwargs) - outfile = os.path.join( - self.eval_dir, - get_experiment_name() + "." 
+ report.output_format) - self.add_report(report, outfile=outfile) - self.add_step( - 'publish-absolute-report', subprocess.call, ['publish', outfile]) - - def add_comparison_table_step(self, **kwargs): - """Add a step that makes pairwise revision comparisons. - - Create comparative reports for all pairs of Fast Downward - revisions. Each report pairs up the runs of the same config and - lists the two absolute attribute values and their difference - for all attributes in kwargs["attributes"]. - - All *kwargs* will be passed to the CompareConfigsReport class. - If the keyword argument *attributes* is not specified, a - default list of attributes is used. :: - - exp.add_comparison_table_step(attributes=["coverage"]) - - """ - kwargs.setdefault("attributes", self.DEFAULT_TABLE_ATTRIBUTES) - - def make_comparison_tables(): - for rev1, rev2 in itertools.combinations(self._revisions, 2): - compared_configs = [] - for config in self._configs: - config_nick = config.nick - compared_configs.append( - ("%s-%s" % (rev1, config_nick), - "%s-%s" % (rev2, config_nick), - "Diff (%s)" % config_nick)) - report = ComparativeReport(compared_configs, **kwargs) - outfile = os.path.join( - self.eval_dir, - "%s-%s-%s-compare.%s" % ( - self.name, rev1, rev2, report.output_format)) - report(self.eval_dir, outfile) - - def publish_comparison_tables(): - for rev1, rev2 in itertools.combinations(self._revisions, 2): - outfile = os.path.join( - self.eval_dir, - "%s-%s-%s-compare.html" % (self.name, rev1, rev2)) - subprocess.call(["publish", outfile]) - - self.add_step("make-comparison-tables", make_comparison_tables) - self.add_step( - "publish-comparison-tables", publish_comparison_tables) - - def add_scatter_plot_step(self, relative=False, attributes=None): - """Add step creating (relative) scatter plots for all revision pairs. - - Create a scatter plot for each combination of attribute, - configuration and revisions pair. 
If *attributes* is not - specified, a list of common scatter plot attributes is used. - For portfolios all attributes except "cost", "coverage" and - "plan_length" will be ignored. :: - - exp.add_scatter_plot_step(attributes=["expansions"]) - - """ - if relative: - scatter_dir = os.path.join(self.eval_dir, "scatter-relative") - step_name = "make-relative-scatter-plots" - else: - scatter_dir = os.path.join(self.eval_dir, "scatter-absolute") - step_name = "make-absolute-scatter-plots" - if attributes is None: - attributes = self.DEFAULT_SCATTER_PLOT_ATTRIBUTES - - def make_scatter_plot(config_nick, rev1, rev2, attribute, config_nick2=None): - name = "-".join([self.name, rev1, rev2, attribute, config_nick]) - if config_nick2 is not None: - name += "-" + config_nick2 - print("Make scatter plot for", name) - algo1 = get_algo_nick(rev1, config_nick) - algo2 = get_algo_nick(rev2, config_nick if config_nick2 is None else config_nick2) - report = ScatterPlotReport( - filter_algorithm=[algo1, algo2], - attributes=[attribute], - relative=relative, - get_category=lambda run1, run2: run1["domain"]) - report( - self.eval_dir, - os.path.join(scatter_dir, rev1 + "-" + rev2, name)) - - def make_scatter_plots(): - for config in self._configs: - for rev1, rev2 in itertools.combinations(self._revisions, 2): - for attribute in self.get_supported_attributes( - config.nick, attributes): - make_scatter_plot(config.nick, rev1, rev2, attribute) - for nick1, nick2, rev1, rev2, attribute in additional: - make_scatter_plot(nick1, rev1, rev2, attribute, config_nick2=nick2) - - self.add_step(step_name, make_scatter_plots) diff --git a/experiments/issue1018/requirements.txt b/experiments/issue1018/requirements.txt deleted file mode 100644 index a6f6168007..0000000000 --- a/experiments/issue1018/requirements.txt +++ /dev/null @@ -1,11 +0,0 @@ -cycler==0.10.0 -kiwisolver==1.3.1 -lab==6.3 -matplotlib==3.3.4 -numpy==1.22.2 -Pillow==9.0.1 -pyparsing==2.4.7 -python-dateutil==2.8.1 -simplejson==3.17.2 
-six==1.15.0 -txt2tags==3.7 diff --git a/experiments/issue1018/v1.py b/experiments/issue1018/v1.py deleted file mode 100755 index e9ad10ccc2..0000000000 --- a/experiments/issue1018/v1.py +++ /dev/null @@ -1,70 +0,0 @@ -#! /usr/bin/env python -# -*- coding: utf-8 -*- - -import itertools -import os - -from lab.environments import LocalEnvironment, BaselSlurmEnvironment -from lab.reports import Attribute, geometric_mean - -from downward.reports.compare import ComparativeReport - -import common_setup -from common_setup import IssueConfig, IssueExperiment - -DIR = os.path.dirname(os.path.abspath(__file__)) -SCRIPT_NAME = os.path.splitext(os.path.basename(__file__))[0] -BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"] -REVISIONS = ["issue1018-base", "issue1018-v1"] -BUILDS = ["release"] -CONFIG_NICKS = [ - ('pdb-greedy', ['--search', 'astar(pdb(greedy()))']), - ('cpdbs-hct900', ['--search', 'astar(cpdbs(hillclimbing(max_time=900)))']), - ('zopdbs-ga', ['--search', 'astar(zopdbs(genetic()))']), -] -CONFIGS = [ - IssueConfig( - config_nick, - config, - build_options=[build], - driver_options=["--build", build]) - for build in BUILDS - for config_nick, config in CONFIG_NICKS -] - -SUITE = common_setup.DEFAULT_OPTIMAL_SUITE -ENVIRONMENT = BaselSlurmEnvironment( - partition="infai_2", - email="silvan.sievers@unibas.ch", - # paths obtained via: - # module purge - # module -q load CMake/3.15.3-GCCcore-8.3.0 - # module -q load GCC/8.3.0 - # echo $PATH - # echo $LD_LIBRARY_PATH - setup='export PATH=/scicore/soft/apps/binutils/2.32-GCCcore-8.3.0/bin:/scicore/soft/apps/CMake/3.15.3-GCCcore-8.3.0/bin:/scicore/soft/apps/cURL/7.66.0-GCCcore-8.3.0/bin:/scicore/soft/apps/bzip2/1.0.8-GCCcore-8.3.0/bin:/scicore/soft/apps/ncurses/6.1-GCCcore-8.3.0/bin:/scicore/soft/apps/GCCcore/8.3.0/bin:/infai/sieverss/repos/bin:/infai/sieverss/local:/export/soft/lua_lmod/centos7/lmod/lmod/libexec:/usr/local/bin:/usr/bin:/usr/local/sbin:/usr/sbin:$PATH\nexport 
LD_LIBRARY_PATH=/scicore/soft/apps/binutils/2.32-GCCcore-8.3.0/lib:/scicore/soft/apps/cURL/7.66.0-GCCcore-8.3.0/lib:/scicore/soft/apps/bzip2/1.0.8-GCCcore-8.3.0/lib:/scicore/soft/apps/zlib/1.2.11-GCCcore-8.3.0/lib:/scicore/soft/apps/ncurses/6.1-GCCcore-8.3.0/lib:/scicore/soft/apps/GCCcore/8.3.0/lib64:/scicore/soft/apps/GCCcore/8.3.0/lib') - -if common_setup.is_test_run(): - SUITE = IssueExperiment.DEFAULT_TEST_SUITE - ENVIRONMENT = LocalEnvironment(processes=4) - -exp = IssueExperiment( - revisions=REVISIONS, - configs=CONFIGS, - environment=ENVIRONMENT, -) -exp.add_suite(BENCHMARKS_DIR, SUITE) - -exp.add_parser(exp.EXITCODE_PARSER) -exp.add_parser(exp.TRANSLATOR_PARSER) -exp.add_parser(exp.SINGLE_SEARCH_PARSER) -exp.add_parser(exp.PLANNER_PARSER) - -exp.add_step('build', exp.build) -exp.add_step('start', exp.start_runs) -exp.add_fetcher(name='fetch') - -attributes = exp.DEFAULT_TABLE_ATTRIBUTES -exp.add_comparison_table_step(attributes=attributes) - -exp.run_steps() diff --git a/experiments/issue1041/common_setup.py b/experiments/issue1041/common_setup.py deleted file mode 100644 index eeca3aadb5..0000000000 --- a/experiments/issue1041/common_setup.py +++ /dev/null @@ -1,395 +0,0 @@ -# -*- coding: utf-8 -*- - -import itertools -import os -import platform -import subprocess -import sys - -from lab.experiment import ARGPARSER -from lab import tools - -from downward.experiment import FastDownwardExperiment -from downward.reports.absolute import AbsoluteReport -from downward.reports.compare import ComparativeReport -from downward.reports.scatter import ScatterPlotReport - - -def parse_args(): - ARGPARSER.add_argument( - "--test", - choices=["yes", "no", "auto"], - default="auto", - dest="test_run", - help="test experiment locally on a small suite if --test=yes or " - "--test=auto and we are not on a cluster") - return ARGPARSER.parse_args() - -ARGS = parse_args() - - -DEFAULT_OPTIMAL_SUITE = [ - 'agricola-opt18-strips', 'airport', 'barman-opt11-strips', - 
'barman-opt14-strips', 'blocks', 'childsnack-opt14-strips', - 'data-network-opt18-strips', 'depot', 'driverlog', - 'elevators-opt08-strips', 'elevators-opt11-strips', - 'floortile-opt11-strips', 'floortile-opt14-strips', 'freecell', - 'ged-opt14-strips', 'grid', 'gripper', 'hiking-opt14-strips', - 'logistics00', 'logistics98', 'miconic', 'movie', 'mprime', - 'mystery', 'nomystery-opt11-strips', 'openstacks-opt08-strips', - 'openstacks-opt11-strips', 'openstacks-opt14-strips', - 'openstacks-strips', 'organic-synthesis-opt18-strips', - 'organic-synthesis-split-opt18-strips', 'parcprinter-08-strips', - 'parcprinter-opt11-strips', 'parking-opt11-strips', - 'parking-opt14-strips', 'pathways-noneg', 'pegsol-08-strips', - 'pegsol-opt11-strips', 'petri-net-alignment-opt18-strips', - 'pipesworld-notankage', 'pipesworld-tankage', 'psr-small', 'rovers', - 'satellite', 'scanalyzer-08-strips', 'scanalyzer-opt11-strips', - 'snake-opt18-strips', 'sokoban-opt08-strips', - 'sokoban-opt11-strips', 'spider-opt18-strips', 'storage', - 'termes-opt18-strips', 'tetris-opt14-strips', - 'tidybot-opt11-strips', 'tidybot-opt14-strips', 'tpp', - 'transport-opt08-strips', 'transport-opt11-strips', - 'transport-opt14-strips', 'trucks-strips', 'visitall-opt11-strips', - 'visitall-opt14-strips', 'woodworking-opt08-strips', - 'woodworking-opt11-strips', 'zenotravel'] - -DEFAULT_SATISFICING_SUITE = [ - 'agricola-sat18-strips', 'airport', 'assembly', - 'barman-sat11-strips', 'barman-sat14-strips', 'blocks', - 'caldera-sat18-adl', 'caldera-split-sat18-adl', 'cavediving-14-adl', - 'childsnack-sat14-strips', 'citycar-sat14-adl', - 'data-network-sat18-strips', 'depot', 'driverlog', - 'elevators-sat08-strips', 'elevators-sat11-strips', - 'flashfill-sat18-adl', 'floortile-sat11-strips', - 'floortile-sat14-strips', 'freecell', 'ged-sat14-strips', 'grid', - 'gripper', 'hiking-sat14-strips', 'logistics00', 'logistics98', - 'maintenance-sat14-adl', 'miconic', 'miconic-fulladl', - 'miconic-simpleadl', 'movie', 
'mprime', 'mystery', - 'nomystery-sat11-strips', 'nurikabe-sat18-adl', 'openstacks', - 'openstacks-sat08-adl', 'openstacks-sat08-strips', - 'openstacks-sat11-strips', 'openstacks-sat14-strips', - 'openstacks-strips', 'optical-telegraphs', - 'organic-synthesis-sat18-strips', - 'organic-synthesis-split-sat18-strips', 'parcprinter-08-strips', - 'parcprinter-sat11-strips', 'parking-sat11-strips', - 'parking-sat14-strips', 'pathways', 'pathways-noneg', - 'pegsol-08-strips', 'pegsol-sat11-strips', 'philosophers', - 'pipesworld-notankage', 'pipesworld-tankage', 'psr-large', - 'psr-middle', 'psr-small', 'rovers', 'satellite', - 'scanalyzer-08-strips', 'scanalyzer-sat11-strips', 'schedule', - 'settlers-sat18-adl', 'snake-sat18-strips', 'sokoban-sat08-strips', - 'sokoban-sat11-strips', 'spider-sat18-strips', 'storage', - 'termes-sat18-strips', 'tetris-sat14-strips', - 'thoughtful-sat14-strips', 'tidybot-sat11-strips', 'tpp', - 'transport-sat08-strips', 'transport-sat11-strips', - 'transport-sat14-strips', 'trucks', 'trucks-strips', - 'visitall-sat11-strips', 'visitall-sat14-strips', - 'woodworking-sat08-strips', 'woodworking-sat11-strips', - 'zenotravel'] - - -def get_script(): - """Get file name of main script.""" - return tools.get_script_path() - - -def get_script_dir(): - """Get directory of main script. - - Usually a relative directory (depends on how it was called by the user.)""" - return os.path.dirname(get_script()) - - -def get_experiment_name(): - """Get name for experiment. - - Derived from the absolute filename of the main script, e.g. - "/ham/spam/eggs.py" => "spam-eggs".""" - script = os.path.abspath(get_script()) - script_dir = os.path.basename(os.path.dirname(script)) - script_base = os.path.splitext(os.path.basename(script))[0] - return "%s-%s" % (script_dir, script_base) - - -def get_data_dir(): - """Get data dir for the experiment. 
- - This is the subdirectory "data" of the directory containing - the main script.""" - return os.path.join(get_script_dir(), "data", get_experiment_name()) - - -def get_repo_base(): - """Get base directory of the repository, as an absolute path. - - Search upwards in the directory tree from the main script until a - directory with a subdirectory named ".git" is found. - - Abort if the repo base cannot be found.""" - path = os.path.abspath(get_script_dir()) - while os.path.dirname(path) != path: - if os.path.exists(os.path.join(path, ".git")): - return path - path = os.path.dirname(path) - sys.exit("repo base could not be found") - - -def is_running_on_cluster(): - node = platform.node() - return node.endswith(".scicore.unibas.ch") or node.endswith(".cluster.bc2.ch") - - -def is_test_run(): - return ARGS.test_run == "yes" or ( - ARGS.test_run == "auto" and not is_running_on_cluster()) - - -def get_algo_nick(revision, config_nick): - return "{revision}-{config_nick}".format(**locals()) - - -class IssueConfig(object): - """Hold information about a planner configuration. - - See FastDownwardExperiment.add_algorithm() for documentation of the - constructor's options. 
- - """ - def __init__(self, nick, component_options, - build_options=None, driver_options=None): - self.nick = nick - self.component_options = component_options - self.build_options = build_options - self.driver_options = driver_options - - -class IssueExperiment(FastDownwardExperiment): - """Subclass of FastDownwardExperiment with some convenience features.""" - - DEFAULT_TEST_SUITE = ["depot:p01.pddl", "gripper:prob01.pddl"] - - DEFAULT_TABLE_ATTRIBUTES = [ - "cost", - "coverage", - "error", - "evaluations", - "expansions", - "expansions_until_last_jump", - "initial_h_value", - "generated", - "memory", - "planner_memory", - "planner_time", - "quality", - "run_dir", - "score_evaluations", - "score_expansions", - "score_generated", - "score_memory", - "score_search_time", - "score_total_time", - "search_time", - "total_time", - ] - - DEFAULT_SCATTER_PLOT_ATTRIBUTES = [ - "evaluations", - "expansions", - "expansions_until_last_jump", - "initial_h_value", - "memory", - "search_time", - "total_time", - ] - - PORTFOLIO_ATTRIBUTES = [ - "cost", - "coverage", - "error", - "plan_length", - "run_dir", - ] - - def __init__(self, revisions=None, configs=None, path=None, **kwargs): - """ - - You can either specify both *revisions* and *configs* or none - of them. If they are omitted, you will need to call - exp.add_algorithm() manually. - - If *revisions* is given, it must be a non-empty list of - revision identifiers, which specify which planner versions to - use in the experiment. The same versions are used for - translator, preprocessor and search. :: - - IssueExperiment(revisions=["issue123", "4b3d581643"], ...) - - If *configs* is given, it must be a non-empty list of - IssueConfig objects. :: - - IssueExperiment(..., configs=[ - IssueConfig("ff", ["--search", "eager_greedy(ff())"]), - IssueConfig( - "lama", [], - driver_options=["--alias", "seq-sat-lama-2011"]), - ]) - - If *path* is specified, it must be the path to where the - experiment should be built (e.g. 
- /home/john/experiments/issue123/exp01/). If omitted, the - experiment path is derived automatically from the main - script's filename. Example:: - - script = experiments/issue123/exp01.py --> - path = experiments/issue123/data/issue123-exp01/ - - """ - - path = path or get_data_dir() - - FastDownwardExperiment.__init__(self, path=path, **kwargs) - - if (revisions and not configs) or (not revisions and configs): - raise ValueError( - "please provide either both or none of revisions and configs") - - for rev in revisions: - for config in configs: - self.add_algorithm( - get_algo_nick(rev, config.nick), - get_repo_base(), - rev, - config.component_options, - build_options=config.build_options, - driver_options=config.driver_options) - - self._revisions = revisions - self._configs = configs - - @classmethod - def _is_portfolio(cls, config_nick): - return "fdss" in config_nick - - @classmethod - def get_supported_attributes(cls, config_nick, attributes): - if cls._is_portfolio(config_nick): - return [attr for attr in attributes - if attr in cls.PORTFOLIO_ATTRIBUTES] - return attributes - - def add_absolute_report_step(self, **kwargs): - """Add step that makes an absolute report. - - Absolute reports are useful for experiments that don't compare - revisions. - - The report is written to the experiment evaluation directory. - - All *kwargs* will be passed to the AbsoluteReport class. If the - keyword argument *attributes* is not specified, a default list - of attributes is used. :: - - exp.add_absolute_report_step(attributes=["coverage"]) - - """ - kwargs.setdefault("attributes", self.DEFAULT_TABLE_ATTRIBUTES) - report = AbsoluteReport(**kwargs) - outfile = os.path.join( - self.eval_dir, - get_experiment_name() + "." 
+ report.output_format) - self.add_report(report, outfile=outfile) - self.add_step( - 'publish-absolute-report', subprocess.call, ['publish', outfile]) - - def add_comparison_table_step(self, **kwargs): - """Add a step that makes pairwise revision comparisons. - - Create comparative reports for all pairs of Fast Downward - revisions. Each report pairs up the runs of the same config and - lists the two absolute attribute values and their difference - for all attributes in kwargs["attributes"]. - - All *kwargs* will be passed to the CompareConfigsReport class. - If the keyword argument *attributes* is not specified, a - default list of attributes is used. :: - - exp.add_comparison_table_step(attributes=["coverage"]) - - """ - kwargs.setdefault("attributes", self.DEFAULT_TABLE_ATTRIBUTES) - - def make_comparison_tables(): - for rev1, rev2 in itertools.combinations(self._revisions, 2): - compared_configs = [] - for config in self._configs: - config_nick = config.nick - compared_configs.append( - ("%s-%s" % (rev1, config_nick), - "%s-%s" % (rev2, config_nick), - "Diff (%s)" % config_nick)) - report = ComparativeReport(compared_configs, **kwargs) - outfile = os.path.join( - self.eval_dir, - "%s-%s-%s-compare.%s" % ( - self.name, rev1, rev2, report.output_format)) - report(self.eval_dir, outfile) - - def publish_comparison_tables(): - for rev1, rev2 in itertools.combinations(self._revisions, 2): - outfile = os.path.join( - self.eval_dir, - "%s-%s-%s-compare.html" % (self.name, rev1, rev2)) - subprocess.call(["publish", outfile]) - - self.add_step("make-comparison-tables", make_comparison_tables) - self.add_step( - "publish-comparison-tables", publish_comparison_tables) - - def add_scatter_plot_step(self, relative=False, attributes=None, additional=[]): - """Add step creating (relative) scatter plots for all revision pairs. - - Create a scatter plot for each combination of attribute, - configuration and revisions pair. 
If *attributes* is not - specified, a list of common scatter plot attributes is used. - For portfolios all attributes except "cost", "coverage" and - "plan_length" will be ignored. :: - - exp.add_scatter_plot_step(attributes=["expansions"]) - - """ - if relative: - scatter_dir = os.path.join(self.eval_dir, "scatter-relative") - step_name = "make-relative-scatter-plots" - else: - scatter_dir = os.path.join(self.eval_dir, "scatter-absolute") - step_name = "make-absolute-scatter-plots" - if attributes is None: - attributes = self.DEFAULT_SCATTER_PLOT_ATTRIBUTES - - def make_scatter_plot(config_nick, rev1, rev2, attribute, config_nick2=None): - name = "-".join([self.name, rev1, rev2, attribute, config_nick]) - if config_nick2 is not None: - name += "-" + config_nick2 - print("Make scatter plot for", name) - algo1 = get_algo_nick(rev1, config_nick) - algo2 = get_algo_nick(rev2, config_nick if config_nick2 is None else config_nick2) - report = ScatterPlotReport( - filter_algorithm=[algo1, algo2], - attributes=[attribute], - relative=relative, - get_category=lambda run1, run2: run1["domain"]) - report( - self.eval_dir, - os.path.join(scatter_dir, rev1 + "-" + rev2, name)) - - def make_scatter_plots(): - for config in self._configs: - for rev1, rev2 in itertools.combinations(self._revisions, 2): - for attribute in self.get_supported_attributes( - config.nick, attributes): - make_scatter_plot(config.nick, rev1, rev2, attribute) - for nick1, nick2, rev1, rev2, attribute in additional: - make_scatter_plot(nick1, rev1, rev2, attribute, config_nick2=nick2) - - self.add_step(step_name, lambda: make_scatter_plots) diff --git a/experiments/issue1041/landmark_parser.py b/experiments/issue1041/landmark_parser.py deleted file mode 100755 index 943492471b..0000000000 --- a/experiments/issue1041/landmark_parser.py +++ /dev/null @@ -1,29 +0,0 @@ -#! 
/usr/bin/env python - -import re - -from lab.parser import Parser - -parser = Parser() -parser.add_pattern( - "lmgraph_generation_time", - r"Landmark graph generation time: (.+)s", - type=float) -parser.add_pattern( - "landmarks", - r"Landmark graph contains (\d+) landmarks, of which \d+ are disjunctive and \d+ are conjunctive.", - type=int) -parser.add_pattern( - "landmarks_disjunctive", - r"Landmark graph contains \d+ landmarks, of which (\d+) are disjunctive and \d+ are conjunctive.", - type=int) -parser.add_pattern( - "landmarks_conjunctive", - r"Landmark graph contains \d+ landmarks, of which \d+ are disjunctive and (\d+) are conjunctive.", - type=int) -parser.add_pattern( - "orderings", - r"Landmark graph contains (\d+) orderings.", - type=int) - -parser.parse() diff --git a/experiments/issue1041/v1-optimal.py b/experiments/issue1041/v1-optimal.py deleted file mode 100755 index 5783745af2..0000000000 --- a/experiments/issue1041/v1-optimal.py +++ /dev/null @@ -1,88 +0,0 @@ -#! /usr/bin/env python -# -*- coding: utf-8 -*- - -import common_setup -from common_setup import IssueConfig, IssueExperiment - -import os - -from lab.reports import Attribute - -from lab.environments import LocalEnvironment, BaselSlurmEnvironment - -def make_comparison_table(): - report = common_setup.ComparativeReport( - algorithm_pairs=[ - ("issue1041-base-seq-opt-bjolp", "issue1041-v1-seq-opt-bjolp"), - ("issue1041-base-seq-opt-bjolp-opt", "issue1041-v1-seq-opt-bjolp-opt"), - ("issue1041-base-lm-exhaust", "issue1041-v1-lm-exhaust"), - ], attributes=ATTRIBUTES, - ) - outfile = os.path.join( - exp.eval_dir, "%s-compare.%s" % (exp.name, report.output_format) - ) - report(exp.eval_dir, outfile) - - exp.add_report(report) - - -REVISIONS = [ - "issue1041-base", - "issue1041-v1", -] - -CONFIGS = [ - common_setup.IssueConfig("seq-opt-bjolp", [], - driver_options=["--alias", "seq-opt-bjolp"]), - common_setup.IssueConfig( - "lm-exhaust", ["--evaluator", - 
"lmc=lmcount(lm_exhaust(),admissible=true)", - "--search", "astar(lmc,lazy_evaluator=lmc)"]), - common_setup.IssueConfig( - "seq-opt-bjolp-opt", ["--evaluator", - "lmc=lmcount(lm_merged([lm_rhw(),lm_hm(m=1)]),admissible=true, optimal=true)", - "--search", "astar(lmc,lazy_evaluator=lmc)"]), -] - -BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"] -REPO = os.environ["DOWNWARD_REPO"] - -if common_setup.is_running_on_cluster(): - SUITE = common_setup.DEFAULT_OPTIMAL_SUITE - ENVIRONMENT = BaselSlurmEnvironment( - partition="infai_2", - email="clemens.buechner@unibas.ch", - export=["PATH", "DOWNWARD_BENCHMARKS"], - ) -else: - SUITE = common_setup.IssueExperiment.DEFAULT_TEST_SUITE - ENVIRONMENT = LocalEnvironment(processes=2) - -exp = common_setup.IssueExperiment( - revisions=REVISIONS, - configs=CONFIGS, - environment=ENVIRONMENT, -) - -exp.add_suite(BENCHMARKS_DIR, SUITE) - -exp.add_parser(exp.EXITCODE_PARSER) -exp.add_parser(exp.PLANNER_PARSER) -exp.add_parser(exp.SINGLE_SEARCH_PARSER) -exp.add_parser("landmark_parser.py") - -ATTRIBUTES = IssueExperiment.DEFAULT_TABLE_ATTRIBUTES + [ - Attribute("landmarks", min_wins=False), - Attribute("disjunctive_landmarks", min_wins=False), - Attribute("conjunctive_landmarks", min_wins=False), - Attribute("orderings", min_wins=False), -] - -exp.add_step("build", exp.build) -exp.add_step("start", exp.start_runs) -exp.add_fetcher(name="fetch") -exp.add_step("comparison table", make_comparison_table) -exp.add_parse_again_step() - -exp.run_steps() - diff --git a/experiments/issue1041/v1-satisficing.py b/experiments/issue1041/v1-satisficing.py deleted file mode 100755 index b11152dbd5..0000000000 --- a/experiments/issue1041/v1-satisficing.py +++ /dev/null @@ -1,89 +0,0 @@ -#! 
/usr/bin/env python -# -*- coding: utf-8 -*- - -import common_setup - -from common_setup import IssueConfig, IssueExperiment - -import os - -from lab.reports import Attribute - -from lab.environments import LocalEnvironment, BaselSlurmEnvironment - -def make_comparison_table(): - report = common_setup.ComparativeReport( - algorithm_pairs=[ - ("issue1041-base-lama-first", "issue1041-v1-lama-first"), - ("issue1041-base-lama-first-pref", "issue1041-v1-lama-first-pref"), - ("issue1041-base-lm-zg", "issue1041-v1-lm-zg"), - ], attributes=ATTRIBUTES, - ) - outfile = os.path.join( - exp.eval_dir, "%s-compare.%s" % (exp.name, report.output_format) - ) - report(exp.eval_dir, outfile) - exp.add_report(report) - -REVISIONS = [ - "issue1041-base", - "issue1041-v1", -] - -CONFIGS = [ - common_setup.IssueConfig("lama-first", [], - driver_options=["--alias", "lama-first"]), - common_setup.IssueConfig( - "lama-first-pref", ["--evaluator", - "hlm=lmcount(lm_factory=lm_reasonable_orders_hps(" - "lm_rhw()),transform=adapt_costs(one),pref=true)", - "--evaluator", "hff=ff(transform=adapt_costs(one))", - "--search", - """lazy_greedy([hff,hlm],preferred=[hff,hlm], cost_type=one,reopen_closed=false)"""]), - common_setup.IssueConfig("lm-zg", [ - "--search", "eager_greedy([lmcount(lm_zg())])"]), -] - -BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"] -REPO = os.environ["DOWNWARD_REPO"] - -if common_setup.is_running_on_cluster(): - SUITE = common_setup.DEFAULT_SATISFICING_SUITE - ENVIRONMENT = BaselSlurmEnvironment( - partition="infai_2", - email="clemens.buechner@unibas.ch", - export=["PATH", "DOWNWARD_BENCHMARKS"], - ) -else: - SUITE = common_setup.IssueExperiment.DEFAULT_TEST_SUITE - ENVIRONMENT = LocalEnvironment(processes=2) - -exp = common_setup.IssueExperiment( - revisions=REVISIONS, - configs=CONFIGS, - environment=ENVIRONMENT, -) - -exp.add_suite(BENCHMARKS_DIR, SUITE) - -exp.add_parser(exp.ANYTIME_SEARCH_PARSER) -exp.add_parser(exp.EXITCODE_PARSER) 
-exp.add_parser(exp.PLANNER_PARSER) -exp.add_parser(exp.SINGLE_SEARCH_PARSER) -exp.add_parser("landmark_parser.py") - -ATTRIBUTES = IssueExperiment.DEFAULT_TABLE_ATTRIBUTES + [ - Attribute("landmarks", min_wins=False), - Attribute("disjunctive_landmarks", min_wins=False), - Attribute("conjunctive_landmarks", min_wins=False), - Attribute("orderings", min_wins=False), -] - -exp.add_step("build", exp.build) -exp.add_step("start", exp.start_runs) -exp.add_fetcher(name="fetch") -exp.add_step("comparison table", make_comparison_table) -exp.add_parse_again_step() - -exp.run_steps() - diff --git a/experiments/issue1042/base.py b/experiments/issue1042/base.py deleted file mode 100755 index 0fb4dfb68e..0000000000 --- a/experiments/issue1042/base.py +++ /dev/null @@ -1,69 +0,0 @@ -#! /usr/bin/env python3 - -import itertools -import math -import os -import subprocess - -from lab.environments import LocalEnvironment, BaselSlurmEnvironment -from lab.reports import Attribute - -from downward.reports.compare import ComparativeReport - -import common_setup -from common_setup import IssueConfig, IssueExperiment - -DIR = os.path.dirname(os.path.abspath(__file__)) -SCRIPT_NAME = os.path.splitext(os.path.basename(__file__))[0] -BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"] -REVISIONS = ["issue1042-base"] -CONFIGS = [ - IssueConfig(f'astar-lmcut', ['--search', f'astar(lmcut())'], driver_options=['--search-time-limit', '5m']), - IssueConfig(f'astar-lmcut-sssimple', ['--search', f'astar(lmcut(),pruning=stubborn_sets_simple)'], driver_options=['--search-time-limit', '5m']), - IssueConfig(f'astar-lmcut-sssimple-limited', ['--search', f'astar(lmcut(),pruning=stubborn_sets_simple(min_required_pruning_ratio=0.2,expansions_before_checking_pruning_ratio=1000))'], driver_options=['--search-time-limit', '5m']), - IssueConfig(f'astar-lmcut-ssatom', ['--search', f'astar(lmcut(),pruning=atom_centric_stubborn_sets)'], driver_options=['--search-time-limit', '5m']), - 
IssueConfig(f'astar-lmcut-ssatom-limited', ['--search', f'astar(lmcut(),pruning=atom_centric_stubborn_sets(min_required_pruning_ratio=0.2,expansions_before_checking_pruning_ratio=1000))'], driver_options=['--search-time-limit', '5m']), -] - -SUITE = common_setup.DEFAULT_OPTIMAL_SUITE -ENVIRONMENT = BaselSlurmEnvironment( - email="silvan.sievers@unibas.ch", - partition="infai_2", - export=[], - # paths obtained via: - # module purge - # module -q load CMake/3.15.3-GCCcore-8.3.0 - # module -q load GCC/8.3.0 - # echo $PATH - # echo $LD_LIBRARY_PATH - setup='export PATH=/scicore/soft/apps/binutils/2.32-GCCcore-8.3.0/bin:/scicore/soft/apps/CMake/3.15.3-GCCcore-8.3.0/bin:/scicore/soft/apps/cURL/7.66.0-GCCcore-8.3.0/bin:/scicore/soft/apps/bzip2/1.0.8-GCCcore-8.3.0/bin:/scicore/soft/apps/ncurses/6.1-GCCcore-8.3.0/bin:/scicore/soft/apps/GCCcore/8.3.0/bin:/infai/sieverss/repos/bin:/infai/sieverss/local:/export/soft/lua_lmod/centos7/lmod/lmod/libexec:/usr/local/bin:/usr/bin:/usr/local/sbin:/usr/sbin:$PATH\nexport LD_LIBRARY_PATH=/scicore/soft/apps/binutils/2.32-GCCcore-8.3.0/lib:/scicore/soft/apps/cURL/7.66.0-GCCcore-8.3.0/lib:/scicore/soft/apps/bzip2/1.0.8-GCCcore-8.3.0/lib:/scicore/soft/apps/zlib/1.2.11-GCCcore-8.3.0/lib:/scicore/soft/apps/ncurses/6.1-GCCcore-8.3.0/lib:/scicore/soft/apps/GCCcore/8.3.0/lib64:/scicore/soft/apps/GCCcore/8.3.0/lib') - -if common_setup.is_test_run(): - SUITE = IssueExperiment.DEFAULT_TEST_SUITE - ENVIRONMENT = LocalEnvironment(processes=4) - -exp = IssueExperiment( - revisions=REVISIONS, - configs=CONFIGS, - environment=ENVIRONMENT, -) -exp.add_suite(BENCHMARKS_DIR, SUITE) - -exp.add_parser(exp.EXITCODE_PARSER) -exp.add_parser(exp.TRANSLATOR_PARSER) -exp.add_parser(exp.SINGLE_SEARCH_PARSER) -exp.add_parser(exp.PLANNER_PARSER) - -exp.add_step('build', exp.build) -exp.add_step('start', exp.start_runs) -exp.add_fetcher(name='fetch') - -attributes = list(exp.DEFAULT_TABLE_ATTRIBUTES) -attributes.append('initial_h_value') - 
-exp.add_absolute_report_step(attributes=attributes) - -exp.add_fetcher('data/issue1042-v1-eval', merge=True) -exp.add_comparison_table_step(attributes=attributes, revisions=["issue1042-base", "issue1042-v1"]) - -exp.run_steps() diff --git a/experiments/issue1042/common_setup.py b/experiments/issue1042/common_setup.py deleted file mode 100644 index 9a0ddc07fe..0000000000 --- a/experiments/issue1042/common_setup.py +++ /dev/null @@ -1,396 +0,0 @@ -# -*- coding: utf-8 -*- - -import itertools -import os -import platform -import subprocess -import sys - -from lab.experiment import ARGPARSER -from lab import tools - -from downward.experiment import FastDownwardExperiment -from downward.reports.absolute import AbsoluteReport -from downward.reports.compare import ComparativeReport -from downward.reports.scatter import ScatterPlotReport - - -def parse_args(): - ARGPARSER.add_argument( - "--test", - choices=["yes", "no", "auto"], - default="auto", - dest="test_run", - help="test experiment locally on a small suite if --test=yes or " - "--test=auto and we are not on a cluster") - return ARGPARSER.parse_args() - -ARGS = parse_args() - - -DEFAULT_OPTIMAL_SUITE = [ - 'agricola-opt18-strips', 'airport', 'barman-opt11-strips', - 'barman-opt14-strips', 'blocks', 'childsnack-opt14-strips', - 'data-network-opt18-strips', 'depot', 'driverlog', - 'elevators-opt08-strips', 'elevators-opt11-strips', - 'floortile-opt11-strips', 'floortile-opt14-strips', 'freecell', - 'ged-opt14-strips', 'grid', 'gripper', 'hiking-opt14-strips', - 'logistics00', 'logistics98', 'miconic', 'movie', 'mprime', - 'mystery', 'nomystery-opt11-strips', 'openstacks-opt08-strips', - 'openstacks-opt11-strips', 'openstacks-opt14-strips', - 'openstacks-strips', 'organic-synthesis-opt18-strips', - 'organic-synthesis-split-opt18-strips', 'parcprinter-08-strips', - 'parcprinter-opt11-strips', 'parking-opt11-strips', - 'parking-opt14-strips', 'pathways', 'pegsol-08-strips', - 'pegsol-opt11-strips', 
'petri-net-alignment-opt18-strips', - 'pipesworld-notankage', 'pipesworld-tankage', 'psr-small', 'rovers', - 'satellite', 'scanalyzer-08-strips', 'scanalyzer-opt11-strips', - 'snake-opt18-strips', 'sokoban-opt08-strips', - 'sokoban-opt11-strips', 'spider-opt18-strips', 'storage', - 'termes-opt18-strips', 'tetris-opt14-strips', - 'tidybot-opt11-strips', 'tidybot-opt14-strips', 'tpp', - 'transport-opt08-strips', 'transport-opt11-strips', - 'transport-opt14-strips', 'trucks-strips', 'visitall-opt11-strips', - 'visitall-opt14-strips', 'woodworking-opt08-strips', - 'woodworking-opt11-strips', 'zenotravel'] - -DEFAULT_SATISFICING_SUITE = [ - 'agricola-sat18-strips', 'airport', 'assembly', - 'barman-sat11-strips', 'barman-sat14-strips', 'blocks', - 'caldera-sat18-adl', 'caldera-split-sat18-adl', 'cavediving-14-adl', - 'childsnack-sat14-strips', 'citycar-sat14-adl', - 'data-network-sat18-strips', 'depot', 'driverlog', - 'elevators-sat08-strips', 'elevators-sat11-strips', - 'flashfill-sat18-adl', 'floortile-sat11-strips', - 'floortile-sat14-strips', 'freecell', 'ged-sat14-strips', 'grid', - 'gripper', 'hiking-sat14-strips', 'logistics00', 'logistics98', - 'maintenance-sat14-adl', 'miconic', 'miconic-fulladl', - 'miconic-simpleadl', 'movie', 'mprime', 'mystery', - 'nomystery-sat11-strips', 'nurikabe-sat18-adl', 'openstacks', - 'openstacks-sat08-adl', 'openstacks-sat08-strips', - 'openstacks-sat11-strips', 'openstacks-sat14-strips', - 'openstacks-strips', 'optical-telegraphs', - 'organic-synthesis-sat18-strips', - 'organic-synthesis-split-sat18-strips', 'parcprinter-08-strips', - 'parcprinter-sat11-strips', 'parking-sat11-strips', - 'parking-sat14-strips', 'pathways', - 'pegsol-08-strips', 'pegsol-sat11-strips', 'philosophers', - 'pipesworld-notankage', 'pipesworld-tankage', 'psr-large', - 'psr-middle', 'psr-small', 'rovers', 'satellite', - 'scanalyzer-08-strips', 'scanalyzer-sat11-strips', 'schedule', - 'settlers-sat18-adl', 'snake-sat18-strips', 'sokoban-sat08-strips', - 
'sokoban-sat11-strips', 'spider-sat18-strips', 'storage', - 'termes-sat18-strips', 'tetris-sat14-strips', - 'thoughtful-sat14-strips', 'tidybot-sat11-strips', 'tpp', - 'transport-sat08-strips', 'transport-sat11-strips', - 'transport-sat14-strips', 'trucks', 'trucks-strips', - 'visitall-sat11-strips', 'visitall-sat14-strips', - 'woodworking-sat08-strips', 'woodworking-sat11-strips', - 'zenotravel'] - - -def get_script(): - """Get file name of main script.""" - return tools.get_script_path() - - -def get_script_dir(): - """Get directory of main script. - - Usually a relative directory (depends on how it was called by the user.)""" - return os.path.dirname(get_script()) - - -def get_experiment_name(): - """Get name for experiment. - - Derived from the absolute filename of the main script, e.g. - "/ham/spam/eggs.py" => "spam-eggs".""" - script = os.path.abspath(get_script()) - script_dir = os.path.basename(os.path.dirname(script)) - script_base = os.path.splitext(os.path.basename(script))[0] - return "%s-%s" % (script_dir, script_base) - - -def get_data_dir(): - """Get data dir for the experiment. - - This is the subdirectory "data" of the directory containing - the main script.""" - return os.path.join(get_script_dir(), "data", get_experiment_name()) - - -def get_repo_base(): - """Get base directory of the repository, as an absolute path. - - Search upwards in the directory tree from the main script until a - directory with a subdirectory named ".git" is found. 
- - Abort if the repo base cannot be found.""" - path = os.path.abspath(get_script_dir()) - while os.path.dirname(path) != path: - if os.path.exists(os.path.join(path, ".git")): - return path - path = os.path.dirname(path) - sys.exit("repo base could not be found") - - -def is_running_on_cluster(): - node = platform.node() - return node.endswith(".scicore.unibas.ch") or node.endswith(".cluster.bc2.ch") - - -def is_test_run(): - return ARGS.test_run == "yes" or ( - ARGS.test_run == "auto" and not is_running_on_cluster()) - - -def get_algo_nick(revision, config_nick): - return "{revision}-{config_nick}".format(**locals()) - - -class IssueConfig(object): - """Hold information about a planner configuration. - - See FastDownwardExperiment.add_algorithm() for documentation of the - constructor's options. - - """ - def __init__(self, nick, component_options, - build_options=None, driver_options=None): - self.nick = nick - self.component_options = component_options - self.build_options = build_options - self.driver_options = driver_options - - -class IssueExperiment(FastDownwardExperiment): - """Subclass of FastDownwardExperiment with some convenience features.""" - - DEFAULT_TEST_SUITE = ["depot:p01.pddl", "gripper:prob01.pddl"] - - DEFAULT_TABLE_ATTRIBUTES = [ - "cost", - "coverage", - "error", - "evaluations", - "expansions", - "expansions_until_last_jump", - "generated", - "memory", - "planner_memory", - "planner_time", - "quality", - "run_dir", - "score_evaluations", - "score_expansions", - "score_generated", - "score_memory", - "score_search_time", - "score_total_time", - "search_time", - "total_time", - ] - - DEFAULT_SCATTER_PLOT_ATTRIBUTES = [ - "evaluations", - "expansions", - "expansions_until_last_jump", - "initial_h_value", - "memory", - "search_time", - "total_time", - ] - - PORTFOLIO_ATTRIBUTES = [ - "cost", - "coverage", - "error", - "plan_length", - "run_dir", - ] - - def __init__(self, revisions_and_configs=None, path=None, **kwargs): - """ - - If given, 
*revisions_and_configs* must be a non-empty list of - pairs (tuple of size 2) of revisions and configs, with the - meaning to run all configs on all revisions. - - The first element of the pair, revisions, must be a non-empty - list of revision identifiers, which specify which planner - versions to use in the experiment. The same versions are used - for translator, preprocessor and search. :: - - IssueExperiment(revisions=["issue123", "4b3d581643"], ...) - - The second element of the pair, configs, must be a non-empty - list of IssueConfig objects. :: - - IssueExperiment(..., configs=[ - IssueConfig("ff", ["--search", "eager_greedy(ff())"]), - IssueConfig( - "lama", [], - driver_options=["--alias", "seq-sat-lama-2011"]), - ]) - - If *path* is specified, it must be the path to where the - experiment should be built (e.g. - /home/john/experiments/issue123/exp01/). If omitted, the - experiment path is derived automatically from the main - script's filename. Example:: - - script = experiments/issue123/exp01.py --> - path = experiments/issue123/data/issue123-exp01/ - - """ - - path = path or get_data_dir() - - FastDownwardExperiment.__init__(self, path=path, **kwargs) - - revs = set() - confs = set() - for revisions, configs in revisions_and_configs: - for rev in revisions: - revs.add(rev) - for config in configs: - confs.add(config.nick) - self.add_algorithm( - get_algo_nick(rev, config.nick), - get_repo_base(), - rev, - config.component_options, - build_options=config.build_options, - driver_options=config.driver_options) - - self._revisions = list(revs) - self._config_nicks = list(confs) - - @classmethod - def _is_portfolio(cls, config_nick): - return "fdss" in config_nick - - @classmethod - def get_supported_attributes(cls, config_nick, attributes): - if cls._is_portfolio(config_nick): - return [attr for attr in attributes - if attr in cls.PORTFOLIO_ATTRIBUTES] - return attributes - - def add_absolute_report_step(self, **kwargs): - """Add step that makes an absolute 
report. - - Absolute reports are useful for experiments that don't compare - revisions. - - The report is written to the experiment evaluation directory. - - All *kwargs* will be passed to the AbsoluteReport class. If the - keyword argument *attributes* is not specified, a default list - of attributes is used. :: - - exp.add_absolute_report_step(attributes=["coverage"]) - - """ - kwargs.setdefault("attributes", self.DEFAULT_TABLE_ATTRIBUTES) - report = AbsoluteReport(**kwargs) - outfile = os.path.join( - self.eval_dir, - get_experiment_name() + "." + report.output_format) - self.add_report(report, outfile=outfile) - self.add_step( - 'publish-absolute-report', subprocess.call, ['publish', outfile]) - - def add_comparison_table_step(self, name="make-comparison-tables", revisions=[], **kwargs): - """Add a step that makes pairwise revision comparisons. - - Create comparative reports for all pairs of Fast Downward - revisions. Each report pairs up the runs of the same config and - lists the two absolute attribute values and their difference - for all attributes in kwargs["attributes"]. - - All *kwargs* will be passed to the CompareConfigsReport class. - If the keyword argument *attributes* is not specified, a - default list of attributes is used. 
:: - - exp.add_comparison_table_step(attributes=["coverage"]) - - """ - kwargs.setdefault("attributes", self.DEFAULT_TABLE_ATTRIBUTES) - if not revisions: - revisions = self._revisions - - def make_comparison_tables(): - for rev1, rev2 in itertools.combinations(revisions, 2): - compared_configs = [] - for config_nick in self._config_nicks: - compared_configs.append( - ("%s-%s" % (rev1, config_nick), - "%s-%s" % (rev2, config_nick), - "Diff (%s)" % config_nick)) - report = ComparativeReport(compared_configs, **kwargs) - outfile = os.path.join( - self.eval_dir, - "%s-%s-%s-compare.%s" % ( - self.name, rev1, rev2, report.output_format)) - report(self.eval_dir, outfile) - - def publish_comparison_tables(): - for rev1, rev2 in itertools.combinations(revisions, 2): - outfile = os.path.join( - self.eval_dir, - "%s-%s-%s-compare.html" % (self.name, rev1, rev2)) - subprocess.call(["publish", outfile]) - - self.add_step(name, make_comparison_tables) - self.add_step( - f"publish-{name}", publish_comparison_tables) - - def add_scatter_plot_step(self, relative=False, attributes=None, additional=[]): - """Add step creating (relative) scatter plots for all revision pairs. - - Create a scatter plot for each combination of attribute, - configuration and revisions pair. If *attributes* is not - specified, a list of common scatter plot attributes is used. - For portfolios all attributes except "cost", "coverage" and - "plan_length" will be ignored. 
:: - - exp.add_scatter_plot_step(attributes=["expansions"]) - - """ - if relative: - scatter_dir = os.path.join(self.eval_dir, "scatter-relative") - step_name = "make-relative-scatter-plots" - else: - scatter_dir = os.path.join(self.eval_dir, "scatter-absolute") - step_name = "make-absolute-scatter-plots" - if attributes is None: - attributes = self.DEFAULT_SCATTER_PLOT_ATTRIBUTES - - def make_scatter_plot(config_nick, rev1, rev2, attribute, config_nick2=None): - name = "-".join([self.name, rev1, rev2, attribute, config_nick]) - if config_nick2 is not None: - name += "-" + config_nick2 - print("Make scatter plot for", name) - algo1 = get_algo_nick(rev1, config_nick) - algo2 = get_algo_nick(rev2, config_nick if config_nick2 is None else config_nick2) - report = ScatterPlotReport( - filter_algorithm=[algo1, algo2], - attributes=[attribute], - relative=relative, - get_category=lambda run1, run2: run1["domain"]) - report( - self.eval_dir, - os.path.join(scatter_dir, rev1 + "-" + rev2, name)) - - def make_scatter_plots(): - for config_nick in self._config_nicks: - for rev1, rev2 in itertools.combinations(self._revisions, 2): - for attribute in self.get_supported_attributes( - config_nick, attributes): - make_scatter_plot(config_nick, rev1, rev2, attribute) - for nick1, nick2, rev1, rev2, attribute in additional: - make_scatter_plot(nick1, rev1, rev2, attribute, config_nick2=nick2) - - self.add_step(step_name, make_scatter_plots) diff --git a/experiments/issue1042/requirements.in b/experiments/issue1042/requirements.in deleted file mode 100644 index 3157755e80..0000000000 --- a/experiments/issue1042/requirements.in +++ /dev/null @@ -1 +0,0 @@ -lab==7.0 diff --git a/experiments/issue1042/requirements.txt b/experiments/issue1042/requirements.txt deleted file mode 100644 index a98c103c40..0000000000 --- a/experiments/issue1042/requirements.txt +++ /dev/null @@ -1,34 +0,0 @@ -# -# This file is autogenerated by pip-compile with python 3.8 -# To update, run: -# -# pip-compile 
requirements.in -# -cycler==0.11.0 - # via matplotlib -fonttools==4.29.1 - # via matplotlib -kiwisolver==1.3.2 - # via matplotlib -lab==7.0 - # via -r requirements.in -matplotlib==3.5.1 - # via lab -numpy==1.22.2 - # via matplotlib -packaging==21.3 - # via matplotlib -pillow==9.0.1 - # via matplotlib -pyparsing==3.0.7 - # via - # matplotlib - # packaging -python-dateutil==2.8.2 - # via matplotlib -simplejson==3.17.6 - # via lab -six==1.16.0 - # via python-dateutil -txt2tags==3.7 - # via lab diff --git a/experiments/issue1042/v1.py b/experiments/issue1042/v1.py deleted file mode 100755 index b6cbcbbccc..0000000000 --- a/experiments/issue1042/v1.py +++ /dev/null @@ -1,67 +0,0 @@ -#! /usr/bin/env python3 - -import itertools -import math -import os -import subprocess - -from lab.environments import LocalEnvironment, BaselSlurmEnvironment -from lab.reports import Attribute - -from downward.reports.compare import ComparativeReport - -import common_setup -from common_setup import IssueConfig, IssueExperiment - -DIR = os.path.dirname(os.path.abspath(__file__)) -SCRIPT_NAME = os.path.splitext(os.path.basename(__file__))[0] -BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"] -REVISIONS = ["issue1042-v1"] -CONFIGS = [ - IssueConfig(f'astar-lmcut', ['--search', f'astar(lmcut())'], driver_options=['--search-time-limit', '5m']), - IssueConfig(f'astar-lmcut-sssimple', ['--search', f'astar(lmcut(),pruning=stubborn_sets_simple)'], driver_options=['--search-time-limit', '5m']), - IssueConfig(f'astar-lmcut-sssimple-limited', ['--search', f'astar(lmcut(),pruning=limited(pruning=stubborn_sets_simple,min_required_pruning_ratio=0.2,expansions_before_checking_pruning_ratio=1000))'], driver_options=['--search-time-limit', '5m']), - IssueConfig(f'astar-lmcut-ssatom', ['--search', f'astar(lmcut(),pruning=atom_centric_stubborn_sets)'], driver_options=['--search-time-limit', '5m']), - IssueConfig(f'astar-lmcut-ssatom-limited', ['--search', 
f'astar(lmcut(),pruning=limited(pruning=atom_centric_stubborn_sets,min_required_pruning_ratio=0.2,expansions_before_checking_pruning_ratio=1000))'], driver_options=['--search-time-limit', '5m']), -] - -SUITE = common_setup.DEFAULT_OPTIMAL_SUITE -ENVIRONMENT = BaselSlurmEnvironment( - email="silvan.sievers@unibas.ch", - partition="infai_2", - export=[], - # paths obtained via: - # module purge - # module -q load CMake/3.15.3-GCCcore-8.3.0 - # module -q load GCC/8.3.0 - # echo $PATH - # echo $LD_LIBRARY_PATH - setup='export PATH=/scicore/soft/apps/binutils/2.32-GCCcore-8.3.0/bin:/scicore/soft/apps/CMake/3.15.3-GCCcore-8.3.0/bin:/scicore/soft/apps/cURL/7.66.0-GCCcore-8.3.0/bin:/scicore/soft/apps/bzip2/1.0.8-GCCcore-8.3.0/bin:/scicore/soft/apps/ncurses/6.1-GCCcore-8.3.0/bin:/scicore/soft/apps/GCCcore/8.3.0/bin:/infai/sieverss/repos/bin:/infai/sieverss/local:/export/soft/lua_lmod/centos7/lmod/lmod/libexec:/usr/local/bin:/usr/bin:/usr/local/sbin:/usr/sbin:$PATH\nexport LD_LIBRARY_PATH=/scicore/soft/apps/binutils/2.32-GCCcore-8.3.0/lib:/scicore/soft/apps/cURL/7.66.0-GCCcore-8.3.0/lib:/scicore/soft/apps/bzip2/1.0.8-GCCcore-8.3.0/lib:/scicore/soft/apps/zlib/1.2.11-GCCcore-8.3.0/lib:/scicore/soft/apps/ncurses/6.1-GCCcore-8.3.0/lib:/scicore/soft/apps/GCCcore/8.3.0/lib64:/scicore/soft/apps/GCCcore/8.3.0/lib') - -if common_setup.is_test_run(): - SUITE = IssueExperiment.DEFAULT_TEST_SUITE - ENVIRONMENT = LocalEnvironment(processes=4) - -exp = IssueExperiment( - revisions=REVISIONS, - configs=CONFIGS, - environment=ENVIRONMENT, -) -exp.add_suite(BENCHMARKS_DIR, SUITE) - -exp.add_parser(exp.EXITCODE_PARSER) -exp.add_parser(exp.TRANSLATOR_PARSER) -exp.add_parser(exp.SINGLE_SEARCH_PARSER) -exp.add_parser(exp.PLANNER_PARSER) - -exp.add_step('build', exp.build) -exp.add_step('start', exp.start_runs) -exp.add_fetcher(name='fetch') - -attributes = list(exp.DEFAULT_TABLE_ATTRIBUTES) -attributes.append('initial_h_value') - -exp.add_absolute_report_step(attributes=attributes) 
-exp.add_comparison_table_step(attributes=attributes) - -exp.run_steps() diff --git a/experiments/issue1042/v2.py b/experiments/issue1042/v2.py deleted file mode 100755 index ab1f16df2e..0000000000 --- a/experiments/issue1042/v2.py +++ /dev/null @@ -1,80 +0,0 @@ -#! /usr/bin/env python3 - -import itertools -import math -import os -import subprocess - -from lab.environments import LocalEnvironment, BaselSlurmEnvironment -from lab.reports import Attribute - -from downward.reports.compare import ComparativeReport - -import common_setup -from common_setup import IssueConfig, IssueExperiment - -DIR = os.path.dirname(os.path.abspath(__file__)) -SCRIPT_NAME = os.path.splitext(os.path.basename(__file__))[0] -BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"] -REVISIONS_AND_CONFIGS = [ - ( - ["issue1042-v2"], - [ - IssueConfig(f'astar-lmcut', ['--search', f'astar(lmcut())'], driver_options=['--search-time-limit', '5m']), - IssueConfig(f'astar-lmcut-sssimple', ['--search', f'astar(lmcut(),pruning=stubborn_sets_simple)'], driver_options=['--search-time-limit', '5m']), - IssueConfig(f'astar-lmcut-sssimple-limited', ['--search', f'astar(lmcut(),pruning=limited_pruning(pruning=stubborn_sets_simple,min_required_pruning_ratio=0.2,expansions_before_checking_pruning_ratio=1000))'], driver_options=['--search-time-limit', '5m']), - IssueConfig(f'astar-lmcut-ssatom', ['--search', f'astar(lmcut(),pruning=atom_centric_stubborn_sets)'], driver_options=['--search-time-limit', '5m']), - IssueConfig(f'astar-lmcut-ssatom-limited', ['--search', f'astar(lmcut(),pruning=limited_pruning(pruning=atom_centric_stubborn_sets,min_required_pruning_ratio=0.2,expansions_before_checking_pruning_ratio=1000))'], driver_options=['--search-time-limit', '5m']), - ], - ), - ( - ["issue1042-base"], - [ - IssueConfig(f'astar-lmcut', ['--search', f'astar(lmcut())'], driver_options=['--search-time-limit', '5m']), - IssueConfig(f'astar-lmcut-sssimple', ['--search', f'astar(lmcut(),pruning=stubborn_sets_simple)'], 
driver_options=['--search-time-limit', '5m']), - IssueConfig(f'astar-lmcut-sssimple-limited', ['--search', f'astar(lmcut(),pruning=stubborn_sets_simple(min_required_pruning_ratio=0.2,expansions_before_checking_pruning_ratio=1000))'], driver_options=['--search-time-limit', '5m']), - IssueConfig(f'astar-lmcut-ssatom', ['--search', f'astar(lmcut(),pruning=atom_centric_stubborn_sets)'], driver_options=['--search-time-limit', '5m']), - IssueConfig(f'astar-lmcut-ssatom-limited', ['--search', f'astar(lmcut(),pruning=atom_centric_stubborn_sets(min_required_pruning_ratio=0.2,expansions_before_checking_pruning_ratio=1000))'], driver_options=['--search-time-limit', '5m']), - ], - ) -] - -SUITE = common_setup.DEFAULT_OPTIMAL_SUITE -ENVIRONMENT = BaselSlurmEnvironment( - email="silvan.sievers@unibas.ch", - partition="infai_2", - export=[], - # paths obtained via: - # module purge - # module -q load CMake/3.15.3-GCCcore-8.3.0 - # module -q load GCC/8.3.0 - # echo $PATH - # echo $LD_LIBRARY_PATH - setup='export PATH=/scicore/soft/apps/binutils/2.32-GCCcore-8.3.0/bin:/scicore/soft/apps/CMake/3.15.3-GCCcore-8.3.0/bin:/scicore/soft/apps/cURL/7.66.0-GCCcore-8.3.0/bin:/scicore/soft/apps/bzip2/1.0.8-GCCcore-8.3.0/bin:/scicore/soft/apps/ncurses/6.1-GCCcore-8.3.0/bin:/scicore/soft/apps/GCCcore/8.3.0/bin:/infai/sieverss/repos/bin:/infai/sieverss/local:/export/soft/lua_lmod/centos7/lmod/lmod/libexec:/usr/local/bin:/usr/bin:/usr/local/sbin:/usr/sbin:$PATH\nexport LD_LIBRARY_PATH=/scicore/soft/apps/binutils/2.32-GCCcore-8.3.0/lib:/scicore/soft/apps/cURL/7.66.0-GCCcore-8.3.0/lib:/scicore/soft/apps/bzip2/1.0.8-GCCcore-8.3.0/lib:/scicore/soft/apps/zlib/1.2.11-GCCcore-8.3.0/lib:/scicore/soft/apps/ncurses/6.1-GCCcore-8.3.0/lib:/scicore/soft/apps/GCCcore/8.3.0/lib64:/scicore/soft/apps/GCCcore/8.3.0/lib') - -if common_setup.is_test_run(): - SUITE = IssueExperiment.DEFAULT_TEST_SUITE - ENVIRONMENT = LocalEnvironment(processes=4) - -exp = IssueExperiment( - 
revisions_and_configs=REVISIONS_AND_CONFIGS, - environment=ENVIRONMENT, -) -exp.add_suite(BENCHMARKS_DIR, SUITE) - -exp.add_parser(exp.EXITCODE_PARSER) -exp.add_parser(exp.TRANSLATOR_PARSER) -exp.add_parser(exp.SINGLE_SEARCH_PARSER) -exp.add_parser(exp.PLANNER_PARSER) - -exp.add_step('build', exp.build) -exp.add_step('start', exp.start_runs) -exp.add_fetcher(name='fetch') - -attributes = list(exp.DEFAULT_TABLE_ATTRIBUTES) -attributes.append('initial_h_value') - -exp.add_absolute_report_step(attributes=attributes) -exp.add_comparison_table_step(attributes=attributes) - -exp.run_steps() diff --git a/experiments/issue1042/v3.py b/experiments/issue1042/v3.py deleted file mode 100755 index e63ed75130..0000000000 --- a/experiments/issue1042/v3.py +++ /dev/null @@ -1,80 +0,0 @@ -#! /usr/bin/env python3 - -import itertools -import math -import os -import subprocess - -from lab.environments import LocalEnvironment, BaselSlurmEnvironment -from lab.reports import Attribute - -from downward.reports.compare import ComparativeReport - -import common_setup -from common_setup import IssueConfig, IssueExperiment - -DIR = os.path.dirname(os.path.abspath(__file__)) -SCRIPT_NAME = os.path.splitext(os.path.basename(__file__))[0] -BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"] -REVISIONS_AND_CONFIGS = [ - ( - ["issue1042-v3"], - [ - IssueConfig(f'astar-lmcut', ['--search', f'astar(lmcut())'], driver_options=['--search-time-limit', '5m']), - IssueConfig(f'astar-lmcut-sssimple', ['--search', f'astar(lmcut(),pruning=stubborn_sets_simple)'], driver_options=['--search-time-limit', '5m']), - IssueConfig(f'astar-lmcut-sssimple-limited', ['--search', f'astar(lmcut(),pruning=limited_pruning(pruning=stubborn_sets_simple,min_required_pruning_ratio=0.2,expansions_before_checking_pruning_ratio=1000))'], driver_options=['--search-time-limit', '5m']), - IssueConfig(f'astar-lmcut-ssatom', ['--search', f'astar(lmcut(),pruning=atom_centric_stubborn_sets)'], driver_options=['--search-time-limit', 
'5m']), - IssueConfig(f'astar-lmcut-ssatom-limited', ['--search', f'astar(lmcut(),pruning=limited_pruning(pruning=atom_centric_stubborn_sets,min_required_pruning_ratio=0.2,expansions_before_checking_pruning_ratio=1000))'], driver_options=['--search-time-limit', '5m']), - ], - ), - ( - ["issue1042-base"], - [ - IssueConfig(f'astar-lmcut', ['--search', f'astar(lmcut())'], driver_options=['--search-time-limit', '5m']), - IssueConfig(f'astar-lmcut-sssimple', ['--search', f'astar(lmcut(),pruning=stubborn_sets_simple)'], driver_options=['--search-time-limit', '5m']), - IssueConfig(f'astar-lmcut-sssimple-limited', ['--search', f'astar(lmcut(),pruning=stubborn_sets_simple(min_required_pruning_ratio=0.2,expansions_before_checking_pruning_ratio=1000))'], driver_options=['--search-time-limit', '5m']), - IssueConfig(f'astar-lmcut-ssatom', ['--search', f'astar(lmcut(),pruning=atom_centric_stubborn_sets)'], driver_options=['--search-time-limit', '5m']), - IssueConfig(f'astar-lmcut-ssatom-limited', ['--search', f'astar(lmcut(),pruning=atom_centric_stubborn_sets(min_required_pruning_ratio=0.2,expansions_before_checking_pruning_ratio=1000))'], driver_options=['--search-time-limit', '5m']), - ], - ) -] - -SUITE = common_setup.DEFAULT_OPTIMAL_SUITE -ENVIRONMENT = BaselSlurmEnvironment( - email="silvan.sievers@unibas.ch", - partition="infai_2", - export=[], - # paths obtained via: - # module purge - # module -q load CMake/3.15.3-GCCcore-8.3.0 - # module -q load GCC/8.3.0 - # echo $PATH - # echo $LD_LIBRARY_PATH - setup='export PATH=/scicore/soft/apps/binutils/2.32-GCCcore-8.3.0/bin:/scicore/soft/apps/CMake/3.15.3-GCCcore-8.3.0/bin:/scicore/soft/apps/cURL/7.66.0-GCCcore-8.3.0/bin:/scicore/soft/apps/bzip2/1.0.8-GCCcore-8.3.0/bin:/scicore/soft/apps/ncurses/6.1-GCCcore-8.3.0/bin:/scicore/soft/apps/GCCcore/8.3.0/bin:/infai/sieverss/repos/bin:/infai/sieverss/local:/export/soft/lua_lmod/centos7/lmod/lmod/libexec:/usr/local/bin:/usr/bin:/usr/local/sbin:/usr/sbin:$PATH\nexport 
LD_LIBRARY_PATH=/scicore/soft/apps/binutils/2.32-GCCcore-8.3.0/lib:/scicore/soft/apps/cURL/7.66.0-GCCcore-8.3.0/lib:/scicore/soft/apps/bzip2/1.0.8-GCCcore-8.3.0/lib:/scicore/soft/apps/zlib/1.2.11-GCCcore-8.3.0/lib:/scicore/soft/apps/ncurses/6.1-GCCcore-8.3.0/lib:/scicore/soft/apps/GCCcore/8.3.0/lib64:/scicore/soft/apps/GCCcore/8.3.0/lib') - -if common_setup.is_test_run(): - SUITE = IssueExperiment.DEFAULT_TEST_SUITE - ENVIRONMENT = LocalEnvironment(processes=4) - -exp = IssueExperiment( - revisions_and_configs=REVISIONS_AND_CONFIGS, - environment=ENVIRONMENT, -) -exp.add_suite(BENCHMARKS_DIR, SUITE) - -exp.add_parser(exp.EXITCODE_PARSER) -exp.add_parser(exp.TRANSLATOR_PARSER) -exp.add_parser(exp.SINGLE_SEARCH_PARSER) -exp.add_parser(exp.PLANNER_PARSER) - -exp.add_step('build', exp.build) -exp.add_step('start', exp.start_runs) -exp.add_fetcher(name='fetch') - -attributes = list(exp.DEFAULT_TABLE_ATTRIBUTES) -attributes.append('initial_h_value') - -exp.add_absolute_report_step(attributes=attributes) -exp.add_comparison_table_step(attributes=attributes) - -exp.run_steps() diff --git a/experiments/issue1044/common_setup.py b/experiments/issue1044/common_setup.py deleted file mode 100644 index eeca3aadb5..0000000000 --- a/experiments/issue1044/common_setup.py +++ /dev/null @@ -1,395 +0,0 @@ -# -*- coding: utf-8 -*- - -import itertools -import os -import platform -import subprocess -import sys - -from lab.experiment import ARGPARSER -from lab import tools - -from downward.experiment import FastDownwardExperiment -from downward.reports.absolute import AbsoluteReport -from downward.reports.compare import ComparativeReport -from downward.reports.scatter import ScatterPlotReport - - -def parse_args(): - ARGPARSER.add_argument( - "--test", - choices=["yes", "no", "auto"], - default="auto", - dest="test_run", - help="test experiment locally on a small suite if --test=yes or " - "--test=auto and we are not on a cluster") - return ARGPARSER.parse_args() - -ARGS = parse_args() - 
- -DEFAULT_OPTIMAL_SUITE = [ - 'agricola-opt18-strips', 'airport', 'barman-opt11-strips', - 'barman-opt14-strips', 'blocks', 'childsnack-opt14-strips', - 'data-network-opt18-strips', 'depot', 'driverlog', - 'elevators-opt08-strips', 'elevators-opt11-strips', - 'floortile-opt11-strips', 'floortile-opt14-strips', 'freecell', - 'ged-opt14-strips', 'grid', 'gripper', 'hiking-opt14-strips', - 'logistics00', 'logistics98', 'miconic', 'movie', 'mprime', - 'mystery', 'nomystery-opt11-strips', 'openstacks-opt08-strips', - 'openstacks-opt11-strips', 'openstacks-opt14-strips', - 'openstacks-strips', 'organic-synthesis-opt18-strips', - 'organic-synthesis-split-opt18-strips', 'parcprinter-08-strips', - 'parcprinter-opt11-strips', 'parking-opt11-strips', - 'parking-opt14-strips', 'pathways-noneg', 'pegsol-08-strips', - 'pegsol-opt11-strips', 'petri-net-alignment-opt18-strips', - 'pipesworld-notankage', 'pipesworld-tankage', 'psr-small', 'rovers', - 'satellite', 'scanalyzer-08-strips', 'scanalyzer-opt11-strips', - 'snake-opt18-strips', 'sokoban-opt08-strips', - 'sokoban-opt11-strips', 'spider-opt18-strips', 'storage', - 'termes-opt18-strips', 'tetris-opt14-strips', - 'tidybot-opt11-strips', 'tidybot-opt14-strips', 'tpp', - 'transport-opt08-strips', 'transport-opt11-strips', - 'transport-opt14-strips', 'trucks-strips', 'visitall-opt11-strips', - 'visitall-opt14-strips', 'woodworking-opt08-strips', - 'woodworking-opt11-strips', 'zenotravel'] - -DEFAULT_SATISFICING_SUITE = [ - 'agricola-sat18-strips', 'airport', 'assembly', - 'barman-sat11-strips', 'barman-sat14-strips', 'blocks', - 'caldera-sat18-adl', 'caldera-split-sat18-adl', 'cavediving-14-adl', - 'childsnack-sat14-strips', 'citycar-sat14-adl', - 'data-network-sat18-strips', 'depot', 'driverlog', - 'elevators-sat08-strips', 'elevators-sat11-strips', - 'flashfill-sat18-adl', 'floortile-sat11-strips', - 'floortile-sat14-strips', 'freecell', 'ged-sat14-strips', 'grid', - 'gripper', 'hiking-sat14-strips', 'logistics00', 
'logistics98', - 'maintenance-sat14-adl', 'miconic', 'miconic-fulladl', - 'miconic-simpleadl', 'movie', 'mprime', 'mystery', - 'nomystery-sat11-strips', 'nurikabe-sat18-adl', 'openstacks', - 'openstacks-sat08-adl', 'openstacks-sat08-strips', - 'openstacks-sat11-strips', 'openstacks-sat14-strips', - 'openstacks-strips', 'optical-telegraphs', - 'organic-synthesis-sat18-strips', - 'organic-synthesis-split-sat18-strips', 'parcprinter-08-strips', - 'parcprinter-sat11-strips', 'parking-sat11-strips', - 'parking-sat14-strips', 'pathways', 'pathways-noneg', - 'pegsol-08-strips', 'pegsol-sat11-strips', 'philosophers', - 'pipesworld-notankage', 'pipesworld-tankage', 'psr-large', - 'psr-middle', 'psr-small', 'rovers', 'satellite', - 'scanalyzer-08-strips', 'scanalyzer-sat11-strips', 'schedule', - 'settlers-sat18-adl', 'snake-sat18-strips', 'sokoban-sat08-strips', - 'sokoban-sat11-strips', 'spider-sat18-strips', 'storage', - 'termes-sat18-strips', 'tetris-sat14-strips', - 'thoughtful-sat14-strips', 'tidybot-sat11-strips', 'tpp', - 'transport-sat08-strips', 'transport-sat11-strips', - 'transport-sat14-strips', 'trucks', 'trucks-strips', - 'visitall-sat11-strips', 'visitall-sat14-strips', - 'woodworking-sat08-strips', 'woodworking-sat11-strips', - 'zenotravel'] - - -def get_script(): - """Get file name of main script.""" - return tools.get_script_path() - - -def get_script_dir(): - """Get directory of main script. - - Usually a relative directory (depends on how it was called by the user.)""" - return os.path.dirname(get_script()) - - -def get_experiment_name(): - """Get name for experiment. - - Derived from the absolute filename of the main script, e.g. - "/ham/spam/eggs.py" => "spam-eggs".""" - script = os.path.abspath(get_script()) - script_dir = os.path.basename(os.path.dirname(script)) - script_base = os.path.splitext(os.path.basename(script))[0] - return "%s-%s" % (script_dir, script_base) - - -def get_data_dir(): - """Get data dir for the experiment. 
- - This is the subdirectory "data" of the directory containing - the main script.""" - return os.path.join(get_script_dir(), "data", get_experiment_name()) - - -def get_repo_base(): - """Get base directory of the repository, as an absolute path. - - Search upwards in the directory tree from the main script until a - directory with a subdirectory named ".git" is found. - - Abort if the repo base cannot be found.""" - path = os.path.abspath(get_script_dir()) - while os.path.dirname(path) != path: - if os.path.exists(os.path.join(path, ".git")): - return path - path = os.path.dirname(path) - sys.exit("repo base could not be found") - - -def is_running_on_cluster(): - node = platform.node() - return node.endswith(".scicore.unibas.ch") or node.endswith(".cluster.bc2.ch") - - -def is_test_run(): - return ARGS.test_run == "yes" or ( - ARGS.test_run == "auto" and not is_running_on_cluster()) - - -def get_algo_nick(revision, config_nick): - return "{revision}-{config_nick}".format(**locals()) - - -class IssueConfig(object): - """Hold information about a planner configuration. - - See FastDownwardExperiment.add_algorithm() for documentation of the - constructor's options. 
- - """ - def __init__(self, nick, component_options, - build_options=None, driver_options=None): - self.nick = nick - self.component_options = component_options - self.build_options = build_options - self.driver_options = driver_options - - -class IssueExperiment(FastDownwardExperiment): - """Subclass of FastDownwardExperiment with some convenience features.""" - - DEFAULT_TEST_SUITE = ["depot:p01.pddl", "gripper:prob01.pddl"] - - DEFAULT_TABLE_ATTRIBUTES = [ - "cost", - "coverage", - "error", - "evaluations", - "expansions", - "expansions_until_last_jump", - "initial_h_value", - "generated", - "memory", - "planner_memory", - "planner_time", - "quality", - "run_dir", - "score_evaluations", - "score_expansions", - "score_generated", - "score_memory", - "score_search_time", - "score_total_time", - "search_time", - "total_time", - ] - - DEFAULT_SCATTER_PLOT_ATTRIBUTES = [ - "evaluations", - "expansions", - "expansions_until_last_jump", - "initial_h_value", - "memory", - "search_time", - "total_time", - ] - - PORTFOLIO_ATTRIBUTES = [ - "cost", - "coverage", - "error", - "plan_length", - "run_dir", - ] - - def __init__(self, revisions=None, configs=None, path=None, **kwargs): - """ - - You can either specify both *revisions* and *configs* or none - of them. If they are omitted, you will need to call - exp.add_algorithm() manually. - - If *revisions* is given, it must be a non-empty list of - revision identifiers, which specify which planner versions to - use in the experiment. The same versions are used for - translator, preprocessor and search. :: - - IssueExperiment(revisions=["issue123", "4b3d581643"], ...) - - If *configs* is given, it must be a non-empty list of - IssueConfig objects. :: - - IssueExperiment(..., configs=[ - IssueConfig("ff", ["--search", "eager_greedy(ff())"]), - IssueConfig( - "lama", [], - driver_options=["--alias", "seq-sat-lama-2011"]), - ]) - - If *path* is specified, it must be the path to where the - experiment should be built (e.g. 
- /home/john/experiments/issue123/exp01/). If omitted, the - experiment path is derived automatically from the main - script's filename. Example:: - - script = experiments/issue123/exp01.py --> - path = experiments/issue123/data/issue123-exp01/ - - """ - - path = path or get_data_dir() - - FastDownwardExperiment.__init__(self, path=path, **kwargs) - - if (revisions and not configs) or (not revisions and configs): - raise ValueError( - "please provide either both or none of revisions and configs") - - for rev in revisions: - for config in configs: - self.add_algorithm( - get_algo_nick(rev, config.nick), - get_repo_base(), - rev, - config.component_options, - build_options=config.build_options, - driver_options=config.driver_options) - - self._revisions = revisions - self._configs = configs - - @classmethod - def _is_portfolio(cls, config_nick): - return "fdss" in config_nick - - @classmethod - def get_supported_attributes(cls, config_nick, attributes): - if cls._is_portfolio(config_nick): - return [attr for attr in attributes - if attr in cls.PORTFOLIO_ATTRIBUTES] - return attributes - - def add_absolute_report_step(self, **kwargs): - """Add step that makes an absolute report. - - Absolute reports are useful for experiments that don't compare - revisions. - - The report is written to the experiment evaluation directory. - - All *kwargs* will be passed to the AbsoluteReport class. If the - keyword argument *attributes* is not specified, a default list - of attributes is used. :: - - exp.add_absolute_report_step(attributes=["coverage"]) - - """ - kwargs.setdefault("attributes", self.DEFAULT_TABLE_ATTRIBUTES) - report = AbsoluteReport(**kwargs) - outfile = os.path.join( - self.eval_dir, - get_experiment_name() + "." 
+ report.output_format) - self.add_report(report, outfile=outfile) - self.add_step( - 'publish-absolute-report', subprocess.call, ['publish', outfile]) - - def add_comparison_table_step(self, **kwargs): - """Add a step that makes pairwise revision comparisons. - - Create comparative reports for all pairs of Fast Downward - revisions. Each report pairs up the runs of the same config and - lists the two absolute attribute values and their difference - for all attributes in kwargs["attributes"]. - - All *kwargs* will be passed to the CompareConfigsReport class. - If the keyword argument *attributes* is not specified, a - default list of attributes is used. :: - - exp.add_comparison_table_step(attributes=["coverage"]) - - """ - kwargs.setdefault("attributes", self.DEFAULT_TABLE_ATTRIBUTES) - - def make_comparison_tables(): - for rev1, rev2 in itertools.combinations(self._revisions, 2): - compared_configs = [] - for config in self._configs: - config_nick = config.nick - compared_configs.append( - ("%s-%s" % (rev1, config_nick), - "%s-%s" % (rev2, config_nick), - "Diff (%s)" % config_nick)) - report = ComparativeReport(compared_configs, **kwargs) - outfile = os.path.join( - self.eval_dir, - "%s-%s-%s-compare.%s" % ( - self.name, rev1, rev2, report.output_format)) - report(self.eval_dir, outfile) - - def publish_comparison_tables(): - for rev1, rev2 in itertools.combinations(self._revisions, 2): - outfile = os.path.join( - self.eval_dir, - "%s-%s-%s-compare.html" % (self.name, rev1, rev2)) - subprocess.call(["publish", outfile]) - - self.add_step("make-comparison-tables", make_comparison_tables) - self.add_step( - "publish-comparison-tables", publish_comparison_tables) - - def add_scatter_plot_step(self, relative=False, attributes=None, additional=[]): - """Add step creating (relative) scatter plots for all revision pairs. - - Create a scatter plot for each combination of attribute, - configuration and revisions pair. 
If *attributes* is not - specified, a list of common scatter plot attributes is used. - For portfolios all attributes except "cost", "coverage" and - "plan_length" will be ignored. :: - - exp.add_scatter_plot_step(attributes=["expansions"]) - - """ - if relative: - scatter_dir = os.path.join(self.eval_dir, "scatter-relative") - step_name = "make-relative-scatter-plots" - else: - scatter_dir = os.path.join(self.eval_dir, "scatter-absolute") - step_name = "make-absolute-scatter-plots" - if attributes is None: - attributes = self.DEFAULT_SCATTER_PLOT_ATTRIBUTES - - def make_scatter_plot(config_nick, rev1, rev2, attribute, config_nick2=None): - name = "-".join([self.name, rev1, rev2, attribute, config_nick]) - if config_nick2 is not None: - name += "-" + config_nick2 - print("Make scatter plot for", name) - algo1 = get_algo_nick(rev1, config_nick) - algo2 = get_algo_nick(rev2, config_nick if config_nick2 is None else config_nick2) - report = ScatterPlotReport( - filter_algorithm=[algo1, algo2], - attributes=[attribute], - relative=relative, - get_category=lambda run1, run2: run1["domain"]) - report( - self.eval_dir, - os.path.join(scatter_dir, rev1 + "-" + rev2, name)) - - def make_scatter_plots(): - for config in self._configs: - for rev1, rev2 in itertools.combinations(self._revisions, 2): - for attribute in self.get_supported_attributes( - config.nick, attributes): - make_scatter_plot(config.nick, rev1, rev2, attribute) - for nick1, nick2, rev1, rev2, attribute in additional: - make_scatter_plot(nick1, rev1, rev2, attribute, config_nick2=nick2) - - self.add_step(step_name, lambda: make_scatter_plots) diff --git a/experiments/issue1044/landmark_parser.py b/experiments/issue1044/landmark_parser.py deleted file mode 100755 index d32ec08819..0000000000 --- a/experiments/issue1044/landmark_parser.py +++ /dev/null @@ -1,29 +0,0 @@ -#! 
/usr/bin/env python - -import re - -from lab.parser import Parser - -parser = Parser() -parser.add_pattern( - "lmgraph_generation_time", - r"Landmark graph generation time: (.+)s", - type=float) -parser.add_pattern( - "landmarks", - r"Landmark graph contains (\d+) landmarks, of which \d+ are disjunctive and \d+ are conjunctive.", - type=int) -parser.add_pattern( - "landmarks_disjunctive", - r"Landmark graph contains \d+ landmarks, of which (\d+) are disjunctive and \d+ are conjunctive.", - type=int) -parser.add_pattern( - "landmarks_conjunctive", - r"Landmark graph contains \d+ landmarks, of which \d+ are disjunctive and (\d+) are conjunctive.", - type=int) -parser.add_pattern( - "orderings", - r"Landmark graph contains (\d+) orderings.", - type=int) - -parser.parse() diff --git a/experiments/issue1044/v1-optimal.py b/experiments/issue1044/v1-optimal.py deleted file mode 100755 index 912b1912a7..0000000000 --- a/experiments/issue1044/v1-optimal.py +++ /dev/null @@ -1,89 +0,0 @@ -#! /usr/bin/env python -# -*- coding: utf-8 -*- - -import common_setup -import os - -from common_setup import IssueConfig, IssueExperiment -from lab.environments import LocalEnvironment, BaselSlurmEnvironment -from lab.reports import Attribute - -ISSUE = "issue1044" - - -def make_comparison_table(): - report = common_setup.ComparativeReport( - algorithm_pairs=[ - (f"{ISSUE}-base-seq-opt-bjolp", f"{ISSUE}-v1-seq-opt-bjolp"), - (f"{ISSUE}-base-seq-opt-bjolp-opt", f"{ISSUE}-v1-seq-opt-bjolp-opt"), - (f"{ISSUE}-base-lm-exhaust", f"{ISSUE}-v1-lm-exhaust"), - ], attributes=ATTRIBUTES, - ) - outfile = os.path.join( - exp.eval_dir, "%s-compare.%s" % (exp.name, report.output_format) - ) - report(exp.eval_dir, outfile) - exp.add_report(report) - -REVISIONS = [ - f"{ISSUE}-base", - f"{ISSUE}-v1", -] - -CONFIGS = [ - IssueConfig("seq-opt-bjolp", [], - driver_options=["--alias", "seq-opt-bjolp", - "--overall-time-limit", "5m"]), - IssueConfig("lm-exhaust", - ["--evaluator", 
"lmc=lmcount(lm_exhaust(),admissible=true)", - "--search", "astar(lmc,lazy_evaluator=lmc)"], - driver_options=["--overall-time-limit", "5m"]), - IssueConfig("seq-opt-bjolp-opt", - ["--evaluator", - "lmc=lmcount(lm_merged([lm_rhw(),lm_hm(m=1)]),admissible=true, optimal=true)", - "--search", "astar(lmc,lazy_evaluator=lmc)"], - driver_options=["--overall-time-limit", "5m"]), -] - -BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"] - -if common_setup.is_running_on_cluster(): - SUITE = common_setup.DEFAULT_OPTIMAL_SUITE - ENVIRONMENT = BaselSlurmEnvironment( - partition="infai_2", - email="clemens.buechner@unibas.ch", - setup="export PATH=/scicore/soft/apps/binutils/2.32-GCCcore-8.3.0/bin:/scicore/soft/apps/CMake/3.15.3-GCCcore-8.3.0/bin:/scicore/soft/apps/cURL/7.66.0-GCCcore-8.3.0/bin:/scicore/soft/apps/bzip2/1.0.8-GCCcore-8.3.0/bin:/scicore/soft/apps/ncurses/6.1-GCCcore-8.3.0/bin:/scicore/soft/apps/GCCcore/8.3.0/bin:/infai/buecle01/local:/export/soft/lua_lmod/centos7/lmod/lmod/libexec:/usr/local/bin:/usr/bin:/usr/local/sbin:/usr/sbin:$PATH\nexport LD_LIBRARY_PATH=/scicore/soft/apps/binutils/2.32-GCCcore-8.3.0/lib:/scicore/soft/apps/cURL/7.66.0-GCCcore-8.3.0/lib:/scicore/soft/apps/bzip2/1.0.8-GCCcore-8.3.0/lib:/scicore/soft/apps/zlib/1.2.11-GCCcore-8.3.0/lib:/scicore/soft/apps/ncurses/6.1-GCCcore-8.3.0/lib:/scicore/soft/apps/GCCcore/8.3.0/lib64:/scicore/soft/apps/GCCcore/8.3.0/lib", - ) -else: - SUITE = common_setup.IssueExperiment.DEFAULT_TEST_SUITE - ENVIRONMENT = LocalEnvironment(processes=2) - -exp = common_setup.IssueExperiment( - revisions=REVISIONS, - configs=CONFIGS, - environment=ENVIRONMENT, -) - -exp.add_suite(BENCHMARKS_DIR, SUITE) - -exp.add_parser(exp.EXITCODE_PARSER) -exp.add_parser(exp.PLANNER_PARSER) -exp.add_parser(exp.SINGLE_SEARCH_PARSER) -exp.add_parser("landmark_parser.py") - -ATTRIBUTES = IssueExperiment.DEFAULT_TABLE_ATTRIBUTES + [ - Attribute("landmarks", min_wins=False), - Attribute("landmarks_disjunctive", min_wins=False), - 
Attribute("landmarks_conjunctiv", min_wins=False), - Attribute("orderings", min_wins=False), - Attribute("lmgraph_generation_time", min_wins=True), -] - -exp.add_step("build", exp.build) -exp.add_step("start", exp.start_runs) -exp.add_fetcher(name="fetch") -exp.add_step("comparison table", make_comparison_table) -exp.add_parse_again_step() - -exp.run_steps() - diff --git a/experiments/issue1044/v1-satisficing.py b/experiments/issue1044/v1-satisficing.py deleted file mode 100755 index 84f95525b3..0000000000 --- a/experiments/issue1044/v1-satisficing.py +++ /dev/null @@ -1,92 +0,0 @@ -#! /usr/bin/env python -# -*- coding: utf-8 -*- - -import common_setup -import os - -from common_setup import IssueConfig, IssueExperiment -from lab.environments import LocalEnvironment, BaselSlurmEnvironment -from lab.reports import Attribute - -ISSUE = "issue1044" - - -def make_comparison_table(): - report = common_setup.ComparativeReport( - algorithm_pairs=[ - (f"{ISSUE}-base-lama-first", f"{ISSUE}-v1-lama-first"), - (f"{ISSUE}-base-lama-first-pref", f"{ISSUE}-v1-lama-first-pref"), - (f"{ISSUE}-base-lm-zg", f"{ISSUE}-v1-lm-zg"), - ], attributes=ATTRIBUTES, - ) - outfile = os.path.join( - exp.eval_dir, "%s-compare.%s" % (exp.name, report.output_format) - ) - report(exp.eval_dir, outfile) - exp.add_report(report) - -REVISIONS = [ - f"{ISSUE}-base", - f"{ISSUE}-v1", -] - -CONFIGS = [ - IssueConfig("lama-first", [], - driver_options=["--alias", "lama-first", - "--overall-time-limit", "5m"]), - IssueConfig( - "lama-first-pref", ["--evaluator", - "hlm=lmcount(lm_factory=lm_reasonable_orders_hps(" - "lm_rhw()),transform=adapt_costs(one),pref=true)", - "--evaluator", "hff=ff(transform=adapt_costs(one))", - "--search", - """lazy_greedy([hff,hlm],preferred=[hff,hlm], cost_type=one,reopen_closed=false)"""], - driver_options=["--overall-time-limit", "5m"], - ), - IssueConfig("lm-zg", ["--search", "eager_greedy([lmcount(lm_zg())])"], - driver_options=["--overall-time-limit", "5m"]), -] - 
-BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"] - -if common_setup.is_running_on_cluster(): - SUITE = common_setup.DEFAULT_SATISFICING_SUITE - ENVIRONMENT = BaselSlurmEnvironment( - partition="infai_2", - email="clemens.buechner@unibas.ch", - setup="export PATH=/scicore/soft/apps/binutils/2.32-GCCcore-8.3.0/bin:/scicore/soft/apps/CMake/3.15.3-GCCcore-8.3.0/bin:/scicore/soft/apps/cURL/7.66.0-GCCcore-8.3.0/bin:/scicore/soft/apps/bzip2/1.0.8-GCCcore-8.3.0/bin:/scicore/soft/apps/ncurses/6.1-GCCcore-8.3.0/bin:/scicore/soft/apps/GCCcore/8.3.0/bin:/infai/buecle01/local:/export/soft/lua_lmod/centos7/lmod/lmod/libexec:/usr/local/bin:/usr/bin:/usr/local/sbin:/usr/sbin:$PATH\nexport LD_LIBRARY_PATH=/scicore/soft/apps/binutils/2.32-GCCcore-8.3.0/lib:/scicore/soft/apps/cURL/7.66.0-GCCcore-8.3.0/lib:/scicore/soft/apps/bzip2/1.0.8-GCCcore-8.3.0/lib:/scicore/soft/apps/zlib/1.2.11-GCCcore-8.3.0/lib:/scicore/soft/apps/ncurses/6.1-GCCcore-8.3.0/lib:/scicore/soft/apps/GCCcore/8.3.0/lib64:/scicore/soft/apps/GCCcore/8.3.0/lib", - ) -else: - SUITE = common_setup.IssueExperiment.DEFAULT_TEST_SUITE - ENVIRONMENT = LocalEnvironment(processes=2) - -exp = common_setup.IssueExperiment( - revisions=REVISIONS, - configs=CONFIGS, - environment=ENVIRONMENT, -) - -exp.add_suite(BENCHMARKS_DIR, SUITE) - -exp.add_parser(exp.ANYTIME_SEARCH_PARSER) -exp.add_parser(exp.EXITCODE_PARSER) -exp.add_parser(exp.PLANNER_PARSER) -exp.add_parser(exp.SINGLE_SEARCH_PARSER) -exp.add_parser("landmark_parser.py") - -ATTRIBUTES = IssueExperiment.DEFAULT_TABLE_ATTRIBUTES + [ - Attribute("landmarks", min_wins=False), - Attribute("landmarks_disjunctive", min_wins=False), - Attribute("landmarks_conjunctiv", min_wins=False), - Attribute("orderings", min_wins=False), - Attribute("lmgraph_generation_time", min_wins=True), -] - -exp.add_step("build", exp.build) -exp.add_step("start", exp.start_runs) -exp.add_fetcher(name="fetch") -exp.add_step("comparison table", make_comparison_table) -exp.add_parse_again_step() - 
-exp.run_steps() - diff --git a/experiments/issue1044/v2-optimal.py b/experiments/issue1044/v2-optimal.py deleted file mode 100755 index a770e6f7ae..0000000000 --- a/experiments/issue1044/v2-optimal.py +++ /dev/null @@ -1,89 +0,0 @@ -#! /usr/bin/env python -# -*- coding: utf-8 -*- - -import common_setup -import os - -from common_setup import IssueConfig, IssueExperiment -from lab.environments import LocalEnvironment, BaselSlurmEnvironment -from lab.reports import Attribute - -ISSUE = "issue1044" - - -def make_comparison_table(): - report = common_setup.ComparativeReport( - algorithm_pairs=[ - (f"{ISSUE}-base-seq-opt-bjolp", f"{ISSUE}-v2-seq-opt-bjolp"), - (f"{ISSUE}-base-seq-opt-bjolp-opt", f"{ISSUE}-v2-seq-opt-bjolp-opt"), - (f"{ISSUE}-base-lm-exhaust", f"{ISSUE}-v2-lm-exhaust"), - ], attributes=ATTRIBUTES, - ) - outfile = os.path.join( - exp.eval_dir, "%s-compare.%s" % (exp.name, report.output_format) - ) - report(exp.eval_dir, outfile) - exp.add_report(report) - -REVISIONS = [ - f"{ISSUE}-base", - f"{ISSUE}-v2", -] - -CONFIGS = [ - IssueConfig("seq-opt-bjolp", [], - driver_options=["--alias", "seq-opt-bjolp", - "--overall-time-limit", "5m"]), - IssueConfig("lm-exhaust", - ["--evaluator", "lmc=lmcount(lm_exhaust(),admissible=true)", - "--search", "astar(lmc,lazy_evaluator=lmc)"], - driver_options=["--overall-time-limit", "5m"]), - IssueConfig("seq-opt-bjolp-opt", - ["--evaluator", - "lmc=lmcount(lm_merged([lm_rhw(),lm_hm(m=1)]),admissible=true, optimal=true)", - "--search", "astar(lmc,lazy_evaluator=lmc)"], - driver_options=["--overall-time-limit", "5m"]), -] - -BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"] - -if common_setup.is_running_on_cluster(): - SUITE = common_setup.DEFAULT_OPTIMAL_SUITE - ENVIRONMENT = BaselSlurmEnvironment( - partition="infai_2", - email="clemens.buechner@unibas.ch", - setup="export 
PATH=/scicore/soft/apps/binutils/2.32-GCCcore-8.3.0/bin:/scicore/soft/apps/CMake/3.15.3-GCCcore-8.3.0/bin:/scicore/soft/apps/cURL/7.66.0-GCCcore-8.3.0/bin:/scicore/soft/apps/bzip2/1.0.8-GCCcore-8.3.0/bin:/scicore/soft/apps/ncurses/6.1-GCCcore-8.3.0/bin:/scicore/soft/apps/GCCcore/8.3.0/bin:/infai/buecle01/local:/export/soft/lua_lmod/centos7/lmod/lmod/libexec:/usr/local/bin:/usr/bin:/usr/local/sbin:/usr/sbin:$PATH\nexport LD_LIBRARY_PATH=/scicore/soft/apps/binutils/2.32-GCCcore-8.3.0/lib:/scicore/soft/apps/cURL/7.66.0-GCCcore-8.3.0/lib:/scicore/soft/apps/bzip2/1.0.8-GCCcore-8.3.0/lib:/scicore/soft/apps/zlib/1.2.11-GCCcore-8.3.0/lib:/scicore/soft/apps/ncurses/6.1-GCCcore-8.3.0/lib:/scicore/soft/apps/GCCcore/8.3.0/lib64:/scicore/soft/apps/GCCcore/8.3.0/lib", - ) -else: - SUITE = common_setup.IssueExperiment.DEFAULT_TEST_SUITE - ENVIRONMENT = LocalEnvironment(processes=2) - -exp = common_setup.IssueExperiment( - revisions=REVISIONS, - configs=CONFIGS, - environment=ENVIRONMENT, -) - -exp.add_suite(BENCHMARKS_DIR, SUITE) - -exp.add_parser(exp.EXITCODE_PARSER) -exp.add_parser(exp.PLANNER_PARSER) -exp.add_parser(exp.SINGLE_SEARCH_PARSER) -exp.add_parser("landmark_parser.py") - -ATTRIBUTES = IssueExperiment.DEFAULT_TABLE_ATTRIBUTES + [ - Attribute("landmarks", min_wins=False), - Attribute("landmarks_disjunctive", min_wins=False), - Attribute("landmarks_conjunctiv", min_wins=False), - Attribute("orderings", min_wins=False), - Attribute("lmgraph_generation_time", min_wins=True), -] - -exp.add_step("build", exp.build) -exp.add_step("start", exp.start_runs) -exp.add_fetcher(name="fetch") -exp.add_step("comparison table", make_comparison_table) -exp.add_parse_again_step() - -exp.run_steps() - diff --git a/experiments/issue1044/v2-satisficing.py b/experiments/issue1044/v2-satisficing.py deleted file mode 100755 index af5c1b95b0..0000000000 --- a/experiments/issue1044/v2-satisficing.py +++ /dev/null @@ -1,92 +0,0 @@ -#! 
/usr/bin/env python -# -*- coding: utf-8 -*- - -import common_setup -import os - -from common_setup import IssueConfig, IssueExperiment -from lab.environments import LocalEnvironment, BaselSlurmEnvironment -from lab.reports import Attribute - -ISSUE = "issue1044" - - -def make_comparison_table(): - report = common_setup.ComparativeReport( - algorithm_pairs=[ - (f"{ISSUE}-base-lama-first", f"{ISSUE}-v2-lama-first"), - (f"{ISSUE}-base-lama-first-pref", f"{ISSUE}-v2-lama-first-pref"), - (f"{ISSUE}-base-lm-zg", f"{ISSUE}-v2-lm-zg"), - ], attributes=ATTRIBUTES, - ) - outfile = os.path.join( - exp.eval_dir, "%s-compare.%s" % (exp.name, report.output_format) - ) - report(exp.eval_dir, outfile) - exp.add_report(report) - -REVISIONS = [ - f"{ISSUE}-base", - f"{ISSUE}-v2", -] - -CONFIGS = [ - IssueConfig("lama-first", [], - driver_options=["--alias", "lama-first", - "--overall-time-limit", "5m"]), - IssueConfig( - "lama-first-pref", ["--evaluator", - "hlm=lmcount(lm_factory=lm_reasonable_orders_hps(" - "lm_rhw()),transform=adapt_costs(one),pref=true)", - "--evaluator", "hff=ff(transform=adapt_costs(one))", - "--search", - """lazy_greedy([hff,hlm],preferred=[hff,hlm], cost_type=one,reopen_closed=false)"""], - driver_options=["--overall-time-limit", "5m"], - ), - IssueConfig("lm-zg", ["--search", "eager_greedy([lmcount(lm_zg())])"], - driver_options=["--overall-time-limit", "5m"]), -] - -BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"] - -if common_setup.is_running_on_cluster(): - SUITE = common_setup.DEFAULT_SATISFICING_SUITE - ENVIRONMENT = BaselSlurmEnvironment( - partition="infai_2", - email="clemens.buechner@unibas.ch", - setup="export 
PATH=/scicore/soft/apps/binutils/2.32-GCCcore-8.3.0/bin:/scicore/soft/apps/CMake/3.15.3-GCCcore-8.3.0/bin:/scicore/soft/apps/cURL/7.66.0-GCCcore-8.3.0/bin:/scicore/soft/apps/bzip2/1.0.8-GCCcore-8.3.0/bin:/scicore/soft/apps/ncurses/6.1-GCCcore-8.3.0/bin:/scicore/soft/apps/GCCcore/8.3.0/bin:/infai/buecle01/local:/export/soft/lua_lmod/centos7/lmod/lmod/libexec:/usr/local/bin:/usr/bin:/usr/local/sbin:/usr/sbin:$PATH\nexport LD_LIBRARY_PATH=/scicore/soft/apps/binutils/2.32-GCCcore-8.3.0/lib:/scicore/soft/apps/cURL/7.66.0-GCCcore-8.3.0/lib:/scicore/soft/apps/bzip2/1.0.8-GCCcore-8.3.0/lib:/scicore/soft/apps/zlib/1.2.11-GCCcore-8.3.0/lib:/scicore/soft/apps/ncurses/6.1-GCCcore-8.3.0/lib:/scicore/soft/apps/GCCcore/8.3.0/lib64:/scicore/soft/apps/GCCcore/8.3.0/lib", - ) -else: - SUITE = common_setup.IssueExperiment.DEFAULT_TEST_SUITE - ENVIRONMENT = LocalEnvironment(processes=2) - -exp = common_setup.IssueExperiment( - revisions=REVISIONS, - configs=CONFIGS, - environment=ENVIRONMENT, -) - -exp.add_suite(BENCHMARKS_DIR, SUITE) - -exp.add_parser(exp.ANYTIME_SEARCH_PARSER) -exp.add_parser(exp.EXITCODE_PARSER) -exp.add_parser(exp.PLANNER_PARSER) -exp.add_parser(exp.SINGLE_SEARCH_PARSER) -exp.add_parser("landmark_parser.py") - -ATTRIBUTES = IssueExperiment.DEFAULT_TABLE_ATTRIBUTES + [ - Attribute("landmarks", min_wins=False), - Attribute("landmarks_disjunctive", min_wins=False), - Attribute("landmarks_conjunctiv", min_wins=False), - Attribute("orderings", min_wins=False), - Attribute("lmgraph_generation_time", min_wins=True), -] - -exp.add_step("build", exp.build) -exp.add_step("start", exp.start_runs) -exp.add_fetcher(name="fetch") -exp.add_step("comparison table", make_comparison_table) -exp.add_parse_again_step() - -exp.run_steps() - diff --git a/experiments/issue1045/common_setup.py b/experiments/issue1045/common_setup.py deleted file mode 100644 index befc4d149e..0000000000 --- a/experiments/issue1045/common_setup.py +++ /dev/null @@ -1,395 +0,0 @@ -# -*- coding: utf-8 -*- - 
-import itertools -import os -import platform -import subprocess -import sys - -from lab.experiment import ARGPARSER -from lab import tools - -from downward.experiment import FastDownwardExperiment -from downward.reports.absolute import AbsoluteReport -from downward.reports.compare import ComparativeReport -from downward.reports.scatter import ScatterPlotReport - - -def parse_args(): - ARGPARSER.add_argument( - "--test", - choices=["yes", "no", "auto"], - default="auto", - dest="test_run", - help="test experiment locally on a small suite if --test=yes or " - "--test=auto and we are not on a cluster") - return ARGPARSER.parse_args() - -ARGS = parse_args() - - -DEFAULT_OPTIMAL_SUITE = [ - 'agricola-opt18-strips', 'airport', 'barman-opt11-strips', - 'barman-opt14-strips', 'blocks', 'childsnack-opt14-strips', - 'data-network-opt18-strips', 'depot', 'driverlog', - 'elevators-opt08-strips', 'elevators-opt11-strips', - 'floortile-opt11-strips', 'floortile-opt14-strips', 'freecell', - 'ged-opt14-strips', 'grid', 'gripper', 'hiking-opt14-strips', - 'logistics00', 'logistics98', 'miconic', 'movie', 'mprime', - 'mystery', 'nomystery-opt11-strips', 'openstacks-opt08-strips', - 'openstacks-opt11-strips', 'openstacks-opt14-strips', - 'openstacks-strips', 'organic-synthesis-opt18-strips', - 'organic-synthesis-split-opt18-strips', 'parcprinter-08-strips', - 'parcprinter-opt11-strips', 'parking-opt11-strips', - 'parking-opt14-strips', 'pathways', 'pegsol-08-strips', - 'pegsol-opt11-strips', 'petri-net-alignment-opt18-strips', - 'pipesworld-notankage', 'pipesworld-tankage', 'psr-small', 'rovers', - 'satellite', 'scanalyzer-08-strips', 'scanalyzer-opt11-strips', - 'snake-opt18-strips', 'sokoban-opt08-strips', - 'sokoban-opt11-strips', 'spider-opt18-strips', 'storage', - 'termes-opt18-strips', 'tetris-opt14-strips', - 'tidybot-opt11-strips', 'tidybot-opt14-strips', 'tpp', - 'transport-opt08-strips', 'transport-opt11-strips', - 'transport-opt14-strips', 'trucks-strips', 
'visitall-opt11-strips', - 'visitall-opt14-strips', 'woodworking-opt08-strips', - 'woodworking-opt11-strips', 'zenotravel'] - -DEFAULT_SATISFICING_SUITE = [ - 'agricola-sat18-strips', 'airport', 'assembly', - 'barman-sat11-strips', 'barman-sat14-strips', 'blocks', - 'caldera-sat18-adl', 'caldera-split-sat18-adl', 'cavediving-14-adl', - 'childsnack-sat14-strips', 'citycar-sat14-adl', - 'data-network-sat18-strips', 'depot', 'driverlog', - 'elevators-sat08-strips', 'elevators-sat11-strips', - 'flashfill-sat18-adl', 'floortile-sat11-strips', - 'floortile-sat14-strips', 'freecell', 'ged-sat14-strips', 'grid', - 'gripper', 'hiking-sat14-strips', 'logistics00', 'logistics98', - 'maintenance-sat14-adl', 'miconic', 'miconic-fulladl', - 'miconic-simpleadl', 'movie', 'mprime', 'mystery', - 'nomystery-sat11-strips', 'nurikabe-sat18-adl', 'openstacks', - 'openstacks-sat08-adl', 'openstacks-sat08-strips', - 'openstacks-sat11-strips', 'openstacks-sat14-strips', - 'openstacks-strips', 'optical-telegraphs', - 'organic-synthesis-sat18-strips', - 'organic-synthesis-split-sat18-strips', 'parcprinter-08-strips', - 'parcprinter-sat11-strips', 'parking-sat11-strips', - 'parking-sat14-strips', 'pathways', - 'pegsol-08-strips', 'pegsol-sat11-strips', 'philosophers', - 'pipesworld-notankage', 'pipesworld-tankage', 'psr-large', - 'psr-middle', 'psr-small', 'rovers', 'satellite', - 'scanalyzer-08-strips', 'scanalyzer-sat11-strips', 'schedule', - 'settlers-sat18-adl', 'snake-sat18-strips', 'sokoban-sat08-strips', - 'sokoban-sat11-strips', 'spider-sat18-strips', 'storage', - 'termes-sat18-strips', 'tetris-sat14-strips', - 'thoughtful-sat14-strips', 'tidybot-sat11-strips', 'tpp', - 'transport-sat08-strips', 'transport-sat11-strips', - 'transport-sat14-strips', 'trucks', 'trucks-strips', - 'visitall-sat11-strips', 'visitall-sat14-strips', - 'woodworking-sat08-strips', 'woodworking-sat11-strips', - 'zenotravel'] - - -def get_script(): - """Get file name of main script.""" - return 
tools.get_script_path() - - -def get_script_dir(): - """Get directory of main script. - - Usually a relative directory (depends on how it was called by the user.)""" - return os.path.dirname(get_script()) - - -def get_experiment_name(): - """Get name for experiment. - - Derived from the absolute filename of the main script, e.g. - "/ham/spam/eggs.py" => "spam-eggs".""" - script = os.path.abspath(get_script()) - script_dir = os.path.basename(os.path.dirname(script)) - script_base = os.path.splitext(os.path.basename(script))[0] - return "%s-%s" % (script_dir, script_base) - - -def get_data_dir(): - """Get data dir for the experiment. - - This is the subdirectory "data" of the directory containing - the main script.""" - return os.path.join(get_script_dir(), "data", get_experiment_name()) - - -def get_repo_base(): - """Get base directory of the repository, as an absolute path. - - Search upwards in the directory tree from the main script until a - directory with a subdirectory named ".git" is found. - - Abort if the repo base cannot be found.""" - path = os.path.abspath(get_script_dir()) - while os.path.dirname(path) != path: - if os.path.exists(os.path.join(path, ".git")): - return path - path = os.path.dirname(path) - sys.exit("repo base could not be found") - - -def is_running_on_cluster(): - node = platform.node() - return node.endswith(".scicore.unibas.ch") or node.endswith(".cluster.bc2.ch") - - -def is_test_run(): - return ARGS.test_run == "yes" or ( - ARGS.test_run == "auto" and not is_running_on_cluster()) - - -def get_algo_nick(revision, config_nick): - return "{revision}-{config_nick}".format(**locals()) - - -class IssueConfig(object): - """Hold information about a planner configuration. - - See FastDownwardExperiment.add_algorithm() for documentation of the - constructor's options. 
- - """ - def __init__(self, nick, component_options, - build_options=None, driver_options=None): - self.nick = nick - self.component_options = component_options - self.build_options = build_options - self.driver_options = driver_options - - -class IssueExperiment(FastDownwardExperiment): - """Subclass of FastDownwardExperiment with some convenience features.""" - - DEFAULT_TEST_SUITE = ["depot:p01.pddl", "gripper:prob01.pddl"] - - DEFAULT_TABLE_ATTRIBUTES = [ - "cost", - "coverage", - "error", - "evaluations", - "expansions", - "expansions_until_last_jump", - "initial_h_value", - "generated", - "memory", - "planner_memory", - "planner_time", - "quality", - "run_dir", - "score_evaluations", - "score_expansions", - "score_generated", - "score_memory", - "score_search_time", - "score_total_time", - "search_time", - "total_time", - ] - - DEFAULT_SCATTER_PLOT_ATTRIBUTES = [ - "evaluations", - "expansions", - "expansions_until_last_jump", - "initial_h_value", - "memory", - "search_time", - "total_time", - ] - - PORTFOLIO_ATTRIBUTES = [ - "cost", - "coverage", - "error", - "plan_length", - "run_dir", - ] - - def __init__(self, revisions=None, configs=None, path=None, **kwargs): - """ - - You can either specify both *revisions* and *configs* or none - of them. If they are omitted, you will need to call - exp.add_algorithm() manually. - - If *revisions* is given, it must be a non-empty list of - revision identifiers, which specify which planner versions to - use in the experiment. The same versions are used for - translator, preprocessor and search. :: - - IssueExperiment(revisions=["issue123", "4b3d581643"], ...) - - If *configs* is given, it must be a non-empty list of - IssueConfig objects. :: - - IssueExperiment(..., configs=[ - IssueConfig("ff", ["--search", "eager_greedy(ff())"]), - IssueConfig( - "lama", [], - driver_options=["--alias", "seq-sat-lama-2011"]), - ]) - - If *path* is specified, it must be the path to where the - experiment should be built (e.g. 
- /home/john/experiments/issue123/exp01/). If omitted, the - experiment path is derived automatically from the main - script's filename. Example:: - - script = experiments/issue123/exp01.py --> - path = experiments/issue123/data/issue123-exp01/ - - """ - - path = path or get_data_dir() - - FastDownwardExperiment.__init__(self, path=path, **kwargs) - - if (revisions and not configs) or (not revisions and configs): - raise ValueError( - "please provide either both or none of revisions and configs") - - for rev in revisions: - for config in configs: - self.add_algorithm( - get_algo_nick(rev, config.nick), - get_repo_base(), - rev, - config.component_options, - build_options=config.build_options, - driver_options=config.driver_options) - - self._revisions = revisions - self._configs = configs - - @classmethod - def _is_portfolio(cls, config_nick): - return "fdss" in config_nick - - @classmethod - def get_supported_attributes(cls, config_nick, attributes): - if cls._is_portfolio(config_nick): - return [attr for attr in attributes - if attr in cls.PORTFOLIO_ATTRIBUTES] - return attributes - - def add_absolute_report_step(self, **kwargs): - """Add step that makes an absolute report. - - Absolute reports are useful for experiments that don't compare - revisions. - - The report is written to the experiment evaluation directory. - - All *kwargs* will be passed to the AbsoluteReport class. If the - keyword argument *attributes* is not specified, a default list - of attributes is used. :: - - exp.add_absolute_report_step(attributes=["coverage"]) - - """ - kwargs.setdefault("attributes", self.DEFAULT_TABLE_ATTRIBUTES) - report = AbsoluteReport(**kwargs) - outfile = os.path.join( - self.eval_dir, - get_experiment_name() + "." 
+ report.output_format) - self.add_report(report, outfile=outfile) - self.add_step( - 'publish-absolute-report', subprocess.call, ['publish', outfile]) - - def add_comparison_table_step(self, **kwargs): - """Add a step that makes pairwise revision comparisons. - - Create comparative reports for all pairs of Fast Downward - revisions. Each report pairs up the runs of the same config and - lists the two absolute attribute values and their difference - for all attributes in kwargs["attributes"]. - - All *kwargs* will be passed to the CompareConfigsReport class. - If the keyword argument *attributes* is not specified, a - default list of attributes is used. :: - - exp.add_comparison_table_step(attributes=["coverage"]) - - """ - kwargs.setdefault("attributes", self.DEFAULT_TABLE_ATTRIBUTES) - - def make_comparison_tables(): - for rev1, rev2 in itertools.combinations(self._revisions, 2): - compared_configs = [] - for config in self._configs: - config_nick = config.nick - compared_configs.append( - ("%s-%s" % (rev1, config_nick), - "%s-%s" % (rev2, config_nick), - "Diff (%s)" % config_nick)) - report = ComparativeReport(compared_configs, **kwargs) - outfile = os.path.join( - self.eval_dir, - "%s-%s-%s-compare.%s" % ( - self.name, rev1, rev2, report.output_format)) - report(self.eval_dir, outfile) - - def publish_comparison_tables(): - for rev1, rev2 in itertools.combinations(self._revisions, 2): - outfile = os.path.join( - self.eval_dir, - "%s-%s-%s-compare.html" % (self.name, rev1, rev2)) - subprocess.call(["publish", outfile]) - - self.add_step("make-comparison-tables", make_comparison_tables) - self.add_step( - "publish-comparison-tables", publish_comparison_tables) - - def add_scatter_plot_step(self, relative=False, attributes=None, additional=[]): - """Add step creating (relative) scatter plots for all revision pairs. - - Create a scatter plot for each combination of attribute, - configuration and revisions pair. 
If *attributes* is not - specified, a list of common scatter plot attributes is used. - For portfolios all attributes except "cost", "coverage" and - "plan_length" will be ignored. :: - - exp.add_scatter_plot_step(attributes=["expansions"]) - - """ - if relative: - scatter_dir = os.path.join(self.eval_dir, "scatter-relative") - step_name = "make-relative-scatter-plots" - else: - scatter_dir = os.path.join(self.eval_dir, "scatter-absolute") - step_name = "make-absolute-scatter-plots" - if attributes is None: - attributes = self.DEFAULT_SCATTER_PLOT_ATTRIBUTES - - def make_scatter_plot(config_nick, rev1, rev2, attribute, config_nick2=None): - name = "-".join([self.name, rev1, rev2, attribute, config_nick]) - if config_nick2 is not None: - name += "-" + config_nick2 - print("Make scatter plot for", name) - algo1 = get_algo_nick(rev1, config_nick) - algo2 = get_algo_nick(rev2, config_nick if config_nick2 is None else config_nick2) - report = ScatterPlotReport( - filter_algorithm=[algo1, algo2], - attributes=[attribute], - relative=relative, - get_category=lambda run1, run2: run1["domain"]) - report( - self.eval_dir, - os.path.join(scatter_dir, rev1 + "-" + rev2, name)) - - def make_scatter_plots(): - for config in self._configs: - for rev1, rev2 in itertools.combinations(self._revisions, 2): - for attribute in self.get_supported_attributes( - config.nick, attributes): - make_scatter_plot(config.nick, rev1, rev2, attribute) - for nick1, nick2, rev1, rev2, attribute in additional: - make_scatter_plot(nick1, rev1, rev2, attribute, config_nick2=nick2) - - self.add_step(step_name, lambda: make_scatter_plots) diff --git a/experiments/issue1045/landmark_parser.py b/experiments/issue1045/landmark_parser.py deleted file mode 100755 index d32ec08819..0000000000 --- a/experiments/issue1045/landmark_parser.py +++ /dev/null @@ -1,29 +0,0 @@ -#! 
/usr/bin/env python - -import re - -from lab.parser import Parser - -parser = Parser() -parser.add_pattern( - "lmgraph_generation_time", - r"Landmark graph generation time: (.+)s", - type=float) -parser.add_pattern( - "landmarks", - r"Landmark graph contains (\d+) landmarks, of which \d+ are disjunctive and \d+ are conjunctive.", - type=int) -parser.add_pattern( - "landmarks_disjunctive", - r"Landmark graph contains \d+ landmarks, of which (\d+) are disjunctive and \d+ are conjunctive.", - type=int) -parser.add_pattern( - "landmarks_conjunctive", - r"Landmark graph contains \d+ landmarks, of which \d+ are disjunctive and (\d+) are conjunctive.", - type=int) -parser.add_pattern( - "orderings", - r"Landmark graph contains (\d+) orderings.", - type=int) - -parser.parse() diff --git a/experiments/issue1045/requirements.txt b/experiments/issue1045/requirements.txt deleted file mode 100644 index d8deddd8ff..0000000000 --- a/experiments/issue1045/requirements.txt +++ /dev/null @@ -1,23 +0,0 @@ -certifi==2019.9.11 -chardet==3.0.4 -cplex==12.10.0.0 -cycler==0.11.0 -docloud==1.0.375 -docplex==2.11.176 -enum34==1.1.6 -fonttools==4.31.2 -idna==2.8 -kiwisolver==1.4.2 -lab==7.0 -matplotlib==3.5.1 -numpy==1.22.2 -packaging==21.3 -Pillow==9.0.1 -pyparsing==3.0.7 -python-dateutil==2.8.2 -requests==2.22.0 -simplejson==3.17.6 -six==1.12.0 -txt2tags==3.7 -typing_extensions==4.1.1 -urllib3==1.26.5 diff --git a/experiments/issue1045/v1-optimal.py b/experiments/issue1045/v1-optimal.py deleted file mode 100755 index f5adb23a92..0000000000 --- a/experiments/issue1045/v1-optimal.py +++ /dev/null @@ -1,89 +0,0 @@ -#! 
/usr/bin/env python -# -*- coding: utf-8 -*- - -import common_setup -import os - -from common_setup import IssueConfig, IssueExperiment -from lab.environments import LocalEnvironment, BaselSlurmEnvironment -from lab.reports import Attribute - -ISSUE = "issue1045" - - -def make_comparison_table(): - report = common_setup.ComparativeReport( - algorithm_pairs=[ - (f"{ISSUE}-base-seq-opt-bjolp", f"{ISSUE}-v1-seq-opt-bjolp"), - (f"{ISSUE}-base-seq-opt-bjolp-opt", f"{ISSUE}-v1-seq-opt-bjolp-opt"), - (f"{ISSUE}-base-lm-exhaust", f"{ISSUE}-v1-lm-exhaust"), - ], attributes=ATTRIBUTES, - ) - outfile = os.path.join( - exp.eval_dir, "%s-compare.%s" % (exp.name, report.output_format) - ) - report(exp.eval_dir, outfile) - exp.add_report(report) - -REVISIONS = [ - f"{ISSUE}-base", - f"{ISSUE}-v1", -] - -CONFIGS = [ - IssueConfig("seq-opt-bjolp", [], - driver_options=["--alias", "seq-opt-bjolp", - "--overall-time-limit", "5m"]), - IssueConfig("lm-exhaust", - ["--evaluator", "lmc=lmcount(lm_exhaust(),admissible=true)", - "--search", "astar(lmc,lazy_evaluator=lmc)"], - driver_options=["--overall-time-limit", "5m"]), - IssueConfig("seq-opt-bjolp-opt", - ["--evaluator", - "lmc=lmcount(lm_merged([lm_rhw(),lm_hm(m=1)]),admissible=true, optimal=true)", - "--search", "astar(lmc,lazy_evaluator=lmc)"], - driver_options=["--overall-time-limit", "5m"]), -] - -BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"] - -if common_setup.is_running_on_cluster(): - SUITE = common_setup.DEFAULT_OPTIMAL_SUITE - ENVIRONMENT = BaselSlurmEnvironment( - partition="infai_2", - email="salome.eriksson@unibas.ch", - setup="export 
PATH=/scicore/soft/apps/binutils/2.32-GCCcore-8.3.0/bin:/scicore/soft/apps/CMake/3.15.3-GCCcore-8.3.0/bin:/scicore/soft/apps/cURL/7.66.0-GCCcore-8.3.0/bin:/scicore/soft/apps/bzip2/1.0.8-GCCcore-8.3.0/bin:/scicore/soft/apps/ncurses/6.1-GCCcore-8.3.0/bin:/scicore/soft/apps/GCCcore/8.3.0/bin:/infai/buecle01/local:/export/soft/lua_lmod/centos7/lmod/lmod/libexec:/usr/local/bin:/usr/bin:/usr/local/sbin:/usr/sbin:$PATH\nexport LD_LIBRARY_PATH=/scicore/soft/apps/binutils/2.32-GCCcore-8.3.0/lib:/scicore/soft/apps/cURL/7.66.0-GCCcore-8.3.0/lib:/scicore/soft/apps/bzip2/1.0.8-GCCcore-8.3.0/lib:/scicore/soft/apps/zlib/1.2.11-GCCcore-8.3.0/lib:/scicore/soft/apps/ncurses/6.1-GCCcore-8.3.0/lib:/scicore/soft/apps/GCCcore/8.3.0/lib64:/scicore/soft/apps/GCCcore/8.3.0/lib", - ) -else: - SUITE = common_setup.IssueExperiment.DEFAULT_TEST_SUITE - ENVIRONMENT = LocalEnvironment(processes=2) - -exp = common_setup.IssueExperiment( - revisions=REVISIONS, - configs=CONFIGS, - environment=ENVIRONMENT, -) - -exp.add_suite(BENCHMARKS_DIR, SUITE) - -exp.add_parser(exp.EXITCODE_PARSER) -exp.add_parser(exp.PLANNER_PARSER) -exp.add_parser(exp.SINGLE_SEARCH_PARSER) -exp.add_parser("landmark_parser.py") - -ATTRIBUTES = IssueExperiment.DEFAULT_TABLE_ATTRIBUTES + [ - Attribute("landmarks", min_wins=False), - Attribute("landmarks_disjunctive", min_wins=False), - Attribute("landmarks_conjunctive", min_wins=False), - Attribute("orderings", min_wins=False), - Attribute("lmgraph_generation_time", min_wins=True), -] - -exp.add_step("build", exp.build) -exp.add_step("start", exp.start_runs) -exp.add_fetcher(name="fetch") -exp.add_step("comparison table", make_comparison_table) -exp.add_parse_again_step() - -exp.run_steps() - diff --git a/experiments/issue1045/v1-satisficing.py b/experiments/issue1045/v1-satisficing.py deleted file mode 100755 index 861d46ae0a..0000000000 --- a/experiments/issue1045/v1-satisficing.py +++ /dev/null @@ -1,92 +0,0 @@ -#! 
/usr/bin/env python -# -*- coding: utf-8 -*- - -import common_setup -import os - -from common_setup import IssueConfig, IssueExperiment -from lab.environments import LocalEnvironment, BaselSlurmEnvironment -from lab.reports import Attribute - -ISSUE = "issue1045" - - -def make_comparison_table(): - report = common_setup.ComparativeReport( - algorithm_pairs=[ - (f"{ISSUE}-base-lama-first", f"{ISSUE}-v1-lama-first"), - (f"{ISSUE}-base-lama-first-pref", f"{ISSUE}-v1-lama-first-pref"), - (f"{ISSUE}-base-lm-zg", f"{ISSUE}-v1-lm-zg"), - ], attributes=ATTRIBUTES, - ) - outfile = os.path.join( - exp.eval_dir, "%s-compare.%s" % (exp.name, report.output_format) - ) - report(exp.eval_dir, outfile) - exp.add_report(report) - -REVISIONS = [ - f"{ISSUE}-base", - f"{ISSUE}-v1", -] - -CONFIGS = [ - IssueConfig("lama-first", [], - driver_options=["--alias", "lama-first", - "--overall-time-limit", "5m"]), - IssueConfig( - "lama-first-pref", ["--evaluator", - "hlm=lmcount(lm_factory=lm_reasonable_orders_hps(" - "lm_rhw()),transform=adapt_costs(one),pref=true)", - "--evaluator", "hff=ff(transform=adapt_costs(one))", - "--search", - """lazy_greedy([hff,hlm],preferred=[hff,hlm], cost_type=one,reopen_closed=false)"""], - driver_options=["--overall-time-limit", "5m"], - ), - IssueConfig("lm-zg", ["--search", "eager_greedy([lmcount(lm_zg())])"], - driver_options=["--overall-time-limit", "5m"]), -] - -BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"] - -if common_setup.is_running_on_cluster(): - SUITE = common_setup.DEFAULT_SATISFICING_SUITE - ENVIRONMENT = BaselSlurmEnvironment( - partition="infai_2", - email="salome.eriksson@unibas.ch", - setup="export 
PATH=/scicore/soft/apps/binutils/2.32-GCCcore-8.3.0/bin:/scicore/soft/apps/CMake/3.15.3-GCCcore-8.3.0/bin:/scicore/soft/apps/cURL/7.66.0-GCCcore-8.3.0/bin:/scicore/soft/apps/bzip2/1.0.8-GCCcore-8.3.0/bin:/scicore/soft/apps/ncurses/6.1-GCCcore-8.3.0/bin:/scicore/soft/apps/GCCcore/8.3.0/bin:/infai/buecle01/local:/export/soft/lua_lmod/centos7/lmod/lmod/libexec:/usr/local/bin:/usr/bin:/usr/local/sbin:/usr/sbin:$PATH\nexport LD_LIBRARY_PATH=/scicore/soft/apps/binutils/2.32-GCCcore-8.3.0/lib:/scicore/soft/apps/cURL/7.66.0-GCCcore-8.3.0/lib:/scicore/soft/apps/bzip2/1.0.8-GCCcore-8.3.0/lib:/scicore/soft/apps/zlib/1.2.11-GCCcore-8.3.0/lib:/scicore/soft/apps/ncurses/6.1-GCCcore-8.3.0/lib:/scicore/soft/apps/GCCcore/8.3.0/lib64:/scicore/soft/apps/GCCcore/8.3.0/lib", - ) -else: - SUITE = common_setup.IssueExperiment.DEFAULT_TEST_SUITE - ENVIRONMENT = LocalEnvironment(processes=2) - -exp = common_setup.IssueExperiment( - revisions=REVISIONS, - configs=CONFIGS, - environment=ENVIRONMENT, -) - -exp.add_suite(BENCHMARKS_DIR, SUITE) - -exp.add_parser(exp.ANYTIME_SEARCH_PARSER) -exp.add_parser(exp.EXITCODE_PARSER) -exp.add_parser(exp.PLANNER_PARSER) -exp.add_parser(exp.SINGLE_SEARCH_PARSER) -exp.add_parser("landmark_parser.py") - -ATTRIBUTES = IssueExperiment.DEFAULT_TABLE_ATTRIBUTES + [ - Attribute("landmarks", min_wins=False), - Attribute("landmarks_disjunctive", min_wins=False), - Attribute("landmarks_conjunctiv", min_wins=False), - Attribute("orderings", min_wins=False), - Attribute("lmgraph_generation_time", min_wins=True), -] - -exp.add_step("build", exp.build) -exp.add_step("start", exp.start_runs) -exp.add_fetcher(name="fetch") -exp.add_step("comparison table", make_comparison_table) -exp.add_parse_again_step() - -exp.run_steps() - diff --git a/experiments/issue1045/v2-optimal.py b/experiments/issue1045/v2-optimal.py deleted file mode 100755 index 13d79b1cc1..0000000000 --- a/experiments/issue1045/v2-optimal.py +++ /dev/null @@ -1,89 +0,0 @@ -#! 
/usr/bin/env python -# -*- coding: utf-8 -*- - -import common_setup -import os - -from common_setup import IssueConfig, IssueExperiment -from lab.environments import LocalEnvironment, BaselSlurmEnvironment -from lab.reports import Attribute - -ISSUE = "issue1045" - - -def make_comparison_table(): - report = common_setup.ComparativeReport( - algorithm_pairs=[ - (f"{ISSUE}-base-seq-opt-bjolp", f"{ISSUE}-v2-seq-opt-bjolp"), - (f"{ISSUE}-base-seq-opt-bjolp-opt", f"{ISSUE}-v2-seq-opt-bjolp-opt"), - (f"{ISSUE}-base-lm-exhaust", f"{ISSUE}-v2-lm-exhaust"), - ], attributes=ATTRIBUTES, - ) - outfile = os.path.join( - exp.eval_dir, "%s-compare.%s" % (exp.name, report.output_format) - ) - report(exp.eval_dir, outfile) - exp.add_report(report) - -REVISIONS = [ - f"{ISSUE}-base", - f"{ISSUE}-v2", -] - -CONFIGS = [ - IssueConfig("seq-opt-bjolp", [], - driver_options=["--alias", "seq-opt-bjolp", - "--overall-time-limit", "5m"]), - IssueConfig("lm-exhaust", - ["--evaluator", "lmc=lmcount(lm_exhaust(),admissible=true)", - "--search", "astar(lmc,lazy_evaluator=lmc)"], - driver_options=["--overall-time-limit", "5m"]), - IssueConfig("seq-opt-bjolp-opt", - ["--evaluator", - "lmc=lmcount(lm_merged([lm_rhw(),lm_hm(m=1)]),admissible=true, optimal=true)", - "--search", "astar(lmc,lazy_evaluator=lmc)"], - driver_options=["--overall-time-limit", "5m"]), -] - -BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"] - -if common_setup.is_running_on_cluster(): - SUITE = common_setup.DEFAULT_OPTIMAL_SUITE - ENVIRONMENT = BaselSlurmEnvironment( - partition="infai_2", - email="salome.eriksson@unibas.ch", - setup="export 
PATH=/scicore/soft/apps/binutils/2.32-GCCcore-8.3.0/bin:/scicore/soft/apps/CMake/3.15.3-GCCcore-8.3.0/bin:/scicore/soft/apps/cURL/7.66.0-GCCcore-8.3.0/bin:/scicore/soft/apps/bzip2/1.0.8-GCCcore-8.3.0/bin:/scicore/soft/apps/ncurses/6.1-GCCcore-8.3.0/bin:/scicore/soft/apps/GCCcore/8.3.0/bin:/infai/buecle01/local:/export/soft/lua_lmod/centos7/lmod/lmod/libexec:/usr/local/bin:/usr/bin:/usr/local/sbin:/usr/sbin:$PATH\nexport LD_LIBRARY_PATH=/scicore/soft/apps/binutils/2.32-GCCcore-8.3.0/lib:/scicore/soft/apps/cURL/7.66.0-GCCcore-8.3.0/lib:/scicore/soft/apps/bzip2/1.0.8-GCCcore-8.3.0/lib:/scicore/soft/apps/zlib/1.2.11-GCCcore-8.3.0/lib:/scicore/soft/apps/ncurses/6.1-GCCcore-8.3.0/lib:/scicore/soft/apps/GCCcore/8.3.0/lib64:/scicore/soft/apps/GCCcore/8.3.0/lib", - ) -else: - SUITE = common_setup.IssueExperiment.DEFAULT_TEST_SUITE - ENVIRONMENT = LocalEnvironment(processes=2) - -exp = common_setup.IssueExperiment( - revisions=REVISIONS, - configs=CONFIGS, - environment=ENVIRONMENT, -) - -exp.add_suite(BENCHMARKS_DIR, SUITE) - -exp.add_parser(exp.EXITCODE_PARSER) -exp.add_parser(exp.PLANNER_PARSER) -exp.add_parser(exp.SINGLE_SEARCH_PARSER) -exp.add_parser("landmark_parser.py") - -ATTRIBUTES = IssueExperiment.DEFAULT_TABLE_ATTRIBUTES + [ - Attribute("landmarks", min_wins=False), - Attribute("landmarks_disjunctive", min_wins=False), - Attribute("landmarks_conjunctive", min_wins=False), - Attribute("orderings", min_wins=False), - Attribute("lmgraph_generation_time", min_wins=True), -] - -exp.add_step("build", exp.build) -exp.add_step("start", exp.start_runs) -exp.add_fetcher(name="fetch") -exp.add_step("comparison table", make_comparison_table) -exp.add_parse_again_step() - -exp.run_steps() - diff --git a/experiments/issue1045/v2-satisficing.py b/experiments/issue1045/v2-satisficing.py deleted file mode 100755 index a1c17bee01..0000000000 --- a/experiments/issue1045/v2-satisficing.py +++ /dev/null @@ -1,92 +0,0 @@ -#! 
/usr/bin/env python -# -*- coding: utf-8 -*- - -import common_setup -import os - -from common_setup import IssueConfig, IssueExperiment -from lab.environments import LocalEnvironment, BaselSlurmEnvironment -from lab.reports import Attribute - -ISSUE = "issue1045" - - -def make_comparison_table(): - report = common_setup.ComparativeReport( - algorithm_pairs=[ - (f"{ISSUE}-base-lama-first", f"{ISSUE}-v2-lama-first"), - (f"{ISSUE}-base-lama-first-pref", f"{ISSUE}-v2-lama-first-pref"), - (f"{ISSUE}-base-lm-zg", f"{ISSUE}-v2-lm-zg"), - ], attributes=ATTRIBUTES, - ) - outfile = os.path.join( - exp.eval_dir, "%s-compare.%s" % (exp.name, report.output_format) - ) - report(exp.eval_dir, outfile) - exp.add_report(report) - -REVISIONS = [ - f"{ISSUE}-base", - f"{ISSUE}-v2", -] - -CONFIGS = [ - IssueConfig("lama-first", [], - driver_options=["--alias", "lama-first", - "--overall-time-limit", "5m"]), - IssueConfig( - "lama-first-pref", ["--evaluator", - "hlm=lmcount(lm_factory=lm_reasonable_orders_hps(" - "lm_rhw()),transform=adapt_costs(one),pref=true)", - "--evaluator", "hff=ff(transform=adapt_costs(one))", - "--search", - """lazy_greedy([hff,hlm],preferred=[hff,hlm], cost_type=one,reopen_closed=false)"""], - driver_options=["--overall-time-limit", "5m"], - ), - IssueConfig("lm-zg", ["--search", "eager_greedy([lmcount(lm_zg())])"], - driver_options=["--overall-time-limit", "5m"]), -] - -BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"] - -if common_setup.is_running_on_cluster(): - SUITE = common_setup.DEFAULT_SATISFICING_SUITE - ENVIRONMENT = BaselSlurmEnvironment( - partition="infai_2", - email="salome.eriksson@unibas.ch", - setup="export 
PATH=/scicore/soft/apps/binutils/2.32-GCCcore-8.3.0/bin:/scicore/soft/apps/CMake/3.15.3-GCCcore-8.3.0/bin:/scicore/soft/apps/cURL/7.66.0-GCCcore-8.3.0/bin:/scicore/soft/apps/bzip2/1.0.8-GCCcore-8.3.0/bin:/scicore/soft/apps/ncurses/6.1-GCCcore-8.3.0/bin:/scicore/soft/apps/GCCcore/8.3.0/bin:/infai/buecle01/local:/export/soft/lua_lmod/centos7/lmod/lmod/libexec:/usr/local/bin:/usr/bin:/usr/local/sbin:/usr/sbin:$PATH\nexport LD_LIBRARY_PATH=/scicore/soft/apps/binutils/2.32-GCCcore-8.3.0/lib:/scicore/soft/apps/cURL/7.66.0-GCCcore-8.3.0/lib:/scicore/soft/apps/bzip2/1.0.8-GCCcore-8.3.0/lib:/scicore/soft/apps/zlib/1.2.11-GCCcore-8.3.0/lib:/scicore/soft/apps/ncurses/6.1-GCCcore-8.3.0/lib:/scicore/soft/apps/GCCcore/8.3.0/lib64:/scicore/soft/apps/GCCcore/8.3.0/lib", - ) -else: - SUITE = common_setup.IssueExperiment.DEFAULT_TEST_SUITE - ENVIRONMENT = LocalEnvironment(processes=2) - -exp = common_setup.IssueExperiment( - revisions=REVISIONS, - configs=CONFIGS, - environment=ENVIRONMENT, -) - -exp.add_suite(BENCHMARKS_DIR, SUITE) - -exp.add_parser(exp.ANYTIME_SEARCH_PARSER) -exp.add_parser(exp.EXITCODE_PARSER) -exp.add_parser(exp.PLANNER_PARSER) -exp.add_parser(exp.SINGLE_SEARCH_PARSER) -exp.add_parser("landmark_parser.py") - -ATTRIBUTES = IssueExperiment.DEFAULT_TABLE_ATTRIBUTES + [ - Attribute("landmarks", min_wins=False), - Attribute("landmarks_disjunctive", min_wins=False), - Attribute("landmarks_conjunctiv", min_wins=False), - Attribute("orderings", min_wins=False), - Attribute("lmgraph_generation_time", min_wins=True), -] - -exp.add_step("build", exp.build) -exp.add_step("start", exp.start_runs) -exp.add_fetcher(name="fetch") -exp.add_step("comparison table", make_comparison_table) -exp.add_parse_again_step() - -exp.run_steps() - diff --git a/experiments/issue1045/v3-optimal.py b/experiments/issue1045/v3-optimal.py deleted file mode 100755 index 666b06b419..0000000000 --- a/experiments/issue1045/v3-optimal.py +++ /dev/null @@ -1,95 +0,0 @@ -#! 
/usr/bin/env python -# -*- coding: utf-8 -*- - -import common_setup -import os - -from common_setup import IssueConfig, IssueExperiment -from lab.environments import LocalEnvironment, BaselSlurmEnvironment -from lab.reports import Attribute - -ISSUE = "issue1045" - - -def make_comparison_table(): - report = common_setup.ComparativeReport( - algorithm_pairs=[ - (f"{ISSUE}-base-seq-opt-bjolp", f"{ISSUE}-v3-seq-opt-bjolp"), - (f"{ISSUE}-base-seq-opt-bjolp-opt-cplex", f"{ISSUE}-v3-seq-opt-bjolp-opt-cplex"), - (f"{ISSUE}-base-seq-opt-bjolp-opt-soplex", f"{ISSUE}-v3-seq-opt-bjolp-opt-soplex"), - (f"{ISSUE}-base-lm-exhaust", f"{ISSUE}-v3-lm-exhaust"), - ], attributes=ATTRIBUTES, - ) - outfile = os.path.join( - exp.eval_dir, "%s-compare.%s" % (exp.name, report.output_format) - ) - report(exp.eval_dir, outfile) - exp.add_report(report) - -REVISIONS = [ - f"{ISSUE}-base", - f"{ISSUE}-v3", -] - -CONFIGS = [ - IssueConfig("seq-opt-bjolp", [], - driver_options=["--alias", "seq-opt-bjolp", - "--overall-time-limit", "5m"]), - IssueConfig("lm-exhaust", - ["--evaluator", "lmc=lmcount(lm_exhaust(),admissible=true)", - "--search", "astar(lmc,lazy_evaluator=lmc)"], - driver_options=["--overall-time-limit", "5m"]), - IssueConfig("seq-opt-bjolp-opt-cplex", - ["--evaluator", - "lmc=lmcount(lm_merged([lm_rhw(),lm_hm(m=1)]),admissible=true, optimal=true, lpsolver=CPLEX)", - "--search", "astar(lmc,lazy_evaluator=lmc)"], - driver_options=["--overall-time-limit", "5m"]), - IssueConfig("seq-opt-bjolp-opt-soplex", - ["--evaluator", - "lmc=lmcount(lm_merged([lm_rhw(),lm_hm(m=1)]),admissible=true, optimal=true, lpsolver=SOPLEX)", - "--search", "astar(lmc,lazy_evaluator=lmc)"], - driver_options=["--overall-time-limit", "5m"]), -] - -BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"] - -if common_setup.is_running_on_cluster(): - SUITE = common_setup.DEFAULT_OPTIMAL_SUITE - ENVIRONMENT = BaselSlurmEnvironment( - partition="infai_2", - email="salome.eriksson@unibas.ch", - setup="export 
PATH=/scicore/soft/apps/binutils/2.32-GCCcore-8.3.0/bin:/scicore/soft/apps/CMake/3.15.3-GCCcore-8.3.0/bin:/scicore/soft/apps/cURL/7.66.0-GCCcore-8.3.0/bin:/scicore/soft/apps/bzip2/1.0.8-GCCcore-8.3.0/bin:/scicore/soft/apps/ncurses/6.1-GCCcore-8.3.0/bin:/scicore/soft/apps/GCCcore/8.3.0/bin:/infai/buecle01/local:/export/soft/lua_lmod/centos7/lmod/lmod/libexec:/usr/local/bin:/usr/bin:/usr/local/sbin:/usr/sbin:$PATH\nexport LD_LIBRARY_PATH=/scicore/soft/apps/binutils/2.32-GCCcore-8.3.0/lib:/scicore/soft/apps/cURL/7.66.0-GCCcore-8.3.0/lib:/scicore/soft/apps/bzip2/1.0.8-GCCcore-8.3.0/lib:/scicore/soft/apps/zlib/1.2.11-GCCcore-8.3.0/lib:/scicore/soft/apps/ncurses/6.1-GCCcore-8.3.0/lib:/scicore/soft/apps/GCCcore/8.3.0/lib64:/scicore/soft/apps/GCCcore/8.3.0/lib", - ) -else: - SUITE = common_setup.IssueExperiment.DEFAULT_TEST_SUITE - ENVIRONMENT = LocalEnvironment(processes=2) - -exp = common_setup.IssueExperiment( - revisions=REVISIONS, - configs=CONFIGS, - environment=ENVIRONMENT, -) - -exp.add_suite(BENCHMARKS_DIR, SUITE) - -exp.add_parser(exp.EXITCODE_PARSER) -exp.add_parser(exp.PLANNER_PARSER) -exp.add_parser(exp.SINGLE_SEARCH_PARSER) -exp.add_parser("landmark_parser.py") - -ATTRIBUTES = IssueExperiment.DEFAULT_TABLE_ATTRIBUTES + [ - Attribute("landmarks", min_wins=False), - Attribute("landmarks_disjunctive", min_wins=False), - Attribute("landmarks_conjunctive", min_wins=False), - Attribute("orderings", min_wins=False), - Attribute("lmgraph_generation_time", min_wins=True), -] - -exp.add_step("build", exp.build) -exp.add_step("start", exp.start_runs) -exp.add_fetcher(name="fetch") -exp.add_step("comparison table", make_comparison_table) -exp.add_parse_again_step() - -exp.run_steps() - diff --git a/experiments/issue1045/v3-satisficing.py b/experiments/issue1045/v3-satisficing.py deleted file mode 100755 index b90602de53..0000000000 --- a/experiments/issue1045/v3-satisficing.py +++ /dev/null @@ -1,92 +0,0 @@ -#! 
/usr/bin/env python -# -*- coding: utf-8 -*- - -import common_setup -import os - -from common_setup import IssueConfig, IssueExperiment -from lab.environments import LocalEnvironment, BaselSlurmEnvironment -from lab.reports import Attribute - -ISSUE = "issue1045" - - -def make_comparison_table(): - report = common_setup.ComparativeReport( - algorithm_pairs=[ - (f"{ISSUE}-base-lama-first", f"{ISSUE}-v3-lama-first"), - (f"{ISSUE}-base-lama-first-pref", f"{ISSUE}-v3-lama-first-pref"), - (f"{ISSUE}-base-lm-zg", f"{ISSUE}-v3-lm-zg"), - ], attributes=ATTRIBUTES, - ) - outfile = os.path.join( - exp.eval_dir, "%s-compare.%s" % (exp.name, report.output_format) - ) - report(exp.eval_dir, outfile) - exp.add_report(report) - -REVISIONS = [ - f"{ISSUE}-base", - f"{ISSUE}-v3", -] - -CONFIGS = [ - IssueConfig("lama-first", [], - driver_options=["--alias", "lama-first", - "--overall-time-limit", "5m"]), - IssueConfig( - "lama-first-pref", ["--evaluator", - "hlm=lmcount(lm_factory=lm_reasonable_orders_hps(" - "lm_rhw()),transform=adapt_costs(one),pref=true)", - "--evaluator", "hff=ff(transform=adapt_costs(one))", - "--search", - """lazy_greedy([hff,hlm],preferred=[hff,hlm], cost_type=one,reopen_closed=false)"""], - driver_options=["--overall-time-limit", "5m"], - ), - IssueConfig("lm-zg", ["--search", "eager_greedy([lmcount(lm_zg())])"], - driver_options=["--overall-time-limit", "5m"]), -] - -BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"] - -if common_setup.is_running_on_cluster(): - SUITE = common_setup.DEFAULT_SATISFICING_SUITE - ENVIRONMENT = BaselSlurmEnvironment( - partition="infai_2", - email="salome.eriksson@unibas.ch", - setup="export 
PATH=/scicore/soft/apps/binutils/2.32-GCCcore-8.3.0/bin:/scicore/soft/apps/CMake/3.15.3-GCCcore-8.3.0/bin:/scicore/soft/apps/cURL/7.66.0-GCCcore-8.3.0/bin:/scicore/soft/apps/bzip2/1.0.8-GCCcore-8.3.0/bin:/scicore/soft/apps/ncurses/6.1-GCCcore-8.3.0/bin:/scicore/soft/apps/GCCcore/8.3.0/bin:/infai/buecle01/local:/export/soft/lua_lmod/centos7/lmod/lmod/libexec:/usr/local/bin:/usr/bin:/usr/local/sbin:/usr/sbin:$PATH\nexport LD_LIBRARY_PATH=/scicore/soft/apps/binutils/2.32-GCCcore-8.3.0/lib:/scicore/soft/apps/cURL/7.66.0-GCCcore-8.3.0/lib:/scicore/soft/apps/bzip2/1.0.8-GCCcore-8.3.0/lib:/scicore/soft/apps/zlib/1.2.11-GCCcore-8.3.0/lib:/scicore/soft/apps/ncurses/6.1-GCCcore-8.3.0/lib:/scicore/soft/apps/GCCcore/8.3.0/lib64:/scicore/soft/apps/GCCcore/8.3.0/lib", - ) -else: - SUITE = common_setup.IssueExperiment.DEFAULT_TEST_SUITE - ENVIRONMENT = LocalEnvironment(processes=2) - -exp = common_setup.IssueExperiment( - revisions=REVISIONS, - configs=CONFIGS, - environment=ENVIRONMENT, -) - -exp.add_suite(BENCHMARKS_DIR, SUITE) - -exp.add_parser(exp.ANYTIME_SEARCH_PARSER) -exp.add_parser(exp.EXITCODE_PARSER) -exp.add_parser(exp.PLANNER_PARSER) -exp.add_parser(exp.SINGLE_SEARCH_PARSER) -exp.add_parser("landmark_parser.py") - -ATTRIBUTES = IssueExperiment.DEFAULT_TABLE_ATTRIBUTES + [ - Attribute("landmarks", min_wins=False), - Attribute("landmarks_disjunctive", min_wins=False), - Attribute("landmarks_conjunctiv", min_wins=False), - Attribute("orderings", min_wins=False), - Attribute("lmgraph_generation_time", min_wins=True), -] - -exp.add_step("build", exp.build) -exp.add_step("start", exp.start_runs) -exp.add_fetcher(name="fetch") -exp.add_step("comparison table", make_comparison_table) -exp.add_parse_again_step() - -exp.run_steps() - diff --git a/experiments/issue1049/common_setup.py b/experiments/issue1049/common_setup.py deleted file mode 100644 index ae9af26890..0000000000 --- a/experiments/issue1049/common_setup.py +++ /dev/null @@ -1,395 +0,0 @@ -# -*- coding: utf-8 -*- - 
-import itertools -import os -import platform -import subprocess -import sys - -from lab.experiment import ARGPARSER -from lab import tools - -from downward.experiment import FastDownwardExperiment -from downward.reports.absolute import AbsoluteReport -from downward.reports.compare import ComparativeReport -from downward.reports.scatter import ScatterPlotReport - - -def parse_args(): - ARGPARSER.add_argument( - "--test", - choices=["yes", "no", "auto"], - default="auto", - dest="test_run", - help="test experiment locally on a small suite if --test=yes or " - "--test=auto and we are not on a cluster") - return ARGPARSER.parse_args() - -ARGS = parse_args() - - -DEFAULT_OPTIMAL_SUITE = [ - 'agricola-opt18-strips', 'airport', 'barman-opt11-strips', - 'barman-opt14-strips', 'blocks', 'childsnack-opt14-strips', - 'data-network-opt18-strips', 'depot', 'driverlog', - 'elevators-opt08-strips', 'elevators-opt11-strips', - 'floortile-opt11-strips', 'floortile-opt14-strips', 'freecell', - 'ged-opt14-strips', 'grid', 'gripper', 'hiking-opt14-strips', - 'logistics00', 'logistics98', 'miconic', 'movie', 'mprime', - 'mystery', 'nomystery-opt11-strips', 'openstacks-opt08-strips', - 'openstacks-opt11-strips', 'openstacks-opt14-strips', - 'openstacks-strips', 'organic-synthesis-opt18-strips', - 'organic-synthesis-split-opt18-strips', 'parcprinter-08-strips', - 'parcprinter-opt11-strips', 'parking-opt11-strips', - 'parking-opt14-strips', 'pegsol-08-strips', - 'pegsol-opt11-strips', 'petri-net-alignment-opt18-strips', - 'pipesworld-notankage', 'pipesworld-tankage', 'psr-small', 'rovers', - 'satellite', 'scanalyzer-08-strips', 'scanalyzer-opt11-strips', - 'snake-opt18-strips', 'sokoban-opt08-strips', - 'sokoban-opt11-strips', 'spider-opt18-strips', 'storage', - 'termes-opt18-strips', 'tetris-opt14-strips', - 'tidybot-opt11-strips', 'tidybot-opt14-strips', 'tpp', - 'transport-opt08-strips', 'transport-opt11-strips', - 'transport-opt14-strips', 'trucks-strips', 'visitall-opt11-strips', - 
'visitall-opt14-strips', 'woodworking-opt08-strips', - 'woodworking-opt11-strips', 'zenotravel'] - -DEFAULT_SATISFICING_SUITE = [ - 'agricola-sat18-strips', 'airport', 'assembly', - 'barman-sat11-strips', 'barman-sat14-strips', 'blocks', - 'caldera-sat18-adl', 'caldera-split-sat18-adl', 'cavediving-14-adl', - 'childsnack-sat14-strips', 'citycar-sat14-adl', - 'data-network-sat18-strips', 'depot', 'driverlog', - 'elevators-sat08-strips', 'elevators-sat11-strips', - 'flashfill-sat18-adl', 'floortile-sat11-strips', - 'floortile-sat14-strips', 'freecell', 'ged-sat14-strips', 'grid', - 'gripper', 'hiking-sat14-strips', 'logistics00', 'logistics98', - 'maintenance-sat14-adl', 'miconic', 'miconic-fulladl', - 'miconic-simpleadl', 'movie', 'mprime', 'mystery', - 'nomystery-sat11-strips', 'nurikabe-sat18-adl', 'openstacks', - 'openstacks-sat08-adl', 'openstacks-sat08-strips', - 'openstacks-sat11-strips', 'openstacks-sat14-strips', - 'openstacks-strips', 'optical-telegraphs', - 'organic-synthesis-sat18-strips', - 'organic-synthesis-split-sat18-strips', 'parcprinter-08-strips', - 'parcprinter-sat11-strips', 'parking-sat11-strips', - 'parking-sat14-strips', 'pathways', - 'pegsol-08-strips', 'pegsol-sat11-strips', 'philosophers', - 'pipesworld-notankage', 'pipesworld-tankage', 'psr-large', - 'psr-middle', 'psr-small', 'rovers', 'satellite', - 'scanalyzer-08-strips', 'scanalyzer-sat11-strips', 'schedule', - 'settlers-sat18-adl', 'snake-sat18-strips', 'sokoban-sat08-strips', - 'sokoban-sat11-strips', 'spider-sat18-strips', 'storage', - 'termes-sat18-strips', 'tetris-sat14-strips', - 'thoughtful-sat14-strips', 'tidybot-sat11-strips', 'tpp', - 'transport-sat08-strips', 'transport-sat11-strips', - 'transport-sat14-strips', 'trucks', 'trucks-strips', - 'visitall-sat11-strips', 'visitall-sat14-strips', - 'woodworking-sat08-strips', 'woodworking-sat11-strips', - 'zenotravel'] - - -def get_script(): - """Get file name of main script.""" - return tools.get_script_path() - - -def 
get_script_dir(): - """Get directory of main script. - - Usually a relative directory (depends on how it was called by the user.)""" - return os.path.dirname(get_script()) - - -def get_experiment_name(): - """Get name for experiment. - - Derived from the absolute filename of the main script, e.g. - "/ham/spam/eggs.py" => "spam-eggs".""" - script = os.path.abspath(get_script()) - script_dir = os.path.basename(os.path.dirname(script)) - script_base = os.path.splitext(os.path.basename(script))[0] - return "%s-%s" % (script_dir, script_base) - - -def get_data_dir(): - """Get data dir for the experiment. - - This is the subdirectory "data" of the directory containing - the main script.""" - return os.path.join(get_script_dir(), "data", get_experiment_name()) - - -def get_repo_base(): - """Get base directory of the repository, as an absolute path. - - Search upwards in the directory tree from the main script until a - directory with a subdirectory named ".git" is found. - - Abort if the repo base cannot be found.""" - path = os.path.abspath(get_script_dir()) - while os.path.dirname(path) != path: - if os.path.exists(os.path.join(path, ".git")): - return path - path = os.path.dirname(path) - sys.exit("repo base could not be found") - - -def is_running_on_cluster(): - node = platform.node() - return node.endswith(".scicore.unibas.ch") or node.endswith(".cluster.bc2.ch") - - -def is_test_run(): - return ARGS.test_run == "yes" or ( - ARGS.test_run == "auto" and not is_running_on_cluster()) - - -def get_algo_nick(revision, config_nick): - return "{revision}-{config_nick}".format(**locals()) - - -class IssueConfig(object): - """Hold information about a planner configuration. - - See FastDownwardExperiment.add_algorithm() for documentation of the - constructor's options. 
- - """ - def __init__(self, nick, component_options, - build_options=None, driver_options=None): - self.nick = nick - self.component_options = component_options - self.build_options = build_options - self.driver_options = driver_options - - -class IssueExperiment(FastDownwardExperiment): - """Subclass of FastDownwardExperiment with some convenience features.""" - - DEFAULT_TEST_SUITE = ["depot:p01.pddl", "gripper:prob01.pddl"] - - DEFAULT_TABLE_ATTRIBUTES = [ - "cost", - "coverage", - "error", - "evaluations", - "expansions", - "expansions_until_last_jump", - "initial_h_value", - "generated", - "memory", - "planner_memory", - "planner_time", - "quality", - "run_dir", - "score_evaluations", - "score_expansions", - "score_generated", - "score_memory", - "score_search_time", - "score_total_time", - "search_time", - "total_time", - ] - - DEFAULT_SCATTER_PLOT_ATTRIBUTES = [ - "evaluations", - "expansions", - "expansions_until_last_jump", - "initial_h_value", - "memory", - "search_time", - "total_time", - ] - - PORTFOLIO_ATTRIBUTES = [ - "cost", - "coverage", - "error", - "plan_length", - "run_dir", - ] - - def __init__(self, revisions=None, configs=None, path=None, **kwargs): - """ - - You can either specify both *revisions* and *configs* or none - of them. If they are omitted, you will need to call - exp.add_algorithm() manually. - - If *revisions* is given, it must be a non-empty list of - revision identifiers, which specify which planner versions to - use in the experiment. The same versions are used for - translator, preprocessor and search. :: - - IssueExperiment(revisions=["issue123", "4b3d581643"], ...) - - If *configs* is given, it must be a non-empty list of - IssueConfig objects. :: - - IssueExperiment(..., configs=[ - IssueConfig("ff", ["--search", "eager_greedy(ff())"]), - IssueConfig( - "lama", [], - driver_options=["--alias", "seq-sat-lama-2011"]), - ]) - - If *path* is specified, it must be the path to where the - experiment should be built (e.g. 
- /home/john/experiments/issue123/exp01/). If omitted, the - experiment path is derived automatically from the main - script's filename. Example:: - - script = experiments/issue123/exp01.py --> - path = experiments/issue123/data/issue123-exp01/ - - """ - - path = path or get_data_dir() - - FastDownwardExperiment.__init__(self, path=path, **kwargs) - - if (revisions and not configs) or (not revisions and configs): - raise ValueError( - "please provide either both or none of revisions and configs") - - for rev in revisions: - for config in configs: - self.add_algorithm( - get_algo_nick(rev, config.nick), - get_repo_base(), - rev, - config.component_options, - build_options=config.build_options, - driver_options=config.driver_options) - - self._revisions = revisions - self._configs = configs - - @classmethod - def _is_portfolio(cls, config_nick): - return "fdss" in config_nick - - @classmethod - def get_supported_attributes(cls, config_nick, attributes): - if cls._is_portfolio(config_nick): - return [attr for attr in attributes - if attr in cls.PORTFOLIO_ATTRIBUTES] - return attributes - - def add_absolute_report_step(self, **kwargs): - """Add step that makes an absolute report. - - Absolute reports are useful for experiments that don't compare - revisions. - - The report is written to the experiment evaluation directory. - - All *kwargs* will be passed to the AbsoluteReport class. If the - keyword argument *attributes* is not specified, a default list - of attributes is used. :: - - exp.add_absolute_report_step(attributes=["coverage"]) - - """ - kwargs.setdefault("attributes", self.DEFAULT_TABLE_ATTRIBUTES) - report = AbsoluteReport(**kwargs) - outfile = os.path.join( - self.eval_dir, - get_experiment_name() + "." 
+ report.output_format) - self.add_report(report, outfile=outfile) - self.add_step( - 'publish-absolute-report', subprocess.call, ['publish', outfile]) - - def add_comparison_table_step(self, **kwargs): - """Add a step that makes pairwise revision comparisons. - - Create comparative reports for all pairs of Fast Downward - revisions. Each report pairs up the runs of the same config and - lists the two absolute attribute values and their difference - for all attributes in kwargs["attributes"]. - - All *kwargs* will be passed to the CompareConfigsReport class. - If the keyword argument *attributes* is not specified, a - default list of attributes is used. :: - - exp.add_comparison_table_step(attributes=["coverage"]) - - """ - kwargs.setdefault("attributes", self.DEFAULT_TABLE_ATTRIBUTES) - - def make_comparison_tables(): - for rev1, rev2 in itertools.combinations(self._revisions, 2): - compared_configs = [] - for config in self._configs: - config_nick = config.nick - compared_configs.append( - ("%s-%s" % (rev1, config_nick), - "%s-%s" % (rev2, config_nick), - "Diff (%s)" % config_nick)) - report = ComparativeReport(compared_configs, **kwargs) - outfile = os.path.join( - self.eval_dir, - "%s-%s-%s-compare.%s" % ( - self.name, rev1, rev2, report.output_format)) - report(self.eval_dir, outfile) - - def publish_comparison_tables(): - for rev1, rev2 in itertools.combinations(self._revisions, 2): - outfile = os.path.join( - self.eval_dir, - "%s-%s-%s-compare.html" % (self.name, rev1, rev2)) - subprocess.call(["publish", outfile]) - - self.add_step("make-comparison-tables", make_comparison_tables) - self.add_step( - "publish-comparison-tables", publish_comparison_tables) - - def add_scatter_plot_step(self, relative=False, attributes=None, additional=[]): - """Add step creating (relative) scatter plots for all revision pairs. - - Create a scatter plot for each combination of attribute, - configuration and revisions pair. 
If *attributes* is not - specified, a list of common scatter plot attributes is used. - For portfolios all attributes except "cost", "coverage" and - "plan_length" will be ignored. :: - - exp.add_scatter_plot_step(attributes=["expansions"]) - - """ - if relative: - scatter_dir = os.path.join(self.eval_dir, "scatter-relative") - step_name = "make-relative-scatter-plots" - else: - scatter_dir = os.path.join(self.eval_dir, "scatter-absolute") - step_name = "make-absolute-scatter-plots" - if attributes is None: - attributes = self.DEFAULT_SCATTER_PLOT_ATTRIBUTES - - def make_scatter_plot(config_nick, rev1, rev2, attribute, config_nick2=None): - name = "-".join([self.name, rev1, rev2, attribute, config_nick]) - if config_nick2 is not None: - name += "-" + config_nick2 - print("Make scatter plot for", name) - algo1 = get_algo_nick(rev1, config_nick) - algo2 = get_algo_nick(rev2, config_nick if config_nick2 is None else config_nick2) - report = ScatterPlotReport( - filter_algorithm=[algo1, algo2], - attributes=[attribute], - relative=relative, - get_category=lambda run1, run2: run1["domain"]) - report( - self.eval_dir, - os.path.join(scatter_dir, rev1 + "-" + rev2, name)) - - def make_scatter_plots(): - for config in self._configs: - for rev1, rev2 in itertools.combinations(self._revisions, 2): - for attribute in self.get_supported_attributes( - config.nick, attributes): - make_scatter_plot(config.nick, rev1, rev2, attribute) - for nick1, nick2, rev1, rev2, attribute in additional: - make_scatter_plot(nick1, rev1, rev2, attribute, config_nick2=nick2) - - self.add_step(step_name, lambda: make_scatter_plots) diff --git a/experiments/issue1049/dead-end_parser-v2.py b/experiments/issue1049/dead-end_parser-v2.py deleted file mode 100755 index cb81418dea..0000000000 --- a/experiments/issue1049/dead-end_parser-v2.py +++ /dev/null @@ -1,21 +0,0 @@ -#! 
/usr/bin/env python - -import re - -from lab.parser import Parser - -parser = Parser() - -def count_dead_ends(content, props): - empty_first_matches_v1 = re.findall(r"Found unreached landmark with empty first achievers.", content) - empty_first_matches_v2 = re.findall(r"Found unreached landmark in initial state with empty first achievers.", content) - props["dead-end_empty_first_achievers"] = len(empty_first_matches_v1) + len(empty_first_matches_v2) - - empty_possible_matches = re.findall(r"Found needed-again landmark with empty possible achievers.", content) - props["dead-end_empty_possible_achievers"] = len(empty_possible_matches) - - return props - -parser.add_function(count_dead_ends) - -parser.parse() diff --git a/experiments/issue1049/dead-end_parser.py b/experiments/issue1049/dead-end_parser.py deleted file mode 100755 index 0f4bb2ad68..0000000000 --- a/experiments/issue1049/dead-end_parser.py +++ /dev/null @@ -1,21 +0,0 @@ -#! /usr/bin/env python - -import re - -from lab.parser import Parser - -parser = Parser() - -def count_dead_ends(content, props): - empty_first_matches = re.findall(r"Found unreached landmark with empty first achievers.", content) - props["dead-end_empty_first_achievers"] = len(empty_first_matches) - - empty_possible_matches = re.findall(r"Found needed-again landmark with empty possible achievers.", content) - props["dead-end_empty_possible_achievers"] = len(empty_possible_matches) - - print("hallo welt") - return props - -parser.add_function(count_dead_ends) - -parser.parse() diff --git a/experiments/issue1049/landmark_parser.py b/experiments/issue1049/landmark_parser.py deleted file mode 100755 index 943492471b..0000000000 --- a/experiments/issue1049/landmark_parser.py +++ /dev/null @@ -1,29 +0,0 @@ -#! 
/usr/bin/env python - -import re - -from lab.parser import Parser - -parser = Parser() -parser.add_pattern( - "lmgraph_generation_time", - r"Landmark graph generation time: (.+)s", - type=float) -parser.add_pattern( - "landmarks", - r"Landmark graph contains (\d+) landmarks, of which \d+ are disjunctive and \d+ are conjunctive.", - type=int) -parser.add_pattern( - "landmarks_disjunctive", - r"Landmark graph contains \d+ landmarks, of which (\d+) are disjunctive and \d+ are conjunctive.", - type=int) -parser.add_pattern( - "landmarks_conjunctive", - r"Landmark graph contains \d+ landmarks, of which \d+ are disjunctive and (\d+) are conjunctive.", - type=int) -parser.add_pattern( - "orderings", - r"Landmark graph contains (\d+) orderings.", - type=int) - -parser.parse() diff --git a/experiments/issue1049/v1-optimal.py b/experiments/issue1049/v1-optimal.py deleted file mode 100755 index 719a3d3e54..0000000000 --- a/experiments/issue1049/v1-optimal.py +++ /dev/null @@ -1,73 +0,0 @@ -#! /usr/bin/env python -# -*- coding: utf-8 -*- - -import common_setup -from common_setup import IssueConfig, IssueExperiment - -import os - -from lab.reports import Attribute - -from downward.reports.absolute import AbsoluteReport - -from lab.environments import LocalEnvironment, BaselSlurmEnvironment - - - -REVISIONS = [ - "issue1049-v1", -] - -DRIVER_OPTIONS = ["--overall-time-limit", "5m"] - -CONFIGS = [ - common_setup.IssueConfig("seq-opt-bjolp", [], driver_options=["--alias", "seq-opt-bjolp"]+DRIVER_OPTIONS), - common_setup.IssueConfig("lm-exhaust", ["--evaluator", "lmc=lmcount(lm_exhaust(),admissible=true)", "--search", "astar(lmc,lazy_evaluator=lmc)"], driver_options=DRIVER_OPTIONS), - common_setup.IssueConfig("lm-hm2", ["--evaluator", "lmc=lmcount(lm_hm(m=2),admissible=true)", "--search", "astar(lmc,lazy_evaluator=lmc)"], driver_options=DRIVER_OPTIONS), - common_setup.IssueConfig("seq-opt-bjolp-opt", ["--evaluator", "lmc=lmcount(lm_merged([lm_rhw(),lm_hm(m=1)]),admissible=true, 
optimal=true)", "--search", "astar(lmc,lazy_evaluator=lmc)"], driver_options=DRIVER_OPTIONS), -] - -BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"] - -if common_setup.is_running_on_cluster(): - SUITE = common_setup.DEFAULT_OPTIMAL_SUITE - ENVIRONMENT = BaselSlurmEnvironment( - partition="infai_2", - email="remo.christen@unibas.ch", - export=["PATH", "DOWNWARD_BENCHMARKS"], - ) -else: - SUITE = common_setup.IssueExperiment.DEFAULT_TEST_SUITE - ENVIRONMENT = LocalEnvironment(processes=2) - -exp = common_setup.IssueExperiment( - revisions=REVISIONS, - configs=CONFIGS, - environment=ENVIRONMENT, -) - -exp.add_suite(BENCHMARKS_DIR, SUITE) - -exp.add_parser(exp.EXITCODE_PARSER) -exp.add_parser(exp.PLANNER_PARSER) -exp.add_parser(exp.SINGLE_SEARCH_PARSER) -exp.add_parser("landmark_parser.py") -exp.add_parser("dead-end_parser.py") - -ATTRIBUTES = IssueExperiment.DEFAULT_TABLE_ATTRIBUTES + [ - Attribute("landmarks", min_wins=False), - Attribute("disjunctive_landmarks", min_wins=False), - Attribute("conjunctive_landmarks", min_wins=False), - Attribute("orderings", min_wins=False), - Attribute("dead-end_empty_first_achievers"), - Attribute("dead-end_empty_possible_achievers"), -] - -exp.add_step("build", exp.build) -exp.add_step("start", exp.start_runs) -exp.add_fetcher(name="fetch") -exp.add_report(AbsoluteReport(attributes=ATTRIBUTES), outfile="issue1049-logging-report.html") -exp.add_parse_again_step() - -exp.run_steps() - diff --git a/experiments/issue1049/v1-satisficing.py b/experiments/issue1049/v1-satisficing.py deleted file mode 100755 index d35a133ae7..0000000000 --- a/experiments/issue1049/v1-satisficing.py +++ /dev/null @@ -1,75 +0,0 @@ -#! 
/usr/bin/env python -# -*- coding: utf-8 -*- - -import common_setup - -from common_setup import IssueConfig, IssueExperiment - -import os - -from lab.reports import Attribute - -from downward.reports.absolute import AbsoluteReport - -from lab.environments import LocalEnvironment, BaselSlurmEnvironment - - - -REVISIONS = [ - "issue1049-v1", -] - -DRIVER_OPTIONS = ["--overall-time-limit", "5m"] - -CONFIGS = [ - common_setup.IssueConfig("seq-opt-bjolp", [], driver_options=["--alias", "seq-opt-bjolp"]+DRIVER_OPTIONS), - common_setup.IssueConfig("lm-exhaust", ["--evaluator", "lmc=lmcount(lm_exhaust(),admissible=true)", "--search", "astar(lmc,lazy_evaluator=lmc)"], driver_options=DRIVER_OPTIONS), - common_setup.IssueConfig("lm-hm2", ["--evaluator", "lmc=lmcount(lm_hm(m=2),admissible=true)", "--search", "astar(lmc,lazy_evaluator=lmc)"], driver_options=DRIVER_OPTIONS), - common_setup.IssueConfig("seq-opt-bjolp-opt", ["--evaluator", "lmc=lmcount(lm_merged([lm_rhw(),lm_hm(m=1)]),admissible=true, optimal=true)", "--search", "astar(lmc,lazy_evaluator=lmc)"], driver_options=DRIVER_OPTIONS), -] - -BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"] - -if common_setup.is_running_on_cluster(): - SUITE = common_setup.DEFAULT_SATISFICING_SUITE - ENVIRONMENT = BaselSlurmEnvironment( - partition="infai_2", - email="remo.christen@unibas.ch", - export=["PATH", "DOWNWARD_BENCHMARKS"], - ) -else: - SUITE = common_setup.IssueExperiment.DEFAULT_TEST_SUITE - ENVIRONMENT = LocalEnvironment(processes=2) - -exp = common_setup.IssueExperiment( - revisions=REVISIONS, - configs=CONFIGS, - environment=ENVIRONMENT, -) - -exp.add_suite(BENCHMARKS_DIR, SUITE) - -exp.add_parser(exp.ANYTIME_SEARCH_PARSER) -exp.add_parser(exp.EXITCODE_PARSER) -exp.add_parser(exp.PLANNER_PARSER) -exp.add_parser(exp.SINGLE_SEARCH_PARSER) -exp.add_parser("landmark_parser.py") -exp.add_parser("dead-end_parser.py") - -ATTRIBUTES = IssueExperiment.DEFAULT_TABLE_ATTRIBUTES + [ - Attribute("landmarks", min_wins=False), - 
Attribute("disjunctive_landmarks", min_wins=False), - Attribute("conjunctive_landmarks", min_wins=False), - Attribute("orderings", min_wins=False), - Attribute("dead-end_empty_first_achievers"), - Attribute("dead-end_empty_possible_achievers"), -] - -exp.add_step("build", exp.build) -exp.add_step("start", exp.start_runs) -exp.add_fetcher(name="fetch") -exp.add_report(AbsoluteReport(attributes=ATTRIBUTES), outfile="issue1049-logging-report.html") -exp.add_parse_again_step() - -exp.run_steps() - diff --git a/experiments/issue1049/v2-optimal.py b/experiments/issue1049/v2-optimal.py deleted file mode 100755 index 339872c217..0000000000 --- a/experiments/issue1049/v2-optimal.py +++ /dev/null @@ -1,93 +0,0 @@ -#! /usr/bin/env python -# -*- coding: utf-8 -*- - -import common_setup -from common_setup import IssueConfig, IssueExperiment - -import os - -from lab.reports import Attribute - -from lab.environments import LocalEnvironment, BaselSlurmEnvironment - -ISSUE = "issue1049" - -def make_comparison_table(): - report = common_setup.ComparativeReport( - algorithm_pairs=[ - (f"{ISSUE}-v1-seq-opt-bjolp", f"{ISSUE}-v2-seq-opt-bjolp"), - (f"{ISSUE}-v1-lm-exhaust", f"{ISSUE}-v2-lm-exhaust"), - (f"{ISSUE}-v1-lm-hm2", f"{ISSUE}-v2-lm-hm2"), - (f"{ISSUE}-v1-seq-opt-bjolp-opt", f"{ISSUE}-v2-seq-opt-bjolp-opt"), - ], attributes=ATTRIBUTES, filter=remove_unfinished_tasks - ) - outfile = os.path.join( - exp.eval_dir, "%s-compare.%s" % (exp.name, report.output_format) - ) - report(exp.eval_dir, outfile) - exp.add_report(report) - - -REVISIONS = [ - f"{ISSUE}-v1", - f"{ISSUE}-v2", -] - -DRIVER_OPTIONS = ["--overall-time-limit", "5m"] - -CONFIGS = [ - common_setup.IssueConfig("seq-opt-bjolp", [], driver_options=["--alias", "seq-opt-bjolp"]+DRIVER_OPTIONS), - common_setup.IssueConfig("lm-exhaust", ["--evaluator", "lmc=lmcount(lm_exhaust(),admissible=true)", "--search", "astar(lmc,lazy_evaluator=lmc)"], driver_options=DRIVER_OPTIONS), - common_setup.IssueConfig("lm-hm2", ["--evaluator", 
"lmc=lmcount(lm_hm(m=2),admissible=true)", "--search", "astar(lmc,lazy_evaluator=lmc)"], driver_options=DRIVER_OPTIONS), - common_setup.IssueConfig("seq-opt-bjolp-opt", ["--evaluator", "lmc=lmcount(lm_merged([lm_rhw(),lm_hm(m=1)]),admissible=true, optimal=true)", "--search", "astar(lmc,lazy_evaluator=lmc)"], driver_options=DRIVER_OPTIONS), -] - -BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"] - -if common_setup.is_running_on_cluster(): - SUITE = common_setup.DEFAULT_OPTIMAL_SUITE - ENVIRONMENT = BaselSlurmEnvironment( - partition="infai_1", - email="remo.christen@unibas.ch", - export=["PATH", "DOWNWARD_BENCHMARKS"], - ) -else: - SUITE = common_setup.IssueExperiment.DEFAULT_TEST_SUITE - ENVIRONMENT = LocalEnvironment(processes=2) - -exp = common_setup.IssueExperiment( - revisions=REVISIONS, - configs=CONFIGS, - environment=ENVIRONMENT, -) - -def remove_unfinished_tasks(run): - if "expansions" in run: - run["dead-end_empty_possible_achievers_finished"] = run["dead-end_empty_possible_achievers"] - return True - -exp.add_suite(BENCHMARKS_DIR, SUITE) - -exp.add_parser(exp.EXITCODE_PARSER) -exp.add_parser(exp.PLANNER_PARSER) -exp.add_parser(exp.SINGLE_SEARCH_PARSER) -exp.add_parser("landmark_parser.py") -exp.add_parser("dead-end_parser-v2.py") - -ATTRIBUTES = IssueExperiment.DEFAULT_TABLE_ATTRIBUTES + [ - Attribute("landmarks", min_wins=False), - Attribute("disjunctive_landmarks", min_wins=False), - Attribute("conjunctive_landmarks", min_wins=False), - Attribute("orderings", min_wins=False), - Attribute("dead-end_empty_first_achievers", min_wins=False), - Attribute("dead-end_empty_possible_achievers", min_wins=False), - Attribute("dead-end_empty_possible_achievers_finished", min_wins=False), -] - -exp.add_step("build", exp.build) -exp.add_step("start", exp.start_runs) -exp.add_fetcher(name="fetch") -exp.add_step("comparison table", make_comparison_table) -exp.add_parse_again_step() - -exp.run_steps() diff --git a/experiments/issue1049/v2-satisficing.py 
b/experiments/issue1049/v2-satisficing.py deleted file mode 100755 index cf6f03b92f..0000000000 --- a/experiments/issue1049/v2-satisficing.py +++ /dev/null @@ -1,104 +0,0 @@ -#! /usr/bin/env python -# -*- coding: utf-8 -*- - -import common_setup - -from common_setup import IssueConfig, IssueExperiment - -import os - -from lab.reports import Attribute - -from lab.environments import LocalEnvironment, BaselSlurmEnvironment - -ISSUE = "issue1049" - -def make_comparison_table(): - report = common_setup.ComparativeReport( - algorithm_pairs=[ - (f"{ISSUE}-v1-lama-first", f"{ISSUE}-v2-lama-first"), - (f"{ISSUE}-v1-lama-first-pref", f"{ISSUE}-v2-lama-first-pref"), - (f"{ISSUE}-v1-lm-zg", f"{ISSUE}-v2-lm-zg"), - ], attributes=ATTRIBUTES, filter=remove_unfinished_tasks - ) - outfile = os.path.join( - exp.eval_dir, "%s-compare.%s" % (exp.name, report.output_format) - ) - report(exp.eval_dir, outfile) - exp.add_report(report) - - -REVISIONS = [ - f"{ISSUE}-v1", - f"{ISSUE}-v2", -] - -CONFIGS = [ - common_setup.IssueConfig("lama-first", [], - driver_options=["--alias", "lama-first", - "--overall-time-limit", "5m"]), - common_setup.IssueConfig( - "lama-first-pref", ["--evaluator", - "hlm=lmcount(lm_factory=lm_reasonable_orders_hps(" - "lm_rhw()),transform=adapt_costs(one),pref=true)", - "--evaluator", "hff=ff(transform=adapt_costs(one))", - "--search", - """lazy_greedy([hff,hlm],preferred=[hff,hlm], cost_type=one,reopen_closed=false)"""], - driver_options=["--overall-time-limit", "5m"], - ), - common_setup.IssueConfig("lm-zg", ["--search", "eager_greedy([lmcount(lm_zg())])"], - driver_options=["--overall-time-limit", "5m"]), -] - -BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"] - -if common_setup.is_running_on_cluster(): - SUITE = common_setup.DEFAULT_SATISFICING_SUITE - ENVIRONMENT = BaselSlurmEnvironment( - partition="infai_1", - email="remo.christen@unibas.ch", - export=["PATH", "DOWNWARD_BENCHMARKS"], - ) -else: - SUITE = common_setup.IssueExperiment.DEFAULT_TEST_SUITE - 
ENVIRONMENT = LocalEnvironment(processes=2) - -exp = common_setup.IssueExperiment( - revisions=REVISIONS, - configs=CONFIGS, - environment=ENVIRONMENT, -) - -def remove_unfinished_tasks(run): - if "expansions" in run: - run["dead-end_empty_possible_achievers_finished"] = run["dead-end_empty_possible_achievers"] - return True - - -exp.add_suite(BENCHMARKS_DIR, SUITE) - -exp.add_parser(exp.ANYTIME_SEARCH_PARSER) -exp.add_parser(exp.EXITCODE_PARSER) -exp.add_parser(exp.PLANNER_PARSER) -exp.add_parser(exp.SINGLE_SEARCH_PARSER) -exp.add_parser("landmark_parser.py") -exp.add_parser("dead-end_parser-v2.py") - -ATTRIBUTES = IssueExperiment.DEFAULT_TABLE_ATTRIBUTES + [ - Attribute("landmarks", min_wins=False), - Attribute("disjunctive_landmarks", min_wins=False), - Attribute("conjunctive_landmarks", min_wins=False), - Attribute("orderings", min_wins=False), - Attribute("dead-end_empty_first_achievers", min_wins=False), - Attribute("dead-end_empty_possible_achievers", min_wins=False), - Attribute("dead-end_empty_possible_achievers_finished", min_wins=False), -] - -exp.add_step("build", exp.build) -exp.add_step("start", exp.start_runs) -exp.add_fetcher(name="fetch") -exp.add_step("comparison table", make_comparison_table) -exp.add_parse_again_step() - -exp.run_steps() - diff --git a/experiments/issue1049/v3-optimal-30min.py b/experiments/issue1049/v3-optimal-30min.py deleted file mode 100755 index 03c0009ada..0000000000 --- a/experiments/issue1049/v3-optimal-30min.py +++ /dev/null @@ -1,83 +0,0 @@ -#! 
/usr/bin/env python -# -*- coding: utf-8 -*- - -import common_setup -from common_setup import IssueConfig, IssueExperiment - -import os - -from lab.reports import Attribute - -from lab.environments import LocalEnvironment, BaselSlurmEnvironment - -ISSUE = "issue1049" - -def make_comparison_table(): - report = common_setup.ComparativeReport( - algorithm_pairs=[ - (f"{ISSUE}-base-seq-opt-bjolp", f"{ISSUE}-v3-seq-opt-bjolp"), - (f"{ISSUE}-base-lm-exhaust", f"{ISSUE}-v3-lm-exhaust"), - (f"{ISSUE}-base-lm-hm2", f"{ISSUE}-v3-lm-hm2"), - (f"{ISSUE}-base-seq-opt-bjolp-opt", f"{ISSUE}-v3-seq-opt-bjolp-opt"), - ], attributes=ATTRIBUTES, - ) - outfile = os.path.join( - exp.eval_dir, "%s-compare.%s" % (exp.name, report.output_format) - ) - report(exp.eval_dir, outfile) - exp.add_report(report) - - -REVISIONS = [ - f"{ISSUE}-base", - f"{ISSUE}-v3", -] - -CONFIGS = [ - common_setup.IssueConfig("seq-opt-bjolp", [], driver_options=["--alias", "seq-opt-bjolp"]), - common_setup.IssueConfig("lm-exhaust", ["--evaluator", "lmc=lmcount(lm_exhaust(),admissible=true)", "--search", "astar(lmc,lazy_evaluator=lmc)"]), - common_setup.IssueConfig("lm-hm2", ["--evaluator", "lmc=lmcount(lm_hm(m=2),admissible=true)", "--search", "astar(lmc,lazy_evaluator=lmc)"]), - common_setup.IssueConfig("seq-opt-bjolp-opt", ["--evaluator", "lmc=lmcount(lm_merged([lm_rhw(),lm_hm(m=1)]),admissible=true, optimal=true)", "--search", "astar(lmc,lazy_evaluator=lmc)"]), -] - -BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"] - -if common_setup.is_running_on_cluster(): - SUITE = common_setup.DEFAULT_OPTIMAL_SUITE - ENVIRONMENT = BaselSlurmEnvironment( - partition="infai_1", - email="remo.christen@unibas.ch", - export=["PATH", "DOWNWARD_BENCHMARKS"], - ) -else: - SUITE = common_setup.IssueExperiment.DEFAULT_TEST_SUITE - ENVIRONMENT = LocalEnvironment(processes=2) - -exp = common_setup.IssueExperiment( - revisions=REVISIONS, - configs=CONFIGS, - environment=ENVIRONMENT, -) - -exp.add_suite(BENCHMARKS_DIR, SUITE) - 
-exp.add_parser(exp.EXITCODE_PARSER) -exp.add_parser(exp.PLANNER_PARSER) -exp.add_parser(exp.SINGLE_SEARCH_PARSER) -exp.add_parser("landmark_parser.py") - -ATTRIBUTES = IssueExperiment.DEFAULT_TABLE_ATTRIBUTES + [ - Attribute("lmgraph_generation_time"), - Attribute("landmarks", min_wins=False), - Attribute("disjunctive_landmarks", min_wins=False), - Attribute("conjunctive_landmarks", min_wins=False), - Attribute("orderings", min_wins=False), -] - -exp.add_step("build", exp.build) -exp.add_step("start", exp.start_runs) -exp.add_fetcher(name="fetch") -exp.add_step("comparison table", make_comparison_table) -exp.add_parse_again_step() - -exp.run_steps() diff --git a/experiments/issue1049/v3-optimal.py b/experiments/issue1049/v3-optimal.py deleted file mode 100755 index a1b251eb82..0000000000 --- a/experiments/issue1049/v3-optimal.py +++ /dev/null @@ -1,84 +0,0 @@ -#! /usr/bin/env python -# -*- coding: utf-8 -*- - -import common_setup -from common_setup import IssueConfig, IssueExperiment - -import os - -from lab.reports import Attribute - -from lab.environments import LocalEnvironment, BaselSlurmEnvironment - -ISSUE = "issue1049" - -def make_comparison_table(): - report = common_setup.ComparativeReport( - algorithm_pairs=[ - (f"{ISSUE}-base-seq-opt-bjolp", f"{ISSUE}-v3-seq-opt-bjolp"), - (f"{ISSUE}-base-lm-exhaust", f"{ISSUE}-v3-lm-exhaust"), - (f"{ISSUE}-base-lm-hm2", f"{ISSUE}-v3-lm-hm2"), - (f"{ISSUE}-base-seq-opt-bjolp-opt", f"{ISSUE}-v3-seq-opt-bjolp-opt"), - ], attributes=ATTRIBUTES, - ) - outfile = os.path.join( - exp.eval_dir, "%s-compare.%s" % (exp.name, report.output_format) - ) - report(exp.eval_dir, outfile) - exp.add_report(report) - - -REVISIONS = [ - f"{ISSUE}-base", - f"{ISSUE}-v3", -] - -DRIVER_OPTIONS = ["--overall-time-limit", "5m"] - -CONFIGS = [ - common_setup.IssueConfig("seq-opt-bjolp", [], driver_options=["--alias", "seq-opt-bjolp"]+DRIVER_OPTIONS), - common_setup.IssueConfig("lm-exhaust", ["--evaluator", 
"lmc=lmcount(lm_exhaust(),admissible=true)", "--search", "astar(lmc,lazy_evaluator=lmc)"], driver_options=DRIVER_OPTIONS), - common_setup.IssueConfig("lm-hm2", ["--evaluator", "lmc=lmcount(lm_hm(m=2),admissible=true)", "--search", "astar(lmc,lazy_evaluator=lmc)"], driver_options=DRIVER_OPTIONS), - common_setup.IssueConfig("seq-opt-bjolp-opt", ["--evaluator", "lmc=lmcount(lm_merged([lm_rhw(),lm_hm(m=1)]),admissible=true, optimal=true)", "--search", "astar(lmc,lazy_evaluator=lmc)"], driver_options=DRIVER_OPTIONS), -] - -BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"] - -if common_setup.is_running_on_cluster(): - SUITE = common_setup.DEFAULT_OPTIMAL_SUITE - ENVIRONMENT = BaselSlurmEnvironment( - partition="infai_1", - email="remo.christen@unibas.ch", - export=["PATH", "DOWNWARD_BENCHMARKS"], - ) -else: - SUITE = common_setup.IssueExperiment.DEFAULT_TEST_SUITE - ENVIRONMENT = LocalEnvironment(processes=2) - -exp = common_setup.IssueExperiment( - revisions=REVISIONS, - configs=CONFIGS, - environment=ENVIRONMENT, -) - -exp.add_suite(BENCHMARKS_DIR, SUITE) - -exp.add_parser(exp.EXITCODE_PARSER) -exp.add_parser(exp.PLANNER_PARSER) -exp.add_parser(exp.SINGLE_SEARCH_PARSER) -exp.add_parser("landmark_parser.py") - -ATTRIBUTES = IssueExperiment.DEFAULT_TABLE_ATTRIBUTES + [ - Attribute("landmarks", min_wins=False), - Attribute("disjunctive_landmarks", min_wins=False), - Attribute("conjunctive_landmarks", min_wins=False), - Attribute("orderings", min_wins=False), -] - -exp.add_step("build", exp.build) -exp.add_step("start", exp.start_runs) -exp.add_fetcher(name="fetch") -exp.add_step("comparison table", make_comparison_table) -exp.add_parse_again_step() - -exp.run_steps() diff --git a/experiments/issue1049/v3-satisficing-30min.py b/experiments/issue1049/v3-satisficing-30min.py deleted file mode 100755 index 90bf117e3e..0000000000 --- a/experiments/issue1049/v3-satisficing-30min.py +++ /dev/null @@ -1,92 +0,0 @@ -#! 
/usr/bin/env python -# -*- coding: utf-8 -*- - -import common_setup - -from common_setup import IssueConfig, IssueExperiment - -import os - -from lab.reports import Attribute - -from lab.environments import LocalEnvironment, BaselSlurmEnvironment - -ISSUE = "issue1049" - -def make_comparison_table(): - report = common_setup.ComparativeReport( - algorithm_pairs=[ - (f"{ISSUE}-base-lama-first", f"{ISSUE}-v3-lama-first"), - (f"{ISSUE}-base-lama-first-pref", f"{ISSUE}-v3-lama-first-pref"), - (f"{ISSUE}-base-lm-zg", f"{ISSUE}-v3-lm-zg"), - ], attributes=ATTRIBUTES, - ) - outfile = os.path.join( - exp.eval_dir, "%s-compare.%s" % (exp.name, report.output_format) - ) - report(exp.eval_dir, outfile) - exp.add_report(report) - - -REVISIONS = [ - f"{ISSUE}-base", - f"{ISSUE}-v3", -] - -CONFIGS = [ - common_setup.IssueConfig("lama-first", [], - driver_options=["--alias", "lama-first"]), - common_setup.IssueConfig( - "lama-first-pref", ["--evaluator", - "hlm=lmcount(lm_factory=lm_reasonable_orders_hps(" - "lm_rhw()),transform=adapt_costs(one),pref=true)", - "--evaluator", "hff=ff(transform=adapt_costs(one))", - "--search", - """lazy_greedy([hff,hlm],preferred=[hff,hlm], cost_type=one,reopen_closed=false)"""], - ), - common_setup.IssueConfig("lm-zg", ["--search", "eager_greedy([lmcount(lm_zg())])"],), -] - -BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"] - -if common_setup.is_running_on_cluster(): - SUITE = common_setup.DEFAULT_SATISFICING_SUITE - ENVIRONMENT = BaselSlurmEnvironment( - partition="infai_1", - email="remo.christen@unibas.ch", - export=["PATH", "DOWNWARD_BENCHMARKS"], - ) -else: - SUITE = common_setup.IssueExperiment.DEFAULT_TEST_SUITE - ENVIRONMENT = LocalEnvironment(processes=2) - -exp = common_setup.IssueExperiment( - revisions=REVISIONS, - configs=CONFIGS, - environment=ENVIRONMENT, -) - -exp.add_suite(BENCHMARKS_DIR, SUITE) - -exp.add_parser(exp.ANYTIME_SEARCH_PARSER) -exp.add_parser(exp.EXITCODE_PARSER) -exp.add_parser(exp.PLANNER_PARSER) 
-exp.add_parser(exp.SINGLE_SEARCH_PARSER) -exp.add_parser("landmark_parser.py") - -ATTRIBUTES = IssueExperiment.DEFAULT_TABLE_ATTRIBUTES + [ - Attribute("lmgraph_generation_time"), - Attribute("landmarks", min_wins=False), - Attribute("disjunctive_landmarks", min_wins=False), - Attribute("conjunctive_landmarks", min_wins=False), - Attribute("orderings", min_wins=False), -] - -exp.add_step("build", exp.build) -exp.add_step("start", exp.start_runs) -exp.add_fetcher(name="fetch") -exp.add_step("comparison table", make_comparison_table) -exp.add_parse_again_step() - -exp.run_steps() - diff --git a/experiments/issue1049/v3-satisficing.py b/experiments/issue1049/v3-satisficing.py deleted file mode 100755 index aa0f6c77ef..0000000000 --- a/experiments/issue1049/v3-satisficing.py +++ /dev/null @@ -1,94 +0,0 @@ -#! /usr/bin/env python -# -*- coding: utf-8 -*- - -import common_setup - -from common_setup import IssueConfig, IssueExperiment - -import os - -from lab.reports import Attribute - -from lab.environments import LocalEnvironment, BaselSlurmEnvironment - -ISSUE = "issue1049" - -def make_comparison_table(): - report = common_setup.ComparativeReport( - algorithm_pairs=[ - (f"{ISSUE}-base-lama-first", f"{ISSUE}-v3-lama-first"), - (f"{ISSUE}-base-lama-first-pref", f"{ISSUE}-v3-lama-first-pref"), - (f"{ISSUE}-base-lm-zg", f"{ISSUE}-v3-lm-zg"), - ], attributes=ATTRIBUTES, - ) - outfile = os.path.join( - exp.eval_dir, "%s-compare.%s" % (exp.name, report.output_format) - ) - report(exp.eval_dir, outfile) - exp.add_report(report) - - -REVISIONS = [ - f"{ISSUE}-base", - f"{ISSUE}-v3", -] - -CONFIGS = [ - common_setup.IssueConfig("lama-first", [], - driver_options=["--alias", "lama-first", - "--overall-time-limit", "5m"]), - common_setup.IssueConfig( - "lama-first-pref", ["--evaluator", - "hlm=lmcount(lm_factory=lm_reasonable_orders_hps(" - "lm_rhw()),transform=adapt_costs(one),pref=true)", - "--evaluator", "hff=ff(transform=adapt_costs(one))", - "--search", - 
"""lazy_greedy([hff,hlm],preferred=[hff,hlm], cost_type=one,reopen_closed=false)"""], - driver_options=["--overall-time-limit", "5m"], - ), - common_setup.IssueConfig("lm-zg", ["--search", "eager_greedy([lmcount(lm_zg())])"], - driver_options=["--overall-time-limit", "5m"]), -] - -BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"] - -if common_setup.is_running_on_cluster(): - SUITE = common_setup.DEFAULT_SATISFICING_SUITE - ENVIRONMENT = BaselSlurmEnvironment( - partition="infai_1", - email="remo.christen@unibas.ch", - export=["PATH", "DOWNWARD_BENCHMARKS"], - ) -else: - SUITE = common_setup.IssueExperiment.DEFAULT_TEST_SUITE - ENVIRONMENT = LocalEnvironment(processes=2) - -exp = common_setup.IssueExperiment( - revisions=REVISIONS, - configs=CONFIGS, - environment=ENVIRONMENT, -) - -exp.add_suite(BENCHMARKS_DIR, SUITE) - -exp.add_parser(exp.ANYTIME_SEARCH_PARSER) -exp.add_parser(exp.EXITCODE_PARSER) -exp.add_parser(exp.PLANNER_PARSER) -exp.add_parser(exp.SINGLE_SEARCH_PARSER) -exp.add_parser("landmark_parser.py") - -ATTRIBUTES = IssueExperiment.DEFAULT_TABLE_ATTRIBUTES + [ - Attribute("landmarks", min_wins=False), - Attribute("disjunctive_landmarks", min_wins=False), - Attribute("conjunctive_landmarks", min_wins=False), - Attribute("orderings", min_wins=False), -] - -exp.add_step("build", exp.build) -exp.add_step("start", exp.start_runs) -exp.add_fetcher(name="fetch") -exp.add_step("comparison table", make_comparison_table) -exp.add_parse_again_step() - -exp.run_steps() - diff --git a/experiments/issue1055/common_setup.py b/experiments/issue1055/common_setup.py deleted file mode 100644 index 345b33d31c..0000000000 --- a/experiments/issue1055/common_setup.py +++ /dev/null @@ -1,529 +0,0 @@ -# -*- coding: utf-8 -*- - -import itertools -import os -import platform -import subprocess -import sys - -from lab.experiment import ARGPARSER -from lab import tools - -from downward.experiment import FastDownwardExperiment -from downward.reports.absolute import AbsoluteReport 
-from downward.reports.compare import ComparativeReport -from downward.reports.scatter import ScatterPlotReport - - -def parse_args(): - ARGPARSER.add_argument( - "--test", - choices=["yes", "no", "auto"], - default="auto", - dest="test_run", - help="test experiment locally on a small suite if --test=yes or " - "--test=auto and we are not on a cluster") - return ARGPARSER.parse_args() - -ARGS = parse_args() - - -DEFAULT_OPTIMAL_SUITE = [ - 'agricola-opt18-strips', 'airport', 'barman-opt11-strips', - 'barman-opt14-strips', 'blocks', 'childsnack-opt14-strips', - 'data-network-opt18-strips', 'depot', 'driverlog', - 'elevators-opt08-strips', 'elevators-opt11-strips', - 'floortile-opt11-strips', 'floortile-opt14-strips', 'freecell', - 'ged-opt14-strips', 'grid', 'gripper', 'hiking-opt14-strips', - 'logistics00', 'logistics98', 'miconic', 'movie', 'mprime', - 'mystery', 'nomystery-opt11-strips', 'openstacks-opt08-strips', - 'openstacks-opt11-strips', 'openstacks-opt14-strips', - 'openstacks-strips', 'organic-synthesis-opt18-strips', - 'organic-synthesis-split-opt18-strips', 'parcprinter-08-strips', - 'parcprinter-opt11-strips', 'parking-opt11-strips', - 'parking-opt14-strips', 'pathways', 'pegsol-08-strips', - 'pegsol-opt11-strips', 'petri-net-alignment-opt18-strips', - 'pipesworld-notankage', 'pipesworld-tankage', 'psr-small', 'rovers', - 'satellite', 'scanalyzer-08-strips', 'scanalyzer-opt11-strips', - 'snake-opt18-strips', 'sokoban-opt08-strips', - 'sokoban-opt11-strips', 'spider-opt18-strips', 'storage', - 'termes-opt18-strips', 'tetris-opt14-strips', - 'tidybot-opt11-strips', 'tidybot-opt14-strips', 'tpp', - 'transport-opt08-strips', 'transport-opt11-strips', - 'transport-opt14-strips', 'trucks-strips', 'visitall-opt11-strips', - 'visitall-opt14-strips', 'woodworking-opt08-strips', - 'woodworking-opt11-strips', 'zenotravel'] - -DEFAULT_SATISFICING_SUITE = [ - 'agricola-sat18-strips', 'airport', 'assembly', - 'barman-sat11-strips', 'barman-sat14-strips', 'blocks', - 
'caldera-sat18-adl', 'caldera-split-sat18-adl', 'cavediving-14-adl', - 'childsnack-sat14-strips', 'citycar-sat14-adl', - 'data-network-sat18-strips', 'depot', 'driverlog', - 'elevators-sat08-strips', 'elevators-sat11-strips', - 'flashfill-sat18-adl', 'floortile-sat11-strips', - 'floortile-sat14-strips', 'freecell', 'ged-sat14-strips', 'grid', - 'gripper', 'hiking-sat14-strips', 'logistics00', 'logistics98', - 'maintenance-sat14-adl', 'miconic', 'miconic-fulladl', - 'miconic-simpleadl', 'movie', 'mprime', 'mystery', - 'nomystery-sat11-strips', 'nurikabe-sat18-adl', 'openstacks', - 'openstacks-sat08-adl', 'openstacks-sat08-strips', - 'openstacks-sat11-strips', 'openstacks-sat14-strips', - 'openstacks-strips', 'optical-telegraphs', - 'organic-synthesis-sat18-strips', - 'organic-synthesis-split-sat18-strips', 'parcprinter-08-strips', - 'parcprinter-sat11-strips', 'parking-sat11-strips', - 'parking-sat14-strips', 'pathways', - 'pegsol-08-strips', 'pegsol-sat11-strips', 'philosophers', - 'pipesworld-notankage', 'pipesworld-tankage', 'psr-large', - 'psr-middle', 'psr-small', 'rovers', 'satellite', - 'scanalyzer-08-strips', 'scanalyzer-sat11-strips', 'schedule', - 'settlers-sat18-adl', 'snake-sat18-strips', 'sokoban-sat08-strips', - 'sokoban-sat11-strips', 'spider-sat18-strips', 'storage', - 'termes-sat18-strips', 'tetris-sat14-strips', - 'thoughtful-sat14-strips', 'tidybot-sat11-strips', 'tpp', - 'transport-sat08-strips', 'transport-sat11-strips', - 'transport-sat14-strips', 'trucks', 'trucks-strips', - 'visitall-sat11-strips', 'visitall-sat14-strips', - 'woodworking-sat08-strips', 'woodworking-sat11-strips', - 'zenotravel'] - -ALL_SUITE = [ - 'agricola-opt18-strips', - 'agricola-sat18-strips', - 'airport', - 'airport-adl', - 'assembly', - 'barman-mco14-strips', - 'barman-opt11-strips', - 'barman-opt14-strips', - 'barman-sat11-strips', - 'barman-sat14-strips', - 'blocks', - 'caldera-opt18-adl', - 'caldera-sat18-adl', - 'caldera-split-opt18-adl', - 
'caldera-split-sat18-adl', - 'cavediving-14-adl', - 'childsnack-opt14-strips', - 'childsnack-sat14-strips', - 'citycar-opt14-adl', - 'citycar-sat14-adl', - 'data-network-opt18-strips', - 'data-network-sat18-strips', - 'depot', - 'driverlog', - 'elevators-opt08-strips', - 'elevators-opt11-strips', - 'elevators-sat08-strips', - 'elevators-sat11-strips', - 'flashfill-sat18-adl', - 'floortile-opt11-strips', - 'floortile-opt14-strips', - 'floortile-sat11-strips', - 'floortile-sat14-strips', - 'freecell', - 'ged-opt14-strips', - 'ged-sat14-strips', - 'grid', - 'gripper', - 'hiking-agl14-strips', - 'hiking-opt14-strips', - 'hiking-sat14-strips', - 'logistics00', - 'logistics98', - 'maintenance-opt14-adl', - 'maintenance-sat14-adl', - 'miconic', - 'miconic-fulladl', - 'miconic-simpleadl', - 'movie', - 'mprime', - 'mystery', - 'nomystery-opt11-strips', - 'nomystery-sat11-strips', - 'nurikabe-opt18-adl', - 'nurikabe-sat18-adl', - 'openstacks', - 'openstacks-agl14-strips', - 'openstacks-opt08-adl', - 'openstacks-opt08-strips', - 'openstacks-opt11-strips', - 'openstacks-opt14-strips', - 'openstacks-sat08-adl', - 'openstacks-sat08-strips', - 'openstacks-sat11-strips', - 'openstacks-sat14-strips', - 'openstacks-strips', - 'optical-telegraphs', - 'organic-synthesis-opt18-strips', - 'organic-synthesis-sat18-strips', - 'organic-synthesis-split-opt18-strips', - 'organic-synthesis-split-sat18-strips', - 'parcprinter-08-strips', - 'parcprinter-opt11-strips', - 'parcprinter-sat11-strips', - 'parking-opt11-strips', - 'parking-opt14-strips', - 'parking-sat11-strips', - 'parking-sat14-strips', - 'pathways', - 'pegsol-08-strips', - 'pegsol-opt11-strips', - 'pegsol-sat11-strips', - 'petri-net-alignment-opt18-strips', - 'philosophers', - 'pipesworld-notankage', - 'pipesworld-tankage', - 'psr-large', - 'psr-middle', - 'psr-small', - 'rovers', - 'satellite', - 'scanalyzer-08-strips', - 'scanalyzer-opt11-strips', - 'scanalyzer-sat11-strips', - 'schedule', - 'settlers-opt18-adl', - 
'settlers-sat18-adl', - 'snake-opt18-strips', - 'snake-sat18-strips', - 'sokoban-opt08-strips', - 'sokoban-opt11-strips', - 'sokoban-sat08-strips', - 'sokoban-sat11-strips', - 'spider-opt18-strips', - 'spider-sat18-strips', - 'storage', - 'termes-opt18-strips', - 'termes-sat18-strips', - 'tetris-opt14-strips', - 'tetris-sat14-strips', - 'thoughtful-mco14-strips', - 'thoughtful-sat14-strips', - 'tidybot-opt11-strips', - 'tidybot-opt14-strips', - 'tidybot-sat11-strips', - 'tpp', - 'transport-opt08-strips', - 'transport-opt11-strips', - 'transport-opt14-strips', - 'transport-sat08-strips', - 'transport-sat11-strips', - 'transport-sat14-strips', - 'trucks', - 'trucks-strips', - 'visitall-opt11-strips', - 'visitall-opt14-strips', - 'visitall-sat11-strips', - 'visitall-sat14-strips', - 'woodworking-opt08-strips', - 'woodworking-opt11-strips', - 'woodworking-sat08-strips', - 'woodworking-sat11-strips', - 'zenotravel' -] - -def get_script(): - """Get file name of main script.""" - return tools.get_script_path() - - -def get_script_dir(): - """Get directory of main script. - - Usually a relative directory (depends on how it was called by the user.)""" - return os.path.dirname(get_script()) - - -def get_experiment_name(): - """Get name for experiment. - - Derived from the absolute filename of the main script, e.g. - "/ham/spam/eggs.py" => "spam-eggs".""" - script = os.path.abspath(get_script()) - script_dir = os.path.basename(os.path.dirname(script)) - script_base = os.path.splitext(os.path.basename(script))[0] - return "%s-%s" % (script_dir, script_base) - - -def get_data_dir(): - """Get data dir for the experiment. - - This is the subdirectory "data" of the directory containing - the main script.""" - return os.path.join(get_script_dir(), "data", get_experiment_name()) - - -def get_repo_base(): - """Get base directory of the repository, as an absolute path. 
- - Search upwards in the directory tree from the main script until a - directory with a subdirectory named ".git" is found. - - Abort if the repo base cannot be found.""" - path = os.path.abspath(get_script_dir()) - while os.path.dirname(path) != path: - if os.path.exists(os.path.join(path, ".git")): - return path - path = os.path.dirname(path) - sys.exit("repo base could not be found") - - -def is_running_on_cluster(): - node = platform.node() - return node.endswith(".scicore.unibas.ch") or node.endswith(".cluster.bc2.ch") - - -def is_test_run(): - return ARGS.test_run == "yes" or ( - ARGS.test_run == "auto" and not is_running_on_cluster()) - - -def get_algo_nick(revision, config_nick): - return "{revision}-{config_nick}".format(**locals()) - - -class IssueConfig(object): - """Hold information about a planner configuration. - - See FastDownwardExperiment.add_algorithm() for documentation of the - constructor's options. - - """ - def __init__(self, nick, component_options, - build_options=None, driver_options=None): - self.nick = nick - self.component_options = component_options - self.build_options = build_options - self.driver_options = driver_options - - -class IssueExperiment(FastDownwardExperiment): - """Subclass of FastDownwardExperiment with some convenience features.""" - - DEFAULT_TEST_SUITE = ["depot:p01.pddl", "gripper:prob01.pddl"] - - DEFAULT_TABLE_ATTRIBUTES = [ - "cost", - "coverage", - "error", - "evaluations", - "expansions", - "expansions_until_last_jump", - "generated", - "memory", - "planner_memory", - "planner_time", - "quality", - "run_dir", - "score_evaluations", - "score_expansions", - "score_generated", - "score_memory", - "score_search_time", - "score_total_time", - "search_time", - "total_time", - ] - - DEFAULT_SCATTER_PLOT_ATTRIBUTES = [ - "evaluations", - "expansions", - "expansions_until_last_jump", - "initial_h_value", - "memory", - "search_time", - "total_time", - ] - - PORTFOLIO_ATTRIBUTES = [ - "cost", - "coverage", - "error", - 
"plan_length", - "run_dir", - ] - - def __init__(self, revisions=None, configs=None, path=None, **kwargs): - """ - - You can either specify both *revisions* and *configs* or none - of them. If they are omitted, you will need to call - exp.add_algorithm() manually. - - If *revisions* is given, it must be a non-empty list of - revision identifiers, which specify which planner versions to - use in the experiment. The same versions are used for - translator, preprocessor and search. :: - - IssueExperiment(revisions=["issue123", "4b3d581643"], ...) - - If *configs* is given, it must be a non-empty list of - IssueConfig objects. :: - - IssueExperiment(..., configs=[ - IssueConfig("ff", ["--search", "eager_greedy(ff())"]), - IssueConfig( - "lama", [], - driver_options=["--alias", "seq-sat-lama-2011"]), - ]) - - If *path* is specified, it must be the path to where the - experiment should be built (e.g. - /home/john/experiments/issue123/exp01/). If omitted, the - experiment path is derived automatically from the main - script's filename. 
Example:: - - script = experiments/issue123/exp01.py --> - path = experiments/issue123/data/issue123-exp01/ - - """ - - path = path or get_data_dir() - - FastDownwardExperiment.__init__(self, path=path, **kwargs) - - if (revisions and not configs) or (not revisions and configs): - raise ValueError( - "please provide either both or none of revisions and configs") - - for rev in revisions: - for config in configs: - self.add_algorithm( - get_algo_nick(rev, config.nick), - get_repo_base(), - rev, - config.component_options, - build_options=config.build_options, - driver_options=config.driver_options) - - self._revisions = revisions - self._configs = configs - - @classmethod - def _is_portfolio(cls, config_nick): - return "fdss" in config_nick - - @classmethod - def get_supported_attributes(cls, config_nick, attributes): - if cls._is_portfolio(config_nick): - return [attr for attr in attributes - if attr in cls.PORTFOLIO_ATTRIBUTES] - return attributes - - def add_absolute_report_step(self, **kwargs): - """Add step that makes an absolute report. - - Absolute reports are useful for experiments that don't compare - revisions. - - The report is written to the experiment evaluation directory. - - All *kwargs* will be passed to the AbsoluteReport class. If the - keyword argument *attributes* is not specified, a default list - of attributes is used. :: - - exp.add_absolute_report_step(attributes=["coverage"]) - - """ - kwargs.setdefault("attributes", self.DEFAULT_TABLE_ATTRIBUTES) - report = AbsoluteReport(**kwargs) - outfile = os.path.join( - self.eval_dir, - get_experiment_name() + "." + report.output_format) - self.add_report(report, outfile=outfile) - # self.add_step( - # 'publish-absolute-report', subprocess.call, ['publish', outfile]) - - def add_comparison_table_step(self, **kwargs): - """Add a step that makes pairwise revision comparisons. - - Create comparative reports for all pairs of Fast Downward - revisions. 
Each report pairs up the runs of the same config and - lists the two absolute attribute values and their difference - for all attributes in kwargs["attributes"]. - - All *kwargs* will be passed to the CompareConfigsReport class. - If the keyword argument *attributes* is not specified, a - default list of attributes is used. :: - - exp.add_comparison_table_step(attributes=["coverage"]) - - """ - kwargs.setdefault("attributes", self.DEFAULT_TABLE_ATTRIBUTES) - - def make_comparison_tables(): - for rev1, rev2 in itertools.combinations(self._revisions, 2): - compared_configs = [] - for config in self._configs: - config_nick = config.nick - compared_configs.append( - ("%s-%s" % (rev1, config_nick), - "%s-%s" % (rev2, config_nick), - "Diff (%s)" % config_nick)) - report = ComparativeReport(compared_configs, **kwargs) - outfile = os.path.join( - self.eval_dir, - "%s-%s-%s-compare.%s" % ( - self.name, rev1, rev2, report.output_format)) - report(self.eval_dir, outfile) - - def publish_comparison_tables(): - for rev1, rev2 in itertools.combinations(self._revisions, 2): - outfile = os.path.join( - self.eval_dir, - "%s-%s-%s-compare.html" % (self.name, rev1, rev2)) - subprocess.call(["publish", outfile]) - - self.add_step("make-comparison-tables", make_comparison_tables) - # self.add_step( - # "publish-comparison-tables", publish_comparison_tables) - - def add_scatter_plot_step(self, relative=False, attributes=None): - """Add step creating (relative) scatter plots for all revision pairs. - - Create a scatter plot for each combination of attribute, - configuration and revisions pair. If *attributes* is not - specified, a list of common scatter plot attributes is used. - For portfolios all attributes except "cost", "coverage" and - "plan_length" will be ignored. 
:: - - exp.add_scatter_plot_step(attributes=["expansions"]) - - """ - if relative: - scatter_dir = os.path.join(self.eval_dir, "scatter-relative") - step_name = "make-relative-scatter-plots" - else: - scatter_dir = os.path.join(self.eval_dir, "scatter-absolute") - step_name = "make-absolute-scatter-plots" - if attributes is None: - attributes = self.DEFAULT_SCATTER_PLOT_ATTRIBUTES - - def make_scatter_plot(config_nick, rev1, rev2, attribute, config_nick2=None): - name = "-".join([self.name, rev1, rev2, attribute, config_nick]) - if config_nick2 is not None: - name += "-" + config_nick2 - print("Make scatter plot for", name) - algo1 = get_algo_nick(rev1, config_nick) - algo2 = get_algo_nick(rev2, config_nick if config_nick2 is None else config_nick2) - report = ScatterPlotReport( - filter_algorithm=[algo1, algo2], - attributes=[attribute], - relative=relative, - get_category=lambda run1, run2: run1["domain"]) - report( - self.eval_dir, - os.path.join(scatter_dir, rev1 + "-" + rev2, name)) - - def make_scatter_plots(): - for config in self._configs: - for rev1, rev2 in itertools.combinations(self._revisions, 2): - for attribute in self.get_supported_attributes( - config.nick, attributes): - make_scatter_plot(config.nick, rev1, rev2, attribute) - for nick1, nick2, rev1, rev2, attribute in additional: - make_scatter_plot(nick1, rev1, rev2, attribute, config_nick2=nick2) - - self.add_step(step_name, make_scatter_plots) diff --git a/experiments/issue1055/negative_example/domain.pddl b/experiments/issue1055/negative_example/domain.pddl deleted file mode 100644 index 98eac81361..0000000000 --- a/experiments/issue1055/negative_example/domain.pddl +++ /dev/null @@ -1,10 +0,0 @@ -(define (domain latent) - (:requirements :strips :negative-preconditions) - (:predicates (p0 ?x0) (p1 ?x0)) - (:action a1 - :parameters (?x0) :precondition - (and (not (p0 ?x0))) - :effect - (and (p0 ?x0)) - ) -) \ No newline at end of file diff --git 
a/experiments/issue1055/negative_example/problem.pddl b/experiments/issue1055/negative_example/problem.pddl deleted file mode 100644 index 4c7c5eee6a..0000000000 --- a/experiments/issue1055/negative_example/problem.pddl +++ /dev/null @@ -1,9 +0,0 @@ -(define (problem latent) (:domain latent) - (:objects o0) - (:init - (p1 o0) - ) - (:goal - (and (p0 o0) (not (p1 o0)) - - ))) diff --git a/experiments/issue1055/translator_additional_parser.py b/experiments/issue1055/translator_additional_parser.py deleted file mode 100755 index c77b95d0f1..0000000000 --- a/experiments/issue1055/translator_additional_parser.py +++ /dev/null @@ -1,12 +0,0 @@ -#!/usr/bin/env python - -import hashlib - -from lab.parser import Parser - -def add_hash_value(content, props): - props['translator_output_sas_hash'] = hashlib.sha512(content.encode()).hexdigest() - -parser = Parser() -parser.add_function(add_hash_value, file="output.sas") -parser.parse() diff --git a/experiments/issue1055/v1.py b/experiments/issue1055/v1.py deleted file mode 100755 index 33bcdaef4a..0000000000 --- a/experiments/issue1055/v1.py +++ /dev/null @@ -1,83 +0,0 @@ -#! 
/usr/bin/env python3 -# -*- coding: utf-8 -*- - -import os - -from lab.environments import LocalEnvironment, BaselSlurmEnvironment -from lab import tools - -from downward.reports.compare import ComparativeReport -from downward.reports import PlanningReport -from downward.experiment import FastDownwardExperiment - -import common_setup -from common_setup import IssueConfig, IssueExperiment - - -DIR = os.path.dirname(os.path.abspath(__file__)) -BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"] -REVISIONS = [f"issue1055-{version}" for version in ["base", "v1"]] -CONFIGS = [ - IssueConfig( - "translate-only", - [], - driver_options=["--translate"]) -] - -if common_setup.is_test_run(): - SUITE = IssueExperiment.DEFAULT_TEST_SUITE - ENVIRONMENT = LocalEnvironment(processes=1) -else: - SUITE = common_setup.ALL_SUITE - ENVIRONMENT = BaselSlurmEnvironment( - partition="infai_2", - email="patrick.ferber@unibas.ch", - export=["PATH", "DOWNWARD_BENCHMARKS"]) - -ATTRIBUTES = [ - 'translator_time_done' -] - -exp = IssueExperiment( - revisions=REVISIONS, - configs=CONFIGS, - environment=ENVIRONMENT, -) -exp.add_suite(BENCHMARKS_DIR, SUITE) -exp.add_suite(common_setup.get_script_dir(), "negative_example") - -exp.add_parser('translator_additional_parser.py') -exp.add_parser(FastDownwardExperiment.TRANSLATOR_PARSER) - - -class TranslatorDiffReport(PlanningReport): - def get_cell(self, run): - return ";".join(run.get(attr) for attr in self.attributes) - - def get_text(self): - lines = [] - for runs in self.problem_runs.values(): - lhashes = [r.get("translator_output_sas_hash") for r in runs] - hashes = set(lhashes) - reason = "" - if None in hashes: - reason = f"{len([h for h in lhashes if h is None])} failed + " - if len(hashes) > 1: - reason += f"{len([h for h in lhashes if h is not None])} differ" - if len(reason): - lines.append(reason + ";" + ";".join([self.get_cell(r) for r in runs])) - return "\n".join(lines) - - -exp.add_step('build', exp.build) -exp.add_step('start', 
exp.start_runs) -exp.add_fetcher(name='fetch') -exp.add_comparison_table_step(attributes=ATTRIBUTES) - -exp.add_report(TranslatorDiffReport( - attributes=["domain", "problem", "algorithm", "run_dir"] - ), outfile="different_output_sas.csv" -) - -exp.run_steps() - diff --git a/experiments/issue1058/common_setup.py b/experiments/issue1058/common_setup.py deleted file mode 100644 index eecf49e971..0000000000 --- a/experiments/issue1058/common_setup.py +++ /dev/null @@ -1,427 +0,0 @@ -# -*- coding: utf-8 -*- - -import itertools -import os -import platform -import subprocess -import sys - -from lab.experiment import ARGPARSER -from lab import tools - -from downward.experiment import FastDownwardExperiment -from downward.reports.absolute import AbsoluteReport -from downward.reports.compare import ComparativeReport -from downward.reports.scatter import ScatterPlotReport - - -def parse_args(): - ARGPARSER.add_argument( - "--test", - choices=["yes", "no", "auto"], - default="auto", - dest="test_run", - help="test experiment locally on a small suite if --test=yes or " - "--test=auto and we are not on a cluster") - return ARGPARSER.parse_args() - -ARGS = parse_args() - - -DEFAULT_OPTIMAL_SUITE = [ - 'agricola-opt18-strips', 'airport', 'barman-opt11-strips', - 'barman-opt14-strips', 'blocks', 'childsnack-opt14-strips', - 'data-network-opt18-strips', 'depot', 'driverlog', - 'elevators-opt08-strips', 'elevators-opt11-strips', - 'floortile-opt11-strips', 'floortile-opt14-strips', 'freecell', - 'ged-opt14-strips', 'grid', 'gripper', 'hiking-opt14-strips', - 'logistics00', 'logistics98', 'miconic', 'movie', 'mprime', - 'mystery', 'nomystery-opt11-strips', 'openstacks-opt08-strips', - 'openstacks-opt11-strips', 'openstacks-opt14-strips', - 'openstacks-strips', 'organic-synthesis-opt18-strips', - 'organic-synthesis-split-opt18-strips', 'parcprinter-08-strips', - 'parcprinter-opt11-strips', 'parking-opt11-strips', - 'parking-opt14-strips', 'pathways', 'pegsol-08-strips', - 
'pegsol-opt11-strips', 'petri-net-alignment-opt18-strips', - 'pipesworld-notankage', 'pipesworld-tankage', 'psr-small', 'rovers', - 'satellite', 'scanalyzer-08-strips', 'scanalyzer-opt11-strips', - 'snake-opt18-strips', 'sokoban-opt08-strips', - 'sokoban-opt11-strips', 'spider-opt18-strips', 'storage', - 'termes-opt18-strips', 'tetris-opt14-strips', - 'tidybot-opt11-strips', 'tidybot-opt14-strips', 'tpp', - 'transport-opt08-strips', 'transport-opt11-strips', - 'transport-opt14-strips', 'trucks-strips', 'visitall-opt11-strips', - 'visitall-opt14-strips', 'woodworking-opt08-strips', - 'woodworking-opt11-strips', 'zenotravel'] - -DEFAULT_SATISFICING_SUITE = [ - 'agricola-sat18-strips', 'airport', 'assembly', - 'barman-sat11-strips', 'barman-sat14-strips', 'blocks', - 'caldera-sat18-adl', 'caldera-split-sat18-adl', 'cavediving-14-adl', - 'childsnack-sat14-strips', 'citycar-sat14-adl', - 'data-network-sat18-strips', 'depot', 'driverlog', - 'elevators-sat08-strips', 'elevators-sat11-strips', - 'flashfill-sat18-adl', 'floortile-sat11-strips', - 'floortile-sat14-strips', 'freecell', 'ged-sat14-strips', 'grid', - 'gripper', 'hiking-sat14-strips', 'logistics00', 'logistics98', - 'maintenance-sat14-adl', 'miconic', 'miconic-fulladl', - 'miconic-simpleadl', 'movie', 'mprime', 'mystery', - 'nomystery-sat11-strips', 'nurikabe-sat18-adl', 'openstacks', - 'openstacks-sat08-adl', 'openstacks-sat08-strips', - 'openstacks-sat11-strips', 'openstacks-sat14-strips', - 'openstacks-strips', 'optical-telegraphs', - 'organic-synthesis-sat18-strips', - 'organic-synthesis-split-sat18-strips', 'parcprinter-08-strips', - 'parcprinter-sat11-strips', 'parking-sat11-strips', - 'parking-sat14-strips', 'pathways', - 'pegsol-08-strips', 'pegsol-sat11-strips', 'philosophers', - 'pipesworld-notankage', 'pipesworld-tankage', 'psr-large', - 'psr-middle', 'psr-small', 'rovers', 'satellite', - 'scanalyzer-08-strips', 'scanalyzer-sat11-strips', 'schedule', - 'settlers-sat18-adl', 'snake-sat18-strips', 
'sokoban-sat08-strips', - 'sokoban-sat11-strips', 'spider-sat18-strips', 'storage', - 'termes-sat18-strips', 'tetris-sat14-strips', - 'thoughtful-sat14-strips', 'tidybot-sat11-strips', 'tpp', - 'transport-sat08-strips', 'transport-sat11-strips', - 'transport-sat14-strips', 'trucks', 'trucks-strips', - 'visitall-sat11-strips', 'visitall-sat14-strips', - 'woodworking-sat08-strips', 'woodworking-sat11-strips', - 'zenotravel'] - - -def get_script(): - """Get file name of main script.""" - return tools.get_script_path() - - -def get_script_dir(): - """Get directory of main script. - - Usually a relative directory (depends on how it was called by the user.)""" - return os.path.dirname(get_script()) - - -def get_experiment_name(): - """Get name for experiment. - - Derived from the absolute filename of the main script, e.g. - "/ham/spam/eggs.py" => "spam-eggs".""" - script = os.path.abspath(get_script()) - script_dir = os.path.basename(os.path.dirname(script)) - script_base = os.path.splitext(os.path.basename(script))[0] - return "%s-%s" % (script_dir, script_base) - - -def get_data_dir(): - """Get data dir for the experiment. - - This is the subdirectory "data" of the directory containing - the main script.""" - return os.path.join(get_script_dir(), "data", get_experiment_name()) - - -def get_repo_base(): - """Get base directory of the repository, as an absolute path. - - Search upwards in the directory tree from the main script until a - directory with a subdirectory named ".git" is found. 
- - Abort if the repo base cannot be found.""" - path = os.path.abspath(get_script_dir()) - while os.path.dirname(path) != path: - if os.path.exists(os.path.join(path, ".git")): - return path - path = os.path.dirname(path) - sys.exit("repo base could not be found") - - -def is_running_on_cluster(): - node = platform.node() - return node.endswith(".scicore.unibas.ch") or node.endswith(".cluster.bc2.ch") - - -def is_test_run(): - return ARGS.test_run == "yes" or ( - ARGS.test_run == "auto" and not is_running_on_cluster()) - - -def get_algo_nick(revision, config_nick): - return "{revision}-{config_nick}".format(**locals()) - - -class IssueConfig(object): - """Hold information about a planner configuration. - - See FastDownwardExperiment.add_algorithm() for documentation of the - constructor's options. - - """ - def __init__(self, nick, component_options, - build_options=None, driver_options=None): - self.nick = nick - self.component_options = component_options - self.build_options = build_options - self.driver_options = driver_options - - -class IssueExperiment(FastDownwardExperiment): - """Subclass of FastDownwardExperiment with some convenience features.""" - - DEFAULT_TEST_SUITE = ["depot:p01.pddl", "gripper:prob01.pddl"] - - DEFAULT_TABLE_ATTRIBUTES = [ - "cost", - "coverage", - "error", - "evaluations", - "expansions", - "expansions_until_last_jump", - "generated", - "memory", - "planner_memory", - "planner_time", - "quality", - "run_dir", - "score_evaluations", - "score_expansions", - "score_generated", - "score_memory", - "score_search_time", - "score_total_time", - "search_time", - "total_time", - ] - - DEFAULT_SCATTER_PLOT_ATTRIBUTES = [ - "evaluations", - "expansions", - "expansions_until_last_jump", - "initial_h_value", - "memory", - "search_time", - "total_time", - ] - - PORTFOLIO_ATTRIBUTES = [ - "cost", - "coverage", - "error", - "plan_length", - "run_dir", - ] - - def __init__(self, revisions=None, configs=None, path=None, **kwargs): - """ - - You can 
either specify both *revisions* and *configs* or none - of them. If they are omitted, you will need to call - exp.add_algorithm() manually. - - If *revisions* is given, it must be a non-empty list of - revision identifiers, which specify which planner versions to - use in the experiment. The same versions are used for - translator, preprocessor and search. :: - - IssueExperiment(revisions=["issue123", "4b3d581643"], ...) - - If *configs* is given, it must be a non-empty list of - IssueConfig objects. :: - - IssueExperiment(..., configs=[ - IssueConfig("ff", ["--search", "eager_greedy(ff())"]), - IssueConfig( - "lama", [], - driver_options=["--alias", "seq-sat-lama-2011"]), - ]) - - If *path* is specified, it must be the path to where the - experiment should be built (e.g. - /home/john/experiments/issue123/exp01/). If omitted, the - experiment path is derived automatically from the main - script's filename. Example:: - - script = experiments/issue123/exp01.py --> - path = experiments/issue123/data/issue123-exp01/ - - """ - - path = path or get_data_dir() - - FastDownwardExperiment.__init__(self, path=path, **kwargs) - - if (revisions and not configs) or (not revisions and configs): - raise ValueError( - "please provide either both or none of revisions and configs") - - for rev in revisions: - for config in configs: - self.add_algorithm( - get_algo_nick(rev, config.nick), - get_repo_base(), - rev, - config.component_options, - build_options=config.build_options, - driver_options=config.driver_options) - - self._revisions = revisions - self._configs = configs - - @classmethod - def _is_portfolio(cls, config_nick): - return "fdss" in config_nick - - @classmethod - def get_supported_attributes(cls, config_nick, attributes): - if cls._is_portfolio(config_nick): - return [attr for attr in attributes - if attr in cls.PORTFOLIO_ATTRIBUTES] - return attributes - - def add_absolute_report_step(self, **kwargs): - """Add step that makes an absolute report. 
- - Absolute reports are useful for experiments that don't compare - revisions. - - The report is written to the experiment evaluation directory. - - All *kwargs* will be passed to the AbsoluteReport class. If the - keyword argument *attributes* is not specified, a default list - of attributes is used. :: - - exp.add_absolute_report_step(attributes=["coverage"]) - - """ - kwargs.setdefault("attributes", self.DEFAULT_TABLE_ATTRIBUTES) - report = AbsoluteReport(**kwargs) - outfile = os.path.join( - self.eval_dir, - get_experiment_name() + "." + report.output_format) - self.add_report(report, outfile=outfile) - self.add_step( - 'publish-absolute-report', subprocess.call, ['publish', outfile]) - - def add_comparison_table_step(self, name="make-comparison-tables", revisions=[], **kwargs): - """Add a step that makes pairwise revision comparisons. - - Create comparative reports for all pairs of Fast Downward - revisions. Each report pairs up the runs of the same config and - lists the two absolute attribute values and their difference - for all attributes in kwargs["attributes"]. - - All *kwargs* will be passed to the CompareConfigsReport class. - If the keyword argument *attributes* is not specified, a - default list of attributes is used. 
:: - - exp.add_comparison_table_step(attributes=["coverage"]) - - """ - kwargs.setdefault("attributes", self.DEFAULT_TABLE_ATTRIBUTES) - if not revisions: - revisions = self._revisions - - def make_comparison_tables(): - for rev1, rev2 in itertools.combinations(revisions, 2): - compared_configs = [] - for config in self._configs: - config_nick = config.nick - compared_configs.append( - ("%s-%s" % (rev1, config_nick), - "%s-%s" % (rev2, config_nick), - "Diff (%s)" % config_nick)) - report = ComparativeReport(compared_configs, **kwargs) - outfile = os.path.join( - self.eval_dir, - "%s-%s-%s-compare.%s" % ( - self.name, rev1, rev2, report.output_format)) - report(self.eval_dir, outfile) - - def publish_comparison_tables(): - for rev1, rev2 in itertools.combinations(revisions, 2): - outfile = os.path.join( - self.eval_dir, - "%s-%s-%s-compare.html" % (self.name, rev1, rev2)) - subprocess.call(["publish", outfile]) - - self.add_step(name, make_comparison_tables) - self.add_step( - f"publish-{name}", publish_comparison_tables) - - def add_comparison_table_step_for_revision_pairs( - self, revision_pairs, name="make-comparison-tables-for-revision-pairs", **kwargs): - kwargs.setdefault("attributes", self.DEFAULT_TABLE_ATTRIBUTES) - - def make_comparison_tables(): - for rev1, rev2 in revision_pairs: - compared_configs = [] - for config in self._configs: - config_nick = config.nick - compared_configs.append( - ("%s-%s" % (rev1, config_nick), - "%s-%s" % (rev2, config_nick), - "Diff (%s)" % config_nick)) - report = ComparativeReport(compared_configs, **kwargs) - outfile = os.path.join( - self.eval_dir, - "%s-%s-%s-compare.%s" % ( - self.name, rev1, rev2, report.output_format)) - report(self.eval_dir, outfile) - - def publish_comparison_tables(): - for rev1, rev2 in revision_pairs: - outfile = os.path.join( - self.eval_dir, - "%s-%s-%s-compare.html" % (self.name, rev1, rev2)) - subprocess.call(["publish", outfile]) - - self.add_step(name, make_comparison_tables) - 
self.add_step( - f"publish-{name}", publish_comparison_tables) - - def add_scatter_plot_step(self, relative=False, attributes=None, additional=[]): - """Add step creating (relative) scatter plots for all revision pairs. - - Create a scatter plot for each combination of attribute, - configuration and revisions pair. If *attributes* is not - specified, a list of common scatter plot attributes is used. - For portfolios all attributes except "cost", "coverage" and - "plan_length" will be ignored. :: - - exp.add_scatter_plot_step(attributes=["expansions"]) - - """ - if relative: - scatter_dir = os.path.join(self.eval_dir, "scatter-relative") - step_name = "make-relative-scatter-plots" - else: - scatter_dir = os.path.join(self.eval_dir, "scatter-absolute") - step_name = "make-absolute-scatter-plots" - if attributes is None: - attributes = self.DEFAULT_SCATTER_PLOT_ATTRIBUTES - - def make_scatter_plot(config_nick, rev1, rev2, attribute, config_nick2=None): - name = "-".join([self.name, rev1, rev2, attribute, config_nick]) - if config_nick2 is not None: - name += "-" + config_nick2 - print("Make scatter plot for", name) - algo1 = get_algo_nick(rev1, config_nick) - algo2 = get_algo_nick(rev2, config_nick if config_nick2 is None else config_nick2) - report = ScatterPlotReport( - filter_algorithm=[algo1, algo2], - attributes=[attribute], - relative=relative, - get_category=lambda run1, run2: run1["domain"]) - report( - self.eval_dir, - os.path.join(scatter_dir, rev1 + "-" + rev2, name)) - - def make_scatter_plots(): - for config in self._configs: - for rev1, rev2 in itertools.combinations(self._revisions, 2): - for attribute in self.get_supported_attributes( - config.nick, attributes): - make_scatter_plot(config.nick, rev1, rev2, attribute) - for nick1, nick2, rev1, rev2, attribute in additional: - make_scatter_plot(nick1, rev1, rev2, attribute, config_nick2=nick2) - - self.add_step(step_name, make_scatter_plots) diff --git a/experiments/issue1058/requirements.in 
b/experiments/issue1058/requirements.in deleted file mode 100644 index cfc7664985..0000000000 --- a/experiments/issue1058/requirements.in +++ /dev/null @@ -1 +0,0 @@ -lab==7.1 diff --git a/experiments/issue1058/requirements.txt b/experiments/issue1058/requirements.txt deleted file mode 100644 index 66807c2b13..0000000000 --- a/experiments/issue1058/requirements.txt +++ /dev/null @@ -1,34 +0,0 @@ -# -# This file is autogenerated by pip-compile with python 3.8 -# To update, run: -# -# pip-compile requirements.in -# -cycler==0.11.0 - # via matplotlib -fonttools==4.34.4 - # via matplotlib -kiwisolver==1.4.3 - # via matplotlib -lab==7.1 - # via -r requirements.in -matplotlib==3.5.2 - # via lab -numpy==1.23.1 - # via matplotlib -packaging==21.3 - # via matplotlib -pillow==9.2.0 - # via matplotlib -pyparsing==3.0.9 - # via - # matplotlib - # packaging -python-dateutil==2.8.2 - # via matplotlib -simplejson==3.17.6 - # via lab -six==1.16.0 - # via python-dateutil -txt2tags==3.7 - # via lab diff --git a/experiments/issue1058/v1.py b/experiments/issue1058/v1.py deleted file mode 100755 index 2724736841..0000000000 --- a/experiments/issue1058/v1.py +++ /dev/null @@ -1,68 +0,0 @@ -#! 
/usr/bin/env python3 - -import itertools -import math -import os -import subprocess - -from lab.environments import LocalEnvironment, BaselSlurmEnvironment -from lab.reports import Attribute - -from downward.reports.compare import ComparativeReport - -import common_setup -from common_setup import IssueConfig, IssueExperiment - -DIR = os.path.dirname(os.path.abspath(__file__)) -SCRIPT_NAME = os.path.splitext(os.path.basename(__file__))[0] -BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"] -REVISIONS = ["issue1042-base", "issue1058-base", "issue1058-v1"] -CONFIGS = [ - IssueConfig('dfp-b50k-t900', ['--search', 'astar(merge_and_shrink(merge_strategy=merge_stateless(merge_selector=score_based_filtering(scoring_functions=[goal_relevance,dfp,total_order])),shrink_strategy=shrink_bisimulation(greedy=false),label_reduction=exact(before_shrinking=true,before_merging=false),max_states=50000,threshold_before_merge=1,main_loop_max_time=150))'], driver_options=['--search-time-limit', '5m']), - IssueConfig('lmcut', ['--search', 'astar(lmcut())'], driver_options=['--search-time-limit', '5m']), - IssueConfig('cpdbs', ['--search', 'astar(cpdbs(multiple_cegar(max_pdb_size=1000000,max_collection_size=10000000,pattern_generation_max_time=infinity,total_max_time=100,stagnation_limit=20,blacklist_trigger_percentage=0.75,enable_blacklist_on_stagnation=true,random_seed=2018,verbosity=normal,use_wildcard_plans=false)),verbosity=normal)'], driver_options=['--search-time-limit', '5m']), - IssueConfig('bjolp', ['--evaluator', 'lmc=lmcount(lm_merged([lm_rhw(),lm_hm(m=1)]),admissible=true)', '--search', 'astar(lmc,lazy_evaluator=lmc)'], driver_options=['--search-time-limit', '5m']), - IssueConfig('blind', ['--search', 'astar(blind())'], driver_options=['--search-time-limit', '5m']), -] - -SUITE = common_setup.DEFAULT_OPTIMAL_SUITE -ENVIRONMENT = BaselSlurmEnvironment( - email="silvan.sievers@unibas.ch", - partition="infai_2", - export=[], - # paths obtained via: - # module purge - # module -q 
load CMake/3.15.3-GCCcore-8.3.0 - # module -q load GCC/8.3.0 - # echo $PATH - # echo $LD_LIBRARY_PATH - setup='export PATH=/scicore/soft/apps/binutils/2.32-GCCcore-8.3.0/bin:/scicore/soft/apps/CMake/3.15.3-GCCcore-8.3.0/bin:/scicore/soft/apps/cURL/7.66.0-GCCcore-8.3.0/bin:/scicore/soft/apps/bzip2/1.0.8-GCCcore-8.3.0/bin:/scicore/soft/apps/ncurses/6.1-GCCcore-8.3.0/bin:/scicore/soft/apps/GCCcore/8.3.0/bin:/infai/sieverss/repos/bin:/infai/sieverss/local:/export/soft/lua_lmod/centos7/lmod/lmod/libexec:/usr/local/bin:/usr/bin:/usr/local/sbin:/usr/sbin:$PATH\nexport LD_LIBRARY_PATH=/scicore/soft/apps/binutils/2.32-GCCcore-8.3.0/lib:/scicore/soft/apps/cURL/7.66.0-GCCcore-8.3.0/lib:/scicore/soft/apps/bzip2/1.0.8-GCCcore-8.3.0/lib:/scicore/soft/apps/zlib/1.2.11-GCCcore-8.3.0/lib:/scicore/soft/apps/ncurses/6.1-GCCcore-8.3.0/lib:/scicore/soft/apps/GCCcore/8.3.0/lib64:/scicore/soft/apps/GCCcore/8.3.0/lib') - -if common_setup.is_test_run(): - SUITE = IssueExperiment.DEFAULT_TEST_SUITE - ENVIRONMENT = LocalEnvironment(processes=4) - -exp = IssueExperiment( - revisions=REVISIONS, - configs=CONFIGS, - environment=ENVIRONMENT, -) -exp.add_suite(BENCHMARKS_DIR, SUITE) - -exp.add_parser(exp.EXITCODE_PARSER) -exp.add_parser(exp.TRANSLATOR_PARSER) -exp.add_parser(exp.SINGLE_SEARCH_PARSER) -exp.add_parser(exp.PLANNER_PARSER) - -exp.add_step('build', exp.build) -exp.add_step('start', exp.start_runs) -exp.add_fetcher(name='fetch') - -attributes = list(exp.DEFAULT_TABLE_ATTRIBUTES) -attributes.append('initial_h_value') - -exp.add_absolute_report_step(attributes=attributes) - -exp.add_comparison_table_step(attributes=attributes) - -exp.run_steps() diff --git a/experiments/issue1059/base.py b/experiments/issue1059/base.py deleted file mode 100755 index 0fb4dfb68e..0000000000 --- a/experiments/issue1059/base.py +++ /dev/null @@ -1,69 +0,0 @@ -#! 
/usr/bin/env python3 - -import itertools -import math -import os -import subprocess - -from lab.environments import LocalEnvironment, BaselSlurmEnvironment -from lab.reports import Attribute - -from downward.reports.compare import ComparativeReport - -import common_setup -from common_setup import IssueConfig, IssueExperiment - -DIR = os.path.dirname(os.path.abspath(__file__)) -SCRIPT_NAME = os.path.splitext(os.path.basename(__file__))[0] -BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"] -REVISIONS = ["issue1042-base"] -CONFIGS = [ - IssueConfig(f'astar-lmcut', ['--search', f'astar(lmcut())'], driver_options=['--search-time-limit', '5m']), - IssueConfig(f'astar-lmcut-sssimple', ['--search', f'astar(lmcut(),pruning=stubborn_sets_simple)'], driver_options=['--search-time-limit', '5m']), - IssueConfig(f'astar-lmcut-sssimple-limited', ['--search', f'astar(lmcut(),pruning=stubborn_sets_simple(min_required_pruning_ratio=0.2,expansions_before_checking_pruning_ratio=1000))'], driver_options=['--search-time-limit', '5m']), - IssueConfig(f'astar-lmcut-ssatom', ['--search', f'astar(lmcut(),pruning=atom_centric_stubborn_sets)'], driver_options=['--search-time-limit', '5m']), - IssueConfig(f'astar-lmcut-ssatom-limited', ['--search', f'astar(lmcut(),pruning=atom_centric_stubborn_sets(min_required_pruning_ratio=0.2,expansions_before_checking_pruning_ratio=1000))'], driver_options=['--search-time-limit', '5m']), -] - -SUITE = common_setup.DEFAULT_OPTIMAL_SUITE -ENVIRONMENT = BaselSlurmEnvironment( - email="silvan.sievers@unibas.ch", - partition="infai_2", - export=[], - # paths obtained via: - # module purge - # module -q load CMake/3.15.3-GCCcore-8.3.0 - # module -q load GCC/8.3.0 - # echo $PATH - # echo $LD_LIBRARY_PATH - setup='export 
PATH=/scicore/soft/apps/binutils/2.32-GCCcore-8.3.0/bin:/scicore/soft/apps/CMake/3.15.3-GCCcore-8.3.0/bin:/scicore/soft/apps/cURL/7.66.0-GCCcore-8.3.0/bin:/scicore/soft/apps/bzip2/1.0.8-GCCcore-8.3.0/bin:/scicore/soft/apps/ncurses/6.1-GCCcore-8.3.0/bin:/scicore/soft/apps/GCCcore/8.3.0/bin:/infai/sieverss/repos/bin:/infai/sieverss/local:/export/soft/lua_lmod/centos7/lmod/lmod/libexec:/usr/local/bin:/usr/bin:/usr/local/sbin:/usr/sbin:$PATH\nexport LD_LIBRARY_PATH=/scicore/soft/apps/binutils/2.32-GCCcore-8.3.0/lib:/scicore/soft/apps/cURL/7.66.0-GCCcore-8.3.0/lib:/scicore/soft/apps/bzip2/1.0.8-GCCcore-8.3.0/lib:/scicore/soft/apps/zlib/1.2.11-GCCcore-8.3.0/lib:/scicore/soft/apps/ncurses/6.1-GCCcore-8.3.0/lib:/scicore/soft/apps/GCCcore/8.3.0/lib64:/scicore/soft/apps/GCCcore/8.3.0/lib') - -if common_setup.is_test_run(): - SUITE = IssueExperiment.DEFAULT_TEST_SUITE - ENVIRONMENT = LocalEnvironment(processes=4) - -exp = IssueExperiment( - revisions=REVISIONS, - configs=CONFIGS, - environment=ENVIRONMENT, -) -exp.add_suite(BENCHMARKS_DIR, SUITE) - -exp.add_parser(exp.EXITCODE_PARSER) -exp.add_parser(exp.TRANSLATOR_PARSER) -exp.add_parser(exp.SINGLE_SEARCH_PARSER) -exp.add_parser(exp.PLANNER_PARSER) - -exp.add_step('build', exp.build) -exp.add_step('start', exp.start_runs) -exp.add_fetcher(name='fetch') - -attributes = list(exp.DEFAULT_TABLE_ATTRIBUTES) -attributes.append('initial_h_value') - -exp.add_absolute_report_step(attributes=attributes) - -exp.add_fetcher('data/issue1042-v1-eval', merge=True) -exp.add_comparison_table_step(attributes=attributes, revisions=["issue1042-base", "issue1042-v1"]) - -exp.run_steps() diff --git a/experiments/issue1059/common_setup.py b/experiments/issue1059/common_setup.py deleted file mode 100644 index 9a0ddc07fe..0000000000 --- a/experiments/issue1059/common_setup.py +++ /dev/null @@ -1,396 +0,0 @@ -# -*- coding: utf-8 -*- - -import itertools -import os -import platform -import subprocess -import sys - -from lab.experiment import ARGPARSER 
-from lab import tools - -from downward.experiment import FastDownwardExperiment -from downward.reports.absolute import AbsoluteReport -from downward.reports.compare import ComparativeReport -from downward.reports.scatter import ScatterPlotReport - - -def parse_args(): - ARGPARSER.add_argument( - "--test", - choices=["yes", "no", "auto"], - default="auto", - dest="test_run", - help="test experiment locally on a small suite if --test=yes or " - "--test=auto and we are not on a cluster") - return ARGPARSER.parse_args() - -ARGS = parse_args() - - -DEFAULT_OPTIMAL_SUITE = [ - 'agricola-opt18-strips', 'airport', 'barman-opt11-strips', - 'barman-opt14-strips', 'blocks', 'childsnack-opt14-strips', - 'data-network-opt18-strips', 'depot', 'driverlog', - 'elevators-opt08-strips', 'elevators-opt11-strips', - 'floortile-opt11-strips', 'floortile-opt14-strips', 'freecell', - 'ged-opt14-strips', 'grid', 'gripper', 'hiking-opt14-strips', - 'logistics00', 'logistics98', 'miconic', 'movie', 'mprime', - 'mystery', 'nomystery-opt11-strips', 'openstacks-opt08-strips', - 'openstacks-opt11-strips', 'openstacks-opt14-strips', - 'openstacks-strips', 'organic-synthesis-opt18-strips', - 'organic-synthesis-split-opt18-strips', 'parcprinter-08-strips', - 'parcprinter-opt11-strips', 'parking-opt11-strips', - 'parking-opt14-strips', 'pathways', 'pegsol-08-strips', - 'pegsol-opt11-strips', 'petri-net-alignment-opt18-strips', - 'pipesworld-notankage', 'pipesworld-tankage', 'psr-small', 'rovers', - 'satellite', 'scanalyzer-08-strips', 'scanalyzer-opt11-strips', - 'snake-opt18-strips', 'sokoban-opt08-strips', - 'sokoban-opt11-strips', 'spider-opt18-strips', 'storage', - 'termes-opt18-strips', 'tetris-opt14-strips', - 'tidybot-opt11-strips', 'tidybot-opt14-strips', 'tpp', - 'transport-opt08-strips', 'transport-opt11-strips', - 'transport-opt14-strips', 'trucks-strips', 'visitall-opt11-strips', - 'visitall-opt14-strips', 'woodworking-opt08-strips', - 'woodworking-opt11-strips', 'zenotravel'] - 
-DEFAULT_SATISFICING_SUITE = [ - 'agricola-sat18-strips', 'airport', 'assembly', - 'barman-sat11-strips', 'barman-sat14-strips', 'blocks', - 'caldera-sat18-adl', 'caldera-split-sat18-adl', 'cavediving-14-adl', - 'childsnack-sat14-strips', 'citycar-sat14-adl', - 'data-network-sat18-strips', 'depot', 'driverlog', - 'elevators-sat08-strips', 'elevators-sat11-strips', - 'flashfill-sat18-adl', 'floortile-sat11-strips', - 'floortile-sat14-strips', 'freecell', 'ged-sat14-strips', 'grid', - 'gripper', 'hiking-sat14-strips', 'logistics00', 'logistics98', - 'maintenance-sat14-adl', 'miconic', 'miconic-fulladl', - 'miconic-simpleadl', 'movie', 'mprime', 'mystery', - 'nomystery-sat11-strips', 'nurikabe-sat18-adl', 'openstacks', - 'openstacks-sat08-adl', 'openstacks-sat08-strips', - 'openstacks-sat11-strips', 'openstacks-sat14-strips', - 'openstacks-strips', 'optical-telegraphs', - 'organic-synthesis-sat18-strips', - 'organic-synthesis-split-sat18-strips', 'parcprinter-08-strips', - 'parcprinter-sat11-strips', 'parking-sat11-strips', - 'parking-sat14-strips', 'pathways', - 'pegsol-08-strips', 'pegsol-sat11-strips', 'philosophers', - 'pipesworld-notankage', 'pipesworld-tankage', 'psr-large', - 'psr-middle', 'psr-small', 'rovers', 'satellite', - 'scanalyzer-08-strips', 'scanalyzer-sat11-strips', 'schedule', - 'settlers-sat18-adl', 'snake-sat18-strips', 'sokoban-sat08-strips', - 'sokoban-sat11-strips', 'spider-sat18-strips', 'storage', - 'termes-sat18-strips', 'tetris-sat14-strips', - 'thoughtful-sat14-strips', 'tidybot-sat11-strips', 'tpp', - 'transport-sat08-strips', 'transport-sat11-strips', - 'transport-sat14-strips', 'trucks', 'trucks-strips', - 'visitall-sat11-strips', 'visitall-sat14-strips', - 'woodworking-sat08-strips', 'woodworking-sat11-strips', - 'zenotravel'] - - -def get_script(): - """Get file name of main script.""" - return tools.get_script_path() - - -def get_script_dir(): - """Get directory of main script. 
- - Usually a relative directory (depends on how it was called by the user.)""" - return os.path.dirname(get_script()) - - -def get_experiment_name(): - """Get name for experiment. - - Derived from the absolute filename of the main script, e.g. - "/ham/spam/eggs.py" => "spam-eggs".""" - script = os.path.abspath(get_script()) - script_dir = os.path.basename(os.path.dirname(script)) - script_base = os.path.splitext(os.path.basename(script))[0] - return "%s-%s" % (script_dir, script_base) - - -def get_data_dir(): - """Get data dir for the experiment. - - This is the subdirectory "data" of the directory containing - the main script.""" - return os.path.join(get_script_dir(), "data", get_experiment_name()) - - -def get_repo_base(): - """Get base directory of the repository, as an absolute path. - - Search upwards in the directory tree from the main script until a - directory with a subdirectory named ".git" is found. - - Abort if the repo base cannot be found.""" - path = os.path.abspath(get_script_dir()) - while os.path.dirname(path) != path: - if os.path.exists(os.path.join(path, ".git")): - return path - path = os.path.dirname(path) - sys.exit("repo base could not be found") - - -def is_running_on_cluster(): - node = platform.node() - return node.endswith(".scicore.unibas.ch") or node.endswith(".cluster.bc2.ch") - - -def is_test_run(): - return ARGS.test_run == "yes" or ( - ARGS.test_run == "auto" and not is_running_on_cluster()) - - -def get_algo_nick(revision, config_nick): - return "{revision}-{config_nick}".format(**locals()) - - -class IssueConfig(object): - """Hold information about a planner configuration. - - See FastDownwardExperiment.add_algorithm() for documentation of the - constructor's options. 
- - """ - def __init__(self, nick, component_options, - build_options=None, driver_options=None): - self.nick = nick - self.component_options = component_options - self.build_options = build_options - self.driver_options = driver_options - - -class IssueExperiment(FastDownwardExperiment): - """Subclass of FastDownwardExperiment with some convenience features.""" - - DEFAULT_TEST_SUITE = ["depot:p01.pddl", "gripper:prob01.pddl"] - - DEFAULT_TABLE_ATTRIBUTES = [ - "cost", - "coverage", - "error", - "evaluations", - "expansions", - "expansions_until_last_jump", - "generated", - "memory", - "planner_memory", - "planner_time", - "quality", - "run_dir", - "score_evaluations", - "score_expansions", - "score_generated", - "score_memory", - "score_search_time", - "score_total_time", - "search_time", - "total_time", - ] - - DEFAULT_SCATTER_PLOT_ATTRIBUTES = [ - "evaluations", - "expansions", - "expansions_until_last_jump", - "initial_h_value", - "memory", - "search_time", - "total_time", - ] - - PORTFOLIO_ATTRIBUTES = [ - "cost", - "coverage", - "error", - "plan_length", - "run_dir", - ] - - def __init__(self, revisions_and_configs=None, path=None, **kwargs): - """ - - If given, *revisions_and_configs* must be a non-empty list of - pairs (tuple of size 2) of revisions and configs, with the - meaning to run all configs on all revisions. - - The first element of the pair, revisions, must be a non-empty - list of revision identifiers, which specify which planner - versions to use in the experiment. The same versions are used - for translator, preprocessor and search. :: - - IssueExperiment(revisions=["issue123", "4b3d581643"], ...) - - The second element of the pair, configs, must be a non-empty - list of IssueConfig objects. 
:: - - IssueExperiment(..., configs=[ - IssueConfig("ff", ["--search", "eager_greedy(ff())"]), - IssueConfig( - "lama", [], - driver_options=["--alias", "seq-sat-lama-2011"]), - ]) - - If *path* is specified, it must be the path to where the - experiment should be built (e.g. - /home/john/experiments/issue123/exp01/). If omitted, the - experiment path is derived automatically from the main - script's filename. Example:: - - script = experiments/issue123/exp01.py --> - path = experiments/issue123/data/issue123-exp01/ - - """ - - path = path or get_data_dir() - - FastDownwardExperiment.__init__(self, path=path, **kwargs) - - revs = set() - confs = set() - for revisions, configs in revisions_and_configs: - for rev in revisions: - revs.add(rev) - for config in configs: - confs.add(config.nick) - self.add_algorithm( - get_algo_nick(rev, config.nick), - get_repo_base(), - rev, - config.component_options, - build_options=config.build_options, - driver_options=config.driver_options) - - self._revisions = list(revs) - self._config_nicks = list(confs) - - @classmethod - def _is_portfolio(cls, config_nick): - return "fdss" in config_nick - - @classmethod - def get_supported_attributes(cls, config_nick, attributes): - if cls._is_portfolio(config_nick): - return [attr for attr in attributes - if attr in cls.PORTFOLIO_ATTRIBUTES] - return attributes - - def add_absolute_report_step(self, **kwargs): - """Add step that makes an absolute report. - - Absolute reports are useful for experiments that don't compare - revisions. - - The report is written to the experiment evaluation directory. - - All *kwargs* will be passed to the AbsoluteReport class. If the - keyword argument *attributes* is not specified, a default list - of attributes is used. :: - - exp.add_absolute_report_step(attributes=["coverage"]) - - """ - kwargs.setdefault("attributes", self.DEFAULT_TABLE_ATTRIBUTES) - report = AbsoluteReport(**kwargs) - outfile = os.path.join( - self.eval_dir, - get_experiment_name() + "." 
+ report.output_format) - self.add_report(report, outfile=outfile) - self.add_step( - 'publish-absolute-report', subprocess.call, ['publish', outfile]) - - def add_comparison_table_step(self, name="make-comparison-tables", revisions=[], **kwargs): - """Add a step that makes pairwise revision comparisons. - - Create comparative reports for all pairs of Fast Downward - revisions. Each report pairs up the runs of the same config and - lists the two absolute attribute values and their difference - for all attributes in kwargs["attributes"]. - - All *kwargs* will be passed to the CompareConfigsReport class. - If the keyword argument *attributes* is not specified, a - default list of attributes is used. :: - - exp.add_comparison_table_step(attributes=["coverage"]) - - """ - kwargs.setdefault("attributes", self.DEFAULT_TABLE_ATTRIBUTES) - if not revisions: - revisions = self._revisions - - def make_comparison_tables(): - for rev1, rev2 in itertools.combinations(revisions, 2): - compared_configs = [] - for config_nick in self._config_nicks: - compared_configs.append( - ("%s-%s" % (rev1, config_nick), - "%s-%s" % (rev2, config_nick), - "Diff (%s)" % config_nick)) - report = ComparativeReport(compared_configs, **kwargs) - outfile = os.path.join( - self.eval_dir, - "%s-%s-%s-compare.%s" % ( - self.name, rev1, rev2, report.output_format)) - report(self.eval_dir, outfile) - - def publish_comparison_tables(): - for rev1, rev2 in itertools.combinations(revisions, 2): - outfile = os.path.join( - self.eval_dir, - "%s-%s-%s-compare.html" % (self.name, rev1, rev2)) - subprocess.call(["publish", outfile]) - - self.add_step(name, make_comparison_tables) - self.add_step( - f"publish-{name}", publish_comparison_tables) - - def add_scatter_plot_step(self, relative=False, attributes=None, additional=[]): - """Add step creating (relative) scatter plots for all revision pairs. - - Create a scatter plot for each combination of attribute, - configuration and revisions pair. 
If *attributes* is not - specified, a list of common scatter plot attributes is used. - For portfolios all attributes except "cost", "coverage" and - "plan_length" will be ignored. :: - - exp.add_scatter_plot_step(attributes=["expansions"]) - - """ - if relative: - scatter_dir = os.path.join(self.eval_dir, "scatter-relative") - step_name = "make-relative-scatter-plots" - else: - scatter_dir = os.path.join(self.eval_dir, "scatter-absolute") - step_name = "make-absolute-scatter-plots" - if attributes is None: - attributes = self.DEFAULT_SCATTER_PLOT_ATTRIBUTES - - def make_scatter_plot(config_nick, rev1, rev2, attribute, config_nick2=None): - name = "-".join([self.name, rev1, rev2, attribute, config_nick]) - if config_nick2 is not None: - name += "-" + config_nick2 - print("Make scatter plot for", name) - algo1 = get_algo_nick(rev1, config_nick) - algo2 = get_algo_nick(rev2, config_nick if config_nick2 is None else config_nick2) - report = ScatterPlotReport( - filter_algorithm=[algo1, algo2], - attributes=[attribute], - relative=relative, - get_category=lambda run1, run2: run1["domain"]) - report( - self.eval_dir, - os.path.join(scatter_dir, rev1 + "-" + rev2, name)) - - def make_scatter_plots(): - for config_nick in self._config_nicks: - for rev1, rev2 in itertools.combinations(self._revisions, 2): - for attribute in self.get_supported_attributes( - config_nick, attributes): - make_scatter_plot(config_nick, rev1, rev2, attribute) - for nick1, nick2, rev1, rev2, attribute in additional: - make_scatter_plot(nick1, rev1, rev2, attribute, config_nick2=nick2) - - self.add_step(step_name, make_scatter_plots) diff --git a/experiments/issue1059/pruning_parser.py b/experiments/issue1059/pruning_parser.py deleted file mode 100755 index 1175559bc9..0000000000 --- a/experiments/issue1059/pruning_parser.py +++ /dev/null @@ -1,29 +0,0 @@ -#! 
/usr/bin/env python - -import re - -from lab.parser import Parser - -parser = Parser() -parser.add_pattern( - "pruning_time", - r"Time for pruning operators: (.+)s", - type=float) -parser.add_pattern( - "landmarks", - r"Landmark graph contains (\d+) landmarks, of which \d+ are disjunctive and \d+ are conjunctive.", - type=int) -parser.add_pattern( - "landmarks_disjunctive", - r"Landmark graph contains \d+ landmarks, of which (\d+) are disjunctive and \d+ are conjunctive.", - type=int) -parser.add_pattern( - "landmarks_conjunctive", - r"Landmark graph contains \d+ landmarks, of which \d+ are disjunctive and (\d+) are conjunctive.", - type=int) -parser.add_pattern( - "orderings", - r"Landmark graph contains (\d+) orderings.", - type=int) - -parser.parse() diff --git a/experiments/issue1059/v1.py b/experiments/issue1059/v1.py deleted file mode 100755 index fd1f17b35f..0000000000 --- a/experiments/issue1059/v1.py +++ /dev/null @@ -1,86 +0,0 @@ -#! /usr/bin/env python3 - -import itertools -import math -import os -import subprocess - -from lab.environments import LocalEnvironment, BaselSlurmEnvironment -from lab.reports import Attribute - -from downward.reports.compare import ComparativeReport - -import common_setup -from common_setup import IssueConfig, IssueExperiment - -DIR = os.path.dirname(os.path.abspath(__file__)) -SCRIPT_NAME = os.path.splitext(os.path.basename(__file__))[0] -BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"] -REVISIONS_AND_CONFIGS = [ - ( - ["issue1059-base"], - [ - IssueConfig(f'astar-blind', ['--search', f'astar(blind())'], driver_options=['--search-time-limit', '5m']), - IssueConfig(f'astar-blind-sssimple', ['--search', f'astar(blind(),pruning=stubborn_sets_simple)'], driver_options=['--search-time-limit', '5m']), - IssueConfig(f'astar-blind-ssec', ['--search', f'astar(blind(),pruning=stubborn_sets_ec)'], driver_options=['--search-time-limit', '5m']), - IssueConfig(f'astar-blind-ssatom', ['--search', 
f'astar(blind(),pruning=atom_centric_stubborn_sets)'], driver_options=['--search-time-limit', '5m']), - IssueConfig(f'astar-lmcut', ['--search', f'astar(lmcut())'], driver_options=['--search-time-limit', '5m']), - IssueConfig(f'astar-lmcut-sssimple', ['--search', f'astar(lmcut(),pruning=stubborn_sets_simple)'], driver_options=['--search-time-limit', '5m']), - IssueConfig(f'astar-lmcut-ssec', ['--search', f'astar(lmcut(),pruning=stubborn_sets_ec)'], driver_options=['--search-time-limit', '5m']), - IssueConfig(f'astar-lmcut-ssatom', ['--search', f'astar(lmcut(),pruning=atom_centric_stubborn_sets)'], driver_options=['--search-time-limit', '5m']), - ], - ), - ( - ["issue1059-v1"], - [ - IssueConfig(f'astar-blind', ['--search', f'astar(blind())'], driver_options=['--search-time-limit', '5m']), - IssueConfig(f'astar-blind-sssimple', ['--search', f'astar(blind(),pruning=stubborn_sets_simple)'], driver_options=['--search-time-limit', '5m']), - IssueConfig(f'astar-blind-ssec', ['--search', f'astar(blind(),pruning=stubborn_sets_ec)'], driver_options=['--search-time-limit', '5m']), - IssueConfig(f'astar-blind-ssatom', ['--search', f'astar(blind(),pruning=atom_centric_stubborn_sets)'], driver_options=['--search-time-limit', '5m']), - IssueConfig(f'astar-lmcut', ['--search', f'astar(lmcut())'], driver_options=['--search-time-limit', '5m']), - IssueConfig(f'astar-lmcut-sssimple', ['--search', f'astar(lmcut(),pruning=stubborn_sets_simple)'], driver_options=['--search-time-limit', '5m']), - IssueConfig(f'astar-lmcut-ssec', ['--search', f'astar(lmcut(),pruning=stubborn_sets_ec)'], driver_options=['--search-time-limit', '5m']), - IssueConfig(f'astar-lmcut-ssatom', ['--search', f'astar(lmcut(),pruning=atom_centric_stubborn_sets)'], driver_options=['--search-time-limit', '5m']), - ], - ) -] - -SUITE = common_setup.DEFAULT_OPTIMAL_SUITE -ENVIRONMENT = BaselSlurmEnvironment( - email="gabriele.roeger@unibas.ch", - partition="infai_2", - export=[], - # paths obtained via: - # module purge 
- # module -q load CMake/3.15.3-GCCcore-8.3.0 - # module -q load GCC/8.3.0 - # echo $PATH - # echo $LD_LIBRARY_PATH - setup='export PATH=/scicore/soft/apps/binutils/2.32-GCCcore-8.3.0/bin:/scicore/soft/apps/CMake/3.15.3-GCCcore-8.3.0/bin:/scicore/soft/apps/cURL/7.66.0-GCCcore-8.3.0/bin:/scicore/soft/apps/bzip2/1.0.8-GCCcore-8.3.0/bin:/scicore/soft/apps/ncurses/6.1-GCCcore-8.3.0/bin:/scicore/soft/apps/GCCcore/8.3.0/bin:/infai/roeger/bin:/infai/roeger/local/bin:/export/soft/lua_lmod/centos7/lmod/lmod/libexec:/usr/local/bin:/usr/bin:/usr/local/sbin:/usr/sbin:$PATH\nexport LD_LIBRARY_PATH=/scicore/soft/apps/binutils/2.32-GCCcore-8.3.0/lib:/scicore/soft/apps/cURL/7.66.0-GCCcore-8.3.0/lib:/scicore/soft/apps/bzip2/1.0.8-GCCcore-8.3.0/lib:/scicore/soft/apps/zlib/1.2.11-GCCcore-8.3.0/lib:/scicore/soft/apps/ncurses/6.1-GCCcore-8.3.0/lib:/scicore/soft/apps/GCCcore/8.3.0/lib64:/scicore/soft/apps/GCCcore/8.3.0/lib') - -if common_setup.is_test_run(): - SUITE = IssueExperiment.DEFAULT_TEST_SUITE - ENVIRONMENT = LocalEnvironment(processes=4) - -exp = IssueExperiment( - revisions_and_configs=REVISIONS_AND_CONFIGS, - environment=ENVIRONMENT, -) -exp.add_suite(BENCHMARKS_DIR, SUITE) - -exp.add_parser(exp.EXITCODE_PARSER) -exp.add_parser(exp.TRANSLATOR_PARSER) -exp.add_parser(exp.SINGLE_SEARCH_PARSER) -exp.add_parser(exp.PLANNER_PARSER) - -exp.add_step('build', exp.build) -exp.add_step('start', exp.start_runs) -exp.add_fetcher(name='fetch') - -attributes = list(exp.DEFAULT_TABLE_ATTRIBUTES) -# attributes.append('initial_h_value') - -exp.add_absolute_report_step(attributes=attributes) -exp.add_comparison_table_step(attributes=attributes) - -exp.run_steps() diff --git a/experiments/issue1059/v2.py b/experiments/issue1059/v2.py deleted file mode 100755 index 9e9ead51af..0000000000 --- a/experiments/issue1059/v2.py +++ /dev/null @@ -1,86 +0,0 @@ -#! 
/usr/bin/env python3 - -import itertools -import math -import os -import subprocess - -from lab.environments import LocalEnvironment, BaselSlurmEnvironment -from lab.reports import Attribute - -from downward.reports.compare import ComparativeReport - -import common_setup -from common_setup import IssueConfig, IssueExperiment - -DIR = os.path.dirname(os.path.abspath(__file__)) -SCRIPT_NAME = os.path.splitext(os.path.basename(__file__))[0] -BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"] -REVISIONS_AND_CONFIGS = [ - ( - ["issue1059-v2"], - [ -# IssueConfig(f'astar-blind', ['--search', f'astar(blind())'], driver_options=['--search-time-limit', '5m']), -# IssueConfig(f'astar-blind-sssimple', ['--search', f'astar(blind(),pruning=stubborn_sets_simple)'], driver_options=['--search-time-limit', '5m']), -# IssueConfig(f'astar-blind-ssec', ['--search', f'astar(blind(),pruning=stubborn_sets_ec)'], driver_options=['--search-time-limit', '5m']), -# IssueConfig(f'astar-blind-ssatom', ['--search', f'astar(blind(),pruning=atom_centric_stubborn_sets)'], driver_options=['--search-time-limit', '5m']), -# IssueConfig(f'astar-lmcut', ['--search', f'astar(lmcut())'], driver_options=['--search-time-limit', '5m']), - IssueConfig(f'astar-lmcut-sssimple', ['--search', f'astar(lmcut(),pruning=stubborn_sets_simple)'], driver_options=['--search-time-limit', '5m']), - IssueConfig(f'astar-lmcut-ssec', ['--search', f'astar(lmcut(),pruning=stubborn_sets_ec)'], driver_options=['--search-time-limit', '5m']), - IssueConfig(f'astar-lmcut-ssatom', ['--search', f'astar(lmcut(),pruning=atom_centric_stubborn_sets)'], driver_options=['--search-time-limit', '5m']), - ], - ), - ( - ["issue1059-base"], - [ -# IssueConfig(f'astar-blind', ['--search', f'astar(blind())'], driver_options=['--search-time-limit', '5m']), -# IssueConfig(f'astar-blind-sssimple', ['--search', f'astar(blind(),pruning=stubborn_sets_simple)'], driver_options=['--search-time-limit', '5m']), -# IssueConfig(f'astar-blind-ssec', 
['--search', f'astar(blind(),pruning=stubborn_sets_ec)'], driver_options=['--search-time-limit', '5m']), -# IssueConfig(f'astar-blind-ssatom', ['--search', f'astar(blind(),pruning=atom_centric_stubborn_sets)'], driver_options=['--search-time-limit', '5m']), -# IssueConfig(f'astar-lmcut', ['--search', f'astar(lmcut())'], driver_options=['--search-time-limit', '5m']), - IssueConfig(f'astar-lmcut-sssimple', ['--search', f'astar(lmcut(),pruning=stubborn_sets_simple)'], driver_options=['--search-time-limit', '5m']), - IssueConfig(f'astar-lmcut-ssec', ['--search', f'astar(lmcut(),pruning=stubborn_sets_ec)'], driver_options=['--search-time-limit', '5m']), - IssueConfig(f'astar-lmcut-ssatom', ['--search', f'astar(lmcut(),pruning=atom_centric_stubborn_sets)'], driver_options=['--search-time-limit', '5m']), - ], - ) -] - -SUITE = common_setup.DEFAULT_OPTIMAL_SUITE -ENVIRONMENT = BaselSlurmEnvironment( - email="gabriele.roeger@unibas.ch", - partition="infai_2", - export=[], - # paths obtained via: - # module purge - # module -q load CMake/3.15.3-GCCcore-8.3.0 - # module -q load GCC/8.3.0 - # echo $PATH - # echo $LD_LIBRARY_PATH - setup='export PATH=/scicore/soft/apps/binutils/2.32-GCCcore-8.3.0/bin:/scicore/soft/apps/CMake/3.15.3-GCCcore-8.3.0/bin:/scicore/soft/apps/cURL/7.66.0-GCCcore-8.3.0/bin:/scicore/soft/apps/bzip2/1.0.8-GCCcore-8.3.0/bin:/scicore/soft/apps/ncurses/6.1-GCCcore-8.3.0/bin:/scicore/soft/apps/GCCcore/8.3.0/bin:/infai/roeger/bin:/infai/roeger/local/bin:/export/soft/lua_lmod/centos7/lmod/lmod/libexec:/usr/local/bin:/usr/bin:/usr/local/sbin:/usr/sbin:$PATH\nexport LD_LIBRARY_PATH=/scicore/soft/apps/binutils/2.32-GCCcore-8.3.0/lib:/scicore/soft/apps/cURL/7.66.0-GCCcore-8.3.0/lib:/scicore/soft/apps/bzip2/1.0.8-GCCcore-8.3.0/lib:/scicore/soft/apps/zlib/1.2.11-GCCcore-8.3.0/lib:/scicore/soft/apps/ncurses/6.1-GCCcore-8.3.0/lib:/scicore/soft/apps/GCCcore/8.3.0/lib64:/scicore/soft/apps/GCCcore/8.3.0/lib') - -if common_setup.is_test_run(): - SUITE = 
IssueExperiment.DEFAULT_TEST_SUITE - ENVIRONMENT = LocalEnvironment(processes=4) - -exp = IssueExperiment( - revisions_and_configs=REVISIONS_AND_CONFIGS, - environment=ENVIRONMENT, -) -exp.add_suite(BENCHMARKS_DIR, SUITE) - -exp.add_parser(exp.EXITCODE_PARSER) -exp.add_parser(exp.TRANSLATOR_PARSER) -exp.add_parser(exp.SINGLE_SEARCH_PARSER) -exp.add_parser(exp.PLANNER_PARSER) - -exp.add_step('build', exp.build) -exp.add_step('start', exp.start_runs) -exp.add_fetcher(name='fetch') - -attributes = list(exp.DEFAULT_TABLE_ATTRIBUTES) -# attributes.append('initial_h_value') - -exp.add_absolute_report_step(attributes=attributes) -exp.add_comparison_table_step(attributes=attributes) - -exp.run_steps() diff --git a/experiments/issue1059/v3.py b/experiments/issue1059/v3.py deleted file mode 100755 index a4e974879a..0000000000 --- a/experiments/issue1059/v3.py +++ /dev/null @@ -1,87 +0,0 @@ -#! /usr/bin/env python3 - -import itertools -import math -import os -import subprocess - -from lab.environments import LocalEnvironment, BaselSlurmEnvironment -from lab.reports import Attribute - -from downward.reports.compare import ComparativeReport - -import common_setup -from common_setup import IssueConfig, IssueExperiment - -DIR = os.path.dirname(os.path.abspath(__file__)) -SCRIPT_NAME = os.path.splitext(os.path.basename(__file__))[0] -BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"] -REVISIONS_AND_CONFIGS = [ - ( - ["issue1059-v3"], - [ -# IssueConfig(f'astar-blind', ['--search', f'astar(blind())'], driver_options=['--search-time-limit', '5m']), -# IssueConfig(f'astar-blind-sssimple', ['--search', f'astar(blind(),pruning=stubborn_sets_simple)'], driver_options=['--search-time-limit', '5m']), -# IssueConfig(f'astar-blind-ssec', ['--search', f'astar(blind(),pruning=stubborn_sets_ec)'], driver_options=['--search-time-limit', '5m']), -# IssueConfig(f'astar-blind-ssatom', ['--search', f'astar(blind(),pruning=atom_centric_stubborn_sets)'], driver_options=['--search-time-limit', '5m']), 
-# IssueConfig(f'astar-lmcut', ['--search', f'astar(lmcut())'], driver_options=['--search-time-limit', '5m']), - IssueConfig(f'astar-lmcut-sssimple', ['--search', f'astar(lmcut(),pruning=stubborn_sets_simple(verbosity=verbose))'], driver_options=['--search-time-limit', '5m']), - IssueConfig(f'astar-lmcut-ssec', ['--search', f'astar(lmcut(),pruning=stubborn_sets_ec(verbosity=verbose))'], driver_options=['--search-time-limit', '5m']), - IssueConfig(f'astar-lmcut-ssatom', ['--search', f'astar(lmcut(),pruning=atom_centric_stubborn_sets(verbosity=verbose))'], driver_options=['--search-time-limit', '5m']), - ], - ), - ( - ["issue1059-base"], - [ -# IssueConfig(f'astar-blind', ['--search', f'astar(blind())'], driver_options=['--search-time-limit', '5m']), -# IssueConfig(f'astar-blind-sssimple', ['--search', f'astar(blind(),pruning=stubborn_sets_simple)'], driver_options=['--search-time-limit', '5m']), -# IssueConfig(f'astar-blind-ssec', ['--search', f'astar(blind(),pruning=stubborn_sets_ec)'], driver_options=['--search-time-limit', '5m']), -# IssueConfig(f'astar-blind-ssatom', ['--search', f'astar(blind(),pruning=atom_centric_stubborn_sets)'], driver_options=['--search-time-limit', '5m']), -# IssueConfig(f'astar-lmcut', ['--search', f'astar(lmcut())'], driver_options=['--search-time-limit', '5m']), - IssueConfig(f'astar-lmcut-sssimple', ['--search', f'astar(lmcut(),pruning=stubborn_sets_simple(verbosity=verbose))'], driver_options=['--search-time-limit', '5m']), - IssueConfig(f'astar-lmcut-ssec', ['--search', f'astar(lmcut(),pruning=stubborn_sets_ec(verbosity=verbose))'], driver_options=['--search-time-limit', '5m']), - IssueConfig(f'astar-lmcut-ssatom', ['--search', f'astar(lmcut(),pruning=atom_centric_stubborn_sets(verbosity=verbose))'], driver_options=['--search-time-limit', '5m']), - ], - ) -] - -SUITE = common_setup.DEFAULT_OPTIMAL_SUITE -ENVIRONMENT = BaselSlurmEnvironment( - email="gabriele.roeger@unibas.ch", - partition="infai_1", - export=[], - # paths obtained 
via: - # module purge - # module -q load CMake/3.15.3-GCCcore-8.3.0 - # module -q load GCC/8.3.0 - # echo $PATH - # echo $LD_LIBRARY_PATH - setup='export PATH=/scicore/soft/apps/binutils/2.32-GCCcore-8.3.0/bin:/scicore/soft/apps/CMake/3.15.3-GCCcore-8.3.0/bin:/scicore/soft/apps/cURL/7.66.0-GCCcore-8.3.0/bin:/scicore/soft/apps/bzip2/1.0.8-GCCcore-8.3.0/bin:/scicore/soft/apps/ncurses/6.1-GCCcore-8.3.0/bin:/scicore/soft/apps/GCCcore/8.3.0/bin:/infai/roeger/bin:/infai/roeger/local/bin:/export/soft/lua_lmod/centos7/lmod/lmod/libexec:/usr/local/bin:/usr/bin:/usr/local/sbin:/usr/sbin:$PATH\nexport LD_LIBRARY_PATH=/scicore/soft/apps/binutils/2.32-GCCcore-8.3.0/lib:/scicore/soft/apps/cURL/7.66.0-GCCcore-8.3.0/lib:/scicore/soft/apps/bzip2/1.0.8-GCCcore-8.3.0/lib:/scicore/soft/apps/zlib/1.2.11-GCCcore-8.3.0/lib:/scicore/soft/apps/ncurses/6.1-GCCcore-8.3.0/lib:/scicore/soft/apps/GCCcore/8.3.0/lib64:/scicore/soft/apps/GCCcore/8.3.0/lib') - -if common_setup.is_test_run(): - SUITE = IssueExperiment.DEFAULT_TEST_SUITE - ENVIRONMENT = LocalEnvironment(processes=4) - -exp = IssueExperiment( - revisions_and_configs=REVISIONS_AND_CONFIGS, - environment=ENVIRONMENT, -) -exp.add_suite(BENCHMARKS_DIR, SUITE) - -exp.add_parser(exp.EXITCODE_PARSER) -exp.add_parser(exp.TRANSLATOR_PARSER) -exp.add_parser(exp.SINGLE_SEARCH_PARSER) -exp.add_parser(exp.PLANNER_PARSER) -exp.add_parser("pruning_parser.py") - -exp.add_step('build', exp.build) -exp.add_step('start', exp.start_runs) -exp.add_fetcher(name='fetch') - -attributes = list(exp.DEFAULT_TABLE_ATTRIBUTES) -attributes.append('pruning_time') - -exp.add_absolute_report_step(attributes=attributes) -exp.add_comparison_table_step(attributes=attributes) - -exp.run_steps() diff --git a/experiments/issue1070/archive.py b/experiments/issue1070/archive.py deleted file mode 100644 index b8b664707e..0000000000 --- a/experiments/issue1070/archive.py +++ /dev/null @@ -1,107 +0,0 @@ -from pathlib import Path -import subprocess -import tarfile -from tempfile 
import TemporaryDirectory - -ARCHIVE_HOST = "aifiles" -ARCHIVE_LOCATION = Path("experiments") - -def add_archive_step(exp, path): - """ - Adds a step to the given experiment that will archive it to the - archive location specified in ARCHIVE_LOCATION und the given path. - We archive the following files: - - everything in the same directory as the main experiment script - (except for 'data', '.venv', and '__pycache__') - - all generated reports - - the combined properties file - - all run and error logs - - the source code stored in the experiment data directory - - any files added as resources to the experiment - - The first two items in the above list will be stored unpacked for easier - access while all otherdata will be packed. - """ - def archive(): - archive_path = ARCHIVE_LOCATION / path - _archive_script_dir(exp, ARCHIVE_HOST, archive_path) - _archive_data_dir(exp, ARCHIVE_HOST, archive_path) - _archive_eval_dir(exp, ARCHIVE_HOST, archive_path) - - exp.add_step("archive", archive) - - -def _archive_script_dir(exp, host, archive_path): - """ - Archives everything except 'data', '.venv', and '__pycache__' from the - same directory as the experiment script at host:archive_path/scripts. - """ - script_dir = Path(exp._script).parent - target_path = archive_path / "scripts" - - script_files = [f for f in script_dir.glob("*") - if not f.name not in ["data", ".venv", "__pycache__"]] - _rsync(script_files, host, target_path) - - -def _archive_data_dir(exp, host, archive_path): - """ - Packs all files we want to archive from the experiment's data directory and - then archives the packed data at host:archive_path/data. 
Specifically, the - archived files are: - - all files directly in the data dir (added resources such as parsers) - - all directories starting with "code_" (source code of all revisions and - the compilied binaries) - - All *.log and *.err files from the run directories - """ - data_dir = Path(exp.path) - target_path = archive_path / "data" - - data_files = [f for f in data_dir.glob("*") if f.is_file()] - data_files.extend([d for d in data_dir.glob("code-*") if d.is_dir()]) - data_files.extend(data_dir.glob("runs*/*/*.log")) - data_files.extend(data_dir.glob("runs*/*/*.err")) - with TemporaryDirectory() as tmpdirname: - packed_filename = Path(tmpdirname) / (exp.name + ".tar.xz") - _pack(data_files, packed_filename, Path(exp.path).parent) - _rsync([packed_filename], host, target_path) - - -def _archive_eval_dir(exp, host, archive_path): - """ - Archives all files in the experiment's eval dir. - If there is a properties file, it will be packed and only the - packed version will be included in the resulting list. - """ - eval_dir = Path(exp.eval_dir) - target_path = archive_path / "data" / eval_dir.name - - filenames = list(eval_dir.glob("*")) - properties = eval_dir / "properties" - if properties.exists(): - filenames.remove(properties) - with TemporaryDirectory() as tmpdirname: - packed_properties = Path(tmpdirname) / "properties.tar.xz" - _pack([properties], packed_properties, eval_dir) - _rsync([packed_properties], host, target_path) - _rsync(filenames, host, target_path) - - -def _pack(filenames, archive_filename, path_prefix): - """ - Packs all files given in filenames into an archive (.tar.xz) located at - archive_filename. The path_prefix is removed in the archive, i.e., - if the filename is '/path/to/file' and the prefix is '/path', the location - inside the archive will be 'to/file'. 
- """ - with tarfile.open(archive_filename, "w|xz") as f: - for name in filenames: - f.add(name, name.relative_to(path_prefix)) - -def _rsync(filenames, host, target_path): - # Before copying files we have to create the target path on host. - # We could use the rsync option --mkpath but it is only available in newer - # rsync versions (and not in the one running on the grid) - # https://stackoverflow.com/questions/1636889 - subprocess.run(["ssh", host, "mkdir", "-p", target_path]) - subprocess.run(["rsync", "-avz"] + [str(f) for f in filenames] + [f"{host}:{target_path}"]) diff --git a/experiments/issue1070/common_setup.py b/experiments/issue1070/common_setup.py deleted file mode 100644 index d0b72fe313..0000000000 --- a/experiments/issue1070/common_setup.py +++ /dev/null @@ -1,396 +0,0 @@ -# -*- coding: utf-8 -*- - -import itertools -import os -import platform -import subprocess -import sys - -from lab.experiment import ARGPARSER -from lab import tools - -from downward.experiment import FastDownwardExperiment -from downward.reports.absolute import AbsoluteReport -from downward.reports.compare import ComparativeReport -from downward.reports.scatter import ScatterPlotReport - -import archive - -def parse_args(): - ARGPARSER.add_argument( - "--test", - choices=["yes", "no", "auto"], - default="auto", - dest="test_run", - help="test experiment locally on a small suite if --test=yes or " - "--test=auto and we are not on a cluster") - return ARGPARSER.parse_args() - -ARGS = parse_args() - - -DEFAULT_OPTIMAL_SUITE = [ - 'agricola-opt18-strips', 'airport', 'barman-opt11-strips', - 'barman-opt14-strips', 'blocks', 'childsnack-opt14-strips', - 'data-network-opt18-strips', 'depot', 'driverlog', - 'elevators-opt08-strips', 'elevators-opt11-strips', - 'floortile-opt11-strips', 'floortile-opt14-strips', 'freecell', - 'ged-opt14-strips', 'grid', 'gripper', 'hiking-opt14-strips', - 'logistics00', 'logistics98', 'miconic', 'movie', 'mprime', - 'mystery', 'nomystery-opt11-strips', 
'openstacks-opt08-strips', - 'openstacks-opt11-strips', 'openstacks-opt14-strips', - 'openstacks-strips', 'organic-synthesis-opt18-strips', - 'organic-synthesis-split-opt18-strips', 'parcprinter-08-strips', - 'parcprinter-opt11-strips', 'parking-opt11-strips', - 'parking-opt14-strips', 'pathways', 'pegsol-08-strips', - 'pegsol-opt11-strips', 'petri-net-alignment-opt18-strips', - 'pipesworld-notankage', 'pipesworld-tankage', 'psr-small', 'rovers', - 'satellite', 'scanalyzer-08-strips', 'scanalyzer-opt11-strips', - 'snake-opt18-strips', 'sokoban-opt08-strips', - 'sokoban-opt11-strips', 'spider-opt18-strips', 'storage', - 'termes-opt18-strips', 'tetris-opt14-strips', - 'tidybot-opt11-strips', 'tidybot-opt14-strips', 'tpp', - 'transport-opt08-strips', 'transport-opt11-strips', - 'transport-opt14-strips', 'trucks-strips', 'visitall-opt11-strips', - 'visitall-opt14-strips', 'woodworking-opt08-strips', - 'woodworking-opt11-strips', 'zenotravel'] - -DEFAULT_SATISFICING_SUITE = [ - 'agricola-sat18-strips', 'airport', 'assembly', - 'barman-sat11-strips', 'barman-sat14-strips', 'blocks', - 'caldera-sat18-adl', 'caldera-split-sat18-adl', 'cavediving-14-adl', - 'childsnack-sat14-strips', 'citycar-sat14-adl', - 'data-network-sat18-strips', 'depot', 'driverlog', - 'elevators-sat08-strips', 'elevators-sat11-strips', - 'flashfill-sat18-adl', 'floortile-sat11-strips', - 'floortile-sat14-strips', 'freecell', 'ged-sat14-strips', 'grid', - 'gripper', 'hiking-sat14-strips', 'logistics00', 'logistics98', - 'maintenance-sat14-adl', 'miconic', 'miconic-fulladl', - 'miconic-simpleadl', 'movie', 'mprime', 'mystery', - 'nomystery-sat11-strips', 'nurikabe-sat18-adl', 'openstacks', - 'openstacks-sat08-adl', 'openstacks-sat08-strips', - 'openstacks-sat11-strips', 'openstacks-sat14-strips', - 'openstacks-strips', 'optical-telegraphs', - 'organic-synthesis-sat18-strips', - 'organic-synthesis-split-sat18-strips', 'parcprinter-08-strips', - 'parcprinter-sat11-strips', 'parking-sat11-strips', - 
'parking-sat14-strips', 'pathways', - 'pegsol-08-strips', 'pegsol-sat11-strips', 'philosophers', - 'pipesworld-notankage', 'pipesworld-tankage', 'psr-large', - 'psr-middle', 'psr-small', 'rovers', 'satellite', - 'scanalyzer-08-strips', 'scanalyzer-sat11-strips', 'schedule', - 'settlers-sat18-adl', 'snake-sat18-strips', 'sokoban-sat08-strips', - 'sokoban-sat11-strips', 'spider-sat18-strips', 'storage', - 'termes-sat18-strips', 'tetris-sat14-strips', - 'thoughtful-sat14-strips', 'tidybot-sat11-strips', 'tpp', - 'transport-sat08-strips', 'transport-sat11-strips', - 'transport-sat14-strips', 'trucks', 'trucks-strips', - 'visitall-sat11-strips', 'visitall-sat14-strips', - 'woodworking-sat08-strips', 'woodworking-sat11-strips', - 'zenotravel'] - - -def get_script(): - """Get file name of main script.""" - return tools.get_script_path() - - -def get_script_dir(): - """Get directory of main script. - - Usually a relative directory (depends on how it was called by the user.)""" - return os.path.dirname(get_script()) - - -def get_experiment_name(): - """Get name for experiment. - - Derived from the absolute filename of the main script, e.g. - "/ham/spam/eggs.py" => "spam-eggs".""" - script = os.path.abspath(get_script()) - script_dir = os.path.basename(os.path.dirname(script)) - script_base = os.path.splitext(os.path.basename(script))[0] - return "%s-%s" % (script_dir, script_base) - - -def get_data_dir(): - """Get data dir for the experiment. - - This is the subdirectory "data" of the directory containing - the main script.""" - return os.path.join(get_script_dir(), "data", get_experiment_name()) - - -def get_repo_base(): - """Get base directory of the repository, as an absolute path. - - Search upwards in the directory tree from the main script until a - directory with a subdirectory named ".git" is found. 
- - Abort if the repo base cannot be found.""" - path = os.path.abspath(get_script_dir()) - while os.path.dirname(path) != path: - if os.path.exists(os.path.join(path, ".git")): - return path - path = os.path.dirname(path) - sys.exit("repo base could not be found") - - -def is_running_on_cluster(): - node = platform.node() - return node.endswith(".scicore.unibas.ch") or node.endswith(".cluster.bc2.ch") - - -def is_test_run(): - return ARGS.test_run == "yes" or ( - ARGS.test_run == "auto" and not is_running_on_cluster()) - - -def get_algo_nick(revision, config_nick): - return "{revision}-{config_nick}".format(**locals()) - - -class IssueConfig(object): - """Hold information about a planner configuration. - - See FastDownwardExperiment.add_algorithm() for documentation of the - constructor's options. - - """ - def __init__(self, nick, component_options, - build_options=None, driver_options=None): - self.nick = nick - self.component_options = component_options - self.build_options = build_options - self.driver_options = driver_options - - -class IssueExperiment(FastDownwardExperiment): - """Subclass of FastDownwardExperiment with some convenience features.""" - - DEFAULT_TEST_SUITE = ["depot:p01.pddl", "gripper:prob01.pddl"] - - DEFAULT_TABLE_ATTRIBUTES = [ - "cost", - "coverage", - "error", - "evaluations", - "expansions", - "expansions_until_last_jump", - "initial_h_value", - "generated", - "memory", - "planner_memory", - "planner_time", - "quality", - "run_dir", - "score_evaluations", - "score_expansions", - "score_generated", - "score_memory", - "score_search_time", - "score_total_time", - "search_time", - "total_time", - ] - - DEFAULT_SCATTER_PLOT_ATTRIBUTES = [ - "evaluations", - "expansions", - "expansions_until_last_jump", - "initial_h_value", - "memory", - "search_time", - "total_time", - ] - - PORTFOLIO_ATTRIBUTES = [ - "cost", - "coverage", - "error", - "plan_length", - "run_dir", - ] - - def __init__(self, revisions=None, configs=None, path=None, 
**kwargs): - """ - - You can either specify both *revisions* and *configs* or none - of them. If they are omitted, you will need to call - exp.add_algorithm() manually. - - If *revisions* is given, it must be a non-empty list of - revision identifiers, which specify which planner versions to - use in the experiment. The same versions are used for - translator, preprocessor and search. :: - - IssueExperiment(revisions=["issue123", "4b3d581643"], ...) - - If *configs* is given, it must be a non-empty list of - IssueConfig objects. :: - - IssueExperiment(..., configs=[ - IssueConfig("ff", ["--search", "eager_greedy(ff())"]), - IssueConfig( - "lama", [], - driver_options=["--alias", "seq-sat-lama-2011"]), - ]) - - If *path* is specified, it must be the path to where the - experiment should be built (e.g. - /home/john/experiments/issue123/exp01/). If omitted, the - experiment path is derived automatically from the main - script's filename. Example:: - - script = experiments/issue123/exp01.py --> - path = experiments/issue123/data/issue123-exp01/ - - """ - - path = path or get_data_dir() - - FastDownwardExperiment.__init__(self, path=path, **kwargs) - - if (revisions and not configs) or (not revisions and configs): - raise ValueError( - "please provide either both or none of revisions and configs") - - for rev in revisions: - for config in configs: - self.add_algorithm( - get_algo_nick(rev, config.nick), - get_repo_base(), - rev, - config.component_options, - build_options=config.build_options, - driver_options=config.driver_options) - - self._revisions = revisions - self._configs = configs - - @classmethod - def _is_portfolio(cls, config_nick): - return "fdss" in config_nick - - @classmethod - def get_supported_attributes(cls, config_nick, attributes): - if cls._is_portfolio(config_nick): - return [attr for attr in attributes - if attr in cls.PORTFOLIO_ATTRIBUTES] - return attributes - - def add_absolute_report_step(self, **kwargs): - """Add step that makes an absolute 
report. - - Absolute reports are useful for experiments that don't compare - revisions. - - The report is written to the experiment evaluation directory. - - All *kwargs* will be passed to the AbsoluteReport class. If the - keyword argument *attributes* is not specified, a default list - of attributes is used. :: - - exp.add_absolute_report_step(attributes=["coverage"]) - - """ - kwargs.setdefault("attributes", self.DEFAULT_TABLE_ATTRIBUTES) - report = AbsoluteReport(**kwargs) - outfile = os.path.join( - self.eval_dir, - get_experiment_name() + "." + report.output_format) - self.add_report(report, outfile=outfile) - - def add_comparison_table_step(self, **kwargs): - """Add a step that makes pairwise revision comparisons. - - Create comparative reports for all pairs of Fast Downward - revisions. Each report pairs up the runs of the same config and - lists the two absolute attribute values and their difference - for all attributes in kwargs["attributes"]. - - All *kwargs* will be passed to the CompareConfigsReport class. - If the keyword argument *attributes* is not specified, a - default list of attributes is used. 
:: - - exp.add_comparison_table_step(attributes=["coverage"]) - - """ - kwargs.setdefault("attributes", self.DEFAULT_TABLE_ATTRIBUTES) - - def make_comparison_tables(): - for rev1, rev2 in itertools.combinations(self._revisions, 2): - compared_configs = [] - for config in self._configs: - config_nick = config.nick - compared_configs.append( - ("%s-%s" % (rev1, config_nick), - "%s-%s" % (rev2, config_nick), - "Diff (%s)" % config_nick)) - report = ComparativeReport(compared_configs, **kwargs) - outfile = os.path.join( - self.eval_dir, - "%s-%s-%s-compare.%s" % ( - self.name, rev1, rev2, report.output_format)) - report(self.eval_dir, outfile) - - self.add_step("make-comparison-tables", make_comparison_tables) - - def add_scatter_plot_step(self, relative=False, attributes=None, additional=[]): - """Add step creating (relative) scatter plots for all revision pairs. - - Create a scatter plot for each combination of attribute, - configuration and revisions pair. If *attributes* is not - specified, a list of common scatter plot attributes is used. - For portfolios all attributes except "cost", "coverage" and - "plan_length" will be ignored. 
:: - - exp.add_scatter_plot_step(attributes=["expansions"]) - - """ - if relative: - scatter_dir = os.path.join(self.eval_dir, "scatter-relative") - step_name = "make-relative-scatter-plots" - else: - scatter_dir = os.path.join(self.eval_dir, "scatter-absolute") - step_name = "make-absolute-scatter-plots" - if attributes is None: - attributes = self.DEFAULT_SCATTER_PLOT_ATTRIBUTES - - def make_scatter_plot(config_nick, rev1, rev2, attribute, config_nick2=None): - name = "-".join([self.name, rev1, rev2, attribute, config_nick]) - if config_nick2 is not None: - name += "-" + config_nick2 - print("Make scatter plot for", name) - algo1 = get_algo_nick(rev1, config_nick) - algo2 = get_algo_nick(rev2, config_nick if config_nick2 is None else config_nick2) - if attribute == "cost": - report = ScatterPlotReport( - filter_algorithm=[algo1, algo2], - attributes=[attribute], - relative=relative, - scale="log", - get_category=lambda run1, run2: run1["domain"]) - else: - report = ScatterPlotReport( - filter_algorithm=[algo1, algo2], - attributes=[attribute], - relative=relative, - get_category=lambda run1, run2: run1["domain"]) - report( - self.eval_dir, - os.path.join(scatter_dir, rev1 + "-" + rev2, name)) - - def make_scatter_plots(): - for config in self._configs: - for rev1, rev2 in itertools.combinations(self._revisions, 2): - for attribute in self.get_supported_attributes( - config.nick, attributes): - make_scatter_plot(config.nick, rev1, rev2, attribute) - for nick1, nick2, rev1, rev2, attribute in additional: - make_scatter_plot(nick1, rev1, rev2, attribute, config_nick2=nick2) - - self.add_step(step_name, make_scatter_plots) - - def add_archive_step(self, archive_path): - archive.add_archive_step(self, archive_path) \ No newline at end of file diff --git a/experiments/issue1070/requirements.txt b/experiments/issue1070/requirements.txt deleted file mode 100644 index cfc7664985..0000000000 --- a/experiments/issue1070/requirements.txt +++ /dev/null @@ -1 +0,0 @@ -lab==7.1 
diff --git a/experiments/issue1070/v1-lama-first.py b/experiments/issue1070/v1-lama-first.py deleted file mode 100755 index c83e80ab30..0000000000 --- a/experiments/issue1070/v1-lama-first.py +++ /dev/null @@ -1,59 +0,0 @@ -#! /usr/bin/env python3 - -import os - -from lab.environments import LocalEnvironment, BaselSlurmEnvironment - -import common_setup -from common_setup import IssueConfig, IssueExperiment - -ARCHIVE_PATH = "ai/downward/issue1070" -DIR = os.path.dirname(os.path.abspath(__file__)) -REPO_DIR = os.environ["DOWNWARD_REPO"] -BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"] -REVISIONS = [ - "issue1070-base", - "issue1070-v1", -] -CONFIGS = [ - IssueConfig("lama-first-pref", - ["--evaluator", - "hlm=lmcount(lm_factory=lm_reasonable_orders_hps(" - "lm_rhw()),transform=adapt_costs(one),pref=true)", - "--evaluator", "hff=ff(transform=adapt_costs(one))", - "--search", - "lazy_greedy([hff,hlm],preferred=[hff,hlm], cost_type=one,reopen_closed=false)"]), -] - -SUITE = common_setup.DEFAULT_SATISFICING_SUITE -ENVIRONMENT = BaselSlurmEnvironment( - partition="infai_2", - email="clemens.buechner@unibas.ch", - export=["PATH", "DOWNWARD_BENCHMARKS"]) - -if common_setup.is_test_run(): - SUITE = IssueExperiment.DEFAULT_TEST_SUITE - ENVIRONMENT = LocalEnvironment(processes=3) - -exp = IssueExperiment( - revisions=REVISIONS, - configs=CONFIGS, - environment=ENVIRONMENT, -) -exp.add_suite(BENCHMARKS_DIR, SUITE) - -exp.add_parser(exp.EXITCODE_PARSER) -exp.add_parser(exp.SINGLE_SEARCH_PARSER) -exp.add_parser(exp.PLANNER_PARSER) - -exp.add_step('build', exp.build) -exp.add_step('start', exp.start_runs) -exp.add_fetcher(name='fetch') - -#exp.add_absolute_report_step() -exp.add_comparison_table_step() -exp.add_scatter_plot_step(relative=True, attributes=["total_time", "memory"]) - -exp.add_archive_step(ARCHIVE_PATH) - -exp.run_steps() diff --git a/experiments/issue1070/v1-lama.py b/experiments/issue1070/v1-lama.py deleted file mode 100755 index 2910363af8..0000000000 --- 
a/experiments/issue1070/v1-lama.py +++ /dev/null @@ -1,84 +0,0 @@ -#! /usr/bin/env python3 - -import os - -from lab.environments import LocalEnvironment, BaselSlurmEnvironment - -import common_setup -from common_setup import IssueConfig, IssueExperiment - -ARCHIVE_PATH = "ai/downward/issue1070" -DIR = os.path.dirname(os.path.abspath(__file__)) -REPO_DIR = os.environ["DOWNWARD_REPO"] -BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"] -REVISIONS = [ - "641d70b36", # base - "b5d8fddcf", # v1 -] -CONFIGS = [ - IssueConfig("lama-pref", - ["--if-unit-cost", - "--evaluator", - "hlm=lmcount(lm_reasonable_orders_hps(lm_rhw()),pref=true)", - "--evaluator", "hff=ff()", - "--search", """iterated([ - lazy_greedy([hff,hlm],preferred=[hff,hlm]), - lazy_wastar([hff,hlm],preferred=[hff,hlm],w=5), - lazy_wastar([hff,hlm],preferred=[hff,hlm],w=3), - lazy_wastar([hff,hlm],preferred=[hff,hlm],w=2), - lazy_wastar([hff,hlm],preferred=[hff,hlm],w=1) - ],repeat_last=true,continue_on_fail=true)""", - "--if-non-unit-cost", - "--evaluator", - "hlm1=lmcount(lm_reasonable_orders_hps(lm_rhw()),transform=adapt_costs(one),pref=true)", - "--evaluator", "hff1=ff(transform=adapt_costs(one))", - "--evaluator", - "hlm2=lmcount(lm_reasonable_orders_hps(lm_rhw()),transform=adapt_costs(plusone),pref=true)", - "--evaluator", "hff2=ff(transform=adapt_costs(plusone))", - "--search", """iterated([ - lazy_greedy([hff1,hlm1],preferred=[hff1,hlm1], - cost_type=one,reopen_closed=false), - lazy_greedy([hff2,hlm2],preferred=[hff2,hlm2], - reopen_closed=false), - lazy_wastar([hff2,hlm2],preferred=[hff2,hlm2],w=5), - lazy_wastar([hff2,hlm2],preferred=[hff2,hlm2],w=3), - lazy_wastar([hff2,hlm2],preferred=[hff2,hlm2],w=2), - lazy_wastar([hff2,hlm2],preferred=[hff2,hlm2],w=1) - ],repeat_last=true,continue_on_fail=true)""", - # Append --always to be on the safe side if we want to append - # additional options later. 
- "--always"]), -] - -SUITE = common_setup.DEFAULT_SATISFICING_SUITE -ENVIRONMENT = BaselSlurmEnvironment( - partition="infai_2", - email="clemens.buechner@unibas.ch", - export=["PATH", "DOWNWARD_BENCHMARKS"]) - -if common_setup.is_test_run(): - SUITE = IssueExperiment.DEFAULT_TEST_SUITE - ENVIRONMENT = LocalEnvironment(processes=3) - -exp = IssueExperiment( - revisions=REVISIONS, - configs=CONFIGS, - environment=ENVIRONMENT, -) -exp.add_suite(BENCHMARKS_DIR, SUITE) - -exp.add_parser(exp.EXITCODE_PARSER) -exp.add_parser(exp.ANYTIME_SEARCH_PARSER) -exp.add_parser(exp.PLANNER_PARSER) - -exp.add_step('build', exp.build) -exp.add_step('start', exp.start_runs) -exp.add_fetcher(name='fetch') - -#exp.add_absolute_report_step() -exp.add_comparison_table_step() -exp.add_scatter_plot_step(relative=True, attributes=["total_time", "memory"]) - -exp.add_archive_step(ARCHIVE_PATH) - -exp.run_steps() diff --git a/experiments/issue1070/v2-lama-first.py b/experiments/issue1070/v2-lama-first.py deleted file mode 100755 index 821efe7788..0000000000 --- a/experiments/issue1070/v2-lama-first.py +++ /dev/null @@ -1,59 +0,0 @@ -#! 
/usr/bin/env python3 - -import os - -from lab.environments import LocalEnvironment, BaselSlurmEnvironment - -import common_setup -from common_setup import IssueConfig, IssueExperiment - -ARCHIVE_PATH = "ai/downward/issue1070" -DIR = os.path.dirname(os.path.abspath(__file__)) -REPO_DIR = os.environ["DOWNWARD_REPO"] -BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"] -REVISIONS = [ - "issue1070-v1", - "issue1070-v2", -] -CONFIGS = [ - IssueConfig("lama-first-pref", - ["--evaluator", - "hlm=lmcount(lm_factory=lm_reasonable_orders_hps(" - "lm_rhw()),transform=adapt_costs(one),pref=true)", - "--evaluator", "hff=ff(transform=adapt_costs(one))", - "--search", - "lazy_greedy([hff,hlm],preferred=[hff,hlm], cost_type=one,reopen_closed=false)"]), -] - -SUITE = common_setup.DEFAULT_SATISFICING_SUITE -ENVIRONMENT = BaselSlurmEnvironment( - partition="infai_2", - email="clemens.buechner@unibas.ch", - export=["PATH", "DOWNWARD_BENCHMARKS"]) - -if common_setup.is_test_run(): - SUITE = IssueExperiment.DEFAULT_TEST_SUITE - ENVIRONMENT = LocalEnvironment(processes=3) - -exp = IssueExperiment( - revisions=REVISIONS, - configs=CONFIGS, - environment=ENVIRONMENT, -) -exp.add_suite(BENCHMARKS_DIR, SUITE) - -exp.add_parser(exp.EXITCODE_PARSER) -exp.add_parser(exp.SINGLE_SEARCH_PARSER) -exp.add_parser(exp.PLANNER_PARSER) - -exp.add_step('build', exp.build) -exp.add_step('start', exp.start_runs) -exp.add_fetcher(name='fetch') - -#exp.add_absolute_report_step() -exp.add_comparison_table_step() -exp.add_scatter_plot_step(relative=True, attributes=["total_time", "memory"]) - -exp.add_archive_step(ARCHIVE_PATH) - -exp.run_steps() diff --git a/experiments/issue1070/v2-lama.py b/experiments/issue1070/v2-lama.py deleted file mode 100755 index c0ce3aae98..0000000000 --- a/experiments/issue1070/v2-lama.py +++ /dev/null @@ -1,84 +0,0 @@ -#! 
/usr/bin/env python3 - -import os - -from lab.environments import LocalEnvironment, BaselSlurmEnvironment - -import common_setup -from common_setup import IssueConfig, IssueExperiment - -ARCHIVE_PATH = "ai/downward/issue1070" -DIR = os.path.dirname(os.path.abspath(__file__)) -REPO_DIR = os.environ["DOWNWARD_REPO"] -BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"] -REVISIONS = [ - "issue1070-v1", - "issue1070-v2", -] -CONFIGS = [ - IssueConfig("lama-pref", - ["--if-unit-cost", - "--evaluator", - "hlm=lmcount(lm_reasonable_orders_hps(lm_rhw()),pref=true)", - "--evaluator", "hff=ff()", - "--search", """iterated([ - lazy_greedy([hff,hlm],preferred=[hff,hlm]), - lazy_wastar([hff,hlm],preferred=[hff,hlm],w=5), - lazy_wastar([hff,hlm],preferred=[hff,hlm],w=3), - lazy_wastar([hff,hlm],preferred=[hff,hlm],w=2), - lazy_wastar([hff,hlm],preferred=[hff,hlm],w=1) - ],repeat_last=true,continue_on_fail=true)""", - "--if-non-unit-cost", - "--evaluator", - "hlm1=lmcount(lm_reasonable_orders_hps(lm_rhw()),transform=adapt_costs(one),pref=true)", - "--evaluator", "hff1=ff(transform=adapt_costs(one))", - "--evaluator", - "hlm2=lmcount(lm_reasonable_orders_hps(lm_rhw()),transform=adapt_costs(plusone),pref=true)", - "--evaluator", "hff2=ff(transform=adapt_costs(plusone))", - "--search", """iterated([ - lazy_greedy([hff1,hlm1],preferred=[hff1,hlm1], - cost_type=one,reopen_closed=false), - lazy_greedy([hff2,hlm2],preferred=[hff2,hlm2], - reopen_closed=false), - lazy_wastar([hff2,hlm2],preferred=[hff2,hlm2],w=5), - lazy_wastar([hff2,hlm2],preferred=[hff2,hlm2],w=3), - lazy_wastar([hff2,hlm2],preferred=[hff2,hlm2],w=2), - lazy_wastar([hff2,hlm2],preferred=[hff2,hlm2],w=1) - ],repeat_last=true,continue_on_fail=true)""", - # Append --always to be on the safe side if we want to append - # additional options later. 
- "--always"]), -] - -SUITE = common_setup.DEFAULT_SATISFICING_SUITE -ENVIRONMENT = BaselSlurmEnvironment( - partition="infai_2", - email="clemens.buechner@unibas.ch", - export=["PATH", "DOWNWARD_BENCHMARKS"]) - -if common_setup.is_test_run(): - SUITE = IssueExperiment.DEFAULT_TEST_SUITE - ENVIRONMENT = LocalEnvironment(processes=3) - -exp = IssueExperiment( - revisions=REVISIONS, - configs=CONFIGS, - environment=ENVIRONMENT, -) -exp.add_suite(BENCHMARKS_DIR, SUITE) - -exp.add_parser(exp.EXITCODE_PARSER) -exp.add_parser(exp.ANYTIME_SEARCH_PARSER) -exp.add_parser(exp.PLANNER_PARSER) - -exp.add_step('build', exp.build) -exp.add_step('start', exp.start_runs) -exp.add_fetcher(name='fetch') - -#exp.add_absolute_report_step() -exp.add_comparison_table_step() -exp.add_scatter_plot_step(relative=True, attributes=["total_time", "memory"]) - -exp.add_archive_step(ARCHIVE_PATH) - -exp.run_steps() diff --git a/experiments/issue1070/v3-lama-first.py b/experiments/issue1070/v3-lama-first.py deleted file mode 100755 index 21e862b190..0000000000 --- a/experiments/issue1070/v3-lama-first.py +++ /dev/null @@ -1,59 +0,0 @@ -#! 
/usr/bin/env python3 - -import os - -from lab.environments import LocalEnvironment, BaselSlurmEnvironment - -import common_setup -from common_setup import IssueConfig, IssueExperiment - -ARCHIVE_PATH = "ai/downward/issue1070" -DIR = os.path.dirname(os.path.abspath(__file__)) -REPO_DIR = os.environ["DOWNWARD_REPO"] -BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"] -REVISIONS = [ - "issue1070-v1", - "issue1070-v3", -] -CONFIGS = [ - IssueConfig("lama-first-pref", - ["--evaluator", - "hlm=lmcount(lm_factory=lm_reasonable_orders_hps(" - "lm_rhw()),transform=adapt_costs(one),pref=true)", - "--evaluator", "hff=ff(transform=adapt_costs(one))", - "--search", - "lazy_greedy([hff,hlm],preferred=[hff,hlm], cost_type=one,reopen_closed=false)"]), -] - -SUITE = common_setup.DEFAULT_SATISFICING_SUITE -ENVIRONMENT = BaselSlurmEnvironment( - partition="infai_2", - email="clemens.buechner@unibas.ch", - export=["PATH", "DOWNWARD_BENCHMARKS"]) - -if common_setup.is_test_run(): - SUITE = IssueExperiment.DEFAULT_TEST_SUITE - ENVIRONMENT = LocalEnvironment(processes=3) - -exp = IssueExperiment( - revisions=REVISIONS, - configs=CONFIGS, - environment=ENVIRONMENT, -) -exp.add_suite(BENCHMARKS_DIR, SUITE) - -exp.add_parser(exp.EXITCODE_PARSER) -exp.add_parser(exp.SINGLE_SEARCH_PARSER) -exp.add_parser(exp.PLANNER_PARSER) - -exp.add_step('build', exp.build) -exp.add_step('start', exp.start_runs) -exp.add_fetcher(name='fetch') - -#exp.add_absolute_report_step() -exp.add_comparison_table_step() -exp.add_scatter_plot_step(relative=True, attributes=["total_time", "memory"]) - -exp.add_archive_step(ARCHIVE_PATH) - -exp.run_steps() diff --git a/experiments/issue1070/v3-lama.py b/experiments/issue1070/v3-lama.py deleted file mode 100755 index 1a80c4ae87..0000000000 --- a/experiments/issue1070/v3-lama.py +++ /dev/null @@ -1,84 +0,0 @@ -#! 
/usr/bin/env python3 - -import os - -from lab.environments import LocalEnvironment, BaselSlurmEnvironment - -import common_setup -from common_setup import IssueConfig, IssueExperiment - -ARCHIVE_PATH = "ai/downward/issue1070" -DIR = os.path.dirname(os.path.abspath(__file__)) -REPO_DIR = os.environ["DOWNWARD_REPO"] -BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"] -REVISIONS = [ - "issue1070-v1", - "issue1070-v3", -] -CONFIGS = [ - IssueConfig("lama-pref", - ["--if-unit-cost", - "--evaluator", - "hlm=lmcount(lm_reasonable_orders_hps(lm_rhw()),pref=true)", - "--evaluator", "hff=ff()", - "--search", """iterated([ - lazy_greedy([hff,hlm],preferred=[hff,hlm]), - lazy_wastar([hff,hlm],preferred=[hff,hlm],w=5), - lazy_wastar([hff,hlm],preferred=[hff,hlm],w=3), - lazy_wastar([hff,hlm],preferred=[hff,hlm],w=2), - lazy_wastar([hff,hlm],preferred=[hff,hlm],w=1) - ],repeat_last=true,continue_on_fail=true)""", - "--if-non-unit-cost", - "--evaluator", - "hlm1=lmcount(lm_reasonable_orders_hps(lm_rhw()),transform=adapt_costs(one),pref=true)", - "--evaluator", "hff1=ff(transform=adapt_costs(one))", - "--evaluator", - "hlm2=lmcount(lm_reasonable_orders_hps(lm_rhw()),transform=adapt_costs(plusone),pref=true)", - "--evaluator", "hff2=ff(transform=adapt_costs(plusone))", - "--search", """iterated([ - lazy_greedy([hff1,hlm1],preferred=[hff1,hlm1], - cost_type=one,reopen_closed=false), - lazy_greedy([hff2,hlm2],preferred=[hff2,hlm2], - reopen_closed=false), - lazy_wastar([hff2,hlm2],preferred=[hff2,hlm2],w=5), - lazy_wastar([hff2,hlm2],preferred=[hff2,hlm2],w=3), - lazy_wastar([hff2,hlm2],preferred=[hff2,hlm2],w=2), - lazy_wastar([hff2,hlm2],preferred=[hff2,hlm2],w=1) - ],repeat_last=true,continue_on_fail=true)""", - # Append --always to be on the safe side if we want to append - # additional options later. 
- "--always"]), -] - -SUITE = common_setup.DEFAULT_SATISFICING_SUITE -ENVIRONMENT = BaselSlurmEnvironment( - partition="infai_2", - email="clemens.buechner@unibas.ch", - export=["PATH", "DOWNWARD_BENCHMARKS"]) - -if common_setup.is_test_run(): - SUITE = IssueExperiment.DEFAULT_TEST_SUITE - ENVIRONMENT = LocalEnvironment(processes=3) - -exp = IssueExperiment( - revisions=REVISIONS, - configs=CONFIGS, - environment=ENVIRONMENT, -) -exp.add_suite(BENCHMARKS_DIR, SUITE) - -exp.add_parser(exp.EXITCODE_PARSER) -exp.add_parser(exp.ANYTIME_SEARCH_PARSER) -exp.add_parser(exp.PLANNER_PARSER) - -exp.add_step('build', exp.build) -exp.add_step('start', exp.start_runs) -exp.add_fetcher(name='fetch') - -#exp.add_absolute_report_step() -exp.add_comparison_table_step() -exp.add_scatter_plot_step(relative=True, attributes=["total_time", "memory"]) - -exp.add_archive_step(ARCHIVE_PATH) - -exp.run_steps() diff --git a/experiments/issue1070/v4-lama-first.py b/experiments/issue1070/v4-lama-first.py deleted file mode 100755 index f8af224a65..0000000000 --- a/experiments/issue1070/v4-lama-first.py +++ /dev/null @@ -1,60 +0,0 @@ -#! 
/usr/bin/env python3 - -import os - -from lab.environments import LocalEnvironment, BaselSlurmEnvironment - -import common_setup -from common_setup import IssueConfig, IssueExperiment - -ARCHIVE_PATH = "ai/downward/issue1070" -DIR = os.path.dirname(os.path.abspath(__file__)) -REPO_DIR = os.environ["DOWNWARD_REPO"] -BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"] -REVISIONS = [ - "issue1070-base", - "issue1070-v4", -] -CONFIGS = [ - IssueConfig("lama-first-pref", - ["--evaluator", - "hlm=lmcount(lm_factory=lm_reasonable_orders_hps(" - "lm_rhw()),transform=adapt_costs(one),pref=true)", - "--evaluator", "hff=ff(transform=adapt_costs(one))", - "--search", - "lazy_greedy([hff,hlm],preferred=[hff,hlm], cost_type=one,reopen_closed=false)"]), -] - -SUITE = common_setup.DEFAULT_SATISFICING_SUITE -ENVIRONMENT = BaselSlurmEnvironment( - partition="infai_2", - email="clemens.buechner@unibas.ch", - export=["PATH", "DOWNWARD_BENCHMARKS"]) - -if common_setup.is_test_run(): - SUITE = IssueExperiment.DEFAULT_TEST_SUITE - ENVIRONMENT = LocalEnvironment(processes=3) - -exp = IssueExperiment( - revisions=REVISIONS, - configs=CONFIGS, - environment=ENVIRONMENT, -) -exp.add_suite(BENCHMARKS_DIR, SUITE) - -exp.add_parser(exp.EXITCODE_PARSER) -exp.add_parser(exp.SINGLE_SEARCH_PARSER) -exp.add_parser(exp.PLANNER_PARSER) - -exp.add_step('build', exp.build) -exp.add_step('start', exp.start_runs) -exp.add_fetcher(name='fetch') - -#exp.add_absolute_report_step() -exp.add_comparison_table_step() -exp.add_scatter_plot_step(relative=True, attributes=["search_time", "cost"]) -exp.add_scatter_plot_step(relative=False, attributes=["search_time", "cost"]) - -exp.add_archive_step(ARCHIVE_PATH) - -exp.run_steps() diff --git a/experiments/issue1070/v4-lama.py b/experiments/issue1070/v4-lama.py deleted file mode 100755 index 830745aeca..0000000000 --- a/experiments/issue1070/v4-lama.py +++ /dev/null @@ -1,85 +0,0 @@ -#! 
/usr/bin/env python3 - -import os - -from lab.environments import LocalEnvironment, BaselSlurmEnvironment - -import common_setup -from common_setup import IssueConfig, IssueExperiment - -ARCHIVE_PATH = "ai/downward/issue1070" -DIR = os.path.dirname(os.path.abspath(__file__)) -REPO_DIR = os.environ["DOWNWARD_REPO"] -BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"] -REVISIONS = [ - "issue1070-base", - "issue1070-v4", -] -CONFIGS = [ - IssueConfig("lama-pref", - ["--if-unit-cost", - "--evaluator", - "hlm=lmcount(lm_reasonable_orders_hps(lm_rhw()),pref=true)", - "--evaluator", "hff=ff()", - "--search", """iterated([ - lazy_greedy([hff,hlm],preferred=[hff,hlm]), - lazy_wastar([hff,hlm],preferred=[hff,hlm],w=5), - lazy_wastar([hff,hlm],preferred=[hff,hlm],w=3), - lazy_wastar([hff,hlm],preferred=[hff,hlm],w=2), - lazy_wastar([hff,hlm],preferred=[hff,hlm],w=1) - ],repeat_last=true,continue_on_fail=true)""", - "--if-non-unit-cost", - "--evaluator", - "hlm1=lmcount(lm_reasonable_orders_hps(lm_rhw()),transform=adapt_costs(one),pref=true)", - "--evaluator", "hff1=ff(transform=adapt_costs(one))", - "--evaluator", - "hlm2=lmcount(lm_reasonable_orders_hps(lm_rhw()),transform=adapt_costs(plusone),pref=true)", - "--evaluator", "hff2=ff(transform=adapt_costs(plusone))", - "--search", """iterated([ - lazy_greedy([hff1,hlm1],preferred=[hff1,hlm1], - cost_type=one,reopen_closed=false), - lazy_greedy([hff2,hlm2],preferred=[hff2,hlm2], - reopen_closed=false), - lazy_wastar([hff2,hlm2],preferred=[hff2,hlm2],w=5), - lazy_wastar([hff2,hlm2],preferred=[hff2,hlm2],w=3), - lazy_wastar([hff2,hlm2],preferred=[hff2,hlm2],w=2), - lazy_wastar([hff2,hlm2],preferred=[hff2,hlm2],w=1) - ],repeat_last=true,continue_on_fail=true)""", - # Append --always to be on the safe side if we want to append - # additional options later. 
- "--always"]), -] - -SUITE = common_setup.DEFAULT_SATISFICING_SUITE -ENVIRONMENT = BaselSlurmEnvironment( - partition="infai_2", - email="clemens.buechner@unibas.ch", - export=["PATH", "DOWNWARD_BENCHMARKS"]) - -if common_setup.is_test_run(): - SUITE = IssueExperiment.DEFAULT_TEST_SUITE - ENVIRONMENT = LocalEnvironment(processes=3) - -exp = IssueExperiment( - revisions=REVISIONS, - configs=CONFIGS, - environment=ENVIRONMENT, -) -exp.add_suite(BENCHMARKS_DIR, SUITE) - -exp.add_parser(exp.EXITCODE_PARSER) -exp.add_parser(exp.ANYTIME_SEARCH_PARSER) -exp.add_parser(exp.PLANNER_PARSER) - -exp.add_step('build', exp.build) -exp.add_step('start', exp.start_runs) -exp.add_fetcher(name='fetch') - -#exp.add_absolute_report_step() -exp.add_comparison_table_step() -exp.add_scatter_plot_step(relative=True, attributes=["cost"]) -exp.add_scatter_plot_step(relative=False, attributes=["cost"]) - -exp.add_archive_step(ARCHIVE_PATH) - -exp.run_steps() diff --git a/experiments/issue1070/v5-lama-first.py b/experiments/issue1070/v5-lama-first.py deleted file mode 100755 index bbf9449658..0000000000 --- a/experiments/issue1070/v5-lama-first.py +++ /dev/null @@ -1,60 +0,0 @@ -#! 
/usr/bin/env python3 - -import os - -from lab.environments import LocalEnvironment, BaselSlurmEnvironment - -import common_setup -from common_setup import IssueConfig, IssueExperiment - -ARCHIVE_PATH = "ai/downward/issue1070" -DIR = os.path.dirname(os.path.abspath(__file__)) -REPO_DIR = os.environ["DOWNWARD_REPO"] -BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"] -REVISIONS = [ - "issue1070-base", - "issue1070-v5", -] -CONFIGS = [ - IssueConfig("lama-first-pref", - ["--evaluator", - "hlm=lmcount(lm_factory=lm_reasonable_orders_hps(" - "lm_rhw()),transform=adapt_costs(one),pref=true)", - "--evaluator", "hff=ff(transform=adapt_costs(one))", - "--search", - "lazy_greedy([hff,hlm],preferred=[hff,hlm], cost_type=one,reopen_closed=false)"]), -] - -SUITE = common_setup.DEFAULT_SATISFICING_SUITE -ENVIRONMENT = BaselSlurmEnvironment( - partition="infai_2", - email="clemens.buechner@unibas.ch", - export=["PATH", "DOWNWARD_BENCHMARKS"]) - -if common_setup.is_test_run(): - SUITE = IssueExperiment.DEFAULT_TEST_SUITE - ENVIRONMENT = LocalEnvironment(processes=3) - -exp = IssueExperiment( - revisions=REVISIONS, - configs=CONFIGS, - environment=ENVIRONMENT, -) -exp.add_suite(BENCHMARKS_DIR, SUITE) - -exp.add_parser(exp.EXITCODE_PARSER) -exp.add_parser(exp.SINGLE_SEARCH_PARSER) -exp.add_parser(exp.PLANNER_PARSER) - -exp.add_step('build', exp.build) -exp.add_step('start', exp.start_runs) -exp.add_fetcher(name='fetch') - -#exp.add_absolute_report_step() -exp.add_comparison_table_step() -exp.add_scatter_plot_step(relative=True, attributes=["search_time", "cost"]) -exp.add_scatter_plot_step(relative=False, attributes=["search_time", "cost"]) - -exp.add_archive_step(ARCHIVE_PATH) - -exp.run_steps() diff --git a/experiments/issue1070/v5-lama.py b/experiments/issue1070/v5-lama.py deleted file mode 100755 index a598eadbbe..0000000000 --- a/experiments/issue1070/v5-lama.py +++ /dev/null @@ -1,85 +0,0 @@ -#! 
/usr/bin/env python3 - -import os - -from lab.environments import LocalEnvironment, BaselSlurmEnvironment - -import common_setup -from common_setup import IssueConfig, IssueExperiment - -ARCHIVE_PATH = "ai/downward/issue1070" -DIR = os.path.dirname(os.path.abspath(__file__)) -REPO_DIR = os.environ["DOWNWARD_REPO"] -BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"] -REVISIONS = [ - "issue1070-base", - "issue1070-v5", -] -CONFIGS = [ - IssueConfig("lama-pref", - ["--if-unit-cost", - "--evaluator", - "hlm=lmcount(lm_reasonable_orders_hps(lm_rhw()),pref=true)", - "--evaluator", "hff=ff()", - "--search", """iterated([ - lazy_greedy([hff,hlm],preferred=[hff,hlm]), - lazy_wastar([hff,hlm],preferred=[hff,hlm],w=5), - lazy_wastar([hff,hlm],preferred=[hff,hlm],w=3), - lazy_wastar([hff,hlm],preferred=[hff,hlm],w=2), - lazy_wastar([hff,hlm],preferred=[hff,hlm],w=1) - ],repeat_last=true,continue_on_fail=true)""", - "--if-non-unit-cost", - "--evaluator", - "hlm1=lmcount(lm_reasonable_orders_hps(lm_rhw()),transform=adapt_costs(one),pref=true)", - "--evaluator", "hff1=ff(transform=adapt_costs(one))", - "--evaluator", - "hlm2=lmcount(lm_reasonable_orders_hps(lm_rhw()),transform=adapt_costs(plusone),pref=true)", - "--evaluator", "hff2=ff(transform=adapt_costs(plusone))", - "--search", """iterated([ - lazy_greedy([hff1,hlm1],preferred=[hff1,hlm1], - cost_type=one,reopen_closed=false), - lazy_greedy([hff2,hlm2],preferred=[hff2,hlm2], - reopen_closed=false), - lazy_wastar([hff2,hlm2],preferred=[hff2,hlm2],w=5), - lazy_wastar([hff2,hlm2],preferred=[hff2,hlm2],w=3), - lazy_wastar([hff2,hlm2],preferred=[hff2,hlm2],w=2), - lazy_wastar([hff2,hlm2],preferred=[hff2,hlm2],w=1) - ],repeat_last=true,continue_on_fail=true)""", - # Append --always to be on the safe side if we want to append - # additional options later. 
- "--always"]), -] - -SUITE = common_setup.DEFAULT_SATISFICING_SUITE -ENVIRONMENT = BaselSlurmEnvironment( - partition="infai_2", - email="clemens.buechner@unibas.ch", - export=["PATH", "DOWNWARD_BENCHMARKS"]) - -if common_setup.is_test_run(): - SUITE = IssueExperiment.DEFAULT_TEST_SUITE - ENVIRONMENT = LocalEnvironment(processes=3) - -exp = IssueExperiment( - revisions=REVISIONS, - configs=CONFIGS, - environment=ENVIRONMENT, -) -exp.add_suite(BENCHMARKS_DIR, SUITE) - -exp.add_parser(exp.EXITCODE_PARSER) -exp.add_parser(exp.ANYTIME_SEARCH_PARSER) -exp.add_parser(exp.PLANNER_PARSER) - -exp.add_step('build', exp.build) -exp.add_step('start', exp.start_runs) -exp.add_fetcher(name='fetch') - -#exp.add_absolute_report_step() -exp.add_comparison_table_step() -exp.add_scatter_plot_step(relative=True, attributes=["cost"]) -exp.add_scatter_plot_step(relative=False, attributes=["cost"]) - -exp.add_archive_step(ARCHIVE_PATH) - -exp.run_steps() diff --git a/experiments/issue1070/v6-lama-first.py b/experiments/issue1070/v6-lama-first.py deleted file mode 100755 index 8ac2b0b63f..0000000000 --- a/experiments/issue1070/v6-lama-first.py +++ /dev/null @@ -1,60 +0,0 @@ -#! 
/usr/bin/env python3 - -import os - -from lab.environments import LocalEnvironment, BaselSlurmEnvironment - -import common_setup -from common_setup import IssueConfig, IssueExperiment - -ARCHIVE_PATH = "ai/downward/issue1070" -DIR = os.path.dirname(os.path.abspath(__file__)) -REPO_DIR = os.environ["DOWNWARD_REPO"] -BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"] -REVISIONS = [ - "issue1070-base", - "issue1070-v6", -] -CONFIGS = [ - IssueConfig("lama-first-pref", - ["--evaluator", - "hlm=lmcount(lm_factory=lm_reasonable_orders_hps(" - "lm_rhw()),transform=adapt_costs(one),pref=true)", - "--evaluator", "hff=ff(transform=adapt_costs(one))", - "--search", - "lazy_greedy([hff,hlm],preferred=[hff,hlm], cost_type=one,reopen_closed=false)"]), -] - -SUITE = common_setup.DEFAULT_SATISFICING_SUITE -ENVIRONMENT = BaselSlurmEnvironment( - partition="infai_2", - email="clemens.buechner@unibas.ch", - export=["PATH", "DOWNWARD_BENCHMARKS"]) - -if common_setup.is_test_run(): - SUITE = IssueExperiment.DEFAULT_TEST_SUITE - ENVIRONMENT = LocalEnvironment(processes=3) - -exp = IssueExperiment( - revisions=REVISIONS, - configs=CONFIGS, - environment=ENVIRONMENT, -) -exp.add_suite(BENCHMARKS_DIR, SUITE) - -exp.add_parser(exp.EXITCODE_PARSER) -exp.add_parser(exp.SINGLE_SEARCH_PARSER) -exp.add_parser(exp.PLANNER_PARSER) - -exp.add_step('build', exp.build) -exp.add_step('start', exp.start_runs) -exp.add_fetcher(name='fetch') - -#exp.add_absolute_report_step() -exp.add_comparison_table_step() -exp.add_scatter_plot_step(relative=True, attributes=["search_time", "cost"]) -exp.add_scatter_plot_step(relative=False, attributes=["search_time", "cost"]) - -exp.add_archive_step(ARCHIVE_PATH) - -exp.run_steps() diff --git a/experiments/issue1070/v6-lama.py b/experiments/issue1070/v6-lama.py deleted file mode 100755 index 4dcc960123..0000000000 --- a/experiments/issue1070/v6-lama.py +++ /dev/null @@ -1,85 +0,0 @@ -#! 
/usr/bin/env python3 - -import os - -from lab.environments import LocalEnvironment, BaselSlurmEnvironment - -import common_setup -from common_setup import IssueConfig, IssueExperiment - -ARCHIVE_PATH = "ai/downward/issue1070" -DIR = os.path.dirname(os.path.abspath(__file__)) -REPO_DIR = os.environ["DOWNWARD_REPO"] -BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"] -REVISIONS = [ - "issue1070-base", - "issue1070-v6", -] -CONFIGS = [ - IssueConfig("lama-pref", - ["--if-unit-cost", - "--evaluator", - "hlm=lmcount(lm_reasonable_orders_hps(lm_rhw()),pref=true)", - "--evaluator", "hff=ff()", - "--search", """iterated([ - lazy_greedy([hff,hlm],preferred=[hff,hlm]), - lazy_wastar([hff,hlm],preferred=[hff,hlm],w=5), - lazy_wastar([hff,hlm],preferred=[hff,hlm],w=3), - lazy_wastar([hff,hlm],preferred=[hff,hlm],w=2), - lazy_wastar([hff,hlm],preferred=[hff,hlm],w=1) - ],repeat_last=true,continue_on_fail=true)""", - "--if-non-unit-cost", - "--evaluator", - "hlm1=lmcount(lm_reasonable_orders_hps(lm_rhw()),transform=adapt_costs(one),pref=true)", - "--evaluator", "hff1=ff(transform=adapt_costs(one))", - "--evaluator", - "hlm2=lmcount(lm_reasonable_orders_hps(lm_rhw()),transform=adapt_costs(plusone),pref=true)", - "--evaluator", "hff2=ff(transform=adapt_costs(plusone))", - "--search", """iterated([ - lazy_greedy([hff1,hlm1],preferred=[hff1,hlm1], - cost_type=one,reopen_closed=false), - lazy_greedy([hff2,hlm2],preferred=[hff2,hlm2], - reopen_closed=false), - lazy_wastar([hff2,hlm2],preferred=[hff2,hlm2],w=5), - lazy_wastar([hff2,hlm2],preferred=[hff2,hlm2],w=3), - lazy_wastar([hff2,hlm2],preferred=[hff2,hlm2],w=2), - lazy_wastar([hff2,hlm2],preferred=[hff2,hlm2],w=1) - ],repeat_last=true,continue_on_fail=true)""", - # Append --always to be on the safe side if we want to append - # additional options later. 
- "--always"]), -] - -SUITE = common_setup.DEFAULT_SATISFICING_SUITE -ENVIRONMENT = BaselSlurmEnvironment( - partition="infai_2", - email="clemens.buechner@unibas.ch", - export=["PATH", "DOWNWARD_BENCHMARKS"]) - -if common_setup.is_test_run(): - SUITE = IssueExperiment.DEFAULT_TEST_SUITE - ENVIRONMENT = LocalEnvironment(processes=3) - -exp = IssueExperiment( - revisions=REVISIONS, - configs=CONFIGS, - environment=ENVIRONMENT, -) -exp.add_suite(BENCHMARKS_DIR, SUITE) - -exp.add_parser(exp.EXITCODE_PARSER) -exp.add_parser(exp.ANYTIME_SEARCH_PARSER) -exp.add_parser(exp.PLANNER_PARSER) - -exp.add_step('build', exp.build) -exp.add_step('start', exp.start_runs) -exp.add_fetcher(name='fetch') - -#exp.add_absolute_report_step() -exp.add_comparison_table_step() -exp.add_scatter_plot_step(relative=True, attributes=["cost"]) -exp.add_scatter_plot_step(relative=False, attributes=["cost"]) - -exp.add_archive_step(ARCHIVE_PATH) - -exp.run_steps() diff --git a/experiments/issue123/README b/experiments/issue123/README deleted file mode 100644 index c0cad7402f..0000000000 --- a/experiments/issue123/README +++ /dev/null @@ -1,4 +0,0 @@ -In order to use the supplied experiment script issue123.py, you need -to update the variable REPO_NAME (or REPO) to provide the path to -your repository. Furthermore, standard_experiment.py needs to have -updated paths (at the top of the file, search for seiverss). diff --git a/experiments/issue123/issue123.py b/experiments/issue123/issue123.py deleted file mode 100755 index 9795c28d54..0000000000 --- a/experiments/issue123/issue123.py +++ /dev/null @@ -1,140 +0,0 @@ -#! /usr/bin/env python - -from standard_experiment import REMOTE, get_exp - -from downward import suites -#from lab.reports import Attribute, avg - -import os.path - -# Set the following variables for the experiment -REPO_NAME = 'fd-issue123' -# revisions, e.g. ['3d6c1ccacdce'] -REVISIONS = ['issue123-base'] -# suites, e.g. 
['gripper:prob01.pddl', 'zenotravel:pfile1'] or suites.suite_satisficing_with_ipc11() -LOCAL_SUITE = ['depot:pfile1'] -GRID_SUITE = suites.suite_satisficing_with_ipc11() -# configs, e.g. '--search', 'astar(lmcut())' for config -CONFIGS = { - - 'lama-2011': [ - "--if-unit-cost", - "--heuristic", - "hlm,hff=lm_ff_syn(lm_rhw(reasonable_orders=true))", - "--search", "iterated([" - " lazy_greedy([hff,hlm],preferred=[hff,hlm])," - " lazy_wastar([hff,hlm],preferred=[hff,hlm],w=5)," - " lazy_wastar([hff,hlm],preferred=[hff,hlm],w=3)," - " lazy_wastar([hff,hlm],preferred=[hff,hlm],w=2)," - " lazy_wastar([hff,hlm],preferred=[hff,hlm],w=1)" - " ],repeat_last=true,continue_on_fail=true)", - "--if-non-unit-cost", - "--heuristic", - "hlm1,hff1=lm_ff_syn(lm_rhw(reasonable_orders=true," - " lm_cost_type=one,cost_type=one))", - "--heuristic", - "hlm2,hff2=lm_ff_syn(lm_rhw(reasonable_orders=true," - " lm_cost_type=plusone,cost_type=plusone))", - "--search", "iterated([" - " lazy_greedy([hff1,hlm1],preferred=[hff1,hlm1]," - " cost_type=one,reopen_closed=false)," - " lazy_greedy([hff2,hlm2],preferred=[hff2,hlm2]," - " reopen_closed=false)," - " lazy_wastar([hff2,hlm2],preferred=[hff2,hlm2],w=5)," - " lazy_wastar([hff2,hlm2],preferred=[hff2,hlm2],w=3)," - " lazy_wastar([hff2,hlm2],preferred=[hff2,hlm2],w=2)," - " lazy_wastar([hff2,hlm2],preferred=[hff2,hlm2],w=1)" - " ],repeat_last=true,continue_on_fail=true)", - ], - - 'lama-2011-first-it': [ - "--heuristic", - "hlm,hff=lm_ff_syn(lm_rhw(reasonable_orders=true," - " lm_cost_type=one,cost_type=one))", - "--search", - "lazy_greedy([hff,hlm],preferred=[hff,hlm],cost_type=one)" - ], - - 'lama-2011-separated': [ - "--if-unit-cost", - "--heuristic", - "hlm=lmcount(lm_rhw(reasonable_orders=true),pref=true)", - "--heuristic", - "hff=ff()", - "--search", "iterated([" - " lazy_greedy([hff,hlm],preferred=[hff,hlm])," - " lazy_wastar([hff,hlm],preferred=[hff,hlm],w=5)," - " lazy_wastar([hff,hlm],preferred=[hff,hlm],w=3)," - " 
lazy_wastar([hff,hlm],preferred=[hff,hlm],w=2)," - " lazy_wastar([hff,hlm],preferred=[hff,hlm],w=1)" - " ],repeat_last=true,continue_on_fail=true)", - "--if-non-unit-cost", - "--heuristic", - "hlm1=lmcount(lm_rhw(reasonable_orders=true," - " lm_cost_type=one,cost_type=one)," - " pref=true,cost_type=one)", - "--heuristic", - "hff1=ff(cost_type=one)", - "--heuristic", - "hlm2=lmcount(lm_rhw(reasonable_orders=true," - " lm_cost_type=plusone,cost_type=plusone)," - " pref=true,cost_type=plusone)", - "--heuristic", - "hff2=ff(cost_type=plusone)", - "--search", "iterated([" - " lazy_greedy([hff1,hlm1],preferred=[hff1,hlm1]," - " cost_type=one,reopen_closed=false)," - " lazy_greedy([hff2,hlm2],preferred=[hff2,hlm2]," - " reopen_closed=false)," - " lazy_wastar([hff2,hlm2],preferred=[hff2,hlm2],w=5)," - " lazy_wastar([hff2,hlm2],preferred=[hff2,hlm2],w=3)," - " lazy_wastar([hff2,hlm2],preferred=[hff2,hlm2],w=2)," - " lazy_wastar([hff2,hlm2],preferred=[hff2,hlm2],w=1)" - " ],repeat_last=true,continue_on_fail=true)", - ], - - 'lama-2011-first-it-separated': [ - "--heuristic", - "hlm=lmcount(lm_rhw(reasonable_orders=true," - " lm_cost_type=one,cost_type=one)," - " pref=true,cost_type=one)", - "--heuristic", - "hff=ff(cost_type=one)", - "--search", - "lazy_greedy([hff,hlm],preferred=[hff,hlm],cost_type=one)", - ], -} - -# limits, e.g. { 'search_time': 120 } -LIMITS = None -# for 'make debug', set to True. -COMPILATION_OPTION = None #(default: 'release') -# choose any lower priority if whished -PRIORITY = None #(default: 0) - - -# Do not change anything below here -SCRIPT_PATH = os.path.abspath(__file__) -if REMOTE: - SUITE = GRID_SUITE - REPO = os.path.expanduser('~/repos/' + REPO_NAME) -else: - SUITE = LOCAL_SUITE - REPO = os.path.expanduser('~/work/' + REPO_NAME) - - -# Create the experiment. Add parsers, fetchers or reports... 
-exp = get_exp(script_path=SCRIPT_PATH, repo=REPO, suite=SUITE, - configs=CONFIGS, revisions=REVISIONS, limits=LIMITS, - compilation_option=COMPILATION_OPTION, priority=PRIORITY) -exp.add_score_attributes() -exp.add_extra_attributes(['quality']) - -REV = REVISIONS[0] -configs_lama = [('%s-lama-2011' % REV, '%s-lama-2011-separated' % REV)] -exp.add_configs_report(compared_configs=configs_lama, name='lama') -configs_lama_first_it = [('%s-lama-2011-first-it' % REV, '%s-lama-2011-first-it-separated' % REV)] -exp.add_configs_report(compared_configs=configs_lama_first_it, name='lama-first-it') -exp.add_absolute_report() - -exp() diff --git a/experiments/issue182/common_setup.py b/experiments/issue182/common_setup.py deleted file mode 100644 index ff9d70d6f7..0000000000 --- a/experiments/issue182/common_setup.py +++ /dev/null @@ -1,357 +0,0 @@ -# -*- coding: utf-8 -*- - -import itertools -import os -import platform -import subprocess -import sys - -from lab.environments import LocalEnvironment, MaiaEnvironment -from lab.experiment import ARGPARSER -from lab.steps import Step -from lab import tools - -from downward.experiment import FastDownwardExperiment -from downward.reports.absolute import AbsoluteReport -from downward.reports.compare import CompareConfigsReport -from downward.reports.scatter import ScatterPlotReport - - -def parse_args(): - ARGPARSER.add_argument( - "--test", - choices=["yes", "no", "auto"], - default="auto", - dest="test_run", - help="test experiment locally on a small suite if --test=yes or " - "--test=auto and we are not on a cluster") - return ARGPARSER.parse_args() - -ARGS = parse_args() - - -def get_script(): - """Get file name of main script.""" - return tools.get_script_path() - - -def get_script_dir(): - """Get directory of main script. - - Usually a relative directory (depends on how it was called by the user.)""" - return os.path.dirname(get_script()) - - -def get_experiment_name(): - """Get name for experiment. 
- - Derived from the absolute filename of the main script, e.g. - "/ham/spam/eggs.py" => "spam-eggs".""" - script = os.path.abspath(get_script()) - script_dir = os.path.basename(os.path.dirname(script)) - script_base = os.path.splitext(os.path.basename(script))[0] - return "%s-%s" % (script_dir, script_base) - - -def get_data_dir(): - """Get data dir for the experiment. - - This is the subdirectory "data" of the directory containing - the main script.""" - return os.path.join(get_script_dir(), "data", get_experiment_name()) - - -def get_repo_base(): - """Get base directory of the repository, as an absolute path. - - Search upwards in the directory tree from the main script until a - directory with a subdirectory named ".hg" is found. - - Abort if the repo base cannot be found.""" - path = os.path.abspath(get_script_dir()) - while os.path.dirname(path) != path: - if os.path.exists(os.path.join(path, ".hg")): - return path - path = os.path.dirname(path) - sys.exit("repo base could not be found") - - -def is_running_on_cluster(): - node = platform.node() - return ( - "cluster" in node or - node.startswith("gkigrid") or - node in ["habakuk", "turtur"]) - - -def is_test_run(): - return ARGS.test_run == "yes" or ( - ARGS.test_run == "auto" and not is_running_on_cluster()) - - -def get_algo_nick(revision, config_nick): - return "{revision}-{config_nick}".format(**locals()) - - -class IssueConfig(object): - """Hold information about a planner configuration. - - See FastDownwardExperiment.add_algorithm() for documentation of the - constructor's options. 
- - """ - def __init__(self, nick, component_options, - build_options=None, driver_options=None): - self.nick = nick - self.component_options = component_options - self.build_options = build_options - self.driver_options = driver_options - - -class IssueExperiment(FastDownwardExperiment): - """Subclass of FastDownwardExperiment with some convenience features.""" - - DEFAULT_TEST_SUITE = "gripper:prob01.pddl" - - DEFAULT_TABLE_ATTRIBUTES = [ - "cost", - "coverage", - "error", - "evaluations", - "expansions", - "expansions_until_last_jump", - "generated", - "initial_h_value", - "memory", - "quality", - "run_dir", - "score_evaluations", - "score_expansions", - "score_generated", - "score_memory", - "score_search_time", - "score_total_time", - "search_time", - "total_time", - ] - - DEFAULT_SCATTER_PLOT_ATTRIBUTES = [ - "evaluations", - "expansions", - "expansions_until_last_jump", - "initial_h_value", - "memory", - "search_time", - "total_time", - ] - - PORTFOLIO_ATTRIBUTES = [ - "cost", - "coverage", - "error", - "plan_length", - "run_dir", - ] - - def __init__(self, suite, revisions=[], configs={}, grid_priority=None, - path=None, test_suite=None, email=None, processes=None, - **kwargs): - """ - If *revisions* is specified, it should be a non-empty - list of revisions, which specify which planner versions to use - in the experiment. The same versions are used for translator, - preprocessor and search. :: - - IssueExperiment(revisions=["issue123", "4b3d581643"], ...) - - *configs* must be a non-empty list of IssueConfig objects. :: - - IssueExperiment(..., configs=[ - IssueConfig("ff", ["--search", "eager_greedy(ff())"]), - IssueConfig( - "lama", [], - driver_options=["--alias", "seq-sat-lama-2011"]), - ]) - - *suite* sets the benchmarks for the experiment. It must be a - single string or a list of strings specifying domains or - tasks. The downward.suites module has many predefined - suites. 
:: - - IssueExperiment(..., suite=["grid", "gripper:prob01.pddl"]) - - from downward import suites - IssueExperiment(..., suite=suites.suite_all()) - IssueExperiment(..., suite=suites.suite_satisficing_with_ipc11()) - IssueExperiment(..., suite=suites.suite_optimal()) - - Use *grid_priority* to set the job priority for cluster - experiments. It must be in the range [-1023, 0] where 0 is the - highest priority. By default the priority is 0. :: - - IssueExperiment(..., grid_priority=-500) - - If *path* is specified, it must be the path to where the - experiment should be built (e.g. - /home/john/experiments/issue123/exp01/). If omitted, the - experiment path is derived automatically from the main - script's filename. Example:: - - script = experiments/issue123/exp01.py --> - path = experiments/issue123/data/issue123-exp01/ - - Specify *test_suite* to set the benchmarks for experiment test - runs. By default the first gripper task is used. - - IssueExperiment(..., test_suite=["depot:pfile1", "tpp:p01.pddl"]) - - If *email* is specified, it should be an email address. This - email address will be notified upon completion of the experiments - if it is run on the cluster. 
- """ - - if is_test_run(): - kwargs["environment"] = LocalEnvironment(processes=processes) - suite = test_suite or self.DEFAULT_TEST_SUITE - elif "environment" not in kwargs: - kwargs["environment"] = MaiaEnvironment( - priority=grid_priority, email=email) - - path = path or get_data_dir() - - FastDownwardExperiment.__init__(self, path=path, **kwargs) - - repo = get_repo_base() - for rev in revisions: - for config in configs: - self.add_algorithm( - get_algo_nick(rev, config.nick), - repo, - rev, - config.component_options, - build_options=config.build_options, - driver_options=config.driver_options) - - self.add_suite(os.path.join(repo, "benchmarks"), suite) - - self._revisions = revisions - self._configs = configs - - @classmethod - def _is_portfolio(cls, config_nick): - return "fdss" in config_nick - - @classmethod - def get_supported_attributes(cls, config_nick, attributes): - if cls._is_portfolio(config_nick): - return [attr for attr in attributes - if attr in cls.PORTFOLIO_ATTRIBUTES] - return attributes - - def add_absolute_report_step(self, **kwargs): - """Add step that makes an absolute report. - - Absolute reports are useful for experiments that don't - compare revisions. - - The report is written to the experiment evaluation directory. - - All *kwargs* will be passed to the AbsoluteReport class. If - the keyword argument *attributes* is not specified, a - default list of attributes is used. :: - - exp.add_absolute_report_step(attributes=["coverage"]) - - """ - kwargs.setdefault("attributes", self.DEFAULT_TABLE_ATTRIBUTES) - report = AbsoluteReport(**kwargs) - outfile = os.path.join(self.eval_dir, - get_experiment_name() + "." + - report.output_format) - self.add_report(report, outfile=outfile) - self.add_step(Step('publish-absolute-report', - subprocess.call, - ['publish', outfile])) - - def add_comparison_table_step(self, **kwargs): - """Add a step that makes pairwise revision comparisons. 
- - Create comparative reports for all pairs of Fast Downward - revisions. Each report pairs up the runs of the same config and - lists the two absolute attribute values and their difference - for all attributes in kwargs["attributes"]. - - All *kwargs* will be passed to the CompareConfigsReport class. - If the keyword argument *attributes* is not specified, a - default list of attributes is used. :: - - exp.add_comparison_table_step(attributes=["coverage"]) - - """ - kwargs.setdefault("attributes", self.DEFAULT_TABLE_ATTRIBUTES) - - def make_comparison_tables(): - for rev1, rev2 in itertools.combinations(self._revisions, 2): - compared_configs = [] - for config in self._configs: - config_nick = config.nick - compared_configs.append( - ("%s-%s" % (rev1, config_nick), - "%s-%s" % (rev2, config_nick), - "Diff (%s)" % config_nick)) - report = CompareConfigsReport(compared_configs, **kwargs) - outfile = os.path.join( - self.eval_dir, - "%s-%s-%s-compare" % (self.name, rev1, rev2) - + "." + report.output_format) - report(self.eval_dir, outfile) - - def publish_comparison_tables(): - for rev1, rev2 in itertools.combinations(self._revisions, 2): - outfile = os.path.join( - self.eval_dir, - "%s-%s-%s-compare" % (self.name, rev1, rev2) - + ".html") - subprocess.call(['publish', outfile]) - - self.add_step(Step("make-comparison-tables", make_comparison_tables)) - self.add_step(Step("publish-comparison-tables", publish_comparison_tables)) - - def add_scatter_plot_step(self, attributes=None): - """Add a step that creates scatter plots for all revision pairs. - - Create a scatter plot for each combination of attribute, - configuration and revisions pair. If *attributes* is not - specified, a list of common scatter plot attributes is used. - For portfolios all attributes except "cost", "coverage" and - "plan_length" will be ignored. 
:: - - exp.add_scatter_plot_step(attributes=["expansions"]) - - """ - if attributes is None: - attributes = self.DEFAULT_SCATTER_PLOT_ATTRIBUTES - scatter_dir = os.path.join(self.eval_dir, "scatter") - - def make_scatter_plot(config_nick, rev1, rev2, attribute): - name = "-".join([self.name, rev1, rev2, attribute, config_nick]) - print "Make scatter plot for", name - algo1 = "%s-%s" % (rev1, config_nick) - algo2 = "%s-%s" % (rev2, config_nick) - report = ScatterPlotReport( - filter_config=[algo1, algo2], - attributes=[attribute], - get_category=lambda run1, run2: run1["domain"], - legend_location=(1.3, 0.5)) - report( - self.eval_dir, - os.path.join(scatter_dir, rev1 + "-" + rev2, name)) - - def make_scatter_plots(): - for config in self._configs: - for rev1, rev2 in itertools.combinations(self._revisions, 2): - for attribute in self.get_supported_attributes( - config.nick, attributes): - make_scatter_plot(config.nick, rev1, rev2, attribute) - - self.add_step(Step("make-scatter-plots", make_scatter_plots)) diff --git a/experiments/issue182/suites.py b/experiments/issue182/suites.py deleted file mode 100644 index ec030b6d4c..0000000000 --- a/experiments/issue182/suites.py +++ /dev/null @@ -1,315 +0,0 @@ -# Benchmark suites from the Fast Downward benchmark collection. - -def suite_alternative_formulations(): - return ['airport-adl', 'no-mprime', 'no-mystery'] - - -def suite_ipc98_to_ipc04_adl(): - return [ - 'assembly', 'miconic-fulladl', 'miconic-simpleadl', - 'optical-telegraphs', 'philosophers', 'psr-large', - 'psr-middle', 'schedule', - ] - - -def suite_ipc98_to_ipc04_strips(): - return [ - 'airport', 'blocks', 'depot', 'driverlog', 'freecell', 'grid', - 'gripper', 'logistics00', 'logistics98', 'miconic', 'movie', - 'mprime', 'mystery', 'pipesworld-notankage', 'psr-small', - 'satellite', 'zenotravel', - ] - - -def suite_ipc98_to_ipc04(): - # All IPC1-4 domains, including the trivial Movie. 
- return sorted(suite_ipc98_to_ipc04_adl() + suite_ipc98_to_ipc04_strips()) - - -def suite_ipc06_adl(): - return [ - 'openstacks', - 'pathways', - 'trucks', - ] - - -def suite_ipc06_strips_compilations(): - return [ - 'openstacks-strips', - 'pathways-noneg', - 'trucks-strips', - ] - - -def suite_ipc06_strips(): - return [ - 'pipesworld-tankage', - 'rovers', - 'storage', - 'tpp', - ] - - -def suite_ipc06(): - return sorted(suite_ipc06_adl() + suite_ipc06_strips()) - - -def suite_ipc08_common_strips(): - return [ - 'parcprinter-08-strips', - 'pegsol-08-strips', - 'scanalyzer-08-strips', - ] - - -def suite_ipc08_opt_adl(): - return ['openstacks-opt08-adl'] - - -def suite_ipc08_opt_strips(): - return sorted(suite_ipc08_common_strips() + [ - 'elevators-opt08-strips', - 'openstacks-opt08-strips', - 'sokoban-opt08-strips', - 'transport-opt08-strips', - 'woodworking-opt08-strips', - ]) - - -def suite_ipc08_opt(): - return sorted(suite_ipc08_opt_strips() + suite_ipc08_opt_adl()) - - -def suite_ipc08_sat_adl(): - return ['openstacks-sat08-adl'] - - -def suite_ipc08_sat_strips(): - return sorted(suite_ipc08_common_strips() + [ - # Note: cyber-security is missing. 
- 'elevators-sat08-strips', - 'openstacks-sat08-strips', - 'sokoban-sat08-strips', - 'transport-sat08-strips', - 'woodworking-sat08-strips', - ]) - - -def suite_ipc08_sat(): - return sorted(suite_ipc08_sat_strips() + suite_ipc08_sat_adl()) - - -def suite_ipc08(): - return sorted(set(suite_ipc08_opt() + suite_ipc08_sat())) - - -def suite_ipc11_opt(): - return [ - 'barman-opt11-strips', - 'elevators-opt11-strips', - 'floortile-opt11-strips', - 'nomystery-opt11-strips', - 'openstacks-opt11-strips', - 'parcprinter-opt11-strips', - 'parking-opt11-strips', - 'pegsol-opt11-strips', - 'scanalyzer-opt11-strips', - 'sokoban-opt11-strips', - 'tidybot-opt11-strips', - 'transport-opt11-strips', - 'visitall-opt11-strips', - 'woodworking-opt11-strips', - ] - - -def suite_ipc11_sat(): - return [ - 'barman-sat11-strips', - 'elevators-sat11-strips', - 'floortile-sat11-strips', - 'nomystery-sat11-strips', - 'openstacks-sat11-strips', - 'parcprinter-sat11-strips', - 'parking-sat11-strips', - 'pegsol-sat11-strips', - 'scanalyzer-sat11-strips', - 'sokoban-sat11-strips', - 'tidybot-sat11-strips', - 'transport-sat11-strips', - 'visitall-sat11-strips', - 'woodworking-sat11-strips', - ] - - -def suite_ipc11(): - return sorted(suite_ipc11_opt() + suite_ipc11_sat()) - - -def suite_ipc14_agl_adl(): - return [ - 'cavediving-agl14-adl', - 'citycar-agl14-adl', - 'maintenance-agl14-adl', - ] - - -def suite_ipc14_agl_strips(): - return [ - 'barman-agl14-strips', - 'childsnack-agl14-strips', - 'floortile-agl14-strips', - 'ged-agl14-strips', - 'hiking-agl14-strips', - 'openstacks-agl14-strips', - 'parking-agl14-strips', - 'tetris-agl14-strips', - 'thoughtful-agl14-strips', - 'transport-agl14-strips', - 'visitall-agl14-strips', - ] - - -def suite_ipc14_agl(): - return sorted(suite_ipc14_agl_adl() + suite_ipc14_agl_strips()) - - -def suite_ipc14_mco_adl(): - return [ - 'cavediving-mco14-adl', - 'citycar-mco14-adl', - 'maintenance-mco14-adl', - ] - - -def suite_ipc14_mco_strips(): - return [ - 
'barman-mco14-strips', - 'childsnack-mco14-strips', - 'floortile-mco14-strips', - 'ged-mco14-strips', - 'hiking-mco14-strips', - 'openstacks-mco14-strips', - 'parking-mco14-strips', - 'tetris-mco14-strips', - 'thoughtful-mco14-strips', - 'transport-mco14-strips', - 'visitall-mco14-strips', - ] - - -def suite_ipc14_mco(): - return sorted(suite_ipc14_mco_adl() + suite_ipc14_mco_strips()) - - -def suite_ipc14_opt_adl(): - return [ - 'cavediving-opt14-adl', - 'citycar-opt14-adl', - 'maintenance-opt14-adl', - ] - - -def suite_ipc14_opt_strips(): - return [ - 'barman-opt14-strips', - 'childsnack-opt14-strips', - 'floortile-opt14-strips', - 'ged-opt14-strips', - 'hiking-opt14-strips', - 'openstacks-opt14-strips', - 'parking-opt14-strips', - 'tetris-opt14-strips', - 'tidybot-opt14-strips', - 'transport-opt14-strips', - 'visitall-opt14-strips', - ] - - -def suite_ipc14_opt(): - return sorted(suite_ipc14_opt_adl() + suite_ipc14_opt_strips()) - - -def suite_ipc14_sat_adl(): - return [ - 'cavediving-sat14-adl', - 'citycar-sat14-adl', - 'maintenance-sat14-adl', - ] - - -def suite_ipc14_sat_strips(): - return [ - 'barman-sat14-strips', - 'childsnack-sat14-strips', - 'floortile-sat14-strips', - 'ged-sat14-strips', - 'hiking-sat14-strips', - 'openstacks-sat14-strips', - 'parking-sat14-strips', - 'tetris-sat14-strips', - 'thoughtful-sat14-strips', - 'transport-sat14-strips', - 'visitall-sat14-strips', - ] - - -def suite_ipc14_sat(): - return sorted(suite_ipc14_sat_adl() + suite_ipc14_sat_strips()) - - -def suite_ipc14(): - return sorted( - suite_ipc14_agl() + suite_ipc14_mco() + - suite_ipc14_opt() + suite_ipc14_sat()) - - -def suite_unsolvable(): - # TODO: Add other unsolvable problems (Miconic-FullADL). - # TODO: Add 'fsc-grid-r:prize5x5_R.pddl' and 't0-uts:uts_r-02.pddl' - # if the extra-domains branch is merged. 
- return sorted( - ['mystery:prob%02d.pddl' % index - for index in [4, 5, 7, 8, 12, 16, 18, 21, 22, 23, 24]] + - ['miconic-fulladl:f21-3.pddl', 'miconic-fulladl:f30-2.pddl']) - - -def suite_optimal_adl(): - return sorted( - suite_ipc98_to_ipc04_adl() + suite_ipc06_adl() + - suite_ipc08_opt_adl()) - - -def suite_optimal_strips(): - return sorted( - suite_ipc98_to_ipc04_strips() + suite_ipc06_strips() + - suite_ipc06_strips_compilations() + suite_ipc08_opt_strips() + - suite_ipc11_opt()) - - -def suite_optimal(): - return sorted(suite_optimal_adl() + suite_optimal_strips()) - - -def suite_satisficing_adl(): - return sorted( - suite_ipc98_to_ipc04_adl() + suite_ipc06_adl() + - suite_ipc08_sat_adl()) - - -def suite_satisficing_strips(): - return sorted( - suite_ipc98_to_ipc04_strips() + suite_ipc06_strips() + - suite_ipc06_strips_compilations() + suite_ipc08_sat_strips() + - suite_ipc11_sat()) - - -def suite_satisficing(): - return sorted(suite_satisficing_adl() + suite_satisficing_strips()) - - -def suite_all(): - return sorted( - suite_ipc98_to_ipc04() + suite_ipc06() + - suite_ipc06_strips_compilations() + suite_ipc08() + - suite_ipc11() + suite_alternative_formulations()) diff --git a/experiments/issue182/v1-no-cache.py b/experiments/issue182/v1-no-cache.py deleted file mode 100755 index 4068b4dba7..0000000000 --- a/experiments/issue182/v1-no-cache.py +++ /dev/null @@ -1,40 +0,0 @@ -#! 
/usr/bin/env python -# -*- coding: utf-8 -*- - -import common_setup -from common_setup import IssueConfig, IssueExperiment -import suites - - -heuristics = [ - "{}(cache_estimates=false)".format(h) for h in ( - "pdb", "cpdbs", "diverse_potentials", "all_states_potential", - "initial_state_potential", "sample_based_potentials")] - -max_eval = "max([{}])".format(",".join(heuristics)) -ipc_max = "ipc_max([{}],cache_estimates=false)".format(",".join(heuristics)) - -configs = [ - IssueConfig( - name, - ["--search", "astar({})".format(eval_)]) - for name, eval_ in [("max", max_eval), ("ipc_max", ipc_max)] -] -revision = "8f1563b36fc7" - -exp = IssueExperiment( - revisions=[revision], - configs=configs, - suite=suites.suite_optimal_strips(), - test_suite=["depot:pfile1"], - email="jendrik.seipp@unibas.ch", -) - -exp.add_absolute_report_step() -exp.add_report( - common_setup.CompareConfigsReport( - [(revision + "-" + "ipc_max", revision + "-" + "max")], - attributes=IssueExperiment.DEFAULT_TABLE_ATTRIBUTES), - name=common_setup.get_experiment_name() + "-compare") - -exp() diff --git a/experiments/issue182/v1.py b/experiments/issue182/v1.py deleted file mode 100755 index 886f92f32b..0000000000 --- a/experiments/issue182/v1.py +++ /dev/null @@ -1,26 +0,0 @@ -#! 
/usr/bin/env python -# -*- coding: utf-8 -*- - -from common_setup import IssueConfig, IssueExperiment -import suites - - -configs = [ - IssueConfig( - func, - ["--search", "astar({}([ipdb(max_time=5),diverse_potentials(),all_states_potential(),initial_state_potential(),sample_based_potentials()]))".format(func)]) - for func in ["max", "ipc_max"] -] -revisions = ["8f1563b36fc7"] - -exp = IssueExperiment( - revisions=revisions, - configs=configs, - suite=suites.suite_optimal_strips(), - test_suite=["depot:pfile1"], - email="jendrik.seipp@unibas.ch", -) - -exp.add_absolute_report_step() - -exp() diff --git a/experiments/issue182/v2.py b/experiments/issue182/v2.py deleted file mode 100755 index 3cb37836db..0000000000 --- a/experiments/issue182/v2.py +++ /dev/null @@ -1,40 +0,0 @@ -#! /usr/bin/env python -# -*- coding: utf-8 -*- - -import common_setup -from common_setup import IssueConfig, IssueExperiment -import suites - - -heuristics = [ - "{}(cache_estimates=false)".format(h) for h in ( - "pdb", "cpdbs", "diverse_potentials", "all_states_potential", - "initial_state_potential", "sample_based_potentials")] - -max_eval = "max([{}])".format(",".join(heuristics)) -ipc_max = "ipc_max([{}],cache_estimates=false)".format(",".join(heuristics)) - -configs = [ - IssueConfig( - name, - ["--search", "astar({})".format(eval_)]) - for name, eval_ in [("max", max_eval), ("ipc_max", ipc_max)] -] -revision = "1e84d77e4e37" - -exp = IssueExperiment( - revisions=[revision], - configs=configs, - suite=suites.suite_optimal_strips(), - test_suite=["depot:pfile1"], - email="jendrik.seipp@unibas.ch", -) - -exp.add_absolute_report_step() -exp.add_report( - common_setup.CompareConfigsReport( - [(revision + "-" + "ipc_max", revision + "-" + "max")], - attributes=IssueExperiment.DEFAULT_TABLE_ATTRIBUTES), - name=common_setup.get_experiment_name() + "-compare") - -exp() diff --git a/experiments/issue198/common_setup.py b/experiments/issue198/common_setup.py deleted file mode 100644 index 
687019c482..0000000000 --- a/experiments/issue198/common_setup.py +++ /dev/null @@ -1,382 +0,0 @@ -# -*- coding: utf-8 -*- - -import itertools -import os -import platform -import subprocess -import sys - -from lab.experiment import ARGPARSER -from lab import tools - -from downward.experiment import FastDownwardExperiment -from downward.reports.absolute import AbsoluteReport -from downward.reports.compare import ComparativeReport -from downward.reports.scatter import ScatterPlotReport - -from relativescatter import RelativeScatterPlotReport - - -def parse_args(): - ARGPARSER.add_argument( - "--test", - choices=["yes", "no", "auto"], - default="auto", - dest="test_run", - help="test experiment locally on a small suite if --test=yes or " - "--test=auto and we are not on a cluster") - return ARGPARSER.parse_args() - -ARGS = parse_args() - - -DEFAULT_OPTIMAL_SUITE = [ - 'airport', 'barman-opt11-strips', 'barman-opt14-strips', 'blocks', - 'childsnack-opt14-strips', 'depot', 'driverlog', - 'elevators-opt08-strips', 'elevators-opt11-strips', - 'floortile-opt11-strips', 'floortile-opt14-strips', 'freecell', - 'ged-opt14-strips', 'grid', 'gripper', 'hiking-opt14-strips', - 'logistics00', 'logistics98', 'miconic', 'movie', 'mprime', - 'mystery', 'nomystery-opt11-strips', 'openstacks-opt08-strips', - 'openstacks-opt11-strips', 'openstacks-opt14-strips', - 'openstacks-strips', 'parcprinter-08-strips', - 'parcprinter-opt11-strips', 'parking-opt11-strips', - 'parking-opt14-strips', 'pathways-noneg', 'pegsol-08-strips', - 'pegsol-opt11-strips', 'pipesworld-notankage', - 'pipesworld-tankage', 'psr-small', 'rovers', 'satellite', - 'scanalyzer-08-strips', 'scanalyzer-opt11-strips', - 'sokoban-opt08-strips', 'sokoban-opt11-strips', 'storage', - 'tetris-opt14-strips', 'tidybot-opt11-strips', - 'tidybot-opt14-strips', 'tpp', 'transport-opt08-strips', - 'transport-opt11-strips', 'transport-opt14-strips', - 'trucks-strips', 'visitall-opt11-strips', 'visitall-opt14-strips', - 
'woodworking-opt08-strips', 'woodworking-opt11-strips', - 'zenotravel'] - -DEFAULT_SATISFICING_SUITE = [ - 'airport', 'assembly', 'barman-sat11-strips', - 'barman-sat14-strips', 'blocks', 'cavediving-14-adl', - 'childsnack-sat14-strips', 'citycar-sat14-adl', 'depot', - 'driverlog', 'elevators-sat08-strips', 'elevators-sat11-strips', - 'floortile-sat11-strips', 'floortile-sat14-strips', 'freecell', - 'ged-sat14-strips', 'grid', 'gripper', 'hiking-sat14-strips', - 'logistics00', 'logistics98', 'maintenance-sat14-adl', 'miconic', - 'miconic-fulladl', 'miconic-simpleadl', 'movie', 'mprime', - 'mystery', 'nomystery-sat11-strips', 'openstacks', - 'openstacks-sat08-adl', 'openstacks-sat08-strips', - 'openstacks-sat11-strips', 'openstacks-sat14-strips', - 'openstacks-strips', 'optical-telegraphs', 'parcprinter-08-strips', - 'parcprinter-sat11-strips', 'parking-sat11-strips', - 'parking-sat14-strips', 'pathways', 'pathways-noneg', - 'pegsol-08-strips', 'pegsol-sat11-strips', 'philosophers', - 'pipesworld-notankage', 'pipesworld-tankage', 'psr-large', - 'psr-middle', 'psr-small', 'rovers', 'satellite', - 'scanalyzer-08-strips', 'scanalyzer-sat11-strips', 'schedule', - 'sokoban-sat08-strips', 'sokoban-sat11-strips', 'storage', - 'tetris-sat14-strips', 'thoughtful-sat14-strips', - 'tidybot-sat11-strips', 'tpp', 'transport-sat08-strips', - 'transport-sat11-strips', 'transport-sat14-strips', 'trucks', - 'trucks-strips', 'visitall-sat11-strips', 'visitall-sat14-strips', - 'woodworking-sat08-strips', 'woodworking-sat11-strips', - 'zenotravel'] - - -def get_script(): - """Get file name of main script.""" - return tools.get_script_path() - - -def get_script_dir(): - """Get directory of main script. - - Usually a relative directory (depends on how it was called by the user.)""" - return os.path.dirname(get_script()) - - -def get_experiment_name(): - """Get name for experiment. - - Derived from the absolute filename of the main script, e.g. 
- "/ham/spam/eggs.py" => "spam-eggs".""" - script = os.path.abspath(get_script()) - script_dir = os.path.basename(os.path.dirname(script)) - script_base = os.path.splitext(os.path.basename(script))[0] - return "%s-%s" % (script_dir, script_base) - - -def get_data_dir(): - """Get data dir for the experiment. - - This is the subdirectory "data" of the directory containing - the main script.""" - return os.path.join(get_script_dir(), "data", get_experiment_name()) - - -def get_repo_base(): - """Get base directory of the repository, as an absolute path. - - Search upwards in the directory tree from the main script until a - directory with a subdirectory named ".hg" is found. - - Abort if the repo base cannot be found.""" - path = os.path.abspath(get_script_dir()) - while os.path.dirname(path) != path: - if os.path.exists(os.path.join(path, ".hg")): - return path - path = os.path.dirname(path) - sys.exit("repo base could not be found") - - -def is_running_on_cluster(): - node = platform.node() - return node.endswith(".scicore.unibas.ch") or node.endswith(".cluster.bc2.ch") - - -def is_test_run(): - return ARGS.test_run == "yes" or ( - ARGS.test_run == "auto" and not is_running_on_cluster()) - - -def get_algo_nick(revision, config_nick): - return "{revision}-{config_nick}".format(**locals()) - - -class IssueConfig(object): - """Hold information about a planner configuration. - - See FastDownwardExperiment.add_algorithm() for documentation of the - constructor's options. 
- - """ - def __init__(self, nick, component_options, - build_options=None, driver_options=None): - self.nick = nick - self.component_options = component_options - self.build_options = build_options - self.driver_options = driver_options - - -class IssueExperiment(FastDownwardExperiment): - """Subclass of FastDownwardExperiment with some convenience features.""" - - DEFAULT_TEST_SUITE = ["depot:p01.pddl", "gripper:prob01.pddl"] - - DEFAULT_TABLE_ATTRIBUTES = [ - "cost", - "coverage", - "error", - "evaluations", - "expansions", - "expansions_until_last_jump", - "generated", - "memory", - "quality", - "run_dir", - "score_evaluations", - "score_expansions", - "score_generated", - "score_memory", - "score_search_time", - "score_total_time", - "search_time", - "total_time", - ] - - DEFAULT_SCATTER_PLOT_ATTRIBUTES = [ - "evaluations", - "expansions", - "expansions_until_last_jump", - "initial_h_value", - "memory", - "search_time", - "total_time", - ] - - PORTFOLIO_ATTRIBUTES = [ - "cost", - "coverage", - "error", - "plan_length", - "run_dir", - ] - - def __init__(self, revisions=None, configs=None, path=None, **kwargs): - """ - - You can either specify both *revisions* and *configs* or none - of them. If they are omitted, you will need to call - exp.add_algorithm() manually. - - If *revisions* is given, it must be a non-empty list of - revision identifiers, which specify which planner versions to - use in the experiment. The same versions are used for - translator, preprocessor and search. :: - - IssueExperiment(revisions=["issue123", "4b3d581643"], ...) - - If *configs* is given, it must be a non-empty list of - IssueConfig objects. :: - - IssueExperiment(..., configs=[ - IssueConfig("ff", ["--search", "eager_greedy(ff())"]), - IssueConfig( - "lama", [], - driver_options=["--alias", "seq-sat-lama-2011"]), - ]) - - If *path* is specified, it must be the path to where the - experiment should be built (e.g. - /home/john/experiments/issue123/exp01/). 
If omitted, the - experiment path is derived automatically from the main - script's filename. Example:: - - script = experiments/issue123/exp01.py --> - path = experiments/issue123/data/issue123-exp01/ - - """ - - path = path or get_data_dir() - - FastDownwardExperiment.__init__(self, path=path, **kwargs) - - if (revisions and not configs) or (not revisions and configs): - raise ValueError( - "please provide either both or none of revisions and configs") - - for rev in revisions: - for config in configs: - self.add_algorithm( - get_algo_nick(rev, config.nick), - get_repo_base(), - rev, - config.component_options, - build_options=config.build_options, - driver_options=config.driver_options) - - self._revisions = revisions - self._configs = configs - - @classmethod - def _is_portfolio(cls, config_nick): - return "fdss" in config_nick - - @classmethod - def get_supported_attributes(cls, config_nick, attributes): - if cls._is_portfolio(config_nick): - return [attr for attr in attributes - if attr in cls.PORTFOLIO_ATTRIBUTES] - return attributes - - def add_absolute_report_step(self, **kwargs): - """Add step that makes an absolute report. - - Absolute reports are useful for experiments that don't compare - revisions. - - The report is written to the experiment evaluation directory. - - All *kwargs* will be passed to the AbsoluteReport class. If the - keyword argument *attributes* is not specified, a default list - of attributes is used. :: - - exp.add_absolute_report_step(attributes=["coverage"]) - - """ - kwargs.setdefault("attributes", self.DEFAULT_TABLE_ATTRIBUTES) - report = AbsoluteReport(**kwargs) - outfile = os.path.join( - self.eval_dir, - get_experiment_name() + "." + report.output_format) - self.add_report(report, outfile=outfile) - self.add_step( - 'publish-absolute-report', subprocess.call, ['publish', outfile]) - - def add_comparison_table_step(self, **kwargs): - """Add a step that makes pairwise revision comparisons. 
- - Create comparative reports for all pairs of Fast Downward - revisions. Each report pairs up the runs of the same config and - lists the two absolute attribute values and their difference - for all attributes in kwargs["attributes"]. - - All *kwargs* will be passed to the CompareConfigsReport class. - If the keyword argument *attributes* is not specified, a - default list of attributes is used. :: - - exp.add_comparison_table_step(attributes=["coverage"]) - - """ - kwargs.setdefault("attributes", self.DEFAULT_TABLE_ATTRIBUTES) - - def make_comparison_tables(): - for rev1, rev2 in itertools.combinations(self._revisions, 2): - compared_configs = [] - for config in self._configs: - config_nick = config.nick - compared_configs.append( - ("%s-%s" % (rev1, config_nick), - "%s-%s" % (rev2, config_nick), - "Diff (%s)" % config_nick)) - report = ComparativeReport(compared_configs, **kwargs) - outfile = os.path.join( - self.eval_dir, - "%s-%s-%s-compare.%s" % ( - self.name, rev1, rev2, report.output_format)) - report(self.eval_dir, outfile) - - def publish_comparison_tables(): - for rev1, rev2 in itertools.combinations(self._revisions, 2): - outfile = os.path.join( - self.eval_dir, - "%s-%s-%s-compare.html" % (self.name, rev1, rev2)) - subprocess.call(["publish", outfile]) - - self.add_step("make-comparison-tables", make_comparison_tables) - self.add_step( - "publish-comparison-tables", publish_comparison_tables) - - def add_scatter_plot_step(self, relative=False, attributes=None): - """Add step creating (relative) scatter plots for all revision pairs. - - Create a scatter plot for each combination of attribute, - configuration and revisions pair. If *attributes* is not - specified, a list of common scatter plot attributes is used. - For portfolios all attributes except "cost", "coverage" and - "plan_length" will be ignored. 
:: - - exp.add_scatter_plot_step(attributes=["expansions"]) - - """ - if relative: - report_class = RelativeScatterPlotReport - scatter_dir = os.path.join(self.eval_dir, "scatter-relative") - step_name = "make-relative-scatter-plots" - else: - report_class = ScatterPlotReport - scatter_dir = os.path.join(self.eval_dir, "scatter-absolute") - step_name = "make-absolute-scatter-plots" - if attributes is None: - attributes = self.DEFAULT_SCATTER_PLOT_ATTRIBUTES - - def make_scatter_plot(config_nick, rev1, rev2, attribute): - name = "-".join([self.name, rev1, rev2, attribute, config_nick]) - print "Make scatter plot for", name - algo1 = "{}-{}".format(rev1, config_nick) - algo2 = "{}-{}".format(rev2, config_nick) - report = report_class( - filter_config=[algo1, algo2], - attributes=[attribute], - get_category=lambda run1, run2: run1["domain"], - legend_location=(1.3, 0.5)) - report( - self.eval_dir, - os.path.join(scatter_dir, rev1 + "-" + rev2, name)) - - def make_scatter_plots(): - for config in self._configs: - for rev1, rev2 in itertools.combinations(self._revisions, 2): - for attribute in self.get_supported_attributes( - config.nick, attributes): - make_scatter_plot(config.nick, rev1, rev2, attribute) - - self.add_step(step_name, make_scatter_plots) diff --git a/experiments/issue198/relativescatter.py b/experiments/issue198/relativescatter.py deleted file mode 100644 index f74cb6e721..0000000000 --- a/experiments/issue198/relativescatter.py +++ /dev/null @@ -1,105 +0,0 @@ -# -*- coding: utf-8 -*- - -from collections import defaultdict - -from matplotlib import ticker - -from downward.reports.scatter import ScatterPlotReport -from downward.reports.plot import PlotReport, Matplotlib, MatplotlibPlot - - -# TODO: handle outliers - -# TODO: this is mostly copied from ScatterMatplotlib (scatter.py) -class RelativeScatterMatplotlib(Matplotlib): - @classmethod - def _plot(cls, report, axes, categories, styles): - # Display grid - axes.grid(b=True, linestyle='-', 
color='0.75') - - has_points = False - # Generate the scatter plots - for category, coords in sorted(categories.items()): - X, Y = zip(*coords) - axes.scatter(X, Y, s=42, label=category, **styles[category]) - if X and Y: - has_points = True - - if report.xscale == 'linear' or report.yscale == 'linear': - plot_size = report.missing_val * 1.01 - else: - plot_size = report.missing_val * 1.25 - - # make 5 ticks above and below 1 - yticks = [] - tick_step = report.ylim_top**(1/5.0) - for i in xrange(-5, 6): - yticks.append(tick_step**i) - axes.set_yticks(yticks) - axes.get_yaxis().set_major_formatter(ticker.ScalarFormatter()) - - axes.set_xlim(report.xlim_left or -1, report.xlim_right or plot_size) - axes.set_ylim(report.ylim_bottom or -1, report.ylim_top or plot_size) - - for axis in [axes.xaxis, axes.yaxis]: - MatplotlibPlot.change_axis_formatter( - axis, - report.missing_val if report.show_missing else None) - return has_points - - -class RelativeScatterPlotReport(ScatterPlotReport): - """ - Generate a scatter plot that shows a relative comparison of two - algorithms with regard to the given attribute. The attribute value - of algorithm 1 is shown on the x-axis and the relation to the value - of algorithm 2 on the y-axis. - """ - - def __init__(self, show_missing=True, get_category=None, **kwargs): - ScatterPlotReport.__init__(self, show_missing, get_category, **kwargs) - if self.output_format == 'tex': - raise "not supported" - else: - self.writer = RelativeScatterMatplotlib - - def _fill_categories(self, runs): - # We discard the *runs* parameter. 
- # Map category names to value tuples - categories = defaultdict(list) - self.ylim_bottom = 2 - self.ylim_top = 0.5 - self.xlim_left = float("inf") - for (domain, problem), runs in self.problem_runs.items(): - if len(runs) != 2: - continue - run1, run2 = runs - assert (run1['algorithm'] == self.algorithms[0] and - run2['algorithm'] == self.algorithms[1]) - val1 = run1.get(self.attribute) - val2 = run2.get(self.attribute) - if val1 is None or val2 is None: - continue - category = self.get_category(run1, run2) - assert val1 > 0, (domain, problem, self.algorithms[0], val1) - assert val2 > 0, (domain, problem, self.algorithms[1], val2) - x = val1 - y = val2 / float(val1) - - categories[category].append((x, y)) - - self.ylim_top = max(self.ylim_top, y) - self.ylim_bottom = min(self.ylim_bottom, y) - self.xlim_left = min(self.xlim_left, x) - - # center around 1 - if self.ylim_bottom < 1: - self.ylim_top = max(self.ylim_top, 1 / float(self.ylim_bottom)) - if self.ylim_top > 1: - self.ylim_bottom = min(self.ylim_bottom, 1 / float(self.ylim_top)) - return categories - - def _set_scales(self, xscale, yscale): - # ScatterPlot uses log-scaling on the x-axis by default. - PlotReport._set_scales( - self, xscale or self.attribute.scale or 'log', 'log') diff --git a/experiments/issue198/v2-opt.py b/experiments/issue198/v2-opt.py deleted file mode 100755 index ecea6c137b..0000000000 --- a/experiments/issue198/v2-opt.py +++ /dev/null @@ -1,60 +0,0 @@ -#! 
/usr/bin/env python -# -*- coding: utf-8 -*- - -import os - -from lab.environments import LocalEnvironment, BaselSlurmEnvironment - -import common_setup -from common_setup import IssueConfig, IssueExperiment -from relativescatter import RelativeScatterPlotReport - -DIR = os.path.dirname(os.path.abspath(__file__)) -BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"] -REVISIONS = ["issue198-v2-base", "issue198-v2"] -CONFIGS = [ - IssueConfig('astar_blind', ['--search', 'astar(blind)']), - IssueConfig('seq-opt-bjolp', [], driver_options=["--alias", "seq-opt-bjolp"]), -] -SUITE = common_setup.DEFAULT_OPTIMAL_SUITE -ENVIRONMENT = BaselSlurmEnvironment( - partition="infai_2", - email="salome.eriksson@unibas.ch", - export=["PATH", "DOWNWARD_BENCHMARKS"]) - -if common_setup.is_test_run(): - SUITE = IssueExperiment.DEFAULT_TEST_SUITE - ENVIRONMENT = LocalEnvironment(processes=1) - -exp = IssueExperiment( - revisions=REVISIONS, - configs=CONFIGS, - environment=ENVIRONMENT, -) -exp.add_suite(BENCHMARKS_DIR, SUITE) - -exp.add_parser(exp.EXITCODE_PARSER) -exp.add_parser(exp.TRANSLATOR_PARSER) -exp.add_parser(exp.SINGLE_SEARCH_PARSER) -exp.add_parser(exp.PLANNER_PARSER) - -exp.add_step('build', exp.build) -exp.add_step('start', exp.start_runs) -exp.add_fetcher(name='fetch') -exp.add_parse_again_step() - -#exp.add_absolute_report_step() -exp.add_comparison_table_step() - -for attribute in ["total_time"]: - for config in CONFIGS: - exp.add_report( - RelativeScatterPlotReport( - attributes=[attribute], - filter_algorithm=["{}-{}".format(rev, config.nick) for rev in REVISIONS], - get_category=lambda run1, run2: run1.get("domain"), - ), - outfile="{}-{}-{}-{}-{}.png".format(exp.name, attribute, config.nick, *REVISIONS) - ) - -exp.run_steps() diff --git a/experiments/issue213/base-opt.py b/experiments/issue213/base-opt.py deleted file mode 100755 index 4af10e2e36..0000000000 --- a/experiments/issue213/base-opt.py +++ /dev/null @@ -1,71 +0,0 @@ -#! 
/usr/bin/env python -# -*- coding: utf-8 -*- - -import itertools -import os - -from lab.environments import LocalEnvironment, MaiaEnvironment - -from downward.reports.compare import ComparativeReport - -import common_setup -from common_setup import IssueConfig, IssueExperiment - - -BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"] -REVISION = "issue213-base" -BUILDS = ["release32", "release64"] -SEARCHES = [ - ("bjolp", "astar(lmcount(lm_merged([lm_rhw(), lm_hm(m=1)]), admissible=true), mpd=true)"), - ("blind", "astar(blind())"), - ("cegar", "astar(cegar())"), - ("divpot", "astar(diverse_potentials())"), - ("ipdb", "astar(ipdb(max_time=900))"), - ("lmcut", "astar(lmcut())"), - ("mas", - "astar(merge_and_shrink(shrink_strategy=shrink_bisimulation(greedy=false), " - "merge_strategy=merge_dfp(), " - "label_reduction=exact(before_shrinking=true, before_merging=false), max_states=100000, threshold_before_merge=1))"), - ("seq", "astar(operatorcounting([state_equation_constraints()]))"), -] -CONFIGS = [ - IssueConfig( - "{nick}-{build}".format(**locals()), - ["--search", search], - build_options=[build], - driver_options=["--build", build]) - for nick, search in SEARCHES - for build in BUILDS -] -SUITE = common_setup.DEFAULT_OPTIMAL_SUITE -ENVIRONMENT = MaiaEnvironment( - priority=0, email="jendrik.seipp@unibas.ch") - -if common_setup.is_test_run(): - SUITE = IssueExperiment.DEFAULT_TEST_SUITE - ENVIRONMENT = LocalEnvironment(processes=1) - -exp = IssueExperiment( - revisions=[REVISION], - configs=CONFIGS, - environment=ENVIRONMENT, -) -exp.add_suite(BENCHMARKS_DIR, SUITE) - -exp.add_absolute_report_step() - -algorithm_pairs = [] -for build1, build2 in itertools.combinations(BUILDS, 2): - for config_nick, search in SEARCHES: - algorithm_pairs.append( - ("{REVISION}-{config_nick}-{build1}".format(**locals()), - "{REVISION}-{config_nick}-{build2}".format(**locals()), - "Diff ({})".format(config_nick))) -exp.add_report( - ComparativeReport( - algorithm_pairs, - 
attributes=IssueExperiment.DEFAULT_TABLE_ATTRIBUTES), - name="issue213-opt-comparison") -#exp.add_scatter_plot_step(attributes=["total_time", "memory"]) - -exp.run_steps() diff --git a/experiments/issue213/common_setup.py b/experiments/issue213/common_setup.py deleted file mode 100644 index 18d9aad057..0000000000 --- a/experiments/issue213/common_setup.py +++ /dev/null @@ -1,394 +0,0 @@ -# -*- coding: utf-8 -*- - -import itertools -import os -import platform -import subprocess -import sys - -from lab.experiment import ARGPARSER -from lab import tools - -from downward.experiment import FastDownwardExperiment -from downward.reports.absolute import AbsoluteReport -from downward.reports.compare import ComparativeReport -from downward.reports.scatter import ScatterPlotReport - -from relativescatter import RelativeScatterPlotReport - - -def parse_args(): - ARGPARSER.add_argument( - "--test", - choices=["yes", "no", "auto"], - default="auto", - dest="test_run", - help="test experiment locally on a small suite if --test=yes or " - "--test=auto and we are not on a cluster") - return ARGPARSER.parse_args() - -ARGS = parse_args() - - -DEFAULT_OPTIMAL_SUITE = [ - 'agricola-opt18-strips', 'airport', 'barman-opt11-strips', - 'barman-opt14-strips', 'blocks', 'childsnack-opt14-strips', - 'data-network-opt18-strips', 'depot', 'driverlog', - 'elevators-opt08-strips', 'elevators-opt11-strips', - 'floortile-opt11-strips', 'floortile-opt14-strips', 'freecell', - 'ged-opt14-strips', 'grid', 'gripper', 'hiking-opt14-strips', - 'logistics00', 'logistics98', 'miconic', 'movie', 'mprime', - 'mystery', 'nomystery-opt11-strips', 'openstacks-opt08-strips', - 'openstacks-opt11-strips', 'openstacks-opt14-strips', - 'openstacks-strips', 'organic-synthesis-opt18-strips', - 'organic-synthesis-split-opt18-strips', 'parcprinter-08-strips', - 'parcprinter-opt11-strips', 'parking-opt11-strips', - 'parking-opt14-strips', 'pathways-noneg', 'pegsol-08-strips', - 'pegsol-opt11-strips', 
'petri-net-alignment-opt18-strips', - 'pipesworld-notankage', 'pipesworld-tankage', 'psr-small', 'rovers', - 'satellite', 'scanalyzer-08-strips', 'scanalyzer-opt11-strips', - 'snake-opt18-strips', 'sokoban-opt08-strips', - 'sokoban-opt11-strips', 'spider-opt18-strips', 'storage', - 'termes-opt18-strips', 'tetris-opt14-strips', - 'tidybot-opt11-strips', 'tidybot-opt14-strips', 'tpp', - 'transport-opt08-strips', 'transport-opt11-strips', - 'transport-opt14-strips', 'trucks-strips', 'visitall-opt11-strips', - 'visitall-opt14-strips', 'woodworking-opt08-strips', - 'woodworking-opt11-strips', 'zenotravel'] - -DEFAULT_SATISFICING_SUITE = [ - 'agricola-sat18-strips', 'airport', 'assembly', - 'barman-sat11-strips', 'barman-sat14-strips', 'blocks', - 'caldera-sat18-adl', 'caldera-split-sat18-adl', 'cavediving-14-adl', - 'childsnack-sat14-strips', 'citycar-sat14-adl', - 'data-network-sat18-strips', 'depot', 'driverlog', - 'elevators-sat08-strips', 'elevators-sat11-strips', - 'flashfill-sat18-adl', 'floortile-sat11-strips', - 'floortile-sat14-strips', 'freecell', 'ged-sat14-strips', 'grid', - 'gripper', 'hiking-sat14-strips', 'logistics00', 'logistics98', - 'maintenance-sat14-adl', 'miconic', 'miconic-fulladl', - 'miconic-simpleadl', 'movie', 'mprime', 'mystery', - 'nomystery-sat11-strips', 'nurikabe-sat18-adl', 'openstacks', - 'openstacks-sat08-adl', 'openstacks-sat08-strips', - 'openstacks-sat11-strips', 'openstacks-sat14-strips', - 'openstacks-strips', 'optical-telegraphs', - 'organic-synthesis-sat18-strips', - 'organic-synthesis-split-sat18-strips', 'parcprinter-08-strips', - 'parcprinter-sat11-strips', 'parking-sat11-strips', - 'parking-sat14-strips', 'pathways', 'pathways-noneg', - 'pegsol-08-strips', 'pegsol-sat11-strips', 'philosophers', - 'pipesworld-notankage', 'pipesworld-tankage', 'psr-large', - 'psr-middle', 'psr-small', 'rovers', 'satellite', - 'scanalyzer-08-strips', 'scanalyzer-sat11-strips', 'schedule', - 'settlers-sat18-adl', 'snake-sat18-strips', 
'sokoban-sat08-strips', - 'sokoban-sat11-strips', 'spider-sat18-strips', 'storage', - 'termes-sat18-strips', 'tetris-sat14-strips', - 'thoughtful-sat14-strips', 'tidybot-sat11-strips', 'tpp', - 'transport-sat08-strips', 'transport-sat11-strips', - 'transport-sat14-strips', 'trucks', 'trucks-strips', - 'visitall-sat11-strips', 'visitall-sat14-strips', - 'woodworking-sat08-strips', 'woodworking-sat11-strips', - 'zenotravel'] - - -def get_script(): - """Get file name of main script.""" - return tools.get_script_path() - - -def get_script_dir(): - """Get directory of main script. - - Usually a relative directory (depends on how it was called by the user.)""" - return os.path.dirname(get_script()) - - -def get_experiment_name(): - """Get name for experiment. - - Derived from the absolute filename of the main script, e.g. - "/ham/spam/eggs.py" => "spam-eggs".""" - script = os.path.abspath(get_script()) - script_dir = os.path.basename(os.path.dirname(script)) - script_base = os.path.splitext(os.path.basename(script))[0] - return "%s-%s" % (script_dir, script_base) - - -def get_data_dir(): - """Get data dir for the experiment. - - This is the subdirectory "data" of the directory containing - the main script.""" - return os.path.join(get_script_dir(), "data", get_experiment_name()) - - -def get_repo_base(): - """Get base directory of the repository, as an absolute path. - - Search upwards in the directory tree from the main script until a - directory with a subdirectory named ".hg" is found. 
- - Abort if the repo base cannot be found.""" - path = os.path.abspath(get_script_dir()) - while os.path.dirname(path) != path: - if os.path.exists(os.path.join(path, ".hg")): - return path - path = os.path.dirname(path) - sys.exit("repo base could not be found") - - -def is_running_on_cluster(): - node = platform.node() - return node.endswith(".scicore.unibas.ch") or node.endswith(".cluster.bc2.ch") - - -def is_test_run(): - return ARGS.test_run == "yes" or ( - ARGS.test_run == "auto" and not is_running_on_cluster()) - - -def get_algo_nick(revision, config_nick): - return "{revision}-{config_nick}".format(**locals()) - - -class IssueConfig(object): - """Hold information about a planner configuration. - - See FastDownwardExperiment.add_algorithm() for documentation of the - constructor's options. - - """ - def __init__(self, nick, component_options, - build_options=None, driver_options=None): - self.nick = nick - self.component_options = component_options - self.build_options = build_options - self.driver_options = driver_options - - -class IssueExperiment(FastDownwardExperiment): - """Subclass of FastDownwardExperiment with some convenience features.""" - - DEFAULT_TEST_SUITE = ["depot:p01.pddl", "gripper:prob01.pddl"] - - DEFAULT_TABLE_ATTRIBUTES = [ - "cost", - "coverage", - "error", - "evaluations", - "expansions", - "expansions_until_last_jump", - "generated", - "memory", - "planner_memory", - "planner_time", - "quality", - "run_dir", - "score_evaluations", - "score_expansions", - "score_generated", - "score_memory", - "score_search_time", - "score_total_time", - "search_time", - "total_time", - ] - - DEFAULT_SCATTER_PLOT_ATTRIBUTES = [ - "evaluations", - "expansions", - "expansions_until_last_jump", - "initial_h_value", - "memory", - "search_time", - "total_time", - ] - - PORTFOLIO_ATTRIBUTES = [ - "cost", - "coverage", - "error", - "quality", - "run_dir", - ] - - def __init__(self, revisions=None, configs=None, path=None, **kwargs): - """ - - You can either 
specify both *revisions* and *configs* or none - of them. If they are omitted, you will need to call - exp.add_algorithm() manually. - - If *revisions* is given, it must be a non-empty list of - revision identifiers, which specify which planner versions to - use in the experiment. The same versions are used for - translator, preprocessor and search. :: - - IssueExperiment(revisions=["issue123", "4b3d581643"], ...) - - If *configs* is given, it must be a non-empty list of - IssueConfig objects. :: - - IssueExperiment(..., configs=[ - IssueConfig("ff", ["--search", "eager_greedy(ff())"]), - IssueConfig( - "lama", [], - driver_options=["--alias", "seq-sat-lama-2011"]), - ]) - - If *path* is specified, it must be the path to where the - experiment should be built (e.g. - /home/john/experiments/issue123/exp01/). If omitted, the - experiment path is derived automatically from the main - script's filename. Example:: - - script = experiments/issue123/exp01.py --> - path = experiments/issue123/data/issue123-exp01/ - - """ - - path = path or get_data_dir() - - FastDownwardExperiment.__init__(self, path=path, **kwargs) - - if (revisions and not configs) or (not revisions and configs): - raise ValueError( - "please provide either both or none of revisions and configs") - - for rev in revisions: - for config in configs: - self.add_algorithm( - get_algo_nick(rev, config.nick), - get_repo_base(), - rev, - config.component_options, - build_options=config.build_options, - driver_options=config.driver_options) - - self._revisions = revisions - self._configs = configs - - @classmethod - def _is_portfolio(cls, config_nick): - return "fdss" in config_nick - - @classmethod - def get_supported_attributes(cls, config_nick, attributes): - if cls._is_portfolio(config_nick): - return [attr for attr in attributes - if attr in cls.PORTFOLIO_ATTRIBUTES] - return attributes - - def add_absolute_report_step(self, **kwargs): - """Add step that makes an absolute report. 
- - Absolute reports are useful for experiments that don't compare - revisions. - - The report is written to the experiment evaluation directory. - - All *kwargs* will be passed to the AbsoluteReport class. If the - keyword argument *attributes* is not specified, a default list - of attributes is used. :: - - exp.add_absolute_report_step(attributes=["coverage"]) - - """ - kwargs.setdefault("attributes", self.DEFAULT_TABLE_ATTRIBUTES) - report = AbsoluteReport(**kwargs) - outfile = os.path.join( - self.eval_dir, - get_experiment_name() + "." + report.output_format) - self.add_report(report, outfile=outfile) - self.add_step( - 'publish-absolute-report', subprocess.call, ['publish', outfile]) - - def add_comparison_table_step(self, **kwargs): - """Add a step that makes pairwise revision comparisons. - - Create comparative reports for all pairs of Fast Downward - revisions. Each report pairs up the runs of the same config and - lists the two absolute attribute values and their difference - for all attributes in kwargs["attributes"]. - - All *kwargs* will be passed to the CompareConfigsReport class. - If the keyword argument *attributes* is not specified, a - default list of attributes is used. 
:: - - exp.add_comparison_table_step(attributes=["coverage"]) - - """ - kwargs.setdefault("attributes", self.DEFAULT_TABLE_ATTRIBUTES) - - def make_comparison_tables(): - for rev1, rev2 in itertools.combinations(self._revisions, 2): - compared_configs = [] - for config in self._configs: - config_nick = config.nick - compared_configs.append( - ("%s-%s" % (rev1, config_nick), - "%s-%s" % (rev2, config_nick), - "Diff (%s)" % config_nick)) - report = ComparativeReport(compared_configs, **kwargs) - outfile = os.path.join( - self.eval_dir, - "%s-%s-%s-compare.%s" % ( - self.name, rev1, rev2, report.output_format)) - report(self.eval_dir, outfile) - - def publish_comparison_tables(): - for rev1, rev2 in itertools.combinations(self._revisions, 2): - outfile = os.path.join( - self.eval_dir, - "%s-%s-%s-compare.html" % (self.name, rev1, rev2)) - subprocess.call(["publish", outfile]) - - self.add_step("make-comparison-tables", make_comparison_tables) - self.add_step( - "publish-comparison-tables", publish_comparison_tables) - - def add_scatter_plot_step(self, relative=False, attributes=None): - """Add step creating (relative) scatter plots for all revision pairs. - - Create a scatter plot for each combination of attribute, - configuration and revisions pair. If *attributes* is not - specified, a list of common scatter plot attributes is used. - For portfolios all attributes except "cost", "coverage" and - "plan_length" will be ignored. 
:: - - exp.add_scatter_plot_step(attributes=["expansions"]) - - """ - if relative: - report_class = RelativeScatterPlotReport - scatter_dir = os.path.join(self.eval_dir, "scatter-relative") - step_name = "make-relative-scatter-plots" - else: - report_class = ScatterPlotReport - scatter_dir = os.path.join(self.eval_dir, "scatter-absolute") - step_name = "make-absolute-scatter-plots" - if attributes is None: - attributes = self.DEFAULT_SCATTER_PLOT_ATTRIBUTES - - def make_scatter_plot(config_nick, rev1, rev2, attribute): - name = "-".join([self.name, rev1, rev2, attribute, config_nick]) - print "Make scatter plot for", name - algo1 = "{}-{}".format(rev1, config_nick) - algo2 = "{}-{}".format(rev2, config_nick) - report = report_class( - filter_config=[algo1, algo2], - attributes=[attribute], - get_category=lambda run1, run2: run1["domain"], - legend_location=(1.3, 0.5)) - report( - self.eval_dir, - os.path.join(scatter_dir, rev1 + "-" + rev2, name)) - - def make_scatter_plots(): - for config in self._configs: - for rev1, rev2 in itertools.combinations(self._revisions, 2): - for attribute in self.get_supported_attributes( - config.nick, attributes): - make_scatter_plot(config.nick, rev1, rev2, attribute) - - self.add_step(step_name, make_scatter_plots) diff --git a/experiments/issue213/custom-parser.py b/experiments/issue213/custom-parser.py deleted file mode 100755 index d06d7b52a2..0000000000 --- a/experiments/issue213/custom-parser.py +++ /dev/null @@ -1,21 +0,0 @@ -#! 
/usr/bin/env python - -from lab.parser import Parser - - -def main(): - parser = Parser() - parser.add_pattern( - "hash_set_load_factor", - "Hash set load factor: \d+/\d+ = (.+)", - required=False, - type=float) - parser.add_pattern( - "hash_set_resizings", - "Hash set resizings: (\d+)", - required=False, - type=int) - print "Running custom parser" - parser.parse() - -main() diff --git a/experiments/issue213/relativescatter.py b/experiments/issue213/relativescatter.py deleted file mode 100644 index f74cb6e721..0000000000 --- a/experiments/issue213/relativescatter.py +++ /dev/null @@ -1,105 +0,0 @@ -# -*- coding: utf-8 -*- - -from collections import defaultdict - -from matplotlib import ticker - -from downward.reports.scatter import ScatterPlotReport -from downward.reports.plot import PlotReport, Matplotlib, MatplotlibPlot - - -# TODO: handle outliers - -# TODO: this is mostly copied from ScatterMatplotlib (scatter.py) -class RelativeScatterMatplotlib(Matplotlib): - @classmethod - def _plot(cls, report, axes, categories, styles): - # Display grid - axes.grid(b=True, linestyle='-', color='0.75') - - has_points = False - # Generate the scatter plots - for category, coords in sorted(categories.items()): - X, Y = zip(*coords) - axes.scatter(X, Y, s=42, label=category, **styles[category]) - if X and Y: - has_points = True - - if report.xscale == 'linear' or report.yscale == 'linear': - plot_size = report.missing_val * 1.01 - else: - plot_size = report.missing_val * 1.25 - - # make 5 ticks above and below 1 - yticks = [] - tick_step = report.ylim_top**(1/5.0) - for i in xrange(-5, 6): - yticks.append(tick_step**i) - axes.set_yticks(yticks) - axes.get_yaxis().set_major_formatter(ticker.ScalarFormatter()) - - axes.set_xlim(report.xlim_left or -1, report.xlim_right or plot_size) - axes.set_ylim(report.ylim_bottom or -1, report.ylim_top or plot_size) - - for axis in [axes.xaxis, axes.yaxis]: - MatplotlibPlot.change_axis_formatter( - axis, - report.missing_val if 
report.show_missing else None) - return has_points - - -class RelativeScatterPlotReport(ScatterPlotReport): - """ - Generate a scatter plot that shows a relative comparison of two - algorithms with regard to the given attribute. The attribute value - of algorithm 1 is shown on the x-axis and the relation to the value - of algorithm 2 on the y-axis. - """ - - def __init__(self, show_missing=True, get_category=None, **kwargs): - ScatterPlotReport.__init__(self, show_missing, get_category, **kwargs) - if self.output_format == 'tex': - raise "not supported" - else: - self.writer = RelativeScatterMatplotlib - - def _fill_categories(self, runs): - # We discard the *runs* parameter. - # Map category names to value tuples - categories = defaultdict(list) - self.ylim_bottom = 2 - self.ylim_top = 0.5 - self.xlim_left = float("inf") - for (domain, problem), runs in self.problem_runs.items(): - if len(runs) != 2: - continue - run1, run2 = runs - assert (run1['algorithm'] == self.algorithms[0] and - run2['algorithm'] == self.algorithms[1]) - val1 = run1.get(self.attribute) - val2 = run2.get(self.attribute) - if val1 is None or val2 is None: - continue - category = self.get_category(run1, run2) - assert val1 > 0, (domain, problem, self.algorithms[0], val1) - assert val2 > 0, (domain, problem, self.algorithms[1], val2) - x = val1 - y = val2 / float(val1) - - categories[category].append((x, y)) - - self.ylim_top = max(self.ylim_top, y) - self.ylim_bottom = min(self.ylim_bottom, y) - self.xlim_left = min(self.xlim_left, x) - - # center around 1 - if self.ylim_bottom < 1: - self.ylim_top = max(self.ylim_top, 1 / float(self.ylim_bottom)) - if self.ylim_top > 1: - self.ylim_bottom = min(self.ylim_bottom, 1 / float(self.ylim_top)) - return categories - - def _set_scales(self, xscale, yscale): - # ScatterPlot uses log-scaling on the x-axis by default. 
- PlotReport._set_scales( - self, xscale or self.attribute.scale or 'log', 'log') diff --git a/experiments/issue213/v2-blind-m32.py b/experiments/issue213/v2-blind-m32.py deleted file mode 100755 index 50e11e10a3..0000000000 --- a/experiments/issue213/v2-blind-m32.py +++ /dev/null @@ -1,49 +0,0 @@ -#! /usr/bin/env python -# -*- coding: utf-8 -*- - -import itertools -import os - -from lab.environments import LocalEnvironment, MaiaEnvironment - -from downward.reports.compare import ComparativeReport - -import common_setup -from common_setup import IssueConfig, IssueExperiment - - -BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"] -REVISIONS = ["issue213-v1", "issue213-v2"] -BUILDS = ["release32"] -SEARCHES = [ - ("blind", "astar(blind())"), -] -CONFIGS = [ - IssueConfig( - "{nick}-{build}".format(**locals()), - ["--search", search], - build_options=[build], - driver_options=["--build", build]) - for nick, search in SEARCHES - for build in BUILDS -] -SUITE = common_setup.DEFAULT_OPTIMAL_SUITE -ENVIRONMENT = MaiaEnvironment( - priority=0, email="jendrik.seipp@unibas.ch") - -if common_setup.is_test_run(): - SUITE = IssueExperiment.DEFAULT_TEST_SUITE - ENVIRONMENT = LocalEnvironment(processes=1) - -exp = IssueExperiment( - revisions=REVISIONS, - configs=CONFIGS, - environment=ENVIRONMENT, -) -exp.add_suite(BENCHMARKS_DIR, SUITE) - -exp.add_absolute_report_step() -exp.add_comparison_table_step() -exp.add_scatter_plot_step(attributes=["total_time", "memory"]) - -exp.run_steps() diff --git a/experiments/issue213/v2-blind-m64.py b/experiments/issue213/v2-blind-m64.py deleted file mode 100755 index 942d9bab0f..0000000000 --- a/experiments/issue213/v2-blind-m64.py +++ /dev/null @@ -1,49 +0,0 @@ -#! 
/usr/bin/env python -# -*- coding: utf-8 -*- - -import itertools -import os - -from lab.environments import LocalEnvironment, MaiaEnvironment - -from downward.reports.compare import ComparativeReport - -import common_setup -from common_setup import IssueConfig, IssueExperiment - - -BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"] -REVISIONS = ["issue213-v1", "issue213-v2"] -BUILDS = ["release64"] -SEARCHES = [ - ("blind", "astar(blind())"), -] -CONFIGS = [ - IssueConfig( - "{nick}-{build}".format(**locals()), - ["--search", search], - build_options=[build], - driver_options=["--build", build]) - for nick, search in SEARCHES - for build in BUILDS -] -SUITE = common_setup.DEFAULT_OPTIMAL_SUITE -ENVIRONMENT = MaiaEnvironment( - priority=0, email="jendrik.seipp@unibas.ch") - -if common_setup.is_test_run(): - SUITE = IssueExperiment.DEFAULT_TEST_SUITE - ENVIRONMENT = LocalEnvironment(processes=1) - -exp = IssueExperiment( - revisions=REVISIONS, - configs=CONFIGS, - environment=ENVIRONMENT, -) -exp.add_suite(BENCHMARKS_DIR, SUITE) - -exp.add_absolute_report_step() -exp.add_comparison_table_step() -exp.add_scatter_plot_step(attributes=["total_time", "memory"]) - -exp.run_steps() diff --git a/experiments/issue213/v3-blind.py b/experiments/issue213/v3-blind.py deleted file mode 100755 index 10e4bb6469..0000000000 --- a/experiments/issue213/v3-blind.py +++ /dev/null @@ -1,61 +0,0 @@ -#! 
/usr/bin/env python -# -*- coding: utf-8 -*- - -import itertools -import os - -from lab.environments import LocalEnvironment, MaiaEnvironment - -from downward.reports.compare import ComparativeReport - -import common_setup -from common_setup import IssueConfig, IssueExperiment - - -BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"] -REVISIONS = ["issue213-v2", "issue213-v3"] -BUILDS = ["release32", "release64"] -SEARCHES = [ - ("blind", "astar(blind())"), -] -CONFIGS = [ - IssueConfig( - "{nick}-{build}".format(**locals()), - ["--search", search], - build_options=[build], - driver_options=["--build", build]) - for nick, search in SEARCHES - for build in BUILDS -] -SUITE = common_setup.DEFAULT_OPTIMAL_SUITE -ENVIRONMENT = MaiaEnvironment( - priority=0, email="jendrik.seipp@unibas.ch") - -if common_setup.is_test_run(): - SUITE = IssueExperiment.DEFAULT_TEST_SUITE - ENVIRONMENT = LocalEnvironment(processes=1) - -exp = IssueExperiment( - revisions=REVISIONS, - configs=CONFIGS, - environment=ENVIRONMENT, -) -exp.add_suite(BENCHMARKS_DIR, SUITE) - -exp.add_absolute_report_step() - -algorithm_pairs = [] -revision1, revision2 = REVISIONS -for build in BUILDS: - for config_nick, search in SEARCHES: - algorithm_pairs.append( - ("{revision1}-{config_nick}-{build}".format(**locals()), - "{revision2}-{config_nick}-{build}".format(**locals()), - "Diff ({config_nick}-{build})".format(**locals()))) -exp.add_report( - ComparativeReport( - algorithm_pairs, - attributes=IssueExperiment.DEFAULT_TABLE_ATTRIBUTES), - name="issue213-v2-vs-v3-blind") - -exp.run_steps() diff --git a/experiments/issue213/v3-opt.py b/experiments/issue213/v3-opt.py deleted file mode 100755 index b307256fb5..0000000000 --- a/experiments/issue213/v3-opt.py +++ /dev/null @@ -1,109 +0,0 @@ -#! 
/usr/bin/env python -# -*- coding: utf-8 -*- - -import itertools -import os - -from lab.environments import LocalEnvironment, MaiaEnvironment - -from downward.reports.compare import ComparativeReport - -import common_setup -from common_setup import IssueConfig, IssueExperiment, RelativeScatterPlotReport - - -BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"] -REVISIONS = ["issue213-base", "issue213-v1", "issue213-v3"] -BUILDS = ["release32", "release64"] -SEARCHES = [ - ("bjolp", "astar(lmcount(lm_merged([lm_rhw(), lm_hm(m=1)]), admissible=true), mpd=true)"), - ("blind", "astar(blind())"), - ("cegar", "astar(cegar())"), - ("divpot", "astar(diverse_potentials())"), - ("ipdb", "astar(ipdb(max_time=900))"), - ("lmcut", "astar(lmcut())"), - ("mas", - "astar(merge_and_shrink(shrink_strategy=shrink_bisimulation(greedy=false), " - "merge_strategy=merge_dfp(), " - "label_reduction=exact(before_shrinking=true, before_merging=false), max_states=100000, threshold_before_merge=1))"), - ("seq", "astar(operatorcounting([state_equation_constraints()]))"), -] -CONFIGS = [ - IssueConfig( - "{nick}-{build}".format(**locals()), - ["--search", search], - build_options=[build], - driver_options=["--build", build]) - for nick, search in SEARCHES - for build in BUILDS -] -SUITE = common_setup.DEFAULT_OPTIMAL_SUITE -ENVIRONMENT = MaiaEnvironment( - priority=0, email="jendrik.seipp@unibas.ch") - -if common_setup.is_test_run(): - SUITE = IssueExperiment.DEFAULT_TEST_SUITE - ENVIRONMENT = LocalEnvironment(processes=1) - -exp = IssueExperiment( - revisions=REVISIONS, - configs=CONFIGS, - environment=ENVIRONMENT, -) -exp.add_suite(BENCHMARKS_DIR, SUITE) -exp.add_absolute_report_step() - -attributes = [ - "coverage", "error", "expansions_until_last_jump", "memory", - "score_memory", "total_time", "score_total_time"] - -# Compare revisions. -# lmcut-base-32 vs. lmcut-v1-32 vs. lmcut-v3-32 -# lmcut-base-64 vs. lmcut-v1-64 vs. 
lmcut-v3-64 -for build in BUILDS: - for rev1, rev2 in itertools.combinations(REVISIONS, 2): - algorithm_pairs = [ - ("{rev1}-{config_nick}-{build}".format(**locals()), - "{rev2}-{config_nick}-{build}".format(**locals()), - "Diff ({config_nick}-{build})".format(**locals())) - for config_nick, search in SEARCHES] - exp.add_report( - ComparativeReport(algorithm_pairs, attributes=attributes), - name="issue213-{rev1}-vs-{rev2}-{build}".format(**locals())) - -# Compare builds. -# lmcut-base-32 vs. lmcut-base-64 -# lmcut-v1-32 vs. lmcut-v1-64 -# lmcut-v3-32 vs. lmcut v3-64 -for build1, build2 in itertools.combinations(BUILDS, 2): - for rev in REVISIONS: - algorithm_pairs = [ - ("{rev}-{config_nick}-{build1}".format(**locals()), - "{rev}-{config_nick}-{build2}".format(**locals()), - "Diff ({config_nick}-{rev})".format(**locals())) - for config_nick, search in SEARCHES] - exp.add_report( - ComparativeReport(algorithm_pairs, attributes=attributes), - name="issue213-{build1}-vs-{build2}-{rev}".format(**locals())) - -# Compare across revisions and builds. -# lmcut-base-32 vs. lmcut-v3-64 -build1, build2 = BUILDS -rev1, rev2 = "issue213-base", "issue213-v3" -algorithm_pairs = [ - ("{rev1}-{config_nick}-{build1}".format(**locals()), - "{rev2}-{config_nick}-{build2}".format(**locals()), - "Diff ({config_nick})".format(**locals())) - for config_nick, search in SEARCHES] -exp.add_report( - ComparativeReport(algorithm_pairs, attributes=attributes), - name="issue213-before-vs-after") - -for attribute in ["total_time", "memory"]: - exp.add_report( - RelativeScatterPlotReport( - attributes=[attribute], - filter_algorithm=["issue213-v1-blind-release32", "issue213-v3-blind-release32"]), - name="issue213-relative-scatter-blind-m32-v1-vs-v3-{}".format(attribute)) - -exp.run_steps() diff --git a/experiments/issue213/v4-blind.py b/experiments/issue213/v4-blind.py deleted file mode 100755 index e1b6ee52dc..0000000000 --- a/experiments/issue213/v4-blind.py +++ /dev/null @@ -1,87 +0,0 @@ -#! 
/usr/bin/env python -# -*- coding: utf-8 -*- - -import itertools -import os - -from lab.environments import LocalEnvironment, MaiaEnvironment - -from downward.reports.compare import ComparativeReport - -import common_setup -from common_setup import IssueConfig, IssueExperiment, RelativeScatterPlotReport - - -BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"] -REVISIONS = ["issue213-v1", "issue213-v3", "issue213-v4"] -BUILDS = ["release32", "release64"] -SEARCHES = [ - ("blind", "astar(blind())"), -] -CONFIGS = [ - IssueConfig( - "{nick}-{build}".format(**locals()), - ["--search", search], - build_options=[build], - driver_options=["--build", build]) - for nick, search in SEARCHES - for build in BUILDS -] -SUITE = common_setup.DEFAULT_OPTIMAL_SUITE -ENVIRONMENT = MaiaEnvironment( - priority=0, email="jendrik.seipp@unibas.ch") - -if common_setup.is_test_run(): - SUITE = IssueExperiment.DEFAULT_TEST_SUITE - ENVIRONMENT = LocalEnvironment(processes=1) - -exp = IssueExperiment( - revisions=REVISIONS, - configs=CONFIGS, - environment=ENVIRONMENT, -) -exp.add_suite(BENCHMARKS_DIR, SUITE) -exp.add_absolute_report_step() - -attributes = [ - "coverage", "error", "expansions_until_last_jump", "memory", - "score_memory", "total_time", "score_total_time", - "hash_set_load_factor", "hash_set_resizings"] - -# Compare revisions. -# lmcut-base-32 vs. lmcut-v1-32 vs. lmcut-v3-32 -# lmcut-base-64 vs. lmcut-v1-64 vs. lmcut-v3-64 -for build in BUILDS: - for rev1, rev2 in itertools.combinations(REVISIONS, 2): - algorithm_pairs = [ - ("{rev1}-{config_nick}-{build}".format(**locals()), - "{rev2}-{config_nick}-{build}".format(**locals()), - "Diff ({config_nick}-{build})".format(**locals())) - for config_nick, search in SEARCHES] - exp.add_report( - ComparativeReport(algorithm_pairs, attributes=attributes), - name="issue213-{rev1}-vs-{rev2}-{build}".format(**locals())) - -# Compare builds. -# lmcut-base-32 vs. lmcut-base-64 -# lmcut-v1-32 vs. lmcut-v1-64 -# lmcut-v3-32 vs. 
lmcut v3-64 -for build1, build2 in itertools.combinations(BUILDS, 2): - for rev in REVISIONS: - algorithm_pairs = [ - ("{rev}-{config_nick}-{build1}".format(**locals()), - "{rev}-{config_nick}-{build2}".format(**locals()), - "Diff ({config_nick}-{rev})".format(**locals())) - for config_nick, search in SEARCHES] - exp.add_report( - ComparativeReport(algorithm_pairs, attributes=attributes), - name="issue213-{build1}-vs-{build2}-{rev}".format(**locals())) - -for attribute in ["total_time", "memory"]: - exp.add_report( - RelativeScatterPlotReport( - attributes=[attribute], - filter_algorithm=["issue213-v1-blind-release64", "issue213-v4-blind-release64"]), - name="issue213-relative-scatter-blind-m64-v1-vs-v4-{}".format(attribute)) - -exp.run_steps() diff --git a/experiments/issue213/v5-blind.py b/experiments/issue213/v5-blind.py deleted file mode 100755 index 7a01a9fd12..0000000000 --- a/experiments/issue213/v5-blind.py +++ /dev/null @@ -1,89 +0,0 @@ -#! /usr/bin/env python -# -*- coding: utf-8 -*- - -import itertools -import os - -from lab.environments import LocalEnvironment, MaiaEnvironment - -from downward.reports.compare import ComparativeReport - -import common_setup -from common_setup import IssueConfig, IssueExperiment, RelativeScatterPlotReport - - -DIR = os.path.dirname(os.path.abspath(__file__)) -BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"] -REVISIONS = ["issue213-v1", "issue213-v4", "issue213-v5"] -BUILDS = ["release32", "release64"] -SEARCHES = [ - ("blind", "astar(blind())"), -] -CONFIGS = [ - IssueConfig( - "{nick}-{build}".format(**locals()), - ["--search", search], - build_options=[build], - driver_options=["--build", build]) - for nick, search in SEARCHES - for build in BUILDS -] -SUITE = ["pegsol-opt11-strips"] # common_setup.DEFAULT_OPTIMAL_SUITE -ENVIRONMENT = MaiaEnvironment( - priority=0, email="jendrik.seipp@unibas.ch") - -if common_setup.is_test_run(): - SUITE = IssueExperiment.DEFAULT_TEST_SUITE - ENVIRONMENT = 
LocalEnvironment(processes=1) - -exp = IssueExperiment( - revisions=REVISIONS, - configs=CONFIGS, - environment=ENVIRONMENT, -) -exp.add_suite(BENCHMARKS_DIR, SUITE) -exp.add_command('run-custom-parser', [os.path.join(DIR, 'custom-parser.py')]) -exp.add_absolute_report_step() - -attributes = [ - "coverage", "error", "expansions_until_last_jump", "memory", - "score_memory", "total_time", "score_total_time", - "hash_set_load_factor", "hash_set_resizings"] - -# Compare revisions. -# lmcut-base-32 vs. lmcut-v1-32 vs. lmcut-v3-32 -# lmcut-base-64 vs. lmcut-v1-64 vs. lmcut-v3-64 -for build in BUILDS: - for rev1, rev2 in itertools.combinations(REVISIONS, 2): - algorithm_pairs = [ - ("{rev1}-{config_nick}-{build}".format(**locals()), - "{rev2}-{config_nick}-{build}".format(**locals()), - "Diff ({config_nick}-{build})".format(**locals())) - for config_nick, search in SEARCHES] - exp.add_report( - ComparativeReport(algorithm_pairs, attributes=attributes), - name="issue213-{rev1}-vs-{rev2}-{build}".format(**locals())) - -# Compare builds. -# lmcut-base-32 vs. lmcut-base-64 -# lmcut-v1-32 vs. lmcut-v1-64 -# lmcut-v3-32 vs. 
lmcut v3-64 -for build1, build2 in itertools.combinations(BUILDS, 2): - for rev in REVISIONS: - algorithm_pairs = [ - ("{rev}-{config_nick}-{build1}".format(**locals()), - "{rev}-{config_nick}-{build2}".format(**locals()), - "Diff ({config_nick}-{rev})".format(**locals())) - for config_nick, search in SEARCHES] - exp.add_report( - ComparativeReport(algorithm_pairs, attributes=attributes), - name="issue213-{build1}-vs-{build2}-{rev}".format(**locals())) - -for attribute in ["total_time", "memory"]: - exp.add_report( - RelativeScatterPlotReport( - attributes=[attribute], - filter_algorithm=["issue213-v1-blind-release64", "issue213-v4-blind-release64"]), - name="issue213-relative-scatter-blind-m64-v1-vs-v4-{}".format(attribute)) - -exp.run_steps() diff --git a/experiments/issue213/v6-blind.py b/experiments/issue213/v6-blind.py deleted file mode 100755 index 0679f4c33c..0000000000 --- a/experiments/issue213/v6-blind.py +++ /dev/null @@ -1,89 +0,0 @@ -#! /usr/bin/env python -# -*- coding: utf-8 -*- - -import itertools -import os - -from lab.environments import LocalEnvironment, MaiaEnvironment - -from downward.reports.compare import ComparativeReport - -import common_setup -from common_setup import IssueConfig, IssueExperiment, RelativeScatterPlotReport - - -DIR = os.path.dirname(os.path.abspath(__file__)) -BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"] -REVISIONS = ["issue213-v1", "issue213-v5", "issue213-v6"] -BUILDS = ["release32", "release64"] -SEARCHES = [ - ("blind", "astar(blind())"), -] -CONFIGS = [ - IssueConfig( - "{nick}-{build}".format(**locals()), - ["--search", search], - build_options=[build], - driver_options=["--build", build]) - for nick, search in SEARCHES - for build in BUILDS -] -SUITE = common_setup.DEFAULT_OPTIMAL_SUITE -ENVIRONMENT = MaiaEnvironment( - priority=0, email="jendrik.seipp@unibas.ch") - -if common_setup.is_test_run(): - SUITE = IssueExperiment.DEFAULT_TEST_SUITE - ENVIRONMENT = LocalEnvironment(processes=1) - -exp = 
IssueExperiment( - revisions=REVISIONS, - configs=CONFIGS, - environment=ENVIRONMENT, -) -exp.add_suite(BENCHMARKS_DIR, SUITE) -exp.add_command('run-custom-parser', [os.path.join(DIR, 'custom-parser.py')]) -exp.add_absolute_report_step() - -attributes = [ - "coverage", "error", "expansions_until_last_jump", "memory", - "score_memory", "total_time", "score_total_time", - "hash_set_load_factor", "hash_set_resizings"] - -# Compare revisions. -# lmcut-base-32 vs. lmcut-v1-32 vs. lmcut-v3-32 -# lmcut-base-64 vs. lmcut-v1-64 vs. lmcut-v3-64 -for build in BUILDS: - for rev1, rev2 in itertools.combinations(REVISIONS, 2): - algorithm_pairs = [ - ("{rev1}-{config_nick}-{build}".format(**locals()), - "{rev2}-{config_nick}-{build}".format(**locals()), - "Diff ({config_nick}-{build})".format(**locals())) - for config_nick, search in SEARCHES] - exp.add_report( - ComparativeReport(algorithm_pairs, attributes=attributes), - name="issue213-{rev1}-vs-{rev2}-{build}".format(**locals())) - -# Compare builds. -# lmcut-base-32 vs. lmcut-base-64 -# lmcut-v1-32 vs. lmcut-v1-64 -# lmcut-v3-32 vs. 
lmcut v3-64 -for build1, build2 in itertools.combinations(BUILDS, 2): - for rev in REVISIONS: - algorithm_pairs = [ - ("{rev}-{config_nick}-{build1}".format(**locals()), - "{rev}-{config_nick}-{build2}".format(**locals()), - "Diff ({config_nick}-{rev})".format(**locals())) - for config_nick, search in SEARCHES] - exp.add_report( - ComparativeReport(algorithm_pairs, attributes=attributes), - name="issue213-{build1}-vs-{build2}-{rev}".format(**locals())) - -for attribute in ["total_time", "memory"]: - exp.add_report( - RelativeScatterPlotReport( - attributes=[attribute], - filter_algorithm=["issue213-v1-blind-release64", "issue213-v4-blind-release64"]), - name="issue213-relative-scatter-blind-m64-v1-vs-v4-{}".format(attribute)) - -exp.run_steps() diff --git a/experiments/issue213/v7-lama-30min.py b/experiments/issue213/v7-lama-30min.py deleted file mode 100755 index aceea1ea21..0000000000 --- a/experiments/issue213/v7-lama-30min.py +++ /dev/null @@ -1,70 +0,0 @@ -#! /usr/bin/env python -# -*- coding: utf-8 -*- - -import itertools -import os - -from lab.environments import LocalEnvironment, BaselSlurmEnvironment - -from downward.reports.compare import ComparativeReport - -import common_setup -from common_setup import IssueConfig, IssueExperiment -from relativescatter import RelativeScatterPlotReport - -DIR = os.path.dirname(os.path.abspath(__file__)) -SCRIPT_NAME = os.path.splitext(os.path.basename(__file__))[0] -BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"] -REVISIONS = ["issue213-v7"] -BUILDS = ["release32", "release64"] -CONFIGS = [ - IssueConfig( - "lama-" + build, - [], - build_options=[build], - driver_options=["--build", build, "--alias", "seq-sat-lama-2011"]) - for rev in REVISIONS - for build in BUILDS -] -SUITE = common_setup.DEFAULT_SATISFICING_SUITE -ENVIRONMENT = BaselSlurmEnvironment( - partition="infai_2", - email="jendrik.seipp@unibas.ch", - export=["PATH", "DOWNWARD_BENCHMARKS"]) - -if common_setup.is_test_run(): - SUITE = 
IssueExperiment.DEFAULT_TEST_SUITE - ENVIRONMENT = LocalEnvironment(processes=1) - -exp = IssueExperiment( - revisions=REVISIONS, - configs=CONFIGS, - environment=ENVIRONMENT, -) -exp.add_suite(BENCHMARKS_DIR, SUITE) - -exp.add_parser(exp.EXITCODE_PARSER) -exp.add_parser(exp.ANYTIME_SEARCH_PARSER) -exp.add_parser(exp.PLANNER_PARSER) - -exp.add_step('build', exp.build) -exp.add_step('start', exp.start_runs) -exp.add_fetcher(name='fetch') - -#exp.add_absolute_report_step() -#exp.add_comparison_table_step() - -attributes = IssueExperiment.DEFAULT_TABLE_ATTRIBUTES - -# Compare builds. -for build1, build2 in itertools.combinations(BUILDS, 2): - algorithm_pairs = [ - ("{rev}-{config_nick}-{build1}".format(**locals()), - "{rev}-{config_nick}-{build2}".format(**locals()), - "Diff ({config_nick}-{rev})".format(**locals())) - for config_nick in ["lama"]] - exp.add_report( - ComparativeReport(algorithm_pairs, attributes=attributes), - name="issue213-{build1}-vs-{build2}-{SCRIPT_NAME}".format(**locals())) - -exp.run_steps() diff --git a/experiments/issue213/v7-lama-5min.py b/experiments/issue213/v7-lama-5min.py deleted file mode 100755 index 15ad972ae3..0000000000 --- a/experiments/issue213/v7-lama-5min.py +++ /dev/null @@ -1,70 +0,0 @@ -#! 
/usr/bin/env python -# -*- coding: utf-8 -*- - -import itertools -import os - -from lab.environments import LocalEnvironment, BaselSlurmEnvironment - -from downward.reports.compare import ComparativeReport - -import common_setup -from common_setup import IssueConfig, IssueExperiment -from relativescatter import RelativeScatterPlotReport - -DIR = os.path.dirname(os.path.abspath(__file__)) -SCRIPT_NAME = os.path.splitext(os.path.basename(__file__))[0] -BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"] -REVISIONS = ["issue213-v7"] -BUILDS = ["release32", "release64"] -CONFIGS = [ - IssueConfig( - "lama-" + build, - [], - build_options=[build], - driver_options=["--build", build, "--alias", "seq-sat-lama-2011", "--overall-time-limit", "5m"]) - for rev in REVISIONS - for build in BUILDS -] -SUITE = common_setup.DEFAULT_SATISFICING_SUITE -ENVIRONMENT = BaselSlurmEnvironment( - partition="infai_2", - email="jendrik.seipp@unibas.ch", - export=["PATH", "DOWNWARD_BENCHMARKS"]) - -if common_setup.is_test_run(): - SUITE = IssueExperiment.DEFAULT_TEST_SUITE - ENVIRONMENT = LocalEnvironment(processes=1) - -exp = IssueExperiment( - revisions=REVISIONS, - configs=CONFIGS, - environment=ENVIRONMENT, -) -exp.add_suite(BENCHMARKS_DIR, SUITE) - -exp.add_parser(exp.EXITCODE_PARSER) -exp.add_parser(exp.ANYTIME_SEARCH_PARSER) -exp.add_parser(exp.PLANNER_PARSER) - -exp.add_step('build', exp.build) -exp.add_step('start', exp.start_runs) -exp.add_fetcher(name='fetch') - -#exp.add_absolute_report_step() -#exp.add_comparison_table_step() - -attributes = IssueExperiment.DEFAULT_TABLE_ATTRIBUTES - -# Compare builds. 
-for build1, build2 in itertools.combinations(BUILDS, 2): - algorithm_pairs = [ - ("{rev}-{config_nick}-{build1}".format(**locals()), - "{rev}-{config_nick}-{build2}".format(**locals()), - "Diff ({config_nick}-{rev})".format(**locals())) - for config_nick in ["lama"]] - exp.add_report( - ComparativeReport(algorithm_pairs, attributes=attributes), - name="issue213-{build1}-vs-{build2}-{SCRIPT_NAME}".format(**locals())) - -exp.run_steps() diff --git a/experiments/issue213/v7-lama-first-30min.py b/experiments/issue213/v7-lama-first-30min.py deleted file mode 100755 index 44c2daca8b..0000000000 --- a/experiments/issue213/v7-lama-first-30min.py +++ /dev/null @@ -1,88 +0,0 @@ -#! /usr/bin/env python -# -*- coding: utf-8 -*- - -import itertools -import os - -from lab.environments import LocalEnvironment, BaselSlurmEnvironment - -from downward.reports.compare import ComparativeReport - -import common_setup -from common_setup import IssueConfig, IssueExperiment -from relativescatter import RelativeScatterPlotReport - -DIR = os.path.dirname(os.path.abspath(__file__)) -SCRIPT_NAME = os.path.splitext(os.path.basename(__file__))[0] -BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"] -REVISIONS = ["issue213-v7"] -BUILDS = ["release32", "release64"] -CONFIG_NICKS = [ - ("lama-first-syn", [ - "--heuristic", - """hlm=lama_synergy(lm_rhw(reasonable_orders=true,lm_cost_type=one), - transform=adapt_costs(one))""", - "--heuristic", "hff=ff_synergy(hlm)", - "--search", """lazy_greedy([hff,hlm],preferred=[hff,hlm], - cost_type=one,reopen_closed=false)"""]), - ("lama-first-no-syn", [ - "--heuristic", - "hlm=lmcount(lm_factory=lm_rhw(reasonable_orders=true, lm_cost_type=one), transform=adapt_costs(one))", - "--heuristic", "hff=ff(transform=adapt_costs(one))", - "--search", """lazy_greedy([hff,hlm],preferred=[hff,hlm], - cost_type=one,reopen_closed=false)"""]), -] -CONFIGS = [ - IssueConfig( - config_nick + "-" + build, - config, - build_options=[build], - driver_options=["--build", build]) 
- for rev in REVISIONS - for build in BUILDS - for config_nick, config in CONFIG_NICKS -] - -SUITE = common_setup.DEFAULT_SATISFICING_SUITE -ENVIRONMENT = BaselSlurmEnvironment( - partition="infai_2", - email="jendrik.seipp@unibas.ch", - export=["PATH", "DOWNWARD_BENCHMARKS"]) - -if common_setup.is_test_run(): - SUITE = IssueExperiment.DEFAULT_TEST_SUITE - ENVIRONMENT = LocalEnvironment(processes=1) - -exp = IssueExperiment( - revisions=REVISIONS, - configs=CONFIGS, - environment=ENVIRONMENT, -) -exp.add_suite(BENCHMARKS_DIR, SUITE) - -exp.add_parser(exp.EXITCODE_PARSER) -exp.add_parser(exp.SINGLE_SEARCH_PARSER) -exp.add_parser(exp.PLANNER_PARSER) - -exp.add_step('build', exp.build) -exp.add_step('start', exp.start_runs) -exp.add_fetcher(name='fetch') - -#exp.add_absolute_report_step() -#exp.add_comparison_table_step() - -attributes = IssueExperiment.DEFAULT_TABLE_ATTRIBUTES - -# Compare builds. -for build1, build2 in itertools.combinations(BUILDS, 2): - algorithm_pairs = [ - ("{rev}-{config_nick}-{build1}".format(**locals()), - "{rev}-{config_nick}-{build2}".format(**locals()), - "Diff ({config_nick}-{rev})".format(**locals())) - for config_nick, _ in CONFIG_NICKS] - print algorithm_pairs - exp.add_report( - ComparativeReport(algorithm_pairs, attributes=attributes), - name="issue213-{build1}-vs-{build2}-{SCRIPT_NAME}".format(**locals())) - -exp.run_steps() diff --git a/experiments/issue213/v7-lama-first-pref-30min.py b/experiments/issue213/v7-lama-first-pref-30min.py deleted file mode 100755 index eb9777ef84..0000000000 --- a/experiments/issue213/v7-lama-first-pref-30min.py +++ /dev/null @@ -1,94 +0,0 @@ -#! 
/usr/bin/env python -# -*- coding: utf-8 -*- - -import itertools -import os - -from lab.environments import LocalEnvironment, BaselSlurmEnvironment - -from downward.reports.compare import ComparativeReport - -import common_setup -from common_setup import IssueConfig, IssueExperiment -from relativescatter import RelativeScatterPlotReport - -DIR = os.path.dirname(os.path.abspath(__file__)) -SCRIPT_NAME = os.path.splitext(os.path.basename(__file__))[0] -BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"] -REVISIONS = ["issue213-v7"] -BUILDS = ["release32", "release64"] -CONFIG_NICKS = [ - ("lama-first-syn", [ - "--heuristic", - """hlm=lama_synergy(lm_rhw(reasonable_orders=true,lm_cost_type=one), - transform=adapt_costs(one))""", - "--heuristic", "hff=ff_synergy(hlm)", - "--search", """lazy_greedy([hff,hlm],preferred=[hff,hlm], - cost_type=one,reopen_closed=false)"""]), - ("lama-first-no-syn-pref-false", [ - "--heuristic", - "hlm=lmcount(lm_factory=lm_rhw(reasonable_orders=true, lm_cost_type=one), transform=adapt_costs(one), pref=false)", - "--heuristic", "hff=ff(transform=adapt_costs(one))", - "--search", """lazy_greedy([hff,hlm],preferred=[hff,hlm], - cost_type=one,reopen_closed=false)"""]), - ("lama-first-no-syn-pref-true", [ - "--heuristic", - "hlm=lmcount(lm_factory=lm_rhw(reasonable_orders=true, lm_cost_type=one), transform=adapt_costs(one), pref=true)", - "--heuristic", "hff=ff(transform=adapt_costs(one))", - "--search", """lazy_greedy([hff,hlm],preferred=[hff,hlm], - cost_type=one,reopen_closed=false)"""]), -] -CONFIGS = [ - IssueConfig( - config_nick + "-" + build, - config, - build_options=[build], - driver_options=["--build", build]) - for rev in REVISIONS - for build in BUILDS - for config_nick, config in CONFIG_NICKS -] - -SUITE = common_setup.DEFAULT_SATISFICING_SUITE -ENVIRONMENT = BaselSlurmEnvironment( - partition="infai_2", - email="jendrik.seipp@unibas.ch", - export=["PATH", "DOWNWARD_BENCHMARKS"]) - -if common_setup.is_test_run(): - SUITE = 
IssueExperiment.DEFAULT_TEST_SUITE - ENVIRONMENT = LocalEnvironment(processes=1) - -exp = IssueExperiment( - revisions=REVISIONS, - configs=CONFIGS, - environment=ENVIRONMENT, -) -exp.add_suite(BENCHMARKS_DIR, SUITE) - -exp.add_parser(exp.EXITCODE_PARSER) -exp.add_parser(exp.SINGLE_SEARCH_PARSER) -exp.add_parser(exp.PLANNER_PARSER) - -exp.add_step('build', exp.build) -exp.add_step('start', exp.start_runs) -exp.add_fetcher(name='fetch') - -#exp.add_absolute_report_step() -#exp.add_comparison_table_step() - -attributes = IssueExperiment.DEFAULT_TABLE_ATTRIBUTES - -# Compare builds. -for build1, build2 in itertools.combinations(BUILDS, 2): - algorithm_pairs = [ - ("{rev}-{config_nick}-{build1}".format(**locals()), - "{rev}-{config_nick}-{build2}".format(**locals()), - "Diff ({config_nick}-{rev})".format(**locals())) - for config_nick, _ in CONFIG_NICKS] - print algorithm_pairs - exp.add_report( - ComparativeReport(algorithm_pairs, attributes=attributes), - name="issue213-{build1}-vs-{build2}-{SCRIPT_NAME}".format(**locals())) - -exp.run_steps() diff --git a/experiments/issue213/v7-opt-30min.py b/experiments/issue213/v7-opt-30min.py deleted file mode 100755 index 5ceeeb4dd5..0000000000 --- a/experiments/issue213/v7-opt-30min.py +++ /dev/null @@ -1,91 +0,0 @@ -#! 
/usr/bin/env python -# -*- coding: utf-8 -*- - -import itertools -import os - -from lab.environments import LocalEnvironment, BaselSlurmEnvironment - -from downward.reports.compare import ComparativeReport - -import common_setup -from common_setup import IssueConfig, IssueExperiment -from relativescatter import RelativeScatterPlotReport - -DIR = os.path.dirname(os.path.abspath(__file__)) -SCRIPT_NAME = os.path.splitext(os.path.basename(__file__))[0] -BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"] -REVISIONS = ["issue213-v7"] -BUILDS = ["release32", "release64"] -SEARCHES = [ - ("bjolp", "astar(lmcount(lm_merged([lm_rhw(), lm_hm(m=1)]), admissible=true), mpd=true)"), - ("blind", "astar(blind())"), - ("cegar", "astar(cegar())"), - ("divpot", "astar(diverse_potentials())"), - ("ipdb", "astar(ipdb())"), - ("lmcut", "astar(lmcut())"), - ("mas", - "astar(merge_and_shrink(shrink_strategy=shrink_bisimulation(greedy=false)," - " merge_strategy=merge_sccs(order_of_sccs=topological," - " merge_selector=score_based_filtering(scoring_functions=[goal_relevance, dfp, total_order]))," - " label_reduction=exact(before_shrinking=true, before_merging=false)," - " max_states=50000, threshold_before_merge=1))"), - ("seq", "astar(operatorcounting([state_equation_constraints(), lmcut_constraints()]))"), - ("blind-sss-simple", "astar(blind(), pruning=stubborn_sets_simple())"), - ("blind-sss-ec", "astar(blind(), pruning=stubborn_sets_ec())"), - ("h2", "astar(hm(m=2))"), - ("hmax", "astar(hmax())"), -] -CONFIGS = [ - IssueConfig( - "-".join([search_nick, build]), - ["--search", search], - build_options=[build], - driver_options=["--build", build, "--overall-time-limit", "30m"]) - for rev in REVISIONS - for build in BUILDS - for search_nick, search in SEARCHES -] -SUITE = common_setup.DEFAULT_OPTIMAL_SUITE -ENVIRONMENT = BaselSlurmEnvironment( - partition="infai_2", - email="jendrik.seipp@unibas.ch", - export=["PATH", "DOWNWARD_BENCHMARKS"]) - -if common_setup.is_test_run(): - SUITE = 
IssueExperiment.DEFAULT_TEST_SUITE - ENVIRONMENT = LocalEnvironment(processes=1) - -exp = IssueExperiment( - revisions=REVISIONS, - configs=CONFIGS, - environment=ENVIRONMENT, -) -exp.add_suite(BENCHMARKS_DIR, SUITE) - -exp.add_parser(exp.EXITCODE_PARSER) -exp.add_parser(exp.TRANSLATOR_PARSER) -exp.add_parser(exp.SINGLE_SEARCH_PARSER) -exp.add_parser(exp.PLANNER_PARSER) - -exp.add_step('build', exp.build) -exp.add_step('start', exp.start_runs) -exp.add_fetcher(name='fetch') - -#exp.add_absolute_report_step() -#exp.add_comparison_table_step() - -attributes = IssueExperiment.DEFAULT_TABLE_ATTRIBUTES - -# Compare builds. -for build1, build2 in itertools.combinations(BUILDS, 2): - algorithm_pairs = [ - ("{rev}-{search_nick}-{build1}".format(**locals()), - "{rev}-{search_nick}-{build2}".format(**locals()), - "Diff ({search_nick}-{rev})".format(**locals())) - for search_nick, search in SEARCHES] - exp.add_report( - ComparativeReport(algorithm_pairs, attributes=attributes), - name="issue213-{build1}-vs-{build2}-{SCRIPT_NAME}".format(**locals())) - -exp.run_steps() diff --git a/experiments/issue213/v7-opt-5min.py b/experiments/issue213/v7-opt-5min.py deleted file mode 100755 index 88690038e0..0000000000 --- a/experiments/issue213/v7-opt-5min.py +++ /dev/null @@ -1,91 +0,0 @@ -#! 
/usr/bin/env python -# -*- coding: utf-8 -*- - -import itertools -import os - -from lab.environments import LocalEnvironment, BaselSlurmEnvironment - -from downward.reports.compare import ComparativeReport - -import common_setup -from common_setup import IssueConfig, IssueExperiment -from relativescatter import RelativeScatterPlotReport - -DIR = os.path.dirname(os.path.abspath(__file__)) -SCRIPT_NAME = os.path.splitext(os.path.basename(__file__))[0] -BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"] -REVISIONS = ["issue213-v7"] -BUILDS = ["release32", "release64"] -SEARCHES = [ - ("bjolp", "astar(lmcount(lm_merged([lm_rhw(), lm_hm(m=1)]), admissible=true), mpd=true)"), - ("blind", "astar(blind())"), - ("cegar", "astar(cegar())"), - ("divpot", "astar(diverse_potentials())"), - ("ipdb", "astar(ipdb())"), - ("lmcut", "astar(lmcut())"), - ("mas", - "astar(merge_and_shrink(shrink_strategy=shrink_bisimulation(greedy=false)," - " merge_strategy=merge_sccs(order_of_sccs=topological," - " merge_selector=score_based_filtering(scoring_functions=[goal_relevance, dfp, total_order]))," - " label_reduction=exact(before_shrinking=true, before_merging=false)," - " max_states=50000, threshold_before_merge=1))"), - ("occ", "astar(operatorcounting([state_equation_constraints(), lmcut_constraints()]))"), - ("blind-sss-simple", "astar(blind(), pruning=stubborn_sets_simple())"), - ("blind-sss-ec", "astar(blind(), pruning=stubborn_sets_ec())"), - ("h2", "astar(hm(m=2))"), - ("hmax", "astar(hmax())"), -] -CONFIGS = [ - IssueConfig( - "-".join([search_nick, build]), - ["--search", search], - build_options=[build], - driver_options=["--build", build, "--overall-time-limit", "5m"]) - for rev in REVISIONS - for build in BUILDS - for search_nick, search in SEARCHES -] -SUITE = common_setup.DEFAULT_OPTIMAL_SUITE -ENVIRONMENT = BaselSlurmEnvironment( - partition="infai_2", - email="jendrik.seipp@unibas.ch", - export=["PATH", "DOWNWARD_BENCHMARKS"]) - -if common_setup.is_test_run(): - SUITE = 
IssueExperiment.DEFAULT_TEST_SUITE - ENVIRONMENT = LocalEnvironment(processes=1) - -exp = IssueExperiment( - revisions=REVISIONS, - configs=CONFIGS, - environment=ENVIRONMENT, -) -exp.add_suite(BENCHMARKS_DIR, SUITE) - -exp.add_parser(exp.EXITCODE_PARSER) -exp.add_parser(exp.TRANSLATOR_PARSER) -exp.add_parser(exp.SINGLE_SEARCH_PARSER) -exp.add_parser(exp.PLANNER_PARSER) - -exp.add_step('build', exp.build) -exp.add_step('start', exp.start_runs) -exp.add_fetcher(name='fetch') - -#exp.add_absolute_report_step() -#exp.add_comparison_table_step() - -attributes = IssueExperiment.DEFAULT_TABLE_ATTRIBUTES - -# Compare builds. -for build1, build2 in itertools.combinations(BUILDS, 2): - algorithm_pairs = [ - ("{rev}-{search_nick}-{build1}".format(**locals()), - "{rev}-{search_nick}-{build2}".format(**locals()), - "Diff ({search_nick}-{rev})".format(**locals())) - for search_nick, search in SEARCHES] - exp.add_report( - ComparativeReport(algorithm_pairs, attributes=attributes), - name="issue213-{build1}-vs-{build2}-{SCRIPT_NAME}".format(**locals())) - -exp.run_steps() diff --git a/experiments/issue213/v7-opt-extra-configs.py b/experiments/issue213/v7-opt-extra-configs.py deleted file mode 100755 index 7239429153..0000000000 --- a/experiments/issue213/v7-opt-extra-configs.py +++ /dev/null @@ -1,78 +0,0 @@ -#! 
/usr/bin/env python -# -*- coding: utf-8 -*- - -import itertools -import os - -from lab.environments import LocalEnvironment, BaselSlurmEnvironment - -from downward.reports.compare import ComparativeReport - -import common_setup -from common_setup import IssueConfig, IssueExperiment -from relativescatter import RelativeScatterPlotReport - -DIR = os.path.dirname(os.path.abspath(__file__)) -BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"] -REVISIONS = ["issue213-v7"] -BUILDS = ["release32", "release64"] -SEARCHES = [ - ("blind-sss-simple", "astar(blind(), pruning=stubborn_sets_simple())"), - ("blind-sss-ec", "astar(blind(), pruning=stubborn_sets_ec())"), - ("h2", "astar(hm(m=2))"), - ("hmax", "astar(hmax())"), -] -CONFIGS = [ - IssueConfig( - "-".join([search_nick, build]), - ["--search", search], - build_options=[build], - driver_options=["--build", build, "--overall-time-limit", "5m"]) - for rev in REVISIONS - for build in BUILDS - for search_nick, search in SEARCHES -] -SUITE = common_setup.DEFAULT_OPTIMAL_SUITE -ENVIRONMENT = BaselSlurmEnvironment( - partition="infai_2", - email="jendrik.seipp@unibas.ch", - export=["PATH", "DOWNWARD_BENCHMARKS"]) - -if common_setup.is_test_run(): - SUITE = IssueExperiment.DEFAULT_TEST_SUITE - ENVIRONMENT = LocalEnvironment(processes=1) - -exp = IssueExperiment( - revisions=REVISIONS, - configs=CONFIGS, - environment=ENVIRONMENT, -) -exp.add_suite(BENCHMARKS_DIR, SUITE) - -exp.add_parser(exp.EXITCODE_PARSER) -exp.add_parser(exp.TRANSLATOR_PARSER) -exp.add_parser(exp.SINGLE_SEARCH_PARSER) -exp.add_parser(exp.PLANNER_PARSER) - -exp.add_step('build', exp.build) -exp.add_step('start', exp.start_runs) -exp.add_fetcher(name='fetch') - -#exp.add_absolute_report_step() -#exp.add_comparison_table_step() - -attributes = IssueExperiment.DEFAULT_TABLE_ATTRIBUTES - -# Compare builds. 
-for build1, build2 in itertools.combinations(BUILDS, 2): - for rev in REVISIONS: - algorithm_pairs = [ - ("{rev}-{search_nick}-{build1}".format(**locals()), - "{rev}-{search_nick}-{build2}".format(**locals()), - "Diff ({search_nick}-{rev})".format(**locals())) - for search_nick, search in SEARCHES] - exp.add_report( - ComparativeReport(algorithm_pairs, attributes=attributes), - name="issue213-{build1}-vs-{build2}-{rev}".format(**locals())) - -exp.run_steps() diff --git a/experiments/issue213/v7-opt.py b/experiments/issue213/v7-opt.py deleted file mode 100755 index 9597ebd23b..0000000000 --- a/experiments/issue213/v7-opt.py +++ /dev/null @@ -1,87 +0,0 @@ -#! /usr/bin/env python -# -*- coding: utf-8 -*- - -import itertools -import os - -from lab.environments import LocalEnvironment, BaselSlurmEnvironment - -from downward.reports.compare import ComparativeReport - -import common_setup -from common_setup import IssueConfig, IssueExperiment -from relativescatter import RelativeScatterPlotReport - -DIR = os.path.dirname(os.path.abspath(__file__)) -BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"] -REVISIONS = ["issue213-v7"] -BUILDS = ["release32", "release64"] -SEARCHES = [ - ("bjolp", "astar(lmcount(lm_merged([lm_rhw(), lm_hm(m=1)]), admissible=true), mpd=true)"), - ("blind", "astar(blind())"), - ("cegar", "astar(cegar())"), - ("divpot", "astar(diverse_potentials())"), - ("ipdb", "astar(ipdb())"), - ("lmcut", "astar(lmcut())"), - ("mas", - "astar(merge_and_shrink(shrink_strategy=shrink_bisimulation(greedy=false)," - " merge_strategy=merge_sccs(order_of_sccs=topological," - " merge_selector=score_based_filtering(scoring_functions=[goal_relevance, dfp, total_order]))," - " label_reduction=exact(before_shrinking=true, before_merging=false)," - " max_states=50000, threshold_before_merge=1))"), - ("seq", "astar(operatorcounting([state_equation_constraints()]))"), -] -CONFIGS = [ - IssueConfig( - "-".join([search_nick, build]), - ["--search", search], - 
build_options=[build], - driver_options=["--build", build, "--overall-time-limit", "5m"]) - for rev in REVISIONS - for build in BUILDS - for search_nick, search in SEARCHES -] -SUITE = common_setup.DEFAULT_OPTIMAL_SUITE -ENVIRONMENT = BaselSlurmEnvironment( - partition="infai_2", - email="jendrik.seipp@unibas.ch", - export=["PATH", "DOWNWARD_BENCHMARKS"]) - -if common_setup.is_test_run(): - SUITE = IssueExperiment.DEFAULT_TEST_SUITE - ENVIRONMENT = LocalEnvironment(processes=1) - -exp = IssueExperiment( - revisions=REVISIONS, - configs=CONFIGS, - environment=ENVIRONMENT, -) -exp.add_suite(BENCHMARKS_DIR, SUITE) - -exp.add_parser(exp.EXITCODE_PARSER) -exp.add_parser(exp.TRANSLATOR_PARSER) -exp.add_parser(exp.SINGLE_SEARCH_PARSER) -exp.add_parser(exp.PLANNER_PARSER) - -exp.add_step('build', exp.build) -exp.add_step('start', exp.start_runs) -exp.add_fetcher(name='fetch') - -#exp.add_absolute_report_step() -#exp.add_comparison_table_step() - -attributes = IssueExperiment.DEFAULT_TABLE_ATTRIBUTES - -# Compare builds. -for build1, build2 in itertools.combinations(BUILDS, 2): - for rev in REVISIONS: - algorithm_pairs = [ - ("{rev}-{search_nick}-{build1}".format(**locals()), - "{rev}-{search_nick}-{build2}".format(**locals()), - "Diff ({search_nick}-{rev})".format(**locals())) - for search_nick, search in SEARCHES] - exp.add_report( - ComparativeReport(algorithm_pairs, attributes=attributes), - name="issue213-{build1}-vs-{build2}-{rev}".format(**locals())) - -exp.run_steps() diff --git a/experiments/issue213/v7-sat-30min.py b/experiments/issue213/v7-sat-30min.py deleted file mode 100755 index 4bffee1d01..0000000000 --- a/experiments/issue213/v7-sat-30min.py +++ /dev/null @@ -1,107 +0,0 @@ -#! 
/usr/bin/env python -# -*- coding: utf-8 -*- - -import itertools -import os - -from lab.environments import LocalEnvironment, BaselSlurmEnvironment - -from downward.reports.compare import ComparativeReport - -import common_setup -from common_setup import IssueConfig, IssueExperiment -from relativescatter import RelativeScatterPlotReport - -DIR = os.path.dirname(os.path.abspath(__file__)) -SCRIPT_NAME = os.path.splitext(os.path.basename(__file__))[0] -BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"] -REVISIONS = ["issue213-v7"] -BUILDS = ["release32", "release64"] -CONFIG_DICT = { - "eager_greedy_ff": [ - "--heuristic", - "h=ff()", - "--search", - "eager_greedy([h], preferred=[h])"], - "eager_greedy_cea": [ - "--heuristic", - "h=cea()", - "--search", - "eager_greedy([h], preferred=[h])"], - "lazy_greedy_add": [ - "--heuristic", - "h=add()", - "--search", - "lazy_greedy([h], preferred=[h])"], - "lazy_greedy_cg": [ - "--heuristic", - "h=cg()", - "--search", - "lazy_greedy([h], preferred=[h])"], - "lama-first": [ - "--heuristic", - """hlm=lama_synergy(lm_rhw(reasonable_orders=true,lm_cost_type=one), - transform=adapt_costs(one))""", - "--heuristic", "hff=ff_synergy(hlm)", - "--search", """lazy_greedy([hff,hlm],preferred=[hff,hlm], - cost_type=one,reopen_closed=false)"""], - "ff-typed": [ - "--heuristic", "hff=ff()", - "--search", - "lazy(alt([single(hff), single(hff, pref_only=true)," - " type_based([hff, g()])], boost=1000)," - " preferred=[hff], cost_type=one)"], -} -CONFIGS = [ - IssueConfig( - "-".join([config_nick, build]), - config, - build_options=[build], - driver_options=["--build", build, "--overall-time-limit", "30m"]) - for rev in REVISIONS - for build in BUILDS - for config_nick, config in CONFIG_DICT.items() -] -SUITE = common_setup.DEFAULT_SATISFICING_SUITE -ENVIRONMENT = BaselSlurmEnvironment( - partition="infai_2", - email="jendrik.seipp@unibas.ch", - export=["PATH", "DOWNWARD_BENCHMARKS"]) - -if common_setup.is_test_run(): - SUITE = 
IssueExperiment.DEFAULT_TEST_SUITE - ENVIRONMENT = LocalEnvironment(processes=1) - -exp = IssueExperiment( - revisions=REVISIONS, - configs=CONFIGS, - environment=ENVIRONMENT, -) -exp.add_suite(BENCHMARKS_DIR, SUITE) - -exp.add_parser(exp.EXITCODE_PARSER) -exp.add_parser(exp.TRANSLATOR_PARSER) -exp.add_parser(exp.SINGLE_SEARCH_PARSER) -exp.add_parser(exp.PLANNER_PARSER) - -exp.add_step('build', exp.build) -exp.add_step('start', exp.start_runs) -exp.add_fetcher(name='fetch') - -#exp.add_absolute_report_step() -#exp.add_comparison_table_step() - -attributes = IssueExperiment.DEFAULT_TABLE_ATTRIBUTES - -# Compare builds. -for build1, build2 in itertools.combinations(BUILDS, 2): - algorithm_pairs = [ - ("{rev}-{config_nick}-{build1}".format(**locals()), - "{rev}-{config_nick}-{build2}".format(**locals()), - "Diff ({config_nick}-{rev})".format(**locals())) - for config_nick, _ in CONFIG_DICT.items()] - exp.add_report( - ComparativeReport(algorithm_pairs, attributes=attributes), - name="issue213-{build1}-vs-{build2}-{SCRIPT_NAME}".format(**locals())) - -exp.run_steps() diff --git a/experiments/issue213/v7-sat-5min.py b/experiments/issue213/v7-sat-5min.py deleted file mode 100755 index 5d29d4245f..0000000000 --- a/experiments/issue213/v7-sat-5min.py +++ /dev/null @@ -1,107 +0,0 @@ -#! 
/usr/bin/env python -# -*- coding: utf-8 -*- - -import itertools -import os - -from lab.environments import LocalEnvironment, BaselSlurmEnvironment - -from downward.reports.compare import ComparativeReport - -import common_setup -from common_setup import IssueConfig, IssueExperiment -from relativescatter import RelativeScatterPlotReport - -DIR = os.path.dirname(os.path.abspath(__file__)) -SCRIPT_NAME = os.path.splitext(os.path.basename(__file__))[0] -BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"] -REVISIONS = ["issue213-v7"] -BUILDS = ["release32", "release64"] -CONFIG_DICT = { - "eager_greedy_ff": [ - "--heuristic", - "h=ff()", - "--search", - "eager_greedy([h], preferred=[h])"], - "eager_greedy_cea": [ - "--heuristic", - "h=cea()", - "--search", - "eager_greedy([h], preferred=[h])"], - "lazy_greedy_add": [ - "--heuristic", - "h=add()", - "--search", - "lazy_greedy([h], preferred=[h])"], - "lazy_greedy_cg": [ - "--heuristic", - "h=cg()", - "--search", - "lazy_greedy([h], preferred=[h])"], - "lama-first": [ - "--heuristic", - """hlm=lama_synergy(lm_rhw(reasonable_orders=true,lm_cost_type=one), - transform=adapt_costs(one))""", - "--heuristic", "hff=ff_synergy(hlm)", - "--search", """lazy_greedy([hff,hlm],preferred=[hff,hlm], - cost_type=one,reopen_closed=false)"""], - "ff-typed": [ - "--heuristic", "hff=ff()", - "--search", - "lazy(alt([single(hff), single(hff, pref_only=true)," - " type_based([hff, g()])], boost=1000)," - " preferred=[hff], cost_type=one)"], -} -CONFIGS = [ - IssueConfig( - "-".join([config_nick, build]), - config, - build_options=[build], - driver_options=["--build", build, "--overall-time-limit", "5m"]) - for rev in REVISIONS - for build in BUILDS - for config_nick, config in CONFIG_DICT.items() -] -SUITE = common_setup.DEFAULT_SATISFICING_SUITE -ENVIRONMENT = BaselSlurmEnvironment( - partition="infai_2", - email="jendrik.seipp@unibas.ch", - export=["PATH", "DOWNWARD_BENCHMARKS"]) - -if common_setup.is_test_run(): - SUITE = 
IssueExperiment.DEFAULT_TEST_SUITE - ENVIRONMENT = LocalEnvironment(processes=1) - -exp = IssueExperiment( - revisions=REVISIONS, - configs=CONFIGS, - environment=ENVIRONMENT, -) -exp.add_suite(BENCHMARKS_DIR, SUITE) - -exp.add_parser(exp.EXITCODE_PARSER) -exp.add_parser(exp.TRANSLATOR_PARSER) -exp.add_parser(exp.SINGLE_SEARCH_PARSER) -exp.add_parser(exp.PLANNER_PARSER) - -exp.add_step('build', exp.build) -exp.add_step('start', exp.start_runs) -exp.add_fetcher(name='fetch') - -#exp.add_absolute_report_step() -#exp.add_comparison_table_step() - -attributes = IssueExperiment.DEFAULT_TABLE_ATTRIBUTES - -# Compare builds. -for build1, build2 in itertools.combinations(BUILDS, 2): - algorithm_pairs = [ - ("{rev}-{config_nick}-{build1}".format(**locals()), - "{rev}-{config_nick}-{build2}".format(**locals()), - "Diff ({config_nick}-{rev})".format(**locals())) - for config_nick, _ in CONFIG_DICT.items()] - exp.add_report( - ComparativeReport(algorithm_pairs, attributes=attributes), - name="issue213-{build1}-vs-{build2}-{SCRIPT_NAME}".format(**locals())) - -exp.run_steps() diff --git a/experiments/issue213/v7-sat-extra-configs.py b/experiments/issue213/v7-sat-extra-configs.py deleted file mode 100755 index 458aa7b548..0000000000 --- a/experiments/issue213/v7-sat-extra-configs.py +++ /dev/null @@ -1,82 +0,0 @@ -#! 
/usr/bin/env python -# -*- coding: utf-8 -*- - -import itertools -import os - -from lab.environments import LocalEnvironment, BaselSlurmEnvironment - -from downward.reports.compare import ComparativeReport - -import common_setup -from common_setup import IssueConfig, IssueExperiment -from relativescatter import RelativeScatterPlotReport - -DIR = os.path.dirname(os.path.abspath(__file__)) -BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"] -REVISIONS = ["issue213-v7"] -BUILDS = ["release32", "release64"] -CONFIG_DICT = { - "lama-first-typed": [ - "--heuristic", "hlm=lama_synergy(lm_rhw(reasonable_orders=true,lm_cost_type=one), transform=adapt_costs(one))", - "--heuristic", "hff=ff_synergy(hlm)", - "--search", - "lazy(alt([single(hff), single(hff, pref_only=true)," - " single(hlm), single(hlm, pref_only=true), type_based([hff, g()])], boost=1000)," - " preferred=[hff,hlm], cost_type=one, reopen_closed=false, randomize_successors=True," - " preferred_successors_first=False)"], -} -CONFIGS = [ - IssueConfig( - "-".join([config_nick, build]), - config, - build_options=[build], - driver_options=["--build", build, "--overall-time-limit", "5m"]) - for rev in REVISIONS - for build in BUILDS - for config_nick, config in CONFIG_DICT.items() -] -SUITE = common_setup.DEFAULT_SATISFICING_SUITE -ENVIRONMENT = BaselSlurmEnvironment( - partition="infai_1", - email="jendrik.seipp@unibas.ch", - export=["PATH", "DOWNWARD_BENCHMARKS"]) - -if common_setup.is_test_run(): - SUITE = IssueExperiment.DEFAULT_TEST_SUITE - ENVIRONMENT = LocalEnvironment(processes=1) - -exp = IssueExperiment( - revisions=REVISIONS, - configs=CONFIGS, - environment=ENVIRONMENT, -) -exp.add_suite(BENCHMARKS_DIR, SUITE) - -exp.add_parser(exp.EXITCODE_PARSER) -exp.add_parser(exp.TRANSLATOR_PARSER) -exp.add_parser(exp.SINGLE_SEARCH_PARSER) -exp.add_parser(exp.PLANNER_PARSER) - -exp.add_step("build", exp.build) -exp.add_step("start", exp.start_runs) -exp.add_fetcher(name="fetch") - -#exp.add_absolute_report_step() 
-#exp.add_comparison_table_step() - -attributes = IssueExperiment.DEFAULT_TABLE_ATTRIBUTES - -# Compare builds. -for build1, build2 in itertools.combinations(BUILDS, 2): - for rev in REVISIONS: - algorithm_pairs = [ - ("{rev}-{config_nick}-{build1}".format(**locals()), - "{rev}-{config_nick}-{build2}".format(**locals()), - "Diff ({config_nick}-{rev})".format(**locals())) - for config_nick, _ in CONFIG_DICT.items()] - exp.add_report( - ComparativeReport(algorithm_pairs, attributes=attributes), - name="issue213-{build1}-vs-{build2}-{rev}".format(**locals())) - -exp.run_steps() diff --git a/experiments/issue213/v7-sat.py b/experiments/issue213/v7-sat.py deleted file mode 100755 index dd70c42fa9..0000000000 --- a/experiments/issue213/v7-sat.py +++ /dev/null @@ -1,101 +0,0 @@ -#! /usr/bin/env python -# -*- coding: utf-8 -*- - -import itertools -import os - -from lab.environments import LocalEnvironment, BaselSlurmEnvironment - -from downward.reports.compare import ComparativeReport - -import common_setup -from common_setup import IssueConfig, IssueExperiment -from relativescatter import RelativeScatterPlotReport - -DIR = os.path.dirname(os.path.abspath(__file__)) -BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"] -REVISIONS = ["issue213-v7"] -BUILDS = ["release32", "release64"] -CONFIG_DICT = { - "eager_greedy_ff": [ - "--heuristic", - "h=ff()", - "--search", - "eager_greedy([h], preferred=[h])"], - "eager_greedy_cea": [ - "--heuristic", - "h=cea()", - "--search", - "eager_greedy([h], preferred=[h])"], - "lazy_greedy_add": [ - "--heuristic", - "h=add()", - "--search", - "lazy_greedy([h], preferred=[h])"], - "lazy_greedy_cg": [ - "--heuristic", - "h=cg()", - "--search", - "lazy_greedy([h], preferred=[h])"], - "lama-first": [ - "--heuristic", - """hlm=lama_synergy(lm_rhw(reasonable_orders=true,lm_cost_type=one), - transform=adapt_costs(one))""", - "--heuristic", "hff=ff_synergy(hlm)", - "--search", """lazy_greedy([hff,hlm],preferred=[hff,hlm], - 
cost_type=one,reopen_closed=false)"""], -} -CONFIGS = [ - IssueConfig( - "-".join([config_nick, build]), - config, - build_options=[build], - driver_options=["--build", build, "--overall-time-limit", "5m"]) - for rev in REVISIONS - for build in BUILDS - for config_nick, config in CONFIG_DICT.items() -] -SUITE = common_setup.DEFAULT_SATISFICING_SUITE -ENVIRONMENT = BaselSlurmEnvironment( - partition="infai_2", - email="jendrik.seipp@unibas.ch", - export=["PATH", "DOWNWARD_BENCHMARKS"]) - -if common_setup.is_test_run(): - SUITE = IssueExperiment.DEFAULT_TEST_SUITE - ENVIRONMENT = LocalEnvironment(processes=1) - -exp = IssueExperiment( - revisions=REVISIONS, - configs=CONFIGS, - environment=ENVIRONMENT, -) -exp.add_suite(BENCHMARKS_DIR, SUITE) - -exp.add_parser(exp.EXITCODE_PARSER) -exp.add_parser(exp.TRANSLATOR_PARSER) -exp.add_parser(exp.SINGLE_SEARCH_PARSER) -exp.add_parser(exp.PLANNER_PARSER) - -exp.add_step('build', exp.build) -exp.add_step('start', exp.start_runs) -exp.add_fetcher(name='fetch') - -#exp.add_absolute_report_step() -#exp.add_comparison_table_step() - -attributes = IssueExperiment.DEFAULT_TABLE_ATTRIBUTES - -# Compare builds. -for build1, build2 in itertools.combinations(BUILDS, 2): - for rev in REVISIONS: - algorithm_pairs = [ - ("{rev}-{config_nick}-{build1}".format(**locals()), - "{rev}-{config_nick}-{build2}".format(**locals()), - "Diff ({config_nick}-{rev})".format(**locals())) - for config_nick, _ in CONFIG_DICT.items()] - exp.add_report( - ComparativeReport(algorithm_pairs, attributes=attributes), - name="issue213-{build1}-vs-{build2}-{rev}".format(**locals())) - -exp.run_steps() diff --git a/experiments/issue213/v8-lama-30min.py b/experiments/issue213/v8-lama-30min.py deleted file mode 100755 index 323449e093..0000000000 --- a/experiments/issue213/v8-lama-30min.py +++ /dev/null @@ -1,75 +0,0 @@ -#! 
/usr/bin/env python -# -*- coding: utf-8 -*- - -import itertools -import os -import subprocess - -from lab.environments import LocalEnvironment, BaselSlurmEnvironment - -from downward.reports.compare import ComparativeReport - -import common_setup -from common_setup import IssueConfig, IssueExperiment -from relativescatter import RelativeScatterPlotReport - -EXPNAME = common_setup.get_experiment_name() -DIR = os.path.dirname(os.path.abspath(__file__)) -BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"] -REVISIONS = ["issue213-v8"] -BUILDS = ["release32", "release64"] -CONFIGS = [ - IssueConfig( - "lama-" + build, - [], - build_options=[build], - driver_options=["--build", build, "--alias", "lama"]) - for rev in REVISIONS - for build in BUILDS -] -SUITE = common_setup.DEFAULT_SATISFICING_SUITE -ENVIRONMENT = BaselSlurmEnvironment( - partition="infai_1", - email="jendrik.seipp@unibas.ch", - export=["PATH", "DOWNWARD_BENCHMARKS"]) - -if common_setup.is_test_run(): - SUITE = IssueExperiment.DEFAULT_TEST_SUITE - ENVIRONMENT = LocalEnvironment(processes=1) - -exp = IssueExperiment( - revisions=REVISIONS, - configs=CONFIGS, - environment=ENVIRONMENT, -) -exp.add_suite(BENCHMARKS_DIR, SUITE) - -exp.add_parser(exp.EXITCODE_PARSER) -exp.add_parser(exp.ANYTIME_SEARCH_PARSER) -exp.add_parser(exp.PLANNER_PARSER) - -exp.add_step("build", exp.build) -exp.add_step("start", exp.start_runs) -exp.add_fetcher(name="fetch") - -#exp.add_absolute_report_step() -#exp.add_comparison_table_step() - -attributes = IssueExperiment.PORTFOLIO_ATTRIBUTES - -# Compare builds. 
-for build1, build2 in itertools.combinations(BUILDS, 2): - for rev in REVISIONS: - algorithm_pairs = [ - ("{rev}-{config_nick}-{build1}".format(**locals()), - "{rev}-{config_nick}-{build2}".format(**locals()), - "Diff ({config_nick}-{rev})".format(**locals())) - for config_nick in ["lama"]] - outfile = os.path.join(exp.eval_dir, "{EXPNAME}-{build1}-vs-{build2}.html".format(**locals())) - exp.add_report( - ComparativeReport(algorithm_pairs, attributes=attributes), - outfile=outfile) - exp.add_step( - 'publish-report', subprocess.call, ['publish', outfile]) - -exp.run_steps() diff --git a/experiments/issue213/v8-lama-5min.py b/experiments/issue213/v8-lama-5min.py deleted file mode 100755 index ad00e36074..0000000000 --- a/experiments/issue213/v8-lama-5min.py +++ /dev/null @@ -1,75 +0,0 @@ -#! /usr/bin/env python -# -*- coding: utf-8 -*- - -import itertools -import os -import subprocess - -from lab.environments import LocalEnvironment, BaselSlurmEnvironment - -from downward.reports.compare import ComparativeReport - -import common_setup -from common_setup import IssueConfig, IssueExperiment -from relativescatter import RelativeScatterPlotReport - -EXPNAME = common_setup.get_experiment_name() -DIR = os.path.dirname(os.path.abspath(__file__)) -BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"] -REVISIONS = ["issue213-v8"] -BUILDS = ["release32", "release64"] -CONFIGS = [ - IssueConfig( - "lama-" + build, - [], - build_options=[build], - driver_options=["--build", build, "--alias", "lama", "--overall-time-limit", "5m"]) - for rev in REVISIONS - for build in BUILDS -] -SUITE = common_setup.DEFAULT_SATISFICING_SUITE -ENVIRONMENT = BaselSlurmEnvironment( - partition="infai_1", - email="jendrik.seipp@unibas.ch", - export=["PATH", "DOWNWARD_BENCHMARKS"]) - -if common_setup.is_test_run(): - SUITE = IssueExperiment.DEFAULT_TEST_SUITE - ENVIRONMENT = LocalEnvironment(processes=1) - -exp = IssueExperiment( - revisions=REVISIONS, - configs=CONFIGS, - environment=ENVIRONMENT, -) 
-exp.add_suite(BENCHMARKS_DIR, SUITE) - -exp.add_parser(exp.EXITCODE_PARSER) -exp.add_parser(exp.ANYTIME_SEARCH_PARSER) -exp.add_parser(exp.PLANNER_PARSER) - -exp.add_step("build", exp.build) -exp.add_step("start", exp.start_runs) -exp.add_fetcher(name="fetch") - -#exp.add_absolute_report_step() -#exp.add_comparison_table_step() - -attributes = IssueExperiment.PORTFOLIO_ATTRIBUTES - -# Compare builds. -for build1, build2 in itertools.combinations(BUILDS, 2): - for rev in REVISIONS: - algorithm_pairs = [ - ("{rev}-{config_nick}-{build1}".format(**locals()), - "{rev}-{config_nick}-{build2}".format(**locals()), - "Diff ({config_nick}-{rev})".format(**locals())) - for config_nick in ["lama"]] - outfile = os.path.join(exp.eval_dir, "{EXPNAME}-{build1}-vs-{build2}.html".format(**locals())) - exp.add_report( - ComparativeReport(algorithm_pairs, attributes=attributes), - outfile=outfile) - exp.add_step( - 'publish-report', subprocess.call, ['publish', outfile]) - -exp.run_steps() diff --git a/experiments/issue213/v8-opt-30min.py b/experiments/issue213/v8-opt-30min.py deleted file mode 100755 index 0761e58da9..0000000000 --- a/experiments/issue213/v8-opt-30min.py +++ /dev/null @@ -1,96 +0,0 @@ -#! 
/usr/bin/env python -# -*- coding: utf-8 -*- - -import itertools -import os -import subprocess - -from lab.environments import LocalEnvironment, BaselSlurmEnvironment - -from downward.reports.compare import ComparativeReport - -import common_setup -from common_setup import IssueConfig, IssueExperiment -from relativescatter import RelativeScatterPlotReport - -EXPNAME = common_setup.get_experiment_name() -DIR = os.path.dirname(os.path.abspath(__file__)) -BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"] -REVISIONS = ["issue213-v8"] -BUILDS = ["release32", "release64"] -SEARCHES = [ - ("bjolp", [ - "--evaluator", "lmc=lmcount(lm_merged([lm_rhw(),lm_hm(m=1)]),admissible=true)", - "--search", "astar(lmc,lazy_evaluator=lmc)"]), - ("blind", ["--search", "astar(blind())"]), - ("cegar", ["--search", "astar(cegar())"]), - ("divpot", ["--search", "astar(diverse_potentials())"]), - ("ipdb", ["--search", "astar(ipdb())"]), - ("lmcut", ["--search", "astar(lmcut())"]), - ("mas", [ - "--search", "astar(merge_and_shrink(shrink_strategy=shrink_bisimulation(greedy=false)," - " merge_strategy=merge_sccs(order_of_sccs=topological," - " merge_selector=score_based_filtering(scoring_functions=[goal_relevance, dfp, total_order]))," - " label_reduction=exact(before_shrinking=true, before_merging=false)," - " max_states=50000, threshold_before_merge=1))"]), - ("seq+lmcut", ["--search", "astar(operatorcounting([state_equation_constraints(), lmcut_constraints()]))"]), - ("h2", ["--search", "astar(hm(m=2))"]), - ("hmax", ["--search", "astar(hmax())"]), -] -CONFIGS = [ - IssueConfig( - "-".join([search_nick, build]), - search, - build_options=[build], - driver_options=["--build", build, "--overall-time-limit", "30m"]) - for rev in REVISIONS - for build in BUILDS - for search_nick, search in SEARCHES -] -SUITE = common_setup.DEFAULT_OPTIMAL_SUITE -ENVIRONMENT = BaselSlurmEnvironment( - partition="infai_2", - email="jendrik.seipp@unibas.ch", - export=["PATH", "DOWNWARD_BENCHMARKS"]) - -if 
common_setup.is_test_run(): - SUITE = IssueExperiment.DEFAULT_TEST_SUITE - ENVIRONMENT = LocalEnvironment(processes=1) - -exp = IssueExperiment( - revisions=REVISIONS, - configs=CONFIGS, - environment=ENVIRONMENT, -) -exp.add_suite(BENCHMARKS_DIR, SUITE) - -exp.add_parser(exp.EXITCODE_PARSER) -exp.add_parser(exp.TRANSLATOR_PARSER) -exp.add_parser(exp.SINGLE_SEARCH_PARSER) -exp.add_parser(exp.PLANNER_PARSER) - -exp.add_step("build", exp.build) -exp.add_step("start", exp.start_runs) -exp.add_fetcher(name="fetch") - -#exp.add_absolute_report_step() -#exp.add_comparison_table_step() - -attributes = IssueExperiment.DEFAULT_TABLE_ATTRIBUTES - -# Compare builds. -for build1, build2 in itertools.combinations(BUILDS, 2): - for rev in REVISIONS: - algorithm_pairs = [ - ("{rev}-{config_nick}-{build1}".format(**locals()), - "{rev}-{config_nick}-{build2}".format(**locals()), - "Diff ({config_nick}-{rev})".format(**locals())) - for config_nick, _ in SEARCHES] - outfile = os.path.join(exp.eval_dir, "{EXPNAME}-{build1}-vs-{build2}.html".format(**locals())) - exp.add_report( - ComparativeReport(algorithm_pairs, attributes=attributes), - outfile=outfile) - exp.add_step( - 'publish-report', subprocess.call, ['publish', outfile]) - -exp.run_steps() diff --git a/experiments/issue213/v8-opt-5min-debug.py b/experiments/issue213/v8-opt-5min-debug.py deleted file mode 100755 index 7933a57d78..0000000000 --- a/experiments/issue213/v8-opt-5min-debug.py +++ /dev/null @@ -1,79 +0,0 @@ -#! 
/usr/bin/env python -# -*- coding: utf-8 -*- - -import itertools -import os -import subprocess - -from lab.environments import LocalEnvironment, BaselSlurmEnvironment - -from downward.reports.compare import ComparativeReport - -import common_setup -from common_setup import IssueConfig, IssueExperiment -from relativescatter import RelativeScatterPlotReport - -EXPNAME = common_setup.get_experiment_name() -DIR = os.path.dirname(os.path.abspath(__file__)) -BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"] -REVISIONS = ["issue213-v8"] -BUILDS = ["debug64"] -SEARCHES = [ - ("bjolp", [ - "--evaluator", "lmc=lmcount(lm_merged([lm_rhw(),lm_hm(m=1)]),admissible=true)", - "--search", "astar(lmc,lazy_evaluator=lmc)"]), - ("blind", ["--search", "astar(blind())"]), - ("cegar", ["--search", "astar(cegar())"]), - ("divpot", ["--search", "astar(diverse_potentials())"]), - ("ipdb", ["--search", "astar(ipdb())"]), - ("lmcut", ["--search", "astar(lmcut())"]), - ("mas", [ - "--search", "astar(merge_and_shrink(shrink_strategy=shrink_bisimulation(greedy=false)," - " merge_strategy=merge_sccs(order_of_sccs=topological," - " merge_selector=score_based_filtering(scoring_functions=[goal_relevance, dfp, total_order]))," - " label_reduction=exact(before_shrinking=true, before_merging=false)," - " max_states=50000, threshold_before_merge=1))"]), - ("seq+lmcut", ["--search", "astar(operatorcounting([state_equation_constraints(), lmcut_constraints()]))"]), - ("h2", ["--search", "astar(hm(m=2))"]), - ("hmax", ["--search", "astar(hmax())"]), -] -CONFIGS = [ - IssueConfig( - "-".join([search_nick, build]), - search, - build_options=[build], - driver_options=["--build", build, "--overall-time-limit", "5m"]) - for rev in REVISIONS - for build in BUILDS - for search_nick, search in SEARCHES -] -SUITE = common_setup.DEFAULT_OPTIMAL_SUITE -ENVIRONMENT = BaselSlurmEnvironment( - partition="infai_2", - email="jendrik.seipp@unibas.ch", - export=["PATH", "DOWNWARD_BENCHMARKS"]) - -if 
common_setup.is_test_run(): - SUITE = IssueExperiment.DEFAULT_TEST_SUITE - ENVIRONMENT = LocalEnvironment(processes=1) - -exp = IssueExperiment( - revisions=REVISIONS, - configs=CONFIGS, - environment=ENVIRONMENT, -) -exp.add_suite(BENCHMARKS_DIR, SUITE) - -exp.add_parser(exp.EXITCODE_PARSER) -exp.add_parser(exp.TRANSLATOR_PARSER) -exp.add_parser(exp.SINGLE_SEARCH_PARSER) -exp.add_parser(exp.PLANNER_PARSER) - -exp.add_step("build", exp.build) -exp.add_step("start", exp.start_runs) -exp.add_fetcher(name="fetch") - -exp.add_absolute_report_step() -#exp.add_comparison_table_step() - -exp.run_steps() diff --git a/experiments/issue213/v8-opt-5min.py b/experiments/issue213/v8-opt-5min.py deleted file mode 100755 index cb30218cfd..0000000000 --- a/experiments/issue213/v8-opt-5min.py +++ /dev/null @@ -1,96 +0,0 @@ -#! /usr/bin/env python -# -*- coding: utf-8 -*- - -import itertools -import os -import subprocess - -from lab.environments import LocalEnvironment, BaselSlurmEnvironment - -from downward.reports.compare import ComparativeReport - -import common_setup -from common_setup import IssueConfig, IssueExperiment -from relativescatter import RelativeScatterPlotReport - -EXPNAME = common_setup.get_experiment_name() -DIR = os.path.dirname(os.path.abspath(__file__)) -BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"] -REVISIONS = ["issue213-v8"] -BUILDS = ["release32", "release64"] -SEARCHES = [ - ("bjolp", [ - "--evaluator", "lmc=lmcount(lm_merged([lm_rhw(),lm_hm(m=1)]),admissible=true)", - "--search", "astar(lmc,lazy_evaluator=lmc)"]), - ("blind", ["--search", "astar(blind())"]), - ("cegar", ["--search", "astar(cegar())"]), - ("divpot", ["--search", "astar(diverse_potentials())"]), - ("ipdb", ["--search", "astar(ipdb())"]), - ("lmcut", ["--search", "astar(lmcut())"]), - ("mas", [ - "--search", "astar(merge_and_shrink(shrink_strategy=shrink_bisimulation(greedy=false)," - " merge_strategy=merge_sccs(order_of_sccs=topological," - " 
merge_selector=score_based_filtering(scoring_functions=[goal_relevance, dfp, total_order]))," - " label_reduction=exact(before_shrinking=true, before_merging=false)," - " max_states=50000, threshold_before_merge=1))"]), - ("seq+lmcut", ["--search", "astar(operatorcounting([state_equation_constraints(), lmcut_constraints()]))"]), - ("h2", ["--search", "astar(hm(m=2))"]), - ("hmax", ["--search", "astar(hmax())"]), -] -CONFIGS = [ - IssueConfig( - "-".join([search_nick, build]), - search, - build_options=[build], - driver_options=["--build", build, "--overall-time-limit", "5m"]) - for rev in REVISIONS - for build in BUILDS - for search_nick, search in SEARCHES -] -SUITE = common_setup.DEFAULT_OPTIMAL_SUITE -ENVIRONMENT = BaselSlurmEnvironment( - partition="infai_2", - email="jendrik.seipp@unibas.ch", - export=["PATH", "DOWNWARD_BENCHMARKS"]) - -if common_setup.is_test_run(): - SUITE = IssueExperiment.DEFAULT_TEST_SUITE - ENVIRONMENT = LocalEnvironment(processes=1) - -exp = IssueExperiment( - revisions=REVISIONS, - configs=CONFIGS, - environment=ENVIRONMENT, -) -exp.add_suite(BENCHMARKS_DIR, SUITE) - -exp.add_parser(exp.EXITCODE_PARSER) -exp.add_parser(exp.TRANSLATOR_PARSER) -exp.add_parser(exp.SINGLE_SEARCH_PARSER) -exp.add_parser(exp.PLANNER_PARSER) - -exp.add_step("build", exp.build) -exp.add_step("start", exp.start_runs) -exp.add_fetcher(name="fetch") - -#exp.add_absolute_report_step() -#exp.add_comparison_table_step() - -attributes = IssueExperiment.DEFAULT_TABLE_ATTRIBUTES - -# Compare builds. 
-for build1, build2 in itertools.combinations(BUILDS, 2): - for rev in REVISIONS: - algorithm_pairs = [ - ("{rev}-{config_nick}-{build1}".format(**locals()), - "{rev}-{config_nick}-{build2}".format(**locals()), - "Diff ({config_nick}-{rev})".format(**locals())) - for config_nick, _ in SEARCHES] - outfile = os.path.join(exp.eval_dir, "{EXPNAME}-{build1}-vs-{build2}.html".format(**locals())) - exp.add_report( - ComparativeReport(algorithm_pairs, attributes=attributes), - outfile=outfile) - exp.add_step( - 'publish-report', subprocess.call, ['publish', outfile]) - -exp.run_steps() diff --git a/experiments/issue213/v8-sat-30min.py b/experiments/issue213/v8-sat-30min.py deleted file mode 100755 index c42485632c..0000000000 --- a/experiments/issue213/v8-sat-30min.py +++ /dev/null @@ -1,114 +0,0 @@ -#! /usr/bin/env python -# -*- coding: utf-8 -*- - -import itertools -import os -import subprocess - -from lab.environments import LocalEnvironment, BaselSlurmEnvironment - -from downward.reports.compare import ComparativeReport - -import common_setup -from common_setup import IssueConfig, IssueExperiment -from relativescatter import RelativeScatterPlotReport - -EXPNAME = common_setup.get_experiment_name() -DIR = os.path.dirname(os.path.abspath(__file__)) -BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"] -REVISIONS = ["issue213-v8"] -BUILDS = ["release32", "release64"] -CONFIG_DICT = { - "eager_greedy_ff": [ - "--evaluator", - "h=ff()", - "--search", - "eager_greedy([h], preferred=[h])"], - "eager_greedy_cea": [ - "--evaluator", - "h=cea()", - "--search", - "eager_greedy([h], preferred=[h])"], - "lazy_greedy_add": [ - "--evaluator", - "h=add()", - "--search", - "lazy_greedy([h], preferred=[h])"], - "lazy_greedy_cg": [ - "--evaluator", - "h=cg()", - "--search", - "lazy_greedy([h], preferred=[h])"], - "lama-first": [ - "--evaluator", - "hlm=lmcount(lm_factory=lm_rhw(reasonable_orders=true),transform=adapt_costs(one),pref=false)", - "--evaluator", 
"hff=ff(transform=adapt_costs(one))", - "--search", """lazy_greedy([hff,hlm],preferred=[hff,hlm], - cost_type=one,reopen_closed=false)"""], - "lama-first-typed": [ - "--evaluator", - "hlm=lmcount(lm_factory=lm_rhw(reasonable_orders=true),transform=adapt_costs(one),pref=false)", - "--evaluator", "hff=ff(transform=adapt_costs(one))", - "--search", - "lazy(alt([single(hff), single(hff, pref_only=true)," - "single(hlm), single(hlm, pref_only=true), type_based([hff, g()])], boost=1000)," - "preferred=[hff,hlm], cost_type=one, reopen_closed=false, randomize_successors=true," - "preferred_successors_first=false)"], -} -CONFIGS = [ - IssueConfig( - "-".join([config_nick, build]), - config, - build_options=[build], - driver_options=["--build", build, "--overall-time-limit", "30m"]) - for rev in REVISIONS - for build in BUILDS - for config_nick, config in CONFIG_DICT.items() -] -SUITE = common_setup.DEFAULT_SATISFICING_SUITE -ENVIRONMENT = BaselSlurmEnvironment( - partition="infai_1", - email="jendrik.seipp@unibas.ch", - export=["PATH", "DOWNWARD_BENCHMARKS"]) - -if common_setup.is_test_run(): - SUITE = IssueExperiment.DEFAULT_TEST_SUITE - ENVIRONMENT = LocalEnvironment(processes=1) - -exp = IssueExperiment( - revisions=REVISIONS, - configs=CONFIGS, - environment=ENVIRONMENT, -) -exp.add_suite(BENCHMARKS_DIR, SUITE) - -exp.add_parser(exp.EXITCODE_PARSER) -exp.add_parser(exp.TRANSLATOR_PARSER) -exp.add_parser(exp.SINGLE_SEARCH_PARSER) -exp.add_parser(exp.PLANNER_PARSER) - -exp.add_step("build", exp.build) -exp.add_step("start", exp.start_runs) -exp.add_fetcher(name="fetch") - -#exp.add_absolute_report_step() -#exp.add_comparison_table_step() - -attributes = IssueExperiment.DEFAULT_TABLE_ATTRIBUTES - -# Compare builds. 
-for build1, build2 in itertools.combinations(BUILDS, 2): - for rev in REVISIONS: - algorithm_pairs = [ - ("{rev}-{config_nick}-{build1}".format(**locals()), - "{rev}-{config_nick}-{build2}".format(**locals()), - "Diff ({config_nick}-{rev})".format(**locals())) - for config_nick in CONFIG_DICT.keys()] - outfile = os.path.join(exp.eval_dir, "{EXPNAME}-{build1}-vs-{build2}.html".format(**locals())) - exp.add_report( - ComparativeReport(algorithm_pairs, attributes=attributes), - outfile=outfile) - exp.add_step( - 'publish-report', subprocess.call, ['publish', outfile]) - -exp.run_steps() diff --git a/experiments/issue213/v8-sat-5min-debug.py b/experiments/issue213/v8-sat-5min-debug.py deleted file mode 100755 index 58e14c161b..0000000000 --- a/experiments/issue213/v8-sat-5min-debug.py +++ /dev/null @@ -1,97 +0,0 @@ -#! /usr/bin/env python -# -*- coding: utf-8 -*- - -import itertools -import os -import subprocess - -from lab.environments import LocalEnvironment, BaselSlurmEnvironment - -from downward.reports.compare import ComparativeReport - -import common_setup -from common_setup import IssueConfig, IssueExperiment -from relativescatter import RelativeScatterPlotReport - -EXPNAME = common_setup.get_experiment_name() -DIR = os.path.dirname(os.path.abspath(__file__)) -BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"] -REVISIONS = ["issue213-v8"] -BUILDS = ["debug64"] -CONFIG_DICT = { - "eager_greedy_ff": [ - "--evaluator", - "h=ff()", - "--search", - "eager_greedy([h], preferred=[h])"], - "eager_greedy_cea": [ - "--evaluator", - "h=cea()", - "--search", - "eager_greedy([h], preferred=[h])"], - "lazy_greedy_add": [ - "--evaluator", - "h=add()", - "--search", - "lazy_greedy([h], preferred=[h])"], - "lazy_greedy_cg": [ - "--evaluator", - "h=cg()", - "--search", - "lazy_greedy([h], preferred=[h])"], - "lama-first": [ - "--evaluator", - "hlm=lmcount(lm_factory=lm_rhw(reasonable_orders=true),transform=adapt_costs(one),pref=false)", - "--evaluator", 
"hff=ff(transform=adapt_costs(one))", - "--search", """lazy_greedy([hff,hlm],preferred=[hff,hlm], - cost_type=one,reopen_closed=false)"""], - "lama-first-typed": [ - "--evaluator", - "hlm=lmcount(lm_factory=lm_rhw(reasonable_orders=true),transform=adapt_costs(one),pref=false)", - "--evaluator", "hff=ff(transform=adapt_costs(one))", - "--search", - "lazy(alt([single(hff), single(hff, pref_only=true)," - "single(hlm), single(hlm, pref_only=true), type_based([hff, g()])], boost=1000)," - "preferred=[hff,hlm], cost_type=one, reopen_closed=false, randomize_successors=true," - "preferred_successors_first=false)"], -} -CONFIGS = [ - IssueConfig( - "-".join([config_nick, build]), - config, - build_options=[build], - driver_options=["--build", build, "--overall-time-limit", "5m"]) - for rev in REVISIONS - for build in BUILDS - for config_nick, config in CONFIG_DICT.items() -] -SUITE = common_setup.DEFAULT_SATISFICING_SUITE -ENVIRONMENT = BaselSlurmEnvironment( - partition="infai_1", - email="jendrik.seipp@unibas.ch", - export=["PATH", "DOWNWARD_BENCHMARKS"]) - -if common_setup.is_test_run(): - SUITE = IssueExperiment.DEFAULT_TEST_SUITE - ENVIRONMENT = LocalEnvironment(processes=1) - -exp = IssueExperiment( - revisions=REVISIONS, - configs=CONFIGS, - environment=ENVIRONMENT, -) -exp.add_suite(BENCHMARKS_DIR, SUITE) - -exp.add_parser(exp.EXITCODE_PARSER) -exp.add_parser(exp.TRANSLATOR_PARSER) -exp.add_parser(exp.SINGLE_SEARCH_PARSER) -exp.add_parser(exp.PLANNER_PARSER) - -exp.add_step("build", exp.build) -exp.add_step("start", exp.start_runs) -exp.add_fetcher(name="fetch") - -exp.add_absolute_report_step() -#exp.add_comparison_table_step() - -exp.run_steps() diff --git a/experiments/issue213/v8-sat-5min.py b/experiments/issue213/v8-sat-5min.py deleted file mode 100755 index bc8ed8141e..0000000000 --- a/experiments/issue213/v8-sat-5min.py +++ /dev/null @@ -1,114 +0,0 @@ -#! 
/usr/bin/env python -# -*- coding: utf-8 -*- - -import itertools -import os -import subprocess - -from lab.environments import LocalEnvironment, BaselSlurmEnvironment - -from downward.reports.compare import ComparativeReport - -import common_setup -from common_setup import IssueConfig, IssueExperiment -from relativescatter import RelativeScatterPlotReport - -EXPNAME = common_setup.get_experiment_name() -DIR = os.path.dirname(os.path.abspath(__file__)) -BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"] -REVISIONS = ["issue213-v8"] -BUILDS = ["release32", "release64"] -CONFIG_DICT = { - "eager_greedy_ff": [ - "--evaluator", - "h=ff()", - "--search", - "eager_greedy([h], preferred=[h])"], - "eager_greedy_cea": [ - "--evaluator", - "h=cea()", - "--search", - "eager_greedy([h], preferred=[h])"], - "lazy_greedy_add": [ - "--evaluator", - "h=add()", - "--search", - "lazy_greedy([h], preferred=[h])"], - "lazy_greedy_cg": [ - "--evaluator", - "h=cg()", - "--search", - "lazy_greedy([h], preferred=[h])"], - "lama-first": [ - "--evaluator", - "hlm=lmcount(lm_factory=lm_rhw(reasonable_orders=true),transform=adapt_costs(one),pref=false)", - "--evaluator", "hff=ff(transform=adapt_costs(one))", - "--search", """lazy_greedy([hff,hlm],preferred=[hff,hlm], - cost_type=one,reopen_closed=false)"""], - "lama-first-typed": [ - "--evaluator", - "hlm=lmcount(lm_factory=lm_rhw(reasonable_orders=true),transform=adapt_costs(one),pref=false)", - "--evaluator", "hff=ff(transform=adapt_costs(one))", - "--search", - "lazy(alt([single(hff), single(hff, pref_only=true)," - "single(hlm), single(hlm, pref_only=true), type_based([hff, g()])], boost=1000)," - "preferred=[hff,hlm], cost_type=one, reopen_closed=false, randomize_successors=true," - "preferred_successors_first=false)"], -} -CONFIGS = [ - IssueConfig( - "-".join([config_nick, build]), - config, - build_options=[build], - driver_options=["--build", build, "--overall-time-limit", "5m"]) - for rev in REVISIONS - for build in BUILDS - for 
config_nick, config in CONFIG_DICT.items() -] -SUITE = common_setup.DEFAULT_SATISFICING_SUITE -ENVIRONMENT = BaselSlurmEnvironment( - partition="infai_1", - email="jendrik.seipp@unibas.ch", - export=["PATH", "DOWNWARD_BENCHMARKS"]) - -if common_setup.is_test_run(): - SUITE = IssueExperiment.DEFAULT_TEST_SUITE - ENVIRONMENT = LocalEnvironment(processes=1) - -exp = IssueExperiment( - revisions=REVISIONS, - configs=CONFIGS, - environment=ENVIRONMENT, -) -exp.add_suite(BENCHMARKS_DIR, SUITE) - -exp.add_parser(exp.EXITCODE_PARSER) -exp.add_parser(exp.TRANSLATOR_PARSER) -exp.add_parser(exp.SINGLE_SEARCH_PARSER) -exp.add_parser(exp.PLANNER_PARSER) - -exp.add_step("build", exp.build) -exp.add_step("start", exp.start_runs) -exp.add_fetcher(name="fetch") - -#exp.add_absolute_report_step() -#exp.add_comparison_table_step() - -attributes = IssueExperiment.DEFAULT_TABLE_ATTRIBUTES - -# Compare builds. -for build1, build2 in itertools.combinations(BUILDS, 2): - for rev in REVISIONS: - algorithm_pairs = [ - ("{rev}-{config_nick}-{build1}".format(**locals()), - "{rev}-{config_nick}-{build2}".format(**locals()), - "Diff ({config_nick}-{rev})".format(**locals())) - for config_nick, _ in CONFIG_DICT.items()] - outfile = os.path.join(exp.eval_dir, "{EXPNAME}-{build1}-vs-{build2}.html".format(**locals())) - exp.add_report( - ComparativeReport(algorithm_pairs, attributes=attributes), - outfile=outfile) - exp.add_step( - 'publish-report', subprocess.call, ['publish', outfile]) - -exp.run_steps() diff --git a/experiments/issue214/common_setup.py b/experiments/issue214/common_setup.py deleted file mode 100644 index 56d69012c2..0000000000 --- a/experiments/issue214/common_setup.py +++ /dev/null @@ -1,238 +0,0 @@ -# -*- coding: utf-8 -*- - -import os.path - -from lab.environments import MaiaEnvironment -from lab.steps import Step - -from downward.checkouts import Translator, Preprocessor, Planner -from downward.experiments import DownwardExperiment -from downward.reports.compare import 
CompareRevisionsReport -from downward.reports.scatter import ScatterPlotReport - - -def get_script(): - """Get file name of main script.""" - import __main__ - return __main__.__file__ - - -def get_script_dir(): - """Get directory of main script. - - Usually a relative directory (depends on how it was called by the user.)""" - return os.path.dirname(get_script()) - - -def get_experiment_name(): - """Get name for experiment. - - Derived from the filename of the main script, e.g. - "/ham/spam/eggs.py" => "eggs".""" - script = os.path.abspath(get_script()) - script_dir = os.path.basename(os.path.dirname(script)) - script_base = os.path.splitext(os.path.basename(script))[0] - return "%s-%s" % (script_dir, script_base) - - -def get_data_dir(): - """Get data dir for the experiment. - - This is the subdirectory "data" of the directory containing - the main script.""" - return os.path.join(get_script_dir(), "data", get_experiment_name()) - - -def get_repo_base(): - """Get base directory of the repository, as an absolute path. 
- - Found by searching upwards in the directory tree from the main - script until a directory with a subdirectory named ".hg" is found.""" - path = os.path.abspath(get_script_dir()) - while True: - if os.path.exists(os.path.join(path, ".hg")): - return path - path = os.path.dirname(path) - - -class MyExperiment(DownwardExperiment): - DEFAULT_TABLE_ATTRIBUTES = [ - "cost", - "coverage", - "evaluations", - "expansions", - "expansions_until_last_jump", - "generated", - "memory", - "run_dir", - "score_evaluations", - "score_expansions", - "score_generated", - "score_memory", - "score_search_time", - "score_total_time", - "search_time", - "total_time", - ] - - DEFAULT_SCATTER_PLOT_ATTRIBUTES = [ - "total_time", - "search_time", - "memory", - "expansions_until_last_jump", - ] - - """Wrapper for DownwardExperiment with a few convenience features.""" - - def __init__(self, configs=None, grid_priority=None, path=None, - repo=None, revisions=None, search_revisions=None, - suite=None, parsers=None, **kwargs): - """Create a DownwardExperiment with some convenience features. - - If "configs" is specified, it should be a dict of {nick: - cmdline} pairs that sets the planner configurations to test. - - If "grid_priority" is specified and no environment is - specifically requested in **kwargs, use the maia environment - with the specified priority. - - If "path" is not specified, the experiment data path is - derived automatically from the main script's filename. - - If "repo" is not specified, the repository base is derived - automatically from the main script's path. - - If "revisions" is specified, it should be a non-empty - list of revisions, which specify which planner versions to use - in the experiment. The same versions are used for translator, - preprocessor and search. - - If "search_revisions" is specified, it should be a non-empty - list of revisions, which specify which search component - versions to use in the experiment. 
All experiments use the - translator and preprocessor component of the first - revision. - - If "suite" is specified, it should specify a problem suite. - - If "parsers" is specified, it should be a list of paths to - parsers that should be run in addition to search_parser.py. - - Options "combinations" (from the base class), "revisions" and - "search_revisions" are mutually exclusive.""" - - if grid_priority is not None and "environment" not in kwargs: - kwargs["environment"] = MaiaEnvironment(priority=grid_priority) - - if path is None: - path = get_data_dir() - - if repo is None: - repo = get_repo_base() - - num_rev_opts_specified = ( - int(revisions is not None) + - int(search_revisions is not None) + - int(kwargs.get("combinations") is not None)) - - if num_rev_opts_specified > 1: - raise ValueError('must specify exactly one of "revisions", ' - '"search_revisions" or "combinations"') - - # See add_comparison_table_step for more on this variable. - self._HACK_revisions = revisions - - if revisions is not None: - if not revisions: - raise ValueError("revisions cannot be empty") - combinations = [(Translator(repo, rev), - Preprocessor(repo, rev), - Planner(repo, rev)) - for rev in revisions] - kwargs["combinations"] = combinations - - if search_revisions is not None: - if not search_revisions: - raise ValueError("search_revisions cannot be empty") - base_rev = search_revisions[0] - translator = Translator(repo, base_rev) - preprocessor = Preprocessor(repo, base_rev) - combinations = [(translator, preprocessor, Planner(repo, rev)) - for rev in search_revisions] - kwargs["combinations"] = combinations - - self._additional_parsers = parsers or [] - - DownwardExperiment.__init__(self, path=path, repo=repo, **kwargs) - - if configs is not None: - for nick, config in configs.items(): - self.add_config(nick, config) - - if suite is not None: - self.add_suite(suite) - - self._report_prefix = get_experiment_name() - - def _make_search_runs(self): - 
DownwardExperiment._make_search_runs(self) - for i, parser in enumerate(self._additional_parsers): - parser_alias = 'ADDITIONALPARSER%d' % i - self.add_resource(parser_alias, parser, os.path.basename(parser)) - for run in self.runs: - run.require_resource(parser_alias) - run.add_command('additional-parser-%d' % i, [parser_alias]) - - def add_comparison_table_step(self, attributes=None): - revisions = self._HACK_revisions - if revisions is None: - # TODO: It's not clear to me what a "revision" in the - # overall context of the code really is, e.g. when keeping - # the translator and preprocessor method fixed and only - # changing the search component. It's also not really - # clear to me how the interface of the Compare... reports - # works and how to use it more generally. Hence the - # present hack. - - # Ideally, this method should look at the table columns we - # have (defined by planners and planner configurations), - # pair them up in a suitable way, either controlled by a - # convenience parameter or a more general grouping method, - # and then use this to define which pairs go together. - raise NotImplementedError( - "only supported when specifying revisions in __init__") - - if attributes is None: - attributes = self.DEFAULT_TABLE_ATTRIBUTES - report = CompareRevisionsReport(*revisions, attributes=attributes) - self.add_report(report, outfile="%s-compare.html" % self._report_prefix) - - def add_scatter_plot_step(self, attributes=None): - if attributes is None: - attributes = self.DEFAULT_SCATTER_PLOT_ATTRIBUTES - revisions = self._HACK_revisions - if revisions is None: - # TODO: See add_comparison_table_step. - raise NotImplementedError( - "only supported when specifying revisions in __init__") - if len(revisions) != 2: - # TODO: Should generalize this, too, by offering a general - # grouping function and then comparing any pair of - # settings in the same group. 
- raise NotImplementedError("need two revisions") - scatter_dir = os.path.join(self.eval_dir, "scatter") - def make_scatter_plots(): - configs = [conf[0] for conf in self.configs] - for nick in configs: - config_before = "%s-%s" % (revisions[0], nick) - config_after = "%s-%s" % (revisions[1], nick) - for attribute in attributes: - name = "%s-%s-%s" % (self._report_prefix, attribute, nick) - report = ScatterPlotReport( - filter_config=[config_before, config_after], - attributes=[attribute], - get_category=lambda run1, run2: run1["domain"], - legend_location=(1.3, 0.5)) - report(self.eval_dir, os.path.join(scatter_dir, name)) - - self.add_step(Step("make-scatter-plots", make_scatter_plots)) diff --git a/experiments/issue214/issue214-sat.py b/experiments/issue214/issue214-sat.py deleted file mode 100755 index a19d054fa4..0000000000 --- a/experiments/issue214/issue214-sat.py +++ /dev/null @@ -1,53 +0,0 @@ -#! /usr/bin/env python -# -*- coding: utf-8 -*- - -from downward.suites import suite_satisficing_with_ipc11 -from downward.configs import default_configs_satisficing -from downward.reports.scatter import ScatterPlotReport - -import common_setup - - -REVS = ["issue214-base", "issue214-v2"] -CONFIGS = default_configs_satisficing() - -TEST_RUN = True - -if TEST_RUN: - SUITE = "gripper:prob01.pddl" - PRIORITY = None # "None" means local experiment -else: - SUITE = suite_satisficing_with_ipc11() - PRIORITY = 0 # number means maia experiment - - -exp = common_setup.MyExperiment( - grid_priority=PRIORITY, - revisions=REVS, - configs=CONFIGS, - suite=SUITE, - parsers=['state_size_parser.py'], - ) - - -exp.add_comparison_table_step( - attributes=common_setup.MyExperiment.DEFAULT_TABLE_ATTRIBUTES + ['bytes_per_state', 'variables', 'state_var_t_size'] -) -exp.add_scatter_plot_step() - -exp.add_report(ScatterPlotReport( - attributes=['bytes_per_state'], - filter_config_nick='astar_blind', - ), - outfile='issue214_sat_bytes_per_state.png') - -for config_nick in ['lazy_greedy_ff', 
'eager_greedy_cg', 'seq_sat_lama_2011']: - for attr in ['memory', 'total_time']: - exp.add_report(ScatterPlotReport( - attributes=[attr], - filter_config_nick=config_nick, - ), - outfile='issue214_sat_%s_%s.png' % (attr, config_nick)) - - -exp() diff --git a/experiments/issue214/issue214-v3-ipdb.py b/experiments/issue214/issue214-v3-ipdb.py deleted file mode 100755 index 0aecc11784..0000000000 --- a/experiments/issue214/issue214-v3-ipdb.py +++ /dev/null @@ -1,36 +0,0 @@ -#! /usr/bin/env python -# -*- coding: utf-8 -*- - -from downward.suites import suite_optimal_with_ipc11 -from downward.configs import default_configs_optimal -from downward.reports.scatter import ScatterPlotReport - -import common_setup - - -REVS = ["issue214-base", "issue214-v3"] -CONFIGS = {"ipdb": ["--search", "astar(ipdb())"]} - -TEST_RUN = True - -if TEST_RUN: - SUITE = "gripper:prob01.pddl" - PRIORITY = None # "None" means local experiment -else: - SUITE = suite_optimal_with_ipc11() - PRIORITY = 0 # number means maia experiment - - -exp = common_setup.MyExperiment( - grid_priority=PRIORITY, - revisions=REVS, - configs=CONFIGS, - suite=SUITE, - parsers=['state_size_parser.py'], - ) - -exp.add_comparison_table_step( - attributes=common_setup.MyExperiment.DEFAULT_TABLE_ATTRIBUTES + ['bytes_per_state', 'variables', 'state_var_t_size'] -) - -exp() diff --git a/experiments/issue214/issue214-v4-ipdb.py b/experiments/issue214/issue214-v4-ipdb.py deleted file mode 100755 index 801772ce62..0000000000 --- a/experiments/issue214/issue214-v4-ipdb.py +++ /dev/null @@ -1,36 +0,0 @@ -#! 
/usr/bin/env python -# -*- coding: utf-8 -*- - -from downward.suites import suite_optimal_with_ipc11 -from downward.configs import default_configs_optimal -from downward.reports.scatter import ScatterPlotReport - -import common_setup - - -REVS = ["issue214-base", "issue214-v4"] -CONFIGS = {"ipdb": ["--search", "astar(ipdb())"]} - -TEST_RUN = False - -if TEST_RUN: - SUITE = "gripper:prob01.pddl" - PRIORITY = None # "None" means local experiment -else: - SUITE = suite_optimal_with_ipc11() - PRIORITY = 0 # number means maia experiment - - -exp = common_setup.MyExperiment( - grid_priority=PRIORITY, - revisions=REVS, - configs=CONFIGS, - suite=SUITE, - parsers=['state_size_parser.py'], - ) - -exp.add_comparison_table_step( - attributes=common_setup.MyExperiment.DEFAULT_TABLE_ATTRIBUTES + ['bytes_per_state', 'variables', 'state_var_t_size'] -) - -exp() diff --git a/experiments/issue214/issue214-v5-sat.py b/experiments/issue214/issue214-v5-sat.py deleted file mode 100755 index 7e0f2fbea6..0000000000 --- a/experiments/issue214/issue214-v5-sat.py +++ /dev/null @@ -1,36 +0,0 @@ -#! 
/usr/bin/env python -# -*- coding: utf-8 -*- - -from downward import suites -from downward.configs import default_configs_optimal -from downward.reports.scatter import ScatterPlotReport - -import common_setup - - -REVS = ["issue214-base", "issue214-v5"] -CONFIGS = {"blind": ["--search", "astar(blind())"]} - -TEST_RUN = False - -if TEST_RUN: - SUITE = "gripper:prob01.pddl" - PRIORITY = None # "None" means local experiment -else: - SUITE = list(sorted(set(suites.suite_all()) - set(suites.suite_optimal_with_ipc11()))) - PRIORITY = 0 # number means maia experiment - - -exp = common_setup.MyExperiment( - grid_priority=PRIORITY, - revisions=REVS, - configs=CONFIGS, - suite=SUITE, - parsers=['state_size_parser.py'], - ) - -exp.add_comparison_table_step( - attributes=common_setup.MyExperiment.DEFAULT_TABLE_ATTRIBUTES + ['bytes_per_state', 'variables', 'state_var_t_size'] -) - -exp() diff --git a/experiments/issue214/issue214-v5.py b/experiments/issue214/issue214-v5.py deleted file mode 100755 index fb5841cbf5..0000000000 --- a/experiments/issue214/issue214-v5.py +++ /dev/null @@ -1,36 +0,0 @@ -#! 
/usr/bin/env python -# -*- coding: utf-8 -*- - -from downward.suites import suite_optimal_with_ipc11 -from downward.configs import default_configs_optimal -from downward.reports.scatter import ScatterPlotReport - -import common_setup - - -REVS = ["issue214-base", "issue214-v5"] -CONFIGS = {"blind": ["--search", "astar(blind())"]} - -TEST_RUN = False - -if TEST_RUN: - SUITE = "gripper:prob01.pddl" - PRIORITY = None # "None" means local experiment -else: - SUITE = suite_optimal_with_ipc11() - PRIORITY = 0 # number means maia experiment - - -exp = common_setup.MyExperiment( - grid_priority=PRIORITY, - revisions=REVS, - configs=CONFIGS, - suite=SUITE, - parsers=['state_size_parser.py'], - ) - -exp.add_comparison_table_step( - attributes=common_setup.MyExperiment.DEFAULT_TABLE_ATTRIBUTES + ['bytes_per_state', 'variables', 'state_var_t_size'] -) - -exp() diff --git a/experiments/issue214/issue214.py b/experiments/issue214/issue214.py deleted file mode 100755 index 43366d52ad..0000000000 --- a/experiments/issue214/issue214.py +++ /dev/null @@ -1,56 +0,0 @@ -#! 
/usr/bin/env python -# -*- coding: utf-8 -*- - -from downward.suites import suite_optimal_with_ipc11 -from downward.configs import default_configs_optimal -from downward.reports.scatter import ScatterPlotReport - -import common_setup - - -REVS = ["issue214-base", "issue214-v2"] -CONFIGS = default_configs_optimal() - -# remove config that is disabled in this branch -del CONFIGS['astar_selmax_lmcut_lmcount'] - -TEST_RUN = True - -if TEST_RUN: - SUITE = "gripper:prob01.pddl" - PRIORITY = None # "None" means local experiment -else: - SUITE = suite_optimal_with_ipc11() - PRIORITY = 0 # number means maia experiment - - -exp = common_setup.MyExperiment( - grid_priority=PRIORITY, - revisions=REVS, - configs=CONFIGS, - suite=SUITE, - parsers=['state_size_parser.py'], - ) - - -exp.add_comparison_table_step( - attributes=common_setup.MyExperiment.DEFAULT_TABLE_ATTRIBUTES + ['bytes_per_state', 'variables', 'state_var_t_size'] -) -exp.add_scatter_plot_step() - -exp.add_report(ScatterPlotReport( - attributes=['bytes_per_state'], - filter_config_nick='astar_blind', - ), - outfile='issue214_bytes_per_state.png') - -for config_nick in ['astar_blind', 'astar_lmcut', 'astar_merge_and_shrink_bisim', 'astar_ipdb']: - for attr in ['memory', 'total_time']: - exp.add_report(ScatterPlotReport( - attributes=[attr], - filter_config_nick=config_nick, - ), - outfile='issue214_%s_%s.png' % (attr, config_nick)) - - -exp() diff --git a/experiments/issue214/state_size_parser.py b/experiments/issue214/state_size_parser.py deleted file mode 100755 index 1d66cae022..0000000000 --- a/experiments/issue214/state_size_parser.py +++ /dev/null @@ -1,23 +0,0 @@ -#! 
/usr/bin/env python - -from lab.parser import Parser - -def calculate_old_state_size(content, props): - if 'bytes_per_state' not in props and 'preprocessor_variables' in props and 'state_var_t_size' in props: - props['bytes_per_state'] = props['preprocessor_variables'] * props['state_var_t_size'] - -class StateSizeParser(Parser): - def __init__(self): - Parser.__init__(self) - self.add_pattern('bytes_per_state', 'Bytes per state: (\d+)', - required=False, type=int) - self.add_pattern('state_var_t_size', 'Dispatcher selected state size (\d).', - required=False, type=int) - self.add_pattern('variables', 'Variables: (\d+)', - required=False, type=int) - self.add_function(calculate_old_state_size) - -if __name__ == '__main__': - parser = StateSizeParser() - print 'Running state size parser' - parser.parse() diff --git a/experiments/issue269/common_setup.py b/experiments/issue269/common_setup.py deleted file mode 100644 index 1efe1ed71b..0000000000 --- a/experiments/issue269/common_setup.py +++ /dev/null @@ -1,352 +0,0 @@ -# -*- coding: utf-8 -*- - -import itertools -import os -import platform -import sys - -from lab.environments import LocalEnvironment, MaiaEnvironment -from lab.experiment import ARGPARSER -from lab.steps import Step - -from downward.experiments import DownwardExperiment, _get_rev_nick -from downward.checkouts import Translator, Preprocessor, Planner -from downward.reports.absolute import AbsoluteReport -from downward.reports.compare import CompareRevisionsReport -from downward.reports.scatter import ScatterPlotReport - - -def parse_args(): - ARGPARSER.add_argument( - "--test", - choices=["yes", "no", "auto"], - default="auto", - dest="test_run", - help="test experiment locally on a small suite if --test=yes or " - "--test=auto and we are not on a cluster") - return ARGPARSER.parse_args() - -ARGS = parse_args() - - -def get_script(): - """Get file name of main script.""" - import __main__ - return __main__.__file__ - - -def get_script_dir(): - """Get 
directory of main script. - - Usually a relative directory (depends on how it was called by the user.)""" - return os.path.dirname(get_script()) - - -def get_experiment_name(): - """Get name for experiment. - - Derived from the absolute filename of the main script, e.g. - "/ham/spam/eggs.py" => "spam-eggs".""" - script = os.path.abspath(get_script()) - script_dir = os.path.basename(os.path.dirname(script)) - script_base = os.path.splitext(os.path.basename(script))[0] - return "%s-%s" % (script_dir, script_base) - - -def get_data_dir(): - """Get data dir for the experiment. - - This is the subdirectory "data" of the directory containing - the main script.""" - return os.path.join(get_script_dir(), "data", get_experiment_name()) - - -def get_repo_base(): - """Get base directory of the repository, as an absolute path. - - Search upwards in the directory tree from the main script until a - directory with a subdirectory named ".hg" is found. - - Abort if the repo base cannot be found.""" - path = os.path.abspath(get_script_dir()) - while os.path.dirname(path) != path: - if os.path.exists(os.path.join(path, ".hg")): - return path - path = os.path.dirname(path) - sys.exit("repo base could not be found") - - -def is_running_on_cluster(): - node = platform.node() - return ("cluster" in node or - node.startswith("gkigrid") or - node in ["habakuk", "turtur"]) - - -def is_test_run(): - return ARGS.test_run == "yes" or (ARGS.test_run == "auto" and - not is_running_on_cluster()) - - -class IssueExperiment(DownwardExperiment): - """Wrapper for DownwardExperiment with a few convenience features.""" - - DEFAULT_TEST_SUITE = "gripper:prob01.pddl" - - # TODO: Add something about errors/exit codes. 
- DEFAULT_TABLE_ATTRIBUTES = [ - "cost", - "coverage", - "evaluations", - "expansions", - "expansions_until_last_jump", - "generated", - "memory", - "quality", - "run_dir", - "score_evaluations", - "score_expansions", - "score_generated", - "score_memory", - "score_search_time", - "score_total_time", - "search_time", - "total_time", - ] - - DEFAULT_SCATTER_PLOT_ATTRIBUTES = [ - "evaluations", - "expansions", - "expansions_until_last_jump", - "initial_h_value", - "memory", - "search_time", - "total_time", - ] - - PORTFOLIO_ATTRIBUTES = [ - "cost", - "coverage", - "plan_length", - ] - - def __init__(self, configs, suite, grid_priority=None, path=None, - repo=None, revisions=None, search_revisions=None, - test_suite=None, **kwargs): - """Create a DownwardExperiment with some convenience features. - - *configs* must be a non-empty dict of {nick: cmdline} pairs - that sets the planner configurations to test. :: - - IssueExperiment(configs={ - "lmcut": ["--search", "astar(lmcut())"], - "ipdb": ["--search", "astar(ipdb())"]}) - - *suite* sets the benchmarks for the experiment. It must be a - single string or a list of strings specifying domains or - tasks. The downward.suites module has many predefined - suites. :: - - IssueExperiment(suite=["grid", "gripper:prob01.pddl"]) - - from downward import suites - IssueExperiment(suite=suites.suite_all()) - IssueExperiment(suite=suites.suite_satisficing_with_ipc11()) - IssueExperiment(suite=suites.suite_optimal()) - - Use *grid_priority* to set the job priority for cluster - experiments. It must be in the range [-1023, 0] where 0 is the - highest priority. By default the priority is 0. :: - - IssueExperiment(grid_priority=-500) - - If *path* is specified, it must be the path to where the - experiment should be built (e.g. - /home/john/experiments/issue123/exp01/). If omitted, the - experiment path is derived automatically from the main - script's filename. 
Example:: - - script = experiments/issue123/exp01.py --> - path = experiments/issue123/data/issue123-exp01/ - - If *repo* is specified, it must be the path to the root of a - local Fast Downward repository. If omitted, the repository - is derived automatically from the main script's path. Example:: - - script = /path/to/fd-repo/experiments/issue123/exp01.py --> - repo = /path/to/fd-repo - - If *revisions* is specified, it should be a non-empty - list of revisions, which specify which planner versions to use - in the experiment. The same versions are used for translator, - preprocessor and search. :: - - IssueExperiment(revisions=["issue123", "4b3d581643"]) - - If *search_revisions* is specified, it should be a non-empty - list of revisions, which specify which search component - versions to use in the experiment. All runs use the - translator and preprocessor component of the first - revision. :: - - IssueExperiment(search_revisions=["default", "issue123"]) - - If you really need to specify the (translator, preprocessor, - planner) triples manually, use the *combinations* parameter - from the base class (might be deprecated soon). The options - *revisions*, *search_revisions* and *combinations* can be - freely mixed, but at least one of them must be given. - - Specify *test_suite* to set the benchmarks for experiment test - runs. By default the first gripper task is used. 
- - IssueExperiment(test_suite=["depot:pfile1", "tpp:p01.pddl"]) - - """ - - if is_test_run(): - kwargs["environment"] = LocalEnvironment() - suite = test_suite or self.DEFAULT_TEST_SUITE - elif "environment" not in kwargs: - kwargs["environment"] = MaiaEnvironment(priority=grid_priority) - - if path is None: - path = get_data_dir() - - if repo is None: - repo = get_repo_base() - - kwargs.setdefault("combinations", []) - - if not any([revisions, search_revisions, kwargs["combinations"]]): - raise ValueError('At least one of "revisions", "search_revisions" ' - 'or "combinations" must be given') - - if revisions: - kwargs["combinations"].extend([ - (Translator(repo, rev), - Preprocessor(repo, rev), - Planner(repo, rev)) - for rev in revisions]) - - if search_revisions: - base_rev = search_revisions[0] - # Use the same nick for all parts to get short revision nick. - kwargs["combinations"].extend([ - (Translator(repo, base_rev, nick=rev), - Preprocessor(repo, base_rev, nick=rev), - Planner(repo, rev, nick=rev)) - for rev in search_revisions]) - - DownwardExperiment.__init__(self, path=path, repo=repo, **kwargs) - - self._config_nicks = [] - for nick, config in configs.items(): - self.add_config(nick, config) - - self.add_suite(suite) - - @property - def revision_nicks(self): - # TODO: Once the add_algorithm() API is available we should get - # rid of the call to _get_rev_nick() and avoid inspecting the - # list of combinations by setting and saving the algorithm nicks. - return [_get_rev_nick(*combo) for combo in self.combinations] - - def add_config(self, nick, config, timeout=None): - DownwardExperiment.add_config(self, nick, config, timeout=timeout) - self._config_nicks.append(nick) - - def add_absolute_report_step(self, **kwargs): - """Add step that makes an absolute report. - - Absolute reports are useful for experiments that don't - compare revisions. - - The report is written to the experiment evaluation directory. 
- - All *kwargs* will be passed to the AbsoluteReport class. If - the keyword argument *attributes* is not specified, a - default list of attributes is used. :: - - exp.add_absolute_report_step(attributes=["coverage"]) - - """ - kwargs.setdefault("attributes", self.DEFAULT_TABLE_ATTRIBUTES) - report = AbsoluteReport(**kwargs) - outfile = get_experiment_name() + "." + report.output_format - self.add_report(report, outfile=outfile) - - def add_comparison_table_step(self, **kwargs): - """Add a step that makes pairwise revision comparisons. - - Create comparative reports for all pairs of Fast Downward - revision triples. Each report pairs up the runs of the same - config and lists the two absolute attribute values and their - difference for all attributes in kwargs["attributes"]. - - All *kwargs* will be passed to the CompareRevisionsReport - class. If the keyword argument *attributes* is not - specified, a default list of attributes is used. :: - - exp.add_comparison_table_step(attributes=["coverage"]) - - """ - kwargs.setdefault("attributes", self.DEFAULT_TABLE_ATTRIBUTES) - - def make_comparison_tables(): - for rev1, rev2 in itertools.combinations(self.revision_nicks, 2): - report = CompareRevisionsReport(rev1, rev2, **kwargs) - outfile = os.path.join(self.eval_dir, - "%s-%s-%s-compare.html" % - (self.name, rev1, rev2)) - report(self.eval_dir, outfile) - - self.add_step(Step("make-comparison-tables", make_comparison_tables)) - - def add_scatter_plot_step(self, attributes=None): - """Add a step that creates scatter plots for all revision pairs. - - Create a scatter plot for each combination of attribute, - configuration and revision pair. If *attributes* is not - specified, a list of common scatter plot attributes is used. - For portfolios all attributes except "cost", "coverage" and - "plan_length" will be ignored. 
:: - - exp.add_scatter_plot_step(attributes=["expansions"]) - - """ - if attributes is None: - attributes = self.DEFAULT_SCATTER_PLOT_ATTRIBUTES - scatter_dir = os.path.join(self.eval_dir, "scatter") - - def is_portfolio(config_nick): - return "fdss" in config_nick - - def make_scatter_plot(config_nick, rev1, rev2, attribute): - name = "-".join([self.name, rev1, rev2, attribute, config_nick]) - print "Make scatter plot for", name - algo1 = "%s-%s" % (rev1, config_nick) - algo2 = "%s-%s" % (rev2, config_nick) - report = ScatterPlotReport( - filter_config=[algo1, algo2], - attributes=[attribute], - get_category=lambda run1, run2: run1["domain"], - legend_location=(1.3, 0.5)) - report(self.eval_dir, - os.path.join(scatter_dir, rev1 + "-" + rev2, name)) - - def make_scatter_plots(): - for config_nick in self._config_nicks: - if is_portfolio(config_nick): - valid_attributes = [ - attr for attr in attributes - if attr in self.PORTFOLIO_ATTRIBUTES] - else: - valid_attributes = attributes - for rev1, rev2 in itertools.combinations( - self.revision_nicks, 2): - for attribute in valid_attributes: - make_scatter_plot(config_nick, rev1, rev2, attribute) - - self.add_step(Step("make-scatter-plots", make_scatter_plots)) diff --git a/experiments/issue269/opt.py b/experiments/issue269/opt.py deleted file mode 100755 index 7129397d27..0000000000 --- a/experiments/issue269/opt.py +++ /dev/null @@ -1,28 +0,0 @@ -#! 
/usr/bin/env python -# -*- coding: utf-8 -*- - -from downward import suites - -import common_setup - - -REVS = ["issue269-base", "issue269-v1"] -LIMITS = {"search_time": 600} -SUITE = suites.suite_optimal_with_ipc11() - -CONFIGS = { - "mas-label-order": ["--search", "astar(merge_and_shrink(shrink_strategy=shrink_bisimulation,label_reduction_system_order=random))"], - "mas-buckets": ["--search", "astar(merge_and_shrink(shrink_strategy=shrink_fh,label_reduction_system_order=regular))"], - "gapdb": ["--search", "astar(gapdb())"], - "ipdb": ["--search", "astar(ipdb())"], -} - -exp = common_setup.IssueExperiment( - search_revisions=REVS, - configs=CONFIGS, - suite=SUITE, - limits=LIMITS, - ) -exp.add_comparison_table_step() - -exp() diff --git a/experiments/issue269/rng-microbenchmark/.gitignore b/experiments/issue269/rng-microbenchmark/.gitignore deleted file mode 100644 index 10e7a1e57c..0000000000 --- a/experiments/issue269/rng-microbenchmark/.gitignore +++ /dev/null @@ -1,3 +0,0 @@ -/.obj/ -/benchmark -/Makefile.depend diff --git a/experiments/issue269/rng-microbenchmark/Makefile b/experiments/issue269/rng-microbenchmark/Makefile deleted file mode 100644 index bee346dd93..0000000000 --- a/experiments/issue269/rng-microbenchmark/Makefile +++ /dev/null @@ -1,148 +0,0 @@ -DOWNWARD_BITWIDTH=32 - -HEADERS = \ - alt_inlined_rng.h \ - inlined_rng.h \ - old_rng.h \ - rng.h \ - -SOURCES = main.cc $(HEADERS:%.h=%.cc) -TARGET = benchmark - -default: release - -OBJECT_SUFFIX_RELEASE = .release -TARGET_SUFFIX_RELEASE = -OBJECT_SUFFIX_DEBUG = .debug -TARGET_SUFFIX_DEBUG = -debug -OBJECT_SUFFIX_PROFILE = .profile -TARGET_SUFFIX_PROFILE = -profile - -OBJECTS_RELEASE = $(SOURCES:%.cc=.obj/%$(OBJECT_SUFFIX_RELEASE).o) -TARGET_RELEASE = $(TARGET)$(TARGET_SUFFIX_RELEASE) - -OBJECTS_DEBUG = $(SOURCES:%.cc=.obj/%$(OBJECT_SUFFIX_DEBUG).o) -TARGET_DEBUG = $(TARGET)$(TARGET_SUFFIX_DEBUG) - -OBJECTS_PROFILE = $(SOURCES:%.cc=.obj/%$(OBJECT_SUFFIX_PROFILE).o) -TARGET_PROFILE = 
$(TARGET)$(TARGET_SUFFIX_PROFILE) - -DEPEND = $(CXX) -MM - -## CXXFLAGS, LDFLAGS, POSTLINKOPT are options for compiler and linker -## that are used for all three targets (release, debug, and profile). -## (POSTLINKOPT are options that appear *after* all object files.) - -ifeq ($(DOWNWARD_BITWIDTH), 32) - BITWIDTHOPT = -m32 -else ifeq ($(DOWNWARD_BITWIDTH), 64) - BITWIDTHOPT = -m64 -else ifneq ($(DOWNWARD_BITWIDTH), native) - $(error Bad value for DOWNWARD_BITWIDTH) -endif - -CXXFLAGS = -CXXFLAGS += -g -CXXFLAGS += $(BITWIDTHOPT) -# Note: we write "-std=c++0x" rather than "-std=c++11" to support gcc 4.4. -CXXFLAGS += -std=c++0x -Wall -Wextra -pedantic -Wno-deprecated -Werror - -LDFLAGS = -LDFLAGS += $(BITWIDTHOPT) -LDFLAGS += -g - -POSTLINKOPT = - -CXXFLAGS_RELEASE = -O3 -DNDEBUG -fomit-frame-pointer -CXXFLAGS_DEBUG = -O3 -CXXFLAGS_PROFILE = -O3 -pg - -LDFLAGS_RELEASE = -LDFLAGS_DEBUG = -LDFLAGS_PROFILE = -pg - -POSTLINKOPT_RELEASE = -POSTLINKOPT_DEBUG = -POSTLINKOPT_PROFILE = - -LDFLAGS_RELEASE += -static -static-libgcc - -POSTLINKOPT_RELEASE += -Wl,-Bstatic -lrt -POSTLINKOPT_DEBUG += -lrt -POSTLINKOPT_PROFILE += -lrt - -all: release debug profile - -## Build rules for the release target follow. - -release: $(TARGET_RELEASE) - -$(TARGET_RELEASE): $(OBJECTS_RELEASE) - $(CXX) $(LDFLAGS) $(LDFLAGS_RELEASE) $(OBJECTS_RELEASE) $(POSTLINKOPT) $(POSTLINKOPT_RELEASE) -o $(TARGET_RELEASE) - -$(OBJECTS_RELEASE): .obj/%$(OBJECT_SUFFIX_RELEASE).o: %.cc - @mkdir -p $$(dirname $@) - $(CXX) $(CXXFLAGS) $(CXXFLAGS_RELEASE) -c $< -o $@ - -## Build rules for the debug target follow. - -debug: $(TARGET_DEBUG) - -$(TARGET_DEBUG): $(OBJECTS_DEBUG) - $(CXX) $(LDFLAGS) $(LDFLAGS_DEBUG) $(OBJECTS_DEBUG) $(POSTLINKOPT) $(POSTLINKOPT_DEBUG) -o $(TARGET_DEBUG) - -$(OBJECTS_DEBUG): .obj/%$(OBJECT_SUFFIX_DEBUG).o: %.cc - @mkdir -p $$(dirname $@) - $(CXX) $(CXXFLAGS) $(CXXFLAGS_DEBUG) -c $< -o $@ - -## Build rules for the profile target follow. 
- -profile: $(TARGET_PROFILE) - -$(TARGET_PROFILE): $(OBJECTS_PROFILE) - $(CXX) $(LDFLAGS) $(LDFLAGS_PROFILE) $(OBJECTS_PROFILE) $(POSTLINKOPT) $(POSTLINKOPT_PROFILE) -o $(TARGET_PROFILE) - -$(OBJECTS_PROFILE): .obj/%$(OBJECT_SUFFIX_PROFILE).o: %.cc - @mkdir -p $$(dirname $@) - $(CXX) $(CXXFLAGS) $(CXXFLAGS_PROFILE) -c $< -o $@ - -## Additional targets follow. - -PROFILE: $(TARGET_PROFILE) - ./$(TARGET_PROFILE) $(ARGS_PROFILE) - gprof $(TARGET_PROFILE) | (cleanup-profile 2> /dev/null || cat) > PROFILE - -clean: - rm -rf .obj - rm -f *~ *.pyc - rm -f Makefile.depend gmon.out PROFILE core - rm -f sas_plan - -distclean: clean - rm -f $(TARGET_RELEASE) $(TARGET_DEBUG) $(TARGET_PROFILE) - -## NOTE: If we just call gcc -MM on a source file that lives within a -## subdirectory, it will strip the directory part in the output. Hence -## the for loop with the sed call. - -Makefile.depend: $(SOURCES) $(HEADERS) - rm -f Makefile.temp - for source in $(SOURCES) ; do \ - $(DEPEND) $(CXXFLAGS) $$source > Makefile.temp0; \ - objfile=$${source%%.cc}.o; \ - sed -i -e "s@^[^:]*:@$$objfile:@" Makefile.temp0; \ - cat Makefile.temp0 >> Makefile.temp; \ - done - rm -f Makefile.temp0 Makefile.depend - sed -e "s@\(.*\)\.o:\(.*\)@.obj/\1$(OBJECT_SUFFIX_RELEASE).o:\2@" Makefile.temp >> Makefile.depend - sed -e "s@\(.*\)\.o:\(.*\)@.obj/\1$(OBJECT_SUFFIX_DEBUG).o:\2@" Makefile.temp >> Makefile.depend - sed -e "s@\(.*\)\.o:\(.*\)@.obj/\1$(OBJECT_SUFFIX_PROFILE).o:\2@" Makefile.temp >> Makefile.depend - rm -f Makefile.temp - -ifneq ($(MAKECMDGOALS),clean) - ifneq ($(MAKECMDGOALS),distclean) - -include Makefile.depend - endif -endif - -.PHONY: default all release debug profile clean distclean diff --git a/experiments/issue269/rng-microbenchmark/alt_inlined_rng.cc b/experiments/issue269/rng-microbenchmark/alt_inlined_rng.cc deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/experiments/issue269/rng-microbenchmark/alt_inlined_rng.h 
b/experiments/issue269/rng-microbenchmark/alt_inlined_rng.h deleted file mode 100644 index c254e0ca74..0000000000 --- a/experiments/issue269/rng-microbenchmark/alt_inlined_rng.h +++ /dev/null @@ -1,28 +0,0 @@ -#ifndef ALT_INLINED_RNG_H -#define ALT_INLINED_RNG_H - -#include -#include - -class AltInlinedRandomNumberGenerator { - std::mt19937 rng; - std::uniform_real_distribution double_distribution { - 0.0, 1.0 - }; -public: - explicit AltInlinedRandomNumberGenerator(int seed) { - rng.seed(seed); - } - - double operator()() { - return double_distribution(rng); - } - - int operator()(int bound) { - assert(bound > 0); - std::uniform_int_distribution distribution(0, bound - 1); - return distribution(rng); - } -}; - -#endif diff --git a/experiments/issue269/rng-microbenchmark/inlined_rng.cc b/experiments/issue269/rng-microbenchmark/inlined_rng.cc deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/experiments/issue269/rng-microbenchmark/inlined_rng.h b/experiments/issue269/rng-microbenchmark/inlined_rng.h deleted file mode 100644 index 5044bacf15..0000000000 --- a/experiments/issue269/rng-microbenchmark/inlined_rng.h +++ /dev/null @@ -1,26 +0,0 @@ -#ifndef INLINED_RNG_H -#define INLINED_RNG_H - -#include -#include - -class InlinedRandomNumberGenerator { - std::mt19937 rng; -public: - explicit InlinedRandomNumberGenerator(int seed) { - rng.seed(seed); - } - - double operator()() { - std::uniform_real_distribution distribution(0.0, 1.0); - return distribution(rng); - } - - int operator()(int bound) { - assert(bound > 0); - std::uniform_int_distribution distribution(0, bound - 1); - return distribution(rng); - } -}; - -#endif diff --git a/experiments/issue269/rng-microbenchmark/main.cc b/experiments/issue269/rng-microbenchmark/main.cc deleted file mode 100644 index 47c2b9a6cd..0000000000 --- a/experiments/issue269/rng-microbenchmark/main.cc +++ /dev/null @@ -1,63 +0,0 @@ -#include -#include -#include -#include - -#include "alt_inlined_rng.h" -#include 
"inlined_rng.h" -#include "old_rng.h" -#include "rng.h" - -using namespace std; - - -void benchmark(const string &desc, int num_calls, - const function &func) { - cout << "Running " << desc << " " << num_calls << " times:" << flush; - clock_t start = clock(); - for (int i = 0; i < num_calls; ++i) - func(); - clock_t end = clock(); - double duration = static_cast(end - start) / CLOCKS_PER_SEC; - cout << " " << duration << " seconds" << endl; -} - - -int main(int, char **) { - const int NUM_ITERATIONS = 100000000; - - const int SEED = 2014; - OldRandomNumberGenerator old_rng(SEED); - RandomNumberGenerator new_rng(SEED); - InlinedRandomNumberGenerator inlined_rng(SEED); - AltInlinedRandomNumberGenerator alt_inlined_rng(SEED); - - benchmark("nothing", NUM_ITERATIONS, [] () {}); - cout << endl; - benchmark("random double (old RNG)", - NUM_ITERATIONS, - [&]() {old_rng();}); - benchmark("random double (new RNG, old distribution)", - NUM_ITERATIONS, - [&]() {new_rng.get_double_old();}); - benchmark("random double (new RNG)", - NUM_ITERATIONS, - [&]() {new_rng();}); - benchmark("random double (inlined RNG)", - NUM_ITERATIONS, - [&]() {inlined_rng();}); - benchmark("random double (alternative inlined RNG)", - NUM_ITERATIONS, - [&]() {alt_inlined_rng();}); - cout << endl; - benchmark("random int in 0..999 (old RNG)", - NUM_ITERATIONS, - [&]() {old_rng(1000);}); - benchmark("random int in 0..999 (new RNG, old distribution)", - NUM_ITERATIONS, - [&]() {new_rng.get_int_old(1000);}); - benchmark("random int in 0..999 (inlined RNG)", - NUM_ITERATIONS, - [&]() {inlined_rng(1000);}); - return 0; -} diff --git a/experiments/issue269/rng-microbenchmark/old_rng.cc b/experiments/issue269/rng-microbenchmark/old_rng.cc deleted file mode 100644 index b89fd7ce7f..0000000000 --- a/experiments/issue269/rng-microbenchmark/old_rng.cc +++ /dev/null @@ -1,137 +0,0 @@ -/* - Mersenne Twister Random Number Generator. - Based on the C Code by Takuji Nishimura and Makoto Matsumoto. 
- http://www.math.keio.ac.jp/~matumoto/emt.html -*/ - -#include "old_rng.h" - -#include -using namespace std; - -static const int M = 397; -static const unsigned int MATRIX_A = 0x9908b0dfU; -static const unsigned int UPPER_MASK = 0x80000000U; -static const unsigned int LOWER_MASK = 0x7fffffffU; - -OldRandomNumberGenerator::OldRandomNumberGenerator() { - seed(static_cast(time(0))); -} - -OldRandomNumberGenerator::OldRandomNumberGenerator(int s) { - seed(s); -} - -OldRandomNumberGenerator::OldRandomNumberGenerator( - unsigned int *init_key, int key_length) { - seed(init_key, key_length); -} - -OldRandomNumberGenerator::OldRandomNumberGenerator( - const OldRandomNumberGenerator ©) { - *this = copy; -} - -OldRandomNumberGenerator &OldRandomNumberGenerator::operator=( - const OldRandomNumberGenerator ©) { - for (int i = 0; i < N; ++i) - mt[i] = copy.mt[i]; - mti = copy.mti; - return *this; -} - -void OldRandomNumberGenerator::seed(int se) { - unsigned int s = (static_cast(se) << 1) + 1; - // Seeds should not be zero. Other possible solutions (such as s |= 1) - // lead to more confusion, because often-used low seeds like 2 and 3 would - // be identical. This leads to collisions only for rarely used seeds (see - // note in header file). - mt[0] = s & 0xffffffffUL; - for (mti = 1; mti < N; ++mti) { - mt[mti] = (1812433253UL * (mt[mti - 1] ^ (mt[mti - 1] >> 30)) + mti); - mt[mti] &= 0xffffffffUL; - } -} - -void OldRandomNumberGenerator::seed(unsigned int *init_key, int key_length) { - int i = 1, j = 0, k = (N > key_length ? 
N : key_length); - seed(19650218UL); - for (; k; --k) { - mt[i] = (mt[i] ^ ((mt[i - 1] ^ (mt[i - 1] >> 30)) * 1664525UL)) + - init_key[j] + j; - mt[i] &= 0xffffffffUL; - ++i; - ++j; - if (i >= N) { - mt[0] = mt[N - 1]; - i = 1; - } - if (j >= key_length) - j = 0; - } - for (k = N - 1; k; --k) { - mt[i] = (mt[i] ^ ((mt[i - 1] ^ (mt[i - 1] >> 30)) * 1566083941UL)) - i; - mt[i] &= 0xffffffffUL; - ++i; - if (i >= N) { - mt[0] = mt[N - 1]; - i = 1; - } - } - mt[0] = 0x80000000UL; -} - -unsigned int OldRandomNumberGenerator::next32() { - unsigned int y; - static unsigned int mag01[2] = { - 0x0UL, MATRIX_A - }; - if (mti >= N) { - int kk; - for (kk = 0; kk < N - M; ++kk) { - y = (mt[kk] & UPPER_MASK) | (mt[kk + 1] & LOWER_MASK); - mt[kk] = mt[kk + M] ^ (y >> 1) ^ mag01[y & 0x1UL]; - } - for (; kk < N - 1; ++kk) { - y = (mt[kk] & UPPER_MASK) | (mt[kk + 1] & LOWER_MASK); - mt[kk] = mt[kk + (M - N)] ^ (y >> 1) ^ mag01[y & 0x1UL]; - } - y = (mt[N - 1] & UPPER_MASK) | (mt[0] & LOWER_MASK); - mt[N - 1] = mt[M - 1] ^ (y >> 1) ^ mag01[y & 0x1UL]; - mti = 0; - } - y = mt[mti++]; - y ^= (y >> 11); - y ^= (y << 7) & 0x9d2c5680UL; - y ^= (y << 15) & 0xefc60000UL; - y ^= (y >> 18); - return y; -} - -int OldRandomNumberGenerator::next31() { - return static_cast(next32() >> 1); -} - -double OldRandomNumberGenerator::next_closed() { - unsigned int a = next32() >> 5, b = next32() >> 6; - return (a * 67108864.0 + b) * (1.0 / 9007199254740991.0); -} - -double OldRandomNumberGenerator::next_half_open() { - unsigned int a = next32() >> 5, b = next32() >> 6; - return (a * 67108864.0 + b) * (1.0 / 9007199254740992.0); -} - -double OldRandomNumberGenerator::next_open() { - unsigned int a = next32() >> 5, b = next32() >> 6; - return (0.5 + a * 67108864.0 + b) * (1.0 / 9007199254740991.0); -} - -int OldRandomNumberGenerator::next(int bound) { - unsigned int value; - do { - value = next31(); - } while (value + static_cast(bound) >= 0x80000000UL); - // Just using modulo doesn't lead to uniform 
distribution. This does. - return static_cast(value % bound); -} diff --git a/experiments/issue269/rng-microbenchmark/old_rng.h b/experiments/issue269/rng-microbenchmark/old_rng.h deleted file mode 100644 index 74a9e9dc87..0000000000 --- a/experiments/issue269/rng-microbenchmark/old_rng.h +++ /dev/null @@ -1,52 +0,0 @@ -#ifndef OLD_RNG_H -#define OLD_RNG_H - -class OldRandomNumberGenerator { - static const int N = 624; - unsigned int mt[N]; - int mti; -public: - OldRandomNumberGenerator(); // seed with time-dependent value - OldRandomNumberGenerator(int seed); // seed with int; see comments for seed() - OldRandomNumberGenerator(unsigned int *array, int count); // seed with array - OldRandomNumberGenerator(const OldRandomNumberGenerator ©); - OldRandomNumberGenerator &operator=(const OldRandomNumberGenerator ©); - - void seed(int s); - void seed(unsigned int *array, int len); - - unsigned int next32(); // random integer in [0..2^32-1] - int next31(); // random integer in [0..2^31-1] - double next_half_open(); // random float in [0..1), 2^53 possible values - double next_closed(); // random float in [0..1], 2^53 possible values - double next_open(); // random float in (0..1), 2^53 possible values - int next(int bound); // random integer in [0..bound), bound < 2^31 - int operator()(int bound) { // same as next() - return next(bound); - } - double operator()() { // same as next_half_open() - return next_half_open(); - } -}; - -/* - TODO: Add a static assertion that guarantees that ints are 32 bit. - In cases where they are not, need to adapt the code. - */ - -/* - Notes on seeding - - 1. Seeding with an integer - To avoid different seeds mapping to the same sequence, follow one of - the following two conventions: - a) Only use seeds in 0..2^31-1 (preferred) - b) Only use seeds in -2^30..2^30-1 (2-complement machines only) - - 2. 
Seeding with an array (die-hard seed method) - The length of the array, len, can be arbitrarily high, but for lengths greater - than N, collisions are common. If the seed is of high quality, using more than - N values does not make sense. -*/ - -#endif diff --git a/experiments/issue269/rng-microbenchmark/rng.cc b/experiments/issue269/rng-microbenchmark/rng.cc deleted file mode 100644 index 95163fc2ad..0000000000 --- a/experiments/issue269/rng-microbenchmark/rng.cc +++ /dev/null @@ -1,57 +0,0 @@ -#include "rng.h" - -#include -#include - -using namespace std; - - -RandomNumberGenerator::RandomNumberGenerator() { - unsigned int secs = chrono::system_clock::now().time_since_epoch().count(); - seed(secs); -} - -RandomNumberGenerator::RandomNumberGenerator(int seed_) { - seed(seed_); -} - -void RandomNumberGenerator::seed(int seed) { - rng.seed(seed); -} - -double RandomNumberGenerator::operator()() { - uniform_real_distribution distribution(0.0, 1.0); - return distribution(rng); -} - -int RandomNumberGenerator::operator()(int bound) { - assert(bound > 0); - uniform_int_distribution distribution(0, bound - 1); - return distribution(rng); -} - - -unsigned int RandomNumberGenerator::next32_old() { - return rng(); -} - - -int RandomNumberGenerator::next31_old() { - return static_cast(next32_old() >> 1); -} - - -double RandomNumberGenerator::get_double_old() { - unsigned int a = next32_old() >> 5, b = next32_old() >> 6; - return (a * 67108864.0 + b) * (1.0 / 9007199254740992.0); -} - - -int RandomNumberGenerator::get_int_old(int bound) { - unsigned int value; - do { - value = next31_old(); - } while (value + static_cast(bound) >= 0x80000000UL); - // Just using modulo doesn't lead to uniform distribution. This does. 
- return static_cast(value % bound); -} diff --git a/experiments/issue269/rng-microbenchmark/rng.h b/experiments/issue269/rng-microbenchmark/rng.h deleted file mode 100644 index 672e66925e..0000000000 --- a/experiments/issue269/rng-microbenchmark/rng.h +++ /dev/null @@ -1,33 +0,0 @@ -#ifndef RNG_H -#define RNG_H - -#include -#include -#include - -class RandomNumberGenerator { - // Mersenne Twister random number generator. - std::mt19937 rng; -public: - RandomNumberGenerator(); // seed with time-dependent value - explicit RandomNumberGenerator(int seed_); // seed with integer - RandomNumberGenerator(const RandomNumberGenerator &) = delete; - RandomNumberGenerator &operator=(const RandomNumberGenerator &) = delete; - - void seed(int seed); - - double operator()(); // random double in [0..1), 2^53 possible values - int operator()(int bound); // random integer in [0..bound), bound < 2^31 - - unsigned int next32_old(); - int next31_old(); - double get_double_old(); - int get_int_old(int bound); - - template - void shuffle(std::vector &vec) { - std::shuffle(vec.begin(), vec.end(), rng); - } -}; - -#endif diff --git a/experiments/issue269/sat.py b/experiments/issue269/sat.py deleted file mode 100755 index 76120ed4a9..0000000000 --- a/experiments/issue269/sat.py +++ /dev/null @@ -1,28 +0,0 @@ -#! 
/usr/bin/env python -# -*- coding: utf-8 -*- - -from downward import suites - -import common_setup - - -REVS = ["issue269-base", "issue269-v1"] -LIMITS = {"search_time": 600} -SUITE = suites.suite_satisficing_with_ipc11() - -CONFIGS = { - "random-successors": ["--search", "lazy_greedy(ff(),randomize_successors=true)"], - "pareto-open-list": [ - "--heuristic", "h=ff()", - "--search", "eager(pareto([sum([g(), h]), h]), reopen_closed=true, pathmax=false,f_eval=sum([g(), h]))"], -} - -exp = common_setup.IssueExperiment( - search_revisions=REVS, - configs=CONFIGS, - suite=SUITE, - limits=LIMITS, - ) -exp.add_comparison_table_step() - -exp() diff --git a/experiments/issue311/common_setup.py b/experiments/issue311/common_setup.py deleted file mode 100644 index 338314a650..0000000000 --- a/experiments/issue311/common_setup.py +++ /dev/null @@ -1,384 +0,0 @@ -# -*- coding: utf-8 -*- - -import itertools -import os -import platform -import subprocess -import sys - -from lab.experiment import ARGPARSER -from lab import tools - -from downward.experiment import FastDownwardExperiment -from downward.reports.absolute import AbsoluteReport -from downward.reports.compare import ComparativeReport -from downward.reports.scatter import ScatterPlotReport - -from relativescatter import RelativeScatterPlotReport - - -def parse_args(): - ARGPARSER.add_argument( - "--test", - choices=["yes", "no", "auto"], - default="auto", - dest="test_run", - help="test experiment locally on a small suite if --test=yes or " - "--test=auto and we are not on a cluster") - return ARGPARSER.parse_args() - -ARGS = parse_args() - - -DEFAULT_OPTIMAL_SUITE = [ - 'airport', 'barman-opt11-strips', 'barman-opt14-strips', 'blocks', - 'childsnack-opt14-strips', 'depot', 'driverlog', - 'elevators-opt08-strips', 'elevators-opt11-strips', - 'floortile-opt11-strips', 'floortile-opt14-strips', 'freecell', - 'ged-opt14-strips', 'grid', 'gripper', 'hiking-opt14-strips', - 'logistics00', 'logistics98', 'miconic', 'movie', 
'mprime', - 'mystery', 'nomystery-opt11-strips', 'openstacks-opt08-strips', - 'openstacks-opt11-strips', 'openstacks-opt14-strips', - 'openstacks-strips', 'parcprinter-08-strips', - 'parcprinter-opt11-strips', 'parking-opt11-strips', - 'parking-opt14-strips', 'pathways-noneg', 'pegsol-08-strips', - 'pegsol-opt11-strips', 'pipesworld-notankage', - 'pipesworld-tankage', 'psr-small', 'rovers', 'satellite', - 'scanalyzer-08-strips', 'scanalyzer-opt11-strips', - 'sokoban-opt08-strips', 'sokoban-opt11-strips', 'storage', - 'tetris-opt14-strips', 'tidybot-opt11-strips', - 'tidybot-opt14-strips', 'tpp', 'transport-opt08-strips', - 'transport-opt11-strips', 'transport-opt14-strips', - 'trucks-strips', 'visitall-opt11-strips', 'visitall-opt14-strips', - 'woodworking-opt08-strips', 'woodworking-opt11-strips', - 'zenotravel'] - -DEFAULT_SATISFICING_SUITE = [ - 'airport', 'assembly', 'barman-sat11-strips', - 'barman-sat14-strips', 'blocks', 'cavediving-14-adl', - 'childsnack-sat14-strips', 'citycar-sat14-adl', 'depot', - 'driverlog', 'elevators-sat08-strips', 'elevators-sat11-strips', - 'floortile-sat11-strips', 'floortile-sat14-strips', 'freecell', - 'ged-sat14-strips', 'grid', 'gripper', 'hiking-sat14-strips', - 'logistics00', 'logistics98', 'maintenance-sat14-adl', 'miconic', - 'miconic-fulladl', 'miconic-simpleadl', 'movie', 'mprime', - 'mystery', 'nomystery-sat11-strips', 'openstacks', - 'openstacks-sat08-adl', 'openstacks-sat08-strips', - 'openstacks-sat11-strips', 'openstacks-sat14-strips', - 'openstacks-strips', 'optical-telegraphs', 'parcprinter-08-strips', - 'parcprinter-sat11-strips', 'parking-sat11-strips', - 'parking-sat14-strips', 'pathways', 'pathways-noneg', - 'pegsol-08-strips', 'pegsol-sat11-strips', 'philosophers', - 'pipesworld-notankage', 'pipesworld-tankage', 'psr-large', - 'psr-middle', 'psr-small', 'rovers', 'satellite', - 'scanalyzer-08-strips', 'scanalyzer-sat11-strips', 'schedule', - 'sokoban-sat08-strips', 'sokoban-sat11-strips', 'storage', - 
'tetris-sat14-strips', 'thoughtful-sat14-strips', - 'tidybot-sat11-strips', 'tpp', 'transport-sat08-strips', - 'transport-sat11-strips', 'transport-sat14-strips', 'trucks', - 'trucks-strips', 'visitall-sat11-strips', 'visitall-sat14-strips', - 'woodworking-sat08-strips', 'woodworking-sat11-strips', - 'zenotravel'] - - -def get_script(): - """Get file name of main script.""" - return tools.get_script_path() - - -def get_script_dir(): - """Get directory of main script. - - Usually a relative directory (depends on how it was called by the user.)""" - return os.path.dirname(get_script()) - - -def get_experiment_name(): - """Get name for experiment. - - Derived from the absolute filename of the main script, e.g. - "/ham/spam/eggs.py" => "spam-eggs".""" - script = os.path.abspath(get_script()) - script_dir = os.path.basename(os.path.dirname(script)) - script_base = os.path.splitext(os.path.basename(script))[0] - return "%s-%s" % (script_dir, script_base) - - -def get_data_dir(): - """Get data dir for the experiment. - - This is the subdirectory "data" of the directory containing - the main script.""" - return os.path.join(get_script_dir(), "data", get_experiment_name()) - - -def get_repo_base(): - """Get base directory of the repository, as an absolute path. - - Search upwards in the directory tree from the main script until a - directory with a subdirectory named ".hg" is found. 
- - Abort if the repo base cannot be found.""" - path = os.path.abspath(get_script_dir()) - while os.path.dirname(path) != path: - if os.path.exists(os.path.join(path, ".hg")): - return path - path = os.path.dirname(path) - sys.exit("repo base could not be found") - - -def is_running_on_cluster(): - node = platform.node() - return node.endswith(".scicore.unibas.ch") or node.endswith(".cluster.bc2.ch") - - -def is_test_run(): - return ARGS.test_run == "yes" or ( - ARGS.test_run == "auto" and not is_running_on_cluster()) - - -def get_algo_nick(revision, config_nick): - return "{revision}-{config_nick}".format(**locals()) - - -class IssueConfig(object): - """Hold information about a planner configuration. - - See FastDownwardExperiment.add_algorithm() for documentation of the - constructor's options. - - """ - def __init__(self, nick, component_options, - build_options=None, driver_options=None): - self.nick = nick - self.component_options = component_options - self.build_options = build_options - self.driver_options = driver_options - - -class IssueExperiment(FastDownwardExperiment): - """Subclass of FastDownwardExperiment with some convenience features.""" - - DEFAULT_TEST_SUITE = ["depot:p01.pddl", "gripper:prob01.pddl"] - - DEFAULT_TABLE_ATTRIBUTES = [ - "cost", - "coverage", - "error", - "evaluations", - "expansions", - "expansions_until_last_jump", - "generated", - "memory", - "planner_memory", - "planner_time", - "quality", - "run_dir", - "score_evaluations", - "score_expansions", - "score_generated", - "score_memory", - "score_search_time", - "score_total_time", - "search_time", - "total_time", - ] - - DEFAULT_SCATTER_PLOT_ATTRIBUTES = [ - "evaluations", - "expansions", - "expansions_until_last_jump", - "initial_h_value", - "memory", - "search_time", - "total_time", - ] - - PORTFOLIO_ATTRIBUTES = [ - "cost", - "coverage", - "error", - "plan_length", - "run_dir", - ] - - def __init__(self, revisions=None, configs=None, path=None, **kwargs): - """ - - You can 
either specify both *revisions* and *configs* or none - of them. If they are omitted, you will need to call - exp.add_algorithm() manually. - - If *revisions* is given, it must be a non-empty list of - revision identifiers, which specify which planner versions to - use in the experiment. The same versions are used for - translator, preprocessor and search. :: - - IssueExperiment(revisions=["issue123", "4b3d581643"], ...) - - If *configs* is given, it must be a non-empty list of - IssueConfig objects. :: - - IssueExperiment(..., configs=[ - IssueConfig("ff", ["--search", "eager_greedy(ff())"]), - IssueConfig( - "lama", [], - driver_options=["--alias", "seq-sat-lama-2011"]), - ]) - - If *path* is specified, it must be the path to where the - experiment should be built (e.g. - /home/john/experiments/issue123/exp01/). If omitted, the - experiment path is derived automatically from the main - script's filename. Example:: - - script = experiments/issue123/exp01.py --> - path = experiments/issue123/data/issue123-exp01/ - - """ - - path = path or get_data_dir() - - FastDownwardExperiment.__init__(self, path=path, **kwargs) - - if (revisions and not configs) or (not revisions and configs): - raise ValueError( - "please provide either both or none of revisions and configs") - - for rev in revisions: - for config in configs: - self.add_algorithm( - get_algo_nick(rev, config.nick), - get_repo_base(), - rev, - config.component_options, - build_options=config.build_options, - driver_options=config.driver_options) - - self._revisions = revisions - self._configs = configs - - @classmethod - def _is_portfolio(cls, config_nick): - return "fdss" in config_nick - - @classmethod - def get_supported_attributes(cls, config_nick, attributes): - if cls._is_portfolio(config_nick): - return [attr for attr in attributes - if attr in cls.PORTFOLIO_ATTRIBUTES] - return attributes - - def add_absolute_report_step(self, **kwargs): - """Add step that makes an absolute report. 
- - Absolute reports are useful for experiments that don't compare - revisions. - - The report is written to the experiment evaluation directory. - - All *kwargs* will be passed to the AbsoluteReport class. If the - keyword argument *attributes* is not specified, a default list - of attributes is used. :: - - exp.add_absolute_report_step(attributes=["coverage"]) - - """ - kwargs.setdefault("attributes", self.DEFAULT_TABLE_ATTRIBUTES) - report = AbsoluteReport(**kwargs) - outfile = os.path.join( - self.eval_dir, - get_experiment_name() + "." + report.output_format) - self.add_report(report, outfile=outfile) - self.add_step( - 'publish-absolute-report', subprocess.call, ['publish', outfile]) - - def add_comparison_table_step(self, **kwargs): - """Add a step that makes pairwise revision comparisons. - - Create comparative reports for all pairs of Fast Downward - revisions. Each report pairs up the runs of the same config and - lists the two absolute attribute values and their difference - for all attributes in kwargs["attributes"]. - - All *kwargs* will be passed to the CompareConfigsReport class. - If the keyword argument *attributes* is not specified, a - default list of attributes is used. 
:: - - exp.add_comparison_table_step(attributes=["coverage"]) - - """ - kwargs.setdefault("attributes", self.DEFAULT_TABLE_ATTRIBUTES) - - def make_comparison_tables(): - for rev1, rev2 in itertools.combinations(self._revisions, 2): - compared_configs = [] - for config in self._configs: - config_nick = config.nick - compared_configs.append( - ("%s-%s" % (rev1, config_nick), - "%s-%s" % (rev2, config_nick), - "Diff (%s)" % config_nick)) - report = ComparativeReport(compared_configs, **kwargs) - outfile = os.path.join( - self.eval_dir, - "%s-%s-%s-compare.%s" % ( - self.name, rev1, rev2, report.output_format)) - report(self.eval_dir, outfile) - - def publish_comparison_tables(): - for rev1, rev2 in itertools.combinations(self._revisions, 2): - outfile = os.path.join( - self.eval_dir, - "%s-%s-%s-compare.html" % (self.name, rev1, rev2)) - subprocess.call(["publish", outfile]) - - self.add_step("make-comparison-tables", make_comparison_tables) - self.add_step( - "publish-comparison-tables", publish_comparison_tables) - - def add_scatter_plot_step(self, relative=False, attributes=None): - """Add step creating (relative) scatter plots for all revision pairs. - - Create a scatter plot for each combination of attribute, - configuration and revisions pair. If *attributes* is not - specified, a list of common scatter plot attributes is used. - For portfolios all attributes except "cost", "coverage" and - "plan_length" will be ignored. 
:: - - exp.add_scatter_plot_step(attributes=["expansions"]) - - """ - if relative: - report_class = RelativeScatterPlotReport - scatter_dir = os.path.join(self.eval_dir, "scatter-relative") - step_name = "make-relative-scatter-plots" - else: - report_class = ScatterPlotReport - scatter_dir = os.path.join(self.eval_dir, "scatter-absolute") - step_name = "make-absolute-scatter-plots" - if attributes is None: - attributes = self.DEFAULT_SCATTER_PLOT_ATTRIBUTES - - def make_scatter_plot(config_nick, rev1, rev2, attribute): - name = "-".join([self.name, rev1, rev2, attribute, config_nick]) - print "Make scatter plot for", name - algo1 = "{}-{}".format(rev1, config_nick) - algo2 = "{}-{}".format(rev2, config_nick) - report = report_class( - filter_config=[algo1, algo2], - attributes=[attribute], - get_category=lambda run1, run2: run1["domain"], - legend_location=(1.3, 0.5)) - report( - self.eval_dir, - os.path.join(scatter_dir, rev1 + "-" + rev2, name)) - - def make_scatter_plots(): - for config in self._configs: - for rev1, rev2 in itertools.combinations(self._revisions, 2): - for attribute in self.get_supported_attributes( - config.nick, attributes): - make_scatter_plot(config.nick, rev1, rev2, attribute) - - self.add_step(step_name, make_scatter_plots) diff --git a/experiments/issue311/relativescatter.py b/experiments/issue311/relativescatter.py deleted file mode 100644 index f74cb6e721..0000000000 --- a/experiments/issue311/relativescatter.py +++ /dev/null @@ -1,105 +0,0 @@ -# -*- coding: utf-8 -*- - -from collections import defaultdict - -from matplotlib import ticker - -from downward.reports.scatter import ScatterPlotReport -from downward.reports.plot import PlotReport, Matplotlib, MatplotlibPlot - - -# TODO: handle outliers - -# TODO: this is mostly copied from ScatterMatplotlib (scatter.py) -class RelativeScatterMatplotlib(Matplotlib): - @classmethod - def _plot(cls, report, axes, categories, styles): - # Display grid - axes.grid(b=True, linestyle='-', 
color='0.75') - - has_points = False - # Generate the scatter plots - for category, coords in sorted(categories.items()): - X, Y = zip(*coords) - axes.scatter(X, Y, s=42, label=category, **styles[category]) - if X and Y: - has_points = True - - if report.xscale == 'linear' or report.yscale == 'linear': - plot_size = report.missing_val * 1.01 - else: - plot_size = report.missing_val * 1.25 - - # make 5 ticks above and below 1 - yticks = [] - tick_step = report.ylim_top**(1/5.0) - for i in xrange(-5, 6): - yticks.append(tick_step**i) - axes.set_yticks(yticks) - axes.get_yaxis().set_major_formatter(ticker.ScalarFormatter()) - - axes.set_xlim(report.xlim_left or -1, report.xlim_right or plot_size) - axes.set_ylim(report.ylim_bottom or -1, report.ylim_top or plot_size) - - for axis in [axes.xaxis, axes.yaxis]: - MatplotlibPlot.change_axis_formatter( - axis, - report.missing_val if report.show_missing else None) - return has_points - - -class RelativeScatterPlotReport(ScatterPlotReport): - """ - Generate a scatter plot that shows a relative comparison of two - algorithms with regard to the given attribute. The attribute value - of algorithm 1 is shown on the x-axis and the relation to the value - of algorithm 2 on the y-axis. - """ - - def __init__(self, show_missing=True, get_category=None, **kwargs): - ScatterPlotReport.__init__(self, show_missing, get_category, **kwargs) - if self.output_format == 'tex': - raise "not supported" - else: - self.writer = RelativeScatterMatplotlib - - def _fill_categories(self, runs): - # We discard the *runs* parameter. 
- # Map category names to value tuples - categories = defaultdict(list) - self.ylim_bottom = 2 - self.ylim_top = 0.5 - self.xlim_left = float("inf") - for (domain, problem), runs in self.problem_runs.items(): - if len(runs) != 2: - continue - run1, run2 = runs - assert (run1['algorithm'] == self.algorithms[0] and - run2['algorithm'] == self.algorithms[1]) - val1 = run1.get(self.attribute) - val2 = run2.get(self.attribute) - if val1 is None or val2 is None: - continue - category = self.get_category(run1, run2) - assert val1 > 0, (domain, problem, self.algorithms[0], val1) - assert val2 > 0, (domain, problem, self.algorithms[1], val2) - x = val1 - y = val2 / float(val1) - - categories[category].append((x, y)) - - self.ylim_top = max(self.ylim_top, y) - self.ylim_bottom = min(self.ylim_bottom, y) - self.xlim_left = min(self.xlim_left, x) - - # center around 1 - if self.ylim_bottom < 1: - self.ylim_top = max(self.ylim_top, 1 / float(self.ylim_bottom)) - if self.ylim_top > 1: - self.ylim_bottom = min(self.ylim_bottom, 1 / float(self.ylim_top)) - return categories - - def _set_scales(self, xscale, yscale): - # ScatterPlot uses log-scaling on the x-axis by default. - PlotReport._set_scales( - self, xscale or self.attribute.scale or 'log', 'log') diff --git a/experiments/issue311/v1.py b/experiments/issue311/v1.py deleted file mode 100755 index 4ba558c3d6..0000000000 --- a/experiments/issue311/v1.py +++ /dev/null @@ -1,64 +0,0 @@ -#! /usr/bin/env python -# -*- coding: utf-8 -*- - -''' -Script to check correctness of eager_wastar. - -Comparing eager_wastar with the equivalent version using -eager(single(w*h), reopen_closed=true). - -Results should be the same for a given same value w. 
-''' - -import os - -from lab.environments import LocalEnvironment, BaselSlurmEnvironment - -import common_setup -from common_setup import IssueConfig, IssueExperiment -from relativescatter import RelativeScatterPlotReport - -DIR = os.path.dirname(os.path.abspath(__file__)) -BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"] -REVISIONS = ["issue311"] -CONFIGS = [ - IssueConfig('eager_wastar_w1', ['--search', 'eager_wastar([lmcut], w=1)'], [], driver_options=['--overall-time-limit', '5m']), - IssueConfig('eager_wastar_w2', ['--search', 'eager_wastar([lmcut], w=2)'], [], driver_options=['--overall-time-limit', '5m']), - IssueConfig('eager_wastar_w5', ['--search', 'eager_wastar([lmcut], w=5)'], [], driver_options=['--overall-time-limit', '5m']), - IssueConfig('eager_wastar_w100', ['--search', 'eager_wastar([lmcut], w=100)'], [], driver_options=['--overall-time-limit', '5m']), - - IssueConfig('eager_single_openlist_w1', ['--search', 'eager(single(sum([g(), weight(lmcut, 1)])), reopen_closed=true)'], [], driver_options=['--overall-time-limit', '5m']), - IssueConfig('eager_single_openlist_w2', ['--search', 'eager(single(sum([g(), weight(lmcut, 2)])), reopen_closed=true)'], [], driver_options=['--overall-time-limit', '5m']), - IssueConfig('eager_single_openlist_w5', ['--search', 'eager(single(sum([g(), weight(lmcut, 5)])), reopen_closed=true)'], [], driver_options=['--overall-time-limit', '5m']), - IssueConfig('eager_single_openlist_w100', ['--search', 'eager(single(sum([g(), weight(lmcut, 100)])), reopen_closed=true)'], [], driver_options=['--overall-time-limit', '5m']), -] - -SUITE = common_setup.DEFAULT_SATISFICING_SUITE -ENVIRONMENT = BaselSlurmEnvironment( - partition="infai_1", - export=["PATH", "DOWNWARD_BENCHMARKS"]) - -if common_setup.is_test_run(): - SUITE = IssueExperiment.DEFAULT_TEST_SUITE - ENVIRONMENT = LocalEnvironment(processes=1) - -exp = IssueExperiment( - revisions=REVISIONS, - configs=CONFIGS, - environment=ENVIRONMENT, -) 
-exp.add_suite(BENCHMARKS_DIR, SUITE) - -exp.add_parser(exp.EXITCODE_PARSER) -exp.add_parser(exp.TRANSLATOR_PARSER) -exp.add_parser(exp.SINGLE_SEARCH_PARSER) -exp.add_parser(exp.PLANNER_PARSER) - -exp.add_step('build', exp.build) -exp.add_step('start', exp.start_runs) -exp.add_fetcher(name='fetch') - -exp.add_absolute_report_step() -exp.add_comparison_table_step() - -exp.run_steps() diff --git a/experiments/issue311/v2.py b/experiments/issue311/v2.py deleted file mode 100755 index a5d49396b4..0000000000 --- a/experiments/issue311/v2.py +++ /dev/null @@ -1,58 +0,0 @@ -#! /usr/bin/env python -# -*- coding: utf-8 -*- - -''' -Script to compare WA* versions with w=1 with A* - -Results should be slightly different because of the tie-breaking -strategy used by the astar search -''' - -import os - -from lab.environments import LocalEnvironment, BaselSlurmEnvironment - -import common_setup -from common_setup import IssueConfig, IssueExperiment -from relativescatter import RelativeScatterPlotReport - -DIR = os.path.dirname(os.path.abspath(__file__)) -BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"] -REVISIONS = ["issue311"] -CONFIGS = [ - IssueConfig('eager_wastar_w1', ['--search', 'eager_wastar([lmcut], w=1)'], [], driver_options=['--overall-time-limit', '5m']), - - IssueConfig('eager_single_openlist_w1', ['--search', 'eager(single(sum([g(), weight(lmcut, 1)])), reopen_closed=true)'], [], driver_options=['--overall-time-limit', '5m']), - - IssueConfig('astar', ['--search', 'astar(lmcut)'], [], driver_options=['--overall-time-limit', '5m']), -] - -SUITE = common_setup.DEFAULT_OPTIMAL_SUITE -ENVIRONMENT = BaselSlurmEnvironment( - partition="infai_1", - export=["PATH", "DOWNWARD_BENCHMARKS"]) - -if common_setup.is_test_run(): - SUITE = IssueExperiment.DEFAULT_TEST_SUITE - ENVIRONMENT = LocalEnvironment(processes=1) - -exp = IssueExperiment( - revisions=REVISIONS, - configs=CONFIGS, - environment=ENVIRONMENT, -) -exp.add_suite(BENCHMARKS_DIR, SUITE) - 
-exp.add_parser(exp.EXITCODE_PARSER) -exp.add_parser(exp.TRANSLATOR_PARSER) -exp.add_parser(exp.SINGLE_SEARCH_PARSER) -exp.add_parser(exp.PLANNER_PARSER) - -exp.add_step('build', exp.build) -exp.add_step('start', exp.start_runs) -exp.add_fetcher(name='fetch') - -exp.add_absolute_report_step() -exp.add_comparison_table_step() - -exp.run_steps() diff --git a/experiments/issue311/v3.py b/experiments/issue311/v3.py deleted file mode 100755 index 3d733a9122..0000000000 --- a/experiments/issue311/v3.py +++ /dev/null @@ -1,85 +0,0 @@ -#! /usr/bin/env python -# -*- coding: utf-8 -*- - -''' -Script to test possible eager version of LAMA - -''' - -import os - -from lab.environments import LocalEnvironment, BaselSlurmEnvironment - -import common_setup -from common_setup import IssueConfig, IssueExperiment -from relativescatter import RelativeScatterPlotReport - -DIR = os.path.dirname(os.path.abspath(__file__)) -BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"] -REVISIONS = ["issue311"] -CONFIGS = [ - IssueConfig('lama', [], driver_options=["--alias", "seq-sat-lama-2011"]), - IssueConfig('eager_lama', [ - "--if-unit-cost", - "--evaluator", - "hlm=lama_synergy(lm_rhw(reasonable_orders=true))", - "--evaluator", "hff=ff_synergy(hlm)", - "--search", """iterated([ - lazy_greedy([hff,hlm],preferred=[hff,hlm]), - eager_wastar([hff,hlm],preferred=[hff,hlm],w=5), - eager_wastar([hff,hlm],preferred=[hff,hlm],w=3), - eager_wastar([hff,hlm],preferred=[hff,hlm],w=2), - eager_wastar([hff,hlm],preferred=[hff,hlm],w=1) - ],repeat_last=true,continue_on_fail=true)""", - "--if-non-unit-cost", - "--evaluator", - "hlm1=lama_synergy(lm_rhw(reasonable_orders=true," - " lm_cost_type=one),transform=adapt_costs(one))", - "--evaluator", "hff1=ff_synergy(hlm1)", - "--evaluator", - "hlm2=lama_synergy(lm_rhw(reasonable_orders=true," - " lm_cost_type=plusone),transform=adapt_costs(plusone))", - "--evaluator", "hff2=ff_synergy(hlm2)", - "--search", """iterated([ - 
lazy_greedy([hff1,hlm1],preferred=[hff1,hlm1], - cost_type=one,reopen_closed=false), - lazy_greedy([hff2,hlm2],preferred=[hff2,hlm2], - reopen_closed=false), - eager_wastar([hff2,hlm2],preferred=[hff2,hlm2],w=5), - eager_wastar([hff2,hlm2],preferred=[hff2,hlm2],w=3), - eager_wastar([hff2,hlm2],preferred=[hff2,hlm2],w=2), - eager_wastar([hff2,hlm2],preferred=[hff2,hlm2],w=1) - ],repeat_last=true,continue_on_fail=true)""", - "--always" - ]) -] - -SUITE = common_setup.DEFAULT_SATISFICING_SUITE -ENVIRONMENT = BaselSlurmEnvironment( - partition="infai_1", - export=["PATH", "DOWNWARD_BENCHMARKS"]) - -if common_setup.is_test_run(): - SUITE = IssueExperiment.DEFAULT_TEST_SUITE - ENVIRONMENT = LocalEnvironment(processes=1) - -exp = IssueExperiment( - revisions=REVISIONS, - configs=CONFIGS, - environment=ENVIRONMENT, -) -exp.add_suite(BENCHMARKS_DIR, SUITE) - -exp.add_parser(exp.EXITCODE_PARSER) -exp.add_parser(exp.TRANSLATOR_PARSER) -exp.add_parser(exp.ANYTIME_SEARCH_PARSER) -exp.add_parser(exp.PLANNER_PARSER) - -exp.add_step('build', exp.build) -exp.add_step('start', exp.start_runs) -exp.add_fetcher(name='fetch') - -exp.add_absolute_report_step() -exp.add_comparison_table_step() - -exp.run_steps() diff --git a/experiments/issue344/common_setup.py b/experiments/issue344/common_setup.py deleted file mode 100644 index ddc317c07a..0000000000 --- a/experiments/issue344/common_setup.py +++ /dev/null @@ -1,268 +0,0 @@ -# -*- coding: utf-8 -*- - -import os.path -import platform - -from lab.environments import MaiaEnvironment -from lab.steps import Step - -from downward.checkouts import Translator, Preprocessor, Planner -from downward.experiments import DownwardExperiment -from downward.reports.compare import CompareRevisionsReport -from downward.reports.scatter import ScatterPlotReport - - -def get_script(): - """Get file name of main script.""" - import __main__ - return __main__.__file__ - - -def get_script_dir(): - """Get directory of main script. 
- - Usually a relative directory (depends on how it was called by the user.)""" - return os.path.dirname(get_script()) - - -def get_experiment_name(): - """Get name for experiment. - - Derived from the filename of the main script, e.g. - "/ham/spam/eggs.py" => "eggs".""" - script = os.path.abspath(get_script()) - script_dir = os.path.basename(os.path.dirname(script)) - script_base = os.path.splitext(os.path.basename(script))[0] - return "%s-%s" % (script_dir, script_base) - - -def get_data_dir(): - """Get data dir for the experiment. - - This is the subdirectory "data" of the directory containing - the main script.""" - return os.path.join(get_script_dir(), "data", get_experiment_name()) - - -def get_repo_base(): - """Get base directory of the repository, as an absolute path. - - Found by searching upwards in the directory tree from the main - script until a directory with a subdirectory named ".hg" is found.""" - path = os.path.abspath(get_script_dir()) - while True: - if os.path.exists(os.path.join(path, ".hg")): - return path - path = os.path.dirname(path) - - -def build_combos_with_names(repo, combinations, revisions, search_revisions): - """Build (combos, combo_names) lists for the given planner revisions. - - combos and combo_names are parallel lists, where combos contains - (Translator, Preprocessor, Search) triples and combo_names are the names - for the respective combinations that lab uses internally. 
- - See MyExperiment.__init__ for documentation of the parameters - combinations, revisions and search_revisions.""" - combos = [] - names = [] - def build(*rev_triple): - combo, name = build_combo_with_name(repo, *rev_triple) - combos.append(combo) - names.append(name) - - for triple in combinations or []: - build(triple) - for rev in revisions or []: - build(rev, rev, rev) - for rev in search_revisions or []: - build(search_revisions[0], search_revisions[0], rev) - - return combos, names - - -def build_combo_with_name(repo, trans_rev, preprocess_rev, search_rev): - """Generate a tuple (combination, name) for the given revisions. - - combination is a (Translator, Preprocessor, Search) tuple - and name is the name that lab uses to refer to it.""" - # TODO: In the future, it would be nice if we didn't need the name - # information any more, as it is somewhat of an implementation - # detail. - combo = (Translator(repo, trans_rev), - Preprocessor(repo, preprocess_rev), - Planner(repo, search_rev)) - if trans_rev == preprocess_rev == search_rev: - name = str(search_rev) - else: - name = "%s-%s-%s" % (trans_rev, preprocess_rev, search_rev) - return combo, name - - -def is_on_grid(): - """Returns True if the current machine is on the maia grid. - - Implemented by checking if host name ends with ".cluster". 
- """ - return platform.node().endswith(".cluster") - - -class MyExperiment(DownwardExperiment): - DEFAULT_TEST_SUITE = [ - "zenotravel:pfile1", - "zenotravel:pfile2", - ] - - DEFAULT_TABLE_ATTRIBUTES = [ - "cost", - "coverage", - "evaluations", - "expansions", - "expansions_until_last_jump", - "generated", - "memory", - "run_dir", - "score_evaluations", - "score_expansions", - "score_generated", - "score_memory", - "score_search_time", - "score_total_time", - "search_time", - "total_time", - ] - - DEFAULT_SCATTER_PLOT_ATTRIBUTES = [ - "total_time", - "search_time", - "memory", - "expansions_until_last_jump", - ] - - """Wrapper for DownwardExperiment with a few convenience features.""" - - def __init__(self, configs=None, grid_priority=None, path=None, - repo=None, revisions=None, search_revisions=None, - combinations=None, suite=None, do_test_run="auto", - test_suite=DEFAULT_TEST_SUITE, **kwargs): - """Create a DownwardExperiment with some convenience features. - - If "configs" is specified, it should be a dict of {nick: - cmdline} pairs that sets the planner configurations to test. - - If "grid_priority" is specified and no environment is - specifically requested in **kwargs, use the maia environment - with the specified priority. - - If "path" is not specified, the experiment data path is - derived automatically from the main script's filename. - - If "repo" is not specified, the repository base is derived - automatically from the main script's path. - - If "combinations" is specified, it should be a non-empty list - of revision triples of the form (translator_rev, - preprocessor_rev, search_rev). - - If "revisions" is specified, it should be a non-empty - list of revisions, which specify which planner versions to use - in the experiment. The same versions are used for translator, - preprocessor and search. 
- - If "search_revisions" is specified, it should be a non-empty - list of revisions, which specify which search component - versions to use in the experiment. All experiments use the - translator and preprocessor component of the first - revision. - - It is possible to specify a mixture of"combinations", - "revisions" and "search_revisions". - - If "suite" is specified, it should specify a problem suite. - - If "do_test_run" is true, the "grid_priority" and - "environment" (from the base class) arguments are ignored and - a local experiment with default arguments is run instead. In - this case, the "suite" argument is replaced by the "test_suite" - argument. - - If "do_test_run" is the string "auto" (the default), then - do_test_run is set to False when run on a grid machine and - to True otherwise. A grid machine is identified as one whose - node name ends with ".cluster". - """ - - if do_test_run == "auto": - do_test_run = not is_on_grid() - - if do_test_run: - # In a test run, overwrite certain arguments. 
- grid_priority = None - kwargs.pop("environment", None) - suite = test_suite - - if grid_priority is not None and "environment" not in kwargs: - kwargs["environment"] = MaiaEnvironment(priority=grid_priority) - - if path is None: - path = get_data_dir() - - if repo is None: - repo = get_repo_base() - - combinations, self._combination_names = build_combos_with_names( - repo=repo, - combinations=combinations, - revisions=revisions, - search_revisions=search_revisions) - kwargs["combinations"] = combinations - - DownwardExperiment.__init__(self, path=path, repo=repo, **kwargs) - - if configs is not None: - for nick, config in configs.items(): - self.add_config(nick, config) - - if suite is not None: - self.add_suite(suite) - - self._report_prefix = get_experiment_name() - - def add_comparison_table_step(self, attributes=None): - revisions = self._combination_names - if len(revisions) != 2: - # TODO: Should generalize this by offering a general - # grouping function and then comparing any pair of - # settings in the same group. - raise NotImplementedError("need two revisions") - if attributes is None: - attributes = self.DEFAULT_TABLE_ATTRIBUTES - report = CompareRevisionsReport(*revisions, attributes=attributes) - self.add_report(report, outfile="%s-compare.html" % self._report_prefix) - - def add_scatter_plot_step(self, attributes=None): - if attributes is None: - attributes = self.DEFAULT_SCATTER_PLOT_ATTRIBUTES - revisions = self._combination_names - if len(revisions) != 2: - # TODO: Should generalize this by offering a general - # grouping function and then comparing any pair of - # settings in the same group. 
- raise NotImplementedError("need two revisions") - scatter_dir = os.path.join(self.eval_dir, "scatter") - def make_scatter_plots(): - configs = [conf[0] for conf in self.configs] - for nick in configs: - config_before = "%s-%s" % (revisions[0], nick) - config_after = "%s-%s" % (revisions[1], nick) - for attribute in attributes: - name = "%s-%s-%s" % (self._report_prefix, attribute, nick) - report = ScatterPlotReport( - filter_config=[config_before, config_after], - attributes=[attribute], - get_category=lambda run1, run2: run1["domain"], - legend_location=(1.3, 0.5)) - report(self.eval_dir, os.path.join(scatter_dir, name)) - - self.add_step(Step("make-scatter-plots", make_scatter_plots)) diff --git a/experiments/issue344/exp14.py b/experiments/issue344/exp14.py deleted file mode 100755 index 1531f11e2e..0000000000 --- a/experiments/issue344/exp14.py +++ /dev/null @@ -1,21 +0,0 @@ -#! /usr/bin/env python -# -*- coding: utf-8 -*- - -import downward.configs -import downward.suites - -import common_setup - - -exp = common_setup.MyExperiment( - grid_priority=0, - search_revisions=["issue344-base", "issue344-v5"], - configs=downward.configs.default_configs_optimal(), - suite=downward.suites.suite_optimal_with_ipc11(), - do_test_run="auto" - ) - -exp.add_comparison_table_step() -exp.add_scatter_plot_step() - -exp() diff --git a/experiments/issue348/common_setup.py b/experiments/issue348/common_setup.py deleted file mode 100644 index 8abe5bf745..0000000000 --- a/experiments/issue348/common_setup.py +++ /dev/null @@ -1,394 +0,0 @@ -# -*- coding: utf-8 -*- - -import itertools -import os -import platform -import subprocess -import sys - -from lab.experiment import ARGPARSER -from lab import tools - -from downward.experiment import FastDownwardExperiment -from downward.reports.absolute import AbsoluteReport -from downward.reports.compare import ComparativeReport -from downward.reports.scatter import ScatterPlotReport - - -def parse_args(): - ARGPARSER.add_argument( - 
"--test", - choices=["yes", "no", "auto"], - default="auto", - dest="test_run", - help="test experiment locally on a small suite if --test=yes or " - "--test=auto and we are not on a cluster") - return ARGPARSER.parse_args() - -ARGS = parse_args() - - -DEFAULT_OPTIMAL_SUITE = [ - 'agricola-opt18-strips', 'airport', 'barman-opt11-strips', - 'barman-opt14-strips', 'blocks', 'childsnack-opt14-strips', - 'data-network-opt18-strips', 'depot', 'driverlog', - 'elevators-opt08-strips', 'elevators-opt11-strips', - 'floortile-opt11-strips', 'floortile-opt14-strips', 'freecell', - 'ged-opt14-strips', 'grid', 'gripper', 'hiking-opt14-strips', - 'logistics00', 'logistics98', 'miconic', 'movie', 'mprime', - 'mystery', 'nomystery-opt11-strips', 'openstacks-opt08-strips', - 'openstacks-opt11-strips', 'openstacks-opt14-strips', - 'openstacks-strips', 'organic-synthesis-opt18-strips', - 'organic-synthesis-split-opt18-strips', 'parcprinter-08-strips', - 'parcprinter-opt11-strips', 'parking-opt11-strips', - 'parking-opt14-strips', 'pathways-noneg', 'pegsol-08-strips', - 'pegsol-opt11-strips', 'petri-net-alignment-opt18-strips', - 'pipesworld-notankage', 'pipesworld-tankage', 'psr-small', 'rovers', - 'satellite', 'scanalyzer-08-strips', 'scanalyzer-opt11-strips', - 'snake-opt18-strips', 'sokoban-opt08-strips', - 'sokoban-opt11-strips', 'spider-opt18-strips', 'storage', - 'termes-opt18-strips', 'tetris-opt14-strips', - 'tidybot-opt11-strips', 'tidybot-opt14-strips', 'tpp', - 'transport-opt08-strips', 'transport-opt11-strips', - 'transport-opt14-strips', 'trucks-strips', 'visitall-opt11-strips', - 'visitall-opt14-strips', 'woodworking-opt08-strips', - 'woodworking-opt11-strips', 'zenotravel'] - -DEFAULT_SATISFICING_SUITE = [ - 'agricola-sat18-strips', 'airport', 'assembly', - 'barman-sat11-strips', 'barman-sat14-strips', 'blocks', - 'caldera-sat18-adl', 'caldera-split-sat18-adl', 'cavediving-14-adl', - 'childsnack-sat14-strips', 'citycar-sat14-adl', - 'data-network-sat18-strips', 
'depot', 'driverlog', - 'elevators-sat08-strips', 'elevators-sat11-strips', - 'flashfill-sat18-adl', 'floortile-sat11-strips', - 'floortile-sat14-strips', 'freecell', 'ged-sat14-strips', 'grid', - 'gripper', 'hiking-sat14-strips', 'logistics00', 'logistics98', - 'maintenance-sat14-adl', 'miconic', 'miconic-fulladl', - 'miconic-simpleadl', 'movie', 'mprime', 'mystery', - 'nomystery-sat11-strips', 'nurikabe-sat18-adl', 'openstacks', - 'openstacks-sat08-adl', 'openstacks-sat08-strips', - 'openstacks-sat11-strips', 'openstacks-sat14-strips', - 'openstacks-strips', 'optical-telegraphs', - 'organic-synthesis-sat18-strips', - 'organic-synthesis-split-sat18-strips', 'parcprinter-08-strips', - 'parcprinter-sat11-strips', 'parking-sat11-strips', - 'parking-sat14-strips', 'pathways', 'pathways-noneg', - 'pegsol-08-strips', 'pegsol-sat11-strips', 'philosophers', - 'pipesworld-notankage', 'pipesworld-tankage', 'psr-large', - 'psr-middle', 'psr-small', 'rovers', 'satellite', - 'scanalyzer-08-strips', 'scanalyzer-sat11-strips', 'schedule', - 'settlers-sat18-adl', 'snake-sat18-strips', 'sokoban-sat08-strips', - 'sokoban-sat11-strips', 'spider-sat18-strips', 'storage', - 'termes-sat18-strips', 'tetris-sat14-strips', - 'thoughtful-sat14-strips', 'tidybot-sat11-strips', 'tpp', - 'transport-sat08-strips', 'transport-sat11-strips', - 'transport-sat14-strips', 'trucks', 'trucks-strips', - 'visitall-sat11-strips', 'visitall-sat14-strips', - 'woodworking-sat08-strips', 'woodworking-sat11-strips', - 'zenotravel'] - - -def get_script(): - """Get file name of main script.""" - return tools.get_script_path() - - -def get_script_dir(): - """Get directory of main script. - - Usually a relative directory (depends on how it was called by the user.)""" - return os.path.dirname(get_script()) - - -def get_experiment_name(): - """Get name for experiment. - - Derived from the absolute filename of the main script, e.g. 
- "/ham/spam/eggs.py" => "spam-eggs".""" - script = os.path.abspath(get_script()) - script_dir = os.path.basename(os.path.dirname(script)) - script_base = os.path.splitext(os.path.basename(script))[0] - return "%s-%s" % (script_dir, script_base) - - -def get_data_dir(): - """Get data dir for the experiment. - - This is the subdirectory "data" of the directory containing - the main script.""" - return os.path.join(get_script_dir(), "data", get_experiment_name()) - - -def get_repo_base(): - """Get base directory of the repository, as an absolute path. - - Search upwards in the directory tree from the main script until a - directory with a subdirectory named ".git" is found. - - Abort if the repo base cannot be found.""" - path = os.path.abspath(get_script_dir()) - while os.path.dirname(path) != path: - if os.path.exists(os.path.join(path, ".git")): - return path - path = os.path.dirname(path) - sys.exit("repo base could not be found") - - -def is_running_on_cluster(): - node = platform.node() - return node.endswith(".scicore.unibas.ch") or node.endswith(".cluster.bc2.ch") - - -def is_test_run(): - return ARGS.test_run == "yes" or ( - ARGS.test_run == "auto" and not is_running_on_cluster()) - - -def get_algo_nick(revision, config_nick): - return "{revision}-{config_nick}".format(**locals()) - - -class IssueConfig(object): - """Hold information about a planner configuration. - - See FastDownwardExperiment.add_algorithm() for documentation of the - constructor's options. 
- - """ - def __init__(self, nick, component_options, - build_options=None, driver_options=None): - self.nick = nick - self.component_options = component_options - self.build_options = build_options - self.driver_options = driver_options - - -class IssueExperiment(FastDownwardExperiment): - """Subclass of FastDownwardExperiment with some convenience features.""" - - DEFAULT_TEST_SUITE = ["depot:p01.pddl", "gripper:prob01.pddl"] - - DEFAULT_TABLE_ATTRIBUTES = [ - "cost", - "coverage", - "error", - "evaluations", - "expansions", - "expansions_until_last_jump", - "generated", - "memory", - "planner_memory", - "planner_time", - "quality", - "run_dir", - "score_evaluations", - "score_expansions", - "score_generated", - "score_memory", - "score_search_time", - "score_total_time", - "search_time", - "total_time", - ] - - DEFAULT_SCATTER_PLOT_ATTRIBUTES = [ - "evaluations", - "expansions", - "expansions_until_last_jump", - "initial_h_value", - "memory", - "search_time", - "total_time", - ] - - PORTFOLIO_ATTRIBUTES = [ - "cost", - "coverage", - "error", - "plan_length", - "run_dir", - ] - - def __init__(self, revisions=None, configs=None, path=None, **kwargs): - """ - - You can either specify both *revisions* and *configs* or none - of them. If they are omitted, you will need to call - exp.add_algorithm() manually. - - If *revisions* is given, it must be a non-empty list of - revision identifiers, which specify which planner versions to - use in the experiment. The same versions are used for - translator, preprocessor and search. :: - - IssueExperiment(revisions=["issue123", "4b3d581643"], ...) - - If *configs* is given, it must be a non-empty list of - IssueConfig objects. :: - - IssueExperiment(..., configs=[ - IssueConfig("ff", ["--search", "eager_greedy(ff())"]), - IssueConfig( - "lama", [], - driver_options=["--alias", "seq-sat-lama-2011"]), - ]) - - If *path* is specified, it must be the path to where the - experiment should be built (e.g. 
- /home/john/experiments/issue123/exp01/). If omitted, the - experiment path is derived automatically from the main - script's filename. Example:: - - script = experiments/issue123/exp01.py --> - path = experiments/issue123/data/issue123-exp01/ - - """ - - path = path or get_data_dir() - - FastDownwardExperiment.__init__(self, path=path, **kwargs) - - if (revisions and not configs) or (not revisions and configs): - raise ValueError( - "please provide either both or none of revisions and configs") - - for rev in revisions: - for config in configs: - self.add_algorithm( - get_algo_nick(rev, config.nick), - get_repo_base(), - rev, - config.component_options, - build_options=config.build_options, - driver_options=config.driver_options) - - self._revisions = revisions - self._configs = configs - - @classmethod - def _is_portfolio(cls, config_nick): - return "fdss" in config_nick - - @classmethod - def get_supported_attributes(cls, config_nick, attributes): - if cls._is_portfolio(config_nick): - return [attr for attr in attributes - if attr in cls.PORTFOLIO_ATTRIBUTES] - return attributes - - def add_absolute_report_step(self, **kwargs): - """Add step that makes an absolute report. - - Absolute reports are useful for experiments that don't compare - revisions. - - The report is written to the experiment evaluation directory. - - All *kwargs* will be passed to the AbsoluteReport class. If the - keyword argument *attributes* is not specified, a default list - of attributes is used. :: - - exp.add_absolute_report_step(attributes=["coverage"]) - - """ - kwargs.setdefault("attributes", self.DEFAULT_TABLE_ATTRIBUTES) - report = AbsoluteReport(**kwargs) - outfile = os.path.join( - self.eval_dir, - get_experiment_name() + "." 
+ report.output_format) - self.add_report(report, outfile=outfile) - #self.add_step( - # 'publish-absolute-report', subprocess.call, ['publish', outfile]) - - def add_comparison_table_step(self, **kwargs): - """Add a step that makes pairwise revision comparisons. - - Create comparative reports for all pairs of Fast Downward - revisions. Each report pairs up the runs of the same config and - lists the two absolute attribute values and their difference - for all attributes in kwargs["attributes"]. - - All *kwargs* will be passed to the CompareConfigsReport class. - If the keyword argument *attributes* is not specified, a - default list of attributes is used. :: - - exp.add_comparison_table_step(attributes=["coverage"]) - - """ - kwargs.setdefault("attributes", self.DEFAULT_TABLE_ATTRIBUTES) - - def make_comparison_tables(): - for rev1, rev2 in itertools.combinations(self._revisions, 2): - compared_configs = [] - for config in self._configs: - config_nick = config.nick - compared_configs.append( - ("%s-%s" % (rev1, config_nick), - "%s-%s" % (rev2, config_nick), - "Diff (%s)" % config_nick)) - report = ComparativeReport(compared_configs, **kwargs) - outfile = os.path.join( - self.eval_dir, - "%s-%s-%s-compare.%s" % ( - self.name, rev1, rev2, report.output_format)) - report(self.eval_dir, outfile) - - def publish_comparison_tables(): - for rev1, rev2 in itertools.combinations(self._revisions, 2): - outfile = os.path.join( - self.eval_dir, - "%s-%s-%s-compare.html" % (self.name, rev1, rev2)) - subprocess.call(["publish", outfile]) - - self.add_step("make-comparison-tables", make_comparison_tables) - #self.add_step( - # "publish-comparison-tables", publish_comparison_tables) - - def add_scatter_plot_step(self, relative=False, attributes=None, additional=[]): - """Add step creating (relative) scatter plots for all revision pairs. - - Create a scatter plot for each combination of attribute, - configuration and revisions pair. 
If *attributes* is not - specified, a list of common scatter plot attributes is used. - For portfolios all attributes except "cost", "coverage" and - "plan_length" will be ignored. :: - - exp.add_scatter_plot_step(attributes=["expansions"]) - - """ - if relative: - scatter_dir = os.path.join(self.eval_dir, "scatter-relative") - step_name = "make-relative-scatter-plots" - else: - scatter_dir = os.path.join(self.eval_dir, "scatter-absolute") - step_name = "make-absolute-scatter-plots" - if attributes is None: - attributes = self.DEFAULT_SCATTER_PLOT_ATTRIBUTES - - def make_scatter_plot(config_nick, rev1, rev2, attribute, config_nick2=None): - name = "-".join([self.name, rev1, rev2, attribute, config_nick]) - if config_nick2 is not None: - name += "-" + config_nick2 - print("Make scatter plot for", name) - algo1 = get_algo_nick(rev1, config_nick) - algo2 = get_algo_nick(rev2, config_nick if config_nick2 is None else config_nick2) - report = ScatterPlotReport( - filter_algorithm=[algo1, algo2], - attributes=[attribute], - relative=relative, - get_category=lambda run1, run2: run1["domain"]) - report( - self.eval_dir, - os.path.join(scatter_dir, rev1 + "-" + rev2, name)) - - def make_scatter_plots(): - for config in self._configs: - for rev1, rev2 in itertools.combinations(self._revisions, 2): - for attribute in self.get_supported_attributes( - config.nick, attributes): - make_scatter_plot(config.nick, rev1, rev2, attribute) - for nick1, nick2, rev1, rev2, attribute in additional: - make_scatter_plot(nick1, rev1, rev2, attribute, config_nick2=nick2) - - self.add_step(step_name, make_scatter_plots) diff --git a/experiments/issue348/relativescatter.py b/experiments/issue348/relativescatter.py deleted file mode 100644 index f74cb6e721..0000000000 --- a/experiments/issue348/relativescatter.py +++ /dev/null @@ -1,105 +0,0 @@ -# -*- coding: utf-8 -*- - -from collections import defaultdict - -from matplotlib import ticker - -from downward.reports.scatter import 
ScatterPlotReport -from downward.reports.plot import PlotReport, Matplotlib, MatplotlibPlot - - -# TODO: handle outliers - -# TODO: this is mostly copied from ScatterMatplotlib (scatter.py) -class RelativeScatterMatplotlib(Matplotlib): - @classmethod - def _plot(cls, report, axes, categories, styles): - # Display grid - axes.grid(b=True, linestyle='-', color='0.75') - - has_points = False - # Generate the scatter plots - for category, coords in sorted(categories.items()): - X, Y = zip(*coords) - axes.scatter(X, Y, s=42, label=category, **styles[category]) - if X and Y: - has_points = True - - if report.xscale == 'linear' or report.yscale == 'linear': - plot_size = report.missing_val * 1.01 - else: - plot_size = report.missing_val * 1.25 - - # make 5 ticks above and below 1 - yticks = [] - tick_step = report.ylim_top**(1/5.0) - for i in xrange(-5, 6): - yticks.append(tick_step**i) - axes.set_yticks(yticks) - axes.get_yaxis().set_major_formatter(ticker.ScalarFormatter()) - - axes.set_xlim(report.xlim_left or -1, report.xlim_right or plot_size) - axes.set_ylim(report.ylim_bottom or -1, report.ylim_top or plot_size) - - for axis in [axes.xaxis, axes.yaxis]: - MatplotlibPlot.change_axis_formatter( - axis, - report.missing_val if report.show_missing else None) - return has_points - - -class RelativeScatterPlotReport(ScatterPlotReport): - """ - Generate a scatter plot that shows a relative comparison of two - algorithms with regard to the given attribute. The attribute value - of algorithm 1 is shown on the x-axis and the relation to the value - of algorithm 2 on the y-axis. - """ - - def __init__(self, show_missing=True, get_category=None, **kwargs): - ScatterPlotReport.__init__(self, show_missing, get_category, **kwargs) - if self.output_format == 'tex': - raise "not supported" - else: - self.writer = RelativeScatterMatplotlib - - def _fill_categories(self, runs): - # We discard the *runs* parameter. 
- # Map category names to value tuples - categories = defaultdict(list) - self.ylim_bottom = 2 - self.ylim_top = 0.5 - self.xlim_left = float("inf") - for (domain, problem), runs in self.problem_runs.items(): - if len(runs) != 2: - continue - run1, run2 = runs - assert (run1['algorithm'] == self.algorithms[0] and - run2['algorithm'] == self.algorithms[1]) - val1 = run1.get(self.attribute) - val2 = run2.get(self.attribute) - if val1 is None or val2 is None: - continue - category = self.get_category(run1, run2) - assert val1 > 0, (domain, problem, self.algorithms[0], val1) - assert val2 > 0, (domain, problem, self.algorithms[1], val2) - x = val1 - y = val2 / float(val1) - - categories[category].append((x, y)) - - self.ylim_top = max(self.ylim_top, y) - self.ylim_bottom = min(self.ylim_bottom, y) - self.xlim_left = min(self.xlim_left, x) - - # center around 1 - if self.ylim_bottom < 1: - self.ylim_top = max(self.ylim_top, 1 / float(self.ylim_bottom)) - if self.ylim_top > 1: - self.ylim_bottom = min(self.ylim_bottom, 1 / float(self.ylim_top)) - return categories - - def _set_scales(self, xscale, yscale): - # ScatterPlot uses log-scaling on the x-axis by default. - PlotReport._set_scales( - self, xscale or self.attribute.scale or 'log', 'log') diff --git a/experiments/issue348/v14-blind.py b/experiments/issue348/v14-blind.py deleted file mode 100755 index 6a10b26a09..0000000000 --- a/experiments/issue348/v14-blind.py +++ /dev/null @@ -1,64 +0,0 @@ -#! 
/usr/bin/env python -# -*- coding: utf-8 -*- - -import itertools -import os - -from lab.environments import LocalEnvironment, BaselSlurmEnvironment - -from downward.reports.compare import ComparativeReport - -import common_setup -from common_setup import IssueConfig, IssueExperiment -from relativescatter import RelativeScatterPlotReport - -DIR = os.path.dirname(os.path.abspath(__file__)) -SCRIPT_NAME = os.path.splitext(os.path.basename(__file__))[0] -BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"] -REVISIONS = ["issue348-v13", "issue348-v14"] -CONFIGS = [ - IssueConfig("blind", ["--search", "astar(blind())"]), -] - -ADL_DOMAINS = [ - "assembly", - "miconic-fulladl", - "openstacks", - "openstacks-opt08-adl", - "optical-telegraphs", - "philosophers", - "psr-large", - "psr-middle", - "trucks", -] -SUITE = common_setup.DEFAULT_OPTIMAL_SUITE + ADL_DOMAINS -ENVIRONMENT = BaselSlurmEnvironment( - partition="infai_2", - email="jendrik.seipp@unibas.ch", - export=["PATH", "DOWNWARD_BENCHMARKS"]) - -if common_setup.is_test_run(): - SUITE = IssueExperiment.DEFAULT_TEST_SUITE + ["openstacks-opt08-adl:p01.pddl"] - ENVIRONMENT = LocalEnvironment(processes=1) - -exp = IssueExperiment( - revisions=REVISIONS, - configs=CONFIGS, - environment=ENVIRONMENT, -) -exp.add_suite(BENCHMARKS_DIR, SUITE) - -exp.add_parser(exp.EXITCODE_PARSER) -exp.add_parser(exp.SINGLE_SEARCH_PARSER) -exp.add_parser(exp.PLANNER_PARSER) - -exp.add_step('build', exp.build) -exp.add_step('start', exp.start_runs) -exp.add_fetcher(name='fetch') - -#exp.add_absolute_report_step() -exp.add_comparison_table_step() -exp.add_scatter_plot_step(relative=True, attributes=["total_time"], suffix="-strips", filter_domain=common_setup.DEFAULT_OPTIMAL_SUITE) -exp.add_scatter_plot_step(relative=True, attributes=["total_time"], suffix="-adl", filter_domain=ADL_DOMAINS) - -exp.run_steps() diff --git a/experiments/issue348/v19-blind.py b/experiments/issue348/v19-blind.py deleted file mode 100755 index 186a2b86ae..0000000000 --- 
a/experiments/issue348/v19-blind.py +++ /dev/null @@ -1,64 +0,0 @@ -#! /usr/bin/env python -# -*- coding: utf-8 -*- - -import itertools -import os - -from lab.environments import LocalEnvironment, BaselSlurmEnvironment - -from downward.reports.compare import ComparativeReport - -import common_setup -from common_setup import IssueConfig, IssueExperiment -from relativescatter import RelativeScatterPlotReport - -DIR = os.path.dirname(os.path.abspath(__file__)) -SCRIPT_NAME = os.path.splitext(os.path.basename(__file__))[0] -BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"] -REVISIONS = ["issue348-base", "issue348-v19"] -CONFIGS = [ - IssueConfig("blind", ["--search", "astar(blind())"]), -] - -ADL_DOMAINS = [ - "assembly", - "miconic-fulladl", - "openstacks", - "openstacks-opt08-adl", - "optical-telegraphs", - "philosophers", - "psr-large", - "psr-middle", - "trucks", -] -SUITE = common_setup.DEFAULT_OPTIMAL_SUITE + ADL_DOMAINS -ENVIRONMENT = BaselSlurmEnvironment( - partition="infai_2", - email="jendrik.seipp@unibas.ch", - export=["PATH", "DOWNWARD_BENCHMARKS"]) - -if common_setup.is_test_run(): - SUITE = IssueExperiment.DEFAULT_TEST_SUITE + ["openstacks-opt08-adl:p01.pddl"] - ENVIRONMENT = LocalEnvironment(processes=1) - -exp = IssueExperiment( - revisions=REVISIONS, - configs=CONFIGS, - environment=ENVIRONMENT, -) -exp.add_suite(BENCHMARKS_DIR, SUITE) - -exp.add_parser(exp.EXITCODE_PARSER) -exp.add_parser(exp.SINGLE_SEARCH_PARSER) -exp.add_parser(exp.PLANNER_PARSER) - -exp.add_step('build', exp.build) -exp.add_step('start', exp.start_runs) -exp.add_fetcher(name='fetch') - -#exp.add_absolute_report_step() -exp.add_comparison_table_step() -exp.add_scatter_plot_step(relative=True, attributes=["total_time"], suffix="-strips", filter_domain=common_setup.DEFAULT_OPTIMAL_SUITE) -exp.add_scatter_plot_step(relative=True, attributes=["total_time"], suffix="-adl", filter_domain=ADL_DOMAINS) - -exp.run_steps() diff --git a/experiments/issue348/v24.py 
b/experiments/issue348/v24.py deleted file mode 100755 index ee845cea69..0000000000 --- a/experiments/issue348/v24.py +++ /dev/null @@ -1,63 +0,0 @@ -#! /usr/bin/env python3 - -import itertools -import os - -from lab.environments import LocalEnvironment, BaselSlurmEnvironment -#from lab.environments import FreiburgSlurmEnvironment - -from downward.reports.compare import ComparativeReport - -import common_setup -from common_setup import IssueConfig, IssueExperiment - -DIR = os.path.dirname(os.path.abspath(__file__)) -SCRIPT_NAME = os.path.splitext(os.path.basename(__file__))[0] -BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"] -REVISIONS = ["issue348-base", "issue348-version2-v3", "issue348-v24"] -CONFIGS = [ - IssueConfig("lama", [], driver_options=["--alias", "lama-first"]), - IssueConfig("ehc-ff", ["--search", "ehc(ff())"]), - IssueConfig("ipdb", ["--search", "astar(ipdb())"]), - IssueConfig("lmcut", ["--search", "astar(lmcut())"]), - IssueConfig("blind", ["--search", "astar(blind())"]), - IssueConfig("lazy", [ - "--evaluator", - "hff=ff()", - "--evaluator", - "hcea=cea()", - "--search", - "lazy_greedy([hff, hcea], preferred=[hff, hcea])"]), -] - -SUITE = common_setup.DEFAULT_OPTIMAL_SUITE -ENVIRONMENT = BaselSlurmEnvironment( - partition="infai_2", - email="florian.pommerening@unibas.ch", - export=["PATH", "DOWNWARD_BENCHMARKS"]) -#ENVIRONMENT = FreiburgSlurmEnvironment() - -if common_setup.is_test_run(): - SUITE = IssueExperiment.DEFAULT_TEST_SUITE - ENVIRONMENT = LocalEnvironment(processes=3) - -exp = IssueExperiment( - revisions=REVISIONS, - configs=CONFIGS, - environment=ENVIRONMENT, -) -exp.add_suite(BENCHMARKS_DIR, SUITE) - -exp.add_parser(exp.EXITCODE_PARSER) -exp.add_parser(exp.SINGLE_SEARCH_PARSER) -exp.add_parser(exp.PLANNER_PARSER) - -exp.add_step('build', exp.build) -exp.add_step('start', exp.start_runs) -exp.add_fetcher(name='fetch') - -exp.add_absolute_report_step() -exp.add_comparison_table_step() -exp.add_scatter_plot_step(relative=True, 
attributes=["total_time", "memory"]) - -exp.run_steps() diff --git a/experiments/issue348/version1_v2-version2_v2-base.py b/experiments/issue348/version1_v2-version2_v2-base.py deleted file mode 100755 index 296aab62da..0000000000 --- a/experiments/issue348/version1_v2-version2_v2-base.py +++ /dev/null @@ -1,74 +0,0 @@ -#! /usr/bin/env python3 - -import itertools -import os - -from lab.environments import LocalEnvironment, BaselSlurmEnvironment -from lab.environments import FreiburgSlurmEnvironment - -from downward.reports.compare import ComparativeReport - -import common_setup -from common_setup import IssueConfig, IssueExperiment - -DIR = os.path.dirname(os.path.abspath(__file__)) -SCRIPT_NAME = os.path.splitext(os.path.basename(__file__))[0] -BENCHMARKS_DIR = "/home/drexlerd/benchmarks/downward-benchmarks" -REVISIONS = ["issue348-base", "issue348-version1-v2", "issue348-version2-v2"] -CONFIGS = [ - IssueConfig("lama", [], driver_options=["--alias", "lama-first"]), - IssueConfig("ehc-ff", ["--search", "ehc(ff())"]), - #IssueConfig("ipdb", ["--search", "astar(ipdb())"]), - #IssueConfig("lmcut", ["--search", "astar(lmcut())"]), - IssueConfig("blind", ["--search", "astar(blind())"]), - IssueConfig("lazy", [ - "--evaluator", - "hff=ff()", - "--evaluator", - "hcea=cea()", - "--search", - "lazy_greedy([hff, hcea], preferred=[hff, hcea])"]), -] - -ADL_DOMAINS = [ - "assembly", - "miconic-fulladl", - "openstacks", - "openstacks-opt08-adl", - "optical-telegraphs", - "philosophers", - "psr-large", - "psr-middle", - "trucks", -] -SUITE = common_setup.DEFAULT_OPTIMAL_SUITE + ADL_DOMAINS -#ENVIRONMENT = BaselSlurmEnvironment( -# partition="infai_2", -# email="florian.pommerening@unibas.ch", -# export=["PATH", "DOWNWARD_BENCHMARKS"]) -ENVIRONMENT = FreiburgSlurmEnvironment() - -if common_setup.is_test_run(): - SUITE = IssueExperiment.DEFAULT_TEST_SUITE - ENVIRONMENT = LocalEnvironment(processes=3) - -exp = IssueExperiment( - revisions=REVISIONS, - configs=CONFIGS, - 
environment=ENVIRONMENT, -) -exp.add_suite(BENCHMARKS_DIR, SUITE) - -exp.add_parser(exp.EXITCODE_PARSER) -exp.add_parser(exp.SINGLE_SEARCH_PARSER) -exp.add_parser(exp.PLANNER_PARSER) - -exp.add_step('build', exp.build) -exp.add_step('start', exp.start_runs) -exp.add_fetcher(name='fetch') - -exp.add_absolute_report_step() -exp.add_comparison_table_step() -exp.add_scatter_plot_step(relative=True, attributes=["total_time", "memory"]) - -exp.run_steps() diff --git a/experiments/issue348/version1_v3-version2_v3-base.py b/experiments/issue348/version1_v3-version2_v3-base.py deleted file mode 100755 index 738400f4e0..0000000000 --- a/experiments/issue348/version1_v3-version2_v3-base.py +++ /dev/null @@ -1,74 +0,0 @@ -#! /usr/bin/env python3 - -import itertools -import os - -from lab.environments import LocalEnvironment, BaselSlurmEnvironment -from lab.environments import FreiburgSlurmEnvironment - -from downward.reports.compare import ComparativeReport - -import common_setup -from common_setup import IssueConfig, IssueExperiment - -DIR = os.path.dirname(os.path.abspath(__file__)) -SCRIPT_NAME = os.path.splitext(os.path.basename(__file__))[0] -BENCHMARKS_DIR = "/home/drexlerd/benchmarks/downward-benchmarks" -REVISIONS = ["issue348-base", "issue348-version1-v3", "issue348-version2-v3"] -CONFIGS = [ - IssueConfig("lama", [], driver_options=["--alias", "lama-first"]), - IssueConfig("ehc-ff", ["--search", "ehc(ff())"]), - IssueConfig("ipdb", ["--search", "astar(ipdb())"]), - #IssueConfig("lmcut", ["--search", "astar(lmcut())"]), - IssueConfig("blind", ["--search", "astar(blind())"]), - #IssueConfig("lazy", [ -# "--evaluator", -# "hff=ff()", -# "--evaluator", -# "hcea=cea()", -# "--search", -# "lazy_greedy([hff, hcea], preferred=[hff, hcea])"]), -] - -ADL_DOMAINS = [ - "assembly", - "miconic-fulladl", - "openstacks", - "openstacks-opt08-adl", - "optical-telegraphs", - "philosophers", - "psr-large", - "psr-middle", - "trucks", -] -SUITE = common_setup.DEFAULT_OPTIMAL_SUITE + 
ADL_DOMAINS -#ENVIRONMENT = BaselSlurmEnvironment( -# partition="infai_2", -# email="florian.pommerening@unibas.ch", -# export=["PATH", "DOWNWARD_BENCHMARKS"]) -ENVIRONMENT = FreiburgSlurmEnvironment() - -if common_setup.is_test_run(): - SUITE = IssueExperiment.DEFAULT_TEST_SUITE - ENVIRONMENT = LocalEnvironment(processes=3) - -exp = IssueExperiment( - revisions=REVISIONS, - configs=CONFIGS, - environment=ENVIRONMENT, -) -exp.add_suite(BENCHMARKS_DIR, SUITE) - -exp.add_parser(exp.EXITCODE_PARSER) -exp.add_parser(exp.SINGLE_SEARCH_PARSER) -exp.add_parser(exp.PLANNER_PARSER) - -exp.add_step('build', exp.build) -exp.add_step('start', exp.start_runs) -exp.add_fetcher(name='fetch') - -exp.add_absolute_report_step() -exp.add_comparison_table_step() -exp.add_scatter_plot_step(relative=True, attributes=["total_time", "memory"]) - -exp.run_steps() diff --git a/experiments/issue385/common_setup.py b/experiments/issue385/common_setup.py deleted file mode 100644 index 884e6b0323..0000000000 --- a/experiments/issue385/common_setup.py +++ /dev/null @@ -1,231 +0,0 @@ -# -*- coding: utf-8 -*- - -import os.path - -from lab.environments import MaiaEnvironment -from lab.steps import Step - -from downward.checkouts import Translator, Preprocessor, Planner -from downward.experiments import DownwardExperiment -from downward.reports.compare import CompareRevisionsReport -from downward.reports.scatter import ScatterPlotReport - - -def get_script(): - """Get file name of main script.""" - import __main__ - return __main__.__file__ - - -def get_script_dir(): - """Get directory of main script. - - Usually a relative directory (depends on how it was called by the user.)""" - return os.path.dirname(get_script()) - - -def get_experiment_name(): - """Get name for experiment. - - Derived from the filename of the main script, e.g. 
- "/ham/spam/eggs.py" => "eggs".""" - script = os.path.abspath(get_script()) - script_dir = os.path.basename(os.path.dirname(script)) - script_base = os.path.splitext(os.path.basename(script))[0] - return "%s-%s" % (script_dir, script_base) - - -def get_data_dir(): - """Get data dir for the experiment. - - This is the subdirectory "data" of the directory containing - the main script.""" - return os.path.join(get_script_dir(), "data", get_experiment_name()) - - -def get_repo_base(): - """Get base directory of the repository, as an absolute path. - - Found by searching upwards in the directory tree from the main - script until a directory with a subdirectory named ".hg" is found.""" - path = os.path.abspath(get_script_dir()) - while True: - if os.path.exists(os.path.join(path, ".hg")): - return path - path = os.path.dirname(path) - - -class MyExperiment(DownwardExperiment): - DEFAULT_TABLE_ATTRIBUTES = [ - "cost", - "coverage", - "evaluations", - "expansions", - "expansions_until_last_jump", - "generated", - "memory", - "run_dir", - "score_evaluations", - "score_expansions", - "score_generated", - "score_memory", - "score_search_time", - "score_total_time", - "search_time", - "total_time", - ] - - DEFAULT_SCATTER_PLOT_ATTRIBUTES = [ - "total_time", - "search_time", - "memory", - "expansions_until_last_jump", - ] - - """Wrapper for DownwardExperiment with a few convenience features.""" - - def __init__(self, configs=None, grid_priority=None, path=None, - repo=None, revisions=None, search_revisions=None, - suite=None, **kwargs): - """Create a DownwardExperiment with some convenience features. - - If "configs" is specified, it should be a dict of {nick: - cmdline} pairs that sets the planner configurations to test. - - If "grid_priority" is specified and no environment is - specifically requested in **kwargs, use the maia environment - with the specified priority. 
- - If "path" is not specified, the experiment data path is - derived automatically from the main script's filename. - - If "repo" is not specified, the repository base is derived - automatically from the main script's path. - - If "revisions" is specified, it should be a non-empty - list of revisions, which specify which planner versions to use - in the experiment. The same versions are used for translator, - preprocessor and search. - - If "search_revisions" is specified, it should be a non-empty - list of revisions, which specify which search component - versions to use in the experiment. All experiments use the - translator and preprocessor component of the first - revision. - - If "suite" is specified, it should specify a problem suite. - - Options "combinations" (from the base class), "revisions" and - "search_revisions" are mutually exclusive.""" - - if grid_priority is not None and "environment" not in kwargs: - kwargs["environment"] = MaiaEnvironment(priority=grid_priority) - - if path is None: - path = get_data_dir() - - if repo is None: - repo = get_repo_base() - - num_rev_opts_specified = ( - int(revisions is not None) + - int(search_revisions is not None) + - int(kwargs.get("combinations") is not None)) - - if num_rev_opts_specified > 1: - raise ValueError('must specify exactly one of "revisions", ' - '"search_revisions" or "combinations"') - - # See add_comparison_table_step for more on this variable. 
- self._HACK_revisions = revisions - - if revisions is not None: - if not revisions: - raise ValueError("revisions cannot be empty") - combinations = [(Translator(repo, rev), - Preprocessor(repo, rev), - Planner(repo, rev)) - for rev in revisions] - kwargs["combinations"] = combinations - - if search_revisions is not None: - if not search_revisions: - raise ValueError("search_revisions cannot be empty") - base_rev = search_revisions[0] - translator = Translator(repo, base_rev) - preprocessor = Preprocessor(repo, base_rev) - combinations = [(translator, preprocessor, Planner(repo, rev)) - for rev in search_revisions] - kwargs["combinations"] = combinations - - DownwardExperiment.__init__(self, path=path, repo=repo, **kwargs) - - if configs is not None: - for nick, config in configs.items(): - self.add_config(nick, config) - - if suite is not None: - self.add_suite(suite) - - self._report_prefix = get_experiment_name() - - def add_comparison_table_step(self, attributes=None): - revisions = self._HACK_revisions - if revisions is None: - # TODO: It's not clear to me what a "revision" in the - # overall context of the code really is, e.g. when keeping - # the translator and preprocessor method fixed and only - # changing the search component. It's also not really - # clear to me how the interface of the Compare... reports - # works and how to use it more generally. Hence the - # present hack. - - # Ideally, this method should look at the table columns we - # have (defined by planners and planner configurations), - # pair them up in a suitable way, either controlled by a - # convenience parameter or a more general grouping method, - # and then use this to define which pairs go together. 
- raise NotImplementedError( - "only supported when specifying revisions in __init__") - - if attributes is None: - attributes = self.DEFAULT_TABLE_ATTRIBUTES - if len(revisions) == 2: - report = CompareRevisionsReport(*revisions, attributes=attributes) - self.add_report(report, outfile="%s-compare.html" % self._report_prefix) - else: - # HACK: assumes the first revision is the noe everything else is compared against - for rev in revisions[1:]: - report = CompareRevisionsReport(revisions[0], rev, attributes=attributes) - self.add_report(report, outfile="%s-compare-%s.html" % (self._report_prefix, rev)) - - - def add_scatter_plot_step(self, attributes=None): - if attributes is None: - attributes = self.DEFAULT_SCATTER_PLOT_ATTRIBUTES - revisions = self._HACK_revisions - if revisions is None: - # TODO: See add_comparison_table_step. - raise NotImplementedError( - "only supported when specifying revisions in __init__") - # HACK: assumes the first revision is the noe everything else is compared against - for rev in revisions[1:]: - # TODO: Should generalize this, too, by offering a general - # grouping function and then comparing any pair of - # settings in the same group. 
- scatter_dir = os.path.join(self.eval_dir, "scatter") - def make_scatter_plots(): - configs = [conf[0] for conf in self.configs] - for nick in configs: - config_before = "%s-%s" % (revisions[0], nick) - config_after = "%s-%s" % (rev, nick) - for attribute in attributes: - name = "%s-%s-%s" % (self._report_prefix, attribute, nick) - report = ScatterPlotReport( - filter_config=[config_before, config_after], - attributes=[attribute], - get_category=lambda run1, run2: run1["domain"], - legend_location=(1.3, 0.5)) - report(self.eval_dir, os.path.join(scatter_dir, rev, name)) - - self.add_step(Step("make-scatter-plots", make_scatter_plots)) diff --git a/experiments/issue385/exp1.py b/experiments/issue385/exp1.py deleted file mode 100755 index 3bf0c363c7..0000000000 --- a/experiments/issue385/exp1.py +++ /dev/null @@ -1,42 +0,0 @@ -#! /usr/bin/env python -# -*- coding: utf-8 -*- - -from downward.suites import suite_optimal_with_ipc11 -from downward.configs import default_configs_optimal - -import common_setup - - -REVS = ["issue385-base", "issue385-v1", "issue385-v2"] -CONFIGS = default_configs_optimal() - -# remove config that is disabled in this branch -del CONFIGS['astar_selmax_lmcut_lmcount'] - -TEST_RUN = True - -if TEST_RUN: - SUITE = "gripper:prob01.pddl" - PRIORITY = None # "None" means local experiment -else: - SUITE = suite_optimal_with_ipc11() - PRIORITY = 0 # number means maia experiment - - -# TODO: I'd like to specify "search_revisions" (which uses the same -# translator and preprocessor for everything) instead of "revisions" -# here, but I can't seem to make this work with the REVS argument for -# CompareRevisionsReport. 
- -exp = common_setup.MyExperiment( - grid_priority=PRIORITY, - revisions=REVS, - configs=CONFIGS, - suite=SUITE - ) - - -exp.add_comparison_table_step() -exp.add_scatter_plot_step() - -exp() diff --git a/experiments/issue385/exp2.py b/experiments/issue385/exp2.py deleted file mode 100755 index d84a68b2c8..0000000000 --- a/experiments/issue385/exp2.py +++ /dev/null @@ -1,42 +0,0 @@ -#! /usr/bin/env python -# -*- coding: utf-8 -*- - -from downward.suites import suite_optimal_with_ipc11 -from downward.configs import default_configs_optimal - -import common_setup - - -REVS = ["issue385-v3-base", "issue385-v3"] -CONFIGS = default_configs_optimal() - -# remove config that is disabled in this branch -del CONFIGS['astar_selmax_lmcut_lmcount'] - -TEST_RUN = True - -if TEST_RUN: - SUITE = "gripper:prob01.pddl" - PRIORITY = None # "None" means local experiment -else: - SUITE = suite_optimal_with_ipc11() - PRIORITY = 0 # number means maia experiment - - -# TODO: I'd like to specify "search_revisions" (which uses the same -# translator and preprocessor for everything) instead of "revisions" -# here, but I can't seem to make this work with the REVS argument for -# CompareRevisionsReport. - -exp = common_setup.MyExperiment( - grid_priority=PRIORITY, - revisions=REVS, - configs=CONFIGS, - suite=SUITE - ) - - -exp.add_comparison_table_step() -exp.add_scatter_plot_step() - -exp() diff --git a/experiments/issue385/exp3.py b/experiments/issue385/exp3.py deleted file mode 100755 index 57e0db4622..0000000000 --- a/experiments/issue385/exp3.py +++ /dev/null @@ -1,42 +0,0 @@ -#! 
/usr/bin/env python -# -*- coding: utf-8 -*- - -from downward.suites import suite_satisficing_with_ipc11 -from downward.configs import default_configs_satisficing - -import common_setup - - -REVS = ["issue385-v3-base", "issue385-v3"] -CONFIGS = default_configs_satisficing() - -# remove config that is disabled in this branch -del CONFIGS['astar_selmax_lmcut_lmcount'] - -TEST_RUN = True - -if TEST_RUN: - SUITE = "gripper:prob01.pddl" - PRIORITY = None # "None" means local experiment -else: - SUITE = suite_satisficing_with_ipc11() - PRIORITY = 0 # number means maia experiment - - -# TODO: I'd like to specify "search_revisions" (which uses the same -# translator and preprocessor for everything) instead of "revisions" -# here, but I can't seem to make this work with the REVS argument for -# CompareRevisionsReport. - -exp = common_setup.MyExperiment( - grid_priority=PRIORITY, - revisions=REVS, - configs=CONFIGS, - suite=SUITE - ) - - -exp.add_comparison_table_step() -exp.add_scatter_plot_step() - -exp() diff --git a/experiments/issue386/common_setup.py b/experiments/issue386/common_setup.py deleted file mode 100644 index 884e6b0323..0000000000 --- a/experiments/issue386/common_setup.py +++ /dev/null @@ -1,231 +0,0 @@ -# -*- coding: utf-8 -*- - -import os.path - -from lab.environments import MaiaEnvironment -from lab.steps import Step - -from downward.checkouts import Translator, Preprocessor, Planner -from downward.experiments import DownwardExperiment -from downward.reports.compare import CompareRevisionsReport -from downward.reports.scatter import ScatterPlotReport - - -def get_script(): - """Get file name of main script.""" - import __main__ - return __main__.__file__ - - -def get_script_dir(): - """Get directory of main script. - - Usually a relative directory (depends on how it was called by the user.)""" - return os.path.dirname(get_script()) - - -def get_experiment_name(): - """Get name for experiment. - - Derived from the filename of the main script, e.g. 
- "/ham/spam/eggs.py" => "eggs".""" - script = os.path.abspath(get_script()) - script_dir = os.path.basename(os.path.dirname(script)) - script_base = os.path.splitext(os.path.basename(script))[0] - return "%s-%s" % (script_dir, script_base) - - -def get_data_dir(): - """Get data dir for the experiment. - - This is the subdirectory "data" of the directory containing - the main script.""" - return os.path.join(get_script_dir(), "data", get_experiment_name()) - - -def get_repo_base(): - """Get base directory of the repository, as an absolute path. - - Found by searching upwards in the directory tree from the main - script until a directory with a subdirectory named ".hg" is found.""" - path = os.path.abspath(get_script_dir()) - while True: - if os.path.exists(os.path.join(path, ".hg")): - return path - path = os.path.dirname(path) - - -class MyExperiment(DownwardExperiment): - DEFAULT_TABLE_ATTRIBUTES = [ - "cost", - "coverage", - "evaluations", - "expansions", - "expansions_until_last_jump", - "generated", - "memory", - "run_dir", - "score_evaluations", - "score_expansions", - "score_generated", - "score_memory", - "score_search_time", - "score_total_time", - "search_time", - "total_time", - ] - - DEFAULT_SCATTER_PLOT_ATTRIBUTES = [ - "total_time", - "search_time", - "memory", - "expansions_until_last_jump", - ] - - """Wrapper for DownwardExperiment with a few convenience features.""" - - def __init__(self, configs=None, grid_priority=None, path=None, - repo=None, revisions=None, search_revisions=None, - suite=None, **kwargs): - """Create a DownwardExperiment with some convenience features. - - If "configs" is specified, it should be a dict of {nick: - cmdline} pairs that sets the planner configurations to test. - - If "grid_priority" is specified and no environment is - specifically requested in **kwargs, use the maia environment - with the specified priority. 
- - If "path" is not specified, the experiment data path is - derived automatically from the main script's filename. - - If "repo" is not specified, the repository base is derived - automatically from the main script's path. - - If "revisions" is specified, it should be a non-empty - list of revisions, which specify which planner versions to use - in the experiment. The same versions are used for translator, - preprocessor and search. - - If "search_revisions" is specified, it should be a non-empty - list of revisions, which specify which search component - versions to use in the experiment. All experiments use the - translator and preprocessor component of the first - revision. - - If "suite" is specified, it should specify a problem suite. - - Options "combinations" (from the base class), "revisions" and - "search_revisions" are mutually exclusive.""" - - if grid_priority is not None and "environment" not in kwargs: - kwargs["environment"] = MaiaEnvironment(priority=grid_priority) - - if path is None: - path = get_data_dir() - - if repo is None: - repo = get_repo_base() - - num_rev_opts_specified = ( - int(revisions is not None) + - int(search_revisions is not None) + - int(kwargs.get("combinations") is not None)) - - if num_rev_opts_specified > 1: - raise ValueError('must specify exactly one of "revisions", ' - '"search_revisions" or "combinations"') - - # See add_comparison_table_step for more on this variable. 
- self._HACK_revisions = revisions - - if revisions is not None: - if not revisions: - raise ValueError("revisions cannot be empty") - combinations = [(Translator(repo, rev), - Preprocessor(repo, rev), - Planner(repo, rev)) - for rev in revisions] - kwargs["combinations"] = combinations - - if search_revisions is not None: - if not search_revisions: - raise ValueError("search_revisions cannot be empty") - base_rev = search_revisions[0] - translator = Translator(repo, base_rev) - preprocessor = Preprocessor(repo, base_rev) - combinations = [(translator, preprocessor, Planner(repo, rev)) - for rev in search_revisions] - kwargs["combinations"] = combinations - - DownwardExperiment.__init__(self, path=path, repo=repo, **kwargs) - - if configs is not None: - for nick, config in configs.items(): - self.add_config(nick, config) - - if suite is not None: - self.add_suite(suite) - - self._report_prefix = get_experiment_name() - - def add_comparison_table_step(self, attributes=None): - revisions = self._HACK_revisions - if revisions is None: - # TODO: It's not clear to me what a "revision" in the - # overall context of the code really is, e.g. when keeping - # the translator and preprocessor method fixed and only - # changing the search component. It's also not really - # clear to me how the interface of the Compare... reports - # works and how to use it more generally. Hence the - # present hack. - - # Ideally, this method should look at the table columns we - # have (defined by planners and planner configurations), - # pair them up in a suitable way, either controlled by a - # convenience parameter or a more general grouping method, - # and then use this to define which pairs go together. 
- raise NotImplementedError( - "only supported when specifying revisions in __init__") - - if attributes is None: - attributes = self.DEFAULT_TABLE_ATTRIBUTES - if len(revisions) == 2: - report = CompareRevisionsReport(*revisions, attributes=attributes) - self.add_report(report, outfile="%s-compare.html" % self._report_prefix) - else: - # HACK: assumes the first revision is the noe everything else is compared against - for rev in revisions[1:]: - report = CompareRevisionsReport(revisions[0], rev, attributes=attributes) - self.add_report(report, outfile="%s-compare-%s.html" % (self._report_prefix, rev)) - - - def add_scatter_plot_step(self, attributes=None): - if attributes is None: - attributes = self.DEFAULT_SCATTER_PLOT_ATTRIBUTES - revisions = self._HACK_revisions - if revisions is None: - # TODO: See add_comparison_table_step. - raise NotImplementedError( - "only supported when specifying revisions in __init__") - # HACK: assumes the first revision is the noe everything else is compared against - for rev in revisions[1:]: - # TODO: Should generalize this, too, by offering a general - # grouping function and then comparing any pair of - # settings in the same group. 
- scatter_dir = os.path.join(self.eval_dir, "scatter") - def make_scatter_plots(): - configs = [conf[0] for conf in self.configs] - for nick in configs: - config_before = "%s-%s" % (revisions[0], nick) - config_after = "%s-%s" % (rev, nick) - for attribute in attributes: - name = "%s-%s-%s" % (self._report_prefix, attribute, nick) - report = ScatterPlotReport( - filter_config=[config_before, config_after], - attributes=[attribute], - get_category=lambda run1, run2: run1["domain"], - legend_location=(1.3, 0.5)) - report(self.eval_dir, os.path.join(scatter_dir, rev, name)) - - self.add_step(Step("make-scatter-plots", make_scatter_plots)) diff --git a/experiments/issue386/exp1.py b/experiments/issue386/exp1.py deleted file mode 100755 index ae5ea9dc1f..0000000000 --- a/experiments/issue386/exp1.py +++ /dev/null @@ -1,39 +0,0 @@ -#! /usr/bin/env python -# -*- coding: utf-8 -*- - -from downward.suites import suite_optimal_with_ipc11 -from downward.configs import default_configs_optimal - -import common_setup - - -REVS = ["issue386-base", "issue386-v1"] -CONFIGS = {'astar_ipdb': ['--search', 'astar(ipdb)']} # default_configs_optimal() - -TEST_RUN = True - -if TEST_RUN: - SUITE = "gripper:prob01.pddl" - PRIORITY = None # "None" means local experiment -else: - SUITE = suite_optimal_with_ipc11() - PRIORITY = 0 # number means maia experiment - - -# TODO: I'd like to specify "search_revisions" (which uses the same -# translator and preprocessor for everything) instead of "revisions" -# here, but I can't seem to make this work with the REVS argument for -# CompareRevisionsReport. - -exp = common_setup.MyExperiment( - grid_priority=PRIORITY, - revisions=REVS, - configs=CONFIGS, - suite=SUITE - ) - - -exp.add_comparison_table_step() -exp.add_scatter_plot_step() - -exp() diff --git a/experiments/issue386/exp2.py b/experiments/issue386/exp2.py deleted file mode 100755 index 60f65fbfd8..0000000000 --- a/experiments/issue386/exp2.py +++ /dev/null @@ -1,42 +0,0 @@ -#! 
/usr/bin/env python -# -*- coding: utf-8 -*- - -from downward.suites import suite_optimal_with_ipc11 -from downward.configs import default_configs_optimal - -import common_setup - - -REVS = ["issue386-base", "issue386-v2"] -CONFIGS = default_configs_optimal() - -# remove config that is disabled in this branch -del CONFIGS['astar_selmax_lmcut_lmcount'] - -TEST_RUN = True - -if TEST_RUN: - SUITE = "gripper:prob01.pddl" - PRIORITY = None # "None" means local experiment -else: - SUITE = suite_optimal_with_ipc11() - PRIORITY = 0 # number means maia experiment - - -# TODO: I'd like to specify "search_revisions" (which uses the same -# translator and preprocessor for everything) instead of "revisions" -# here, but I can't seem to make this work with the REVS argument for -# CompareRevisionsReport. - -exp = common_setup.MyExperiment( - grid_priority=PRIORITY, - revisions=REVS, - configs=CONFIGS, - suite=SUITE - ) - - -exp.add_comparison_table_step() -exp.add_scatter_plot_step() - -exp() diff --git a/experiments/issue386/exp3.py b/experiments/issue386/exp3.py deleted file mode 100755 index 5599ded982..0000000000 --- a/experiments/issue386/exp3.py +++ /dev/null @@ -1,42 +0,0 @@ -#! /usr/bin/env python -# -*- coding: utf-8 -*- - -from downward.suites import suite_optimal_with_ipc11 -from downward.configs import default_configs_optimal - -import common_setup - - -REVS = ["issue386-base", "issue386-v3"] -CONFIGS = default_configs_optimal() - -# remove config that is disabled in this branch -del CONFIGS['astar_selmax_lmcut_lmcount'] - -TEST_RUN = True - -if TEST_RUN: - SUITE = "gripper:prob01.pddl" - PRIORITY = None # "None" means local experiment -else: - SUITE = suite_optimal_with_ipc11() - PRIORITY = 0 # number means maia experiment - - -# TODO: I'd like to specify "search_revisions" (which uses the same -# translator and preprocessor for everything) instead of "revisions" -# here, but I can't seem to make this work with the REVS argument for -# CompareRevisionsReport. 
- -exp = common_setup.MyExperiment( - grid_priority=PRIORITY, - revisions=REVS, - configs=CONFIGS, - suite=SUITE - ) - - -exp.add_comparison_table_step() -exp.add_scatter_plot_step() - -exp() diff --git a/experiments/issue387/common_setup.py b/experiments/issue387/common_setup.py deleted file mode 100644 index 10828130b2..0000000000 --- a/experiments/issue387/common_setup.py +++ /dev/null @@ -1,224 +0,0 @@ -# -*- coding: utf-8 -*- - -import os.path - -from lab.environments import MaiaEnvironment -from lab.steps import Step - -from downward.checkouts import Translator, Preprocessor, Planner -from downward.experiments import DownwardExperiment -from downward.reports.compare import CompareRevisionsReport -from downward.reports.scatter import ScatterPlotReport - - -def get_script(): - """Get file name of main script.""" - import __main__ - return __main__.__file__ - - -def get_script_dir(): - """Get directory of main script. - - Usually a relative directory (depends on how it was called by the user.)""" - return os.path.dirname(get_script()) - - -def get_experiment_name(): - """Get name for experiment. - - Derived from the filename of the main script, e.g. - "/ham/spam/eggs.py" => "eggs".""" - script = os.path.abspath(get_script()) - script_dir = os.path.basename(os.path.dirname(script)) - script_base = os.path.splitext(os.path.basename(script))[0] - return "%s-%s" % (script_dir, script_base) - - -def get_data_dir(): - """Get data dir for the experiment. - - This is the subdirectory "data" of the directory containing - the main script.""" - return os.path.join(get_script_dir(), "data", get_experiment_name()) - - -def get_repo_base(): - """Get base directory of the repository, as an absolute path. 
- - Found by searching upwards in the directory tree from the main - script until a directory with a subdirectory named ".hg" is found.""" - path = os.path.abspath(get_script_dir()) - while True: - if os.path.exists(os.path.join(path, ".hg")): - return path - path = os.path.dirname(path) - - -class MyExperiment(DownwardExperiment): - DEFAULT_TABLE_ATTRIBUTES = [ - "cost", - "coverage", - "evaluations", - "expansions", - "expansions_until_last_jump", - "generated", - "memory", - "run_dir", - "score_evaluations", - "score_expansions", - "score_generated", - "score_memory", - "score_search_time", - "score_total_time", - "search_time", - "total_time", - ] - - DEFAULT_SCATTER_PLOT_ATTRIBUTES = [ - "total_time", - "search_time", - "memory", - "expansions_until_last_jump", - ] - - """Wrapper for DownwardExperiment with a few convenience features.""" - - def __init__(self, configs=None, grid_priority=None, path=None, - repo=None, revisions=None, search_revisions=None, - suite=None, **kwargs): - """Create a DownwardExperiment with some convenience features. - - If "configs" is specified, it should be a dict of {nick: - cmdline} pairs that sets the planner configurations to test. - - If "grid_priority" is specified and no environment is - specifically requested in **kwargs, use the maia environment - with the specified priority. - - If "path" is not specified, the experiment data path is - derived automatically from the main script's filename. - - If "repo" is not specified, the repository base is derived - automatically from the main script's path. - - If "revisions" is specified, it should be a non-empty - list of revisions, which specify which planner versions to use - in the experiment. The same versions are used for translator, - preprocessor and search. - - If "search_revisions" is specified, it should be a non-empty - list of revisions, which specify which search component - versions to use in the experiment. 
All experiments use the - translator and preprocessor component of the first - revision. - - If "suite" is specified, it should specify a problem suite. - - Options "combinations" (from the base class), "revisions" and - "search_revisions" are mutually exclusive.""" - - if grid_priority is not None and "environment" not in kwargs: - kwargs["environment"] = MaiaEnvironment(priority=grid_priority) - - if path is None: - path = get_data_dir() - - if repo is None: - repo = get_repo_base() - - num_rev_opts_specified = ( - int(revisions is not None) + - int(search_revisions is not None) + - int(kwargs.get("combinations") is not None)) - - if num_rev_opts_specified > 1: - raise ValueError('must specify exactly one of "revisions", ' - '"search_revisions" or "combinations"') - - # See add_comparison_table_step for more on this variable. - self._HACK_revisions = revisions - - if revisions is not None: - if not revisions: - raise ValueError("revisions cannot be empty") - combinations = [(Translator(repo, rev), - Preprocessor(repo, rev), - Planner(repo, rev)) - for rev in revisions] - kwargs["combinations"] = combinations - - if search_revisions is not None: - if not search_revisions: - raise ValueError("search_revisions cannot be empty") - base_rev = search_revisions[0] - translator = Translator(repo, base_rev) - preprocessor = Preprocessor(repo, base_rev) - combinations = [(translator, preprocessor, Planner(repo, rev)) - for rev in search_revisions] - kwargs["combinations"] = combinations - - DownwardExperiment.__init__(self, path=path, repo=repo, **kwargs) - - if configs is not None: - for nick, config in configs.items(): - self.add_config(nick, config) - - if suite is not None: - self.add_suite(suite) - - self._report_prefix = get_experiment_name() - - def add_comparison_table_step(self, attributes=None): - revisions = self._HACK_revisions - if revisions is None: - # TODO: It's not clear to me what a "revision" in the - # overall context of the code really is, e.g. 
when keeping - # the translator and preprocessor method fixed and only - # changing the search component. It's also not really - # clear to me how the interface of the Compare... reports - # works and how to use it more generally. Hence the - # present hack. - - # Ideally, this method should look at the table columns we - # have (defined by planners and planner configurations), - # pair them up in a suitable way, either controlled by a - # convenience parameter or a more general grouping method, - # and then use this to define which pairs go together. - raise NotImplementedError( - "only supported when specifying revisions in __init__") - - if attributes is None: - attributes = self.DEFAULT_TABLE_ATTRIBUTES - report = CompareRevisionsReport(*revisions, attributes=attributes) - self.add_report(report, outfile="%s-compare.html" % self._report_prefix) - - def add_scatter_plot_step(self, attributes=None): - if attributes is None: - attributes = self.DEFAULT_SCATTER_PLOT_ATTRIBUTES - revisions = self._HACK_revisions - if revisions is None: - # TODO: See add_comparison_table_step. - raise NotImplementedError( - "only supported when specifying revisions in __init__") - if len(revisions) != 2: - # TODO: Should generalize this, too, by offering a general - # grouping function and then comparing any pair of - # settings in the same group. 
- raise NotImplementedError("need two revisions") - scatter_dir = os.path.join(self.eval_dir, "scatter") - def make_scatter_plots(): - configs = [conf[0] for conf in self.configs] - for nick in configs: - config_before = "%s-%s" % (revisions[0], nick) - config_after = "%s-%s" % (revisions[1], nick) - for attribute in attributes: - name = "%s-%s-%s" % (self._report_prefix, attribute, nick) - report = ScatterPlotReport( - filter_config=[config_before, config_after], - attributes=[attribute], - get_category=lambda run1, run2: run1["domain"], - legend_location=(1.3, 0.5)) - report(self.eval_dir, os.path.join(scatter_dir, name)) - - self.add_step(Step("make-scatter-plots", make_scatter_plots)) diff --git a/experiments/issue387/issue387.py b/experiments/issue387/issue387.py deleted file mode 100755 index 31b32905cb..0000000000 --- a/experiments/issue387/issue387.py +++ /dev/null @@ -1,42 +0,0 @@ -#! /usr/bin/env python -# -*- coding: utf-8 -*- - -from downward.suites import suite_optimal_with_ipc11 -from downward.configs import default_configs_optimal - -import common_setup - - -REVS = ["issue387-base", "issue387-v1"] -CONFIGS = default_configs_optimal() - -# remove config that is disabled in this branch -del CONFIGS['astar_selmax_lmcut_lmcount'] - -TEST_RUN = True - -if TEST_RUN: - SUITE = "gripper:prob01.pddl" - PRIORITY = None # "None" means local experiment -else: - SUITE = suite_optimal_with_ipc11() - PRIORITY = 0 # number means maia experiment - - -# TODO: I'd like to specify "search_revisions" (which uses the same -# translator and preprocessor for everything) instead of "revisions" -# here, but I can't seem to make this work with the REVS argument for -# CompareRevisionsReport. 
- -exp = common_setup.MyExperiment( - grid_priority=PRIORITY, - revisions=REVS, - configs=CONFIGS, - suite=SUITE - ) - - -exp.add_comparison_table_step() -exp.add_scatter_plot_step() - -exp() diff --git a/experiments/issue392/common_setup.py b/experiments/issue392/common_setup.py deleted file mode 100644 index 1fc39393b0..0000000000 --- a/experiments/issue392/common_setup.py +++ /dev/null @@ -1,347 +0,0 @@ -# -*- coding: utf-8 -*- - -import itertools -import os -import platform -import sys - -from lab.environments import LocalEnvironment, MaiaEnvironment -from lab.experiment import ARGPARSER -from lab.steps import Step - -from downward.experiments import DownwardExperiment, _get_rev_nick -from downward.checkouts import Translator, Preprocessor, Planner -from downward.reports.absolute import AbsoluteReport -from downward.reports.compare import CompareRevisionsReport -from downward.reports.scatter import ScatterPlotReport - - -def parse_args(): - ARGPARSER.add_argument( - "--test", - choices=["yes", "no", "auto"], - default="auto", - dest="test_run", - help="test experiment locally on a small suite if --test=yes or " - "--test=auto and we are not on a cluster") - return ARGPARSER.parse_args() - -ARGS = parse_args() - - -def get_script(): - """Get file name of main script.""" - import __main__ - return __main__.__file__ - - -def get_script_dir(): - """Get directory of main script. - - Usually a relative directory (depends on how it was called by the user.)""" - return os.path.dirname(get_script()) - - -def get_experiment_name(): - """Get name for experiment. - - Derived from the absolute filename of the main script, e.g. - "/ham/spam/eggs.py" => "spam-eggs".""" - script = os.path.abspath(get_script()) - script_dir = os.path.basename(os.path.dirname(script)) - script_base = os.path.splitext(os.path.basename(script))[0] - return "%s-%s" % (script_dir, script_base) - - -def get_data_dir(): - """Get data dir for the experiment. 
- - This is the subdirectory "data" of the directory containing - the main script.""" - return os.path.join(get_script_dir(), "data", get_experiment_name()) - - -def get_repo_base(): - """Get base directory of the repository, as an absolute path. - - Search upwards in the directory tree from the main script until a - directory with a subdirectory named ".hg" is found. - - Abort if the repo base cannot be found.""" - path = os.path.abspath(get_script_dir()) - while os.path.dirname(path) != path: - if os.path.exists(os.path.join(path, ".hg")): - return path - path = os.path.dirname(path) - sys.exit("repo base could not be found") - - -def is_running_on_cluster(): - node = platform.node() - return ("cluster" in node or - node.startswith("gkigrid") or - node in ["habakuk", "turtur"]) - - -def is_test_run(): - return ARGS.test_run == "yes" or (ARGS.test_run == "auto" and - not is_running_on_cluster()) - - -class IssueExperiment(DownwardExperiment): - """Wrapper for DownwardExperiment with a few convenience features.""" - - DEFAULT_TEST_SUITE = "gripper:prob01.pddl" - - # TODO: Add something about errors/exit codes. - DEFAULT_TABLE_ATTRIBUTES = [ - "cost", - "coverage", - "evaluations", - "expansions", - "expansions_until_last_jump", - "generated", - "memory", - "quality", - "run_dir", - "score_evaluations", - "score_expansions", - "score_generated", - "score_memory", - "score_search_time", - "score_total_time", - "search_time", - "total_time", - ] - - DEFAULT_SCATTER_PLOT_ATTRIBUTES = [ - "evaluations", - "expansions", - "expansions_until_last_jump", - "initial_h_value", - "memory", - "search_time", - "total_time", - ] - - PORTFOLIO_ATTRIBUTES = [ - "cost", - "coverage", - "plan_length", - ] - - def __init__(self, configs, suite, grid_priority=None, path=None, - repo=None, revisions=None, search_revisions=None, - test_suite=None, **kwargs): - """Create a DownwardExperiment with some convenience features. 
- - *configs* must be a non-empty dict of {nick: cmdline} pairs - that sets the planner configurations to test. :: - - IssueExperiment(configs={ - "lmcut": ["--search", "astar(lmcut())"], - "ipdb": ["--search", "astar(ipdb())"]}) - - *suite* sets the benchmarks for the experiment. It must be a - single string or a list of strings specifying domains or - tasks. The downward.suites module has many predefined - suites. :: - - IssueExperiment(suite=["grid", "gripper:prob01.pddl"]) - - from downward import suites - IssueExperiment(suite=suites.suite_all()) - IssueExperiment(suite=suites.suite_satisficing_with_ipc11()) - IssueExperiment(suite=suites.suite_optimal()) - - Use *grid_priority* to set the job priority for cluster - experiments. It must be in the range [-1023, 0] where 0 is the - highest priority. By default the priority is 0. :: - - IssueExperiment(grid_priority=-500) - - If *path* is specified, it must be the path to where the - experiment should be built (e.g. - /home/john/experiments/issue123/exp01/). If omitted, the - experiment path is derived automatically from the main - script's filename. Example:: - - script = experiments/issue123/exp01.py --> - path = experiments/issue123/data/issue123-exp01/ - - If *repo* is specified, it must be the path to the root of a - local Fast Downward repository. If omitted, the repository - is derived automatically from the main script's path. Example:: - - script = /path/to/fd-repo/experiments/issue123/exp01.py --> - repo = /path/to/fd-repo - - If *revisions* is specified, it should be a non-empty - list of revisions, which specify which planner versions to use - in the experiment. The same versions are used for translator, - preprocessor and search. :: - - IssueExperiment(revisions=["issue123", "4b3d581643"]) - - If *search_revisions* is specified, it should be a non-empty - list of revisions, which specify which search component - versions to use in the experiment. 
All runs use the - translator and preprocessor component of the first - revision. :: - - IssueExperiment(search_revisions=["default", "issue123"]) - - If you really need to specify the (translator, preprocessor, - planner) triples manually, use the *combinations* parameter - from the base class (might be deprecated soon). The options - *revisions*, *search_revisions* and *combinations* can be - freely mixed, but at least one of them must be given. - - Specify *test_suite* to set the benchmarks for experiment test - runs. By default the first gripper task is used. - - IssueExperiment(test_suite=["depot:pfile1", "tpp:p01.pddl"]) - - """ - - if is_test_run(): - kwargs["environment"] = LocalEnvironment() - suite = test_suite or self.DEFAULT_TEST_SUITE - elif "environment" not in kwargs: - kwargs["environment"] = MaiaEnvironment(priority=grid_priority) - - if path is None: - path = get_data_dir() - - if repo is None: - repo = get_repo_base() - - kwargs.setdefault("combinations", []) - - if not any([revisions, search_revisions, kwargs["combinations"]]): - raise ValueError('At least one of "revisions", "search_revisions" ' - 'or "combinations" must be given') - - if revisions: - kwargs["combinations"].extend([ - (Translator(repo, rev), - Preprocessor(repo, rev), - Planner(repo, rev)) - for rev in revisions]) - - if search_revisions: - base_rev = search_revisions[0] - # Use the same nick for all parts to get short revision nick. 
- kwargs["combinations"].extend([ - (Translator(repo, base_rev, nick=rev), - Preprocessor(repo, base_rev, nick=rev), - Planner(repo, rev, nick=rev)) - for rev in search_revisions]) - - DownwardExperiment.__init__(self, path=path, repo=repo, **kwargs) - - self._config_nicks = [] - for nick, config in configs.items(): - self.add_config(nick, config) - - self.add_suite(suite) - - @property - def revision_nicks(self): - # TODO: Once the add_algorithm() API is available we should get - # rid of the call to _get_rev_nick() and avoid inspecting the - # list of combinations by setting and saving the algorithm nicks. - return [_get_rev_nick(*combo) for combo in self.combinations] - - def add_config(self, nick, config, timeout=None): - DownwardExperiment.add_config(self, nick, config, timeout=timeout) - self._config_nicks.append(nick) - - def add_absolute_report_step(self, **kwargs): - """Add step that makes an absolute report. - - Absolute reports are useful for experiments that don't - compare revisions. - - The report is written to the experiment evaluation directory. - - All *kwargs* will be passed to the AbsoluteReport class. If - the keyword argument *attributes* is not specified, a - default list of attributes is used. :: - - exp.add_absolute_report_step(attributes=["coverage"]) - - """ - kwargs.setdefault("attributes", self.DEFAULT_TABLE_ATTRIBUTES) - report = AbsoluteReport(**kwargs) - outfile = get_experiment_name() + "." + report.output_format - self.add_report(report, outfile=outfile) - - def add_comparison_table_step(self, **kwargs): - """Add a step that makes pairwise revision comparisons. - - Create comparative reports for all pairs of Fast Downward - revision triples. Each report pairs up the runs of the same - config and lists the two absolute attribute values and their - difference for all attributes in kwargs["attributes"]. - - All *kwargs* will be passed to the CompareRevisionsReport - class. 
If the keyword argument *attributes* is not - specified, a default list of attributes is used. :: - - exp.add_comparison_table_step(attributes=["coverage"]) - - """ - kwargs.setdefault("attributes", self.DEFAULT_TABLE_ATTRIBUTES) - - def make_comparison_tables(): - for rev1, rev2 in itertools.combinations(self.revision_nicks, 2): - report = CompareRevisionsReport(rev1, rev2, **kwargs) - outfile = os.path.join(self.eval_dir, - "%s-%s-compare.html" % (rev1, rev2)) - report(self.eval_dir, outfile) - - self.add_step(Step("make-comparison-tables", make_comparison_tables)) - - def add_scatter_plot_step(self, attributes=None): - """Add a step that creates scatter plots for all revision pairs. - - Create a scatter plot for each combination of attribute, - configuration and revision pair. If *attributes* is not - specified, a list of common scatter plot attributes is used. - For portfolios all attributes except "cost", "coverage" and - "plan_length" will be ignored. :: - - exp.add_scatter_plot_step(attributes=["expansions"]) - - """ - if attributes is None: - attributes = self.DEFAULT_SCATTER_PLOT_ATTRIBUTES - scatter_dir = os.path.join(self.eval_dir, "scatter") - - def is_portfolio(config_nick): - return "fdss" in config_nick - - def make_scatter_plots(): - for config_nick in self._config_nicks: - for rev1, rev2 in itertools.combinations( - self.revision_nicks, 2): - algo1 = "%s-%s" % (rev1, config_nick) - algo2 = "%s-%s" % (rev2, config_nick) - if is_portfolio(config_nick): - valid_attributes = [ - attr for attr in attributes - if attr in self.PORTFOLIO_ATTRIBUTES] - else: - valid_attributes = attributes - for attribute in valid_attributes: - name = "-".join([rev1, rev2, attribute, config_nick]) - print "Make scatter plot for", name - report = ScatterPlotReport( - filter_config=[algo1, algo2], - attributes=[attribute], - get_category=lambda run1, run2: run1["domain"], - legend_location=(1.3, 0.5)) - report(self.eval_dir, os.path.join(scatter_dir, name)) - - 
self.add_step(Step("make-scatter-plots", make_scatter_plots)) diff --git a/experiments/issue392/lama-nonunit.py b/experiments/issue392/lama-nonunit.py deleted file mode 100755 index 404db64821..0000000000 --- a/experiments/issue392/lama-nonunit.py +++ /dev/null @@ -1,42 +0,0 @@ -#! /usr/bin/env python -# -*- coding: utf-8 -*- - -from downward import suites - -import common_setup - - -REVS = ["issue392-v2"] -LIMITS = {"search_time": 300} - -CONFIGS = {} -for randomize in ["false", "true"]: - for pref_first in ["false", "true"]: - CONFIGS["lama-nonunit-randomize-%(randomize)s-pref_first-%(pref_first)s" % locals()] = [ - "--heuristic", "hlm1,hff1=lm_ff_syn(lm_rhw(reasonable_orders=true,lm_cost_type=ONE,cost_type=ONE))", - "--heuristic", "hlm2,hff2=lm_ff_syn(lm_rhw(reasonable_orders=true,lm_cost_type=PLUSONE,cost_type=PLUSONE))", - "--search", - "iterated([" - "lazy_greedy([hff1,hlm1],preferred=[hff1,hlm1],randomize_successors=%(randomize)s,preferred_successors_first=%(pref_first)s,cost_type=ONE,reopen_closed=false)," - "lazy_greedy([hff2,hlm2],preferred=[hff2,hlm2],randomize_successors=%(randomize)s,preferred_successors_first=%(pref_first)s,reopen_closed=false)," - "lazy_wastar([hff2,hlm2],preferred=[hff2,hlm2],randomize_successors=%(randomize)s,preferred_successors_first=%(pref_first)s,w=5)," - "lazy_wastar([hff2,hlm2],preferred=[hff2,hlm2],randomize_successors=%(randomize)s,preferred_successors_first=%(pref_first)s,w=3)," - "lazy_wastar([hff2,hlm2],preferred=[hff2,hlm2],randomize_successors=%(randomize)s,preferred_successors_first=%(pref_first)s,w=2)," - "lazy_wastar([hff2,hlm2],preferred=[hff2,hlm2],randomize_successors=%(randomize)s,preferred_successors_first=%(pref_first)s,w=1)]," - "repeat_last=true,continue_on_fail=true)" % locals() - ] - -SUITE = sorted(set(suites.suite_satisficing_with_ipc11()) & - set(suites.suite_diverse_costs())) - - -exp = common_setup.IssueExperiment( - revisions=REVS, - configs=CONFIGS, - suite=SUITE, - limits=LIMITS, - ) - 
-exp.add_absolute_report_step() - -exp() diff --git a/experiments/issue392/lama-unit.py b/experiments/issue392/lama-unit.py deleted file mode 100755 index 7924733b63..0000000000 --- a/experiments/issue392/lama-unit.py +++ /dev/null @@ -1,41 +0,0 @@ -#! /usr/bin/env python -# -*- coding: utf-8 -*- - -from downward import suites - -import common_setup - - -REVS = ["issue392-v2"] -LIMITS = {"search_time": 300} - -CONFIGS = {} -for randomize in ["false", "true"]: - for pref_first in ["false", "true"]: - CONFIGS["lama-unit-randomize-%(randomize)s-pref_first-%(pref_first)s" % locals()] = [ - "--heuristic", - "hlm,hff=lm_ff_syn(lm_rhw(reasonable_orders=true,lm_cost_type=PLUSONE,cost_type=PLUSONE))", - "--search", - "iterated([" - "lazy_greedy([hff,hlm],preferred=[hff,hlm],randomize_successors=%(randomize)s,preferred_successors_first=%(pref_first)s)," - "lazy_wastar([hff,hlm],preferred=[hff,hlm],randomize_successors=%(randomize)s,preferred_successors_first=%(pref_first)s,w=5)," - "lazy_wastar([hff,hlm],preferred=[hff,hlm],randomize_successors=%(randomize)s,preferred_successors_first=%(pref_first)s,w=3)," - "lazy_wastar([hff,hlm],preferred=[hff,hlm],randomize_successors=%(randomize)s,preferred_successors_first=%(pref_first)s,w=2)," - "lazy_wastar([hff,hlm],preferred=[hff,hlm],randomize_successors=%(randomize)s,preferred_successors_first=%(pref_first)s,w=1)]," - "repeat_last=true,continue_on_fail=true)" % locals() - ] - -SUITE = sorted(set(suites.suite_satisficing_with_ipc11()) & - set(suites.suite_unit_costs())) - - -exp = common_setup.IssueExperiment( - revisions=REVS, - configs=CONFIGS, - suite=SUITE, - limits=LIMITS, - ) - -exp.add_absolute_report_step() - -exp() diff --git a/experiments/issue414/common_setup.py b/experiments/issue414/common_setup.py deleted file mode 100644 index 1fc39393b0..0000000000 --- a/experiments/issue414/common_setup.py +++ /dev/null @@ -1,347 +0,0 @@ -# -*- coding: utf-8 -*- - -import itertools -import os -import platform -import sys - -from 
lab.environments import LocalEnvironment, MaiaEnvironment -from lab.experiment import ARGPARSER -from lab.steps import Step - -from downward.experiments import DownwardExperiment, _get_rev_nick -from downward.checkouts import Translator, Preprocessor, Planner -from downward.reports.absolute import AbsoluteReport -from downward.reports.compare import CompareRevisionsReport -from downward.reports.scatter import ScatterPlotReport - - -def parse_args(): - ARGPARSER.add_argument( - "--test", - choices=["yes", "no", "auto"], - default="auto", - dest="test_run", - help="test experiment locally on a small suite if --test=yes or " - "--test=auto and we are not on a cluster") - return ARGPARSER.parse_args() - -ARGS = parse_args() - - -def get_script(): - """Get file name of main script.""" - import __main__ - return __main__.__file__ - - -def get_script_dir(): - """Get directory of main script. - - Usually a relative directory (depends on how it was called by the user.)""" - return os.path.dirname(get_script()) - - -def get_experiment_name(): - """Get name for experiment. - - Derived from the absolute filename of the main script, e.g. - "/ham/spam/eggs.py" => "spam-eggs".""" - script = os.path.abspath(get_script()) - script_dir = os.path.basename(os.path.dirname(script)) - script_base = os.path.splitext(os.path.basename(script))[0] - return "%s-%s" % (script_dir, script_base) - - -def get_data_dir(): - """Get data dir for the experiment. - - This is the subdirectory "data" of the directory containing - the main script.""" - return os.path.join(get_script_dir(), "data", get_experiment_name()) - - -def get_repo_base(): - """Get base directory of the repository, as an absolute path. - - Search upwards in the directory tree from the main script until a - directory with a subdirectory named ".hg" is found. 
- - Abort if the repo base cannot be found.""" - path = os.path.abspath(get_script_dir()) - while os.path.dirname(path) != path: - if os.path.exists(os.path.join(path, ".hg")): - return path - path = os.path.dirname(path) - sys.exit("repo base could not be found") - - -def is_running_on_cluster(): - node = platform.node() - return ("cluster" in node or - node.startswith("gkigrid") or - node in ["habakuk", "turtur"]) - - -def is_test_run(): - return ARGS.test_run == "yes" or (ARGS.test_run == "auto" and - not is_running_on_cluster()) - - -class IssueExperiment(DownwardExperiment): - """Wrapper for DownwardExperiment with a few convenience features.""" - - DEFAULT_TEST_SUITE = "gripper:prob01.pddl" - - # TODO: Add something about errors/exit codes. - DEFAULT_TABLE_ATTRIBUTES = [ - "cost", - "coverage", - "evaluations", - "expansions", - "expansions_until_last_jump", - "generated", - "memory", - "quality", - "run_dir", - "score_evaluations", - "score_expansions", - "score_generated", - "score_memory", - "score_search_time", - "score_total_time", - "search_time", - "total_time", - ] - - DEFAULT_SCATTER_PLOT_ATTRIBUTES = [ - "evaluations", - "expansions", - "expansions_until_last_jump", - "initial_h_value", - "memory", - "search_time", - "total_time", - ] - - PORTFOLIO_ATTRIBUTES = [ - "cost", - "coverage", - "plan_length", - ] - - def __init__(self, configs, suite, grid_priority=None, path=None, - repo=None, revisions=None, search_revisions=None, - test_suite=None, **kwargs): - """Create a DownwardExperiment with some convenience features. - - *configs* must be a non-empty dict of {nick: cmdline} pairs - that sets the planner configurations to test. :: - - IssueExperiment(configs={ - "lmcut": ["--search", "astar(lmcut())"], - "ipdb": ["--search", "astar(ipdb())"]}) - - *suite* sets the benchmarks for the experiment. It must be a - single string or a list of strings specifying domains or - tasks. The downward.suites module has many predefined - suites. 
:: - - IssueExperiment(suite=["grid", "gripper:prob01.pddl"]) - - from downward import suites - IssueExperiment(suite=suites.suite_all()) - IssueExperiment(suite=suites.suite_satisficing_with_ipc11()) - IssueExperiment(suite=suites.suite_optimal()) - - Use *grid_priority* to set the job priority for cluster - experiments. It must be in the range [-1023, 0] where 0 is the - highest priority. By default the priority is 0. :: - - IssueExperiment(grid_priority=-500) - - If *path* is specified, it must be the path to where the - experiment should be built (e.g. - /home/john/experiments/issue123/exp01/). If omitted, the - experiment path is derived automatically from the main - script's filename. Example:: - - script = experiments/issue123/exp01.py --> - path = experiments/issue123/data/issue123-exp01/ - - If *repo* is specified, it must be the path to the root of a - local Fast Downward repository. If omitted, the repository - is derived automatically from the main script's path. Example:: - - script = /path/to/fd-repo/experiments/issue123/exp01.py --> - repo = /path/to/fd-repo - - If *revisions* is specified, it should be a non-empty - list of revisions, which specify which planner versions to use - in the experiment. The same versions are used for translator, - preprocessor and search. :: - - IssueExperiment(revisions=["issue123", "4b3d581643"]) - - If *search_revisions* is specified, it should be a non-empty - list of revisions, which specify which search component - versions to use in the experiment. All runs use the - translator and preprocessor component of the first - revision. :: - - IssueExperiment(search_revisions=["default", "issue123"]) - - If you really need to specify the (translator, preprocessor, - planner) triples manually, use the *combinations* parameter - from the base class (might be deprecated soon). The options - *revisions*, *search_revisions* and *combinations* can be - freely mixed, but at least one of them must be given. 
- - Specify *test_suite* to set the benchmarks for experiment test - runs. By default the first gripper task is used. - - IssueExperiment(test_suite=["depot:pfile1", "tpp:p01.pddl"]) - - """ - - if is_test_run(): - kwargs["environment"] = LocalEnvironment() - suite = test_suite or self.DEFAULT_TEST_SUITE - elif "environment" not in kwargs: - kwargs["environment"] = MaiaEnvironment(priority=grid_priority) - - if path is None: - path = get_data_dir() - - if repo is None: - repo = get_repo_base() - - kwargs.setdefault("combinations", []) - - if not any([revisions, search_revisions, kwargs["combinations"]]): - raise ValueError('At least one of "revisions", "search_revisions" ' - 'or "combinations" must be given') - - if revisions: - kwargs["combinations"].extend([ - (Translator(repo, rev), - Preprocessor(repo, rev), - Planner(repo, rev)) - for rev in revisions]) - - if search_revisions: - base_rev = search_revisions[0] - # Use the same nick for all parts to get short revision nick. - kwargs["combinations"].extend([ - (Translator(repo, base_rev, nick=rev), - Preprocessor(repo, base_rev, nick=rev), - Planner(repo, rev, nick=rev)) - for rev in search_revisions]) - - DownwardExperiment.__init__(self, path=path, repo=repo, **kwargs) - - self._config_nicks = [] - for nick, config in configs.items(): - self.add_config(nick, config) - - self.add_suite(suite) - - @property - def revision_nicks(self): - # TODO: Once the add_algorithm() API is available we should get - # rid of the call to _get_rev_nick() and avoid inspecting the - # list of combinations by setting and saving the algorithm nicks. - return [_get_rev_nick(*combo) for combo in self.combinations] - - def add_config(self, nick, config, timeout=None): - DownwardExperiment.add_config(self, nick, config, timeout=timeout) - self._config_nicks.append(nick) - - def add_absolute_report_step(self, **kwargs): - """Add step that makes an absolute report. 
- - Absolute reports are useful for experiments that don't - compare revisions. - - The report is written to the experiment evaluation directory. - - All *kwargs* will be passed to the AbsoluteReport class. If - the keyword argument *attributes* is not specified, a - default list of attributes is used. :: - - exp.add_absolute_report_step(attributes=["coverage"]) - - """ - kwargs.setdefault("attributes", self.DEFAULT_TABLE_ATTRIBUTES) - report = AbsoluteReport(**kwargs) - outfile = get_experiment_name() + "." + report.output_format - self.add_report(report, outfile=outfile) - - def add_comparison_table_step(self, **kwargs): - """Add a step that makes pairwise revision comparisons. - - Create comparative reports for all pairs of Fast Downward - revision triples. Each report pairs up the runs of the same - config and lists the two absolute attribute values and their - difference for all attributes in kwargs["attributes"]. - - All *kwargs* will be passed to the CompareRevisionsReport - class. If the keyword argument *attributes* is not - specified, a default list of attributes is used. :: - - exp.add_comparison_table_step(attributes=["coverage"]) - - """ - kwargs.setdefault("attributes", self.DEFAULT_TABLE_ATTRIBUTES) - - def make_comparison_tables(): - for rev1, rev2 in itertools.combinations(self.revision_nicks, 2): - report = CompareRevisionsReport(rev1, rev2, **kwargs) - outfile = os.path.join(self.eval_dir, - "%s-%s-compare.html" % (rev1, rev2)) - report(self.eval_dir, outfile) - - self.add_step(Step("make-comparison-tables", make_comparison_tables)) - - def add_scatter_plot_step(self, attributes=None): - """Add a step that creates scatter plots for all revision pairs. - - Create a scatter plot for each combination of attribute, - configuration and revision pair. If *attributes* is not - specified, a list of common scatter plot attributes is used. - For portfolios all attributes except "cost", "coverage" and - "plan_length" will be ignored. 
:: - - exp.add_scatter_plot_step(attributes=["expansions"]) - - """ - if attributes is None: - attributes = self.DEFAULT_SCATTER_PLOT_ATTRIBUTES - scatter_dir = os.path.join(self.eval_dir, "scatter") - - def is_portfolio(config_nick): - return "fdss" in config_nick - - def make_scatter_plots(): - for config_nick in self._config_nicks: - for rev1, rev2 in itertools.combinations( - self.revision_nicks, 2): - algo1 = "%s-%s" % (rev1, config_nick) - algo2 = "%s-%s" % (rev2, config_nick) - if is_portfolio(config_nick): - valid_attributes = [ - attr for attr in attributes - if attr in self.PORTFOLIO_ATTRIBUTES] - else: - valid_attributes = attributes - for attribute in valid_attributes: - name = "-".join([rev1, rev2, attribute, config_nick]) - print "Make scatter plot for", name - report = ScatterPlotReport( - filter_config=[algo1, algo2], - attributes=[attribute], - get_category=lambda run1, run2: run1["domain"], - legend_location=(1.3, 0.5)) - report(self.eval_dir, os.path.join(scatter_dir, name)) - - self.add_step(Step("make-scatter-plots", make_scatter_plots)) diff --git a/experiments/issue414/opt.py b/experiments/issue414/opt.py deleted file mode 100755 index dbce520b85..0000000000 --- a/experiments/issue414/opt.py +++ /dev/null @@ -1,36 +0,0 @@ -#! /usr/bin/env python -# -*- coding: utf-8 -*- - -import os - -from downward import suites - -import common_setup - - -DIR = os.path.dirname(os.path.abspath(__file__)) -REPO = os.path.dirname(os.path.dirname(DIR)) - - -REVS = ["issue414-base", "issue414"] -LIMITS = {"search_time": 1800} -SUITE = suites.suite_optimal_with_ipc11() - -# The aliases are adjusted for the respective driver scripts by lab. 
-CONFIGS = { - "ipdb": ["--search", "astar(ipdb())"], -} -for alias in ["seq-opt-bjolp", "seq-opt-fdss-1", "seq-opt-fdss-2", - "seq-opt-lmcut", "seq-opt-merge-and-shrink"]: - CONFIGS[alias] = ["--alias", alias] - -exp = common_setup.IssueExperiment( - revisions=REVS, - configs=CONFIGS, - suite=SUITE, - limits=LIMITS, - ) - -exp.add_comparison_table_step() - -exp() diff --git a/experiments/issue414/sat.py b/experiments/issue414/sat.py deleted file mode 100755 index ebc306d6a0..0000000000 --- a/experiments/issue414/sat.py +++ /dev/null @@ -1,38 +0,0 @@ -#! /usr/bin/env python -# -*- coding: utf-8 -*- - -import os - -from downward import suites - -import common_setup - - -DIR = os.path.dirname(os.path.abspath(__file__)) -REPO = os.path.dirname(os.path.dirname(DIR)) - - -REVS = ["issue414-base", "issue414"] -LIMITS = {"search_time": 1800} -SUITE = suites.suite_satisficing_with_ipc11() - -# The aliases are adjusted for the respective driver scripts by lab. -CONFIGS = { - "seq_sat_lama_2011": ["ipc", "seq-sat-lama-2011"], - "seq_sat_fdss_1": ["ipc", "seq-sat-fdss-1"], - "seq_sat_fdss_2": ["--alias", "seq-sat-fdss-2"], - "lazy_greedy_ff": [ - "--heuristic", "h=ff()", - "--search", "lazy_greedy(h, preferred=h)"], -} - -exp = common_setup.IssueExperiment( - revisions=REVS, - configs=CONFIGS, - suite=SUITE, - limits=LIMITS, - ) - -exp.add_comparison_table_step() - -exp() diff --git a/experiments/issue416/common_setup.py b/experiments/issue416/common_setup.py deleted file mode 100644 index 0b2eebe0ff..0000000000 --- a/experiments/issue416/common_setup.py +++ /dev/null @@ -1,357 +0,0 @@ -# -*- coding: utf-8 -*- - -import itertools -import os -import platform -import subprocess -import sys - -from lab.environments import LocalEnvironment, MaiaEnvironment -from lab.experiment import ARGPARSER -from lab.steps import Step -from lab import tools - -from downward.experiment import FastDownwardExperiment -from downward.reports.absolute import AbsoluteReport -from 
downward.reports.compare import CompareConfigsReport -from downward.reports.scatter import ScatterPlotReport - - -def parse_args(): - ARGPARSER.add_argument( - "--test", - choices=["yes", "no", "auto"], - default="auto", - dest="test_run", - help="test experiment locally on a small suite if --test=yes or " - "--test=auto and we are not on a cluster") - return ARGPARSER.parse_args() - -ARGS = parse_args() - - -def get_script(): - """Get file name of main script.""" - return tools.get_script_path() - - -def get_script_dir(): - """Get directory of main script. - - Usually a relative directory (depends on how it was called by the user.)""" - return os.path.dirname(get_script()) - - -def get_experiment_name(): - """Get name for experiment. - - Derived from the absolute filename of the main script, e.g. - "/ham/spam/eggs.py" => "spam-eggs".""" - script = os.path.abspath(get_script()) - script_dir = os.path.basename(os.path.dirname(script)) - script_base = os.path.splitext(os.path.basename(script))[0] - return "%s-%s" % (script_dir, script_base) - - -def get_data_dir(): - """Get data dir for the experiment. - - This is the subdirectory "data" of the directory containing - the main script.""" - return os.path.join(get_script_dir(), "data", get_experiment_name()) - - -def get_repo_base(): - """Get base directory of the repository, as an absolute path. - - Search upwards in the directory tree from the main script until a - directory with a subdirectory named ".hg" is found. 
- - Abort if the repo base cannot be found.""" - path = os.path.abspath(get_script_dir()) - while os.path.dirname(path) != path: - if os.path.exists(os.path.join(path, ".hg")): - return path - path = os.path.dirname(path) - sys.exit("repo base could not be found") - - -def is_running_on_cluster(): - node = platform.node() - return ( - "cluster" in node or - node.startswith("gkigrid") or - node in ["habakuk", "turtur"]) - - -def is_test_run(): - return ARGS.test_run == "yes" or ( - ARGS.test_run == "auto" and not is_running_on_cluster()) - - -def get_algo_nick(revision, config_nick): - return "{revision}-{config_nick}".format(**locals()) - - -class IssueConfig(object): - """Hold information about a planner configuration. - - See FastDownwardExperiment.add_algorithm() for documentation of the - constructor's options. - - """ - def __init__(self, nick, component_options, - build_options=None, driver_options=None): - self.nick = nick - self.component_options = component_options - self.build_options = build_options - self.driver_options = driver_options - - -class IssueExperiment(FastDownwardExperiment): - """Wrapper for FastDownwardExperiment with a few convenience features.""" - - DEFAULT_TEST_SUITE = "gripper:prob01.pddl" - - DEFAULT_TABLE_ATTRIBUTES = [ - "cost", - "coverage", - "error", - "evaluations", - "expansions", - "expansions_until_last_jump", - "generated", - "memory", - "quality", - "run_dir", - "score_evaluations", - "score_expansions", - "score_generated", - "score_memory", - "score_search_time", - "score_total_time", - "search_time", - "total_time", - ] - - DEFAULT_SCATTER_PLOT_ATTRIBUTES = [ - "evaluations", - "expansions", - "expansions_until_last_jump", - "initial_h_value", - "memory", - "search_time", - "total_time", - ] - - PORTFOLIO_ATTRIBUTES = [ - "cost", - "coverage", - "error", - "plan_length", - "run_dir", - ] - - def __init__(self, suite, revisions=[], configs={}, grid_priority=None, - path=None, test_suite=None, email=None, processes=1, - 
**kwargs): - """Create a DownwardExperiment with some convenience features. - - If *revisions* is specified, it should be a non-empty - list of revisions, which specify which planner versions to use - in the experiment. The same versions are used for translator, - preprocessor and search. :: - - IssueExperiment(revisions=["issue123", "4b3d581643"], ...) - - *configs* must be a non-empty list of IssueConfig objects. :: - - IssueExperiment(..., configs=[ - IssueConfig("ff", ["--search", "eager_greedy(ff())"]), - IssueConfig( - "lama", [], - driver_options=["--alias", "seq-sat-lama-2011"]), - ]) - - *suite* sets the benchmarks for the experiment. It must be a - single string or a list of strings specifying domains or - tasks. The downward.suites module has many predefined - suites. :: - - IssueExperiment(..., suite=["grid", "gripper:prob01.pddl"]) - - from downward import suites - IssueExperiment(..., suite=suites.suite_all()) - IssueExperiment(..., suite=suites.suite_satisficing_with_ipc11()) - IssueExperiment(..., suite=suites.suite_optimal()) - - Use *grid_priority* to set the job priority for cluster - experiments. It must be in the range [-1023, 0] where 0 is the - highest priority. By default the priority is 0. :: - - IssueExperiment(..., grid_priority=-500) - - If *path* is specified, it must be the path to where the - experiment should be built (e.g. - /home/john/experiments/issue123/exp01/). If omitted, the - experiment path is derived automatically from the main - script's filename. Example:: - - script = experiments/issue123/exp01.py --> - path = experiments/issue123/data/issue123-exp01/ - - Specify *test_suite* to set the benchmarks for experiment test - runs. By default the first gripper task is used. - - IssueExperiment(..., test_suite=["depot:pfile1", "tpp:p01.pddl"]) - - If *email* is specified, it should be an email address. This - email address will be notified upon completion of the experiments - if it is run on the cluster. 
- """ - - if is_test_run(): - kwargs["environment"] = LocalEnvironment(processes=processes) - suite = test_suite or self.DEFAULT_TEST_SUITE - elif "environment" not in kwargs: - kwargs["environment"] = MaiaEnvironment( - priority=grid_priority, email=email) - - path = path or get_data_dir() - - FastDownwardExperiment.__init__(self, path=path, **kwargs) - - repo = get_repo_base() - for rev in revisions: - for config in configs: - self.add_algorithm( - get_algo_nick(rev, config.nick), - repo, - rev, - config.component_options, - build_options=config.build_options, - driver_options=config.driver_options) - - self.add_suite(os.path.join(repo, "benchmarks"), suite) - - self._revisions = revisions - self._configs = configs - - @classmethod - def _is_portfolio(cls, config_nick): - return "fdss" in config_nick - - @classmethod - def get_supported_attributes(cls, config_nick, attributes): - if cls._is_portfolio(config_nick): - return [attr for attr in attributes - if attr in cls.PORTFOLIO_ATTRIBUTES] - return attributes - - def add_absolute_report_step(self, **kwargs): - """Add step that makes an absolute report. - - Absolute reports are useful for experiments that don't - compare revisions. - - The report is written to the experiment evaluation directory. - - All *kwargs* will be passed to the AbsoluteReport class. If - the keyword argument *attributes* is not specified, a - default list of attributes is used. :: - - exp.add_absolute_report_step(attributes=["coverage"]) - - """ - kwargs.setdefault("attributes", self.DEFAULT_TABLE_ATTRIBUTES) - report = AbsoluteReport(**kwargs) - outfile = os.path.join(self.eval_dir, - get_experiment_name() + "." + - report.output_format) - self.add_report(report, outfile=outfile) - self.add_step(Step('publish-absolute-report', - subprocess.call, - ['publish', outfile])) - - def add_comparison_table_step(self, **kwargs): - """Add a step that makes pairwise revision comparisons. 
- - Create comparative reports for all pairs of Fast Downward - revisions. Each report pairs up the runs of the same config and - lists the two absolute attribute values and their difference - for all attributes in kwargs["attributes"]. - - All *kwargs* will be passed to the CompareConfigsReport class. - If the keyword argument *attributes* is not specified, a - default list of attributes is used. :: - - exp.add_comparison_table_step(attributes=["coverage"]) - - """ - kwargs.setdefault("attributes", self.DEFAULT_TABLE_ATTRIBUTES) - - def make_comparison_tables(): - for rev1, rev2 in itertools.combinations(self._revisions, 2): - compared_configs = [] - for config in self._configs: - config_nick = config.nick - compared_configs.append( - ("%s-%s" % (rev1, config_nick), - "%s-%s" % (rev2, config_nick), - "Diff (%s)" % config_nick)) - report = CompareConfigsReport(compared_configs, **kwargs) - outfile = os.path.join( - self.eval_dir, - "%s-%s-%s-compare" % (self.name, rev1, rev2) - + "." + report.output_format) - report(self.eval_dir, outfile) - - def publish_comparison_tables(): - for rev1, rev2 in itertools.combinations(self._revisions, 2): - outfile = os.path.join( - self.eval_dir, - "%s-%s-%s-compare" % (self.name, rev1, rev2) - + ".html") - subprocess.call(['publish', outfile]) - - self.add_step(Step("make-comparison-tables", make_comparison_tables)) - self.add_step(Step("publish-comparison-tables", publish_comparison_tables)) - - def add_scatter_plot_step(self, attributes=None): - """Add a step that creates scatter plots for all revision pairs. - - Create a scatter plot for each combination of attribute, - configuration and revisions pair. If *attributes* is not - specified, a list of common scatter plot attributes is used. - For portfolios all attributes except "cost", "coverage" and - "plan_length" will be ignored. 
:: - - exp.add_scatter_plot_step(attributes=["expansions"]) - - """ - if attributes is None: - attributes = self.DEFAULT_SCATTER_PLOT_ATTRIBUTES - scatter_dir = os.path.join(self.eval_dir, "scatter") - - def make_scatter_plot(config_nick, rev1, rev2, attribute): - name = "-".join([self.name, rev1, rev2, attribute, config_nick]) - print "Make scatter plot for", name - algo1 = "%s-%s" % (rev1, config_nick) - algo2 = "%s-%s" % (rev2, config_nick) - report = ScatterPlotReport( - filter_config=[algo1, algo2], - attributes=[attribute], - get_category=lambda run1, run2: run1["domain"], - legend_location=(1.3, 0.5)) - report( - self.eval_dir, - os.path.join(scatter_dir, rev1 + "-" + rev2, name)) - - def make_scatter_plots(): - for config in self._configs: - for rev1, rev2 in itertools.combinations(self._revisions, 2): - for attribute in self.get_supported_attributes( - config.nick, attributes): - make_scatter_plot(config.nick, rev1, rev2, attribute) - - self.add_step(Step("make-scatter-plots", make_scatter_plots)) diff --git a/experiments/issue416/common_setup_no_benchmarks.py b/experiments/issue416/common_setup_no_benchmarks.py deleted file mode 100644 index 934531f15d..0000000000 --- a/experiments/issue416/common_setup_no_benchmarks.py +++ /dev/null @@ -1,357 +0,0 @@ -# -*- coding: utf-8 -*- - -import itertools -import os -import platform -import subprocess -import sys - -from lab.environments import LocalEnvironment, MaiaEnvironment -from lab.experiment import ARGPARSER -from lab.steps import Step -from lab import tools - -from downward.experiment import FastDownwardExperiment -from downward.reports.absolute import AbsoluteReport -from downward.reports.compare import CompareConfigsReport -from downward.reports.scatter import ScatterPlotReport - - -def parse_args(): - ARGPARSER.add_argument( - "--test", - choices=["yes", "no", "auto"], - default="auto", - dest="test_run", - help="test experiment locally on a small suite if --test=yes or " - "--test=auto and we are not on 
a cluster") - return ARGPARSER.parse_args() - -ARGS = parse_args() - - -def get_script(): - """Get file name of main script.""" - return tools.get_script_path() - - -def get_script_dir(): - """Get directory of main script. - - Usually a relative directory (depends on how it was called by the user.)""" - return os.path.dirname(get_script()) - - -def get_experiment_name(): - """Get name for experiment. - - Derived from the absolute filename of the main script, e.g. - "/ham/spam/eggs.py" => "spam-eggs".""" - script = os.path.abspath(get_script()) - script_dir = os.path.basename(os.path.dirname(script)) - script_base = os.path.splitext(os.path.basename(script))[0] - return "%s-%s" % (script_dir, script_base) - - -def get_data_dir(): - """Get data dir for the experiment. - - This is the subdirectory "data" of the directory containing - the main script.""" - return os.path.join(get_script_dir(), "data", get_experiment_name()) - - -def get_repo_base(): - """Get base directory of the repository, as an absolute path. - - Search upwards in the directory tree from the main script until a - directory with a subdirectory named ".hg" is found. - - Abort if the repo base cannot be found.""" - path = os.path.abspath(get_script_dir()) - while os.path.dirname(path) != path: - if os.path.exists(os.path.join(path, ".hg")): - return path - path = os.path.dirname(path) - sys.exit("repo base could not be found") - - -def is_running_on_cluster(): - node = platform.node() - return ( - "cluster" in node or - node.startswith("gkigrid") or - node in ["habakuk", "turtur"]) - - -def is_test_run(): - return ARGS.test_run == "yes" or ( - ARGS.test_run == "auto" and not is_running_on_cluster()) - - -def get_algo_nick(revision, config_nick): - return "{revision}-{config_nick}".format(**locals()) - - -class IssueConfig(object): - """Hold information about a planner configuration. - - See FastDownwardExperiment.add_algorithm() for documentation of the - constructor's options. 
- - """ - def __init__(self, nick, component_options, - build_options=None, driver_options=None): - self.nick = nick - self.component_options = component_options - self.build_options = build_options - self.driver_options = driver_options - - -class IssueExperiment(FastDownwardExperiment): - """Subclass of FastDownwardExperiment with some convenience features.""" - - DEFAULT_TEST_SUITE = "gripper:prob01.pddl" - - DEFAULT_TABLE_ATTRIBUTES = [ - "cost", - "coverage", - "error", - "evaluations", - "expansions", - "expansions_until_last_jump", - "generated", - "memory", - "quality", - "run_dir", - "score_evaluations", - "score_expansions", - "score_generated", - "score_memory", - "score_search_time", - "score_total_time", - "search_time", - "total_time", - ] - - DEFAULT_SCATTER_PLOT_ATTRIBUTES = [ - "evaluations", - "expansions", - "expansions_until_last_jump", - "initial_h_value", - "memory", - "search_time", - "total_time", - ] - - PORTFOLIO_ATTRIBUTES = [ - "cost", - "coverage", - "error", - "plan_length", - "run_dir", - ] - - def __init__(self, benchmarks_dir, suite, revisions=[], configs={}, - grid_priority=None, path=None, test_suite=None, - email=None, processes=None, - **kwargs): - """ - If *revisions* is specified, it should be a non-empty - list of revisions, which specify which planner versions to use - in the experiment. The same versions are used for translator, - preprocessor and search. :: - - IssueExperiment(revisions=["issue123", "4b3d581643"], ...) - - *configs* must be a non-empty list of IssueConfig objects. :: - - IssueExperiment(..., configs=[ - IssueConfig("ff", ["--search", "eager_greedy(ff())"]), - IssueConfig( - "lama", [], - driver_options=["--alias", "seq-sat-lama-2011"]), - ]) - - *suite* sets the benchmarks for the experiment. It must be a - single string or a list of strings specifying domains or - tasks. The downward.suites module has many predefined - suites. 
:: - - IssueExperiment(..., suite=["grid", "gripper:prob01.pddl"]) - - from downward import suites - IssueExperiment(..., suite=suites.suite_all()) - IssueExperiment(..., suite=suites.suite_satisficing_with_ipc11()) - IssueExperiment(..., suite=suites.suite_optimal()) - - Use *grid_priority* to set the job priority for cluster - experiments. It must be in the range [-1023, 0] where 0 is the - highest priority. By default the priority is 0. :: - - IssueExperiment(..., grid_priority=-500) - - If *path* is specified, it must be the path to where the - experiment should be built (e.g. - /home/john/experiments/issue123/exp01/). If omitted, the - experiment path is derived automatically from the main - script's filename. Example:: - - script = experiments/issue123/exp01.py --> - path = experiments/issue123/data/issue123-exp01/ - - Specify *test_suite* to set the benchmarks for experiment test - runs. By default the first gripper task is used. - - IssueExperiment(..., test_suite=["depot:pfile1", "tpp:p01.pddl"]) - - If *email* is specified, it should be an email address. This - email address will be notified upon completion of the experiments - if it is run on the cluster. 
- """ - - if is_test_run(): - kwargs["environment"] = LocalEnvironment(processes=processes) - suite = test_suite or self.DEFAULT_TEST_SUITE - elif "environment" not in kwargs: - kwargs["environment"] = MaiaEnvironment( - priority=grid_priority, email=email) - - path = path or get_data_dir() - - FastDownwardExperiment.__init__(self, path=path, **kwargs) - - repo = get_repo_base() - for rev in revisions: - for config in configs: - self.add_algorithm( - get_algo_nick(rev, config.nick), - repo, - rev, - config.component_options, - build_options=config.build_options, - driver_options=config.driver_options) - - self.add_suite(benchmarks_dir, suite) - - self._revisions = revisions - self._configs = configs - - @classmethod - def _is_portfolio(cls, config_nick): - return "fdss" in config_nick - - @classmethod - def get_supported_attributes(cls, config_nick, attributes): - if cls._is_portfolio(config_nick): - return [attr for attr in attributes - if attr in cls.PORTFOLIO_ATTRIBUTES] - return attributes - - def add_absolute_report_step(self, **kwargs): - """Add step that makes an absolute report. - - Absolute reports are useful for experiments that don't - compare revisions. - - The report is written to the experiment evaluation directory. - - All *kwargs* will be passed to the AbsoluteReport class. If - the keyword argument *attributes* is not specified, a - default list of attributes is used. :: - - exp.add_absolute_report_step(attributes=["coverage"]) - - """ - kwargs.setdefault("attributes", self.DEFAULT_TABLE_ATTRIBUTES) - report = AbsoluteReport(**kwargs) - outfile = os.path.join(self.eval_dir, - get_experiment_name() + "." + - report.output_format) - self.add_report(report, outfile=outfile) - self.add_step(Step('publish-absolute-report', - subprocess.call, - ['publish', outfile])) - - def add_comparison_table_step(self, **kwargs): - """Add a step that makes pairwise revision comparisons. - - Create comparative reports for all pairs of Fast Downward - revisions. 
Each report pairs up the runs of the same config and - lists the two absolute attribute values and their difference - for all attributes in kwargs["attributes"]. - - All *kwargs* will be passed to the CompareConfigsReport class. - If the keyword argument *attributes* is not specified, a - default list of attributes is used. :: - - exp.add_comparison_table_step(attributes=["coverage"]) - - """ - kwargs.setdefault("attributes", self.DEFAULT_TABLE_ATTRIBUTES) - - def make_comparison_tables(): - for rev1, rev2 in itertools.combinations(self._revisions, 2): - compared_configs = [] - for config in self._configs: - config_nick = config.nick - compared_configs.append( - ("%s-%s" % (rev1, config_nick), - "%s-%s" % (rev2, config_nick), - "Diff (%s)" % config_nick)) - report = CompareConfigsReport(compared_configs, **kwargs) - outfile = os.path.join( - self.eval_dir, - "%s-%s-%s-compare" % (self.name, rev1, rev2) - + "." + report.output_format) - report(self.eval_dir, outfile) - - def publish_comparison_tables(): - for rev1, rev2 in itertools.combinations(self._revisions, 2): - outfile = os.path.join( - self.eval_dir, - "%s-%s-%s-compare" % (self.name, rev1, rev2) - + ".html") - subprocess.call(['publish', outfile]) - - self.add_step(Step("make-comparison-tables", make_comparison_tables)) - self.add_step(Step("publish-comparison-tables", publish_comparison_tables)) - - def add_scatter_plot_step(self, attributes=None): - """Add a step that creates scatter plots for all revision pairs. - - Create a scatter plot for each combination of attribute, - configuration and revisions pair. If *attributes* is not - specified, a list of common scatter plot attributes is used. - For portfolios all attributes except "cost", "coverage" and - "plan_length" will be ignored. 
:: - - exp.add_scatter_plot_step(attributes=["expansions"]) - - """ - if attributes is None: - attributes = self.DEFAULT_SCATTER_PLOT_ATTRIBUTES - scatter_dir = os.path.join(self.eval_dir, "scatter") - - def make_scatter_plot(config_nick, rev1, rev2, attribute): - name = "-".join([self.name, rev1, rev2, attribute, config_nick]) - print "Make scatter plot for", name - algo1 = "%s-%s" % (rev1, config_nick) - algo2 = "%s-%s" % (rev2, config_nick) - report = ScatterPlotReport( - filter_config=[algo1, algo2], - attributes=[attribute], - get_category=lambda run1, run2: run1["domain"], - legend_location=(1.3, 0.5)) - report( - self.eval_dir, - os.path.join(scatter_dir, rev1 + "-" + rev2, name)) - - def make_scatter_plots(): - for config in self._configs: - for rev1, rev2 in itertools.combinations(self._revisions, 2): - for attribute in self.get_supported_attributes( - config.nick, attributes): - make_scatter_plot(config.nick, rev1, rev2, attribute) - - self.add_step(Step("make-scatter-plots", make_scatter_plots)) diff --git a/experiments/issue416/relativescatter.py b/experiments/issue416/relativescatter.py deleted file mode 100644 index 41a8385a87..0000000000 --- a/experiments/issue416/relativescatter.py +++ /dev/null @@ -1,126 +0,0 @@ -# -*- coding: utf-8 -*- -# -# downward uses the lab package to conduct experiments with the -# Fast Downward planning system. -# -# This program is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with this program. If not, see . 
- -from collections import defaultdict -import os - -from lab import tools - -from matplotlib import ticker - -from downward.reports.scatter import ScatterPlotReport -from downward.reports.plot import PlotReport, Matplotlib, MatplotlibPlot - - -# TODO: handle outliers - -# TODO: this is mostly copied from ScatterMatplotlib (scatter.py) -class RelativeScatterMatplotlib(Matplotlib): - @classmethod - def _plot(cls, report, axes, categories, styles): - # Display grid - axes.grid(b=True, linestyle='-', color='0.75') - - has_points = False - # Generate the scatter plots - for category, coords in sorted(categories.items()): - X, Y = zip(*coords) - axes.scatter(X, Y, s=42, label=category, **styles[category]) - if X and Y: - has_points = True - - if report.xscale == 'linear' or report.yscale == 'linear': - plot_size = report.missing_val * 1.01 - else: - plot_size = report.missing_val * 1.25 - - # make 5 ticks above and below 1 - yticks = [] - tick_step = report.ylim_top**(1/5.0) - for i in xrange(-5, 6): - yticks.append(tick_step**i) - axes.set_yticks(yticks) - axes.get_yaxis().set_major_formatter(ticker.ScalarFormatter()) - - axes.set_xlim(report.xlim_left or -1, report.xlim_right or plot_size) - axes.set_ylim(report.ylim_bottom or -1, report.ylim_top or plot_size) - - for axis in [axes.xaxis, axes.yaxis]: - MatplotlibPlot.change_axis_formatter(axis, - report.missing_val if report.show_missing else None) - return has_points - - - -class RelativeScatterPlotReport(ScatterPlotReport): - """ - Generate a scatter plot that shows how a specific attribute in two - configurations. The attribute value in config 1 is shown on the - x-axis and the relation to the value in config 2 on the y-axis. 
- """ - - def __init__(self, show_missing=True, get_category=None, **kwargs): - ScatterPlotReport.__init__(self, show_missing, get_category, **kwargs) - if self.output_format == 'tex': - raise "not supported" - else: - self.writer = RelativeScatterMatplotlib - - - def _fill_categories(self, runs): - # We discard the *runs* parameter. - # Map category names to value tuples - categories = defaultdict(list) - self.ylim_bottom = 2 - self.ylim_top = 0.5 - self.xlim_left = float("inf") - for (domain, problem), runs in self.problem_runs.items(): - if len(runs) != 2: - continue - run1, run2 = runs - assert (run1['config'] == self.configs[0] and - run2['config'] == self.configs[1]) - val1 = run1.get(self.attribute) - val2 = run2.get(self.attribute) - if val1 is None or val2 is None: - continue - category = self.get_category(run1, run2) - assert val1 > 0, (domain, problem, self.configs[0], val1) - assert val2 > 0, (domain, problem, self.configs[1], val2) - x = val1 - y = val2 / float(val1) - - categories[category].append((x, y)) - - self.ylim_top = max(self.ylim_top, y) - self.ylim_bottom = min(self.ylim_bottom, y) - self.xlim_left = min(self.xlim_left, x) - - # center around 1 - if self.ylim_bottom < 1: - self.ylim_top = max(self.ylim_top, 1 / float(self.ylim_bottom)) - if self.ylim_top > 1: - self.ylim_bottom = min(self.ylim_bottom, 1 / float(self.ylim_top)) - return categories - - def _set_scales(self, xscale, yscale): - # ScatterPlots use log-scaling on the x-axis by default. - default_xscale = 'log' - if self.attribute and self.attribute in self.LINEAR: - default_xscale = 'linear' - PlotReport._set_scales(self, xscale or default_xscale, 'log') diff --git a/experiments/issue416/v1-lama.py b/experiments/issue416/v1-lama.py deleted file mode 100755 index 6ebe7bf3ea..0000000000 --- a/experiments/issue416/v1-lama.py +++ /dev/null @@ -1,52 +0,0 @@ -#! 
/usr/bin/env python -# -*- coding: utf-8 -*- - -from downward import suites -from lab.reports import Attribute, gm - -from common_setup import IssueConfig, IssueExperiment -from relativescatter import RelativeScatterPlotReport - -def main(revisions=None): - suite = suites.suite_satisficing_with_ipc11() - - configs = { - IssueConfig('seq_sat_lama_2011', [], driver_options=['--alias', 'seq-sat-lama-2011']), - IssueConfig('lama_first', [], driver_options=['--alias', 'lama-first']), - IssueConfig('ehc_lm_zhu', ['--search', 'ehc(lmcount(lm_zg()))']), - } - - exp = IssueExperiment( - revisions=revisions, - configs=configs, - suite=suite, - test_suite=['depot:pfile1'], - processes=4, - email='florian.pommerening@unibas.ch', - ) - - exp.add_comparison_table_step() - - for config in configs: - nick = config.nick - exp.add_report( - RelativeScatterPlotReport( - attributes=["memory"], - filter_config=["issue416-base-%s" % nick, "issue416-v1-%s" % nick], - get_category=lambda run1, run2: run1.get("domain"), - ), - outfile='issue416_base_v1_memory_%s.png' % nick - ) - - exp.add_report( - RelativeScatterPlotReport( - attributes=["total_time"], - filter_config=["issue416-base-%s" % nick, "issue416-v1-%s" % nick], - get_category=lambda run1, run2: run1.get("domain"), - ), - outfile='issue416_base_v1_total_time_%s.png' % nick - ) - - exp() - -main(revisions=['issue416-base', 'issue416-v1']) diff --git a/experiments/issue416/v1.py b/experiments/issue416/v1.py deleted file mode 100755 index bf64ea75fa..0000000000 --- a/experiments/issue416/v1.py +++ /dev/null @@ -1,53 +0,0 @@ -#! 
/usr/bin/env python -# -*- coding: utf-8 -*- - -from downward import suites -from lab.reports import Attribute, gm - -from common_setup import IssueConfig, IssueExperiment -from relativescatter import RelativeScatterPlotReport - -def main(revisions=None): - suite = suites.suite_optimal_with_ipc11() - - configs = { - IssueConfig('astar-blind', ['--search', 'astar(blind())']), - IssueConfig('astar-lmcut', ['--search', 'astar(lmcut())']), - IssueConfig('astar-ipdb', ['--search', 'astar(ipdb())']), - IssueConfig('astar-seq_opt_bjolp', ['--search', 'astar(lmcount(lm_merged([lm_rhw(),lm_hm(m=1)]),admissible=true), mpd=true)']), - } - - exp = IssueExperiment( - revisions=revisions, - configs=configs, - suite=suite, - test_suite=['depot:pfile1'], - processes=4, - email='florian.pommerening@unibas.ch', - ) - - exp.add_comparison_table_step() - - for config in configs: - nick = config.nick - exp.add_report( - RelativeScatterPlotReport( - attributes=["memory"], - filter_config=["issue416-base-%s" % nick, "issue416-v1-%s" % nick], - get_category=lambda run1, run2: run1.get("domain"), - ), - outfile='issue416_base_v1_memory_%s.png' % nick - ) - - exp.add_report( - RelativeScatterPlotReport( - attributes=["total_time"], - filter_config=["issue416-base-%s" % nick, "issue416-v1-%s" % nick], - get_category=lambda run1, run2: run1.get("domain"), - ), - outfile='issue416_base_v1_total_time_%s.png' % nick - ) - - exp() - -main(revisions=['issue416-base', 'issue416-v1']) diff --git a/experiments/issue416/v2-lama.py b/experiments/issue416/v2-lama.py deleted file mode 100755 index d67baf5b54..0000000000 --- a/experiments/issue416/v2-lama.py +++ /dev/null @@ -1,53 +0,0 @@ -#! 
/usr/bin/env python -# -*- coding: utf-8 -*- - -from downward import suites -from lab.reports import Attribute, gm - -from common_setup_no_benchmarks import IssueConfig, IssueExperiment -from relativescatter import RelativeScatterPlotReport - -def main(revisions=None): - suite = suites.suite_satisficing_with_ipc11() - - configs = { - IssueConfig('seq_sat_lama_2011', [], driver_options=['--alias', 'seq-sat-lama-2011']), - IssueConfig('lama_first', [], driver_options=['--alias', 'lama-first']), - IssueConfig('ehc_lm_zhu', ['--search', 'ehc(lmcount(lm_zg()))']), - } - - exp = IssueExperiment( - benchmarks_dir="/infai/pommeren/projects/downward/benchmarks/", - revisions=revisions, - configs=configs, - suite=suite, - test_suite=['depot:pfile1'], - processes=4, - email='florian.pommerening@unibas.ch', - ) - - exp.add_comparison_table_step() - - for config in configs: - nick = config.nick - exp.add_report( - RelativeScatterPlotReport( - attributes=["memory"], - filter_config=["issue416-v2-base-%s" % nick, "issue416-v2-%s" % nick], - get_category=lambda run1, run2: run1.get("domain"), - ), - outfile='issue416_base_v2_memory_%s.png' % nick - ) - - exp.add_report( - RelativeScatterPlotReport( - attributes=["total_time"], - filter_config=["issue416-v2-base-%s" % nick, "issue416-v2-%s" % nick], - get_category=lambda run1, run2: run1.get("domain"), - ), - outfile='issue416_base_v2_total_time_%s.png' % nick - ) - - exp() - -main(revisions=['issue416-v2-base', 'issue416-v2']) diff --git a/experiments/issue416/v2.py b/experiments/issue416/v2.py deleted file mode 100755 index dacbff5c79..0000000000 --- a/experiments/issue416/v2.py +++ /dev/null @@ -1,54 +0,0 @@ -#! 
/usr/bin/env python -# -*- coding: utf-8 -*- - -from downward import suites -from lab.reports import Attribute, gm - -from common_setup_no_benchmarks import IssueConfig, IssueExperiment -from relativescatter import RelativeScatterPlotReport - -def main(revisions=None): - suite = suites.suite_optimal_with_ipc11() - - configs = { - IssueConfig('astar-blind', ['--search', 'astar(blind())']), - IssueConfig('astar-lmcut', ['--search', 'astar(lmcut())']), - IssueConfig('astar-ipdb', ['--search', 'astar(ipdb())']), - IssueConfig('astar-seq_opt_bjolp', ['--search', 'astar(lmcount(lm_merged([lm_rhw(),lm_hm(m=1)]),admissible=true), mpd=true)']), - } - - exp = IssueExperiment( - benchmarks_dir="/infai/pommeren/projects/downward/benchmarks/", - revisions=revisions, - configs=configs, - suite=suite, - test_suite=['depot:pfile1'], - processes=4, - email='florian.pommerening@unibas.ch', - ) - - exp.add_comparison_table_step() - - for config in configs: - nick = config.nick - exp.add_report( - RelativeScatterPlotReport( - attributes=["memory"], - filter_config=["issue416-v2-base-%s" % nick, "issue416-v2-%s" % nick], - get_category=lambda run1, run2: run1.get("domain"), - ), - outfile='issue416_base_v2_memory_%s.png' % nick - ) - - exp.add_report( - RelativeScatterPlotReport( - attributes=["total_time"], - filter_config=["issue416-v2-base-%s" % nick, "issue416-v2-%s" % nick], - get_category=lambda run1, run2: run1.get("domain"), - ), - outfile='issue416_base_v2_total_time_%s.png' % nick - ) - - exp() - -main(revisions=['issue416-v2-base', 'issue416-v2']) diff --git a/experiments/issue420/common_setup.py b/experiments/issue420/common_setup.py deleted file mode 100644 index 56d69012c2..0000000000 --- a/experiments/issue420/common_setup.py +++ /dev/null @@ -1,238 +0,0 @@ -# -*- coding: utf-8 -*- - -import os.path - -from lab.environments import MaiaEnvironment -from lab.steps import Step - -from downward.checkouts import Translator, Preprocessor, Planner -from downward.experiments 
import DownwardExperiment -from downward.reports.compare import CompareRevisionsReport -from downward.reports.scatter import ScatterPlotReport - - -def get_script(): - """Get file name of main script.""" - import __main__ - return __main__.__file__ - - -def get_script_dir(): - """Get directory of main script. - - Usually a relative directory (depends on how it was called by the user.)""" - return os.path.dirname(get_script()) - - -def get_experiment_name(): - """Get name for experiment. - - Derived from the filename of the main script, e.g. - "/ham/spam/eggs.py" => "eggs".""" - script = os.path.abspath(get_script()) - script_dir = os.path.basename(os.path.dirname(script)) - script_base = os.path.splitext(os.path.basename(script))[0] - return "%s-%s" % (script_dir, script_base) - - -def get_data_dir(): - """Get data dir for the experiment. - - This is the subdirectory "data" of the directory containing - the main script.""" - return os.path.join(get_script_dir(), "data", get_experiment_name()) - - -def get_repo_base(): - """Get base directory of the repository, as an absolute path. 
- - Found by searching upwards in the directory tree from the main - script until a directory with a subdirectory named ".hg" is found.""" - path = os.path.abspath(get_script_dir()) - while True: - if os.path.exists(os.path.join(path, ".hg")): - return path - path = os.path.dirname(path) - - -class MyExperiment(DownwardExperiment): - DEFAULT_TABLE_ATTRIBUTES = [ - "cost", - "coverage", - "evaluations", - "expansions", - "expansions_until_last_jump", - "generated", - "memory", - "run_dir", - "score_evaluations", - "score_expansions", - "score_generated", - "score_memory", - "score_search_time", - "score_total_time", - "search_time", - "total_time", - ] - - DEFAULT_SCATTER_PLOT_ATTRIBUTES = [ - "total_time", - "search_time", - "memory", - "expansions_until_last_jump", - ] - - """Wrapper for DownwardExperiment with a few convenience features.""" - - def __init__(self, configs=None, grid_priority=None, path=None, - repo=None, revisions=None, search_revisions=None, - suite=None, parsers=None, **kwargs): - """Create a DownwardExperiment with some convenience features. - - If "configs" is specified, it should be a dict of {nick: - cmdline} pairs that sets the planner configurations to test. - - If "grid_priority" is specified and no environment is - specifically requested in **kwargs, use the maia environment - with the specified priority. - - If "path" is not specified, the experiment data path is - derived automatically from the main script's filename. - - If "repo" is not specified, the repository base is derived - automatically from the main script's path. - - If "revisions" is specified, it should be a non-empty - list of revisions, which specify which planner versions to use - in the experiment. The same versions are used for translator, - preprocessor and search. - - If "search_revisions" is specified, it should be a non-empty - list of revisions, which specify which search component - versions to use in the experiment. 
All experiments use the - translator and preprocessor component of the first - revision. - - If "suite" is specified, it should specify a problem suite. - - If "parsers" is specified, it should be a list of paths to - parsers that should be run in addition to search_parser.py. - - Options "combinations" (from the base class), "revisions" and - "search_revisions" are mutually exclusive.""" - - if grid_priority is not None and "environment" not in kwargs: - kwargs["environment"] = MaiaEnvironment(priority=grid_priority) - - if path is None: - path = get_data_dir() - - if repo is None: - repo = get_repo_base() - - num_rev_opts_specified = ( - int(revisions is not None) + - int(search_revisions is not None) + - int(kwargs.get("combinations") is not None)) - - if num_rev_opts_specified > 1: - raise ValueError('must specify exactly one of "revisions", ' - '"search_revisions" or "combinations"') - - # See add_comparison_table_step for more on this variable. - self._HACK_revisions = revisions - - if revisions is not None: - if not revisions: - raise ValueError("revisions cannot be empty") - combinations = [(Translator(repo, rev), - Preprocessor(repo, rev), - Planner(repo, rev)) - for rev in revisions] - kwargs["combinations"] = combinations - - if search_revisions is not None: - if not search_revisions: - raise ValueError("search_revisions cannot be empty") - base_rev = search_revisions[0] - translator = Translator(repo, base_rev) - preprocessor = Preprocessor(repo, base_rev) - combinations = [(translator, preprocessor, Planner(repo, rev)) - for rev in search_revisions] - kwargs["combinations"] = combinations - - self._additional_parsers = parsers or [] - - DownwardExperiment.__init__(self, path=path, repo=repo, **kwargs) - - if configs is not None: - for nick, config in configs.items(): - self.add_config(nick, config) - - if suite is not None: - self.add_suite(suite) - - self._report_prefix = get_experiment_name() - - def _make_search_runs(self): - 
DownwardExperiment._make_search_runs(self) - for i, parser in enumerate(self._additional_parsers): - parser_alias = 'ADDITIONALPARSER%d' % i - self.add_resource(parser_alias, parser, os.path.basename(parser)) - for run in self.runs: - run.require_resource(parser_alias) - run.add_command('additional-parser-%d' % i, [parser_alias]) - - def add_comparison_table_step(self, attributes=None): - revisions = self._HACK_revisions - if revisions is None: - # TODO: It's not clear to me what a "revision" in the - # overall context of the code really is, e.g. when keeping - # the translator and preprocessor method fixed and only - # changing the search component. It's also not really - # clear to me how the interface of the Compare... reports - # works and how to use it more generally. Hence the - # present hack. - - # Ideally, this method should look at the table columns we - # have (defined by planners and planner configurations), - # pair them up in a suitable way, either controlled by a - # convenience parameter or a more general grouping method, - # and then use this to define which pairs go together. - raise NotImplementedError( - "only supported when specifying revisions in __init__") - - if attributes is None: - attributes = self.DEFAULT_TABLE_ATTRIBUTES - report = CompareRevisionsReport(*revisions, attributes=attributes) - self.add_report(report, outfile="%s-compare.html" % self._report_prefix) - - def add_scatter_plot_step(self, attributes=None): - if attributes is None: - attributes = self.DEFAULT_SCATTER_PLOT_ATTRIBUTES - revisions = self._HACK_revisions - if revisions is None: - # TODO: See add_comparison_table_step. - raise NotImplementedError( - "only supported when specifying revisions in __init__") - if len(revisions) != 2: - # TODO: Should generalize this, too, by offering a general - # grouping function and then comparing any pair of - # settings in the same group. 
- raise NotImplementedError("need two revisions") - scatter_dir = os.path.join(self.eval_dir, "scatter") - def make_scatter_plots(): - configs = [conf[0] for conf in self.configs] - for nick in configs: - config_before = "%s-%s" % (revisions[0], nick) - config_after = "%s-%s" % (revisions[1], nick) - for attribute in attributes: - name = "%s-%s-%s" % (self._report_prefix, attribute, nick) - report = ScatterPlotReport( - filter_config=[config_before, config_after], - attributes=[attribute], - get_category=lambda run1, run2: run1["domain"], - legend_location=(1.3, 0.5)) - report(self.eval_dir, os.path.join(scatter_dir, name)) - - self.add_step(Step("make-scatter-plots", make_scatter_plots)) diff --git a/experiments/issue420/issue420-v1-regressions.py b/experiments/issue420/issue420-v1-regressions.py deleted file mode 100755 index 3036f11b52..0000000000 --- a/experiments/issue420/issue420-v1-regressions.py +++ /dev/null @@ -1,48 +0,0 @@ -#! /usr/bin/env python -# -*- coding: utf-8 -*- -""" -Before you can run the experiment you need to create duplicates of the -two tasks we want to test: - -cd ../benchmarks/tidybot-opt11-strips -for i in {00..49}; do cp p14.pddl p14-$i.pddl; done - -cd ../parking-opt11-strips -for i in {00..49}; do cp pfile04-015.pddl pfile04-015-$i.pddl; done - -Don't forget to remove the duplicate tasks afterwards. Otherwise they -will be included in subsequent experiments. 
-""" - -import common_setup - - -REVS = ["issue420-base", "issue420-v1"] -CONFIGS = { - "blind": ["--search", "astar(blind())"], - "lmcut": ["--search", "astar(lmcut())"], -} - -TEST_RUN = False - -if TEST_RUN: - SUITE = "gripper:prob01.pddl" - PRIORITY = None # "None" means local experiment -else: - SUITE = (["tidybot-opt11-strips:p14-%02d.pddl" % i for i in range(50)] + - ["parking-opt11-strips:pfile04-015-%02d.pddl" % i for i in range(50)]) - PRIORITY = 0 # number means maia experiment - - -exp = common_setup.MyExperiment( - grid_priority=PRIORITY, - revisions=REVS, - configs=CONFIGS, - suite=SUITE, - ) - -exp.add_comparison_table_step( - attributes=common_setup.MyExperiment.DEFAULT_TABLE_ATTRIBUTES -) - -exp() diff --git a/experiments/issue420/issue420-v1.py b/experiments/issue420/issue420-v1.py deleted file mode 100755 index b5d93b01da..0000000000 --- a/experiments/issue420/issue420-v1.py +++ /dev/null @@ -1,36 +0,0 @@ -#! /usr/bin/env python -# -*- coding: utf-8 -*- - -from downward.suites import suite_optimal_with_ipc11 - -import common_setup - - -REVS = ["issue420-base", "issue420-v1"] -CONFIGS = { - "blind": ["--search", "astar(blind())"], - "lmcut": ["--search", "astar(lmcut())"], -} - -TEST_RUN = False - -if TEST_RUN: - SUITE = "gripper:prob01.pddl" - PRIORITY = None # "None" means local experiment -else: - SUITE = suite_optimal_with_ipc11() - PRIORITY = 0 # number means maia experiment - - -exp = common_setup.MyExperiment( - grid_priority=PRIORITY, - revisions=REVS, - configs=CONFIGS, - suite=SUITE, - ) - -exp.add_comparison_table_step( - attributes=common_setup.MyExperiment.DEFAULT_TABLE_ATTRIBUTES -) - -exp() diff --git a/experiments/issue422/common_setup.py b/experiments/issue422/common_setup.py deleted file mode 100644 index 442d65186c..0000000000 --- a/experiments/issue422/common_setup.py +++ /dev/null @@ -1,347 +0,0 @@ -# -*- coding: utf-8 -*- - -import itertools -import os -import platform -import sys - -from lab.environments import 
LocalEnvironment, MaiaEnvironment -from lab.experiment import ARGPARSER -from lab.steps import Step - -from downward.experiments import DownwardExperiment, _get_rev_nick -from downward.checkouts import Translator, Preprocessor, Planner -from downward.reports.absolute import AbsoluteReport -from downward.reports.compare import CompareRevisionsReport -from downward.reports.scatter import ScatterPlotReport - - -def parse_args(): - ARGPARSER.add_argument( - "--test", - choices=["yes", "no", "auto"], - default="auto", - dest="test_run", - help="test experiment locally on a small suite if --test=yes or " - "--test=auto and we are not on a cluster") - return ARGPARSER.parse_args() - -ARGS = parse_args() - - -def get_script(): - """Get file name of main script.""" - import __main__ - return __main__.__file__ - - -def get_script_dir(): - """Get directory of main script. - - Usually a relative directory (depends on how it was called by the user.)""" - return os.path.dirname(get_script()) - - -def get_experiment_name(): - """Get name for experiment. - - Derived from the absolute filename of the main script, e.g. - "/ham/spam/eggs.py" => "spam-eggs".""" - script = os.path.abspath(get_script()) - script_dir = os.path.basename(os.path.dirname(script)) - script_base = os.path.splitext(os.path.basename(script))[0] - return "%s-%s" % (script_dir, script_base) - - -def get_data_dir(): - """Get data dir for the experiment. - - This is the subdirectory "data" of the directory containing - the main script.""" - return os.path.join(get_script_dir(), "data", get_experiment_name()) - - -def get_repo_base(): - """Get base directory of the repository, as an absolute path. - - Search upwards in the directory tree from the main script until a - directory with a subdirectory named ".hg" is found. 
- - Abort if the repo base cannot be found.""" - path = os.path.abspath(get_script_dir()) - while os.path.dirname(path) != path: - if os.path.exists(os.path.join(path, ".hg")): - return path - path = os.path.dirname(path) - sys.exit("repo base could not be found") - - -def is_running_on_cluster(): - node = platform.node() - return (node.endswith("cluster") or - node.startswith("gkigrid") or - node in ["habakuk", "turtur"]) - - -def is_test_run(): - return ARGS.test_run == "yes" or (ARGS.test_run == "auto" and - not is_running_on_cluster()) - - -class IssueExperiment(DownwardExperiment): - """Wrapper for DownwardExperiment with a few convenience features.""" - - DEFAULT_TEST_SUITE = "gripper:prob01.pddl" - - # TODO: Once we have reference results, we should add "quality". - # TODO: Add something about errors/exit codes. - DEFAULT_TABLE_ATTRIBUTES = [ - "cost", - "coverage", - "evaluations", - "expansions", - "expansions_until_last_jump", - "generated", - "memory", - "run_dir", - "score_evaluations", - "score_expansions", - "score_generated", - "score_memory", - "score_search_time", - "score_total_time", - "search_time", - "total_time", - ] - - DEFAULT_SCATTER_PLOT_ATTRIBUTES = [ - "evaluations", - "expansions", - "expansions_until_last_jump", - "initial_h_value", - "memory", - "search_time", - "total_time", - ] - - PORTFOLIO_ATTRIBUTES = [ - "cost", - "coverage", - "plan_length", - ] - - def __init__(self, configs, suite, grid_priority=None, path=None, - repo=None, revisions=None, search_revisions=None, - test_suite=None, **kwargs): - """Create a DownwardExperiment with some convenience features. - - *configs* must be a non-empty dict of {nick: cmdline} pairs - that sets the planner configurations to test. :: - - IssueExperiment(configs={ - "lmcut": ["--search", "astar(lmcut())"], - "ipdb": ["--search", "astar(ipdb())"]}) - - *suite* sets the benchmarks for the experiment. It must be a - single string or a list of strings specifying domains or - tasks. 
The downward.suites module has many predefined - suites. :: - - IssueExperiment(suite=["grid", "gripper:prob01.pddl"]) - - from downward import suites - IssueExperiment(suite=suites.suite_all()) - IssueExperiment(suite=suites.suite_satisficing_with_ipc11()) - IssueExperiment(suite=suites.suite_optimal()) - - Use *grid_priority* to set the job priority for cluster - experiments. It must be in the range [-1023, 0] where 0 is the - highest priority. By default the priority is 0. :: - - IssueExperiment(grid_priority=-500) - - If *path* is specified, it must be the path to where the - experiment should be built (e.g. - /home/john/experiments/issue123/exp01/). If omitted, the - experiment path is derived automatically from the main - script's filename. Example:: - - script = experiments/issue123/exp01.py --> - path = experiments/issue123/data/issue123-exp01/ - - If *repo* is specified, it must be the path to the root of a - local Fast Downward repository. If omitted, the repository - is derived automatically from the main script's path. Example:: - - script = /path/to/fd-repo/experiments/issue123/exp01.py --> - repo = /path/to/fd-repo - - If *revisions* is specified, it should be a non-empty - list of revisions, which specify which planner versions to use - in the experiment. The same versions are used for translator, - preprocessor and search. :: - - IssueExperiment(revisions=["issue123", "4b3d581643"]) - - If *search_revisions* is specified, it should be a non-empty - list of revisions, which specify which search component - versions to use in the experiment. All runs use the - translator and preprocessor component of the first - revision. :: - - IssueExperiment(search_revisions=["default", "issue123"]) - - If you really need to specify the (translator, preprocessor, - planner) triples manually, use the *combinations* parameter - from the base class (might be deprecated soon). 
The options - *revisions*, *search_revisions* and *combinations* can be - freely mixed, but at least one of them must be given. - - Specify *test_suite* to set the benchmarks for experiment test - runs. By default the first gripper task is used. - - IssueExperiment(test_suite=["depot:pfile1", "tpp:p01.pddl"]) - - """ - - if is_test_run(): - kwargs["environment"] = LocalEnvironment() - suite = test_suite or self.DEFAULT_TEST_SUITE - elif "environment" not in kwargs: - kwargs["environment"] = MaiaEnvironment(priority=grid_priority) - - if path is None: - path = get_data_dir() - - if repo is None: - repo = get_repo_base() - - kwargs.setdefault("combinations", []) - - if not any([revisions, search_revisions, kwargs["combinations"]]): - raise ValueError('At least one of "revisions", "search_revisions" ' - 'or "combinations" must be given') - - if revisions: - kwargs["combinations"].extend([ - (Translator(repo, rev), - Preprocessor(repo, rev), - Planner(repo, rev)) - for rev in revisions]) - - if search_revisions: - base_rev = search_revisions[0] - # Use the same nick for all parts to get short revision nick. - kwargs["combinations"].extend([ - (Translator(repo, base_rev, nick=rev), - Preprocessor(repo, base_rev, nick=rev), - Planner(repo, rev, nick=rev)) - for rev in search_revisions]) - - DownwardExperiment.__init__(self, path=path, repo=repo, **kwargs) - - self._config_nicks = [] - for nick, config in configs.items(): - self.add_config(nick, config) - - self.add_suite(suite) - - @property - def revision_nicks(self): - # TODO: Once the add_algorithm() API is available we should get - # rid of the call to _get_rev_nick() and avoid inspecting the - # list of combinations by setting and saving the algorithm nicks. 
- return [_get_rev_nick(*combo) for combo in self.combinations] - - def add_config(self, nick, config, timeout=None): - DownwardExperiment.add_config(self, nick, config, timeout=timeout) - self._config_nicks.append(nick) - - def add_absolute_report_step(self, **kwargs): - """Add step that makes an absolute report. - - Absolute reports are useful for experiments that don't - compare revisions. - - The report is written to the experiment evaluation directory. - - All *kwargs* will be passed to the AbsoluteReport class. If - the keyword argument *attributes* is not specified, a - default list of attributes is used. :: - - exp.add_absolute_report_step(attributes=["coverage"]) - - """ - kwargs.setdefault("attributes", self.DEFAULT_TABLE_ATTRIBUTES) - report = AbsoluteReport(**kwargs) - outfile = get_experiment_name() + "." + report.output_format - self.add_report(report, outfile=outfile) - - def add_comparison_table_step(self, **kwargs): - """Add a step that makes pairwise revision comparisons. - - Create comparative reports for all pairs of Fast Downward - revision triples. Each report pairs up the runs of the same - config and lists the two absolute attribute values and their - difference for all attributes in kwargs["attributes"]. - - All *kwargs* will be passed to the CompareRevisionsReport - class. If the keyword argument *attributes* is not - specified, a default list of attributes is used. 
:: - - exp.add_comparison_table_step(attributes=["coverage"]) - - """ - kwargs.setdefault("attributes", self.DEFAULT_TABLE_ATTRIBUTES) - - def make_comparison_tables(): - for rev1, rev2 in itertools.combinations(self.revision_nicks, 2): - report = CompareRevisionsReport(rev1, rev2, **kwargs) - outfile = os.path.join(self.eval_dir, - "%s-%s-compare.html" % (rev1, rev2)) - report(self.eval_dir, outfile) - - self.add_step(Step("make-comparison-tables", make_comparison_tables)) - - def add_scatter_plot_step(self, attributes=None): - """Add a step that creates scatter plots for all revision pairs. - - Create a scatter plot for each combination of attribute, - configuration and revision pair. If *attributes* is not - specified, a list of common scatter plot attributes is used. - For portfolios all attributes except "cost", "coverage" and - "plan_length" will be ignored. :: - - exp.add_scatter_plot_step(attributes=["expansions"]) - - """ - if attributes is None: - attributes = self.DEFAULT_SCATTER_PLOT_ATTRIBUTES - scatter_dir = os.path.join(self.eval_dir, "scatter") - - def is_portfolio(config_nick): - return "fdss" in config_nick - - def make_scatter_plots(): - for config_nick in self._config_nicks: - for rev1, rev2 in itertools.combinations( - self.revision_nicks, 2): - algo1 = "%s-%s" % (rev1, config_nick) - algo2 = "%s-%s" % (rev2, config_nick) - if is_portfolio(config_nick): - valid_attributes = [ - attr for attr in attributes - if attr in self.PORTFOLIO_ATTRIBUTES] - else: - valid_attributes = attributes - for attribute in valid_attributes: - name = "-".join([rev1, rev2, attribute, config_nick]) - print "Make scatter plot for", name - report = ScatterPlotReport( - filter_config=[algo1, algo2], - attributes=[attribute], - get_category=lambda run1, run2: run1["domain"], - legend_location=(1.3, 0.5)) - report(self.eval_dir, os.path.join(scatter_dir, name)) - - self.add_step(Step("make-scatter-plots", make_scatter_plots)) diff --git a/experiments/issue422/issue422.py 
b/experiments/issue422/issue422.py deleted file mode 100755 index af37f049eb..0000000000 --- a/experiments/issue422/issue422.py +++ /dev/null @@ -1,18 +0,0 @@ -#! /usr/bin/env python -# -*- coding: utf-8 -*- - -from downward import suites - -import common_setup - - -exp = common_setup.IssueExperiment( - search_revisions=["issue422-base", "issue422-v1"], - configs={"lmcut": ["--search", "astar(lmcut())"]}, - suite=suites.suite_optimal_with_ipc11(), - ) -exp.add_absolute_report_step() -exp.add_comparison_table_step() -exp.add_scatter_plot_step() - -exp() diff --git a/experiments/issue425/common_setup.py b/experiments/issue425/common_setup.py deleted file mode 100644 index ebf622a012..0000000000 --- a/experiments/issue425/common_setup.py +++ /dev/null @@ -1,347 +0,0 @@ -# -*- coding: utf-8 -*- - -import itertools -import os -import platform -import sys - -from lab.environments import LocalEnvironment, MaiaEnvironment -from lab.experiment import ARGPARSER -from lab.steps import Step - -from downward.experiments import DownwardExperiment, _get_rev_nick -from downward.checkouts import Translator, Preprocessor, Planner -from downward.reports.absolute import AbsoluteReport -from downward.reports.compare import CompareRevisionsReport -from downward.reports.scatter import ScatterPlotReport - - -def parse_args(): - ARGPARSER.add_argument( - "--test", - choices=["yes", "no", "auto"], - default="auto", - dest="test_run", - help="test experiment locally on a small suite if --test=yes or " - "--test=auto and we are not on a cluster") - return ARGPARSER.parse_args() - -ARGS = parse_args() - - -def get_script(): - """Get file name of main script.""" - import __main__ - return __main__.__file__ - - -def get_script_dir(): - """Get directory of main script. - - Usually a relative directory (depends on how it was called by the user.)""" - return os.path.dirname(get_script()) - - -def get_experiment_name(): - """Get name for experiment. 
- - Derived from the absolute filename of the main script, e.g. - "/ham/spam/eggs.py" => "spam-eggs".""" - script = os.path.abspath(get_script()) - script_dir = os.path.basename(os.path.dirname(script)) - script_base = os.path.splitext(os.path.basename(script))[0] - return "%s-%s" % (script_dir, script_base) - - -def get_data_dir(): - """Get data dir for the experiment. - - This is the subdirectory "data" of the directory containing - the main script.""" - return os.path.join(get_script_dir(), "data", get_experiment_name()) - - -def get_repo_base(): - """Get base directory of the repository, as an absolute path. - - Search upwards in the directory tree from the main script until a - directory with a subdirectory named ".hg" is found. - - Abort if the repo base cannot be found.""" - path = os.path.abspath(get_script_dir()) - while os.path.dirname(path) != path: - if os.path.exists(os.path.join(path, ".hg")): - return path - path = os.path.dirname(path) - sys.exit("repo base could not be found") - - -def is_running_on_cluster(): - node = platform.node() - return (node.endswith("cluster.bc2.ch") or - node.startswith("gkigrid") or - node in ["habakuk", "turtur"]) - - -def is_test_run(): - return ARGS.test_run == "yes" or (ARGS.test_run == "auto" and - not is_running_on_cluster()) - - -class IssueExperiment(DownwardExperiment): - """Wrapper for DownwardExperiment with a few convenience features.""" - - DEFAULT_TEST_SUITE = "gripper:prob01.pddl" - - # TODO: Once we have reference results, we should add "quality". - # TODO: Add something about errors/exit codes. 
- DEFAULT_TABLE_ATTRIBUTES = [ - "cost", - "coverage", - "evaluations", - "expansions", - "expansions_until_last_jump", - "generated", - "memory", - "run_dir", - "score_evaluations", - "score_expansions", - "score_generated", - "score_memory", - "score_search_time", - "score_total_time", - "search_time", - "total_time", - ] - - DEFAULT_SCATTER_PLOT_ATTRIBUTES = [ - "evaluations", - "expansions", - "expansions_until_last_jump", - "initial_h_value", - "memory", - "search_time", - "total_time", - ] - - PORTFOLIO_ATTRIBUTES = [ - "cost", - "coverage", - "plan_length", - ] - - def __init__(self, configs, suite, grid_priority=None, path=None, - repo=None, revisions=None, search_revisions=None, - test_suite=None, **kwargs): - """Create a DownwardExperiment with some convenience features. - - *configs* must be a non-empty dict of {nick: cmdline} pairs - that sets the planner configurations to test. :: - - IssueExperiment(configs={ - "lmcut": ["--search", "astar(lmcut())"], - "ipdb": ["--search", "astar(ipdb())"]}) - - *suite* sets the benchmarks for the experiment. It must be a - single string or a list of strings specifying domains or - tasks. The downward.suites module has many predefined - suites. :: - - IssueExperiment(suite=["grid", "gripper:prob01.pddl"]) - - from downward import suites - IssueExperiment(suite=suites.suite_all()) - IssueExperiment(suite=suites.suite_satisficing_with_ipc11()) - IssueExperiment(suite=suites.suite_optimal()) - - Use *grid_priority* to set the job priority for cluster - experiments. It must be in the range [-1023, 0] where 0 is the - highest priority. By default the priority is 0. :: - - IssueExperiment(grid_priority=-500) - - If *path* is specified, it must be the path to where the - experiment should be built (e.g. - /home/john/experiments/issue123/exp01/). If omitted, the - experiment path is derived automatically from the main - script's filename. 
Example:: - - script = experiments/issue123/exp01.py --> - path = experiments/issue123/data/issue123-exp01/ - - If *repo* is specified, it must be the path to the root of a - local Fast Downward repository. If omitted, the repository - is derived automatically from the main script's path. Example:: - - script = /path/to/fd-repo/experiments/issue123/exp01.py --> - repo = /path/to/fd-repo - - If *revisions* is specified, it should be a non-empty - list of revisions, which specify which planner versions to use - in the experiment. The same versions are used for translator, - preprocessor and search. :: - - IssueExperiment(revisions=["issue123", "4b3d581643"]) - - If *search_revisions* is specified, it should be a non-empty - list of revisions, which specify which search component - versions to use in the experiment. All runs use the - translator and preprocessor component of the first - revision. :: - - IssueExperiment(search_revisions=["default", "issue123"]) - - If you really need to specify the (translator, preprocessor, - planner) triples manually, use the *combinations* parameter - from the base class (might be deprecated soon). The options - *revisions*, *search_revisions* and *combinations* can be - freely mixed, but at least one of them must be given. - - Specify *test_suite* to set the benchmarks for experiment test - runs. By default the first gripper task is used. 
- - IssueExperiment(test_suite=["depot:pfile1", "tpp:p01.pddl"]) - - """ - - if is_test_run(): - kwargs["environment"] = LocalEnvironment() - suite = test_suite or self.DEFAULT_TEST_SUITE - elif "environment" not in kwargs: - kwargs["environment"] = MaiaEnvironment(priority=grid_priority) - - if path is None: - path = get_data_dir() - - if repo is None: - repo = get_repo_base() - - kwargs.setdefault("combinations", []) - - if not any([revisions, search_revisions, kwargs["combinations"]]): - raise ValueError('At least one of "revisions", "search_revisions" ' - 'or "combinations" must be given') - - if revisions: - kwargs["combinations"].extend([ - (Translator(repo, rev), - Preprocessor(repo, rev), - Planner(repo, rev)) - for rev in revisions]) - - if search_revisions: - base_rev = search_revisions[0] - # Use the same nick for all parts to get short revision nick. - kwargs["combinations"].extend([ - (Translator(repo, base_rev, nick=rev), - Preprocessor(repo, base_rev, nick=rev), - Planner(repo, rev, nick=rev)) - for rev in search_revisions]) - - DownwardExperiment.__init__(self, path=path, repo=repo, **kwargs) - - self._config_nicks = [] - for nick, config in configs.items(): - self.add_config(nick, config) - - self.add_suite(suite) - - @property - def revision_nicks(self): - # TODO: Once the add_algorithm() API is available we should get - # rid of the call to _get_rev_nick() and avoid inspecting the - # list of combinations by setting and saving the algorithm nicks. - return [_get_rev_nick(*combo) for combo in self.combinations] - - def add_config(self, nick, config, timeout=None): - DownwardExperiment.add_config(self, nick, config, timeout=timeout) - self._config_nicks.append(nick) - - def add_absolute_report_step(self, **kwargs): - """Add step that makes an absolute report. - - Absolute reports are useful for experiments that don't - compare revisions. - - The report is written to the experiment evaluation directory. 
- - All *kwargs* will be passed to the AbsoluteReport class. If - the keyword argument *attributes* is not specified, a - default list of attributes is used. :: - - exp.add_absolute_report_step(attributes=["coverage"]) - - """ - kwargs.setdefault("attributes", self.DEFAULT_TABLE_ATTRIBUTES) - report = AbsoluteReport(**kwargs) - outfile = get_experiment_name() + "." + report.output_format - self.add_report(report, outfile=outfile) - - def add_comparison_table_step(self, **kwargs): - """Add a step that makes pairwise revision comparisons. - - Create comparative reports for all pairs of Fast Downward - revision triples. Each report pairs up the runs of the same - config and lists the two absolute attribute values and their - difference for all attributes in kwargs["attributes"]. - - All *kwargs* will be passed to the CompareRevisionsReport - class. If the keyword argument *attributes* is not - specified, a default list of attributes is used. :: - - exp.add_comparison_table_step(attributes=["coverage"]) - - """ - kwargs.setdefault("attributes", self.DEFAULT_TABLE_ATTRIBUTES) - - def make_comparison_tables(): - for rev1, rev2 in itertools.combinations(self.revision_nicks, 2): - report = CompareRevisionsReport(rev1, rev2, **kwargs) - outfile = os.path.join(self.eval_dir, - "%s-%s-compare.html" % (rev1, rev2)) - report(self.eval_dir, outfile) - - self.add_step(Step("make-comparison-tables", make_comparison_tables)) - - def add_scatter_plot_step(self, attributes=None): - """Add a step that creates scatter plots for all revision pairs. - - Create a scatter plot for each combination of attribute, - configuration and revision pair. If *attributes* is not - specified, a list of common scatter plot attributes is used. - For portfolios all attributes except "cost", "coverage" and - "plan_length" will be ignored. 
:: - - exp.add_scatter_plot_step(attributes=["expansions"]) - - """ - if attributes is None: - attributes = self.DEFAULT_SCATTER_PLOT_ATTRIBUTES - scatter_dir = os.path.join(self.eval_dir, "scatter") - - def is_portfolio(config_nick): - return "fdss" in config_nick - - def make_scatter_plots(): - for config_nick in self._config_nicks: - for rev1, rev2 in itertools.combinations( - self.revision_nicks, 2): - algo1 = "%s-%s" % (rev1, config_nick) - algo2 = "%s-%s" % (rev2, config_nick) - if is_portfolio(config_nick): - valid_attributes = [ - attr for attr in attributes - if attr in self.PORTFOLIO_ATTRIBUTES] - else: - valid_attributes = attributes - for attribute in valid_attributes: - name = "-".join([rev1, rev2, attribute, config_nick]) - print "Make scatter plot for", name - report = ScatterPlotReport( - filter_config=[algo1, algo2], - attributes=[attribute], - get_category=lambda run1, run2: run1["domain"], - legend_location=(1.3, 0.5)) - report(self.eval_dir, os.path.join(scatter_dir, name)) - - self.add_step(Step("make-scatter-plots", make_scatter_plots)) diff --git a/experiments/issue425/opt.py b/experiments/issue425/opt.py deleted file mode 100644 index 0992d28cc5..0000000000 --- a/experiments/issue425/opt.py +++ /dev/null @@ -1,52 +0,0 @@ -#! 
/usr/bin/env python -# -*- coding: utf-8 -*- - -from downward import suites, configs -from downward.reports.compare import CompareConfigsReport - -import common_setup - -REVISIONS = ["issue425-base", "issue425-v1"] -CONFIGS = configs.default_configs_optimal() - -# remove config that is disabled in this branch -del CONFIGS['astar_selmax_lmcut_lmcount'] - -exp = common_setup.IssueExperiment( - search_revisions=REVISIONS, - configs=CONFIGS, - suite=suites.suite_optimal_with_ipc11(), - limits={"search_time": 300} - ) -exp.add_absolute_report_step() -exp.add_comparison_table_step() - -def grouped_configs_to_compare(config_nicks): - grouped_configs = [] - for config_nick in config_nicks: - col_names = ['%s-%s' % (r, config_nick) for r in REVISIONS] - grouped_configs.append((col_names[0], col_names[1], - 'Diff - %s' % config_nick)) - return grouped_configs - -exp.add_report(CompareConfigsReport( - compared_configs=grouped_configs_to_compare(configs.configs_optimal_core()), - attributes=common_setup.IssueExperiment.DEFAULT_TABLE_ATTRIBUTES, - ), - outfile="issue425-opt-compare-core-configs.html" -) - -def add_first_run_search_time(run): - if run.get("search_time_all", []): - run["first_run_search_time"] = run["search_time_all"][0] - return run - -exp.add_report(CompareConfigsReport( - compared_configs=grouped_configs_to_compare(configs.configs_optimal_ipc()), - attributes=common_setup.IssueExperiment.DEFAULT_TABLE_ATTRIBUTES + ["first_run_search_time"], - filter=add_first_run_search_time, - ), - outfile="issue425-opt-compare-portfolio-configs.html" -) - -exp() diff --git a/experiments/issue425/sat.py b/experiments/issue425/sat.py deleted file mode 100644 index 361faf0109..0000000000 --- a/experiments/issue425/sat.py +++ /dev/null @@ -1,48 +0,0 @@ -#! 
/usr/bin/env python -# -*- coding: utf-8 -*- - -from downward import suites, configs -from downward.reports.compare import CompareConfigsReport - -import common_setup - -REVISIONS = ["issue425-base", "issue425-v1"] - -exp = common_setup.IssueExperiment( - search_revisions=REVISIONS, - configs=configs.default_configs_satisficing(), - suite=suites.suite_satisficing_with_ipc11(), - limits={"search_time": 300} - ) -exp.add_absolute_report_step() -exp.add_comparison_table_step() - -def grouped_configs_to_compare(config_nicks): - grouped_configs = [] - for config_nick in config_nicks: - col_names = ['%s-%s' % (r, config_nick) for r in REVISIONS] - grouped_configs.append((col_names[0], col_names[1], - 'Diff - %s' % config_nick)) - return grouped_configs - -exp.add_report(CompareConfigsReport( - compared_configs=grouped_configs_to_compare(configs.configs_satisficing_core()), - attributes=common_setup.IssueExperiment.DEFAULT_TABLE_ATTRIBUTES, - ), - outfile="issue425-sat-compare-core-configs.html" -) - -def add_first_run_search_time(run): - if run.get("search_time_all", []): - run["first_run_search_time"] = run["search_time_all"][0] - return run - -exp.add_report(CompareConfigsReport( - compared_configs=grouped_configs_to_compare(configs.configs_satisficing_ipc()), - attributes=common_setup.IssueExperiment.DEFAULT_TABLE_ATTRIBUTES + ["first_run_search_time"], - filter=add_first_run_search_time, - ), - outfile="issue425-sat-compare-portfolio-configs.html" -) - -exp() diff --git a/experiments/issue436/common_setup.py b/experiments/issue436/common_setup.py deleted file mode 100644 index 90e985c980..0000000000 --- a/experiments/issue436/common_setup.py +++ /dev/null @@ -1,352 +0,0 @@ -# -*- coding: utf-8 -*- - -import itertools -import os -import platform -import sys - -from lab.environments import LocalEnvironment, MaiaEnvironment -from lab.experiment import ARGPARSER -from lab.steps import Step - -from downward.experiments import DownwardExperiment, _get_rev_nick -from 
downward.checkouts import Translator, Preprocessor, Planner -from downward.reports.absolute import AbsoluteReport -from downward.reports.compare import CompareRevisionsReport -from downward.reports.scatter import ScatterPlotReport - - -def parse_args(): - ARGPARSER.add_argument( - "--test", - choices=["yes", "no", "auto"], - default="auto", - dest="test_run", - help="test experiment locally on a small suite if --test=yes or " - "--test=auto and we are not on a cluster") - return ARGPARSER.parse_args() - -ARGS = parse_args() - - -def get_script(): - """Get file name of main script.""" - import __main__ - return __main__.__file__ - - -def get_script_dir(): - """Get directory of main script. - - Usually a relative directory (depends on how it was called by the user.)""" - return os.path.dirname(get_script()) - - -def get_experiment_name(): - """Get name for experiment. - - Derived from the absolute filename of the main script, e.g. - "/ham/spam/eggs.py" => "spam-eggs".""" - script = os.path.abspath(get_script()) - script_dir = os.path.basename(os.path.dirname(script)) - script_base = os.path.splitext(os.path.basename(script))[0] - return "%s-%s" % (script_dir, script_base) - - -def get_data_dir(): - """Get data dir for the experiment. - - This is the subdirectory "data" of the directory containing - the main script.""" - return os.path.join(get_script_dir(), "data", get_experiment_name()) - - -def get_repo_base(): - """Get base directory of the repository, as an absolute path. - - Search upwards in the directory tree from the main script until a - directory with a subdirectory named ".hg" is found. 
- - Abort if the repo base cannot be found.""" - path = os.path.abspath(get_script_dir()) - while os.path.dirname(path) != path: - if os.path.exists(os.path.join(path, ".hg")): - return path - path = os.path.dirname(path) - sys.exit("repo base could not be found") - - -def is_running_on_cluster(): - node = platform.node() - return ("cluster" in node or - node.startswith("gkigrid") or - node in ["habakuk", "turtur"]) - - -def is_test_run(): - return ARGS.test_run == "yes" or (ARGS.test_run == "auto" and - not is_running_on_cluster()) - - -class IssueExperiment(DownwardExperiment): - """Wrapper for DownwardExperiment with a few convenience features.""" - - DEFAULT_TEST_SUITE = "gripper:prob01.pddl" - - DEFAULT_TABLE_ATTRIBUTES = [ - "cost", - "coverage", - "error", - "evaluations", - "expansions", - "expansions_until_last_jump", - "generated", - "memory", - "quality", - "run_dir", - "score_evaluations", - "score_expansions", - "score_generated", - "score_memory", - "score_search_time", - "score_total_time", - "search_time", - "total_time", - ] - - DEFAULT_SCATTER_PLOT_ATTRIBUTES = [ - "evaluations", - "expansions", - "expansions_until_last_jump", - "initial_h_value", - "memory", - "search_time", - "total_time", - ] - - PORTFOLIO_ATTRIBUTES = [ - "cost", - "coverage", - "plan_length", - ] - - def __init__(self, configs, suite, grid_priority=None, path=None, - repo=None, revisions=None, search_revisions=None, - test_suite=None, **kwargs): - """Create a DownwardExperiment with some convenience features. - - *configs* must be a non-empty dict of {nick: cmdline} pairs - that sets the planner configurations to test. :: - - IssueExperiment(configs={ - "lmcut": ["--search", "astar(lmcut())"], - "ipdb": ["--search", "astar(ipdb())"]}) - - *suite* sets the benchmarks for the experiment. It must be a - single string or a list of strings specifying domains or - tasks. The downward.suites module has many predefined - suites. 
:: - - IssueExperiment(suite=["grid", "gripper:prob01.pddl"]) - - from downward import suites - IssueExperiment(suite=suites.suite_all()) - IssueExperiment(suite=suites.suite_satisficing_with_ipc11()) - IssueExperiment(suite=suites.suite_optimal()) - - Use *grid_priority* to set the job priority for cluster - experiments. It must be in the range [-1023, 0] where 0 is the - highest priority. By default the priority is 0. :: - - IssueExperiment(grid_priority=-500) - - If *path* is specified, it must be the path to where the - experiment should be built (e.g. - /home/john/experiments/issue123/exp01/). If omitted, the - experiment path is derived automatically from the main - script's filename. Example:: - - script = experiments/issue123/exp01.py --> - path = experiments/issue123/data/issue123-exp01/ - - If *repo* is specified, it must be the path to the root of a - local Fast Downward repository. If omitted, the repository - is derived automatically from the main script's path. Example:: - - script = /path/to/fd-repo/experiments/issue123/exp01.py --> - repo = /path/to/fd-repo - - If *revisions* is specified, it should be a non-empty - list of revisions, which specify which planner versions to use - in the experiment. The same versions are used for translator, - preprocessor and search. :: - - IssueExperiment(revisions=["issue123", "4b3d581643"]) - - If *search_revisions* is specified, it should be a non-empty - list of revisions, which specify which search component - versions to use in the experiment. All runs use the - translator and preprocessor component of the first - revision. :: - - IssueExperiment(search_revisions=["default", "issue123"]) - - If you really need to specify the (translator, preprocessor, - planner) triples manually, use the *combinations* parameter - from the base class (might be deprecated soon). The options - *revisions*, *search_revisions* and *combinations* can be - freely mixed, but at least one of them must be given. 
- - Specify *test_suite* to set the benchmarks for experiment test - runs. By default the first gripper task is used. - - IssueExperiment(test_suite=["depot:pfile1", "tpp:p01.pddl"]) - - """ - - if is_test_run(): - kwargs["environment"] = LocalEnvironment() - suite = test_suite or self.DEFAULT_TEST_SUITE - elif "environment" not in kwargs: - kwargs["environment"] = MaiaEnvironment(priority=grid_priority) - - if path is None: - path = get_data_dir() - - if repo is None: - repo = get_repo_base() - - kwargs.setdefault("combinations", []) - - if not any([revisions, search_revisions, kwargs["combinations"]]): - raise ValueError('At least one of "revisions", "search_revisions" ' - 'or "combinations" must be given') - - if revisions: - kwargs["combinations"].extend([ - (Translator(repo, rev), - Preprocessor(repo, rev), - Planner(repo, rev)) - for rev in revisions]) - - if search_revisions: - base_rev = search_revisions[0] - # Use the same nick for all parts to get short revision nick. - kwargs["combinations"].extend([ - (Translator(repo, base_rev, nick=rev), - Preprocessor(repo, base_rev, nick=rev), - Planner(repo, rev, nick=rev)) - for rev in search_revisions]) - - DownwardExperiment.__init__(self, path=path, repo=repo, **kwargs) - - self._config_nicks = [] - for nick, config in configs.items(): - self.add_config(nick, config) - - self.add_suite(suite) - - @property - def revision_nicks(self): - # TODO: Once the add_algorithm() API is available we should get - # rid of the call to _get_rev_nick() and avoid inspecting the - # list of combinations by setting and saving the algorithm nicks. - return [_get_rev_nick(*combo) for combo in self.combinations] - - def add_config(self, nick, config, timeout=None): - DownwardExperiment.add_config(self, nick, config, timeout=timeout) - self._config_nicks.append(nick) - - def add_absolute_report_step(self, **kwargs): - """Add step that makes an absolute report. 
- - Absolute reports are useful for experiments that don't - compare revisions. - - The report is written to the experiment evaluation directory. - - All *kwargs* will be passed to the AbsoluteReport class. If - the keyword argument *attributes* is not specified, a - default list of attributes is used. :: - - exp.add_absolute_report_step(attributes=["coverage"]) - - """ - kwargs.setdefault("attributes", self.DEFAULT_TABLE_ATTRIBUTES) - report = AbsoluteReport(**kwargs) - outfile = get_experiment_name() + "." + report.output_format - self.add_report(report, outfile=outfile) - - def add_comparison_table_step(self, **kwargs): - """Add a step that makes pairwise revision comparisons. - - Create comparative reports for all pairs of Fast Downward - revision triples. Each report pairs up the runs of the same - config and lists the two absolute attribute values and their - difference for all attributes in kwargs["attributes"]. - - All *kwargs* will be passed to the CompareRevisionsReport - class. If the keyword argument *attributes* is not - specified, a default list of attributes is used. :: - - exp.add_comparison_table_step(attributes=["coverage"]) - - """ - kwargs.setdefault("attributes", self.DEFAULT_TABLE_ATTRIBUTES) - - def make_comparison_tables(): - for rev1, rev2 in itertools.combinations(self.revision_nicks, 2): - report = CompareRevisionsReport(rev1, rev2, **kwargs) - outfile = os.path.join(self.eval_dir, - "%s-%s-%s-compare.html" % - (self.name, rev1, rev2)) - report(self.eval_dir, outfile) - - self.add_step(Step("make-comparison-tables", make_comparison_tables)) - - def add_scatter_plot_step(self, attributes=None): - """Add a step that creates scatter plots for all revision pairs. - - Create a scatter plot for each combination of attribute, - configuration and revision pair. If *attributes* is not - specified, a list of common scatter plot attributes is used. - For portfolios all attributes except "cost", "coverage" and - "plan_length" will be ignored. 
:: - - exp.add_scatter_plot_step(attributes=["expansions"]) - - """ - if attributes is None: - attributes = self.DEFAULT_SCATTER_PLOT_ATTRIBUTES - scatter_dir = os.path.join(self.eval_dir, "scatter") - - def is_portfolio(config_nick): - return "fdss" in config_nick - - def make_scatter_plot(config_nick, rev1, rev2, attribute): - name = "-".join([self.name, rev1, rev2, attribute, config_nick]) - print "Make scatter plot for", name - algo1 = "%s-%s" % (rev1, config_nick) - algo2 = "%s-%s" % (rev2, config_nick) - report = ScatterPlotReport( - filter_config=[algo1, algo2], - attributes=[attribute], - get_category=lambda run1, run2: run1["domain"], - legend_location=(1.3, 0.5)) - report(self.eval_dir, - os.path.join(scatter_dir, rev1 + "-" + rev2, name)) - - def make_scatter_plots(): - for config_nick in self._config_nicks: - if is_portfolio(config_nick): - valid_attributes = [ - attr for attr in attributes - if attr in self.PORTFOLIO_ATTRIBUTES] - else: - valid_attributes = attributes - for rev1, rev2 in itertools.combinations( - self.revision_nicks, 2): - for attribute in valid_attributes: - make_scatter_plot(config_nick, rev1, rev2, attribute) - - self.add_step(Step("make-scatter-plots", make_scatter_plots)) diff --git a/experiments/issue436/configs.py b/experiments/issue436/configs.py deleted file mode 100644 index 18a8503e42..0000000000 --- a/experiments/issue436/configs.py +++ /dev/null @@ -1,204 +0,0 @@ -def configs_optimal_core(): - return { - # A* - "astar_blind": [ - "--search", - "astar(blind)"], - "astar_h2": [ - "--search", - "astar(hm(2))"], - "astar_ipdb": [ - "--search", - "astar(ipdb)"], - "astar_lmcount_lm_merged_rhw_hm": [ - "--search", - "astar(lmcount(lm_merged([lm_rhw(),lm_hm(m=1)]),admissible=true),mpd=true)"], - "astar_lmcut": [ - "--search", - "astar(lmcut)"], - "astar_hmax": [ - "--search", - "astar(hmax)"], - "astar_merge_and_shrink_bisim": [ - "--search", - "astar(merge_and_shrink(" - 
"merge_strategy=merge_linear(variable_order=reverse_level)," - "shrink_strategy=shrink_bisimulation(max_states=200000,greedy=false," - "group_by_h=true)))"], - "astar_merge_and_shrink_greedy_bisim": [ - "--search", - "astar(merge_and_shrink(" - "merge_strategy=merge_linear(variable_order=reverse_level)," - "shrink_strategy=shrink_bisimulation(max_states=infinity,threshold=1," - "greedy=true,group_by_h=false)))"], - "astar_merge_and_shrink_dfp_bisim": [ - "--search", - "astar(merge_and_shrink(merge_strategy=merge_dfp," - "shrink_strategy=shrink_bisimulation(max_states=50000,threshold=1," - "greedy=false,group_by_h=true)))"], - "astar_selmax_lmcut_lmcount": [ - "--search", - "astar(selmax([lmcut(),lmcount(lm_merged([lm_hm(m=1),lm_rhw()])," - "admissible=true)],training_set=1000),mpd=true)"], - } - - -def configs_satisficing_core(): - return { - # A* - "astar_goalcount": [ - "--search", - "astar(goalcount)"], - # eager greedy - "eager_greedy_ff": [ - "--heuristic", - "h=ff()", - "--search", - "eager_greedy(h, preferred=h)"], - "eager_greedy_add": [ - "--heuristic", - "h=add()", - "--search", - "eager_greedy(h, preferred=h)"], - "eager_greedy_cg": [ - "--heuristic", - "h=cg()", - "--search", - "eager_greedy(h, preferred=h)"], - "eager_greedy_cea": [ - "--heuristic", - "h=cea()", - "--search", - "eager_greedy(h, preferred=h)"], - # lazy greedy - "lazy_greedy_ff": [ - "--heuristic", - "h=ff()", - "--search", - "lazy_greedy(h, preferred=h)"], - "lazy_greedy_add": [ - "--heuristic", - "h=add()", - "--search", - "lazy_greedy(h, preferred=h)"], - "lazy_greedy_cg": [ - "--heuristic", - "h=cg()", - "--search", - "lazy_greedy(h, preferred=h)"], - } - - -def configs_optimal_ipc(): - return { - "seq_opt_merge_and_shrink": ["ipc", "seq-opt-merge-and-shrink"], - "seq_opt_fdss_1": ["ipc", "seq-opt-fdss-1"], - "seq_opt_fdss_2": ["ipc", "seq-opt-fdss-2"], - } - - -def configs_satisficing_ipc(): - return { - "seq_sat_lama_2011": ["ipc", "seq-sat-lama-2011"], - "seq_sat_fdss_1": ["ipc", 
"seq-sat-fdss-1"], - "seq_sat_fdss_2": ["ipc", "seq-sat-fdss-2"], - } - - -def configs_optimal_extended(): - return { - # A* - "astar_lmcount_lm_merged_rhw_hm_no_order": [ - "--search", - "astar(lmcount(lm_merged([lm_rhw(),lm_hm(m=1)]),admissible=true),mpd=true)"], - } - - -def configs_satisficing_extended(): - return { - # eager greedy - "eager_greedy_alt_ff_cg": [ - "--heuristic", - "hff=ff()", - "--heuristic", - "hcg=cg()", - "--search", - "eager_greedy(hff,hcg,preferred=[hff,hcg])"], - "eager_greedy_ff_no_pref": [ - "--search", - "eager_greedy(ff())"], - # lazy greedy - "lazy_greedy_alt_cea_cg": [ - "--heuristic", - "hcea=cea()", - "--heuristic", - "hcg=cg()", - "--search", - "lazy_greedy(hcea,hcg,preferred=[hcea,hcg])"], - "lazy_greedy_ff_no_pref": [ - "--search", - "lazy_greedy(ff())"], - "lazy_greedy_cea": [ - "--heuristic", - "h=cea()", - "--search", - "lazy_greedy(h, preferred=h)"], - # lazy wA* - "lazy_wa3_ff": [ - "--heuristic", - "h=ff()", - "--search", - "lazy_wastar(h,w=3,preferred=h)"], - # eager wA* - "eager_wa3_cg": [ - "--heuristic", - "h=cg()", - "--search", - "eager(single(sum([g(),weight(h,3)])),preferred=h)"], - # ehc - "ehc_ff": [ - "--search", - "ehc(ff())"], - # iterated - "iterated_wa_ff": [ - "--heuristic", - "h=ff()", - "--search", - "iterated([lazy_wastar(h,w=10), lazy_wastar(h,w=5), lazy_wastar(h,w=3)," - "lazy_wastar(h,w=2), lazy_wastar(h,w=1)])"], - # pareto open list - "pareto_ff": [ - "--heuristic", - "h=ff()", - "--search", - "eager(pareto([sum([g(), h]), h]), reopen_closed=true, pathmax=false," - "f_eval=sum([g(), h]))"], - # bucket-based open list - "bucket_lmcut": [ - "--heuristic", - "h=lmcut()", - "--search", - "eager(single_buckets(h), reopen_closed=true, pathmax=false)"], - } - - -def default_configs_optimal(core=True, ipc=True, extended=False): - configs = {} - if core: - configs.update(configs_optimal_core()) - if ipc: - configs.update(configs_optimal_ipc()) - if extended: - configs.update(configs_optimal_extended()) - 
return configs - - -def default_configs_satisficing(core=True, ipc=True, extended=False): - configs = {} - if core: - configs.update(configs_satisficing_core()) - if ipc: - configs.update(configs_satisficing_ipc()) - if extended: - configs.update(configs_satisficing_extended()) - return configs diff --git a/experiments/issue436/opt-v1.py b/experiments/issue436/opt-v1.py deleted file mode 100644 index 2e73e68288..0000000000 --- a/experiments/issue436/opt-v1.py +++ /dev/null @@ -1,32 +0,0 @@ -#! /usr/bin/env python -# -*- coding: utf-8 -*- - -from downward import suites -import configs - -import common_setup - - -REVS = ["issue436-base", "issue436-v1"] -LIMITS = {"search_time": 1800} -SUITE = suites.suite_optimal_with_ipc11() - -configs_optimal_core = configs.configs_optimal_core() -CONFIGS = {} -for name in ['astar_merge_and_shrink_greedy_bisim', 'astar_merge_and_shrink_dfp_bisim', - 'astar_ipdb', 'astar_hmax', 'astar_blind', 'astar_lmcut', - 'astar_merge_and_shrink_bisim', 'astar_lmcount_lm_merged_rhw_hm']: - CONFIGS[name] = configs_optimal_core[name] - - -exp = common_setup.IssueExperiment( - search_revisions=REVS, - configs=CONFIGS, - suite=SUITE, - limits=LIMITS, - ) - -exp.add_absolute_report_step() -exp.add_comparison_table_step() - -exp() diff --git a/experiments/issue436/opt-v2.py b/experiments/issue436/opt-v2.py deleted file mode 100644 index 9f68bd63c2..0000000000 --- a/experiments/issue436/opt-v2.py +++ /dev/null @@ -1,33 +0,0 @@ -#! 
/usr/bin/env python -# -*- coding: utf-8 -*- - -from downward import suites -import configs - -import common_setup - - -REVS = ["issue436-base", "issue436-v2"] -LIMITS = {"search_time": 1800} -SUITE = suites.suite_optimal_with_ipc11() - -configs_optimal_core = configs.configs_optimal_core() -CONFIGS = {} -for name in ['astar_merge_and_shrink_greedy_bisim', 'astar_merge_and_shrink_dfp_bisim', - 'astar_ipdb', 'astar_hmax', 'astar_blind', 'astar_lmcut', - 'astar_merge_and_shrink_bisim', 'astar_lmcount_lm_merged_rhw_hm']: - CONFIGS[name] = configs_optimal_core[name] - - -exp = common_setup.IssueExperiment( - search_revisions=REVS, - configs=CONFIGS, - suite=SUITE, - limits=LIMITS, - ) - -exp.add_absolute_report_step() -exp.add_comparison_table_step() -exp.add_scatter_plot_step() - -exp() diff --git a/experiments/issue436/sat-v1.py b/experiments/issue436/sat-v1.py deleted file mode 100644 index 09393c3fb0..0000000000 --- a/experiments/issue436/sat-v1.py +++ /dev/null @@ -1,30 +0,0 @@ -#! /usr/bin/env python -# -*- coding: utf-8 -*- - -from downward import suites -import configs - -import common_setup - - -REVS = ["issue436-base", "issue436-v1"] -LIMITS = {"search_time": 1800} -SUITE = suites.suite_satisficing_with_ipc11() - -default_configs_satisficing = configs.default_configs_satisficing(extended=True) -CONFIGS = {} -for name in ['lazy_greedy_add', 'eager_greedy_ff', 'eager_greedy_add', 'lazy_greedy_ff', 'pareto_ff']: - CONFIGS[name] = default_configs_satisficing[name] - -exp = common_setup.IssueExperiment( - search_revisions=REVS, - configs=CONFIGS, - suite=SUITE, - limits=LIMITS, - ) - -exp.add_absolute_report_step() -exp.add_comparison_table_step() - - -exp() diff --git a/experiments/issue436/sat-v2.py b/experiments/issue436/sat-v2.py deleted file mode 100644 index fbf1edbd6a..0000000000 --- a/experiments/issue436/sat-v2.py +++ /dev/null @@ -1,31 +0,0 @@ -#! 
/usr/bin/env python -# -*- coding: utf-8 -*- - -from downward import suites -import configs - -import common_setup - - -REVS = ["issue436-base", "issue436-v2"] -LIMITS = {"search_time": 1800} -SUITE = suites.suite_satisficing_with_ipc11() - -default_configs_satisficing = configs.default_configs_satisficing(extended=True) -CONFIGS = {} -for name in ['lazy_greedy_add', 'eager_greedy_ff', 'eager_greedy_add', 'lazy_greedy_ff', 'pareto_ff']: - CONFIGS[name] = default_configs_satisficing[name] - -exp = common_setup.IssueExperiment( - search_revisions=REVS, - configs=CONFIGS, - suite=SUITE, - limits=LIMITS, - ) - -exp.add_absolute_report_step() -exp.add_comparison_table_step() -exp.add_scatter_plot_step(attributes=['total_time', 'memory']) - - -exp() diff --git a/experiments/issue439/common_setup.py b/experiments/issue439/common_setup.py deleted file mode 100644 index 635088ec30..0000000000 --- a/experiments/issue439/common_setup.py +++ /dev/null @@ -1,399 +0,0 @@ -# -*- coding: utf-8 -*- - -import itertools -import os -import platform -import sys - -from lab.environments import LocalEnvironment, MaiaEnvironment -from lab.experiment import ARGPARSER -from lab.reports import Table -from lab.steps import Step - -from downward.experiments import DownwardExperiment, _get_rev_nick -from downward.checkouts import Translator, Preprocessor, Planner -from downward.reports import PlanningReport -from downward.reports.absolute import AbsoluteReport -from downward.reports.compare import CompareRevisionsReport -from downward.reports.scatter import ScatterPlotReport - - -def parse_args(): - ARGPARSER.add_argument( - "--test", - choices=["yes", "no", "auto"], - default="auto", - dest="test_run", - help="test experiment locally on a small suite if --test=yes or " - "--test=auto and we are not on a cluster") - return ARGPARSER.parse_args() - -ARGS = parse_args() - - -def get_script(): - """Get file name of main script.""" - import __main__ - return __main__.__file__ - - -def 
get_script_dir(): - """Get directory of main script. - - Usually a relative directory (depends on how it was called by the user.)""" - return os.path.dirname(get_script()) - - -def get_experiment_name(): - """Get name for experiment. - - Derived from the absolute filename of the main script, e.g. - "/ham/spam/eggs.py" => "spam-eggs".""" - script = os.path.abspath(get_script()) - script_dir = os.path.basename(os.path.dirname(script)) - script_base = os.path.splitext(os.path.basename(script))[0] - return "%s-%s" % (script_dir, script_base) - - -def get_data_dir(): - """Get data dir for the experiment. - - This is the subdirectory "data" of the directory containing - the main script.""" - return os.path.join(get_script_dir(), "data", get_experiment_name()) - - -def get_repo_base(): - """Get base directory of the repository, as an absolute path. - - Search upwards in the directory tree from the main script until a - directory with a subdirectory named ".hg" is found. - - Abort if the repo base cannot be found.""" - path = os.path.abspath(get_script_dir()) - while os.path.dirname(path) != path: - if os.path.exists(os.path.join(path, ".hg")): - return path - path = os.path.dirname(path) - sys.exit("repo base could not be found") - - -def is_running_on_cluster(): - node = platform.node() - return ("cluster" in node or - node.startswith("gkigrid") or - node in ["habakuk", "turtur"]) - - -def is_test_run(): - return ARGS.test_run == "yes" or (ARGS.test_run == "auto" and - not is_running_on_cluster()) - - -class IssueExperiment(DownwardExperiment): - """Wrapper for DownwardExperiment with a few convenience features.""" - - DEFAULT_TEST_SUITE = "gripper:prob01.pddl" - - # TODO: Add something about errors/exit codes. 
- DEFAULT_TABLE_ATTRIBUTES = [ - "cost", - "coverage", - "evaluations", - "expansions", - "expansions_until_last_jump", - "generated", - "memory", - "quality", - "run_dir", - "score_evaluations", - "score_expansions", - "score_generated", - "score_memory", - "score_search_time", - "score_total_time", - "search_time", - "total_time", - ] - - DEFAULT_SCATTER_PLOT_ATTRIBUTES = [ - "evaluations", - "expansions", - "expansions_until_last_jump", - "initial_h_value", - "memory", - "search_time", - "total_time", - ] - - PORTFOLIO_ATTRIBUTES = [ - "cost", - "coverage", - "plan_length", - ] - - def __init__(self, configs, suite, grid_priority=None, path=None, - repo=None, revisions=None, search_revisions=None, - test_suite=None, **kwargs): - """Create a DownwardExperiment with some convenience features. - - *configs* must be a non-empty dict of {nick: cmdline} pairs - that sets the planner configurations to test. :: - - IssueExperiment(configs={ - "lmcut": ["--search", "astar(lmcut())"], - "ipdb": ["--search", "astar(ipdb())"]}) - - *suite* sets the benchmarks for the experiment. It must be a - single string or a list of strings specifying domains or - tasks. The downward.suites module has many predefined - suites. :: - - IssueExperiment(suite=["grid", "gripper:prob01.pddl"]) - - from downward import suites - IssueExperiment(suite=suites.suite_all()) - IssueExperiment(suite=suites.suite_satisficing_with_ipc11()) - IssueExperiment(suite=suites.suite_optimal()) - - Use *grid_priority* to set the job priority for cluster - experiments. It must be in the range [-1023, 0] where 0 is the - highest priority. By default the priority is 0. :: - - IssueExperiment(grid_priority=-500) - - If *path* is specified, it must be the path to where the - experiment should be built (e.g. - /home/john/experiments/issue123/exp01/). If omitted, the - experiment path is derived automatically from the main - script's filename. 
Example:: - - script = experiments/issue123/exp01.py --> - path = experiments/issue123/data/issue123-exp01/ - - If *repo* is specified, it must be the path to the root of a - local Fast Downward repository. If omitted, the repository - is derived automatically from the main script's path. Example:: - - script = /path/to/fd-repo/experiments/issue123/exp01.py --> - repo = /path/to/fd-repo - - If *revisions* is specified, it should be a non-empty - list of revisions, which specify which planner versions to use - in the experiment. The same versions are used for translator, - preprocessor and search. :: - - IssueExperiment(revisions=["issue123", "4b3d581643"]) - - If *search_revisions* is specified, it should be a non-empty - list of revisions, which specify which search component - versions to use in the experiment. All runs use the - translator and preprocessor component of the first - revision. :: - - IssueExperiment(search_revisions=["default", "issue123"]) - - If you really need to specify the (translator, preprocessor, - planner) triples manually, use the *combinations* parameter - from the base class (might be deprecated soon). The options - *revisions*, *search_revisions* and *combinations* can be - freely mixed, but at least one of them must be given. - - Specify *test_suite* to set the benchmarks for experiment test - runs. By default the first gripper task is used. 
- - IssueExperiment(test_suite=["depot:pfile1", "tpp:p01.pddl"]) - - """ - - if is_test_run(): - kwargs["environment"] = LocalEnvironment() - suite = test_suite or self.DEFAULT_TEST_SUITE - elif "environment" not in kwargs: - kwargs["environment"] = MaiaEnvironment(priority=grid_priority) - - if path is None: - path = get_data_dir() - - if repo is None: - repo = get_repo_base() - - kwargs.setdefault("combinations", []) - - if not any([revisions, search_revisions, kwargs["combinations"]]): - raise ValueError('At least one of "revisions", "search_revisions" ' - 'or "combinations" must be given') - - if revisions: - kwargs["combinations"].extend([ - (Translator(repo, rev), - Preprocessor(repo, rev), - Planner(repo, rev)) - for rev in revisions]) - - if search_revisions: - base_rev = search_revisions[0] - # Use the same nick for all parts to get short revision nick. - kwargs["combinations"].extend([ - (Translator(repo, base_rev, nick=rev), - Preprocessor(repo, base_rev, nick=rev), - Planner(repo, rev, nick=rev)) - for rev in search_revisions]) - - DownwardExperiment.__init__(self, path=path, repo=repo, **kwargs) - - self._config_nicks = [] - for nick, config in configs.items(): - self.add_config(nick, config) - - self.add_suite(suite) - - @property - def revision_nicks(self): - # TODO: Once the add_algorithm() API is available we should get - # rid of the call to _get_rev_nick() and avoid inspecting the - # list of combinations by setting and saving the algorithm nicks. - return [_get_rev_nick(*combo) for combo in self.combinations] - - def add_config(self, nick, config, timeout=None): - DownwardExperiment.add_config(self, nick, config, timeout=timeout) - self._config_nicks.append(nick) - - def add_absolute_report_step(self, **kwargs): - """Add step that makes an absolute report. - - Absolute reports are useful for experiments that don't - compare revisions. - - The report is written to the experiment evaluation directory. 
- - All *kwargs* will be passed to the AbsoluteReport class. If - the keyword argument *attributes* is not specified, a - default list of attributes is used. :: - - exp.add_absolute_report_step(attributes=["coverage"]) - - """ - kwargs.setdefault("attributes", self.DEFAULT_TABLE_ATTRIBUTES) - report = AbsoluteReport(**kwargs) - outfile = get_experiment_name() + "." + report.output_format - self.add_report(report, outfile=outfile) - - def add_comparison_table_step(self, **kwargs): - """Add a step that makes pairwise revision comparisons. - - Create comparative reports for all pairs of Fast Downward - revision triples. Each report pairs up the runs of the same - config and lists the two absolute attribute values and their - difference for all attributes in kwargs["attributes"]. - - All *kwargs* will be passed to the CompareRevisionsReport - class. If the keyword argument *attributes* is not - specified, a default list of attributes is used. :: - - exp.add_comparison_table_step(attributes=["coverage"]) - - """ - kwargs.setdefault("attributes", self.DEFAULT_TABLE_ATTRIBUTES) - - def make_comparison_tables(): - for rev1, rev2 in itertools.combinations(self.revision_nicks, 2): - report = CompareRevisionsReport(rev1, rev2, **kwargs) - outfile = os.path.join(self.eval_dir, - "%s-%s-compare.html" % (rev1, rev2)) - report(self.eval_dir, outfile) - - self.add_step(Step("make-comparison-tables", make_comparison_tables)) - - def add_scatter_plot_step(self, attributes=None): - """Add a step that creates scatter plots for all revision pairs. - - Create a scatter plot for each combination of attribute, - configuration and revision pair. If *attributes* is not - specified, a list of common scatter plot attributes is used. - For portfolios all attributes except "cost", "coverage" and - "plan_length" will be ignored. 
:: - - exp.add_scatter_plot_step(attributes=["expansions"]) - - """ - if attributes is None: - attributes = self.DEFAULT_SCATTER_PLOT_ATTRIBUTES - scatter_dir = os.path.join(self.eval_dir, "scatter") - - def is_portfolio(config_nick): - return "fdss" in config_nick - - def make_scatter_plots(): - for config_nick in self._config_nicks: - for rev1, rev2 in itertools.combinations( - self.revision_nicks, 2): - algo1 = "%s-%s" % (rev1, config_nick) - algo2 = "%s-%s" % (rev2, config_nick) - if is_portfolio(config_nick): - valid_attributes = [ - attr for attr in attributes - if attr in self.PORTFOLIO_ATTRIBUTES] - else: - valid_attributes = attributes - for attribute in valid_attributes: - name = "-".join([rev1, rev2, attribute, config_nick]) - print "Make scatter plot for", name - report = ScatterPlotReport( - filter_config=[algo1, algo2], - attributes=[attribute], - get_category=lambda run1, run2: run1["domain"], - legend_location=(1.3, 0.5)) - report(self.eval_dir, os.path.join(scatter_dir, name)) - - self.add_step(Step("make-scatter-plots", make_scatter_plots)) - - -class RegressionReport(PlanningReport): - """ - Compare revisions for tasks on which the first revision performs - better than other revisions. - - *revision_nicks* must be a list of revision_nicks, e.g. - ["default", "issue123"]. - - *config_nicks* must be a list of configuration nicknames, e.g. - ["eager_greedy_ff", "eager_greedy_add"]. - - *regression_attribute* is the attribute that we compare between - different revisions. It defaults to "coverage". 
- - Example comparing search_time for tasks were we lose coverage:: - - exp.add_report(RegressionReport(revision_nicks=["default", "issue123"], - config_nicks=["eager_greedy_ff"], - regression_attribute="coverage", - attributes="search_time")) - """ - def __init__(self, revision_nicks, config_nicks, - regression_attribute="coverage", **kwargs): - PlanningReport.__init__(self, **kwargs) - assert revision_nicks - self.revision_nicks = revision_nicks - assert config_nicks - self.config_nicks = config_nicks - self.regression_attribute = regression_attribute - - def get_markup(self): - tables = [] - for (domain, problem) in self.problems: - for config_nick in self.config_nicks: - runs = [self.runs[(domain, problem, rev + "-" + config_nick)] - for rev in self.revision_nicks] - - if any(runs[0][self.regression_attribute] > - runs[i][self.regression_attribute] - for i in range(1, len(self.revision_nicks))): - print "\"%s:%s\"," % (domain, problem) - table = Table() - for rev, run in zip(self.revision_nicks, runs): - for attr in self.attributes: - table.add_cell(rev, attr, run.get(attr)) - table_name = ":".join((domain, problem, config_nick)) - tables.append((table_name, table)) - return "\n".join(name + "\n" + str(table) for name, table in tables) diff --git a/experiments/issue439/custom-parser.py b/experiments/issue439/custom-parser.py deleted file mode 100755 index b4e8206801..0000000000 --- a/experiments/issue439/custom-parser.py +++ /dev/null @@ -1,19 +0,0 @@ -#! 
/usr/bin/env python - -from lab.parser import Parser - - -class CustomParser(Parser): - def __init__(self): - Parser.__init__(self) - self.add_pattern( - "init_time", - "Best heuristic value: \d+ \[g=0, 1 evaluated, 0 expanded, t=(.+)s, \d+ KB\]", - required=True, - type=float) - - -if __name__ == "__main__": - parser = CustomParser() - print "Running custom parser" - parser.parse() diff --git a/experiments/issue439/issue439-30min.py b/experiments/issue439/issue439-30min.py deleted file mode 100755 index be55092526..0000000000 --- a/experiments/issue439/issue439-30min.py +++ /dev/null @@ -1,36 +0,0 @@ -#! /usr/bin/env python -# -*- coding: utf-8 -*- - -from downward import configs, suites - -import common_setup - - -SEARCH_REVS = ["issue439-base", "issue439-v1"] -LIMITS = {"search_time": 1800} -SUITE = suites.suite_satisficing_with_ipc11() - -configs_satisficing_core = configs.configs_satisficing_core() -CONFIGS = {} -for name in ["eager_greedy_add", "eager_greedy_ff", - "lazy_greedy_add", "lazy_greedy_ff"]: - CONFIGS[name] = configs_satisficing_core[name] - - -exp = common_setup.IssueExperiment( - revisions=SEARCH_REVS, - configs=CONFIGS, - suite=SUITE, - limits=LIMITS, - ) -exp.add_search_parser("custom-parser.py") - -attributes = attributes=exp.DEFAULT_TABLE_ATTRIBUTES + ["init_time"] -exp.add_absolute_report_step(attributes=attributes) -exp.add_comparison_table_step(attributes=attributes) -exp.add_report(common_setup.RegressionReport( - revision_nicks=exp.revision_nicks, - config_nicks=CONFIGS.keys(), - attributes=attributes)) - -exp() diff --git a/experiments/issue439/issue439.py b/experiments/issue439/issue439.py deleted file mode 100755 index 671227af20..0000000000 --- a/experiments/issue439/issue439.py +++ /dev/null @@ -1,33 +0,0 @@ -#! 
/usr/bin/env python -# -*- coding: utf-8 -*- - -from downward import configs, suites - -import common_setup - - -SEARCH_REVS = ["issue439-base", "issue439-v1"] -LIMITS = {"search_time": 300} -SUITE = suites.suite_satisficing_with_ipc11() - -configs_satisficing_core = configs.configs_satisficing_core() -CONFIGS = {} -for name in ["eager_greedy_add", "eager_greedy_ff", - "lazy_greedy_add", "lazy_greedy_ff"]: - CONFIGS[name] = configs_satisficing_core[name] - - -exp = common_setup.IssueExperiment( - revisions=SEARCH_REVS, - configs=CONFIGS, - suite=SUITE, - limits=LIMITS, - ) - -exp.add_absolute_report_step() -exp.add_comparison_table_step() -exp.add_report(common_setup.RegressionReport( - revision_nicks=exp.revision_nicks, - config_nicks=CONFIGS.keys())) - -exp() diff --git a/experiments/issue439/regressions.py b/experiments/issue439/regressions.py deleted file mode 100755 index d4bc91c673..0000000000 --- a/experiments/issue439/regressions.py +++ /dev/null @@ -1,45 +0,0 @@ -#! /usr/bin/env python -# -*- coding: utf-8 -*- - -from downward import configs - -import common_setup - - -SEARCH_REVS = ["issue439-base", "issue439-v1"] -LIMITS = {"search_time": 1800} -SUITE = [ - "airport:p45-airport5MUC-p6.pddl", - "elevators-sat08-strips:p22.pddl", - "parking-sat11-strips:pfile09-033.pddl", - "scanalyzer-08-strips:p30.pddl", - "transport-sat11-strips:p14.pddl", - "transport-sat11-strips:p16.pddl", - "trucks:p19.pddl", - "trucks-strips:p23.pddl", -] - -configs_satisficing_core = configs.configs_satisficing_core() -CONFIGS = {} -for name in ["eager_greedy_add", "eager_greedy_ff", - "lazy_greedy_add", "lazy_greedy_ff"]: - CONFIGS[name] = configs_satisficing_core[name] - - -exp = common_setup.IssueExperiment( - revisions=SEARCH_REVS, - configs=CONFIGS, - suite=SUITE, - limits=LIMITS, - ) -exp.add_search_parser("custom-parser.py") - -attributes = attributes=exp.DEFAULT_TABLE_ATTRIBUTES + ["init_time"] -exp.add_absolute_report_step(attributes=attributes) 
-exp.add_comparison_table_step(attributes=attributes) -exp.add_report(common_setup.RegressionReport( - revision_nicks=exp.revision_nicks, - config_nicks=CONFIGS.keys(), - attributes=attributes)) - -exp() diff --git a/experiments/issue443/common_setup.py b/experiments/issue443/common_setup.py deleted file mode 100644 index ebf622a012..0000000000 --- a/experiments/issue443/common_setup.py +++ /dev/null @@ -1,347 +0,0 @@ -# -*- coding: utf-8 -*- - -import itertools -import os -import platform -import sys - -from lab.environments import LocalEnvironment, MaiaEnvironment -from lab.experiment import ARGPARSER -from lab.steps import Step - -from downward.experiments import DownwardExperiment, _get_rev_nick -from downward.checkouts import Translator, Preprocessor, Planner -from downward.reports.absolute import AbsoluteReport -from downward.reports.compare import CompareRevisionsReport -from downward.reports.scatter import ScatterPlotReport - - -def parse_args(): - ARGPARSER.add_argument( - "--test", - choices=["yes", "no", "auto"], - default="auto", - dest="test_run", - help="test experiment locally on a small suite if --test=yes or " - "--test=auto and we are not on a cluster") - return ARGPARSER.parse_args() - -ARGS = parse_args() - - -def get_script(): - """Get file name of main script.""" - import __main__ - return __main__.__file__ - - -def get_script_dir(): - """Get directory of main script. - - Usually a relative directory (depends on how it was called by the user.)""" - return os.path.dirname(get_script()) - - -def get_experiment_name(): - """Get name for experiment. - - Derived from the absolute filename of the main script, e.g. - "/ham/spam/eggs.py" => "spam-eggs".""" - script = os.path.abspath(get_script()) - script_dir = os.path.basename(os.path.dirname(script)) - script_base = os.path.splitext(os.path.basename(script))[0] - return "%s-%s" % (script_dir, script_base) - - -def get_data_dir(): - """Get data dir for the experiment. 
- - This is the subdirectory "data" of the directory containing - the main script.""" - return os.path.join(get_script_dir(), "data", get_experiment_name()) - - -def get_repo_base(): - """Get base directory of the repository, as an absolute path. - - Search upwards in the directory tree from the main script until a - directory with a subdirectory named ".hg" is found. - - Abort if the repo base cannot be found.""" - path = os.path.abspath(get_script_dir()) - while os.path.dirname(path) != path: - if os.path.exists(os.path.join(path, ".hg")): - return path - path = os.path.dirname(path) - sys.exit("repo base could not be found") - - -def is_running_on_cluster(): - node = platform.node() - return (node.endswith("cluster.bc2.ch") or - node.startswith("gkigrid") or - node in ["habakuk", "turtur"]) - - -def is_test_run(): - return ARGS.test_run == "yes" or (ARGS.test_run == "auto" and - not is_running_on_cluster()) - - -class IssueExperiment(DownwardExperiment): - """Wrapper for DownwardExperiment with a few convenience features.""" - - DEFAULT_TEST_SUITE = "gripper:prob01.pddl" - - # TODO: Once we have reference results, we should add "quality". - # TODO: Add something about errors/exit codes. 
- DEFAULT_TABLE_ATTRIBUTES = [ - "cost", - "coverage", - "evaluations", - "expansions", - "expansions_until_last_jump", - "generated", - "memory", - "run_dir", - "score_evaluations", - "score_expansions", - "score_generated", - "score_memory", - "score_search_time", - "score_total_time", - "search_time", - "total_time", - ] - - DEFAULT_SCATTER_PLOT_ATTRIBUTES = [ - "evaluations", - "expansions", - "expansions_until_last_jump", - "initial_h_value", - "memory", - "search_time", - "total_time", - ] - - PORTFOLIO_ATTRIBUTES = [ - "cost", - "coverage", - "plan_length", - ] - - def __init__(self, configs, suite, grid_priority=None, path=None, - repo=None, revisions=None, search_revisions=None, - test_suite=None, **kwargs): - """Create a DownwardExperiment with some convenience features. - - *configs* must be a non-empty dict of {nick: cmdline} pairs - that sets the planner configurations to test. :: - - IssueExperiment(configs={ - "lmcut": ["--search", "astar(lmcut())"], - "ipdb": ["--search", "astar(ipdb())"]}) - - *suite* sets the benchmarks for the experiment. It must be a - single string or a list of strings specifying domains or - tasks. The downward.suites module has many predefined - suites. :: - - IssueExperiment(suite=["grid", "gripper:prob01.pddl"]) - - from downward import suites - IssueExperiment(suite=suites.suite_all()) - IssueExperiment(suite=suites.suite_satisficing_with_ipc11()) - IssueExperiment(suite=suites.suite_optimal()) - - Use *grid_priority* to set the job priority for cluster - experiments. It must be in the range [-1023, 0] where 0 is the - highest priority. By default the priority is 0. :: - - IssueExperiment(grid_priority=-500) - - If *path* is specified, it must be the path to where the - experiment should be built (e.g. - /home/john/experiments/issue123/exp01/). If omitted, the - experiment path is derived automatically from the main - script's filename. 
Example:: - - script = experiments/issue123/exp01.py --> - path = experiments/issue123/data/issue123-exp01/ - - If *repo* is specified, it must be the path to the root of a - local Fast Downward repository. If omitted, the repository - is derived automatically from the main script's path. Example:: - - script = /path/to/fd-repo/experiments/issue123/exp01.py --> - repo = /path/to/fd-repo - - If *revisions* is specified, it should be a non-empty - list of revisions, which specify which planner versions to use - in the experiment. The same versions are used for translator, - preprocessor and search. :: - - IssueExperiment(revisions=["issue123", "4b3d581643"]) - - If *search_revisions* is specified, it should be a non-empty - list of revisions, which specify which search component - versions to use in the experiment. All runs use the - translator and preprocessor component of the first - revision. :: - - IssueExperiment(search_revisions=["default", "issue123"]) - - If you really need to specify the (translator, preprocessor, - planner) triples manually, use the *combinations* parameter - from the base class (might be deprecated soon). The options - *revisions*, *search_revisions* and *combinations* can be - freely mixed, but at least one of them must be given. - - Specify *test_suite* to set the benchmarks for experiment test - runs. By default the first gripper task is used. 
- - IssueExperiment(test_suite=["depot:pfile1", "tpp:p01.pddl"]) - - """ - - if is_test_run(): - kwargs["environment"] = LocalEnvironment() - suite = test_suite or self.DEFAULT_TEST_SUITE - elif "environment" not in kwargs: - kwargs["environment"] = MaiaEnvironment(priority=grid_priority) - - if path is None: - path = get_data_dir() - - if repo is None: - repo = get_repo_base() - - kwargs.setdefault("combinations", []) - - if not any([revisions, search_revisions, kwargs["combinations"]]): - raise ValueError('At least one of "revisions", "search_revisions" ' - 'or "combinations" must be given') - - if revisions: - kwargs["combinations"].extend([ - (Translator(repo, rev), - Preprocessor(repo, rev), - Planner(repo, rev)) - for rev in revisions]) - - if search_revisions: - base_rev = search_revisions[0] - # Use the same nick for all parts to get short revision nick. - kwargs["combinations"].extend([ - (Translator(repo, base_rev, nick=rev), - Preprocessor(repo, base_rev, nick=rev), - Planner(repo, rev, nick=rev)) - for rev in search_revisions]) - - DownwardExperiment.__init__(self, path=path, repo=repo, **kwargs) - - self._config_nicks = [] - for nick, config in configs.items(): - self.add_config(nick, config) - - self.add_suite(suite) - - @property - def revision_nicks(self): - # TODO: Once the add_algorithm() API is available we should get - # rid of the call to _get_rev_nick() and avoid inspecting the - # list of combinations by setting and saving the algorithm nicks. - return [_get_rev_nick(*combo) for combo in self.combinations] - - def add_config(self, nick, config, timeout=None): - DownwardExperiment.add_config(self, nick, config, timeout=timeout) - self._config_nicks.append(nick) - - def add_absolute_report_step(self, **kwargs): - """Add step that makes an absolute report. - - Absolute reports are useful for experiments that don't - compare revisions. - - The report is written to the experiment evaluation directory. 
- - All *kwargs* will be passed to the AbsoluteReport class. If - the keyword argument *attributes* is not specified, a - default list of attributes is used. :: - - exp.add_absolute_report_step(attributes=["coverage"]) - - """ - kwargs.setdefault("attributes", self.DEFAULT_TABLE_ATTRIBUTES) - report = AbsoluteReport(**kwargs) - outfile = get_experiment_name() + "." + report.output_format - self.add_report(report, outfile=outfile) - - def add_comparison_table_step(self, **kwargs): - """Add a step that makes pairwise revision comparisons. - - Create comparative reports for all pairs of Fast Downward - revision triples. Each report pairs up the runs of the same - config and lists the two absolute attribute values and their - difference for all attributes in kwargs["attributes"]. - - All *kwargs* will be passed to the CompareRevisionsReport - class. If the keyword argument *attributes* is not - specified, a default list of attributes is used. :: - - exp.add_comparison_table_step(attributes=["coverage"]) - - """ - kwargs.setdefault("attributes", self.DEFAULT_TABLE_ATTRIBUTES) - - def make_comparison_tables(): - for rev1, rev2 in itertools.combinations(self.revision_nicks, 2): - report = CompareRevisionsReport(rev1, rev2, **kwargs) - outfile = os.path.join(self.eval_dir, - "%s-%s-compare.html" % (rev1, rev2)) - report(self.eval_dir, outfile) - - self.add_step(Step("make-comparison-tables", make_comparison_tables)) - - def add_scatter_plot_step(self, attributes=None): - """Add a step that creates scatter plots for all revision pairs. - - Create a scatter plot for each combination of attribute, - configuration and revision pair. If *attributes* is not - specified, a list of common scatter plot attributes is used. - For portfolios all attributes except "cost", "coverage" and - "plan_length" will be ignored. 
:: - - exp.add_scatter_plot_step(attributes=["expansions"]) - - """ - if attributes is None: - attributes = self.DEFAULT_SCATTER_PLOT_ATTRIBUTES - scatter_dir = os.path.join(self.eval_dir, "scatter") - - def is_portfolio(config_nick): - return "fdss" in config_nick - - def make_scatter_plots(): - for config_nick in self._config_nicks: - for rev1, rev2 in itertools.combinations( - self.revision_nicks, 2): - algo1 = "%s-%s" % (rev1, config_nick) - algo2 = "%s-%s" % (rev2, config_nick) - if is_portfolio(config_nick): - valid_attributes = [ - attr for attr in attributes - if attr in self.PORTFOLIO_ATTRIBUTES] - else: - valid_attributes = attributes - for attribute in valid_attributes: - name = "-".join([rev1, rev2, attribute, config_nick]) - print "Make scatter plot for", name - report = ScatterPlotReport( - filter_config=[algo1, algo2], - attributes=[attribute], - get_category=lambda run1, run2: run1["domain"], - legend_location=(1.3, 0.5)) - report(self.eval_dir, os.path.join(scatter_dir, name)) - - self.add_step(Step("make-scatter-plots", make_scatter_plots)) diff --git a/experiments/issue443/issue443-v1.py b/experiments/issue443/issue443-v1.py deleted file mode 100755 index d31a3b013c..0000000000 --- a/experiments/issue443/issue443-v1.py +++ /dev/null @@ -1,22 +0,0 @@ -#! /usr/bin/env python -# -*- coding: utf-8 -*- - -from downward import suites - -import common_setup - -CONFIGS = { - 'astar_lmcount': [ - '--search', - 'astar(lmcount(lm_merged([lm_rhw(),lm_hm(m=1)]),admissible=true,optimal=true),mpd=true)'], -} - -exp = common_setup.IssueExperiment( - search_revisions=["issue443-base", "issue443-v1"], - configs=CONFIGS, - suite=suites.suite_optimal_with_ipc11(), - ) - -exp.add_comparison_table_step() - -exp() diff --git a/experiments/issue443/issue443-v2.py b/experiments/issue443/issue443-v2.py deleted file mode 100755 index 618b3cecce..0000000000 --- a/experiments/issue443/issue443-v2.py +++ /dev/null @@ -1,22 +0,0 @@ -#! 
/usr/bin/env python -# -*- coding: utf-8 -*- - -from downward import suites - -import common_setup - -CONFIGS = { - 'astar_lmcount': [ - '--search', - 'astar(lmcount(lm_merged([lm_rhw(),lm_hm(m=1)]),admissible=true,optimal=true),mpd=true)'], -} - -exp = common_setup.IssueExperiment( - search_revisions=["issue443-base", "issue443-v2"], - configs=CONFIGS, - suite=suites.suite_optimal_with_ipc11(), - ) - -exp.add_comparison_table_step() - -exp() diff --git a/experiments/issue443/issue443-v3.py b/experiments/issue443/issue443-v3.py deleted file mode 100755 index ea50ff2efc..0000000000 --- a/experiments/issue443/issue443-v3.py +++ /dev/null @@ -1,22 +0,0 @@ -#! /usr/bin/env python -# -*- coding: utf-8 -*- - -from downward import suites - -import common_setup - -CONFIGS = { - 'astar_lmcount': [ - '--search', - 'astar(lmcount(lm_merged([lm_rhw(),lm_hm(m=1)]),admissible=true,optimal=true),mpd=true)'], -} - -exp = common_setup.IssueExperiment( - search_revisions=["issue443-base", "issue443-v3"], - configs=CONFIGS, - suite=suites.suite_optimal_with_ipc11(), - ) - -exp.add_comparison_table_step() - -exp() diff --git a/experiments/issue443/issue443-v4.py b/experiments/issue443/issue443-v4.py deleted file mode 100755 index 61104f77b4..0000000000 --- a/experiments/issue443/issue443-v4.py +++ /dev/null @@ -1,22 +0,0 @@ -#! 
/usr/bin/env python -# -*- coding: utf-8 -*- - -from downward import suites - -import common_setup - -CONFIGS = { - 'astar_lmcount': [ - '--search', - 'astar(lmcount(lm_merged([lm_rhw(),lm_hm(m=1)]),admissible=true,optimal=true),mpd=true)'], -} - -exp = common_setup.IssueExperiment( - search_revisions=["issue443-v4-base", "issue443-v4"], - configs=CONFIGS, - suite=suites.suite_optimal_with_ipc11(), - ) - -exp.add_comparison_table_step() - -exp() diff --git a/experiments/issue453/airport-adl-exp.py b/experiments/issue453/airport-adl-exp.py deleted file mode 100755 index 93296813a3..0000000000 --- a/experiments/issue453/airport-adl-exp.py +++ /dev/null @@ -1,85 +0,0 @@ -#! /usr/bin/env python -# -*- coding: utf-8 -*- - -import itertools -import os - -from lab.environments import LocalEnvironment, BaselSlurmEnvironment - -from downward.reports.compare import ComparativeReport - -import common_setup -from common_setup import IssueConfig, IssueExperiment -from relativescatter import RelativeScatterPlotReport -from derived_variables_instances import DerivedVariableInstances, DERIVED_VARIABLES_SUITE - -DIR = os.path.dirname(os.path.abspath(__file__)) -SCRIPT_NAME = os.path.splitext(os.path.basename(__file__))[0] -BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"] -REVISIONS = ["issue453-v1"] -CONFIGS = [] -# Add heuristics using axioms -HEURISTICS = ['ff', 'cg', 'cea', 'add'] -for h in HEURISTICS: - CONFIGS.append(IssueConfig(h+"-normal-axiom-rules", ["--evaluator", "heur=%s" % h, - "--search", "lazy_greedy([heur], preferred=[heur])"])) - CONFIGS.append(IssueConfig(h+"-overapprox-axiom-rules", ["--evaluator", "heur=%s" % h, - "--search", "lazy_greedy([heur], preferred=[heur])", - "--translate-options", "--overapproximate-axioms"]),) -# Add lama-first -CONFIGS.append(IssueConfig("lama-normal-axiom-rules", [], driver_options=["--alias", "lama-first"])) -CONFIGS.append(IssueConfig("lama-overapprox-axiom-rules", ["--translate-options", "--overapproximate-axioms"], - 
driver_options=["--alias", "lama-first"]),) -# Add A* with blind -CONFIGS.append(IssueConfig("blind-normal-axiom-rules", ["--search", "astar(blind)"])) -CONFIGS.append(IssueConfig("blind-overapprox-axiom-rules", ["--search", "astar(blind)", - "--translate-options", "--overapproximate-axioms"]),) - -SUITE = ["airport-adl"] -ENVIRONMENT = BaselSlurmEnvironment( - partition="infai_1", - export=["PATH", "DOWNWARD_BENCHMARKS"]) - -if common_setup.is_test_run(): - SUITE = ["depot:p01.pddl", "gripper:prob01.pddl", "psr-middle:p01-s17-n2-l2-f30.pddl"] - ENVIRONMENT = LocalEnvironment(processes=1) - -exp = IssueExperiment( - revisions=REVISIONS, - configs=CONFIGS, - environment=ENVIRONMENT, -) -exp.add_suite(BENCHMARKS_DIR, SUITE) - -exp.add_parser(exp.EXITCODE_PARSER) -exp.add_parser(exp.TRANSLATOR_PARSER) -exp.add_parser(exp.SINGLE_SEARCH_PARSER) -exp.add_parser(exp.PLANNER_PARSER) -exp.add_parser("parser.py") - -exp.add_step('build', exp.build) -exp.add_step('start', exp.start_runs) -exp.add_fetcher(name='fetch') - -attributes = (['translator_axioms', - 'translator_derived_variables', - 'translator_axioms_removed', - 'translator_time_done', - 'translator_time_processing_axioms', - 'cost', - 'coverage', - 'error', - 'evaluations', - 'expansions', - 'initial_h_value', - 'generated', - 'memory', - 'planner_memory', - 'planner_time', - 'run_dir', - 'search_time', - 'total_time',]) -exp.add_absolute_report_step(attributes=attributes) -#exp.add_report(DerivedVariableInstances()) - -exp.run_steps() diff --git a/experiments/issue453/common_setup.py b/experiments/issue453/common_setup.py deleted file mode 100644 index e55c332e7c..0000000000 --- a/experiments/issue453/common_setup.py +++ /dev/null @@ -1,387 +0,0 @@ -# -*- coding: utf-8 -*- - -import itertools -import os -import platform -import subprocess -import sys - -from lab.experiment import ARGPARSER -from lab import tools - -from downward.experiment import FastDownwardExperiment -from downward.reports.absolute import 
AbsoluteReport -from downward.reports.compare import ComparativeReport -from downward.reports.scatter import ScatterPlotReport - - -def parse_args(): - ARGPARSER.add_argument( - "--test", - choices=["yes", "no", "auto"], - default="auto", - dest="test_run", - help="test experiment locally on a small suite if --test=yes or " - "--test=auto and we are not on a cluster") - return ARGPARSER.parse_args() - -ARGS = parse_args() - - -DEFAULT_OPTIMAL_SUITE = [ - 'agricola-opt18-strips', 'airport', 'barman-opt11-strips', - 'barman-opt14-strips', 'blocks', 'childsnack-opt14-strips', - 'data-network-opt18-strips', 'depot', 'driverlog', - 'elevators-opt08-strips', 'elevators-opt11-strips', - 'floortile-opt11-strips', 'floortile-opt14-strips', 'freecell', - 'ged-opt14-strips', 'grid', 'gripper', 'hiking-opt14-strips', - 'logistics00', 'logistics98', 'miconic', 'movie', 'mprime', - 'mystery', 'nomystery-opt11-strips', 'openstacks-opt08-strips', - 'openstacks-opt11-strips', 'openstacks-opt14-strips', - 'openstacks-strips', 'organic-synthesis-opt18-strips', - 'organic-synthesis-split-opt18-strips', 'parcprinter-08-strips', - 'parcprinter-opt11-strips', 'parking-opt11-strips', - 'parking-opt14-strips', 'pathways-noneg', 'pegsol-08-strips', - 'pegsol-opt11-strips', 'petri-net-alignment-opt18-strips', - 'pipesworld-notankage', 'pipesworld-tankage', 'psr-small', 'rovers', - 'satellite', 'scanalyzer-08-strips', 'scanalyzer-opt11-strips', - 'snake-opt18-strips', 'sokoban-opt08-strips', - 'sokoban-opt11-strips', 'spider-opt18-strips', 'storage', - 'termes-opt18-strips', 'tetris-opt14-strips', - 'tidybot-opt11-strips', 'tidybot-opt14-strips', 'tpp', - 'transport-opt08-strips', 'transport-opt11-strips', - 'transport-opt14-strips', 'trucks-strips', 'visitall-opt11-strips', - 'visitall-opt14-strips', 'woodworking-opt08-strips', - 'woodworking-opt11-strips', 'zenotravel'] - -DEFAULT_SATISFICING_SUITE = [ - 'agricola-sat18-strips', 'airport', 'assembly', - 'barman-sat11-strips', 
'barman-sat14-strips', 'blocks', - 'caldera-sat18-adl', 'caldera-split-sat18-adl', 'cavediving-14-adl', - 'childsnack-sat14-strips', 'citycar-sat14-adl', - 'data-network-sat18-strips', 'depot', 'driverlog', - 'elevators-sat08-strips', 'elevators-sat11-strips', - 'flashfill-sat18-adl', 'floortile-sat11-strips', - 'floortile-sat14-strips', 'freecell', 'ged-sat14-strips', 'grid', - 'gripper', 'hiking-sat14-strips', 'logistics00', 'logistics98', - 'maintenance-sat14-adl', 'miconic', 'miconic-fulladl', - 'miconic-simpleadl', 'movie', 'mprime', 'mystery', - 'nomystery-sat11-strips', 'nurikabe-sat18-adl', 'openstacks', - 'openstacks-sat08-adl', 'openstacks-sat08-strips', - 'openstacks-sat11-strips', 'openstacks-sat14-strips', - 'openstacks-strips', 'optical-telegraphs', - 'organic-synthesis-sat18-strips', - 'organic-synthesis-split-sat18-strips', 'parcprinter-08-strips', - 'parcprinter-sat11-strips', 'parking-sat11-strips', - 'parking-sat14-strips', 'pathways', 'pathways-noneg', - 'pegsol-08-strips', 'pegsol-sat11-strips', 'philosophers', - 'pipesworld-notankage', 'pipesworld-tankage', 'psr-large', - 'psr-middle', 'psr-small', 'rovers', 'satellite', - 'scanalyzer-08-strips', 'scanalyzer-sat11-strips', 'schedule', - 'settlers-sat18-adl', 'snake-sat18-strips', 'sokoban-sat08-strips', - 'sokoban-sat11-strips', 'spider-sat18-strips', 'storage', - 'termes-sat18-strips', 'tetris-sat14-strips', - 'thoughtful-sat14-strips', 'tidybot-sat11-strips', 'tpp', - 'transport-sat08-strips', 'transport-sat11-strips', - 'transport-sat14-strips', 'trucks', 'trucks-strips', - 'visitall-sat11-strips', 'visitall-sat14-strips', - 'woodworking-sat08-strips', 'woodworking-sat11-strips', - 'zenotravel'] - - -def get_script(): - """Get file name of main script.""" - return tools.get_script_path() - - -def get_script_dir(): - """Get directory of main script. 
- - Usually a relative directory (depends on how it was called by the user.)""" - return os.path.dirname(get_script()) - - -def get_experiment_name(): - """Get name for experiment. - - Derived from the absolute filename of the main script, e.g. - "/ham/spam/eggs.py" => "spam-eggs".""" - script = os.path.abspath(get_script()) - script_dir = os.path.basename(os.path.dirname(script)) - script_base = os.path.splitext(os.path.basename(script))[0] - return "%s-%s" % (script_dir, script_base) - - -def get_data_dir(): - """Get data dir for the experiment. - - This is the subdirectory "data" of the directory containing - the main script.""" - return os.path.join(get_script_dir(), "data", get_experiment_name()) - - -def get_repo_base(): - """Get base directory of the repository, as an absolute path. - - Search upwards in the directory tree from the main script until a - directory with a subdirectory named ".git" is found. - - Abort if the repo base cannot be found.""" - path = os.path.abspath(get_script_dir()) - while os.path.dirname(path) != path: - if os.path.exists(os.path.join(path, ".git")): - return path - path = os.path.dirname(path) - sys.exit("repo base could not be found") - - -def is_running_on_cluster(): - node = platform.node() - return node.endswith(".scicore.unibas.ch") or node.endswith(".cluster.bc2.ch") - - -def is_test_run(): - return ARGS.test_run == "yes" or ( - ARGS.test_run == "auto" and not is_running_on_cluster()) - - -def get_algo_nick(revision, config_nick): - return "{revision}-{config_nick}".format(**locals()) - - -class IssueConfig(object): - """Hold information about a planner configuration. - - See FastDownwardExperiment.add_algorithm() for documentation of the - constructor's options. 
- - """ - def __init__(self, nick, component_options, - build_options=None, driver_options=None): - self.nick = nick - self.component_options = component_options - self.build_options = build_options - self.driver_options = driver_options - - -class IssueExperiment(FastDownwardExperiment): - """Subclass of FastDownwardExperiment with some convenience features.""" - - DEFAULT_TEST_SUITE = ["depot:p01.pddl", "gripper:prob01.pddl"] - - DEFAULT_TABLE_ATTRIBUTES = [ - "cost", - "coverage", - "error", - "evaluations", - "expansions", - "expansions_until_last_jump", - "generated", - "memory", - "planner_memory", - "planner_time", - "quality", - "run_dir", - "score_evaluations", - "score_expansions", - "score_generated", - "score_memory", - "score_search_time", - "score_total_time", - "search_time", - "total_time", - ] - - DEFAULT_SCATTER_PLOT_ATTRIBUTES = [ - "evaluations", - "expansions", - "expansions_until_last_jump", - "initial_h_value", - "memory", - "search_time", - "total_time", - ] - - PORTFOLIO_ATTRIBUTES = [ - "cost", - "coverage", - "error", - "plan_length", - "run_dir", - ] - - def __init__(self, revisions=None, configs=None, path=None, **kwargs): - """ - - You can either specify both *revisions* and *configs* or none - of them. If they are omitted, you will need to call - exp.add_algorithm() manually. - - If *revisions* is given, it must be a non-empty list of - revision identifiers, which specify which planner versions to - use in the experiment. The same versions are used for - translator, preprocessor and search. :: - - IssueExperiment(revisions=["issue123", "4b3d581643"], ...) - - If *configs* is given, it must be a non-empty list of - IssueConfig objects. :: - - IssueExperiment(..., configs=[ - IssueConfig("ff", ["--search", "eager_greedy(ff())"]), - IssueConfig( - "lama", [], - driver_options=["--alias", "seq-sat-lama-2011"]), - ]) - - If *path* is specified, it must be the path to where the - experiment should be built (e.g. 
- /home/john/experiments/issue123/exp01/). If omitted, the - experiment path is derived automatically from the main - script's filename. Example:: - - script = experiments/issue123/exp01.py --> - path = experiments/issue123/data/issue123-exp01/ - - """ - - path = path or get_data_dir() - - FastDownwardExperiment.__init__(self, path=path, **kwargs) - - if (revisions and not configs) or (not revisions and configs): - raise ValueError( - "please provide either both or none of revisions and configs") - - for rev in revisions: - for config in configs: - self.add_algorithm( - get_algo_nick(rev, config.nick), - get_repo_base(), - rev, - config.component_options, - build_options=config.build_options, - driver_options=config.driver_options) - - self._revisions = revisions - self._configs = configs - - @classmethod - def _is_portfolio(cls, config_nick): - return "fdss" in config_nick - - @classmethod - def get_supported_attributes(cls, config_nick, attributes): - if cls._is_portfolio(config_nick): - return [attr for attr in attributes - if attr in cls.PORTFOLIO_ATTRIBUTES] - return attributes - - def add_absolute_report_step(self, **kwargs): - """Add step that makes an absolute report. - - Absolute reports are useful for experiments that don't compare - revisions. - - The report is written to the experiment evaluation directory. - - All *kwargs* will be passed to the AbsoluteReport class. If the - keyword argument *attributes* is not specified, a default list - of attributes is used. :: - - exp.add_absolute_report_step(attributes=["coverage"]) - - """ - kwargs.setdefault("attributes", self.DEFAULT_TABLE_ATTRIBUTES) - report = AbsoluteReport(**kwargs) - outfile = os.path.join( - self.eval_dir, - get_experiment_name() + "." 
+ report.output_format) - self.add_report(report, outfile=outfile) - self.add_step( - 'publish-absolute-report', subprocess.call, ['publish', outfile]) - - def add_comparison_table_step(self, **kwargs): - """Add a step that makes pairwise revision comparisons. - - Create comparative reports for all pairs of Fast Downward - revisions. Each report pairs up the runs of the same config and - lists the two absolute attribute values and their difference - for all attributes in kwargs["attributes"]. - - All *kwargs* will be passed to the CompareConfigsReport class. - If the keyword argument *attributes* is not specified, a - default list of attributes is used. :: - - exp.add_comparison_table_step(attributes=["coverage"]) - - """ - kwargs.setdefault("attributes", self.DEFAULT_TABLE_ATTRIBUTES) - - def make_comparison_tables(): - for rev1, rev2 in itertools.combinations(self._revisions, 2): - compared_configs = [] - for config in self._configs: - config_nick = config.nick - compared_configs.append( - ("%s-%s" % (rev1, config_nick), - "%s-%s" % (rev2, config_nick), - "Diff (%s)" % config_nick)) - report = ComparativeReport(compared_configs, **kwargs) - outfile = os.path.join( - self.eval_dir, - "%s-%s-%s-compare.%s" % ( - self.name, rev1, rev2, report.output_format)) - report(self.eval_dir, outfile) - - def publish_comparison_tables(): - for rev1, rev2 in itertools.combinations(self._revisions, 2): - outfile = os.path.join( - self.eval_dir, - "%s-%s-%s-compare.html" % (self.name, rev1, rev2)) - subprocess.call(["publish", outfile]) - - self.add_step("make-comparison-tables", make_comparison_tables) - self.add_step( - "publish-comparison-tables", publish_comparison_tables) - - def add_scatter_plot_step(self, attributes=None): - """Add step creating (relative) scatter plots for all revision pairs. - - Create a scatter plot for each combination of attribute, - configuration and revisions pair. If *attributes* is not - specified, a list of common scatter plot attributes is used. 
- For portfolios all attributes except "cost", "coverage" and - "plan_length" will be ignored. :: - - exp.add_scatter_plot_step(attributes=["expansions"]) - - """ - - report_class = ScatterPlotReport - scatter_dir = os.path.join(self.eval_dir, "scatter-absolute") - step_name = "make-absolute-scatter-plots" - if attributes is None: - attributes = self.DEFAULT_SCATTER_PLOT_ATTRIBUTES - - def make_scatter_plot(config_nick, rev1, rev2, attribute): - name = "-".join([self.name, rev1, rev2, attribute, config_nick]) - print("Make scatter plot for", name) - algo1 = get_algo_nick(rev1, config_nick) - algo2 = get_algo_nick(rev2, config_nick) - report = report_class( - filter_algorithm=[algo1, algo2], - attributes=[attribute], - get_category=lambda run1, run2: run1["domain"]) - report( - self.eval_dir, - os.path.join(scatter_dir, rev1 + "-" + rev2, name)) - - def make_scatter_plots(): - for config in self._configs: - for rev1, rev2 in itertools.combinations(self._revisions, 2): - for attribute in self.get_supported_attributes( - config.nick, attributes): - make_scatter_plot(config.nick, rev1, rev2, attribute) - - self.add_step(step_name, make_scatter_plots) diff --git a/experiments/issue453/derived_variables_instances.py b/experiments/issue453/derived_variables_instances.py deleted file mode 100644 index 6db554e296..0000000000 --- a/experiments/issue453/derived_variables_instances.py +++ /dev/null @@ -1,24 +0,0 @@ - -from downward.reports import PlanningReport - -DERIVED_VARIABLES_SUITE=['airport-adl', - 'assembly', - 'miconic-fulladl', - 'openstacks', - 'openstacks-opt08-adl', - 'openstacks-sat08-adl', - 'optical-telegraphs', - 'philosophers', - 'psr-large', - 'psr-middle', - 'trucks'] - -class DerivedVariableInstances(PlanningReport): - def get_text(self): - selected_runs = [] - for (dom, prob), runs in self.problem_runs.items(): - for run in runs: - if run.get("translator_derived_variables") > 0: - selected_runs.append((dom, prob)) - - return 
"\n".join(["{}:{},".format(*item) for item in selected_runs]) diff --git a/experiments/issue453/requirements.txt b/experiments/issue453/requirements.txt deleted file mode 100644 index 99216707ad..0000000000 --- a/experiments/issue453/requirements.txt +++ /dev/null @@ -1 +0,0 @@ -lab==6.0 diff --git a/experiments/issue453/v1.py b/experiments/issue453/v1.py deleted file mode 100755 index a1287a048c..0000000000 --- a/experiments/issue453/v1.py +++ /dev/null @@ -1,85 +0,0 @@ -#! /usr/bin/env python -# -*- coding: utf-8 -*- - -import itertools -import os - -from lab.environments import LocalEnvironment, BaselSlurmEnvironment - -from downward.reports.compare import ComparativeReport - -import common_setup -from common_setup import IssueConfig, IssueExperiment -from relativescatter import RelativeScatterPlotReport -from derived_variables_instances import DerivedVariableInstances, DERIVED_VARIABLES_SUITE - -DIR = os.path.dirname(os.path.abspath(__file__)) -SCRIPT_NAME = os.path.splitext(os.path.basename(__file__))[0] -BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"] -REVISIONS = ["issue453-v1"] -CONFIGS = [] -# Add heuristics using axioms -HEURISTICS = ['ff', 'cg', 'cea', 'add'] -for h in HEURISTICS: - CONFIGS.append(IssueConfig(h+"-normal-axiom-rules", ["--evaluator", "heur=%s" % h, - "--search", "lazy_greedy([heur], preferred=[heur])"])) - CONFIGS.append(IssueConfig(h+"-overapprox-axiom-rules", ["--evaluator", "heur=%s" % h, - "--search", "lazy_greedy([heur], preferred=[heur])", - "--translate-options", "--overapproximate-axioms"]),) -# Add lama-first -CONFIGS.append(IssueConfig("lama-normal-axiom-rules", [], driver_options=["--alias", "lama-first"])) -CONFIGS.append(IssueConfig("lama-overapprox-axiom-rules", ["--translate-options", "--overapproximate-axioms"], - driver_options=["--alias", "lama-first"]),) -# Add A* with blind -CONFIGS.append(IssueConfig("blind-normal-axiom-rules", ["--search", "astar(blind)"])) 
-CONFIGS.append(IssueConfig("blind-overapprox-axiom-rules", ["--search", "astar(blind)", - "--translate-options", "--overapproximate-axioms"]),) - -SUITE = DERIVED_VARIABLES_SUITE -ENVIRONMENT = BaselSlurmEnvironment( - partition="infai_1", - export=["PATH", "DOWNWARD_BENCHMARKS"]) - -if common_setup.is_test_run(): - SUITE = ["depot:p01.pddl", "gripper:prob01.pddl", "psr-middle:p01-s17-n2-l2-f30.pddl"] - ENVIRONMENT = LocalEnvironment(processes=1) - -exp = IssueExperiment( - revisions=REVISIONS, - configs=CONFIGS, - environment=ENVIRONMENT, -) -exp.add_suite(BENCHMARKS_DIR, SUITE) - -exp.add_parser(exp.EXITCODE_PARSER) -exp.add_parser(exp.TRANSLATOR_PARSER) -exp.add_parser(exp.SINGLE_SEARCH_PARSER) -exp.add_parser(exp.PLANNER_PARSER) -exp.add_parser("parser.py") - -exp.add_step('build', exp.build) -exp.add_step('start', exp.start_runs) -exp.add_fetcher(name='fetch') - -attributes = (['translator_axioms', - 'translator_derived_variables', - 'translator_axioms_removed', - 'translator_time_done', - 'translator_time_processing_axioms', - 'cost', - 'coverage', - 'error', - 'evaluations', - 'expansions', - 'initial_h_value', - 'generated', - 'memory', - 'planner_memory', - 'planner_time', - 'run_dir', - 'search_time', - 'total_time',]) -exp.add_absolute_report_step(attributes=attributes) -#exp.add_report(DerivedVariableInstances()) - -exp.run_steps() diff --git a/experiments/issue453/v2-custom-pddls.py b/experiments/issue453/v2-custom-pddls.py deleted file mode 100755 index 995bd0b915..0000000000 --- a/experiments/issue453/v2-custom-pddls.py +++ /dev/null @@ -1,115 +0,0 @@ -#! 
/usr/bin/env python -# -*- coding: utf-8 -*- - -import itertools -import os - -from lab.environments import LocalEnvironment, BaselSlurmEnvironment - -from downward.reports.compare import ComparativeReport -from downward.reports.absolute import AbsoluteReport - -import common_setup -from common_setup import IssueConfig, IssueExperiment -#from relativescatter import RelativeScatterPlotReport -from derived_variables_instances import DerivedVariableInstances, DERIVED_VARIABLES_SUITE - -DIR = os.path.dirname(os.path.abspath(__file__)) -SCRIPT_NAME = os.path.splitext(os.path.basename(__file__))[0] -BENCHMARKS_DIR = '/infai/simsal00/translate-pddls' -REVISIONS = ["tip"] -CONFIGS = [] -# Add heuristics using axioms -HEURISTICS = ['ff', 'blind'] -LAYER_STRATEGY = ['max', 'min'] -OVERAPPROXIMATE = ['none','cycles','all'] -KEEP_REDUNDANT_POSITIVE_AXIOMS = [True, False] -NECESSARY_LITERALS = ['exact', 'non-derived', 'positive'] - -for h in HEURISTICS: - for ls in LAYER_STRATEGY: - for overapprox in OVERAPPROXIMATE: - for rd in KEEP_REDUNDANT_POSITIVE_AXIOMS: - for lit in NECESSARY_LITERALS: - options = ["--evaluator", "heur=%s" % h, "--search", "lazy_greedy([heur], preferred=[heur])", "--translate-options"] - options += ["--layer_strategy", ls] - options += ["--overapproximate_negated_axioms", overapprox] - options += ["--overapproximate_necessary_literals", lit] - name = "%s-%s-%s-%s" % (h,ls,overapprox,lit) - if rd: - options += ["--keep_redundant_positive_axioms"] - name += '-kr' - CONFIGS.append(IssueConfig(name, options)) -#for h in HEURISTICS: -# CONFIGS.append(IssueConfig(h+"-min-layers", ["--evaluator", "heur=%s" % h, -# "--search", "lazy_greedy([heur], preferred=[heur])", -# "--translate-options", "--layer_strategy", "min"]),) -# CONFIGS.append(IssueConfig(h+"-max-layers", ["--evaluator", "heur=%s" % h, -# "--search", "lazy_greedy([heur], preferred=[heur])", -# "--translate-options", "--layer_strategy", "max"]),) -# Add A* with blind 
-#CONFIGS.append(IssueConfig("blind-min-layers", ["--search", "astar(blind)", -# "--translate-options", "--layer_strategy", "min"]),) -#CONFIGS.append(IssueConfig("blind-max-layers", ["--search", "astar(blind)", -# "--translate-options", "--layer_strategy", "max"]),) - -#SUITE = ["psr-middle:p01-s17-n2-l2-f30.pddl"] -SUITE = ["bwnc", "citycar", "cups", "failed-negation", "graph", "layers", "mst"] -ENVIRONMENT = BaselSlurmEnvironment( - partition="infai_2", - export=["PATH", "DOWNWARD_BENCHMARKS"]) - -if common_setup.is_test_run(): - SUITE = ["depot:p01.pddl", "gripper:prob01.pddl", "psr-middle:p01-s17-n2-l2-f30.pddl"] - ENVIRONMENT = LocalEnvironment(processes=1) - -exp = IssueExperiment( - revisions=REVISIONS, - configs=CONFIGS, - environment=ENVIRONMENT, -) -exp.add_suite(BENCHMARKS_DIR, SUITE) - -exp.add_parser(exp.EXITCODE_PARSER) -exp.add_parser(exp.TRANSLATOR_PARSER) -exp.add_parser(exp.SINGLE_SEARCH_PARSER) -exp.add_parser(exp.PLANNER_PARSER) -exp.add_parser("parser.py") - -exp.add_step('build', exp.build) -exp.add_step('start', exp.start_runs) -exp.add_fetcher(name='fetch') - -attributes = (['translator_axioms', - 'translator_derived_variables', - 'translator_axioms_removed', - 'translator_task_size', - 'translator_time_done', - 'translator_time_processing_axioms', - 'cost', - 'coverage', - 'error', - 'evaluations', - 'expansions', - 'initial_h_value', - 'generated', - 'memory', - 'planner_memory', - 'planner_time', - 'run_dir', - 'search_time', - 'total_time',]) - -def get_keep_redundant_pairs(): - pairs = [] - for h in HEURISTICS: - for ls in LAYER_STRATEGY: - for overapprox in OVERAPPROXIMATE: - for lit in NECESSARY_LITERALS: - pairs.append(("tip-%s-%s-%s-%s" % (h,ls,overapprox,lit), "tip-%s-%s-%s-%s-kr" % (h,ls, overapprox,lit))) - return pairs - -exp.add_absolute_report_step(attributes=attributes) -exp.add_report(ComparativeReport(get_keep_redundant_pairs(), attributes=attributes), outfile="issue453-v2-compare_keep_redundant.html") - -exp.run_steps() 
diff --git a/experiments/issue453/v2.py b/experiments/issue453/v2.py deleted file mode 100755 index 69bf4053f0..0000000000 --- a/experiments/issue453/v2.py +++ /dev/null @@ -1,115 +0,0 @@ -#! /usr/bin/env python -# -*- coding: utf-8 -*- - -import itertools -import os - -from lab.environments import LocalEnvironment, BaselSlurmEnvironment - -from downward.reports.compare import ComparativeReport -from downward.reports.absolute import AbsoluteReport - -import common_setup -from common_setup import IssueConfig, IssueExperiment -from relativescatter import RelativeScatterPlotReport -from derived_variables_instances import DerivedVariableInstances, DERIVED_VARIABLES_SUITE - -DIR = os.path.dirname(os.path.abspath(__file__)) -SCRIPT_NAME = os.path.splitext(os.path.basename(__file__))[0] -BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"] -REVISIONS = ["tip"] -CONFIGS = [] -# Add heuristics using axioms -HEURISTICS = ['ff', 'blind'] -LAYER_STRATEGY = ['max', 'min'] -OVERAPPROXIMATE = ['none','cycles','all'] -KEEP_REDUNDANT_POSITIVE_AXIOMS = [True, False] -NECESSARY_LITERALS = ['exact', 'non-derived', 'positive'] - -for h in HEURISTICS: - for ls in LAYER_STRATEGY: - for overapprox in OVERAPPROXIMATE: - for rd in KEEP_REDUNDANT_POSITIVE_AXIOMS: - for lit in NECESSARY_LITERALS: - options = ["--evaluator", "heur=%s" % h, "--search", "lazy_greedy([heur], preferred=[heur])", "--translate-options"] - options += ["--layer_strategy", ls] - options += ["--overapproximate_negated_axioms", overapprox] - options += ["--overapproximate_necessary_literals", lit] - name = "%s-%s-%s-%s" % (h,ls,overapprox,lit) - if rd: - options += ["--keep_redundant_positive_axioms"] - name += '-kr' - CONFIGS.append(IssueConfig(name, options)) -#for h in HEURISTICS: -# CONFIGS.append(IssueConfig(h+"-min-layers", ["--evaluator", "heur=%s" % h, -# "--search", "lazy_greedy([heur], preferred=[heur])", -# "--translate-options", "--layer_strategy", "min"]),) -# CONFIGS.append(IssueConfig(h+"-max-layers", 
["--evaluator", "heur=%s" % h, -# "--search", "lazy_greedy([heur], preferred=[heur])", -# "--translate-options", "--layer_strategy", "max"]),) -# Add A* with blind -#CONFIGS.append(IssueConfig("blind-min-layers", ["--search", "astar(blind)", -# "--translate-options", "--layer_strategy", "min"]),) -#CONFIGS.append(IssueConfig("blind-max-layers", ["--search", "astar(blind)", -# "--translate-options", "--layer_strategy", "max"]),) - -#SUITE = ["psr-middle:p01-s17-n2-l2-f30.pddl"] -SUITE = DERIVED_VARIABLES_SUITE -ENVIRONMENT = BaselSlurmEnvironment( - partition="infai_2", - export=["PATH", "DOWNWARD_BENCHMARKS"]) - -if common_setup.is_test_run(): - SUITE = ["depot:p01.pddl", "gripper:prob01.pddl", "psr-middle:p01-s17-n2-l2-f30.pddl"] - ENVIRONMENT = LocalEnvironment(processes=1) - -exp = IssueExperiment( - revisions=REVISIONS, - configs=CONFIGS, - environment=ENVIRONMENT, -) -exp.add_suite(BENCHMARKS_DIR, SUITE) - -exp.add_parser(exp.EXITCODE_PARSER) -exp.add_parser(exp.TRANSLATOR_PARSER) -exp.add_parser(exp.SINGLE_SEARCH_PARSER) -exp.add_parser(exp.PLANNER_PARSER) -exp.add_parser("parser.py") - -exp.add_step('build', exp.build) -exp.add_step('start', exp.start_runs) -exp.add_fetcher(name='fetch') - -attributes = (['translator_axioms', - 'translator_derived_variables', - 'translator_axioms_removed', - 'translator_task_size', - 'translator_time_done', - 'translator_time_processing_axioms', - 'cost', - 'coverage', - 'error', - 'evaluations', - 'expansions', - 'initial_h_value', - 'generated', - 'memory', - 'planner_memory', - 'planner_time', - 'run_dir', - 'search_time', - 'total_time',]) - -def get_keep_redundant_pairs(): - pairs = [] - for h in HEURISTICS: - for ls in LAYER_STRATEGY: - for overapprox in OVERAPPROXIMATE: - for lit in NECESSARY_LITERALS: - pairs.append(("tip-%s-%s-%s-%s" % (h,ls,overapprox,lit), "tip-%s-%s-%s-%s-kr" % (h,ls, overapprox,lit))) - return pairs - -exp.add_absolute_report_step(attributes=attributes) 
-exp.add_report(ComparativeReport(get_keep_redundant_pairs(), attributes=attributes), outfile="issue453-v2-compare_keep_redundant.html") - -exp.run_steps() diff --git a/experiments/issue453/v3.py b/experiments/issue453/v3.py deleted file mode 100755 index 649c692611..0000000000 --- a/experiments/issue453/v3.py +++ /dev/null @@ -1,85 +0,0 @@ -#! /usr/bin/env python -# -*- coding: utf-8 -*- - -import itertools -import os - -from lab.environments import LocalEnvironment, BaselSlurmEnvironment - -from downward.reports.compare import ComparativeReport -from downward.reports.absolute import AbsoluteReport - -import common_setup -from common_setup import IssueConfig, IssueExperiment, get_repo_base -from derived_variables_instances import DerivedVariableInstances, DERIVED_VARIABLES_SUITE - -DIR = os.path.dirname(os.path.abspath(__file__)) -SCRIPT_NAME = os.path.splitext(os.path.basename(__file__))[0] -BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"] -BENCHMARKS_DIR_CUSTOM = "/infai/simsal00/translate-pddls" -SUITE = DERIVED_VARIABLES_SUITE -SUITE_CUSTOM = ["bwnc", "citycar", "cups", "failed-negation", "graph", "layers", "mst"] - - -CONFIGS = {'blind': (["--search", "astar(blind())"],[],[]), - 'ff-eager': (["--evaluator", "heur=ff", "--search", - "eager_greedy([heur], preferred=[heur])"],[],[]), - 'ff-lazy': (["--evaluator", "heur=ff", "--search", - "lazy_greedy([heur], preferred=[heur])"],[],[]), - 'lama-first': ([],[],["--alias", "lama-first"]) } - -ENVIRONMENT = BaselSlurmEnvironment( - partition="infai_2", - export=["PATH", "DOWNWARD_BENCHMARKS"]) - -if common_setup.is_test_run(): - SUITE = ["depot:p01.pddl", "gripper:prob01.pddl", "psr-middle:p01-s17-n2-l2-f30.pddl"] - ENVIRONMENT = LocalEnvironment(processes=1) - -exp = IssueExperiment(environment=ENVIRONMENT, revisions=[], configs=[]) -exp.add_suite(BENCHMARKS_DIR, SUITE) -exp.add_suite(BENCHMARKS_DIR_CUSTOM, SUITE_CUSTOM) -#exp.add_suite(BENCHMARKS_DIR, ["psr-middle:p01-s17-n2-l2-f30.pddl"]) - -for name, config in 
CONFIGS.items(): - exp.add_algorithm(name+'-base',get_repo_base(),'issue453-base',config[0],config[1],config[2]) - exp.add_algorithm(name+'-v3',get_repo_base(),'issue453-v3',config[0],config[1],config[2]) - exp.add_algorithm(name+'-v3-max-layers',get_repo_base(),'issue453-v3',config[0]+['--translate-options', '--layer-strategy=max'], config[1],config[2]) - - -exp.add_parser(exp.EXITCODE_PARSER) -exp.add_parser(exp.TRANSLATOR_PARSER) -exp.add_parser(exp.SINGLE_SEARCH_PARSER) -exp.add_parser(exp.PLANNER_PARSER) -exp.add_parser("parser.py") - -exp.add_step('build', exp.build) -exp.add_step('start', exp.start_runs) -exp.add_fetcher(name='fetch') - -attributes = (['translator_axioms', - 'translator_derived_variables', - 'translator_axioms_removed', - 'translator_task_size', - 'translator_time_done', - 'translator_time_processing_axioms', - 'cost', - 'coverage', - 'error', - 'evaluations', - 'expansions', - 'initial_h_value', - 'generated', - 'memory', - 'planner_memory', - 'planner_time', - 'run_dir', - 'search_time', - 'total_time', - 'score_evaluations', - 'score_search_time', - 'score_total_time', - ]) -exp.add_absolute_report_step(attributes=attributes) - -exp.run_steps() diff --git a/experiments/issue453/v4.py b/experiments/issue453/v4.py deleted file mode 100755 index e05d464b38..0000000000 --- a/experiments/issue453/v4.py +++ /dev/null @@ -1,85 +0,0 @@ -#! 
/usr/bin/env python -# -*- coding: utf-8 -*- - -import itertools -import os - -from lab.environments import LocalEnvironment, BaselSlurmEnvironment - -from downward.reports.compare import ComparativeReport -from downward.reports.absolute import AbsoluteReport - -import common_setup -from common_setup import IssueConfig, IssueExperiment, get_repo_base -from derived_variables_instances import DerivedVariableInstances, DERIVED_VARIABLES_SUITE - -DIR = os.path.dirname(os.path.abspath(__file__)) -SCRIPT_NAME = os.path.splitext(os.path.basename(__file__))[0] -BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"] -BENCHMARKS_DIR_CUSTOM = "/infai/simsal00/translate-pddls" -SUITE = DERIVED_VARIABLES_SUITE -SUITE_CUSTOM = ["bwnc", "citycar", "cups", "failed-negation", "graph", "layers", "mst"] - - -CONFIGS = {'blind': (["--search", "astar(blind())"],[],[]), - 'ff-eager': (["--evaluator", "heur=ff", "--search", - "eager_greedy([heur], preferred=[heur])"],[],[]), - 'ff-lazy': (["--evaluator", "heur=ff", "--search", - "lazy_greedy([heur], preferred=[heur])"],[],[]), - 'lama-first': ([],[],["--alias", "lama-first"]) } - -ENVIRONMENT = BaselSlurmEnvironment( - partition="infai_2", - export=["PATH", "DOWNWARD_BENCHMARKS"]) - -if common_setup.is_test_run(): - SUITE = ["depot:p01.pddl", "gripper:prob01.pddl", "psr-middle:p01-s17-n2-l2-f30.pddl"] - ENVIRONMENT = LocalEnvironment(processes=1) - -exp = IssueExperiment(environment=ENVIRONMENT, revisions=[], configs=[]) -exp.add_suite(BENCHMARKS_DIR, SUITE) -exp.add_suite(BENCHMARKS_DIR_CUSTOM, SUITE_CUSTOM) -#exp.add_suite(BENCHMARKS_DIR, ["psr-middle:p01-s17-n2-l2-f30.pddl"]) - -for name, config in CONFIGS.items(): - exp.add_algorithm(name+'-base',get_repo_base(),'issue453-base',config[0],config[1],config[2]) - exp.add_algorithm(name+'-v4',get_repo_base(),'issue453-v4',config[0],config[1],config[2]) - exp.add_algorithm(name+'-v4-max-layers',get_repo_base(),'issue453-v4',config[0]+['--translate-options', '--layer-strategy=max'], 
config[1],config[2]) - - -exp.add_parser(exp.EXITCODE_PARSER) -exp.add_parser(exp.TRANSLATOR_PARSER) -exp.add_parser(exp.SINGLE_SEARCH_PARSER) -exp.add_parser(exp.PLANNER_PARSER) -exp.add_parser("parser.py") - -exp.add_step('build', exp.build) -exp.add_step('start', exp.start_runs) -exp.add_fetcher(name='fetch') - -attributes = (['translator_axioms', - 'translator_derived_variables', - 'translator_axioms_removed', - 'translator_task_size', - 'translator_time_done', - 'translator_time_processing_axioms', - 'cost', - 'coverage', - 'error', - 'evaluations', - 'expansions', - 'initial_h_value', - 'generated', - 'memory', - 'planner_memory', - 'planner_time', - 'run_dir', - 'search_time', - 'total_time', - 'score_evaluations', - 'score_search_time', - 'score_total_time', - ]) -exp.add_absolute_report_step(attributes=attributes) - -exp.run_steps() diff --git a/experiments/issue453/v5.py b/experiments/issue453/v5.py deleted file mode 100755 index 224fd940ab..0000000000 --- a/experiments/issue453/v5.py +++ /dev/null @@ -1,91 +0,0 @@ -#! 
/usr/bin/env python -# -*- coding: utf-8 -*- - -import itertools -import os - -from lab.environments import LocalEnvironment, BaselSlurmEnvironment - -from downward.reports.compare import ComparativeReport -from downward.reports.absolute import AbsoluteReport - -import common_setup -from common_setup import IssueConfig, IssueExperiment, get_repo_base -from derived_variables_instances import DerivedVariableInstances, DERIVED_VARIABLES_SUITE - -DIR = os.path.dirname(os.path.abspath(__file__)) -SCRIPT_NAME = os.path.splitext(os.path.basename(__file__))[0] -BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"] -BENCHMARKS_DIR_CUSTOM = "/infai/simsal00/translate-pddls" -SUITE = DERIVED_VARIABLES_SUITE -SUITE_CUSTOM = ["bwnc", "citycar", "cups", "failed-negation", "graph", "layers", "mst"] - - -CONFIGS = {'blind': (["--search", "astar(blind())"],[],[]), - 'ff-eager': (["--evaluator", "heur=ff", "--search", - "eager_greedy([heur], preferred=[heur])"],[],[]), - 'ff-lazy': (["--evaluator", "heur=ff", "--search", - "lazy_greedy([heur], preferred=[heur])"],[],[]), - 'lama-first': ([],[],["--alias", "lama-first"]) } - -ENVIRONMENT = BaselSlurmEnvironment( - partition="infai_2", - export=["PATH", "DOWNWARD_BENCHMARKS"]) - -if common_setup.is_test_run(): - SUITE = ["depot:p01.pddl", "gripper:prob01.pddl", "psr-middle:p01-s17-n2-l2-f30.pddl"] - ENVIRONMENT = LocalEnvironment(processes=1) - -exp = IssueExperiment(environment=ENVIRONMENT, revisions=[], configs=[]) -exp.add_suite(BENCHMARKS_DIR, SUITE) -exp.add_suite(BENCHMARKS_DIR_CUSTOM, SUITE_CUSTOM) -#exp.add_suite(BENCHMARKS_DIR, ["psr-middle:p01-s17-n2-l2-f30.pddl"]) - -for name, config in CONFIGS.items(): - exp.add_algorithm(name+'-base',get_repo_base(),'issue453-base',config[0],config[1],config[2]) - exp.add_algorithm(name+'-v5',get_repo_base(),'issue453-v5',config[0],config[1],config[2]) - exp.add_algorithm(name+'-v5-max-layers',get_repo_base(),'issue453-v5',config[0]+['--translate-options', '--layer-strategy=max'], 
config[1],config[2]) - - -exp.add_parser(exp.EXITCODE_PARSER) -exp.add_parser(exp.TRANSLATOR_PARSER) -exp.add_parser(exp.SINGLE_SEARCH_PARSER) -exp.add_parser(exp.PLANNER_PARSER) -exp.add_parser("parser.py") - -exp.add_step('build', exp.build) -exp.add_step('start', exp.start_runs) -exp.add_fetcher(name='fetch') - -exp.add_parse_again_step() - -attributes = (['translator_axioms', - 'translator_derived_variables', - 'translator_axioms_removed', - 'translator_task_size', - 'translator_time_done', - 'translator_time_processing_axioms', - 'translator_time_computing_negative_axioms', - 'translator_time_simplifying_axioms', - 'translator_simplified_axioms', - 'cost', - 'coverage', - 'error', - 'evaluations', - 'expansions', - 'expansions_until_last_jump', - 'initial_h_value', - 'generated', - 'memory', - 'planner_memory', - 'planner_time', - 'run_dir', - 'search_time', - 'total_time', - 'score_evaluations', - 'score_search_time', - 'score_total_time', - ]) -exp.add_absolute_report_step(attributes=attributes) - -exp.run_steps() diff --git a/experiments/issue455/common_setup.py b/experiments/issue455/common_setup.py deleted file mode 100644 index 6989ef8686..0000000000 --- a/experiments/issue455/common_setup.py +++ /dev/null @@ -1,357 +0,0 @@ -# -*- coding: utf-8 -*- - -import itertools -import os -import platform -import sys - -from lab.environments import LocalEnvironment, MaiaEnvironment -from lab.experiment import ARGPARSER -from lab.steps import Step - -from downward.experiments import DownwardExperiment, _get_rev_nick -from downward.checkouts import Translator, Preprocessor, Planner -from downward.reports.absolute import AbsoluteReport -from downward.reports.compare import CompareRevisionsReport -from downward.reports.scatter import ScatterPlotReport - - -def parse_args(): - ARGPARSER.add_argument( - "--test", - choices=["yes", "no", "auto"], - default="auto", - dest="test_run", - help="test experiment locally on a small suite if --test=yes or " - "--test=auto and we 
are not on a cluster") - return ARGPARSER.parse_args() - -ARGS = parse_args() - - -def get_script(): - """Get file name of main script.""" - import __main__ - return __main__.__file__ - - -def get_script_dir(): - """Get directory of main script. - - Usually a relative directory (depends on how it was called by the user.)""" - return os.path.dirname(get_script()) - - -def get_experiment_name(): - """Get name for experiment. - - Derived from the absolute filename of the main script, e.g. - "/ham/spam/eggs.py" => "spam-eggs".""" - script = os.path.abspath(get_script()) - script_dir = os.path.basename(os.path.dirname(script)) - script_base = os.path.splitext(os.path.basename(script))[0] - return "%s-%s" % (script_dir, script_base) - - -def get_data_dir(): - """Get data dir for the experiment. - - This is the subdirectory "data" of the directory containing - the main script.""" - return os.path.join(get_script_dir(), "data", get_experiment_name()) - - -def get_repo_base(): - """Get base directory of the repository, as an absolute path. - - Search upwards in the directory tree from the main script until a - directory with a subdirectory named ".hg" is found. 
- - Abort if the repo base cannot be found.""" - path = os.path.abspath(get_script_dir()) - while os.path.dirname(path) != path: - if os.path.exists(os.path.join(path, ".hg")): - return path - path = os.path.dirname(path) - sys.exit("repo base could not be found") - - -def is_running_on_cluster(): - node = platform.node() - return ("cluster" in node or - node.startswith("gkigrid") or - node in ["habakuk", "turtur"]) - - -def is_test_run(): - return ARGS.test_run == "yes" or (ARGS.test_run == "auto" and - not is_running_on_cluster()) - - -class IssueExperiment(DownwardExperiment): - """Wrapper for DownwardExperiment with a few convenience features.""" - - DEFAULT_TEST_SUITE = "gripper:prob01.pddl" - - DEFAULT_TABLE_ATTRIBUTES = [ - "cost", - "coverage", - "error", - "evaluations", - "expansions", - "expansions_until_last_jump", - "generated", - "memory", - "quality", - "run_dir", - "score_evaluations", - "score_expansions", - "score_generated", - "score_memory", - "score_search_time", - "score_total_time", - "search_time", - "total_time", - ] - - DEFAULT_SCATTER_PLOT_ATTRIBUTES = [ - "evaluations", - "expansions", - "expansions_until_last_jump", - "initial_h_value", - "memory", - "search_time", - "total_time", - ] - - PORTFOLIO_ATTRIBUTES = [ - "cost", - "coverage", - "error", - "plan_length", - "run_dir", - ] - - def __init__(self, configs, suite, grid_priority=None, path=None, - repo=None, revisions=None, search_revisions=None, - test_suite=None, **kwargs): - """Create a DownwardExperiment with some convenience features. - - *configs* must be a non-empty dict of {nick: cmdline} pairs - that sets the planner configurations to test. :: - - IssueExperiment(configs={ - "lmcut": ["--search", "astar(lmcut())"], - "ipdb": ["--search", "astar(ipdb())"]}) - - *suite* sets the benchmarks for the experiment. It must be a - single string or a list of strings specifying domains or - tasks. The downward.suites module has many predefined - suites. 
:: - - IssueExperiment(suite=["grid", "gripper:prob01.pddl"]) - - from downward import suites - IssueExperiment(suite=suites.suite_all()) - IssueExperiment(suite=suites.suite_satisficing_with_ipc11()) - IssueExperiment(suite=suites.suite_optimal()) - - Use *grid_priority* to set the job priority for cluster - experiments. It must be in the range [-1023, 0] where 0 is the - highest priority. By default the priority is 0. :: - - IssueExperiment(grid_priority=-500) - - If *path* is specified, it must be the path to where the - experiment should be built (e.g. - /home/john/experiments/issue123/exp01/). If omitted, the - experiment path is derived automatically from the main - script's filename. Example:: - - script = experiments/issue123/exp01.py --> - path = experiments/issue123/data/issue123-exp01/ - - If *repo* is specified, it must be the path to the root of a - local Fast Downward repository. If omitted, the repository - is derived automatically from the main script's path. Example:: - - script = /path/to/fd-repo/experiments/issue123/exp01.py --> - repo = /path/to/fd-repo - - If *revisions* is specified, it should be a non-empty - list of revisions, which specify which planner versions to use - in the experiment. The same versions are used for translator, - preprocessor and search. :: - - IssueExperiment(revisions=["issue123", "4b3d581643"]) - - If *search_revisions* is specified, it should be a non-empty - list of revisions, which specify which search component - versions to use in the experiment. All runs use the - translator and preprocessor component of the first - revision. :: - - IssueExperiment(search_revisions=["default", "issue123"]) - - If you really need to specify the (translator, preprocessor, - planner) triples manually, use the *combinations* parameter - from the base class (might be deprecated soon). The options - *revisions*, *search_revisions* and *combinations* can be - freely mixed, but at least one of them must be given. 
- - Specify *test_suite* to set the benchmarks for experiment test - runs. By default the first gripper task is used. - - IssueExperiment(test_suite=["depot:pfile1", "tpp:p01.pddl"]) - - """ - - if is_test_run(): - kwargs["environment"] = LocalEnvironment() - suite = test_suite or self.DEFAULT_TEST_SUITE - elif "environment" not in kwargs: - kwargs["environment"] = MaiaEnvironment(priority=grid_priority) - - if path is None: - path = get_data_dir() - - if repo is None: - repo = get_repo_base() - - kwargs.setdefault("combinations", []) - - if not any([revisions, search_revisions, kwargs["combinations"]]): - raise ValueError('At least one of "revisions", "search_revisions" ' - 'or "combinations" must be given') - - if revisions: - kwargs["combinations"].extend([ - (Translator(repo, rev), - Preprocessor(repo, rev), - Planner(repo, rev)) - for rev in revisions]) - - if search_revisions: - base_rev = search_revisions[0] - # Use the same nick for all parts to get short revision nick. - kwargs["combinations"].extend([ - (Translator(repo, base_rev, nick=rev), - Preprocessor(repo, base_rev, nick=rev), - Planner(repo, rev, nick=rev)) - for rev in search_revisions]) - - DownwardExperiment.__init__(self, path=path, repo=repo, **kwargs) - - self._config_nicks = [] - for nick, config in configs.items(): - self.add_config(nick, config) - - self.add_suite(suite) - - @property - def revision_nicks(self): - # TODO: Once the add_algorithm() API is available we should get - # rid of the call to _get_rev_nick() and avoid inspecting the - # list of combinations by setting and saving the algorithm nicks. 
- return [_get_rev_nick(*combo) for combo in self.combinations] - - @classmethod - def _is_portfolio(cls, config_nick): - return "fdss" in config_nick - - @classmethod - def get_supported_attributes(cls, config_nick, attributes): - if cls._is_portfolio(config_nick): - return [attr for attr in attributes - if attr in cls.PORTFOLIO_ATTRIBUTES] - return attributes - - def add_config(self, nick, config, timeout=None): - DownwardExperiment.add_config(self, nick, config, timeout=timeout) - self._config_nicks.append(nick) - - def add_absolute_report_step(self, **kwargs): - """Add step that makes an absolute report. - - Absolute reports are useful for experiments that don't - compare revisions. - - The report is written to the experiment evaluation directory. - - All *kwargs* will be passed to the AbsoluteReport class. If - the keyword argument *attributes* is not specified, a - default list of attributes is used. :: - - exp.add_absolute_report_step(attributes=["coverage"]) - - """ - kwargs.setdefault("attributes", self.DEFAULT_TABLE_ATTRIBUTES) - report = AbsoluteReport(**kwargs) - outfile = get_experiment_name() + "." + report.output_format - self.add_report(report, outfile=outfile) - - def add_comparison_table_step(self, **kwargs): - """Add a step that makes pairwise revision comparisons. - - Create comparative reports for all pairs of Fast Downward - revision triples. Each report pairs up the runs of the same - config and lists the two absolute attribute values and their - difference for all attributes in kwargs["attributes"]. - - All *kwargs* will be passed to the CompareRevisionsReport - class. If the keyword argument *attributes* is not - specified, a default list of attributes is used. 
:: - - exp.add_comparison_table_step(attributes=["coverage"]) - - """ - kwargs.setdefault("attributes", self.DEFAULT_TABLE_ATTRIBUTES) - - def make_comparison_tables(): - for rev1, rev2 in itertools.combinations(self.revision_nicks, 2): - report = CompareRevisionsReport(rev1, rev2, **kwargs) - outfile = os.path.join(self.eval_dir, - "%s-%s-%s-compare.html" % - (self.name, rev1, rev2)) - report(self.eval_dir, outfile) - - self.add_step(Step("make-comparison-tables", make_comparison_tables)) - - def add_scatter_plot_step(self, attributes=None): - """Add a step that creates scatter plots for all revision pairs. - - Create a scatter plot for each combination of attribute, - configuration and revision pair. If *attributes* is not - specified, a list of common scatter plot attributes is used. - For portfolios all attributes except "cost", "coverage" and - "plan_length" will be ignored. :: - - exp.add_scatter_plot_step(attributes=["expansions"]) - - """ - if attributes is None: - attributes = self.DEFAULT_SCATTER_PLOT_ATTRIBUTES - scatter_dir = os.path.join(self.eval_dir, "scatter") - - def make_scatter_plot(config_nick, rev1, rev2, attribute): - name = "-".join([self.name, rev1, rev2, attribute, config_nick]) - print "Make scatter plot for", name - algo1 = "%s-%s" % (rev1, config_nick) - algo2 = "%s-%s" % (rev2, config_nick) - report = ScatterPlotReport( - filter_config=[algo1, algo2], - attributes=[attribute], - get_category=lambda run1, run2: run1["domain"], - legend_location=(1.3, 0.5)) - report(self.eval_dir, - os.path.join(scatter_dir, rev1 + "-" + rev2, name)) - - def make_scatter_plots(): - for config_nick in self._config_nicks: - for rev1, rev2 in itertools.combinations( - self.revision_nicks, 2): - for attribute in self.get_supported_attributes( - config_nick, attributes): - make_scatter_plot(config_nick, rev1, rev2, attribute) - - self.add_step(Step("make-scatter-plots", make_scatter_plots)) diff --git a/experiments/issue455/v1.py b/experiments/issue455/v1.py 
deleted file mode 100755 index 14d55c8b62..0000000000 --- a/experiments/issue455/v1.py +++ /dev/null @@ -1,50 +0,0 @@ -#! /usr/bin/env python -# -*- coding: utf-8 -*- - -from downward import suites - -import common_setup - - -REVS = ["issue455"] -SUITE = suites.suite_satisficing_with_ipc11() - -CONFIGS = { - "01-ff": [ - "--heuristic", - "hff=ff(cost_type=one)", - "--search", - "lazy(alt([single(hff),single(hff, pref_only=true)])," - "preferred=[hff],cost_type=one)" - ], - "02-ff-type-const": [ - "--heuristic", - "hff=ff(cost_type=one)", - "--search", - "lazy(alt([single(hff),single(hff, pref_only=true), type_based([const(1)])])," - "preferred=[hff],cost_type=one)" - ], - "03-lama-first": [ - "--heuristic", - "hlm,hff=lm_ff_syn(lm_rhw(reasonable_orders=true,lm_cost_type=one,cost_type=one))", - "--search", - "lazy(alt([single(hff),single(hff, pref_only=true), single(hlm), single(hlm, pref_only=true)])," - "preferred=[hff,hlm],cost_type=one)" - ], - "04-lama-first-types-ff-g": [ - "--heuristic", - "hlm,hff=lm_ff_syn(lm_rhw(reasonable_orders=true,lm_cost_type=one,cost_type=one))", - "--search", - "lazy(alt([single(hff),single(hff, pref_only=true), single(hlm), single(hlm, pref_only=true), type_based([hff, g()])])," - "preferred=[hff,hlm],cost_type=one)" - ], -} - -exp = common_setup.IssueExperiment( - search_revisions=REVS, - configs=CONFIGS, - suite=SUITE, - ) -exp.add_absolute_report_step() - -exp() diff --git a/experiments/issue456/common_setup.py b/experiments/issue456/common_setup.py deleted file mode 100644 index 90e985c980..0000000000 --- a/experiments/issue456/common_setup.py +++ /dev/null @@ -1,352 +0,0 @@ -# -*- coding: utf-8 -*- - -import itertools -import os -import platform -import sys - -from lab.environments import LocalEnvironment, MaiaEnvironment -from lab.experiment import ARGPARSER -from lab.steps import Step - -from downward.experiments import DownwardExperiment, _get_rev_nick -from downward.checkouts import Translator, Preprocessor, Planner -from 
downward.reports.absolute import AbsoluteReport -from downward.reports.compare import CompareRevisionsReport -from downward.reports.scatter import ScatterPlotReport - - -def parse_args(): - ARGPARSER.add_argument( - "--test", - choices=["yes", "no", "auto"], - default="auto", - dest="test_run", - help="test experiment locally on a small suite if --test=yes or " - "--test=auto and we are not on a cluster") - return ARGPARSER.parse_args() - -ARGS = parse_args() - - -def get_script(): - """Get file name of main script.""" - import __main__ - return __main__.__file__ - - -def get_script_dir(): - """Get directory of main script. - - Usually a relative directory (depends on how it was called by the user.)""" - return os.path.dirname(get_script()) - - -def get_experiment_name(): - """Get name for experiment. - - Derived from the absolute filename of the main script, e.g. - "/ham/spam/eggs.py" => "spam-eggs".""" - script = os.path.abspath(get_script()) - script_dir = os.path.basename(os.path.dirname(script)) - script_base = os.path.splitext(os.path.basename(script))[0] - return "%s-%s" % (script_dir, script_base) - - -def get_data_dir(): - """Get data dir for the experiment. - - This is the subdirectory "data" of the directory containing - the main script.""" - return os.path.join(get_script_dir(), "data", get_experiment_name()) - - -def get_repo_base(): - """Get base directory of the repository, as an absolute path. - - Search upwards in the directory tree from the main script until a - directory with a subdirectory named ".hg" is found. 
- - Abort if the repo base cannot be found.""" - path = os.path.abspath(get_script_dir()) - while os.path.dirname(path) != path: - if os.path.exists(os.path.join(path, ".hg")): - return path - path = os.path.dirname(path) - sys.exit("repo base could not be found") - - -def is_running_on_cluster(): - node = platform.node() - return ("cluster" in node or - node.startswith("gkigrid") or - node in ["habakuk", "turtur"]) - - -def is_test_run(): - return ARGS.test_run == "yes" or (ARGS.test_run == "auto" and - not is_running_on_cluster()) - - -class IssueExperiment(DownwardExperiment): - """Wrapper for DownwardExperiment with a few convenience features.""" - - DEFAULT_TEST_SUITE = "gripper:prob01.pddl" - - DEFAULT_TABLE_ATTRIBUTES = [ - "cost", - "coverage", - "error", - "evaluations", - "expansions", - "expansions_until_last_jump", - "generated", - "memory", - "quality", - "run_dir", - "score_evaluations", - "score_expansions", - "score_generated", - "score_memory", - "score_search_time", - "score_total_time", - "search_time", - "total_time", - ] - - DEFAULT_SCATTER_PLOT_ATTRIBUTES = [ - "evaluations", - "expansions", - "expansions_until_last_jump", - "initial_h_value", - "memory", - "search_time", - "total_time", - ] - - PORTFOLIO_ATTRIBUTES = [ - "cost", - "coverage", - "plan_length", - ] - - def __init__(self, configs, suite, grid_priority=None, path=None, - repo=None, revisions=None, search_revisions=None, - test_suite=None, **kwargs): - """Create a DownwardExperiment with some convenience features. - - *configs* must be a non-empty dict of {nick: cmdline} pairs - that sets the planner configurations to test. :: - - IssueExperiment(configs={ - "lmcut": ["--search", "astar(lmcut())"], - "ipdb": ["--search", "astar(ipdb())"]}) - - *suite* sets the benchmarks for the experiment. It must be a - single string or a list of strings specifying domains or - tasks. The downward.suites module has many predefined - suites. 
:: - - IssueExperiment(suite=["grid", "gripper:prob01.pddl"]) - - from downward import suites - IssueExperiment(suite=suites.suite_all()) - IssueExperiment(suite=suites.suite_satisficing_with_ipc11()) - IssueExperiment(suite=suites.suite_optimal()) - - Use *grid_priority* to set the job priority for cluster - experiments. It must be in the range [-1023, 0] where 0 is the - highest priority. By default the priority is 0. :: - - IssueExperiment(grid_priority=-500) - - If *path* is specified, it must be the path to where the - experiment should be built (e.g. - /home/john/experiments/issue123/exp01/). If omitted, the - experiment path is derived automatically from the main - script's filename. Example:: - - script = experiments/issue123/exp01.py --> - path = experiments/issue123/data/issue123-exp01/ - - If *repo* is specified, it must be the path to the root of a - local Fast Downward repository. If omitted, the repository - is derived automatically from the main script's path. Example:: - - script = /path/to/fd-repo/experiments/issue123/exp01.py --> - repo = /path/to/fd-repo - - If *revisions* is specified, it should be a non-empty - list of revisions, which specify which planner versions to use - in the experiment. The same versions are used for translator, - preprocessor and search. :: - - IssueExperiment(revisions=["issue123", "4b3d581643"]) - - If *search_revisions* is specified, it should be a non-empty - list of revisions, which specify which search component - versions to use in the experiment. All runs use the - translator and preprocessor component of the first - revision. :: - - IssueExperiment(search_revisions=["default", "issue123"]) - - If you really need to specify the (translator, preprocessor, - planner) triples manually, use the *combinations* parameter - from the base class (might be deprecated soon). The options - *revisions*, *search_revisions* and *combinations* can be - freely mixed, but at least one of them must be given. 
- - Specify *test_suite* to set the benchmarks for experiment test - runs. By default the first gripper task is used. - - IssueExperiment(test_suite=["depot:pfile1", "tpp:p01.pddl"]) - - """ - - if is_test_run(): - kwargs["environment"] = LocalEnvironment() - suite = test_suite or self.DEFAULT_TEST_SUITE - elif "environment" not in kwargs: - kwargs["environment"] = MaiaEnvironment(priority=grid_priority) - - if path is None: - path = get_data_dir() - - if repo is None: - repo = get_repo_base() - - kwargs.setdefault("combinations", []) - - if not any([revisions, search_revisions, kwargs["combinations"]]): - raise ValueError('At least one of "revisions", "search_revisions" ' - 'or "combinations" must be given') - - if revisions: - kwargs["combinations"].extend([ - (Translator(repo, rev), - Preprocessor(repo, rev), - Planner(repo, rev)) - for rev in revisions]) - - if search_revisions: - base_rev = search_revisions[0] - # Use the same nick for all parts to get short revision nick. - kwargs["combinations"].extend([ - (Translator(repo, base_rev, nick=rev), - Preprocessor(repo, base_rev, nick=rev), - Planner(repo, rev, nick=rev)) - for rev in search_revisions]) - - DownwardExperiment.__init__(self, path=path, repo=repo, **kwargs) - - self._config_nicks = [] - for nick, config in configs.items(): - self.add_config(nick, config) - - self.add_suite(suite) - - @property - def revision_nicks(self): - # TODO: Once the add_algorithm() API is available we should get - # rid of the call to _get_rev_nick() and avoid inspecting the - # list of combinations by setting and saving the algorithm nicks. - return [_get_rev_nick(*combo) for combo in self.combinations] - - def add_config(self, nick, config, timeout=None): - DownwardExperiment.add_config(self, nick, config, timeout=timeout) - self._config_nicks.append(nick) - - def add_absolute_report_step(self, **kwargs): - """Add step that makes an absolute report. 
- - Absolute reports are useful for experiments that don't - compare revisions. - - The report is written to the experiment evaluation directory. - - All *kwargs* will be passed to the AbsoluteReport class. If - the keyword argument *attributes* is not specified, a - default list of attributes is used. :: - - exp.add_absolute_report_step(attributes=["coverage"]) - - """ - kwargs.setdefault("attributes", self.DEFAULT_TABLE_ATTRIBUTES) - report = AbsoluteReport(**kwargs) - outfile = get_experiment_name() + "." + report.output_format - self.add_report(report, outfile=outfile) - - def add_comparison_table_step(self, **kwargs): - """Add a step that makes pairwise revision comparisons. - - Create comparative reports for all pairs of Fast Downward - revision triples. Each report pairs up the runs of the same - config and lists the two absolute attribute values and their - difference for all attributes in kwargs["attributes"]. - - All *kwargs* will be passed to the CompareRevisionsReport - class. If the keyword argument *attributes* is not - specified, a default list of attributes is used. :: - - exp.add_comparison_table_step(attributes=["coverage"]) - - """ - kwargs.setdefault("attributes", self.DEFAULT_TABLE_ATTRIBUTES) - - def make_comparison_tables(): - for rev1, rev2 in itertools.combinations(self.revision_nicks, 2): - report = CompareRevisionsReport(rev1, rev2, **kwargs) - outfile = os.path.join(self.eval_dir, - "%s-%s-%s-compare.html" % - (self.name, rev1, rev2)) - report(self.eval_dir, outfile) - - self.add_step(Step("make-comparison-tables", make_comparison_tables)) - - def add_scatter_plot_step(self, attributes=None): - """Add a step that creates scatter plots for all revision pairs. - - Create a scatter plot for each combination of attribute, - configuration and revision pair. If *attributes* is not - specified, a list of common scatter plot attributes is used. - For portfolios all attributes except "cost", "coverage" and - "plan_length" will be ignored. 
:: - - exp.add_scatter_plot_step(attributes=["expansions"]) - - """ - if attributes is None: - attributes = self.DEFAULT_SCATTER_PLOT_ATTRIBUTES - scatter_dir = os.path.join(self.eval_dir, "scatter") - - def is_portfolio(config_nick): - return "fdss" in config_nick - - def make_scatter_plot(config_nick, rev1, rev2, attribute): - name = "-".join([self.name, rev1, rev2, attribute, config_nick]) - print "Make scatter plot for", name - algo1 = "%s-%s" % (rev1, config_nick) - algo2 = "%s-%s" % (rev2, config_nick) - report = ScatterPlotReport( - filter_config=[algo1, algo2], - attributes=[attribute], - get_category=lambda run1, run2: run1["domain"], - legend_location=(1.3, 0.5)) - report(self.eval_dir, - os.path.join(scatter_dir, rev1 + "-" + rev2, name)) - - def make_scatter_plots(): - for config_nick in self._config_nicks: - if is_portfolio(config_nick): - valid_attributes = [ - attr for attr in attributes - if attr in self.PORTFOLIO_ATTRIBUTES] - else: - valid_attributes = attributes - for rev1, rev2 in itertools.combinations( - self.revision_nicks, 2): - for attribute in valid_attributes: - make_scatter_plot(config_nick, rev1, rev2, attribute) - - self.add_step(Step("make-scatter-plots", make_scatter_plots)) diff --git a/experiments/issue456/opt-v1.py b/experiments/issue456/opt-v1.py deleted file mode 100644 index 6c279052f3..0000000000 --- a/experiments/issue456/opt-v1.py +++ /dev/null @@ -1,25 +0,0 @@ -#! 
/usr/bin/env python -# -*- coding: utf-8 -*- - -from downward import suites - -import common_setup - - -REVS = ["issue456-base", "issue456-v1"] -LIMITS = {"search_time": 1800} -SUITE = suites.suite_optimal_with_ipc11() -CONFIGS = { - "astar_blind": ["--search", "astar(blind())"], - "astar_hmax": ["--search", "astar(hmax())"], -} - -exp = common_setup.IssueExperiment( - search_revisions=REVS, - configs=CONFIGS, - suite=SUITE, - limits=LIMITS, - ) -exp.add_comparison_table_step() - -exp() diff --git a/experiments/issue456/opt-v2.py b/experiments/issue456/opt-v2.py deleted file mode 100644 index fc5a46cbb0..0000000000 --- a/experiments/issue456/opt-v2.py +++ /dev/null @@ -1,25 +0,0 @@ -#! /usr/bin/env python -# -*- coding: utf-8 -*- - -from downward import suites - -import common_setup - - -REVS = ["issue456-base", "issue456-v2"] -LIMITS = {"search_time": 1800} -SUITE = suites.suite_optimal_with_ipc11() -CONFIGS = { - "astar_blind": ["--search", "astar(blind())"], - "astar_hmax": ["--search", "astar(hmax())"], -} - -exp = common_setup.IssueExperiment( - search_revisions=REVS, - configs=CONFIGS, - suite=SUITE, - limits=LIMITS, - ) -exp.add_comparison_table_step() - -exp() diff --git a/experiments/issue456/sat-v1.py b/experiments/issue456/sat-v1.py deleted file mode 100644 index 9e7a6da985..0000000000 --- a/experiments/issue456/sat-v1.py +++ /dev/null @@ -1,33 +0,0 @@ -#! 
/usr/bin/env python -# -*- coding: utf-8 -*- - -from downward import suites - -import common_setup - - -REVS = ["issue456-base", "issue456-v1"] -LIMITS = {"search_time": 1800} -SUITE = suites.suite_satisficing_with_ipc11() -CONFIGS = { - "eager_greedy_add": [ - "--heuristic", - "h=add()", - "--search", - "eager_greedy(h, preferred=h)"], - "lazy_greedy_ff": [ - "--heuristic", - "h=ff()", - "--search", - "lazy_greedy(h, preferred=h)"], -} - -exp = common_setup.IssueExperiment( - search_revisions=REVS, - configs=CONFIGS, - suite=SUITE, - limits=LIMITS, - ) -exp.add_comparison_table_step() - -exp() diff --git a/experiments/issue456/sat-v2.py b/experiments/issue456/sat-v2.py deleted file mode 100644 index fc39f00bdc..0000000000 --- a/experiments/issue456/sat-v2.py +++ /dev/null @@ -1,33 +0,0 @@ -#! /usr/bin/env python -# -*- coding: utf-8 -*- - -from downward import suites - -import common_setup - - -REVS = ["issue456-base", "issue456-v2"] -LIMITS = {"search_time": 1800} -SUITE = suites.suite_satisficing_with_ipc11() -CONFIGS = { - "eager_greedy_add": [ - "--heuristic", - "h=add()", - "--search", - "eager_greedy(h, preferred=h)"], - "lazy_greedy_ff": [ - "--heuristic", - "h=ff()", - "--search", - "lazy_greedy(h, preferred=h)"], -} - -exp = common_setup.IssueExperiment( - search_revisions=REVS, - configs=CONFIGS, - suite=SUITE, - limits=LIMITS, - ) -exp.add_comparison_table_step() - -exp() diff --git a/experiments/issue462/common_setup.py b/experiments/issue462/common_setup.py deleted file mode 100644 index ebf622a012..0000000000 --- a/experiments/issue462/common_setup.py +++ /dev/null @@ -1,347 +0,0 @@ -# -*- coding: utf-8 -*- - -import itertools -import os -import platform -import sys - -from lab.environments import LocalEnvironment, MaiaEnvironment -from lab.experiment import ARGPARSER -from lab.steps import Step - -from downward.experiments import DownwardExperiment, _get_rev_nick -from downward.checkouts import Translator, Preprocessor, Planner -from 
downward.reports.absolute import AbsoluteReport -from downward.reports.compare import CompareRevisionsReport -from downward.reports.scatter import ScatterPlotReport - - -def parse_args(): - ARGPARSER.add_argument( - "--test", - choices=["yes", "no", "auto"], - default="auto", - dest="test_run", - help="test experiment locally on a small suite if --test=yes or " - "--test=auto and we are not on a cluster") - return ARGPARSER.parse_args() - -ARGS = parse_args() - - -def get_script(): - """Get file name of main script.""" - import __main__ - return __main__.__file__ - - -def get_script_dir(): - """Get directory of main script. - - Usually a relative directory (depends on how it was called by the user.)""" - return os.path.dirname(get_script()) - - -def get_experiment_name(): - """Get name for experiment. - - Derived from the absolute filename of the main script, e.g. - "/ham/spam/eggs.py" => "spam-eggs".""" - script = os.path.abspath(get_script()) - script_dir = os.path.basename(os.path.dirname(script)) - script_base = os.path.splitext(os.path.basename(script))[0] - return "%s-%s" % (script_dir, script_base) - - -def get_data_dir(): - """Get data dir for the experiment. - - This is the subdirectory "data" of the directory containing - the main script.""" - return os.path.join(get_script_dir(), "data", get_experiment_name()) - - -def get_repo_base(): - """Get base directory of the repository, as an absolute path. - - Search upwards in the directory tree from the main script until a - directory with a subdirectory named ".hg" is found. 
- - Abort if the repo base cannot be found.""" - path = os.path.abspath(get_script_dir()) - while os.path.dirname(path) != path: - if os.path.exists(os.path.join(path, ".hg")): - return path - path = os.path.dirname(path) - sys.exit("repo base could not be found") - - -def is_running_on_cluster(): - node = platform.node() - return (node.endswith("cluster.bc2.ch") or - node.startswith("gkigrid") or - node in ["habakuk", "turtur"]) - - -def is_test_run(): - return ARGS.test_run == "yes" or (ARGS.test_run == "auto" and - not is_running_on_cluster()) - - -class IssueExperiment(DownwardExperiment): - """Wrapper for DownwardExperiment with a few convenience features.""" - - DEFAULT_TEST_SUITE = "gripper:prob01.pddl" - - # TODO: Once we have reference results, we should add "quality". - # TODO: Add something about errors/exit codes. - DEFAULT_TABLE_ATTRIBUTES = [ - "cost", - "coverage", - "evaluations", - "expansions", - "expansions_until_last_jump", - "generated", - "memory", - "run_dir", - "score_evaluations", - "score_expansions", - "score_generated", - "score_memory", - "score_search_time", - "score_total_time", - "search_time", - "total_time", - ] - - DEFAULT_SCATTER_PLOT_ATTRIBUTES = [ - "evaluations", - "expansions", - "expansions_until_last_jump", - "initial_h_value", - "memory", - "search_time", - "total_time", - ] - - PORTFOLIO_ATTRIBUTES = [ - "cost", - "coverage", - "plan_length", - ] - - def __init__(self, configs, suite, grid_priority=None, path=None, - repo=None, revisions=None, search_revisions=None, - test_suite=None, **kwargs): - """Create a DownwardExperiment with some convenience features. - - *configs* must be a non-empty dict of {nick: cmdline} pairs - that sets the planner configurations to test. :: - - IssueExperiment(configs={ - "lmcut": ["--search", "astar(lmcut())"], - "ipdb": ["--search", "astar(ipdb())"]}) - - *suite* sets the benchmarks for the experiment. It must be a - single string or a list of strings specifying domains or - tasks. 
The downward.suites module has many predefined - suites. :: - - IssueExperiment(suite=["grid", "gripper:prob01.pddl"]) - - from downward import suites - IssueExperiment(suite=suites.suite_all()) - IssueExperiment(suite=suites.suite_satisficing_with_ipc11()) - IssueExperiment(suite=suites.suite_optimal()) - - Use *grid_priority* to set the job priority for cluster - experiments. It must be in the range [-1023, 0] where 0 is the - highest priority. By default the priority is 0. :: - - IssueExperiment(grid_priority=-500) - - If *path* is specified, it must be the path to where the - experiment should be built (e.g. - /home/john/experiments/issue123/exp01/). If omitted, the - experiment path is derived automatically from the main - script's filename. Example:: - - script = experiments/issue123/exp01.py --> - path = experiments/issue123/data/issue123-exp01/ - - If *repo* is specified, it must be the path to the root of a - local Fast Downward repository. If omitted, the repository - is derived automatically from the main script's path. Example:: - - script = /path/to/fd-repo/experiments/issue123/exp01.py --> - repo = /path/to/fd-repo - - If *revisions* is specified, it should be a non-empty - list of revisions, which specify which planner versions to use - in the experiment. The same versions are used for translator, - preprocessor and search. :: - - IssueExperiment(revisions=["issue123", "4b3d581643"]) - - If *search_revisions* is specified, it should be a non-empty - list of revisions, which specify which search component - versions to use in the experiment. All runs use the - translator and preprocessor component of the first - revision. :: - - IssueExperiment(search_revisions=["default", "issue123"]) - - If you really need to specify the (translator, preprocessor, - planner) triples manually, use the *combinations* parameter - from the base class (might be deprecated soon). 
The options - *revisions*, *search_revisions* and *combinations* can be - freely mixed, but at least one of them must be given. - - Specify *test_suite* to set the benchmarks for experiment test - runs. By default the first gripper task is used. - - IssueExperiment(test_suite=["depot:pfile1", "tpp:p01.pddl"]) - - """ - - if is_test_run(): - kwargs["environment"] = LocalEnvironment() - suite = test_suite or self.DEFAULT_TEST_SUITE - elif "environment" not in kwargs: - kwargs["environment"] = MaiaEnvironment(priority=grid_priority) - - if path is None: - path = get_data_dir() - - if repo is None: - repo = get_repo_base() - - kwargs.setdefault("combinations", []) - - if not any([revisions, search_revisions, kwargs["combinations"]]): - raise ValueError('At least one of "revisions", "search_revisions" ' - 'or "combinations" must be given') - - if revisions: - kwargs["combinations"].extend([ - (Translator(repo, rev), - Preprocessor(repo, rev), - Planner(repo, rev)) - for rev in revisions]) - - if search_revisions: - base_rev = search_revisions[0] - # Use the same nick for all parts to get short revision nick. - kwargs["combinations"].extend([ - (Translator(repo, base_rev, nick=rev), - Preprocessor(repo, base_rev, nick=rev), - Planner(repo, rev, nick=rev)) - for rev in search_revisions]) - - DownwardExperiment.__init__(self, path=path, repo=repo, **kwargs) - - self._config_nicks = [] - for nick, config in configs.items(): - self.add_config(nick, config) - - self.add_suite(suite) - - @property - def revision_nicks(self): - # TODO: Once the add_algorithm() API is available we should get - # rid of the call to _get_rev_nick() and avoid inspecting the - # list of combinations by setting and saving the algorithm nicks. 
- return [_get_rev_nick(*combo) for combo in self.combinations] - - def add_config(self, nick, config, timeout=None): - DownwardExperiment.add_config(self, nick, config, timeout=timeout) - self._config_nicks.append(nick) - - def add_absolute_report_step(self, **kwargs): - """Add step that makes an absolute report. - - Absolute reports are useful for experiments that don't - compare revisions. - - The report is written to the experiment evaluation directory. - - All *kwargs* will be passed to the AbsoluteReport class. If - the keyword argument *attributes* is not specified, a - default list of attributes is used. :: - - exp.add_absolute_report_step(attributes=["coverage"]) - - """ - kwargs.setdefault("attributes", self.DEFAULT_TABLE_ATTRIBUTES) - report = AbsoluteReport(**kwargs) - outfile = get_experiment_name() + "." + report.output_format - self.add_report(report, outfile=outfile) - - def add_comparison_table_step(self, **kwargs): - """Add a step that makes pairwise revision comparisons. - - Create comparative reports for all pairs of Fast Downward - revision triples. Each report pairs up the runs of the same - config and lists the two absolute attribute values and their - difference for all attributes in kwargs["attributes"]. - - All *kwargs* will be passed to the CompareRevisionsReport - class. If the keyword argument *attributes* is not - specified, a default list of attributes is used. 
:: - - exp.add_comparison_table_step(attributes=["coverage"]) - - """ - kwargs.setdefault("attributes", self.DEFAULT_TABLE_ATTRIBUTES) - - def make_comparison_tables(): - for rev1, rev2 in itertools.combinations(self.revision_nicks, 2): - report = CompareRevisionsReport(rev1, rev2, **kwargs) - outfile = os.path.join(self.eval_dir, - "%s-%s-compare.html" % (rev1, rev2)) - report(self.eval_dir, outfile) - - self.add_step(Step("make-comparison-tables", make_comparison_tables)) - - def add_scatter_plot_step(self, attributes=None): - """Add a step that creates scatter plots for all revision pairs. - - Create a scatter plot for each combination of attribute, - configuration and revision pair. If *attributes* is not - specified, a list of common scatter plot attributes is used. - For portfolios all attributes except "cost", "coverage" and - "plan_length" will be ignored. :: - - exp.add_scatter_plot_step(attributes=["expansions"]) - - """ - if attributes is None: - attributes = self.DEFAULT_SCATTER_PLOT_ATTRIBUTES - scatter_dir = os.path.join(self.eval_dir, "scatter") - - def is_portfolio(config_nick): - return "fdss" in config_nick - - def make_scatter_plots(): - for config_nick in self._config_nicks: - for rev1, rev2 in itertools.combinations( - self.revision_nicks, 2): - algo1 = "%s-%s" % (rev1, config_nick) - algo2 = "%s-%s" % (rev2, config_nick) - if is_portfolio(config_nick): - valid_attributes = [ - attr for attr in attributes - if attr in self.PORTFOLIO_ATTRIBUTES] - else: - valid_attributes = attributes - for attribute in valid_attributes: - name = "-".join([rev1, rev2, attribute, config_nick]) - print "Make scatter plot for", name - report = ScatterPlotReport( - filter_config=[algo1, algo2], - attributes=[attribute], - get_category=lambda run1, run2: run1["domain"], - legend_location=(1.3, 0.5)) - report(self.eval_dir, os.path.join(scatter_dir, name)) - - self.add_step(Step("make-scatter-plots", make_scatter_plots)) diff --git 
a/experiments/issue462/issue462-opt.py b/experiments/issue462/issue462-opt.py deleted file mode 100755 index 15cd2e7559..0000000000 --- a/experiments/issue462/issue462-opt.py +++ /dev/null @@ -1,54 +0,0 @@ -#! /usr/bin/env python -# -*- coding: utf-8 -*- - -from downward import suites, configs -from downward.reports.compare import CompareConfigsReport - -import common_setup - -REVISIONS = ["issue462-base", "issue462-v1"] -CONFIGS = configs.default_configs_optimal() - -# remove config that is disabled in this branch -del CONFIGS['astar_selmax_lmcut_lmcount'] - -exp = common_setup.IssueExperiment( - search_revisions=REVISIONS, - configs=CONFIGS, - suite=suites.suite_optimal_with_ipc11(), - limits={"search_time": 300} - ) -exp.add_absolute_report_step() -exp.add_comparison_table_step() - -def grouped_configs_to_compare(config_nicks): - grouped_configs = [] - for config_nick in config_nicks: - col_names = ['%s-%s' % (r, config_nick) for r in REVISIONS] - grouped_configs.append((col_names[0], col_names[1], - 'Diff - %s' % config_nick)) - return grouped_configs - -exp.add_report(CompareConfigsReport( - compared_configs=grouped_configs_to_compare(configs.configs_optimal_core()), - attributes=common_setup.IssueExperiment.DEFAULT_TABLE_ATTRIBUTES, - ), - outfile="issue462-opt-compare-core-configs.html" -) - -def add_first_run_search_time(run): - if run.get("search_time_all", []): - run["first_run_search_time"] = run["search_time_all"][0] - return run - -exp.add_report(CompareConfigsReport( - compared_configs=grouped_configs_to_compare(configs.configs_optimal_ipc()), - attributes=common_setup.IssueExperiment.DEFAULT_TABLE_ATTRIBUTES + ["first_run_search_time"], - filter=add_first_run_search_time, - ), - outfile="issue462-opt-compare-portfolio-configs.html" -) - -exp.add_scatter_plot_step() - -exp() diff --git a/experiments/issue462/issue462-sat.py b/experiments/issue462/issue462-sat.py deleted file mode 100755 index 26ad90dce1..0000000000 --- 
a/experiments/issue462/issue462-sat.py +++ /dev/null @@ -1,50 +0,0 @@ -#! /usr/bin/env python -# -*- coding: utf-8 -*- - -from downward import suites, configs -from downward.reports.compare import CompareConfigsReport - -import common_setup - -REVISIONS = ["issue462-base", "issue462-v1"] - -exp = common_setup.IssueExperiment( - search_revisions=REVISIONS, - configs=configs.default_configs_satisficing(), - suite=suites.suite_satisficing_with_ipc11(), - limits={"search_time": 300} - ) -exp.add_absolute_report_step() -exp.add_comparison_table_step() - -def grouped_configs_to_compare(config_nicks): - grouped_configs = [] - for config_nick in config_nicks: - col_names = ['%s-%s' % (r, config_nick) for r in REVISIONS] - grouped_configs.append((col_names[0], col_names[1], - 'Diff - %s' % config_nick)) - return grouped_configs - -exp.add_report(CompareConfigsReport( - compared_configs=grouped_configs_to_compare(configs.configs_satisficing_core()), - attributes=common_setup.IssueExperiment.DEFAULT_TABLE_ATTRIBUTES, - ), - outfile="issue462-sat-compare-core-configs.html" -) - -def add_first_run_search_time(run): - if run.get("search_time_all", []): - run["first_run_search_time"] = run["search_time_all"][0] - return run - -exp.add_report(CompareConfigsReport( - compared_configs=grouped_configs_to_compare(configs.configs_satisficing_ipc()), - attributes=common_setup.IssueExperiment.DEFAULT_TABLE_ATTRIBUTES + ["first_run_search_time"], - filter=add_first_run_search_time, - ), - outfile="issue462-sat-compare-portfolio-configs.html" -) - -exp.add_scatter_plot_step() - -exp() diff --git a/experiments/issue467/common_setup.py b/experiments/issue467/common_setup.py deleted file mode 100644 index eeca3aadb5..0000000000 --- a/experiments/issue467/common_setup.py +++ /dev/null @@ -1,395 +0,0 @@ -# -*- coding: utf-8 -*- - -import itertools -import os -import platform -import subprocess -import sys - -from lab.experiment import ARGPARSER -from lab import tools - -from downward.experiment 
import FastDownwardExperiment -from downward.reports.absolute import AbsoluteReport -from downward.reports.compare import ComparativeReport -from downward.reports.scatter import ScatterPlotReport - - -def parse_args(): - ARGPARSER.add_argument( - "--test", - choices=["yes", "no", "auto"], - default="auto", - dest="test_run", - help="test experiment locally on a small suite if --test=yes or " - "--test=auto and we are not on a cluster") - return ARGPARSER.parse_args() - -ARGS = parse_args() - - -DEFAULT_OPTIMAL_SUITE = [ - 'agricola-opt18-strips', 'airport', 'barman-opt11-strips', - 'barman-opt14-strips', 'blocks', 'childsnack-opt14-strips', - 'data-network-opt18-strips', 'depot', 'driverlog', - 'elevators-opt08-strips', 'elevators-opt11-strips', - 'floortile-opt11-strips', 'floortile-opt14-strips', 'freecell', - 'ged-opt14-strips', 'grid', 'gripper', 'hiking-opt14-strips', - 'logistics00', 'logistics98', 'miconic', 'movie', 'mprime', - 'mystery', 'nomystery-opt11-strips', 'openstacks-opt08-strips', - 'openstacks-opt11-strips', 'openstacks-opt14-strips', - 'openstacks-strips', 'organic-synthesis-opt18-strips', - 'organic-synthesis-split-opt18-strips', 'parcprinter-08-strips', - 'parcprinter-opt11-strips', 'parking-opt11-strips', - 'parking-opt14-strips', 'pathways-noneg', 'pegsol-08-strips', - 'pegsol-opt11-strips', 'petri-net-alignment-opt18-strips', - 'pipesworld-notankage', 'pipesworld-tankage', 'psr-small', 'rovers', - 'satellite', 'scanalyzer-08-strips', 'scanalyzer-opt11-strips', - 'snake-opt18-strips', 'sokoban-opt08-strips', - 'sokoban-opt11-strips', 'spider-opt18-strips', 'storage', - 'termes-opt18-strips', 'tetris-opt14-strips', - 'tidybot-opt11-strips', 'tidybot-opt14-strips', 'tpp', - 'transport-opt08-strips', 'transport-opt11-strips', - 'transport-opt14-strips', 'trucks-strips', 'visitall-opt11-strips', - 'visitall-opt14-strips', 'woodworking-opt08-strips', - 'woodworking-opt11-strips', 'zenotravel'] - -DEFAULT_SATISFICING_SUITE = [ - 
'agricola-sat18-strips', 'airport', 'assembly', - 'barman-sat11-strips', 'barman-sat14-strips', 'blocks', - 'caldera-sat18-adl', 'caldera-split-sat18-adl', 'cavediving-14-adl', - 'childsnack-sat14-strips', 'citycar-sat14-adl', - 'data-network-sat18-strips', 'depot', 'driverlog', - 'elevators-sat08-strips', 'elevators-sat11-strips', - 'flashfill-sat18-adl', 'floortile-sat11-strips', - 'floortile-sat14-strips', 'freecell', 'ged-sat14-strips', 'grid', - 'gripper', 'hiking-sat14-strips', 'logistics00', 'logistics98', - 'maintenance-sat14-adl', 'miconic', 'miconic-fulladl', - 'miconic-simpleadl', 'movie', 'mprime', 'mystery', - 'nomystery-sat11-strips', 'nurikabe-sat18-adl', 'openstacks', - 'openstacks-sat08-adl', 'openstacks-sat08-strips', - 'openstacks-sat11-strips', 'openstacks-sat14-strips', - 'openstacks-strips', 'optical-telegraphs', - 'organic-synthesis-sat18-strips', - 'organic-synthesis-split-sat18-strips', 'parcprinter-08-strips', - 'parcprinter-sat11-strips', 'parking-sat11-strips', - 'parking-sat14-strips', 'pathways', 'pathways-noneg', - 'pegsol-08-strips', 'pegsol-sat11-strips', 'philosophers', - 'pipesworld-notankage', 'pipesworld-tankage', 'psr-large', - 'psr-middle', 'psr-small', 'rovers', 'satellite', - 'scanalyzer-08-strips', 'scanalyzer-sat11-strips', 'schedule', - 'settlers-sat18-adl', 'snake-sat18-strips', 'sokoban-sat08-strips', - 'sokoban-sat11-strips', 'spider-sat18-strips', 'storage', - 'termes-sat18-strips', 'tetris-sat14-strips', - 'thoughtful-sat14-strips', 'tidybot-sat11-strips', 'tpp', - 'transport-sat08-strips', 'transport-sat11-strips', - 'transport-sat14-strips', 'trucks', 'trucks-strips', - 'visitall-sat11-strips', 'visitall-sat14-strips', - 'woodworking-sat08-strips', 'woodworking-sat11-strips', - 'zenotravel'] - - -def get_script(): - """Get file name of main script.""" - return tools.get_script_path() - - -def get_script_dir(): - """Get directory of main script. 
- - Usually a relative directory (depends on how it was called by the user.)""" - return os.path.dirname(get_script()) - - -def get_experiment_name(): - """Get name for experiment. - - Derived from the absolute filename of the main script, e.g. - "/ham/spam/eggs.py" => "spam-eggs".""" - script = os.path.abspath(get_script()) - script_dir = os.path.basename(os.path.dirname(script)) - script_base = os.path.splitext(os.path.basename(script))[0] - return "%s-%s" % (script_dir, script_base) - - -def get_data_dir(): - """Get data dir for the experiment. - - This is the subdirectory "data" of the directory containing - the main script.""" - return os.path.join(get_script_dir(), "data", get_experiment_name()) - - -def get_repo_base(): - """Get base directory of the repository, as an absolute path. - - Search upwards in the directory tree from the main script until a - directory with a subdirectory named ".git" is found. - - Abort if the repo base cannot be found.""" - path = os.path.abspath(get_script_dir()) - while os.path.dirname(path) != path: - if os.path.exists(os.path.join(path, ".git")): - return path - path = os.path.dirname(path) - sys.exit("repo base could not be found") - - -def is_running_on_cluster(): - node = platform.node() - return node.endswith(".scicore.unibas.ch") or node.endswith(".cluster.bc2.ch") - - -def is_test_run(): - return ARGS.test_run == "yes" or ( - ARGS.test_run == "auto" and not is_running_on_cluster()) - - -def get_algo_nick(revision, config_nick): - return "{revision}-{config_nick}".format(**locals()) - - -class IssueConfig(object): - """Hold information about a planner configuration. - - See FastDownwardExperiment.add_algorithm() for documentation of the - constructor's options. 
- - """ - def __init__(self, nick, component_options, - build_options=None, driver_options=None): - self.nick = nick - self.component_options = component_options - self.build_options = build_options - self.driver_options = driver_options - - -class IssueExperiment(FastDownwardExperiment): - """Subclass of FastDownwardExperiment with some convenience features.""" - - DEFAULT_TEST_SUITE = ["depot:p01.pddl", "gripper:prob01.pddl"] - - DEFAULT_TABLE_ATTRIBUTES = [ - "cost", - "coverage", - "error", - "evaluations", - "expansions", - "expansions_until_last_jump", - "initial_h_value", - "generated", - "memory", - "planner_memory", - "planner_time", - "quality", - "run_dir", - "score_evaluations", - "score_expansions", - "score_generated", - "score_memory", - "score_search_time", - "score_total_time", - "search_time", - "total_time", - ] - - DEFAULT_SCATTER_PLOT_ATTRIBUTES = [ - "evaluations", - "expansions", - "expansions_until_last_jump", - "initial_h_value", - "memory", - "search_time", - "total_time", - ] - - PORTFOLIO_ATTRIBUTES = [ - "cost", - "coverage", - "error", - "plan_length", - "run_dir", - ] - - def __init__(self, revisions=None, configs=None, path=None, **kwargs): - """ - - You can either specify both *revisions* and *configs* or none - of them. If they are omitted, you will need to call - exp.add_algorithm() manually. - - If *revisions* is given, it must be a non-empty list of - revision identifiers, which specify which planner versions to - use in the experiment. The same versions are used for - translator, preprocessor and search. :: - - IssueExperiment(revisions=["issue123", "4b3d581643"], ...) - - If *configs* is given, it must be a non-empty list of - IssueConfig objects. :: - - IssueExperiment(..., configs=[ - IssueConfig("ff", ["--search", "eager_greedy(ff())"]), - IssueConfig( - "lama", [], - driver_options=["--alias", "seq-sat-lama-2011"]), - ]) - - If *path* is specified, it must be the path to where the - experiment should be built (e.g. 
- /home/john/experiments/issue123/exp01/). If omitted, the - experiment path is derived automatically from the main - script's filename. Example:: - - script = experiments/issue123/exp01.py --> - path = experiments/issue123/data/issue123-exp01/ - - """ - - path = path or get_data_dir() - - FastDownwardExperiment.__init__(self, path=path, **kwargs) - - if (revisions and not configs) or (not revisions and configs): - raise ValueError( - "please provide either both or none of revisions and configs") - - for rev in revisions: - for config in configs: - self.add_algorithm( - get_algo_nick(rev, config.nick), - get_repo_base(), - rev, - config.component_options, - build_options=config.build_options, - driver_options=config.driver_options) - - self._revisions = revisions - self._configs = configs - - @classmethod - def _is_portfolio(cls, config_nick): - return "fdss" in config_nick - - @classmethod - def get_supported_attributes(cls, config_nick, attributes): - if cls._is_portfolio(config_nick): - return [attr for attr in attributes - if attr in cls.PORTFOLIO_ATTRIBUTES] - return attributes - - def add_absolute_report_step(self, **kwargs): - """Add step that makes an absolute report. - - Absolute reports are useful for experiments that don't compare - revisions. - - The report is written to the experiment evaluation directory. - - All *kwargs* will be passed to the AbsoluteReport class. If the - keyword argument *attributes* is not specified, a default list - of attributes is used. :: - - exp.add_absolute_report_step(attributes=["coverage"]) - - """ - kwargs.setdefault("attributes", self.DEFAULT_TABLE_ATTRIBUTES) - report = AbsoluteReport(**kwargs) - outfile = os.path.join( - self.eval_dir, - get_experiment_name() + "." 
+ report.output_format) - self.add_report(report, outfile=outfile) - self.add_step( - 'publish-absolute-report', subprocess.call, ['publish', outfile]) - - def add_comparison_table_step(self, **kwargs): - """Add a step that makes pairwise revision comparisons. - - Create comparative reports for all pairs of Fast Downward - revisions. Each report pairs up the runs of the same config and - lists the two absolute attribute values and their difference - for all attributes in kwargs["attributes"]. - - All *kwargs* will be passed to the CompareConfigsReport class. - If the keyword argument *attributes* is not specified, a - default list of attributes is used. :: - - exp.add_comparison_table_step(attributes=["coverage"]) - - """ - kwargs.setdefault("attributes", self.DEFAULT_TABLE_ATTRIBUTES) - - def make_comparison_tables(): - for rev1, rev2 in itertools.combinations(self._revisions, 2): - compared_configs = [] - for config in self._configs: - config_nick = config.nick - compared_configs.append( - ("%s-%s" % (rev1, config_nick), - "%s-%s" % (rev2, config_nick), - "Diff (%s)" % config_nick)) - report = ComparativeReport(compared_configs, **kwargs) - outfile = os.path.join( - self.eval_dir, - "%s-%s-%s-compare.%s" % ( - self.name, rev1, rev2, report.output_format)) - report(self.eval_dir, outfile) - - def publish_comparison_tables(): - for rev1, rev2 in itertools.combinations(self._revisions, 2): - outfile = os.path.join( - self.eval_dir, - "%s-%s-%s-compare.html" % (self.name, rev1, rev2)) - subprocess.call(["publish", outfile]) - - self.add_step("make-comparison-tables", make_comparison_tables) - self.add_step( - "publish-comparison-tables", publish_comparison_tables) - - def add_scatter_plot_step(self, relative=False, attributes=None, additional=[]): - """Add step creating (relative) scatter plots for all revision pairs. - - Create a scatter plot for each combination of attribute, - configuration and revisions pair. 
If *attributes* is not - specified, a list of common scatter plot attributes is used. - For portfolios all attributes except "cost", "coverage" and - "plan_length" will be ignored. :: - - exp.add_scatter_plot_step(attributes=["expansions"]) - - """ - if relative: - scatter_dir = os.path.join(self.eval_dir, "scatter-relative") - step_name = "make-relative-scatter-plots" - else: - scatter_dir = os.path.join(self.eval_dir, "scatter-absolute") - step_name = "make-absolute-scatter-plots" - if attributes is None: - attributes = self.DEFAULT_SCATTER_PLOT_ATTRIBUTES - - def make_scatter_plot(config_nick, rev1, rev2, attribute, config_nick2=None): - name = "-".join([self.name, rev1, rev2, attribute, config_nick]) - if config_nick2 is not None: - name += "-" + config_nick2 - print("Make scatter plot for", name) - algo1 = get_algo_nick(rev1, config_nick) - algo2 = get_algo_nick(rev2, config_nick if config_nick2 is None else config_nick2) - report = ScatterPlotReport( - filter_algorithm=[algo1, algo2], - attributes=[attribute], - relative=relative, - get_category=lambda run1, run2: run1["domain"]) - report( - self.eval_dir, - os.path.join(scatter_dir, rev1 + "-" + rev2, name)) - - def make_scatter_plots(): - for config in self._configs: - for rev1, rev2 in itertools.combinations(self._revisions, 2): - for attribute in self.get_supported_attributes( - config.nick, attributes): - make_scatter_plot(config.nick, rev1, rev2, attribute) - for nick1, nick2, rev1, rev2, attribute in additional: - make_scatter_plot(nick1, rev1, rev2, attribute, config_nick2=nick2) - - self.add_step(step_name, lambda: make_scatter_plots) diff --git a/experiments/issue467/landmark_parser.py b/experiments/issue467/landmark_parser.py deleted file mode 100755 index 943492471b..0000000000 --- a/experiments/issue467/landmark_parser.py +++ /dev/null @@ -1,29 +0,0 @@ -#! 
/usr/bin/env python - -import re - -from lab.parser import Parser - -parser = Parser() -parser.add_pattern( - "lmgraph_generation_time", - r"Landmark graph generation time: (.+)s", - type=float) -parser.add_pattern( - "landmarks", - r"Landmark graph contains (\d+) landmarks, of which \d+ are disjunctive and \d+ are conjunctive.", - type=int) -parser.add_pattern( - "landmarks_disjunctive", - r"Landmark graph contains \d+ landmarks, of which (\d+) are disjunctive and \d+ are conjunctive.", - type=int) -parser.add_pattern( - "landmarks_conjunctive", - r"Landmark graph contains \d+ landmarks, of which \d+ are disjunctive and (\d+) are conjunctive.", - type=int) -parser.add_pattern( - "orderings", - r"Landmark graph contains (\d+) orderings.", - type=int) - -parser.parse() diff --git a/experiments/issue467/v1-optimal.py b/experiments/issue467/v1-optimal.py deleted file mode 100755 index 75bd664d61..0000000000 --- a/experiments/issue467/v1-optimal.py +++ /dev/null @@ -1,93 +0,0 @@ -#! /usr/bin/env python -# -*- coding: utf-8 -*- - -import common_setup -from common_setup import IssueConfig, IssueExperiment - -import os - -from lab.reports import Attribute - -from lab.environments import LocalEnvironment, BaselSlurmEnvironment - -def make_comparison_table(): - report = common_setup.ComparativeReport( - algorithm_pairs=[ - ("issue467-base-seq-opt-bjolp", "issue467-v1-seq-opt-bjolp"), - ("issue467-base-seq-opt-bjolp-opt", "issue467-v1-seq-opt-bjolp-opt"), - ("issue467-base-lm-exhaust", "issue467-v1-lm-exhaust"), - ("issue467-base-lm-hm2", "issue467-v1-lm-hm2"), - ], attributes=ATTRIBUTES, - ) - outfile = os.path.join( - exp.eval_dir, "%s-compare.%s" % (exp.name, report.output_format) - ) - report(exp.eval_dir, outfile) - - exp.add_report(report) - - -REVISIONS = [ - "issue467-base", - "issue467-v1", -] - -CONFIGS = [ - common_setup.IssueConfig("seq-opt-bjolp", [], - driver_options=["--alias", "seq-opt-bjolp"]), - common_setup.IssueConfig( - "lm-exhaust", ["--evaluator", - 
"lmc=lmcount(lm_exhaust(),admissible=true)", - "--search", "astar(lmc,lazy_evaluator=lmc)"]), - common_setup.IssueConfig( - "lm-hm2", ["--evaluator", - "lmc=lmcount(lm_hm(m=2),admissible=true)", - "--search", "astar(lmc,lazy_evaluator=lmc)"]), - common_setup.IssueConfig( - "seq-opt-bjolp-opt", ["--evaluator", - "lmc=lmcount(lm_merged([lm_rhw(),lm_hm(m=1)]),admissible=true, optimal=true)", - "--search", "astar(lmc,lazy_evaluator=lmc)"]), -] - -BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"] -REPO = os.environ["DOWNWARD_REPO"] - -if common_setup.is_running_on_cluster(): - SUITE = common_setup.DEFAULT_OPTIMAL_SUITE - ENVIRONMENT = BaselSlurmEnvironment( - partition="infai_2", - email="clemens.buechner@unibas.ch", - export=["PATH", "DOWNWARD_BENCHMARKS"], - ) -else: - SUITE = common_setup.IssueExperiment.DEFAULT_TEST_SUITE - ENVIRONMENT = LocalEnvironment(processes=2) - -exp = common_setup.IssueExperiment( - revisions=REVISIONS, - configs=CONFIGS, - environment=ENVIRONMENT, -) - -exp.add_suite(BENCHMARKS_DIR, SUITE) - -exp.add_parser(exp.EXITCODE_PARSER) -exp.add_parser(exp.PLANNER_PARSER) -exp.add_parser(exp.SINGLE_SEARCH_PARSER) -exp.add_parser("landmark_parser.py") - -ATTRIBUTES = IssueExperiment.DEFAULT_TABLE_ATTRIBUTES + [ - Attribute("landmarks", min_wins=False), - Attribute("disjunctive_landmarks", min_wins=False), - Attribute("conjunctive_landmarks", min_wins=False), - Attribute("orderings", min_wins=False), -] - -exp.add_step("build", exp.build) -exp.add_step("start", exp.start_runs) -exp.add_fetcher(name="fetch") -exp.add_step("comparison table", make_comparison_table) -exp.add_parse_again_step() - -exp.run_steps() - diff --git a/experiments/issue467/v1-satisficing.py b/experiments/issue467/v1-satisficing.py deleted file mode 100755 index f1af395ab8..0000000000 --- a/experiments/issue467/v1-satisficing.py +++ /dev/null @@ -1,89 +0,0 @@ -#! 
/usr/bin/env python -# -*- coding: utf-8 -*- - -import common_setup - -from common_setup import IssueConfig, IssueExperiment - -import os - -from lab.reports import Attribute - -from lab.environments import LocalEnvironment, BaselSlurmEnvironment - -def make_comparison_table(): - report = common_setup.ComparativeReport( - algorithm_pairs=[ - ("issue467-base-lama-first", "issue467-v1-lama-first"), - ("issue467-base-lama-first-pref", "issue467-v1-lama-first-pref"), - ("issue467-base-lm-zg", "issue467-v1-lm-zg"), - ], attributes=ATTRIBUTES, - ) - outfile = os.path.join( - exp.eval_dir, "%s-compare.%s" % (exp.name, report.output_format) - ) - report(exp.eval_dir, outfile) - exp.add_report(report) - -REVISIONS = [ - "issue467-base", - "issue467-v1", -] - -CONFIGS = [ - common_setup.IssueConfig("lama-first", [], - driver_options=["--alias", "lama-first"]), - common_setup.IssueConfig( - "lama-first-pref", ["--evaluator", - "hlm=lmcount(lm_factory=lm_reasonable_orders_hps(" - "lm_rhw()),transform=adapt_costs(one),pref=true)", - "--evaluator", "hff=ff(transform=adapt_costs(one))", - "--search", - """lazy_greedy([hff,hlm],preferred=[hff,hlm], cost_type=one,reopen_closed=false)"""]), - common_setup.IssueConfig("lm-zg", [ - "--search", "eager_greedy([lmcount(lm_zg())])"]), -] - -BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"] -REPO = os.environ["DOWNWARD_REPO"] - -if common_setup.is_running_on_cluster(): - SUITE = common_setup.DEFAULT_SATISFICING_SUITE - ENVIRONMENT = BaselSlurmEnvironment( - partition="infai_2", - email="clemens.buechner@unibas.ch", - export=["PATH", "DOWNWARD_BENCHMARKS"], - ) -else: - SUITE = common_setup.IssueExperiment.DEFAULT_TEST_SUITE - ENVIRONMENT = LocalEnvironment(processes=2) - -exp = common_setup.IssueExperiment( - revisions=REVISIONS, - configs=CONFIGS, - environment=ENVIRONMENT, -) - -exp.add_suite(BENCHMARKS_DIR, SUITE) - -exp.add_parser(exp.ANYTIME_SEARCH_PARSER) -exp.add_parser(exp.EXITCODE_PARSER) -exp.add_parser(exp.PLANNER_PARSER) 
-exp.add_parser(exp.SINGLE_SEARCH_PARSER) -exp.add_parser("landmark_parser.py") - -ATTRIBUTES = IssueExperiment.DEFAULT_TABLE_ATTRIBUTES + [ - Attribute("landmarks", min_wins=False), - Attribute("disjunctive_landmarks", min_wins=False), - Attribute("conjunctive_landmarks", min_wins=False), - Attribute("orderings", min_wins=False), -] - -exp.add_step("build", exp.build) -exp.add_step("start", exp.start_runs) -exp.add_fetcher(name="fetch") -exp.add_step("comparison table", make_comparison_table) -exp.add_parse_again_step() - -exp.run_steps() - diff --git a/experiments/issue468/common_setup.py b/experiments/issue468/common_setup.py deleted file mode 100644 index 442d65186c..0000000000 --- a/experiments/issue468/common_setup.py +++ /dev/null @@ -1,347 +0,0 @@ -# -*- coding: utf-8 -*- - -import itertools -import os -import platform -import sys - -from lab.environments import LocalEnvironment, MaiaEnvironment -from lab.experiment import ARGPARSER -from lab.steps import Step - -from downward.experiments import DownwardExperiment, _get_rev_nick -from downward.checkouts import Translator, Preprocessor, Planner -from downward.reports.absolute import AbsoluteReport -from downward.reports.compare import CompareRevisionsReport -from downward.reports.scatter import ScatterPlotReport - - -def parse_args(): - ARGPARSER.add_argument( - "--test", - choices=["yes", "no", "auto"], - default="auto", - dest="test_run", - help="test experiment locally on a small suite if --test=yes or " - "--test=auto and we are not on a cluster") - return ARGPARSER.parse_args() - -ARGS = parse_args() - - -def get_script(): - """Get file name of main script.""" - import __main__ - return __main__.__file__ - - -def get_script_dir(): - """Get directory of main script. - - Usually a relative directory (depends on how it was called by the user.)""" - return os.path.dirname(get_script()) - - -def get_experiment_name(): - """Get name for experiment. 
- - Derived from the absolute filename of the main script, e.g. - "/ham/spam/eggs.py" => "spam-eggs".""" - script = os.path.abspath(get_script()) - script_dir = os.path.basename(os.path.dirname(script)) - script_base = os.path.splitext(os.path.basename(script))[0] - return "%s-%s" % (script_dir, script_base) - - -def get_data_dir(): - """Get data dir for the experiment. - - This is the subdirectory "data" of the directory containing - the main script.""" - return os.path.join(get_script_dir(), "data", get_experiment_name()) - - -def get_repo_base(): - """Get base directory of the repository, as an absolute path. - - Search upwards in the directory tree from the main script until a - directory with a subdirectory named ".hg" is found. - - Abort if the repo base cannot be found.""" - path = os.path.abspath(get_script_dir()) - while os.path.dirname(path) != path: - if os.path.exists(os.path.join(path, ".hg")): - return path - path = os.path.dirname(path) - sys.exit("repo base could not be found") - - -def is_running_on_cluster(): - node = platform.node() - return (node.endswith("cluster") or - node.startswith("gkigrid") or - node in ["habakuk", "turtur"]) - - -def is_test_run(): - return ARGS.test_run == "yes" or (ARGS.test_run == "auto" and - not is_running_on_cluster()) - - -class IssueExperiment(DownwardExperiment): - """Wrapper for DownwardExperiment with a few convenience features.""" - - DEFAULT_TEST_SUITE = "gripper:prob01.pddl" - - # TODO: Once we have reference results, we should add "quality". - # TODO: Add something about errors/exit codes. 
- DEFAULT_TABLE_ATTRIBUTES = [ - "cost", - "coverage", - "evaluations", - "expansions", - "expansions_until_last_jump", - "generated", - "memory", - "run_dir", - "score_evaluations", - "score_expansions", - "score_generated", - "score_memory", - "score_search_time", - "score_total_time", - "search_time", - "total_time", - ] - - DEFAULT_SCATTER_PLOT_ATTRIBUTES = [ - "evaluations", - "expansions", - "expansions_until_last_jump", - "initial_h_value", - "memory", - "search_time", - "total_time", - ] - - PORTFOLIO_ATTRIBUTES = [ - "cost", - "coverage", - "plan_length", - ] - - def __init__(self, configs, suite, grid_priority=None, path=None, - repo=None, revisions=None, search_revisions=None, - test_suite=None, **kwargs): - """Create a DownwardExperiment with some convenience features. - - *configs* must be a non-empty dict of {nick: cmdline} pairs - that sets the planner configurations to test. :: - - IssueExperiment(configs={ - "lmcut": ["--search", "astar(lmcut())"], - "ipdb": ["--search", "astar(ipdb())"]}) - - *suite* sets the benchmarks for the experiment. It must be a - single string or a list of strings specifying domains or - tasks. The downward.suites module has many predefined - suites. :: - - IssueExperiment(suite=["grid", "gripper:prob01.pddl"]) - - from downward import suites - IssueExperiment(suite=suites.suite_all()) - IssueExperiment(suite=suites.suite_satisficing_with_ipc11()) - IssueExperiment(suite=suites.suite_optimal()) - - Use *grid_priority* to set the job priority for cluster - experiments. It must be in the range [-1023, 0] where 0 is the - highest priority. By default the priority is 0. :: - - IssueExperiment(grid_priority=-500) - - If *path* is specified, it must be the path to where the - experiment should be built (e.g. - /home/john/experiments/issue123/exp01/). If omitted, the - experiment path is derived automatically from the main - script's filename. 
Example:: - - script = experiments/issue123/exp01.py --> - path = experiments/issue123/data/issue123-exp01/ - - If *repo* is specified, it must be the path to the root of a - local Fast Downward repository. If omitted, the repository - is derived automatically from the main script's path. Example:: - - script = /path/to/fd-repo/experiments/issue123/exp01.py --> - repo = /path/to/fd-repo - - If *revisions* is specified, it should be a non-empty - list of revisions, which specify which planner versions to use - in the experiment. The same versions are used for translator, - preprocessor and search. :: - - IssueExperiment(revisions=["issue123", "4b3d581643"]) - - If *search_revisions* is specified, it should be a non-empty - list of revisions, which specify which search component - versions to use in the experiment. All runs use the - translator and preprocessor component of the first - revision. :: - - IssueExperiment(search_revisions=["default", "issue123"]) - - If you really need to specify the (translator, preprocessor, - planner) triples manually, use the *combinations* parameter - from the base class (might be deprecated soon). The options - *revisions*, *search_revisions* and *combinations* can be - freely mixed, but at least one of them must be given. - - Specify *test_suite* to set the benchmarks for experiment test - runs. By default the first gripper task is used. 
- - IssueExperiment(test_suite=["depot:pfile1", "tpp:p01.pddl"]) - - """ - - if is_test_run(): - kwargs["environment"] = LocalEnvironment() - suite = test_suite or self.DEFAULT_TEST_SUITE - elif "environment" not in kwargs: - kwargs["environment"] = MaiaEnvironment(priority=grid_priority) - - if path is None: - path = get_data_dir() - - if repo is None: - repo = get_repo_base() - - kwargs.setdefault("combinations", []) - - if not any([revisions, search_revisions, kwargs["combinations"]]): - raise ValueError('At least one of "revisions", "search_revisions" ' - 'or "combinations" must be given') - - if revisions: - kwargs["combinations"].extend([ - (Translator(repo, rev), - Preprocessor(repo, rev), - Planner(repo, rev)) - for rev in revisions]) - - if search_revisions: - base_rev = search_revisions[0] - # Use the same nick for all parts to get short revision nick. - kwargs["combinations"].extend([ - (Translator(repo, base_rev, nick=rev), - Preprocessor(repo, base_rev, nick=rev), - Planner(repo, rev, nick=rev)) - for rev in search_revisions]) - - DownwardExperiment.__init__(self, path=path, repo=repo, **kwargs) - - self._config_nicks = [] - for nick, config in configs.items(): - self.add_config(nick, config) - - self.add_suite(suite) - - @property - def revision_nicks(self): - # TODO: Once the add_algorithm() API is available we should get - # rid of the call to _get_rev_nick() and avoid inspecting the - # list of combinations by setting and saving the algorithm nicks. - return [_get_rev_nick(*combo) for combo in self.combinations] - - def add_config(self, nick, config, timeout=None): - DownwardExperiment.add_config(self, nick, config, timeout=timeout) - self._config_nicks.append(nick) - - def add_absolute_report_step(self, **kwargs): - """Add step that makes an absolute report. - - Absolute reports are useful for experiments that don't - compare revisions. - - The report is written to the experiment evaluation directory. 
- - All *kwargs* will be passed to the AbsoluteReport class. If - the keyword argument *attributes* is not specified, a - default list of attributes is used. :: - - exp.add_absolute_report_step(attributes=["coverage"]) - - """ - kwargs.setdefault("attributes", self.DEFAULT_TABLE_ATTRIBUTES) - report = AbsoluteReport(**kwargs) - outfile = get_experiment_name() + "." + report.output_format - self.add_report(report, outfile=outfile) - - def add_comparison_table_step(self, **kwargs): - """Add a step that makes pairwise revision comparisons. - - Create comparative reports for all pairs of Fast Downward - revision triples. Each report pairs up the runs of the same - config and lists the two absolute attribute values and their - difference for all attributes in kwargs["attributes"]. - - All *kwargs* will be passed to the CompareRevisionsReport - class. If the keyword argument *attributes* is not - specified, a default list of attributes is used. :: - - exp.add_comparison_table_step(attributes=["coverage"]) - - """ - kwargs.setdefault("attributes", self.DEFAULT_TABLE_ATTRIBUTES) - - def make_comparison_tables(): - for rev1, rev2 in itertools.combinations(self.revision_nicks, 2): - report = CompareRevisionsReport(rev1, rev2, **kwargs) - outfile = os.path.join(self.eval_dir, - "%s-%s-compare.html" % (rev1, rev2)) - report(self.eval_dir, outfile) - - self.add_step(Step("make-comparison-tables", make_comparison_tables)) - - def add_scatter_plot_step(self, attributes=None): - """Add a step that creates scatter plots for all revision pairs. - - Create a scatter plot for each combination of attribute, - configuration and revision pair. If *attributes* is not - specified, a list of common scatter plot attributes is used. - For portfolios all attributes except "cost", "coverage" and - "plan_length" will be ignored. 
:: - - exp.add_scatter_plot_step(attributes=["expansions"]) - - """ - if attributes is None: - attributes = self.DEFAULT_SCATTER_PLOT_ATTRIBUTES - scatter_dir = os.path.join(self.eval_dir, "scatter") - - def is_portfolio(config_nick): - return "fdss" in config_nick - - def make_scatter_plots(): - for config_nick in self._config_nicks: - for rev1, rev2 in itertools.combinations( - self.revision_nicks, 2): - algo1 = "%s-%s" % (rev1, config_nick) - algo2 = "%s-%s" % (rev2, config_nick) - if is_portfolio(config_nick): - valid_attributes = [ - attr for attr in attributes - if attr in self.PORTFOLIO_ATTRIBUTES] - else: - valid_attributes = attributes - for attribute in valid_attributes: - name = "-".join([rev1, rev2, attribute, config_nick]) - print "Make scatter plot for", name - report = ScatterPlotReport( - filter_config=[algo1, algo2], - attributes=[attribute], - get_category=lambda run1, run2: run1["domain"], - legend_location=(1.3, 0.5)) - report(self.eval_dir, os.path.join(scatter_dir, name)) - - self.add_step(Step("make-scatter-plots", make_scatter_plots)) diff --git a/experiments/issue468/issue468.py b/experiments/issue468/issue468.py deleted file mode 100755 index af2b2a54af..0000000000 --- a/experiments/issue468/issue468.py +++ /dev/null @@ -1,22 +0,0 @@ -#! 
/usr/bin/env python -# -*- coding: utf-8 -*- - -from downward import suites - -import common_setup - -CONFIGS = { - 'astar_lmcount_lm_merged_rhw_hm': [ - '--search', - 'astar(lmcount(lm_merged([lm_rhw(),lm_hm(m=1)]),admissible=true),mpd=true)'], -} - -exp = common_setup.IssueExperiment( - search_revisions=["issue468-base", "issue468-v1"], - configs=CONFIGS, - suite=suites.suite_optimal_with_ipc11(), - ) - -exp.add_comparison_table_step() - -exp() diff --git a/experiments/issue469/common_setup.py b/experiments/issue469/common_setup.py deleted file mode 100644 index ebf622a012..0000000000 --- a/experiments/issue469/common_setup.py +++ /dev/null @@ -1,347 +0,0 @@ -# -*- coding: utf-8 -*- - -import itertools -import os -import platform -import sys - -from lab.environments import LocalEnvironment, MaiaEnvironment -from lab.experiment import ARGPARSER -from lab.steps import Step - -from downward.experiments import DownwardExperiment, _get_rev_nick -from downward.checkouts import Translator, Preprocessor, Planner -from downward.reports.absolute import AbsoluteReport -from downward.reports.compare import CompareRevisionsReport -from downward.reports.scatter import ScatterPlotReport - - -def parse_args(): - ARGPARSER.add_argument( - "--test", - choices=["yes", "no", "auto"], - default="auto", - dest="test_run", - help="test experiment locally on a small suite if --test=yes or " - "--test=auto and we are not on a cluster") - return ARGPARSER.parse_args() - -ARGS = parse_args() - - -def get_script(): - """Get file name of main script.""" - import __main__ - return __main__.__file__ - - -def get_script_dir(): - """Get directory of main script. - - Usually a relative directory (depends on how it was called by the user.)""" - return os.path.dirname(get_script()) - - -def get_experiment_name(): - """Get name for experiment. - - Derived from the absolute filename of the main script, e.g. 
- "/ham/spam/eggs.py" => "spam-eggs".""" - script = os.path.abspath(get_script()) - script_dir = os.path.basename(os.path.dirname(script)) - script_base = os.path.splitext(os.path.basename(script))[0] - return "%s-%s" % (script_dir, script_base) - - -def get_data_dir(): - """Get data dir for the experiment. - - This is the subdirectory "data" of the directory containing - the main script.""" - return os.path.join(get_script_dir(), "data", get_experiment_name()) - - -def get_repo_base(): - """Get base directory of the repository, as an absolute path. - - Search upwards in the directory tree from the main script until a - directory with a subdirectory named ".hg" is found. - - Abort if the repo base cannot be found.""" - path = os.path.abspath(get_script_dir()) - while os.path.dirname(path) != path: - if os.path.exists(os.path.join(path, ".hg")): - return path - path = os.path.dirname(path) - sys.exit("repo base could not be found") - - -def is_running_on_cluster(): - node = platform.node() - return (node.endswith("cluster.bc2.ch") or - node.startswith("gkigrid") or - node in ["habakuk", "turtur"]) - - -def is_test_run(): - return ARGS.test_run == "yes" or (ARGS.test_run == "auto" and - not is_running_on_cluster()) - - -class IssueExperiment(DownwardExperiment): - """Wrapper for DownwardExperiment with a few convenience features.""" - - DEFAULT_TEST_SUITE = "gripper:prob01.pddl" - - # TODO: Once we have reference results, we should add "quality". - # TODO: Add something about errors/exit codes. 
- DEFAULT_TABLE_ATTRIBUTES = [ - "cost", - "coverage", - "evaluations", - "expansions", - "expansions_until_last_jump", - "generated", - "memory", - "run_dir", - "score_evaluations", - "score_expansions", - "score_generated", - "score_memory", - "score_search_time", - "score_total_time", - "search_time", - "total_time", - ] - - DEFAULT_SCATTER_PLOT_ATTRIBUTES = [ - "evaluations", - "expansions", - "expansions_until_last_jump", - "initial_h_value", - "memory", - "search_time", - "total_time", - ] - - PORTFOLIO_ATTRIBUTES = [ - "cost", - "coverage", - "plan_length", - ] - - def __init__(self, configs, suite, grid_priority=None, path=None, - repo=None, revisions=None, search_revisions=None, - test_suite=None, **kwargs): - """Create a DownwardExperiment with some convenience features. - - *configs* must be a non-empty dict of {nick: cmdline} pairs - that sets the planner configurations to test. :: - - IssueExperiment(configs={ - "lmcut": ["--search", "astar(lmcut())"], - "ipdb": ["--search", "astar(ipdb())"]}) - - *suite* sets the benchmarks for the experiment. It must be a - single string or a list of strings specifying domains or - tasks. The downward.suites module has many predefined - suites. :: - - IssueExperiment(suite=["grid", "gripper:prob01.pddl"]) - - from downward import suites - IssueExperiment(suite=suites.suite_all()) - IssueExperiment(suite=suites.suite_satisficing_with_ipc11()) - IssueExperiment(suite=suites.suite_optimal()) - - Use *grid_priority* to set the job priority for cluster - experiments. It must be in the range [-1023, 0] where 0 is the - highest priority. By default the priority is 0. :: - - IssueExperiment(grid_priority=-500) - - If *path* is specified, it must be the path to where the - experiment should be built (e.g. - /home/john/experiments/issue123/exp01/). If omitted, the - experiment path is derived automatically from the main - script's filename. 
Example:: - - script = experiments/issue123/exp01.py --> - path = experiments/issue123/data/issue123-exp01/ - - If *repo* is specified, it must be the path to the root of a - local Fast Downward repository. If omitted, the repository - is derived automatically from the main script's path. Example:: - - script = /path/to/fd-repo/experiments/issue123/exp01.py --> - repo = /path/to/fd-repo - - If *revisions* is specified, it should be a non-empty - list of revisions, which specify which planner versions to use - in the experiment. The same versions are used for translator, - preprocessor and search. :: - - IssueExperiment(revisions=["issue123", "4b3d581643"]) - - If *search_revisions* is specified, it should be a non-empty - list of revisions, which specify which search component - versions to use in the experiment. All runs use the - translator and preprocessor component of the first - revision. :: - - IssueExperiment(search_revisions=["default", "issue123"]) - - If you really need to specify the (translator, preprocessor, - planner) triples manually, use the *combinations* parameter - from the base class (might be deprecated soon). The options - *revisions*, *search_revisions* and *combinations* can be - freely mixed, but at least one of them must be given. - - Specify *test_suite* to set the benchmarks for experiment test - runs. By default the first gripper task is used. 
- - IssueExperiment(test_suite=["depot:pfile1", "tpp:p01.pddl"]) - - """ - - if is_test_run(): - kwargs["environment"] = LocalEnvironment() - suite = test_suite or self.DEFAULT_TEST_SUITE - elif "environment" not in kwargs: - kwargs["environment"] = MaiaEnvironment(priority=grid_priority) - - if path is None: - path = get_data_dir() - - if repo is None: - repo = get_repo_base() - - kwargs.setdefault("combinations", []) - - if not any([revisions, search_revisions, kwargs["combinations"]]): - raise ValueError('At least one of "revisions", "search_revisions" ' - 'or "combinations" must be given') - - if revisions: - kwargs["combinations"].extend([ - (Translator(repo, rev), - Preprocessor(repo, rev), - Planner(repo, rev)) - for rev in revisions]) - - if search_revisions: - base_rev = search_revisions[0] - # Use the same nick for all parts to get short revision nick. - kwargs["combinations"].extend([ - (Translator(repo, base_rev, nick=rev), - Preprocessor(repo, base_rev, nick=rev), - Planner(repo, rev, nick=rev)) - for rev in search_revisions]) - - DownwardExperiment.__init__(self, path=path, repo=repo, **kwargs) - - self._config_nicks = [] - for nick, config in configs.items(): - self.add_config(nick, config) - - self.add_suite(suite) - - @property - def revision_nicks(self): - # TODO: Once the add_algorithm() API is available we should get - # rid of the call to _get_rev_nick() and avoid inspecting the - # list of combinations by setting and saving the algorithm nicks. - return [_get_rev_nick(*combo) for combo in self.combinations] - - def add_config(self, nick, config, timeout=None): - DownwardExperiment.add_config(self, nick, config, timeout=timeout) - self._config_nicks.append(nick) - - def add_absolute_report_step(self, **kwargs): - """Add step that makes an absolute report. - - Absolute reports are useful for experiments that don't - compare revisions. - - The report is written to the experiment evaluation directory. 
- - All *kwargs* will be passed to the AbsoluteReport class. If - the keyword argument *attributes* is not specified, a - default list of attributes is used. :: - - exp.add_absolute_report_step(attributes=["coverage"]) - - """ - kwargs.setdefault("attributes", self.DEFAULT_TABLE_ATTRIBUTES) - report = AbsoluteReport(**kwargs) - outfile = get_experiment_name() + "." + report.output_format - self.add_report(report, outfile=outfile) - - def add_comparison_table_step(self, **kwargs): - """Add a step that makes pairwise revision comparisons. - - Create comparative reports for all pairs of Fast Downward - revision triples. Each report pairs up the runs of the same - config and lists the two absolute attribute values and their - difference for all attributes in kwargs["attributes"]. - - All *kwargs* will be passed to the CompareRevisionsReport - class. If the keyword argument *attributes* is not - specified, a default list of attributes is used. :: - - exp.add_comparison_table_step(attributes=["coverage"]) - - """ - kwargs.setdefault("attributes", self.DEFAULT_TABLE_ATTRIBUTES) - - def make_comparison_tables(): - for rev1, rev2 in itertools.combinations(self.revision_nicks, 2): - report = CompareRevisionsReport(rev1, rev2, **kwargs) - outfile = os.path.join(self.eval_dir, - "%s-%s-compare.html" % (rev1, rev2)) - report(self.eval_dir, outfile) - - self.add_step(Step("make-comparison-tables", make_comparison_tables)) - - def add_scatter_plot_step(self, attributes=None): - """Add a step that creates scatter plots for all revision pairs. - - Create a scatter plot for each combination of attribute, - configuration and revision pair. If *attributes* is not - specified, a list of common scatter plot attributes is used. - For portfolios all attributes except "cost", "coverage" and - "plan_length" will be ignored. 
:: - - exp.add_scatter_plot_step(attributes=["expansions"]) - - """ - if attributes is None: - attributes = self.DEFAULT_SCATTER_PLOT_ATTRIBUTES - scatter_dir = os.path.join(self.eval_dir, "scatter") - - def is_portfolio(config_nick): - return "fdss" in config_nick - - def make_scatter_plots(): - for config_nick in self._config_nicks: - for rev1, rev2 in itertools.combinations( - self.revision_nicks, 2): - algo1 = "%s-%s" % (rev1, config_nick) - algo2 = "%s-%s" % (rev2, config_nick) - if is_portfolio(config_nick): - valid_attributes = [ - attr for attr in attributes - if attr in self.PORTFOLIO_ATTRIBUTES] - else: - valid_attributes = attributes - for attribute in valid_attributes: - name = "-".join([rev1, rev2, attribute, config_nick]) - print "Make scatter plot for", name - report = ScatterPlotReport( - filter_config=[algo1, algo2], - attributes=[attribute], - get_category=lambda run1, run2: run1["domain"], - legend_location=(1.3, 0.5)) - report(self.eval_dir, os.path.join(scatter_dir, name)) - - self.add_step(Step("make-scatter-plots", make_scatter_plots)) diff --git a/experiments/issue469/issue469.py b/experiments/issue469/issue469.py deleted file mode 100755 index 7af90ca59b..0000000000 --- a/experiments/issue469/issue469.py +++ /dev/null @@ -1,35 +0,0 @@ -#! 
/usr/bin/env python -# -*- coding: utf-8 -*- - -from downward import suites -from lab.reports import Attribute -from lab.suites import suite_all - -import common_setup - -import os - - -exp = common_setup.IssueExperiment( - search_revisions=["issue469-base", "issue469-v1"], - configs={"astar_blind": ["--search", "astar(blind())"]}, - suite=suite_all(), - ) - -parser = os.path.join(common_setup.get_script_dir(), - 'raw_memory_parser.py') -exp.add_search_parser(parser) - -def add_unexplained_errors_as_int(run): - if run.get('error').startswith('unexplained'): - run['unexplained_errors'] = 1 - else: - run['unexplained_errors'] = 0 - return run - -exp.add_absolute_report_step( - attributes=['raw_memory', Attribute('unexplained_errors', absolute=True)], - filter=add_unexplained_errors_as_int -) - -exp() diff --git a/experiments/issue469/raw_memory_parser.py b/experiments/issue469/raw_memory_parser.py deleted file mode 100755 index acfd50d12a..0000000000 --- a/experiments/issue469/raw_memory_parser.py +++ /dev/null @@ -1,15 +0,0 @@ -#! 
/usr/bin/env python - -from lab.parser import Parser - -class RawMemoryParser(Parser): - def __init__(self): - Parser.__init__(self) - self.add_pattern('raw_memory', r'Peak memory: (.+) KB', type=int, required=False) - - -if __name__ == '__main__': - parser = RawMemoryParser() - print 'Running RawMemoryParser parser' - parser.parse() - diff --git a/experiments/issue470/common_setup.py b/experiments/issue470/common_setup.py deleted file mode 100644 index ebf622a012..0000000000 --- a/experiments/issue470/common_setup.py +++ /dev/null @@ -1,347 +0,0 @@ -# -*- coding: utf-8 -*- - -import itertools -import os -import platform -import sys - -from lab.environments import LocalEnvironment, MaiaEnvironment -from lab.experiment import ARGPARSER -from lab.steps import Step - -from downward.experiments import DownwardExperiment, _get_rev_nick -from downward.checkouts import Translator, Preprocessor, Planner -from downward.reports.absolute import AbsoluteReport -from downward.reports.compare import CompareRevisionsReport -from downward.reports.scatter import ScatterPlotReport - - -def parse_args(): - ARGPARSER.add_argument( - "--test", - choices=["yes", "no", "auto"], - default="auto", - dest="test_run", - help="test experiment locally on a small suite if --test=yes or " - "--test=auto and we are not on a cluster") - return ARGPARSER.parse_args() - -ARGS = parse_args() - - -def get_script(): - """Get file name of main script.""" - import __main__ - return __main__.__file__ - - -def get_script_dir(): - """Get directory of main script. - - Usually a relative directory (depends on how it was called by the user.)""" - return os.path.dirname(get_script()) - - -def get_experiment_name(): - """Get name for experiment. - - Derived from the absolute filename of the main script, e.g. 
- "/ham/spam/eggs.py" => "spam-eggs".""" - script = os.path.abspath(get_script()) - script_dir = os.path.basename(os.path.dirname(script)) - script_base = os.path.splitext(os.path.basename(script))[0] - return "%s-%s" % (script_dir, script_base) - - -def get_data_dir(): - """Get data dir for the experiment. - - This is the subdirectory "data" of the directory containing - the main script.""" - return os.path.join(get_script_dir(), "data", get_experiment_name()) - - -def get_repo_base(): - """Get base directory of the repository, as an absolute path. - - Search upwards in the directory tree from the main script until a - directory with a subdirectory named ".hg" is found. - - Abort if the repo base cannot be found.""" - path = os.path.abspath(get_script_dir()) - while os.path.dirname(path) != path: - if os.path.exists(os.path.join(path, ".hg")): - return path - path = os.path.dirname(path) - sys.exit("repo base could not be found") - - -def is_running_on_cluster(): - node = platform.node() - return (node.endswith("cluster.bc2.ch") or - node.startswith("gkigrid") or - node in ["habakuk", "turtur"]) - - -def is_test_run(): - return ARGS.test_run == "yes" or (ARGS.test_run == "auto" and - not is_running_on_cluster()) - - -class IssueExperiment(DownwardExperiment): - """Wrapper for DownwardExperiment with a few convenience features.""" - - DEFAULT_TEST_SUITE = "gripper:prob01.pddl" - - # TODO: Once we have reference results, we should add "quality". - # TODO: Add something about errors/exit codes. 
- DEFAULT_TABLE_ATTRIBUTES = [ - "cost", - "coverage", - "evaluations", - "expansions", - "expansions_until_last_jump", - "generated", - "memory", - "run_dir", - "score_evaluations", - "score_expansions", - "score_generated", - "score_memory", - "score_search_time", - "score_total_time", - "search_time", - "total_time", - ] - - DEFAULT_SCATTER_PLOT_ATTRIBUTES = [ - "evaluations", - "expansions", - "expansions_until_last_jump", - "initial_h_value", - "memory", - "search_time", - "total_time", - ] - - PORTFOLIO_ATTRIBUTES = [ - "cost", - "coverage", - "plan_length", - ] - - def __init__(self, configs, suite, grid_priority=None, path=None, - repo=None, revisions=None, search_revisions=None, - test_suite=None, **kwargs): - """Create a DownwardExperiment with some convenience features. - - *configs* must be a non-empty dict of {nick: cmdline} pairs - that sets the planner configurations to test. :: - - IssueExperiment(configs={ - "lmcut": ["--search", "astar(lmcut())"], - "ipdb": ["--search", "astar(ipdb())"]}) - - *suite* sets the benchmarks for the experiment. It must be a - single string or a list of strings specifying domains or - tasks. The downward.suites module has many predefined - suites. :: - - IssueExperiment(suite=["grid", "gripper:prob01.pddl"]) - - from downward import suites - IssueExperiment(suite=suites.suite_all()) - IssueExperiment(suite=suites.suite_satisficing_with_ipc11()) - IssueExperiment(suite=suites.suite_optimal()) - - Use *grid_priority* to set the job priority for cluster - experiments. It must be in the range [-1023, 0] where 0 is the - highest priority. By default the priority is 0. :: - - IssueExperiment(grid_priority=-500) - - If *path* is specified, it must be the path to where the - experiment should be built (e.g. - /home/john/experiments/issue123/exp01/). If omitted, the - experiment path is derived automatically from the main - script's filename. 
Example:: - - script = experiments/issue123/exp01.py --> - path = experiments/issue123/data/issue123-exp01/ - - If *repo* is specified, it must be the path to the root of a - local Fast Downward repository. If omitted, the repository - is derived automatically from the main script's path. Example:: - - script = /path/to/fd-repo/experiments/issue123/exp01.py --> - repo = /path/to/fd-repo - - If *revisions* is specified, it should be a non-empty - list of revisions, which specify which planner versions to use - in the experiment. The same versions are used for translator, - preprocessor and search. :: - - IssueExperiment(revisions=["issue123", "4b3d581643"]) - - If *search_revisions* is specified, it should be a non-empty - list of revisions, which specify which search component - versions to use in the experiment. All runs use the - translator and preprocessor component of the first - revision. :: - - IssueExperiment(search_revisions=["default", "issue123"]) - - If you really need to specify the (translator, preprocessor, - planner) triples manually, use the *combinations* parameter - from the base class (might be deprecated soon). The options - *revisions*, *search_revisions* and *combinations* can be - freely mixed, but at least one of them must be given. - - Specify *test_suite* to set the benchmarks for experiment test - runs. By default the first gripper task is used. 
- - IssueExperiment(test_suite=["depot:pfile1", "tpp:p01.pddl"]) - - """ - - if is_test_run(): - kwargs["environment"] = LocalEnvironment() - suite = test_suite or self.DEFAULT_TEST_SUITE - elif "environment" not in kwargs: - kwargs["environment"] = MaiaEnvironment(priority=grid_priority) - - if path is None: - path = get_data_dir() - - if repo is None: - repo = get_repo_base() - - kwargs.setdefault("combinations", []) - - if not any([revisions, search_revisions, kwargs["combinations"]]): - raise ValueError('At least one of "revisions", "search_revisions" ' - 'or "combinations" must be given') - - if revisions: - kwargs["combinations"].extend([ - (Translator(repo, rev), - Preprocessor(repo, rev), - Planner(repo, rev)) - for rev in revisions]) - - if search_revisions: - base_rev = search_revisions[0] - # Use the same nick for all parts to get short revision nick. - kwargs["combinations"].extend([ - (Translator(repo, base_rev, nick=rev), - Preprocessor(repo, base_rev, nick=rev), - Planner(repo, rev, nick=rev)) - for rev in search_revisions]) - - DownwardExperiment.__init__(self, path=path, repo=repo, **kwargs) - - self._config_nicks = [] - for nick, config in configs.items(): - self.add_config(nick, config) - - self.add_suite(suite) - - @property - def revision_nicks(self): - # TODO: Once the add_algorithm() API is available we should get - # rid of the call to _get_rev_nick() and avoid inspecting the - # list of combinations by setting and saving the algorithm nicks. - return [_get_rev_nick(*combo) for combo in self.combinations] - - def add_config(self, nick, config, timeout=None): - DownwardExperiment.add_config(self, nick, config, timeout=timeout) - self._config_nicks.append(nick) - - def add_absolute_report_step(self, **kwargs): - """Add step that makes an absolute report. - - Absolute reports are useful for experiments that don't - compare revisions. - - The report is written to the experiment evaluation directory. 
- - All *kwargs* will be passed to the AbsoluteReport class. If - the keyword argument *attributes* is not specified, a - default list of attributes is used. :: - - exp.add_absolute_report_step(attributes=["coverage"]) - - """ - kwargs.setdefault("attributes", self.DEFAULT_TABLE_ATTRIBUTES) - report = AbsoluteReport(**kwargs) - outfile = get_experiment_name() + "." + report.output_format - self.add_report(report, outfile=outfile) - - def add_comparison_table_step(self, **kwargs): - """Add a step that makes pairwise revision comparisons. - - Create comparative reports for all pairs of Fast Downward - revision triples. Each report pairs up the runs of the same - config and lists the two absolute attribute values and their - difference for all attributes in kwargs["attributes"]. - - All *kwargs* will be passed to the CompareRevisionsReport - class. If the keyword argument *attributes* is not - specified, a default list of attributes is used. :: - - exp.add_comparison_table_step(attributes=["coverage"]) - - """ - kwargs.setdefault("attributes", self.DEFAULT_TABLE_ATTRIBUTES) - - def make_comparison_tables(): - for rev1, rev2 in itertools.combinations(self.revision_nicks, 2): - report = CompareRevisionsReport(rev1, rev2, **kwargs) - outfile = os.path.join(self.eval_dir, - "%s-%s-compare.html" % (rev1, rev2)) - report(self.eval_dir, outfile) - - self.add_step(Step("make-comparison-tables", make_comparison_tables)) - - def add_scatter_plot_step(self, attributes=None): - """Add a step that creates scatter plots for all revision pairs. - - Create a scatter plot for each combination of attribute, - configuration and revision pair. If *attributes* is not - specified, a list of common scatter plot attributes is used. - For portfolios all attributes except "cost", "coverage" and - "plan_length" will be ignored. 
:: - - exp.add_scatter_plot_step(attributes=["expansions"]) - - """ - if attributes is None: - attributes = self.DEFAULT_SCATTER_PLOT_ATTRIBUTES - scatter_dir = os.path.join(self.eval_dir, "scatter") - - def is_portfolio(config_nick): - return "fdss" in config_nick - - def make_scatter_plots(): - for config_nick in self._config_nicks: - for rev1, rev2 in itertools.combinations( - self.revision_nicks, 2): - algo1 = "%s-%s" % (rev1, config_nick) - algo2 = "%s-%s" % (rev2, config_nick) - if is_portfolio(config_nick): - valid_attributes = [ - attr for attr in attributes - if attr in self.PORTFOLIO_ATTRIBUTES] - else: - valid_attributes = attributes - for attribute in valid_attributes: - name = "-".join([rev1, rev2, attribute, config_nick]) - print "Make scatter plot for", name - report = ScatterPlotReport( - filter_config=[algo1, algo2], - attributes=[attribute], - get_category=lambda run1, run2: run1["domain"], - legend_location=(1.3, 0.5)) - report(self.eval_dir, os.path.join(scatter_dir, name)) - - self.add_step(Step("make-scatter-plots", make_scatter_plots)) diff --git a/experiments/issue470/issue470-cg.py b/experiments/issue470/issue470-cg.py deleted file mode 100755 index 52acef9e88..0000000000 --- a/experiments/issue470/issue470-cg.py +++ /dev/null @@ -1,27 +0,0 @@ -#! 
/usr/bin/env python -# -*- coding: utf-8 -*- - -from downward import suites - -import common_setup - -CONFIGS = { - "cg-lazy-nopref": [ - "--heuristic", "h=cg()", - "--search", "lazy_greedy(h)" - ], - "cg-lazy-pref": [ - "--heuristic", "h=cg()", - "--search", "lazy_greedy(h, preferred=[h])" - ], - } - -exp = common_setup.IssueExperiment( - search_revisions=["issue470-base", "issue470-v1"], - configs=CONFIGS, - suite=suites.suite_satisficing_with_ipc11(), - ) - -exp.add_comparison_table_step() - -exp() diff --git a/experiments/issue470/issue470.py b/experiments/issue470/issue470.py deleted file mode 100755 index f444777405..0000000000 --- a/experiments/issue470/issue470.py +++ /dev/null @@ -1,45 +0,0 @@ -#! /usr/bin/env python -# -*- coding: utf-8 -*- - -from downward import suites - -import common_setup - -CONFIGS = { - 'astar_merge_and_shrink_bisim': [ - '--search', - 'astar(merge_and_shrink(' - + 'merge_strategy=merge_linear(variable_order=reverse_level),' - + 'shrink_strategy=shrink_bisimulation(max_states=200000,greedy=false,' - + 'group_by_h=true)))'], - 'astar_merge_and_shrink_greedy_bisim': [ - '--search', - 'astar(merge_and_shrink(' - + 'merge_strategy=merge_linear(variable_order=reverse_level),' - + 'shrink_strategy=shrink_bisimulation(max_states=infinity,threshold=1,' - + 'greedy=true,group_by_h=false)))'], - 'astar_merge_and_shrink_dfp_bisim': [ - '--search', - 'astar(merge_and_shrink(merge_strategy=merge_dfp,' - + 'shrink_strategy=shrink_bisimulation(max_states=50000,threshold=1,' - + 'greedy=false,group_by_h=true)))'], - 'astar_ipdb': [ - '--search', - 'astar(ipdb())'], - 'astar_pdb': [ - '--search', - 'astar(pdb())'], - 'astar_gapdb': [ - '--search', - 'astar(gapdb())'], -} - -exp = common_setup.IssueExperiment( - search_revisions=["issue470-base", "issue470-v1"], - configs=CONFIGS, - suite=suites.suite_optimal_with_ipc11(), - ) - -exp.add_comparison_table_step() - -exp() diff --git a/experiments/issue479/common_setup.py 
b/experiments/issue479/common_setup.py deleted file mode 100644 index ebf622a012..0000000000 --- a/experiments/issue479/common_setup.py +++ /dev/null @@ -1,347 +0,0 @@ -# -*- coding: utf-8 -*- - -import itertools -import os -import platform -import sys - -from lab.environments import LocalEnvironment, MaiaEnvironment -from lab.experiment import ARGPARSER -from lab.steps import Step - -from downward.experiments import DownwardExperiment, _get_rev_nick -from downward.checkouts import Translator, Preprocessor, Planner -from downward.reports.absolute import AbsoluteReport -from downward.reports.compare import CompareRevisionsReport -from downward.reports.scatter import ScatterPlotReport - - -def parse_args(): - ARGPARSER.add_argument( - "--test", - choices=["yes", "no", "auto"], - default="auto", - dest="test_run", - help="test experiment locally on a small suite if --test=yes or " - "--test=auto and we are not on a cluster") - return ARGPARSER.parse_args() - -ARGS = parse_args() - - -def get_script(): - """Get file name of main script.""" - import __main__ - return __main__.__file__ - - -def get_script_dir(): - """Get directory of main script. - - Usually a relative directory (depends on how it was called by the user.)""" - return os.path.dirname(get_script()) - - -def get_experiment_name(): - """Get name for experiment. - - Derived from the absolute filename of the main script, e.g. - "/ham/spam/eggs.py" => "spam-eggs".""" - script = os.path.abspath(get_script()) - script_dir = os.path.basename(os.path.dirname(script)) - script_base = os.path.splitext(os.path.basename(script))[0] - return "%s-%s" % (script_dir, script_base) - - -def get_data_dir(): - """Get data dir for the experiment. - - This is the subdirectory "data" of the directory containing - the main script.""" - return os.path.join(get_script_dir(), "data", get_experiment_name()) - - -def get_repo_base(): - """Get base directory of the repository, as an absolute path. 
- - Search upwards in the directory tree from the main script until a - directory with a subdirectory named ".hg" is found. - - Abort if the repo base cannot be found.""" - path = os.path.abspath(get_script_dir()) - while os.path.dirname(path) != path: - if os.path.exists(os.path.join(path, ".hg")): - return path - path = os.path.dirname(path) - sys.exit("repo base could not be found") - - -def is_running_on_cluster(): - node = platform.node() - return (node.endswith("cluster.bc2.ch") or - node.startswith("gkigrid") or - node in ["habakuk", "turtur"]) - - -def is_test_run(): - return ARGS.test_run == "yes" or (ARGS.test_run == "auto" and - not is_running_on_cluster()) - - -class IssueExperiment(DownwardExperiment): - """Wrapper for DownwardExperiment with a few convenience features.""" - - DEFAULT_TEST_SUITE = "gripper:prob01.pddl" - - # TODO: Once we have reference results, we should add "quality". - # TODO: Add something about errors/exit codes. - DEFAULT_TABLE_ATTRIBUTES = [ - "cost", - "coverage", - "evaluations", - "expansions", - "expansions_until_last_jump", - "generated", - "memory", - "run_dir", - "score_evaluations", - "score_expansions", - "score_generated", - "score_memory", - "score_search_time", - "score_total_time", - "search_time", - "total_time", - ] - - DEFAULT_SCATTER_PLOT_ATTRIBUTES = [ - "evaluations", - "expansions", - "expansions_until_last_jump", - "initial_h_value", - "memory", - "search_time", - "total_time", - ] - - PORTFOLIO_ATTRIBUTES = [ - "cost", - "coverage", - "plan_length", - ] - - def __init__(self, configs, suite, grid_priority=None, path=None, - repo=None, revisions=None, search_revisions=None, - test_suite=None, **kwargs): - """Create a DownwardExperiment with some convenience features. - - *configs* must be a non-empty dict of {nick: cmdline} pairs - that sets the planner configurations to test. 
:: - - IssueExperiment(configs={ - "lmcut": ["--search", "astar(lmcut())"], - "ipdb": ["--search", "astar(ipdb())"]}) - - *suite* sets the benchmarks for the experiment. It must be a - single string or a list of strings specifying domains or - tasks. The downward.suites module has many predefined - suites. :: - - IssueExperiment(suite=["grid", "gripper:prob01.pddl"]) - - from downward import suites - IssueExperiment(suite=suites.suite_all()) - IssueExperiment(suite=suites.suite_satisficing_with_ipc11()) - IssueExperiment(suite=suites.suite_optimal()) - - Use *grid_priority* to set the job priority for cluster - experiments. It must be in the range [-1023, 0] where 0 is the - highest priority. By default the priority is 0. :: - - IssueExperiment(grid_priority=-500) - - If *path* is specified, it must be the path to where the - experiment should be built (e.g. - /home/john/experiments/issue123/exp01/). If omitted, the - experiment path is derived automatically from the main - script's filename. Example:: - - script = experiments/issue123/exp01.py --> - path = experiments/issue123/data/issue123-exp01/ - - If *repo* is specified, it must be the path to the root of a - local Fast Downward repository. If omitted, the repository - is derived automatically from the main script's path. Example:: - - script = /path/to/fd-repo/experiments/issue123/exp01.py --> - repo = /path/to/fd-repo - - If *revisions* is specified, it should be a non-empty - list of revisions, which specify which planner versions to use - in the experiment. The same versions are used for translator, - preprocessor and search. :: - - IssueExperiment(revisions=["issue123", "4b3d581643"]) - - If *search_revisions* is specified, it should be a non-empty - list of revisions, which specify which search component - versions to use in the experiment. All runs use the - translator and preprocessor component of the first - revision. 
:: - - IssueExperiment(search_revisions=["default", "issue123"]) - - If you really need to specify the (translator, preprocessor, - planner) triples manually, use the *combinations* parameter - from the base class (might be deprecated soon). The options - *revisions*, *search_revisions* and *combinations* can be - freely mixed, but at least one of them must be given. - - Specify *test_suite* to set the benchmarks for experiment test - runs. By default the first gripper task is used. - - IssueExperiment(test_suite=["depot:pfile1", "tpp:p01.pddl"]) - - """ - - if is_test_run(): - kwargs["environment"] = LocalEnvironment() - suite = test_suite or self.DEFAULT_TEST_SUITE - elif "environment" not in kwargs: - kwargs["environment"] = MaiaEnvironment(priority=grid_priority) - - if path is None: - path = get_data_dir() - - if repo is None: - repo = get_repo_base() - - kwargs.setdefault("combinations", []) - - if not any([revisions, search_revisions, kwargs["combinations"]]): - raise ValueError('At least one of "revisions", "search_revisions" ' - 'or "combinations" must be given') - - if revisions: - kwargs["combinations"].extend([ - (Translator(repo, rev), - Preprocessor(repo, rev), - Planner(repo, rev)) - for rev in revisions]) - - if search_revisions: - base_rev = search_revisions[0] - # Use the same nick for all parts to get short revision nick. - kwargs["combinations"].extend([ - (Translator(repo, base_rev, nick=rev), - Preprocessor(repo, base_rev, nick=rev), - Planner(repo, rev, nick=rev)) - for rev in search_revisions]) - - DownwardExperiment.__init__(self, path=path, repo=repo, **kwargs) - - self._config_nicks = [] - for nick, config in configs.items(): - self.add_config(nick, config) - - self.add_suite(suite) - - @property - def revision_nicks(self): - # TODO: Once the add_algorithm() API is available we should get - # rid of the call to _get_rev_nick() and avoid inspecting the - # list of combinations by setting and saving the algorithm nicks. 
- return [_get_rev_nick(*combo) for combo in self.combinations] - - def add_config(self, nick, config, timeout=None): - DownwardExperiment.add_config(self, nick, config, timeout=timeout) - self._config_nicks.append(nick) - - def add_absolute_report_step(self, **kwargs): - """Add step that makes an absolute report. - - Absolute reports are useful for experiments that don't - compare revisions. - - The report is written to the experiment evaluation directory. - - All *kwargs* will be passed to the AbsoluteReport class. If - the keyword argument *attributes* is not specified, a - default list of attributes is used. :: - - exp.add_absolute_report_step(attributes=["coverage"]) - - """ - kwargs.setdefault("attributes", self.DEFAULT_TABLE_ATTRIBUTES) - report = AbsoluteReport(**kwargs) - outfile = get_experiment_name() + "." + report.output_format - self.add_report(report, outfile=outfile) - - def add_comparison_table_step(self, **kwargs): - """Add a step that makes pairwise revision comparisons. - - Create comparative reports for all pairs of Fast Downward - revision triples. Each report pairs up the runs of the same - config and lists the two absolute attribute values and their - difference for all attributes in kwargs["attributes"]. - - All *kwargs* will be passed to the CompareRevisionsReport - class. If the keyword argument *attributes* is not - specified, a default list of attributes is used. 
:: - - exp.add_comparison_table_step(attributes=["coverage"]) - - """ - kwargs.setdefault("attributes", self.DEFAULT_TABLE_ATTRIBUTES) - - def make_comparison_tables(): - for rev1, rev2 in itertools.combinations(self.revision_nicks, 2): - report = CompareRevisionsReport(rev1, rev2, **kwargs) - outfile = os.path.join(self.eval_dir, - "%s-%s-compare.html" % (rev1, rev2)) - report(self.eval_dir, outfile) - - self.add_step(Step("make-comparison-tables", make_comparison_tables)) - - def add_scatter_plot_step(self, attributes=None): - """Add a step that creates scatter plots for all revision pairs. - - Create a scatter plot for each combination of attribute, - configuration and revision pair. If *attributes* is not - specified, a list of common scatter plot attributes is used. - For portfolios all attributes except "cost", "coverage" and - "plan_length" will be ignored. :: - - exp.add_scatter_plot_step(attributes=["expansions"]) - - """ - if attributes is None: - attributes = self.DEFAULT_SCATTER_PLOT_ATTRIBUTES - scatter_dir = os.path.join(self.eval_dir, "scatter") - - def is_portfolio(config_nick): - return "fdss" in config_nick - - def make_scatter_plots(): - for config_nick in self._config_nicks: - for rev1, rev2 in itertools.combinations( - self.revision_nicks, 2): - algo1 = "%s-%s" % (rev1, config_nick) - algo2 = "%s-%s" % (rev2, config_nick) - if is_portfolio(config_nick): - valid_attributes = [ - attr for attr in attributes - if attr in self.PORTFOLIO_ATTRIBUTES] - else: - valid_attributes = attributes - for attribute in valid_attributes: - name = "-".join([rev1, rev2, attribute, config_nick]) - print "Make scatter plot for", name - report = ScatterPlotReport( - filter_config=[algo1, algo2], - attributes=[attribute], - get_category=lambda run1, run2: run1["domain"], - legend_location=(1.3, 0.5)) - report(self.eval_dir, os.path.join(scatter_dir, name)) - - self.add_step(Step("make-scatter-plots", make_scatter_plots)) diff --git 
a/experiments/issue479/issue479-5min.py b/experiments/issue479/issue479-5min.py deleted file mode 100755 index 5e3c01a0d4..0000000000 --- a/experiments/issue479/issue479-5min.py +++ /dev/null @@ -1,24 +0,0 @@ -#! /usr/bin/env python -# -*- coding: utf-8 -*- - -from downward import suites -from lab.reports import Attribute - -import common_setup - -import os - - -exp = common_setup.IssueExperiment( - search_revisions=["issue479-v2"], - configs={ - 'dfp-b-50k': ['--search', 'astar(merge_and_shrink(shrink_strategy=shrink_bisimulation(max_states=100000,threshold=1,greedy=false),merge_strategy=merge_dfp(),label_reduction=label_reduction(before_shrinking=true, before_merging=false)))'], - 'blind': ['--search', 'astar(blind())'], - }, - suite=['airport'], - limits={"search_time": 300}, - ) - -exp.add_absolute_report_step(attributes=['coverage', 'error', 'run_dir']) - -exp() diff --git a/experiments/issue479/issue479.py b/experiments/issue479/issue479.py deleted file mode 100755 index b6c7ae8d03..0000000000 --- a/experiments/issue479/issue479.py +++ /dev/null @@ -1,23 +0,0 @@ -#! 
/usr/bin/env python -# -*- coding: utf-8 -*- - -from downward import suites -from lab.reports import Attribute - -import common_setup - -import os - - -exp = common_setup.IssueExperiment( - search_revisions=["issue479-v2"], - configs={ - 'dfp-b-50k': ['--search', 'astar(merge_and_shrink(shrink_strategy=shrink_bisimulation(max_states=100000,threshold=1,greedy=false),merge_strategy=merge_dfp(),label_reduction=label_reduction(before_shrinking=true, before_merging=false)))'], - 'blind': ['--search', 'astar(blind())'], - }, - suite=['airport'], - ) - -exp.add_absolute_report_step(attributes=['coverage', 'error', 'run_dir']) - -exp() diff --git a/experiments/issue481/common_setup.py b/experiments/issue481/common_setup.py deleted file mode 100644 index 595d4e5cea..0000000000 --- a/experiments/issue481/common_setup.py +++ /dev/null @@ -1,341 +0,0 @@ -# -*- coding: utf-8 -*- - -import itertools -import os -import platform -import sys - -from lab.environments import LocalEnvironment, MaiaEnvironment -from lab.experiment import ARGPARSER -from lab.steps import Step -from lab import tools - -from downward.experiment import FastDownwardExperiment -from downward.reports.absolute import AbsoluteReport -from downward.reports.compare import CompareConfigsReport -from downward.reports.scatter import ScatterPlotReport - - -def parse_args(): - ARGPARSER.add_argument( - "--test", - choices=["yes", "no", "auto"], - default="auto", - dest="test_run", - help="test experiment locally on a small suite if --test=yes or " - "--test=auto and we are not on a cluster") - return ARGPARSER.parse_args() - -ARGS = parse_args() - - -def get_script(): - """Get file name of main script.""" - return tools.get_script_path() - - -def get_script_dir(): - """Get directory of main script. - - Usually a relative directory (depends on how it was called by the user.)""" - return os.path.dirname(get_script()) - - -def get_experiment_name(): - """Get name for experiment. 
- - Derived from the absolute filename of the main script, e.g. - "/ham/spam/eggs.py" => "spam-eggs".""" - script = os.path.abspath(get_script()) - script_dir = os.path.basename(os.path.dirname(script)) - script_base = os.path.splitext(os.path.basename(script))[0] - return "%s-%s" % (script_dir, script_base) - - -def get_data_dir(): - """Get data dir for the experiment. - - This is the subdirectory "data" of the directory containing - the main script.""" - return os.path.join(get_script_dir(), "data", get_experiment_name()) - - -def get_repo_base(): - """Get base directory of the repository, as an absolute path. - - Search upwards in the directory tree from the main script until a - directory with a subdirectory named ".hg" is found. - - Abort if the repo base cannot be found.""" - path = os.path.abspath(get_script_dir()) - while os.path.dirname(path) != path: - if os.path.exists(os.path.join(path, ".hg")): - return path - path = os.path.dirname(path) - sys.exit("repo base could not be found") - - -def is_running_on_cluster(): - node = platform.node() - return ( - "cluster" in node or - node.startswith("gkigrid") or - node in ["habakuk", "turtur"]) - - -def is_test_run(): - return ARGS.test_run == "yes" or ( - ARGS.test_run == "auto" and not is_running_on_cluster()) - - -def get_algo_nick(revision, config_nick): - return "{revision}-{config_nick}".format(**locals()) - - -class IssueConfig(object): - """Hold information about a planner configuration. - - See FastDownwardExperiment.add_algorithm() for documentation of the - constructor's options. 
- - """ - def __init__(self, nick, component_options, - build_options=None, driver_options=None): - self.nick = nick - self.component_options = component_options - self.build_options = build_options - self.driver_options = driver_options - - -class IssueExperiment(FastDownwardExperiment): - """Wrapper for FastDownwardExperiment with a few convenience features.""" - - DEFAULT_TEST_SUITE = "gripper:prob01.pddl" - - DEFAULT_TABLE_ATTRIBUTES = [ - "cost", - "coverage", - "error", - "evaluations", - "expansions", - "expansions_until_last_jump", - "generated", - "memory", - "quality", - "run_dir", - "score_evaluations", - "score_expansions", - "score_generated", - "score_memory", - "score_search_time", - "score_total_time", - "search_time", - "total_time", - ] - - DEFAULT_SCATTER_PLOT_ATTRIBUTES = [ - "evaluations", - "expansions", - "expansions_until_last_jump", - "initial_h_value", - "memory", - "search_time", - "total_time", - ] - - PORTFOLIO_ATTRIBUTES = [ - "cost", - "coverage", - "error", - "plan_length", - "run_dir", - ] - - def __init__(self, revisions, configs, suite, grid_priority=None, - path=None, test_suite=None, email=None, **kwargs): - """Create a DownwardExperiment with some convenience features. - - If *revisions* is specified, it should be a non-empty - list of revisions, which specify which planner versions to use - in the experiment. The same versions are used for translator, - preprocessor and search. :: - - IssueExperiment(revisions=["issue123", "4b3d581643"], ...) - - *configs* must be a non-empty list of IssueConfig objects. :: - - IssueExperiment(..., configs=[ - IssueConfig("ff", ["--search", "eager_greedy(ff())"]), - IssueConfig( - "lama", [], - driver_options=["--alias", "seq-sat-lama-2011"]), - ]) - - *suite* sets the benchmarks for the experiment. It must be a - single string or a list of strings specifying domains or - tasks. The downward.suites module has many predefined - suites. 
:: - - IssueExperiment(..., suite=["grid", "gripper:prob01.pddl"]) - - from downward import suites - IssueExperiment(..., suite=suites.suite_all()) - IssueExperiment(..., suite=suites.suite_satisficing_with_ipc11()) - IssueExperiment(..., suite=suites.suite_optimal()) - - Use *grid_priority* to set the job priority for cluster - experiments. It must be in the range [-1023, 0] where 0 is the - highest priority. By default the priority is 0. :: - - IssueExperiment(..., grid_priority=-500) - - If *path* is specified, it must be the path to where the - experiment should be built (e.g. - /home/john/experiments/issue123/exp01/). If omitted, the - experiment path is derived automatically from the main - script's filename. Example:: - - script = experiments/issue123/exp01.py --> - path = experiments/issue123/data/issue123-exp01/ - - Specify *test_suite* to set the benchmarks for experiment test - runs. By default the first gripper task is used. - - IssueExperiment(..., test_suite=["depot:pfile1", "tpp:p01.pddl"]) - - If *email* is specified, it should be an email address. This - email address will be notified upon completion of the experiments - if it is run on the cluster. 
- """ - - if is_test_run(): - kwargs["environment"] = LocalEnvironment() - suite = test_suite or self.DEFAULT_TEST_SUITE - elif "environment" not in kwargs: - kwargs["environment"] = MaiaEnvironment( - priority=grid_priority, email=email) - - path = path or get_data_dir() - - FastDownwardExperiment.__init__(self, path=path, **kwargs) - - repo = get_repo_base() - for rev in revisions: - for config in configs: - self.add_algorithm( - get_algo_nick(rev, config.nick), - repo, - rev, - config.component_options, - build_options=config.build_options, - driver_options=config.driver_options) - - self.add_suite(os.path.join(repo, "benchmarks"), suite) - - self._revisions = revisions - self._configs = configs - - @classmethod - def _is_portfolio(cls, config_nick): - return "fdss" in config_nick - - @classmethod - def get_supported_attributes(cls, config_nick, attributes): - if cls._is_portfolio(config_nick): - return [attr for attr in attributes - if attr in cls.PORTFOLIO_ATTRIBUTES] - return attributes - - def add_absolute_report_step(self, **kwargs): - """Add step that makes an absolute report. - - Absolute reports are useful for experiments that don't - compare revisions. - - The report is written to the experiment evaluation directory. - - All *kwargs* will be passed to the AbsoluteReport class. If - the keyword argument *attributes* is not specified, a - default list of attributes is used. :: - - exp.add_absolute_report_step(attributes=["coverage"]) - - """ - kwargs.setdefault("attributes", self.DEFAULT_TABLE_ATTRIBUTES) - report = AbsoluteReport(**kwargs) - outfile = get_experiment_name() + "." + report.output_format - self.add_report(report, outfile=outfile) - - def add_comparison_table_step(self, **kwargs): - """Add a step that makes pairwise revision comparisons. - - Create comparative reports for all pairs of Fast Downward - revisions. 
Each report pairs up the runs of the same config and - lists the two absolute attribute values and their difference - for all attributes in kwargs["attributes"]. - - All *kwargs* will be passed to the CompareConfigsReport class. - If the keyword argument *attributes* is not specified, a - default list of attributes is used. :: - - exp.add_comparison_table_step(attributes=["coverage"]) - - """ - kwargs.setdefault("attributes", self.DEFAULT_TABLE_ATTRIBUTES) - - def make_comparison_tables(): - for rev1, rev2 in itertools.combinations(self._revisions, 2): - compared_configs = [] - for config in self._configs: - config_nick = config.nick - compared_configs.append(( - "{rev1}-{config_nick}".format(**locals()), - "{rev2}-{config_nick}".format(**locals()), - "Diff ({config_nick})".format(**locals()))) - report = CompareConfigsReport(compared_configs, **kwargs) - outfile = os.path.join( - self.eval_dir, - "{name}-{rev1}-{rev2}-compare.html".format( - name=self.name, **locals())) - report(self.eval_dir, outfile) - - self.add_step(Step("make-comparison-tables", make_comparison_tables)) - - def add_scatter_plot_step(self, attributes=None): - """Add a step that creates scatter plots for all revision pairs. - - Create a scatter plot for each combination of attribute, - configuration and revisions pair. If *attributes* is not - specified, a list of common scatter plot attributes is used. - For portfolios all attributes except "cost", "coverage" and - "plan_length" will be ignored. 
:: - - exp.add_scatter_plot_step(attributes=["expansions"]) - - """ - if attributes is None: - attributes = self.DEFAULT_SCATTER_PLOT_ATTRIBUTES - scatter_dir = os.path.join(self.eval_dir, "scatter") - - def make_scatter_plot(config_nick, rev1, rev2, attribute): - name = "-".join([self.name, rev1, rev2, attribute, config_nick]) - print "Make scatter plot for", name - algo1 = "%s-%s" % (rev1, config_nick) - algo2 = "%s-%s" % (rev2, config_nick) - report = ScatterPlotReport( - filter_config=[algo1, algo2], - attributes=[attribute], - get_category=lambda run1, run2: run1["domain"], - legend_location=(1.3, 0.5)) - report( - self.eval_dir, - os.path.join(scatter_dir, rev1 + "-" + rev2, name)) - - def make_scatter_plots(): - for config in self._configs: - for rev1, rev2 in itertools.combinations(self._revisions, 2): - for attribute in self.get_supported_attributes( - config.nick, attributes): - make_scatter_plot(config.nick, rev1, rev2, attribute) - - self.add_step(Step("make-scatter-plots", make_scatter_plots)) diff --git a/experiments/issue481/v1-lama.py b/experiments/issue481/v1-lama.py deleted file mode 100755 index 3443228336..0000000000 --- a/experiments/issue481/v1-lama.py +++ /dev/null @@ -1,25 +0,0 @@ -#! /usr/bin/env python -# -*- coding: utf-8 -*- - -from downward import suites - -from common_setup import IssueConfig, IssueExperiment - - -REVS = ["issue481-base", "issue481-v1"] -SUITE = suites.suite_satisficing_with_ipc11() - -CONFIGS = [ - IssueConfig("lama", [], driver_options=["--alias", "seq-sat-lama-2011"]), -] - -exp = IssueExperiment( - revisions=REVS, - configs=CONFIGS, - suite=SUITE, - email="malte.helmert@unibas.ch" -) - -exp.add_comparison_table_step() - -exp() diff --git a/experiments/issue481/v1-opt-test.py b/experiments/issue481/v1-opt-test.py deleted file mode 100755 index 3238ce3f6c..0000000000 --- a/experiments/issue481/v1-opt-test.py +++ /dev/null @@ -1,29 +0,0 @@ -#! 
/usr/bin/env python -# -*- coding: utf-8 -*- - -from downward import suites - -from common_setup import IssueConfig, IssueExperiment - - -REVS = ["issue481-base", "issue481-v1"] -SUITE = suites.suite_optimal_with_ipc11() - -CONFIGS = [ - # Greedy (tests single and alternating open lists) - IssueConfig("astar_lmcut", [ - "--search", - "astar(lmcut())" - ]), -] - -exp = IssueExperiment( - revisions=REVS, - configs=CONFIGS, - suite=SUITE, - email="malte.helmert@unibas.ch" -) - -exp.add_comparison_table_step() - -exp() diff --git a/experiments/issue481/v1-sat-test.py b/experiments/issue481/v1-sat-test.py deleted file mode 100755 index 8118ea18cd..0000000000 --- a/experiments/issue481/v1-sat-test.py +++ /dev/null @@ -1,88 +0,0 @@ -#! /usr/bin/env python -# -*- coding: utf-8 -*- - -from downward import suites - -from common_setup import IssueConfig, IssueExperiment - - -REVS = ["issue481-base", "issue481-v1"] -SUITE = suites.suite_satisficing_with_ipc11() - -CONFIGS = [ - # Greedy (tests single and alternating open lists) - IssueConfig("eager_greedy_ff", [ - "--heuristic", - "h=ff()", - "--search", - "eager_greedy(h, preferred=h)" - ]), - IssueConfig("lazy_greedy_ff", [ - "--heuristic", - "h=ff()", - "--search", - "lazy_greedy(h, preferred=h)" - ]), - # Epsilon Greedy - IssueConfig("lazy_epsilon_greedy_ff", [ - "--heuristic", - "h=ff()", - "--search", - "lazy(epsilon_greedy(h))" - ]), - # Pareto - IssueConfig("lazy_pareto_ff_cea", [ - "--heuristic", - "h1=ff()", - "--heuristic", - "h2=cea()", - "--search", - "lazy(pareto([h1, h2]))" - ]), - # Single Buckets - IssueConfig("lazy_single_buckets_ff", [ - "--heuristic", - "h=ff()", - "--search", - "lazy(single_buckets(h))" - ]), - # Type based (from issue455) - IssueConfig("ff-type-const", [ - "--heuristic", - "hff=ff(cost_type=one)", - "--search", - "lazy(alt([single(hff),single(hff, pref_only=true), type_based([const(1)])])," - "preferred=[hff],cost_type=one)" - ]), - IssueConfig("lama-first", [ - "--heuristic", - 
"hlm,hff=lm_ff_syn(lm_rhw(reasonable_orders=true,lm_cost_type=one,cost_type=one))", - "--search", - "lazy(alt([single(hff),single(hff, pref_only=true), single(hlm), single(hlm, pref_only=true)])," - "preferred=[hff,hlm],cost_type=one)" - ]), - IssueConfig("lama-first-types-ff-g", [ - "--heuristic", - "hlm,hff=lm_ff_syn(lm_rhw(reasonable_orders=true,lm_cost_type=one,cost_type=one))", - "--search", - "lazy(alt([single(hff),single(hff, pref_only=true), single(hlm), single(hlm, pref_only=true), type_based([hff, g()])])," - "preferred=[hff,hlm],cost_type=one)" - ]), - -] - -exp = IssueExperiment( - revisions=REVS, - configs=CONFIGS, - suite=SUITE, - email="malte.helmert@unibas.ch" -) - -# Absolute report commented out because a comparison table is more useful for this issue. -# (It's still in this file because someone might want to use it as a basis.) -# Scatter plots commented out for now because I have no usable matplotlib available. -# exp.add_absolute_report_step() -exp.add_comparison_table_step() -# exp.add_scatter_plot_step() - -exp() diff --git a/experiments/issue482/common_setup.py b/experiments/issue482/common_setup.py deleted file mode 100644 index ebf622a012..0000000000 --- a/experiments/issue482/common_setup.py +++ /dev/null @@ -1,347 +0,0 @@ -# -*- coding: utf-8 -*- - -import itertools -import os -import platform -import sys - -from lab.environments import LocalEnvironment, MaiaEnvironment -from lab.experiment import ARGPARSER -from lab.steps import Step - -from downward.experiments import DownwardExperiment, _get_rev_nick -from downward.checkouts import Translator, Preprocessor, Planner -from downward.reports.absolute import AbsoluteReport -from downward.reports.compare import CompareRevisionsReport -from downward.reports.scatter import ScatterPlotReport - - -def parse_args(): - ARGPARSER.add_argument( - "--test", - choices=["yes", "no", "auto"], - default="auto", - dest="test_run", - help="test experiment locally on a small suite if --test=yes or " - 
"--test=auto and we are not on a cluster") - return ARGPARSER.parse_args() - -ARGS = parse_args() - - -def get_script(): - """Get file name of main script.""" - import __main__ - return __main__.__file__ - - -def get_script_dir(): - """Get directory of main script. - - Usually a relative directory (depends on how it was called by the user.)""" - return os.path.dirname(get_script()) - - -def get_experiment_name(): - """Get name for experiment. - - Derived from the absolute filename of the main script, e.g. - "/ham/spam/eggs.py" => "spam-eggs".""" - script = os.path.abspath(get_script()) - script_dir = os.path.basename(os.path.dirname(script)) - script_base = os.path.splitext(os.path.basename(script))[0] - return "%s-%s" % (script_dir, script_base) - - -def get_data_dir(): - """Get data dir for the experiment. - - This is the subdirectory "data" of the directory containing - the main script.""" - return os.path.join(get_script_dir(), "data", get_experiment_name()) - - -def get_repo_base(): - """Get base directory of the repository, as an absolute path. - - Search upwards in the directory tree from the main script until a - directory with a subdirectory named ".hg" is found. - - Abort if the repo base cannot be found.""" - path = os.path.abspath(get_script_dir()) - while os.path.dirname(path) != path: - if os.path.exists(os.path.join(path, ".hg")): - return path - path = os.path.dirname(path) - sys.exit("repo base could not be found") - - -def is_running_on_cluster(): - node = platform.node() - return (node.endswith("cluster.bc2.ch") or - node.startswith("gkigrid") or - node in ["habakuk", "turtur"]) - - -def is_test_run(): - return ARGS.test_run == "yes" or (ARGS.test_run == "auto" and - not is_running_on_cluster()) - - -class IssueExperiment(DownwardExperiment): - """Wrapper for DownwardExperiment with a few convenience features.""" - - DEFAULT_TEST_SUITE = "gripper:prob01.pddl" - - # TODO: Once we have reference results, we should add "quality". 
- # TODO: Add something about errors/exit codes. - DEFAULT_TABLE_ATTRIBUTES = [ - "cost", - "coverage", - "evaluations", - "expansions", - "expansions_until_last_jump", - "generated", - "memory", - "run_dir", - "score_evaluations", - "score_expansions", - "score_generated", - "score_memory", - "score_search_time", - "score_total_time", - "search_time", - "total_time", - ] - - DEFAULT_SCATTER_PLOT_ATTRIBUTES = [ - "evaluations", - "expansions", - "expansions_until_last_jump", - "initial_h_value", - "memory", - "search_time", - "total_time", - ] - - PORTFOLIO_ATTRIBUTES = [ - "cost", - "coverage", - "plan_length", - ] - - def __init__(self, configs, suite, grid_priority=None, path=None, - repo=None, revisions=None, search_revisions=None, - test_suite=None, **kwargs): - """Create a DownwardExperiment with some convenience features. - - *configs* must be a non-empty dict of {nick: cmdline} pairs - that sets the planner configurations to test. :: - - IssueExperiment(configs={ - "lmcut": ["--search", "astar(lmcut())"], - "ipdb": ["--search", "astar(ipdb())"]}) - - *suite* sets the benchmarks for the experiment. It must be a - single string or a list of strings specifying domains or - tasks. The downward.suites module has many predefined - suites. :: - - IssueExperiment(suite=["grid", "gripper:prob01.pddl"]) - - from downward import suites - IssueExperiment(suite=suites.suite_all()) - IssueExperiment(suite=suites.suite_satisficing_with_ipc11()) - IssueExperiment(suite=suites.suite_optimal()) - - Use *grid_priority* to set the job priority for cluster - experiments. It must be in the range [-1023, 0] where 0 is the - highest priority. By default the priority is 0. :: - - IssueExperiment(grid_priority=-500) - - If *path* is specified, it must be the path to where the - experiment should be built (e.g. - /home/john/experiments/issue123/exp01/). If omitted, the - experiment path is derived automatically from the main - script's filename. 
Example:: - - script = experiments/issue123/exp01.py --> - path = experiments/issue123/data/issue123-exp01/ - - If *repo* is specified, it must be the path to the root of a - local Fast Downward repository. If omitted, the repository - is derived automatically from the main script's path. Example:: - - script = /path/to/fd-repo/experiments/issue123/exp01.py --> - repo = /path/to/fd-repo - - If *revisions* is specified, it should be a non-empty - list of revisions, which specify which planner versions to use - in the experiment. The same versions are used for translator, - preprocessor and search. :: - - IssueExperiment(revisions=["issue123", "4b3d581643"]) - - If *search_revisions* is specified, it should be a non-empty - list of revisions, which specify which search component - versions to use in the experiment. All runs use the - translator and preprocessor component of the first - revision. :: - - IssueExperiment(search_revisions=["default", "issue123"]) - - If you really need to specify the (translator, preprocessor, - planner) triples manually, use the *combinations* parameter - from the base class (might be deprecated soon). The options - *revisions*, *search_revisions* and *combinations* can be - freely mixed, but at least one of them must be given. - - Specify *test_suite* to set the benchmarks for experiment test - runs. By default the first gripper task is used. 
- - IssueExperiment(test_suite=["depot:pfile1", "tpp:p01.pddl"]) - - """ - - if is_test_run(): - kwargs["environment"] = LocalEnvironment() - suite = test_suite or self.DEFAULT_TEST_SUITE - elif "environment" not in kwargs: - kwargs["environment"] = MaiaEnvironment(priority=grid_priority) - - if path is None: - path = get_data_dir() - - if repo is None: - repo = get_repo_base() - - kwargs.setdefault("combinations", []) - - if not any([revisions, search_revisions, kwargs["combinations"]]): - raise ValueError('At least one of "revisions", "search_revisions" ' - 'or "combinations" must be given') - - if revisions: - kwargs["combinations"].extend([ - (Translator(repo, rev), - Preprocessor(repo, rev), - Planner(repo, rev)) - for rev in revisions]) - - if search_revisions: - base_rev = search_revisions[0] - # Use the same nick for all parts to get short revision nick. - kwargs["combinations"].extend([ - (Translator(repo, base_rev, nick=rev), - Preprocessor(repo, base_rev, nick=rev), - Planner(repo, rev, nick=rev)) - for rev in search_revisions]) - - DownwardExperiment.__init__(self, path=path, repo=repo, **kwargs) - - self._config_nicks = [] - for nick, config in configs.items(): - self.add_config(nick, config) - - self.add_suite(suite) - - @property - def revision_nicks(self): - # TODO: Once the add_algorithm() API is available we should get - # rid of the call to _get_rev_nick() and avoid inspecting the - # list of combinations by setting and saving the algorithm nicks. - return [_get_rev_nick(*combo) for combo in self.combinations] - - def add_config(self, nick, config, timeout=None): - DownwardExperiment.add_config(self, nick, config, timeout=timeout) - self._config_nicks.append(nick) - - def add_absolute_report_step(self, **kwargs): - """Add step that makes an absolute report. - - Absolute reports are useful for experiments that don't - compare revisions. - - The report is written to the experiment evaluation directory. 
- - All *kwargs* will be passed to the AbsoluteReport class. If - the keyword argument *attributes* is not specified, a - default list of attributes is used. :: - - exp.add_absolute_report_step(attributes=["coverage"]) - - """ - kwargs.setdefault("attributes", self.DEFAULT_TABLE_ATTRIBUTES) - report = AbsoluteReport(**kwargs) - outfile = get_experiment_name() + "." + report.output_format - self.add_report(report, outfile=outfile) - - def add_comparison_table_step(self, **kwargs): - """Add a step that makes pairwise revision comparisons. - - Create comparative reports for all pairs of Fast Downward - revision triples. Each report pairs up the runs of the same - config and lists the two absolute attribute values and their - difference for all attributes in kwargs["attributes"]. - - All *kwargs* will be passed to the CompareRevisionsReport - class. If the keyword argument *attributes* is not - specified, a default list of attributes is used. :: - - exp.add_comparison_table_step(attributes=["coverage"]) - - """ - kwargs.setdefault("attributes", self.DEFAULT_TABLE_ATTRIBUTES) - - def make_comparison_tables(): - for rev1, rev2 in itertools.combinations(self.revision_nicks, 2): - report = CompareRevisionsReport(rev1, rev2, **kwargs) - outfile = os.path.join(self.eval_dir, - "%s-%s-compare.html" % (rev1, rev2)) - report(self.eval_dir, outfile) - - self.add_step(Step("make-comparison-tables", make_comparison_tables)) - - def add_scatter_plot_step(self, attributes=None): - """Add a step that creates scatter plots for all revision pairs. - - Create a scatter plot for each combination of attribute, - configuration and revision pair. If *attributes* is not - specified, a list of common scatter plot attributes is used. - For portfolios all attributes except "cost", "coverage" and - "plan_length" will be ignored. 
:: - - exp.add_scatter_plot_step(attributes=["expansions"]) - - """ - if attributes is None: - attributes = self.DEFAULT_SCATTER_PLOT_ATTRIBUTES - scatter_dir = os.path.join(self.eval_dir, "scatter") - - def is_portfolio(config_nick): - return "fdss" in config_nick - - def make_scatter_plots(): - for config_nick in self._config_nicks: - for rev1, rev2 in itertools.combinations( - self.revision_nicks, 2): - algo1 = "%s-%s" % (rev1, config_nick) - algo2 = "%s-%s" % (rev2, config_nick) - if is_portfolio(config_nick): - valid_attributes = [ - attr for attr in attributes - if attr in self.PORTFOLIO_ATTRIBUTES] - else: - valid_attributes = attributes - for attribute in valid_attributes: - name = "-".join([rev1, rev2, attribute, config_nick]) - print "Make scatter plot for", name - report = ScatterPlotReport( - filter_config=[algo1, algo2], - attributes=[attribute], - get_category=lambda run1, run2: run1["domain"], - legend_location=(1.3, 0.5)) - report(self.eval_dir, os.path.join(scatter_dir, name)) - - self.add_step(Step("make-scatter-plots", make_scatter_plots)) diff --git a/experiments/issue482/issue482.py b/experiments/issue482/issue482.py deleted file mode 100755 index 7add15fb8b..0000000000 --- a/experiments/issue482/issue482.py +++ /dev/null @@ -1,22 +0,0 @@ -#! 
/usr/bin/env python -# -*- coding: utf-8 -*- - -from downward import suites - -import common_setup - -CONFIGS = { - 'astar_gapdb': [ - '--search', - 'astar(gapdb())'], -} - -exp = common_setup.IssueExperiment( - search_revisions=["issue482-base", "issue482-v1"], - configs=CONFIGS, - suite=suites.suite_optimal_with_ipc11(), - ) - -exp.add_comparison_table_step() - -exp() diff --git a/experiments/issue486/common_setup.py b/experiments/issue486/common_setup.py deleted file mode 100644 index ebf622a012..0000000000 --- a/experiments/issue486/common_setup.py +++ /dev/null @@ -1,347 +0,0 @@ -# -*- coding: utf-8 -*- - -import itertools -import os -import platform -import sys - -from lab.environments import LocalEnvironment, MaiaEnvironment -from lab.experiment import ARGPARSER -from lab.steps import Step - -from downward.experiments import DownwardExperiment, _get_rev_nick -from downward.checkouts import Translator, Preprocessor, Planner -from downward.reports.absolute import AbsoluteReport -from downward.reports.compare import CompareRevisionsReport -from downward.reports.scatter import ScatterPlotReport - - -def parse_args(): - ARGPARSER.add_argument( - "--test", - choices=["yes", "no", "auto"], - default="auto", - dest="test_run", - help="test experiment locally on a small suite if --test=yes or " - "--test=auto and we are not on a cluster") - return ARGPARSER.parse_args() - -ARGS = parse_args() - - -def get_script(): - """Get file name of main script.""" - import __main__ - return __main__.__file__ - - -def get_script_dir(): - """Get directory of main script. - - Usually a relative directory (depends on how it was called by the user.)""" - return os.path.dirname(get_script()) - - -def get_experiment_name(): - """Get name for experiment. - - Derived from the absolute filename of the main script, e.g. 
- "/ham/spam/eggs.py" => "spam-eggs".""" - script = os.path.abspath(get_script()) - script_dir = os.path.basename(os.path.dirname(script)) - script_base = os.path.splitext(os.path.basename(script))[0] - return "%s-%s" % (script_dir, script_base) - - -def get_data_dir(): - """Get data dir for the experiment. - - This is the subdirectory "data" of the directory containing - the main script.""" - return os.path.join(get_script_dir(), "data", get_experiment_name()) - - -def get_repo_base(): - """Get base directory of the repository, as an absolute path. - - Search upwards in the directory tree from the main script until a - directory with a subdirectory named ".hg" is found. - - Abort if the repo base cannot be found.""" - path = os.path.abspath(get_script_dir()) - while os.path.dirname(path) != path: - if os.path.exists(os.path.join(path, ".hg")): - return path - path = os.path.dirname(path) - sys.exit("repo base could not be found") - - -def is_running_on_cluster(): - node = platform.node() - return (node.endswith("cluster.bc2.ch") or - node.startswith("gkigrid") or - node in ["habakuk", "turtur"]) - - -def is_test_run(): - return ARGS.test_run == "yes" or (ARGS.test_run == "auto" and - not is_running_on_cluster()) - - -class IssueExperiment(DownwardExperiment): - """Wrapper for DownwardExperiment with a few convenience features.""" - - DEFAULT_TEST_SUITE = "gripper:prob01.pddl" - - # TODO: Once we have reference results, we should add "quality". - # TODO: Add something about errors/exit codes. 
- DEFAULT_TABLE_ATTRIBUTES = [ - "cost", - "coverage", - "evaluations", - "expansions", - "expansions_until_last_jump", - "generated", - "memory", - "run_dir", - "score_evaluations", - "score_expansions", - "score_generated", - "score_memory", - "score_search_time", - "score_total_time", - "search_time", - "total_time", - ] - - DEFAULT_SCATTER_PLOT_ATTRIBUTES = [ - "evaluations", - "expansions", - "expansions_until_last_jump", - "initial_h_value", - "memory", - "search_time", - "total_time", - ] - - PORTFOLIO_ATTRIBUTES = [ - "cost", - "coverage", - "plan_length", - ] - - def __init__(self, configs, suite, grid_priority=None, path=None, - repo=None, revisions=None, search_revisions=None, - test_suite=None, **kwargs): - """Create a DownwardExperiment with some convenience features. - - *configs* must be a non-empty dict of {nick: cmdline} pairs - that sets the planner configurations to test. :: - - IssueExperiment(configs={ - "lmcut": ["--search", "astar(lmcut())"], - "ipdb": ["--search", "astar(ipdb())"]}) - - *suite* sets the benchmarks for the experiment. It must be a - single string or a list of strings specifying domains or - tasks. The downward.suites module has many predefined - suites. :: - - IssueExperiment(suite=["grid", "gripper:prob01.pddl"]) - - from downward import suites - IssueExperiment(suite=suites.suite_all()) - IssueExperiment(suite=suites.suite_satisficing_with_ipc11()) - IssueExperiment(suite=suites.suite_optimal()) - - Use *grid_priority* to set the job priority for cluster - experiments. It must be in the range [-1023, 0] where 0 is the - highest priority. By default the priority is 0. :: - - IssueExperiment(grid_priority=-500) - - If *path* is specified, it must be the path to where the - experiment should be built (e.g. - /home/john/experiments/issue123/exp01/). If omitted, the - experiment path is derived automatically from the main - script's filename. 
Example:: - - script = experiments/issue123/exp01.py --> - path = experiments/issue123/data/issue123-exp01/ - - If *repo* is specified, it must be the path to the root of a - local Fast Downward repository. If omitted, the repository - is derived automatically from the main script's path. Example:: - - script = /path/to/fd-repo/experiments/issue123/exp01.py --> - repo = /path/to/fd-repo - - If *revisions* is specified, it should be a non-empty - list of revisions, which specify which planner versions to use - in the experiment. The same versions are used for translator, - preprocessor and search. :: - - IssueExperiment(revisions=["issue123", "4b3d581643"]) - - If *search_revisions* is specified, it should be a non-empty - list of revisions, which specify which search component - versions to use in the experiment. All runs use the - translator and preprocessor component of the first - revision. :: - - IssueExperiment(search_revisions=["default", "issue123"]) - - If you really need to specify the (translator, preprocessor, - planner) triples manually, use the *combinations* parameter - from the base class (might be deprecated soon). The options - *revisions*, *search_revisions* and *combinations* can be - freely mixed, but at least one of them must be given. - - Specify *test_suite* to set the benchmarks for experiment test - runs. By default the first gripper task is used. 
- - IssueExperiment(test_suite=["depot:pfile1", "tpp:p01.pddl"]) - - """ - - if is_test_run(): - kwargs["environment"] = LocalEnvironment() - suite = test_suite or self.DEFAULT_TEST_SUITE - elif "environment" not in kwargs: - kwargs["environment"] = MaiaEnvironment(priority=grid_priority) - - if path is None: - path = get_data_dir() - - if repo is None: - repo = get_repo_base() - - kwargs.setdefault("combinations", []) - - if not any([revisions, search_revisions, kwargs["combinations"]]): - raise ValueError('At least one of "revisions", "search_revisions" ' - 'or "combinations" must be given') - - if revisions: - kwargs["combinations"].extend([ - (Translator(repo, rev), - Preprocessor(repo, rev), - Planner(repo, rev)) - for rev in revisions]) - - if search_revisions: - base_rev = search_revisions[0] - # Use the same nick for all parts to get short revision nick. - kwargs["combinations"].extend([ - (Translator(repo, base_rev, nick=rev), - Preprocessor(repo, base_rev, nick=rev), - Planner(repo, rev, nick=rev)) - for rev in search_revisions]) - - DownwardExperiment.__init__(self, path=path, repo=repo, **kwargs) - - self._config_nicks = [] - for nick, config in configs.items(): - self.add_config(nick, config) - - self.add_suite(suite) - - @property - def revision_nicks(self): - # TODO: Once the add_algorithm() API is available we should get - # rid of the call to _get_rev_nick() and avoid inspecting the - # list of combinations by setting and saving the algorithm nicks. - return [_get_rev_nick(*combo) for combo in self.combinations] - - def add_config(self, nick, config, timeout=None): - DownwardExperiment.add_config(self, nick, config, timeout=timeout) - self._config_nicks.append(nick) - - def add_absolute_report_step(self, **kwargs): - """Add step that makes an absolute report. - - Absolute reports are useful for experiments that don't - compare revisions. - - The report is written to the experiment evaluation directory. 
- - All *kwargs* will be passed to the AbsoluteReport class. If - the keyword argument *attributes* is not specified, a - default list of attributes is used. :: - - exp.add_absolute_report_step(attributes=["coverage"]) - - """ - kwargs.setdefault("attributes", self.DEFAULT_TABLE_ATTRIBUTES) - report = AbsoluteReport(**kwargs) - outfile = get_experiment_name() + "." + report.output_format - self.add_report(report, outfile=outfile) - - def add_comparison_table_step(self, **kwargs): - """Add a step that makes pairwise revision comparisons. - - Create comparative reports for all pairs of Fast Downward - revision triples. Each report pairs up the runs of the same - config and lists the two absolute attribute values and their - difference for all attributes in kwargs["attributes"]. - - All *kwargs* will be passed to the CompareRevisionsReport - class. If the keyword argument *attributes* is not - specified, a default list of attributes is used. :: - - exp.add_comparison_table_step(attributes=["coverage"]) - - """ - kwargs.setdefault("attributes", self.DEFAULT_TABLE_ATTRIBUTES) - - def make_comparison_tables(): - for rev1, rev2 in itertools.combinations(self.revision_nicks, 2): - report = CompareRevisionsReport(rev1, rev2, **kwargs) - outfile = os.path.join(self.eval_dir, - "%s-%s-compare.html" % (rev1, rev2)) - report(self.eval_dir, outfile) - - self.add_step(Step("make-comparison-tables", make_comparison_tables)) - - def add_scatter_plot_step(self, attributes=None): - """Add a step that creates scatter plots for all revision pairs. - - Create a scatter plot for each combination of attribute, - configuration and revision pair. If *attributes* is not - specified, a list of common scatter plot attributes is used. - For portfolios all attributes except "cost", "coverage" and - "plan_length" will be ignored. 
:: - - exp.add_scatter_plot_step(attributes=["expansions"]) - - """ - if attributes is None: - attributes = self.DEFAULT_SCATTER_PLOT_ATTRIBUTES - scatter_dir = os.path.join(self.eval_dir, "scatter") - - def is_portfolio(config_nick): - return "fdss" in config_nick - - def make_scatter_plots(): - for config_nick in self._config_nicks: - for rev1, rev2 in itertools.combinations( - self.revision_nicks, 2): - algo1 = "%s-%s" % (rev1, config_nick) - algo2 = "%s-%s" % (rev2, config_nick) - if is_portfolio(config_nick): - valid_attributes = [ - attr for attr in attributes - if attr in self.PORTFOLIO_ATTRIBUTES] - else: - valid_attributes = attributes - for attribute in valid_attributes: - name = "-".join([rev1, rev2, attribute, config_nick]) - print "Make scatter plot for", name - report = ScatterPlotReport( - filter_config=[algo1, algo2], - attributes=[attribute], - get_category=lambda run1, run2: run1["domain"], - legend_location=(1.3, 0.5)) - report(self.eval_dir, os.path.join(scatter_dir, name)) - - self.add_step(Step("make-scatter-plots", make_scatter_plots)) diff --git a/experiments/issue486/issue486.py b/experiments/issue486/issue486.py deleted file mode 100755 index 4d176b82a4..0000000000 --- a/experiments/issue486/issue486.py +++ /dev/null @@ -1,28 +0,0 @@ -#! 
/usr/bin/env python -# -*- coding: utf-8 -*- - -from downward import suites - -import common_setup - -CONFIGS = { - 'astar_ipdb': [ - '--search', - 'astar(ipdb())'], - 'astar_pdb': [ - '--search', - 'astar(pdb())'], - 'astar_gapdb': [ - '--search', - 'astar(gapdb())'], -} - -exp = common_setup.IssueExperiment( - search_revisions=["issue486-base", "issue486-v1"], - configs=CONFIGS, - suite=suites.suite_optimal_with_ipc11(), - ) - -exp.add_comparison_table_step() - -exp() diff --git a/experiments/issue488/common_setup.py b/experiments/issue488/common_setup.py deleted file mode 100644 index ebf622a012..0000000000 --- a/experiments/issue488/common_setup.py +++ /dev/null @@ -1,347 +0,0 @@ -# -*- coding: utf-8 -*- - -import itertools -import os -import platform -import sys - -from lab.environments import LocalEnvironment, MaiaEnvironment -from lab.experiment import ARGPARSER -from lab.steps import Step - -from downward.experiments import DownwardExperiment, _get_rev_nick -from downward.checkouts import Translator, Preprocessor, Planner -from downward.reports.absolute import AbsoluteReport -from downward.reports.compare import CompareRevisionsReport -from downward.reports.scatter import ScatterPlotReport - - -def parse_args(): - ARGPARSER.add_argument( - "--test", - choices=["yes", "no", "auto"], - default="auto", - dest="test_run", - help="test experiment locally on a small suite if --test=yes or " - "--test=auto and we are not on a cluster") - return ARGPARSER.parse_args() - -ARGS = parse_args() - - -def get_script(): - """Get file name of main script.""" - import __main__ - return __main__.__file__ - - -def get_script_dir(): - """Get directory of main script. - - Usually a relative directory (depends on how it was called by the user.)""" - return os.path.dirname(get_script()) - - -def get_experiment_name(): - """Get name for experiment. - - Derived from the absolute filename of the main script, e.g. 
- "/ham/spam/eggs.py" => "spam-eggs".""" - script = os.path.abspath(get_script()) - script_dir = os.path.basename(os.path.dirname(script)) - script_base = os.path.splitext(os.path.basename(script))[0] - return "%s-%s" % (script_dir, script_base) - - -def get_data_dir(): - """Get data dir for the experiment. - - This is the subdirectory "data" of the directory containing - the main script.""" - return os.path.join(get_script_dir(), "data", get_experiment_name()) - - -def get_repo_base(): - """Get base directory of the repository, as an absolute path. - - Search upwards in the directory tree from the main script until a - directory with a subdirectory named ".hg" is found. - - Abort if the repo base cannot be found.""" - path = os.path.abspath(get_script_dir()) - while os.path.dirname(path) != path: - if os.path.exists(os.path.join(path, ".hg")): - return path - path = os.path.dirname(path) - sys.exit("repo base could not be found") - - -def is_running_on_cluster(): - node = platform.node() - return (node.endswith("cluster.bc2.ch") or - node.startswith("gkigrid") or - node in ["habakuk", "turtur"]) - - -def is_test_run(): - return ARGS.test_run == "yes" or (ARGS.test_run == "auto" and - not is_running_on_cluster()) - - -class IssueExperiment(DownwardExperiment): - """Wrapper for DownwardExperiment with a few convenience features.""" - - DEFAULT_TEST_SUITE = "gripper:prob01.pddl" - - # TODO: Once we have reference results, we should add "quality". - # TODO: Add something about errors/exit codes. 
- DEFAULT_TABLE_ATTRIBUTES = [ - "cost", - "coverage", - "evaluations", - "expansions", - "expansions_until_last_jump", - "generated", - "memory", - "run_dir", - "score_evaluations", - "score_expansions", - "score_generated", - "score_memory", - "score_search_time", - "score_total_time", - "search_time", - "total_time", - ] - - DEFAULT_SCATTER_PLOT_ATTRIBUTES = [ - "evaluations", - "expansions", - "expansions_until_last_jump", - "initial_h_value", - "memory", - "search_time", - "total_time", - ] - - PORTFOLIO_ATTRIBUTES = [ - "cost", - "coverage", - "plan_length", - ] - - def __init__(self, configs, suite, grid_priority=None, path=None, - repo=None, revisions=None, search_revisions=None, - test_suite=None, **kwargs): - """Create a DownwardExperiment with some convenience features. - - *configs* must be a non-empty dict of {nick: cmdline} pairs - that sets the planner configurations to test. :: - - IssueExperiment(configs={ - "lmcut": ["--search", "astar(lmcut())"], - "ipdb": ["--search", "astar(ipdb())"]}) - - *suite* sets the benchmarks for the experiment. It must be a - single string or a list of strings specifying domains or - tasks. The downward.suites module has many predefined - suites. :: - - IssueExperiment(suite=["grid", "gripper:prob01.pddl"]) - - from downward import suites - IssueExperiment(suite=suites.suite_all()) - IssueExperiment(suite=suites.suite_satisficing_with_ipc11()) - IssueExperiment(suite=suites.suite_optimal()) - - Use *grid_priority* to set the job priority for cluster - experiments. It must be in the range [-1023, 0] where 0 is the - highest priority. By default the priority is 0. :: - - IssueExperiment(grid_priority=-500) - - If *path* is specified, it must be the path to where the - experiment should be built (e.g. - /home/john/experiments/issue123/exp01/). If omitted, the - experiment path is derived automatically from the main - script's filename. 
Example:: - - script = experiments/issue123/exp01.py --> - path = experiments/issue123/data/issue123-exp01/ - - If *repo* is specified, it must be the path to the root of a - local Fast Downward repository. If omitted, the repository - is derived automatically from the main script's path. Example:: - - script = /path/to/fd-repo/experiments/issue123/exp01.py --> - repo = /path/to/fd-repo - - If *revisions* is specified, it should be a non-empty - list of revisions, which specify which planner versions to use - in the experiment. The same versions are used for translator, - preprocessor and search. :: - - IssueExperiment(revisions=["issue123", "4b3d581643"]) - - If *search_revisions* is specified, it should be a non-empty - list of revisions, which specify which search component - versions to use in the experiment. All runs use the - translator and preprocessor component of the first - revision. :: - - IssueExperiment(search_revisions=["default", "issue123"]) - - If you really need to specify the (translator, preprocessor, - planner) triples manually, use the *combinations* parameter - from the base class (might be deprecated soon). The options - *revisions*, *search_revisions* and *combinations* can be - freely mixed, but at least one of them must be given. - - Specify *test_suite* to set the benchmarks for experiment test - runs. By default the first gripper task is used. 
- - IssueExperiment(test_suite=["depot:pfile1", "tpp:p01.pddl"]) - - """ - - if is_test_run(): - kwargs["environment"] = LocalEnvironment() - suite = test_suite or self.DEFAULT_TEST_SUITE - elif "environment" not in kwargs: - kwargs["environment"] = MaiaEnvironment(priority=grid_priority) - - if path is None: - path = get_data_dir() - - if repo is None: - repo = get_repo_base() - - kwargs.setdefault("combinations", []) - - if not any([revisions, search_revisions, kwargs["combinations"]]): - raise ValueError('At least one of "revisions", "search_revisions" ' - 'or "combinations" must be given') - - if revisions: - kwargs["combinations"].extend([ - (Translator(repo, rev), - Preprocessor(repo, rev), - Planner(repo, rev)) - for rev in revisions]) - - if search_revisions: - base_rev = search_revisions[0] - # Use the same nick for all parts to get short revision nick. - kwargs["combinations"].extend([ - (Translator(repo, base_rev, nick=rev), - Preprocessor(repo, base_rev, nick=rev), - Planner(repo, rev, nick=rev)) - for rev in search_revisions]) - - DownwardExperiment.__init__(self, path=path, repo=repo, **kwargs) - - self._config_nicks = [] - for nick, config in configs.items(): - self.add_config(nick, config) - - self.add_suite(suite) - - @property - def revision_nicks(self): - # TODO: Once the add_algorithm() API is available we should get - # rid of the call to _get_rev_nick() and avoid inspecting the - # list of combinations by setting and saving the algorithm nicks. - return [_get_rev_nick(*combo) for combo in self.combinations] - - def add_config(self, nick, config, timeout=None): - DownwardExperiment.add_config(self, nick, config, timeout=timeout) - self._config_nicks.append(nick) - - def add_absolute_report_step(self, **kwargs): - """Add step that makes an absolute report. - - Absolute reports are useful for experiments that don't - compare revisions. - - The report is written to the experiment evaluation directory. 
- - All *kwargs* will be passed to the AbsoluteReport class. If - the keyword argument *attributes* is not specified, a - default list of attributes is used. :: - - exp.add_absolute_report_step(attributes=["coverage"]) - - """ - kwargs.setdefault("attributes", self.DEFAULT_TABLE_ATTRIBUTES) - report = AbsoluteReport(**kwargs) - outfile = get_experiment_name() + "." + report.output_format - self.add_report(report, outfile=outfile) - - def add_comparison_table_step(self, **kwargs): - """Add a step that makes pairwise revision comparisons. - - Create comparative reports for all pairs of Fast Downward - revision triples. Each report pairs up the runs of the same - config and lists the two absolute attribute values and their - difference for all attributes in kwargs["attributes"]. - - All *kwargs* will be passed to the CompareRevisionsReport - class. If the keyword argument *attributes* is not - specified, a default list of attributes is used. :: - - exp.add_comparison_table_step(attributes=["coverage"]) - - """ - kwargs.setdefault("attributes", self.DEFAULT_TABLE_ATTRIBUTES) - - def make_comparison_tables(): - for rev1, rev2 in itertools.combinations(self.revision_nicks, 2): - report = CompareRevisionsReport(rev1, rev2, **kwargs) - outfile = os.path.join(self.eval_dir, - "%s-%s-compare.html" % (rev1, rev2)) - report(self.eval_dir, outfile) - - self.add_step(Step("make-comparison-tables", make_comparison_tables)) - - def add_scatter_plot_step(self, attributes=None): - """Add a step that creates scatter plots for all revision pairs. - - Create a scatter plot for each combination of attribute, - configuration and revision pair. If *attributes* is not - specified, a list of common scatter plot attributes is used. - For portfolios all attributes except "cost", "coverage" and - "plan_length" will be ignored. 
:: - - exp.add_scatter_plot_step(attributes=["expansions"]) - - """ - if attributes is None: - attributes = self.DEFAULT_SCATTER_PLOT_ATTRIBUTES - scatter_dir = os.path.join(self.eval_dir, "scatter") - - def is_portfolio(config_nick): - return "fdss" in config_nick - - def make_scatter_plots(): - for config_nick in self._config_nicks: - for rev1, rev2 in itertools.combinations( - self.revision_nicks, 2): - algo1 = "%s-%s" % (rev1, config_nick) - algo2 = "%s-%s" % (rev2, config_nick) - if is_portfolio(config_nick): - valid_attributes = [ - attr for attr in attributes - if attr in self.PORTFOLIO_ATTRIBUTES] - else: - valid_attributes = attributes - for attribute in valid_attributes: - name = "-".join([rev1, rev2, attribute, config_nick]) - print "Make scatter plot for", name - report = ScatterPlotReport( - filter_config=[algo1, algo2], - attributes=[attribute], - get_category=lambda run1, run2: run1["domain"], - legend_location=(1.3, 0.5)) - report(self.eval_dir, os.path.join(scatter_dir, name)) - - self.add_step(Step("make-scatter-plots", make_scatter_plots)) diff --git a/experiments/issue488/issue488.py b/experiments/issue488/issue488.py deleted file mode 100755 index f708a4b5c1..0000000000 --- a/experiments/issue488/issue488.py +++ /dev/null @@ -1,28 +0,0 @@ -#! 
/usr/bin/env python -# -*- coding: utf-8 -*- - -from downward import suites - -import common_setup - -CONFIGS = { - 'astar_ipdb': [ - '--search', - 'astar(ipdb())'], - 'astar_pdb': [ - '--search', - 'astar(pdb())'], - 'astar_gapdb': [ - '--search', - 'astar(gapdb())'], -} - -exp = common_setup.IssueExperiment( - search_revisions=["issue488-base", "issue488-v1"], - configs=CONFIGS, - suite=suites.suite_optimal_with_ipc11(), - ) - -exp.add_comparison_table_step() - -exp() diff --git a/experiments/issue499/common_setup.py b/experiments/issue499/common_setup.py deleted file mode 100644 index 0b2eebe0ff..0000000000 --- a/experiments/issue499/common_setup.py +++ /dev/null @@ -1,357 +0,0 @@ -# -*- coding: utf-8 -*- - -import itertools -import os -import platform -import subprocess -import sys - -from lab.environments import LocalEnvironment, MaiaEnvironment -from lab.experiment import ARGPARSER -from lab.steps import Step -from lab import tools - -from downward.experiment import FastDownwardExperiment -from downward.reports.absolute import AbsoluteReport -from downward.reports.compare import CompareConfigsReport -from downward.reports.scatter import ScatterPlotReport - - -def parse_args(): - ARGPARSER.add_argument( - "--test", - choices=["yes", "no", "auto"], - default="auto", - dest="test_run", - help="test experiment locally on a small suite if --test=yes or " - "--test=auto and we are not on a cluster") - return ARGPARSER.parse_args() - -ARGS = parse_args() - - -def get_script(): - """Get file name of main script.""" - return tools.get_script_path() - - -def get_script_dir(): - """Get directory of main script. - - Usually a relative directory (depends on how it was called by the user.)""" - return os.path.dirname(get_script()) - - -def get_experiment_name(): - """Get name for experiment. - - Derived from the absolute filename of the main script, e.g. 
- "/ham/spam/eggs.py" => "spam-eggs".""" - script = os.path.abspath(get_script()) - script_dir = os.path.basename(os.path.dirname(script)) - script_base = os.path.splitext(os.path.basename(script))[0] - return "%s-%s" % (script_dir, script_base) - - -def get_data_dir(): - """Get data dir for the experiment. - - This is the subdirectory "data" of the directory containing - the main script.""" - return os.path.join(get_script_dir(), "data", get_experiment_name()) - - -def get_repo_base(): - """Get base directory of the repository, as an absolute path. - - Search upwards in the directory tree from the main script until a - directory with a subdirectory named ".hg" is found. - - Abort if the repo base cannot be found.""" - path = os.path.abspath(get_script_dir()) - while os.path.dirname(path) != path: - if os.path.exists(os.path.join(path, ".hg")): - return path - path = os.path.dirname(path) - sys.exit("repo base could not be found") - - -def is_running_on_cluster(): - node = platform.node() - return ( - "cluster" in node or - node.startswith("gkigrid") or - node in ["habakuk", "turtur"]) - - -def is_test_run(): - return ARGS.test_run == "yes" or ( - ARGS.test_run == "auto" and not is_running_on_cluster()) - - -def get_algo_nick(revision, config_nick): - return "{revision}-{config_nick}".format(**locals()) - - -class IssueConfig(object): - """Hold information about a planner configuration. - - See FastDownwardExperiment.add_algorithm() for documentation of the - constructor's options. 
- - """ - def __init__(self, nick, component_options, - build_options=None, driver_options=None): - self.nick = nick - self.component_options = component_options - self.build_options = build_options - self.driver_options = driver_options - - -class IssueExperiment(FastDownwardExperiment): - """Wrapper for FastDownwardExperiment with a few convenience features.""" - - DEFAULT_TEST_SUITE = "gripper:prob01.pddl" - - DEFAULT_TABLE_ATTRIBUTES = [ - "cost", - "coverage", - "error", - "evaluations", - "expansions", - "expansions_until_last_jump", - "generated", - "memory", - "quality", - "run_dir", - "score_evaluations", - "score_expansions", - "score_generated", - "score_memory", - "score_search_time", - "score_total_time", - "search_time", - "total_time", - ] - - DEFAULT_SCATTER_PLOT_ATTRIBUTES = [ - "evaluations", - "expansions", - "expansions_until_last_jump", - "initial_h_value", - "memory", - "search_time", - "total_time", - ] - - PORTFOLIO_ATTRIBUTES = [ - "cost", - "coverage", - "error", - "plan_length", - "run_dir", - ] - - def __init__(self, suite, revisions=[], configs={}, grid_priority=None, - path=None, test_suite=None, email=None, processes=1, - **kwargs): - """Create a DownwardExperiment with some convenience features. - - If *revisions* is specified, it should be a non-empty - list of revisions, which specify which planner versions to use - in the experiment. The same versions are used for translator, - preprocessor and search. :: - - IssueExperiment(revisions=["issue123", "4b3d581643"], ...) - - *configs* must be a non-empty list of IssueConfig objects. :: - - IssueExperiment(..., configs=[ - IssueConfig("ff", ["--search", "eager_greedy(ff())"]), - IssueConfig( - "lama", [], - driver_options=["--alias", "seq-sat-lama-2011"]), - ]) - - *suite* sets the benchmarks for the experiment. It must be a - single string or a list of strings specifying domains or - tasks. The downward.suites module has many predefined - suites. 
:: - - IssueExperiment(..., suite=["grid", "gripper:prob01.pddl"]) - - from downward import suites - IssueExperiment(..., suite=suites.suite_all()) - IssueExperiment(..., suite=suites.suite_satisficing_with_ipc11()) - IssueExperiment(..., suite=suites.suite_optimal()) - - Use *grid_priority* to set the job priority for cluster - experiments. It must be in the range [-1023, 0] where 0 is the - highest priority. By default the priority is 0. :: - - IssueExperiment(..., grid_priority=-500) - - If *path* is specified, it must be the path to where the - experiment should be built (e.g. - /home/john/experiments/issue123/exp01/). If omitted, the - experiment path is derived automatically from the main - script's filename. Example:: - - script = experiments/issue123/exp01.py --> - path = experiments/issue123/data/issue123-exp01/ - - Specify *test_suite* to set the benchmarks for experiment test - runs. By default the first gripper task is used. - - IssueExperiment(..., test_suite=["depot:pfile1", "tpp:p01.pddl"]) - - If *email* is specified, it should be an email address. This - email address will be notified upon completion of the experiments - if it is run on the cluster. 
- """ - - if is_test_run(): - kwargs["environment"] = LocalEnvironment(processes=processes) - suite = test_suite or self.DEFAULT_TEST_SUITE - elif "environment" not in kwargs: - kwargs["environment"] = MaiaEnvironment( - priority=grid_priority, email=email) - - path = path or get_data_dir() - - FastDownwardExperiment.__init__(self, path=path, **kwargs) - - repo = get_repo_base() - for rev in revisions: - for config in configs: - self.add_algorithm( - get_algo_nick(rev, config.nick), - repo, - rev, - config.component_options, - build_options=config.build_options, - driver_options=config.driver_options) - - self.add_suite(os.path.join(repo, "benchmarks"), suite) - - self._revisions = revisions - self._configs = configs - - @classmethod - def _is_portfolio(cls, config_nick): - return "fdss" in config_nick - - @classmethod - def get_supported_attributes(cls, config_nick, attributes): - if cls._is_portfolio(config_nick): - return [attr for attr in attributes - if attr in cls.PORTFOLIO_ATTRIBUTES] - return attributes - - def add_absolute_report_step(self, **kwargs): - """Add step that makes an absolute report. - - Absolute reports are useful for experiments that don't - compare revisions. - - The report is written to the experiment evaluation directory. - - All *kwargs* will be passed to the AbsoluteReport class. If - the keyword argument *attributes* is not specified, a - default list of attributes is used. :: - - exp.add_absolute_report_step(attributes=["coverage"]) - - """ - kwargs.setdefault("attributes", self.DEFAULT_TABLE_ATTRIBUTES) - report = AbsoluteReport(**kwargs) - outfile = os.path.join(self.eval_dir, - get_experiment_name() + "." + - report.output_format) - self.add_report(report, outfile=outfile) - self.add_step(Step('publish-absolute-report', - subprocess.call, - ['publish', outfile])) - - def add_comparison_table_step(self, **kwargs): - """Add a step that makes pairwise revision comparisons. 
- - Create comparative reports for all pairs of Fast Downward - revisions. Each report pairs up the runs of the same config and - lists the two absolute attribute values and their difference - for all attributes in kwargs["attributes"]. - - All *kwargs* will be passed to the CompareConfigsReport class. - If the keyword argument *attributes* is not specified, a - default list of attributes is used. :: - - exp.add_comparison_table_step(attributes=["coverage"]) - - """ - kwargs.setdefault("attributes", self.DEFAULT_TABLE_ATTRIBUTES) - - def make_comparison_tables(): - for rev1, rev2 in itertools.combinations(self._revisions, 2): - compared_configs = [] - for config in self._configs: - config_nick = config.nick - compared_configs.append( - ("%s-%s" % (rev1, config_nick), - "%s-%s" % (rev2, config_nick), - "Diff (%s)" % config_nick)) - report = CompareConfigsReport(compared_configs, **kwargs) - outfile = os.path.join( - self.eval_dir, - "%s-%s-%s-compare" % (self.name, rev1, rev2) - + "." + report.output_format) - report(self.eval_dir, outfile) - - def publish_comparison_tables(): - for rev1, rev2 in itertools.combinations(self._revisions, 2): - outfile = os.path.join( - self.eval_dir, - "%s-%s-%s-compare" % (self.name, rev1, rev2) - + ".html") - subprocess.call(['publish', outfile]) - - self.add_step(Step("make-comparison-tables", make_comparison_tables)) - self.add_step(Step("publish-comparison-tables", publish_comparison_tables)) - - def add_scatter_plot_step(self, attributes=None): - """Add a step that creates scatter plots for all revision pairs. - - Create a scatter plot for each combination of attribute, - configuration and revisions pair. If *attributes* is not - specified, a list of common scatter plot attributes is used. - For portfolios all attributes except "cost", "coverage" and - "plan_length" will be ignored. 
:: - - exp.add_scatter_plot_step(attributes=["expansions"]) - - """ - if attributes is None: - attributes = self.DEFAULT_SCATTER_PLOT_ATTRIBUTES - scatter_dir = os.path.join(self.eval_dir, "scatter") - - def make_scatter_plot(config_nick, rev1, rev2, attribute): - name = "-".join([self.name, rev1, rev2, attribute, config_nick]) - print "Make scatter plot for", name - algo1 = "%s-%s" % (rev1, config_nick) - algo2 = "%s-%s" % (rev2, config_nick) - report = ScatterPlotReport( - filter_config=[algo1, algo2], - attributes=[attribute], - get_category=lambda run1, run2: run1["domain"], - legend_location=(1.3, 0.5)) - report( - self.eval_dir, - os.path.join(scatter_dir, rev1 + "-" + rev2, name)) - - def make_scatter_plots(): - for config in self._configs: - for rev1, rev2 in itertools.combinations(self._revisions, 2): - for attribute in self.get_supported_attributes( - config.nick, attributes): - make_scatter_plot(config.nick, rev1, rev2, attribute) - - self.add_step(Step("make-scatter-plots", make_scatter_plots)) diff --git a/experiments/issue499/relativescatter.py b/experiments/issue499/relativescatter.py deleted file mode 100644 index 41a8385a87..0000000000 --- a/experiments/issue499/relativescatter.py +++ /dev/null @@ -1,126 +0,0 @@ -# -*- coding: utf-8 -*- -# -# downward uses the lab package to conduct experiments with the -# Fast Downward planning system. -# -# This program is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with this program. If not, see . 
- -from collections import defaultdict -import os - -from lab import tools - -from matplotlib import ticker - -from downward.reports.scatter import ScatterPlotReport -from downward.reports.plot import PlotReport, Matplotlib, MatplotlibPlot - - -# TODO: handle outliers - -# TODO: this is mostly copied from ScatterMatplotlib (scatter.py) -class RelativeScatterMatplotlib(Matplotlib): - @classmethod - def _plot(cls, report, axes, categories, styles): - # Display grid - axes.grid(b=True, linestyle='-', color='0.75') - - has_points = False - # Generate the scatter plots - for category, coords in sorted(categories.items()): - X, Y = zip(*coords) - axes.scatter(X, Y, s=42, label=category, **styles[category]) - if X and Y: - has_points = True - - if report.xscale == 'linear' or report.yscale == 'linear': - plot_size = report.missing_val * 1.01 - else: - plot_size = report.missing_val * 1.25 - - # make 5 ticks above and below 1 - yticks = [] - tick_step = report.ylim_top**(1/5.0) - for i in xrange(-5, 6): - yticks.append(tick_step**i) - axes.set_yticks(yticks) - axes.get_yaxis().set_major_formatter(ticker.ScalarFormatter()) - - axes.set_xlim(report.xlim_left or -1, report.xlim_right or plot_size) - axes.set_ylim(report.ylim_bottom or -1, report.ylim_top or plot_size) - - for axis in [axes.xaxis, axes.yaxis]: - MatplotlibPlot.change_axis_formatter(axis, - report.missing_val if report.show_missing else None) - return has_points - - - -class RelativeScatterPlotReport(ScatterPlotReport): - """ - Generate a scatter plot that shows how a specific attribute in two - configurations. The attribute value in config 1 is shown on the - x-axis and the relation to the value in config 2 on the y-axis. 
- """ - - def __init__(self, show_missing=True, get_category=None, **kwargs): - ScatterPlotReport.__init__(self, show_missing, get_category, **kwargs) - if self.output_format == 'tex': - raise "not supported" - else: - self.writer = RelativeScatterMatplotlib - - - def _fill_categories(self, runs): - # We discard the *runs* parameter. - # Map category names to value tuples - categories = defaultdict(list) - self.ylim_bottom = 2 - self.ylim_top = 0.5 - self.xlim_left = float("inf") - for (domain, problem), runs in self.problem_runs.items(): - if len(runs) != 2: - continue - run1, run2 = runs - assert (run1['config'] == self.configs[0] and - run2['config'] == self.configs[1]) - val1 = run1.get(self.attribute) - val2 = run2.get(self.attribute) - if val1 is None or val2 is None: - continue - category = self.get_category(run1, run2) - assert val1 > 0, (domain, problem, self.configs[0], val1) - assert val2 > 0, (domain, problem, self.configs[1], val2) - x = val1 - y = val2 / float(val1) - - categories[category].append((x, y)) - - self.ylim_top = max(self.ylim_top, y) - self.ylim_bottom = min(self.ylim_bottom, y) - self.xlim_left = min(self.xlim_left, x) - - # center around 1 - if self.ylim_bottom < 1: - self.ylim_top = max(self.ylim_top, 1 / float(self.ylim_bottom)) - if self.ylim_top > 1: - self.ylim_bottom = min(self.ylim_bottom, 1 / float(self.ylim_top)) - return categories - - def _set_scales(self, xscale, yscale): - # ScatterPlots use log-scaling on the x-axis by default. - default_xscale = 'log' - if self.attribute and self.attribute in self.LINEAR: - default_xscale = 'linear' - PlotReport._set_scales(self, xscale or default_xscale, 'log') diff --git a/experiments/issue499/v1.py b/experiments/issue499/v1.py deleted file mode 100755 index 665bc2feab..0000000000 --- a/experiments/issue499/v1.py +++ /dev/null @@ -1,57 +0,0 @@ -#! 
/usr/bin/env python -# -*- coding: utf-8 -*- - -from downward import suites -from lab.reports import Attribute, gm - -from common_setup import IssueConfig, IssueExperiment -from relativescatter import RelativeScatterPlotReport - -def main(revisions=None): - suite = suites.suite_optimal_with_ipc11() - - configs = { - IssueConfig('astar-lmcut', ['--search', 'astar(lmcut())']), - } - - exp = IssueExperiment( - revisions=revisions, - configs=configs, - suite=suite, - test_suite=['depot:pfile1'], - processes=4, - email='martin.wehrle@unibas.ch', - ) - - exp.add_comparison_table_step() - - exp.add_report( - RelativeScatterPlotReport( - attributes=["memory"], - filter_config=["issue499-base-astar-lmcut", "issue499-v1-astar-lmcut"], - get_category=lambda run1, run2: run1.get("domain"), - ), - outfile='issue499_base_v1_memory.png' - ) - - exp.add_report( - RelativeScatterPlotReport( - attributes=["total_time"], - filter_config=["issue499-base-astar-lmcut", "issue499-v1-astar-lmcut"], - get_category=lambda run1, run2: run1.get("domain"), - ), - outfile='issue499_base_v1_total_time.png' - ) - - exp.add_report( - RelativeScatterPlotReport( - attributes=["expansions_until_last_jump"], - filter_config=["issue499-base-astar-lmcut", "issue499-v1-astar-lmcut"], - get_category=lambda run1, run2: run1.get("domain"), - ), - outfile='issue499_base_v1_expansions_until_last_jump.png' - ) - - exp() - -main(revisions=['issue499-base', 'issue499-v1']) diff --git a/experiments/issue508/common_setup.py b/experiments/issue508/common_setup.py deleted file mode 100644 index 90e985c980..0000000000 --- a/experiments/issue508/common_setup.py +++ /dev/null @@ -1,352 +0,0 @@ -# -*- coding: utf-8 -*- - -import itertools -import os -import platform -import sys - -from lab.environments import LocalEnvironment, MaiaEnvironment -from lab.experiment import ARGPARSER -from lab.steps import Step - -from downward.experiments import DownwardExperiment, _get_rev_nick -from downward.checkouts import Translator, 
Preprocessor, Planner -from downward.reports.absolute import AbsoluteReport -from downward.reports.compare import CompareRevisionsReport -from downward.reports.scatter import ScatterPlotReport - - -def parse_args(): - ARGPARSER.add_argument( - "--test", - choices=["yes", "no", "auto"], - default="auto", - dest="test_run", - help="test experiment locally on a small suite if --test=yes or " - "--test=auto and we are not on a cluster") - return ARGPARSER.parse_args() - -ARGS = parse_args() - - -def get_script(): - """Get file name of main script.""" - import __main__ - return __main__.__file__ - - -def get_script_dir(): - """Get directory of main script. - - Usually a relative directory (depends on how it was called by the user.)""" - return os.path.dirname(get_script()) - - -def get_experiment_name(): - """Get name for experiment. - - Derived from the absolute filename of the main script, e.g. - "/ham/spam/eggs.py" => "spam-eggs".""" - script = os.path.abspath(get_script()) - script_dir = os.path.basename(os.path.dirname(script)) - script_base = os.path.splitext(os.path.basename(script))[0] - return "%s-%s" % (script_dir, script_base) - - -def get_data_dir(): - """Get data dir for the experiment. - - This is the subdirectory "data" of the directory containing - the main script.""" - return os.path.join(get_script_dir(), "data", get_experiment_name()) - - -def get_repo_base(): - """Get base directory of the repository, as an absolute path. - - Search upwards in the directory tree from the main script until a - directory with a subdirectory named ".hg" is found. 
- - Abort if the repo base cannot be found.""" - path = os.path.abspath(get_script_dir()) - while os.path.dirname(path) != path: - if os.path.exists(os.path.join(path, ".hg")): - return path - path = os.path.dirname(path) - sys.exit("repo base could not be found") - - -def is_running_on_cluster(): - node = platform.node() - return ("cluster" in node or - node.startswith("gkigrid") or - node in ["habakuk", "turtur"]) - - -def is_test_run(): - return ARGS.test_run == "yes" or (ARGS.test_run == "auto" and - not is_running_on_cluster()) - - -class IssueExperiment(DownwardExperiment): - """Wrapper for DownwardExperiment with a few convenience features.""" - - DEFAULT_TEST_SUITE = "gripper:prob01.pddl" - - DEFAULT_TABLE_ATTRIBUTES = [ - "cost", - "coverage", - "error", - "evaluations", - "expansions", - "expansions_until_last_jump", - "generated", - "memory", - "quality", - "run_dir", - "score_evaluations", - "score_expansions", - "score_generated", - "score_memory", - "score_search_time", - "score_total_time", - "search_time", - "total_time", - ] - - DEFAULT_SCATTER_PLOT_ATTRIBUTES = [ - "evaluations", - "expansions", - "expansions_until_last_jump", - "initial_h_value", - "memory", - "search_time", - "total_time", - ] - - PORTFOLIO_ATTRIBUTES = [ - "cost", - "coverage", - "plan_length", - ] - - def __init__(self, configs, suite, grid_priority=None, path=None, - repo=None, revisions=None, search_revisions=None, - test_suite=None, **kwargs): - """Create a DownwardExperiment with some convenience features. - - *configs* must be a non-empty dict of {nick: cmdline} pairs - that sets the planner configurations to test. :: - - IssueExperiment(configs={ - "lmcut": ["--search", "astar(lmcut())"], - "ipdb": ["--search", "astar(ipdb())"]}) - - *suite* sets the benchmarks for the experiment. It must be a - single string or a list of strings specifying domains or - tasks. The downward.suites module has many predefined - suites. 
:: - - IssueExperiment(suite=["grid", "gripper:prob01.pddl"]) - - from downward import suites - IssueExperiment(suite=suites.suite_all()) - IssueExperiment(suite=suites.suite_satisficing_with_ipc11()) - IssueExperiment(suite=suites.suite_optimal()) - - Use *grid_priority* to set the job priority for cluster - experiments. It must be in the range [-1023, 0] where 0 is the - highest priority. By default the priority is 0. :: - - IssueExperiment(grid_priority=-500) - - If *path* is specified, it must be the path to where the - experiment should be built (e.g. - /home/john/experiments/issue123/exp01/). If omitted, the - experiment path is derived automatically from the main - script's filename. Example:: - - script = experiments/issue123/exp01.py --> - path = experiments/issue123/data/issue123-exp01/ - - If *repo* is specified, it must be the path to the root of a - local Fast Downward repository. If omitted, the repository - is derived automatically from the main script's path. Example:: - - script = /path/to/fd-repo/experiments/issue123/exp01.py --> - repo = /path/to/fd-repo - - If *revisions* is specified, it should be a non-empty - list of revisions, which specify which planner versions to use - in the experiment. The same versions are used for translator, - preprocessor and search. :: - - IssueExperiment(revisions=["issue123", "4b3d581643"]) - - If *search_revisions* is specified, it should be a non-empty - list of revisions, which specify which search component - versions to use in the experiment. All runs use the - translator and preprocessor component of the first - revision. :: - - IssueExperiment(search_revisions=["default", "issue123"]) - - If you really need to specify the (translator, preprocessor, - planner) triples manually, use the *combinations* parameter - from the base class (might be deprecated soon). The options - *revisions*, *search_revisions* and *combinations* can be - freely mixed, but at least one of them must be given. 
- - Specify *test_suite* to set the benchmarks for experiment test - runs. By default the first gripper task is used. - - IssueExperiment(test_suite=["depot:pfile1", "tpp:p01.pddl"]) - - """ - - if is_test_run(): - kwargs["environment"] = LocalEnvironment() - suite = test_suite or self.DEFAULT_TEST_SUITE - elif "environment" not in kwargs: - kwargs["environment"] = MaiaEnvironment(priority=grid_priority) - - if path is None: - path = get_data_dir() - - if repo is None: - repo = get_repo_base() - - kwargs.setdefault("combinations", []) - - if not any([revisions, search_revisions, kwargs["combinations"]]): - raise ValueError('At least one of "revisions", "search_revisions" ' - 'or "combinations" must be given') - - if revisions: - kwargs["combinations"].extend([ - (Translator(repo, rev), - Preprocessor(repo, rev), - Planner(repo, rev)) - for rev in revisions]) - - if search_revisions: - base_rev = search_revisions[0] - # Use the same nick for all parts to get short revision nick. - kwargs["combinations"].extend([ - (Translator(repo, base_rev, nick=rev), - Preprocessor(repo, base_rev, nick=rev), - Planner(repo, rev, nick=rev)) - for rev in search_revisions]) - - DownwardExperiment.__init__(self, path=path, repo=repo, **kwargs) - - self._config_nicks = [] - for nick, config in configs.items(): - self.add_config(nick, config) - - self.add_suite(suite) - - @property - def revision_nicks(self): - # TODO: Once the add_algorithm() API is available we should get - # rid of the call to _get_rev_nick() and avoid inspecting the - # list of combinations by setting and saving the algorithm nicks. - return [_get_rev_nick(*combo) for combo in self.combinations] - - def add_config(self, nick, config, timeout=None): - DownwardExperiment.add_config(self, nick, config, timeout=timeout) - self._config_nicks.append(nick) - - def add_absolute_report_step(self, **kwargs): - """Add step that makes an absolute report. 
- - Absolute reports are useful for experiments that don't - compare revisions. - - The report is written to the experiment evaluation directory. - - All *kwargs* will be passed to the AbsoluteReport class. If - the keyword argument *attributes* is not specified, a - default list of attributes is used. :: - - exp.add_absolute_report_step(attributes=["coverage"]) - - """ - kwargs.setdefault("attributes", self.DEFAULT_TABLE_ATTRIBUTES) - report = AbsoluteReport(**kwargs) - outfile = get_experiment_name() + "." + report.output_format - self.add_report(report, outfile=outfile) - - def add_comparison_table_step(self, **kwargs): - """Add a step that makes pairwise revision comparisons. - - Create comparative reports for all pairs of Fast Downward - revision triples. Each report pairs up the runs of the same - config and lists the two absolute attribute values and their - difference for all attributes in kwargs["attributes"]. - - All *kwargs* will be passed to the CompareRevisionsReport - class. If the keyword argument *attributes* is not - specified, a default list of attributes is used. :: - - exp.add_comparison_table_step(attributes=["coverage"]) - - """ - kwargs.setdefault("attributes", self.DEFAULT_TABLE_ATTRIBUTES) - - def make_comparison_tables(): - for rev1, rev2 in itertools.combinations(self.revision_nicks, 2): - report = CompareRevisionsReport(rev1, rev2, **kwargs) - outfile = os.path.join(self.eval_dir, - "%s-%s-%s-compare.html" % - (self.name, rev1, rev2)) - report(self.eval_dir, outfile) - - self.add_step(Step("make-comparison-tables", make_comparison_tables)) - - def add_scatter_plot_step(self, attributes=None): - """Add a step that creates scatter plots for all revision pairs. - - Create a scatter plot for each combination of attribute, - configuration and revision pair. If *attributes* is not - specified, a list of common scatter plot attributes is used. - For portfolios all attributes except "cost", "coverage" and - "plan_length" will be ignored. 
:: - - exp.add_scatter_plot_step(attributes=["expansions"]) - - """ - if attributes is None: - attributes = self.DEFAULT_SCATTER_PLOT_ATTRIBUTES - scatter_dir = os.path.join(self.eval_dir, "scatter") - - def is_portfolio(config_nick): - return "fdss" in config_nick - - def make_scatter_plot(config_nick, rev1, rev2, attribute): - name = "-".join([self.name, rev1, rev2, attribute, config_nick]) - print "Make scatter plot for", name - algo1 = "%s-%s" % (rev1, config_nick) - algo2 = "%s-%s" % (rev2, config_nick) - report = ScatterPlotReport( - filter_config=[algo1, algo2], - attributes=[attribute], - get_category=lambda run1, run2: run1["domain"], - legend_location=(1.3, 0.5)) - report(self.eval_dir, - os.path.join(scatter_dir, rev1 + "-" + rev2, name)) - - def make_scatter_plots(): - for config_nick in self._config_nicks: - if is_portfolio(config_nick): - valid_attributes = [ - attr for attr in attributes - if attr in self.PORTFOLIO_ATTRIBUTES] - else: - valid_attributes = attributes - for rev1, rev2 in itertools.combinations( - self.revision_nicks, 2): - for attribute in valid_attributes: - make_scatter_plot(config_nick, rev1, rev2, attribute) - - self.add_step(Step("make-scatter-plots", make_scatter_plots)) diff --git a/experiments/issue508/configs.py b/experiments/issue508/configs.py deleted file mode 100644 index 18a8503e42..0000000000 --- a/experiments/issue508/configs.py +++ /dev/null @@ -1,204 +0,0 @@ -def configs_optimal_core(): - return { - # A* - "astar_blind": [ - "--search", - "astar(blind)"], - "astar_h2": [ - "--search", - "astar(hm(2))"], - "astar_ipdb": [ - "--search", - "astar(ipdb)"], - "astar_lmcount_lm_merged_rhw_hm": [ - "--search", - "astar(lmcount(lm_merged([lm_rhw(),lm_hm(m=1)]),admissible=true),mpd=true)"], - "astar_lmcut": [ - "--search", - "astar(lmcut)"], - "astar_hmax": [ - "--search", - "astar(hmax)"], - "astar_merge_and_shrink_bisim": [ - "--search", - "astar(merge_and_shrink(" - 
"merge_strategy=merge_linear(variable_order=reverse_level)," - "shrink_strategy=shrink_bisimulation(max_states=200000,greedy=false," - "group_by_h=true)))"], - "astar_merge_and_shrink_greedy_bisim": [ - "--search", - "astar(merge_and_shrink(" - "merge_strategy=merge_linear(variable_order=reverse_level)," - "shrink_strategy=shrink_bisimulation(max_states=infinity,threshold=1," - "greedy=true,group_by_h=false)))"], - "astar_merge_and_shrink_dfp_bisim": [ - "--search", - "astar(merge_and_shrink(merge_strategy=merge_dfp," - "shrink_strategy=shrink_bisimulation(max_states=50000,threshold=1," - "greedy=false,group_by_h=true)))"], - "astar_selmax_lmcut_lmcount": [ - "--search", - "astar(selmax([lmcut(),lmcount(lm_merged([lm_hm(m=1),lm_rhw()])," - "admissible=true)],training_set=1000),mpd=true)"], - } - - -def configs_satisficing_core(): - return { - # A* - "astar_goalcount": [ - "--search", - "astar(goalcount)"], - # eager greedy - "eager_greedy_ff": [ - "--heuristic", - "h=ff()", - "--search", - "eager_greedy(h, preferred=h)"], - "eager_greedy_add": [ - "--heuristic", - "h=add()", - "--search", - "eager_greedy(h, preferred=h)"], - "eager_greedy_cg": [ - "--heuristic", - "h=cg()", - "--search", - "eager_greedy(h, preferred=h)"], - "eager_greedy_cea": [ - "--heuristic", - "h=cea()", - "--search", - "eager_greedy(h, preferred=h)"], - # lazy greedy - "lazy_greedy_ff": [ - "--heuristic", - "h=ff()", - "--search", - "lazy_greedy(h, preferred=h)"], - "lazy_greedy_add": [ - "--heuristic", - "h=add()", - "--search", - "lazy_greedy(h, preferred=h)"], - "lazy_greedy_cg": [ - "--heuristic", - "h=cg()", - "--search", - "lazy_greedy(h, preferred=h)"], - } - - -def configs_optimal_ipc(): - return { - "seq_opt_merge_and_shrink": ["ipc", "seq-opt-merge-and-shrink"], - "seq_opt_fdss_1": ["ipc", "seq-opt-fdss-1"], - "seq_opt_fdss_2": ["ipc", "seq-opt-fdss-2"], - } - - -def configs_satisficing_ipc(): - return { - "seq_sat_lama_2011": ["ipc", "seq-sat-lama-2011"], - "seq_sat_fdss_1": ["ipc", 
"seq-sat-fdss-1"], - "seq_sat_fdss_2": ["ipc", "seq-sat-fdss-2"], - } - - -def configs_optimal_extended(): - return { - # A* - "astar_lmcount_lm_merged_rhw_hm_no_order": [ - "--search", - "astar(lmcount(lm_merged([lm_rhw(),lm_hm(m=1)]),admissible=true),mpd=true)"], - } - - -def configs_satisficing_extended(): - return { - # eager greedy - "eager_greedy_alt_ff_cg": [ - "--heuristic", - "hff=ff()", - "--heuristic", - "hcg=cg()", - "--search", - "eager_greedy(hff,hcg,preferred=[hff,hcg])"], - "eager_greedy_ff_no_pref": [ - "--search", - "eager_greedy(ff())"], - # lazy greedy - "lazy_greedy_alt_cea_cg": [ - "--heuristic", - "hcea=cea()", - "--heuristic", - "hcg=cg()", - "--search", - "lazy_greedy(hcea,hcg,preferred=[hcea,hcg])"], - "lazy_greedy_ff_no_pref": [ - "--search", - "lazy_greedy(ff())"], - "lazy_greedy_cea": [ - "--heuristic", - "h=cea()", - "--search", - "lazy_greedy(h, preferred=h)"], - # lazy wA* - "lazy_wa3_ff": [ - "--heuristic", - "h=ff()", - "--search", - "lazy_wastar(h,w=3,preferred=h)"], - # eager wA* - "eager_wa3_cg": [ - "--heuristic", - "h=cg()", - "--search", - "eager(single(sum([g(),weight(h,3)])),preferred=h)"], - # ehc - "ehc_ff": [ - "--search", - "ehc(ff())"], - # iterated - "iterated_wa_ff": [ - "--heuristic", - "h=ff()", - "--search", - "iterated([lazy_wastar(h,w=10), lazy_wastar(h,w=5), lazy_wastar(h,w=3)," - "lazy_wastar(h,w=2), lazy_wastar(h,w=1)])"], - # pareto open list - "pareto_ff": [ - "--heuristic", - "h=ff()", - "--search", - "eager(pareto([sum([g(), h]), h]), reopen_closed=true, pathmax=false," - "f_eval=sum([g(), h]))"], - # bucket-based open list - "bucket_lmcut": [ - "--heuristic", - "h=lmcut()", - "--search", - "eager(single_buckets(h), reopen_closed=true, pathmax=false)"], - } - - -def default_configs_optimal(core=True, ipc=True, extended=False): - configs = {} - if core: - configs.update(configs_optimal_core()) - if ipc: - configs.update(configs_optimal_ipc()) - if extended: - configs.update(configs_optimal_extended()) - 
return configs - - -def default_configs_satisficing(core=True, ipc=True, extended=False): - configs = {} - if core: - configs.update(configs_satisficing_core()) - if ipc: - configs.update(configs_satisficing_ipc()) - if extended: - configs.update(configs_satisficing_extended()) - return configs diff --git a/experiments/issue508/mas.py b/experiments/issue508/mas.py deleted file mode 100755 index 196a8c8c35..0000000000 --- a/experiments/issue508/mas.py +++ /dev/null @@ -1,27 +0,0 @@ -#! /usr/bin/env python -# -*- coding: utf-8 -*- - -from downward import suites - -import common_setup -import configs - - -REVS = ["issue508-base", "issue508-v1"] -LIMITS = {"search_time": 1800} -SUITE = suites.suite_optimal_with_ipc11() - -configs_optimal_core = configs.default_configs_optimal(ipc=False) -CONFIGS = {} -for nick in ["astar_merge_and_shrink_bisim", "astar_merge_and_shrink_greedy_bisim"]: - CONFIGS[nick] = configs_optimal_core[nick] - -exp = common_setup.IssueExperiment( - search_revisions=REVS, - configs=CONFIGS, - suite=SUITE, - limits=LIMITS, - ) -exp.add_comparison_table_step() - -exp() diff --git a/experiments/issue511/common_setup.py b/experiments/issue511/common_setup.py deleted file mode 100644 index 90e985c980..0000000000 --- a/experiments/issue511/common_setup.py +++ /dev/null @@ -1,352 +0,0 @@ -# -*- coding: utf-8 -*- - -import itertools -import os -import platform -import sys - -from lab.environments import LocalEnvironment, MaiaEnvironment -from lab.experiment import ARGPARSER -from lab.steps import Step - -from downward.experiments import DownwardExperiment, _get_rev_nick -from downward.checkouts import Translator, Preprocessor, Planner -from downward.reports.absolute import AbsoluteReport -from downward.reports.compare import CompareRevisionsReport -from downward.reports.scatter import ScatterPlotReport - - -def parse_args(): - ARGPARSER.add_argument( - "--test", - choices=["yes", "no", "auto"], - default="auto", - dest="test_run", - help="test experiment 
locally on a small suite if --test=yes or " - "--test=auto and we are not on a cluster") - return ARGPARSER.parse_args() - -ARGS = parse_args() - - -def get_script(): - """Get file name of main script.""" - import __main__ - return __main__.__file__ - - -def get_script_dir(): - """Get directory of main script. - - Usually a relative directory (depends on how it was called by the user.)""" - return os.path.dirname(get_script()) - - -def get_experiment_name(): - """Get name for experiment. - - Derived from the absolute filename of the main script, e.g. - "/ham/spam/eggs.py" => "spam-eggs".""" - script = os.path.abspath(get_script()) - script_dir = os.path.basename(os.path.dirname(script)) - script_base = os.path.splitext(os.path.basename(script))[0] - return "%s-%s" % (script_dir, script_base) - - -def get_data_dir(): - """Get data dir for the experiment. - - This is the subdirectory "data" of the directory containing - the main script.""" - return os.path.join(get_script_dir(), "data", get_experiment_name()) - - -def get_repo_base(): - """Get base directory of the repository, as an absolute path. - - Search upwards in the directory tree from the main script until a - directory with a subdirectory named ".hg" is found. 
- - Abort if the repo base cannot be found.""" - path = os.path.abspath(get_script_dir()) - while os.path.dirname(path) != path: - if os.path.exists(os.path.join(path, ".hg")): - return path - path = os.path.dirname(path) - sys.exit("repo base could not be found") - - -def is_running_on_cluster(): - node = platform.node() - return ("cluster" in node or - node.startswith("gkigrid") or - node in ["habakuk", "turtur"]) - - -def is_test_run(): - return ARGS.test_run == "yes" or (ARGS.test_run == "auto" and - not is_running_on_cluster()) - - -class IssueExperiment(DownwardExperiment): - """Wrapper for DownwardExperiment with a few convenience features.""" - - DEFAULT_TEST_SUITE = "gripper:prob01.pddl" - - DEFAULT_TABLE_ATTRIBUTES = [ - "cost", - "coverage", - "error", - "evaluations", - "expansions", - "expansions_until_last_jump", - "generated", - "memory", - "quality", - "run_dir", - "score_evaluations", - "score_expansions", - "score_generated", - "score_memory", - "score_search_time", - "score_total_time", - "search_time", - "total_time", - ] - - DEFAULT_SCATTER_PLOT_ATTRIBUTES = [ - "evaluations", - "expansions", - "expansions_until_last_jump", - "initial_h_value", - "memory", - "search_time", - "total_time", - ] - - PORTFOLIO_ATTRIBUTES = [ - "cost", - "coverage", - "plan_length", - ] - - def __init__(self, configs, suite, grid_priority=None, path=None, - repo=None, revisions=None, search_revisions=None, - test_suite=None, **kwargs): - """Create a DownwardExperiment with some convenience features. - - *configs* must be a non-empty dict of {nick: cmdline} pairs - that sets the planner configurations to test. :: - - IssueExperiment(configs={ - "lmcut": ["--search", "astar(lmcut())"], - "ipdb": ["--search", "astar(ipdb())"]}) - - *suite* sets the benchmarks for the experiment. It must be a - single string or a list of strings specifying domains or - tasks. The downward.suites module has many predefined - suites. 
:: - - IssueExperiment(suite=["grid", "gripper:prob01.pddl"]) - - from downward import suites - IssueExperiment(suite=suites.suite_all()) - IssueExperiment(suite=suites.suite_satisficing_with_ipc11()) - IssueExperiment(suite=suites.suite_optimal()) - - Use *grid_priority* to set the job priority for cluster - experiments. It must be in the range [-1023, 0] where 0 is the - highest priority. By default the priority is 0. :: - - IssueExperiment(grid_priority=-500) - - If *path* is specified, it must be the path to where the - experiment should be built (e.g. - /home/john/experiments/issue123/exp01/). If omitted, the - experiment path is derived automatically from the main - script's filename. Example:: - - script = experiments/issue123/exp01.py --> - path = experiments/issue123/data/issue123-exp01/ - - If *repo* is specified, it must be the path to the root of a - local Fast Downward repository. If omitted, the repository - is derived automatically from the main script's path. Example:: - - script = /path/to/fd-repo/experiments/issue123/exp01.py --> - repo = /path/to/fd-repo - - If *revisions* is specified, it should be a non-empty - list of revisions, which specify which planner versions to use - in the experiment. The same versions are used for translator, - preprocessor and search. :: - - IssueExperiment(revisions=["issue123", "4b3d581643"]) - - If *search_revisions* is specified, it should be a non-empty - list of revisions, which specify which search component - versions to use in the experiment. All runs use the - translator and preprocessor component of the first - revision. :: - - IssueExperiment(search_revisions=["default", "issue123"]) - - If you really need to specify the (translator, preprocessor, - planner) triples manually, use the *combinations* parameter - from the base class (might be deprecated soon). The options - *revisions*, *search_revisions* and *combinations* can be - freely mixed, but at least one of them must be given. 
- - Specify *test_suite* to set the benchmarks for experiment test - runs. By default the first gripper task is used. - - IssueExperiment(test_suite=["depot:pfile1", "tpp:p01.pddl"]) - - """ - - if is_test_run(): - kwargs["environment"] = LocalEnvironment() - suite = test_suite or self.DEFAULT_TEST_SUITE - elif "environment" not in kwargs: - kwargs["environment"] = MaiaEnvironment(priority=grid_priority) - - if path is None: - path = get_data_dir() - - if repo is None: - repo = get_repo_base() - - kwargs.setdefault("combinations", []) - - if not any([revisions, search_revisions, kwargs["combinations"]]): - raise ValueError('At least one of "revisions", "search_revisions" ' - 'or "combinations" must be given') - - if revisions: - kwargs["combinations"].extend([ - (Translator(repo, rev), - Preprocessor(repo, rev), - Planner(repo, rev)) - for rev in revisions]) - - if search_revisions: - base_rev = search_revisions[0] - # Use the same nick for all parts to get short revision nick. - kwargs["combinations"].extend([ - (Translator(repo, base_rev, nick=rev), - Preprocessor(repo, base_rev, nick=rev), - Planner(repo, rev, nick=rev)) - for rev in search_revisions]) - - DownwardExperiment.__init__(self, path=path, repo=repo, **kwargs) - - self._config_nicks = [] - for nick, config in configs.items(): - self.add_config(nick, config) - - self.add_suite(suite) - - @property - def revision_nicks(self): - # TODO: Once the add_algorithm() API is available we should get - # rid of the call to _get_rev_nick() and avoid inspecting the - # list of combinations by setting and saving the algorithm nicks. - return [_get_rev_nick(*combo) for combo in self.combinations] - - def add_config(self, nick, config, timeout=None): - DownwardExperiment.add_config(self, nick, config, timeout=timeout) - self._config_nicks.append(nick) - - def add_absolute_report_step(self, **kwargs): - """Add step that makes an absolute report. 
- - Absolute reports are useful for experiments that don't - compare revisions. - - The report is written to the experiment evaluation directory. - - All *kwargs* will be passed to the AbsoluteReport class. If - the keyword argument *attributes* is not specified, a - default list of attributes is used. :: - - exp.add_absolute_report_step(attributes=["coverage"]) - - """ - kwargs.setdefault("attributes", self.DEFAULT_TABLE_ATTRIBUTES) - report = AbsoluteReport(**kwargs) - outfile = get_experiment_name() + "." + report.output_format - self.add_report(report, outfile=outfile) - - def add_comparison_table_step(self, **kwargs): - """Add a step that makes pairwise revision comparisons. - - Create comparative reports for all pairs of Fast Downward - revision triples. Each report pairs up the runs of the same - config and lists the two absolute attribute values and their - difference for all attributes in kwargs["attributes"]. - - All *kwargs* will be passed to the CompareRevisionsReport - class. If the keyword argument *attributes* is not - specified, a default list of attributes is used. :: - - exp.add_comparison_table_step(attributes=["coverage"]) - - """ - kwargs.setdefault("attributes", self.DEFAULT_TABLE_ATTRIBUTES) - - def make_comparison_tables(): - for rev1, rev2 in itertools.combinations(self.revision_nicks, 2): - report = CompareRevisionsReport(rev1, rev2, **kwargs) - outfile = os.path.join(self.eval_dir, - "%s-%s-%s-compare.html" % - (self.name, rev1, rev2)) - report(self.eval_dir, outfile) - - self.add_step(Step("make-comparison-tables", make_comparison_tables)) - - def add_scatter_plot_step(self, attributes=None): - """Add a step that creates scatter plots for all revision pairs. - - Create a scatter plot for each combination of attribute, - configuration and revision pair. If *attributes* is not - specified, a list of common scatter plot attributes is used. - For portfolios all attributes except "cost", "coverage" and - "plan_length" will be ignored. 
:: - - exp.add_scatter_plot_step(attributes=["expansions"]) - - """ - if attributes is None: - attributes = self.DEFAULT_SCATTER_PLOT_ATTRIBUTES - scatter_dir = os.path.join(self.eval_dir, "scatter") - - def is_portfolio(config_nick): - return "fdss" in config_nick - - def make_scatter_plot(config_nick, rev1, rev2, attribute): - name = "-".join([self.name, rev1, rev2, attribute, config_nick]) - print "Make scatter plot for", name - algo1 = "%s-%s" % (rev1, config_nick) - algo2 = "%s-%s" % (rev2, config_nick) - report = ScatterPlotReport( - filter_config=[algo1, algo2], - attributes=[attribute], - get_category=lambda run1, run2: run1["domain"], - legend_location=(1.3, 0.5)) - report(self.eval_dir, - os.path.join(scatter_dir, rev1 + "-" + rev2, name)) - - def make_scatter_plots(): - for config_nick in self._config_nicks: - if is_portfolio(config_nick): - valid_attributes = [ - attr for attr in attributes - if attr in self.PORTFOLIO_ATTRIBUTES] - else: - valid_attributes = attributes - for rev1, rev2 in itertools.combinations( - self.revision_nicks, 2): - for attribute in valid_attributes: - make_scatter_plot(config_nick, rev1, rev2, attribute) - - self.add_step(Step("make-scatter-plots", make_scatter_plots)) diff --git a/experiments/issue511/opt-v1.py b/experiments/issue511/opt-v1.py deleted file mode 100755 index a538051520..0000000000 --- a/experiments/issue511/opt-v1.py +++ /dev/null @@ -1,25 +0,0 @@ -#! 
/usr/bin/env python -# -*- coding: utf-8 -*- - -from downward import suites - -import common_setup - - -REVS = ["issue511-base", "issue511-v1"] -LIMITS = {"search_time": 1800} -SUITE = suites.suite_optimal_with_ipc11() -CONFIGS = { - "astar_blind": ["--search", "astar(blind())"], - "astar_hmax": ["--search", "astar(hmax())"], -} - -exp = common_setup.IssueExperiment( - search_revisions=REVS, - configs=CONFIGS, - suite=SUITE, - limits=LIMITS, - ) -exp.add_comparison_table_step() - -exp() diff --git a/experiments/issue511/sat-v1.py b/experiments/issue511/sat-v1.py deleted file mode 100755 index 0a062ef191..0000000000 --- a/experiments/issue511/sat-v1.py +++ /dev/null @@ -1,33 +0,0 @@ -#! /usr/bin/env python -# -*- coding: utf-8 -*- - -from downward import suites - -import common_setup - - -REVS = ["issue511-base", "issue511-v1"] -LIMITS = {"search_time": 1800} -SUITE = suites.suite_satisficing_with_ipc11() -CONFIGS = { - "eager_greedy_add": [ - "--heuristic", - "h=add()", - "--search", - "eager_greedy(h, preferred=h)"], - "lazy_greedy_ff": [ - "--heuristic", - "h=ff()", - "--search", - "lazy_greedy(h, preferred=h)"], -} - -exp = common_setup.IssueExperiment( - search_revisions=REVS, - configs=CONFIGS, - suite=SUITE, - limits=LIMITS, - ) -exp.add_comparison_table_step() - -exp() diff --git a/experiments/issue512/common_setup.py b/experiments/issue512/common_setup.py deleted file mode 100644 index 635088ec30..0000000000 --- a/experiments/issue512/common_setup.py +++ /dev/null @@ -1,399 +0,0 @@ -# -*- coding: utf-8 -*- - -import itertools -import os -import platform -import sys - -from lab.environments import LocalEnvironment, MaiaEnvironment -from lab.experiment import ARGPARSER -from lab.reports import Table -from lab.steps import Step - -from downward.experiments import DownwardExperiment, _get_rev_nick -from downward.checkouts import Translator, Preprocessor, Planner -from downward.reports import PlanningReport -from downward.reports.absolute import AbsoluteReport 
-from downward.reports.compare import CompareRevisionsReport -from downward.reports.scatter import ScatterPlotReport - - -def parse_args(): - ARGPARSER.add_argument( - "--test", - choices=["yes", "no", "auto"], - default="auto", - dest="test_run", - help="test experiment locally on a small suite if --test=yes or " - "--test=auto and we are not on a cluster") - return ARGPARSER.parse_args() - -ARGS = parse_args() - - -def get_script(): - """Get file name of main script.""" - import __main__ - return __main__.__file__ - - -def get_script_dir(): - """Get directory of main script. - - Usually a relative directory (depends on how it was called by the user.)""" - return os.path.dirname(get_script()) - - -def get_experiment_name(): - """Get name for experiment. - - Derived from the absolute filename of the main script, e.g. - "/ham/spam/eggs.py" => "spam-eggs".""" - script = os.path.abspath(get_script()) - script_dir = os.path.basename(os.path.dirname(script)) - script_base = os.path.splitext(os.path.basename(script))[0] - return "%s-%s" % (script_dir, script_base) - - -def get_data_dir(): - """Get data dir for the experiment. - - This is the subdirectory "data" of the directory containing - the main script.""" - return os.path.join(get_script_dir(), "data", get_experiment_name()) - - -def get_repo_base(): - """Get base directory of the repository, as an absolute path. - - Search upwards in the directory tree from the main script until a - directory with a subdirectory named ".hg" is found. 
- - Abort if the repo base cannot be found.""" - path = os.path.abspath(get_script_dir()) - while os.path.dirname(path) != path: - if os.path.exists(os.path.join(path, ".hg")): - return path - path = os.path.dirname(path) - sys.exit("repo base could not be found") - - -def is_running_on_cluster(): - node = platform.node() - return ("cluster" in node or - node.startswith("gkigrid") or - node in ["habakuk", "turtur"]) - - -def is_test_run(): - return ARGS.test_run == "yes" or (ARGS.test_run == "auto" and - not is_running_on_cluster()) - - -class IssueExperiment(DownwardExperiment): - """Wrapper for DownwardExperiment with a few convenience features.""" - - DEFAULT_TEST_SUITE = "gripper:prob01.pddl" - - # TODO: Add something about errors/exit codes. - DEFAULT_TABLE_ATTRIBUTES = [ - "cost", - "coverage", - "evaluations", - "expansions", - "expansions_until_last_jump", - "generated", - "memory", - "quality", - "run_dir", - "score_evaluations", - "score_expansions", - "score_generated", - "score_memory", - "score_search_time", - "score_total_time", - "search_time", - "total_time", - ] - - DEFAULT_SCATTER_PLOT_ATTRIBUTES = [ - "evaluations", - "expansions", - "expansions_until_last_jump", - "initial_h_value", - "memory", - "search_time", - "total_time", - ] - - PORTFOLIO_ATTRIBUTES = [ - "cost", - "coverage", - "plan_length", - ] - - def __init__(self, configs, suite, grid_priority=None, path=None, - repo=None, revisions=None, search_revisions=None, - test_suite=None, **kwargs): - """Create a DownwardExperiment with some convenience features. - - *configs* must be a non-empty dict of {nick: cmdline} pairs - that sets the planner configurations to test. :: - - IssueExperiment(configs={ - "lmcut": ["--search", "astar(lmcut())"], - "ipdb": ["--search", "astar(ipdb())"]}) - - *suite* sets the benchmarks for the experiment. It must be a - single string or a list of strings specifying domains or - tasks. The downward.suites module has many predefined - suites. 
:: - - IssueExperiment(suite=["grid", "gripper:prob01.pddl"]) - - from downward import suites - IssueExperiment(suite=suites.suite_all()) - IssueExperiment(suite=suites.suite_satisficing_with_ipc11()) - IssueExperiment(suite=suites.suite_optimal()) - - Use *grid_priority* to set the job priority for cluster - experiments. It must be in the range [-1023, 0] where 0 is the - highest priority. By default the priority is 0. :: - - IssueExperiment(grid_priority=-500) - - If *path* is specified, it must be the path to where the - experiment should be built (e.g. - /home/john/experiments/issue123/exp01/). If omitted, the - experiment path is derived automatically from the main - script's filename. Example:: - - script = experiments/issue123/exp01.py --> - path = experiments/issue123/data/issue123-exp01/ - - If *repo* is specified, it must be the path to the root of a - local Fast Downward repository. If omitted, the repository - is derived automatically from the main script's path. Example:: - - script = /path/to/fd-repo/experiments/issue123/exp01.py --> - repo = /path/to/fd-repo - - If *revisions* is specified, it should be a non-empty - list of revisions, which specify which planner versions to use - in the experiment. The same versions are used for translator, - preprocessor and search. :: - - IssueExperiment(revisions=["issue123", "4b3d581643"]) - - If *search_revisions* is specified, it should be a non-empty - list of revisions, which specify which search component - versions to use in the experiment. All runs use the - translator and preprocessor component of the first - revision. :: - - IssueExperiment(search_revisions=["default", "issue123"]) - - If you really need to specify the (translator, preprocessor, - planner) triples manually, use the *combinations* parameter - from the base class (might be deprecated soon). The options - *revisions*, *search_revisions* and *combinations* can be - freely mixed, but at least one of them must be given. 
- - Specify *test_suite* to set the benchmarks for experiment test - runs. By default the first gripper task is used. - - IssueExperiment(test_suite=["depot:pfile1", "tpp:p01.pddl"]) - - """ - - if is_test_run(): - kwargs["environment"] = LocalEnvironment() - suite = test_suite or self.DEFAULT_TEST_SUITE - elif "environment" not in kwargs: - kwargs["environment"] = MaiaEnvironment(priority=grid_priority) - - if path is None: - path = get_data_dir() - - if repo is None: - repo = get_repo_base() - - kwargs.setdefault("combinations", []) - - if not any([revisions, search_revisions, kwargs["combinations"]]): - raise ValueError('At least one of "revisions", "search_revisions" ' - 'or "combinations" must be given') - - if revisions: - kwargs["combinations"].extend([ - (Translator(repo, rev), - Preprocessor(repo, rev), - Planner(repo, rev)) - for rev in revisions]) - - if search_revisions: - base_rev = search_revisions[0] - # Use the same nick for all parts to get short revision nick. - kwargs["combinations"].extend([ - (Translator(repo, base_rev, nick=rev), - Preprocessor(repo, base_rev, nick=rev), - Planner(repo, rev, nick=rev)) - for rev in search_revisions]) - - DownwardExperiment.__init__(self, path=path, repo=repo, **kwargs) - - self._config_nicks = [] - for nick, config in configs.items(): - self.add_config(nick, config) - - self.add_suite(suite) - - @property - def revision_nicks(self): - # TODO: Once the add_algorithm() API is available we should get - # rid of the call to _get_rev_nick() and avoid inspecting the - # list of combinations by setting and saving the algorithm nicks. - return [_get_rev_nick(*combo) for combo in self.combinations] - - def add_config(self, nick, config, timeout=None): - DownwardExperiment.add_config(self, nick, config, timeout=timeout) - self._config_nicks.append(nick) - - def add_absolute_report_step(self, **kwargs): - """Add step that makes an absolute report. 
- - Absolute reports are useful for experiments that don't - compare revisions. - - The report is written to the experiment evaluation directory. - - All *kwargs* will be passed to the AbsoluteReport class. If - the keyword argument *attributes* is not specified, a - default list of attributes is used. :: - - exp.add_absolute_report_step(attributes=["coverage"]) - - """ - kwargs.setdefault("attributes", self.DEFAULT_TABLE_ATTRIBUTES) - report = AbsoluteReport(**kwargs) - outfile = get_experiment_name() + "." + report.output_format - self.add_report(report, outfile=outfile) - - def add_comparison_table_step(self, **kwargs): - """Add a step that makes pairwise revision comparisons. - - Create comparative reports for all pairs of Fast Downward - revision triples. Each report pairs up the runs of the same - config and lists the two absolute attribute values and their - difference for all attributes in kwargs["attributes"]. - - All *kwargs* will be passed to the CompareRevisionsReport - class. If the keyword argument *attributes* is not - specified, a default list of attributes is used. :: - - exp.add_comparison_table_step(attributes=["coverage"]) - - """ - kwargs.setdefault("attributes", self.DEFAULT_TABLE_ATTRIBUTES) - - def make_comparison_tables(): - for rev1, rev2 in itertools.combinations(self.revision_nicks, 2): - report = CompareRevisionsReport(rev1, rev2, **kwargs) - outfile = os.path.join(self.eval_dir, - "%s-%s-compare.html" % (rev1, rev2)) - report(self.eval_dir, outfile) - - self.add_step(Step("make-comparison-tables", make_comparison_tables)) - - def add_scatter_plot_step(self, attributes=None): - """Add a step that creates scatter plots for all revision pairs. - - Create a scatter plot for each combination of attribute, - configuration and revision pair. If *attributes* is not - specified, a list of common scatter plot attributes is used. - For portfolios all attributes except "cost", "coverage" and - "plan_length" will be ignored. 
:: - - exp.add_scatter_plot_step(attributes=["expansions"]) - - """ - if attributes is None: - attributes = self.DEFAULT_SCATTER_PLOT_ATTRIBUTES - scatter_dir = os.path.join(self.eval_dir, "scatter") - - def is_portfolio(config_nick): - return "fdss" in config_nick - - def make_scatter_plots(): - for config_nick in self._config_nicks: - for rev1, rev2 in itertools.combinations( - self.revision_nicks, 2): - algo1 = "%s-%s" % (rev1, config_nick) - algo2 = "%s-%s" % (rev2, config_nick) - if is_portfolio(config_nick): - valid_attributes = [ - attr for attr in attributes - if attr in self.PORTFOLIO_ATTRIBUTES] - else: - valid_attributes = attributes - for attribute in valid_attributes: - name = "-".join([rev1, rev2, attribute, config_nick]) - print "Make scatter plot for", name - report = ScatterPlotReport( - filter_config=[algo1, algo2], - attributes=[attribute], - get_category=lambda run1, run2: run1["domain"], - legend_location=(1.3, 0.5)) - report(self.eval_dir, os.path.join(scatter_dir, name)) - - self.add_step(Step("make-scatter-plots", make_scatter_plots)) - - -class RegressionReport(PlanningReport): - """ - Compare revisions for tasks on which the first revision performs - better than other revisions. - - *revision_nicks* must be a list of revision_nicks, e.g. - ["default", "issue123"]. - - *config_nicks* must be a list of configuration nicknames, e.g. - ["eager_greedy_ff", "eager_greedy_add"]. - - *regression_attribute* is the attribute that we compare between - different revisions. It defaults to "coverage". 
- - Example comparing search_time for tasks were we lose coverage:: - - exp.add_report(RegressionReport(revision_nicks=["default", "issue123"], - config_nicks=["eager_greedy_ff"], - regression_attribute="coverage", - attributes="search_time")) - """ - def __init__(self, revision_nicks, config_nicks, - regression_attribute="coverage", **kwargs): - PlanningReport.__init__(self, **kwargs) - assert revision_nicks - self.revision_nicks = revision_nicks - assert config_nicks - self.config_nicks = config_nicks - self.regression_attribute = regression_attribute - - def get_markup(self): - tables = [] - for (domain, problem) in self.problems: - for config_nick in self.config_nicks: - runs = [self.runs[(domain, problem, rev + "-" + config_nick)] - for rev in self.revision_nicks] - - if any(runs[0][self.regression_attribute] > - runs[i][self.regression_attribute] - for i in range(1, len(self.revision_nicks))): - print "\"%s:%s\"," % (domain, problem) - table = Table() - for rev, run in zip(self.revision_nicks, runs): - for attr in self.attributes: - table.add_cell(rev, attr, run.get(attr)) - table_name = ":".join((domain, problem, config_nick)) - tables.append((table_name, table)) - return "\n".join(name + "\n" + str(table) for name, table in tables) diff --git a/experiments/issue512/custom-parser.py b/experiments/issue512/custom-parser.py deleted file mode 100755 index b4e8206801..0000000000 --- a/experiments/issue512/custom-parser.py +++ /dev/null @@ -1,19 +0,0 @@ -#! 
/usr/bin/env python - -from lab.parser import Parser - - -class CustomParser(Parser): - def __init__(self): - Parser.__init__(self) - self.add_pattern( - "init_time", - "Best heuristic value: \d+ \[g=0, 1 evaluated, 0 expanded, t=(.+)s, \d+ KB\]", - required=True, - type=float) - - -if __name__ == "__main__": - parser = CustomParser() - print "Running custom parser" - parser.parse() diff --git a/experiments/issue512/issue512.py b/experiments/issue512/issue512.py deleted file mode 100755 index c0aace0f7c..0000000000 --- a/experiments/issue512/issue512.py +++ /dev/null @@ -1,37 +0,0 @@ -#! /usr/bin/env python -# -*- coding: utf-8 -*- - -from downward import configs, suites - -import common_setup - - -SEARCH_REVS = ["issue512-base", "issue512-v1"] -LIMITS = {"search_time": 1800} -SUITE = suites.suite_satisficing_with_ipc11() - -configs_satisficing_core = configs.configs_satisficing_core() -CONFIGS = {} -for name in ["eager_greedy_add", "eager_greedy_ff", - "lazy_greedy_add", "lazy_greedy_ff"]: - CONFIGS[name] = configs_satisficing_core[name] -CONFIGS["blind"] = ["--search", "astar(blind())"] - - -exp = common_setup.IssueExperiment( - revisions=SEARCH_REVS, - configs=CONFIGS, - suite=SUITE, - limits=LIMITS, - ) -exp.add_search_parser("custom-parser.py") - -attributes = attributes=exp.DEFAULT_TABLE_ATTRIBUTES + ["init_time"] -exp.add_absolute_report_step(attributes=attributes) -exp.add_comparison_table_step(attributes=attributes) -exp.add_report(common_setup.RegressionReport( - revision_nicks=exp.revision_nicks, - config_nicks=CONFIGS.keys(), - attributes=attributes)) - -exp() diff --git a/experiments/issue524/common_setup.py b/experiments/issue524/common_setup.py deleted file mode 100644 index f215f30d64..0000000000 --- a/experiments/issue524/common_setup.py +++ /dev/null @@ -1,384 +0,0 @@ -# -*- coding: utf-8 -*- - -import itertools -import os -import platform -import subprocess -import sys - -from lab.experiment import ARGPARSER -from lab import tools - -from 
downward.experiment import FastDownwardExperiment -from downward.reports.absolute import AbsoluteReport -from downward.reports.compare import ComparativeReport -from downward.reports.scatter import ScatterPlotReport - -from relativescatter import RelativeScatterPlotReport - - -def parse_args(): - ARGPARSER.add_argument( - "--test", - choices=["yes", "no", "auto"], - default="auto", - dest="test_run", - help="test experiment locally on a small suite if --test=yes or " - "--test=auto and we are not on a cluster") - return ARGPARSER.parse_args() - -ARGS = parse_args() - - -DEFAULT_OPTIMAL_SUITE = [ - 'airport', 'barman-opt11-strips', 'barman-opt14-strips', 'blocks', - 'childsnack-opt14-strips', 'depot', 'driverlog', - 'elevators-opt08-strips', 'elevators-opt11-strips', - 'floortile-opt11-strips', 'floortile-opt14-strips', 'freecell', - 'ged-opt14-strips', 'grid', 'gripper', 'hiking-opt14-strips', - 'logistics00', 'logistics98', 'miconic', 'movie', 'mprime', - 'mystery', 'nomystery-opt11-strips', 'openstacks-opt08-strips', - 'openstacks-opt11-strips', 'openstacks-opt14-strips', - 'openstacks-strips', 'parcprinter-08-strips', - 'parcprinter-opt11-strips', 'parking-opt11-strips', - 'parking-opt14-strips', 'pathways-noneg', 'pegsol-08-strips', - 'pegsol-opt11-strips', 'pipesworld-notankage', - 'pipesworld-tankage', 'psr-small', 'rovers', 'satellite', - 'scanalyzer-08-strips', 'scanalyzer-opt11-strips', - 'sokoban-opt08-strips', 'sokoban-opt11-strips', 'storage', - 'tetris-opt14-strips', 'tidybot-opt11-strips', - 'tidybot-opt14-strips', 'tpp', 'transport-opt08-strips', - 'transport-opt11-strips', 'transport-opt14-strips', - 'trucks-strips', 'visitall-opt11-strips', 'visitall-opt14-strips', - 'woodworking-opt08-strips', 'woodworking-opt11-strips', - 'zenotravel'] - -DEFAULT_SATISFICING_SUITE = [ - 'airport', 'assembly', 'barman-sat11-strips', - 'barman-sat14-strips', 'blocks', 'cavediving-14-adl', - 'childsnack-sat14-strips', 'citycar-sat14-adl', 'depot', - 'driverlog', 
'elevators-sat08-strips', 'elevators-sat11-strips', - 'floortile-sat11-strips', 'floortile-sat14-strips', 'freecell', - 'ged-sat14-strips', 'grid', 'gripper', 'hiking-sat14-strips', - 'logistics00', 'logistics98', 'maintenance-sat14-adl', 'miconic', - 'miconic-fulladl', 'miconic-simpleadl', 'movie', 'mprime', - 'mystery', 'nomystery-sat11-strips', 'openstacks', - 'openstacks-sat08-adl', 'openstacks-sat08-strips', - 'openstacks-sat11-strips', 'openstacks-sat14-strips', - 'openstacks-strips', 'optical-telegraphs', 'parcprinter-08-strips', - 'parcprinter-sat11-strips', 'parking-sat11-strips', - 'parking-sat14-strips', 'pathways', 'pathways-noneg', - 'pegsol-08-strips', 'pegsol-sat11-strips', 'philosophers', - 'pipesworld-notankage', 'pipesworld-tankage', 'psr-large', - 'psr-middle', 'psr-small', 'rovers', 'satellite', - 'scanalyzer-08-strips', 'scanalyzer-sat11-strips', 'schedule', - 'sokoban-sat08-strips', 'sokoban-sat11-strips', 'storage', - 'tetris-sat14-strips', 'thoughtful-sat14-strips', - 'tidybot-sat11-strips', 'tpp', 'transport-sat08-strips', - 'transport-sat11-strips', 'transport-sat14-strips', 'trucks', - 'trucks-strips', 'visitall-sat11-strips', 'visitall-sat14-strips', - 'woodworking-sat08-strips', 'woodworking-sat11-strips', - 'zenotravel'] - - -def get_script(): - """Get file name of main script.""" - return tools.get_script_path() - - -def get_script_dir(): - """Get directory of main script. - - Usually a relative directory (depends on how it was called by the user.)""" - return os.path.dirname(get_script()) - - -def get_experiment_name(): - """Get name for experiment. - - Derived from the absolute filename of the main script, e.g. - "/ham/spam/eggs.py" => "spam-eggs".""" - script = os.path.abspath(get_script()) - script_dir = os.path.basename(os.path.dirname(script)) - script_base = os.path.splitext(os.path.basename(script))[0] - return "%s-%s" % (script_dir, script_base) - - -def get_data_dir(): - """Get data dir for the experiment. 
- - This is the subdirectory "data" of the directory containing - the main script.""" - return os.path.join(get_script_dir(), "data", get_experiment_name()) - - -def get_repo_base(): - """Get base directory of the repository, as an absolute path. - - Search upwards in the directory tree from the main script until a - directory with a subdirectory named ".hg" is found. - - Abort if the repo base cannot be found.""" - path = os.path.abspath(get_script_dir()) - while os.path.dirname(path) != path: - if os.path.exists(os.path.join(path, ".hg")): - return path - path = os.path.dirname(path) - sys.exit("repo base could not be found") - - -def is_running_on_cluster(): - node = platform.node() - return ( - "cluster" in node or - node.startswith("gkigrid") or - node in ["habakuk", "turtur"]) - - -def is_test_run(): - return ARGS.test_run == "yes" or ( - ARGS.test_run == "auto" and not is_running_on_cluster()) - - -def get_algo_nick(revision, config_nick): - return "{revision}-{config_nick}".format(**locals()) - - -class IssueConfig(object): - """Hold information about a planner configuration. - - See FastDownwardExperiment.add_algorithm() for documentation of the - constructor's options. 
- - """ - def __init__(self, nick, component_options, - build_options=None, driver_options=None): - self.nick = nick - self.component_options = component_options - self.build_options = build_options - self.driver_options = driver_options - - -class IssueExperiment(FastDownwardExperiment): - """Subclass of FastDownwardExperiment with some convenience features.""" - - DEFAULT_TEST_SUITE = ["gripper:prob01.pddl", "rovers:p01.pddl"] - - DEFAULT_TABLE_ATTRIBUTES = [ - "cost", - "coverage", - "error", - "evaluations", - "expansions", - "expansions_until_last_jump", - "generated", - "memory", - "quality", - "run_dir", - "score_evaluations", - "score_expansions", - "score_generated", - "score_memory", - "score_search_time", - "score_total_time", - "search_time", - "total_time", - ] - DEFAULT_SCATTER_PLOT_ATTRIBUTES = [ - "evaluations", - "expansions", - "expansions_until_last_jump", - "initial_h_value", - "memory", - "search_time", - "total_time", - ] - - PORTFOLIO_ATTRIBUTES = [ - "cost", - "coverage", - "error", - "plan_length", - "run_dir", - ] - - def __init__(self, revisions=None, configs=None, path=None, **kwargs): - """ - - You can either specify both *revisions* and *configs* or none - of them. If they are omitted, you will need to call - exp.add_algorithm() manually. - - If *revisions* is given, it must be a non-empty list of - revision identifiers, which specify which planner versions to - use in the experiment. The same versions are used for - translator, preprocessor and search. :: - - IssueExperiment(revisions=["issue123", "4b3d581643"], ...) - - If *configs* is given, it must be a non-empty list of - IssueConfig objects. :: - - IssueExperiment(..., configs=[ - IssueConfig("ff", ["--search", "eager_greedy(ff())"]), - IssueConfig( - "lama", [], - driver_options=["--alias", "seq-sat-lama-2011"]), - ]) - - If *path* is specified, it must be the path to where the - experiment should be built (e.g. - /home/john/experiments/issue123/exp01/). 
If omitted, the - experiment path is derived automatically from the main - script's filename. Example:: - - script = experiments/issue123/exp01.py --> - path = experiments/issue123/data/issue123-exp01/ - - """ - - path = path or get_data_dir() - - FastDownwardExperiment.__init__(self, path=path, **kwargs) - - if (revisions and not configs) or (not revisions and configs): - raise ValueError( - "please provide either both or none of revisions and configs") - - for rev in revisions: - for config in configs: - self.add_algorithm( - get_algo_nick(rev, config.nick), - get_repo_base(), - rev, - config.component_options, - build_options=config.build_options, - driver_options=config.driver_options) - - self._revisions = revisions - self._configs = configs - - @classmethod - def _is_portfolio(cls, config_nick): - return "fdss" in config_nick - - @classmethod - def get_supported_attributes(cls, config_nick, attributes): - if cls._is_portfolio(config_nick): - return [attr for attr in attributes - if attr in cls.PORTFOLIO_ATTRIBUTES] - return attributes - - def add_absolute_report_step(self, **kwargs): - """Add step that makes an absolute report. - - Absolute reports are useful for experiments that don't compare - revisions. - - The report is written to the experiment evaluation directory. - - All *kwargs* will be passed to the AbsoluteReport class. If the - keyword argument *attributes* is not specified, a default list - of attributes is used. :: - - exp.add_absolute_report_step(attributes=["coverage"]) - - """ - kwargs.setdefault("attributes", self.DEFAULT_TABLE_ATTRIBUTES) - report = AbsoluteReport(**kwargs) - outfile = os.path.join( - self.eval_dir, - get_experiment_name() + "." + report.output_format) - self.add_report(report, outfile=outfile) - self.add_step( - 'publish-absolute-report', subprocess.call, ['publish', outfile]) - - def add_comparison_table_step(self, **kwargs): - """Add a step that makes pairwise revision comparisons. 
- - Create comparative reports for all pairs of Fast Downward - revisions. Each report pairs up the runs of the same config and - lists the two absolute attribute values and their difference - for all attributes in kwargs["attributes"]. - - All *kwargs* will be passed to the CompareConfigsReport class. - If the keyword argument *attributes* is not specified, a - default list of attributes is used. :: - - exp.add_comparison_table_step(attributes=["coverage"]) - - """ - kwargs.setdefault("attributes", self.DEFAULT_TABLE_ATTRIBUTES) - - def make_comparison_tables(): - for rev1, rev2 in itertools.combinations(self._revisions, 2): - compared_configs = [] - for config in self._configs: - config_nick = config.nick - compared_configs.append( - ("%s-%s" % (rev1, config_nick), - "%s-%s" % (rev2, config_nick), - "Diff (%s)" % config_nick)) - report = ComparativeReport(compared_configs, **kwargs) - outfile = os.path.join( - self.eval_dir, - "%s-%s-%s-compare.%s" % ( - self.name, rev1, rev2, report.output_format)) - report(self.eval_dir, outfile) - - def publish_comparison_tables(): - for rev1, rev2 in itertools.combinations(self._revisions, 2): - outfile = os.path.join( - self.eval_dir, - "%s-%s-%s-compare.html" % (self.name, rev1, rev2)) - subprocess.call(["publish", outfile]) - - self.add_step("make-comparison-tables", make_comparison_tables) - self.add_step( - "publish-comparison-tables", publish_comparison_tables) - - def add_scatter_plot_step(self, relative=False, attributes=None): - """Add step creating (relative) scatter plots for all revision pairs. - - Create a scatter plot for each combination of attribute, - configuration and revisions pair. If *attributes* is not - specified, a list of common scatter plot attributes is used. - For portfolios all attributes except "cost", "coverage" and - "plan_length" will be ignored. 
:: - - exp.add_scatter_plot_step(attributes=["expansions"]) - - """ - if relative: - report_class = RelativeScatterPlotReport - scatter_dir = os.path.join(self.eval_dir, "scatter-relative") - step_name = "make-relative-scatter-plots" - else: - report_class = ScatterPlotReport - scatter_dir = os.path.join(self.eval_dir, "scatter-absolute") - step_name = "make-absolute-scatter-plots" - if attributes is None: - attributes = self.DEFAULT_SCATTER_PLOT_ATTRIBUTES - - def make_scatter_plot(config_nick, rev1, rev2, attribute): - name = "-".join([self.name, rev1, rev2, attribute, config_nick]) - print "Make scatter plot for", name - algo1 = "{}-{}".format(rev1, config_nick) - algo2 = "{}-{}".format(rev2, config_nick) - report = report_class( - filter_config=[algo1, algo2], - attributes=[attribute], - get_category=lambda run1, run2: run1["domain"], - legend_location=(1.3, 0.5)) - report( - self.eval_dir, - os.path.join(scatter_dir, rev1 + "-" + rev2, name)) - - def make_scatter_plots(): - for config in self._configs: - for rev1, rev2 in itertools.combinations(self._revisions, 2): - for attribute in self.get_supported_attributes( - config.nick, attributes): - make_scatter_plot(config.nick, rev1, rev2, attribute) - - self.add_step(step_name, make_scatter_plots) diff --git a/experiments/issue524/relativescatter.py b/experiments/issue524/relativescatter.py deleted file mode 100644 index f74cb6e721..0000000000 --- a/experiments/issue524/relativescatter.py +++ /dev/null @@ -1,105 +0,0 @@ -# -*- coding: utf-8 -*- - -from collections import defaultdict - -from matplotlib import ticker - -from downward.reports.scatter import ScatterPlotReport -from downward.reports.plot import PlotReport, Matplotlib, MatplotlibPlot - - -# TODO: handle outliers - -# TODO: this is mostly copied from ScatterMatplotlib (scatter.py) -class RelativeScatterMatplotlib(Matplotlib): - @classmethod - def _plot(cls, report, axes, categories, styles): - # Display grid - axes.grid(b=True, linestyle='-', 
color='0.75') - - has_points = False - # Generate the scatter plots - for category, coords in sorted(categories.items()): - X, Y = zip(*coords) - axes.scatter(X, Y, s=42, label=category, **styles[category]) - if X and Y: - has_points = True - - if report.xscale == 'linear' or report.yscale == 'linear': - plot_size = report.missing_val * 1.01 - else: - plot_size = report.missing_val * 1.25 - - # make 5 ticks above and below 1 - yticks = [] - tick_step = report.ylim_top**(1/5.0) - for i in xrange(-5, 6): - yticks.append(tick_step**i) - axes.set_yticks(yticks) - axes.get_yaxis().set_major_formatter(ticker.ScalarFormatter()) - - axes.set_xlim(report.xlim_left or -1, report.xlim_right or plot_size) - axes.set_ylim(report.ylim_bottom or -1, report.ylim_top or plot_size) - - for axis in [axes.xaxis, axes.yaxis]: - MatplotlibPlot.change_axis_formatter( - axis, - report.missing_val if report.show_missing else None) - return has_points - - -class RelativeScatterPlotReport(ScatterPlotReport): - """ - Generate a scatter plot that shows a relative comparison of two - algorithms with regard to the given attribute. The attribute value - of algorithm 1 is shown on the x-axis and the relation to the value - of algorithm 2 on the y-axis. - """ - - def __init__(self, show_missing=True, get_category=None, **kwargs): - ScatterPlotReport.__init__(self, show_missing, get_category, **kwargs) - if self.output_format == 'tex': - raise "not supported" - else: - self.writer = RelativeScatterMatplotlib - - def _fill_categories(self, runs): - # We discard the *runs* parameter. 
- # Map category names to value tuples - categories = defaultdict(list) - self.ylim_bottom = 2 - self.ylim_top = 0.5 - self.xlim_left = float("inf") - for (domain, problem), runs in self.problem_runs.items(): - if len(runs) != 2: - continue - run1, run2 = runs - assert (run1['algorithm'] == self.algorithms[0] and - run2['algorithm'] == self.algorithms[1]) - val1 = run1.get(self.attribute) - val2 = run2.get(self.attribute) - if val1 is None or val2 is None: - continue - category = self.get_category(run1, run2) - assert val1 > 0, (domain, problem, self.algorithms[0], val1) - assert val2 > 0, (domain, problem, self.algorithms[1], val2) - x = val1 - y = val2 / float(val1) - - categories[category].append((x, y)) - - self.ylim_top = max(self.ylim_top, y) - self.ylim_bottom = min(self.ylim_bottom, y) - self.xlim_left = min(self.xlim_left, x) - - # center around 1 - if self.ylim_bottom < 1: - self.ylim_top = max(self.ylim_top, 1 / float(self.ylim_bottom)) - if self.ylim_top > 1: - self.ylim_bottom = min(self.ylim_bottom, 1 / float(self.ylim_top)) - return categories - - def _set_scales(self, xscale, yscale): - # ScatterPlot uses log-scaling on the x-axis by default. - PlotReport._set_scales( - self, xscale or self.attribute.scale or 'log', 'log') diff --git a/experiments/issue524/suites.py b/experiments/issue524/suites.py deleted file mode 100644 index 4615212cfd..0000000000 --- a/experiments/issue524/suites.py +++ /dev/null @@ -1,350 +0,0 @@ -#! /usr/bin/env python -# -*- coding: utf-8 -*- - -import argparse -import textwrap - - -HELP = "Convert suite name to list of domains or tasks." 
- - -def suite_alternative_formulations(): - return ['airport-adl', 'no-mprime', 'no-mystery'] - - -def suite_ipc98_to_ipc04_adl(): - return [ - 'assembly', 'miconic-fulladl', 'miconic-simpleadl', - 'optical-telegraphs', 'philosophers', 'psr-large', - 'psr-middle', 'schedule', - ] - - -def suite_ipc98_to_ipc04_strips(): - return [ - 'airport', 'blocks', 'depot', 'driverlog', 'freecell', 'grid', - 'gripper', 'logistics00', 'logistics98', 'miconic', 'movie', - 'mprime', 'mystery', 'pipesworld-notankage', 'psr-small', - 'satellite', 'zenotravel', - ] - - -def suite_ipc98_to_ipc04(): - # All IPC1-4 domains, including the trivial Movie. - return sorted(suite_ipc98_to_ipc04_adl() + suite_ipc98_to_ipc04_strips()) - - -def suite_ipc06_adl(): - return [ - 'openstacks', - 'pathways', - 'trucks', - ] - - -def suite_ipc06_strips_compilations(): - return [ - 'openstacks-strips', - 'pathways-noneg', - 'trucks-strips', - ] - - -def suite_ipc06_strips(): - return [ - 'pipesworld-tankage', - 'rovers', - 'storage', - 'tpp', - ] - - -def suite_ipc06(): - return sorted(suite_ipc06_adl() + suite_ipc06_strips()) - - -def suite_ipc08_common_strips(): - return [ - 'parcprinter-08-strips', - 'pegsol-08-strips', - 'scanalyzer-08-strips', - ] - - -def suite_ipc08_opt_adl(): - return ['openstacks-opt08-adl'] - - -def suite_ipc08_opt_strips(): - return sorted(suite_ipc08_common_strips() + [ - 'elevators-opt08-strips', - 'openstacks-opt08-strips', - 'sokoban-opt08-strips', - 'transport-opt08-strips', - 'woodworking-opt08-strips', - ]) - - -def suite_ipc08_opt(): - return sorted(suite_ipc08_opt_strips() + suite_ipc08_opt_adl()) - - -def suite_ipc08_sat_adl(): - return ['openstacks-sat08-adl'] - - -def suite_ipc08_sat_strips(): - return sorted(suite_ipc08_common_strips() + [ - # Note: cyber-security is missing. 
- 'elevators-sat08-strips', - 'openstacks-sat08-strips', - 'sokoban-sat08-strips', - 'transport-sat08-strips', - 'woodworking-sat08-strips', - ]) - - -def suite_ipc08_sat(): - return sorted(suite_ipc08_sat_strips() + suite_ipc08_sat_adl()) - - -def suite_ipc08(): - return sorted(set(suite_ipc08_opt() + suite_ipc08_sat())) - - -def suite_ipc11_opt(): - return [ - 'barman-opt11-strips', - 'elevators-opt11-strips', - 'floortile-opt11-strips', - 'nomystery-opt11-strips', - 'openstacks-opt11-strips', - 'parcprinter-opt11-strips', - 'parking-opt11-strips', - 'pegsol-opt11-strips', - 'scanalyzer-opt11-strips', - 'sokoban-opt11-strips', - 'tidybot-opt11-strips', - 'transport-opt11-strips', - 'visitall-opt11-strips', - 'woodworking-opt11-strips', - ] - - -def suite_ipc11_sat(): - return [ - 'barman-sat11-strips', - 'elevators-sat11-strips', - 'floortile-sat11-strips', - 'nomystery-sat11-strips', - 'openstacks-sat11-strips', - 'parcprinter-sat11-strips', - 'parking-sat11-strips', - 'pegsol-sat11-strips', - 'scanalyzer-sat11-strips', - 'sokoban-sat11-strips', - 'tidybot-sat11-strips', - 'transport-sat11-strips', - 'visitall-sat11-strips', - 'woodworking-sat11-strips', - ] - - -def suite_ipc11(): - return sorted(suite_ipc11_opt() + suite_ipc11_sat()) - - -def suite_ipc14_agl_adl(): - return [ - 'cavediving-14-adl', - 'citycar-sat14-adl', - 'maintenance-sat14-adl', - ] - - -def suite_ipc14_agl_strips(): - return [ - 'barman-sat14-strips', - 'childsnack-sat14-strips', - 'floortile-sat14-strips', - 'ged-sat14-strips', - 'hiking-agl14-strips', - 'openstacks-agl14-strips', - 'parking-sat14-strips', - 'tetris-sat14-strips', - 'thoughtful-sat14-strips', - 'transport-sat14-strips', - 'visitall-sat14-strips', - ] - - -def suite_ipc14_agl(): - return sorted(suite_ipc14_agl_adl() + suite_ipc14_agl_strips()) - - -def suite_ipc14_mco_adl(): - return [ - 'cavediving-14-adl', - 'citycar-sat14-adl', - 'maintenance-sat14-adl', - ] - - -def suite_ipc14_mco_strips(): - return [ - 
'barman-mco14-strips', - 'childsnack-sat14-strips', - 'floortile-sat14-strips', - 'ged-sat14-strips', - 'hiking-sat14-strips', - 'openstacks-sat14-strips', - 'parking-sat14-strips', - 'tetris-sat14-strips', - 'thoughtful-mco14-strips', - 'transport-sat14-strips', - 'visitall-sat14-strips', - ] - - -def suite_ipc14_mco(): - return sorted(suite_ipc14_mco_adl() + suite_ipc14_mco_strips()) - - -def suite_ipc14_opt_adl(): - return [ - 'cavediving-14-adl', - 'citycar-opt14-adl', - 'maintenance-opt14-adl', - ] - - -def suite_ipc14_opt_strips(): - return [ - 'barman-opt14-strips', - 'childsnack-opt14-strips', - 'floortile-opt14-strips', - 'ged-opt14-strips', - 'hiking-opt14-strips', - 'openstacks-opt14-strips', - 'parking-opt14-strips', - 'tetris-opt14-strips', - 'tidybot-opt14-strips', - 'transport-opt14-strips', - 'visitall-opt14-strips', - ] - - -def suite_ipc14_opt(): - return sorted(suite_ipc14_opt_adl() + suite_ipc14_opt_strips()) - - -def suite_ipc14_sat_adl(): - return [ - 'cavediving-14-adl', - 'citycar-sat14-adl', - 'maintenance-sat14-adl', - ] - - -def suite_ipc14_sat_strips(): - return [ - 'barman-sat14-strips', - 'childsnack-sat14-strips', - 'floortile-sat14-strips', - 'ged-sat14-strips', - 'hiking-sat14-strips', - 'openstacks-sat14-strips', - 'parking-sat14-strips', - 'tetris-sat14-strips', - 'thoughtful-sat14-strips', - 'transport-sat14-strips', - 'visitall-sat14-strips', - ] - - -def suite_ipc14_sat(): - return sorted(suite_ipc14_sat_adl() + suite_ipc14_sat_strips()) - - -def suite_ipc14(): - return sorted(set( - suite_ipc14_agl() + suite_ipc14_mco() + - suite_ipc14_opt() + suite_ipc14_sat())) - - -def suite_unsolvable(): - return sorted( - ['mystery:prob%02d.pddl' % index - for index in [4, 5, 7, 8, 12, 16, 18, 21, 22, 23, 24]] + - ['miconic-fulladl:f21-3.pddl', 'miconic-fulladl:f30-2.pddl']) - - -def suite_optimal_adl(): - return sorted( - suite_ipc98_to_ipc04_adl() + suite_ipc06_adl() + - suite_ipc08_opt_adl() + suite_ipc14_opt_adl()) - - -def 
suite_optimal_strips(): - return sorted( - suite_ipc98_to_ipc04_strips() + suite_ipc06_strips() + - suite_ipc06_strips_compilations() + suite_ipc08_opt_strips() + - suite_ipc11_opt() + suite_ipc14_opt_strips()) - - -def suite_optimal(): - return sorted(suite_optimal_adl() + suite_optimal_strips()) - - -def suite_satisficing_adl(): - return sorted( - suite_ipc98_to_ipc04_adl() + suite_ipc06_adl() + - suite_ipc08_sat_adl() + suite_ipc14_sat_adl()) - - -def suite_satisficing_strips(): - return sorted( - suite_ipc98_to_ipc04_strips() + suite_ipc06_strips() + - suite_ipc06_strips_compilations() + suite_ipc08_sat_strips() + - suite_ipc11_sat() + suite_ipc14_sat_strips()) - - -def suite_satisficing(): - return sorted(suite_satisficing_adl() + suite_satisficing_strips()) - - -def suite_all(): - return sorted( - suite_ipc98_to_ipc04() + suite_ipc06() + - suite_ipc06_strips_compilations() + suite_ipc08() + - suite_ipc11() + suite_ipc14() + suite_alternative_formulations()) - - -def parse_args(): - parser = argparse.ArgumentParser() - parser.add_argument("suite", help="suite name") - return parser.parse_args() - - -def main(): - prefix = "suite_" - suite_names = [ - name[len(prefix):] for name in sorted(globals().keys()) - if name.startswith(prefix)] - parser = argparse.ArgumentParser(description=HELP) - parser.add_argument("suite", choices=suite_names, help="suite name") - parser.add_argument( - "--width", default=72, type=int, - help="output line width (default: %(default)s). Use 1 for single " - "column.") - args = parser.parse_args() - suite_func = globals()[prefix + args.suite] - print(textwrap.fill( - str(suite_func()), - width=args.width, - break_long_words=False, - break_on_hyphens=False)) - - -if __name__ == "__main__": - main() diff --git a/experiments/issue524/v2.py b/experiments/issue524/v2.py deleted file mode 100755 index 81ffc3e706..0000000000 --- a/experiments/issue524/v2.py +++ /dev/null @@ -1,72 +0,0 @@ -#! 
/usr/bin/env python -# -*- coding: utf-8 -*- - -import os -import os.path - -from lab.environments import LocalEnvironment, BaselSlurmEnvironment -from lab.reports import Attribute, geometric_mean - -import common_setup -from common_setup import IssueConfig, IssueExperiment -from relativescatter import RelativeScatterPlotReport - -DIR = os.path.dirname(os.path.abspath(__file__)) -BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"] -REVISION_CACHE = os.path.expanduser('~/lab/revision-cache') -REVISIONS = ["issue524-base-v2", "issue524-v2"] -CONFIGS = [ - IssueConfig('lm_hm', [ - '--landmarks', 'l=lm_hm()', - '--heuristic', 'h=lmcount(l)', - '--search', 'eager_greedy([h])']), -] + [ - IssueConfig('lm_rhw', [ - '--landmarks', 'l=lm_rhw()', - '--heuristic', 'h=lmcount(l)', - '--search', 'eager_greedy([h])']), -] + [ - IssueConfig('lm_zg', [ - '--landmarks', 'l=lm_zg()', - '--heuristic', 'h=lmcount(l)', - '--search', 'eager_greedy([h])']), -] + [ - IssueConfig('lm_exhaust', [ - '--landmarks', 'l=lm_exhaust()', - '--heuristic', 'h=lmcount(l)', - '--search', 'eager_greedy([h])']), -] + [ - IssueConfig('lm_merged', [ - '--landmarks', 'l1=lm_exhaust()', - '--landmarks', 'l2=lm_rhw()', - '--landmarks', 'l=lm_merged([l1, l2])', - '--heuristic', 'h=lmcount(l)', - '--search', 'eager_greedy([h])']), -] + [ - IssueConfig( - "lama-first", [], driver_options=["--alias", "lama-first"]) -] -SUITE = common_setup.DEFAULT_OPTIMAL_SUITE -ENVIRONMENT = BaselSlurmEnvironment( - email="cedric.geissmann@unibas.ch" -) - -SUITE = common_setup.DEFAULT_OPTIMAL_SUITE - -if common_setup.is_test_run(): - SUITE = IssueExperiment.DEFAULT_TEST_SUITE - ENVIRONMENT = LocalEnvironment(processes=4) - -exp = IssueExperiment( - revisions=REVISIONS, - configs=CONFIGS, - environment=ENVIRONMENT, - revision_cache=REVISION_CACHE, -) -exp.add_suite(BENCHMARKS_DIR, SUITE) - -exp.add_absolute_report_step() -exp.add_comparison_table_step() -exp.add_scatter_plot_step() - -exp.run_steps() diff --git 
a/experiments/issue526/common_setup.py b/experiments/issue526/common_setup.py deleted file mode 100755 index 687019c482..0000000000 --- a/experiments/issue526/common_setup.py +++ /dev/null @@ -1,382 +0,0 @@ -# -*- coding: utf-8 -*- - -import itertools -import os -import platform -import subprocess -import sys - -from lab.experiment import ARGPARSER -from lab import tools - -from downward.experiment import FastDownwardExperiment -from downward.reports.absolute import AbsoluteReport -from downward.reports.compare import ComparativeReport -from downward.reports.scatter import ScatterPlotReport - -from relativescatter import RelativeScatterPlotReport - - -def parse_args(): - ARGPARSER.add_argument( - "--test", - choices=["yes", "no", "auto"], - default="auto", - dest="test_run", - help="test experiment locally on a small suite if --test=yes or " - "--test=auto and we are not on a cluster") - return ARGPARSER.parse_args() - -ARGS = parse_args() - - -DEFAULT_OPTIMAL_SUITE = [ - 'airport', 'barman-opt11-strips', 'barman-opt14-strips', 'blocks', - 'childsnack-opt14-strips', 'depot', 'driverlog', - 'elevators-opt08-strips', 'elevators-opt11-strips', - 'floortile-opt11-strips', 'floortile-opt14-strips', 'freecell', - 'ged-opt14-strips', 'grid', 'gripper', 'hiking-opt14-strips', - 'logistics00', 'logistics98', 'miconic', 'movie', 'mprime', - 'mystery', 'nomystery-opt11-strips', 'openstacks-opt08-strips', - 'openstacks-opt11-strips', 'openstacks-opt14-strips', - 'openstacks-strips', 'parcprinter-08-strips', - 'parcprinter-opt11-strips', 'parking-opt11-strips', - 'parking-opt14-strips', 'pathways-noneg', 'pegsol-08-strips', - 'pegsol-opt11-strips', 'pipesworld-notankage', - 'pipesworld-tankage', 'psr-small', 'rovers', 'satellite', - 'scanalyzer-08-strips', 'scanalyzer-opt11-strips', - 'sokoban-opt08-strips', 'sokoban-opt11-strips', 'storage', - 'tetris-opt14-strips', 'tidybot-opt11-strips', - 'tidybot-opt14-strips', 'tpp', 'transport-opt08-strips', - 'transport-opt11-strips', 
'transport-opt14-strips', - 'trucks-strips', 'visitall-opt11-strips', 'visitall-opt14-strips', - 'woodworking-opt08-strips', 'woodworking-opt11-strips', - 'zenotravel'] - -DEFAULT_SATISFICING_SUITE = [ - 'airport', 'assembly', 'barman-sat11-strips', - 'barman-sat14-strips', 'blocks', 'cavediving-14-adl', - 'childsnack-sat14-strips', 'citycar-sat14-adl', 'depot', - 'driverlog', 'elevators-sat08-strips', 'elevators-sat11-strips', - 'floortile-sat11-strips', 'floortile-sat14-strips', 'freecell', - 'ged-sat14-strips', 'grid', 'gripper', 'hiking-sat14-strips', - 'logistics00', 'logistics98', 'maintenance-sat14-adl', 'miconic', - 'miconic-fulladl', 'miconic-simpleadl', 'movie', 'mprime', - 'mystery', 'nomystery-sat11-strips', 'openstacks', - 'openstacks-sat08-adl', 'openstacks-sat08-strips', - 'openstacks-sat11-strips', 'openstacks-sat14-strips', - 'openstacks-strips', 'optical-telegraphs', 'parcprinter-08-strips', - 'parcprinter-sat11-strips', 'parking-sat11-strips', - 'parking-sat14-strips', 'pathways', 'pathways-noneg', - 'pegsol-08-strips', 'pegsol-sat11-strips', 'philosophers', - 'pipesworld-notankage', 'pipesworld-tankage', 'psr-large', - 'psr-middle', 'psr-small', 'rovers', 'satellite', - 'scanalyzer-08-strips', 'scanalyzer-sat11-strips', 'schedule', - 'sokoban-sat08-strips', 'sokoban-sat11-strips', 'storage', - 'tetris-sat14-strips', 'thoughtful-sat14-strips', - 'tidybot-sat11-strips', 'tpp', 'transport-sat08-strips', - 'transport-sat11-strips', 'transport-sat14-strips', 'trucks', - 'trucks-strips', 'visitall-sat11-strips', 'visitall-sat14-strips', - 'woodworking-sat08-strips', 'woodworking-sat11-strips', - 'zenotravel'] - - -def get_script(): - """Get file name of main script.""" - return tools.get_script_path() - - -def get_script_dir(): - """Get directory of main script. - - Usually a relative directory (depends on how it was called by the user.)""" - return os.path.dirname(get_script()) - - -def get_experiment_name(): - """Get name for experiment. 
- - Derived from the absolute filename of the main script, e.g. - "/ham/spam/eggs.py" => "spam-eggs".""" - script = os.path.abspath(get_script()) - script_dir = os.path.basename(os.path.dirname(script)) - script_base = os.path.splitext(os.path.basename(script))[0] - return "%s-%s" % (script_dir, script_base) - - -def get_data_dir(): - """Get data dir for the experiment. - - This is the subdirectory "data" of the directory containing - the main script.""" - return os.path.join(get_script_dir(), "data", get_experiment_name()) - - -def get_repo_base(): - """Get base directory of the repository, as an absolute path. - - Search upwards in the directory tree from the main script until a - directory with a subdirectory named ".hg" is found. - - Abort if the repo base cannot be found.""" - path = os.path.abspath(get_script_dir()) - while os.path.dirname(path) != path: - if os.path.exists(os.path.join(path, ".hg")): - return path - path = os.path.dirname(path) - sys.exit("repo base could not be found") - - -def is_running_on_cluster(): - node = platform.node() - return node.endswith(".scicore.unibas.ch") or node.endswith(".cluster.bc2.ch") - - -def is_test_run(): - return ARGS.test_run == "yes" or ( - ARGS.test_run == "auto" and not is_running_on_cluster()) - - -def get_algo_nick(revision, config_nick): - return "{revision}-{config_nick}".format(**locals()) - - -class IssueConfig(object): - """Hold information about a planner configuration. - - See FastDownwardExperiment.add_algorithm() for documentation of the - constructor's options. 
- - """ - def __init__(self, nick, component_options, - build_options=None, driver_options=None): - self.nick = nick - self.component_options = component_options - self.build_options = build_options - self.driver_options = driver_options - - -class IssueExperiment(FastDownwardExperiment): - """Subclass of FastDownwardExperiment with some convenience features.""" - - DEFAULT_TEST_SUITE = ["depot:p01.pddl", "gripper:prob01.pddl"] - - DEFAULT_TABLE_ATTRIBUTES = [ - "cost", - "coverage", - "error", - "evaluations", - "expansions", - "expansions_until_last_jump", - "generated", - "memory", - "quality", - "run_dir", - "score_evaluations", - "score_expansions", - "score_generated", - "score_memory", - "score_search_time", - "score_total_time", - "search_time", - "total_time", - ] - - DEFAULT_SCATTER_PLOT_ATTRIBUTES = [ - "evaluations", - "expansions", - "expansions_until_last_jump", - "initial_h_value", - "memory", - "search_time", - "total_time", - ] - - PORTFOLIO_ATTRIBUTES = [ - "cost", - "coverage", - "error", - "plan_length", - "run_dir", - ] - - def __init__(self, revisions=None, configs=None, path=None, **kwargs): - """ - - You can either specify both *revisions* and *configs* or none - of them. If they are omitted, you will need to call - exp.add_algorithm() manually. - - If *revisions* is given, it must be a non-empty list of - revision identifiers, which specify which planner versions to - use in the experiment. The same versions are used for - translator, preprocessor and search. :: - - IssueExperiment(revisions=["issue123", "4b3d581643"], ...) - - If *configs* is given, it must be a non-empty list of - IssueConfig objects. :: - - IssueExperiment(..., configs=[ - IssueConfig("ff", ["--search", "eager_greedy(ff())"]), - IssueConfig( - "lama", [], - driver_options=["--alias", "seq-sat-lama-2011"]), - ]) - - If *path* is specified, it must be the path to where the - experiment should be built (e.g. - /home/john/experiments/issue123/exp01/). 
If omitted, the - experiment path is derived automatically from the main - script's filename. Example:: - - script = experiments/issue123/exp01.py --> - path = experiments/issue123/data/issue123-exp01/ - - """ - - path = path or get_data_dir() - - FastDownwardExperiment.__init__(self, path=path, **kwargs) - - if (revisions and not configs) or (not revisions and configs): - raise ValueError( - "please provide either both or none of revisions and configs") - - for rev in revisions: - for config in configs: - self.add_algorithm( - get_algo_nick(rev, config.nick), - get_repo_base(), - rev, - config.component_options, - build_options=config.build_options, - driver_options=config.driver_options) - - self._revisions = revisions - self._configs = configs - - @classmethod - def _is_portfolio(cls, config_nick): - return "fdss" in config_nick - - @classmethod - def get_supported_attributes(cls, config_nick, attributes): - if cls._is_portfolio(config_nick): - return [attr for attr in attributes - if attr in cls.PORTFOLIO_ATTRIBUTES] - return attributes - - def add_absolute_report_step(self, **kwargs): - """Add step that makes an absolute report. - - Absolute reports are useful for experiments that don't compare - revisions. - - The report is written to the experiment evaluation directory. - - All *kwargs* will be passed to the AbsoluteReport class. If the - keyword argument *attributes* is not specified, a default list - of attributes is used. :: - - exp.add_absolute_report_step(attributes=["coverage"]) - - """ - kwargs.setdefault("attributes", self.DEFAULT_TABLE_ATTRIBUTES) - report = AbsoluteReport(**kwargs) - outfile = os.path.join( - self.eval_dir, - get_experiment_name() + "." + report.output_format) - self.add_report(report, outfile=outfile) - self.add_step( - 'publish-absolute-report', subprocess.call, ['publish', outfile]) - - def add_comparison_table_step(self, **kwargs): - """Add a step that makes pairwise revision comparisons. 
- - Create comparative reports for all pairs of Fast Downward - revisions. Each report pairs up the runs of the same config and - lists the two absolute attribute values and their difference - for all attributes in kwargs["attributes"]. - - All *kwargs* will be passed to the CompareConfigsReport class. - If the keyword argument *attributes* is not specified, a - default list of attributes is used. :: - - exp.add_comparison_table_step(attributes=["coverage"]) - - """ - kwargs.setdefault("attributes", self.DEFAULT_TABLE_ATTRIBUTES) - - def make_comparison_tables(): - for rev1, rev2 in itertools.combinations(self._revisions, 2): - compared_configs = [] - for config in self._configs: - config_nick = config.nick - compared_configs.append( - ("%s-%s" % (rev1, config_nick), - "%s-%s" % (rev2, config_nick), - "Diff (%s)" % config_nick)) - report = ComparativeReport(compared_configs, **kwargs) - outfile = os.path.join( - self.eval_dir, - "%s-%s-%s-compare.%s" % ( - self.name, rev1, rev2, report.output_format)) - report(self.eval_dir, outfile) - - def publish_comparison_tables(): - for rev1, rev2 in itertools.combinations(self._revisions, 2): - outfile = os.path.join( - self.eval_dir, - "%s-%s-%s-compare.html" % (self.name, rev1, rev2)) - subprocess.call(["publish", outfile]) - - self.add_step("make-comparison-tables", make_comparison_tables) - self.add_step( - "publish-comparison-tables", publish_comparison_tables) - - def add_scatter_plot_step(self, relative=False, attributes=None): - """Add step creating (relative) scatter plots for all revision pairs. - - Create a scatter plot for each combination of attribute, - configuration and revisions pair. If *attributes* is not - specified, a list of common scatter plot attributes is used. - For portfolios all attributes except "cost", "coverage" and - "plan_length" will be ignored. 
:: - - exp.add_scatter_plot_step(attributes=["expansions"]) - - """ - if relative: - report_class = RelativeScatterPlotReport - scatter_dir = os.path.join(self.eval_dir, "scatter-relative") - step_name = "make-relative-scatter-plots" - else: - report_class = ScatterPlotReport - scatter_dir = os.path.join(self.eval_dir, "scatter-absolute") - step_name = "make-absolute-scatter-plots" - if attributes is None: - attributes = self.DEFAULT_SCATTER_PLOT_ATTRIBUTES - - def make_scatter_plot(config_nick, rev1, rev2, attribute): - name = "-".join([self.name, rev1, rev2, attribute, config_nick]) - print "Make scatter plot for", name - algo1 = "{}-{}".format(rev1, config_nick) - algo2 = "{}-{}".format(rev2, config_nick) - report = report_class( - filter_config=[algo1, algo2], - attributes=[attribute], - get_category=lambda run1, run2: run1["domain"], - legend_location=(1.3, 0.5)) - report( - self.eval_dir, - os.path.join(scatter_dir, rev1 + "-" + rev2, name)) - - def make_scatter_plots(): - for config in self._configs: - for rev1, rev2 in itertools.combinations(self._revisions, 2): - for attribute in self.get_supported_attributes( - config.nick, attributes): - make_scatter_plot(config.nick, rev1, rev2, attribute) - - self.add_step(step_name, make_scatter_plots) diff --git a/experiments/issue526/relativescatter.py b/experiments/issue526/relativescatter.py deleted file mode 100755 index f74cb6e721..0000000000 --- a/experiments/issue526/relativescatter.py +++ /dev/null @@ -1,105 +0,0 @@ -# -*- coding: utf-8 -*- - -from collections import defaultdict - -from matplotlib import ticker - -from downward.reports.scatter import ScatterPlotReport -from downward.reports.plot import PlotReport, Matplotlib, MatplotlibPlot - - -# TODO: handle outliers - -# TODO: this is mostly copied from ScatterMatplotlib (scatter.py) -class RelativeScatterMatplotlib(Matplotlib): - @classmethod - def _plot(cls, report, axes, categories, styles): - # Display grid - axes.grid(b=True, linestyle='-', 
color='0.75') - - has_points = False - # Generate the scatter plots - for category, coords in sorted(categories.items()): - X, Y = zip(*coords) - axes.scatter(X, Y, s=42, label=category, **styles[category]) - if X and Y: - has_points = True - - if report.xscale == 'linear' or report.yscale == 'linear': - plot_size = report.missing_val * 1.01 - else: - plot_size = report.missing_val * 1.25 - - # make 5 ticks above and below 1 - yticks = [] - tick_step = report.ylim_top**(1/5.0) - for i in xrange(-5, 6): - yticks.append(tick_step**i) - axes.set_yticks(yticks) - axes.get_yaxis().set_major_formatter(ticker.ScalarFormatter()) - - axes.set_xlim(report.xlim_left or -1, report.xlim_right or plot_size) - axes.set_ylim(report.ylim_bottom or -1, report.ylim_top or plot_size) - - for axis in [axes.xaxis, axes.yaxis]: - MatplotlibPlot.change_axis_formatter( - axis, - report.missing_val if report.show_missing else None) - return has_points - - -class RelativeScatterPlotReport(ScatterPlotReport): - """ - Generate a scatter plot that shows a relative comparison of two - algorithms with regard to the given attribute. The attribute value - of algorithm 1 is shown on the x-axis and the relation to the value - of algorithm 2 on the y-axis. - """ - - def __init__(self, show_missing=True, get_category=None, **kwargs): - ScatterPlotReport.__init__(self, show_missing, get_category, **kwargs) - if self.output_format == 'tex': - raise "not supported" - else: - self.writer = RelativeScatterMatplotlib - - def _fill_categories(self, runs): - # We discard the *runs* parameter. 
- # Map category names to value tuples - categories = defaultdict(list) - self.ylim_bottom = 2 - self.ylim_top = 0.5 - self.xlim_left = float("inf") - for (domain, problem), runs in self.problem_runs.items(): - if len(runs) != 2: - continue - run1, run2 = runs - assert (run1['algorithm'] == self.algorithms[0] and - run2['algorithm'] == self.algorithms[1]) - val1 = run1.get(self.attribute) - val2 = run2.get(self.attribute) - if val1 is None or val2 is None: - continue - category = self.get_category(run1, run2) - assert val1 > 0, (domain, problem, self.algorithms[0], val1) - assert val2 > 0, (domain, problem, self.algorithms[1], val2) - x = val1 - y = val2 / float(val1) - - categories[category].append((x, y)) - - self.ylim_top = max(self.ylim_top, y) - self.ylim_bottom = min(self.ylim_bottom, y) - self.xlim_left = min(self.xlim_left, x) - - # center around 1 - if self.ylim_bottom < 1: - self.ylim_top = max(self.ylim_top, 1 / float(self.ylim_bottom)) - if self.ylim_top > 1: - self.ylim_bottom = min(self.ylim_bottom, 1 / float(self.ylim_top)) - return categories - - def _set_scales(self, xscale, yscale): - # ScatterPlot uses log-scaling on the x-axis by default. - PlotReport._set_scales( - self, xscale or self.attribute.scale or 'log', 'log') diff --git a/experiments/issue526/v1-sat.py b/experiments/issue526/v1-sat.py deleted file mode 100755 index af79929c50..0000000000 --- a/experiments/issue526/v1-sat.py +++ /dev/null @@ -1,76 +0,0 @@ -#! 
/usr/bin/env python -# -*- coding: utf-8 -*- - -import os - -from lab.environments import LocalEnvironment, BaselSlurmEnvironment - -import common_setup -from common_setup import IssueConfig, IssueExperiment -from relativescatter import RelativeScatterPlotReport - -DIR = os.path.dirname(os.path.abspath(__file__)) -BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"] -REVISIONS = ["issue526-base", "issue526-v1"] -SUITE = common_setup.DEFAULT_SATISFICING_SUITE -ENVIRONMENT = BaselSlurmEnvironment( - partition="infai_1", - email="manuel.heusner@unibas.ch", - export=["PATH", "DOWNWARD_BENCHMARKS"]) - -CONFIGS = [ - IssueConfig( - "ehc_ff", - ["--heuristic", "h=ff()", "--search", "ehc(h, preferred=[h])"], - driver_options=["--overall-time-limit", "5m"]), - IssueConfig( - "lama-first-lazy", - [], - driver_options=["--alias", "lama-first", "--overall-time-limit", "5m"]), - IssueConfig( - "lama-first-eager", - ["--evaluator", - """hlm=lama_synergy(lm_rhw(reasonable_orders=true,lm_cost_type=one), - transform=adapt_costs(one))""", - "--evaluator", - "hff=ff_synergy(hlm)", - "--search", - """eager_greedy([hff,hlm],preferred=[hff,hlm], - cost_type=one)"""], - driver_options=["--overall-time-limit", "5m"]), -] - - -if common_setup.is_test_run(): - SUITE = IssueExperiment.DEFAULT_TEST_SUITE - ENVIRONMENT = LocalEnvironment(processes=1) - -exp = IssueExperiment( - revisions=REVISIONS, - configs=CONFIGS, - environment=ENVIRONMENT, -) -exp.add_suite(BENCHMARKS_DIR, SUITE) - -exp.add_parser(exp.EXITCODE_PARSER) -exp.add_parser(exp.TRANSLATOR_PARSER) -exp.add_parser(exp.SINGLE_SEARCH_PARSER) -exp.add_parser(exp.PLANNER_PARSER) - -exp.add_step('build', exp.build) -exp.add_step('start', exp.start_runs) -exp.add_fetcher(name='fetch') - -#exp.add_absolute_report_step() -exp.add_comparison_table_step() - -for attribute in ["memory", "total_time"]: - for config in CONFIGS: - exp.add_report( - RelativeScatterPlotReport( - attributes=[attribute], - filter_algorithm=["{}-{}".format(rev, 
config.nick) for rev in REVISIONS], - get_category=lambda run1, run2: run1.get("domain")), - outfile="{}-{}-{}-{}-{}.png".format(exp.name, attribute, config.nick, *REVISIONS)) - -exp.run_steps() diff --git a/experiments/issue527/common_setup.py b/experiments/issue527/common_setup.py deleted file mode 100644 index 6989ef8686..0000000000 --- a/experiments/issue527/common_setup.py +++ /dev/null @@ -1,357 +0,0 @@ -# -*- coding: utf-8 -*- - -import itertools -import os -import platform -import sys - -from lab.environments import LocalEnvironment, MaiaEnvironment -from lab.experiment import ARGPARSER -from lab.steps import Step - -from downward.experiments import DownwardExperiment, _get_rev_nick -from downward.checkouts import Translator, Preprocessor, Planner -from downward.reports.absolute import AbsoluteReport -from downward.reports.compare import CompareRevisionsReport -from downward.reports.scatter import ScatterPlotReport - - -def parse_args(): - ARGPARSER.add_argument( - "--test", - choices=["yes", "no", "auto"], - default="auto", - dest="test_run", - help="test experiment locally on a small suite if --test=yes or " - "--test=auto and we are not on a cluster") - return ARGPARSER.parse_args() - -ARGS = parse_args() - - -def get_script(): - """Get file name of main script.""" - import __main__ - return __main__.__file__ - - -def get_script_dir(): - """Get directory of main script. - - Usually a relative directory (depends on how it was called by the user.)""" - return os.path.dirname(get_script()) - - -def get_experiment_name(): - """Get name for experiment. - - Derived from the absolute filename of the main script, e.g. - "/ham/spam/eggs.py" => "spam-eggs".""" - script = os.path.abspath(get_script()) - script_dir = os.path.basename(os.path.dirname(script)) - script_base = os.path.splitext(os.path.basename(script))[0] - return "%s-%s" % (script_dir, script_base) - - -def get_data_dir(): - """Get data dir for the experiment. 
- - This is the subdirectory "data" of the directory containing - the main script.""" - return os.path.join(get_script_dir(), "data", get_experiment_name()) - - -def get_repo_base(): - """Get base directory of the repository, as an absolute path. - - Search upwards in the directory tree from the main script until a - directory with a subdirectory named ".hg" is found. - - Abort if the repo base cannot be found.""" - path = os.path.abspath(get_script_dir()) - while os.path.dirname(path) != path: - if os.path.exists(os.path.join(path, ".hg")): - return path - path = os.path.dirname(path) - sys.exit("repo base could not be found") - - -def is_running_on_cluster(): - node = platform.node() - return ("cluster" in node or - node.startswith("gkigrid") or - node in ["habakuk", "turtur"]) - - -def is_test_run(): - return ARGS.test_run == "yes" or (ARGS.test_run == "auto" and - not is_running_on_cluster()) - - -class IssueExperiment(DownwardExperiment): - """Wrapper for DownwardExperiment with a few convenience features.""" - - DEFAULT_TEST_SUITE = "gripper:prob01.pddl" - - DEFAULT_TABLE_ATTRIBUTES = [ - "cost", - "coverage", - "error", - "evaluations", - "expansions", - "expansions_until_last_jump", - "generated", - "memory", - "quality", - "run_dir", - "score_evaluations", - "score_expansions", - "score_generated", - "score_memory", - "score_search_time", - "score_total_time", - "search_time", - "total_time", - ] - - DEFAULT_SCATTER_PLOT_ATTRIBUTES = [ - "evaluations", - "expansions", - "expansions_until_last_jump", - "initial_h_value", - "memory", - "search_time", - "total_time", - ] - - PORTFOLIO_ATTRIBUTES = [ - "cost", - "coverage", - "error", - "plan_length", - "run_dir", - ] - - def __init__(self, configs, suite, grid_priority=None, path=None, - repo=None, revisions=None, search_revisions=None, - test_suite=None, **kwargs): - """Create a DownwardExperiment with some convenience features. 
- - *configs* must be a non-empty dict of {nick: cmdline} pairs - that sets the planner configurations to test. :: - - IssueExperiment(configs={ - "lmcut": ["--search", "astar(lmcut())"], - "ipdb": ["--search", "astar(ipdb())"]}) - - *suite* sets the benchmarks for the experiment. It must be a - single string or a list of strings specifying domains or - tasks. The downward.suites module has many predefined - suites. :: - - IssueExperiment(suite=["grid", "gripper:prob01.pddl"]) - - from downward import suites - IssueExperiment(suite=suites.suite_all()) - IssueExperiment(suite=suites.suite_satisficing_with_ipc11()) - IssueExperiment(suite=suites.suite_optimal()) - - Use *grid_priority* to set the job priority for cluster - experiments. It must be in the range [-1023, 0] where 0 is the - highest priority. By default the priority is 0. :: - - IssueExperiment(grid_priority=-500) - - If *path* is specified, it must be the path to where the - experiment should be built (e.g. - /home/john/experiments/issue123/exp01/). If omitted, the - experiment path is derived automatically from the main - script's filename. Example:: - - script = experiments/issue123/exp01.py --> - path = experiments/issue123/data/issue123-exp01/ - - If *repo* is specified, it must be the path to the root of a - local Fast Downward repository. If omitted, the repository - is derived automatically from the main script's path. Example:: - - script = /path/to/fd-repo/experiments/issue123/exp01.py --> - repo = /path/to/fd-repo - - If *revisions* is specified, it should be a non-empty - list of revisions, which specify which planner versions to use - in the experiment. The same versions are used for translator, - preprocessor and search. :: - - IssueExperiment(revisions=["issue123", "4b3d581643"]) - - If *search_revisions* is specified, it should be a non-empty - list of revisions, which specify which search component - versions to use in the experiment. 
All runs use the - translator and preprocessor component of the first - revision. :: - - IssueExperiment(search_revisions=["default", "issue123"]) - - If you really need to specify the (translator, preprocessor, - planner) triples manually, use the *combinations* parameter - from the base class (might be deprecated soon). The options - *revisions*, *search_revisions* and *combinations* can be - freely mixed, but at least one of them must be given. - - Specify *test_suite* to set the benchmarks for experiment test - runs. By default the first gripper task is used. - - IssueExperiment(test_suite=["depot:pfile1", "tpp:p01.pddl"]) - - """ - - if is_test_run(): - kwargs["environment"] = LocalEnvironment() - suite = test_suite or self.DEFAULT_TEST_SUITE - elif "environment" not in kwargs: - kwargs["environment"] = MaiaEnvironment(priority=grid_priority) - - if path is None: - path = get_data_dir() - - if repo is None: - repo = get_repo_base() - - kwargs.setdefault("combinations", []) - - if not any([revisions, search_revisions, kwargs["combinations"]]): - raise ValueError('At least one of "revisions", "search_revisions" ' - 'or "combinations" must be given') - - if revisions: - kwargs["combinations"].extend([ - (Translator(repo, rev), - Preprocessor(repo, rev), - Planner(repo, rev)) - for rev in revisions]) - - if search_revisions: - base_rev = search_revisions[0] - # Use the same nick for all parts to get short revision nick. 
- kwargs["combinations"].extend([ - (Translator(repo, base_rev, nick=rev), - Preprocessor(repo, base_rev, nick=rev), - Planner(repo, rev, nick=rev)) - for rev in search_revisions]) - - DownwardExperiment.__init__(self, path=path, repo=repo, **kwargs) - - self._config_nicks = [] - for nick, config in configs.items(): - self.add_config(nick, config) - - self.add_suite(suite) - - @property - def revision_nicks(self): - # TODO: Once the add_algorithm() API is available we should get - # rid of the call to _get_rev_nick() and avoid inspecting the - # list of combinations by setting and saving the algorithm nicks. - return [_get_rev_nick(*combo) for combo in self.combinations] - - @classmethod - def _is_portfolio(cls, config_nick): - return "fdss" in config_nick - - @classmethod - def get_supported_attributes(cls, config_nick, attributes): - if cls._is_portfolio(config_nick): - return [attr for attr in attributes - if attr in cls.PORTFOLIO_ATTRIBUTES] - return attributes - - def add_config(self, nick, config, timeout=None): - DownwardExperiment.add_config(self, nick, config, timeout=timeout) - self._config_nicks.append(nick) - - def add_absolute_report_step(self, **kwargs): - """Add step that makes an absolute report. - - Absolute reports are useful for experiments that don't - compare revisions. - - The report is written to the experiment evaluation directory. - - All *kwargs* will be passed to the AbsoluteReport class. If - the keyword argument *attributes* is not specified, a - default list of attributes is used. :: - - exp.add_absolute_report_step(attributes=["coverage"]) - - """ - kwargs.setdefault("attributes", self.DEFAULT_TABLE_ATTRIBUTES) - report = AbsoluteReport(**kwargs) - outfile = get_experiment_name() + "." + report.output_format - self.add_report(report, outfile=outfile) - - def add_comparison_table_step(self, **kwargs): - """Add a step that makes pairwise revision comparisons. 
- - Create comparative reports for all pairs of Fast Downward - revision triples. Each report pairs up the runs of the same - config and lists the two absolute attribute values and their - difference for all attributes in kwargs["attributes"]. - - All *kwargs* will be passed to the CompareRevisionsReport - class. If the keyword argument *attributes* is not - specified, a default list of attributes is used. :: - - exp.add_comparison_table_step(attributes=["coverage"]) - - """ - kwargs.setdefault("attributes", self.DEFAULT_TABLE_ATTRIBUTES) - - def make_comparison_tables(): - for rev1, rev2 in itertools.combinations(self.revision_nicks, 2): - report = CompareRevisionsReport(rev1, rev2, **kwargs) - outfile = os.path.join(self.eval_dir, - "%s-%s-%s-compare.html" % - (self.name, rev1, rev2)) - report(self.eval_dir, outfile) - - self.add_step(Step("make-comparison-tables", make_comparison_tables)) - - def add_scatter_plot_step(self, attributes=None): - """Add a step that creates scatter plots for all revision pairs. - - Create a scatter plot for each combination of attribute, - configuration and revision pair. If *attributes* is not - specified, a list of common scatter plot attributes is used. - For portfolios all attributes except "cost", "coverage" and - "plan_length" will be ignored. 
:: - - exp.add_scatter_plot_step(attributes=["expansions"]) - - """ - if attributes is None: - attributes = self.DEFAULT_SCATTER_PLOT_ATTRIBUTES - scatter_dir = os.path.join(self.eval_dir, "scatter") - - def make_scatter_plot(config_nick, rev1, rev2, attribute): - name = "-".join([self.name, rev1, rev2, attribute, config_nick]) - print "Make scatter plot for", name - algo1 = "%s-%s" % (rev1, config_nick) - algo2 = "%s-%s" % (rev2, config_nick) - report = ScatterPlotReport( - filter_config=[algo1, algo2], - attributes=[attribute], - get_category=lambda run1, run2: run1["domain"], - legend_location=(1.3, 0.5)) - report(self.eval_dir, - os.path.join(scatter_dir, rev1 + "-" + rev2, name)) - - def make_scatter_plots(): - for config_nick in self._config_nicks: - for rev1, rev2 in itertools.combinations( - self.revision_nicks, 2): - for attribute in self.get_supported_attributes( - config_nick, attributes): - make_scatter_plot(config_nick, rev1, rev2, attribute) - - self.add_step(Step("make-scatter-plots", make_scatter_plots)) diff --git a/experiments/issue527/compare_with_paper.py b/experiments/issue527/compare_with_paper.py deleted file mode 100755 index 412312f248..0000000000 --- a/experiments/issue527/compare_with_paper.py +++ /dev/null @@ -1,36 +0,0 @@ -#! 
/usr/bin/env python -# -*- coding: utf-8 -*- - -from lab.experiment import Experiment -from lab.steps import Step -from downward.reports.compare import CompareConfigsReport -from common_setup import get_experiment_name, get_data_dir, get_repo_base - -import os - -DATADIR = os.path.join(os.path.dirname(__file__), 'data') - -exp = Experiment(get_data_dir()) - -exp.add_fetcher(os.path.join(DATADIR, 'e2013101802-pho-seq-constraints-eval'), filter_config_nick="astar_pho_seq_no_onesafe") -exp.add_fetcher(os.path.join(DATADIR, 'issue527-v2-eval'), filter_config_nick="astar_occ_seq") - -exp.add_report(CompareConfigsReport( - [ - ('869fec6f843b-astar_pho_seq_no_onesafe', 'issue527-v2-astar_occ_seq'), - ], - attributes=[ - 'coverage', - 'total_time', - 'expansions', - 'evaluations', - 'generated', - 'expansions_until_last_jump', - 'error', - ], - ) -) - - - -exp() diff --git a/experiments/issue527/v1.py b/experiments/issue527/v1.py deleted file mode 100755 index 09dd1ec6a6..0000000000 --- a/experiments/issue527/v1.py +++ /dev/null @@ -1,49 +0,0 @@ -#! 
/usr/bin/env python -# -*- coding: utf-8 -*- - -from downward import suites -import common_setup - - -REVS = ["issue527-v1"] -SUITE = suites.suite_optimal_with_ipc11() - -CONFIGS = { - "astar_occ_lmcut": [ - "--search", - "astar(operatorcounting([lmcut_constraints()]))"], - "astar_occ_seq": [ - "--search", - "astar(operatorcounting([state_equation_constraints()]))"], - "astar_occ_pho_1": [ - "--search", - "astar(operatorcounting([pho_constraints_systematic(pattern_max_size=1, only_interesting_patterns=true)]))"], - "astar_occ_pho_2": [ - "--search", - "astar(operatorcounting([pho_constraints_systematic(pattern_max_size=2, only_interesting_patterns=true)]))"], - "astar_occ_pho_2_naive": [ - "--search", - "astar(operatorcounting([pho_constraints_systematic(pattern_max_size=2, only_interesting_patterns=false)]))"], - "astar_occ_pho_ipdb": [ - "--search", - "astar(operatorcounting([pho_constraints_ipdb()]))"], - "astar_cpdbs_1": [ - "--search", - "astar(cpdbs_systematic(pattern_max_size=1, only_interesting_patterns=true))"], - "astar_cpdbs_2": [ - "--search", - "astar(cpdbs_systematic(pattern_max_size=2, only_interesting_patterns=true))"], - "astar_occ_pho_2_naive": [ - "--search", - "astar(cpdbs_systematic(pattern_max_size=2, only_interesting_patterns=false))"], -} - -exp = common_setup.IssueExperiment( - search_revisions=REVS, - configs=CONFIGS, - suite=SUITE, - ) - -exp.add_absolute_report_step() - -exp() diff --git a/experiments/issue527/v2.py b/experiments/issue527/v2.py deleted file mode 100755 index b328ddaf49..0000000000 --- a/experiments/issue527/v2.py +++ /dev/null @@ -1,37 +0,0 @@ -#! 
/usr/bin/env python -# -*- coding: utf-8 -*- - -from downward import suites -import common_setup - - -REVS = ["issue527-v2"] -SUITE = suites.suite_optimal_with_ipc11() - -CONFIGS = { - "astar_occ_lmcut": [ - "--search", - "astar(operatorcounting([lmcut_constraints()]))"], - "astar_occ_seq": [ - "--search", - "astar(operatorcounting([state_equation_constraints()]))"], - "astar_occ_pho_1": [ - "--search", - "astar(operatorcounting([pho_constraints_systematic(pattern_max_size=1, only_interesting_patterns=true)]))"], - "astar_occ_pho_2": [ - "--search", - "astar(operatorcounting([pho_constraints_systematic(pattern_max_size=2, only_interesting_patterns=true)]))"], - "astar_occ_pho_ipdb": [ - "--search", - "astar(operatorcounting([pho_constraints_ipdb()]))"], -} - -exp = common_setup.IssueExperiment( - search_revisions=REVS, - configs=CONFIGS, - suite=SUITE, - ) - -exp.add_absolute_report_step() - -exp() diff --git a/experiments/issue528/common_setup.py b/experiments/issue528/common_setup.py deleted file mode 100644 index 635088ec30..0000000000 --- a/experiments/issue528/common_setup.py +++ /dev/null @@ -1,399 +0,0 @@ -# -*- coding: utf-8 -*- - -import itertools -import os -import platform -import sys - -from lab.environments import LocalEnvironment, MaiaEnvironment -from lab.experiment import ARGPARSER -from lab.reports import Table -from lab.steps import Step - -from downward.experiments import DownwardExperiment, _get_rev_nick -from downward.checkouts import Translator, Preprocessor, Planner -from downward.reports import PlanningReport -from downward.reports.absolute import AbsoluteReport -from downward.reports.compare import CompareRevisionsReport -from downward.reports.scatter import ScatterPlotReport - - -def parse_args(): - ARGPARSER.add_argument( - "--test", - choices=["yes", "no", "auto"], - default="auto", - dest="test_run", - help="test experiment locally on a small suite if --test=yes or " - "--test=auto and we are not on a cluster") - return 
ARGPARSER.parse_args() - -ARGS = parse_args() - - -def get_script(): - """Get file name of main script.""" - import __main__ - return __main__.__file__ - - -def get_script_dir(): - """Get directory of main script. - - Usually a relative directory (depends on how it was called by the user.)""" - return os.path.dirname(get_script()) - - -def get_experiment_name(): - """Get name for experiment. - - Derived from the absolute filename of the main script, e.g. - "/ham/spam/eggs.py" => "spam-eggs".""" - script = os.path.abspath(get_script()) - script_dir = os.path.basename(os.path.dirname(script)) - script_base = os.path.splitext(os.path.basename(script))[0] - return "%s-%s" % (script_dir, script_base) - - -def get_data_dir(): - """Get data dir for the experiment. - - This is the subdirectory "data" of the directory containing - the main script.""" - return os.path.join(get_script_dir(), "data", get_experiment_name()) - - -def get_repo_base(): - """Get base directory of the repository, as an absolute path. - - Search upwards in the directory tree from the main script until a - directory with a subdirectory named ".hg" is found. - - Abort if the repo base cannot be found.""" - path = os.path.abspath(get_script_dir()) - while os.path.dirname(path) != path: - if os.path.exists(os.path.join(path, ".hg")): - return path - path = os.path.dirname(path) - sys.exit("repo base could not be found") - - -def is_running_on_cluster(): - node = platform.node() - return ("cluster" in node or - node.startswith("gkigrid") or - node in ["habakuk", "turtur"]) - - -def is_test_run(): - return ARGS.test_run == "yes" or (ARGS.test_run == "auto" and - not is_running_on_cluster()) - - -class IssueExperiment(DownwardExperiment): - """Wrapper for DownwardExperiment with a few convenience features.""" - - DEFAULT_TEST_SUITE = "gripper:prob01.pddl" - - # TODO: Add something about errors/exit codes. 
- DEFAULT_TABLE_ATTRIBUTES = [ - "cost", - "coverage", - "evaluations", - "expansions", - "expansions_until_last_jump", - "generated", - "memory", - "quality", - "run_dir", - "score_evaluations", - "score_expansions", - "score_generated", - "score_memory", - "score_search_time", - "score_total_time", - "search_time", - "total_time", - ] - - DEFAULT_SCATTER_PLOT_ATTRIBUTES = [ - "evaluations", - "expansions", - "expansions_until_last_jump", - "initial_h_value", - "memory", - "search_time", - "total_time", - ] - - PORTFOLIO_ATTRIBUTES = [ - "cost", - "coverage", - "plan_length", - ] - - def __init__(self, configs, suite, grid_priority=None, path=None, - repo=None, revisions=None, search_revisions=None, - test_suite=None, **kwargs): - """Create a DownwardExperiment with some convenience features. - - *configs* must be a non-empty dict of {nick: cmdline} pairs - that sets the planner configurations to test. :: - - IssueExperiment(configs={ - "lmcut": ["--search", "astar(lmcut())"], - "ipdb": ["--search", "astar(ipdb())"]}) - - *suite* sets the benchmarks for the experiment. It must be a - single string or a list of strings specifying domains or - tasks. The downward.suites module has many predefined - suites. :: - - IssueExperiment(suite=["grid", "gripper:prob01.pddl"]) - - from downward import suites - IssueExperiment(suite=suites.suite_all()) - IssueExperiment(suite=suites.suite_satisficing_with_ipc11()) - IssueExperiment(suite=suites.suite_optimal()) - - Use *grid_priority* to set the job priority for cluster - experiments. It must be in the range [-1023, 0] where 0 is the - highest priority. By default the priority is 0. :: - - IssueExperiment(grid_priority=-500) - - If *path* is specified, it must be the path to where the - experiment should be built (e.g. - /home/john/experiments/issue123/exp01/). If omitted, the - experiment path is derived automatically from the main - script's filename. 
Example:: - - script = experiments/issue123/exp01.py --> - path = experiments/issue123/data/issue123-exp01/ - - If *repo* is specified, it must be the path to the root of a - local Fast Downward repository. If omitted, the repository - is derived automatically from the main script's path. Example:: - - script = /path/to/fd-repo/experiments/issue123/exp01.py --> - repo = /path/to/fd-repo - - If *revisions* is specified, it should be a non-empty - list of revisions, which specify which planner versions to use - in the experiment. The same versions are used for translator, - preprocessor and search. :: - - IssueExperiment(revisions=["issue123", "4b3d581643"]) - - If *search_revisions* is specified, it should be a non-empty - list of revisions, which specify which search component - versions to use in the experiment. All runs use the - translator and preprocessor component of the first - revision. :: - - IssueExperiment(search_revisions=["default", "issue123"]) - - If you really need to specify the (translator, preprocessor, - planner) triples manually, use the *combinations* parameter - from the base class (might be deprecated soon). The options - *revisions*, *search_revisions* and *combinations* can be - freely mixed, but at least one of them must be given. - - Specify *test_suite* to set the benchmarks for experiment test - runs. By default the first gripper task is used. 
- - IssueExperiment(test_suite=["depot:pfile1", "tpp:p01.pddl"]) - - """ - - if is_test_run(): - kwargs["environment"] = LocalEnvironment() - suite = test_suite or self.DEFAULT_TEST_SUITE - elif "environment" not in kwargs: - kwargs["environment"] = MaiaEnvironment(priority=grid_priority) - - if path is None: - path = get_data_dir() - - if repo is None: - repo = get_repo_base() - - kwargs.setdefault("combinations", []) - - if not any([revisions, search_revisions, kwargs["combinations"]]): - raise ValueError('At least one of "revisions", "search_revisions" ' - 'or "combinations" must be given') - - if revisions: - kwargs["combinations"].extend([ - (Translator(repo, rev), - Preprocessor(repo, rev), - Planner(repo, rev)) - for rev in revisions]) - - if search_revisions: - base_rev = search_revisions[0] - # Use the same nick for all parts to get short revision nick. - kwargs["combinations"].extend([ - (Translator(repo, base_rev, nick=rev), - Preprocessor(repo, base_rev, nick=rev), - Planner(repo, rev, nick=rev)) - for rev in search_revisions]) - - DownwardExperiment.__init__(self, path=path, repo=repo, **kwargs) - - self._config_nicks = [] - for nick, config in configs.items(): - self.add_config(nick, config) - - self.add_suite(suite) - - @property - def revision_nicks(self): - # TODO: Once the add_algorithm() API is available we should get - # rid of the call to _get_rev_nick() and avoid inspecting the - # list of combinations by setting and saving the algorithm nicks. - return [_get_rev_nick(*combo) for combo in self.combinations] - - def add_config(self, nick, config, timeout=None): - DownwardExperiment.add_config(self, nick, config, timeout=timeout) - self._config_nicks.append(nick) - - def add_absolute_report_step(self, **kwargs): - """Add step that makes an absolute report. - - Absolute reports are useful for experiments that don't - compare revisions. - - The report is written to the experiment evaluation directory. 
- - All *kwargs* will be passed to the AbsoluteReport class. If - the keyword argument *attributes* is not specified, a - default list of attributes is used. :: - - exp.add_absolute_report_step(attributes=["coverage"]) - - """ - kwargs.setdefault("attributes", self.DEFAULT_TABLE_ATTRIBUTES) - report = AbsoluteReport(**kwargs) - outfile = get_experiment_name() + "." + report.output_format - self.add_report(report, outfile=outfile) - - def add_comparison_table_step(self, **kwargs): - """Add a step that makes pairwise revision comparisons. - - Create comparative reports for all pairs of Fast Downward - revision triples. Each report pairs up the runs of the same - config and lists the two absolute attribute values and their - difference for all attributes in kwargs["attributes"]. - - All *kwargs* will be passed to the CompareRevisionsReport - class. If the keyword argument *attributes* is not - specified, a default list of attributes is used. :: - - exp.add_comparison_table_step(attributes=["coverage"]) - - """ - kwargs.setdefault("attributes", self.DEFAULT_TABLE_ATTRIBUTES) - - def make_comparison_tables(): - for rev1, rev2 in itertools.combinations(self.revision_nicks, 2): - report = CompareRevisionsReport(rev1, rev2, **kwargs) - outfile = os.path.join(self.eval_dir, - "%s-%s-compare.html" % (rev1, rev2)) - report(self.eval_dir, outfile) - - self.add_step(Step("make-comparison-tables", make_comparison_tables)) - - def add_scatter_plot_step(self, attributes=None): - """Add a step that creates scatter plots for all revision pairs. - - Create a scatter plot for each combination of attribute, - configuration and revision pair. If *attributes* is not - specified, a list of common scatter plot attributes is used. - For portfolios all attributes except "cost", "coverage" and - "plan_length" will be ignored. 
:: - - exp.add_scatter_plot_step(attributes=["expansions"]) - - """ - if attributes is None: - attributes = self.DEFAULT_SCATTER_PLOT_ATTRIBUTES - scatter_dir = os.path.join(self.eval_dir, "scatter") - - def is_portfolio(config_nick): - return "fdss" in config_nick - - def make_scatter_plots(): - for config_nick in self._config_nicks: - for rev1, rev2 in itertools.combinations( - self.revision_nicks, 2): - algo1 = "%s-%s" % (rev1, config_nick) - algo2 = "%s-%s" % (rev2, config_nick) - if is_portfolio(config_nick): - valid_attributes = [ - attr for attr in attributes - if attr in self.PORTFOLIO_ATTRIBUTES] - else: - valid_attributes = attributes - for attribute in valid_attributes: - name = "-".join([rev1, rev2, attribute, config_nick]) - print "Make scatter plot for", name - report = ScatterPlotReport( - filter_config=[algo1, algo2], - attributes=[attribute], - get_category=lambda run1, run2: run1["domain"], - legend_location=(1.3, 0.5)) - report(self.eval_dir, os.path.join(scatter_dir, name)) - - self.add_step(Step("make-scatter-plots", make_scatter_plots)) - - -class RegressionReport(PlanningReport): - """ - Compare revisions for tasks on which the first revision performs - better than other revisions. - - *revision_nicks* must be a list of revision_nicks, e.g. - ["default", "issue123"]. - - *config_nicks* must be a list of configuration nicknames, e.g. - ["eager_greedy_ff", "eager_greedy_add"]. - - *regression_attribute* is the attribute that we compare between - different revisions. It defaults to "coverage". 
- - Example comparing search_time for tasks were we lose coverage:: - - exp.add_report(RegressionReport(revision_nicks=["default", "issue123"], - config_nicks=["eager_greedy_ff"], - regression_attribute="coverage", - attributes="search_time")) - """ - def __init__(self, revision_nicks, config_nicks, - regression_attribute="coverage", **kwargs): - PlanningReport.__init__(self, **kwargs) - assert revision_nicks - self.revision_nicks = revision_nicks - assert config_nicks - self.config_nicks = config_nicks - self.regression_attribute = regression_attribute - - def get_markup(self): - tables = [] - for (domain, problem) in self.problems: - for config_nick in self.config_nicks: - runs = [self.runs[(domain, problem, rev + "-" + config_nick)] - for rev in self.revision_nicks] - - if any(runs[0][self.regression_attribute] > - runs[i][self.regression_attribute] - for i in range(1, len(self.revision_nicks))): - print "\"%s:%s\"," % (domain, problem) - table = Table() - for rev, run in zip(self.revision_nicks, runs): - for attr in self.attributes: - table.add_cell(rev, attr, run.get(attr)) - table_name = ":".join((domain, problem, config_nick)) - tables.append((table_name, table)) - return "\n".join(name + "\n" + str(table) for name, table in tables) diff --git a/experiments/issue528/issue528-v3.py b/experiments/issue528/issue528-v3.py deleted file mode 100755 index c8d3f42a90..0000000000 --- a/experiments/issue528/issue528-v3.py +++ /dev/null @@ -1,38 +0,0 @@ -#! 
/usr/bin/env python -# -*- coding: utf-8 -*- - -from downward import configs, suites -from downward.reports.scatter import ScatterPlotReport - -import common_setup - - -SEARCH_REVS = ["issue528-base", "issue528-v3"] -SUITE = suites.suite_optimal_with_ipc11() - -CONFIGS = { - "astar_lmcut": ["--search", "astar(lmcut())"] -} - -exp = common_setup.IssueExperiment( - revisions=SEARCH_REVS, - configs=CONFIGS, - suite=SUITE, - ) - -exp.add_absolute_report_step() -exp.add_comparison_table_step() - -for attr in ("memory", "total_time"): - exp.add_report( - ScatterPlotReport( - attributes=[attr], - filter_config=[ - "issue528-base-astar_lmcut", - "issue528-v3-astar_lmcut", - ], - ), - outfile='issue528_base_v3_%s.png' % attr - ) - -exp() diff --git a/experiments/issue528/issue528.py b/experiments/issue528/issue528.py deleted file mode 100755 index b92805e358..0000000000 --- a/experiments/issue528/issue528.py +++ /dev/null @@ -1,38 +0,0 @@ -#! /usr/bin/env python -# -*- coding: utf-8 -*- - -from downward import configs, suites -from downward.reports.scatter import ScatterPlotReport - -import common_setup - - -SEARCH_REVS = ["issue528-base", "issue528-v1", "issue528-v2"] -SUITE = suites.suite_optimal_with_ipc11() - -CONFIGS = { - "astar_lmcut": ["--search", "astar(lmcut())"] -} - -exp = common_setup.IssueExperiment( - revisions=SEARCH_REVS, - configs=CONFIGS, - suite=SUITE, - ) - -exp.add_absolute_report_step() -exp.add_comparison_table_step() - -for attr in ("memory", "total_time"): - exp.add_report( - ScatterPlotReport( - attributes=[attr], - filter_config=[ - "issue528-base-astar_lmcut", - "issue528-v2-astar_lmcut", - ], - ), - outfile='issue528_base_v2_%s.png' % attr - ) - -exp() diff --git a/experiments/issue529/common_setup.py b/experiments/issue529/common_setup.py deleted file mode 100644 index 635088ec30..0000000000 --- a/experiments/issue529/common_setup.py +++ /dev/null @@ -1,399 +0,0 @@ -# -*- coding: utf-8 -*- - -import itertools -import os -import platform 
-import sys - -from lab.environments import LocalEnvironment, MaiaEnvironment -from lab.experiment import ARGPARSER -from lab.reports import Table -from lab.steps import Step - -from downward.experiments import DownwardExperiment, _get_rev_nick -from downward.checkouts import Translator, Preprocessor, Planner -from downward.reports import PlanningReport -from downward.reports.absolute import AbsoluteReport -from downward.reports.compare import CompareRevisionsReport -from downward.reports.scatter import ScatterPlotReport - - -def parse_args(): - ARGPARSER.add_argument( - "--test", - choices=["yes", "no", "auto"], - default="auto", - dest="test_run", - help="test experiment locally on a small suite if --test=yes or " - "--test=auto and we are not on a cluster") - return ARGPARSER.parse_args() - -ARGS = parse_args() - - -def get_script(): - """Get file name of main script.""" - import __main__ - return __main__.__file__ - - -def get_script_dir(): - """Get directory of main script. - - Usually a relative directory (depends on how it was called by the user.)""" - return os.path.dirname(get_script()) - - -def get_experiment_name(): - """Get name for experiment. - - Derived from the absolute filename of the main script, e.g. - "/ham/spam/eggs.py" => "spam-eggs".""" - script = os.path.abspath(get_script()) - script_dir = os.path.basename(os.path.dirname(script)) - script_base = os.path.splitext(os.path.basename(script))[0] - return "%s-%s" % (script_dir, script_base) - - -def get_data_dir(): - """Get data dir for the experiment. - - This is the subdirectory "data" of the directory containing - the main script.""" - return os.path.join(get_script_dir(), "data", get_experiment_name()) - - -def get_repo_base(): - """Get base directory of the repository, as an absolute path. - - Search upwards in the directory tree from the main script until a - directory with a subdirectory named ".hg" is found. 
- - Abort if the repo base cannot be found.""" - path = os.path.abspath(get_script_dir()) - while os.path.dirname(path) != path: - if os.path.exists(os.path.join(path, ".hg")): - return path - path = os.path.dirname(path) - sys.exit("repo base could not be found") - - -def is_running_on_cluster(): - node = platform.node() - return ("cluster" in node or - node.startswith("gkigrid") or - node in ["habakuk", "turtur"]) - - -def is_test_run(): - return ARGS.test_run == "yes" or (ARGS.test_run == "auto" and - not is_running_on_cluster()) - - -class IssueExperiment(DownwardExperiment): - """Wrapper for DownwardExperiment with a few convenience features.""" - - DEFAULT_TEST_SUITE = "gripper:prob01.pddl" - - # TODO: Add something about errors/exit codes. - DEFAULT_TABLE_ATTRIBUTES = [ - "cost", - "coverage", - "evaluations", - "expansions", - "expansions_until_last_jump", - "generated", - "memory", - "quality", - "run_dir", - "score_evaluations", - "score_expansions", - "score_generated", - "score_memory", - "score_search_time", - "score_total_time", - "search_time", - "total_time", - ] - - DEFAULT_SCATTER_PLOT_ATTRIBUTES = [ - "evaluations", - "expansions", - "expansions_until_last_jump", - "initial_h_value", - "memory", - "search_time", - "total_time", - ] - - PORTFOLIO_ATTRIBUTES = [ - "cost", - "coverage", - "plan_length", - ] - - def __init__(self, configs, suite, grid_priority=None, path=None, - repo=None, revisions=None, search_revisions=None, - test_suite=None, **kwargs): - """Create a DownwardExperiment with some convenience features. - - *configs* must be a non-empty dict of {nick: cmdline} pairs - that sets the planner configurations to test. :: - - IssueExperiment(configs={ - "lmcut": ["--search", "astar(lmcut())"], - "ipdb": ["--search", "astar(ipdb())"]}) - - *suite* sets the benchmarks for the experiment. It must be a - single string or a list of strings specifying domains or - tasks. The downward.suites module has many predefined - suites. 
:: - - IssueExperiment(suite=["grid", "gripper:prob01.pddl"]) - - from downward import suites - IssueExperiment(suite=suites.suite_all()) - IssueExperiment(suite=suites.suite_satisficing_with_ipc11()) - IssueExperiment(suite=suites.suite_optimal()) - - Use *grid_priority* to set the job priority for cluster - experiments. It must be in the range [-1023, 0] where 0 is the - highest priority. By default the priority is 0. :: - - IssueExperiment(grid_priority=-500) - - If *path* is specified, it must be the path to where the - experiment should be built (e.g. - /home/john/experiments/issue123/exp01/). If omitted, the - experiment path is derived automatically from the main - script's filename. Example:: - - script = experiments/issue123/exp01.py --> - path = experiments/issue123/data/issue123-exp01/ - - If *repo* is specified, it must be the path to the root of a - local Fast Downward repository. If omitted, the repository - is derived automatically from the main script's path. Example:: - - script = /path/to/fd-repo/experiments/issue123/exp01.py --> - repo = /path/to/fd-repo - - If *revisions* is specified, it should be a non-empty - list of revisions, which specify which planner versions to use - in the experiment. The same versions are used for translator, - preprocessor and search. :: - - IssueExperiment(revisions=["issue123", "4b3d581643"]) - - If *search_revisions* is specified, it should be a non-empty - list of revisions, which specify which search component - versions to use in the experiment. All runs use the - translator and preprocessor component of the first - revision. :: - - IssueExperiment(search_revisions=["default", "issue123"]) - - If you really need to specify the (translator, preprocessor, - planner) triples manually, use the *combinations* parameter - from the base class (might be deprecated soon). The options - *revisions*, *search_revisions* and *combinations* can be - freely mixed, but at least one of them must be given. 
- - Specify *test_suite* to set the benchmarks for experiment test - runs. By default the first gripper task is used. - - IssueExperiment(test_suite=["depot:pfile1", "tpp:p01.pddl"]) - - """ - - if is_test_run(): - kwargs["environment"] = LocalEnvironment() - suite = test_suite or self.DEFAULT_TEST_SUITE - elif "environment" not in kwargs: - kwargs["environment"] = MaiaEnvironment(priority=grid_priority) - - if path is None: - path = get_data_dir() - - if repo is None: - repo = get_repo_base() - - kwargs.setdefault("combinations", []) - - if not any([revisions, search_revisions, kwargs["combinations"]]): - raise ValueError('At least one of "revisions", "search_revisions" ' - 'or "combinations" must be given') - - if revisions: - kwargs["combinations"].extend([ - (Translator(repo, rev), - Preprocessor(repo, rev), - Planner(repo, rev)) - for rev in revisions]) - - if search_revisions: - base_rev = search_revisions[0] - # Use the same nick for all parts to get short revision nick. - kwargs["combinations"].extend([ - (Translator(repo, base_rev, nick=rev), - Preprocessor(repo, base_rev, nick=rev), - Planner(repo, rev, nick=rev)) - for rev in search_revisions]) - - DownwardExperiment.__init__(self, path=path, repo=repo, **kwargs) - - self._config_nicks = [] - for nick, config in configs.items(): - self.add_config(nick, config) - - self.add_suite(suite) - - @property - def revision_nicks(self): - # TODO: Once the add_algorithm() API is available we should get - # rid of the call to _get_rev_nick() and avoid inspecting the - # list of combinations by setting and saving the algorithm nicks. - return [_get_rev_nick(*combo) for combo in self.combinations] - - def add_config(self, nick, config, timeout=None): - DownwardExperiment.add_config(self, nick, config, timeout=timeout) - self._config_nicks.append(nick) - - def add_absolute_report_step(self, **kwargs): - """Add step that makes an absolute report. 
- - Absolute reports are useful for experiments that don't - compare revisions. - - The report is written to the experiment evaluation directory. - - All *kwargs* will be passed to the AbsoluteReport class. If - the keyword argument *attributes* is not specified, a - default list of attributes is used. :: - - exp.add_absolute_report_step(attributes=["coverage"]) - - """ - kwargs.setdefault("attributes", self.DEFAULT_TABLE_ATTRIBUTES) - report = AbsoluteReport(**kwargs) - outfile = get_experiment_name() + "." + report.output_format - self.add_report(report, outfile=outfile) - - def add_comparison_table_step(self, **kwargs): - """Add a step that makes pairwise revision comparisons. - - Create comparative reports for all pairs of Fast Downward - revision triples. Each report pairs up the runs of the same - config and lists the two absolute attribute values and their - difference for all attributes in kwargs["attributes"]. - - All *kwargs* will be passed to the CompareRevisionsReport - class. If the keyword argument *attributes* is not - specified, a default list of attributes is used. :: - - exp.add_comparison_table_step(attributes=["coverage"]) - - """ - kwargs.setdefault("attributes", self.DEFAULT_TABLE_ATTRIBUTES) - - def make_comparison_tables(): - for rev1, rev2 in itertools.combinations(self.revision_nicks, 2): - report = CompareRevisionsReport(rev1, rev2, **kwargs) - outfile = os.path.join(self.eval_dir, - "%s-%s-compare.html" % (rev1, rev2)) - report(self.eval_dir, outfile) - - self.add_step(Step("make-comparison-tables", make_comparison_tables)) - - def add_scatter_plot_step(self, attributes=None): - """Add a step that creates scatter plots for all revision pairs. - - Create a scatter plot for each combination of attribute, - configuration and revision pair. If *attributes* is not - specified, a list of common scatter plot attributes is used. - For portfolios all attributes except "cost", "coverage" and - "plan_length" will be ignored. 
:: - - exp.add_scatter_plot_step(attributes=["expansions"]) - - """ - if attributes is None: - attributes = self.DEFAULT_SCATTER_PLOT_ATTRIBUTES - scatter_dir = os.path.join(self.eval_dir, "scatter") - - def is_portfolio(config_nick): - return "fdss" in config_nick - - def make_scatter_plots(): - for config_nick in self._config_nicks: - for rev1, rev2 in itertools.combinations( - self.revision_nicks, 2): - algo1 = "%s-%s" % (rev1, config_nick) - algo2 = "%s-%s" % (rev2, config_nick) - if is_portfolio(config_nick): - valid_attributes = [ - attr for attr in attributes - if attr in self.PORTFOLIO_ATTRIBUTES] - else: - valid_attributes = attributes - for attribute in valid_attributes: - name = "-".join([rev1, rev2, attribute, config_nick]) - print "Make scatter plot for", name - report = ScatterPlotReport( - filter_config=[algo1, algo2], - attributes=[attribute], - get_category=lambda run1, run2: run1["domain"], - legend_location=(1.3, 0.5)) - report(self.eval_dir, os.path.join(scatter_dir, name)) - - self.add_step(Step("make-scatter-plots", make_scatter_plots)) - - -class RegressionReport(PlanningReport): - """ - Compare revisions for tasks on which the first revision performs - better than other revisions. - - *revision_nicks* must be a list of revision_nicks, e.g. - ["default", "issue123"]. - - *config_nicks* must be a list of configuration nicknames, e.g. - ["eager_greedy_ff", "eager_greedy_add"]. - - *regression_attribute* is the attribute that we compare between - different revisions. It defaults to "coverage". 
- - Example comparing search_time for tasks were we lose coverage:: - - exp.add_report(RegressionReport(revision_nicks=["default", "issue123"], - config_nicks=["eager_greedy_ff"], - regression_attribute="coverage", - attributes="search_time")) - """ - def __init__(self, revision_nicks, config_nicks, - regression_attribute="coverage", **kwargs): - PlanningReport.__init__(self, **kwargs) - assert revision_nicks - self.revision_nicks = revision_nicks - assert config_nicks - self.config_nicks = config_nicks - self.regression_attribute = regression_attribute - - def get_markup(self): - tables = [] - for (domain, problem) in self.problems: - for config_nick in self.config_nicks: - runs = [self.runs[(domain, problem, rev + "-" + config_nick)] - for rev in self.revision_nicks] - - if any(runs[0][self.regression_attribute] > - runs[i][self.regression_attribute] - for i in range(1, len(self.revision_nicks))): - print "\"%s:%s\"," % (domain, problem) - table = Table() - for rev, run in zip(self.revision_nicks, runs): - for attr in self.attributes: - table.add_cell(rev, attr, run.get(attr)) - table_name = ":".join((domain, problem, config_nick)) - tables.append((table_name, table)) - return "\n".join(name + "\n" + str(table) for name, table in tables) diff --git a/experiments/issue529/issue529.py b/experiments/issue529/issue529.py deleted file mode 100755 index d4ab610742..0000000000 --- a/experiments/issue529/issue529.py +++ /dev/null @@ -1,60 +0,0 @@ -#! 
/usr/bin/env python -# -*- coding: utf-8 -*- - -from downward import configs, suites -from downward.reports.scatter import ScatterPlotReport - -import common_setup -from relativescatter import RelativeScatterPlotReport - - -SEARCH_REVS = ["issue529-v1-base", "issue529-v1"] -SUITE = suites.suite_optimal_with_ipc11() - -CONFIGS = { - 'astar_blind': [ - '--search', - 'astar(blind())'], - 'astar_ipdb': [ - '--search', - 'astar(ipdb())'], - 'astar_cpdbs': [ - '--search', - 'astar(cpdbs())'], - 'astar_gapdb': [ - '--search', - 'astar(gapdb())'], - 'astar_pdb': [ - '--search', - 'astar(pdb())'], - 'astar_zopdbs': [ - '--search', - 'astar(zopdbs())'], - 'eager_greedy_cg': [ - '--heuristic', - 'h=cg()', - '--search', - 'eager_greedy(h, preferred=h)'], -} - -exp = common_setup.IssueExperiment( - revisions=SEARCH_REVS, - configs=CONFIGS, - suite=SUITE, - ) - -exp.add_absolute_report_step() -exp.add_comparison_table_step() - -for conf in CONFIGS: - for attr in ("memory", "total_time"): - exp.add_report( - RelativeScatterPlotReport( - attributes=[attr], - get_category=lambda run1, run2: run1.get("domain"), - filter_config=["issue529-v1-base-%s" % conf, "issue529-v1-%s" % conf] - ), - outfile='issue529_base_v1_%s_%s.png' % (conf, attr) - ) - -exp() diff --git a/experiments/issue529/relativescatter.py b/experiments/issue529/relativescatter.py deleted file mode 100644 index 41a8385a87..0000000000 --- a/experiments/issue529/relativescatter.py +++ /dev/null @@ -1,126 +0,0 @@ -# -*- coding: utf-8 -*- -# -# downward uses the lab package to conduct experiments with the -# Fast Downward planning system. -# -# This program is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. 
-# -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with this program. If not, see . - -from collections import defaultdict -import os - -from lab import tools - -from matplotlib import ticker - -from downward.reports.scatter import ScatterPlotReport -from downward.reports.plot import PlotReport, Matplotlib, MatplotlibPlot - - -# TODO: handle outliers - -# TODO: this is mostly copied from ScatterMatplotlib (scatter.py) -class RelativeScatterMatplotlib(Matplotlib): - @classmethod - def _plot(cls, report, axes, categories, styles): - # Display grid - axes.grid(b=True, linestyle='-', color='0.75') - - has_points = False - # Generate the scatter plots - for category, coords in sorted(categories.items()): - X, Y = zip(*coords) - axes.scatter(X, Y, s=42, label=category, **styles[category]) - if X and Y: - has_points = True - - if report.xscale == 'linear' or report.yscale == 'linear': - plot_size = report.missing_val * 1.01 - else: - plot_size = report.missing_val * 1.25 - - # make 5 ticks above and below 1 - yticks = [] - tick_step = report.ylim_top**(1/5.0) - for i in xrange(-5, 6): - yticks.append(tick_step**i) - axes.set_yticks(yticks) - axes.get_yaxis().set_major_formatter(ticker.ScalarFormatter()) - - axes.set_xlim(report.xlim_left or -1, report.xlim_right or plot_size) - axes.set_ylim(report.ylim_bottom or -1, report.ylim_top or plot_size) - - for axis in [axes.xaxis, axes.yaxis]: - MatplotlibPlot.change_axis_formatter(axis, - report.missing_val if report.show_missing else None) - return has_points - - - -class RelativeScatterPlotReport(ScatterPlotReport): - """ - Generate a scatter plot that shows how a specific attribute in two - configurations. 
The attribute value in config 1 is shown on the - x-axis and the relation to the value in config 2 on the y-axis. - """ - - def __init__(self, show_missing=True, get_category=None, **kwargs): - ScatterPlotReport.__init__(self, show_missing, get_category, **kwargs) - if self.output_format == 'tex': - raise "not supported" - else: - self.writer = RelativeScatterMatplotlib - - - def _fill_categories(self, runs): - # We discard the *runs* parameter. - # Map category names to value tuples - categories = defaultdict(list) - self.ylim_bottom = 2 - self.ylim_top = 0.5 - self.xlim_left = float("inf") - for (domain, problem), runs in self.problem_runs.items(): - if len(runs) != 2: - continue - run1, run2 = runs - assert (run1['config'] == self.configs[0] and - run2['config'] == self.configs[1]) - val1 = run1.get(self.attribute) - val2 = run2.get(self.attribute) - if val1 is None or val2 is None: - continue - category = self.get_category(run1, run2) - assert val1 > 0, (domain, problem, self.configs[0], val1) - assert val2 > 0, (domain, problem, self.configs[1], val2) - x = val1 - y = val2 / float(val1) - - categories[category].append((x, y)) - - self.ylim_top = max(self.ylim_top, y) - self.ylim_bottom = min(self.ylim_bottom, y) - self.xlim_left = min(self.xlim_left, x) - - # center around 1 - if self.ylim_bottom < 1: - self.ylim_top = max(self.ylim_top, 1 / float(self.ylim_bottom)) - if self.ylim_top > 1: - self.ylim_bottom = min(self.ylim_bottom, 1 / float(self.ylim_top)) - return categories - - def _set_scales(self, xscale, yscale): - # ScatterPlots use log-scaling on the x-axis by default. 
- default_xscale = 'log' - if self.attribute and self.attribute in self.LINEAR: - default_xscale = 'linear' - PlotReport._set_scales(self, xscale or default_xscale, 'log') diff --git a/experiments/issue532/timers-microbenchmark/.gitignore b/experiments/issue532/timers-microbenchmark/.gitignore deleted file mode 100644 index 10e7a1e57c..0000000000 --- a/experiments/issue532/timers-microbenchmark/.gitignore +++ /dev/null @@ -1,3 +0,0 @@ -/.obj/ -/benchmark -/Makefile.depend diff --git a/experiments/issue532/timers-microbenchmark/Makefile b/experiments/issue532/timers-microbenchmark/Makefile deleted file mode 100644 index 1119941993..0000000000 --- a/experiments/issue532/timers-microbenchmark/Makefile +++ /dev/null @@ -1,143 +0,0 @@ -DOWNWARD_BITWIDTH=32 - -HEADERS = - -SOURCES = main.cc $(HEADERS:%.h=%.cc) -TARGET = benchmark - -default: release - -OBJECT_SUFFIX_RELEASE = .release -TARGET_SUFFIX_RELEASE = -OBJECT_SUFFIX_DEBUG = .debug -TARGET_SUFFIX_DEBUG = -debug -OBJECT_SUFFIX_PROFILE = .profile -TARGET_SUFFIX_PROFILE = -profile - -OBJECTS_RELEASE = $(SOURCES:%.cc=.obj/%$(OBJECT_SUFFIX_RELEASE).o) -TARGET_RELEASE = $(TARGET)$(TARGET_SUFFIX_RELEASE) - -OBJECTS_DEBUG = $(SOURCES:%.cc=.obj/%$(OBJECT_SUFFIX_DEBUG).o) -TARGET_DEBUG = $(TARGET)$(TARGET_SUFFIX_DEBUG) - -OBJECTS_PROFILE = $(SOURCES:%.cc=.obj/%$(OBJECT_SUFFIX_PROFILE).o) -TARGET_PROFILE = $(TARGET)$(TARGET_SUFFIX_PROFILE) - -DEPEND = $(CXX) -MM - -## CXXFLAGS, LDFLAGS, POSTLINKOPT are options for compiler and linker -## that are used for all three targets (release, debug, and profile). -## (POSTLINKOPT are options that appear *after* all object files.) 
- -ifeq ($(DOWNWARD_BITWIDTH), 32) - BITWIDTHOPT = -m32 -else ifeq ($(DOWNWARD_BITWIDTH), 64) - BITWIDTHOPT = -m64 -else ifneq ($(DOWNWARD_BITWIDTH), native) - $(error Bad value for DOWNWARD_BITWIDTH) -endif - -CXXFLAGS = -CXXFLAGS += -g -CXXFLAGS += $(BITWIDTHOPT) -CXXFLAGS += -std=c++11 -Wall -Wextra -pedantic -Wno-deprecated -Werror - -LDFLAGS = -LDFLAGS += $(BITWIDTHOPT) -LDFLAGS += -g - -POSTLINKOPT = - -CXXFLAGS_RELEASE = -O3 -DNDEBUG -fomit-frame-pointer -CXXFLAGS_DEBUG = -O3 -CXXFLAGS_PROFILE = -O3 -pg - -LDFLAGS_RELEASE = -LDFLAGS_DEBUG = -LDFLAGS_PROFILE = -pg - -POSTLINKOPT_RELEASE = -POSTLINKOPT_DEBUG = -POSTLINKOPT_PROFILE = - -LDFLAGS_RELEASE += -static -static-libgcc - -POSTLINKOPT_RELEASE += -Wl,-Bstatic -lrt -POSTLINKOPT_DEBUG += -lrt -POSTLINKOPT_PROFILE += -lrt - -all: release debug profile - -## Build rules for the release target follow. - -release: $(TARGET_RELEASE) - -$(TARGET_RELEASE): $(OBJECTS_RELEASE) - $(CXX) $(LDFLAGS) $(LDFLAGS_RELEASE) $(OBJECTS_RELEASE) $(POSTLINKOPT) $(POSTLINKOPT_RELEASE) -o $(TARGET_RELEASE) - -$(OBJECTS_RELEASE): .obj/%$(OBJECT_SUFFIX_RELEASE).o: %.cc - @mkdir -p $$(dirname $@) - $(CXX) $(CXXFLAGS) $(CXXFLAGS_RELEASE) -c $< -o $@ - -## Build rules for the debug target follow. - -debug: $(TARGET_DEBUG) - -$(TARGET_DEBUG): $(OBJECTS_DEBUG) - $(CXX) $(LDFLAGS) $(LDFLAGS_DEBUG) $(OBJECTS_DEBUG) $(POSTLINKOPT) $(POSTLINKOPT_DEBUG) -o $(TARGET_DEBUG) - -$(OBJECTS_DEBUG): .obj/%$(OBJECT_SUFFIX_DEBUG).o: %.cc - @mkdir -p $$(dirname $@) - $(CXX) $(CXXFLAGS) $(CXXFLAGS_DEBUG) -c $< -o $@ - -## Build rules for the profile target follow. - -profile: $(TARGET_PROFILE) - -$(TARGET_PROFILE): $(OBJECTS_PROFILE) - $(CXX) $(LDFLAGS) $(LDFLAGS_PROFILE) $(OBJECTS_PROFILE) $(POSTLINKOPT) $(POSTLINKOPT_PROFILE) -o $(TARGET_PROFILE) - -$(OBJECTS_PROFILE): .obj/%$(OBJECT_SUFFIX_PROFILE).o: %.cc - @mkdir -p $$(dirname $@) - $(CXX) $(CXXFLAGS) $(CXXFLAGS_PROFILE) -c $< -o $@ - -## Additional targets follow. 
- -PROFILE: $(TARGET_PROFILE) - ./$(TARGET_PROFILE) $(ARGS_PROFILE) - gprof $(TARGET_PROFILE) | (cleanup-profile 2> /dev/null || cat) > PROFILE - -clean: - rm -rf .obj - rm -f *~ *.pyc - rm -f Makefile.depend gmon.out PROFILE core - rm -f sas_plan - -distclean: clean - rm -f $(TARGET_RELEASE) $(TARGET_DEBUG) $(TARGET_PROFILE) - -## NOTE: If we just call gcc -MM on a source file that lives within a -## subdirectory, it will strip the directory part in the output. Hence -## the for loop with the sed call. - -Makefile.depend: $(SOURCES) $(HEADERS) - rm -f Makefile.temp - for source in $(SOURCES) ; do \ - $(DEPEND) $(CXXFLAGS) $$source > Makefile.temp0; \ - objfile=$${source%%.cc}.o; \ - sed -i -e "s@^[^:]*:@$$objfile:@" Makefile.temp0; \ - cat Makefile.temp0 >> Makefile.temp; \ - done - rm -f Makefile.temp0 Makefile.depend - sed -e "s@\(.*\)\.o:\(.*\)@.obj/\1$(OBJECT_SUFFIX_RELEASE).o:\2@" Makefile.temp >> Makefile.depend - sed -e "s@\(.*\)\.o:\(.*\)@.obj/\1$(OBJECT_SUFFIX_DEBUG).o:\2@" Makefile.temp >> Makefile.depend - sed -e "s@\(.*\)\.o:\(.*\)@.obj/\1$(OBJECT_SUFFIX_PROFILE).o:\2@" Makefile.temp >> Makefile.depend - rm -f Makefile.temp - -ifneq ($(MAKECMDGOALS),clean) - ifneq ($(MAKECMDGOALS),distclean) - -include Makefile.depend - endif -endif - -.PHONY: default all release debug profile clean distclean diff --git a/experiments/issue532/timers-microbenchmark/main.cc b/experiments/issue532/timers-microbenchmark/main.cc deleted file mode 100644 index 3b9e28c182..0000000000 --- a/experiments/issue532/timers-microbenchmark/main.cc +++ /dev/null @@ -1,50 +0,0 @@ -#include -#include -#include -#include - -#include -#include - -using namespace std; - - -void benchmark(const string &desc, int num_calls, - const function &func) { - cout << "Running " << desc << " " << num_calls << " times:" << flush; - clock_t start = clock(); - for (int i = 0; i < num_calls; ++i) - func(); - clock_t end = clock(); - double duration = static_cast(end - start) / CLOCKS_PER_SEC; - cout << " 
" << duration << " seconds" << endl; -} - - -double get_time_with_times() { - struct tms the_tms; - times(&the_tms); - clock_t clocks = the_tms.tms_utime + the_tms.tms_stime; - return double(clocks) / sysconf(_SC_CLK_TCK); -} - -double get_time_with_clock_gettime() { - timespec tp; - clock_gettime(CLOCK_PROCESS_CPUTIME_ID, &tp); - return tp.tv_sec + tp.tv_nsec / 1e9; -} - - -int main(int, char **) { - const int NUM_ITERATIONS = 10000000; - - benchmark("nothing", NUM_ITERATIONS, [] () {}); - cout << endl; - benchmark("times()", - NUM_ITERATIONS, - [&]() {get_time_with_times();}); - benchmark("clock_gettime()", - NUM_ITERATIONS, - [&]() {get_time_with_clock_gettime();}); - return 0; -} diff --git a/experiments/issue533/common_setup.py b/experiments/issue533/common_setup.py deleted file mode 100644 index 90e985c980..0000000000 --- a/experiments/issue533/common_setup.py +++ /dev/null @@ -1,352 +0,0 @@ -# -*- coding: utf-8 -*- - -import itertools -import os -import platform -import sys - -from lab.environments import LocalEnvironment, MaiaEnvironment -from lab.experiment import ARGPARSER -from lab.steps import Step - -from downward.experiments import DownwardExperiment, _get_rev_nick -from downward.checkouts import Translator, Preprocessor, Planner -from downward.reports.absolute import AbsoluteReport -from downward.reports.compare import CompareRevisionsReport -from downward.reports.scatter import ScatterPlotReport - - -def parse_args(): - ARGPARSER.add_argument( - "--test", - choices=["yes", "no", "auto"], - default="auto", - dest="test_run", - help="test experiment locally on a small suite if --test=yes or " - "--test=auto and we are not on a cluster") - return ARGPARSER.parse_args() - -ARGS = parse_args() - - -def get_script(): - """Get file name of main script.""" - import __main__ - return __main__.__file__ - - -def get_script_dir(): - """Get directory of main script. 
- - Usually a relative directory (depends on how it was called by the user.)""" - return os.path.dirname(get_script()) - - -def get_experiment_name(): - """Get name for experiment. - - Derived from the absolute filename of the main script, e.g. - "/ham/spam/eggs.py" => "spam-eggs".""" - script = os.path.abspath(get_script()) - script_dir = os.path.basename(os.path.dirname(script)) - script_base = os.path.splitext(os.path.basename(script))[0] - return "%s-%s" % (script_dir, script_base) - - -def get_data_dir(): - """Get data dir for the experiment. - - This is the subdirectory "data" of the directory containing - the main script.""" - return os.path.join(get_script_dir(), "data", get_experiment_name()) - - -def get_repo_base(): - """Get base directory of the repository, as an absolute path. - - Search upwards in the directory tree from the main script until a - directory with a subdirectory named ".hg" is found. - - Abort if the repo base cannot be found.""" - path = os.path.abspath(get_script_dir()) - while os.path.dirname(path) != path: - if os.path.exists(os.path.join(path, ".hg")): - return path - path = os.path.dirname(path) - sys.exit("repo base could not be found") - - -def is_running_on_cluster(): - node = platform.node() - return ("cluster" in node or - node.startswith("gkigrid") or - node in ["habakuk", "turtur"]) - - -def is_test_run(): - return ARGS.test_run == "yes" or (ARGS.test_run == "auto" and - not is_running_on_cluster()) - - -class IssueExperiment(DownwardExperiment): - """Wrapper for DownwardExperiment with a few convenience features.""" - - DEFAULT_TEST_SUITE = "gripper:prob01.pddl" - - DEFAULT_TABLE_ATTRIBUTES = [ - "cost", - "coverage", - "error", - "evaluations", - "expansions", - "expansions_until_last_jump", - "generated", - "memory", - "quality", - "run_dir", - "score_evaluations", - "score_expansions", - "score_generated", - "score_memory", - "score_search_time", - "score_total_time", - "search_time", - "total_time", - ] - - 
DEFAULT_SCATTER_PLOT_ATTRIBUTES = [ - "evaluations", - "expansions", - "expansions_until_last_jump", - "initial_h_value", - "memory", - "search_time", - "total_time", - ] - - PORTFOLIO_ATTRIBUTES = [ - "cost", - "coverage", - "plan_length", - ] - - def __init__(self, configs, suite, grid_priority=None, path=None, - repo=None, revisions=None, search_revisions=None, - test_suite=None, **kwargs): - """Create a DownwardExperiment with some convenience features. - - *configs* must be a non-empty dict of {nick: cmdline} pairs - that sets the planner configurations to test. :: - - IssueExperiment(configs={ - "lmcut": ["--search", "astar(lmcut())"], - "ipdb": ["--search", "astar(ipdb())"]}) - - *suite* sets the benchmarks for the experiment. It must be a - single string or a list of strings specifying domains or - tasks. The downward.suites module has many predefined - suites. :: - - IssueExperiment(suite=["grid", "gripper:prob01.pddl"]) - - from downward import suites - IssueExperiment(suite=suites.suite_all()) - IssueExperiment(suite=suites.suite_satisficing_with_ipc11()) - IssueExperiment(suite=suites.suite_optimal()) - - Use *grid_priority* to set the job priority for cluster - experiments. It must be in the range [-1023, 0] where 0 is the - highest priority. By default the priority is 0. :: - - IssueExperiment(grid_priority=-500) - - If *path* is specified, it must be the path to where the - experiment should be built (e.g. - /home/john/experiments/issue123/exp01/). If omitted, the - experiment path is derived automatically from the main - script's filename. Example:: - - script = experiments/issue123/exp01.py --> - path = experiments/issue123/data/issue123-exp01/ - - If *repo* is specified, it must be the path to the root of a - local Fast Downward repository. If omitted, the repository - is derived automatically from the main script's path. 
Example:: - - script = /path/to/fd-repo/experiments/issue123/exp01.py --> - repo = /path/to/fd-repo - - If *revisions* is specified, it should be a non-empty - list of revisions, which specify which planner versions to use - in the experiment. The same versions are used for translator, - preprocessor and search. :: - - IssueExperiment(revisions=["issue123", "4b3d581643"]) - - If *search_revisions* is specified, it should be a non-empty - list of revisions, which specify which search component - versions to use in the experiment. All runs use the - translator and preprocessor component of the first - revision. :: - - IssueExperiment(search_revisions=["default", "issue123"]) - - If you really need to specify the (translator, preprocessor, - planner) triples manually, use the *combinations* parameter - from the base class (might be deprecated soon). The options - *revisions*, *search_revisions* and *combinations* can be - freely mixed, but at least one of them must be given. - - Specify *test_suite* to set the benchmarks for experiment test - runs. By default the first gripper task is used. - - IssueExperiment(test_suite=["depot:pfile1", "tpp:p01.pddl"]) - - """ - - if is_test_run(): - kwargs["environment"] = LocalEnvironment() - suite = test_suite or self.DEFAULT_TEST_SUITE - elif "environment" not in kwargs: - kwargs["environment"] = MaiaEnvironment(priority=grid_priority) - - if path is None: - path = get_data_dir() - - if repo is None: - repo = get_repo_base() - - kwargs.setdefault("combinations", []) - - if not any([revisions, search_revisions, kwargs["combinations"]]): - raise ValueError('At least one of "revisions", "search_revisions" ' - 'or "combinations" must be given') - - if revisions: - kwargs["combinations"].extend([ - (Translator(repo, rev), - Preprocessor(repo, rev), - Planner(repo, rev)) - for rev in revisions]) - - if search_revisions: - base_rev = search_revisions[0] - # Use the same nick for all parts to get short revision nick. 
- kwargs["combinations"].extend([ - (Translator(repo, base_rev, nick=rev), - Preprocessor(repo, base_rev, nick=rev), - Planner(repo, rev, nick=rev)) - for rev in search_revisions]) - - DownwardExperiment.__init__(self, path=path, repo=repo, **kwargs) - - self._config_nicks = [] - for nick, config in configs.items(): - self.add_config(nick, config) - - self.add_suite(suite) - - @property - def revision_nicks(self): - # TODO: Once the add_algorithm() API is available we should get - # rid of the call to _get_rev_nick() and avoid inspecting the - # list of combinations by setting and saving the algorithm nicks. - return [_get_rev_nick(*combo) for combo in self.combinations] - - def add_config(self, nick, config, timeout=None): - DownwardExperiment.add_config(self, nick, config, timeout=timeout) - self._config_nicks.append(nick) - - def add_absolute_report_step(self, **kwargs): - """Add step that makes an absolute report. - - Absolute reports are useful for experiments that don't - compare revisions. - - The report is written to the experiment evaluation directory. - - All *kwargs* will be passed to the AbsoluteReport class. If - the keyword argument *attributes* is not specified, a - default list of attributes is used. :: - - exp.add_absolute_report_step(attributes=["coverage"]) - - """ - kwargs.setdefault("attributes", self.DEFAULT_TABLE_ATTRIBUTES) - report = AbsoluteReport(**kwargs) - outfile = get_experiment_name() + "." + report.output_format - self.add_report(report, outfile=outfile) - - def add_comparison_table_step(self, **kwargs): - """Add a step that makes pairwise revision comparisons. - - Create comparative reports for all pairs of Fast Downward - revision triples. Each report pairs up the runs of the same - config and lists the two absolute attribute values and their - difference for all attributes in kwargs["attributes"]. - - All *kwargs* will be passed to the CompareRevisionsReport - class. 
If the keyword argument *attributes* is not - specified, a default list of attributes is used. :: - - exp.add_comparison_table_step(attributes=["coverage"]) - - """ - kwargs.setdefault("attributes", self.DEFAULT_TABLE_ATTRIBUTES) - - def make_comparison_tables(): - for rev1, rev2 in itertools.combinations(self.revision_nicks, 2): - report = CompareRevisionsReport(rev1, rev2, **kwargs) - outfile = os.path.join(self.eval_dir, - "%s-%s-%s-compare.html" % - (self.name, rev1, rev2)) - report(self.eval_dir, outfile) - - self.add_step(Step("make-comparison-tables", make_comparison_tables)) - - def add_scatter_plot_step(self, attributes=None): - """Add a step that creates scatter plots for all revision pairs. - - Create a scatter plot for each combination of attribute, - configuration and revision pair. If *attributes* is not - specified, a list of common scatter plot attributes is used. - For portfolios all attributes except "cost", "coverage" and - "plan_length" will be ignored. :: - - exp.add_scatter_plot_step(attributes=["expansions"]) - - """ - if attributes is None: - attributes = self.DEFAULT_SCATTER_PLOT_ATTRIBUTES - scatter_dir = os.path.join(self.eval_dir, "scatter") - - def is_portfolio(config_nick): - return "fdss" in config_nick - - def make_scatter_plot(config_nick, rev1, rev2, attribute): - name = "-".join([self.name, rev1, rev2, attribute, config_nick]) - print "Make scatter plot for", name - algo1 = "%s-%s" % (rev1, config_nick) - algo2 = "%s-%s" % (rev2, config_nick) - report = ScatterPlotReport( - filter_config=[algo1, algo2], - attributes=[attribute], - get_category=lambda run1, run2: run1["domain"], - legend_location=(1.3, 0.5)) - report(self.eval_dir, - os.path.join(scatter_dir, rev1 + "-" + rev2, name)) - - def make_scatter_plots(): - for config_nick in self._config_nicks: - if is_portfolio(config_nick): - valid_attributes = [ - attr for attr in attributes - if attr in self.PORTFOLIO_ATTRIBUTES] - else: - valid_attributes = attributes - for rev1, 
rev2 in itertools.combinations( - self.revision_nicks, 2): - for attribute in valid_attributes: - make_scatter_plot(config_nick, rev1, rev2, attribute) - - self.add_step(Step("make-scatter-plots", make_scatter_plots)) diff --git a/experiments/issue533/exp1.py b/experiments/issue533/exp1.py deleted file mode 100755 index ba81d4ec63..0000000000 --- a/experiments/issue533/exp1.py +++ /dev/null @@ -1,124 +0,0 @@ -#! /usr/bin/env python -# -*- coding: utf-8 -*- - -from downward import suites - -import common_setup - - -CONFIGS = { - "blind": [ - "--search", - "astar(blind)"], - "ff": [ - "--heuristic", - "h=ff()", - "--search", - "eager_greedy(h, preferred=h)"], -} - -REVS = ["issue533-base", "issue533-v1", "issue533-v1-debug"] - -LIMITS = {"search_time": 300} - -# We define a suite that consists of (really) all domains because for -# translator issues like this one, it's interesting what we do in -# obscure cases like airport-adl. The following is simply a list of -# all domains that were in the benchmarks directory at the time of -# this writing. 
-SUITE = [ - "airport", - "airport-adl", - "assembly", - "barman-opt11-strips", - "barman-sat11-strips", - "blocks", - "depot", - "driverlog", - "elevators-opt08-strips", - "elevators-opt11-strips", - "elevators-sat08-strips", - "elevators-sat11-strips", - "floortile-opt11-strips", - "floortile-sat11-strips", - "freecell", - "grid", - "gripper", - "logistics00", - "logistics98", - "miconic", - "miconic-fulladl", - "miconic-simpleadl", - "movie", - "mprime", - "mystery", - "no-mprime", - "no-mystery", - "nomystery-opt11-strips", - "nomystery-sat11-strips", - "openstacks", - "openstacks-opt08-adl", - "openstacks-opt08-strips", - "openstacks-opt11-strips", - "openstacks-sat08-adl", - "openstacks-sat08-strips", - "openstacks-sat11-strips", - "openstacks-strips", - "optical-telegraphs", - "parcprinter-08-strips", - "parcprinter-opt11-strips", - "parcprinter-sat11-strips", - "parking-opt11-strips", - "parking-sat11-strips", - "pathways", - "pathways-noneg", - "pegsol-08-strips", - "pegsol-opt11-strips", - "pegsol-sat11-strips", - "philosophers", - "pipesworld-notankage", - "pipesworld-tankage", - "psr-large", - "psr-middle", - "psr-small", - "rovers", - "satellite", - "scanalyzer-08-strips", - "scanalyzer-opt11-strips", - "scanalyzer-sat11-strips", - "schedule", - "sokoban-opt08-strips", - "sokoban-opt11-strips", - "sokoban-sat08-strips", - "sokoban-sat11-strips", - "storage", - "tidybot-opt11-strips", - "tidybot-sat11-strips", - "tpp", - "transport-opt08-strips", - "transport-opt11-strips", - "transport-sat08-strips", - "transport-sat11-strips", - "trucks", - "trucks-strips", - "visitall-opt11-strips", - "visitall-sat11-strips", - "woodworking-opt08-strips", - "woodworking-opt11-strips", - "woodworking-sat08-strips", - "woodworking-sat11-strips", - "zenotravel", -] - -exp = common_setup.IssueExperiment( - revisions=REVS, - configs=CONFIGS, - suite=SUITE, - limits=LIMITS, - ) - -exp.add_comparison_table_step( - attributes=exp.DEFAULT_TABLE_ATTRIBUTES + - ["translate_*", 
"translator_*"]) - -exp() diff --git a/experiments/issue535/common_setup.py b/experiments/issue535/common_setup.py deleted file mode 100644 index 4dff4aacfd..0000000000 --- a/experiments/issue535/common_setup.py +++ /dev/null @@ -1,335 +0,0 @@ -# -*- coding: utf-8 -*- - -import itertools -import os -import platform -import subprocess -import sys - -from lab.experiment import ARGPARSER -from lab.steps import Step -from lab import tools - -from downward.experiment import FastDownwardExperiment -from downward.reports.absolute import AbsoluteReport -from downward.reports.compare import CompareConfigsReport -from downward.reports.scatter import ScatterPlotReport - -from relativescatter import RelativeScatterPlotReport - - -def parse_args(): - ARGPARSER.add_argument( - "--test", - choices=["yes", "no", "auto"], - default="auto", - dest="test_run", - help="test experiment locally on a small suite if --test=yes or " - "--test=auto and we are not on a cluster") - return ARGPARSER.parse_args() - -ARGS = parse_args() - - -def get_script(): - """Get file name of main script.""" - return tools.get_script_path() - - -def get_script_dir(): - """Get directory of main script. - - Usually a relative directory (depends on how it was called by the user.)""" - return os.path.dirname(get_script()) - - -def get_experiment_name(): - """Get name for experiment. - - Derived from the absolute filename of the main script, e.g. - "/ham/spam/eggs.py" => "spam-eggs".""" - script = os.path.abspath(get_script()) - script_dir = os.path.basename(os.path.dirname(script)) - script_base = os.path.splitext(os.path.basename(script))[0] - return "%s-%s" % (script_dir, script_base) - - -def get_data_dir(): - """Get data dir for the experiment. - - This is the subdirectory "data" of the directory containing - the main script.""" - return os.path.join(get_script_dir(), "data", get_experiment_name()) - - -def get_repo_base(): - """Get base directory of the repository, as an absolute path. 
- - Search upwards in the directory tree from the main script until a - directory with a subdirectory named ".hg" is found. - - Abort if the repo base cannot be found.""" - path = os.path.abspath(get_script_dir()) - while os.path.dirname(path) != path: - if os.path.exists(os.path.join(path, ".hg")): - return path - path = os.path.dirname(path) - sys.exit("repo base could not be found") - - -def is_running_on_cluster(): - node = platform.node() - return ( - "cluster" in node or - node.startswith("gkigrid") or - node in ["habakuk", "turtur"]) - - -def is_test_run(): - return ARGS.test_run == "yes" or ( - ARGS.test_run == "auto" and not is_running_on_cluster()) - - -def get_algo_nick(revision, config_nick): - return "{revision}-{config_nick}".format(**locals()) - - -class IssueConfig(object): - """Hold information about a planner configuration. - - See FastDownwardExperiment.add_algorithm() for documentation of the - constructor's options. - - """ - def __init__(self, nick, component_options, - build_options=None, driver_options=None): - self.nick = nick - self.component_options = component_options - self.build_options = build_options - self.driver_options = driver_options - - -class IssueExperiment(FastDownwardExperiment): - """Subclass of FastDownwardExperiment with some convenience features.""" - - DEFAULT_TEST_SUITE = ["gripper:prob01.pddl"] - - DEFAULT_TABLE_ATTRIBUTES = [ - "cost", - "coverage", - "error", - "evaluations", - "expansions", - "expansions_until_last_jump", - "generated", - "memory", - "quality", - "run_dir", - "score_evaluations", - "score_expansions", - "score_generated", - "score_memory", - "score_search_time", - "score_total_time", - "search_time", - "total_time", - ] - - DEFAULT_SCATTER_PLOT_ATTRIBUTES = [ - "evaluations", - "expansions", - "expansions_until_last_jump", - "initial_h_value", - "memory", - "search_time", - "total_time", - ] - - PORTFOLIO_ATTRIBUTES = [ - "cost", - "coverage", - "error", - "plan_length", - "run_dir", - ] - - def 
__init__(self, revisions=None, configs=None, path=None, **kwargs): - """ - - You can either specify both *revisions* and *configs* or none - of them. If they are omitted, you will need to call - exp.add_algorithm() manually. - - If *revisions* is given, it must be a non-empty list of - revision identifiers, which specify which planner versions to - use in the experiment. The same versions are used for - translator, preprocessor and search. :: - - IssueExperiment(revisions=["issue123", "4b3d581643"], ...) - - If *configs* is given, it must be a non-empty list of - IssueConfig objects. :: - - IssueExperiment(..., configs=[ - IssueConfig("ff", ["--search", "eager_greedy(ff())"]), - IssueConfig( - "lama", [], - driver_options=["--alias", "seq-sat-lama-2011"]), - ]) - - If *path* is specified, it must be the path to where the - experiment should be built (e.g. - /home/john/experiments/issue123/exp01/). If omitted, the - experiment path is derived automatically from the main - script's filename. 
Example:: - - script = experiments/issue123/exp01.py --> - path = experiments/issue123/data/issue123-exp01/ - - """ - - path = path or get_data_dir() - - FastDownwardExperiment.__init__(self, path=path, **kwargs) - - if (revisions and not configs) or (not revisions and configs): - raise ValueError( - "please provide either both or none of revisions and configs") - - for rev in revisions: - for config in configs: - self.add_algorithm( - get_algo_nick(rev, config.nick), - get_repo_base(), - rev, - config.component_options, - build_options=config.build_options, - driver_options=config.driver_options) - - self._revisions = revisions - self._configs = configs - - @classmethod - def _is_portfolio(cls, config_nick): - return "fdss" in config_nick - - @classmethod - def get_supported_attributes(cls, config_nick, attributes): - if cls._is_portfolio(config_nick): - return [attr for attr in attributes - if attr in cls.PORTFOLIO_ATTRIBUTES] - return attributes - - def add_absolute_report_step(self, **kwargs): - """Add step that makes an absolute report. - - Absolute reports are useful for experiments that don't compare - revisions. - - The report is written to the experiment evaluation directory. - - All *kwargs* will be passed to the AbsoluteReport class. If the - keyword argument *attributes* is not specified, a default list - of attributes is used. :: - - exp.add_absolute_report_step(attributes=["coverage"]) - - """ - kwargs.setdefault("attributes", self.DEFAULT_TABLE_ATTRIBUTES) - report = AbsoluteReport(**kwargs) - outfile = os.path.join( - self.eval_dir, - get_experiment_name() + "." + report.output_format) - self.add_report(report, outfile=outfile) - self.add_step(Step( - 'publish-absolute-report', subprocess.call, ['publish', outfile])) - - def add_comparison_table_step(self, **kwargs): - """Add a step that makes pairwise revision comparisons. - - Create comparative reports for all pairs of Fast Downward - revisions. 
Each report pairs up the runs of the same config and - lists the two absolute attribute values and their difference - for all attributes in kwargs["attributes"]. - - All *kwargs* will be passed to the CompareConfigsReport class. - If the keyword argument *attributes* is not specified, a - default list of attributes is used. :: - - exp.add_comparison_table_step(attributes=["coverage"]) - - """ - kwargs.setdefault("attributes", self.DEFAULT_TABLE_ATTRIBUTES) - - def make_comparison_tables(): - for rev1, rev2 in itertools.combinations(self._revisions, 2): - compared_configs = [] - for config in self._configs: - config_nick = config.nick - compared_configs.append( - ("%s-%s" % (rev1, config_nick), - "%s-%s" % (rev2, config_nick), - "Diff (%s)" % config_nick)) - report = CompareConfigsReport(compared_configs, **kwargs) - outfile = os.path.join( - self.eval_dir, - "%s-%s-%s-compare.%s" % ( - self.name, rev1, rev2, report.output_format)) - report(self.eval_dir, outfile) - - def publish_comparison_tables(): - for rev1, rev2 in itertools.combinations(self._revisions, 2): - outfile = os.path.join( - self.eval_dir, - "%s-%s-%s-compare.html" % (self.name, rev1, rev2)) - subprocess.call(["publish", outfile]) - - self.add_step(Step("make-comparison-tables", make_comparison_tables)) - self.add_step(Step( - "publish-comparison-tables", publish_comparison_tables)) - - def add_scatter_plot_step(self, relative=False, attributes=None): - """Add step creating (relative) scatter plots for all revision pairs. - - Create a scatter plot for each combination of attribute, - configuration and revisions pair. If *attributes* is not - specified, a list of common scatter plot attributes is used. - For portfolios all attributes except "cost", "coverage" and - "plan_length" will be ignored. 
:: - - exp.add_scatter_plot_step(attributes=["expansions"]) - - """ - if relative: - report_class = RelativeScatterPlotReport - scatter_dir = os.path.join(self.eval_dir, "scatter-relative") - step_name = "make-relative-scatter-plots" - else: - report_class = ScatterPlotReport - scatter_dir = os.path.join(self.eval_dir, "scatter-absolute") - step_name = "make-absolute-scatter-plots" - if attributes is None: - attributes = self.DEFAULT_SCATTER_PLOT_ATTRIBUTES - - def make_scatter_plot(config_nick, rev1, rev2, attribute): - name = "-".join([self.name, rev1, rev2, attribute, config_nick]) - print "Make scatter plot for", name - algo1 = "{}-{}".format(rev1, config_nick) - algo2 = "{}-{}".format(rev2, config_nick) - report = report_class( - filter_config=[algo1, algo2], - attributes=[attribute], - get_category=lambda run1, run2: run1["domain"], - legend_location=(1.3, 0.5)) - report( - self.eval_dir, - os.path.join(scatter_dir, rev1 + "-" + rev2, name)) - - def make_scatter_plots(): - for config in self._configs: - for rev1, rev2 in itertools.combinations(self._revisions, 2): - for attribute in self.get_supported_attributes( - config.nick, attributes): - make_scatter_plot(config.nick, rev1, rev2, attribute) - - self.add_step(Step(step_name, make_scatter_plots)) diff --git a/experiments/issue535/relativescatter.py b/experiments/issue535/relativescatter.py deleted file mode 100644 index 14d5d42752..0000000000 --- a/experiments/issue535/relativescatter.py +++ /dev/null @@ -1,106 +0,0 @@ -# -*- coding: utf-8 -*- - -from collections import defaultdict - -from matplotlib import ticker - -from downward.reports.scatter import ScatterPlotReport -from downward.reports.plot import PlotReport, Matplotlib, MatplotlibPlot - - -# TODO: handle outliers - -# TODO: this is mostly copied from ScatterMatplotlib (scatter.py) -class RelativeScatterMatplotlib(Matplotlib): - @classmethod - def _plot(cls, report, axes, categories, styles): - # Display grid - axes.grid(b=True, linestyle='-', 
color='0.75') - - has_points = False - # Generate the scatter plots - for category, coords in sorted(categories.items()): - X, Y = zip(*coords) - axes.scatter(X, Y, s=42, label=category, **styles[category]) - if X and Y: - has_points = True - - if report.xscale == 'linear' or report.yscale == 'linear': - plot_size = report.missing_val * 1.01 - else: - plot_size = report.missing_val * 1.25 - - # make 5 ticks above and below 1 - yticks = [] - tick_step = report.ylim_top**(1/5.0) - for i in xrange(-5, 6): - yticks.append(tick_step**i) - axes.set_yticks(yticks) - axes.get_yaxis().set_major_formatter(ticker.ScalarFormatter()) - - axes.set_xlim(report.xlim_left or -1, report.xlim_right or plot_size) - axes.set_ylim(report.ylim_bottom or -1, report.ylim_top or plot_size) - - for axis in [axes.xaxis, axes.yaxis]: - MatplotlibPlot.change_axis_formatter( - axis, - report.missing_val if report.show_missing else None) - return has_points - - -class RelativeScatterPlotReport(ScatterPlotReport): - """ - Generate a scatter plot that shows how a specific attribute in two - configurations. The attribute value in config 1 is shown on the - x-axis and the relation to the value in config 2 on the y-axis. - """ - - def __init__(self, show_missing=True, get_category=None, **kwargs): - ScatterPlotReport.__init__(self, show_missing, get_category, **kwargs) - if self.output_format == 'tex': - raise "not supported" - else: - self.writer = RelativeScatterMatplotlib - - def _fill_categories(self, runs): - # We discard the *runs* parameter. 
- # Map category names to value tuples - categories = defaultdict(list) - self.ylim_bottom = 2 - self.ylim_top = 0.5 - self.xlim_left = float("inf") - for (domain, problem), runs in self.problem_runs.items(): - if len(runs) != 2: - continue - run1, run2 = runs - assert (run1['config'] == self.configs[0] and - run2['config'] == self.configs[1]) - val1 = run1.get(self.attribute) - val2 = run2.get(self.attribute) - if val1 is None or val2 is None: - continue - category = self.get_category(run1, run2) - assert val1 > 0, (domain, problem, self.configs[0], val1) - assert val2 > 0, (domain, problem, self.configs[1], val2) - x = val1 - y = val2 / float(val1) - - categories[category].append((x, y)) - - self.ylim_top = max(self.ylim_top, y) - self.ylim_bottom = min(self.ylim_bottom, y) - self.xlim_left = min(self.xlim_left, x) - - # center around 1 - if self.ylim_bottom < 1: - self.ylim_top = max(self.ylim_top, 1 / float(self.ylim_bottom)) - if self.ylim_top > 1: - self.ylim_bottom = min(self.ylim_bottom, 1 / float(self.ylim_top)) - return categories - - def _set_scales(self, xscale, yscale): - # ScatterPlots use log-scaling on the x-axis by default. - default_xscale = 'log' - if self.attribute and self.attribute in self.LINEAR: - default_xscale = 'linear' - PlotReport._set_scales(self, xscale or default_xscale, 'log') diff --git a/experiments/issue535/v1.py b/experiments/issue535/v1.py deleted file mode 100755 index 8f88bd655e..0000000000 --- a/experiments/issue535/v1.py +++ /dev/null @@ -1,61 +0,0 @@ -#! 
/usr/bin/env python -# -*- coding: utf-8 -*- - -import os - -from lab.environments import LocalEnvironment, MaiaEnvironment - -from common_setup import IssueConfig, IssueExperiment, is_test_run - - -BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"] -REVISIONS = ["issue535-base", "issue535-v1"] -CONFIGS = [ - IssueConfig( - "lazy_greedy_ff", - ["--heuristic", "h=ff()", - "--search", "lazy_greedy(h, preferred=h)"]) -] -SUITE = [ - 'airport', 'assembly', 'barman-sat11-strips', 'barman-sat14-strips', - 'blocks', 'cavediving-14-adl', 'childsnack-sat14-strips', - 'citycar-sat14-adl', 'depot', 'driverlog', 'elevators-sat08-strips', - 'elevators-sat11-strips', 'floortile-sat11-strips', - 'floortile-sat14-strips', 'freecell', 'ged-sat14-strips', 'grid', - 'gripper', 'hiking-sat14-strips', 'logistics00', 'logistics98', - 'maintenance-sat14-adl', 'miconic', 'miconic-fulladl', - 'miconic-simpleadl', 'movie', 'mprime', 'mystery', - 'nomystery-sat11-strips', 'openstacks', 'openstacks-sat08-adl', - 'openstacks-sat08-strips', 'openstacks-sat11-strips', - 'openstacks-sat14-strips', 'openstacks-strips', 'optical-telegraphs', - 'parcprinter-08-strips', 'parcprinter-sat11-strips', - 'parking-sat11-strips', 'parking-sat14-strips', 'pathways', - 'pathways-noneg', 'pegsol-08-strips', 'pegsol-sat11-strips', - 'philosophers', 'pipesworld-notankage', 'pipesworld-tankage', - 'psr-large', 'psr-middle', 'psr-small', 'rovers', 'satellite', - 'scanalyzer-08-strips', 'scanalyzer-sat11-strips', 'schedule', - 'sokoban-sat08-strips', 'sokoban-sat11-strips', 'storage', - 'tetris-sat14-strips', 'thoughtful-sat14-strips', - 'tidybot-sat11-strips', 'tpp', 'transport-sat08-strips', - 'transport-sat11-strips', 'transport-sat14-strips', 'trucks', - 'trucks-strips', 'visitall-sat11-strips', 'visitall-sat14-strips', - 'woodworking-sat08-strips', 'woodworking-sat11-strips', 'zenotravel'] -ENVIRONMENT = MaiaEnvironment( - priority=0, email="jendrik.seipp@unibas.ch") - -if is_test_run(): - SUITE = 
IssueExperiment.DEFAULT_TEST_SUITE - ENVIRONMENT = LocalEnvironment(processes=1) - -exp = IssueExperiment( - revisions=REVISIONS, - configs=CONFIGS, - environment=ENVIRONMENT, -) -exp.add_suite(BENCHMARKS_DIR, SUITE) - -exp.add_absolute_report_step() -exp.add_comparison_table_step() -exp.add_scatter_plot_step(relative=True, attributes=["total_time"]) - -exp() diff --git a/experiments/issue535/v2.py b/experiments/issue535/v2.py deleted file mode 100755 index 9f297342f7..0000000000 --- a/experiments/issue535/v2.py +++ /dev/null @@ -1,61 +0,0 @@ -#! /usr/bin/env python -# -*- coding: utf-8 -*- - -import os - -from lab.environments import LocalEnvironment, MaiaEnvironment - -from common_setup import IssueConfig, IssueExperiment, is_test_run - - -BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"] -REVISIONS = ["issue535-v1", "issue535-v2"] -CONFIGS = [ - IssueConfig( - "lazy_greedy_ff", - ["--heuristic", "h=ff()", - "--search", "lazy_greedy(h, preferred=h)"]) -] -SUITE = [ - 'airport', 'assembly', 'barman-sat11-strips', 'barman-sat14-strips', - 'blocks', 'cavediving-14-adl', 'childsnack-sat14-strips', - 'citycar-sat14-adl', 'depot', 'driverlog', 'elevators-sat08-strips', - 'elevators-sat11-strips', 'floortile-sat11-strips', - 'floortile-sat14-strips', 'freecell', 'ged-sat14-strips', 'grid', - 'gripper', 'hiking-sat14-strips', 'logistics00', 'logistics98', - 'maintenance-sat14-adl', 'miconic', 'miconic-fulladl', - 'miconic-simpleadl', 'movie', 'mprime', 'mystery', - 'nomystery-sat11-strips', 'openstacks', 'openstacks-sat08-adl', - 'openstacks-sat08-strips', 'openstacks-sat11-strips', - 'openstacks-sat14-strips', 'openstacks-strips', 'optical-telegraphs', - 'parcprinter-08-strips', 'parcprinter-sat11-strips', - 'parking-sat11-strips', 'parking-sat14-strips', 'pathways', - 'pathways-noneg', 'pegsol-08-strips', 'pegsol-sat11-strips', - 'philosophers', 'pipesworld-notankage', 'pipesworld-tankage', - 'psr-large', 'psr-middle', 'psr-small', 'rovers', 'satellite', - 
'scanalyzer-08-strips', 'scanalyzer-sat11-strips', 'schedule', - 'sokoban-sat08-strips', 'sokoban-sat11-strips', 'storage', - 'tetris-sat14-strips', 'thoughtful-sat14-strips', - 'tidybot-sat11-strips', 'tpp', 'transport-sat08-strips', - 'transport-sat11-strips', 'transport-sat14-strips', 'trucks', - 'trucks-strips', 'visitall-sat11-strips', 'visitall-sat14-strips', - 'woodworking-sat08-strips', 'woodworking-sat11-strips', 'zenotravel'] -ENVIRONMENT = MaiaEnvironment( - priority=0, email="jendrik.seipp@unibas.ch") - -if is_test_run(): - SUITE = IssueExperiment.DEFAULT_TEST_SUITE - ENVIRONMENT = LocalEnvironment(processes=1) - -exp = IssueExperiment( - revisions=REVISIONS, - configs=CONFIGS, - environment=ENVIRONMENT, -) -exp.add_suite(BENCHMARKS_DIR, SUITE) - -exp.add_absolute_report_step() -exp.add_comparison_table_step() -exp.add_scatter_plot_step(relative=True, attributes=["total_time"]) - -exp() diff --git a/experiments/issue535/v3.py b/experiments/issue535/v3.py deleted file mode 100755 index 1c0fe6997f..0000000000 --- a/experiments/issue535/v3.py +++ /dev/null @@ -1,61 +0,0 @@ -#! 
/usr/bin/env python -# -*- coding: utf-8 -*- - -import os - -from lab.environments import LocalEnvironment, MaiaEnvironment - -from common_setup import IssueConfig, IssueExperiment, is_test_run - - -BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"] -REVISIONS = ["issue535-v2", "issue535-v3"] -CONFIGS = [ - IssueConfig( - "lazy_greedy_ff", - ["--heuristic", "h=ff()", - "--search", "lazy_greedy(h, preferred=h)"]) -] -SUITE = [ - 'airport', 'assembly', 'barman-sat11-strips', 'barman-sat14-strips', - 'blocks', 'cavediving-14-adl', 'childsnack-sat14-strips', - 'citycar-sat14-adl', 'depot', 'driverlog', 'elevators-sat08-strips', - 'elevators-sat11-strips', 'floortile-sat11-strips', - 'floortile-sat14-strips', 'freecell', 'ged-sat14-strips', 'grid', - 'gripper', 'hiking-sat14-strips', 'logistics00', 'logistics98', - 'maintenance-sat14-adl', 'miconic', 'miconic-fulladl', - 'miconic-simpleadl', 'movie', 'mprime', 'mystery', - 'nomystery-sat11-strips', 'openstacks', 'openstacks-sat08-adl', - 'openstacks-sat08-strips', 'openstacks-sat11-strips', - 'openstacks-sat14-strips', 'openstacks-strips', 'optical-telegraphs', - 'parcprinter-08-strips', 'parcprinter-sat11-strips', - 'parking-sat11-strips', 'parking-sat14-strips', 'pathways', - 'pathways-noneg', 'pegsol-08-strips', 'pegsol-sat11-strips', - 'philosophers', 'pipesworld-notankage', 'pipesworld-tankage', - 'psr-large', 'psr-middle', 'psr-small', 'rovers', 'satellite', - 'scanalyzer-08-strips', 'scanalyzer-sat11-strips', 'schedule', - 'sokoban-sat08-strips', 'sokoban-sat11-strips', 'storage', - 'tetris-sat14-strips', 'thoughtful-sat14-strips', - 'tidybot-sat11-strips', 'tpp', 'transport-sat08-strips', - 'transport-sat11-strips', 'transport-sat14-strips', 'trucks', - 'trucks-strips', 'visitall-sat11-strips', 'visitall-sat14-strips', - 'woodworking-sat08-strips', 'woodworking-sat11-strips', 'zenotravel'] -ENVIRONMENT = MaiaEnvironment( - priority=0, email="jendrik.seipp@unibas.ch") - -if is_test_run(): - SUITE = 
IssueExperiment.DEFAULT_TEST_SUITE - ENVIRONMENT = LocalEnvironment(processes=1) - -exp = IssueExperiment( - revisions=REVISIONS, - configs=CONFIGS, - environment=ENVIRONMENT, -) -exp.add_suite(BENCHMARKS_DIR, SUITE) - -exp.add_absolute_report_step() -exp.add_comparison_table_step() -exp.add_scatter_plot_step(relative=True, attributes=["total_time"]) - -exp() diff --git a/experiments/issue535/v4.py b/experiments/issue535/v4.py deleted file mode 100755 index e70e219fef..0000000000 --- a/experiments/issue535/v4.py +++ /dev/null @@ -1,61 +0,0 @@ -#! /usr/bin/env python -# -*- coding: utf-8 -*- - -import os - -from lab.environments import LocalEnvironment, MaiaEnvironment - -from common_setup import IssueConfig, IssueExperiment, is_test_run - - -BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"] -REVISIONS = ["issue535-v3", "issue535-v4"] -CONFIGS = [ - IssueConfig( - "lazy_greedy_ff", - ["--heuristic", "h=ff()", - "--search", "lazy_greedy(h, preferred=h)"]) -] -SUITE = [ - 'airport', 'assembly', 'barman-sat11-strips', 'barman-sat14-strips', - 'blocks', 'cavediving-14-adl', 'childsnack-sat14-strips', - 'citycar-sat14-adl', 'depot', 'driverlog', 'elevators-sat08-strips', - 'elevators-sat11-strips', 'floortile-sat11-strips', - 'floortile-sat14-strips', 'freecell', 'ged-sat14-strips', 'grid', - 'gripper', 'hiking-sat14-strips', 'logistics00', 'logistics98', - 'maintenance-sat14-adl', 'miconic', 'miconic-fulladl', - 'miconic-simpleadl', 'movie', 'mprime', 'mystery', - 'nomystery-sat11-strips', 'openstacks', 'openstacks-sat08-adl', - 'openstacks-sat08-strips', 'openstacks-sat11-strips', - 'openstacks-sat14-strips', 'openstacks-strips', 'optical-telegraphs', - 'parcprinter-08-strips', 'parcprinter-sat11-strips', - 'parking-sat11-strips', 'parking-sat14-strips', 'pathways', - 'pathways-noneg', 'pegsol-08-strips', 'pegsol-sat11-strips', - 'philosophers', 'pipesworld-notankage', 'pipesworld-tankage', - 'psr-large', 'psr-middle', 'psr-small', 'rovers', 'satellite', - 
'scanalyzer-08-strips', 'scanalyzer-sat11-strips', 'schedule', - 'sokoban-sat08-strips', 'sokoban-sat11-strips', 'storage', - 'tetris-sat14-strips', 'thoughtful-sat14-strips', - 'tidybot-sat11-strips', 'tpp', 'transport-sat08-strips', - 'transport-sat11-strips', 'transport-sat14-strips', 'trucks', - 'trucks-strips', 'visitall-sat11-strips', 'visitall-sat14-strips', - 'woodworking-sat08-strips', 'woodworking-sat11-strips', 'zenotravel'] -ENVIRONMENT = MaiaEnvironment( - priority=0, email="jendrik.seipp@unibas.ch") - -if is_test_run(): - SUITE = IssueExperiment.DEFAULT_TEST_SUITE - ENVIRONMENT = LocalEnvironment(processes=1) - -exp = IssueExperiment( - revisions=REVISIONS, - configs=CONFIGS, - environment=ENVIRONMENT, -) -exp.add_suite(BENCHMARKS_DIR, SUITE) - -exp.add_absolute_report_step() -exp.add_comparison_table_step() -exp.add_scatter_plot_step(relative=True, attributes=["total_time"]) - -exp() diff --git a/experiments/issue535/v5.py b/experiments/issue535/v5.py deleted file mode 100755 index 39d9628650..0000000000 --- a/experiments/issue535/v5.py +++ /dev/null @@ -1,64 +0,0 @@ -#! 
/usr/bin/env python -# -*- coding: utf-8 -*- - -import os - -from lab.environments import LocalEnvironment, MaiaEnvironment - -from common_setup import IssueConfig, IssueExperiment, is_test_run - - -BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"] -REVISIONS = ["issue535-base", "issue535-v5"] -CONFIGS = [ - IssueConfig( - "{search}_ff".format(**locals()), - ["--heuristic", "h=ff()", - "--search", "{search}(h, preferred=h)".format(**locals())]) - for search in ["lazy_greedy", "eager_greedy", "ehc"] -] + [ - IssueConfig("lama-first", [], driver_options=["--alias", "lama-first"]) -] -SUITE = [ - 'airport', 'assembly', 'barman-sat11-strips', 'barman-sat14-strips', - 'blocks', 'cavediving-14-adl', 'childsnack-sat14-strips', - 'citycar-sat14-adl', 'depot', 'driverlog', 'elevators-sat08-strips', - 'elevators-sat11-strips', 'floortile-sat11-strips', - 'floortile-sat14-strips', 'freecell', 'ged-sat14-strips', 'grid', - 'gripper', 'hiking-sat14-strips', 'logistics00', 'logistics98', - 'maintenance-sat14-adl', 'miconic', 'miconic-fulladl', - 'miconic-simpleadl', 'movie', 'mprime', 'mystery', - 'nomystery-sat11-strips', 'openstacks', 'openstacks-sat08-adl', - 'openstacks-sat08-strips', 'openstacks-sat11-strips', - 'openstacks-sat14-strips', 'openstacks-strips', 'optical-telegraphs', - 'parcprinter-08-strips', 'parcprinter-sat11-strips', - 'parking-sat11-strips', 'parking-sat14-strips', 'pathways', - 'pathways-noneg', 'pegsol-08-strips', 'pegsol-sat11-strips', - 'philosophers', 'pipesworld-notankage', 'pipesworld-tankage', - 'psr-large', 'psr-middle', 'psr-small', 'rovers', 'satellite', - 'scanalyzer-08-strips', 'scanalyzer-sat11-strips', 'schedule', - 'sokoban-sat08-strips', 'sokoban-sat11-strips', 'storage', - 'tetris-sat14-strips', 'thoughtful-sat14-strips', - 'tidybot-sat11-strips', 'tpp', 'transport-sat08-strips', - 'transport-sat11-strips', 'transport-sat14-strips', 'trucks', - 'trucks-strips', 'visitall-sat11-strips', 'visitall-sat14-strips', - 
'woodworking-sat08-strips', 'woodworking-sat11-strips', 'zenotravel'] -ENVIRONMENT = MaiaEnvironment( - priority=0, email="jendrik.seipp@unibas.ch") - -if is_test_run(): - SUITE = IssueExperiment.DEFAULT_TEST_SUITE - ENVIRONMENT = LocalEnvironment(processes=1) - -exp = IssueExperiment( - revisions=REVISIONS, - configs=CONFIGS, - environment=ENVIRONMENT, -) -exp.add_suite(BENCHMARKS_DIR, SUITE) - -exp.add_absolute_report_step() -exp.add_comparison_table_step() -exp.add_scatter_plot_step(relative=True, attributes=["total_time"]) - -exp() diff --git a/experiments/issue535/v6.py b/experiments/issue535/v6.py deleted file mode 100755 index b8bc1e9b88..0000000000 --- a/experiments/issue535/v6.py +++ /dev/null @@ -1,62 +0,0 @@ -#! /usr/bin/env python -# -*- coding: utf-8 -*- - -import os - -from lab.environments import LocalEnvironment, MaiaEnvironment - -from common_setup import IssueConfig, IssueExperiment, is_test_run - - -BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"] -REVISIONS = ["issue535-base", "issue535-v6"] -CONFIGS = [ - IssueConfig( - "ehc-ff-pref-{heuristic}-{preferred_usage}".format(**locals()), - ["--heuristic", "hff=ff()", "--search", "ehc(hff, preferred={heuristic}, preferred_usage={preferred_usage})".format(**locals())]) - for preferred_usage in ["RANK_PREFERRED_FIRST", "PRUNE_BY_PREFERRED"] - for heuristic in ["add()", "hff"] -] -SUITE = [ - 'airport', 'assembly', 'barman-sat11-strips', 'barman-sat14-strips', - 'blocks', 'cavediving-14-adl', 'childsnack-sat14-strips', - 'citycar-sat14-adl', 'depot', 'driverlog', 'elevators-sat08-strips', - 'elevators-sat11-strips', 'floortile-sat11-strips', - 'floortile-sat14-strips', 'freecell', 'ged-sat14-strips', 'grid', - 'gripper', 'hiking-sat14-strips', 'logistics00', 'logistics98', - 'maintenance-sat14-adl', 'miconic', 'miconic-fulladl', - 'miconic-simpleadl', 'movie', 'mprime', 'mystery', - 'nomystery-sat11-strips', 'openstacks', 'openstacks-sat08-adl', - 'openstacks-sat08-strips', 'openstacks-sat11-strips', 
- 'openstacks-sat14-strips', 'openstacks-strips', 'optical-telegraphs', - 'parcprinter-08-strips', 'parcprinter-sat11-strips', - 'parking-sat11-strips', 'parking-sat14-strips', 'pathways', - 'pathways-noneg', 'pegsol-08-strips', 'pegsol-sat11-strips', - 'philosophers', 'pipesworld-notankage', 'pipesworld-tankage', - 'psr-large', 'psr-middle', 'psr-small', 'rovers', 'satellite', - 'scanalyzer-08-strips', 'scanalyzer-sat11-strips', 'schedule', - 'sokoban-sat08-strips', 'sokoban-sat11-strips', 'storage', - 'tetris-sat14-strips', 'thoughtful-sat14-strips', - 'tidybot-sat11-strips', 'tpp', 'transport-sat08-strips', - 'transport-sat11-strips', 'transport-sat14-strips', 'trucks', - 'trucks-strips', 'visitall-sat11-strips', 'visitall-sat14-strips', - 'woodworking-sat08-strips', 'woodworking-sat11-strips', 'zenotravel'] -ENVIRONMENT = MaiaEnvironment( - priority=0, email="jendrik.seipp@unibas.ch") - -if is_test_run(): - SUITE = IssueExperiment.DEFAULT_TEST_SUITE - ENVIRONMENT = LocalEnvironment(processes=1) - -exp = IssueExperiment( - revisions=REVISIONS, - configs=CONFIGS, - environment=ENVIRONMENT, -) -exp.add_suite(BENCHMARKS_DIR, SUITE) - -exp.add_absolute_report_step() -exp.add_comparison_table_step() -exp.add_scatter_plot_step(relative=True, attributes=["total_time"]) - -exp() diff --git a/experiments/issue535/v7-randomized.py b/experiments/issue535/v7-randomized.py deleted file mode 100755 index 4008ed9e2f..0000000000 --- a/experiments/issue535/v7-randomized.py +++ /dev/null @@ -1,64 +0,0 @@ -#! 
/usr/bin/env python -# -*- coding: utf-8 -*- - -import os - -from lab.environments import LocalEnvironment, MaiaEnvironment - -from common_setup import IssueConfig, IssueExperiment, is_test_run - - -BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"] -REVISIONS = ["issue535-base", "issue535-v7"] -CONFIGS = [ - IssueConfig( - "lazy-ff-pref-{pref_first}-randomize-{randomize}-seed-{seed}".format(**locals()), - ["--heuristic", "hff=ff()", "--random-seed", str(seed), "--search", - "lazy_greedy(hff, preferred_successors_first={pref_first}, randomize_successors={randomize}, preferred=hff)".format(**locals())]) - for pref_first in [True] - for randomize in [True] - for seed in [0, 1, 2] -] -SUITE = [ - 'airport', 'assembly', 'barman-sat11-strips', 'barman-sat14-strips', - 'blocks', 'cavediving-14-adl', 'childsnack-sat14-strips', - 'citycar-sat14-adl', 'depot', 'driverlog', 'elevators-sat08-strips', - 'elevators-sat11-strips', 'floortile-sat11-strips', - 'floortile-sat14-strips', 'freecell', 'ged-sat14-strips', 'grid', - 'gripper', 'hiking-sat14-strips', 'logistics00', 'logistics98', - 'maintenance-sat14-adl', 'miconic', 'miconic-fulladl', - 'miconic-simpleadl', 'movie', 'mprime', 'mystery', - 'nomystery-sat11-strips', 'openstacks', 'openstacks-sat08-adl', - 'openstacks-sat08-strips', 'openstacks-sat11-strips', - 'openstacks-sat14-strips', 'openstacks-strips', 'optical-telegraphs', - 'parcprinter-08-strips', 'parcprinter-sat11-strips', - 'parking-sat11-strips', 'parking-sat14-strips', 'pathways', - 'pathways-noneg', 'pegsol-08-strips', 'pegsol-sat11-strips', - 'philosophers', 'pipesworld-notankage', 'pipesworld-tankage', - 'psr-large', 'psr-middle', 'psr-small', 'rovers', 'satellite', - 'scanalyzer-08-strips', 'scanalyzer-sat11-strips', 'schedule', - 'sokoban-sat08-strips', 'sokoban-sat11-strips', 'storage', - 'tetris-sat14-strips', 'thoughtful-sat14-strips', - 'tidybot-sat11-strips', 'tpp', 'transport-sat08-strips', - 'transport-sat11-strips', 'transport-sat14-strips', 
'trucks', - 'trucks-strips', 'visitall-sat11-strips', 'visitall-sat14-strips', - 'woodworking-sat08-strips', 'woodworking-sat11-strips', 'zenotravel'] -ENVIRONMENT = MaiaEnvironment( - priority=0, email="jendrik.seipp@unibas.ch") - -if is_test_run(): - SUITE = IssueExperiment.DEFAULT_TEST_SUITE - ENVIRONMENT = LocalEnvironment(processes=1) - -exp = IssueExperiment( - revisions=REVISIONS, - configs=CONFIGS, - environment=ENVIRONMENT, -) -exp.add_suite(BENCHMARKS_DIR, SUITE) - -exp.add_absolute_report_step() -exp.add_comparison_table_step() -exp.add_scatter_plot_step(relative=True, attributes=["total_time", "evaluations"]) - -exp() diff --git a/experiments/issue535/v7.py b/experiments/issue535/v7.py deleted file mode 100755 index b0f5b37ea2..0000000000 --- a/experiments/issue535/v7.py +++ /dev/null @@ -1,63 +0,0 @@ -#! /usr/bin/env python -# -*- coding: utf-8 -*- - -import os - -from lab.environments import LocalEnvironment, MaiaEnvironment - -from common_setup import IssueConfig, IssueExperiment, is_test_run - - -BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"] -REVISIONS = ["issue535-base", "issue535-v7"] -CONFIGS = [ - IssueConfig( - "lazy-ff-pref-{pref_first}-randomize-{randomize}".format(**locals()), - ["--heuristic", "hff=ff()", "--search", - "lazy_greedy(hff, preferred_successors_first={pref_first}, randomize_successors={randomize}, preferred=hff)".format(**locals())]) - for pref_first in [False, True] - for randomize in [False, True] -] -SUITE = [ - 'airport', 'assembly', 'barman-sat11-strips', 'barman-sat14-strips', - 'blocks', 'cavediving-14-adl', 'childsnack-sat14-strips', - 'citycar-sat14-adl', 'depot', 'driverlog', 'elevators-sat08-strips', - 'elevators-sat11-strips', 'floortile-sat11-strips', - 'floortile-sat14-strips', 'freecell', 'ged-sat14-strips', 'grid', - 'gripper', 'hiking-sat14-strips', 'logistics00', 'logistics98', - 'maintenance-sat14-adl', 'miconic', 'miconic-fulladl', - 'miconic-simpleadl', 'movie', 'mprime', 'mystery', - 
'nomystery-sat11-strips', 'openstacks', 'openstacks-sat08-adl', - 'openstacks-sat08-strips', 'openstacks-sat11-strips', - 'openstacks-sat14-strips', 'openstacks-strips', 'optical-telegraphs', - 'parcprinter-08-strips', 'parcprinter-sat11-strips', - 'parking-sat11-strips', 'parking-sat14-strips', 'pathways', - 'pathways-noneg', 'pegsol-08-strips', 'pegsol-sat11-strips', - 'philosophers', 'pipesworld-notankage', 'pipesworld-tankage', - 'psr-large', 'psr-middle', 'psr-small', 'rovers', 'satellite', - 'scanalyzer-08-strips', 'scanalyzer-sat11-strips', 'schedule', - 'sokoban-sat08-strips', 'sokoban-sat11-strips', 'storage', - 'tetris-sat14-strips', 'thoughtful-sat14-strips', - 'tidybot-sat11-strips', 'tpp', 'transport-sat08-strips', - 'transport-sat11-strips', 'transport-sat14-strips', 'trucks', - 'trucks-strips', 'visitall-sat11-strips', 'visitall-sat14-strips', - 'woodworking-sat08-strips', 'woodworking-sat11-strips', 'zenotravel'] -ENVIRONMENT = MaiaEnvironment( - priority=0, email="jendrik.seipp@unibas.ch") - -if is_test_run(): - SUITE = IssueExperiment.DEFAULT_TEST_SUITE - ENVIRONMENT = LocalEnvironment(processes=1) - -exp = IssueExperiment( - revisions=REVISIONS, - configs=CONFIGS, - environment=ENVIRONMENT, -) -exp.add_suite(BENCHMARKS_DIR, SUITE) - -exp.add_absolute_report_step() -exp.add_comparison_table_step() -exp.add_scatter_plot_step(relative=True, attributes=["total_time"]) - -exp() diff --git a/experiments/issue536/common_setup.py b/experiments/issue536/common_setup.py deleted file mode 100644 index 6989ef8686..0000000000 --- a/experiments/issue536/common_setup.py +++ /dev/null @@ -1,357 +0,0 @@ -# -*- coding: utf-8 -*- - -import itertools -import os -import platform -import sys - -from lab.environments import LocalEnvironment, MaiaEnvironment -from lab.experiment import ARGPARSER -from lab.steps import Step - -from downward.experiments import DownwardExperiment, _get_rev_nick -from downward.checkouts import Translator, Preprocessor, Planner -from 
downward.reports.absolute import AbsoluteReport -from downward.reports.compare import CompareRevisionsReport -from downward.reports.scatter import ScatterPlotReport - - -def parse_args(): - ARGPARSER.add_argument( - "--test", - choices=["yes", "no", "auto"], - default="auto", - dest="test_run", - help="test experiment locally on a small suite if --test=yes or " - "--test=auto and we are not on a cluster") - return ARGPARSER.parse_args() - -ARGS = parse_args() - - -def get_script(): - """Get file name of main script.""" - import __main__ - return __main__.__file__ - - -def get_script_dir(): - """Get directory of main script. - - Usually a relative directory (depends on how it was called by the user.)""" - return os.path.dirname(get_script()) - - -def get_experiment_name(): - """Get name for experiment. - - Derived from the absolute filename of the main script, e.g. - "/ham/spam/eggs.py" => "spam-eggs".""" - script = os.path.abspath(get_script()) - script_dir = os.path.basename(os.path.dirname(script)) - script_base = os.path.splitext(os.path.basename(script))[0] - return "%s-%s" % (script_dir, script_base) - - -def get_data_dir(): - """Get data dir for the experiment. - - This is the subdirectory "data" of the directory containing - the main script.""" - return os.path.join(get_script_dir(), "data", get_experiment_name()) - - -def get_repo_base(): - """Get base directory of the repository, as an absolute path. - - Search upwards in the directory tree from the main script until a - directory with a subdirectory named ".hg" is found. 
- - Abort if the repo base cannot be found.""" - path = os.path.abspath(get_script_dir()) - while os.path.dirname(path) != path: - if os.path.exists(os.path.join(path, ".hg")): - return path - path = os.path.dirname(path) - sys.exit("repo base could not be found") - - -def is_running_on_cluster(): - node = platform.node() - return ("cluster" in node or - node.startswith("gkigrid") or - node in ["habakuk", "turtur"]) - - -def is_test_run(): - return ARGS.test_run == "yes" or (ARGS.test_run == "auto" and - not is_running_on_cluster()) - - -class IssueExperiment(DownwardExperiment): - """Wrapper for DownwardExperiment with a few convenience features.""" - - DEFAULT_TEST_SUITE = "gripper:prob01.pddl" - - DEFAULT_TABLE_ATTRIBUTES = [ - "cost", - "coverage", - "error", - "evaluations", - "expansions", - "expansions_until_last_jump", - "generated", - "memory", - "quality", - "run_dir", - "score_evaluations", - "score_expansions", - "score_generated", - "score_memory", - "score_search_time", - "score_total_time", - "search_time", - "total_time", - ] - - DEFAULT_SCATTER_PLOT_ATTRIBUTES = [ - "evaluations", - "expansions", - "expansions_until_last_jump", - "initial_h_value", - "memory", - "search_time", - "total_time", - ] - - PORTFOLIO_ATTRIBUTES = [ - "cost", - "coverage", - "error", - "plan_length", - "run_dir", - ] - - def __init__(self, configs, suite, grid_priority=None, path=None, - repo=None, revisions=None, search_revisions=None, - test_suite=None, **kwargs): - """Create a DownwardExperiment with some convenience features. - - *configs* must be a non-empty dict of {nick: cmdline} pairs - that sets the planner configurations to test. :: - - IssueExperiment(configs={ - "lmcut": ["--search", "astar(lmcut())"], - "ipdb": ["--search", "astar(ipdb())"]}) - - *suite* sets the benchmarks for the experiment. It must be a - single string or a list of strings specifying domains or - tasks. The downward.suites module has many predefined - suites. 
:: - - IssueExperiment(suite=["grid", "gripper:prob01.pddl"]) - - from downward import suites - IssueExperiment(suite=suites.suite_all()) - IssueExperiment(suite=suites.suite_satisficing_with_ipc11()) - IssueExperiment(suite=suites.suite_optimal()) - - Use *grid_priority* to set the job priority for cluster - experiments. It must be in the range [-1023, 0] where 0 is the - highest priority. By default the priority is 0. :: - - IssueExperiment(grid_priority=-500) - - If *path* is specified, it must be the path to where the - experiment should be built (e.g. - /home/john/experiments/issue123/exp01/). If omitted, the - experiment path is derived automatically from the main - script's filename. Example:: - - script = experiments/issue123/exp01.py --> - path = experiments/issue123/data/issue123-exp01/ - - If *repo* is specified, it must be the path to the root of a - local Fast Downward repository. If omitted, the repository - is derived automatically from the main script's path. Example:: - - script = /path/to/fd-repo/experiments/issue123/exp01.py --> - repo = /path/to/fd-repo - - If *revisions* is specified, it should be a non-empty - list of revisions, which specify which planner versions to use - in the experiment. The same versions are used for translator, - preprocessor and search. :: - - IssueExperiment(revisions=["issue123", "4b3d581643"]) - - If *search_revisions* is specified, it should be a non-empty - list of revisions, which specify which search component - versions to use in the experiment. All runs use the - translator and preprocessor component of the first - revision. :: - - IssueExperiment(search_revisions=["default", "issue123"]) - - If you really need to specify the (translator, preprocessor, - planner) triples manually, use the *combinations* parameter - from the base class (might be deprecated soon). The options - *revisions*, *search_revisions* and *combinations* can be - freely mixed, but at least one of them must be given. 
- - Specify *test_suite* to set the benchmarks for experiment test - runs. By default the first gripper task is used. - - IssueExperiment(test_suite=["depot:pfile1", "tpp:p01.pddl"]) - - """ - - if is_test_run(): - kwargs["environment"] = LocalEnvironment() - suite = test_suite or self.DEFAULT_TEST_SUITE - elif "environment" not in kwargs: - kwargs["environment"] = MaiaEnvironment(priority=grid_priority) - - if path is None: - path = get_data_dir() - - if repo is None: - repo = get_repo_base() - - kwargs.setdefault("combinations", []) - - if not any([revisions, search_revisions, kwargs["combinations"]]): - raise ValueError('At least one of "revisions", "search_revisions" ' - 'or "combinations" must be given') - - if revisions: - kwargs["combinations"].extend([ - (Translator(repo, rev), - Preprocessor(repo, rev), - Planner(repo, rev)) - for rev in revisions]) - - if search_revisions: - base_rev = search_revisions[0] - # Use the same nick for all parts to get short revision nick. - kwargs["combinations"].extend([ - (Translator(repo, base_rev, nick=rev), - Preprocessor(repo, base_rev, nick=rev), - Planner(repo, rev, nick=rev)) - for rev in search_revisions]) - - DownwardExperiment.__init__(self, path=path, repo=repo, **kwargs) - - self._config_nicks = [] - for nick, config in configs.items(): - self.add_config(nick, config) - - self.add_suite(suite) - - @property - def revision_nicks(self): - # TODO: Once the add_algorithm() API is available we should get - # rid of the call to _get_rev_nick() and avoid inspecting the - # list of combinations by setting and saving the algorithm nicks. 
- return [_get_rev_nick(*combo) for combo in self.combinations] - - @classmethod - def _is_portfolio(cls, config_nick): - return "fdss" in config_nick - - @classmethod - def get_supported_attributes(cls, config_nick, attributes): - if cls._is_portfolio(config_nick): - return [attr for attr in attributes - if attr in cls.PORTFOLIO_ATTRIBUTES] - return attributes - - def add_config(self, nick, config, timeout=None): - DownwardExperiment.add_config(self, nick, config, timeout=timeout) - self._config_nicks.append(nick) - - def add_absolute_report_step(self, **kwargs): - """Add step that makes an absolute report. - - Absolute reports are useful for experiments that don't - compare revisions. - - The report is written to the experiment evaluation directory. - - All *kwargs* will be passed to the AbsoluteReport class. If - the keyword argument *attributes* is not specified, a - default list of attributes is used. :: - - exp.add_absolute_report_step(attributes=["coverage"]) - - """ - kwargs.setdefault("attributes", self.DEFAULT_TABLE_ATTRIBUTES) - report = AbsoluteReport(**kwargs) - outfile = get_experiment_name() + "." + report.output_format - self.add_report(report, outfile=outfile) - - def add_comparison_table_step(self, **kwargs): - """Add a step that makes pairwise revision comparisons. - - Create comparative reports for all pairs of Fast Downward - revision triples. Each report pairs up the runs of the same - config and lists the two absolute attribute values and their - difference for all attributes in kwargs["attributes"]. - - All *kwargs* will be passed to the CompareRevisionsReport - class. If the keyword argument *attributes* is not - specified, a default list of attributes is used. 
:: - - exp.add_comparison_table_step(attributes=["coverage"]) - - """ - kwargs.setdefault("attributes", self.DEFAULT_TABLE_ATTRIBUTES) - - def make_comparison_tables(): - for rev1, rev2 in itertools.combinations(self.revision_nicks, 2): - report = CompareRevisionsReport(rev1, rev2, **kwargs) - outfile = os.path.join(self.eval_dir, - "%s-%s-%s-compare.html" % - (self.name, rev1, rev2)) - report(self.eval_dir, outfile) - - self.add_step(Step("make-comparison-tables", make_comparison_tables)) - - def add_scatter_plot_step(self, attributes=None): - """Add a step that creates scatter plots for all revision pairs. - - Create a scatter plot for each combination of attribute, - configuration and revision pair. If *attributes* is not - specified, a list of common scatter plot attributes is used. - For portfolios all attributes except "cost", "coverage" and - "plan_length" will be ignored. :: - - exp.add_scatter_plot_step(attributes=["expansions"]) - - """ - if attributes is None: - attributes = self.DEFAULT_SCATTER_PLOT_ATTRIBUTES - scatter_dir = os.path.join(self.eval_dir, "scatter") - - def make_scatter_plot(config_nick, rev1, rev2, attribute): - name = "-".join([self.name, rev1, rev2, attribute, config_nick]) - print "Make scatter plot for", name - algo1 = "%s-%s" % (rev1, config_nick) - algo2 = "%s-%s" % (rev2, config_nick) - report = ScatterPlotReport( - filter_config=[algo1, algo2], - attributes=[attribute], - get_category=lambda run1, run2: run1["domain"], - legend_location=(1.3, 0.5)) - report(self.eval_dir, - os.path.join(scatter_dir, rev1 + "-" + rev2, name)) - - def make_scatter_plots(): - for config_nick in self._config_nicks: - for rev1, rev2 in itertools.combinations( - self.revision_nicks, 2): - for attribute in self.get_supported_attributes( - config_nick, attributes): - make_scatter_plot(config_nick, rev1, rev2, attribute) - - self.add_step(Step("make-scatter-plots", make_scatter_plots)) diff --git a/experiments/issue536/ipdb.py 
b/experiments/issue536/ipdb.py deleted file mode 100755 index 9e5ad69faa..0000000000 --- a/experiments/issue536/ipdb.py +++ /dev/null @@ -1,25 +0,0 @@ -#! /usr/bin/env python -# -*- coding: utf-8 -*- - -from downward import suites - -import common_setup - - -REVS = ["issue536-base", "issue536-v1", "issue536-v2"] -LIMITS = {"search_time": 1800} -SUITE = suites.suite_optimal_with_ipc11() - -CONFIGS = { - "ipdb": ["--search", "astar(ipdb())"], -} - -exp = common_setup.IssueExperiment( - search_revisions=REVS, - configs=CONFIGS, - suite=SUITE, - limits=LIMITS, - ) -exp.add_absolute_report_step() - -exp() diff --git a/experiments/issue540/common_setup.py b/experiments/issue540/common_setup.py deleted file mode 100644 index 90e985c980..0000000000 --- a/experiments/issue540/common_setup.py +++ /dev/null @@ -1,352 +0,0 @@ -# -*- coding: utf-8 -*- - -import itertools -import os -import platform -import sys - -from lab.environments import LocalEnvironment, MaiaEnvironment -from lab.experiment import ARGPARSER -from lab.steps import Step - -from downward.experiments import DownwardExperiment, _get_rev_nick -from downward.checkouts import Translator, Preprocessor, Planner -from downward.reports.absolute import AbsoluteReport -from downward.reports.compare import CompareRevisionsReport -from downward.reports.scatter import ScatterPlotReport - - -def parse_args(): - ARGPARSER.add_argument( - "--test", - choices=["yes", "no", "auto"], - default="auto", - dest="test_run", - help="test experiment locally on a small suite if --test=yes or " - "--test=auto and we are not on a cluster") - return ARGPARSER.parse_args() - -ARGS = parse_args() - - -def get_script(): - """Get file name of main script.""" - import __main__ - return __main__.__file__ - - -def get_script_dir(): - """Get directory of main script. - - Usually a relative directory (depends on how it was called by the user.)""" - return os.path.dirname(get_script()) - - -def get_experiment_name(): - """Get name for experiment. 
- - Derived from the absolute filename of the main script, e.g. - "/ham/spam/eggs.py" => "spam-eggs".""" - script = os.path.abspath(get_script()) - script_dir = os.path.basename(os.path.dirname(script)) - script_base = os.path.splitext(os.path.basename(script))[0] - return "%s-%s" % (script_dir, script_base) - - -def get_data_dir(): - """Get data dir for the experiment. - - This is the subdirectory "data" of the directory containing - the main script.""" - return os.path.join(get_script_dir(), "data", get_experiment_name()) - - -def get_repo_base(): - """Get base directory of the repository, as an absolute path. - - Search upwards in the directory tree from the main script until a - directory with a subdirectory named ".hg" is found. - - Abort if the repo base cannot be found.""" - path = os.path.abspath(get_script_dir()) - while os.path.dirname(path) != path: - if os.path.exists(os.path.join(path, ".hg")): - return path - path = os.path.dirname(path) - sys.exit("repo base could not be found") - - -def is_running_on_cluster(): - node = platform.node() - return ("cluster" in node or - node.startswith("gkigrid") or - node in ["habakuk", "turtur"]) - - -def is_test_run(): - return ARGS.test_run == "yes" or (ARGS.test_run == "auto" and - not is_running_on_cluster()) - - -class IssueExperiment(DownwardExperiment): - """Wrapper for DownwardExperiment with a few convenience features.""" - - DEFAULT_TEST_SUITE = "gripper:prob01.pddl" - - DEFAULT_TABLE_ATTRIBUTES = [ - "cost", - "coverage", - "error", - "evaluations", - "expansions", - "expansions_until_last_jump", - "generated", - "memory", - "quality", - "run_dir", - "score_evaluations", - "score_expansions", - "score_generated", - "score_memory", - "score_search_time", - "score_total_time", - "search_time", - "total_time", - ] - - DEFAULT_SCATTER_PLOT_ATTRIBUTES = [ - "evaluations", - "expansions", - "expansions_until_last_jump", - "initial_h_value", - "memory", - "search_time", - "total_time", - ] - - 
PORTFOLIO_ATTRIBUTES = [ - "cost", - "coverage", - "plan_length", - ] - - def __init__(self, configs, suite, grid_priority=None, path=None, - repo=None, revisions=None, search_revisions=None, - test_suite=None, **kwargs): - """Create a DownwardExperiment with some convenience features. - - *configs* must be a non-empty dict of {nick: cmdline} pairs - that sets the planner configurations to test. :: - - IssueExperiment(configs={ - "lmcut": ["--search", "astar(lmcut())"], - "ipdb": ["--search", "astar(ipdb())"]}) - - *suite* sets the benchmarks for the experiment. It must be a - single string or a list of strings specifying domains or - tasks. The downward.suites module has many predefined - suites. :: - - IssueExperiment(suite=["grid", "gripper:prob01.pddl"]) - - from downward import suites - IssueExperiment(suite=suites.suite_all()) - IssueExperiment(suite=suites.suite_satisficing_with_ipc11()) - IssueExperiment(suite=suites.suite_optimal()) - - Use *grid_priority* to set the job priority for cluster - experiments. It must be in the range [-1023, 0] where 0 is the - highest priority. By default the priority is 0. :: - - IssueExperiment(grid_priority=-500) - - If *path* is specified, it must be the path to where the - experiment should be built (e.g. - /home/john/experiments/issue123/exp01/). If omitted, the - experiment path is derived automatically from the main - script's filename. Example:: - - script = experiments/issue123/exp01.py --> - path = experiments/issue123/data/issue123-exp01/ - - If *repo* is specified, it must be the path to the root of a - local Fast Downward repository. If omitted, the repository - is derived automatically from the main script's path. Example:: - - script = /path/to/fd-repo/experiments/issue123/exp01.py --> - repo = /path/to/fd-repo - - If *revisions* is specified, it should be a non-empty - list of revisions, which specify which planner versions to use - in the experiment. 
The same versions are used for translator, - preprocessor and search. :: - - IssueExperiment(revisions=["issue123", "4b3d581643"]) - - If *search_revisions* is specified, it should be a non-empty - list of revisions, which specify which search component - versions to use in the experiment. All runs use the - translator and preprocessor component of the first - revision. :: - - IssueExperiment(search_revisions=["default", "issue123"]) - - If you really need to specify the (translator, preprocessor, - planner) triples manually, use the *combinations* parameter - from the base class (might be deprecated soon). The options - *revisions*, *search_revisions* and *combinations* can be - freely mixed, but at least one of them must be given. - - Specify *test_suite* to set the benchmarks for experiment test - runs. By default the first gripper task is used. - - IssueExperiment(test_suite=["depot:pfile1", "tpp:p01.pddl"]) - - """ - - if is_test_run(): - kwargs["environment"] = LocalEnvironment() - suite = test_suite or self.DEFAULT_TEST_SUITE - elif "environment" not in kwargs: - kwargs["environment"] = MaiaEnvironment(priority=grid_priority) - - if path is None: - path = get_data_dir() - - if repo is None: - repo = get_repo_base() - - kwargs.setdefault("combinations", []) - - if not any([revisions, search_revisions, kwargs["combinations"]]): - raise ValueError('At least one of "revisions", "search_revisions" ' - 'or "combinations" must be given') - - if revisions: - kwargs["combinations"].extend([ - (Translator(repo, rev), - Preprocessor(repo, rev), - Planner(repo, rev)) - for rev in revisions]) - - if search_revisions: - base_rev = search_revisions[0] - # Use the same nick for all parts to get short revision nick. 
- kwargs["combinations"].extend([ - (Translator(repo, base_rev, nick=rev), - Preprocessor(repo, base_rev, nick=rev), - Planner(repo, rev, nick=rev)) - for rev in search_revisions]) - - DownwardExperiment.__init__(self, path=path, repo=repo, **kwargs) - - self._config_nicks = [] - for nick, config in configs.items(): - self.add_config(nick, config) - - self.add_suite(suite) - - @property - def revision_nicks(self): - # TODO: Once the add_algorithm() API is available we should get - # rid of the call to _get_rev_nick() and avoid inspecting the - # list of combinations by setting and saving the algorithm nicks. - return [_get_rev_nick(*combo) for combo in self.combinations] - - def add_config(self, nick, config, timeout=None): - DownwardExperiment.add_config(self, nick, config, timeout=timeout) - self._config_nicks.append(nick) - - def add_absolute_report_step(self, **kwargs): - """Add step that makes an absolute report. - - Absolute reports are useful for experiments that don't - compare revisions. - - The report is written to the experiment evaluation directory. - - All *kwargs* will be passed to the AbsoluteReport class. If - the keyword argument *attributes* is not specified, a - default list of attributes is used. :: - - exp.add_absolute_report_step(attributes=["coverage"]) - - """ - kwargs.setdefault("attributes", self.DEFAULT_TABLE_ATTRIBUTES) - report = AbsoluteReport(**kwargs) - outfile = get_experiment_name() + "." + report.output_format - self.add_report(report, outfile=outfile) - - def add_comparison_table_step(self, **kwargs): - """Add a step that makes pairwise revision comparisons. - - Create comparative reports for all pairs of Fast Downward - revision triples. Each report pairs up the runs of the same - config and lists the two absolute attribute values and their - difference for all attributes in kwargs["attributes"]. - - All *kwargs* will be passed to the CompareRevisionsReport - class. 
If the keyword argument *attributes* is not - specified, a default list of attributes is used. :: - - exp.add_comparison_table_step(attributes=["coverage"]) - - """ - kwargs.setdefault("attributes", self.DEFAULT_TABLE_ATTRIBUTES) - - def make_comparison_tables(): - for rev1, rev2 in itertools.combinations(self.revision_nicks, 2): - report = CompareRevisionsReport(rev1, rev2, **kwargs) - outfile = os.path.join(self.eval_dir, - "%s-%s-%s-compare.html" % - (self.name, rev1, rev2)) - report(self.eval_dir, outfile) - - self.add_step(Step("make-comparison-tables", make_comparison_tables)) - - def add_scatter_plot_step(self, attributes=None): - """Add a step that creates scatter plots for all revision pairs. - - Create a scatter plot for each combination of attribute, - configuration and revision pair. If *attributes* is not - specified, a list of common scatter plot attributes is used. - For portfolios all attributes except "cost", "coverage" and - "plan_length" will be ignored. :: - - exp.add_scatter_plot_step(attributes=["expansions"]) - - """ - if attributes is None: - attributes = self.DEFAULT_SCATTER_PLOT_ATTRIBUTES - scatter_dir = os.path.join(self.eval_dir, "scatter") - - def is_portfolio(config_nick): - return "fdss" in config_nick - - def make_scatter_plot(config_nick, rev1, rev2, attribute): - name = "-".join([self.name, rev1, rev2, attribute, config_nick]) - print "Make scatter plot for", name - algo1 = "%s-%s" % (rev1, config_nick) - algo2 = "%s-%s" % (rev2, config_nick) - report = ScatterPlotReport( - filter_config=[algo1, algo2], - attributes=[attribute], - get_category=lambda run1, run2: run1["domain"], - legend_location=(1.3, 0.5)) - report(self.eval_dir, - os.path.join(scatter_dir, rev1 + "-" + rev2, name)) - - def make_scatter_plots(): - for config_nick in self._config_nicks: - if is_portfolio(config_nick): - valid_attributes = [ - attr for attr in attributes - if attr in self.PORTFOLIO_ATTRIBUTES] - else: - valid_attributes = attributes - for rev1, 
rev2 in itertools.combinations( - self.revision_nicks, 2): - for attribute in valid_attributes: - make_scatter_plot(config_nick, rev1, rev2, attribute) - - self.add_step(Step("make-scatter-plots", make_scatter_plots)) diff --git a/experiments/issue540/v1-opt.py b/experiments/issue540/v1-opt.py deleted file mode 100755 index 9cb1b2a592..0000000000 --- a/experiments/issue540/v1-opt.py +++ /dev/null @@ -1,25 +0,0 @@ -#! /usr/bin/env python -# -*- coding: utf-8 -*- - -from downward import suites - -import common_setup - - -REVS = ["issue540-base", "issue540-v1"] -LIMITS = {"search_time": 300} -SUITE = suites.suite_optimal_with_ipc11() -CONFIGS = { - "blind": ["--search", "astar(blind())"], - "ipdb": ["--search", "astar(ipdb(max_time=150))"], -} - -exp = common_setup.IssueExperiment( - search_revisions=REVS, - configs=CONFIGS, - suite=SUITE, - limits=LIMITS, - ) -exp.add_comparison_table_step() - -exp() diff --git a/experiments/issue540/v1-sat.py b/experiments/issue540/v1-sat.py deleted file mode 100755 index 3cc817faa1..0000000000 --- a/experiments/issue540/v1-sat.py +++ /dev/null @@ -1,36 +0,0 @@ -#! 
/usr/bin/env python -# -*- coding: utf-8 -*- - -from downward import suites - -import common_setup - - -REVS = ["issue540-base", "issue540-v1"] -LIMITS = {"search_time": 300} -SUITE = suites.suite_satisficing_with_ipc11() -CONFIGS = { - "blind": ["--search", "astar(blind())"], - "lama-first": [ - "--if-unit-cost", - "--heuristic", - "hlm,hff=lm_ff_syn(lm_rhw(reasonable_orders=true))", - "--search", - "lazy_greedy([hff,hlm],preferred=[hff,hlm])", - "--if-non-unit-cost", - "--heuristic", - "hlm1,hff1=lm_ff_syn(lm_rhw(reasonable_orders=true,lm_cost_type=one,cost_type=one))", - "--search", - "lazy_greedy([hff1,hlm1],preferred=[hff1,hlm1],cost_type=one,reopen_closed=false)", - "--always"], -} - -exp = common_setup.IssueExperiment( - search_revisions=REVS, - configs=CONFIGS, - suite=SUITE, - limits=LIMITS, - ) -exp.add_comparison_table_step() - -exp() diff --git a/experiments/issue544/common_setup.py b/experiments/issue544/common_setup.py deleted file mode 100644 index 90e985c980..0000000000 --- a/experiments/issue544/common_setup.py +++ /dev/null @@ -1,352 +0,0 @@ -# -*- coding: utf-8 -*- - -import itertools -import os -import platform -import sys - -from lab.environments import LocalEnvironment, MaiaEnvironment -from lab.experiment import ARGPARSER -from lab.steps import Step - -from downward.experiments import DownwardExperiment, _get_rev_nick -from downward.checkouts import Translator, Preprocessor, Planner -from downward.reports.absolute import AbsoluteReport -from downward.reports.compare import CompareRevisionsReport -from downward.reports.scatter import ScatterPlotReport - - -def parse_args(): - ARGPARSER.add_argument( - "--test", - choices=["yes", "no", "auto"], - default="auto", - dest="test_run", - help="test experiment locally on a small suite if --test=yes or " - "--test=auto and we are not on a cluster") - return ARGPARSER.parse_args() - -ARGS = parse_args() - - -def get_script(): - """Get file name of main script.""" - import __main__ - return 
__main__.__file__ - - -def get_script_dir(): - """Get directory of main script. - - Usually a relative directory (depends on how it was called by the user.)""" - return os.path.dirname(get_script()) - - -def get_experiment_name(): - """Get name for experiment. - - Derived from the absolute filename of the main script, e.g. - "/ham/spam/eggs.py" => "spam-eggs".""" - script = os.path.abspath(get_script()) - script_dir = os.path.basename(os.path.dirname(script)) - script_base = os.path.splitext(os.path.basename(script))[0] - return "%s-%s" % (script_dir, script_base) - - -def get_data_dir(): - """Get data dir for the experiment. - - This is the subdirectory "data" of the directory containing - the main script.""" - return os.path.join(get_script_dir(), "data", get_experiment_name()) - - -def get_repo_base(): - """Get base directory of the repository, as an absolute path. - - Search upwards in the directory tree from the main script until a - directory with a subdirectory named ".hg" is found. 
- - Abort if the repo base cannot be found.""" - path = os.path.abspath(get_script_dir()) - while os.path.dirname(path) != path: - if os.path.exists(os.path.join(path, ".hg")): - return path - path = os.path.dirname(path) - sys.exit("repo base could not be found") - - -def is_running_on_cluster(): - node = platform.node() - return ("cluster" in node or - node.startswith("gkigrid") or - node in ["habakuk", "turtur"]) - - -def is_test_run(): - return ARGS.test_run == "yes" or (ARGS.test_run == "auto" and - not is_running_on_cluster()) - - -class IssueExperiment(DownwardExperiment): - """Wrapper for DownwardExperiment with a few convenience features.""" - - DEFAULT_TEST_SUITE = "gripper:prob01.pddl" - - DEFAULT_TABLE_ATTRIBUTES = [ - "cost", - "coverage", - "error", - "evaluations", - "expansions", - "expansions_until_last_jump", - "generated", - "memory", - "quality", - "run_dir", - "score_evaluations", - "score_expansions", - "score_generated", - "score_memory", - "score_search_time", - "score_total_time", - "search_time", - "total_time", - ] - - DEFAULT_SCATTER_PLOT_ATTRIBUTES = [ - "evaluations", - "expansions", - "expansions_until_last_jump", - "initial_h_value", - "memory", - "search_time", - "total_time", - ] - - PORTFOLIO_ATTRIBUTES = [ - "cost", - "coverage", - "plan_length", - ] - - def __init__(self, configs, suite, grid_priority=None, path=None, - repo=None, revisions=None, search_revisions=None, - test_suite=None, **kwargs): - """Create a DownwardExperiment with some convenience features. - - *configs* must be a non-empty dict of {nick: cmdline} pairs - that sets the planner configurations to test. :: - - IssueExperiment(configs={ - "lmcut": ["--search", "astar(lmcut())"], - "ipdb": ["--search", "astar(ipdb())"]}) - - *suite* sets the benchmarks for the experiment. It must be a - single string or a list of strings specifying domains or - tasks. The downward.suites module has many predefined - suites. 
:: - - IssueExperiment(suite=["grid", "gripper:prob01.pddl"]) - - from downward import suites - IssueExperiment(suite=suites.suite_all()) - IssueExperiment(suite=suites.suite_satisficing_with_ipc11()) - IssueExperiment(suite=suites.suite_optimal()) - - Use *grid_priority* to set the job priority for cluster - experiments. It must be in the range [-1023, 0] where 0 is the - highest priority. By default the priority is 0. :: - - IssueExperiment(grid_priority=-500) - - If *path* is specified, it must be the path to where the - experiment should be built (e.g. - /home/john/experiments/issue123/exp01/). If omitted, the - experiment path is derived automatically from the main - script's filename. Example:: - - script = experiments/issue123/exp01.py --> - path = experiments/issue123/data/issue123-exp01/ - - If *repo* is specified, it must be the path to the root of a - local Fast Downward repository. If omitted, the repository - is derived automatically from the main script's path. Example:: - - script = /path/to/fd-repo/experiments/issue123/exp01.py --> - repo = /path/to/fd-repo - - If *revisions* is specified, it should be a non-empty - list of revisions, which specify which planner versions to use - in the experiment. The same versions are used for translator, - preprocessor and search. :: - - IssueExperiment(revisions=["issue123", "4b3d581643"]) - - If *search_revisions* is specified, it should be a non-empty - list of revisions, which specify which search component - versions to use in the experiment. All runs use the - translator and preprocessor component of the first - revision. :: - - IssueExperiment(search_revisions=["default", "issue123"]) - - If you really need to specify the (translator, preprocessor, - planner) triples manually, use the *combinations* parameter - from the base class (might be deprecated soon). The options - *revisions*, *search_revisions* and *combinations* can be - freely mixed, but at least one of them must be given. 
- - Specify *test_suite* to set the benchmarks for experiment test - runs. By default the first gripper task is used. - - IssueExperiment(test_suite=["depot:pfile1", "tpp:p01.pddl"]) - - """ - - if is_test_run(): - kwargs["environment"] = LocalEnvironment() - suite = test_suite or self.DEFAULT_TEST_SUITE - elif "environment" not in kwargs: - kwargs["environment"] = MaiaEnvironment(priority=grid_priority) - - if path is None: - path = get_data_dir() - - if repo is None: - repo = get_repo_base() - - kwargs.setdefault("combinations", []) - - if not any([revisions, search_revisions, kwargs["combinations"]]): - raise ValueError('At least one of "revisions", "search_revisions" ' - 'or "combinations" must be given') - - if revisions: - kwargs["combinations"].extend([ - (Translator(repo, rev), - Preprocessor(repo, rev), - Planner(repo, rev)) - for rev in revisions]) - - if search_revisions: - base_rev = search_revisions[0] - # Use the same nick for all parts to get short revision nick. - kwargs["combinations"].extend([ - (Translator(repo, base_rev, nick=rev), - Preprocessor(repo, base_rev, nick=rev), - Planner(repo, rev, nick=rev)) - for rev in search_revisions]) - - DownwardExperiment.__init__(self, path=path, repo=repo, **kwargs) - - self._config_nicks = [] - for nick, config in configs.items(): - self.add_config(nick, config) - - self.add_suite(suite) - - @property - def revision_nicks(self): - # TODO: Once the add_algorithm() API is available we should get - # rid of the call to _get_rev_nick() and avoid inspecting the - # list of combinations by setting and saving the algorithm nicks. - return [_get_rev_nick(*combo) for combo in self.combinations] - - def add_config(self, nick, config, timeout=None): - DownwardExperiment.add_config(self, nick, config, timeout=timeout) - self._config_nicks.append(nick) - - def add_absolute_report_step(self, **kwargs): - """Add step that makes an absolute report. 
- - Absolute reports are useful for experiments that don't - compare revisions. - - The report is written to the experiment evaluation directory. - - All *kwargs* will be passed to the AbsoluteReport class. If - the keyword argument *attributes* is not specified, a - default list of attributes is used. :: - - exp.add_absolute_report_step(attributes=["coverage"]) - - """ - kwargs.setdefault("attributes", self.DEFAULT_TABLE_ATTRIBUTES) - report = AbsoluteReport(**kwargs) - outfile = get_experiment_name() + "." + report.output_format - self.add_report(report, outfile=outfile) - - def add_comparison_table_step(self, **kwargs): - """Add a step that makes pairwise revision comparisons. - - Create comparative reports for all pairs of Fast Downward - revision triples. Each report pairs up the runs of the same - config and lists the two absolute attribute values and their - difference for all attributes in kwargs["attributes"]. - - All *kwargs* will be passed to the CompareRevisionsReport - class. If the keyword argument *attributes* is not - specified, a default list of attributes is used. :: - - exp.add_comparison_table_step(attributes=["coverage"]) - - """ - kwargs.setdefault("attributes", self.DEFAULT_TABLE_ATTRIBUTES) - - def make_comparison_tables(): - for rev1, rev2 in itertools.combinations(self.revision_nicks, 2): - report = CompareRevisionsReport(rev1, rev2, **kwargs) - outfile = os.path.join(self.eval_dir, - "%s-%s-%s-compare.html" % - (self.name, rev1, rev2)) - report(self.eval_dir, outfile) - - self.add_step(Step("make-comparison-tables", make_comparison_tables)) - - def add_scatter_plot_step(self, attributes=None): - """Add a step that creates scatter plots for all revision pairs. - - Create a scatter plot for each combination of attribute, - configuration and revision pair. If *attributes* is not - specified, a list of common scatter plot attributes is used. - For portfolios all attributes except "cost", "coverage" and - "plan_length" will be ignored. 
:: - - exp.add_scatter_plot_step(attributes=["expansions"]) - - """ - if attributes is None: - attributes = self.DEFAULT_SCATTER_PLOT_ATTRIBUTES - scatter_dir = os.path.join(self.eval_dir, "scatter") - - def is_portfolio(config_nick): - return "fdss" in config_nick - - def make_scatter_plot(config_nick, rev1, rev2, attribute): - name = "-".join([self.name, rev1, rev2, attribute, config_nick]) - print "Make scatter plot for", name - algo1 = "%s-%s" % (rev1, config_nick) - algo2 = "%s-%s" % (rev2, config_nick) - report = ScatterPlotReport( - filter_config=[algo1, algo2], - attributes=[attribute], - get_category=lambda run1, run2: run1["domain"], - legend_location=(1.3, 0.5)) - report(self.eval_dir, - os.path.join(scatter_dir, rev1 + "-" + rev2, name)) - - def make_scatter_plots(): - for config_nick in self._config_nicks: - if is_portfolio(config_nick): - valid_attributes = [ - attr for attr in attributes - if attr in self.PORTFOLIO_ATTRIBUTES] - else: - valid_attributes = attributes - for rev1, rev2 in itertools.combinations( - self.revision_nicks, 2): - for attribute in valid_attributes: - make_scatter_plot(config_nick, rev1, rev2, attribute) - - self.add_step(Step("make-scatter-plots", make_scatter_plots)) diff --git a/experiments/issue544/regression-v1.py b/experiments/issue544/regression-v1.py deleted file mode 100755 index baf92d5d9f..0000000000 --- a/experiments/issue544/regression-v1.py +++ /dev/null @@ -1,66 +0,0 @@ -#! /usr/bin/env python -# -*- coding: utf-8 -*- -""" -Before you can run the experiment you need to create duplicates of the -two tasks we want to test: - -cd ../benchmarks/trucks-strips -for i in {00..19}; do cp p16.pddl p16-$i.pddl; done -for i in {00..19}; do cp domain_p16.pddl domain_p16-$i.pddl; done - -cd ../freecell -for i in {00..19}; do cp probfreecell-11-5.pddl probfreecell-11-5-$i.pddl; done - -Don't forget to remove the duplicate tasks afterwards. Otherwise they -will be included in subsequent experiments. 
-""" - -import common_setup - - -REVS = ["issue544-base", "issue544-v1"] -LIMITS = {"search_time": 1800} -CONFIGS = { - "eager_greedy_add": [ - "--heuristic", - "h=add()", - "--search", - "eager_greedy(h, preferred=h)"], - "eager_greedy_ff": [ - "--heuristic", - "h=ff()", - "--search", - "eager_greedy(h, preferred=h)"], - "lazy_greedy_add": [ - "--heuristic", - "h=add()", - "--search", - "lazy_greedy(h, preferred=h)"], - "lazy_greedy_ff": [ - "--heuristic", - "h=ff()", - "--search", - "lazy_greedy(h, preferred=h)"], -} - -TEST_RUN = False - -if TEST_RUN: - SUITE = "gripper:prob01.pddl" - PRIORITY = None # "None" means local experiment -else: - SUITE = (["trucks-strips:p16-%02d.pddl" % i for i in range(20)] + - ["freecell:probfreecell-11-5-%02d.pddl" % i for i in range(20)]) - PRIORITY = 0 # number means maia experiment - - -exp = common_setup.IssueExperiment( - revisions=REVS, - configs=CONFIGS, - suite=SUITE, - limits=LIMITS, - ) - -exp.add_comparison_table_step() - -exp() diff --git a/experiments/issue544/sat-v1.py b/experiments/issue544/sat-v1.py deleted file mode 100755 index 238d439b03..0000000000 --- a/experiments/issue544/sat-v1.py +++ /dev/null @@ -1,43 +0,0 @@ -#! 
/usr/bin/env python -# -*- coding: utf-8 -*- - -from downward import suites - -import common_setup - - -REVS = ["issue544-base", "issue544-v1"] -LIMITS = {"search_time": 1800} -SUITE = suites.suite_satisficing_with_ipc11() -CONFIGS = { - "eager_greedy_add": [ - "--heuristic", - "h=add()", - "--search", - "eager_greedy(h, preferred=h)"], - "eager_greedy_ff": [ - "--heuristic", - "h=ff()", - "--search", - "eager_greedy(h, preferred=h)"], - "lazy_greedy_add": [ - "--heuristic", - "h=add()", - "--search", - "lazy_greedy(h, preferred=h)"], - "lazy_greedy_ff": [ - "--heuristic", - "h=ff()", - "--search", - "lazy_greedy(h, preferred=h)"], -} - -exp = common_setup.IssueExperiment( - search_revisions=REVS, - configs=CONFIGS, - suite=SUITE, - limits=LIMITS, - ) -exp.add_comparison_table_step() - -exp() diff --git a/experiments/issue544/sat-v2.py b/experiments/issue544/sat-v2.py deleted file mode 100755 index b6f7c93690..0000000000 --- a/experiments/issue544/sat-v2.py +++ /dev/null @@ -1,28 +0,0 @@ -#! /usr/bin/env python -# -*- coding: utf-8 -*- - -from downward import suites - -import common_setup - - -REVS = ["issue544-base-v2", "issue544-v2"] -LIMITS = {"search_time": 1800} -SUITE = suites.suite_satisficing_with_ipc11() -CONFIGS = { - "eager_greedy_ff": [ - "--heuristic", - "h=ff()", - "--search", - "eager_greedy(h, preferred=h)"], -} - -exp = common_setup.IssueExperiment( - search_revisions=REVS, - configs=CONFIGS, - suite=SUITE, - limits=LIMITS, - ) -exp.add_comparison_table_step() - -exp() diff --git a/experiments/issue544/sat-v3.py b/experiments/issue544/sat-v3.py deleted file mode 100755 index 5b68c4f4ed..0000000000 --- a/experiments/issue544/sat-v3.py +++ /dev/null @@ -1,28 +0,0 @@ -#! 
/usr/bin/env python -# -*- coding: utf-8 -*- - -from downward import suites - -import common_setup - - -REVS = ["issue544-base-v2", "issue544-v2", "issue544-v3"] -LIMITS = {"search_time": 1800} -SUITE = suites.suite_satisficing_with_ipc11() -CONFIGS = { - "eager_greedy_ff": [ - "--heuristic", - "h=ff()", - "--search", - "eager_greedy(h, preferred=h)"], -} - -exp = common_setup.IssueExperiment( - search_revisions=REVS, - configs=CONFIGS, - suite=SUITE, - limits=LIMITS, - ) -exp.add_comparison_table_step() - -exp() diff --git a/experiments/issue546/common_setup.py b/experiments/issue546/common_setup.py deleted file mode 100644 index 6989ef8686..0000000000 --- a/experiments/issue546/common_setup.py +++ /dev/null @@ -1,357 +0,0 @@ -# -*- coding: utf-8 -*- - -import itertools -import os -import platform -import sys - -from lab.environments import LocalEnvironment, MaiaEnvironment -from lab.experiment import ARGPARSER -from lab.steps import Step - -from downward.experiments import DownwardExperiment, _get_rev_nick -from downward.checkouts import Translator, Preprocessor, Planner -from downward.reports.absolute import AbsoluteReport -from downward.reports.compare import CompareRevisionsReport -from downward.reports.scatter import ScatterPlotReport - - -def parse_args(): - ARGPARSER.add_argument( - "--test", - choices=["yes", "no", "auto"], - default="auto", - dest="test_run", - help="test experiment locally on a small suite if --test=yes or " - "--test=auto and we are not on a cluster") - return ARGPARSER.parse_args() - -ARGS = parse_args() - - -def get_script(): - """Get file name of main script.""" - import __main__ - return __main__.__file__ - - -def get_script_dir(): - """Get directory of main script. - - Usually a relative directory (depends on how it was called by the user.)""" - return os.path.dirname(get_script()) - - -def get_experiment_name(): - """Get name for experiment. - - Derived from the absolute filename of the main script, e.g. 
- "/ham/spam/eggs.py" => "spam-eggs".""" - script = os.path.abspath(get_script()) - script_dir = os.path.basename(os.path.dirname(script)) - script_base = os.path.splitext(os.path.basename(script))[0] - return "%s-%s" % (script_dir, script_base) - - -def get_data_dir(): - """Get data dir for the experiment. - - This is the subdirectory "data" of the directory containing - the main script.""" - return os.path.join(get_script_dir(), "data", get_experiment_name()) - - -def get_repo_base(): - """Get base directory of the repository, as an absolute path. - - Search upwards in the directory tree from the main script until a - directory with a subdirectory named ".hg" is found. - - Abort if the repo base cannot be found.""" - path = os.path.abspath(get_script_dir()) - while os.path.dirname(path) != path: - if os.path.exists(os.path.join(path, ".hg")): - return path - path = os.path.dirname(path) - sys.exit("repo base could not be found") - - -def is_running_on_cluster(): - node = platform.node() - return ("cluster" in node or - node.startswith("gkigrid") or - node in ["habakuk", "turtur"]) - - -def is_test_run(): - return ARGS.test_run == "yes" or (ARGS.test_run == "auto" and - not is_running_on_cluster()) - - -class IssueExperiment(DownwardExperiment): - """Wrapper for DownwardExperiment with a few convenience features.""" - - DEFAULT_TEST_SUITE = "gripper:prob01.pddl" - - DEFAULT_TABLE_ATTRIBUTES = [ - "cost", - "coverage", - "error", - "evaluations", - "expansions", - "expansions_until_last_jump", - "generated", - "memory", - "quality", - "run_dir", - "score_evaluations", - "score_expansions", - "score_generated", - "score_memory", - "score_search_time", - "score_total_time", - "search_time", - "total_time", - ] - - DEFAULT_SCATTER_PLOT_ATTRIBUTES = [ - "evaluations", - "expansions", - "expansions_until_last_jump", - "initial_h_value", - "memory", - "search_time", - "total_time", - ] - - PORTFOLIO_ATTRIBUTES = [ - "cost", - "coverage", - "error", - "plan_length", - 
"run_dir", - ] - - def __init__(self, configs, suite, grid_priority=None, path=None, - repo=None, revisions=None, search_revisions=None, - test_suite=None, **kwargs): - """Create a DownwardExperiment with some convenience features. - - *configs* must be a non-empty dict of {nick: cmdline} pairs - that sets the planner configurations to test. :: - - IssueExperiment(configs={ - "lmcut": ["--search", "astar(lmcut())"], - "ipdb": ["--search", "astar(ipdb())"]}) - - *suite* sets the benchmarks for the experiment. It must be a - single string or a list of strings specifying domains or - tasks. The downward.suites module has many predefined - suites. :: - - IssueExperiment(suite=["grid", "gripper:prob01.pddl"]) - - from downward import suites - IssueExperiment(suite=suites.suite_all()) - IssueExperiment(suite=suites.suite_satisficing_with_ipc11()) - IssueExperiment(suite=suites.suite_optimal()) - - Use *grid_priority* to set the job priority for cluster - experiments. It must be in the range [-1023, 0] where 0 is the - highest priority. By default the priority is 0. :: - - IssueExperiment(grid_priority=-500) - - If *path* is specified, it must be the path to where the - experiment should be built (e.g. - /home/john/experiments/issue123/exp01/). If omitted, the - experiment path is derived automatically from the main - script's filename. Example:: - - script = experiments/issue123/exp01.py --> - path = experiments/issue123/data/issue123-exp01/ - - If *repo* is specified, it must be the path to the root of a - local Fast Downward repository. If omitted, the repository - is derived automatically from the main script's path. Example:: - - script = /path/to/fd-repo/experiments/issue123/exp01.py --> - repo = /path/to/fd-repo - - If *revisions* is specified, it should be a non-empty - list of revisions, which specify which planner versions to use - in the experiment. The same versions are used for translator, - preprocessor and search. 
:: - - IssueExperiment(revisions=["issue123", "4b3d581643"]) - - If *search_revisions* is specified, it should be a non-empty - list of revisions, which specify which search component - versions to use in the experiment. All runs use the - translator and preprocessor component of the first - revision. :: - - IssueExperiment(search_revisions=["default", "issue123"]) - - If you really need to specify the (translator, preprocessor, - planner) triples manually, use the *combinations* parameter - from the base class (might be deprecated soon). The options - *revisions*, *search_revisions* and *combinations* can be - freely mixed, but at least one of them must be given. - - Specify *test_suite* to set the benchmarks for experiment test - runs. By default the first gripper task is used. - - IssueExperiment(test_suite=["depot:pfile1", "tpp:p01.pddl"]) - - """ - - if is_test_run(): - kwargs["environment"] = LocalEnvironment() - suite = test_suite or self.DEFAULT_TEST_SUITE - elif "environment" not in kwargs: - kwargs["environment"] = MaiaEnvironment(priority=grid_priority) - - if path is None: - path = get_data_dir() - - if repo is None: - repo = get_repo_base() - - kwargs.setdefault("combinations", []) - - if not any([revisions, search_revisions, kwargs["combinations"]]): - raise ValueError('At least one of "revisions", "search_revisions" ' - 'or "combinations" must be given') - - if revisions: - kwargs["combinations"].extend([ - (Translator(repo, rev), - Preprocessor(repo, rev), - Planner(repo, rev)) - for rev in revisions]) - - if search_revisions: - base_rev = search_revisions[0] - # Use the same nick for all parts to get short revision nick. 
- kwargs["combinations"].extend([ - (Translator(repo, base_rev, nick=rev), - Preprocessor(repo, base_rev, nick=rev), - Planner(repo, rev, nick=rev)) - for rev in search_revisions]) - - DownwardExperiment.__init__(self, path=path, repo=repo, **kwargs) - - self._config_nicks = [] - for nick, config in configs.items(): - self.add_config(nick, config) - - self.add_suite(suite) - - @property - def revision_nicks(self): - # TODO: Once the add_algorithm() API is available we should get - # rid of the call to _get_rev_nick() and avoid inspecting the - # list of combinations by setting and saving the algorithm nicks. - return [_get_rev_nick(*combo) for combo in self.combinations] - - @classmethod - def _is_portfolio(cls, config_nick): - return "fdss" in config_nick - - @classmethod - def get_supported_attributes(cls, config_nick, attributes): - if cls._is_portfolio(config_nick): - return [attr for attr in attributes - if attr in cls.PORTFOLIO_ATTRIBUTES] - return attributes - - def add_config(self, nick, config, timeout=None): - DownwardExperiment.add_config(self, nick, config, timeout=timeout) - self._config_nicks.append(nick) - - def add_absolute_report_step(self, **kwargs): - """Add step that makes an absolute report. - - Absolute reports are useful for experiments that don't - compare revisions. - - The report is written to the experiment evaluation directory. - - All *kwargs* will be passed to the AbsoluteReport class. If - the keyword argument *attributes* is not specified, a - default list of attributes is used. :: - - exp.add_absolute_report_step(attributes=["coverage"]) - - """ - kwargs.setdefault("attributes", self.DEFAULT_TABLE_ATTRIBUTES) - report = AbsoluteReport(**kwargs) - outfile = get_experiment_name() + "." + report.output_format - self.add_report(report, outfile=outfile) - - def add_comparison_table_step(self, **kwargs): - """Add a step that makes pairwise revision comparisons. 
- - Create comparative reports for all pairs of Fast Downward - revision triples. Each report pairs up the runs of the same - config and lists the two absolute attribute values and their - difference for all attributes in kwargs["attributes"]. - - All *kwargs* will be passed to the CompareRevisionsReport - class. If the keyword argument *attributes* is not - specified, a default list of attributes is used. :: - - exp.add_comparison_table_step(attributes=["coverage"]) - - """ - kwargs.setdefault("attributes", self.DEFAULT_TABLE_ATTRIBUTES) - - def make_comparison_tables(): - for rev1, rev2 in itertools.combinations(self.revision_nicks, 2): - report = CompareRevisionsReport(rev1, rev2, **kwargs) - outfile = os.path.join(self.eval_dir, - "%s-%s-%s-compare.html" % - (self.name, rev1, rev2)) - report(self.eval_dir, outfile) - - self.add_step(Step("make-comparison-tables", make_comparison_tables)) - - def add_scatter_plot_step(self, attributes=None): - """Add a step that creates scatter plots for all revision pairs. - - Create a scatter plot for each combination of attribute, - configuration and revision pair. If *attributes* is not - specified, a list of common scatter plot attributes is used. - For portfolios all attributes except "cost", "coverage" and - "plan_length" will be ignored. 
:: - - exp.add_scatter_plot_step(attributes=["expansions"]) - - """ - if attributes is None: - attributes = self.DEFAULT_SCATTER_PLOT_ATTRIBUTES - scatter_dir = os.path.join(self.eval_dir, "scatter") - - def make_scatter_plot(config_nick, rev1, rev2, attribute): - name = "-".join([self.name, rev1, rev2, attribute, config_nick]) - print "Make scatter plot for", name - algo1 = "%s-%s" % (rev1, config_nick) - algo2 = "%s-%s" % (rev2, config_nick) - report = ScatterPlotReport( - filter_config=[algo1, algo2], - attributes=[attribute], - get_category=lambda run1, run2: run1["domain"], - legend_location=(1.3, 0.5)) - report(self.eval_dir, - os.path.join(scatter_dir, rev1 + "-" + rev2, name)) - - def make_scatter_plots(): - for config_nick in self._config_nicks: - for rev1, rev2 in itertools.combinations( - self.revision_nicks, 2): - for attribute in self.get_supported_attributes( - config_nick, attributes): - make_scatter_plot(config_nick, rev1, rev2, attribute) - - self.add_step(Step("make-scatter-plots", make_scatter_plots)) diff --git a/experiments/issue546/v1-limits.py b/experiments/issue546/v1-limits.py deleted file mode 100755 index 88b97b214c..0000000000 --- a/experiments/issue546/v1-limits.py +++ /dev/null @@ -1,42 +0,0 @@ -#! 
/usr/bin/env python -# -*- coding: utf-8 -*- - -from downward import suites - -import common_setup - - -REVS = ["issue546-v1"] -LIMITS = {"search_time": 300, "search_memory": 1024} -SUITE = suites.suite_optimal_with_ipc11() - -CONFIGS = { - "blind-fd-limits": ["--search", "astar(blind())"], - "blind-lab-limits": ["--search", "astar(blind())"], -} - -class FastDownwardLimits(common_setup.IssueExperiment): - def _make_search_runs(self): - common_setup.IssueExperiment._make_search_runs(self) - for run in self.runs: - if "fd-limits" in run.properties["config_nick"]: - # Move limits to fast-downward.py - search_args, search_kwargs = run.commands["search"] - time_limit = search_kwargs["time_limit"] - mem_limit = search_kwargs["mem_limit"] - del search_kwargs["time_limit"] - del search_kwargs["mem_limit"] - search_args.insert(1, "--search-timeout") - search_args.insert(2, str(time_limit)) - search_args.insert(3, "--search-memory") - search_args.insert(4, str(mem_limit)) - -exp = FastDownwardLimits( - search_revisions=REVS, - configs=CONFIGS, - suite=SUITE, - limits=LIMITS, - ) -exp.add_absolute_report_step() - -exp() diff --git a/experiments/issue546/v1-opt-fdss.py b/experiments/issue546/v1-opt-fdss.py deleted file mode 100755 index dc6d28613a..0000000000 --- a/experiments/issue546/v1-opt-fdss.py +++ /dev/null @@ -1,27 +0,0 @@ -#! 
/usr/bin/env python -# -*- coding: utf-8 -*- - -from downward import suites - -import common_setup - - -REVS = ["issue546-base", "issue546-v1"] -LIMITS = {"search_time": 1800} -SUITE = suites.suite_optimal_with_ipc11() - -CONFIGS = { - "seq_opt_fdss_1": ["--alias", "seq-opt-fdss-1"], - "seq_opt_fdss_2": ["--alias", "seq-opt-fdss-2"], -} - -exp = common_setup.IssueExperiment( - search_revisions=REVS, - configs=CONFIGS, - suite=SUITE, - limits=LIMITS, - ) -exp.add_comparison_table_step( - attributes=common_setup.IssueExperiment.PORTFOLIO_ATTRIBUTES) - -exp() diff --git a/experiments/issue546/v1-sat-fdss.py b/experiments/issue546/v1-sat-fdss.py deleted file mode 100755 index 2c1bdd7380..0000000000 --- a/experiments/issue546/v1-sat-fdss.py +++ /dev/null @@ -1,27 +0,0 @@ -#! /usr/bin/env python -# -*- coding: utf-8 -*- - -from downward import suites - -import common_setup - - -REVS = ["issue546-base", "issue546-v1"] -LIMITS = {"search_time": 1800} -SUITE = suites.suite_satisficing_with_ipc11() - -CONFIGS = { - "seq_sat_fdss_1": ["--alias", "seq-sat-fdss-1"], - "seq_sat_fdss_2": ["--alias", "seq-sat-fdss-2"], -} - -exp = common_setup.IssueExperiment( - search_revisions=REVS, - configs=CONFIGS, - suite=SUITE, - limits=LIMITS, - ) -exp.add_comparison_table_step( - attributes=common_setup.IssueExperiment.PORTFOLIO_ATTRIBUTES) - -exp() diff --git a/experiments/issue547/common_setup.py b/experiments/issue547/common_setup.py deleted file mode 100644 index 635088ec30..0000000000 --- a/experiments/issue547/common_setup.py +++ /dev/null @@ -1,399 +0,0 @@ -# -*- coding: utf-8 -*- - -import itertools -import os -import platform -import sys - -from lab.environments import LocalEnvironment, MaiaEnvironment -from lab.experiment import ARGPARSER -from lab.reports import Table -from lab.steps import Step - -from downward.experiments import DownwardExperiment, _get_rev_nick -from downward.checkouts import Translator, Preprocessor, Planner -from downward.reports import PlanningReport 
-from downward.reports.absolute import AbsoluteReport -from downward.reports.compare import CompareRevisionsReport -from downward.reports.scatter import ScatterPlotReport - - -def parse_args(): - ARGPARSER.add_argument( - "--test", - choices=["yes", "no", "auto"], - default="auto", - dest="test_run", - help="test experiment locally on a small suite if --test=yes or " - "--test=auto and we are not on a cluster") - return ARGPARSER.parse_args() - -ARGS = parse_args() - - -def get_script(): - """Get file name of main script.""" - import __main__ - return __main__.__file__ - - -def get_script_dir(): - """Get directory of main script. - - Usually a relative directory (depends on how it was called by the user.)""" - return os.path.dirname(get_script()) - - -def get_experiment_name(): - """Get name for experiment. - - Derived from the absolute filename of the main script, e.g. - "/ham/spam/eggs.py" => "spam-eggs".""" - script = os.path.abspath(get_script()) - script_dir = os.path.basename(os.path.dirname(script)) - script_base = os.path.splitext(os.path.basename(script))[0] - return "%s-%s" % (script_dir, script_base) - - -def get_data_dir(): - """Get data dir for the experiment. - - This is the subdirectory "data" of the directory containing - the main script.""" - return os.path.join(get_script_dir(), "data", get_experiment_name()) - - -def get_repo_base(): - """Get base directory of the repository, as an absolute path. - - Search upwards in the directory tree from the main script until a - directory with a subdirectory named ".hg" is found. 
- - Abort if the repo base cannot be found.""" - path = os.path.abspath(get_script_dir()) - while os.path.dirname(path) != path: - if os.path.exists(os.path.join(path, ".hg")): - return path - path = os.path.dirname(path) - sys.exit("repo base could not be found") - - -def is_running_on_cluster(): - node = platform.node() - return ("cluster" in node or - node.startswith("gkigrid") or - node in ["habakuk", "turtur"]) - - -def is_test_run(): - return ARGS.test_run == "yes" or (ARGS.test_run == "auto" and - not is_running_on_cluster()) - - -class IssueExperiment(DownwardExperiment): - """Wrapper for DownwardExperiment with a few convenience features.""" - - DEFAULT_TEST_SUITE = "gripper:prob01.pddl" - - # TODO: Add something about errors/exit codes. - DEFAULT_TABLE_ATTRIBUTES = [ - "cost", - "coverage", - "evaluations", - "expansions", - "expansions_until_last_jump", - "generated", - "memory", - "quality", - "run_dir", - "score_evaluations", - "score_expansions", - "score_generated", - "score_memory", - "score_search_time", - "score_total_time", - "search_time", - "total_time", - ] - - DEFAULT_SCATTER_PLOT_ATTRIBUTES = [ - "evaluations", - "expansions", - "expansions_until_last_jump", - "initial_h_value", - "memory", - "search_time", - "total_time", - ] - - PORTFOLIO_ATTRIBUTES = [ - "cost", - "coverage", - "plan_length", - ] - - def __init__(self, configs, suite, grid_priority=None, path=None, - repo=None, revisions=None, search_revisions=None, - test_suite=None, **kwargs): - """Create a DownwardExperiment with some convenience features. - - *configs* must be a non-empty dict of {nick: cmdline} pairs - that sets the planner configurations to test. :: - - IssueExperiment(configs={ - "lmcut": ["--search", "astar(lmcut())"], - "ipdb": ["--search", "astar(ipdb())"]}) - - *suite* sets the benchmarks for the experiment. It must be a - single string or a list of strings specifying domains or - tasks. The downward.suites module has many predefined - suites. 
:: - - IssueExperiment(suite=["grid", "gripper:prob01.pddl"]) - - from downward import suites - IssueExperiment(suite=suites.suite_all()) - IssueExperiment(suite=suites.suite_satisficing_with_ipc11()) - IssueExperiment(suite=suites.suite_optimal()) - - Use *grid_priority* to set the job priority for cluster - experiments. It must be in the range [-1023, 0] where 0 is the - highest priority. By default the priority is 0. :: - - IssueExperiment(grid_priority=-500) - - If *path* is specified, it must be the path to where the - experiment should be built (e.g. - /home/john/experiments/issue123/exp01/). If omitted, the - experiment path is derived automatically from the main - script's filename. Example:: - - script = experiments/issue123/exp01.py --> - path = experiments/issue123/data/issue123-exp01/ - - If *repo* is specified, it must be the path to the root of a - local Fast Downward repository. If omitted, the repository - is derived automatically from the main script's path. Example:: - - script = /path/to/fd-repo/experiments/issue123/exp01.py --> - repo = /path/to/fd-repo - - If *revisions* is specified, it should be a non-empty - list of revisions, which specify which planner versions to use - in the experiment. The same versions are used for translator, - preprocessor and search. :: - - IssueExperiment(revisions=["issue123", "4b3d581643"]) - - If *search_revisions* is specified, it should be a non-empty - list of revisions, which specify which search component - versions to use in the experiment. All runs use the - translator and preprocessor component of the first - revision. :: - - IssueExperiment(search_revisions=["default", "issue123"]) - - If you really need to specify the (translator, preprocessor, - planner) triples manually, use the *combinations* parameter - from the base class (might be deprecated soon). The options - *revisions*, *search_revisions* and *combinations* can be - freely mixed, but at least one of them must be given. 
- - Specify *test_suite* to set the benchmarks for experiment test - runs. By default the first gripper task is used. - - IssueExperiment(test_suite=["depot:pfile1", "tpp:p01.pddl"]) - - """ - - if is_test_run(): - kwargs["environment"] = LocalEnvironment() - suite = test_suite or self.DEFAULT_TEST_SUITE - elif "environment" not in kwargs: - kwargs["environment"] = MaiaEnvironment(priority=grid_priority) - - if path is None: - path = get_data_dir() - - if repo is None: - repo = get_repo_base() - - kwargs.setdefault("combinations", []) - - if not any([revisions, search_revisions, kwargs["combinations"]]): - raise ValueError('At least one of "revisions", "search_revisions" ' - 'or "combinations" must be given') - - if revisions: - kwargs["combinations"].extend([ - (Translator(repo, rev), - Preprocessor(repo, rev), - Planner(repo, rev)) - for rev in revisions]) - - if search_revisions: - base_rev = search_revisions[0] - # Use the same nick for all parts to get short revision nick. - kwargs["combinations"].extend([ - (Translator(repo, base_rev, nick=rev), - Preprocessor(repo, base_rev, nick=rev), - Planner(repo, rev, nick=rev)) - for rev in search_revisions]) - - DownwardExperiment.__init__(self, path=path, repo=repo, **kwargs) - - self._config_nicks = [] - for nick, config in configs.items(): - self.add_config(nick, config) - - self.add_suite(suite) - - @property - def revision_nicks(self): - # TODO: Once the add_algorithm() API is available we should get - # rid of the call to _get_rev_nick() and avoid inspecting the - # list of combinations by setting and saving the algorithm nicks. - return [_get_rev_nick(*combo) for combo in self.combinations] - - def add_config(self, nick, config, timeout=None): - DownwardExperiment.add_config(self, nick, config, timeout=timeout) - self._config_nicks.append(nick) - - def add_absolute_report_step(self, **kwargs): - """Add step that makes an absolute report. 
- - Absolute reports are useful for experiments that don't - compare revisions. - - The report is written to the experiment evaluation directory. - - All *kwargs* will be passed to the AbsoluteReport class. If - the keyword argument *attributes* is not specified, a - default list of attributes is used. :: - - exp.add_absolute_report_step(attributes=["coverage"]) - - """ - kwargs.setdefault("attributes", self.DEFAULT_TABLE_ATTRIBUTES) - report = AbsoluteReport(**kwargs) - outfile = get_experiment_name() + "." + report.output_format - self.add_report(report, outfile=outfile) - - def add_comparison_table_step(self, **kwargs): - """Add a step that makes pairwise revision comparisons. - - Create comparative reports for all pairs of Fast Downward - revision triples. Each report pairs up the runs of the same - config and lists the two absolute attribute values and their - difference for all attributes in kwargs["attributes"]. - - All *kwargs* will be passed to the CompareRevisionsReport - class. If the keyword argument *attributes* is not - specified, a default list of attributes is used. :: - - exp.add_comparison_table_step(attributes=["coverage"]) - - """ - kwargs.setdefault("attributes", self.DEFAULT_TABLE_ATTRIBUTES) - - def make_comparison_tables(): - for rev1, rev2 in itertools.combinations(self.revision_nicks, 2): - report = CompareRevisionsReport(rev1, rev2, **kwargs) - outfile = os.path.join(self.eval_dir, - "%s-%s-compare.html" % (rev1, rev2)) - report(self.eval_dir, outfile) - - self.add_step(Step("make-comparison-tables", make_comparison_tables)) - - def add_scatter_plot_step(self, attributes=None): - """Add a step that creates scatter plots for all revision pairs. - - Create a scatter plot for each combination of attribute, - configuration and revision pair. If *attributes* is not - specified, a list of common scatter plot attributes is used. - For portfolios all attributes except "cost", "coverage" and - "plan_length" will be ignored. 
:: - - exp.add_scatter_plot_step(attributes=["expansions"]) - - """ - if attributes is None: - attributes = self.DEFAULT_SCATTER_PLOT_ATTRIBUTES - scatter_dir = os.path.join(self.eval_dir, "scatter") - - def is_portfolio(config_nick): - return "fdss" in config_nick - - def make_scatter_plots(): - for config_nick in self._config_nicks: - for rev1, rev2 in itertools.combinations( - self.revision_nicks, 2): - algo1 = "%s-%s" % (rev1, config_nick) - algo2 = "%s-%s" % (rev2, config_nick) - if is_portfolio(config_nick): - valid_attributes = [ - attr for attr in attributes - if attr in self.PORTFOLIO_ATTRIBUTES] - else: - valid_attributes = attributes - for attribute in valid_attributes: - name = "-".join([rev1, rev2, attribute, config_nick]) - print "Make scatter plot for", name - report = ScatterPlotReport( - filter_config=[algo1, algo2], - attributes=[attribute], - get_category=lambda run1, run2: run1["domain"], - legend_location=(1.3, 0.5)) - report(self.eval_dir, os.path.join(scatter_dir, name)) - - self.add_step(Step("make-scatter-plots", make_scatter_plots)) - - -class RegressionReport(PlanningReport): - """ - Compare revisions for tasks on which the first revision performs - better than other revisions. - - *revision_nicks* must be a list of revision_nicks, e.g. - ["default", "issue123"]. - - *config_nicks* must be a list of configuration nicknames, e.g. - ["eager_greedy_ff", "eager_greedy_add"]. - - *regression_attribute* is the attribute that we compare between - different revisions. It defaults to "coverage". 
- - Example comparing search_time for tasks were we lose coverage:: - - exp.add_report(RegressionReport(revision_nicks=["default", "issue123"], - config_nicks=["eager_greedy_ff"], - regression_attribute="coverage", - attributes="search_time")) - """ - def __init__(self, revision_nicks, config_nicks, - regression_attribute="coverage", **kwargs): - PlanningReport.__init__(self, **kwargs) - assert revision_nicks - self.revision_nicks = revision_nicks - assert config_nicks - self.config_nicks = config_nicks - self.regression_attribute = regression_attribute - - def get_markup(self): - tables = [] - for (domain, problem) in self.problems: - for config_nick in self.config_nicks: - runs = [self.runs[(domain, problem, rev + "-" + config_nick)] - for rev in self.revision_nicks] - - if any(runs[0][self.regression_attribute] > - runs[i][self.regression_attribute] - for i in range(1, len(self.revision_nicks))): - print "\"%s:%s\"," % (domain, problem) - table = Table() - for rev, run in zip(self.revision_nicks, runs): - for attr in self.attributes: - table.add_cell(rev, attr, run.get(attr)) - table_name = ":".join((domain, problem, config_nick)) - tables.append((table_name, table)) - return "\n".join(name + "\n" + str(table) for name, table in tables) diff --git a/experiments/issue547/custom-parser.py b/experiments/issue547/custom-parser.py deleted file mode 100755 index 24745c7d39..0000000000 --- a/experiments/issue547/custom-parser.py +++ /dev/null @@ -1,19 +0,0 @@ -#! /usr/bin/env python - -from lab.parser import Parser - - -class CustomParser(Parser): - def __init__(self): - Parser.__init__(self) - self.add_pattern( - "successor_generator_time", - "Building successor generator...done! 
\[t=(.+)s\]", - required=False, - type=float) - - -if __name__ == "__main__": - parser = CustomParser() - print "Running custom parser" - parser.parse() diff --git a/experiments/issue547/issue547-v2-lama.py b/experiments/issue547/issue547-v2-lama.py deleted file mode 100755 index 68cf7149e9..0000000000 --- a/experiments/issue547/issue547-v2-lama.py +++ /dev/null @@ -1,54 +0,0 @@ -#! /usr/bin/env python -# -*- coding: utf-8 -*- - -from downward import suites -from downward.reports.scatter import ScatterPlotReport - -import common_setup -from relativescatter import RelativeScatterPlotReport - - -SEARCH_REVS = ["issue547-base", "issue547-v2"] -SUITE = suites.suite_satisficing_with_ipc11() - -CONFIGS = { - 'lama-2011-first': [ - "--if-unit-cost", - "--heuristic", - "hlm,hff=lm_ff_syn(lm_rhw(reasonable_orders=true))", - "--search", - "lazy_greedy([hff,hlm],preferred=[hff,hlm])", - "--if-non-unit-cost", - "--heuristic", - "hlm1,hff1=lm_ff_syn(lm_rhw(reasonable_orders=true," - " lm_cost_type=one,cost_type=one))", - "--heuristic", - "hlm2,hff2=lm_ff_syn(lm_rhw(reasonable_orders=true," - " lm_cost_type=plusone,cost_type=plusone))", - "--search", - "lazy_greedy([hff1,hlm1],preferred=[hff1,hlm1], cost_type=one,reopen_closed=false)", - ], -} - -exp = common_setup.IssueExperiment( - revisions=SEARCH_REVS, - configs=CONFIGS, - suite=SUITE, - ) -exp.add_search_parser("custom-parser.py") - -attributes = attributes=exp.DEFAULT_TABLE_ATTRIBUTES + ["successor_generator_time", "reopened_until_last_jump"] -exp.add_comparison_table_step(attributes=attributes) - -for conf in CONFIGS: - for attr in ("memory", "search_time"): - exp.add_report( - RelativeScatterPlotReport( - attributes=[attr], - get_category=lambda run1, run2: run1.get("domain"), - filter_config=["issue547-base-%s" % conf, "issue547-v2-%s" % conf] - ), - outfile='issue547_base_v2-sat_%s_%s.png' % (conf, attr) - ) - -exp() diff --git a/experiments/issue547/issue547-v2-opt.py b/experiments/issue547/issue547-v2-opt.py deleted 
file mode 100755 index 3d765673c9..0000000000 --- a/experiments/issue547/issue547-v2-opt.py +++ /dev/null @@ -1,41 +0,0 @@ -#! /usr/bin/env python -# -*- coding: utf-8 -*- - -from downward import suites -from downward.reports.scatter import ScatterPlotReport - -import common_setup -from relativescatter import RelativeScatterPlotReport - - -SEARCH_REVS = ["issue547-base", "issue547-v2"] -SUITE = suites.suite_optimal_with_ipc11() - -CONFIGS = { - 'astar_ipdb': [ - '--search', - 'astar(ipdb())'], -} - -exp = common_setup.IssueExperiment( - revisions=SEARCH_REVS, - configs=CONFIGS, - suite=SUITE, - ) -exp.add_search_parser("custom-parser.py") - -attributes = attributes=exp.DEFAULT_TABLE_ATTRIBUTES + ["successor_generator_time", "reopened_until_last_jump"] -exp.add_comparison_table_step(attributes=attributes) - -for conf in CONFIGS: - for attr in ("memory", "search_time"): - exp.add_report( - RelativeScatterPlotReport( - attributes=[attr], - get_category=lambda run1, run2: run1.get("domain"), - filter_config=["issue547-base-%s" % conf, "issue547-v2-%s" % conf] - ), - outfile='issue547_base_v2_%s_%s.png' % (conf, attr) - ) - -exp() diff --git a/experiments/issue547/issue547-v2-sat.py b/experiments/issue547/issue547-v2-sat.py deleted file mode 100755 index 129628ed35..0000000000 --- a/experiments/issue547/issue547-v2-sat.py +++ /dev/null @@ -1,56 +0,0 @@ -#! 
/usr/bin/env python -# -*- coding: utf-8 -*- - -from downward import suites -from downward.reports.scatter import ScatterPlotReport - -import common_setup -from relativescatter import RelativeScatterPlotReport - - -SEARCH_REVS = ["issue547-base", "issue547-v2"] -SUITE = suites.suite_satisficing_with_ipc11() - -CONFIGS = { - 'astar_blind': [ - '--search', - 'astar(blind())'], - 'lazy_greedy_cg': [ - '--heuristic', - 'h=cg()', - '--search', - 'lazy_greedy(h, preferred=h)'], - 'lazy_greedy_cg_randomized': [ - '--heuristic', - 'h=cg()', - '--search', - 'lazy_greedy(h, preferred=h, randomize_successors=true)'], - 'eager_greedy_ff': [ - '--heuristic', - 'h=ff()', - '--search', - 'eager_greedy(h, preferred=h)'], -} - -exp = common_setup.IssueExperiment( - revisions=SEARCH_REVS, - configs=CONFIGS, - suite=SUITE, - ) -exp.add_search_parser("custom-parser.py") - -attributes = attributes=exp.DEFAULT_TABLE_ATTRIBUTES + ["successor_generator_time", "reopened_until_last_jump"] -exp.add_comparison_table_step(attributes=attributes) - -for conf in CONFIGS: - for attr in ("memory", "search_time"): - exp.add_report( - RelativeScatterPlotReport( - attributes=[attr], - get_category=lambda run1, run2: run1.get("domain"), - filter_config=["issue547-base-%s" % conf, "issue547-v2-%s" % conf] - ), - outfile='issue547_base_v2-sat_%s_%s.png' % (conf, attr) - ) - -exp() diff --git a/experiments/issue547/issue547.py b/experiments/issue547/issue547.py deleted file mode 100755 index fc338720ed..0000000000 --- a/experiments/issue547/issue547.py +++ /dev/null @@ -1,69 +0,0 @@ -#! /usr/bin/env python -# -*- coding: utf-8 -*- - -from downward import configs, suites -from downward.reports.scatter import ScatterPlotReport -# Cactus plots are experimental in lab, and require some changes to -# classes in lab, so we cannot add them es external files here. 
-try: - from downward.reports.cactus import CactusPlotReport - has_cactus_plot = True -except: - has_cactus_plot = False -from lab.experiment import Step -from lab.fetcher import Fetcher - -import common_setup -from relativescatter import RelativeScatterPlotReport - - -SEARCH_REVS = ["issue547-base", "issue547-v1"] -SUITE = suites.suite_optimal_with_ipc11() - -CONFIGS = { - 'astar_blind': [ - '--search', - 'astar(blind())'], - 'astar_ipdb': [ - '--search', - 'astar(ipdb())'], - 'astar_lmcut': [ - '--search', - 'astar(lmcut())'], - 'astar_pdb': [ - '--search', - 'astar(pdb())'], -} - -exp = common_setup.IssueExperiment( - revisions=SEARCH_REVS, - configs=CONFIGS, - suite=SUITE, - ) -exp.add_search_parser("custom-parser.py") -exp.add_step(Step('refetch', Fetcher(), exp.path, parsers=['custom-parser.py'])) - -attributes = attributes=exp.DEFAULT_TABLE_ATTRIBUTES + ["successor_generator_time", "reopened_until_last_jump"] -exp.add_comparison_table_step(attributes=attributes) - -for conf in CONFIGS: - for attr in ("memory", "search_time"): - exp.add_report( - RelativeScatterPlotReport( - attributes=[attr], - get_category=lambda run1, run2: run1.get("domain"), - filter_config=["issue547-base-%s" % conf, "issue547-v1-%s" % conf] - ), - outfile='issue547_base_v1_%s_%s.png' % (conf, attr) - ) - -if has_cactus_plot: - exp.add_report(CactusPlotReport(attributes=['successor_generator_time'], - filter_config_nick="astar_blind", - ylabel='successor_generator_time', - get_category=lambda run: run['config_nick'], - category_styles={'astar_blind': {'linestyle': '-', 'c':'red'}} - )) - - -exp() diff --git a/experiments/issue547/relativescatter.py b/experiments/issue547/relativescatter.py deleted file mode 100644 index 41a8385a87..0000000000 --- a/experiments/issue547/relativescatter.py +++ /dev/null @@ -1,126 +0,0 @@ -# -*- coding: utf-8 -*- -# -# downward uses the lab package to conduct experiments with the -# Fast Downward planning system. 
-# -# This program is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with this program. If not, see . - -from collections import defaultdict -import os - -from lab import tools - -from matplotlib import ticker - -from downward.reports.scatter import ScatterPlotReport -from downward.reports.plot import PlotReport, Matplotlib, MatplotlibPlot - - -# TODO: handle outliers - -# TODO: this is mostly copied from ScatterMatplotlib (scatter.py) -class RelativeScatterMatplotlib(Matplotlib): - @classmethod - def _plot(cls, report, axes, categories, styles): - # Display grid - axes.grid(b=True, linestyle='-', color='0.75') - - has_points = False - # Generate the scatter plots - for category, coords in sorted(categories.items()): - X, Y = zip(*coords) - axes.scatter(X, Y, s=42, label=category, **styles[category]) - if X and Y: - has_points = True - - if report.xscale == 'linear' or report.yscale == 'linear': - plot_size = report.missing_val * 1.01 - else: - plot_size = report.missing_val * 1.25 - - # make 5 ticks above and below 1 - yticks = [] - tick_step = report.ylim_top**(1/5.0) - for i in xrange(-5, 6): - yticks.append(tick_step**i) - axes.set_yticks(yticks) - axes.get_yaxis().set_major_formatter(ticker.ScalarFormatter()) - - axes.set_xlim(report.xlim_left or -1, report.xlim_right or plot_size) - axes.set_ylim(report.ylim_bottom or -1, report.ylim_top or plot_size) - - for axis in [axes.xaxis, axes.yaxis]: - MatplotlibPlot.change_axis_formatter(axis, - 
report.missing_val if report.show_missing else None) - return has_points - - - -class RelativeScatterPlotReport(ScatterPlotReport): - """ - Generate a scatter plot that shows how a specific attribute in two - configurations. The attribute value in config 1 is shown on the - x-axis and the relation to the value in config 2 on the y-axis. - """ - - def __init__(self, show_missing=True, get_category=None, **kwargs): - ScatterPlotReport.__init__(self, show_missing, get_category, **kwargs) - if self.output_format == 'tex': - raise "not supported" - else: - self.writer = RelativeScatterMatplotlib - - - def _fill_categories(self, runs): - # We discard the *runs* parameter. - # Map category names to value tuples - categories = defaultdict(list) - self.ylim_bottom = 2 - self.ylim_top = 0.5 - self.xlim_left = float("inf") - for (domain, problem), runs in self.problem_runs.items(): - if len(runs) != 2: - continue - run1, run2 = runs - assert (run1['config'] == self.configs[0] and - run2['config'] == self.configs[1]) - val1 = run1.get(self.attribute) - val2 = run2.get(self.attribute) - if val1 is None or val2 is None: - continue - category = self.get_category(run1, run2) - assert val1 > 0, (domain, problem, self.configs[0], val1) - assert val2 > 0, (domain, problem, self.configs[1], val2) - x = val1 - y = val2 / float(val1) - - categories[category].append((x, y)) - - self.ylim_top = max(self.ylim_top, y) - self.ylim_bottom = min(self.ylim_bottom, y) - self.xlim_left = min(self.xlim_left, x) - - # center around 1 - if self.ylim_bottom < 1: - self.ylim_top = max(self.ylim_top, 1 / float(self.ylim_bottom)) - if self.ylim_top > 1: - self.ylim_bottom = min(self.ylim_bottom, 1 / float(self.ylim_top)) - return categories - - def _set_scales(self, xscale, yscale): - # ScatterPlots use log-scaling on the x-axis by default. 
- default_xscale = 'log' - if self.attribute and self.attribute in self.LINEAR: - default_xscale = 'linear' - PlotReport._set_scales(self, xscale or default_xscale, 'log') diff --git a/experiments/issue548/base-v2.py b/experiments/issue548/base-v2.py deleted file mode 100755 index ba940c7e94..0000000000 --- a/experiments/issue548/base-v2.py +++ /dev/null @@ -1,77 +0,0 @@ -#! /usr/bin/env python -# -*- coding: utf-8 -*- - -from downward import suites -from lab.reports import Attribute, gm - -import common_setup - - -REVS = ["issue548-base", "issue548-v2"] -LIMITS = {"search_time": 1800} -SUITE = suites.suite_optimal_with_ipc11() - -B_CONFIGS = { - 'rl-b50k': ['--search', 'astar(merge_and_shrink(merge_strategy=merge_linear(variable_order=reverse_level),shrink_strategy=shrink_bisimulation(max_states=50000,threshold=1,greedy=false),label_reduction=label_reduction(before_shrinking=true,before_merging=false)))'], - 'cggl-b50k': ['--search', 'astar(merge_and_shrink(merge_strategy=merge_linear(variable_order=cg_goal_level),shrink_strategy=shrink_bisimulation(max_states=50000,threshold=1,greedy=false),label_reduction=label_reduction(before_shrinking=true,before_merging=false)))'], - 'dfp-b50k': ['--search', 'astar(merge_and_shrink(merge_strategy=merge_dfp,shrink_strategy=shrink_bisimulation(max_states=50000,threshold=1,greedy=false),label_reduction=label_reduction(before_shrinking=true,before_merging=false)))'], -} -G_CONFIGS = { - 'rl-ginf': ['--search', 'astar(merge_and_shrink(merge_strategy=merge_linear(variable_order=reverse_level),shrink_strategy=shrink_bisimulation(max_states=infinity,threshold=1,greedy=true),label_reduction=label_reduction(before_shrinking=true,before_merging=false)))'], - 'cggl-ginf': ['--search', 'astar(merge_and_shrink(merge_strategy=merge_linear(variable_order=cg_goal_level),shrink_strategy=shrink_bisimulation(max_states=infinity,threshold=1,greedy=true),label_reduction=label_reduction(before_shrinking=true,before_merging=false)))'], - 
'dfp-ginf': ['--search', 'astar(merge_and_shrink(merge_strategy=merge_dfp,shrink_strategy=shrink_bisimulation(max_states=infinity,threshold=1,greedy=true),label_reduction=label_reduction(before_shrinking=true,before_merging=false)))'], -} -F_CONFIGS = { - 'rl-f50k': ['--search', 'astar(merge_and_shrink(merge_strategy=merge_linear(variable_order=reverse_level),shrink_strategy=shrink_fh(max_states=50000),label_reduction=label_reduction(before_shrinking=false,before_merging=true)))'], - 'cggl-f50k': ['--search', 'astar(merge_and_shrink(merge_strategy=merge_linear(variable_order=cg_goal_level),shrink_strategy=shrink_fh(max_states=50000),label_reduction=label_reduction(before_shrinking=false,before_merging=true)))'], - 'dfp-f50k': ['--search', 'astar(merge_and_shrink(merge_strategy=merge_dfp,shrink_strategy=shrink_fh(max_states=50000),label_reduction=label_reduction(before_shrinking=false,before_merging=true)))'], -} -CONFIGS = dict(B_CONFIGS) -CONFIGS.update(G_CONFIGS) -CONFIGS.update(F_CONFIGS) - -exp = common_setup.IssueExperiment( - search_revisions=REVS, - configs=CONFIGS, - suite=SUITE, - limits=LIMITS, - test_suite=['depot:pfile1'], - processes=4, - email='silvan.sievers@unibas.ch', - ) -exp.add_search_parser('ms-parser.py') - -# planner outcome attributes -perfect_heuristic = Attribute('perfect_heuristic', absolute=True, min_wins=False) -proved_unsolvability = Attribute('proved_unsolvability', absolute=True, min_wins=False) -actual_search_time = Attribute('actual_search_time', absolute=False, min_wins=True, functions=[gm]) - -# m&s attributes -ms_construction_time = Attribute('ms_construction_time', absolute=False, min_wins=True, functions=[gm]) -ms_abstraction_constructed = Attribute('ms_abstraction_constructed', absolute=True, min_wins=False) -ms_final_size = Attribute('ms_final_size', absolute=False, min_wins=True) -ms_out_of_memory = Attribute('ms_out_of_memory', absolute=True, min_wins=True) -ms_out_of_time = Attribute('ms_out_of_time', absolute=True, 
min_wins=True) -search_out_of_memory = Attribute('search_out_of_memory', absolute=True, min_wins=True) -search_out_of_time = Attribute('search_out_of_time', absolute=True, min_wins=True) - -extra_attributes = [ - perfect_heuristic, - proved_unsolvability, - actual_search_time, - - ms_construction_time, - ms_abstraction_constructed, - ms_final_size, - ms_out_of_memory, - ms_out_of_time, - search_out_of_memory, - search_out_of_time, - actual_search_time, -] -attributes = exp.DEFAULT_TABLE_ATTRIBUTES -attributes.extend(extra_attributes) - -exp.add_comparison_table_step(attributes=attributes) - -exp() diff --git a/experiments/issue548/common_setup.py b/experiments/issue548/common_setup.py deleted file mode 100644 index 7dc18efa07..0000000000 --- a/experiments/issue548/common_setup.py +++ /dev/null @@ -1,364 +0,0 @@ -# -*- coding: utf-8 -*- - -import itertools -import os -import platform -import subprocess -import sys - -from lab.environments import LocalEnvironment, MaiaEnvironment -from lab.experiment import ARGPARSER -from lab.steps import Step - -from downward.experiments import DownwardExperiment, _get_rev_nick -from downward.checkouts import Translator, Preprocessor, Planner -from downward.reports.absolute import AbsoluteReport -from downward.reports.compare import CompareRevisionsReport -from downward.reports.scatter import ScatterPlotReport - - -def parse_args(): - ARGPARSER.add_argument( - "--test", - choices=["yes", "no", "auto"], - default="auto", - dest="test_run", - help="test experiment locally on a small suite if --test=yes or " - "--test=auto and we are not on a cluster") - return ARGPARSER.parse_args() - -ARGS = parse_args() - - -def get_script(): - """Get file name of main script.""" - import __main__ - return __main__.__file__ - - -def get_script_dir(): - """Get directory of main script. 
- - Usually a relative directory (depends on how it was called by the user.)""" - return os.path.dirname(get_script()) - - -def get_experiment_name(): - """Get name for experiment. - - Derived from the absolute filename of the main script, e.g. - "/ham/spam/eggs.py" => "spam-eggs".""" - script = os.path.abspath(get_script()) - script_dir = os.path.basename(os.path.dirname(script)) - script_base = os.path.splitext(os.path.basename(script))[0] - return "%s-%s" % (script_dir, script_base) - - -def get_data_dir(): - """Get data dir for the experiment. - - This is the subdirectory "data" of the directory containing - the main script.""" - return os.path.join(get_script_dir(), "data", get_experiment_name()) - - -def get_repo_base(): - """Get base directory of the repository, as an absolute path. - - Search upwards in the directory tree from the main script until a - directory with a subdirectory named ".hg" is found. - - Abort if the repo base cannot be found.""" - path = os.path.abspath(get_script_dir()) - while os.path.dirname(path) != path: - if os.path.exists(os.path.join(path, ".hg")): - return path - path = os.path.dirname(path) - sys.exit("repo base could not be found") - - -def is_running_on_cluster(): - node = platform.node() - return ("cluster" in node or - node.startswith("gkigrid") or - node in ["habakuk", "turtur"]) - - -def is_test_run(): - return ARGS.test_run == "yes" or (ARGS.test_run == "auto" and - not is_running_on_cluster()) - - -class IssueExperiment(DownwardExperiment): - """Wrapper for DownwardExperiment with a few convenience features.""" - - DEFAULT_TEST_SUITE = "gripper:prob01.pddl" - - DEFAULT_TABLE_ATTRIBUTES = [ - "cost", - "coverage", - "error", - "evaluations", - "expansions", - "expansions_until_last_jump", - "generated", - "memory", - "quality", - "run_dir", - "score_evaluations", - "score_expansions", - "score_generated", - "score_memory", - "score_search_time", - "score_total_time", - "search_time", - "total_time", - ] - - 
DEFAULT_SCATTER_PLOT_ATTRIBUTES = [ - "evaluations", - "expansions", - "expansions_until_last_jump", - "initial_h_value", - "memory", - "search_time", - "total_time", - ] - - PORTFOLIO_ATTRIBUTES = [ - "cost", - "coverage", - "plan_length", - ] - - def __init__(self, configs, suite, grid_priority=None, path=None, - repo=None, revisions=None, search_revisions=None, - test_suite=None, email=None, processes=1, **kwargs): - """Create a DownwardExperiment with some convenience features. - - *configs* must be a non-empty dict of {nick: cmdline} pairs - that sets the planner configurations to test. :: - - IssueExperiment(configs={ - "lmcut": ["--search", "astar(lmcut())"], - "ipdb": ["--search", "astar(ipdb())"]}) - - *suite* sets the benchmarks for the experiment. It must be a - single string or a list of strings specifying domains or - tasks. The downward.suites module has many predefined - suites. :: - - IssueExperiment(suite=["grid", "gripper:prob01.pddl"]) - - from downward import suites - IssueExperiment(suite=suites.suite_all()) - IssueExperiment(suite=suites.suite_satisficing_with_ipc11()) - IssueExperiment(suite=suites.suite_optimal()) - - Use *grid_priority* to set the job priority for cluster - experiments. It must be in the range [-1023, 0] where 0 is the - highest priority. By default the priority is 0. :: - - IssueExperiment(grid_priority=-500) - - If *path* is specified, it must be the path to where the - experiment should be built (e.g. - /home/john/experiments/issue123/exp01/). If omitted, the - experiment path is derived automatically from the main - script's filename. Example:: - - script = experiments/issue123/exp01.py --> - path = experiments/issue123/data/issue123-exp01/ - - If *repo* is specified, it must be the path to the root of a - local Fast Downward repository. If omitted, the repository - is derived automatically from the main script's path. 
Example:: - - script = /path/to/fd-repo/experiments/issue123/exp01.py --> - repo = /path/to/fd-repo - - If *revisions* is specified, it should be a non-empty - list of revisions, which specify which planner versions to use - in the experiment. The same versions are used for translator, - preprocessor and search. :: - - IssueExperiment(revisions=["issue123", "4b3d581643"]) - - If *search_revisions* is specified, it should be a non-empty - list of revisions, which specify which search component - versions to use in the experiment. All runs use the - translator and preprocessor component of the first - revision. :: - - IssueExperiment(search_revisions=["default", "issue123"]) - - If you really need to specify the (translator, preprocessor, - planner) triples manually, use the *combinations* parameter - from the base class (might be deprecated soon). The options - *revisions*, *search_revisions* and *combinations* can be - freely mixed, but at least one of them must be given. - - Specify *test_suite* to set the benchmarks for experiment test - runs. By default the first gripper task is used. 
- - IssueExperiment(test_suite=["depot:pfile1", "tpp:p01.pddl"]) - - """ - - if is_test_run(): - kwargs["environment"] = LocalEnvironment(processes=processes) - suite = test_suite or self.DEFAULT_TEST_SUITE - elif "environment" not in kwargs: - kwargs["environment"] = MaiaEnvironment(priority=grid_priority, - email=email) - - if path is None: - path = get_data_dir() - - if repo is None: - repo = get_repo_base() - - kwargs.setdefault("combinations", []) - - if not any([revisions, search_revisions, kwargs["combinations"]]): - raise ValueError('At least one of "revisions", "search_revisions" ' - 'or "combinations" must be given') - - if revisions: - kwargs["combinations"].extend([ - (Translator(repo, rev), - Preprocessor(repo, rev), - Planner(repo, rev)) - for rev in revisions]) - - if search_revisions: - base_rev = search_revisions[0] - # Use the same nick for all parts to get short revision nick. - kwargs["combinations"].extend([ - (Translator(repo, base_rev, nick=rev), - Preprocessor(repo, base_rev, nick=rev), - Planner(repo, rev, nick=rev)) - for rev in search_revisions]) - - DownwardExperiment.__init__(self, path=path, repo=repo, **kwargs) - - self._config_nicks = [] - for nick, config in configs.items(): - self.add_config(nick, config) - - self.add_suite(suite) - - @property - def revision_nicks(self): - # TODO: Once the add_algorithm() API is available we should get - # rid of the call to _get_rev_nick() and avoid inspecting the - # list of combinations by setting and saving the algorithm nicks. - return [_get_rev_nick(*combo) for combo in self.combinations] - - def add_config(self, nick, config, timeout=None): - DownwardExperiment.add_config(self, nick, config, timeout=timeout) - self._config_nicks.append(nick) - - def add_absolute_report_step(self, **kwargs): - """Add step that makes an absolute report. - - Absolute reports are useful for experiments that don't - compare revisions. - - The report is written to the experiment evaluation directory. 
- - All *kwargs* will be passed to the AbsoluteReport class. If - the keyword argument *attributes* is not specified, a - default list of attributes is used. :: - - exp.add_absolute_report_step(attributes=["coverage"]) - - """ - kwargs.setdefault("attributes", self.DEFAULT_TABLE_ATTRIBUTES) - report = AbsoluteReport(**kwargs) - outfile = get_experiment_name() + "." + report.output_format - self.add_report(report, outfile=outfile) - self.add_step(Step('publish-absolute-report', call, ['publish', outfile])) - - def add_comparison_table_step(self, **kwargs): - """Add a step that makes pairwise revision comparisons. - - Create comparative reports for all pairs of Fast Downward - revision triples. Each report pairs up the runs of the same - config and lists the two absolute attribute values and their - difference for all attributes in kwargs["attributes"]. - - All *kwargs* will be passed to the CompareRevisionsReport - class. If the keyword argument *attributes* is not - specified, a default list of attributes is used. :: - - exp.add_comparison_table_step(attributes=["coverage"]) - - """ - kwargs.setdefault("attributes", self.DEFAULT_TABLE_ATTRIBUTES) - - def make_comparison_tables(): - for rev1, rev2 in itertools.combinations(self.revision_nicks, 2): - report = CompareRevisionsReport(rev1, rev2, **kwargs) - outfile = os.path.join(self.eval_dir, - "%s-%s-%s-compare.html" % - (self.name, rev1, rev2)) - report(self.eval_dir, outfile) - - self.add_step(Step("make-comparison-tables", make_comparison_tables)) - - def publish_comparison_tables(): - for rev1, rev2 in itertools.combinations(self.revision_nicks, 2): - outfile = os.path.join(self.eval_dir, - "%s-%s-%s-compare.html" % - (self.name, rev1, rev2)) - subprocess.call(['publish', outfile]) - - self.add_step(Step('publish-comparison-reports', publish_comparison_tables)) - - def add_scatter_plot_step(self, attributes=None): - """Add a step that creates scatter plots for all revision pairs. 
- - Create a scatter plot for each combination of attribute, - configuration and revision pair. If *attributes* is not - specified, a list of common scatter plot attributes is used. - For portfolios all attributes except "cost", "coverage" and - "plan_length" will be ignored. :: - - exp.add_scatter_plot_step(attributes=["expansions"]) - - """ - if attributes is None: - attributes = self.DEFAULT_SCATTER_PLOT_ATTRIBUTES - scatter_dir = os.path.join(self.eval_dir, "scatter") - - def is_portfolio(config_nick): - return "fdss" in config_nick - - def make_scatter_plot(config_nick, rev1, rev2, attribute): - name = "-".join([self.name, rev1, rev2, attribute, config_nick]) - print "Make scatter plot for", name - algo1 = "%s-%s" % (rev1, config_nick) - algo2 = "%s-%s" % (rev2, config_nick) - report = ScatterPlotReport( - filter_config=[algo1, algo2], - attributes=[attribute], - get_category=lambda run1, run2: run1["domain"], - legend_location=(1.3, 0.5)) - report(self.eval_dir, - os.path.join(scatter_dir, rev1 + "-" + rev2, name)) - - def make_scatter_plots(): - for config_nick in self._config_nicks: - if is_portfolio(config_nick): - valid_attributes = [ - attr for attr in attributes - if attr in self.PORTFOLIO_ATTRIBUTES] - else: - valid_attributes = attributes - for rev1, rev2 in itertools.combinations( - self.revision_nicks, 2): - for attribute in valid_attributes: - make_scatter_plot(config_nick, rev1, rev2, attribute) - - self.add_step(Step("make-scatter-plots", make_scatter_plots)) diff --git a/experiments/issue548/mas-refetch.py b/experiments/issue548/mas-refetch.py deleted file mode 100755 index bc19438b21..0000000000 --- a/experiments/issue548/mas-refetch.py +++ /dev/null @@ -1,77 +0,0 @@ -#! 
/usr/bin/env python -# -*- coding: utf-8 -*- - -from downward import suites -from lab.reports import Attribute, gm - -import common_setup - - -REVS = ["issue548-base", "issue548-v1"] -LIMITS = {"search_time": 1800} -SUITE = suites.suite_optimal_with_ipc11() - -B_CONFIGS = { - 'rl-b50k': ['--search', 'astar(merge_and_shrink(merge_strategy=merge_linear(variable_order=reverse_level),shrink_strategy=shrink_bisimulation(max_states=50000,threshold=1,greedy=false),label_reduction=label_reduction(before_shrinking=true,before_merging=false)))'], - 'cggl-b50k': ['--search', 'astar(merge_and_shrink(merge_strategy=merge_linear(variable_order=cg_goal_level),shrink_strategy=shrink_bisimulation(max_states=50000,threshold=1,greedy=false),label_reduction=label_reduction(before_shrinking=true,before_merging=false)))'], - 'dfp-b50k': ['--search', 'astar(merge_and_shrink(merge_strategy=merge_dfp,shrink_strategy=shrink_bisimulation(max_states=50000,threshold=1,greedy=false),label_reduction=label_reduction(before_shrinking=true,before_merging=false)))'], -} -G_CONFIGS = { - 'rl-ginf': ['--search', 'astar(merge_and_shrink(merge_strategy=merge_linear(variable_order=reverse_level),shrink_strategy=shrink_bisimulation(max_states=infinity,threshold=1,greedy=true),label_reduction=label_reduction(before_shrinking=true,before_merging=false)))'], - 'cggl-ginf': ['--search', 'astar(merge_and_shrink(merge_strategy=merge_linear(variable_order=cg_goal_level),shrink_strategy=shrink_bisimulation(max_states=infinity,threshold=1,greedy=true),label_reduction=label_reduction(before_shrinking=true,before_merging=false)))'], - 'dfp-ginf': ['--search', 'astar(merge_and_shrink(merge_strategy=merge_dfp,shrink_strategy=shrink_bisimulation(max_states=infinity,threshold=1,greedy=true),label_reduction=label_reduction(before_shrinking=true,before_merging=false)))'], -} -F_CONFIGS = { - 'rl-f50k': ['--search', 
'astar(merge_and_shrink(merge_strategy=merge_linear(variable_order=reverse_level),shrink_strategy=shrink_fh(max_states=50000),label_reduction=label_reduction(before_shrinking=false,before_merging=true)))'], - 'cggl-f50k': ['--search', 'astar(merge_and_shrink(merge_strategy=merge_linear(variable_order=cg_goal_level),shrink_strategy=shrink_fh(max_states=50000),label_reduction=label_reduction(before_shrinking=false,before_merging=true)))'], - 'dfp-f50k': ['--search', 'astar(merge_and_shrink(merge_strategy=merge_dfp,shrink_strategy=shrink_fh(max_states=50000),label_reduction=label_reduction(before_shrinking=false,before_merging=true)))'], -} -CONFIGS = dict(B_CONFIGS) -CONFIGS.update(G_CONFIGS) -CONFIGS.update(F_CONFIGS) - -exp = common_setup.IssueExperiment( - search_revisions=REVS, - configs=CONFIGS, - suite=SUITE, - limits=LIMITS, - test_suite=['depot:pfile1'], - processes=4, - email='silvan.sievers@unibas.ch', - ) - -# planner outcome attributes -perfect_heuristic = Attribute('perfect_heuristic', absolute=True, min_wins=False) -proved_unsolvability = Attribute('proved_unsolvability', absolute=True, min_wins=False) -actual_search_time = Attribute('actual_search_time', absolute=False, min_wins=True, functions=[gm]) - -# m&s attributes -ms_construction_time = Attribute('ms_construction_time', absolute=False, min_wins=True, functions=[gm]) -ms_abstraction_constructed = Attribute('ms_abstraction_constructed', absolute=True, min_wins=False) -ms_final_size = Attribute('ms_final_size', absolute=False, min_wins=True) -ms_out_of_memory = Attribute('ms_out_of_memory', absolute=True, min_wins=True) -ms_out_of_time = Attribute('ms_out_of_time', absolute=True, min_wins=True) -search_out_of_memory = Attribute('search_out_of_memory', absolute=True, min_wins=True) -search_out_of_time = Attribute('search_out_of_time', absolute=True, min_wins=True) - -extra_attributes = [ - perfect_heuristic, - proved_unsolvability, - actual_search_time, - - ms_construction_time, - 
ms_abstraction_constructed, - ms_final_size, - ms_out_of_memory, - ms_out_of_time, - search_out_of_memory, - search_out_of_time, - actual_search_time, -] -attributes = exp.DEFAULT_TABLE_ATTRIBUTES -attributes.extend(extra_attributes) - -exp.add_fetcher('data/issue548-mas', parsers='ms-parser.py') -exp.add_comparison_table_step(attributes=attributes) - -exp() diff --git a/experiments/issue548/mas.py b/experiments/issue548/mas.py deleted file mode 100755 index 9c33948a84..0000000000 --- a/experiments/issue548/mas.py +++ /dev/null @@ -1,43 +0,0 @@ -#! /usr/bin/env python -# -*- coding: utf-8 -*- - -from downward import suites - -import common_setup - - -REVS = ["issue548-base", "issue548-v1"] -LIMITS = {"search_time": 1800} -SUITE = suites.suite_optimal_with_ipc11() - -B_CONFIGS = { - 'rl-b50k': ['--search', 'astar(merge_and_shrink(merge_strategy=merge_linear(variable_order=reverse_level),shrink_strategy=shrink_bisimulation(max_states=50000,threshold=1,greedy=false),label_reduction=label_reduction(before_shrinking=true,before_merging=false)))'], - 'cggl-b50k': ['--search', 'astar(merge_and_shrink(merge_strategy=merge_linear(variable_order=cg_goal_level),shrink_strategy=shrink_bisimulation(max_states=50000,threshold=1,greedy=false),label_reduction=label_reduction(before_shrinking=true,before_merging=false)))'], - 'dfp-b50k': ['--search', 'astar(merge_and_shrink(merge_strategy=merge_dfp,shrink_strategy=shrink_bisimulation(max_states=50000,threshold=1,greedy=false),label_reduction=label_reduction(before_shrinking=true,before_merging=false)))'], -} -G_CONFIGS = { - 'rl-ginf': ['--search', 'astar(merge_and_shrink(merge_strategy=merge_linear(variable_order=reverse_level),shrink_strategy=shrink_bisimulation(max_states=infinity,threshold=1,greedy=true),label_reduction=label_reduction(before_shrinking=true,before_merging=false)))'], - 'cggl-ginf': ['--search', 
'astar(merge_and_shrink(merge_strategy=merge_linear(variable_order=cg_goal_level),shrink_strategy=shrink_bisimulation(max_states=infinity,threshold=1,greedy=true),label_reduction=label_reduction(before_shrinking=true,before_merging=false)))'], - 'dfp-ginf': ['--search', 'astar(merge_and_shrink(merge_strategy=merge_dfp,shrink_strategy=shrink_bisimulation(max_states=infinity,threshold=1,greedy=true),label_reduction=label_reduction(before_shrinking=true,before_merging=false)))'], -} -F_CONFIGS = { - 'rl-f50k': ['--search', 'astar(merge_and_shrink(merge_strategy=merge_linear(variable_order=reverse_level),shrink_strategy=shrink_fh(max_states=50000),label_reduction=label_reduction(before_shrinking=false,before_merging=true)))'], - 'cggl-f50k': ['--search', 'astar(merge_and_shrink(merge_strategy=merge_linear(variable_order=cg_goal_level),shrink_strategy=shrink_fh(max_states=50000),label_reduction=label_reduction(before_shrinking=false,before_merging=true)))'], - 'dfp-f50k': ['--search', 'astar(merge_and_shrink(merge_strategy=merge_dfp,shrink_strategy=shrink_fh(max_states=50000),label_reduction=label_reduction(before_shrinking=false,before_merging=true)))'], -} -CONFIGS = dict(B_CONFIGS) -CONFIGS.update(G_CONFIGS) -CONFIGS.update(F_CONFIGS) - -exp = common_setup.IssueExperiment( - search_revisions=REVS, - configs=CONFIGS, - suite=SUITE, - limits=LIMITS, - test_suite=['depot:pfile1'], - processes=4, - email='silvan.sievers@unibas.ch', - ) -exp.add_comparison_table_step() - -exp() diff --git a/experiments/issue548/ms-parser.py b/experiments/issue548/ms-parser.py deleted file mode 100755 index d817543b1c..0000000000 --- a/experiments/issue548/ms-parser.py +++ /dev/null @@ -1,76 +0,0 @@ -#! 
/usr/bin/env python - -from lab.parser import Parser - -parser = Parser() -parser.add_pattern('initial_h_value', 'initial h value: (\d+)', required=False, type=int) -parser.add_pattern('ms_final_size', 'Final transition system size: (\d+)', required=False, type=int) -parser.add_pattern('ms_construction_time', 'Done initializing merge-and-shrink heuristic \[(.+)s\]', required=False, type=float) - -def check_ms_constructed(content, props): - ms_construction_time = props.get('ms_construction_time') - abstraction_constructed = False - if ms_construction_time is not None: - abstraction_constructed = True - props['ms_abstraction_constructed'] = abstraction_constructed - -parser.add_function(check_ms_constructed) - -def check_proved_unsolvability(content, props): - proved_unsolvability = False - if props['coverage'] == 0: - for line in content.splitlines(): - if line == 'Completely explored state space -- no solution!': - proved_unsolvability = True - break - props['proved_unsolvability'] = proved_unsolvability - -parser.add_function(check_proved_unsolvability) - -def check_planner_exit_reason(content, props): - ms_abstraction_constructed = props.get('ms_abstraction_constructed') - error = props.get('error') - if error != 'none' and error != 'timeout' and error != 'out-of-memory': - print 'error: %s' % error - return - - # Check whether merge-and-shrink computation or search ran out of - # time or memory. 
- ms_out_of_time = False - ms_out_of_memory = False - search_out_of_time = False - search_out_of_memory = False - if ms_abstraction_constructed == False: - if error == 'timeout': - ms_out_of_time = True - elif error == 'out-of-memory': - ms_out_of_memory = True - elif ms_abstraction_constructed == True: - if error == 'timeout': - search_out_of_time = True - elif error == 'out-of-memory': - search_out_of_memory = True - props['ms_out_of_time'] = ms_out_of_time - props['ms_out_of_memory'] = ms_out_of_memory - props['search_out_of_time'] = search_out_of_time - props['search_out_of_memory'] = search_out_of_memory - - # Compute actual search time - if ms_abstraction_constructed == True and props.get('search_time') is not None: - difference = props.get('search_time') - props.get('ms_construction_time') - props['actual_search_time'] = difference - -parser.add_function(check_planner_exit_reason) - -def check_perfect_heuristic(content, props): - plan_length = props.get('plan_length') - expansions = props.get('expansions') - if plan_length != None: - perfect_heuristic = False - if plan_length + 1 == expansions: - perfect_heuristic = True - props['perfect_heuristic'] = perfect_heuristic - -parser.add_function(check_perfect_heuristic) - -parser.parse() diff --git a/experiments/issue549/common_setup.py b/experiments/issue549/common_setup.py deleted file mode 100644 index c628c8b6c5..0000000000 --- a/experiments/issue549/common_setup.py +++ /dev/null @@ -1,300 +0,0 @@ -# -*- coding: utf-8 -*- - -import itertools -import os -import platform -import subprocess -import sys - -from lab.environments import LocalEnvironment, MaiaEnvironment -from lab.experiment import ARGPARSER -from lab.steps import Step - -from downward.experiments.fast_downward_experiment import FastDownwardExperiment -from downward.reports.absolute import AbsoluteReport -from downward.reports.compare import CompareRevisionsReport -from downward.reports.scatter import ScatterPlotReport - -def parse_args(): - 
ARGPARSER.add_argument( - "--test", - choices=["yes", "no", "auto"], - default="auto", - dest="test_run", - help="test experiment locally on a small suite if --test=yes or " - "--test=auto and we are not on a cluster") - return ARGPARSER.parse_args() - -ARGS = parse_args() - -def get_script(): - """Get file name of main script.""" - import __main__ - return __main__.__file__ - - -def get_script_dir(): - """Get directory of main script. - - Usually a relative directory (depends on how it was called by the user.)""" - return os.path.dirname(get_script()) - - -def get_repo_base(): - """Get base directory of the repository, as an absolute path. - - Search upwards in the directory tree from the main script until a - directory with a subdirectory named ".hg" is found. - - Abort if the repo base cannot be found.""" - path = os.path.abspath(get_script_dir()) - while os.path.dirname(path) != path: - if os.path.exists(os.path.join(path, ".hg")): - return path - path = os.path.dirname(path) - sys.exit("repo base could not be found") - - -def is_running_on_cluster(): - node = platform.node() - return ("cluster" in node or - node.startswith("gkigrid") or - node in ["habakuk", "turtur"]) - - -def is_test_run(): - return ARGS.test_run == "yes" or (ARGS.test_run == "auto" and - not is_running_on_cluster()) - - -class IssueExperiment(FastDownwardExperiment): - """Wrapper for FastDownwardExperiment with a few convenience features.""" - - DEFAULT_TEST_SUITE = "gripper:prob01.pddl" - - DEFAULT_TABLE_ATTRIBUTES = [ - "cost", - "coverage", - "error", - "evaluations", - "expansions", - "expansions_until_last_jump", - "generated", - "memory", - "quality", - "run_dir", - "score_evaluations", - "score_expansions", - "score_generated", - "score_memory", - "score_search_time", - "score_total_time", - "search_time", - "total_time", - ] - - DEFAULT_SCATTER_PLOT_ATTRIBUTES = [ - "evaluations", - "expansions", - "expansions_until_last_jump", - "initial_h_value", - "memory", - "search_time", - 
"total_time", - ] - - PORTFOLIO_ATTRIBUTES = [ - "cost", - "coverage", - "plan_length", - ] - - def __init__(self, configs, revisions, suite, build_options=None, - driver_options=None, grid_priority=None, - test_suite=None, email=None, processes=1, **kwargs): - """Create an FastDownwardExperiment with some convenience features. - All configs will be run on all revisions. Inherited options - *path*, *environment* and *cache_dir* from FastDownwardExperiment - are not supported and will be automatically set. - - *configs* must be a non-empty dict of {nick: cmdline} pairs - that sets the planner configurations to test. nick will - automatically get the revision prepended, e.g. - 'issue123-base-':: - - IssueExperiment(configs={ - "lmcut": ["--search", "astar(lmcut())"], - "ipdb": ["--search", "astar(ipdb())"]}) - - *revisions* must be a non-empty list of revisions, which - specify which planner versions to use in the experiment. - The same versions are used for translator, preprocessor - and search. :: - - IssueExperiment(revisions=["issue123", "4b3d581643"]) - - *suite* sets the benchmarks for the experiment. It must be a - single string or a list of strings specifying domains or - tasks. The downward.suites module has many predefined - suites. :: - - IssueExperiment(suite=["grid", "gripper:prob01.pddl"]) - - from downward import suites - IssueExperiment(suite=suites.suite_all()) - IssueExperiment(suite=suites.suite_satisficing_with_ipc11()) - IssueExperiment(suite=suites.suite_optimal()) - - Use *grid_priority* to set the job priority for cluster - experiments. It must be in the range [-1023, 0] where 0 is the - highest priority. By default the priority is 0. :: - - IssueExperiment(grid_priority=-500) - - Specify *test_suite* to set the benchmarks for experiment test - runs. By default the first gripper task is used. 
- - IssueExperiment(test_suite=["depot:pfile1", "tpp:p01.pddl"]) - - """ - - if is_test_run(): - environment = LocalEnvironment(processes=processes) - suite = test_suite or self.DEFAULT_TEST_SUITE - elif "environment" not in kwargs: - environment = MaiaEnvironment(priority=grid_priority, - email=email) - - FastDownwardExperiment.__init__(self, environment=environment, - **kwargs) - - # Automatically deduce the downward repository from the file - repo = get_repo_base() - self.algorithm_nicks = [] - self.revisions = revisions - for nick, cmdline in configs.items(): - for rev in revisions: - algo_nick = '%s-%s' % (rev, nick) - self.add_algorithm(algo_nick, repo, rev, cmdline, - build_options, driver_options) - self.algorithm_nicks.append(algo_nick) - - benchmarks_dir = os.path.join(repo, 'benchmarks') - self.add_suite(benchmarks_dir, suite) - self.search_parsers = [] - - def add_absolute_report_step(self, **kwargs): - """Add step that makes an absolute report. - - Absolute reports are useful for experiments that don't - compare revisions. - - The report is written to the experiment evaluation directory. - - All *kwargs* will be passed to the AbsoluteReport class. If - the keyword argument *attributes* is not specified, a - default list of attributes is used. :: - - exp.add_absolute_report_step(attributes=["coverage"]) - - """ - kwargs.setdefault("attributes", self.DEFAULT_TABLE_ATTRIBUTES) - report = AbsoluteReport(**kwargs) - # oufile is of the form --...-. - outfile = '' - for rev in self.revisions: - outfile += rev - outfile += '-' - outfile = outfile[:len(outfile)-1] - outfile += '.' - outfile += report.output_format - outfile = os.path.join(self.eval_dir, outfile) - self.add_report(report, outfile=outfile) - self.add_step(Step('publish-absolute-report', subprocess.call, ['publish', outfile])) - - def add_comparison_table_step(self, **kwargs): - """Add a step that makes pairwise revision comparisons. 
- - Create comparative reports for all pairs of Fast Downward - revision triples. Each report pairs up the runs of the same - config and lists the two absolute attribute values and their - difference for all attributes in kwargs["attributes"]. - - All *kwargs* will be passed to the CompareRevisionsReport - class. If the keyword argument *attributes* is not - specified, a default list of attributes is used. :: - - exp.add_comparison_table_step(attributes=["coverage"]) - - """ - kwargs.setdefault("attributes", self.DEFAULT_TABLE_ATTRIBUTES) - - def make_comparison_tables(): - for rev1, rev2 in itertools.combinations(self.revisions, 2): - report = CompareRevisionsReport(rev1, rev2, **kwargs) - outfile = os.path.join(self.eval_dir, - "%s-%s-compare.html" % - (rev1, rev2)) - report(self.eval_dir, outfile) - - self.add_step(Step("make-comparison-tables", make_comparison_tables)) - - def publish_comparison_tables(): - for rev1, rev2 in itertools.combinations(self.revisions, 2): - outfile = os.path.join(self.eval_dir, - "%s-%s-compare.html" % - (rev1, rev2)) - subprocess.call(['publish', outfile]) - - self.add_step(Step('publish-comparison-reports', publish_comparison_tables)) - - # TODO: this is copied from the old common_setup, but not tested - # with the new FastDownwardExperiment class! - def add_scatter_plot_step(self, attributes=None): - print 'This has not been tested with the new FastDownwardExperiment class!' - exit(0) - """Add a step that creates scatter plots for all revision pairs. - - Create a scatter plot for each combination of attribute, - configuration and revision pair. If *attributes* is not - specified, a list of common scatter plot attributes is used. - For portfolios all attributes except "cost", "coverage" and - "plan_length" will be ignored. 
:: - - exp.add_scatter_plot_step(attributes=["expansions"]) - - """ - if attributes is None: - attributes = self.DEFAULT_SCATTER_PLOT_ATTRIBUTES - scatter_dir = os.path.join(self.eval_dir, "scatter") - - def is_portfolio(config_nick): - return "fdss" in config_nick - - def make_scatter_plot(config_nick, rev1, rev2, attribute): - name = "-".join([self.name, rev1, rev2, attribute, config_nick]) - print "Make scatter plot for", name - algo1 = "%s-%s" % (rev1, config_nick) - algo2 = "%s-%s" % (rev2, config_nick) - report = ScatterPlotReport( - filter_config=[algo1, algo2], - attributes=[attribute], - get_category=lambda run1, run2: run1["domain"], - legend_location=(1.3, 0.5)) - report(self.eval_dir, - os.path.join(scatter_dir, rev1 + "-" + rev2, name)) - - def make_scatter_plots(): - for config_nick in self._config_nicks: - if is_portfolio(config_nick): - valid_attributes = [ - attr for attr in attributes - if attr in self.PORTFOLIO_ATTRIBUTES] - else: - valid_attributes = attributes - for rev1, rev2 in itertools.combinations( - self.revision_nicks, 2): - for attribute in valid_attributes: - make_scatter_plot(config_nick, rev1, rev2, attribute) - - self.add_step(Step("make-scatter-plots", make_scatter_plots)) diff --git a/experiments/issue549/issue549-v1.py b/experiments/issue549/issue549-v1.py deleted file mode 100755 index 54943371ed..0000000000 --- a/experiments/issue549/issue549-v1.py +++ /dev/null @@ -1,6 +0,0 @@ -#! /usr/bin/env python -# -*- coding: utf-8 -*- - -from main import main - -main(revisions=["issue549-base", "issue549-v1"]) diff --git a/experiments/issue549/issue549-v3.py b/experiments/issue549/issue549-v3.py deleted file mode 100755 index afad278c6a..0000000000 --- a/experiments/issue549/issue549-v3.py +++ /dev/null @@ -1,6 +0,0 @@ -#! 
/usr/bin/env python -# -*- coding: utf-8 -*- - -from main import main - -main(revisions=["issue549-base", "issue549-v3"]) diff --git a/experiments/issue549/main.py b/experiments/issue549/main.py deleted file mode 100644 index 2d0162c795..0000000000 --- a/experiments/issue549/main.py +++ /dev/null @@ -1,36 +0,0 @@ -#! /usr/bin/env python -# -*- coding: utf-8 -*- - -from downward import suites -from lab.reports import Attribute, gm - -import common_setup - - -def main(revisions=None): - SUITE = suites.suite_satisficing_with_ipc11() - - CONFIGS = { - 'cea': ['--search', 'eager_greedy(cea())'], - 'cg': ['--search', 'eager_greedy(cg())'], - 'lmcount': ['--search', 'eager_greedy(lmcount(lm_rhw()))'], - } - - exp = common_setup.IssueExperiment( - revisions=revisions, - configs=CONFIGS, - suite=SUITE, - test_suite=['depot:pfile1'], - processes=4, - email='gabriele.roeger@unibas.ch', - grid_priority=-10, - ) - - attributes = exp.DEFAULT_TABLE_ATTRIBUTES - attributes.append('landmarks') - attributes.append('landmarks_generation_time') - - - exp.add_comparison_table_step(attributes=attributes) - - exp() diff --git a/experiments/issue551/common_setup.py b/experiments/issue551/common_setup.py deleted file mode 100644 index 4dff4aacfd..0000000000 --- a/experiments/issue551/common_setup.py +++ /dev/null @@ -1,335 +0,0 @@ -# -*- coding: utf-8 -*- - -import itertools -import os -import platform -import subprocess -import sys - -from lab.experiment import ARGPARSER -from lab.steps import Step -from lab import tools - -from downward.experiment import FastDownwardExperiment -from downward.reports.absolute import AbsoluteReport -from downward.reports.compare import CompareConfigsReport -from downward.reports.scatter import ScatterPlotReport - -from relativescatter import RelativeScatterPlotReport - - -def parse_args(): - ARGPARSER.add_argument( - "--test", - choices=["yes", "no", "auto"], - default="auto", - dest="test_run", - help="test experiment locally on a small suite if 
--test=yes or " - "--test=auto and we are not on a cluster") - return ARGPARSER.parse_args() - -ARGS = parse_args() - - -def get_script(): - """Get file name of main script.""" - return tools.get_script_path() - - -def get_script_dir(): - """Get directory of main script. - - Usually a relative directory (depends on how it was called by the user.)""" - return os.path.dirname(get_script()) - - -def get_experiment_name(): - """Get name for experiment. - - Derived from the absolute filename of the main script, e.g. - "/ham/spam/eggs.py" => "spam-eggs".""" - script = os.path.abspath(get_script()) - script_dir = os.path.basename(os.path.dirname(script)) - script_base = os.path.splitext(os.path.basename(script))[0] - return "%s-%s" % (script_dir, script_base) - - -def get_data_dir(): - """Get data dir for the experiment. - - This is the subdirectory "data" of the directory containing - the main script.""" - return os.path.join(get_script_dir(), "data", get_experiment_name()) - - -def get_repo_base(): - """Get base directory of the repository, as an absolute path. - - Search upwards in the directory tree from the main script until a - directory with a subdirectory named ".hg" is found. - - Abort if the repo base cannot be found.""" - path = os.path.abspath(get_script_dir()) - while os.path.dirname(path) != path: - if os.path.exists(os.path.join(path, ".hg")): - return path - path = os.path.dirname(path) - sys.exit("repo base could not be found") - - -def is_running_on_cluster(): - node = platform.node() - return ( - "cluster" in node or - node.startswith("gkigrid") or - node in ["habakuk", "turtur"]) - - -def is_test_run(): - return ARGS.test_run == "yes" or ( - ARGS.test_run == "auto" and not is_running_on_cluster()) - - -def get_algo_nick(revision, config_nick): - return "{revision}-{config_nick}".format(**locals()) - - -class IssueConfig(object): - """Hold information about a planner configuration. 
- - See FastDownwardExperiment.add_algorithm() for documentation of the - constructor's options. - - """ - def __init__(self, nick, component_options, - build_options=None, driver_options=None): - self.nick = nick - self.component_options = component_options - self.build_options = build_options - self.driver_options = driver_options - - -class IssueExperiment(FastDownwardExperiment): - """Subclass of FastDownwardExperiment with some convenience features.""" - - DEFAULT_TEST_SUITE = ["gripper:prob01.pddl"] - - DEFAULT_TABLE_ATTRIBUTES = [ - "cost", - "coverage", - "error", - "evaluations", - "expansions", - "expansions_until_last_jump", - "generated", - "memory", - "quality", - "run_dir", - "score_evaluations", - "score_expansions", - "score_generated", - "score_memory", - "score_search_time", - "score_total_time", - "search_time", - "total_time", - ] - - DEFAULT_SCATTER_PLOT_ATTRIBUTES = [ - "evaluations", - "expansions", - "expansions_until_last_jump", - "initial_h_value", - "memory", - "search_time", - "total_time", - ] - - PORTFOLIO_ATTRIBUTES = [ - "cost", - "coverage", - "error", - "plan_length", - "run_dir", - ] - - def __init__(self, revisions=None, configs=None, path=None, **kwargs): - """ - - You can either specify both *revisions* and *configs* or none - of them. If they are omitted, you will need to call - exp.add_algorithm() manually. - - If *revisions* is given, it must be a non-empty list of - revision identifiers, which specify which planner versions to - use in the experiment. The same versions are used for - translator, preprocessor and search. :: - - IssueExperiment(revisions=["issue123", "4b3d581643"], ...) - - If *configs* is given, it must be a non-empty list of - IssueConfig objects. 
:: - - IssueExperiment(..., configs=[ - IssueConfig("ff", ["--search", "eager_greedy(ff())"]), - IssueConfig( - "lama", [], - driver_options=["--alias", "seq-sat-lama-2011"]), - ]) - - If *path* is specified, it must be the path to where the - experiment should be built (e.g. - /home/john/experiments/issue123/exp01/). If omitted, the - experiment path is derived automatically from the main - script's filename. Example:: - - script = experiments/issue123/exp01.py --> - path = experiments/issue123/data/issue123-exp01/ - - """ - - path = path or get_data_dir() - - FastDownwardExperiment.__init__(self, path=path, **kwargs) - - if (revisions and not configs) or (not revisions and configs): - raise ValueError( - "please provide either both or none of revisions and configs") - - for rev in revisions: - for config in configs: - self.add_algorithm( - get_algo_nick(rev, config.nick), - get_repo_base(), - rev, - config.component_options, - build_options=config.build_options, - driver_options=config.driver_options) - - self._revisions = revisions - self._configs = configs - - @classmethod - def _is_portfolio(cls, config_nick): - return "fdss" in config_nick - - @classmethod - def get_supported_attributes(cls, config_nick, attributes): - if cls._is_portfolio(config_nick): - return [attr for attr in attributes - if attr in cls.PORTFOLIO_ATTRIBUTES] - return attributes - - def add_absolute_report_step(self, **kwargs): - """Add step that makes an absolute report. - - Absolute reports are useful for experiments that don't compare - revisions. - - The report is written to the experiment evaluation directory. - - All *kwargs* will be passed to the AbsoluteReport class. If the - keyword argument *attributes* is not specified, a default list - of attributes is used. 
:: - - exp.add_absolute_report_step(attributes=["coverage"]) - - """ - kwargs.setdefault("attributes", self.DEFAULT_TABLE_ATTRIBUTES) - report = AbsoluteReport(**kwargs) - outfile = os.path.join( - self.eval_dir, - get_experiment_name() + "." + report.output_format) - self.add_report(report, outfile=outfile) - self.add_step(Step( - 'publish-absolute-report', subprocess.call, ['publish', outfile])) - - def add_comparison_table_step(self, **kwargs): - """Add a step that makes pairwise revision comparisons. - - Create comparative reports for all pairs of Fast Downward - revisions. Each report pairs up the runs of the same config and - lists the two absolute attribute values and their difference - for all attributes in kwargs["attributes"]. - - All *kwargs* will be passed to the CompareConfigsReport class. - If the keyword argument *attributes* is not specified, a - default list of attributes is used. :: - - exp.add_comparison_table_step(attributes=["coverage"]) - - """ - kwargs.setdefault("attributes", self.DEFAULT_TABLE_ATTRIBUTES) - - def make_comparison_tables(): - for rev1, rev2 in itertools.combinations(self._revisions, 2): - compared_configs = [] - for config in self._configs: - config_nick = config.nick - compared_configs.append( - ("%s-%s" % (rev1, config_nick), - "%s-%s" % (rev2, config_nick), - "Diff (%s)" % config_nick)) - report = CompareConfigsReport(compared_configs, **kwargs) - outfile = os.path.join( - self.eval_dir, - "%s-%s-%s-compare.%s" % ( - self.name, rev1, rev2, report.output_format)) - report(self.eval_dir, outfile) - - def publish_comparison_tables(): - for rev1, rev2 in itertools.combinations(self._revisions, 2): - outfile = os.path.join( - self.eval_dir, - "%s-%s-%s-compare.html" % (self.name, rev1, rev2)) - subprocess.call(["publish", outfile]) - - self.add_step(Step("make-comparison-tables", make_comparison_tables)) - self.add_step(Step( - "publish-comparison-tables", publish_comparison_tables)) - - def add_scatter_plot_step(self, 
relative=False, attributes=None): - """Add step creating (relative) scatter plots for all revision pairs. - - Create a scatter plot for each combination of attribute, - configuration and revisions pair. If *attributes* is not - specified, a list of common scatter plot attributes is used. - For portfolios all attributes except "cost", "coverage" and - "plan_length" will be ignored. :: - - exp.add_scatter_plot_step(attributes=["expansions"]) - - """ - if relative: - report_class = RelativeScatterPlotReport - scatter_dir = os.path.join(self.eval_dir, "scatter-relative") - step_name = "make-relative-scatter-plots" - else: - report_class = ScatterPlotReport - scatter_dir = os.path.join(self.eval_dir, "scatter-absolute") - step_name = "make-absolute-scatter-plots" - if attributes is None: - attributes = self.DEFAULT_SCATTER_PLOT_ATTRIBUTES - - def make_scatter_plot(config_nick, rev1, rev2, attribute): - name = "-".join([self.name, rev1, rev2, attribute, config_nick]) - print "Make scatter plot for", name - algo1 = "{}-{}".format(rev1, config_nick) - algo2 = "{}-{}".format(rev2, config_nick) - report = report_class( - filter_config=[algo1, algo2], - attributes=[attribute], - get_category=lambda run1, run2: run1["domain"], - legend_location=(1.3, 0.5)) - report( - self.eval_dir, - os.path.join(scatter_dir, rev1 + "-" + rev2, name)) - - def make_scatter_plots(): - for config in self._configs: - for rev1, rev2 in itertools.combinations(self._revisions, 2): - for attribute in self.get_supported_attributes( - config.nick, attributes): - make_scatter_plot(config.nick, rev1, rev2, attribute) - - self.add_step(Step(step_name, make_scatter_plots)) diff --git a/experiments/issue551/relativescatter.py b/experiments/issue551/relativescatter.py deleted file mode 100644 index 14d5d42752..0000000000 --- a/experiments/issue551/relativescatter.py +++ /dev/null @@ -1,106 +0,0 @@ -# -*- coding: utf-8 -*- - -from collections import defaultdict - -from matplotlib import ticker - -from 
downward.reports.scatter import ScatterPlotReport -from downward.reports.plot import PlotReport, Matplotlib, MatplotlibPlot - - -# TODO: handle outliers - -# TODO: this is mostly copied from ScatterMatplotlib (scatter.py) -class RelativeScatterMatplotlib(Matplotlib): - @classmethod - def _plot(cls, report, axes, categories, styles): - # Display grid - axes.grid(b=True, linestyle='-', color='0.75') - - has_points = False - # Generate the scatter plots - for category, coords in sorted(categories.items()): - X, Y = zip(*coords) - axes.scatter(X, Y, s=42, label=category, **styles[category]) - if X and Y: - has_points = True - - if report.xscale == 'linear' or report.yscale == 'linear': - plot_size = report.missing_val * 1.01 - else: - plot_size = report.missing_val * 1.25 - - # make 5 ticks above and below 1 - yticks = [] - tick_step = report.ylim_top**(1/5.0) - for i in xrange(-5, 6): - yticks.append(tick_step**i) - axes.set_yticks(yticks) - axes.get_yaxis().set_major_formatter(ticker.ScalarFormatter()) - - axes.set_xlim(report.xlim_left or -1, report.xlim_right or plot_size) - axes.set_ylim(report.ylim_bottom or -1, report.ylim_top or plot_size) - - for axis in [axes.xaxis, axes.yaxis]: - MatplotlibPlot.change_axis_formatter( - axis, - report.missing_val if report.show_missing else None) - return has_points - - -class RelativeScatterPlotReport(ScatterPlotReport): - """ - Generate a scatter plot that shows how a specific attribute in two - configurations. The attribute value in config 1 is shown on the - x-axis and the relation to the value in config 2 on the y-axis. - """ - - def __init__(self, show_missing=True, get_category=None, **kwargs): - ScatterPlotReport.__init__(self, show_missing, get_category, **kwargs) - if self.output_format == 'tex': - raise "not supported" - else: - self.writer = RelativeScatterMatplotlib - - def _fill_categories(self, runs): - # We discard the *runs* parameter. 
- # Map category names to value tuples - categories = defaultdict(list) - self.ylim_bottom = 2 - self.ylim_top = 0.5 - self.xlim_left = float("inf") - for (domain, problem), runs in self.problem_runs.items(): - if len(runs) != 2: - continue - run1, run2 = runs - assert (run1['config'] == self.configs[0] and - run2['config'] == self.configs[1]) - val1 = run1.get(self.attribute) - val2 = run2.get(self.attribute) - if val1 is None or val2 is None: - continue - category = self.get_category(run1, run2) - assert val1 > 0, (domain, problem, self.configs[0], val1) - assert val2 > 0, (domain, problem, self.configs[1], val2) - x = val1 - y = val2 / float(val1) - - categories[category].append((x, y)) - - self.ylim_top = max(self.ylim_top, y) - self.ylim_bottom = min(self.ylim_bottom, y) - self.xlim_left = min(self.xlim_left, x) - - # center around 1 - if self.ylim_bottom < 1: - self.ylim_top = max(self.ylim_top, 1 / float(self.ylim_bottom)) - if self.ylim_top > 1: - self.ylim_bottom = min(self.ylim_bottom, 1 / float(self.ylim_top)) - return categories - - def _set_scales(self, xscale, yscale): - # ScatterPlots use log-scaling on the x-axis by default. - default_xscale = 'log' - if self.attribute and self.attribute in self.LINEAR: - default_xscale = 'linear' - PlotReport._set_scales(self, xscale or default_xscale, 'log') diff --git a/experiments/issue551/v1-lama-opt.py b/experiments/issue551/v1-lama-opt.py deleted file mode 100755 index 7960eb0b9d..0000000000 --- a/experiments/issue551/v1-lama-opt.py +++ /dev/null @@ -1,28 +0,0 @@ -#! 
/usr/bin/env python -# -*- coding: utf-8 -*- - -import os -from downward import suites -from common_setup import IssueConfig, IssueExperiment - - -REVS = ["issue551-base", "issue551-v1"] -BENCHMARKS = os.path.expanduser('~/downward-benchmarks') -SUITE = suites.suite_optimal_strips() - -CONFIGS = [ - IssueConfig("seq-opt-bjolp", [], driver_options=["--alias", "seq-opt-bjolp"]), -] - -exp = IssueExperiment( - revisions=REVS, - benchmarks_dir=BENCHMARKS, - suite=SUITE, - configs=CONFIGS, - processes=4, - email="manuel.heusner@unibas.ch" -) - -exp.add_comparison_table_step() - -exp() diff --git a/experiments/issue551/v1-lama-sat.py b/experiments/issue551/v1-lama-sat.py deleted file mode 100755 index c36e3ecb52..0000000000 --- a/experiments/issue551/v1-lama-sat.py +++ /dev/null @@ -1,44 +0,0 @@ -#! /usr/bin/env python -# -*- coding: utf-8 -*- - -import os -from downward import suites -from common_setup import IssueConfig, IssueExperiment - - -REVS = ["issue551-base", "issue551-v1"] -BENCHMARKS = os.path.expanduser('~/downward-benchmarks') -SUITE = suites.suite_satisficing() - -CONFIGS = [ - IssueConfig("lama-first", [], driver_options=["--alias", "lama-first"]), - IssueConfig("lm_hm", [ - "--landmarks", "lm=lm_hm(2)", - "--heuristic", "hlm=lmcount(lm)", - "--search", "lazy_greedy(hlm)"]), - IssueConfig("lm_exhaust", [ - "--landmarks", "lm=lm_exhaust()", - "--heuristic", "hlm=lmcount(lm)", - "--search", "lazy_greedy(hlm)"]), - IssueConfig("lm_rhw", [ - "--landmarks", "lm=lm_rhw()", - "--heuristic", "hlm=lmcount(lm)", - "--search", "lazy_greedy(hlm)"]), - IssueConfig("lm_zg", [ - "--landmarks", "lm=lm_zg()", - "--heuristic", "hlm=lmcount(lm)", - "--search", "lazy_greedy(hlm)"]), -] - -exp = IssueExperiment( - revisions=REVS, - benchmarks_dir=BENCHMARKS, - suite=SUITE, - configs=CONFIGS, - processes=4, - email="manuel.heusner@unibas.ch" -) - -exp.add_comparison_table_step() - -exp() diff --git a/experiments/issue551/v2-lama-opt.py b/experiments/issue551/v2-lama-opt.py 
deleted file mode 100755 index 48a45176be..0000000000 --- a/experiments/issue551/v2-lama-opt.py +++ /dev/null @@ -1,28 +0,0 @@ -#! /usr/bin/env python -# -*- coding: utf-8 -*- - -import os -from downward import suites -from common_setup import IssueConfig, IssueExperiment - - -REVS = ["issue551-base", "issue551-v2"] -BENCHMARKS = os.path.expanduser('~/downward-benchmarks') -SUITE = suites.suite_optimal_strips() - -CONFIGS = [ - IssueConfig("seq-opt-bjolp", [], driver_options=["--alias", "seq-opt-bjolp"]), -] - -exp = IssueExperiment( - revisions=REVS, - benchmarks_dir=BENCHMARKS, - suite=SUITE, - configs=CONFIGS, - processes=4, - email="manuel.heusner@unibas.ch" -) - -exp.add_comparison_table_step() - -exp() diff --git a/experiments/issue551/v2-lama-sat.py b/experiments/issue551/v2-lama-sat.py deleted file mode 100755 index db08e2b2e0..0000000000 --- a/experiments/issue551/v2-lama-sat.py +++ /dev/null @@ -1,44 +0,0 @@ -#! /usr/bin/env python -# -*- coding: utf-8 -*- - -import os -from downward import suites -from common_setup import IssueConfig, IssueExperiment - - -REVS = ["issue551-base", "issue551-v2"] -BENCHMARKS = os.path.expanduser('~/downward-benchmarks') -SUITE = suites.suite_satisficing_strips() - -CONFIGS = [ - IssueConfig("lama-first", [], driver_options=["--alias", "lama-first"]), - IssueConfig("lm_hm", [ - "--landmarks", "lm=lm_hm(2)", - "--heuristic", "hlm=lmcount(lm)", - "--search", "lazy_greedy(hlm)"]), - IssueConfig("lm_exhaust", [ - "--landmarks", "lm=lm_exhaust()", - "--heuristic", "hlm=lmcount(lm)", - "--search", "lazy_greedy(hlm)"]), - IssueConfig("lm_rhw", [ - "--landmarks", "lm=lm_rhw()", - "--heuristic", "hlm=lmcount(lm)", - "--search", "lazy_greedy(hlm)"]), - IssueConfig("lm_zg", [ - "--landmarks", "lm=lm_zg()", - "--heuristic", "hlm=lmcount(lm)", - "--search", "lazy_greedy(hlm)"]), -] - -exp = IssueExperiment( - revisions=REVS, - benchmarks_dir=BENCHMARKS, - suite=SUITE, - configs=CONFIGS, - processes=4, - email="manuel.heusner@unibas.ch" 
-) - -exp.add_comparison_table_step() - -exp() diff --git a/experiments/issue551/v3-lama-opt.py b/experiments/issue551/v3-lama-opt.py deleted file mode 100755 index 43e32fb6ab..0000000000 --- a/experiments/issue551/v3-lama-opt.py +++ /dev/null @@ -1,28 +0,0 @@ -#! /usr/bin/env python -# -*- coding: utf-8 -*- - -import os -from downward import suites -from common_setup import IssueConfig, IssueExperiment - - -REVS = ["issue551-base", "issue551-v3"] -BENCHMARKS = os.path.expanduser('~/downward-benchmarks') -SUITE = suites.suite_optimal_strips() - -CONFIGS = [ - IssueConfig("seq-opt-bjolp", [], driver_options=["--alias", "seq-opt-bjolp"]), -] - -exp = IssueExperiment( - revisions=REVS, - benchmarks_dir=BENCHMARKS, - suite=SUITE, - configs=CONFIGS, - processes=4, - email="manuel.heusner@unibas.ch" -) - -exp.add_comparison_table_step() - -exp() diff --git a/experiments/issue551/v3-lama-sat.py b/experiments/issue551/v3-lama-sat.py deleted file mode 100755 index cf0cf0bc62..0000000000 --- a/experiments/issue551/v3-lama-sat.py +++ /dev/null @@ -1,44 +0,0 @@ -#! 
/usr/bin/env python -# -*- coding: utf-8 -*- - -import os -from downward import suites -from common_setup import IssueConfig, IssueExperiment - - -REVS = ["issue551-base", "issue551-v3"] -BENCHMARKS = os.path.expanduser('~/downward-benchmarks') -SUITE = suites.suite_satisficing_strips() - -CONFIGS = [ - IssueConfig("lama-first", [], driver_options=["--alias", "lama-first"]), - IssueConfig("lm_hm", [ - "--landmarks", "lm=lm_hm(2)", - "--heuristic", "hlm=lmcount(lm)", - "--search", "lazy_greedy(hlm)"]), - IssueConfig("lm_exhaust", [ - "--landmarks", "lm=lm_exhaust()", - "--heuristic", "hlm=lmcount(lm)", - "--search", "lazy_greedy(hlm)"]), - IssueConfig("lm_rhw", [ - "--landmarks", "lm=lm_rhw()", - "--heuristic", "hlm=lmcount(lm)", - "--search", "lazy_greedy(hlm)"]), - IssueConfig("lm_zg", [ - "--landmarks", "lm=lm_zg()", - "--heuristic", "hlm=lmcount(lm)", - "--search", "lazy_greedy(hlm)"]), -] - -exp = IssueExperiment( - revisions=REVS, - benchmarks_dir=BENCHMARKS, - suite=SUITE, - configs=CONFIGS, - processes=4, - email="manuel.heusner@unibas.ch" -) - -exp.add_comparison_table_step() - -exp() diff --git a/experiments/issue551/v4-lama-opt.py b/experiments/issue551/v4-lama-opt.py deleted file mode 100755 index f903c44dbe..0000000000 --- a/experiments/issue551/v4-lama-opt.py +++ /dev/null @@ -1,43 +0,0 @@ -#! 
/usr/bin/env python -# -*- coding: utf-8 -*- - -import os - -from downward import suites - -from lab.environments import LocalEnvironment, MaiaEnvironment - -from common_setup import IssueConfig, IssueExperiment, is_test_run - -REVS = ["issue551-base", "issue551-v4"] -BENCHMARKS = os.path.expanduser('~/downward-benchmarks') -SUITE = suites.suite_optimal() - -CONFIGS = [ - IssueConfig("seq-opt-bjolp", [], driver_options=["--alias", "seq-opt-bjolp"]), - IssueConfig("seq-opt-bjolp-ocp", [ - "--landmarks", "lm=lm_merged([lm_rhw(),lm_hm(m=1)])", - "--heuristic", "hlm=lmcount(lm,admissible=true,optimal=true)", - "--search", "astar(hlm,mpd=true)"]), -] - -ENVIRONMENT = MaiaEnvironment( - priority=0, email="manuel.heusner@unibas.ch") - -if is_test_run(): - SUITE = IssueExperiment.DEFAULT_TEST_SUITE - ENVIRONMENT = LocalEnvironment(processes=1) - -exp = IssueExperiment( - revisions=REVS, - configs=CONFIGS, - environment=ENVIRONMENT -) - -exp.add_suite(BENCHMARKS, SUITE) - -exp.add_comparison_table_step() -exp.add_comparison_table_step(attributes=["memory","total_time", "search_time", "landmarks_generation_time"]) -exp.add_scatter_plot_step(relative=True, attributes=["memory","total_time", "search_time", "landmarks_generation_time"]) - -exp() diff --git a/experiments/issue551/v4-lama-sat.py b/experiments/issue551/v4-lama-sat.py deleted file mode 100755 index e68b5f10d3..0000000000 --- a/experiments/issue551/v4-lama-sat.py +++ /dev/null @@ -1,55 +0,0 @@ -#! 
/usr/bin/env python -# -*- coding: utf-8 -*- - -import os - -from downward import suites - -from lab.environments import LocalEnvironment, MaiaEnvironment - -from common_setup import IssueConfig, IssueExperiment, is_test_run - -REVS = ["issue551-base", "issue551-v4"] -BENCHMARKS = os.path.expanduser('~/downward-benchmarks') -SUITE = suites.suite_satisficing() - -CONFIGS = [ - IssueConfig("lama-first", [], driver_options=["--alias", "lama-first"]), - IssueConfig("lm_hm", [ - "--landmarks", "lm=lm_hm(2)", - "--heuristic", "hlm=lmcount(lm)", - "--search", "lazy_greedy(hlm)"]), - IssueConfig("lm_exhaust", [ - "--landmarks", "lm=lm_exhaust()", - "--heuristic", "hlm=lmcount(lm)", - "--search", "lazy_greedy(hlm)"]), - IssueConfig("lm_rhw", [ - "--landmarks", "lm=lm_rhw()", - "--heuristic", "hlm=lmcount(lm)", - "--search", "lazy_greedy(hlm)"]), - IssueConfig("lm_zg", [ - "--landmarks", "lm=lm_zg()", - "--heuristic", "hlm=lmcount(lm)", - "--search", "lazy_greedy(hlm)"]), -] - -ENVIRONMENT = MaiaEnvironment( - priority=0, email="manuel.heusner@unibas.ch") - -if is_test_run(): - SUITE = IssueExperiment.DEFAULT_TEST_SUITE - ENVIRONMENT = LocalEnvironment(processes=1) - -exp = IssueExperiment( - revisions=REVS, - configs=CONFIGS, - environment=ENVIRONMENT -) - -exp.add_suite(BENCHMARKS, SUITE) - -exp.add_comparison_table_step() -exp.add_comparison_table_step(attributes=["memory","total_time", "search_time", "landmarks_generation_time"]) -exp.add_scatter_plot_step(relative=True, attributes=["memory","total_time", "search_time", "landmarks_generation_time"]) - -exp() diff --git a/experiments/issue551/v5-seq-sat-lama-2011.py b/experiments/issue551/v5-seq-sat-lama-2011.py deleted file mode 100755 index 875208a2ec..0000000000 --- a/experiments/issue551/v5-seq-sat-lama-2011.py +++ /dev/null @@ -1,37 +0,0 @@ -#! 
/usr/bin/env python -# -*- coding: utf-8 -*- - -import os - -from downward import suites - -from lab.environments import LocalEnvironment, MaiaEnvironment - -from common_setup import IssueConfig, IssueExperiment, is_test_run - -REVS = ["issue551-base", "issue551-v5"] -BENCHMARKS = os.path.expanduser('~/downward-benchmarks') -SUITE = suites.suite_satisficing() - -CONFIGS = [ - IssueConfig("seq-sat-lama-2011", [], driver_options=["--alias", "seq-sat-lama-2011"]), -] - -ENVIRONMENT = MaiaEnvironment( - priority=0, email="manuel.heusner@unibas.ch") - -if is_test_run(): - SUITE = IssueExperiment.DEFAULT_TEST_SUITE - ENVIRONMENT = LocalEnvironment(processes=4) - -exp = IssueExperiment( - revisions=REVS, - configs=CONFIGS, - environment=ENVIRONMENT -) - -exp.add_suite(BENCHMARKS, SUITE) - -exp.add_comparison_table_step() - -exp() diff --git a/experiments/issue554/common_setup.py b/experiments/issue554/common_setup.py deleted file mode 100644 index 6989ef8686..0000000000 --- a/experiments/issue554/common_setup.py +++ /dev/null @@ -1,357 +0,0 @@ -# -*- coding: utf-8 -*- - -import itertools -import os -import platform -import sys - -from lab.environments import LocalEnvironment, MaiaEnvironment -from lab.experiment import ARGPARSER -from lab.steps import Step - -from downward.experiments import DownwardExperiment, _get_rev_nick -from downward.checkouts import Translator, Preprocessor, Planner -from downward.reports.absolute import AbsoluteReport -from downward.reports.compare import CompareRevisionsReport -from downward.reports.scatter import ScatterPlotReport - - -def parse_args(): - ARGPARSER.add_argument( - "--test", - choices=["yes", "no", "auto"], - default="auto", - dest="test_run", - help="test experiment locally on a small suite if --test=yes or " - "--test=auto and we are not on a cluster") - return ARGPARSER.parse_args() - -ARGS = parse_args() - - -def get_script(): - """Get file name of main script.""" - import __main__ - return __main__.__file__ - - -def 
get_script_dir(): - """Get directory of main script. - - Usually a relative directory (depends on how it was called by the user.)""" - return os.path.dirname(get_script()) - - -def get_experiment_name(): - """Get name for experiment. - - Derived from the absolute filename of the main script, e.g. - "/ham/spam/eggs.py" => "spam-eggs".""" - script = os.path.abspath(get_script()) - script_dir = os.path.basename(os.path.dirname(script)) - script_base = os.path.splitext(os.path.basename(script))[0] - return "%s-%s" % (script_dir, script_base) - - -def get_data_dir(): - """Get data dir for the experiment. - - This is the subdirectory "data" of the directory containing - the main script.""" - return os.path.join(get_script_dir(), "data", get_experiment_name()) - - -def get_repo_base(): - """Get base directory of the repository, as an absolute path. - - Search upwards in the directory tree from the main script until a - directory with a subdirectory named ".hg" is found. - - Abort if the repo base cannot be found.""" - path = os.path.abspath(get_script_dir()) - while os.path.dirname(path) != path: - if os.path.exists(os.path.join(path, ".hg")): - return path - path = os.path.dirname(path) - sys.exit("repo base could not be found") - - -def is_running_on_cluster(): - node = platform.node() - return ("cluster" in node or - node.startswith("gkigrid") or - node in ["habakuk", "turtur"]) - - -def is_test_run(): - return ARGS.test_run == "yes" or (ARGS.test_run == "auto" and - not is_running_on_cluster()) - - -class IssueExperiment(DownwardExperiment): - """Wrapper for DownwardExperiment with a few convenience features.""" - - DEFAULT_TEST_SUITE = "gripper:prob01.pddl" - - DEFAULT_TABLE_ATTRIBUTES = [ - "cost", - "coverage", - "error", - "evaluations", - "expansions", - "expansions_until_last_jump", - "generated", - "memory", - "quality", - "run_dir", - "score_evaluations", - "score_expansions", - "score_generated", - "score_memory", - "score_search_time", - "score_total_time", 
- "search_time", - "total_time", - ] - - DEFAULT_SCATTER_PLOT_ATTRIBUTES = [ - "evaluations", - "expansions", - "expansions_until_last_jump", - "initial_h_value", - "memory", - "search_time", - "total_time", - ] - - PORTFOLIO_ATTRIBUTES = [ - "cost", - "coverage", - "error", - "plan_length", - "run_dir", - ] - - def __init__(self, configs, suite, grid_priority=None, path=None, - repo=None, revisions=None, search_revisions=None, - test_suite=None, **kwargs): - """Create a DownwardExperiment with some convenience features. - - *configs* must be a non-empty dict of {nick: cmdline} pairs - that sets the planner configurations to test. :: - - IssueExperiment(configs={ - "lmcut": ["--search", "astar(lmcut())"], - "ipdb": ["--search", "astar(ipdb())"]}) - - *suite* sets the benchmarks for the experiment. It must be a - single string or a list of strings specifying domains or - tasks. The downward.suites module has many predefined - suites. :: - - IssueExperiment(suite=["grid", "gripper:prob01.pddl"]) - - from downward import suites - IssueExperiment(suite=suites.suite_all()) - IssueExperiment(suite=suites.suite_satisficing_with_ipc11()) - IssueExperiment(suite=suites.suite_optimal()) - - Use *grid_priority* to set the job priority for cluster - experiments. It must be in the range [-1023, 0] where 0 is the - highest priority. By default the priority is 0. :: - - IssueExperiment(grid_priority=-500) - - If *path* is specified, it must be the path to where the - experiment should be built (e.g. - /home/john/experiments/issue123/exp01/). If omitted, the - experiment path is derived automatically from the main - script's filename. Example:: - - script = experiments/issue123/exp01.py --> - path = experiments/issue123/data/issue123-exp01/ - - If *repo* is specified, it must be the path to the root of a - local Fast Downward repository. If omitted, the repository - is derived automatically from the main script's path. 
Example:: - - script = /path/to/fd-repo/experiments/issue123/exp01.py --> - repo = /path/to/fd-repo - - If *revisions* is specified, it should be a non-empty - list of revisions, which specify which planner versions to use - in the experiment. The same versions are used for translator, - preprocessor and search. :: - - IssueExperiment(revisions=["issue123", "4b3d581643"]) - - If *search_revisions* is specified, it should be a non-empty - list of revisions, which specify which search component - versions to use in the experiment. All runs use the - translator and preprocessor component of the first - revision. :: - - IssueExperiment(search_revisions=["default", "issue123"]) - - If you really need to specify the (translator, preprocessor, - planner) triples manually, use the *combinations* parameter - from the base class (might be deprecated soon). The options - *revisions*, *search_revisions* and *combinations* can be - freely mixed, but at least one of them must be given. - - Specify *test_suite* to set the benchmarks for experiment test - runs. By default the first gripper task is used. - - IssueExperiment(test_suite=["depot:pfile1", "tpp:p01.pddl"]) - - """ - - if is_test_run(): - kwargs["environment"] = LocalEnvironment() - suite = test_suite or self.DEFAULT_TEST_SUITE - elif "environment" not in kwargs: - kwargs["environment"] = MaiaEnvironment(priority=grid_priority) - - if path is None: - path = get_data_dir() - - if repo is None: - repo = get_repo_base() - - kwargs.setdefault("combinations", []) - - if not any([revisions, search_revisions, kwargs["combinations"]]): - raise ValueError('At least one of "revisions", "search_revisions" ' - 'or "combinations" must be given') - - if revisions: - kwargs["combinations"].extend([ - (Translator(repo, rev), - Preprocessor(repo, rev), - Planner(repo, rev)) - for rev in revisions]) - - if search_revisions: - base_rev = search_revisions[0] - # Use the same nick for all parts to get short revision nick. 
- kwargs["combinations"].extend([ - (Translator(repo, base_rev, nick=rev), - Preprocessor(repo, base_rev, nick=rev), - Planner(repo, rev, nick=rev)) - for rev in search_revisions]) - - DownwardExperiment.__init__(self, path=path, repo=repo, **kwargs) - - self._config_nicks = [] - for nick, config in configs.items(): - self.add_config(nick, config) - - self.add_suite(suite) - - @property - def revision_nicks(self): - # TODO: Once the add_algorithm() API is available we should get - # rid of the call to _get_rev_nick() and avoid inspecting the - # list of combinations by setting and saving the algorithm nicks. - return [_get_rev_nick(*combo) for combo in self.combinations] - - @classmethod - def _is_portfolio(cls, config_nick): - return "fdss" in config_nick - - @classmethod - def get_supported_attributes(cls, config_nick, attributes): - if cls._is_portfolio(config_nick): - return [attr for attr in attributes - if attr in cls.PORTFOLIO_ATTRIBUTES] - return attributes - - def add_config(self, nick, config, timeout=None): - DownwardExperiment.add_config(self, nick, config, timeout=timeout) - self._config_nicks.append(nick) - - def add_absolute_report_step(self, **kwargs): - """Add step that makes an absolute report. - - Absolute reports are useful for experiments that don't - compare revisions. - - The report is written to the experiment evaluation directory. - - All *kwargs* will be passed to the AbsoluteReport class. If - the keyword argument *attributes* is not specified, a - default list of attributes is used. :: - - exp.add_absolute_report_step(attributes=["coverage"]) - - """ - kwargs.setdefault("attributes", self.DEFAULT_TABLE_ATTRIBUTES) - report = AbsoluteReport(**kwargs) - outfile = get_experiment_name() + "." + report.output_format - self.add_report(report, outfile=outfile) - - def add_comparison_table_step(self, **kwargs): - """Add a step that makes pairwise revision comparisons. 
- - Create comparative reports for all pairs of Fast Downward - revision triples. Each report pairs up the runs of the same - config and lists the two absolute attribute values and their - difference for all attributes in kwargs["attributes"]. - - All *kwargs* will be passed to the CompareRevisionsReport - class. If the keyword argument *attributes* is not - specified, a default list of attributes is used. :: - - exp.add_comparison_table_step(attributes=["coverage"]) - - """ - kwargs.setdefault("attributes", self.DEFAULT_TABLE_ATTRIBUTES) - - def make_comparison_tables(): - for rev1, rev2 in itertools.combinations(self.revision_nicks, 2): - report = CompareRevisionsReport(rev1, rev2, **kwargs) - outfile = os.path.join(self.eval_dir, - "%s-%s-%s-compare.html" % - (self.name, rev1, rev2)) - report(self.eval_dir, outfile) - - self.add_step(Step("make-comparison-tables", make_comparison_tables)) - - def add_scatter_plot_step(self, attributes=None): - """Add a step that creates scatter plots for all revision pairs. - - Create a scatter plot for each combination of attribute, - configuration and revision pair. If *attributes* is not - specified, a list of common scatter plot attributes is used. - For portfolios all attributes except "cost", "coverage" and - "plan_length" will be ignored. 
:: - - exp.add_scatter_plot_step(attributes=["expansions"]) - - """ - if attributes is None: - attributes = self.DEFAULT_SCATTER_PLOT_ATTRIBUTES - scatter_dir = os.path.join(self.eval_dir, "scatter") - - def make_scatter_plot(config_nick, rev1, rev2, attribute): - name = "-".join([self.name, rev1, rev2, attribute, config_nick]) - print "Make scatter plot for", name - algo1 = "%s-%s" % (rev1, config_nick) - algo2 = "%s-%s" % (rev2, config_nick) - report = ScatterPlotReport( - filter_config=[algo1, algo2], - attributes=[attribute], - get_category=lambda run1, run2: run1["domain"], - legend_location=(1.3, 0.5)) - report(self.eval_dir, - os.path.join(scatter_dir, rev1 + "-" + rev2, name)) - - def make_scatter_plots(): - for config_nick in self._config_nicks: - for rev1, rev2 in itertools.combinations( - self.revision_nicks, 2): - for attribute in self.get_supported_attributes( - config_nick, attributes): - make_scatter_plot(config_nick, rev1, rev2, attribute) - - self.add_step(Step("make-scatter-plots", make_scatter_plots)) diff --git a/experiments/issue554/issue554.py b/experiments/issue554/issue554.py deleted file mode 100755 index 324fb93b01..0000000000 --- a/experiments/issue554/issue554.py +++ /dev/null @@ -1,24 +0,0 @@ -#! 
/usr/bin/env python -# -*- coding: utf-8 -*- - -from downward import suites - -import common_setup - -REVS = ["issue554-base", "issue554-v1"] -LIMITS = {"search_time": 1800} -SUITE = suites.suite_optimal_with_ipc11() - -CONFIGS = { - "astar_hmax": ["--search", "astar(hmax())"], - "gbfs_gc": ["--search", "eager_greedy(goalcount())"], -} - -exp = common_setup.IssueExperiment( - search_revisions=REVS, - configs=CONFIGS, - suite=SUITE, - limits=LIMITS, - ) -exp.add_comparison_table_step() -exp() diff --git a/experiments/issue555/common_setup.py b/experiments/issue555/common_setup.py deleted file mode 100644 index 90e985c980..0000000000 --- a/experiments/issue555/common_setup.py +++ /dev/null @@ -1,352 +0,0 @@ -# -*- coding: utf-8 -*- - -import itertools -import os -import platform -import sys - -from lab.environments import LocalEnvironment, MaiaEnvironment -from lab.experiment import ARGPARSER -from lab.steps import Step - -from downward.experiments import DownwardExperiment, _get_rev_nick -from downward.checkouts import Translator, Preprocessor, Planner -from downward.reports.absolute import AbsoluteReport -from downward.reports.compare import CompareRevisionsReport -from downward.reports.scatter import ScatterPlotReport - - -def parse_args(): - ARGPARSER.add_argument( - "--test", - choices=["yes", "no", "auto"], - default="auto", - dest="test_run", - help="test experiment locally on a small suite if --test=yes or " - "--test=auto and we are not on a cluster") - return ARGPARSER.parse_args() - -ARGS = parse_args() - - -def get_script(): - """Get file name of main script.""" - import __main__ - return __main__.__file__ - - -def get_script_dir(): - """Get directory of main script. - - Usually a relative directory (depends on how it was called by the user.)""" - return os.path.dirname(get_script()) - - -def get_experiment_name(): - """Get name for experiment. - - Derived from the absolute filename of the main script, e.g. 
- "/ham/spam/eggs.py" => "spam-eggs".""" - script = os.path.abspath(get_script()) - script_dir = os.path.basename(os.path.dirname(script)) - script_base = os.path.splitext(os.path.basename(script))[0] - return "%s-%s" % (script_dir, script_base) - - -def get_data_dir(): - """Get data dir for the experiment. - - This is the subdirectory "data" of the directory containing - the main script.""" - return os.path.join(get_script_dir(), "data", get_experiment_name()) - - -def get_repo_base(): - """Get base directory of the repository, as an absolute path. - - Search upwards in the directory tree from the main script until a - directory with a subdirectory named ".hg" is found. - - Abort if the repo base cannot be found.""" - path = os.path.abspath(get_script_dir()) - while os.path.dirname(path) != path: - if os.path.exists(os.path.join(path, ".hg")): - return path - path = os.path.dirname(path) - sys.exit("repo base could not be found") - - -def is_running_on_cluster(): - node = platform.node() - return ("cluster" in node or - node.startswith("gkigrid") or - node in ["habakuk", "turtur"]) - - -def is_test_run(): - return ARGS.test_run == "yes" or (ARGS.test_run == "auto" and - not is_running_on_cluster()) - - -class IssueExperiment(DownwardExperiment): - """Wrapper for DownwardExperiment with a few convenience features.""" - - DEFAULT_TEST_SUITE = "gripper:prob01.pddl" - - DEFAULT_TABLE_ATTRIBUTES = [ - "cost", - "coverage", - "error", - "evaluations", - "expansions", - "expansions_until_last_jump", - "generated", - "memory", - "quality", - "run_dir", - "score_evaluations", - "score_expansions", - "score_generated", - "score_memory", - "score_search_time", - "score_total_time", - "search_time", - "total_time", - ] - - DEFAULT_SCATTER_PLOT_ATTRIBUTES = [ - "evaluations", - "expansions", - "expansions_until_last_jump", - "initial_h_value", - "memory", - "search_time", - "total_time", - ] - - PORTFOLIO_ATTRIBUTES = [ - "cost", - "coverage", - "plan_length", - ] - - def 
__init__(self, configs, suite, grid_priority=None, path=None, - repo=None, revisions=None, search_revisions=None, - test_suite=None, **kwargs): - """Create a DownwardExperiment with some convenience features. - - *configs* must be a non-empty dict of {nick: cmdline} pairs - that sets the planner configurations to test. :: - - IssueExperiment(configs={ - "lmcut": ["--search", "astar(lmcut())"], - "ipdb": ["--search", "astar(ipdb())"]}) - - *suite* sets the benchmarks for the experiment. It must be a - single string or a list of strings specifying domains or - tasks. The downward.suites module has many predefined - suites. :: - - IssueExperiment(suite=["grid", "gripper:prob01.pddl"]) - - from downward import suites - IssueExperiment(suite=suites.suite_all()) - IssueExperiment(suite=suites.suite_satisficing_with_ipc11()) - IssueExperiment(suite=suites.suite_optimal()) - - Use *grid_priority* to set the job priority for cluster - experiments. It must be in the range [-1023, 0] where 0 is the - highest priority. By default the priority is 0. :: - - IssueExperiment(grid_priority=-500) - - If *path* is specified, it must be the path to where the - experiment should be built (e.g. - /home/john/experiments/issue123/exp01/). If omitted, the - experiment path is derived automatically from the main - script's filename. Example:: - - script = experiments/issue123/exp01.py --> - path = experiments/issue123/data/issue123-exp01/ - - If *repo* is specified, it must be the path to the root of a - local Fast Downward repository. If omitted, the repository - is derived automatically from the main script's path. Example:: - - script = /path/to/fd-repo/experiments/issue123/exp01.py --> - repo = /path/to/fd-repo - - If *revisions* is specified, it should be a non-empty - list of revisions, which specify which planner versions to use - in the experiment. The same versions are used for translator, - preprocessor and search. 
:: - - IssueExperiment(revisions=["issue123", "4b3d581643"]) - - If *search_revisions* is specified, it should be a non-empty - list of revisions, which specify which search component - versions to use in the experiment. All runs use the - translator and preprocessor component of the first - revision. :: - - IssueExperiment(search_revisions=["default", "issue123"]) - - If you really need to specify the (translator, preprocessor, - planner) triples manually, use the *combinations* parameter - from the base class (might be deprecated soon). The options - *revisions*, *search_revisions* and *combinations* can be - freely mixed, but at least one of them must be given. - - Specify *test_suite* to set the benchmarks for experiment test - runs. By default the first gripper task is used. - - IssueExperiment(test_suite=["depot:pfile1", "tpp:p01.pddl"]) - - """ - - if is_test_run(): - kwargs["environment"] = LocalEnvironment() - suite = test_suite or self.DEFAULT_TEST_SUITE - elif "environment" not in kwargs: - kwargs["environment"] = MaiaEnvironment(priority=grid_priority) - - if path is None: - path = get_data_dir() - - if repo is None: - repo = get_repo_base() - - kwargs.setdefault("combinations", []) - - if not any([revisions, search_revisions, kwargs["combinations"]]): - raise ValueError('At least one of "revisions", "search_revisions" ' - 'or "combinations" must be given') - - if revisions: - kwargs["combinations"].extend([ - (Translator(repo, rev), - Preprocessor(repo, rev), - Planner(repo, rev)) - for rev in revisions]) - - if search_revisions: - base_rev = search_revisions[0] - # Use the same nick for all parts to get short revision nick. 
- kwargs["combinations"].extend([ - (Translator(repo, base_rev, nick=rev), - Preprocessor(repo, base_rev, nick=rev), - Planner(repo, rev, nick=rev)) - for rev in search_revisions]) - - DownwardExperiment.__init__(self, path=path, repo=repo, **kwargs) - - self._config_nicks = [] - for nick, config in configs.items(): - self.add_config(nick, config) - - self.add_suite(suite) - - @property - def revision_nicks(self): - # TODO: Once the add_algorithm() API is available we should get - # rid of the call to _get_rev_nick() and avoid inspecting the - # list of combinations by setting and saving the algorithm nicks. - return [_get_rev_nick(*combo) for combo in self.combinations] - - def add_config(self, nick, config, timeout=None): - DownwardExperiment.add_config(self, nick, config, timeout=timeout) - self._config_nicks.append(nick) - - def add_absolute_report_step(self, **kwargs): - """Add step that makes an absolute report. - - Absolute reports are useful for experiments that don't - compare revisions. - - The report is written to the experiment evaluation directory. - - All *kwargs* will be passed to the AbsoluteReport class. If - the keyword argument *attributes* is not specified, a - default list of attributes is used. :: - - exp.add_absolute_report_step(attributes=["coverage"]) - - """ - kwargs.setdefault("attributes", self.DEFAULT_TABLE_ATTRIBUTES) - report = AbsoluteReport(**kwargs) - outfile = get_experiment_name() + "." + report.output_format - self.add_report(report, outfile=outfile) - - def add_comparison_table_step(self, **kwargs): - """Add a step that makes pairwise revision comparisons. - - Create comparative reports for all pairs of Fast Downward - revision triples. Each report pairs up the runs of the same - config and lists the two absolute attribute values and their - difference for all attributes in kwargs["attributes"]. - - All *kwargs* will be passed to the CompareRevisionsReport - class. 
If the keyword argument *attributes* is not - specified, a default list of attributes is used. :: - - exp.add_comparison_table_step(attributes=["coverage"]) - - """ - kwargs.setdefault("attributes", self.DEFAULT_TABLE_ATTRIBUTES) - - def make_comparison_tables(): - for rev1, rev2 in itertools.combinations(self.revision_nicks, 2): - report = CompareRevisionsReport(rev1, rev2, **kwargs) - outfile = os.path.join(self.eval_dir, - "%s-%s-%s-compare.html" % - (self.name, rev1, rev2)) - report(self.eval_dir, outfile) - - self.add_step(Step("make-comparison-tables", make_comparison_tables)) - - def add_scatter_plot_step(self, attributes=None): - """Add a step that creates scatter plots for all revision pairs. - - Create a scatter plot for each combination of attribute, - configuration and revision pair. If *attributes* is not - specified, a list of common scatter plot attributes is used. - For portfolios all attributes except "cost", "coverage" and - "plan_length" will be ignored. :: - - exp.add_scatter_plot_step(attributes=["expansions"]) - - """ - if attributes is None: - attributes = self.DEFAULT_SCATTER_PLOT_ATTRIBUTES - scatter_dir = os.path.join(self.eval_dir, "scatter") - - def is_portfolio(config_nick): - return "fdss" in config_nick - - def make_scatter_plot(config_nick, rev1, rev2, attribute): - name = "-".join([self.name, rev1, rev2, attribute, config_nick]) - print "Make scatter plot for", name - algo1 = "%s-%s" % (rev1, config_nick) - algo2 = "%s-%s" % (rev2, config_nick) - report = ScatterPlotReport( - filter_config=[algo1, algo2], - attributes=[attribute], - get_category=lambda run1, run2: run1["domain"], - legend_location=(1.3, 0.5)) - report(self.eval_dir, - os.path.join(scatter_dir, rev1 + "-" + rev2, name)) - - def make_scatter_plots(): - for config_nick in self._config_nicks: - if is_portfolio(config_nick): - valid_attributes = [ - attr for attr in attributes - if attr in self.PORTFOLIO_ATTRIBUTES] - else: - valid_attributes = attributes - for rev1, 
rev2 in itertools.combinations( - self.revision_nicks, 2): - for attribute in valid_attributes: - make_scatter_plot(config_nick, rev1, rev2, attribute) - - self.add_step(Step("make-scatter-plots", make_scatter_plots)) diff --git a/experiments/issue555/issue555-v1.py b/experiments/issue555/issue555-v1.py deleted file mode 100755 index 7453bf04cb..0000000000 --- a/experiments/issue555/issue555-v1.py +++ /dev/null @@ -1,26 +0,0 @@ -#! /usr/bin/env python -# -*- coding: utf-8 -*- - -from downward import suites - -import common_setup - - -REVS = ["issue555-base", "issue555-v1"] -LIMITS = {"search_time": 1800} -SUITE = suites.suite_optimal_with_ipc11() -CONFIGS = { - 'astar_h2': [ - '--search', - 'astar(hm(2))'], -} - -exp = common_setup.IssueExperiment( - search_revisions=REVS, - configs=CONFIGS, - suite=SUITE, - limits=LIMITS, - ) -exp.add_comparison_table_step() - -exp() diff --git a/experiments/issue555/issue555-v2.py b/experiments/issue555/issue555-v2.py deleted file mode 100755 index 69b1e05d46..0000000000 --- a/experiments/issue555/issue555-v2.py +++ /dev/null @@ -1,26 +0,0 @@ -#! 
/usr/bin/env python -# -*- coding: utf-8 -*- - -from downward import suites - -import common_setup - - -REVS = ["issue555-base", "issue555-v2"] -LIMITS = {"search_time": 1800} -SUITE = suites.suite_optimal_with_ipc11() -CONFIGS = { - 'astar_h2': [ - '--search', - 'astar(hm(2))'], -} - -exp = common_setup.IssueExperiment( - search_revisions=REVS, - configs=CONFIGS, - suite=SUITE, - limits=LIMITS, - ) -exp.add_comparison_table_step() - -exp() diff --git a/experiments/issue558/common_setup.py b/experiments/issue558/common_setup.py deleted file mode 100644 index 6989ef8686..0000000000 --- a/experiments/issue558/common_setup.py +++ /dev/null @@ -1,357 +0,0 @@ -# -*- coding: utf-8 -*- - -import itertools -import os -import platform -import sys - -from lab.environments import LocalEnvironment, MaiaEnvironment -from lab.experiment import ARGPARSER -from lab.steps import Step - -from downward.experiments import DownwardExperiment, _get_rev_nick -from downward.checkouts import Translator, Preprocessor, Planner -from downward.reports.absolute import AbsoluteReport -from downward.reports.compare import CompareRevisionsReport -from downward.reports.scatter import ScatterPlotReport - - -def parse_args(): - ARGPARSER.add_argument( - "--test", - choices=["yes", "no", "auto"], - default="auto", - dest="test_run", - help="test experiment locally on a small suite if --test=yes or " - "--test=auto and we are not on a cluster") - return ARGPARSER.parse_args() - -ARGS = parse_args() - - -def get_script(): - """Get file name of main script.""" - import __main__ - return __main__.__file__ - - -def get_script_dir(): - """Get directory of main script. - - Usually a relative directory (depends on how it was called by the user.)""" - return os.path.dirname(get_script()) - - -def get_experiment_name(): - """Get name for experiment. - - Derived from the absolute filename of the main script, e.g. 
- "/ham/spam/eggs.py" => "spam-eggs".""" - script = os.path.abspath(get_script()) - script_dir = os.path.basename(os.path.dirname(script)) - script_base = os.path.splitext(os.path.basename(script))[0] - return "%s-%s" % (script_dir, script_base) - - -def get_data_dir(): - """Get data dir for the experiment. - - This is the subdirectory "data" of the directory containing - the main script.""" - return os.path.join(get_script_dir(), "data", get_experiment_name()) - - -def get_repo_base(): - """Get base directory of the repository, as an absolute path. - - Search upwards in the directory tree from the main script until a - directory with a subdirectory named ".hg" is found. - - Abort if the repo base cannot be found.""" - path = os.path.abspath(get_script_dir()) - while os.path.dirname(path) != path: - if os.path.exists(os.path.join(path, ".hg")): - return path - path = os.path.dirname(path) - sys.exit("repo base could not be found") - - -def is_running_on_cluster(): - node = platform.node() - return ("cluster" in node or - node.startswith("gkigrid") or - node in ["habakuk", "turtur"]) - - -def is_test_run(): - return ARGS.test_run == "yes" or (ARGS.test_run == "auto" and - not is_running_on_cluster()) - - -class IssueExperiment(DownwardExperiment): - """Wrapper for DownwardExperiment with a few convenience features.""" - - DEFAULT_TEST_SUITE = "gripper:prob01.pddl" - - DEFAULT_TABLE_ATTRIBUTES = [ - "cost", - "coverage", - "error", - "evaluations", - "expansions", - "expansions_until_last_jump", - "generated", - "memory", - "quality", - "run_dir", - "score_evaluations", - "score_expansions", - "score_generated", - "score_memory", - "score_search_time", - "score_total_time", - "search_time", - "total_time", - ] - - DEFAULT_SCATTER_PLOT_ATTRIBUTES = [ - "evaluations", - "expansions", - "expansions_until_last_jump", - "initial_h_value", - "memory", - "search_time", - "total_time", - ] - - PORTFOLIO_ATTRIBUTES = [ - "cost", - "coverage", - "error", - "plan_length", - 
"run_dir", - ] - - def __init__(self, configs, suite, grid_priority=None, path=None, - repo=None, revisions=None, search_revisions=None, - test_suite=None, **kwargs): - """Create a DownwardExperiment with some convenience features. - - *configs* must be a non-empty dict of {nick: cmdline} pairs - that sets the planner configurations to test. :: - - IssueExperiment(configs={ - "lmcut": ["--search", "astar(lmcut())"], - "ipdb": ["--search", "astar(ipdb())"]}) - - *suite* sets the benchmarks for the experiment. It must be a - single string or a list of strings specifying domains or - tasks. The downward.suites module has many predefined - suites. :: - - IssueExperiment(suite=["grid", "gripper:prob01.pddl"]) - - from downward import suites - IssueExperiment(suite=suites.suite_all()) - IssueExperiment(suite=suites.suite_satisficing_with_ipc11()) - IssueExperiment(suite=suites.suite_optimal()) - - Use *grid_priority* to set the job priority for cluster - experiments. It must be in the range [-1023, 0] where 0 is the - highest priority. By default the priority is 0. :: - - IssueExperiment(grid_priority=-500) - - If *path* is specified, it must be the path to where the - experiment should be built (e.g. - /home/john/experiments/issue123/exp01/). If omitted, the - experiment path is derived automatically from the main - script's filename. Example:: - - script = experiments/issue123/exp01.py --> - path = experiments/issue123/data/issue123-exp01/ - - If *repo* is specified, it must be the path to the root of a - local Fast Downward repository. If omitted, the repository - is derived automatically from the main script's path. Example:: - - script = /path/to/fd-repo/experiments/issue123/exp01.py --> - repo = /path/to/fd-repo - - If *revisions* is specified, it should be a non-empty - list of revisions, which specify which planner versions to use - in the experiment. The same versions are used for translator, - preprocessor and search. 
:: - - IssueExperiment(revisions=["issue123", "4b3d581643"]) - - If *search_revisions* is specified, it should be a non-empty - list of revisions, which specify which search component - versions to use in the experiment. All runs use the - translator and preprocessor component of the first - revision. :: - - IssueExperiment(search_revisions=["default", "issue123"]) - - If you really need to specify the (translator, preprocessor, - planner) triples manually, use the *combinations* parameter - from the base class (might be deprecated soon). The options - *revisions*, *search_revisions* and *combinations* can be - freely mixed, but at least one of them must be given. - - Specify *test_suite* to set the benchmarks for experiment test - runs. By default the first gripper task is used. - - IssueExperiment(test_suite=["depot:pfile1", "tpp:p01.pddl"]) - - """ - - if is_test_run(): - kwargs["environment"] = LocalEnvironment() - suite = test_suite or self.DEFAULT_TEST_SUITE - elif "environment" not in kwargs: - kwargs["environment"] = MaiaEnvironment(priority=grid_priority) - - if path is None: - path = get_data_dir() - - if repo is None: - repo = get_repo_base() - - kwargs.setdefault("combinations", []) - - if not any([revisions, search_revisions, kwargs["combinations"]]): - raise ValueError('At least one of "revisions", "search_revisions" ' - 'or "combinations" must be given') - - if revisions: - kwargs["combinations"].extend([ - (Translator(repo, rev), - Preprocessor(repo, rev), - Planner(repo, rev)) - for rev in revisions]) - - if search_revisions: - base_rev = search_revisions[0] - # Use the same nick for all parts to get short revision nick. 
- kwargs["combinations"].extend([ - (Translator(repo, base_rev, nick=rev), - Preprocessor(repo, base_rev, nick=rev), - Planner(repo, rev, nick=rev)) - for rev in search_revisions]) - - DownwardExperiment.__init__(self, path=path, repo=repo, **kwargs) - - self._config_nicks = [] - for nick, config in configs.items(): - self.add_config(nick, config) - - self.add_suite(suite) - - @property - def revision_nicks(self): - # TODO: Once the add_algorithm() API is available we should get - # rid of the call to _get_rev_nick() and avoid inspecting the - # list of combinations by setting and saving the algorithm nicks. - return [_get_rev_nick(*combo) for combo in self.combinations] - - @classmethod - def _is_portfolio(cls, config_nick): - return "fdss" in config_nick - - @classmethod - def get_supported_attributes(cls, config_nick, attributes): - if cls._is_portfolio(config_nick): - return [attr for attr in attributes - if attr in cls.PORTFOLIO_ATTRIBUTES] - return attributes - - def add_config(self, nick, config, timeout=None): - DownwardExperiment.add_config(self, nick, config, timeout=timeout) - self._config_nicks.append(nick) - - def add_absolute_report_step(self, **kwargs): - """Add step that makes an absolute report. - - Absolute reports are useful for experiments that don't - compare revisions. - - The report is written to the experiment evaluation directory. - - All *kwargs* will be passed to the AbsoluteReport class. If - the keyword argument *attributes* is not specified, a - default list of attributes is used. :: - - exp.add_absolute_report_step(attributes=["coverage"]) - - """ - kwargs.setdefault("attributes", self.DEFAULT_TABLE_ATTRIBUTES) - report = AbsoluteReport(**kwargs) - outfile = get_experiment_name() + "." + report.output_format - self.add_report(report, outfile=outfile) - - def add_comparison_table_step(self, **kwargs): - """Add a step that makes pairwise revision comparisons. 
- - Create comparative reports for all pairs of Fast Downward - revision triples. Each report pairs up the runs of the same - config and lists the two absolute attribute values and their - difference for all attributes in kwargs["attributes"]. - - All *kwargs* will be passed to the CompareRevisionsReport - class. If the keyword argument *attributes* is not - specified, a default list of attributes is used. :: - - exp.add_comparison_table_step(attributes=["coverage"]) - - """ - kwargs.setdefault("attributes", self.DEFAULT_TABLE_ATTRIBUTES) - - def make_comparison_tables(): - for rev1, rev2 in itertools.combinations(self.revision_nicks, 2): - report = CompareRevisionsReport(rev1, rev2, **kwargs) - outfile = os.path.join(self.eval_dir, - "%s-%s-%s-compare.html" % - (self.name, rev1, rev2)) - report(self.eval_dir, outfile) - - self.add_step(Step("make-comparison-tables", make_comparison_tables)) - - def add_scatter_plot_step(self, attributes=None): - """Add a step that creates scatter plots for all revision pairs. - - Create a scatter plot for each combination of attribute, - configuration and revision pair. If *attributes* is not - specified, a list of common scatter plot attributes is used. - For portfolios all attributes except "cost", "coverage" and - "plan_length" will be ignored. 
:: - - exp.add_scatter_plot_step(attributes=["expansions"]) - - """ - if attributes is None: - attributes = self.DEFAULT_SCATTER_PLOT_ATTRIBUTES - scatter_dir = os.path.join(self.eval_dir, "scatter") - - def make_scatter_plot(config_nick, rev1, rev2, attribute): - name = "-".join([self.name, rev1, rev2, attribute, config_nick]) - print "Make scatter plot for", name - algo1 = "%s-%s" % (rev1, config_nick) - algo2 = "%s-%s" % (rev2, config_nick) - report = ScatterPlotReport( - filter_config=[algo1, algo2], - attributes=[attribute], - get_category=lambda run1, run2: run1["domain"], - legend_location=(1.3, 0.5)) - report(self.eval_dir, - os.path.join(scatter_dir, rev1 + "-" + rev2, name)) - - def make_scatter_plots(): - for config_nick in self._config_nicks: - for rev1, rev2 in itertools.combinations( - self.revision_nicks, 2): - for attribute in self.get_supported_attributes( - config_nick, attributes): - make_scatter_plot(config_nick, rev1, rev2, attribute) - - self.add_step(Step("make-scatter-plots", make_scatter_plots)) diff --git a/experiments/issue558/v1-ext.py b/experiments/issue558/v1-ext.py deleted file mode 100755 index cf0fb2c002..0000000000 --- a/experiments/issue558/v1-ext.py +++ /dev/null @@ -1,33 +0,0 @@ -#! 
/usr/bin/env python -# -*- coding: utf-8 -*- - -from downward import suites - -import common_setup - - -REVS = ["issue558-base", "issue558-v1"] -LIMITS = {"search_time": 300} -SUITE = suites.suite_satisficing_with_ipc11() - -CONFIGS = { - "lazy_wa3_ff": [ - "--heuristic", "h=ff()", - "--search", "lazy_wastar(h,w=3,preferred=h)"], - "lazy_wa1000_ff": [ - "--heuristic", "h=ff()", - "--search", "lazy_wastar(h,w=1000,preferred=h)"], - "lazy_greedy_ff": [ - "--heuristic", "h=ff()", - "--search", "lazy_greedy(h,preferred=h,reopen_closed=true)"], -} - -exp = common_setup.IssueExperiment( - search_revisions=REVS, - configs=CONFIGS, - suite=SUITE, - limits=LIMITS, - ) -exp.add_comparison_table_step() - -exp() diff --git a/experiments/issue558/v1.py b/experiments/issue558/v1.py deleted file mode 100755 index e991cacf73..0000000000 --- a/experiments/issue558/v1.py +++ /dev/null @@ -1,34 +0,0 @@ -#! /usr/bin/env python -# -*- coding: utf-8 -*- - -from downward import suites - -import common_setup - - -REVS = ["issue558-base", "issue558-v1"] -LIMITS = {"search_time": 300} -SUITE = suites.suite_satisficing_with_ipc11() - -CONFIGS = { - "lazy_wa3_ff": [ - "--heuristic", - "h=ff()", - "--search", - "lazy_wastar(h,w=3,preferred=h)"], - "lama-w5": [ - "--heuristic", - "hlm,hff=lm_ff_syn(lm_rhw(reasonable_orders=true," - " lm_cost_type=plusone,cost_type=plusone))", - "--search", "lazy_wastar([hff,hlm],preferred=[hff,hlm],w=5)"] -} - -exp = common_setup.IssueExperiment( - search_revisions=REVS, - configs=CONFIGS, - suite=SUITE, - limits=LIMITS, - ) -exp.add_comparison_table_step() - -exp() diff --git a/experiments/issue560/common_setup.py b/experiments/issue560/common_setup.py deleted file mode 100644 index 6989ef8686..0000000000 --- a/experiments/issue560/common_setup.py +++ /dev/null @@ -1,357 +0,0 @@ -# -*- coding: utf-8 -*- - -import itertools -import os -import platform -import sys - -from lab.environments import LocalEnvironment, MaiaEnvironment -from lab.experiment import 
ARGPARSER -from lab.steps import Step - -from downward.experiments import DownwardExperiment, _get_rev_nick -from downward.checkouts import Translator, Preprocessor, Planner -from downward.reports.absolute import AbsoluteReport -from downward.reports.compare import CompareRevisionsReport -from downward.reports.scatter import ScatterPlotReport - - -def parse_args(): - ARGPARSER.add_argument( - "--test", - choices=["yes", "no", "auto"], - default="auto", - dest="test_run", - help="test experiment locally on a small suite if --test=yes or " - "--test=auto and we are not on a cluster") - return ARGPARSER.parse_args() - -ARGS = parse_args() - - -def get_script(): - """Get file name of main script.""" - import __main__ - return __main__.__file__ - - -def get_script_dir(): - """Get directory of main script. - - Usually a relative directory (depends on how it was called by the user.)""" - return os.path.dirname(get_script()) - - -def get_experiment_name(): - """Get name for experiment. - - Derived from the absolute filename of the main script, e.g. - "/ham/spam/eggs.py" => "spam-eggs".""" - script = os.path.abspath(get_script()) - script_dir = os.path.basename(os.path.dirname(script)) - script_base = os.path.splitext(os.path.basename(script))[0] - return "%s-%s" % (script_dir, script_base) - - -def get_data_dir(): - """Get data dir for the experiment. - - This is the subdirectory "data" of the directory containing - the main script.""" - return os.path.join(get_script_dir(), "data", get_experiment_name()) - - -def get_repo_base(): - """Get base directory of the repository, as an absolute path. - - Search upwards in the directory tree from the main script until a - directory with a subdirectory named ".hg" is found. 
- - Abort if the repo base cannot be found.""" - path = os.path.abspath(get_script_dir()) - while os.path.dirname(path) != path: - if os.path.exists(os.path.join(path, ".hg")): - return path - path = os.path.dirname(path) - sys.exit("repo base could not be found") - - -def is_running_on_cluster(): - node = platform.node() - return ("cluster" in node or - node.startswith("gkigrid") or - node in ["habakuk", "turtur"]) - - -def is_test_run(): - return ARGS.test_run == "yes" or (ARGS.test_run == "auto" and - not is_running_on_cluster()) - - -class IssueExperiment(DownwardExperiment): - """Wrapper for DownwardExperiment with a few convenience features.""" - - DEFAULT_TEST_SUITE = "gripper:prob01.pddl" - - DEFAULT_TABLE_ATTRIBUTES = [ - "cost", - "coverage", - "error", - "evaluations", - "expansions", - "expansions_until_last_jump", - "generated", - "memory", - "quality", - "run_dir", - "score_evaluations", - "score_expansions", - "score_generated", - "score_memory", - "score_search_time", - "score_total_time", - "search_time", - "total_time", - ] - - DEFAULT_SCATTER_PLOT_ATTRIBUTES = [ - "evaluations", - "expansions", - "expansions_until_last_jump", - "initial_h_value", - "memory", - "search_time", - "total_time", - ] - - PORTFOLIO_ATTRIBUTES = [ - "cost", - "coverage", - "error", - "plan_length", - "run_dir", - ] - - def __init__(self, configs, suite, grid_priority=None, path=None, - repo=None, revisions=None, search_revisions=None, - test_suite=None, **kwargs): - """Create a DownwardExperiment with some convenience features. - - *configs* must be a non-empty dict of {nick: cmdline} pairs - that sets the planner configurations to test. :: - - IssueExperiment(configs={ - "lmcut": ["--search", "astar(lmcut())"], - "ipdb": ["--search", "astar(ipdb())"]}) - - *suite* sets the benchmarks for the experiment. It must be a - single string or a list of strings specifying domains or - tasks. The downward.suites module has many predefined - suites. 
:: - - IssueExperiment(suite=["grid", "gripper:prob01.pddl"]) - - from downward import suites - IssueExperiment(suite=suites.suite_all()) - IssueExperiment(suite=suites.suite_satisficing_with_ipc11()) - IssueExperiment(suite=suites.suite_optimal()) - - Use *grid_priority* to set the job priority for cluster - experiments. It must be in the range [-1023, 0] where 0 is the - highest priority. By default the priority is 0. :: - - IssueExperiment(grid_priority=-500) - - If *path* is specified, it must be the path to where the - experiment should be built (e.g. - /home/john/experiments/issue123/exp01/). If omitted, the - experiment path is derived automatically from the main - script's filename. Example:: - - script = experiments/issue123/exp01.py --> - path = experiments/issue123/data/issue123-exp01/ - - If *repo* is specified, it must be the path to the root of a - local Fast Downward repository. If omitted, the repository - is derived automatically from the main script's path. Example:: - - script = /path/to/fd-repo/experiments/issue123/exp01.py --> - repo = /path/to/fd-repo - - If *revisions* is specified, it should be a non-empty - list of revisions, which specify which planner versions to use - in the experiment. The same versions are used for translator, - preprocessor and search. :: - - IssueExperiment(revisions=["issue123", "4b3d581643"]) - - If *search_revisions* is specified, it should be a non-empty - list of revisions, which specify which search component - versions to use in the experiment. All runs use the - translator and preprocessor component of the first - revision. :: - - IssueExperiment(search_revisions=["default", "issue123"]) - - If you really need to specify the (translator, preprocessor, - planner) triples manually, use the *combinations* parameter - from the base class (might be deprecated soon). The options - *revisions*, *search_revisions* and *combinations* can be - freely mixed, but at least one of them must be given. 
- - Specify *test_suite* to set the benchmarks for experiment test - runs. By default the first gripper task is used. - - IssueExperiment(test_suite=["depot:pfile1", "tpp:p01.pddl"]) - - """ - - if is_test_run(): - kwargs["environment"] = LocalEnvironment() - suite = test_suite or self.DEFAULT_TEST_SUITE - elif "environment" not in kwargs: - kwargs["environment"] = MaiaEnvironment(priority=grid_priority) - - if path is None: - path = get_data_dir() - - if repo is None: - repo = get_repo_base() - - kwargs.setdefault("combinations", []) - - if not any([revisions, search_revisions, kwargs["combinations"]]): - raise ValueError('At least one of "revisions", "search_revisions" ' - 'or "combinations" must be given') - - if revisions: - kwargs["combinations"].extend([ - (Translator(repo, rev), - Preprocessor(repo, rev), - Planner(repo, rev)) - for rev in revisions]) - - if search_revisions: - base_rev = search_revisions[0] - # Use the same nick for all parts to get short revision nick. - kwargs["combinations"].extend([ - (Translator(repo, base_rev, nick=rev), - Preprocessor(repo, base_rev, nick=rev), - Planner(repo, rev, nick=rev)) - for rev in search_revisions]) - - DownwardExperiment.__init__(self, path=path, repo=repo, **kwargs) - - self._config_nicks = [] - for nick, config in configs.items(): - self.add_config(nick, config) - - self.add_suite(suite) - - @property - def revision_nicks(self): - # TODO: Once the add_algorithm() API is available we should get - # rid of the call to _get_rev_nick() and avoid inspecting the - # list of combinations by setting and saving the algorithm nicks. 
- return [_get_rev_nick(*combo) for combo in self.combinations] - - @classmethod - def _is_portfolio(cls, config_nick): - return "fdss" in config_nick - - @classmethod - def get_supported_attributes(cls, config_nick, attributes): - if cls._is_portfolio(config_nick): - return [attr for attr in attributes - if attr in cls.PORTFOLIO_ATTRIBUTES] - return attributes - - def add_config(self, nick, config, timeout=None): - DownwardExperiment.add_config(self, nick, config, timeout=timeout) - self._config_nicks.append(nick) - - def add_absolute_report_step(self, **kwargs): - """Add step that makes an absolute report. - - Absolute reports are useful for experiments that don't - compare revisions. - - The report is written to the experiment evaluation directory. - - All *kwargs* will be passed to the AbsoluteReport class. If - the keyword argument *attributes* is not specified, a - default list of attributes is used. :: - - exp.add_absolute_report_step(attributes=["coverage"]) - - """ - kwargs.setdefault("attributes", self.DEFAULT_TABLE_ATTRIBUTES) - report = AbsoluteReport(**kwargs) - outfile = get_experiment_name() + "." + report.output_format - self.add_report(report, outfile=outfile) - - def add_comparison_table_step(self, **kwargs): - """Add a step that makes pairwise revision comparisons. - - Create comparative reports for all pairs of Fast Downward - revision triples. Each report pairs up the runs of the same - config and lists the two absolute attribute values and their - difference for all attributes in kwargs["attributes"]. - - All *kwargs* will be passed to the CompareRevisionsReport - class. If the keyword argument *attributes* is not - specified, a default list of attributes is used. 
:: - - exp.add_comparison_table_step(attributes=["coverage"]) - - """ - kwargs.setdefault("attributes", self.DEFAULT_TABLE_ATTRIBUTES) - - def make_comparison_tables(): - for rev1, rev2 in itertools.combinations(self.revision_nicks, 2): - report = CompareRevisionsReport(rev1, rev2, **kwargs) - outfile = os.path.join(self.eval_dir, - "%s-%s-%s-compare.html" % - (self.name, rev1, rev2)) - report(self.eval_dir, outfile) - - self.add_step(Step("make-comparison-tables", make_comparison_tables)) - - def add_scatter_plot_step(self, attributes=None): - """Add a step that creates scatter plots for all revision pairs. - - Create a scatter plot for each combination of attribute, - configuration and revision pair. If *attributes* is not - specified, a list of common scatter plot attributes is used. - For portfolios all attributes except "cost", "coverage" and - "plan_length" will be ignored. :: - - exp.add_scatter_plot_step(attributes=["expansions"]) - - """ - if attributes is None: - attributes = self.DEFAULT_SCATTER_PLOT_ATTRIBUTES - scatter_dir = os.path.join(self.eval_dir, "scatter") - - def make_scatter_plot(config_nick, rev1, rev2, attribute): - name = "-".join([self.name, rev1, rev2, attribute, config_nick]) - print "Make scatter plot for", name - algo1 = "%s-%s" % (rev1, config_nick) - algo2 = "%s-%s" % (rev2, config_nick) - report = ScatterPlotReport( - filter_config=[algo1, algo2], - attributes=[attribute], - get_category=lambda run1, run2: run1["domain"], - legend_location=(1.3, 0.5)) - report(self.eval_dir, - os.path.join(scatter_dir, rev1 + "-" + rev2, name)) - - def make_scatter_plots(): - for config_nick in self._config_nicks: - for rev1, rev2 in itertools.combinations( - self.revision_nicks, 2): - for attribute in self.get_supported_attributes( - config_nick, attributes): - make_scatter_plot(config_nick, rev1, rev2, attribute) - - self.add_step(Step("make-scatter-plots", make_scatter_plots)) diff --git a/experiments/issue560/issue560.py 
b/experiments/issue560/issue560.py deleted file mode 100755 index ba44379774..0000000000 --- a/experiments/issue560/issue560.py +++ /dev/null @@ -1,35 +0,0 @@ -#! /usr/bin/env python -# -*- coding: utf-8 -*- - -from downward import suites -from relativescatter import RelativeScatterPlotReport -import common_setup - - -REVS = ["issue560-base", "issue560-v1"] -SUITE = suites.suite_all() - -# We are only interested in the preprocessing here and will only run the first steps of the experiment. -CONFIGS = { - "astar_blind": [ - "--search", - "astar(blind())"], -} - -exp = common_setup.IssueExperiment( - search_revisions=REVS, - configs=CONFIGS, - suite=SUITE, - ) - -exp.add_report( - RelativeScatterPlotReport( - attributes=["preprocess_wall_clock_time"], - get_category=lambda run1, run2: run1.get("domain"), - ), - outfile='issue560_base_v1_preprocess_wall_clock_time.png' -) - -exp.add_absolute_report_step(attributes=["preprocess_wall_clock_time"]) - -exp() diff --git a/experiments/issue560/relativescatter.py b/experiments/issue560/relativescatter.py deleted file mode 100644 index 41a8385a87..0000000000 --- a/experiments/issue560/relativescatter.py +++ /dev/null @@ -1,126 +0,0 @@ -# -*- coding: utf-8 -*- -# -# downward uses the lab package to conduct experiments with the -# Fast Downward planning system. -# -# This program is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with this program. If not, see . 
- -from collections import defaultdict -import os - -from lab import tools - -from matplotlib import ticker - -from downward.reports.scatter import ScatterPlotReport -from downward.reports.plot import PlotReport, Matplotlib, MatplotlibPlot - - -# TODO: handle outliers - -# TODO: this is mostly copied from ScatterMatplotlib (scatter.py) -class RelativeScatterMatplotlib(Matplotlib): - @classmethod - def _plot(cls, report, axes, categories, styles): - # Display grid - axes.grid(b=True, linestyle='-', color='0.75') - - has_points = False - # Generate the scatter plots - for category, coords in sorted(categories.items()): - X, Y = zip(*coords) - axes.scatter(X, Y, s=42, label=category, **styles[category]) - if X and Y: - has_points = True - - if report.xscale == 'linear' or report.yscale == 'linear': - plot_size = report.missing_val * 1.01 - else: - plot_size = report.missing_val * 1.25 - - # make 5 ticks above and below 1 - yticks = [] - tick_step = report.ylim_top**(1/5.0) - for i in xrange(-5, 6): - yticks.append(tick_step**i) - axes.set_yticks(yticks) - axes.get_yaxis().set_major_formatter(ticker.ScalarFormatter()) - - axes.set_xlim(report.xlim_left or -1, report.xlim_right or plot_size) - axes.set_ylim(report.ylim_bottom or -1, report.ylim_top or plot_size) - - for axis in [axes.xaxis, axes.yaxis]: - MatplotlibPlot.change_axis_formatter(axis, - report.missing_val if report.show_missing else None) - return has_points - - - -class RelativeScatterPlotReport(ScatterPlotReport): - """ - Generate a scatter plot that shows how a specific attribute in two - configurations. The attribute value in config 1 is shown on the - x-axis and the relation to the value in config 2 on the y-axis. 
- """ - - def __init__(self, show_missing=True, get_category=None, **kwargs): - ScatterPlotReport.__init__(self, show_missing, get_category, **kwargs) - if self.output_format == 'tex': - raise "not supported" - else: - self.writer = RelativeScatterMatplotlib - - - def _fill_categories(self, runs): - # We discard the *runs* parameter. - # Map category names to value tuples - categories = defaultdict(list) - self.ylim_bottom = 2 - self.ylim_top = 0.5 - self.xlim_left = float("inf") - for (domain, problem), runs in self.problem_runs.items(): - if len(runs) != 2: - continue - run1, run2 = runs - assert (run1['config'] == self.configs[0] and - run2['config'] == self.configs[1]) - val1 = run1.get(self.attribute) - val2 = run2.get(self.attribute) - if val1 is None or val2 is None: - continue - category = self.get_category(run1, run2) - assert val1 > 0, (domain, problem, self.configs[0], val1) - assert val2 > 0, (domain, problem, self.configs[1], val2) - x = val1 - y = val2 / float(val1) - - categories[category].append((x, y)) - - self.ylim_top = max(self.ylim_top, y) - self.ylim_bottom = min(self.ylim_bottom, y) - self.xlim_left = min(self.xlim_left, x) - - # center around 1 - if self.ylim_bottom < 1: - self.ylim_top = max(self.ylim_top, 1 / float(self.ylim_bottom)) - if self.ylim_top > 1: - self.ylim_bottom = min(self.ylim_bottom, 1 / float(self.ylim_top)) - return categories - - def _set_scales(self, xscale, yscale): - # ScatterPlots use log-scaling on the x-axis by default. 
- default_xscale = 'log' - if self.attribute and self.attribute in self.LINEAR: - default_xscale = 'linear' - PlotReport._set_scales(self, xscale or default_xscale, 'log') diff --git a/experiments/issue561/README b/experiments/issue561/README deleted file mode 100644 index 86baba8330..0000000000 --- a/experiments/issue561/README +++ /dev/null @@ -1,9 +0,0 @@ -Description of tags for this issue: - -- issue561-base: base version before any changes -- issue561-v1: mostly cleanup; introduce smart pointers in some places -- issue561-v2: split off class for heuristic representation -- issue561-v3: split off class for distance computations -- issue561-v4: split off rudimentary FactoredTransitionSystem class - and move construction of atomic transition systems into a factory -- issue561-v5: remove "messy" methods of Distances diff --git a/experiments/issue561/common_setup.py b/experiments/issue561/common_setup.py deleted file mode 100644 index 9cd99e539f..0000000000 --- a/experiments/issue561/common_setup.py +++ /dev/null @@ -1,358 +0,0 @@ -# -*- coding: utf-8 -*- - -import itertools -import os -import platform -import sys - -from lab.environments import LocalEnvironment, MaiaEnvironment -from lab.experiment import ARGPARSER -from lab.steps import Step - -from downward.experiments import DownwardExperiment, _get_rev_nick -from downward.checkouts import Translator, Preprocessor, Planner -from downward.reports.absolute import AbsoluteReport -from downward.reports.compare import CompareRevisionsReport -from downward.reports.scatter import ScatterPlotReport - - -def parse_args(): - ARGPARSER.add_argument( - "--test", - choices=["yes", "no", "auto"], - default="auto", - dest="test_run", - help="test experiment locally on a small suite if --test=yes or " - "--test=auto and we are not on a cluster") - return ARGPARSER.parse_args() - -ARGS = parse_args() - - -def get_script(): - """Get file name of main script.""" - import __main__ - return __main__.__file__ - - -def 
get_script_dir(): - """Get directory of main script. - - Usually a relative directory (depends on how it was called by the user.)""" - return os.path.dirname(get_script()) - - -def get_experiment_name(): - """Get name for experiment. - - Derived from the absolute filename of the main script, e.g. - "/ham/spam/eggs.py" => "spam-eggs".""" - script = os.path.abspath(get_script()) - script_dir = os.path.basename(os.path.dirname(script)) - script_base = os.path.splitext(os.path.basename(script))[0] - return "%s-%s" % (script_dir, script_base) - - -def get_data_dir(): - """Get data dir for the experiment. - - This is the subdirectory "data" of the directory containing - the main script.""" - return os.path.join(get_script_dir(), "data", get_experiment_name()) - - -def get_repo_base(): - """Get base directory of the repository, as an absolute path. - - Search upwards in the directory tree from the main script until a - directory with a subdirectory named ".hg" is found. - - Abort if the repo base cannot be found.""" - path = os.path.abspath(get_script_dir()) - while os.path.dirname(path) != path: - if os.path.exists(os.path.join(path, ".hg")): - return path - path = os.path.dirname(path) - sys.exit("repo base could not be found") - - -def is_running_on_cluster(): - node = platform.node() - return ("cluster" in node or - node.startswith("gkigrid") or - node in ["habakuk", "turtur"]) - - -def is_test_run(): - return ARGS.test_run == "yes" or (ARGS.test_run == "auto" and - not is_running_on_cluster()) - - -class IssueExperiment(DownwardExperiment): - """Wrapper for DownwardExperiment with a few convenience features.""" - - DEFAULT_TEST_SUITE = "gripper:prob01.pddl" - - DEFAULT_TABLE_ATTRIBUTES = [ - "cost", - "coverage", - "error", - "evaluations", - "expansions", - "expansions_until_last_jump", - "generated", - "memory", - "quality", - "run_dir", - "score_evaluations", - "score_expansions", - "score_generated", - "score_memory", - "score_search_time", - "score_total_time", 
- "search_time", - "total_time", - ] - - DEFAULT_SCATTER_PLOT_ATTRIBUTES = [ - "evaluations", - "expansions", - "expansions_until_last_jump", - "initial_h_value", - "memory", - "search_time", - "total_time", - ] - - PORTFOLIO_ATTRIBUTES = [ - "cost", - "coverage", - "error", - "plan_length", - "run_dir", - ] - - def __init__(self, configs, suite, grid_priority=None, path=None, - repo=None, revisions=None, search_revisions=None, - test_suite=None, email=None, processes=1, **kwargs): - """Create a DownwardExperiment with some convenience features. - - *configs* must be a non-empty dict of {nick: cmdline} pairs - that sets the planner configurations to test. :: - - IssueExperiment(configs={ - "lmcut": ["--search", "astar(lmcut())"], - "ipdb": ["--search", "astar(ipdb())"]}) - - *suite* sets the benchmarks for the experiment. It must be a - single string or a list of strings specifying domains or - tasks. The downward.suites module has many predefined - suites. :: - - IssueExperiment(suite=["grid", "gripper:prob01.pddl"]) - - from downward import suites - IssueExperiment(suite=suites.suite_all()) - IssueExperiment(suite=suites.suite_satisficing_with_ipc11()) - IssueExperiment(suite=suites.suite_optimal()) - - Use *grid_priority* to set the job priority for cluster - experiments. It must be in the range [-1023, 0] where 0 is the - highest priority. By default the priority is 0. :: - - IssueExperiment(grid_priority=-500) - - If *path* is specified, it must be the path to where the - experiment should be built (e.g. - /home/john/experiments/issue123/exp01/). If omitted, the - experiment path is derived automatically from the main - script's filename. Example:: - - script = experiments/issue123/exp01.py --> - path = experiments/issue123/data/issue123-exp01/ - - If *repo* is specified, it must be the path to the root of a - local Fast Downward repository. If omitted, the repository - is derived automatically from the main script's path. 
Example:: - - script = /path/to/fd-repo/experiments/issue123/exp01.py --> - repo = /path/to/fd-repo - - If *revisions* is specified, it should be a non-empty - list of revisions, which specify which planner versions to use - in the experiment. The same versions are used for translator, - preprocessor and search. :: - - IssueExperiment(revisions=["issue123", "4b3d581643"]) - - If *search_revisions* is specified, it should be a non-empty - list of revisions, which specify which search component - versions to use in the experiment. All runs use the - translator and preprocessor component of the first - revision. :: - - IssueExperiment(search_revisions=["default", "issue123"]) - - If you really need to specify the (translator, preprocessor, - planner) triples manually, use the *combinations* parameter - from the base class (might be deprecated soon). The options - *revisions*, *search_revisions* and *combinations* can be - freely mixed, but at least one of them must be given. - - Specify *test_suite* to set the benchmarks for experiment test - runs. By default the first gripper task is used. 
- - IssueExperiment(test_suite=["depot:pfile1", "tpp:p01.pddl"]) - - """ - - if is_test_run(): - kwargs["environment"] = LocalEnvironment(processes=processes) - suite = test_suite or self.DEFAULT_TEST_SUITE - elif "environment" not in kwargs: - kwargs["environment"] = MaiaEnvironment(priority=grid_priority, - email=email) - - if path is None: - path = get_data_dir() - - if repo is None: - repo = get_repo_base() - - kwargs.setdefault("combinations", []) - - if not any([revisions, search_revisions, kwargs["combinations"]]): - raise ValueError('At least one of "revisions", "search_revisions" ' - 'or "combinations" must be given') - - if revisions: - kwargs["combinations"].extend([ - (Translator(repo, rev), - Preprocessor(repo, rev), - Planner(repo, rev)) - for rev in revisions]) - - if search_revisions: - base_rev = search_revisions[0] - # Use the same nick for all parts to get short revision nick. - kwargs["combinations"].extend([ - (Translator(repo, base_rev, nick=rev), - Preprocessor(repo, base_rev, nick=rev), - Planner(repo, rev, nick=rev)) - for rev in search_revisions]) - - DownwardExperiment.__init__(self, path=path, repo=repo, **kwargs) - - self._config_nicks = [] - for nick, config in configs.items(): - self.add_config(nick, config) - - self.add_suite(suite) - - @property - def revision_nicks(self): - # TODO: Once the add_algorithm() API is available we should get - # rid of the call to _get_rev_nick() and avoid inspecting the - # list of combinations by setting and saving the algorithm nicks. 
- return [_get_rev_nick(*combo) for combo in self.combinations] - - @classmethod - def _is_portfolio(cls, config_nick): - return "fdss" in config_nick - - @classmethod - def get_supported_attributes(cls, config_nick, attributes): - if cls._is_portfolio(config_nick): - return [attr for attr in attributes - if attr in cls.PORTFOLIO_ATTRIBUTES] - return attributes - - def add_config(self, nick, config, timeout=None): - DownwardExperiment.add_config(self, nick, config, timeout=timeout) - self._config_nicks.append(nick) - - def add_absolute_report_step(self, **kwargs): - """Add step that makes an absolute report. - - Absolute reports are useful for experiments that don't - compare revisions. - - The report is written to the experiment evaluation directory. - - All *kwargs* will be passed to the AbsoluteReport class. If - the keyword argument *attributes* is not specified, a - default list of attributes is used. :: - - exp.add_absolute_report_step(attributes=["coverage"]) - - """ - kwargs.setdefault("attributes", self.DEFAULT_TABLE_ATTRIBUTES) - report = AbsoluteReport(**kwargs) - outfile = get_experiment_name() + "." + report.output_format - self.add_report(report, outfile=outfile) - - def add_comparison_table_step(self, **kwargs): - """Add a step that makes pairwise revision comparisons. - - Create comparative reports for all pairs of Fast Downward - revision triples. Each report pairs up the runs of the same - config and lists the two absolute attribute values and their - difference for all attributes in kwargs["attributes"]. - - All *kwargs* will be passed to the CompareRevisionsReport - class. If the keyword argument *attributes* is not - specified, a default list of attributes is used. 
:: - - exp.add_comparison_table_step(attributes=["coverage"]) - - """ - kwargs.setdefault("attributes", self.DEFAULT_TABLE_ATTRIBUTES) - - def make_comparison_tables(): - for rev1, rev2 in itertools.combinations(self.revision_nicks, 2): - report = CompareRevisionsReport(rev1, rev2, **kwargs) - outfile = os.path.join(self.eval_dir, - "%s-%s-%s-compare.html" % - (self.name, rev1, rev2)) - report(self.eval_dir, outfile) - - self.add_step(Step("make-comparison-tables", make_comparison_tables)) - - def add_scatter_plot_step(self, attributes=None): - """Add a step that creates scatter plots for all revision pairs. - - Create a scatter plot for each combination of attribute, - configuration and revision pair. If *attributes* is not - specified, a list of common scatter plot attributes is used. - For portfolios all attributes except "cost", "coverage" and - "plan_length" will be ignored. :: - - exp.add_scatter_plot_step(attributes=["expansions"]) - - """ - if attributes is None: - attributes = self.DEFAULT_SCATTER_PLOT_ATTRIBUTES - scatter_dir = os.path.join(self.eval_dir, "scatter") - - def make_scatter_plot(config_nick, rev1, rev2, attribute): - name = "-".join([self.name, rev1, rev2, attribute, config_nick]) - print "Make scatter plot for", name - algo1 = "%s-%s" % (rev1, config_nick) - algo2 = "%s-%s" % (rev2, config_nick) - report = ScatterPlotReport( - filter_config=[algo1, algo2], - attributes=[attribute], - get_category=lambda run1, run2: run1["domain"], - legend_location=(1.3, 0.5)) - report(self.eval_dir, - os.path.join(scatter_dir, rev1 + "-" + rev2, name)) - - def make_scatter_plots(): - for config_nick in self._config_nicks: - for rev1, rev2 in itertools.combinations( - self.revision_nicks, 2): - for attribute in self.get_supported_attributes( - config_nick, attributes): - make_scatter_plot(config_nick, rev1, rev2, attribute) - - self.add_step(Step("make-scatter-plots", make_scatter_plots)) diff --git a/experiments/issue561/main.py 
b/experiments/issue561/main.py deleted file mode 100644 index 52a9980353..0000000000 --- a/experiments/issue561/main.py +++ /dev/null @@ -1,78 +0,0 @@ -#! /usr/bin/env python -# -*- coding: utf-8 -*- - -from downward import suites -from lab.reports import Attribute, gm - -import common_setup - - -def main(rev1, rev2): - REVS = [rev1, rev2] - LIMITS = {"search_time": 1800} - SUITE = suites.suite_optimal_with_ipc11() - - B_CONFIGS = { - 'rl-b50k': ['--search', 'astar(merge_and_shrink(merge_strategy=merge_linear(variable_order=reverse_level),shrink_strategy=shrink_bisimulation(max_states=50000,threshold=1,greedy=false),label_reduction=label_reduction(before_shrinking=true,before_merging=false)))'], - 'cggl-b50k': ['--search', 'astar(merge_and_shrink(merge_strategy=merge_linear(variable_order=cg_goal_level),shrink_strategy=shrink_bisimulation(max_states=50000,threshold=1,greedy=false),label_reduction=label_reduction(before_shrinking=true,before_merging=false)))'], - 'dfp-b50k': ['--search', 'astar(merge_and_shrink(merge_strategy=merge_dfp,shrink_strategy=shrink_bisimulation(max_states=50000,threshold=1,greedy=false),label_reduction=label_reduction(before_shrinking=true,before_merging=false)))'], -} - G_CONFIGS = { - 'rl-ginf': ['--search', 'astar(merge_and_shrink(merge_strategy=merge_linear(variable_order=reverse_level),shrink_strategy=shrink_bisimulation(max_states=infinity,threshold=1,greedy=true),label_reduction=label_reduction(before_shrinking=true,before_merging=false)))'], - 'cggl-ginf': ['--search', 'astar(merge_and_shrink(merge_strategy=merge_linear(variable_order=cg_goal_level),shrink_strategy=shrink_bisimulation(max_states=infinity,threshold=1,greedy=true),label_reduction=label_reduction(before_shrinking=true,before_merging=false)))'], - 'dfp-ginf': ['--search', 
'astar(merge_and_shrink(merge_strategy=merge_dfp,shrink_strategy=shrink_bisimulation(max_states=infinity,threshold=1,greedy=true),label_reduction=label_reduction(before_shrinking=true,before_merging=false)))'], -} - F_CONFIGS = { - 'rl-f50k': ['--search', 'astar(merge_and_shrink(merge_strategy=merge_linear(variable_order=reverse_level),shrink_strategy=shrink_fh(max_states=50000),label_reduction=label_reduction(before_shrinking=false,before_merging=true)))'], - 'cggl-f50k': ['--search', 'astar(merge_and_shrink(merge_strategy=merge_linear(variable_order=cg_goal_level),shrink_strategy=shrink_fh(max_states=50000),label_reduction=label_reduction(before_shrinking=false,before_merging=true)))'], - 'dfp-f50k': ['--search', 'astar(merge_and_shrink(merge_strategy=merge_dfp,shrink_strategy=shrink_fh(max_states=50000),label_reduction=label_reduction(before_shrinking=false,before_merging=true)))'], -} - CONFIGS = dict(B_CONFIGS) - CONFIGS.update(G_CONFIGS) - CONFIGS.update(F_CONFIGS) - - exp = common_setup.IssueExperiment( - search_revisions=REVS, - configs=CONFIGS, - suite=SUITE, - limits=LIMITS, - test_suite=['depot:pfile1'], - processes=4, - email='malte.helmert@unibas.ch', - ) - exp.add_search_parser('ms-parser.py') - - # planner outcome attributes - perfect_heuristic = Attribute('perfect_heuristic', absolute=True, min_wins=False) - proved_unsolvability = Attribute('proved_unsolvability', absolute=True, min_wins=False) - actual_search_time = Attribute('actual_search_time', absolute=False, min_wins=True, functions=[gm]) - - # m&s attributes - ms_construction_time = Attribute('ms_construction_time', absolute=False, min_wins=True, functions=[gm]) - ms_abstraction_constructed = Attribute('ms_abstraction_constructed', absolute=True, min_wins=False) - ms_final_size = Attribute('ms_final_size', absolute=False, min_wins=True) - ms_out_of_memory = Attribute('ms_out_of_memory', absolute=True, min_wins=True) - ms_out_of_time = Attribute('ms_out_of_time', absolute=True, min_wins=True) 
- search_out_of_memory = Attribute('search_out_of_memory', absolute=True, min_wins=True) - search_out_of_time = Attribute('search_out_of_time', absolute=True, min_wins=True) - - extra_attributes = [ - perfect_heuristic, - proved_unsolvability, - actual_search_time, - - ms_construction_time, - ms_abstraction_constructed, - ms_final_size, - ms_out_of_memory, - ms_out_of_time, - search_out_of_memory, - search_out_of_time, - actual_search_time, - ] - attributes = exp.DEFAULT_TABLE_ATTRIBUTES - attributes.extend(extra_attributes) - - exp.add_comparison_table_step(attributes=attributes) - - exp() diff --git a/experiments/issue561/ms-parser.py b/experiments/issue561/ms-parser.py deleted file mode 100755 index d817543b1c..0000000000 --- a/experiments/issue561/ms-parser.py +++ /dev/null @@ -1,76 +0,0 @@ -#! /usr/bin/env python - -from lab.parser import Parser - -parser = Parser() -parser.add_pattern('initial_h_value', 'initial h value: (\d+)', required=False, type=int) -parser.add_pattern('ms_final_size', 'Final transition system size: (\d+)', required=False, type=int) -parser.add_pattern('ms_construction_time', 'Done initializing merge-and-shrink heuristic \[(.+)s\]', required=False, type=float) - -def check_ms_constructed(content, props): - ms_construction_time = props.get('ms_construction_time') - abstraction_constructed = False - if ms_construction_time is not None: - abstraction_constructed = True - props['ms_abstraction_constructed'] = abstraction_constructed - -parser.add_function(check_ms_constructed) - -def check_proved_unsolvability(content, props): - proved_unsolvability = False - if props['coverage'] == 0: - for line in content.splitlines(): - if line == 'Completely explored state space -- no solution!': - proved_unsolvability = True - break - props['proved_unsolvability'] = proved_unsolvability - -parser.add_function(check_proved_unsolvability) - -def check_planner_exit_reason(content, props): - ms_abstraction_constructed = 
props.get('ms_abstraction_constructed') - error = props.get('error') - if error != 'none' and error != 'timeout' and error != 'out-of-memory': - print 'error: %s' % error - return - - # Check whether merge-and-shrink computation or search ran out of - # time or memory. - ms_out_of_time = False - ms_out_of_memory = False - search_out_of_time = False - search_out_of_memory = False - if ms_abstraction_constructed == False: - if error == 'timeout': - ms_out_of_time = True - elif error == 'out-of-memory': - ms_out_of_memory = True - elif ms_abstraction_constructed == True: - if error == 'timeout': - search_out_of_time = True - elif error == 'out-of-memory': - search_out_of_memory = True - props['ms_out_of_time'] = ms_out_of_time - props['ms_out_of_memory'] = ms_out_of_memory - props['search_out_of_time'] = search_out_of_time - props['search_out_of_memory'] = search_out_of_memory - - # Compute actual search time - if ms_abstraction_constructed == True and props.get('search_time') is not None: - difference = props.get('search_time') - props.get('ms_construction_time') - props['actual_search_time'] = difference - -parser.add_function(check_planner_exit_reason) - -def check_perfect_heuristic(content, props): - plan_length = props.get('plan_length') - expansions = props.get('expansions') - if plan_length != None: - perfect_heuristic = False - if plan_length + 1 == expansions: - perfect_heuristic = True - props['perfect_heuristic'] = perfect_heuristic - -parser.add_function(check_perfect_heuristic) - -parser.parse() diff --git a/experiments/issue561/v1.py b/experiments/issue561/v1.py deleted file mode 100755 index 4fbf828c1f..0000000000 --- a/experiments/issue561/v1.py +++ /dev/null @@ -1,6 +0,0 @@ -#! 
/usr/bin/env python -# -*- coding: utf-8 -*- - -from main import main - -main("issue561-base", "issue561-v1") diff --git a/experiments/issue561/v2.py b/experiments/issue561/v2.py deleted file mode 100755 index 86d782fd2d..0000000000 --- a/experiments/issue561/v2.py +++ /dev/null @@ -1,6 +0,0 @@ -#! /usr/bin/env python -# -*- coding: utf-8 -*- - -from main import main - -main("issue561-v1", "issue561-v2") diff --git a/experiments/issue561/v3.py b/experiments/issue561/v3.py deleted file mode 100755 index 0c86f8666f..0000000000 --- a/experiments/issue561/v3.py +++ /dev/null @@ -1,6 +0,0 @@ -#! /usr/bin/env python -# -*- coding: utf-8 -*- - -from main import main - -main("issue561-v2", "issue561-v3") diff --git a/experiments/issue561/v4.py b/experiments/issue561/v4.py deleted file mode 100755 index c82508fe93..0000000000 --- a/experiments/issue561/v4.py +++ /dev/null @@ -1,6 +0,0 @@ -#! /usr/bin/env python -# -*- coding: utf-8 -*- - -from main import main - -main("issue561-v3", "issue561-v4") diff --git a/experiments/issue561/v5.py b/experiments/issue561/v5.py deleted file mode 100755 index 970bbddee8..0000000000 --- a/experiments/issue561/v5.py +++ /dev/null @@ -1,6 +0,0 @@ -#! 
/usr/bin/env python -# -*- coding: utf-8 -*- - -from main import main - -main("issue561-v4", "issue561-v5") diff --git a/experiments/issue571/README b/experiments/issue571/README deleted file mode 100644 index 8875add1a5..0000000000 --- a/experiments/issue571/README +++ /dev/null @@ -1,12 +0,0 @@ -Description of tags for this issue: - -- issue571-base: base version before any changes -- issue571-v1: split off class for LabelGroup and related data structure from TransitionSystem -- issue571-base-v2: default branch revision after a few M&S fixes (partly related to Windows) -- issue571-v2: after merging issue571-base-v2 from default -- issue571-v3: in constructor, label reduction of locally equivalent labels, and computation of locally equivalent labels in TransitionSystem, use group ids and methods of LabelEquivalenceRelation rather than manipulating the groups directly -- issue571-v4: label reduction of non locally equivalent labels also uses the new class -- issue571-v5: use a custom iterator when accessing LabelEquivalenceRelation (to hide the underlying containers) -- issue571-v6: use vector instead of list to store LabelGroup objects -- issue571-v7: move all implementation details of LabelGroupConstIterator into cc-file; move the class to its own file; hide LabelEquivalenceRelation and LabelGroup from transition_system.h and its users. -- issue571-v8: LabelGroupConstIterator became TSConstIterator and now lives in transition_system.h/cc. 
It also has a reference to the transitions, so iterating over TransitionSystem now allows to have access to both labels and transitions diff --git a/experiments/issue571/common_setup.py b/experiments/issue571/common_setup.py deleted file mode 100644 index 25ce522645..0000000000 --- a/experiments/issue571/common_setup.py +++ /dev/null @@ -1,366 +0,0 @@ -# -*- coding: utf-8 -*- - -import itertools -import os -import platform -import sys - -from lab.environments import LocalEnvironment, MaiaEnvironment -from lab.experiment import ARGPARSER -from lab.steps import Step - -from downward.experiments import DownwardExperiment, _get_rev_nick -from downward.checkouts import Translator, Preprocessor, Planner -from downward.reports.absolute import AbsoluteReport -from downward.reports.compare import CompareRevisionsReport -from downward.reports.scatter import ScatterPlotReport - - -def parse_args(): - ARGPARSER.add_argument( - "--test", - choices=["yes", "no", "auto"], - default="auto", - dest="test_run", - help="test experiment locally on a small suite if --test=yes or " - "--test=auto and we are not on a cluster") - return ARGPARSER.parse_args() - -ARGS = parse_args() - - -def get_script(): - """Get file name of main script.""" - import __main__ - return __main__.__file__ - - -def get_script_dir(): - """Get directory of main script. - - Usually a relative directory (depends on how it was called by the user.)""" - return os.path.dirname(get_script()) - - -def get_experiment_name(): - """Get name for experiment. - - Derived from the absolute filename of the main script, e.g. - "/ham/spam/eggs.py" => "spam-eggs".""" - script = os.path.abspath(get_script()) - script_dir = os.path.basename(os.path.dirname(script)) - script_base = os.path.splitext(os.path.basename(script))[0] - return "%s-%s" % (script_dir, script_base) - - -def get_data_dir(): - """Get data dir for the experiment. 
- - This is the subdirectory "data" of the directory containing - the main script.""" - return os.path.join(get_script_dir(), "data", get_experiment_name()) - - -def get_repo_base(): - """Get base directory of the repository, as an absolute path. - - Search upwards in the directory tree from the main script until a - directory with a subdirectory named ".hg" is found. - - Abort if the repo base cannot be found.""" - path = os.path.abspath(get_script_dir()) - while os.path.dirname(path) != path: - if os.path.exists(os.path.join(path, ".hg")): - return path - path = os.path.dirname(path) - sys.exit("repo base could not be found") - - -def is_running_on_cluster(): - node = platform.node() - return ("cluster" in node or - node.startswith("gkigrid") or - node in ["habakuk", "turtur"]) - - -def is_test_run(): - return ARGS.test_run == "yes" or (ARGS.test_run == "auto" and - not is_running_on_cluster()) - - -class IssueExperiment(DownwardExperiment): - """Wrapper for DownwardExperiment with a few convenience features.""" - - DEFAULT_TEST_SUITE = "gripper:prob01.pddl" - - DEFAULT_TABLE_ATTRIBUTES = [ - "cost", - "coverage", - "error", - "evaluations", - "expansions", - "expansions_until_last_jump", - "generated", - "memory", - "quality", - "run_dir", - "score_evaluations", - "score_expansions", - "score_generated", - "score_memory", - "score_search_time", - "score_total_time", - "search_time", - "total_time", - ] - - DEFAULT_SCATTER_PLOT_ATTRIBUTES = [ - "evaluations", - "expansions", - "expansions_until_last_jump", - "initial_h_value", - "memory", - "search_time", - "total_time", - ] - - PORTFOLIO_ATTRIBUTES = [ - "cost", - "coverage", - "error", - "plan_length", - "run_dir", - ] - - def __init__(self, configs, suite, grid_priority=None, path=None, - repo=None, revisions=None, search_revisions=None, - test_suite=None, email=None, processes=1, - combinations=None, **kwargs): - """Create a DownwardExperiment with some convenience features. 
- - *configs* must be a non-empty dict of {nick: cmdline} pairs - that sets the planner configurations to test. :: - - IssueExperiment(configs={ - "lmcut": ["--search", "astar(lmcut())"], - "ipdb": ["--search", "astar(ipdb())"]}) - - *suite* sets the benchmarks for the experiment. It must be a - single string or a list of strings specifying domains or - tasks. The downward.suites module has many predefined - suites. :: - - IssueExperiment(suite=["grid", "gripper:prob01.pddl"]) - - from downward import suites - IssueExperiment(suite=suites.suite_all()) - IssueExperiment(suite=suites.suite_satisficing_with_ipc11()) - IssueExperiment(suite=suites.suite_optimal()) - - Use *grid_priority* to set the job priority for cluster - experiments. It must be in the range [-1023, 0] where 0 is the - highest priority. By default the priority is 0. :: - - IssueExperiment(grid_priority=-500) - - If *path* is specified, it must be the path to where the - experiment should be built (e.g. - /home/john/experiments/issue123/exp01/). If omitted, the - experiment path is derived automatically from the main - script's filename. Example:: - - script = experiments/issue123/exp01.py --> - path = experiments/issue123/data/issue123-exp01/ - - If *repo* is specified, it must be the path to the root of a - local Fast Downward repository. If omitted, the repository - is derived automatically from the main script's path. Example:: - - script = /path/to/fd-repo/experiments/issue123/exp01.py --> - repo = /path/to/fd-repo - - If *revisions* is specified, it should be a non-empty - list of revisions, which specify which planner versions to use - in the experiment. The same versions are used for translator, - preprocessor and search. :: - - IssueExperiment(revisions=["issue123", "4b3d581643"]) - - If *search_revisions* is specified, it should be a non-empty - list of revisions, which specify which search component - versions to use in the experiment. 
All runs use the - translator and preprocessor component of the first - revision. :: - - IssueExperiment(search_revisions=["default", "issue123"]) - - If you really need to specify the (translator, preprocessor, - planner) triples manually, use the *combinations* parameter - from the base class (might be deprecated soon). The options - *revisions*, *search_revisions* and *combinations* can be - freely mixed, but at least one of them must be given. - - Specify *test_suite* to set the benchmarks for experiment test - runs. By default the first gripper task is used. - - IssueExperiment(test_suite=["depot:pfile1", "tpp:p01.pddl"]) - - """ - - if is_test_run(): - kwargs["environment"] = LocalEnvironment(processes=processes) - suite = test_suite or self.DEFAULT_TEST_SUITE - elif "environment" not in kwargs: - kwargs["environment"] = MaiaEnvironment(priority=grid_priority, - email=email) - - if path is None: - path = get_data_dir() - - if repo is None: - repo = get_repo_base() - - kwargs.setdefault("combinations", []) - - if not any([revisions, search_revisions, combinations, kwargs["combinations"]]): - raise ValueError('At least one of "revisions", "search_revisions" ' - 'or "combinations" must be given') - - if revisions: - kwargs["combinations"].extend([ - (Translator(repo, rev), - Preprocessor(repo, rev), - Planner(repo, rev)) - for rev in revisions]) - - if search_revisions: - base_rev = search_revisions[0] - # Use the same nick for all parts to get short revision nick. 
- kwargs["combinations"].extend([ - (Translator(repo, base_rev, nick=rev), - Preprocessor(repo, base_rev, nick=rev), - Planner(repo, rev, nick=rev)) - for rev in search_revisions]) - - if combinations: - kwargs["combinations"].extend([ - (Translator(repo, comb[0]), - Preprocessor(repo, comb[1]), - Planner(repo, comb[2])) - for comb in combinations]) - - DownwardExperiment.__init__(self, path=path, repo=repo, **kwargs) - - self._config_nicks = [] - for nick, config in configs.items(): - self.add_config(nick, config) - - self.add_suite(suite) - - @property - def revision_nicks(self): - # TODO: Once the add_algorithm() API is available we should get - # rid of the call to _get_rev_nick() and avoid inspecting the - # list of combinations by setting and saving the algorithm nicks. - return [_get_rev_nick(*combo) for combo in self.combinations] - - @classmethod - def _is_portfolio(cls, config_nick): - return "fdss" in config_nick - - @classmethod - def get_supported_attributes(cls, config_nick, attributes): - if cls._is_portfolio(config_nick): - return [attr for attr in attributes - if attr in cls.PORTFOLIO_ATTRIBUTES] - return attributes - - def add_config(self, nick, config, timeout=None): - DownwardExperiment.add_config(self, nick, config, timeout=timeout) - self._config_nicks.append(nick) - - def add_absolute_report_step(self, **kwargs): - """Add step that makes an absolute report. - - Absolute reports are useful for experiments that don't - compare revisions. - - The report is written to the experiment evaluation directory. - - All *kwargs* will be passed to the AbsoluteReport class. If - the keyword argument *attributes* is not specified, a - default list of attributes is used. :: - - exp.add_absolute_report_step(attributes=["coverage"]) - - """ - kwargs.setdefault("attributes", self.DEFAULT_TABLE_ATTRIBUTES) - report = AbsoluteReport(**kwargs) - outfile = get_experiment_name() + "." 
+ report.output_format - self.add_report(report, outfile=outfile) - - def add_comparison_table_step(self, **kwargs): - """Add a step that makes pairwise revision comparisons. - - Create comparative reports for all pairs of Fast Downward - revision triples. Each report pairs up the runs of the same - config and lists the two absolute attribute values and their - difference for all attributes in kwargs["attributes"]. - - All *kwargs* will be passed to the CompareRevisionsReport - class. If the keyword argument *attributes* is not - specified, a default list of attributes is used. :: - - exp.add_comparison_table_step(attributes=["coverage"]) - - """ - kwargs.setdefault("attributes", self.DEFAULT_TABLE_ATTRIBUTES) - - def make_comparison_tables(): - for rev1, rev2 in itertools.combinations(self.revision_nicks, 2): - report = CompareRevisionsReport(rev1, rev2, **kwargs) - outfile = os.path.join(self.eval_dir, - "%s-%s-%s-compare.html" % - (self.name, rev1, rev2)) - report(self.eval_dir, outfile) - - self.add_step(Step("make-comparison-tables", make_comparison_tables)) - - def add_scatter_plot_step(self, attributes=None): - """Add a step that creates scatter plots for all revision pairs. - - Create a scatter plot for each combination of attribute, - configuration and revision pair. If *attributes* is not - specified, a list of common scatter plot attributes is used. - For portfolios all attributes except "cost", "coverage" and - "plan_length" will be ignored. 
:: - - exp.add_scatter_plot_step(attributes=["expansions"]) - - """ - if attributes is None: - attributes = self.DEFAULT_SCATTER_PLOT_ATTRIBUTES - scatter_dir = os.path.join(self.eval_dir, "scatter") - - def make_scatter_plot(config_nick, rev1, rev2, attribute): - name = "-".join([self.name, rev1, rev2, attribute, config_nick]) - print "Make scatter plot for", name - algo1 = "%s-%s" % (rev1, config_nick) - algo2 = "%s-%s" % (rev2, config_nick) - report = ScatterPlotReport( - filter_config=[algo1, algo2], - attributes=[attribute], - get_category=lambda run1, run2: run1["domain"], - legend_location=(1.3, 0.5)) - report(self.eval_dir, - os.path.join(scatter_dir, rev1 + "-" + rev2, name)) - - def make_scatter_plots(): - for config_nick in self._config_nicks: - for rev1, rev2 in itertools.combinations( - self.revision_nicks, 2): - for attribute in self.get_supported_attributes( - config_nick, attributes): - make_scatter_plot(config_nick, rev1, rev2, attribute) - - self.add_step(Step("make-scatter-plots", make_scatter_plots)) diff --git a/experiments/issue571/compare-base-base-v2.py b/experiments/issue571/compare-base-base-v2.py deleted file mode 100755 index ab72942c7c..0000000000 --- a/experiments/issue571/compare-base-base-v2.py +++ /dev/null @@ -1,101 +0,0 @@ -#! 
/usr/bin/env python -# -*- coding: utf-8 -*- - -from downward import suites -from lab.reports import Attribute, gm -from downward.reports.absolute import AbsoluteReport -from downward.reports.compare import CompareConfigsReport - -import common_setup - - -REVS = ["issue571-base", "issue571-base-v2"] -LIMITS = {"search_time": 1800} -SUITE = suites.suite_optimal_with_ipc11() - -B_CONFIGS = { - 'rl-b50k': ['--search', 'astar(merge_and_shrink(merge_strategy=merge_linear(variable_order=reverse_level),shrink_strategy=shrink_bisimulation(max_states=50000,threshold=1,greedy=false),label_reduction=label_reduction(before_shrinking=true,before_merging=false)))'], - 'cggl-b50k': ['--search', 'astar(merge_and_shrink(merge_strategy=merge_linear(variable_order=cg_goal_level),shrink_strategy=shrink_bisimulation(max_states=50000,threshold=1,greedy=false),label_reduction=label_reduction(before_shrinking=true,before_merging=false)))'], - 'dfp-b50k': ['--search', 'astar(merge_and_shrink(merge_strategy=merge_dfp,shrink_strategy=shrink_bisimulation(max_states=50000,threshold=1,greedy=false),label_reduction=label_reduction(before_shrinking=true,before_merging=false)))'], -} -G_CONFIGS = { - 'rl-ginf': ['--search', 'astar(merge_and_shrink(merge_strategy=merge_linear(variable_order=reverse_level),shrink_strategy=shrink_bisimulation(max_states=infinity,threshold=1,greedy=true),label_reduction=label_reduction(before_shrinking=true,before_merging=false)))'], - 'cggl-ginf': ['--search', 'astar(merge_and_shrink(merge_strategy=merge_linear(variable_order=cg_goal_level),shrink_strategy=shrink_bisimulation(max_states=infinity,threshold=1,greedy=true),label_reduction=label_reduction(before_shrinking=true,before_merging=false)))'], - 'dfp-ginf': ['--search', 'astar(merge_and_shrink(merge_strategy=merge_dfp,shrink_strategy=shrink_bisimulation(max_states=infinity,threshold=1,greedy=true),label_reduction=label_reduction(before_shrinking=true,before_merging=false)))'], -} -F_CONFIGS = { - 'rl-f50k': 
['--search', 'astar(merge_and_shrink(merge_strategy=merge_linear(variable_order=reverse_level),shrink_strategy=shrink_fh(max_states=50000),label_reduction=label_reduction(before_shrinking=false,before_merging=true)))'], - 'cggl-f50k': ['--search', 'astar(merge_and_shrink(merge_strategy=merge_linear(variable_order=cg_goal_level),shrink_strategy=shrink_fh(max_states=50000),label_reduction=label_reduction(before_shrinking=false,before_merging=true)))'], - 'dfp-f50k': ['--search', 'astar(merge_and_shrink(merge_strategy=merge_dfp,shrink_strategy=shrink_fh(max_states=50000),label_reduction=label_reduction(before_shrinking=false,before_merging=true)))'], -} -CONFIGS = dict(B_CONFIGS) -CONFIGS.update(G_CONFIGS) -CONFIGS.update(F_CONFIGS) - -exp = common_setup.IssueExperiment( - search_revisions=REVS, - configs=CONFIGS, - suite=SUITE, - limits=LIMITS, - test_suite=['depot:pfile1'], - processes=4, - email='silvan.sievers@unibas.ch', - ) - -# planner outcome attributes -perfect_heuristic = Attribute('perfect_heuristic', absolute=True, min_wins=False) -proved_unsolvability = Attribute('proved_unsolvability', absolute=True, min_wins=False) -actual_search_time = Attribute('actual_search_time', absolute=False, min_wins=True, functions=[gm]) - -# m&s attributes -ms_construction_time = Attribute('ms_construction_time', absolute=False, min_wins=True, functions=[gm]) -ms_abstraction_constructed = Attribute('ms_abstraction_constructed', absolute=True, min_wins=False) -ms_final_size = Attribute('ms_final_size', absolute=False, min_wins=True) -ms_out_of_memory = Attribute('ms_out_of_memory', absolute=True, min_wins=True) -ms_out_of_time = Attribute('ms_out_of_time', absolute=True, min_wins=True) -search_out_of_memory = Attribute('search_out_of_memory', absolute=True, min_wins=True) -search_out_of_time = Attribute('search_out_of_time', absolute=True, min_wins=True) - -extra_attributes = [ - perfect_heuristic, - proved_unsolvability, - actual_search_time, - - ms_construction_time, - 
ms_abstraction_constructed, - ms_final_size, - ms_out_of_memory, - ms_out_of_time, - search_out_of_memory, - search_out_of_time, - actual_search_time, -] -attributes = exp.DEFAULT_TABLE_ATTRIBUTES -attributes.extend(extra_attributes) - -def filter_base1(run): - config = run['config'] - if config.startswith('issue571-base-'): - return True - return False - -def filter_base2(run): - config = run['config'] - if config.startswith('issue571-base-issue571-base-issue571-base-v2-'): - return True - return False - -exp.add_fetcher('data/issue571-v1-eval', filter=filter_base1) -exp.add_fetcher('data/issue571-v2-eval', filter=filter_base2) -# TODO: the following does not work, presumably because issue571-base -# is not represented as issue571-base-issue571-base-issue571-base -#exp.add_comparison_table_step(attributes=attributes) -compared_configs=[] -configs1 = ['issue571-base-%s' % conf for conf in CONFIGS] -configs2 = ['issue571-base-issue571-base-issue571-base-v2-%s' % conf for conf in CONFIGS] -for index in range(len(configs1)): - compared_configs.append((configs1[index], configs2[index])) -exp.add_report(CompareConfigsReport(attributes=attributes, - compared_configs=compared_configs)) - -exp() diff --git a/experiments/issue571/main.py b/experiments/issue571/main.py deleted file mode 100644 index 0fa5a79a35..0000000000 --- a/experiments/issue571/main.py +++ /dev/null @@ -1,78 +0,0 @@ -#! 
/usr/bin/env python -# -*- coding: utf-8 -*- - -from downward import suites -from lab.reports import Attribute, gm - -import common_setup - - -def main(revisions=None, combinations=None): - LIMITS = {"search_time": 1800} - SUITE = suites.suite_optimal_with_ipc11() - - B_CONFIGS = { - 'rl-b50k': ['--search', 'astar(merge_and_shrink(merge_strategy=merge_linear(variable_order=reverse_level),shrink_strategy=shrink_bisimulation(max_states=50000,threshold=1,greedy=false),label_reduction=label_reduction(before_shrinking=true,before_merging=false)))'], - 'cggl-b50k': ['--search', 'astar(merge_and_shrink(merge_strategy=merge_linear(variable_order=cg_goal_level),shrink_strategy=shrink_bisimulation(max_states=50000,threshold=1,greedy=false),label_reduction=label_reduction(before_shrinking=true,before_merging=false)))'], - 'dfp-b50k': ['--search', 'astar(merge_and_shrink(merge_strategy=merge_dfp,shrink_strategy=shrink_bisimulation(max_states=50000,threshold=1,greedy=false),label_reduction=label_reduction(before_shrinking=true,before_merging=false)))'], -} - G_CONFIGS = { - 'rl-ginf': ['--search', 'astar(merge_and_shrink(merge_strategy=merge_linear(variable_order=reverse_level),shrink_strategy=shrink_bisimulation(max_states=infinity,threshold=1,greedy=true),label_reduction=label_reduction(before_shrinking=true,before_merging=false)))'], - 'cggl-ginf': ['--search', 'astar(merge_and_shrink(merge_strategy=merge_linear(variable_order=cg_goal_level),shrink_strategy=shrink_bisimulation(max_states=infinity,threshold=1,greedy=true),label_reduction=label_reduction(before_shrinking=true,before_merging=false)))'], - 'dfp-ginf': ['--search', 'astar(merge_and_shrink(merge_strategy=merge_dfp,shrink_strategy=shrink_bisimulation(max_states=infinity,threshold=1,greedy=true),label_reduction=label_reduction(before_shrinking=true,before_merging=false)))'], -} - F_CONFIGS = { - 'rl-f50k': ['--search', 
'astar(merge_and_shrink(merge_strategy=merge_linear(variable_order=reverse_level),shrink_strategy=shrink_fh(max_states=50000),label_reduction=label_reduction(before_shrinking=false,before_merging=true)))'], - 'cggl-f50k': ['--search', 'astar(merge_and_shrink(merge_strategy=merge_linear(variable_order=cg_goal_level),shrink_strategy=shrink_fh(max_states=50000),label_reduction=label_reduction(before_shrinking=false,before_merging=true)))'], - 'dfp-f50k': ['--search', 'astar(merge_and_shrink(merge_strategy=merge_dfp,shrink_strategy=shrink_fh(max_states=50000),label_reduction=label_reduction(before_shrinking=false,before_merging=true)))'], -} - CONFIGS = dict(B_CONFIGS) - CONFIGS.update(G_CONFIGS) - CONFIGS.update(F_CONFIGS) - - exp = common_setup.IssueExperiment( - search_revisions=revisions, - combinations=combinations, - configs=CONFIGS, - suite=SUITE, - limits=LIMITS, - test_suite=['depot:pfile1'], - processes=4, - email='silvan.sievers@unibas.ch', - ) - exp.add_search_parser('ms-parser.py') - - # planner outcome attributes - perfect_heuristic = Attribute('perfect_heuristic', absolute=True, min_wins=False) - proved_unsolvability = Attribute('proved_unsolvability', absolute=True, min_wins=False) - actual_search_time = Attribute('actual_search_time', absolute=False, min_wins=True, functions=[gm]) - - # m&s attributes - ms_construction_time = Attribute('ms_construction_time', absolute=False, min_wins=True, functions=[gm]) - ms_abstraction_constructed = Attribute('ms_abstraction_constructed', absolute=True, min_wins=False) - ms_final_size = Attribute('ms_final_size', absolute=False, min_wins=True) - ms_out_of_memory = Attribute('ms_out_of_memory', absolute=True, min_wins=True) - ms_out_of_time = Attribute('ms_out_of_time', absolute=True, min_wins=True) - search_out_of_memory = Attribute('search_out_of_memory', absolute=True, min_wins=True) - search_out_of_time = Attribute('search_out_of_time', absolute=True, min_wins=True) - - extra_attributes = [ - perfect_heuristic, - 
proved_unsolvability, - actual_search_time, - - ms_construction_time, - ms_abstraction_constructed, - ms_final_size, - ms_out_of_memory, - ms_out_of_time, - search_out_of_memory, - search_out_of_time, - actual_search_time, - ] - attributes = exp.DEFAULT_TABLE_ATTRIBUTES - attributes.extend(extra_attributes) - - exp.add_comparison_table_step(attributes=attributes) - - exp() diff --git a/experiments/issue571/ms-parser.py b/experiments/issue571/ms-parser.py deleted file mode 100755 index d817543b1c..0000000000 --- a/experiments/issue571/ms-parser.py +++ /dev/null @@ -1,76 +0,0 @@ -#! /usr/bin/env python - -from lab.parser import Parser - -parser = Parser() -parser.add_pattern('initial_h_value', 'initial h value: (\d+)', required=False, type=int) -parser.add_pattern('ms_final_size', 'Final transition system size: (\d+)', required=False, type=int) -parser.add_pattern('ms_construction_time', 'Done initializing merge-and-shrink heuristic \[(.+)s\]', required=False, type=float) - -def check_ms_constructed(content, props): - ms_construction_time = props.get('ms_construction_time') - abstraction_constructed = False - if ms_construction_time is not None: - abstraction_constructed = True - props['ms_abstraction_constructed'] = abstraction_constructed - -parser.add_function(check_ms_constructed) - -def check_proved_unsolvability(content, props): - proved_unsolvability = False - if props['coverage'] == 0: - for line in content.splitlines(): - if line == 'Completely explored state space -- no solution!': - proved_unsolvability = True - break - props['proved_unsolvability'] = proved_unsolvability - -parser.add_function(check_proved_unsolvability) - -def check_planner_exit_reason(content, props): - ms_abstraction_constructed = props.get('ms_abstraction_constructed') - error = props.get('error') - if error != 'none' and error != 'timeout' and error != 'out-of-memory': - print 'error: %s' % error - return - - # Check whether merge-and-shrink computation or search ran out of - # time 
or memory. - ms_out_of_time = False - ms_out_of_memory = False - search_out_of_time = False - search_out_of_memory = False - if ms_abstraction_constructed == False: - if error == 'timeout': - ms_out_of_time = True - elif error == 'out-of-memory': - ms_out_of_memory = True - elif ms_abstraction_constructed == True: - if error == 'timeout': - search_out_of_time = True - elif error == 'out-of-memory': - search_out_of_memory = True - props['ms_out_of_time'] = ms_out_of_time - props['ms_out_of_memory'] = ms_out_of_memory - props['search_out_of_time'] = search_out_of_time - props['search_out_of_memory'] = search_out_of_memory - - # Compute actual search time - if ms_abstraction_constructed == True and props.get('search_time') is not None: - difference = props.get('search_time') - props.get('ms_construction_time') - props['actual_search_time'] = difference - -parser.add_function(check_planner_exit_reason) - -def check_perfect_heuristic(content, props): - plan_length = props.get('plan_length') - expansions = props.get('expansions') - if plan_length != None: - perfect_heuristic = False - if plan_length + 1 == expansions: - perfect_heuristic = True - props['perfect_heuristic'] = perfect_heuristic - -parser.add_function(check_perfect_heuristic) - -parser.parse() diff --git a/experiments/issue571/v1.py b/experiments/issue571/v1.py deleted file mode 100755 index 53f1a725e3..0000000000 --- a/experiments/issue571/v1.py +++ /dev/null @@ -1,6 +0,0 @@ -#! /usr/bin/env python -# -*- coding: utf-8 -*- - -from main import main - -main(revisions=["issue571-base", "issue571-v1"]) diff --git a/experiments/issue571/v2.py b/experiments/issue571/v2.py deleted file mode 100755 index e32b37c967..0000000000 --- a/experiments/issue571/v2.py +++ /dev/null @@ -1,6 +0,0 @@ -#! 
/usr/bin/env python -# -*- coding: utf-8 -*- - -from main import main - -main(combinations=[("issue571-base", "issue571-base", "issue571-base-v2"), ("issue571-base", "issue571-base", "issue571-v2")]) diff --git a/experiments/issue571/v3.py b/experiments/issue571/v3.py deleted file mode 100755 index 2cd12ec5f3..0000000000 --- a/experiments/issue571/v3.py +++ /dev/null @@ -1,6 +0,0 @@ -#! /usr/bin/env python -# -*- coding: utf-8 -*- - -from main import main - -main(combinations=[("issue571-base", "issue571-base", "issue571-v2"), ("issue571-base", "issue571-base", "issue571-v3")]) diff --git a/experiments/issue571/v4.py b/experiments/issue571/v4.py deleted file mode 100755 index bfb799691d..0000000000 --- a/experiments/issue571/v4.py +++ /dev/null @@ -1,6 +0,0 @@ -#! /usr/bin/env python -# -*- coding: utf-8 -*- - -from main import main - -main(combinations=[("issue571-base", "issue571-base", "issue571-v3"), ("issue571-base", "issue571-base", "issue571-v4")]) diff --git a/experiments/issue571/v5.py b/experiments/issue571/v5.py deleted file mode 100755 index ec76bac11c..0000000000 --- a/experiments/issue571/v5.py +++ /dev/null @@ -1,6 +0,0 @@ -#! /usr/bin/env python -# -*- coding: utf-8 -*- - -from main import main - -main(combinations=[("issue571-base", "issue571-base", "issue571-v4"), ("issue571-base", "issue571-base", "issue571-v5")]) diff --git a/experiments/issue571/v6.py b/experiments/issue571/v6.py deleted file mode 100755 index 09ac8406fe..0000000000 --- a/experiments/issue571/v6.py +++ /dev/null @@ -1,6 +0,0 @@ -#! /usr/bin/env python -# -*- coding: utf-8 -*- - -from main import main - -main(combinations=[("issue571-base", "issue571-base", "issue571-v5"), ("issue571-base", "issue571-base", "issue571-v6")]) diff --git a/experiments/issue571/v7.py b/experiments/issue571/v7.py deleted file mode 100755 index 877b7c0901..0000000000 --- a/experiments/issue571/v7.py +++ /dev/null @@ -1,6 +0,0 @@ -#! 
/usr/bin/env python -# -*- coding: utf-8 -*- - -from main import main - -main(combinations=[("issue571-base", "issue571-base", "issue571-v6"), ("issue571-base", "issue571-base", "issue571-v7")]) diff --git a/experiments/issue571/v8.py b/experiments/issue571/v8.py deleted file mode 100755 index 88e40324b4..0000000000 --- a/experiments/issue571/v8.py +++ /dev/null @@ -1,6 +0,0 @@ -#! /usr/bin/env python -# -*- coding: utf-8 -*- - -from main import main - -main(combinations=[("issue571-base", "issue571-base", "issue571-base-v2"), ("issue571-base", "issue571-base", "issue571-v7"), ("issue571-base", "issue571-base", "issue571-v8")]) diff --git a/experiments/issue578/common_setup.py b/experiments/issue578/common_setup.py deleted file mode 100644 index fe0b9d655e..0000000000 --- a/experiments/issue578/common_setup.py +++ /dev/null @@ -1,398 +0,0 @@ -# -*- coding: utf-8 -*- - -import itertools -import os -import platform -import subprocess -import sys - -from lab.experiment import ARGPARSER -from lab import tools - -from downward.experiment import FastDownwardExperiment -from downward.reports.absolute import AbsoluteReport -from downward.reports.compare import ComparativeReport -from downward.reports.scatter import ScatterPlotReport - -from relativescatter import RelativeScatterPlotReport - - -def parse_args(): - ARGPARSER.add_argument( - "--test", - choices=["yes", "no", "auto"], - default="auto", - dest="test_run", - help="test experiment locally on a small suite if --test=yes or " - "--test=auto and we are not on a cluster") - return ARGPARSER.parse_args() - -ARGS = parse_args() - - -DEFAULT_OPTIMAL_SUITE = [ - 'airport', 'barman-opt11-strips', 'barman-opt14-strips', 'blocks', - 'childsnack-opt14-strips', 'depot', 'driverlog', - 'elevators-opt08-strips', 'elevators-opt11-strips', - 'floortile-opt11-strips', 'floortile-opt14-strips', 'freecell', - 'ged-opt14-strips', 'grid', 'gripper', 'hiking-opt14-strips', - 'logistics00', 'logistics98', 'miconic', 'movie', 'mprime', - 
'mystery', 'nomystery-opt11-strips', 'openstacks-opt08-strips', - 'openstacks-opt11-strips', 'openstacks-opt14-strips', - 'openstacks-strips', 'parcprinter-08-strips', - 'parcprinter-opt11-strips', 'parking-opt11-strips', - 'parking-opt14-strips', 'pathways-noneg', 'pegsol-08-strips', - 'pegsol-opt11-strips', 'pipesworld-notankage', - 'pipesworld-tankage', 'psr-small', 'rovers', 'satellite', - 'scanalyzer-08-strips', 'scanalyzer-opt11-strips', - 'sokoban-opt08-strips', 'sokoban-opt11-strips', 'storage', - 'tetris-opt14-strips', 'tidybot-opt11-strips', - 'tidybot-opt14-strips', 'tpp', 'transport-opt08-strips', - 'transport-opt11-strips', 'transport-opt14-strips', - 'trucks-strips', 'visitall-opt11-strips', 'visitall-opt14-strips', - 'woodworking-opt08-strips', 'woodworking-opt11-strips', - 'zenotravel'] - -DEFAULT_SATISFICING_SUITE = [ - 'airport', 'assembly', 'barman-sat11-strips', - 'barman-sat14-strips', 'blocks', 'cavediving-14-adl', - 'childsnack-sat14-strips', 'citycar-sat14-adl', 'depot', - 'driverlog', 'elevators-sat08-strips', 'elevators-sat11-strips', - 'floortile-sat11-strips', 'floortile-sat14-strips', 'freecell', - 'ged-sat14-strips', 'grid', 'gripper', 'hiking-sat14-strips', - 'logistics00', 'logistics98', 'maintenance-sat14-adl', 'miconic', - 'miconic-fulladl', 'miconic-simpleadl', 'movie', 'mprime', - 'mystery', 'nomystery-sat11-strips', 'openstacks', - 'openstacks-sat08-adl', 'openstacks-sat08-strips', - 'openstacks-sat11-strips', 'openstacks-sat14-strips', - 'openstacks-strips', 'optical-telegraphs', 'parcprinter-08-strips', - 'parcprinter-sat11-strips', 'parking-sat11-strips', - 'parking-sat14-strips', 'pathways', 'pathways-noneg', - 'pegsol-08-strips', 'pegsol-sat11-strips', 'philosophers', - 'pipesworld-notankage', 'pipesworld-tankage', 'psr-large', - 'psr-middle', 'psr-small', 'rovers', 'satellite', - 'scanalyzer-08-strips', 'scanalyzer-sat11-strips', 'schedule', - 'sokoban-sat08-strips', 'sokoban-sat11-strips', 'storage', - 
'tetris-sat14-strips', 'thoughtful-sat14-strips', - 'tidybot-sat11-strips', 'tpp', 'transport-sat08-strips', - 'transport-sat11-strips', 'transport-sat14-strips', 'trucks', - 'trucks-strips', 'visitall-sat11-strips', 'visitall-sat14-strips', - 'woodworking-sat08-strips', 'woodworking-sat11-strips', - 'zenotravel'] - - -def get_script(): - """Get file name of main script.""" - return tools.get_script_path() - - -def get_script_dir(): - """Get directory of main script. - - Usually a relative directory (depends on how it was called by the user.)""" - return os.path.dirname(get_script()) - - -def get_experiment_name(): - """Get name for experiment. - - Derived from the absolute filename of the main script, e.g. - "/ham/spam/eggs.py" => "spam-eggs".""" - script = os.path.abspath(get_script()) - script_dir = os.path.basename(os.path.dirname(script)) - script_base = os.path.splitext(os.path.basename(script))[0] - return "%s-%s" % (script_dir, script_base) - - -def get_data_dir(): - """Get data dir for the experiment. - - This is the subdirectory "data" of the directory containing - the main script.""" - return os.path.join(get_script_dir(), "data", get_experiment_name()) - - -def get_repo_base(): - """Get base directory of the repository, as an absolute path. - - Search upwards in the directory tree from the main script until a - directory with a subdirectory named ".hg" is found. 
- - Abort if the repo base cannot be found.""" - path = os.path.abspath(get_script_dir()) - while os.path.dirname(path) != path: - if os.path.exists(os.path.join(path, ".hg")): - return path - path = os.path.dirname(path) - sys.exit("repo base could not be found") - - -def is_running_on_cluster(): - node = platform.node() - return ( - "cluster" in node or - node.startswith("gkigrid") or - node in ["habakuk", "turtur"]) - - -def is_running_on_cluster_login_node(): - return platform.node() == "login20.cluster.bc2.ch" - - -def can_publish(): - return is_running_on_cluster_login_node() or not is_running_on_cluster() - - -def publish(report_file): - if can_publish(): - subprocess.call(["publish", report_file]) - else: - print "publishing reports is not supported on this node" - - -def is_test_run(): - return ARGS.test_run == "yes" or ( - ARGS.test_run == "auto" and not is_running_on_cluster()) - - -def get_algo_nick(revision, config_nick): - return "{revision}-{config_nick}".format(**locals()) - - -class IssueConfig(object): - """Hold information about a planner configuration. - - See FastDownwardExperiment.add_algorithm() for documentation of the - constructor's options. 
- - """ - def __init__(self, nick, component_options, - build_options=None, driver_options=None): - self.nick = nick - self.component_options = component_options - self.build_options = build_options - self.driver_options = driver_options - - -class IssueExperiment(FastDownwardExperiment): - """Subclass of FastDownwardExperiment with some convenience features.""" - - DEFAULT_TEST_SUITE = ["gripper:prob01.pddl"] - - DEFAULT_TABLE_ATTRIBUTES = [ - "cost", - "coverage", - "error", - "evaluations", - "expansions", - "expansions_until_last_jump", - "generated", - "memory", - "quality", - "run_dir", - "score_evaluations", - "score_expansions", - "score_generated", - "score_memory", - "score_search_time", - "score_total_time", - "search_time", - "total_time", - ] - - DEFAULT_SCATTER_PLOT_ATTRIBUTES = [ - "evaluations", - "expansions", - "expansions_until_last_jump", - "initial_h_value", - "memory", - "search_time", - "total_time", - ] - - PORTFOLIO_ATTRIBUTES = [ - "cost", - "coverage", - "error", - "plan_length", - "run_dir", - ] - - def __init__(self, revisions=None, configs=None, path=None, **kwargs): - """ - - You can either specify both *revisions* and *configs* or none - of them. If they are omitted, you will need to call - exp.add_algorithm() manually. - - If *revisions* is given, it must be a non-empty list of - revision identifiers, which specify which planner versions to - use in the experiment. The same versions are used for - translator, preprocessor and search. :: - - IssueExperiment(revisions=["issue123", "4b3d581643"], ...) - - If *configs* is given, it must be a non-empty list of - IssueConfig objects. :: - - IssueExperiment(..., configs=[ - IssueConfig("ff", ["--search", "eager_greedy(ff())"]), - IssueConfig( - "lama", [], - driver_options=["--alias", "seq-sat-lama-2011"]), - ]) - - If *path* is specified, it must be the path to where the - experiment should be built (e.g. - /home/john/experiments/issue123/exp01/). 
If omitted, the - experiment path is derived automatically from the main - script's filename. Example:: - - script = experiments/issue123/exp01.py --> - path = experiments/issue123/data/issue123-exp01/ - - """ - - path = path or get_data_dir() - - FastDownwardExperiment.__init__(self, path=path, **kwargs) - - if (revisions and not configs) or (not revisions and configs): - raise ValueError( - "please provide either both or none of revisions and configs") - - for rev in revisions: - for config in configs: - self.add_algorithm( - get_algo_nick(rev, config.nick), - get_repo_base(), - rev, - config.component_options, - build_options=config.build_options, - driver_options=config.driver_options) - - self._revisions = revisions - self._configs = configs - - @classmethod - def _is_portfolio(cls, config_nick): - return "fdss" in config_nick - - @classmethod - def get_supported_attributes(cls, config_nick, attributes): - if cls._is_portfolio(config_nick): - return [attr for attr in attributes - if attr in cls.PORTFOLIO_ATTRIBUTES] - return attributes - - def add_absolute_report_step(self, **kwargs): - """Add step that makes an absolute report. - - Absolute reports are useful for experiments that don't compare - revisions. - - The report is written to the experiment evaluation directory. - - All *kwargs* will be passed to the AbsoluteReport class. If the - keyword argument *attributes* is not specified, a default list - of attributes is used. :: - - exp.add_absolute_report_step(attributes=["coverage"]) - - """ - kwargs.setdefault("attributes", self.DEFAULT_TABLE_ATTRIBUTES) - report = AbsoluteReport(**kwargs) - outfile = os.path.join( - self.eval_dir, - get_experiment_name() + "." + report.output_format) - self.add_report(report, name="make-absolute-report", outfile=outfile) - self.add_step("publish-absolute-report", publish, outfile) - - def add_comparison_table_step(self, **kwargs): - """Add a step that makes pairwise revision comparisons. 
- - Create comparative reports for all pairs of Fast Downward - revisions. Each report pairs up the runs of the same config and - lists the two absolute attribute values and their difference - for all attributes in kwargs["attributes"]. - - All *kwargs* will be passed to the CompareConfigsReport class. - If the keyword argument *attributes* is not specified, a - default list of attributes is used. :: - - exp.add_comparison_table_step(attributes=["coverage"]) - - """ - kwargs.setdefault("attributes", self.DEFAULT_TABLE_ATTRIBUTES) - - def get_revision_pairs_and_files(): - for rev1, rev2 in itertools.combinations(self._revisions, 2): - outfile = os.path.join( - self.eval_dir, - "%s-%s-%s-compare.html" % (self.name, rev1, rev2)) - yield (rev1, rev2, outfile) - - def make_comparison_tables(): - for rev1, rev2, outfile in get_revision_pairs_and_files(): - compared_configs = [] - for config in self._configs: - config_nick = config.nick - compared_configs.append( - ("%s-%s" % (rev1, config_nick), - "%s-%s" % (rev2, config_nick), - "Diff (%s)" % config_nick)) - report = ComparativeReport(compared_configs, **kwargs) - report(self.eval_dir, outfile) - - def publish_comparison_tables(): - for _, _, outfile in get_revision_pairs_and_files(): - publish(outfile) - - self.add_step("make-comparison-tables", make_comparison_tables) - self.add_step("publish-comparison-tables", publish_comparison_tables) - - def add_scatter_plot_step(self, relative=False, attributes=None): - """Add step creating (relative) scatter plots for all revision pairs. - - Create a scatter plot for each combination of attribute, - configuration and revisions pair. If *attributes* is not - specified, a list of common scatter plot attributes is used. - For portfolios all attributes except "cost", "coverage" and - "plan_length" will be ignored. 
:: - - exp.add_scatter_plot_step(attributes=["expansions"]) - - """ - if relative: - report_class = RelativeScatterPlotReport - scatter_dir = os.path.join(self.eval_dir, "scatter-relative") - step_name = "make-relative-scatter-plots" - else: - report_class = ScatterPlotReport - scatter_dir = os.path.join(self.eval_dir, "scatter-absolute") - step_name = "make-absolute-scatter-plots" - if attributes is None: - attributes = self.DEFAULT_SCATTER_PLOT_ATTRIBUTES - - def make_scatter_plot(config_nick, rev1, rev2, attribute): - name = "-".join([self.name, rev1, rev2, attribute, config_nick]) - print "Make scatter plot for", name - algo1 = "{}-{}".format(rev1, config_nick) - algo2 = "{}-{}".format(rev2, config_nick) - report = report_class( - filter_config=[algo1, algo2], - attributes=[attribute], - get_category=lambda run1, run2: run1["domain"], - legend_location=(1.3, 0.5)) - report( - self.eval_dir, - os.path.join(scatter_dir, rev1 + "-" + rev2, name)) - - def make_scatter_plots(): - for config in self._configs: - for rev1, rev2 in itertools.combinations(self._revisions, 2): - for attribute in self.get_supported_attributes( - config.nick, attributes): - make_scatter_plot(config.nick, rev1, rev2, attribute) - - self.add_step(step_name, make_scatter_plots) diff --git a/experiments/issue578/custom-parser.py b/experiments/issue578/custom-parser.py deleted file mode 100755 index 8cdc8c5aae..0000000000 --- a/experiments/issue578/custom-parser.py +++ /dev/null @@ -1,32 +0,0 @@ -#! 
/usr/bin/env python - -from lab.parser import Parser - - -def add_dominance_pruning_failed(content, props): - if "dominance_pruning=False" in content: - failed = False - elif "pdb_collection_construction_time" not in props: - failed = False - else: - failed = "dominance_pruning_time" not in props - props["dominance_pruning_failed"] = int(failed) - - -def main(): - print "Running custom parser" - parser = Parser() - parser.add_pattern( - "pdb_collection_construction_time", "^PDB collection construction time: (.+)s$", type=float, flags="M", required=False) - parser.add_pattern( - "dominance_pruning_time", "^Dominance pruning took (.+)s$", type=float, flags="M", required=False) - parser.add_pattern( - "dominance_pruning_pruned_subsets", "Pruned (\d+) of \d+ maximal additive subsets", type=int, required=False) - parser.add_pattern( - "dominance_pruning_pruned_pdbs", "Pruned (\d+) of \d+ PDBs", type=int, required=False) - parser.add_function(add_dominance_pruning_failed) - parser.parse() - - -main() - diff --git a/experiments/issue578/relativescatter.py b/experiments/issue578/relativescatter.py deleted file mode 100644 index f74cb6e721..0000000000 --- a/experiments/issue578/relativescatter.py +++ /dev/null @@ -1,105 +0,0 @@ -# -*- coding: utf-8 -*- - -from collections import defaultdict - -from matplotlib import ticker - -from downward.reports.scatter import ScatterPlotReport -from downward.reports.plot import PlotReport, Matplotlib, MatplotlibPlot - - -# TODO: handle outliers - -# TODO: this is mostly copied from ScatterMatplotlib (scatter.py) -class RelativeScatterMatplotlib(Matplotlib): - @classmethod - def _plot(cls, report, axes, categories, styles): - # Display grid - axes.grid(b=True, linestyle='-', color='0.75') - - has_points = False - # Generate the scatter plots - for category, coords in sorted(categories.items()): - X, Y = zip(*coords) - axes.scatter(X, Y, s=42, label=category, **styles[category]) - if X and Y: - has_points = True - - if report.xscale == 
'linear' or report.yscale == 'linear': - plot_size = report.missing_val * 1.01 - else: - plot_size = report.missing_val * 1.25 - - # make 5 ticks above and below 1 - yticks = [] - tick_step = report.ylim_top**(1/5.0) - for i in xrange(-5, 6): - yticks.append(tick_step**i) - axes.set_yticks(yticks) - axes.get_yaxis().set_major_formatter(ticker.ScalarFormatter()) - - axes.set_xlim(report.xlim_left or -1, report.xlim_right or plot_size) - axes.set_ylim(report.ylim_bottom or -1, report.ylim_top or plot_size) - - for axis in [axes.xaxis, axes.yaxis]: - MatplotlibPlot.change_axis_formatter( - axis, - report.missing_val if report.show_missing else None) - return has_points - - -class RelativeScatterPlotReport(ScatterPlotReport): - """ - Generate a scatter plot that shows a relative comparison of two - algorithms with regard to the given attribute. The attribute value - of algorithm 1 is shown on the x-axis and the relation to the value - of algorithm 2 on the y-axis. - """ - - def __init__(self, show_missing=True, get_category=None, **kwargs): - ScatterPlotReport.__init__(self, show_missing, get_category, **kwargs) - if self.output_format == 'tex': - raise "not supported" - else: - self.writer = RelativeScatterMatplotlib - - def _fill_categories(self, runs): - # We discard the *runs* parameter. 
- # Map category names to value tuples - categories = defaultdict(list) - self.ylim_bottom = 2 - self.ylim_top = 0.5 - self.xlim_left = float("inf") - for (domain, problem), runs in self.problem_runs.items(): - if len(runs) != 2: - continue - run1, run2 = runs - assert (run1['algorithm'] == self.algorithms[0] and - run2['algorithm'] == self.algorithms[1]) - val1 = run1.get(self.attribute) - val2 = run2.get(self.attribute) - if val1 is None or val2 is None: - continue - category = self.get_category(run1, run2) - assert val1 > 0, (domain, problem, self.algorithms[0], val1) - assert val2 > 0, (domain, problem, self.algorithms[1], val2) - x = val1 - y = val2 / float(val1) - - categories[category].append((x, y)) - - self.ylim_top = max(self.ylim_top, y) - self.ylim_bottom = min(self.ylim_bottom, y) - self.xlim_left = min(self.xlim_left, x) - - # center around 1 - if self.ylim_bottom < 1: - self.ylim_top = max(self.ylim_top, 1 / float(self.ylim_bottom)) - if self.ylim_top > 1: - self.ylim_bottom = min(self.ylim_bottom, 1 / float(self.ylim_top)) - return categories - - def _set_scales(self, xscale, yscale): - # ScatterPlot uses log-scaling on the x-axis by default. - PlotReport._set_scales( - self, xscale or self.attribute.scale or 'log', 'log') diff --git a/experiments/issue578/v1-more-configs.py b/experiments/issue578/v1-more-configs.py deleted file mode 100755 index ac72078cb6..0000000000 --- a/experiments/issue578/v1-more-configs.py +++ /dev/null @@ -1,49 +0,0 @@ -#! 
/usr/bin/env python -# -*- coding: utf-8 -*- - -import os - -from lab.environments import LocalEnvironment, BaselSlurmEnvironment - -from downward.reports.compare import ComparativeReport - -import common_setup -from common_setup import IssueConfig, IssueExperiment -from relativescatter import RelativeScatterPlotReport - -DIR = os.path.dirname(os.path.abspath(__file__)) -BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"] -REVISIONS = ["issue578-v1"] -CONFIGS = [ - IssueConfig('cpdbs-sys2-dp500', ['--search', 'astar(cpdbs(patterns=systematic(2),dominance_pruning_max_time=500))']), - IssueConfig('cpdbs-sys2-dp700', ['--search', 'astar(cpdbs(patterns=systematic(2),dominance_pruning_max_time=700))']), - IssueConfig('cpdbs-sys2-dp900', ['--search', 'astar(cpdbs(patterns=systematic(2),dominance_pruning_max_time=900))']), -] -SUITE = common_setup.DEFAULT_OPTIMAL_SUITE -ENVIRONMENT = BaselSlurmEnvironment(email="silvan.sievers@unibas.ch") - -if common_setup.is_test_run(): - SUITE = IssueExperiment.DEFAULT_TEST_SUITE - ENVIRONMENT = LocalEnvironment(processes=1) - -exp = IssueExperiment( - revisions=REVISIONS, - configs=CONFIGS, - environment=ENVIRONMENT, -) -exp.add_resource("custom_parser", "custom-parser.py") -exp.add_command("run-custom-parser", ["{custom_parser}"]) -exp.add_suite(BENCHMARKS_DIR, SUITE) - -attributes = exp.DEFAULT_TABLE_ATTRIBUTES -attributes.extend([ - "dominance_pruning_failed", - "dominance_pruning_time", - "dominance_pruning_pruned_subsets", - "dominance_pruning_pruned_pdbs", - "pdb_collection_construction_time", -]) -exp.add_absolute_report_step(attributes=attributes) -#exp.add_comparison_table_step() - -exp.run_steps() diff --git a/experiments/issue578/v1.py b/experiments/issue578/v1.py deleted file mode 100755 index ee3f807355..0000000000 --- a/experiments/issue578/v1.py +++ /dev/null @@ -1,56 +0,0 @@ -#! 
/usr/bin/env python -# -*- coding: utf-8 -*- - -import os - -from lab.environments import LocalEnvironment, BaselSlurmEnvironment - -from downward.reports.compare import ComparativeReport - -import common_setup -from common_setup import IssueConfig, IssueExperiment -from relativescatter import RelativeScatterPlotReport - -DIR = os.path.dirname(os.path.abspath(__file__)) -BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"] -REVISIONS = ["issue578-v1"] -CONFIGS = [ - IssueConfig('cpdbs-hc900', ['--search', 'astar(cpdbs(patterns=hillclimbing(max_time=900)))']), - IssueConfig('cpdbs-hc900-dp30', ['--search', 'astar(cpdbs(patterns=hillclimbing(max_time=900),dominance_pruning_max_time=30))']), - IssueConfig('cpdbs-hc900-dp60', ['--search', 'astar(cpdbs(patterns=hillclimbing(max_time=900),dominance_pruning_max_time=60))']), - IssueConfig('cpdbs-hc900-dp300', ['--search', 'astar(cpdbs(patterns=hillclimbing(max_time=900),dominance_pruning_max_time=300))']), - IssueConfig('cpdbs-sys2', ['--search', 'astar(cpdbs(patterns=systematic(2)))']), - IssueConfig('cpdbs-sys2-dp30', ['--search', 'astar(cpdbs(patterns=systematic(2),dominance_pruning_max_time=30))']), - IssueConfig('cpdbs-sys2-dp60', ['--search', 'astar(cpdbs(patterns=systematic(2),dominance_pruning_max_time=60))']), - IssueConfig('cpdbs-sys2-dp300', ['--search', 'astar(cpdbs(patterns=systematic(2),dominance_pruning_max_time=300))']), -] -SUITE = common_setup.DEFAULT_OPTIMAL_SUITE -ENVIRONMENT = BaselSlurmEnvironment(email="silvan.sievers@unibas.ch") - -if common_setup.is_test_run(): - SUITE = IssueExperiment.DEFAULT_TEST_SUITE - ENVIRONMENT = LocalEnvironment(processes=1) - -exp = IssueExperiment( - revisions=REVISIONS, - configs=CONFIGS, - environment=ENVIRONMENT, -) -exp.add_resource("custom_parser", "custom-parser.py") -exp.add_command("run-custom-parser", ["{custom_parser}"]) -exp.add_suite(BENCHMARKS_DIR, SUITE) - -attributes = exp.DEFAULT_TABLE_ATTRIBUTES -attributes.extend([ - "dominance_pruning_failed", - 
"dominance_pruning_time", - "dominance_pruning_pruned_subsets", - "dominance_pruning_pruned_pdbs", - "pdb_collection_construction_time", -]) - -exp.add_fetcher('data/issue578-v1-more-configs-eval') -exp.add_absolute_report_step(attributes=attributes) -#exp.add_comparison_table_step() - -exp.run_steps() diff --git a/experiments/issue581/run-tests b/experiments/issue581/run-tests deleted file mode 100755 index 5b9af8b103..0000000000 --- a/experiments/issue581/run-tests +++ /dev/null @@ -1,17 +0,0 @@ -#! /bin/bash - -set -euo pipefail - -cd "$(dirname "$0")" -cd ../.. - -py.test driver/tests.py -py.test src/translate/tests/test_scripts.py - -misc/buildbot/buildbot-exp.py --rev baseline 1 - -misc/tests/run-all-code-tests - -# Commented out because compilation is slow. -# misc/make-ipc-submission seq-sat-lama-2011 64 - diff --git a/experiments/issue582/common_setup.py b/experiments/issue582/common_setup.py deleted file mode 100644 index 6989ef8686..0000000000 --- a/experiments/issue582/common_setup.py +++ /dev/null @@ -1,357 +0,0 @@ -# -*- coding: utf-8 -*- - -import itertools -import os -import platform -import sys - -from lab.environments import LocalEnvironment, MaiaEnvironment -from lab.experiment import ARGPARSER -from lab.steps import Step - -from downward.experiments import DownwardExperiment, _get_rev_nick -from downward.checkouts import Translator, Preprocessor, Planner -from downward.reports.absolute import AbsoluteReport -from downward.reports.compare import CompareRevisionsReport -from downward.reports.scatter import ScatterPlotReport - - -def parse_args(): - ARGPARSER.add_argument( - "--test", - choices=["yes", "no", "auto"], - default="auto", - dest="test_run", - help="test experiment locally on a small suite if --test=yes or " - "--test=auto and we are not on a cluster") - return ARGPARSER.parse_args() - -ARGS = parse_args() - - -def get_script(): - """Get file name of main script.""" - import __main__ - return __main__.__file__ - - -def get_script_dir(): 
- """Get directory of main script. - - Usually a relative directory (depends on how it was called by the user.)""" - return os.path.dirname(get_script()) - - -def get_experiment_name(): - """Get name for experiment. - - Derived from the absolute filename of the main script, e.g. - "/ham/spam/eggs.py" => "spam-eggs".""" - script = os.path.abspath(get_script()) - script_dir = os.path.basename(os.path.dirname(script)) - script_base = os.path.splitext(os.path.basename(script))[0] - return "%s-%s" % (script_dir, script_base) - - -def get_data_dir(): - """Get data dir for the experiment. - - This is the subdirectory "data" of the directory containing - the main script.""" - return os.path.join(get_script_dir(), "data", get_experiment_name()) - - -def get_repo_base(): - """Get base directory of the repository, as an absolute path. - - Search upwards in the directory tree from the main script until a - directory with a subdirectory named ".hg" is found. - - Abort if the repo base cannot be found.""" - path = os.path.abspath(get_script_dir()) - while os.path.dirname(path) != path: - if os.path.exists(os.path.join(path, ".hg")): - return path - path = os.path.dirname(path) - sys.exit("repo base could not be found") - - -def is_running_on_cluster(): - node = platform.node() - return ("cluster" in node or - node.startswith("gkigrid") or - node in ["habakuk", "turtur"]) - - -def is_test_run(): - return ARGS.test_run == "yes" or (ARGS.test_run == "auto" and - not is_running_on_cluster()) - - -class IssueExperiment(DownwardExperiment): - """Wrapper for DownwardExperiment with a few convenience features.""" - - DEFAULT_TEST_SUITE = "gripper:prob01.pddl" - - DEFAULT_TABLE_ATTRIBUTES = [ - "cost", - "coverage", - "error", - "evaluations", - "expansions", - "expansions_until_last_jump", - "generated", - "memory", - "quality", - "run_dir", - "score_evaluations", - "score_expansions", - "score_generated", - "score_memory", - "score_search_time", - "score_total_time", - "search_time", - 
"total_time", - ] - - DEFAULT_SCATTER_PLOT_ATTRIBUTES = [ - "evaluations", - "expansions", - "expansions_until_last_jump", - "initial_h_value", - "memory", - "search_time", - "total_time", - ] - - PORTFOLIO_ATTRIBUTES = [ - "cost", - "coverage", - "error", - "plan_length", - "run_dir", - ] - - def __init__(self, configs, suite, grid_priority=None, path=None, - repo=None, revisions=None, search_revisions=None, - test_suite=None, **kwargs): - """Create a DownwardExperiment with some convenience features. - - *configs* must be a non-empty dict of {nick: cmdline} pairs - that sets the planner configurations to test. :: - - IssueExperiment(configs={ - "lmcut": ["--search", "astar(lmcut())"], - "ipdb": ["--search", "astar(ipdb())"]}) - - *suite* sets the benchmarks for the experiment. It must be a - single string or a list of strings specifying domains or - tasks. The downward.suites module has many predefined - suites. :: - - IssueExperiment(suite=["grid", "gripper:prob01.pddl"]) - - from downward import suites - IssueExperiment(suite=suites.suite_all()) - IssueExperiment(suite=suites.suite_satisficing_with_ipc11()) - IssueExperiment(suite=suites.suite_optimal()) - - Use *grid_priority* to set the job priority for cluster - experiments. It must be in the range [-1023, 0] where 0 is the - highest priority. By default the priority is 0. :: - - IssueExperiment(grid_priority=-500) - - If *path* is specified, it must be the path to where the - experiment should be built (e.g. - /home/john/experiments/issue123/exp01/). If omitted, the - experiment path is derived automatically from the main - script's filename. Example:: - - script = experiments/issue123/exp01.py --> - path = experiments/issue123/data/issue123-exp01/ - - If *repo* is specified, it must be the path to the root of a - local Fast Downward repository. If omitted, the repository - is derived automatically from the main script's path. 
Example:: - - script = /path/to/fd-repo/experiments/issue123/exp01.py --> - repo = /path/to/fd-repo - - If *revisions* is specified, it should be a non-empty - list of revisions, which specify which planner versions to use - in the experiment. The same versions are used for translator, - preprocessor and search. :: - - IssueExperiment(revisions=["issue123", "4b3d581643"]) - - If *search_revisions* is specified, it should be a non-empty - list of revisions, which specify which search component - versions to use in the experiment. All runs use the - translator and preprocessor component of the first - revision. :: - - IssueExperiment(search_revisions=["default", "issue123"]) - - If you really need to specify the (translator, preprocessor, - planner) triples manually, use the *combinations* parameter - from the base class (might be deprecated soon). The options - *revisions*, *search_revisions* and *combinations* can be - freely mixed, but at least one of them must be given. - - Specify *test_suite* to set the benchmarks for experiment test - runs. By default the first gripper task is used. - - IssueExperiment(test_suite=["depot:pfile1", "tpp:p01.pddl"]) - - """ - - if is_test_run(): - kwargs["environment"] = LocalEnvironment() - suite = test_suite or self.DEFAULT_TEST_SUITE - elif "environment" not in kwargs: - kwargs["environment"] = MaiaEnvironment(priority=grid_priority) - - if path is None: - path = get_data_dir() - - if repo is None: - repo = get_repo_base() - - kwargs.setdefault("combinations", []) - - if not any([revisions, search_revisions, kwargs["combinations"]]): - raise ValueError('At least one of "revisions", "search_revisions" ' - 'or "combinations" must be given') - - if revisions: - kwargs["combinations"].extend([ - (Translator(repo, rev), - Preprocessor(repo, rev), - Planner(repo, rev)) - for rev in revisions]) - - if search_revisions: - base_rev = search_revisions[0] - # Use the same nick for all parts to get short revision nick. 
- kwargs["combinations"].extend([ - (Translator(repo, base_rev, nick=rev), - Preprocessor(repo, base_rev, nick=rev), - Planner(repo, rev, nick=rev)) - for rev in search_revisions]) - - DownwardExperiment.__init__(self, path=path, repo=repo, **kwargs) - - self._config_nicks = [] - for nick, config in configs.items(): - self.add_config(nick, config) - - self.add_suite(suite) - - @property - def revision_nicks(self): - # TODO: Once the add_algorithm() API is available we should get - # rid of the call to _get_rev_nick() and avoid inspecting the - # list of combinations by setting and saving the algorithm nicks. - return [_get_rev_nick(*combo) for combo in self.combinations] - - @classmethod - def _is_portfolio(cls, config_nick): - return "fdss" in config_nick - - @classmethod - def get_supported_attributes(cls, config_nick, attributes): - if cls._is_portfolio(config_nick): - return [attr for attr in attributes - if attr in cls.PORTFOLIO_ATTRIBUTES] - return attributes - - def add_config(self, nick, config, timeout=None): - DownwardExperiment.add_config(self, nick, config, timeout=timeout) - self._config_nicks.append(nick) - - def add_absolute_report_step(self, **kwargs): - """Add step that makes an absolute report. - - Absolute reports are useful for experiments that don't - compare revisions. - - The report is written to the experiment evaluation directory. - - All *kwargs* will be passed to the AbsoluteReport class. If - the keyword argument *attributes* is not specified, a - default list of attributes is used. :: - - exp.add_absolute_report_step(attributes=["coverage"]) - - """ - kwargs.setdefault("attributes", self.DEFAULT_TABLE_ATTRIBUTES) - report = AbsoluteReport(**kwargs) - outfile = get_experiment_name() + "." + report.output_format - self.add_report(report, outfile=outfile) - - def add_comparison_table_step(self, **kwargs): - """Add a step that makes pairwise revision comparisons. 
- - Create comparative reports for all pairs of Fast Downward - revision triples. Each report pairs up the runs of the same - config and lists the two absolute attribute values and their - difference for all attributes in kwargs["attributes"]. - - All *kwargs* will be passed to the CompareRevisionsReport - class. If the keyword argument *attributes* is not - specified, a default list of attributes is used. :: - - exp.add_comparison_table_step(attributes=["coverage"]) - - """ - kwargs.setdefault("attributes", self.DEFAULT_TABLE_ATTRIBUTES) - - def make_comparison_tables(): - for rev1, rev2 in itertools.combinations(self.revision_nicks, 2): - report = CompareRevisionsReport(rev1, rev2, **kwargs) - outfile = os.path.join(self.eval_dir, - "%s-%s-%s-compare.html" % - (self.name, rev1, rev2)) - report(self.eval_dir, outfile) - - self.add_step(Step("make-comparison-tables", make_comparison_tables)) - - def add_scatter_plot_step(self, attributes=None): - """Add a step that creates scatter plots for all revision pairs. - - Create a scatter plot for each combination of attribute, - configuration and revision pair. If *attributes* is not - specified, a list of common scatter plot attributes is used. - For portfolios all attributes except "cost", "coverage" and - "plan_length" will be ignored. 
:: - - exp.add_scatter_plot_step(attributes=["expansions"]) - - """ - if attributes is None: - attributes = self.DEFAULT_SCATTER_PLOT_ATTRIBUTES - scatter_dir = os.path.join(self.eval_dir, "scatter") - - def make_scatter_plot(config_nick, rev1, rev2, attribute): - name = "-".join([self.name, rev1, rev2, attribute, config_nick]) - print "Make scatter plot for", name - algo1 = "%s-%s" % (rev1, config_nick) - algo2 = "%s-%s" % (rev2, config_nick) - report = ScatterPlotReport( - filter_config=[algo1, algo2], - attributes=[attribute], - get_category=lambda run1, run2: run1["domain"], - legend_location=(1.3, 0.5)) - report(self.eval_dir, - os.path.join(scatter_dir, rev1 + "-" + rev2, name)) - - def make_scatter_plots(): - for config_nick in self._config_nicks: - for rev1, rev2 in itertools.combinations( - self.revision_nicks, 2): - for attribute in self.get_supported_attributes( - config_nick, attributes): - make_scatter_plot(config_nick, rev1, rev2, attribute) - - self.add_step(Step("make-scatter-plots", make_scatter_plots)) diff --git a/experiments/issue582/relativescatter.py b/experiments/issue582/relativescatter.py deleted file mode 100644 index 41a8385a87..0000000000 --- a/experiments/issue582/relativescatter.py +++ /dev/null @@ -1,126 +0,0 @@ -# -*- coding: utf-8 -*- -# -# downward uses the lab package to conduct experiments with the -# Fast Downward planning system. -# -# This program is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with this program. 
If not, see . - -from collections import defaultdict -import os - -from lab import tools - -from matplotlib import ticker - -from downward.reports.scatter import ScatterPlotReport -from downward.reports.plot import PlotReport, Matplotlib, MatplotlibPlot - - -# TODO: handle outliers - -# TODO: this is mostly copied from ScatterMatplotlib (scatter.py) -class RelativeScatterMatplotlib(Matplotlib): - @classmethod - def _plot(cls, report, axes, categories, styles): - # Display grid - axes.grid(b=True, linestyle='-', color='0.75') - - has_points = False - # Generate the scatter plots - for category, coords in sorted(categories.items()): - X, Y = zip(*coords) - axes.scatter(X, Y, s=42, label=category, **styles[category]) - if X and Y: - has_points = True - - if report.xscale == 'linear' or report.yscale == 'linear': - plot_size = report.missing_val * 1.01 - else: - plot_size = report.missing_val * 1.25 - - # make 5 ticks above and below 1 - yticks = [] - tick_step = report.ylim_top**(1/5.0) - for i in xrange(-5, 6): - yticks.append(tick_step**i) - axes.set_yticks(yticks) - axes.get_yaxis().set_major_formatter(ticker.ScalarFormatter()) - - axes.set_xlim(report.xlim_left or -1, report.xlim_right or plot_size) - axes.set_ylim(report.ylim_bottom or -1, report.ylim_top or plot_size) - - for axis in [axes.xaxis, axes.yaxis]: - MatplotlibPlot.change_axis_formatter(axis, - report.missing_val if report.show_missing else None) - return has_points - - - -class RelativeScatterPlotReport(ScatterPlotReport): - """ - Generate a scatter plot that shows how a specific attribute in two - configurations. The attribute value in config 1 is shown on the - x-axis and the relation to the value in config 2 on the y-axis. 
- """ - - def __init__(self, show_missing=True, get_category=None, **kwargs): - ScatterPlotReport.__init__(self, show_missing, get_category, **kwargs) - if self.output_format == 'tex': - raise "not supported" - else: - self.writer = RelativeScatterMatplotlib - - - def _fill_categories(self, runs): - # We discard the *runs* parameter. - # Map category names to value tuples - categories = defaultdict(list) - self.ylim_bottom = 2 - self.ylim_top = 0.5 - self.xlim_left = float("inf") - for (domain, problem), runs in self.problem_runs.items(): - if len(runs) != 2: - continue - run1, run2 = runs - assert (run1['config'] == self.configs[0] and - run2['config'] == self.configs[1]) - val1 = run1.get(self.attribute) - val2 = run2.get(self.attribute) - if val1 is None or val2 is None: - continue - category = self.get_category(run1, run2) - assert val1 > 0, (domain, problem, self.configs[0], val1) - assert val2 > 0, (domain, problem, self.configs[1], val2) - x = val1 - y = val2 / float(val1) - - categories[category].append((x, y)) - - self.ylim_top = max(self.ylim_top, y) - self.ylim_bottom = min(self.ylim_bottom, y) - self.xlim_left = min(self.xlim_left, x) - - # center around 1 - if self.ylim_bottom < 1: - self.ylim_top = max(self.ylim_top, 1 / float(self.ylim_bottom)) - if self.ylim_top > 1: - self.ylim_bottom = min(self.ylim_bottom, 1 / float(self.ylim_top)) - return categories - - def _set_scales(self, xscale, yscale): - # ScatterPlots use log-scaling on the x-axis by default. - default_xscale = 'log' - if self.attribute and self.attribute in self.LINEAR: - default_xscale = 'linear' - PlotReport._set_scales(self, xscale or default_xscale, 'log') diff --git a/experiments/issue582/v1.py b/experiments/issue582/v1.py deleted file mode 100755 index 0d52f90f2d..0000000000 --- a/experiments/issue582/v1.py +++ /dev/null @@ -1,34 +0,0 @@ -#! 
/usr/bin/env python -# -*- coding: utf-8 -*- - -from downward import suites -from relativescatter import RelativeScatterPlotReport -import common_setup - - -REVS = ["issue582-base", "issue582-v1"] -SUITE = suites.suite_optimal_with_ipc11() - -CONFIGS = { - "astar_lmcut": [ - "--search", - "astar(lmcut())"], -} - -exp = common_setup.IssueExperiment( - search_revisions=REVS, - configs=CONFIGS, - suite=SUITE, - ) - -exp.add_report( - RelativeScatterPlotReport( - attributes=["total_time"], - get_category=lambda run1, run2: run1.get("domain"), - ), - outfile='issue582_base_v1_total_time.png' -) - -exp.add_comparison_table_step() - -exp() diff --git a/experiments/issue583/common_setup.py b/experiments/issue583/common_setup.py deleted file mode 100644 index c628c8b6c5..0000000000 --- a/experiments/issue583/common_setup.py +++ /dev/null @@ -1,300 +0,0 @@ -# -*- coding: utf-8 -*- - -import itertools -import os -import platform -import subprocess -import sys - -from lab.environments import LocalEnvironment, MaiaEnvironment -from lab.experiment import ARGPARSER -from lab.steps import Step - -from downward.experiments.fast_downward_experiment import FastDownwardExperiment -from downward.reports.absolute import AbsoluteReport -from downward.reports.compare import CompareRevisionsReport -from downward.reports.scatter import ScatterPlotReport - -def parse_args(): - ARGPARSER.add_argument( - "--test", - choices=["yes", "no", "auto"], - default="auto", - dest="test_run", - help="test experiment locally on a small suite if --test=yes or " - "--test=auto and we are not on a cluster") - return ARGPARSER.parse_args() - -ARGS = parse_args() - -def get_script(): - """Get file name of main script.""" - import __main__ - return __main__.__file__ - - -def get_script_dir(): - """Get directory of main script. 
- - Usually a relative directory (depends on how it was called by the user.)""" - return os.path.dirname(get_script()) - - -def get_repo_base(): - """Get base directory of the repository, as an absolute path. - - Search upwards in the directory tree from the main script until a - directory with a subdirectory named ".hg" is found. - - Abort if the repo base cannot be found.""" - path = os.path.abspath(get_script_dir()) - while os.path.dirname(path) != path: - if os.path.exists(os.path.join(path, ".hg")): - return path - path = os.path.dirname(path) - sys.exit("repo base could not be found") - - -def is_running_on_cluster(): - node = platform.node() - return ("cluster" in node or - node.startswith("gkigrid") or - node in ["habakuk", "turtur"]) - - -def is_test_run(): - return ARGS.test_run == "yes" or (ARGS.test_run == "auto" and - not is_running_on_cluster()) - - -class IssueExperiment(FastDownwardExperiment): - """Wrapper for FastDownwardExperiment with a few convenience features.""" - - DEFAULT_TEST_SUITE = "gripper:prob01.pddl" - - DEFAULT_TABLE_ATTRIBUTES = [ - "cost", - "coverage", - "error", - "evaluations", - "expansions", - "expansions_until_last_jump", - "generated", - "memory", - "quality", - "run_dir", - "score_evaluations", - "score_expansions", - "score_generated", - "score_memory", - "score_search_time", - "score_total_time", - "search_time", - "total_time", - ] - - DEFAULT_SCATTER_PLOT_ATTRIBUTES = [ - "evaluations", - "expansions", - "expansions_until_last_jump", - "initial_h_value", - "memory", - "search_time", - "total_time", - ] - - PORTFOLIO_ATTRIBUTES = [ - "cost", - "coverage", - "plan_length", - ] - - def __init__(self, configs, revisions, suite, build_options=None, - driver_options=None, grid_priority=None, - test_suite=None, email=None, processes=1, **kwargs): - """Create an FastDownwardExperiment with some convenience features. - All configs will be run on all revisions. 
Inherited options - *path*, *environment* and *cache_dir* from FastDownwardExperiment - are not supported and will be automatically set. - - *configs* must be a non-empty dict of {nick: cmdline} pairs - that sets the planner configurations to test. nick will - automatically get the revision prepended, e.g. - 'issue123-base-':: - - IssueExperiment(configs={ - "lmcut": ["--search", "astar(lmcut())"], - "ipdb": ["--search", "astar(ipdb())"]}) - - *revisions* must be a non-empty list of revisions, which - specify which planner versions to use in the experiment. - The same versions are used for translator, preprocessor - and search. :: - - IssueExperiment(revisions=["issue123", "4b3d581643"]) - - *suite* sets the benchmarks for the experiment. It must be a - single string or a list of strings specifying domains or - tasks. The downward.suites module has many predefined - suites. :: - - IssueExperiment(suite=["grid", "gripper:prob01.pddl"]) - - from downward import suites - IssueExperiment(suite=suites.suite_all()) - IssueExperiment(suite=suites.suite_satisficing_with_ipc11()) - IssueExperiment(suite=suites.suite_optimal()) - - Use *grid_priority* to set the job priority for cluster - experiments. It must be in the range [-1023, 0] where 0 is the - highest priority. By default the priority is 0. :: - - IssueExperiment(grid_priority=-500) - - Specify *test_suite* to set the benchmarks for experiment test - runs. By default the first gripper task is used. 
- - IssueExperiment(test_suite=["depot:pfile1", "tpp:p01.pddl"]) - - """ - - if is_test_run(): - environment = LocalEnvironment(processes=processes) - suite = test_suite or self.DEFAULT_TEST_SUITE - elif "environment" not in kwargs: - environment = MaiaEnvironment(priority=grid_priority, - email=email) - - FastDownwardExperiment.__init__(self, environment=environment, - **kwargs) - - # Automatically deduce the downward repository from the file - repo = get_repo_base() - self.algorithm_nicks = [] - self.revisions = revisions - for nick, cmdline in configs.items(): - for rev in revisions: - algo_nick = '%s-%s' % (rev, nick) - self.add_algorithm(algo_nick, repo, rev, cmdline, - build_options, driver_options) - self.algorithm_nicks.append(algo_nick) - - benchmarks_dir = os.path.join(repo, 'benchmarks') - self.add_suite(benchmarks_dir, suite) - self.search_parsers = [] - - def add_absolute_report_step(self, **kwargs): - """Add step that makes an absolute report. - - Absolute reports are useful for experiments that don't - compare revisions. - - The report is written to the experiment evaluation directory. - - All *kwargs* will be passed to the AbsoluteReport class. If - the keyword argument *attributes* is not specified, a - default list of attributes is used. :: - - exp.add_absolute_report_step(attributes=["coverage"]) - - """ - kwargs.setdefault("attributes", self.DEFAULT_TABLE_ATTRIBUTES) - report = AbsoluteReport(**kwargs) - # oufile is of the form --...-. - outfile = '' - for rev in self.revisions: - outfile += rev - outfile += '-' - outfile = outfile[:len(outfile)-1] - outfile += '.' - outfile += report.output_format - outfile = os.path.join(self.eval_dir, outfile) - self.add_report(report, outfile=outfile) - self.add_step(Step('publish-absolute-report', subprocess.call, ['publish', outfile])) - - def add_comparison_table_step(self, **kwargs): - """Add a step that makes pairwise revision comparisons. 
- - Create comparative reports for all pairs of Fast Downward - revision triples. Each report pairs up the runs of the same - config and lists the two absolute attribute values and their - difference for all attributes in kwargs["attributes"]. - - All *kwargs* will be passed to the CompareRevisionsReport - class. If the keyword argument *attributes* is not - specified, a default list of attributes is used. :: - - exp.add_comparison_table_step(attributes=["coverage"]) - - """ - kwargs.setdefault("attributes", self.DEFAULT_TABLE_ATTRIBUTES) - - def make_comparison_tables(): - for rev1, rev2 in itertools.combinations(self.revisions, 2): - report = CompareRevisionsReport(rev1, rev2, **kwargs) - outfile = os.path.join(self.eval_dir, - "%s-%s-compare.html" % - (rev1, rev2)) - report(self.eval_dir, outfile) - - self.add_step(Step("make-comparison-tables", make_comparison_tables)) - - def publish_comparison_tables(): - for rev1, rev2 in itertools.combinations(self.revisions, 2): - outfile = os.path.join(self.eval_dir, - "%s-%s-compare.html" % - (rev1, rev2)) - subprocess.call(['publish', outfile]) - - self.add_step(Step('publish-comparison-reports', publish_comparison_tables)) - - # TODO: this is copied from the old common_setup, but not tested - # with the new FastDownwardExperiment class! - def add_scatter_plot_step(self, attributes=None): - print 'This has not been tested with the new FastDownwardExperiment class!' - exit(0) - """Add a step that creates scatter plots for all revision pairs. - - Create a scatter plot for each combination of attribute, - configuration and revision pair. If *attributes* is not - specified, a list of common scatter plot attributes is used. - For portfolios all attributes except "cost", "coverage" and - "plan_length" will be ignored. 
:: - - exp.add_scatter_plot_step(attributes=["expansions"]) - - """ - if attributes is None: - attributes = self.DEFAULT_SCATTER_PLOT_ATTRIBUTES - scatter_dir = os.path.join(self.eval_dir, "scatter") - - def is_portfolio(config_nick): - return "fdss" in config_nick - - def make_scatter_plot(config_nick, rev1, rev2, attribute): - name = "-".join([self.name, rev1, rev2, attribute, config_nick]) - print "Make scatter plot for", name - algo1 = "%s-%s" % (rev1, config_nick) - algo2 = "%s-%s" % (rev2, config_nick) - report = ScatterPlotReport( - filter_config=[algo1, algo2], - attributes=[attribute], - get_category=lambda run1, run2: run1["domain"], - legend_location=(1.3, 0.5)) - report(self.eval_dir, - os.path.join(scatter_dir, rev1 + "-" + rev2, name)) - - def make_scatter_plots(): - for config_nick in self._config_nicks: - if is_portfolio(config_nick): - valid_attributes = [ - attr for attr in attributes - if attr in self.PORTFOLIO_ATTRIBUTES] - else: - valid_attributes = attributes - for rev1, rev2 in itertools.combinations( - self.revision_nicks, 2): - for attribute in valid_attributes: - make_scatter_plot(config_nick, rev1, rev2, attribute) - - self.add_step(Step("make-scatter-plots", make_scatter_plots)) diff --git a/experiments/issue583/issue583-v1.py b/experiments/issue583/issue583-v1.py deleted file mode 100755 index 682ebfd8d8..0000000000 --- a/experiments/issue583/issue583-v1.py +++ /dev/null @@ -1,6 +0,0 @@ -#! /usr/bin/env python -# -*- coding: utf-8 -*- - -from main import main - -main(revisions=["issue583-base", "issue583-v1"]) diff --git a/experiments/issue583/issue583-v2.py b/experiments/issue583/issue583-v2.py deleted file mode 100755 index 0315d8e078..0000000000 --- a/experiments/issue583/issue583-v2.py +++ /dev/null @@ -1,6 +0,0 @@ -#! 
/usr/bin/env python -# -*- coding: utf-8 -*- - -from main import main - -main(revisions=["issue583-v1", "issue583-v2"]) diff --git a/experiments/issue583/issue583-v3.py b/experiments/issue583/issue583-v3.py deleted file mode 100755 index 03623473e1..0000000000 --- a/experiments/issue583/issue583-v3.py +++ /dev/null @@ -1,6 +0,0 @@ -#! /usr/bin/env python -# -*- coding: utf-8 -*- - -from main import main - -main(revisions=["issue583-base-v2", "issue583-v3"]) diff --git a/experiments/issue583/issue583-v4-dfp.py b/experiments/issue583/issue583-v4-dfp.py deleted file mode 100755 index c756e41ee9..0000000000 --- a/experiments/issue583/issue583-v4-dfp.py +++ /dev/null @@ -1,74 +0,0 @@ -#! /usr/bin/env python -# -*- coding: utf-8 -*- - -from downward import suites -from lab.reports import Attribute, gm - -import common_setup - - -def main(revisions=None): - SUITE = suites.suite_optimal_with_ipc11() - - B_CONFIGS = { - 'dfp-b50k': ['--search', 'astar(merge_and_shrink(merge_strategy=merge_dfp,shrink_strategy=shrink_bisimulation(max_states=50000,threshold=1,greedy=false),label_reduction=label_reduction(before_shrinking=true,before_merging=false)))'], - } - G_CONFIGS = { - 'dfp-ginf': ['--search', 'astar(merge_and_shrink(merge_strategy=merge_dfp,shrink_strategy=shrink_bisimulation(max_states=infinity,threshold=1,greedy=true),label_reduction=label_reduction(before_shrinking=true,before_merging=false)))'], - } - F_CONFIGS = { - 'dfp-f50k': ['--search', 'astar(merge_and_shrink(merge_strategy=merge_dfp,shrink_strategy=shrink_fh(max_states=50000),label_reduction=label_reduction(before_shrinking=false,before_merging=true)))'], - } - CONFIGS = dict(B_CONFIGS) - CONFIGS.update(G_CONFIGS) - CONFIGS.update(F_CONFIGS) - - exp = common_setup.IssueExperiment( - revisions=revisions, - configs=CONFIGS, - suite=SUITE, - test_suite=['depot:pfile1'], - processes=4, - email='silvan.sievers@unibas.ch', - ) - exp.add_resource('ms_parser', 'ms-parser.py', dest='ms-parser.py') - 
exp.add_command('ms-parser', ['ms_parser']) - - # planner outcome attributes - perfect_heuristic = Attribute('perfect_heuristic', absolute=True, min_wins=False) - proved_unsolvability = Attribute('proved_unsolvability', absolute=True, min_wins=False) - actual_search_time = Attribute('actual_search_time', absolute=False, min_wins=True, functions=[gm]) - initial_h_value = Attribute('initial_h_value', absolute=False, min_wins=False) - - # m&s attributes - ms_construction_time = Attribute('ms_construction_time', absolute=False, min_wins=True, functions=[gm]) - ms_abstraction_constructed = Attribute('ms_abstraction_constructed', absolute=True, min_wins=False) - ms_final_size = Attribute('ms_final_size', absolute=False, min_wins=True) - ms_out_of_memory = Attribute('ms_out_of_memory', absolute=True, min_wins=True) - ms_out_of_time = Attribute('ms_out_of_time', absolute=True, min_wins=True) - search_out_of_memory = Attribute('search_out_of_memory', absolute=True, min_wins=True) - search_out_of_time = Attribute('search_out_of_time', absolute=True, min_wins=True) - - extra_attributes = [ - perfect_heuristic, - proved_unsolvability, - actual_search_time, - initial_h_value, - - ms_construction_time, - ms_abstraction_constructed, - ms_final_size, - ms_out_of_memory, - ms_out_of_time, - search_out_of_memory, - search_out_of_time, - ] - attributes = exp.DEFAULT_TABLE_ATTRIBUTES - attributes.extend(extra_attributes) - - exp.add_comparison_table_step(attributes=attributes) - - exp() - -if __name__ == '__main__': - main(revisions=['issue583-base-v2', 'issue583-v4']) diff --git a/experiments/issue583/issue583-v5.py b/experiments/issue583/issue583-v5.py deleted file mode 100755 index f102226098..0000000000 --- a/experiments/issue583/issue583-v5.py +++ /dev/null @@ -1,6 +0,0 @@ -#! 
/usr/bin/env python -# -*- coding: utf-8 -*- - -from main import main - -main(revisions=["issue583-base-v3", "issue583-v5"]) diff --git a/experiments/issue583/main.py b/experiments/issue583/main.py deleted file mode 100644 index a2ab217f2d..0000000000 --- a/experiments/issue583/main.py +++ /dev/null @@ -1,75 +0,0 @@ -#! /usr/bin/env python -# -*- coding: utf-8 -*- - -from downward import suites -from lab.reports import Attribute, gm - -import common_setup - - -def main(revisions=None): - SUITE = suites.suite_optimal_with_ipc11() - - B_CONFIGS = { - 'rl-b50k': ['--search', 'astar(merge_and_shrink(merge_strategy=merge_linear(variable_order=reverse_level),shrink_strategy=shrink_bisimulation(max_states=50000,threshold=1,greedy=false),label_reduction=label_reduction(before_shrinking=true,before_merging=false)))'], - 'cggl-b50k': ['--search', 'astar(merge_and_shrink(merge_strategy=merge_linear(variable_order=cg_goal_level),shrink_strategy=shrink_bisimulation(max_states=50000,threshold=1,greedy=false),label_reduction=label_reduction(before_shrinking=true,before_merging=false)))'], - 'dfp-b50k': ['--search', 'astar(merge_and_shrink(merge_strategy=merge_dfp,shrink_strategy=shrink_bisimulation(max_states=50000,threshold=1,greedy=false),label_reduction=label_reduction(before_shrinking=true,before_merging=false)))'], - } - G_CONFIGS = { - 'rl-ginf': ['--search', 'astar(merge_and_shrink(merge_strategy=merge_linear(variable_order=reverse_level),shrink_strategy=shrink_bisimulation(max_states=infinity,threshold=1,greedy=true),label_reduction=label_reduction(before_shrinking=true,before_merging=false)))'], - 'cggl-ginf': ['--search', 'astar(merge_and_shrink(merge_strategy=merge_linear(variable_order=cg_goal_level),shrink_strategy=shrink_bisimulation(max_states=infinity,threshold=1,greedy=true),label_reduction=label_reduction(before_shrinking=true,before_merging=false)))'], - 'dfp-ginf': ['--search', 
'astar(merge_and_shrink(merge_strategy=merge_dfp,shrink_strategy=shrink_bisimulation(max_states=infinity,threshold=1,greedy=true),label_reduction=label_reduction(before_shrinking=true,before_merging=false)))'], - } - F_CONFIGS = { - 'rl-f50k': ['--search', 'astar(merge_and_shrink(merge_strategy=merge_linear(variable_order=reverse_level),shrink_strategy=shrink_fh(max_states=50000),label_reduction=label_reduction(before_shrinking=false,before_merging=true)))'], - 'cggl-f50k': ['--search', 'astar(merge_and_shrink(merge_strategy=merge_linear(variable_order=cg_goal_level),shrink_strategy=shrink_fh(max_states=50000),label_reduction=label_reduction(before_shrinking=false,before_merging=true)))'], - 'dfp-f50k': ['--search', 'astar(merge_and_shrink(merge_strategy=merge_dfp,shrink_strategy=shrink_fh(max_states=50000),label_reduction=label_reduction(before_shrinking=false,before_merging=true)))'], - } - CONFIGS = dict(B_CONFIGS) - CONFIGS.update(G_CONFIGS) - CONFIGS.update(F_CONFIGS) - - exp = common_setup.IssueExperiment( - revisions=revisions, - configs=CONFIGS, - suite=SUITE, - test_suite=['depot:pfile1'], - processes=4, - email='silvan.sievers@unibas.ch', - ) - exp.add_resource('ms_parser', 'ms-parser.py', dest='ms-parser.py') - exp.add_command('ms-parser', ['ms_parser']) - - # planner outcome attributes - perfect_heuristic = Attribute('perfect_heuristic', absolute=True, min_wins=False) - proved_unsolvability = Attribute('proved_unsolvability', absolute=True, min_wins=False) - actual_search_time = Attribute('actual_search_time', absolute=False, min_wins=True, functions=[gm]) - - # m&s attributes - ms_construction_time = Attribute('ms_construction_time', absolute=False, min_wins=True, functions=[gm]) - ms_abstraction_constructed = Attribute('ms_abstraction_constructed', absolute=True, min_wins=False) - ms_final_size = Attribute('ms_final_size', absolute=False, min_wins=True) - ms_out_of_memory = Attribute('ms_out_of_memory', absolute=True, min_wins=True) - ms_out_of_time = 
Attribute('ms_out_of_time', absolute=True, min_wins=True) - search_out_of_memory = Attribute('search_out_of_memory', absolute=True, min_wins=True) - search_out_of_time = Attribute('search_out_of_time', absolute=True, min_wins=True) - - extra_attributes = [ - perfect_heuristic, - proved_unsolvability, - actual_search_time, - - ms_construction_time, - ms_abstraction_constructed, - ms_final_size, - ms_out_of_memory, - ms_out_of_time, - search_out_of_memory, - search_out_of_time, - ] - attributes = exp.DEFAULT_TABLE_ATTRIBUTES - attributes.extend(extra_attributes) - - exp.add_comparison_table_step(attributes=attributes) - - exp() diff --git a/experiments/issue583/ms-parser.py b/experiments/issue583/ms-parser.py deleted file mode 100755 index 4d8e840ae2..0000000000 --- a/experiments/issue583/ms-parser.py +++ /dev/null @@ -1,71 +0,0 @@ -#! /usr/bin/env python - -from lab.parser import Parser - -parser = Parser() -parser.add_pattern('actual_search_time', 'Actual search time: (.+)s \[.+s\]', required=False, type=float) -parser.add_pattern('ms_final_size', 'Final transition system size: (\d+)', required=False, type=int) -parser.add_pattern('ms_construction_time', 'Done initializing merge-and-shrink heuristic \[(.+)s\]', required=False, type=float) - -def check_ms_constructed(content, props): - ms_construction_time = props.get('ms_construction_time') - abstraction_constructed = False - if ms_construction_time is not None: - abstraction_constructed = True - props['ms_abstraction_constructed'] = abstraction_constructed - -parser.add_function(check_ms_constructed) - -def check_proved_unsolvability(content, props): - proved_unsolvability = False - if props['coverage'] == 0: - for line in content.splitlines(): - if line == 'Completely explored state space -- no solution!': - proved_unsolvability = True - break - props['proved_unsolvability'] = proved_unsolvability - -parser.add_function(check_proved_unsolvability) - -def check_planner_exit_reason(content, props): - 
ms_abstraction_constructed = props.get('ms_abstraction_constructed') - error = props.get('error') - if error != 'none' and error != 'timeout' and error != 'out-of-memory': - print 'error: %s' % error - return - - # Check whether merge-and-shrink computation or search ran out of - # time or memory. - ms_out_of_time = False - ms_out_of_memory = False - search_out_of_time = False - search_out_of_memory = False - if ms_abstraction_constructed == False: - if error == 'timeout': - ms_out_of_time = True - elif error == 'out-of-memory': - ms_out_of_memory = True - elif ms_abstraction_constructed == True: - if error == 'timeout': - search_out_of_time = True - elif error == 'out-of-memory': - search_out_of_memory = True - props['ms_out_of_time'] = ms_out_of_time - props['ms_out_of_memory'] = ms_out_of_memory - props['search_out_of_time'] = search_out_of_time - props['search_out_of_memory'] = search_out_of_memory - -parser.add_function(check_planner_exit_reason) - -def check_perfect_heuristic(content, props): - plan_length = props.get('plan_length') - expansions = props.get('expansions') - if plan_length != None: - perfect_heuristic = False - if plan_length + 1 == expansions: - perfect_heuristic = True - props['perfect_heuristic'] = perfect_heuristic - -parser.add_function(check_perfect_heuristic) - -parser.parse() diff --git a/experiments/issue585/common_setup.py b/experiments/issue585/common_setup.py deleted file mode 100644 index 00c25bb100..0000000000 --- a/experiments/issue585/common_setup.py +++ /dev/null @@ -1,319 +0,0 @@ -# -*- coding: utf-8 -*- - -import itertools -import os -import platform -import subprocess -import sys - -from lab.environments import LocalEnvironment, MaiaEnvironment -from lab.experiment import ARGPARSER -from lab.steps import Step - -from downward.experiments.fast_downward_experiment import FastDownwardExperiment -from downward.reports.absolute import AbsoluteReport -from downward.reports.compare import CompareRevisionsReport -from 
downward.reports.scatter import ScatterPlotReport - -def parse_args(): - ARGPARSER.add_argument( - "--test", - choices=["yes", "no", "auto"], - default="auto", - dest="test_run", - help="test experiment locally on a small suite if --test=yes or " - "--test=auto and we are not on a cluster") - return ARGPARSER.parse_args() - -ARGS = parse_args() - -def get_script(): - """Get file name of main script.""" - import __main__ - return __main__.__file__ - - -def get_script_dir(): - """Get directory of main script. - - Usually a relative directory (depends on how it was called by the user.)""" - return os.path.dirname(get_script()) - - -def get_repo_base(): - """Get base directory of the repository, as an absolute path. - - Search upwards in the directory tree from the main script until a - directory with a subdirectory named ".hg" is found. - - Abort if the repo base cannot be found.""" - path = os.path.abspath(get_script_dir()) - while os.path.dirname(path) != path: - if os.path.exists(os.path.join(path, ".hg")): - return path - path = os.path.dirname(path) - sys.exit("repo base could not be found") - - -def is_running_on_cluster(): - node = platform.node() - return ("cluster" in node or - node.startswith("gkigrid") or - node in ["habakuk", "turtur"]) - - -def is_test_run(): - return ARGS.test_run == "yes" or (ARGS.test_run == "auto" and - not is_running_on_cluster()) - - -class IssueExperiment(FastDownwardExperiment): - """Wrapper for FastDownwardExperiment with a few convenience features.""" - - DEFAULT_TEST_SUITE = "gripper:prob01.pddl" - - DEFAULT_TABLE_ATTRIBUTES = [ - "cost", - "coverage", - "error", - "evaluations", - "expansions", - "expansions_until_last_jump", - "generated", - "memory", - "quality", - "run_dir", - "score_evaluations", - "score_expansions", - "score_generated", - "score_memory", - "score_search_time", - "score_total_time", - "search_time", - "total_time", - ] - - DEFAULT_SCATTER_PLOT_ATTRIBUTES = [ - "evaluations", - "expansions", - 
"expansions_until_last_jump", - "initial_h_value", - "memory", - "search_time", - "total_time", - ] - - PORTFOLIO_ATTRIBUTES = [ - "cost", - "coverage", - "plan_length", - ] - - def __init__(self, configs, revisions, suite, build_options=None, - driver_options=None, grid_priority=None, - test_suite=None, email=None, processes=1, **kwargs): - """Create an FastDownwardExperiment with some convenience features. - All configs will be run on all revisions. Inherited options - *path*, *environment* and *cache_dir* from FastDownwardExperiment - are not supported and will be automatically set. - - *configs* must be a non-empty dict of {nick: cmdline} pairs - that sets the planner configurations to test. nick will - automatically get the revision prepended, e.g. - 'issue123-base-':: - - IssueExperiment(configs={ - "lmcut": ["--search", "astar(lmcut())"], - "ipdb": ["--search", "astar(ipdb())"]}) - - *revisions* must be a non-empty list of revisions, which - specify which planner versions to use in the experiment. - The same versions are used for translator, preprocessor - and search. :: - - IssueExperiment(revisions=["issue123", "4b3d581643"]) - - *suite* sets the benchmarks for the experiment. It must be a - single string or a list of strings specifying domains or - tasks. The downward.suites module has many predefined - suites. :: - - IssueExperiment(suite=["grid", "gripper:prob01.pddl"]) - - from downward import suites - IssueExperiment(suite=suites.suite_all()) - IssueExperiment(suite=suites.suite_satisficing_with_ipc11()) - IssueExperiment(suite=suites.suite_optimal()) - - Use *grid_priority* to set the job priority for cluster - experiments. It must be in the range [-1023, 0] where 0 is the - highest priority. By default the priority is 0. :: - - IssueExperiment(grid_priority=-500) - - Specify *test_suite* to set the benchmarks for experiment test - runs. By default the first gripper task is used. 
- - IssueExperiment(test_suite=["depot:pfile1", "tpp:p01.pddl"]) - - """ - - if is_test_run(): - environment = LocalEnvironment(processes=processes) - suite = test_suite or self.DEFAULT_TEST_SUITE - elif "environment" not in kwargs: - environment = MaiaEnvironment(priority=grid_priority, - email=email) - - FastDownwardExperiment.__init__(self, environment=environment, - **kwargs) - - # Automatically deduce the downward repository from the file - repo = get_repo_base() - self.algorithm_nicks = [] - self.revisions = revisions - for nick, cmdline in configs.items(): - for rev in revisions: - algo_nick = '%s-%s' % (rev, nick) - self.add_algorithm(algo_nick, repo, rev, cmdline, - build_options, driver_options) - self.algorithm_nicks.append(algo_nick) - - benchmarks_dir = os.path.join(repo, 'benchmarks') - self.add_suite(benchmarks_dir, suite) - self.search_parsers = [] - - # TODO: this method adds all search parsers. See next method. - def _add_runs(self): - FastDownwardExperiment._add_runs(self) - for run in self.runs: - for parser in self.search_parsers: - run.add_command(parser, [parser]) - - # TODO: copied adapted from downward/experiment. This method should - # be removed when FastDownwardExperiment supports adding search parsers. - def add_search_parser(self, path_to_parser): - """ - Invoke script at *path_to_parser* at the end of each search run. :: - - exp.add_search_parser('path/to/parser') - """ - if not os.path.isfile(path_to_parser): - logging.critical('Parser %s could not be found.' % path_to_parser) - if not os.access(path_to_parser, os.X_OK): - logging.critical('Parser %s is not executable.' % path_to_parser) - search_parser = 'search_parser%d' % len(self.search_parsers) - self.add_resource(search_parser, path_to_parser) - self.search_parsers.append(search_parser) - - def add_absolute_report_step(self, **kwargs): - """Add step that makes an absolute report. - - Absolute reports are useful for experiments that don't - compare revisions. 
- - The report is written to the experiment evaluation directory. - - All *kwargs* will be passed to the AbsoluteReport class. If - the keyword argument *attributes* is not specified, a - default list of attributes is used. :: - - exp.add_absolute_report_step(attributes=["coverage"]) - - """ - kwargs.setdefault("attributes", self.DEFAULT_TABLE_ATTRIBUTES) - report = AbsoluteReport(**kwargs) - # oufile is of the form --...-. - outfile = '' - for rev in self.revisions: - outfile += rev - outfile += '-' - outfile = outfile[:len(outfile)-1] - outfile += '.' - outfile += report.output_format - self.add_report(report, outfile=outfile) - self.add_step(Step('publish-absolute-report', subprocess.call, ['publish', outfile])) - - def add_comparison_table_step(self, **kwargs): - """Add a step that makes pairwise revision comparisons. - - Create comparative reports for all pairs of Fast Downward - revision triples. Each report pairs up the runs of the same - config and lists the two absolute attribute values and their - difference for all attributes in kwargs["attributes"]. - - All *kwargs* will be passed to the CompareRevisionsReport - class. If the keyword argument *attributes* is not - specified, a default list of attributes is used. 
:: - - exp.add_comparison_table_step(attributes=["coverage"]) - - """ - kwargs.setdefault("attributes", self.DEFAULT_TABLE_ATTRIBUTES) - - def make_comparison_tables(): - for rev1, rev2 in itertools.combinations(self.revisions, 2): - report = CompareRevisionsReport(rev1, rev2, **kwargs) - outfile = os.path.join(self.eval_dir, - "%s-%s-compare.html" % - (rev1, rev2)) - report(self.eval_dir, outfile) - - self.add_step(Step("make-comparison-tables", make_comparison_tables)) - - def publish_comparison_tables(): - for rev1, rev2 in itertools.combinations(self.revisions, 2): - outfile = os.path.join(self.eval_dir, - "%s-%s-compare.html" % - (rev1, rev2)) - subprocess.call(['publish', outfile]) - - self.add_step(Step('publish-comparison-reports', publish_comparison_tables)) - - # TODO: test this! - def add_scatter_plot_step(self, attributes=None): - """Add a step that creates scatter plots for all revision pairs. - - Create a scatter plot for each combination of attribute, - configuration and revision pair. If *attributes* is not - specified, a list of common scatter plot attributes is used. - For portfolios all attributes except "cost", "coverage" and - "plan_length" will be ignored. 
:: - - exp.add_scatter_plot_step(attributes=["expansions"]) - - """ - if attributes is None: - attributes = self.DEFAULT_SCATTER_PLOT_ATTRIBUTES - scatter_dir = os.path.join(self.eval_dir, "scatter") - - def is_portfolio(config_nick): - return "fdss" in config_nick - - def make_scatter_plot(config_nick, rev1, rev2, attribute): - name = "-".join([self.name, rev1, rev2, attribute, config_nick]) - print "Make scatter plot for", name - algo1 = "%s-%s" % (rev1, config_nick) - algo2 = "%s-%s" % (rev2, config_nick) - report = ScatterPlotReport( - filter_config=[algo1, algo2], - attributes=[attribute], - get_category=lambda run1, run2: run1["domain"], - legend_location=(1.3, 0.5)) - report(self.eval_dir, - os.path.join(scatter_dir, rev1 + "-" + rev2, name)) - - def make_scatter_plots(): - for config_nick in self._config_nicks: - if is_portfolio(config_nick): - valid_attributes = [ - attr for attr in attributes - if attr in self.PORTFOLIO_ATTRIBUTES] - else: - valid_attributes = attributes - for rev1, rev2 in itertools.combinations( - self.revision_nicks, 2): - for attribute in valid_attributes: - make_scatter_plot(config_nick, rev1, rev2, attribute) - - self.add_step(Step("make-scatter-plots", make_scatter_plots)) diff --git a/experiments/issue585/relativescatter.py b/experiments/issue585/relativescatter.py deleted file mode 100644 index 41a8385a87..0000000000 --- a/experiments/issue585/relativescatter.py +++ /dev/null @@ -1,126 +0,0 @@ -# -*- coding: utf-8 -*- -# -# downward uses the lab package to conduct experiments with the -# Fast Downward planning system. -# -# This program is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. 
-# -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with this program. If not, see . - -from collections import defaultdict -import os - -from lab import tools - -from matplotlib import ticker - -from downward.reports.scatter import ScatterPlotReport -from downward.reports.plot import PlotReport, Matplotlib, MatplotlibPlot - - -# TODO: handle outliers - -# TODO: this is mostly copied from ScatterMatplotlib (scatter.py) -class RelativeScatterMatplotlib(Matplotlib): - @classmethod - def _plot(cls, report, axes, categories, styles): - # Display grid - axes.grid(b=True, linestyle='-', color='0.75') - - has_points = False - # Generate the scatter plots - for category, coords in sorted(categories.items()): - X, Y = zip(*coords) - axes.scatter(X, Y, s=42, label=category, **styles[category]) - if X and Y: - has_points = True - - if report.xscale == 'linear' or report.yscale == 'linear': - plot_size = report.missing_val * 1.01 - else: - plot_size = report.missing_val * 1.25 - - # make 5 ticks above and below 1 - yticks = [] - tick_step = report.ylim_top**(1/5.0) - for i in xrange(-5, 6): - yticks.append(tick_step**i) - axes.set_yticks(yticks) - axes.get_yaxis().set_major_formatter(ticker.ScalarFormatter()) - - axes.set_xlim(report.xlim_left or -1, report.xlim_right or plot_size) - axes.set_ylim(report.ylim_bottom or -1, report.ylim_top or plot_size) - - for axis in [axes.xaxis, axes.yaxis]: - MatplotlibPlot.change_axis_formatter(axis, - report.missing_val if report.show_missing else None) - return has_points - - - -class RelativeScatterPlotReport(ScatterPlotReport): - """ - Generate a scatter plot that shows how a specific attribute in two - configurations. 
The attribute value in config 1 is shown on the - x-axis and the relation to the value in config 2 on the y-axis. - """ - - def __init__(self, show_missing=True, get_category=None, **kwargs): - ScatterPlotReport.__init__(self, show_missing, get_category, **kwargs) - if self.output_format == 'tex': - raise "not supported" - else: - self.writer = RelativeScatterMatplotlib - - - def _fill_categories(self, runs): - # We discard the *runs* parameter. - # Map category names to value tuples - categories = defaultdict(list) - self.ylim_bottom = 2 - self.ylim_top = 0.5 - self.xlim_left = float("inf") - for (domain, problem), runs in self.problem_runs.items(): - if len(runs) != 2: - continue - run1, run2 = runs - assert (run1['config'] == self.configs[0] and - run2['config'] == self.configs[1]) - val1 = run1.get(self.attribute) - val2 = run2.get(self.attribute) - if val1 is None or val2 is None: - continue - category = self.get_category(run1, run2) - assert val1 > 0, (domain, problem, self.configs[0], val1) - assert val2 > 0, (domain, problem, self.configs[1], val2) - x = val1 - y = val2 / float(val1) - - categories[category].append((x, y)) - - self.ylim_top = max(self.ylim_top, y) - self.ylim_bottom = min(self.ylim_bottom, y) - self.xlim_left = min(self.xlim_left, x) - - # center around 1 - if self.ylim_bottom < 1: - self.ylim_top = max(self.ylim_top, 1 / float(self.ylim_bottom)) - if self.ylim_top > 1: - self.ylim_bottom = min(self.ylim_bottom, 1 / float(self.ylim_top)) - return categories - - def _set_scales(self, xscale, yscale): - # ScatterPlots use log-scaling on the x-axis by default. - default_xscale = 'log' - if self.attribute and self.attribute in self.LINEAR: - default_xscale = 'linear' - PlotReport._set_scales(self, xscale or default_xscale, 'log') diff --git a/experiments/issue585/v1.py b/experiments/issue585/v1.py deleted file mode 100755 index e23e784a3d..0000000000 --- a/experiments/issue585/v1.py +++ /dev/null @@ -1,66 +0,0 @@ -#! 
/usr/bin/env python -# -*- coding: utf-8 -*- - -from downward import suites -from downward.experiment import FastDownwardExperiment -from downward.reports.compare import CompareConfigsReport - -import common_setup - -REPO = common_setup.get_repo_base() -REV_BASE = 'issue585-base' -REV_V1 = 'issue585-v1' -SUITE = ['gripper:prob01.pddl'] # suites.suite_optimal_with_ipc11() -ALGORITHMS = { - 'astar_pdb_base': (REV_BASE, ['--search', 'astar(pdb())']), - 'astar_pdb_v1': (REV_V1, ['--search', 'astar(pdb())']), - - 'astar_cpdbs_base': (REV_BASE, ['--search', 'astar(cpdbs())']), - 'astar_cpdbs_v1': (REV_V1, ['--search', 'astar(cpdbs())']), - - 'astar_cpdbs_systematic_base': (REV_BASE, ['--search', 'astar(cpdbs_systematic())']), - 'astar_cpdbs_systematic_v1': (REV_V1, ['--search', 'astar(cpdbs(patterns=systematic()))']), - - 'astar_zopdbs_base': (REV_BASE, ['--search', 'astar(zopdbs())']), - 'astar_zopdbs_v1': (REV_V1, ['--search', 'astar(zopdbs())']), - - 'astar_ipdb_base': (REV_BASE, ['--search', 'astar(ipdb())']), - 'astar_ipdb_v1': (REV_V1, ['--search', 'astar(ipdb())']), - 'astar_ipdb_alias': (REV_V1, ['--search', 'astar(cpdbs(patterns=hillclimbing()))']), - - 'astar_gapdb_base': (REV_BASE, ['--search', 'astar(gapdb())']), - 'astar_gapdb_v1': (REV_V1, ['--search', 'astar(zopdbs(patterns=genetic()))']), - - 'astar_pho_systematic_base': (REV_BASE, ['--search', 'astar(operatorcounting([pho_constraints_systematic()]))']), - 'astar_pho_systematic_v1': (REV_V1, ['--search', 'astar(operatorcounting([pho_constraints(patterns=systematic())]))']), - - 'astar_pho_hillclimbing_base': (REV_BASE, ['--search', 'astar(operatorcounting([pho_constraints_ipdb()]))']), - 'astar_pho_hillclimbing_v1': (REV_V1, ['--search', 'astar(operatorcounting([pho_constraints(patterns=hillclimbing())]))']), -} -COMPARED_ALGORITHMS = [ - ('astar_pdb_base', 'astar_pdb_v1', 'Diff (pdb)'), - ('astar_cpdbs_base', 'astar_cpdbs_v1', 'Diff (cpdbs)'), - ('astar_cpdbs_systematic_base', 
'astar_cpdbs_systematic_v1', 'Diff (cpdbs_systematic)'), - ('astar_zopdbs_base', 'astar_zopdbs_v1', 'Diff (zopdbs)'), - ('astar_ipdb_base', 'astar_ipdb_v1', 'Diff (ipdb)'), - ('astar_ipdb_v1', 'astar_ipdb_alias', 'Diff (ipdb_alias)'), - ('astar_gapdb_base', 'astar_gapdb_v1', 'Diff (gapdb)'), - ('astar_pho_systematic_base', 'astar_pho_systematic_v1', 'Diff (pho_systematic)'), - ('astar_pho_hillclimbing_base', 'astar_pho_hillclimbing_v1', 'Diff (pho_hillclimbing)'), -] - -exp = common_setup.IssueExperiment( - revisions=[], - configs={}, - suite=SUITE, -) - -for nick, (rev, cmd) in ALGORITHMS.items(): - exp.add_algorithm(nick, REPO, rev, cmd) - -exp.add_report(CompareConfigsReport( - COMPARED_ALGORITHMS, - attributes=common_setup.IssueExperiment.DEFAULT_TABLE_ATTRIBUTES -)) - -exp() diff --git a/experiments/issue585/v2.py b/experiments/issue585/v2.py deleted file mode 100755 index 1dd5b7b8aa..0000000000 --- a/experiments/issue585/v2.py +++ /dev/null @@ -1,36 +0,0 @@ -#! /usr/bin/env python -# -*- coding: utf-8 -*- - -from downward import suites -from downward.experiment import FastDownwardExperiment -from downward.reports.compare import CompareConfigsReport - -import common_setup - -REPO = common_setup.get_repo_base() -REV_BASE = 'issue585-base' -REV_V1 = 'issue585-v2' -SUITE = suites.suite_optimal_with_ipc11() -ALGORITHMS = { - 'astar_ipdb_base': (REV_BASE, ['--search', 'astar(ipdb())']), - 'astar_ipdb_v2': (REV_V1, ['--search', 'astar(ipdb())']), -} -COMPARED_ALGORITHMS = [ - ('astar_ipdb_base', 'astar_ipdb_v2', 'Diff (ipdb)'), -] - -exp = common_setup.IssueExperiment( - revisions=[], - configs={}, - suite=SUITE, -) - -for nick, (rev, cmd) in ALGORITHMS.items(): - exp.add_algorithm(nick, REPO, rev, cmd) - -exp.add_report(CompareConfigsReport( - COMPARED_ALGORITHMS, - attributes=common_setup.IssueExperiment.DEFAULT_TABLE_ATTRIBUTES -)) - -exp() diff --git a/experiments/issue585/v3-new-configs.py b/experiments/issue585/v3-new-configs.py deleted file mode 100755 index 
81a95333f2..0000000000 --- a/experiments/issue585/v3-new-configs.py +++ /dev/null @@ -1,33 +0,0 @@ -#! /usr/bin/env python -# -*- coding: utf-8 -*- - -from downward import suites -from downward.experiment import FastDownwardExperiment -from downward.reports.compare import CompareConfigsReport - -import common_setup - -REPO = common_setup.get_repo_base() -REV_BASE = 'issue585-base' -REV_V3 = 'issue585-v3' -SUITE = suites.suite_optimal_with_ipc11() -ALGORITHMS = { - 'astar_cpdbs_genetic': (REV_V3, ['--search', 'astar(cpdbs(patterns=genetic()))']), - 'astar_zopdbs_systematic': (REV_V3, ['--search', 'astar(zopdbs(patterns=systematic()))']), - 'astar_zopdbs_hillclimbing': (REV_V3, ['--search', 'astar(zopdbs(patterns=hillclimbing()))']), - 'astar_pho_genetic': (REV_V3, ['--search', 'astar(operatorcounting([pho_constraints(patterns=genetic())]))']), - 'astar_pho_combo': (REV_V3, ['--search', 'astar(operatorcounting([pho_constraints(patterns=combo())]))']), -} - -exp = common_setup.IssueExperiment( - revisions=[], - configs={}, - suite=SUITE, -) - -for nick, (rev, cmd) in ALGORITHMS.items(): - exp.add_algorithm(nick, REPO, rev, cmd) - -exp.add_absolute_report_step() - -exp() diff --git a/experiments/issue585/v3-rest.py b/experiments/issue585/v3-rest.py deleted file mode 100755 index a19a405099..0000000000 --- a/experiments/issue585/v3-rest.py +++ /dev/null @@ -1,63 +0,0 @@ -#! 
/usr/bin/env python -# -*- coding: utf-8 -*- - -from downward import suites -from downward.experiment import FastDownwardExperiment -from downward.reports.compare import CompareConfigsReport - -from relativescatter import RelativeScatterPlotReport -import common_setup - -REPO = common_setup.get_repo_base() -REV_BASE = 'issue585-base' -REV_V3 = 'issue585-v3' -SUITE = suites.suite_optimal_with_ipc11() -ALGORITHMS = { - 'astar_pdb_base': (REV_BASE, ['--search', 'astar(pdb())']), - 'astar_pdb_v3': (REV_V3, ['--search', 'astar(pdb())']), - - 'astar_cpdbs_base': (REV_BASE, ['--search', 'astar(cpdbs())']), - 'astar_cpdbs_v3': (REV_V3, ['--search', 'astar(cpdbs())']), - - 'astar_cpdbs_systematic_base': (REV_BASE, ['--search', 'astar(cpdbs_systematic())']), - 'astar_cpdbs_systematic_v3': (REV_V3, ['--search', 'astar(cpdbs(patterns=systematic()))']), - - 'astar_zopdbs_base': (REV_BASE, ['--search', 'astar(zopdbs())']), - 'astar_zopdbs_v3': (REV_V3, ['--search', 'astar(zopdbs())']), - - 'astar_pho_systematic_base': (REV_BASE, ['--search', 'astar(operatorcounting([pho_constraints_systematic()]))']), - 'astar_pho_systematic_v3': (REV_V3, ['--search', 'astar(operatorcounting([pho_constraints(patterns=systematic())]))']), -} -COMPARED_ALGORITHMS = [ - ('astar_pdb_base', 'astar_pdb_v3', 'Diff (pdb)'), - ('astar_cpdbs_base', 'astar_cpdbs_v3', 'Diff (cpdbs)'), - ('astar_cpdbs_systematic_base', 'astar_cpdbs_systematic_v3', 'Diff (cpdbs_systematic)'), - ('astar_zopdbs_base', 'astar_zopdbs_v3', 'Diff (zopdbs)'), - ('astar_pho_systematic_base', 'astar_pho_systematic_v3', 'Diff (pho_systematic)'), -] - -exp = common_setup.IssueExperiment( - revisions=[], - configs={}, - suite=SUITE, -) - -for nick, (rev, cmd) in ALGORITHMS.items(): - exp.add_algorithm(nick, REPO, rev, cmd) - -exp.add_report(CompareConfigsReport( - COMPARED_ALGORITHMS, - attributes=common_setup.IssueExperiment.DEFAULT_TABLE_ATTRIBUTES -)) - -for c1, c2, _ in COMPARED_ALGORITHMS: - exp.add_report( - 
RelativeScatterPlotReport( - attributes=["total_time"], - filter_config=[c1, c2], - get_category=lambda run1, run2: run1.get("domain"), - ), - outfile='issue585_%s_v3_total_time.png' % c1 - ) - -exp() diff --git a/experiments/issue585/v3.py b/experiments/issue585/v3.py deleted file mode 100755 index 208f2a8507..0000000000 --- a/experiments/issue585/v3.py +++ /dev/null @@ -1,59 +0,0 @@ -#! /usr/bin/env python -# -*- coding: utf-8 -*- - -from downward import suites -from downward.experiment import FastDownwardExperiment -from downward.reports.compare import CompareConfigsReport - -from relativescatter import RelativeScatterPlotReport -import common_setup - -REPO = common_setup.get_repo_base() -REV_BASE = 'issue585-base' -REV_V1 = 'issue585-v3' -SUITE = suites.suite_optimal_with_ipc11() -ALGORITHMS = { - 'astar_ipdb_base': (REV_BASE, ['--search', 'astar(ipdb())']), - 'astar_ipdb_v3': (REV_V1, ['--search', 'astar(ipdb())']), - - 'astar_gapdb_base': (REV_BASE, ['--search', 'astar(gapdb())']), - 'astar_gapdb_v3': (REV_V1, ['--search', 'astar(zopdbs(patterns=genetic()))']), -} -COMPARED_ALGORITHMS = [ - ('astar_ipdb_base', 'astar_ipdb_v3', 'Diff (ipdb)'), - ('astar_gapdb_base', 'astar_gapdb_v3', 'Diff (gapdb)'), -] - -exp = common_setup.IssueExperiment( - revisions=[], - configs={}, - suite=SUITE, -) - -for nick, (rev, cmd) in ALGORITHMS.items(): - exp.add_algorithm(nick, REPO, rev, cmd) - -exp.add_report(CompareConfigsReport( - COMPARED_ALGORITHMS, - attributes=common_setup.IssueExperiment.DEFAULT_TABLE_ATTRIBUTES -)) - -exp.add_report( - RelativeScatterPlotReport( - attributes=["total_time"], - filter_config=["astar_ipdb_base", "astar_ipdb_v3"], - get_category=lambda run1, run2: run1.get("domain"), - ), - outfile='issue585_ipdb_base_v3_total_time.png' -) - -exp.add_report( - RelativeScatterPlotReport( - attributes=["total_time"], - filter_config=["astar_gapdb_base", "astar_gapdb_v3"], - get_category=lambda run1, run2: run1.get("domain"), - ), - 
outfile='issue585_gapdb_base_v3_total_time.png' -) - -exp() diff --git a/experiments/issue591/common_setup.py b/experiments/issue591/common_setup.py deleted file mode 100644 index 4dff4aacfd..0000000000 --- a/experiments/issue591/common_setup.py +++ /dev/null @@ -1,335 +0,0 @@ -# -*- coding: utf-8 -*- - -import itertools -import os -import platform -import subprocess -import sys - -from lab.experiment import ARGPARSER -from lab.steps import Step -from lab import tools - -from downward.experiment import FastDownwardExperiment -from downward.reports.absolute import AbsoluteReport -from downward.reports.compare import CompareConfigsReport -from downward.reports.scatter import ScatterPlotReport - -from relativescatter import RelativeScatterPlotReport - - -def parse_args(): - ARGPARSER.add_argument( - "--test", - choices=["yes", "no", "auto"], - default="auto", - dest="test_run", - help="test experiment locally on a small suite if --test=yes or " - "--test=auto and we are not on a cluster") - return ARGPARSER.parse_args() - -ARGS = parse_args() - - -def get_script(): - """Get file name of main script.""" - return tools.get_script_path() - - -def get_script_dir(): - """Get directory of main script. - - Usually a relative directory (depends on how it was called by the user.)""" - return os.path.dirname(get_script()) - - -def get_experiment_name(): - """Get name for experiment. - - Derived from the absolute filename of the main script, e.g. - "/ham/spam/eggs.py" => "spam-eggs".""" - script = os.path.abspath(get_script()) - script_dir = os.path.basename(os.path.dirname(script)) - script_base = os.path.splitext(os.path.basename(script))[0] - return "%s-%s" % (script_dir, script_base) - - -def get_data_dir(): - """Get data dir for the experiment. 
- - This is the subdirectory "data" of the directory containing - the main script.""" - return os.path.join(get_script_dir(), "data", get_experiment_name()) - - -def get_repo_base(): - """Get base directory of the repository, as an absolute path. - - Search upwards in the directory tree from the main script until a - directory with a subdirectory named ".hg" is found. - - Abort if the repo base cannot be found.""" - path = os.path.abspath(get_script_dir()) - while os.path.dirname(path) != path: - if os.path.exists(os.path.join(path, ".hg")): - return path - path = os.path.dirname(path) - sys.exit("repo base could not be found") - - -def is_running_on_cluster(): - node = platform.node() - return ( - "cluster" in node or - node.startswith("gkigrid") or - node in ["habakuk", "turtur"]) - - -def is_test_run(): - return ARGS.test_run == "yes" or ( - ARGS.test_run == "auto" and not is_running_on_cluster()) - - -def get_algo_nick(revision, config_nick): - return "{revision}-{config_nick}".format(**locals()) - - -class IssueConfig(object): - """Hold information about a planner configuration. - - See FastDownwardExperiment.add_algorithm() for documentation of the - constructor's options. 
- - """ - def __init__(self, nick, component_options, - build_options=None, driver_options=None): - self.nick = nick - self.component_options = component_options - self.build_options = build_options - self.driver_options = driver_options - - -class IssueExperiment(FastDownwardExperiment): - """Subclass of FastDownwardExperiment with some convenience features.""" - - DEFAULT_TEST_SUITE = ["gripper:prob01.pddl"] - - DEFAULT_TABLE_ATTRIBUTES = [ - "cost", - "coverage", - "error", - "evaluations", - "expansions", - "expansions_until_last_jump", - "generated", - "memory", - "quality", - "run_dir", - "score_evaluations", - "score_expansions", - "score_generated", - "score_memory", - "score_search_time", - "score_total_time", - "search_time", - "total_time", - ] - - DEFAULT_SCATTER_PLOT_ATTRIBUTES = [ - "evaluations", - "expansions", - "expansions_until_last_jump", - "initial_h_value", - "memory", - "search_time", - "total_time", - ] - - PORTFOLIO_ATTRIBUTES = [ - "cost", - "coverage", - "error", - "plan_length", - "run_dir", - ] - - def __init__(self, revisions=None, configs=None, path=None, **kwargs): - """ - - You can either specify both *revisions* and *configs* or none - of them. If they are omitted, you will need to call - exp.add_algorithm() manually. - - If *revisions* is given, it must be a non-empty list of - revision identifiers, which specify which planner versions to - use in the experiment. The same versions are used for - translator, preprocessor and search. :: - - IssueExperiment(revisions=["issue123", "4b3d581643"], ...) - - If *configs* is given, it must be a non-empty list of - IssueConfig objects. :: - - IssueExperiment(..., configs=[ - IssueConfig("ff", ["--search", "eager_greedy(ff())"]), - IssueConfig( - "lama", [], - driver_options=["--alias", "seq-sat-lama-2011"]), - ]) - - If *path* is specified, it must be the path to where the - experiment should be built (e.g. - /home/john/experiments/issue123/exp01/). 
If omitted, the - experiment path is derived automatically from the main - script's filename. Example:: - - script = experiments/issue123/exp01.py --> - path = experiments/issue123/data/issue123-exp01/ - - """ - - path = path or get_data_dir() - - FastDownwardExperiment.__init__(self, path=path, **kwargs) - - if (revisions and not configs) or (not revisions and configs): - raise ValueError( - "please provide either both or none of revisions and configs") - - for rev in revisions: - for config in configs: - self.add_algorithm( - get_algo_nick(rev, config.nick), - get_repo_base(), - rev, - config.component_options, - build_options=config.build_options, - driver_options=config.driver_options) - - self._revisions = revisions - self._configs = configs - - @classmethod - def _is_portfolio(cls, config_nick): - return "fdss" in config_nick - - @classmethod - def get_supported_attributes(cls, config_nick, attributes): - if cls._is_portfolio(config_nick): - return [attr for attr in attributes - if attr in cls.PORTFOLIO_ATTRIBUTES] - return attributes - - def add_absolute_report_step(self, **kwargs): - """Add step that makes an absolute report. - - Absolute reports are useful for experiments that don't compare - revisions. - - The report is written to the experiment evaluation directory. - - All *kwargs* will be passed to the AbsoluteReport class. If the - keyword argument *attributes* is not specified, a default list - of attributes is used. :: - - exp.add_absolute_report_step(attributes=["coverage"]) - - """ - kwargs.setdefault("attributes", self.DEFAULT_TABLE_ATTRIBUTES) - report = AbsoluteReport(**kwargs) - outfile = os.path.join( - self.eval_dir, - get_experiment_name() + "." + report.output_format) - self.add_report(report, outfile=outfile) - self.add_step(Step( - 'publish-absolute-report', subprocess.call, ['publish', outfile])) - - def add_comparison_table_step(self, **kwargs): - """Add a step that makes pairwise revision comparisons. 
- - Create comparative reports for all pairs of Fast Downward - revisions. Each report pairs up the runs of the same config and - lists the two absolute attribute values and their difference - for all attributes in kwargs["attributes"]. - - All *kwargs* will be passed to the CompareConfigsReport class. - If the keyword argument *attributes* is not specified, a - default list of attributes is used. :: - - exp.add_comparison_table_step(attributes=["coverage"]) - - """ - kwargs.setdefault("attributes", self.DEFAULT_TABLE_ATTRIBUTES) - - def make_comparison_tables(): - for rev1, rev2 in itertools.combinations(self._revisions, 2): - compared_configs = [] - for config in self._configs: - config_nick = config.nick - compared_configs.append( - ("%s-%s" % (rev1, config_nick), - "%s-%s" % (rev2, config_nick), - "Diff (%s)" % config_nick)) - report = CompareConfigsReport(compared_configs, **kwargs) - outfile = os.path.join( - self.eval_dir, - "%s-%s-%s-compare.%s" % ( - self.name, rev1, rev2, report.output_format)) - report(self.eval_dir, outfile) - - def publish_comparison_tables(): - for rev1, rev2 in itertools.combinations(self._revisions, 2): - outfile = os.path.join( - self.eval_dir, - "%s-%s-%s-compare.html" % (self.name, rev1, rev2)) - subprocess.call(["publish", outfile]) - - self.add_step(Step("make-comparison-tables", make_comparison_tables)) - self.add_step(Step( - "publish-comparison-tables", publish_comparison_tables)) - - def add_scatter_plot_step(self, relative=False, attributes=None): - """Add step creating (relative) scatter plots for all revision pairs. - - Create a scatter plot for each combination of attribute, - configuration and revisions pair. If *attributes* is not - specified, a list of common scatter plot attributes is used. - For portfolios all attributes except "cost", "coverage" and - "plan_length" will be ignored. 
:: - - exp.add_scatter_plot_step(attributes=["expansions"]) - - """ - if relative: - report_class = RelativeScatterPlotReport - scatter_dir = os.path.join(self.eval_dir, "scatter-relative") - step_name = "make-relative-scatter-plots" - else: - report_class = ScatterPlotReport - scatter_dir = os.path.join(self.eval_dir, "scatter-absolute") - step_name = "make-absolute-scatter-plots" - if attributes is None: - attributes = self.DEFAULT_SCATTER_PLOT_ATTRIBUTES - - def make_scatter_plot(config_nick, rev1, rev2, attribute): - name = "-".join([self.name, rev1, rev2, attribute, config_nick]) - print "Make scatter plot for", name - algo1 = "{}-{}".format(rev1, config_nick) - algo2 = "{}-{}".format(rev2, config_nick) - report = report_class( - filter_config=[algo1, algo2], - attributes=[attribute], - get_category=lambda run1, run2: run1["domain"], - legend_location=(1.3, 0.5)) - report( - self.eval_dir, - os.path.join(scatter_dir, rev1 + "-" + rev2, name)) - - def make_scatter_plots(): - for config in self._configs: - for rev1, rev2 in itertools.combinations(self._revisions, 2): - for attribute in self.get_supported_attributes( - config.nick, attributes): - make_scatter_plot(config.nick, rev1, rev2, attribute) - - self.add_step(Step(step_name, make_scatter_plots)) diff --git a/experiments/issue591/relativescatter.py b/experiments/issue591/relativescatter.py deleted file mode 100644 index 14d5d42752..0000000000 --- a/experiments/issue591/relativescatter.py +++ /dev/null @@ -1,106 +0,0 @@ -# -*- coding: utf-8 -*- - -from collections import defaultdict - -from matplotlib import ticker - -from downward.reports.scatter import ScatterPlotReport -from downward.reports.plot import PlotReport, Matplotlib, MatplotlibPlot - - -# TODO: handle outliers - -# TODO: this is mostly copied from ScatterMatplotlib (scatter.py) -class RelativeScatterMatplotlib(Matplotlib): - @classmethod - def _plot(cls, report, axes, categories, styles): - # Display grid - axes.grid(b=True, linestyle='-', 
color='0.75') - - has_points = False - # Generate the scatter plots - for category, coords in sorted(categories.items()): - X, Y = zip(*coords) - axes.scatter(X, Y, s=42, label=category, **styles[category]) - if X and Y: - has_points = True - - if report.xscale == 'linear' or report.yscale == 'linear': - plot_size = report.missing_val * 1.01 - else: - plot_size = report.missing_val * 1.25 - - # make 5 ticks above and below 1 - yticks = [] - tick_step = report.ylim_top**(1/5.0) - for i in xrange(-5, 6): - yticks.append(tick_step**i) - axes.set_yticks(yticks) - axes.get_yaxis().set_major_formatter(ticker.ScalarFormatter()) - - axes.set_xlim(report.xlim_left or -1, report.xlim_right or plot_size) - axes.set_ylim(report.ylim_bottom or -1, report.ylim_top or plot_size) - - for axis in [axes.xaxis, axes.yaxis]: - MatplotlibPlot.change_axis_formatter( - axis, - report.missing_val if report.show_missing else None) - return has_points - - -class RelativeScatterPlotReport(ScatterPlotReport): - """ - Generate a scatter plot that shows how a specific attribute in two - configurations. The attribute value in config 1 is shown on the - x-axis and the relation to the value in config 2 on the y-axis. - """ - - def __init__(self, show_missing=True, get_category=None, **kwargs): - ScatterPlotReport.__init__(self, show_missing, get_category, **kwargs) - if self.output_format == 'tex': - raise "not supported" - else: - self.writer = RelativeScatterMatplotlib - - def _fill_categories(self, runs): - # We discard the *runs* parameter. 
- # Map category names to value tuples - categories = defaultdict(list) - self.ylim_bottom = 2 - self.ylim_top = 0.5 - self.xlim_left = float("inf") - for (domain, problem), runs in self.problem_runs.items(): - if len(runs) != 2: - continue - run1, run2 = runs - assert (run1['config'] == self.configs[0] and - run2['config'] == self.configs[1]) - val1 = run1.get(self.attribute) - val2 = run2.get(self.attribute) - if val1 is None or val2 is None: - continue - category = self.get_category(run1, run2) - assert val1 > 0, (domain, problem, self.configs[0], val1) - assert val2 > 0, (domain, problem, self.configs[1], val2) - x = val1 - y = val2 / float(val1) - - categories[category].append((x, y)) - - self.ylim_top = max(self.ylim_top, y) - self.ylim_bottom = min(self.ylim_bottom, y) - self.xlim_left = min(self.xlim_left, x) - - # center around 1 - if self.ylim_bottom < 1: - self.ylim_top = max(self.ylim_top, 1 / float(self.ylim_bottom)) - if self.ylim_top > 1: - self.ylim_bottom = min(self.ylim_bottom, 1 / float(self.ylim_top)) - return categories - - def _set_scales(self, xscale, yscale): - # ScatterPlots use log-scaling on the x-axis by default. - default_xscale = 'log' - if self.attribute and self.attribute in self.LINEAR: - default_xscale = 'linear' - PlotReport._set_scales(self, xscale or default_xscale, 'log') diff --git a/experiments/issue591/v1-opt.py b/experiments/issue591/v1-opt.py deleted file mode 100755 index 4b86d5114b..0000000000 --- a/experiments/issue591/v1-opt.py +++ /dev/null @@ -1,43 +0,0 @@ -#! 
/usr/bin/env python -# -*- coding: utf-8 -*- - -import os - -from lab.environments import LocalEnvironment, MaiaEnvironment - -from common_setup import IssueConfig, IssueExperiment, is_test_run - - -BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"] -REVISIONS = ["issue591-base", "issue591-v1"] -CONFIGS = [ - IssueConfig(heuristic, ["--search", "astar({})".format(heuristic)]) - for heuristic in [ - "blind()", "cegar(max_states=10000)", "hm()", "lmcut()", "hmax()"] -] -SUITE = [ - 'barman-opt14-strips', 'cavediving-14-adl', 'childsnack-opt14-strips', - 'citycar-opt14-adl', 'floortile-opt14-strips', 'ged-opt14-strips', - 'hiking-opt14-strips', 'maintenance-opt14-adl', - 'openstacks-opt14-strips', 'parking-opt14-strips', - 'tetris-opt14-strips', 'tidybot-opt14-strips', 'transport-opt14-strips', - 'visitall-opt14-strips'] -ENVIRONMENT = MaiaEnvironment( - priority=0, email="jendrik.seipp@unibas.ch") - -if is_test_run(): - SUITE = IssueExperiment.DEFAULT_TEST_SUITE - ENVIRONMENT = LocalEnvironment(processes=1) - -exp = IssueExperiment( - revisions=REVISIONS, - configs=CONFIGS, - environment=ENVIRONMENT, -) -exp.add_suite(BENCHMARKS_DIR, SUITE) - -exp.add_absolute_report_step() -exp.add_comparison_table_step() -exp.add_scatter_plot_step(attributes=["total_time"]) - -exp() diff --git a/experiments/issue591/v1-sat.py b/experiments/issue591/v1-sat.py deleted file mode 100755 index a9fa6395cc..0000000000 --- a/experiments/issue591/v1-sat.py +++ /dev/null @@ -1,45 +0,0 @@ -#! 
/usr/bin/env python -# -*- coding: utf-8 -*- - -import os - -from lab.environments import LocalEnvironment, MaiaEnvironment - -from common_setup import IssueConfig, IssueExperiment, is_test_run - - -BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"] -REVISIONS = ["issue591-base", "issue591-v1"] -CONFIGS = [ - IssueConfig( - "lazy_greedy_{}".format(heuristic), - ["--heuristic", "h={}()".format(heuristic), - "--search", "lazy_greedy(h, preferred=h)"]) - for heuristic in ["add", "cea", "cg", "ff"] -] -SUITE = [ - 'barman-sat14-strips', 'cavediving-14-adl', 'childsnack-sat14-strips', - 'citycar-sat14-adl', 'floortile-sat14-strips', 'ged-sat14-strips', - 'hiking-sat14-strips', 'maintenance-sat14-adl', - 'openstacks-sat14-strips', 'parking-sat14-strips', - 'tetris-sat14-strips', 'thoughtful-sat14-strips', - 'transport-sat14-strips', 'visitall-sat14-strips'] -ENVIRONMENT = MaiaEnvironment( - priority=0, email="jendrik.seipp@unibas.ch") - -if is_test_run(): - SUITE = IssueExperiment.DEFAULT_TEST_SUITE - ENVIRONMENT = LocalEnvironment(processes=1) - -exp = IssueExperiment( - revisions=REVISIONS, - configs=CONFIGS, - environment=ENVIRONMENT, -) -exp.add_suite(BENCHMARKS_DIR, SUITE) - -exp.add_absolute_report_step() -exp.add_comparison_table_step() -exp.add_scatter_plot_step(attributes=["total_time"]) - -exp() diff --git a/experiments/issue592/common_setup.py b/experiments/issue592/common_setup.py deleted file mode 100644 index 595d4e5cea..0000000000 --- a/experiments/issue592/common_setup.py +++ /dev/null @@ -1,341 +0,0 @@ -# -*- coding: utf-8 -*- - -import itertools -import os -import platform -import sys - -from lab.environments import LocalEnvironment, MaiaEnvironment -from lab.experiment import ARGPARSER -from lab.steps import Step -from lab import tools - -from downward.experiment import FastDownwardExperiment -from downward.reports.absolute import AbsoluteReport -from downward.reports.compare import CompareConfigsReport -from downward.reports.scatter import 
ScatterPlotReport - - -def parse_args(): - ARGPARSER.add_argument( - "--test", - choices=["yes", "no", "auto"], - default="auto", - dest="test_run", - help="test experiment locally on a small suite if --test=yes or " - "--test=auto and we are not on a cluster") - return ARGPARSER.parse_args() - -ARGS = parse_args() - - -def get_script(): - """Get file name of main script.""" - return tools.get_script_path() - - -def get_script_dir(): - """Get directory of main script. - - Usually a relative directory (depends on how it was called by the user.)""" - return os.path.dirname(get_script()) - - -def get_experiment_name(): - """Get name for experiment. - - Derived from the absolute filename of the main script, e.g. - "/ham/spam/eggs.py" => "spam-eggs".""" - script = os.path.abspath(get_script()) - script_dir = os.path.basename(os.path.dirname(script)) - script_base = os.path.splitext(os.path.basename(script))[0] - return "%s-%s" % (script_dir, script_base) - - -def get_data_dir(): - """Get data dir for the experiment. - - This is the subdirectory "data" of the directory containing - the main script.""" - return os.path.join(get_script_dir(), "data", get_experiment_name()) - - -def get_repo_base(): - """Get base directory of the repository, as an absolute path. - - Search upwards in the directory tree from the main script until a - directory with a subdirectory named ".hg" is found. 
- - Abort if the repo base cannot be found.""" - path = os.path.abspath(get_script_dir()) - while os.path.dirname(path) != path: - if os.path.exists(os.path.join(path, ".hg")): - return path - path = os.path.dirname(path) - sys.exit("repo base could not be found") - - -def is_running_on_cluster(): - node = platform.node() - return ( - "cluster" in node or - node.startswith("gkigrid") or - node in ["habakuk", "turtur"]) - - -def is_test_run(): - return ARGS.test_run == "yes" or ( - ARGS.test_run == "auto" and not is_running_on_cluster()) - - -def get_algo_nick(revision, config_nick): - return "{revision}-{config_nick}".format(**locals()) - - -class IssueConfig(object): - """Hold information about a planner configuration. - - See FastDownwardExperiment.add_algorithm() for documentation of the - constructor's options. - - """ - def __init__(self, nick, component_options, - build_options=None, driver_options=None): - self.nick = nick - self.component_options = component_options - self.build_options = build_options - self.driver_options = driver_options - - -class IssueExperiment(FastDownwardExperiment): - """Wrapper for FastDownwardExperiment with a few convenience features.""" - - DEFAULT_TEST_SUITE = "gripper:prob01.pddl" - - DEFAULT_TABLE_ATTRIBUTES = [ - "cost", - "coverage", - "error", - "evaluations", - "expansions", - "expansions_until_last_jump", - "generated", - "memory", - "quality", - "run_dir", - "score_evaluations", - "score_expansions", - "score_generated", - "score_memory", - "score_search_time", - "score_total_time", - "search_time", - "total_time", - ] - - DEFAULT_SCATTER_PLOT_ATTRIBUTES = [ - "evaluations", - "expansions", - "expansions_until_last_jump", - "initial_h_value", - "memory", - "search_time", - "total_time", - ] - - PORTFOLIO_ATTRIBUTES = [ - "cost", - "coverage", - "error", - "plan_length", - "run_dir", - ] - - def __init__(self, revisions, configs, suite, grid_priority=None, - path=None, test_suite=None, email=None, **kwargs): - """Create 
a DownwardExperiment with some convenience features. - - If *revisions* is specified, it should be a non-empty - list of revisions, which specify which planner versions to use - in the experiment. The same versions are used for translator, - preprocessor and search. :: - - IssueExperiment(revisions=["issue123", "4b3d581643"], ...) - - *configs* must be a non-empty list of IssueConfig objects. :: - - IssueExperiment(..., configs=[ - IssueConfig("ff", ["--search", "eager_greedy(ff())"]), - IssueConfig( - "lama", [], - driver_options=["--alias", "seq-sat-lama-2011"]), - ]) - - *suite* sets the benchmarks for the experiment. It must be a - single string or a list of strings specifying domains or - tasks. The downward.suites module has many predefined - suites. :: - - IssueExperiment(..., suite=["grid", "gripper:prob01.pddl"]) - - from downward import suites - IssueExperiment(..., suite=suites.suite_all()) - IssueExperiment(..., suite=suites.suite_satisficing_with_ipc11()) - IssueExperiment(..., suite=suites.suite_optimal()) - - Use *grid_priority* to set the job priority for cluster - experiments. It must be in the range [-1023, 0] where 0 is the - highest priority. By default the priority is 0. :: - - IssueExperiment(..., grid_priority=-500) - - If *path* is specified, it must be the path to where the - experiment should be built (e.g. - /home/john/experiments/issue123/exp01/). If omitted, the - experiment path is derived automatically from the main - script's filename. Example:: - - script = experiments/issue123/exp01.py --> - path = experiments/issue123/data/issue123-exp01/ - - Specify *test_suite* to set the benchmarks for experiment test - runs. By default the first gripper task is used. - - IssueExperiment(..., test_suite=["depot:pfile1", "tpp:p01.pddl"]) - - If *email* is specified, it should be an email address. This - email address will be notified upon completion of the experiments - if it is run on the cluster. 
- """ - - if is_test_run(): - kwargs["environment"] = LocalEnvironment() - suite = test_suite or self.DEFAULT_TEST_SUITE - elif "environment" not in kwargs: - kwargs["environment"] = MaiaEnvironment( - priority=grid_priority, email=email) - - path = path or get_data_dir() - - FastDownwardExperiment.__init__(self, path=path, **kwargs) - - repo = get_repo_base() - for rev in revisions: - for config in configs: - self.add_algorithm( - get_algo_nick(rev, config.nick), - repo, - rev, - config.component_options, - build_options=config.build_options, - driver_options=config.driver_options) - - self.add_suite(os.path.join(repo, "benchmarks"), suite) - - self._revisions = revisions - self._configs = configs - - @classmethod - def _is_portfolio(cls, config_nick): - return "fdss" in config_nick - - @classmethod - def get_supported_attributes(cls, config_nick, attributes): - if cls._is_portfolio(config_nick): - return [attr for attr in attributes - if attr in cls.PORTFOLIO_ATTRIBUTES] - return attributes - - def add_absolute_report_step(self, **kwargs): - """Add step that makes an absolute report. - - Absolute reports are useful for experiments that don't - compare revisions. - - The report is written to the experiment evaluation directory. - - All *kwargs* will be passed to the AbsoluteReport class. If - the keyword argument *attributes* is not specified, a - default list of attributes is used. :: - - exp.add_absolute_report_step(attributes=["coverage"]) - - """ - kwargs.setdefault("attributes", self.DEFAULT_TABLE_ATTRIBUTES) - report = AbsoluteReport(**kwargs) - outfile = get_experiment_name() + "." + report.output_format - self.add_report(report, outfile=outfile) - - def add_comparison_table_step(self, **kwargs): - """Add a step that makes pairwise revision comparisons. - - Create comparative reports for all pairs of Fast Downward - revisions. 
Each report pairs up the runs of the same config and - lists the two absolute attribute values and their difference - for all attributes in kwargs["attributes"]. - - All *kwargs* will be passed to the CompareConfigsReport class. - If the keyword argument *attributes* is not specified, a - default list of attributes is used. :: - - exp.add_comparison_table_step(attributes=["coverage"]) - - """ - kwargs.setdefault("attributes", self.DEFAULT_TABLE_ATTRIBUTES) - - def make_comparison_tables(): - for rev1, rev2 in itertools.combinations(self._revisions, 2): - compared_configs = [] - for config in self._configs: - config_nick = config.nick - compared_configs.append(( - "{rev1}-{config_nick}".format(**locals()), - "{rev2}-{config_nick}".format(**locals()), - "Diff ({config_nick})".format(**locals()))) - report = CompareConfigsReport(compared_configs, **kwargs) - outfile = os.path.join( - self.eval_dir, - "{name}-{rev1}-{rev2}-compare.html".format( - name=self.name, **locals())) - report(self.eval_dir, outfile) - - self.add_step(Step("make-comparison-tables", make_comparison_tables)) - - def add_scatter_plot_step(self, attributes=None): - """Add a step that creates scatter plots for all revision pairs. - - Create a scatter plot for each combination of attribute, - configuration and revisions pair. If *attributes* is not - specified, a list of common scatter plot attributes is used. - For portfolios all attributes except "cost", "coverage" and - "plan_length" will be ignored. 
:: - - exp.add_scatter_plot_step(attributes=["expansions"]) - - """ - if attributes is None: - attributes = self.DEFAULT_SCATTER_PLOT_ATTRIBUTES - scatter_dir = os.path.join(self.eval_dir, "scatter") - - def make_scatter_plot(config_nick, rev1, rev2, attribute): - name = "-".join([self.name, rev1, rev2, attribute, config_nick]) - print "Make scatter plot for", name - algo1 = "%s-%s" % (rev1, config_nick) - algo2 = "%s-%s" % (rev2, config_nick) - report = ScatterPlotReport( - filter_config=[algo1, algo2], - attributes=[attribute], - get_category=lambda run1, run2: run1["domain"], - legend_location=(1.3, 0.5)) - report( - self.eval_dir, - os.path.join(scatter_dir, rev1 + "-" + rev2, name)) - - def make_scatter_plots(): - for config in self._configs: - for rev1, rev2 in itertools.combinations(self._revisions, 2): - for attribute in self.get_supported_attributes( - config.nick, attributes): - make_scatter_plot(config.nick, rev1, rev2, attribute) - - self.add_step(Step("make-scatter-plots", make_scatter_plots)) diff --git a/experiments/issue592/v1-lama-opt.py b/experiments/issue592/v1-lama-opt.py deleted file mode 100755 index 0817f9483c..0000000000 --- a/experiments/issue592/v1-lama-opt.py +++ /dev/null @@ -1,25 +0,0 @@ -#! /usr/bin/env python -# -*- coding: utf-8 -*- - -from downward import suites - -from common_setup import IssueConfig, IssueExperiment - - -REVS = ["issue592-base", "issue592-v1"] -SUITE = suites.suite_optimal_strips() - -CONFIGS = [ - IssueConfig("seq-opt-bjolp", [], driver_options=["--alias", "seq-opt-bjolp"]), -] - -exp = IssueExperiment( - revisions=REVS, - configs=CONFIGS, - suite=SUITE, - email="manuel.heusner@unibas.ch" -) - -exp.add_comparison_table_step() - -exp() diff --git a/experiments/issue592/v1-lama-sat.py b/experiments/issue592/v1-lama-sat.py deleted file mode 100755 index 0af446c3a1..0000000000 --- a/experiments/issue592/v1-lama-sat.py +++ /dev/null @@ -1,26 +0,0 @@ -#! 
/usr/bin/env python -# -*- coding: utf-8 -*- - -from downward import suites - -from common_setup import IssueConfig, IssueExperiment - - -REVS = ["issue592-base", "issue592-v1"] -SUITE = suites.suite_satisficing() - -CONFIGS = [ - IssueConfig("seq-sat-lama-2011", [], driver_options=["--alias", "seq-sat-lama-2011"]), - IssueConfig("lama-first", [], driver_options=["--alias", "lama-first"]), -] - -exp = IssueExperiment( - revisions=REVS, - configs=CONFIGS, - suite=SUITE, - email="manuel.heusner@unibas.ch" -) - -exp.add_comparison_table_step() - -exp() diff --git a/experiments/issue592/v2-lama-opt.py b/experiments/issue592/v2-lama-opt.py deleted file mode 100755 index 4f2eddca27..0000000000 --- a/experiments/issue592/v2-lama-opt.py +++ /dev/null @@ -1,25 +0,0 @@ -#! /usr/bin/env python -# -*- coding: utf-8 -*- - -from downward import suites - -from common_setup import IssueConfig, IssueExperiment - - -REVS = ["issue592-base", "issue592-v2"] -SUITE = suites.suite_optimal_strips() - -CONFIGS = [ - IssueConfig("seq-opt-bjolp", [], driver_options=["--alias", "seq-opt-bjolp"]), -] - -exp = IssueExperiment( - revisions=REVS, - configs=CONFIGS, - suite=SUITE, - email="manuel.heusner@unibas.ch" -) - -exp.add_comparison_table_step() - -exp() diff --git a/experiments/issue592/v2-lama-sat.py b/experiments/issue592/v2-lama-sat.py deleted file mode 100755 index 0c8a78dd1f..0000000000 --- a/experiments/issue592/v2-lama-sat.py +++ /dev/null @@ -1,26 +0,0 @@ -#! 
/usr/bin/env python -# -*- coding: utf-8 -*- - -from downward import suites - -from common_setup import IssueConfig, IssueExperiment - - -REVS = ["issue592-base", "issue592-v2"] -SUITE = suites.suite_satisficing() - -CONFIGS = [ - IssueConfig("seq-sat-lama-2011", [], driver_options=["--alias", "seq-sat-lama-2011"]), - IssueConfig("lama-first", [], driver_options=["--alias", "lama-first"]), -] - -exp = IssueExperiment( - revisions=REVS, - configs=CONFIGS, - suite=SUITE, - email="manuel.heusner@unibas.ch" -) - -exp.add_comparison_table_step() - -exp() diff --git a/experiments/issue592/v3-lama-opt.py b/experiments/issue592/v3-lama-opt.py deleted file mode 100755 index aec5b33253..0000000000 --- a/experiments/issue592/v3-lama-opt.py +++ /dev/null @@ -1,25 +0,0 @@ -#! /usr/bin/env python -# -*- coding: utf-8 -*- - -from downward import suites - -from common_setup import IssueConfig, IssueExperiment - - -REVS = ["issue592-base", "issue592-v3"] -SUITE = suites.suite_optimal_strips() - -CONFIGS = [ - IssueConfig("seq-opt-bjolp", [], driver_options=["--alias", "seq-opt-bjolp"]), -] - -exp = IssueExperiment( - revisions=REVS, - configs=CONFIGS, - suite=SUITE, - email="manuel.heusner@unibas.ch" -) - -exp.add_comparison_table_step() - -exp() diff --git a/experiments/issue592/v3-lama-opt2.py b/experiments/issue592/v3-lama-opt2.py deleted file mode 100755 index 517f72914b..0000000000 --- a/experiments/issue592/v3-lama-opt2.py +++ /dev/null @@ -1,54 +0,0 @@ -#! 
/usr/bin/env python -# -*- coding: utf-8 -*- - -from downward import suites - -from common_setup import IssueConfig, IssueExperiment - - -REVS = ["issue592-base", "issue592-v3"] -SUITE = suites.suite_optimal_strips() - -CONFIGS = [ - IssueConfig("lm_zg", [ - "--landmarks", - "lm=lm_zg()", - "--heuristic", - "hlm=lmcount(lm)", - "--search", - "astar(hlm)"]), - IssueConfig("lm_exhaust", [ - "--landmarks", - "lm=lm_exhaust()", - "--heuristic", - "hlm=lmcount(lm)", - "--search", - "astar(hlm)"]), - IssueConfig("lm_hm", [ - "--landmarks", - "lm=lm_hm(2)", - "--heuristic", - "hlm=lmcount(lm)", - "--search", - "astar(hlm)"]), - IssueConfig("lm_hm_max", [ - "--landmarks", - "lm=lm_hm(2)", - "--heuristic", - "h1=lmcount(lm,admissible=true)", - "--heuristic", - "h2=lmcount(lm,admissible=false)", - "--search", - "astar(max([h1,h2]))"]), -] - -exp = IssueExperiment( - revisions=REVS, - configs=CONFIGS, - suite=SUITE, - email="manuel.heusner@unibas.ch" -) - -exp.add_comparison_table_step() - -exp() diff --git a/experiments/issue592/v3-lama-sat.py b/experiments/issue592/v3-lama-sat.py deleted file mode 100755 index 1986bcaf28..0000000000 --- a/experiments/issue592/v3-lama-sat.py +++ /dev/null @@ -1,26 +0,0 @@ -#! /usr/bin/env python -# -*- coding: utf-8 -*- - -from downward import suites - -from common_setup import IssueConfig, IssueExperiment - - -REVS = ["issue592-base", "issue592-v3"] -SUITE = suites.suite_satisficing() - -CONFIGS = [ - IssueConfig("seq-sat-lama-2011", [], driver_options=["--alias", "seq-sat-lama-2011"]), - IssueConfig("lama-first", [], driver_options=["--alias", "lama-first"]), -] - -exp = IssueExperiment( - revisions=REVS, - configs=CONFIGS, - suite=SUITE, - email="manuel.heusner@unibas.ch" -) - -exp.add_comparison_table_step() - -exp() diff --git a/experiments/issue592/v4-lama-opt.py b/experiments/issue592/v4-lama-opt.py deleted file mode 100755 index 083d5baa50..0000000000 --- a/experiments/issue592/v4-lama-opt.py +++ /dev/null @@ -1,55 +0,0 @@ -#! 
/usr/bin/env python -# -*- coding: utf-8 -*- - -from downward import suites - -from common_setup import IssueConfig, IssueExperiment - - -REVS = ["issue592-base", "issue592-v4"] -SUITE = suites.suite_optimal_strips() - -CONFIGS = [ - IssueConfig("seq-opt-bjolp", [], driver_options=["--alias", "seq-opt-bjolp"]), - IssueConfig("lm_zg", [ - "--landmarks", - "lm=lm_zg()", - "--heuristic", - "hlm=lmcount(lm)", - "--search", - "astar(hlm)"]), - IssueConfig("lm_exhaust", [ - "--landmarks", - "lm=lm_exhaust()", - "--heuristic", - "hlm=lmcount(lm)", - "--search", - "astar(hlm)"]), - IssueConfig("lm_hm", [ - "--landmarks", - "lm=lm_hm(2)", - "--heuristic", - "hlm=lmcount(lm)", - "--search", - "astar(hlm)"]), - IssueConfig("lm_hm_max", [ - "--landmarks", - "lm=lm_hm(2)", - "--heuristic", - "h1=lmcount(lm,admissible=true)", - "--heuristic", - "h2=lmcount(lm,admissible=false)", - "--search", - "astar(max([h1,h2]))"]), -] - -exp = IssueExperiment( - revisions=REVS, - configs=CONFIGS, - suite=SUITE, - email="manuel.heusner@unibas.ch" -) - -exp.add_comparison_table_step() - -exp() diff --git a/experiments/issue592/v4-lama-sat.py b/experiments/issue592/v4-lama-sat.py deleted file mode 100755 index 191cb74004..0000000000 --- a/experiments/issue592/v4-lama-sat.py +++ /dev/null @@ -1,26 +0,0 @@ -#! 
/usr/bin/env python -# -*- coding: utf-8 -*- - -from downward import suites - -from common_setup import IssueConfig, IssueExperiment - - -REVS = ["issue592-base", "issue592-v4"] -SUITE = suites.suite_satisficing() - -CONFIGS = [ - IssueConfig("seq-sat-lama-2011", [], driver_options=["--alias", "seq-sat-lama-2011"]), - IssueConfig("lama-first", [], driver_options=["--alias", "lama-first"]), -] - -exp = IssueExperiment( - revisions=REVS, - configs=CONFIGS, - suite=SUITE, - email="manuel.heusner@unibas.ch" -) - -exp.add_comparison_table_step() - -exp() diff --git a/experiments/issue595/common_setup.py b/experiments/issue595/common_setup.py deleted file mode 100644 index c628c8b6c5..0000000000 --- a/experiments/issue595/common_setup.py +++ /dev/null @@ -1,300 +0,0 @@ -# -*- coding: utf-8 -*- - -import itertools -import os -import platform -import subprocess -import sys - -from lab.environments import LocalEnvironment, MaiaEnvironment -from lab.experiment import ARGPARSER -from lab.steps import Step - -from downward.experiments.fast_downward_experiment import FastDownwardExperiment -from downward.reports.absolute import AbsoluteReport -from downward.reports.compare import CompareRevisionsReport -from downward.reports.scatter import ScatterPlotReport - -def parse_args(): - ARGPARSER.add_argument( - "--test", - choices=["yes", "no", "auto"], - default="auto", - dest="test_run", - help="test experiment locally on a small suite if --test=yes or " - "--test=auto and we are not on a cluster") - return ARGPARSER.parse_args() - -ARGS = parse_args() - -def get_script(): - """Get file name of main script.""" - import __main__ - return __main__.__file__ - - -def get_script_dir(): - """Get directory of main script. - - Usually a relative directory (depends on how it was called by the user.)""" - return os.path.dirname(get_script()) - - -def get_repo_base(): - """Get base directory of the repository, as an absolute path. 
- - Search upwards in the directory tree from the main script until a - directory with a subdirectory named ".hg" is found. - - Abort if the repo base cannot be found.""" - path = os.path.abspath(get_script_dir()) - while os.path.dirname(path) != path: - if os.path.exists(os.path.join(path, ".hg")): - return path - path = os.path.dirname(path) - sys.exit("repo base could not be found") - - -def is_running_on_cluster(): - node = platform.node() - return ("cluster" in node or - node.startswith("gkigrid") or - node in ["habakuk", "turtur"]) - - -def is_test_run(): - return ARGS.test_run == "yes" or (ARGS.test_run == "auto" and - not is_running_on_cluster()) - - -class IssueExperiment(FastDownwardExperiment): - """Wrapper for FastDownwardExperiment with a few convenience features.""" - - DEFAULT_TEST_SUITE = "gripper:prob01.pddl" - - DEFAULT_TABLE_ATTRIBUTES = [ - "cost", - "coverage", - "error", - "evaluations", - "expansions", - "expansions_until_last_jump", - "generated", - "memory", - "quality", - "run_dir", - "score_evaluations", - "score_expansions", - "score_generated", - "score_memory", - "score_search_time", - "score_total_time", - "search_time", - "total_time", - ] - - DEFAULT_SCATTER_PLOT_ATTRIBUTES = [ - "evaluations", - "expansions", - "expansions_until_last_jump", - "initial_h_value", - "memory", - "search_time", - "total_time", - ] - - PORTFOLIO_ATTRIBUTES = [ - "cost", - "coverage", - "plan_length", - ] - - def __init__(self, configs, revisions, suite, build_options=None, - driver_options=None, grid_priority=None, - test_suite=None, email=None, processes=1, **kwargs): - """Create an FastDownwardExperiment with some convenience features. - All configs will be run on all revisions. Inherited options - *path*, *environment* and *cache_dir* from FastDownwardExperiment - are not supported and will be automatically set. - - *configs* must be a non-empty dict of {nick: cmdline} pairs - that sets the planner configurations to test. 
nick will - automatically get the revision prepended, e.g. - 'issue123-base-':: - - IssueExperiment(configs={ - "lmcut": ["--search", "astar(lmcut())"], - "ipdb": ["--search", "astar(ipdb())"]}) - - *revisions* must be a non-empty list of revisions, which - specify which planner versions to use in the experiment. - The same versions are used for translator, preprocessor - and search. :: - - IssueExperiment(revisions=["issue123", "4b3d581643"]) - - *suite* sets the benchmarks for the experiment. It must be a - single string or a list of strings specifying domains or - tasks. The downward.suites module has many predefined - suites. :: - - IssueExperiment(suite=["grid", "gripper:prob01.pddl"]) - - from downward import suites - IssueExperiment(suite=suites.suite_all()) - IssueExperiment(suite=suites.suite_satisficing_with_ipc11()) - IssueExperiment(suite=suites.suite_optimal()) - - Use *grid_priority* to set the job priority for cluster - experiments. It must be in the range [-1023, 0] where 0 is the - highest priority. By default the priority is 0. :: - - IssueExperiment(grid_priority=-500) - - Specify *test_suite* to set the benchmarks for experiment test - runs. By default the first gripper task is used. 
- - IssueExperiment(test_suite=["depot:pfile1", "tpp:p01.pddl"]) - - """ - - if is_test_run(): - environment = LocalEnvironment(processes=processes) - suite = test_suite or self.DEFAULT_TEST_SUITE - elif "environment" not in kwargs: - environment = MaiaEnvironment(priority=grid_priority, - email=email) - - FastDownwardExperiment.__init__(self, environment=environment, - **kwargs) - - # Automatically deduce the downward repository from the file - repo = get_repo_base() - self.algorithm_nicks = [] - self.revisions = revisions - for nick, cmdline in configs.items(): - for rev in revisions: - algo_nick = '%s-%s' % (rev, nick) - self.add_algorithm(algo_nick, repo, rev, cmdline, - build_options, driver_options) - self.algorithm_nicks.append(algo_nick) - - benchmarks_dir = os.path.join(repo, 'benchmarks') - self.add_suite(benchmarks_dir, suite) - self.search_parsers = [] - - def add_absolute_report_step(self, **kwargs): - """Add step that makes an absolute report. - - Absolute reports are useful for experiments that don't - compare revisions. - - The report is written to the experiment evaluation directory. - - All *kwargs* will be passed to the AbsoluteReport class. If - the keyword argument *attributes* is not specified, a - default list of attributes is used. :: - - exp.add_absolute_report_step(attributes=["coverage"]) - - """ - kwargs.setdefault("attributes", self.DEFAULT_TABLE_ATTRIBUTES) - report = AbsoluteReport(**kwargs) - # oufile is of the form --...-. - outfile = '' - for rev in self.revisions: - outfile += rev - outfile += '-' - outfile = outfile[:len(outfile)-1] - outfile += '.' - outfile += report.output_format - outfile = os.path.join(self.eval_dir, outfile) - self.add_report(report, outfile=outfile) - self.add_step(Step('publish-absolute-report', subprocess.call, ['publish', outfile])) - - def add_comparison_table_step(self, **kwargs): - """Add a step that makes pairwise revision comparisons. 
- - Create comparative reports for all pairs of Fast Downward - revision triples. Each report pairs up the runs of the same - config and lists the two absolute attribute values and their - difference for all attributes in kwargs["attributes"]. - - All *kwargs* will be passed to the CompareRevisionsReport - class. If the keyword argument *attributes* is not - specified, a default list of attributes is used. :: - - exp.add_comparison_table_step(attributes=["coverage"]) - - """ - kwargs.setdefault("attributes", self.DEFAULT_TABLE_ATTRIBUTES) - - def make_comparison_tables(): - for rev1, rev2 in itertools.combinations(self.revisions, 2): - report = CompareRevisionsReport(rev1, rev2, **kwargs) - outfile = os.path.join(self.eval_dir, - "%s-%s-compare.html" % - (rev1, rev2)) - report(self.eval_dir, outfile) - - self.add_step(Step("make-comparison-tables", make_comparison_tables)) - - def publish_comparison_tables(): - for rev1, rev2 in itertools.combinations(self.revisions, 2): - outfile = os.path.join(self.eval_dir, - "%s-%s-compare.html" % - (rev1, rev2)) - subprocess.call(['publish', outfile]) - - self.add_step(Step('publish-comparison-reports', publish_comparison_tables)) - - # TODO: this is copied from the old common_setup, but not tested - # with the new FastDownwardExperiment class! - def add_scatter_plot_step(self, attributes=None): - print 'This has not been tested with the new FastDownwardExperiment class!' - exit(0) - """Add a step that creates scatter plots for all revision pairs. - - Create a scatter plot for each combination of attribute, - configuration and revision pair. If *attributes* is not - specified, a list of common scatter plot attributes is used. - For portfolios all attributes except "cost", "coverage" and - "plan_length" will be ignored. 
:: - - exp.add_scatter_plot_step(attributes=["expansions"]) - - """ - if attributes is None: - attributes = self.DEFAULT_SCATTER_PLOT_ATTRIBUTES - scatter_dir = os.path.join(self.eval_dir, "scatter") - - def is_portfolio(config_nick): - return "fdss" in config_nick - - def make_scatter_plot(config_nick, rev1, rev2, attribute): - name = "-".join([self.name, rev1, rev2, attribute, config_nick]) - print "Make scatter plot for", name - algo1 = "%s-%s" % (rev1, config_nick) - algo2 = "%s-%s" % (rev2, config_nick) - report = ScatterPlotReport( - filter_config=[algo1, algo2], - attributes=[attribute], - get_category=lambda run1, run2: run1["domain"], - legend_location=(1.3, 0.5)) - report(self.eval_dir, - os.path.join(scatter_dir, rev1 + "-" + rev2, name)) - - def make_scatter_plots(): - for config_nick in self._config_nicks: - if is_portfolio(config_nick): - valid_attributes = [ - attr for attr in attributes - if attr in self.PORTFOLIO_ATTRIBUTES] - else: - valid_attributes = attributes - for rev1, rev2 in itertools.combinations( - self.revision_nicks, 2): - for attribute in valid_attributes: - make_scatter_plot(config_nick, rev1, rev2, attribute) - - self.add_step(Step("make-scatter-plots", make_scatter_plots)) diff --git a/experiments/issue595/issue595-v1.py b/experiments/issue595/issue595-v1.py deleted file mode 100755 index 8bf7585dd5..0000000000 --- a/experiments/issue595/issue595-v1.py +++ /dev/null @@ -1,6 +0,0 @@ -#! /usr/bin/env python -# -*- coding: utf-8 -*- - -from main import main - -main(revisions=["issue595-base", "issue595-v1"]) diff --git a/experiments/issue595/issue595-v2.py b/experiments/issue595/issue595-v2.py deleted file mode 100755 index a7c6c0659c..0000000000 --- a/experiments/issue595/issue595-v2.py +++ /dev/null @@ -1,6 +0,0 @@ -#! 
/usr/bin/env python -# -*- coding: utf-8 -*- - -from main import main - -main(revisions=["issue595-v1", "issue595-v2"]) diff --git a/experiments/issue595/issue595-v3.py b/experiments/issue595/issue595-v3.py deleted file mode 100755 index 7332938841..0000000000 --- a/experiments/issue595/issue595-v3.py +++ /dev/null @@ -1,6 +0,0 @@ -#! /usr/bin/env python -# -*- coding: utf-8 -*- - -from main import main - -main(revisions=["issue595-v1", "issue595-v3"]) diff --git a/experiments/issue595/main.py b/experiments/issue595/main.py deleted file mode 100644 index a2ab217f2d..0000000000 --- a/experiments/issue595/main.py +++ /dev/null @@ -1,75 +0,0 @@ -#! /usr/bin/env python -# -*- coding: utf-8 -*- - -from downward import suites -from lab.reports import Attribute, gm - -import common_setup - - -def main(revisions=None): - SUITE = suites.suite_optimal_with_ipc11() - - B_CONFIGS = { - 'rl-b50k': ['--search', 'astar(merge_and_shrink(merge_strategy=merge_linear(variable_order=reverse_level),shrink_strategy=shrink_bisimulation(max_states=50000,threshold=1,greedy=false),label_reduction=label_reduction(before_shrinking=true,before_merging=false)))'], - 'cggl-b50k': ['--search', 'astar(merge_and_shrink(merge_strategy=merge_linear(variable_order=cg_goal_level),shrink_strategy=shrink_bisimulation(max_states=50000,threshold=1,greedy=false),label_reduction=label_reduction(before_shrinking=true,before_merging=false)))'], - 'dfp-b50k': ['--search', 'astar(merge_and_shrink(merge_strategy=merge_dfp,shrink_strategy=shrink_bisimulation(max_states=50000,threshold=1,greedy=false),label_reduction=label_reduction(before_shrinking=true,before_merging=false)))'], - } - G_CONFIGS = { - 'rl-ginf': ['--search', 'astar(merge_and_shrink(merge_strategy=merge_linear(variable_order=reverse_level),shrink_strategy=shrink_bisimulation(max_states=infinity,threshold=1,greedy=true),label_reduction=label_reduction(before_shrinking=true,before_merging=false)))'], - 'cggl-ginf': ['--search', 
'astar(merge_and_shrink(merge_strategy=merge_linear(variable_order=cg_goal_level),shrink_strategy=shrink_bisimulation(max_states=infinity,threshold=1,greedy=true),label_reduction=label_reduction(before_shrinking=true,before_merging=false)))'], - 'dfp-ginf': ['--search', 'astar(merge_and_shrink(merge_strategy=merge_dfp,shrink_strategy=shrink_bisimulation(max_states=infinity,threshold=1,greedy=true),label_reduction=label_reduction(before_shrinking=true,before_merging=false)))'], - } - F_CONFIGS = { - 'rl-f50k': ['--search', 'astar(merge_and_shrink(merge_strategy=merge_linear(variable_order=reverse_level),shrink_strategy=shrink_fh(max_states=50000),label_reduction=label_reduction(before_shrinking=false,before_merging=true)))'], - 'cggl-f50k': ['--search', 'astar(merge_and_shrink(merge_strategy=merge_linear(variable_order=cg_goal_level),shrink_strategy=shrink_fh(max_states=50000),label_reduction=label_reduction(before_shrinking=false,before_merging=true)))'], - 'dfp-f50k': ['--search', 'astar(merge_and_shrink(merge_strategy=merge_dfp,shrink_strategy=shrink_fh(max_states=50000),label_reduction=label_reduction(before_shrinking=false,before_merging=true)))'], - } - CONFIGS = dict(B_CONFIGS) - CONFIGS.update(G_CONFIGS) - CONFIGS.update(F_CONFIGS) - - exp = common_setup.IssueExperiment( - revisions=revisions, - configs=CONFIGS, - suite=SUITE, - test_suite=['depot:pfile1'], - processes=4, - email='silvan.sievers@unibas.ch', - ) - exp.add_resource('ms_parser', 'ms-parser.py', dest='ms-parser.py') - exp.add_command('ms-parser', ['ms_parser']) - - # planner outcome attributes - perfect_heuristic = Attribute('perfect_heuristic', absolute=True, min_wins=False) - proved_unsolvability = Attribute('proved_unsolvability', absolute=True, min_wins=False) - actual_search_time = Attribute('actual_search_time', absolute=False, min_wins=True, functions=[gm]) - - # m&s attributes - ms_construction_time = Attribute('ms_construction_time', absolute=False, min_wins=True, functions=[gm]) - 
ms_abstraction_constructed = Attribute('ms_abstraction_constructed', absolute=True, min_wins=False) - ms_final_size = Attribute('ms_final_size', absolute=False, min_wins=True) - ms_out_of_memory = Attribute('ms_out_of_memory', absolute=True, min_wins=True) - ms_out_of_time = Attribute('ms_out_of_time', absolute=True, min_wins=True) - search_out_of_memory = Attribute('search_out_of_memory', absolute=True, min_wins=True) - search_out_of_time = Attribute('search_out_of_time', absolute=True, min_wins=True) - - extra_attributes = [ - perfect_heuristic, - proved_unsolvability, - actual_search_time, - - ms_construction_time, - ms_abstraction_constructed, - ms_final_size, - ms_out_of_memory, - ms_out_of_time, - search_out_of_memory, - search_out_of_time, - ] - attributes = exp.DEFAULT_TABLE_ATTRIBUTES - attributes.extend(extra_attributes) - - exp.add_comparison_table_step(attributes=attributes) - - exp() diff --git a/experiments/issue595/ms-parser.py b/experiments/issue595/ms-parser.py deleted file mode 100755 index 4d8e840ae2..0000000000 --- a/experiments/issue595/ms-parser.py +++ /dev/null @@ -1,71 +0,0 @@ -#! 
/usr/bin/env python - -from lab.parser import Parser - -parser = Parser() -parser.add_pattern('actual_search_time', 'Actual search time: (.+)s \[.+s\]', required=False, type=float) -parser.add_pattern('ms_final_size', 'Final transition system size: (\d+)', required=False, type=int) -parser.add_pattern('ms_construction_time', 'Done initializing merge-and-shrink heuristic \[(.+)s\]', required=False, type=float) - -def check_ms_constructed(content, props): - ms_construction_time = props.get('ms_construction_time') - abstraction_constructed = False - if ms_construction_time is not None: - abstraction_constructed = True - props['ms_abstraction_constructed'] = abstraction_constructed - -parser.add_function(check_ms_constructed) - -def check_proved_unsolvability(content, props): - proved_unsolvability = False - if props['coverage'] == 0: - for line in content.splitlines(): - if line == 'Completely explored state space -- no solution!': - proved_unsolvability = True - break - props['proved_unsolvability'] = proved_unsolvability - -parser.add_function(check_proved_unsolvability) - -def check_planner_exit_reason(content, props): - ms_abstraction_constructed = props.get('ms_abstraction_constructed') - error = props.get('error') - if error != 'none' and error != 'timeout' and error != 'out-of-memory': - print 'error: %s' % error - return - - # Check whether merge-and-shrink computation or search ran out of - # time or memory. 
- ms_out_of_time = False - ms_out_of_memory = False - search_out_of_time = False - search_out_of_memory = False - if ms_abstraction_constructed == False: - if error == 'timeout': - ms_out_of_time = True - elif error == 'out-of-memory': - ms_out_of_memory = True - elif ms_abstraction_constructed == True: - if error == 'timeout': - search_out_of_time = True - elif error == 'out-of-memory': - search_out_of_memory = True - props['ms_out_of_time'] = ms_out_of_time - props['ms_out_of_memory'] = ms_out_of_memory - props['search_out_of_time'] = search_out_of_time - props['search_out_of_memory'] = search_out_of_memory - -parser.add_function(check_planner_exit_reason) - -def check_perfect_heuristic(content, props): - plan_length = props.get('plan_length') - expansions = props.get('expansions') - if plan_length != None: - perfect_heuristic = False - if plan_length + 1 == expansions: - perfect_heuristic = True - props['perfect_heuristic'] = perfect_heuristic - -parser.add_function(check_perfect_heuristic) - -parser.parse() diff --git a/experiments/issue596/common_setup.py b/experiments/issue596/common_setup.py deleted file mode 100644 index c628c8b6c5..0000000000 --- a/experiments/issue596/common_setup.py +++ /dev/null @@ -1,300 +0,0 @@ -# -*- coding: utf-8 -*- - -import itertools -import os -import platform -import subprocess -import sys - -from lab.environments import LocalEnvironment, MaiaEnvironment -from lab.experiment import ARGPARSER -from lab.steps import Step - -from downward.experiments.fast_downward_experiment import FastDownwardExperiment -from downward.reports.absolute import AbsoluteReport -from downward.reports.compare import CompareRevisionsReport -from downward.reports.scatter import ScatterPlotReport - -def parse_args(): - ARGPARSER.add_argument( - "--test", - choices=["yes", "no", "auto"], - default="auto", - dest="test_run", - help="test experiment locally on a small suite if --test=yes or " - "--test=auto and we are not on a cluster") - return 
ARGPARSER.parse_args() - -ARGS = parse_args() - -def get_script(): - """Get file name of main script.""" - import __main__ - return __main__.__file__ - - -def get_script_dir(): - """Get directory of main script. - - Usually a relative directory (depends on how it was called by the user.)""" - return os.path.dirname(get_script()) - - -def get_repo_base(): - """Get base directory of the repository, as an absolute path. - - Search upwards in the directory tree from the main script until a - directory with a subdirectory named ".hg" is found. - - Abort if the repo base cannot be found.""" - path = os.path.abspath(get_script_dir()) - while os.path.dirname(path) != path: - if os.path.exists(os.path.join(path, ".hg")): - return path - path = os.path.dirname(path) - sys.exit("repo base could not be found") - - -def is_running_on_cluster(): - node = platform.node() - return ("cluster" in node or - node.startswith("gkigrid") or - node in ["habakuk", "turtur"]) - - -def is_test_run(): - return ARGS.test_run == "yes" or (ARGS.test_run == "auto" and - not is_running_on_cluster()) - - -class IssueExperiment(FastDownwardExperiment): - """Wrapper for FastDownwardExperiment with a few convenience features.""" - - DEFAULT_TEST_SUITE = "gripper:prob01.pddl" - - DEFAULT_TABLE_ATTRIBUTES = [ - "cost", - "coverage", - "error", - "evaluations", - "expansions", - "expansions_until_last_jump", - "generated", - "memory", - "quality", - "run_dir", - "score_evaluations", - "score_expansions", - "score_generated", - "score_memory", - "score_search_time", - "score_total_time", - "search_time", - "total_time", - ] - - DEFAULT_SCATTER_PLOT_ATTRIBUTES = [ - "evaluations", - "expansions", - "expansions_until_last_jump", - "initial_h_value", - "memory", - "search_time", - "total_time", - ] - - PORTFOLIO_ATTRIBUTES = [ - "cost", - "coverage", - "plan_length", - ] - - def __init__(self, configs, revisions, suite, build_options=None, - driver_options=None, grid_priority=None, - test_suite=None, 
email=None, processes=1, **kwargs): - """Create an FastDownwardExperiment with some convenience features. - All configs will be run on all revisions. Inherited options - *path*, *environment* and *cache_dir* from FastDownwardExperiment - are not supported and will be automatically set. - - *configs* must be a non-empty dict of {nick: cmdline} pairs - that sets the planner configurations to test. nick will - automatically get the revision prepended, e.g. - 'issue123-base-':: - - IssueExperiment(configs={ - "lmcut": ["--search", "astar(lmcut())"], - "ipdb": ["--search", "astar(ipdb())"]}) - - *revisions* must be a non-empty list of revisions, which - specify which planner versions to use in the experiment. - The same versions are used for translator, preprocessor - and search. :: - - IssueExperiment(revisions=["issue123", "4b3d581643"]) - - *suite* sets the benchmarks for the experiment. It must be a - single string or a list of strings specifying domains or - tasks. The downward.suites module has many predefined - suites. :: - - IssueExperiment(suite=["grid", "gripper:prob01.pddl"]) - - from downward import suites - IssueExperiment(suite=suites.suite_all()) - IssueExperiment(suite=suites.suite_satisficing_with_ipc11()) - IssueExperiment(suite=suites.suite_optimal()) - - Use *grid_priority* to set the job priority for cluster - experiments. It must be in the range [-1023, 0] where 0 is the - highest priority. By default the priority is 0. :: - - IssueExperiment(grid_priority=-500) - - Specify *test_suite* to set the benchmarks for experiment test - runs. By default the first gripper task is used. 
- - IssueExperiment(test_suite=["depot:pfile1", "tpp:p01.pddl"]) - - """ - - if is_test_run(): - environment = LocalEnvironment(processes=processes) - suite = test_suite or self.DEFAULT_TEST_SUITE - elif "environment" not in kwargs: - environment = MaiaEnvironment(priority=grid_priority, - email=email) - - FastDownwardExperiment.__init__(self, environment=environment, - **kwargs) - - # Automatically deduce the downward repository from the file - repo = get_repo_base() - self.algorithm_nicks = [] - self.revisions = revisions - for nick, cmdline in configs.items(): - for rev in revisions: - algo_nick = '%s-%s' % (rev, nick) - self.add_algorithm(algo_nick, repo, rev, cmdline, - build_options, driver_options) - self.algorithm_nicks.append(algo_nick) - - benchmarks_dir = os.path.join(repo, 'benchmarks') - self.add_suite(benchmarks_dir, suite) - self.search_parsers = [] - - def add_absolute_report_step(self, **kwargs): - """Add step that makes an absolute report. - - Absolute reports are useful for experiments that don't - compare revisions. - - The report is written to the experiment evaluation directory. - - All *kwargs* will be passed to the AbsoluteReport class. If - the keyword argument *attributes* is not specified, a - default list of attributes is used. :: - - exp.add_absolute_report_step(attributes=["coverage"]) - - """ - kwargs.setdefault("attributes", self.DEFAULT_TABLE_ATTRIBUTES) - report = AbsoluteReport(**kwargs) - # oufile is of the form --...-. - outfile = '' - for rev in self.revisions: - outfile += rev - outfile += '-' - outfile = outfile[:len(outfile)-1] - outfile += '.' - outfile += report.output_format - outfile = os.path.join(self.eval_dir, outfile) - self.add_report(report, outfile=outfile) - self.add_step(Step('publish-absolute-report', subprocess.call, ['publish', outfile])) - - def add_comparison_table_step(self, **kwargs): - """Add a step that makes pairwise revision comparisons. 
- - Create comparative reports for all pairs of Fast Downward - revision triples. Each report pairs up the runs of the same - config and lists the two absolute attribute values and their - difference for all attributes in kwargs["attributes"]. - - All *kwargs* will be passed to the CompareRevisionsReport - class. If the keyword argument *attributes* is not - specified, a default list of attributes is used. :: - - exp.add_comparison_table_step(attributes=["coverage"]) - - """ - kwargs.setdefault("attributes", self.DEFAULT_TABLE_ATTRIBUTES) - - def make_comparison_tables(): - for rev1, rev2 in itertools.combinations(self.revisions, 2): - report = CompareRevisionsReport(rev1, rev2, **kwargs) - outfile = os.path.join(self.eval_dir, - "%s-%s-compare.html" % - (rev1, rev2)) - report(self.eval_dir, outfile) - - self.add_step(Step("make-comparison-tables", make_comparison_tables)) - - def publish_comparison_tables(): - for rev1, rev2 in itertools.combinations(self.revisions, 2): - outfile = os.path.join(self.eval_dir, - "%s-%s-compare.html" % - (rev1, rev2)) - subprocess.call(['publish', outfile]) - - self.add_step(Step('publish-comparison-reports', publish_comparison_tables)) - - # TODO: this is copied from the old common_setup, but not tested - # with the new FastDownwardExperiment class! - def add_scatter_plot_step(self, attributes=None): - print 'This has not been tested with the new FastDownwardExperiment class!' - exit(0) - """Add a step that creates scatter plots for all revision pairs. - - Create a scatter plot for each combination of attribute, - configuration and revision pair. If *attributes* is not - specified, a list of common scatter plot attributes is used. - For portfolios all attributes except "cost", "coverage" and - "plan_length" will be ignored. 
:: - - exp.add_scatter_plot_step(attributes=["expansions"]) - - """ - if attributes is None: - attributes = self.DEFAULT_SCATTER_PLOT_ATTRIBUTES - scatter_dir = os.path.join(self.eval_dir, "scatter") - - def is_portfolio(config_nick): - return "fdss" in config_nick - - def make_scatter_plot(config_nick, rev1, rev2, attribute): - name = "-".join([self.name, rev1, rev2, attribute, config_nick]) - print "Make scatter plot for", name - algo1 = "%s-%s" % (rev1, config_nick) - algo2 = "%s-%s" % (rev2, config_nick) - report = ScatterPlotReport( - filter_config=[algo1, algo2], - attributes=[attribute], - get_category=lambda run1, run2: run1["domain"], - legend_location=(1.3, 0.5)) - report(self.eval_dir, - os.path.join(scatter_dir, rev1 + "-" + rev2, name)) - - def make_scatter_plots(): - for config_nick in self._config_nicks: - if is_portfolio(config_nick): - valid_attributes = [ - attr for attr in attributes - if attr in self.PORTFOLIO_ATTRIBUTES] - else: - valid_attributes = attributes - for rev1, rev2 in itertools.combinations( - self.revision_nicks, 2): - for attribute in valid_attributes: - make_scatter_plot(config_nick, rev1, rev2, attribute) - - self.add_step(Step("make-scatter-plots", make_scatter_plots)) diff --git a/experiments/issue596/issue596-v1.py b/experiments/issue596/issue596-v1.py deleted file mode 100755 index 6e13d091f5..0000000000 --- a/experiments/issue596/issue596-v1.py +++ /dev/null @@ -1,6 +0,0 @@ -#! /usr/bin/env python -# -*- coding: utf-8 -*- - -from main import main - -main(revisions=["issue596-base", "issue596-v1"]) diff --git a/experiments/issue596/main.py b/experiments/issue596/main.py deleted file mode 100644 index a2ab217f2d..0000000000 --- a/experiments/issue596/main.py +++ /dev/null @@ -1,75 +0,0 @@ -#! 
/usr/bin/env python -# -*- coding: utf-8 -*- - -from downward import suites -from lab.reports import Attribute, gm - -import common_setup - - -def main(revisions=None): - SUITE = suites.suite_optimal_with_ipc11() - - B_CONFIGS = { - 'rl-b50k': ['--search', 'astar(merge_and_shrink(merge_strategy=merge_linear(variable_order=reverse_level),shrink_strategy=shrink_bisimulation(max_states=50000,threshold=1,greedy=false),label_reduction=label_reduction(before_shrinking=true,before_merging=false)))'], - 'cggl-b50k': ['--search', 'astar(merge_and_shrink(merge_strategy=merge_linear(variable_order=cg_goal_level),shrink_strategy=shrink_bisimulation(max_states=50000,threshold=1,greedy=false),label_reduction=label_reduction(before_shrinking=true,before_merging=false)))'], - 'dfp-b50k': ['--search', 'astar(merge_and_shrink(merge_strategy=merge_dfp,shrink_strategy=shrink_bisimulation(max_states=50000,threshold=1,greedy=false),label_reduction=label_reduction(before_shrinking=true,before_merging=false)))'], - } - G_CONFIGS = { - 'rl-ginf': ['--search', 'astar(merge_and_shrink(merge_strategy=merge_linear(variable_order=reverse_level),shrink_strategy=shrink_bisimulation(max_states=infinity,threshold=1,greedy=true),label_reduction=label_reduction(before_shrinking=true,before_merging=false)))'], - 'cggl-ginf': ['--search', 'astar(merge_and_shrink(merge_strategy=merge_linear(variable_order=cg_goal_level),shrink_strategy=shrink_bisimulation(max_states=infinity,threshold=1,greedy=true),label_reduction=label_reduction(before_shrinking=true,before_merging=false)))'], - 'dfp-ginf': ['--search', 'astar(merge_and_shrink(merge_strategy=merge_dfp,shrink_strategy=shrink_bisimulation(max_states=infinity,threshold=1,greedy=true),label_reduction=label_reduction(before_shrinking=true,before_merging=false)))'], - } - F_CONFIGS = { - 'rl-f50k': ['--search', 
'astar(merge_and_shrink(merge_strategy=merge_linear(variable_order=reverse_level),shrink_strategy=shrink_fh(max_states=50000),label_reduction=label_reduction(before_shrinking=false,before_merging=true)))'], - 'cggl-f50k': ['--search', 'astar(merge_and_shrink(merge_strategy=merge_linear(variable_order=cg_goal_level),shrink_strategy=shrink_fh(max_states=50000),label_reduction=label_reduction(before_shrinking=false,before_merging=true)))'], - 'dfp-f50k': ['--search', 'astar(merge_and_shrink(merge_strategy=merge_dfp,shrink_strategy=shrink_fh(max_states=50000),label_reduction=label_reduction(before_shrinking=false,before_merging=true)))'], - } - CONFIGS = dict(B_CONFIGS) - CONFIGS.update(G_CONFIGS) - CONFIGS.update(F_CONFIGS) - - exp = common_setup.IssueExperiment( - revisions=revisions, - configs=CONFIGS, - suite=SUITE, - test_suite=['depot:pfile1'], - processes=4, - email='silvan.sievers@unibas.ch', - ) - exp.add_resource('ms_parser', 'ms-parser.py', dest='ms-parser.py') - exp.add_command('ms-parser', ['ms_parser']) - - # planner outcome attributes - perfect_heuristic = Attribute('perfect_heuristic', absolute=True, min_wins=False) - proved_unsolvability = Attribute('proved_unsolvability', absolute=True, min_wins=False) - actual_search_time = Attribute('actual_search_time', absolute=False, min_wins=True, functions=[gm]) - - # m&s attributes - ms_construction_time = Attribute('ms_construction_time', absolute=False, min_wins=True, functions=[gm]) - ms_abstraction_constructed = Attribute('ms_abstraction_constructed', absolute=True, min_wins=False) - ms_final_size = Attribute('ms_final_size', absolute=False, min_wins=True) - ms_out_of_memory = Attribute('ms_out_of_memory', absolute=True, min_wins=True) - ms_out_of_time = Attribute('ms_out_of_time', absolute=True, min_wins=True) - search_out_of_memory = Attribute('search_out_of_memory', absolute=True, min_wins=True) - search_out_of_time = Attribute('search_out_of_time', absolute=True, min_wins=True) - - extra_attributes = [ 
- perfect_heuristic, - proved_unsolvability, - actual_search_time, - - ms_construction_time, - ms_abstraction_constructed, - ms_final_size, - ms_out_of_memory, - ms_out_of_time, - search_out_of_memory, - search_out_of_time, - ] - attributes = exp.DEFAULT_TABLE_ATTRIBUTES - attributes.extend(extra_attributes) - - exp.add_comparison_table_step(attributes=attributes) - - exp() diff --git a/experiments/issue596/ms-parser.py b/experiments/issue596/ms-parser.py deleted file mode 100755 index 4d8e840ae2..0000000000 --- a/experiments/issue596/ms-parser.py +++ /dev/null @@ -1,71 +0,0 @@ -#! /usr/bin/env python - -from lab.parser import Parser - -parser = Parser() -parser.add_pattern('actual_search_time', 'Actual search time: (.+)s \[.+s\]', required=False, type=float) -parser.add_pattern('ms_final_size', 'Final transition system size: (\d+)', required=False, type=int) -parser.add_pattern('ms_construction_time', 'Done initializing merge-and-shrink heuristic \[(.+)s\]', required=False, type=float) - -def check_ms_constructed(content, props): - ms_construction_time = props.get('ms_construction_time') - abstraction_constructed = False - if ms_construction_time is not None: - abstraction_constructed = True - props['ms_abstraction_constructed'] = abstraction_constructed - -parser.add_function(check_ms_constructed) - -def check_proved_unsolvability(content, props): - proved_unsolvability = False - if props['coverage'] == 0: - for line in content.splitlines(): - if line == 'Completely explored state space -- no solution!': - proved_unsolvability = True - break - props['proved_unsolvability'] = proved_unsolvability - -parser.add_function(check_proved_unsolvability) - -def check_planner_exit_reason(content, props): - ms_abstraction_constructed = props.get('ms_abstraction_constructed') - error = props.get('error') - if error != 'none' and error != 'timeout' and error != 'out-of-memory': - print 'error: %s' % error - return - - # Check whether merge-and-shrink computation or search ran 
out of - # time or memory. - ms_out_of_time = False - ms_out_of_memory = False - search_out_of_time = False - search_out_of_memory = False - if ms_abstraction_constructed == False: - if error == 'timeout': - ms_out_of_time = True - elif error == 'out-of-memory': - ms_out_of_memory = True - elif ms_abstraction_constructed == True: - if error == 'timeout': - search_out_of_time = True - elif error == 'out-of-memory': - search_out_of_memory = True - props['ms_out_of_time'] = ms_out_of_time - props['ms_out_of_memory'] = ms_out_of_memory - props['search_out_of_time'] = search_out_of_time - props['search_out_of_memory'] = search_out_of_memory - -parser.add_function(check_planner_exit_reason) - -def check_perfect_heuristic(content, props): - plan_length = props.get('plan_length') - expansions = props.get('expansions') - if plan_length != None: - perfect_heuristic = False - if plan_length + 1 == expansions: - perfect_heuristic = True - props['perfect_heuristic'] = perfect_heuristic - -parser.add_function(check_perfect_heuristic) - -parser.parse() diff --git a/experiments/issue601/common_setup.py b/experiments/issue601/common_setup.py deleted file mode 100644 index c628c8b6c5..0000000000 --- a/experiments/issue601/common_setup.py +++ /dev/null @@ -1,300 +0,0 @@ -# -*- coding: utf-8 -*- - -import itertools -import os -import platform -import subprocess -import sys - -from lab.environments import LocalEnvironment, MaiaEnvironment -from lab.experiment import ARGPARSER -from lab.steps import Step - -from downward.experiments.fast_downward_experiment import FastDownwardExperiment -from downward.reports.absolute import AbsoluteReport -from downward.reports.compare import CompareRevisionsReport -from downward.reports.scatter import ScatterPlotReport - -def parse_args(): - ARGPARSER.add_argument( - "--test", - choices=["yes", "no", "auto"], - default="auto", - dest="test_run", - help="test experiment locally on a small suite if --test=yes or " - "--test=auto and we are not on a 
cluster") - return ARGPARSER.parse_args() - -ARGS = parse_args() - -def get_script(): - """Get file name of main script.""" - import __main__ - return __main__.__file__ - - -def get_script_dir(): - """Get directory of main script. - - Usually a relative directory (depends on how it was called by the user.)""" - return os.path.dirname(get_script()) - - -def get_repo_base(): - """Get base directory of the repository, as an absolute path. - - Search upwards in the directory tree from the main script until a - directory with a subdirectory named ".hg" is found. - - Abort if the repo base cannot be found.""" - path = os.path.abspath(get_script_dir()) - while os.path.dirname(path) != path: - if os.path.exists(os.path.join(path, ".hg")): - return path - path = os.path.dirname(path) - sys.exit("repo base could not be found") - - -def is_running_on_cluster(): - node = platform.node() - return ("cluster" in node or - node.startswith("gkigrid") or - node in ["habakuk", "turtur"]) - - -def is_test_run(): - return ARGS.test_run == "yes" or (ARGS.test_run == "auto" and - not is_running_on_cluster()) - - -class IssueExperiment(FastDownwardExperiment): - """Wrapper for FastDownwardExperiment with a few convenience features.""" - - DEFAULT_TEST_SUITE = "gripper:prob01.pddl" - - DEFAULT_TABLE_ATTRIBUTES = [ - "cost", - "coverage", - "error", - "evaluations", - "expansions", - "expansions_until_last_jump", - "generated", - "memory", - "quality", - "run_dir", - "score_evaluations", - "score_expansions", - "score_generated", - "score_memory", - "score_search_time", - "score_total_time", - "search_time", - "total_time", - ] - - DEFAULT_SCATTER_PLOT_ATTRIBUTES = [ - "evaluations", - "expansions", - "expansions_until_last_jump", - "initial_h_value", - "memory", - "search_time", - "total_time", - ] - - PORTFOLIO_ATTRIBUTES = [ - "cost", - "coverage", - "plan_length", - ] - - def __init__(self, configs, revisions, suite, build_options=None, - driver_options=None, grid_priority=None, - 
test_suite=None, email=None, processes=1, **kwargs): - """Create an FastDownwardExperiment with some convenience features. - All configs will be run on all revisions. Inherited options - *path*, *environment* and *cache_dir* from FastDownwardExperiment - are not supported and will be automatically set. - - *configs* must be a non-empty dict of {nick: cmdline} pairs - that sets the planner configurations to test. nick will - automatically get the revision prepended, e.g. - 'issue123-base-':: - - IssueExperiment(configs={ - "lmcut": ["--search", "astar(lmcut())"], - "ipdb": ["--search", "astar(ipdb())"]}) - - *revisions* must be a non-empty list of revisions, which - specify which planner versions to use in the experiment. - The same versions are used for translator, preprocessor - and search. :: - - IssueExperiment(revisions=["issue123", "4b3d581643"]) - - *suite* sets the benchmarks for the experiment. It must be a - single string or a list of strings specifying domains or - tasks. The downward.suites module has many predefined - suites. :: - - IssueExperiment(suite=["grid", "gripper:prob01.pddl"]) - - from downward import suites - IssueExperiment(suite=suites.suite_all()) - IssueExperiment(suite=suites.suite_satisficing_with_ipc11()) - IssueExperiment(suite=suites.suite_optimal()) - - Use *grid_priority* to set the job priority for cluster - experiments. It must be in the range [-1023, 0] where 0 is the - highest priority. By default the priority is 0. :: - - IssueExperiment(grid_priority=-500) - - Specify *test_suite* to set the benchmarks for experiment test - runs. By default the first gripper task is used. 
- - IssueExperiment(test_suite=["depot:pfile1", "tpp:p01.pddl"]) - - """ - - if is_test_run(): - environment = LocalEnvironment(processes=processes) - suite = test_suite or self.DEFAULT_TEST_SUITE - elif "environment" not in kwargs: - environment = MaiaEnvironment(priority=grid_priority, - email=email) - - FastDownwardExperiment.__init__(self, environment=environment, - **kwargs) - - # Automatically deduce the downward repository from the file - repo = get_repo_base() - self.algorithm_nicks = [] - self.revisions = revisions - for nick, cmdline in configs.items(): - for rev in revisions: - algo_nick = '%s-%s' % (rev, nick) - self.add_algorithm(algo_nick, repo, rev, cmdline, - build_options, driver_options) - self.algorithm_nicks.append(algo_nick) - - benchmarks_dir = os.path.join(repo, 'benchmarks') - self.add_suite(benchmarks_dir, suite) - self.search_parsers = [] - - def add_absolute_report_step(self, **kwargs): - """Add step that makes an absolute report. - - Absolute reports are useful for experiments that don't - compare revisions. - - The report is written to the experiment evaluation directory. - - All *kwargs* will be passed to the AbsoluteReport class. If - the keyword argument *attributes* is not specified, a - default list of attributes is used. :: - - exp.add_absolute_report_step(attributes=["coverage"]) - - """ - kwargs.setdefault("attributes", self.DEFAULT_TABLE_ATTRIBUTES) - report = AbsoluteReport(**kwargs) - # oufile is of the form --...-. - outfile = '' - for rev in self.revisions: - outfile += rev - outfile += '-' - outfile = outfile[:len(outfile)-1] - outfile += '.' - outfile += report.output_format - outfile = os.path.join(self.eval_dir, outfile) - self.add_report(report, outfile=outfile) - self.add_step(Step('publish-absolute-report', subprocess.call, ['publish', outfile])) - - def add_comparison_table_step(self, **kwargs): - """Add a step that makes pairwise revision comparisons. 
- - Create comparative reports for all pairs of Fast Downward - revision triples. Each report pairs up the runs of the same - config and lists the two absolute attribute values and their - difference for all attributes in kwargs["attributes"]. - - All *kwargs* will be passed to the CompareRevisionsReport - class. If the keyword argument *attributes* is not - specified, a default list of attributes is used. :: - - exp.add_comparison_table_step(attributes=["coverage"]) - - """ - kwargs.setdefault("attributes", self.DEFAULT_TABLE_ATTRIBUTES) - - def make_comparison_tables(): - for rev1, rev2 in itertools.combinations(self.revisions, 2): - report = CompareRevisionsReport(rev1, rev2, **kwargs) - outfile = os.path.join(self.eval_dir, - "%s-%s-compare.html" % - (rev1, rev2)) - report(self.eval_dir, outfile) - - self.add_step(Step("make-comparison-tables", make_comparison_tables)) - - def publish_comparison_tables(): - for rev1, rev2 in itertools.combinations(self.revisions, 2): - outfile = os.path.join(self.eval_dir, - "%s-%s-compare.html" % - (rev1, rev2)) - subprocess.call(['publish', outfile]) - - self.add_step(Step('publish-comparison-reports', publish_comparison_tables)) - - # TODO: this is copied from the old common_setup, but not tested - # with the new FastDownwardExperiment class! - def add_scatter_plot_step(self, attributes=None): - print 'This has not been tested with the new FastDownwardExperiment class!' - exit(0) - """Add a step that creates scatter plots for all revision pairs. - - Create a scatter plot for each combination of attribute, - configuration and revision pair. If *attributes* is not - specified, a list of common scatter plot attributes is used. - For portfolios all attributes except "cost", "coverage" and - "plan_length" will be ignored. 
:: - - exp.add_scatter_plot_step(attributes=["expansions"]) - - """ - if attributes is None: - attributes = self.DEFAULT_SCATTER_PLOT_ATTRIBUTES - scatter_dir = os.path.join(self.eval_dir, "scatter") - - def is_portfolio(config_nick): - return "fdss" in config_nick - - def make_scatter_plot(config_nick, rev1, rev2, attribute): - name = "-".join([self.name, rev1, rev2, attribute, config_nick]) - print "Make scatter plot for", name - algo1 = "%s-%s" % (rev1, config_nick) - algo2 = "%s-%s" % (rev2, config_nick) - report = ScatterPlotReport( - filter_config=[algo1, algo2], - attributes=[attribute], - get_category=lambda run1, run2: run1["domain"], - legend_location=(1.3, 0.5)) - report(self.eval_dir, - os.path.join(scatter_dir, rev1 + "-" + rev2, name)) - - def make_scatter_plots(): - for config_nick in self._config_nicks: - if is_portfolio(config_nick): - valid_attributes = [ - attr for attr in attributes - if attr in self.PORTFOLIO_ATTRIBUTES] - else: - valid_attributes = attributes - for rev1, rev2 in itertools.combinations( - self.revision_nicks, 2): - for attribute in valid_attributes: - make_scatter_plot(config_nick, rev1, rev2, attribute) - - self.add_step(Step("make-scatter-plots", make_scatter_plots)) diff --git a/experiments/issue601/issue601-base.py b/experiments/issue601/issue601-base.py deleted file mode 100755 index 8550763283..0000000000 --- a/experiments/issue601/issue601-base.py +++ /dev/null @@ -1,74 +0,0 @@ -#! 
/usr/bin/env python -# -*- coding: utf-8 -*- - -from downward import suites -from lab.reports import Attribute, gm - -import common_setup - -def main(revisions=None): - SUITE = suites.suite_optimal_with_ipc11() - - B_CONFIGS = { - 'rl-b50k': ['--search', 'astar(merge_and_shrink(merge_strategy=merge_linear(variable_order=reverse_level),shrink_strategy=shrink_bisimulation(max_states=50000,threshold=1,greedy=false),label_reduction=label_reduction(before_shrinking=true,before_merging=false)))'], - 'cggl-b50k': ['--search', 'astar(merge_and_shrink(merge_strategy=merge_linear(variable_order=cg_goal_level),shrink_strategy=shrink_bisimulation(max_states=50000,threshold=1,greedy=false),label_reduction=label_reduction(before_shrinking=true,before_merging=false)))'], - 'dfp-b50k': ['--search', 'astar(merge_and_shrink(merge_strategy=merge_dfp,shrink_strategy=shrink_bisimulation(max_states=50000,threshold=1,greedy=false),label_reduction=label_reduction(before_shrinking=true,before_merging=false)))'], - } - G_CONFIGS = { - 'rl-ginf': ['--search', 'astar(merge_and_shrink(merge_strategy=merge_linear(variable_order=reverse_level),shrink_strategy=shrink_bisimulation(max_states=infinity,threshold=1,greedy=true),label_reduction=label_reduction(before_shrinking=true,before_merging=false)))'], - 'cggl-ginf': ['--search', 'astar(merge_and_shrink(merge_strategy=merge_linear(variable_order=cg_goal_level),shrink_strategy=shrink_bisimulation(max_states=infinity,threshold=1,greedy=true),label_reduction=label_reduction(before_shrinking=true,before_merging=false)))'], - 'dfp-ginf': ['--search', 'astar(merge_and_shrink(merge_strategy=merge_dfp,shrink_strategy=shrink_bisimulation(max_states=infinity,threshold=1,greedy=true),label_reduction=label_reduction(before_shrinking=true,before_merging=false)))'], - } - F_CONFIGS = { - 'rl-f50k': ['--search', 
'astar(merge_and_shrink(merge_strategy=merge_linear(variable_order=reverse_level),shrink_strategy=shrink_fh(max_states=50000),label_reduction=label_reduction(before_shrinking=false,before_merging=true)))'], - 'cggl-f50k': ['--search', 'astar(merge_and_shrink(merge_strategy=merge_linear(variable_order=cg_goal_level),shrink_strategy=shrink_fh(max_states=50000),label_reduction=label_reduction(before_shrinking=false,before_merging=true)))'], - 'dfp-f50k': ['--search', 'astar(merge_and_shrink(merge_strategy=merge_dfp,shrink_strategy=shrink_fh(max_states=50000),label_reduction=label_reduction(before_shrinking=false,before_merging=true)))'], - } - CONFIGS = dict(B_CONFIGS) - CONFIGS.update(G_CONFIGS) - CONFIGS.update(F_CONFIGS) - - exp = common_setup.IssueExperiment( - revisions=revisions, - configs=CONFIGS, - suite=SUITE, - test_suite=['depot:pfile1'], - processes=4, - email='silvan.sievers@unibas.ch', - ) - exp.add_resource('ms_parser', 'ms-parser.py', dest='ms-parser.py') - exp.add_command('ms-parser', ['ms_parser']) - - # planner outcome attributes - perfect_heuristic = Attribute('perfect_heuristic', absolute=True, min_wins=False) - proved_unsolvability = Attribute('proved_unsolvability', absolute=True, min_wins=False) - actual_search_time = Attribute('actual_search_time', absolute=False, min_wins=True, functions=[gm]) - - # m&s attributes - ms_construction_time = Attribute('ms_construction_time', absolute=False, min_wins=True, functions=[gm]) - ms_abstraction_constructed = Attribute('ms_abstraction_constructed', absolute=True, min_wins=False) - ms_final_size = Attribute('ms_final_size', absolute=False, min_wins=True) - ms_out_of_memory = Attribute('ms_out_of_memory', absolute=True, min_wins=True) - ms_out_of_time = Attribute('ms_out_of_time', absolute=True, min_wins=True) - search_out_of_memory = Attribute('search_out_of_memory', absolute=True, min_wins=True) - search_out_of_time = Attribute('search_out_of_time', absolute=True, min_wins=True) - - extra_attributes = [ 
- perfect_heuristic, - proved_unsolvability, - actual_search_time, - - ms_construction_time, - ms_abstraction_constructed, - ms_final_size, - ms_out_of_memory, - ms_out_of_time, - search_out_of_memory, - search_out_of_time, - ] - attributes = exp.DEFAULT_TABLE_ATTRIBUTES - attributes.extend(extra_attributes) - - exp() - -main(revisions=["issue601-base"]) diff --git a/experiments/issue601/issue601-v1.py b/experiments/issue601/issue601-v1.py deleted file mode 100755 index b4f643bee4..0000000000 --- a/experiments/issue601/issue601-v1.py +++ /dev/null @@ -1,88 +0,0 @@ -#! /usr/bin/env python -# -*- coding: utf-8 -*- - -from downward import suites -from lab.reports import Attribute, gm -from downward.reports.compare import CompareConfigsReport - -import common_setup - -def main(revisions=None): - SUITE = suites.suite_optimal_with_ipc11() - - B_CONFIGS = { - 'rl-b50k': ['--search', 'astar(merge_and_shrink(merge_strategy=merge_linear(variable_order=reverse_level),shrink_strategy=shrink_bisimulation(max_states=50000,threshold=1,greedy=false),label_reduction=exact(before_shrinking=true,before_merging=false)))'], - 'cggl-b50k': ['--search', 'astar(merge_and_shrink(merge_strategy=merge_linear(variable_order=cg_goal_level),shrink_strategy=shrink_bisimulation(max_states=50000,threshold=1,greedy=false),label_reduction=exact(before_shrinking=true,before_merging=false)))'], - 'dfp-b50k': ['--search', 'astar(merge_and_shrink(merge_strategy=merge_dfp,shrink_strategy=shrink_bisimulation(max_states=50000,threshold=1,greedy=false),label_reduction=exact(before_shrinking=true,before_merging=false)))'], - } - G_CONFIGS = { - 'rl-ginf': ['--search', 'astar(merge_and_shrink(merge_strategy=merge_linear(variable_order=reverse_level),shrink_strategy=shrink_bisimulation(max_states=infinity,threshold=1,greedy=true),label_reduction=exact(before_shrinking=true,before_merging=false)))'], - 'cggl-ginf': ['--search', 
'astar(merge_and_shrink(merge_strategy=merge_linear(variable_order=cg_goal_level),shrink_strategy=shrink_bisimulation(max_states=infinity,threshold=1,greedy=true),label_reduction=exact(before_shrinking=true,before_merging=false)))'], - 'dfp-ginf': ['--search', 'astar(merge_and_shrink(merge_strategy=merge_dfp,shrink_strategy=shrink_bisimulation(max_states=infinity,threshold=1,greedy=true),label_reduction=exact(before_shrinking=true,before_merging=false)))'], - } - F_CONFIGS = { - 'rl-f50k': ['--search', 'astar(merge_and_shrink(merge_strategy=merge_linear(variable_order=reverse_level),shrink_strategy=shrink_fh(max_states=50000),label_reduction=exact(before_shrinking=false,before_merging=true)))'], - 'cggl-f50k': ['--search', 'astar(merge_and_shrink(merge_strategy=merge_linear(variable_order=cg_goal_level),shrink_strategy=shrink_fh(max_states=50000),label_reduction=exact(before_shrinking=false,before_merging=true)))'], - 'dfp-f50k': ['--search', 'astar(merge_and_shrink(merge_strategy=merge_dfp,shrink_strategy=shrink_fh(max_states=50000),label_reduction=exact(before_shrinking=false,before_merging=true)))'], - } - CONFIGS = dict(B_CONFIGS) - CONFIGS.update(G_CONFIGS) - CONFIGS.update(F_CONFIGS) - - exp = common_setup.IssueExperiment( - revisions=revisions, - configs=CONFIGS, - suite=SUITE, - test_suite=['depot:pfile1'], - processes=4, - email='silvan.sievers@unibas.ch', - ) - exp.add_resource('ms_parser', 'ms-parser.py', dest='ms-parser.py') - exp.add_command('ms-parser', ['ms_parser']) - - # planner outcome attributes - perfect_heuristic = Attribute('perfect_heuristic', absolute=True, min_wins=False) - proved_unsolvability = Attribute('proved_unsolvability', absolute=True, min_wins=False) - actual_search_time = Attribute('actual_search_time', absolute=False, min_wins=True, functions=[gm]) - - # m&s attributes - ms_construction_time = Attribute('ms_construction_time', absolute=False, min_wins=True, functions=[gm]) - ms_abstraction_constructed = 
Attribute('ms_abstraction_constructed', absolute=True, min_wins=False) - ms_final_size = Attribute('ms_final_size', absolute=False, min_wins=True) - ms_out_of_memory = Attribute('ms_out_of_memory', absolute=True, min_wins=True) - ms_out_of_time = Attribute('ms_out_of_time', absolute=True, min_wins=True) - search_out_of_memory = Attribute('search_out_of_memory', absolute=True, min_wins=True) - search_out_of_time = Attribute('search_out_of_time', absolute=True, min_wins=True) - - extra_attributes = [ - perfect_heuristic, - proved_unsolvability, - actual_search_time, - - ms_construction_time, - ms_abstraction_constructed, - ms_final_size, - ms_out_of_memory, - ms_out_of_time, - search_out_of_memory, - search_out_of_time, - ] - attributes = exp.DEFAULT_TABLE_ATTRIBUTES - attributes.extend(extra_attributes) - - exp.add_fetcher('data/issue601-base-eval') - exp.add_report(CompareConfigsReport(compared_configs=[ - ('issue601-base-rl-b50k', 'issue601-v1-rl-b50k'), - ('issue601-base-cggl-b50k', 'issue601-v1-cggl-b50k'), - ('issue601-base-dfp-b50k', 'issue601-v1-dfp-b50k'), - ('issue601-base-rl-ginf', 'issue601-v1-rl-ginf'), - ('issue601-base-cggl-ginf', 'issue601-v1-cggl-ginf'), - ('issue601-base-dfp-ginf', 'issue601-v1-dfp-ginf'), - ('issue601-base-rl-f50k', 'issue601-v1-rl-f50k'), - ('issue601-base-cggl-f50k', 'issue601-v1-cggl-f50k'), - ('issue601-base-dfp-f50k', 'issue601-v1-dfp-f50k'), - ],attributes=attributes)) - - exp() - -main(revisions=["issue601-v1"]) diff --git a/experiments/issue601/issue601-v2.py b/experiments/issue601/issue601-v2.py deleted file mode 100755 index 984a2eae2d..0000000000 --- a/experiments/issue601/issue601-v2.py +++ /dev/null @@ -1,76 +0,0 @@ -#! 
/usr/bin/env python -# -*- coding: utf-8 -*- - -from downward import suites -from lab.reports import Attribute, gm - -import common_setup - -def main(revisions=None): - SUITE = suites.suite_optimal_with_ipc11() - - B_CONFIGS = { - 'rl-b50k': ['--search', 'astar(merge_and_shrink(merge_strategy=merge_linear(variable_order=reverse_level),shrink_strategy=shrink_bisimulation(max_states=50000,threshold=1,greedy=false),label_reduction=exact(before_shrinking=true,before_merging=false)))'], - 'cggl-b50k': ['--search', 'astar(merge_and_shrink(merge_strategy=merge_linear(variable_order=cg_goal_level),shrink_strategy=shrink_bisimulation(max_states=50000,threshold=1,greedy=false),label_reduction=exact(before_shrinking=true,before_merging=false)))'], - 'dfp-b50k': ['--search', 'astar(merge_and_shrink(merge_strategy=merge_dfp,shrink_strategy=shrink_bisimulation(max_states=50000,threshold=1,greedy=false),label_reduction=exact(before_shrinking=true,before_merging=false)))'], - } - G_CONFIGS = { - 'rl-ginf': ['--search', 'astar(merge_and_shrink(merge_strategy=merge_linear(variable_order=reverse_level),shrink_strategy=shrink_bisimulation(max_states=infinity,threshold=1,greedy=true),label_reduction=exact(before_shrinking=true,before_merging=false)))'], - 'cggl-ginf': ['--search', 'astar(merge_and_shrink(merge_strategy=merge_linear(variable_order=cg_goal_level),shrink_strategy=shrink_bisimulation(max_states=infinity,threshold=1,greedy=true),label_reduction=exact(before_shrinking=true,before_merging=false)))'], - 'dfp-ginf': ['--search', 'astar(merge_and_shrink(merge_strategy=merge_dfp,shrink_strategy=shrink_bisimulation(max_states=infinity,threshold=1,greedy=true),label_reduction=exact(before_shrinking=true,before_merging=false)))'], - } - F_CONFIGS = { - 'rl-f50k': ['--search', 'astar(merge_and_shrink(merge_strategy=merge_linear(variable_order=reverse_level),shrink_strategy=shrink_fh(max_states=50000),label_reduction=exact(before_shrinking=false,before_merging=true)))'], - 
'cggl-f50k': ['--search', 'astar(merge_and_shrink(merge_strategy=merge_linear(variable_order=cg_goal_level),shrink_strategy=shrink_fh(max_states=50000),label_reduction=exact(before_shrinking=false,before_merging=true)))'], - 'dfp-f50k': ['--search', 'astar(merge_and_shrink(merge_strategy=merge_dfp,shrink_strategy=shrink_fh(max_states=50000),label_reduction=exact(before_shrinking=false,before_merging=true)))'], - } - CONFIGS = dict(B_CONFIGS) - CONFIGS.update(G_CONFIGS) - CONFIGS.update(F_CONFIGS) - - exp = common_setup.IssueExperiment( - revisions=revisions, - configs=CONFIGS, - suite=SUITE, - test_suite=['depot:pfile1'], - processes=4, - email='silvan.sievers@unibas.ch', - ) - exp.add_resource('ms_parser', 'ms-parser.py', dest='ms-parser.py') - exp.add_command('ms-parser', ['ms_parser']) - - # planner outcome attributes - perfect_heuristic = Attribute('perfect_heuristic', absolute=True, min_wins=False) - proved_unsolvability = Attribute('proved_unsolvability', absolute=True, min_wins=False) - actual_search_time = Attribute('actual_search_time', absolute=False, min_wins=True, functions=[gm]) - - # m&s attributes - ms_construction_time = Attribute('ms_construction_time', absolute=False, min_wins=True, functions=[gm]) - ms_abstraction_constructed = Attribute('ms_abstraction_constructed', absolute=True, min_wins=False) - ms_final_size = Attribute('ms_final_size', absolute=False, min_wins=True) - ms_out_of_memory = Attribute('ms_out_of_memory', absolute=True, min_wins=True) - ms_out_of_time = Attribute('ms_out_of_time', absolute=True, min_wins=True) - search_out_of_memory = Attribute('search_out_of_memory', absolute=True, min_wins=True) - search_out_of_time = Attribute('search_out_of_time', absolute=True, min_wins=True) - - extra_attributes = [ - perfect_heuristic, - proved_unsolvability, - actual_search_time, - - ms_construction_time, - ms_abstraction_constructed, - ms_final_size, - ms_out_of_memory, - ms_out_of_time, - search_out_of_memory, - search_out_of_time, - ] 
- attributes = exp.DEFAULT_TABLE_ATTRIBUTES - attributes.extend(extra_attributes) - - exp.add_comparison_table_step() - - exp() - -main(revisions=["issue601-v1", "issue601-v2"]) diff --git a/experiments/issue601/issue601-v3.py b/experiments/issue601/issue601-v3.py deleted file mode 100755 index 067be7245c..0000000000 --- a/experiments/issue601/issue601-v3.py +++ /dev/null @@ -1,76 +0,0 @@ -#! /usr/bin/env python -# -*- coding: utf-8 -*- - -from downward import suites -from lab.reports import Attribute, gm - -import common_setup - -def main(revisions=None): - SUITE = suites.suite_optimal_with_ipc11() - - B_CONFIGS = { - 'rl-b50k': ['--search', 'astar(merge_and_shrink(merge_strategy=merge_linear(variable_order=reverse_level),shrink_strategy=shrink_bisimulation(max_states=50000,threshold=1,greedy=false),label_reduction=exact(before_shrinking=true,before_merging=false)))'], - 'cggl-b50k': ['--search', 'astar(merge_and_shrink(merge_strategy=merge_linear(variable_order=cg_goal_level),shrink_strategy=shrink_bisimulation(max_states=50000,threshold=1,greedy=false),label_reduction=exact(before_shrinking=true,before_merging=false)))'], - 'dfp-b50k': ['--search', 'astar(merge_and_shrink(merge_strategy=merge_dfp,shrink_strategy=shrink_bisimulation(max_states=50000,threshold=1,greedy=false),label_reduction=exact(before_shrinking=true,before_merging=false)))'], - } - G_CONFIGS = { - 'rl-ginf': ['--search', 'astar(merge_and_shrink(merge_strategy=merge_linear(variable_order=reverse_level),shrink_strategy=shrink_bisimulation(max_states=infinity,threshold=1,greedy=true),label_reduction=exact(before_shrinking=true,before_merging=false)))'], - 'cggl-ginf': ['--search', 'astar(merge_and_shrink(merge_strategy=merge_linear(variable_order=cg_goal_level),shrink_strategy=shrink_bisimulation(max_states=infinity,threshold=1,greedy=true),label_reduction=exact(before_shrinking=true,before_merging=false)))'], - 'dfp-ginf': ['--search', 
'astar(merge_and_shrink(merge_strategy=merge_dfp,shrink_strategy=shrink_bisimulation(max_states=infinity,threshold=1,greedy=true),label_reduction=exact(before_shrinking=true,before_merging=false)))'], - } - F_CONFIGS = { - 'rl-f50k': ['--search', 'astar(merge_and_shrink(merge_strategy=merge_linear(variable_order=reverse_level),shrink_strategy=shrink_fh(max_states=50000),label_reduction=exact(before_shrinking=false,before_merging=true)))'], - 'cggl-f50k': ['--search', 'astar(merge_and_shrink(merge_strategy=merge_linear(variable_order=cg_goal_level),shrink_strategy=shrink_fh(max_states=50000),label_reduction=exact(before_shrinking=false,before_merging=true)))'], - 'dfp-f50k': ['--search', 'astar(merge_and_shrink(merge_strategy=merge_dfp,shrink_strategy=shrink_fh(max_states=50000),label_reduction=exact(before_shrinking=false,before_merging=true)))'], - } - CONFIGS = dict(B_CONFIGS) - CONFIGS.update(G_CONFIGS) - CONFIGS.update(F_CONFIGS) - - exp = common_setup.IssueExperiment( - revisions=revisions, - configs=CONFIGS, - suite=SUITE, - test_suite=['depot:pfile1'], - processes=4, - email='silvan.sievers@unibas.ch', - ) - exp.add_resource('ms_parser', 'ms-parser.py', dest='ms-parser.py') - exp.add_command('ms-parser', ['ms_parser']) - - # planner outcome attributes - perfect_heuristic = Attribute('perfect_heuristic', absolute=True, min_wins=False) - proved_unsolvability = Attribute('proved_unsolvability', absolute=True, min_wins=False) - actual_search_time = Attribute('actual_search_time', absolute=False, min_wins=True, functions=[gm]) - - # m&s attributes - ms_construction_time = Attribute('ms_construction_time', absolute=False, min_wins=True, functions=[gm]) - ms_abstraction_constructed = Attribute('ms_abstraction_constructed', absolute=True, min_wins=False) - ms_final_size = Attribute('ms_final_size', absolute=False, min_wins=True) - ms_out_of_memory = Attribute('ms_out_of_memory', absolute=True, min_wins=True) - ms_out_of_time = Attribute('ms_out_of_time', 
absolute=True, min_wins=True) - search_out_of_memory = Attribute('search_out_of_memory', absolute=True, min_wins=True) - search_out_of_time = Attribute('search_out_of_time', absolute=True, min_wins=True) - - extra_attributes = [ - perfect_heuristic, - proved_unsolvability, - actual_search_time, - - ms_construction_time, - ms_abstraction_constructed, - ms_final_size, - ms_out_of_memory, - ms_out_of_time, - search_out_of_memory, - search_out_of_time, - ] - attributes = exp.DEFAULT_TABLE_ATTRIBUTES - attributes.extend(extra_attributes) - - exp.add_comparison_table_step() - - exp() - -main(revisions=["issue601-v2", "issue601-v3"]) diff --git a/experiments/issue601/ms-parser.py b/experiments/issue601/ms-parser.py deleted file mode 100755 index 4d8e840ae2..0000000000 --- a/experiments/issue601/ms-parser.py +++ /dev/null @@ -1,71 +0,0 @@ -#! /usr/bin/env python - -from lab.parser import Parser - -parser = Parser() -parser.add_pattern('actual_search_time', 'Actual search time: (.+)s \[.+s\]', required=False, type=float) -parser.add_pattern('ms_final_size', 'Final transition system size: (\d+)', required=False, type=int) -parser.add_pattern('ms_construction_time', 'Done initializing merge-and-shrink heuristic \[(.+)s\]', required=False, type=float) - -def check_ms_constructed(content, props): - ms_construction_time = props.get('ms_construction_time') - abstraction_constructed = False - if ms_construction_time is not None: - abstraction_constructed = True - props['ms_abstraction_constructed'] = abstraction_constructed - -parser.add_function(check_ms_constructed) - -def check_proved_unsolvability(content, props): - proved_unsolvability = False - if props['coverage'] == 0: - for line in content.splitlines(): - if line == 'Completely explored state space -- no solution!': - proved_unsolvability = True - break - props['proved_unsolvability'] = proved_unsolvability - -parser.add_function(check_proved_unsolvability) - -def check_planner_exit_reason(content, props): - 
ms_abstraction_constructed = props.get('ms_abstraction_constructed') - error = props.get('error') - if error != 'none' and error != 'timeout' and error != 'out-of-memory': - print 'error: %s' % error - return - - # Check whether merge-and-shrink computation or search ran out of - # time or memory. - ms_out_of_time = False - ms_out_of_memory = False - search_out_of_time = False - search_out_of_memory = False - if ms_abstraction_constructed == False: - if error == 'timeout': - ms_out_of_time = True - elif error == 'out-of-memory': - ms_out_of_memory = True - elif ms_abstraction_constructed == True: - if error == 'timeout': - search_out_of_time = True - elif error == 'out-of-memory': - search_out_of_memory = True - props['ms_out_of_time'] = ms_out_of_time - props['ms_out_of_memory'] = ms_out_of_memory - props['search_out_of_time'] = search_out_of_time - props['search_out_of_memory'] = search_out_of_memory - -parser.add_function(check_planner_exit_reason) - -def check_perfect_heuristic(content, props): - plan_length = props.get('plan_length') - expansions = props.get('expansions') - if plan_length != None: - perfect_heuristic = False - if plan_length + 1 == expansions: - perfect_heuristic = True - props['perfect_heuristic'] = perfect_heuristic - -parser.add_function(check_perfect_heuristic) - -parser.parse() diff --git a/experiments/issue602/common_setup.py b/experiments/issue602/common_setup.py deleted file mode 100644 index f553404641..0000000000 --- a/experiments/issue602/common_setup.py +++ /dev/null @@ -1,357 +0,0 @@ -# -*- coding: utf-8 -*- - -import itertools -import os -import platform -import subprocess -import sys - -from lab.environments import LocalEnvironment, MaiaEnvironment -from lab.experiment import ARGPARSER -from lab.steps import Step -from lab import tools - -from downward.experiment import FastDownwardExperiment -from downward.reports.absolute import AbsoluteReport -from downward.reports.compare import CompareConfigsReport -from 
downward.reports.scatter import ScatterPlotReport - - -def parse_args(): - ARGPARSER.add_argument( - "--test", - choices=["yes", "no", "auto"], - default="auto", - dest="test_run", - help="test experiment locally on a small suite if --test=yes or " - "--test=auto and we are not on a cluster") - return ARGPARSER.parse_args() - -ARGS = parse_args() - - -def get_script(): - """Get file name of main script.""" - return tools.get_script_path() - - -def get_script_dir(): - """Get directory of main script. - - Usually a relative directory (depends on how it was called by the user.)""" - return os.path.dirname(get_script()) - - -def get_experiment_name(): - """Get name for experiment. - - Derived from the absolute filename of the main script, e.g. - "/ham/spam/eggs.py" => "spam-eggs".""" - script = os.path.abspath(get_script()) - script_dir = os.path.basename(os.path.dirname(script)) - script_base = os.path.splitext(os.path.basename(script))[0] - return "%s-%s" % (script_dir, script_base) - - -def get_data_dir(): - """Get data dir for the experiment. - - This is the subdirectory "data" of the directory containing - the main script.""" - return os.path.join(get_script_dir(), "data", get_experiment_name()) - - -def get_repo_base(): - """Get base directory of the repository, as an absolute path. - - Search upwards in the directory tree from the main script until a - directory with a subdirectory named ".hg" is found. 
- - Abort if the repo base cannot be found.""" - path = os.path.abspath(get_script_dir()) - while os.path.dirname(path) != path: - if os.path.exists(os.path.join(path, ".hg")): - return path - path = os.path.dirname(path) - sys.exit("repo base could not be found") - - -def is_running_on_cluster(): - node = platform.node() - return ( - "cluster" in node or - node.startswith("gkigrid") or - node in ["habakuk", "turtur"]) - - -def is_test_run(): - return ARGS.test_run == "yes" or ( - ARGS.test_run == "auto" and not is_running_on_cluster()) - - -def get_algo_nick(revision, config_nick): - return "{revision}-{config_nick}".format(**locals()) - - -class IssueConfig(object): - """Hold information about a planner configuration. - - See FastDownwardExperiment.add_algorithm() for documentation of the - constructor's options. - - """ - def __init__(self, nick, component_options, - build_options=None, driver_options=None): - self.nick = nick - self.component_options = component_options - self.build_options = build_options - self.driver_options = driver_options - - -class IssueExperiment(FastDownwardExperiment): - """Wrapper for FastDownwardExperiment with a few convenience features.""" - - DEFAULT_TEST_SUITE = "gripper:prob01.pddl" - - DEFAULT_TABLE_ATTRIBUTES = [ - "cost", - "coverage", - "error", - "evaluations", - "expansions", - "expansions_until_last_jump", - "generated", - "memory", - "quality", - "run_dir", - "score_evaluations", - "score_expansions", - "score_generated", - "score_memory", - "score_search_time", - "score_total_time", - "search_time", - "total_time", - ] - - DEFAULT_SCATTER_PLOT_ATTRIBUTES = [ - "evaluations", - "expansions", - "expansions_until_last_jump", - "initial_h_value", - "memory", - "search_time", - "total_time", - ] - - PORTFOLIO_ATTRIBUTES = [ - "cost", - "coverage", - "error", - "plan_length", - "run_dir", - ] - - def __init__(self, revisions, configs, suite, grid_priority=None, - path=None, test_suite=None, email=None, processes=1, - 
**kwargs): - """Create a DownwardExperiment with some convenience features. - - If *revisions* is specified, it should be a non-empty - list of revisions, which specify which planner versions to use - in the experiment. The same versions are used for translator, - preprocessor and search. :: - - IssueExperiment(revisions=["issue123", "4b3d581643"], ...) - - *configs* must be a non-empty list of IssueConfig objects. :: - - IssueExperiment(..., configs=[ - IssueConfig("ff", ["--search", "eager_greedy(ff())"]), - IssueConfig( - "lama", [], - driver_options=["--alias", "seq-sat-lama-2011"]), - ]) - - *suite* sets the benchmarks for the experiment. It must be a - single string or a list of strings specifying domains or - tasks. The downward.suites module has many predefined - suites. :: - - IssueExperiment(..., suite=["grid", "gripper:prob01.pddl"]) - - from downward import suites - IssueExperiment(..., suite=suites.suite_all()) - IssueExperiment(..., suite=suites.suite_satisficing_with_ipc11()) - IssueExperiment(..., suite=suites.suite_optimal()) - - Use *grid_priority* to set the job priority for cluster - experiments. It must be in the range [-1023, 0] where 0 is the - highest priority. By default the priority is 0. :: - - IssueExperiment(..., grid_priority=-500) - - If *path* is specified, it must be the path to where the - experiment should be built (e.g. - /home/john/experiments/issue123/exp01/). If omitted, the - experiment path is derived automatically from the main - script's filename. Example:: - - script = experiments/issue123/exp01.py --> - path = experiments/issue123/data/issue123-exp01/ - - Specify *test_suite* to set the benchmarks for experiment test - runs. By default the first gripper task is used. - - IssueExperiment(..., test_suite=["depot:pfile1", "tpp:p01.pddl"]) - - If *email* is specified, it should be an email address. This - email address will be notified upon completion of the experiments - if it is run on the cluster. 
- """ - - if is_test_run(): - kwargs["environment"] = LocalEnvironment(processes=processes) - suite = test_suite or self.DEFAULT_TEST_SUITE - elif "environment" not in kwargs: - kwargs["environment"] = MaiaEnvironment( - priority=grid_priority, email=email) - - path = path or get_data_dir() - - FastDownwardExperiment.__init__(self, path=path, **kwargs) - - repo = get_repo_base() - for rev in revisions: - for config in configs: - self.add_algorithm( - get_algo_nick(rev, config.nick), - repo, - rev, - config.component_options, - build_options=config.build_options, - driver_options=config.driver_options) - - self.add_suite(os.path.join(repo, "benchmarks"), suite) - - self._revisions = revisions - self._configs = configs - - @classmethod - def _is_portfolio(cls, config_nick): - return "fdss" in config_nick - - @classmethod - def get_supported_attributes(cls, config_nick, attributes): - if cls._is_portfolio(config_nick): - return [attr for attr in attributes - if attr in cls.PORTFOLIO_ATTRIBUTES] - return attributes - - def add_absolute_report_step(self, **kwargs): - """Add step that makes an absolute report. - - Absolute reports are useful for experiments that don't - compare revisions. - - The report is written to the experiment evaluation directory. - - All *kwargs* will be passed to the AbsoluteReport class. If - the keyword argument *attributes* is not specified, a - default list of attributes is used. :: - - exp.add_absolute_report_step(attributes=["coverage"]) - - """ - kwargs.setdefault("attributes", self.DEFAULT_TABLE_ATTRIBUTES) - report = AbsoluteReport(**kwargs) - outfile = os.path.join(self.eval_dir, - get_experiment_name() + "." + - report.output_format) - self.add_report(report, outfile=outfile) - self.add_step(Step('publish-absolute-report', - subprocess.call, - ['publish', outfile])) - - def add_comparison_table_step(self, **kwargs): - """Add a step that makes pairwise revision comparisons. 
- - Create comparative reports for all pairs of Fast Downward - revisions. Each report pairs up the runs of the same config and - lists the two absolute attribute values and their difference - for all attributes in kwargs["attributes"]. - - All *kwargs* will be passed to the CompareConfigsReport class. - If the keyword argument *attributes* is not specified, a - default list of attributes is used. :: - - exp.add_comparison_table_step(attributes=["coverage"]) - - """ - kwargs.setdefault("attributes", self.DEFAULT_TABLE_ATTRIBUTES) - - def make_comparison_tables(): - for rev1, rev2 in itertools.combinations(self._revisions, 2): - compared_configs = [] - for config in self._configs: - config_nick = config.nick - compared_configs.append( - ("%s-%s" % (rev1, config_nick), - "%s-%s" % (rev2, config_nick), - "Diff (%s)" % config_nick)) - report = CompareConfigsReport(compared_configs, **kwargs) - outfile = os.path.join( - self.eval_dir, - "%s-%s-%s-compare" % (self.name, rev1, rev2) - + "." + report.output_format) - report(self.eval_dir, outfile) - - def publish_comparison_tables(): - for rev1, rev2 in itertools.combinations(self._revisions, 2): - outfile = os.path.join( - self.eval_dir, - "%s-%s-%s-compare" % (self.name, rev1, rev2) - + ".html") - subprocess.call(['publish', outfile]) - - self.add_step(Step("make-comparison-tables", make_comparison_tables)) - self.add_step(Step("publish-comparison-tables", publish_comparison_tables)) - - def add_scatter_plot_step(self, attributes=None): - """Add a step that creates scatter plots for all revision pairs. - - Create a scatter plot for each combination of attribute, - configuration and revisions pair. If *attributes* is not - specified, a list of common scatter plot attributes is used. - For portfolios all attributes except "cost", "coverage" and - "plan_length" will be ignored. 
:: - - exp.add_scatter_plot_step(attributes=["expansions"]) - - """ - if attributes is None: - attributes = self.DEFAULT_SCATTER_PLOT_ATTRIBUTES - scatter_dir = os.path.join(self.eval_dir, "scatter") - - def make_scatter_plot(config_nick, rev1, rev2, attribute): - name = "-".join([self.name, rev1, rev2, attribute, config_nick]) - print "Make scatter plot for", name - algo1 = "%s-%s" % (rev1, config_nick) - algo2 = "%s-%s" % (rev2, config_nick) - report = ScatterPlotReport( - filter_config=[algo1, algo2], - attributes=[attribute], - get_category=lambda run1, run2: run1["domain"], - legend_location=(1.3, 0.5)) - report( - self.eval_dir, - os.path.join(scatter_dir, rev1 + "-" + rev2, name)) - - def make_scatter_plots(): - for config in self._configs: - for rev1, rev2 in itertools.combinations(self._revisions, 2): - for attribute in self.get_supported_attributes( - config.nick, attributes): - make_scatter_plot(config.nick, rev1, rev2, attribute) - - self.add_step(Step("make-scatter-plots", make_scatter_plots)) diff --git a/experiments/issue602/v1-agl.py b/experiments/issue602/v1-agl.py deleted file mode 100755 index 32e9d30a86..0000000000 --- a/experiments/issue602/v1-agl.py +++ /dev/null @@ -1,99 +0,0 @@ -#! 
/usr/bin/env python -# -*- coding: utf-8 -*- - -from downward import suites -from lab.reports import Attribute, gm - -from common_setup import IssueConfig, IssueExperiment - -SUITE_AGL14 = [ - 'barman-agl14-strips', - 'cavediving-agl14-adl', - 'childsnack-agl14-strips', - 'citycar-agl14-adl', - 'floortile-agl14-strips', - 'ged-agl14-strips', - 'hiking-agl14-strips', - 'maintenance-agl14-adl', - 'openstacks-agl14-strips', - 'parking-agl14-strips', - 'tetris-agl14-strips', - 'thoughtful-agl14-strips', - 'transport-agl14-strips', - 'visitall-agl14-strips', -] - -def main(revisions=None): - suite = SUITE_AGL14 - - configs = [ - IssueConfig("astar_goalcount", [ - "--search", - "astar(goalcount)"]), - IssueConfig("eager_greedy_ff", [ - "--heuristic", - "h=ff()", - "--search", - "eager_greedy(h, preferred=h)"]), - IssueConfig("eager_greedy_add", [ - "--heuristic", - "h=add()", - "--search", - "eager_greedy(h, preferred=h)"]), - IssueConfig("eager_greedy_cg", [ - "--heuristic", - "h=cg()", - "--search", - "eager_greedy(h, preferred=h)"]), - IssueConfig("eager_greedy_cea", [ - "--heuristic", - "h=cea()", - "--search", - "eager_greedy(h, preferred=h)"]), - IssueConfig("lazy_greedy_ff", [ - "--heuristic", - "h=ff()", - "--search", - "lazy_greedy(h, preferred=h)"]), - IssueConfig("lazy_greedy_add", [ - "--heuristic", - "h=add()", - "--search", - "lazy_greedy(h, preferred=h)"]), - IssueConfig("lazy_greedy_cg", [ - "--heuristic", - "h=cg()", - "--search", - "lazy_greedy(h, preferred=h)"]), - IssueConfig("seq_sat_lama_2011", [], driver_options=[ - "--alias", "seq-sat-lama-2011"]), - IssueConfig("seq_sat_fdss_1", [], driver_options=[ - "--alias", "seq-sat-fdss-1"]), - IssueConfig("seq_sat_fdss_2", [], driver_options=[ - "--alias", "seq-sat-fdss-2"]), - ] - - exp = IssueExperiment( - revisions=revisions, - configs=configs, - suite=suite, - test_suite=[ - #'cavediving-sat14-adl:testing01_easy.pddl', - #'childsnack-sat14-strips:child-snack_pfile05.pddl', - 
#'citycar-sat14-adl:p3-2-2-0-1.pddl', - #'ged-sat14-strips:d-3-6.pddl', - 'hiking-sat14-strips:ptesting-1-2-7.pddl', - #'maintenance-sat14-adl:maintenance-1-3-060-180-5-000.pddl', - #'tetris-sat14-strips:p020.pddl', - #'thoughtful-sat14-strips:bootstrap-typed-01.pddl', - #'transport-sat14-strips:p01.pddl', - ], - processes=4, - email='silvan.sievers@unibas.ch', - ) - - exp.add_absolute_report_step() - - exp() - -main(revisions=['issue602-v1']) diff --git a/experiments/issue602/v1-mco.py b/experiments/issue602/v1-mco.py deleted file mode 100755 index 9f27e0730f..0000000000 --- a/experiments/issue602/v1-mco.py +++ /dev/null @@ -1,99 +0,0 @@ -#! /usr/bin/env python -# -*- coding: utf-8 -*- - -from downward import suites -from lab.reports import Attribute, gm - -from common_setup import IssueConfig, IssueExperiment - -SUITE_MCO14 = [ - 'barman-mco14-strips', - 'cavediving-mco14-adl', - 'childsnack-mco14-strips', - 'citycar-mco14-adl', - 'floortile-mco14-strips', - 'ged-mco14-strips', - 'hiking-mco14-strips', - 'maintenance-mco14-adl', - 'openstacks-mco14-strips', - 'parking-mco14-strips', - 'tetris-mco14-strips', - 'thoughtful-mco14-strips', - 'transport-mco14-strips', - 'visitall-mco14-strips', -] - -def main(revisions=None): - suite = SUITE_MCO14 - - configs = [ - IssueConfig("astar_goalcount", [ - "--search", - "astar(goalcount)"]), - IssueConfig("eager_greedy_ff", [ - "--heuristic", - "h=ff()", - "--search", - "eager_greedy(h, preferred=h)"]), - IssueConfig("eager_greedy_add", [ - "--heuristic", - "h=add()", - "--search", - "eager_greedy(h, preferred=h)"]), - IssueConfig("eager_greedy_cg", [ - "--heuristic", - "h=cg()", - "--search", - "eager_greedy(h, preferred=h)"]), - IssueConfig("eager_greedy_cea", [ - "--heuristic", - "h=cea()", - "--search", - "eager_greedy(h, preferred=h)"]), - IssueConfig("lazy_greedy_ff", [ - "--heuristic", - "h=ff()", - "--search", - "lazy_greedy(h, preferred=h)"]), - IssueConfig("lazy_greedy_add", [ - "--heuristic", - "h=add()", - 
"--search", - "lazy_greedy(h, preferred=h)"]), - IssueConfig("lazy_greedy_cg", [ - "--heuristic", - "h=cg()", - "--search", - "lazy_greedy(h, preferred=h)"]), - IssueConfig("seq_sat_lama_2011", [], driver_options=[ - "--alias", "seq-sat-lama-2011"]), - IssueConfig("seq_sat_fdss_1", [], driver_options=[ - "--alias", "seq-sat-fdss-1"]), - IssueConfig("seq_sat_fdss_2", [], driver_options=[ - "--alias", "seq-sat-fdss-2"]), - ] - - exp = IssueExperiment( - revisions=revisions, - configs=configs, - suite=suite, - test_suite=[ - #'cavediving-sat14-adl:testing01_easy.pddl', - #'childsnack-sat14-strips:child-snack_pfile05.pddl', - #'citycar-sat14-adl:p3-2-2-0-1.pddl', - #'ged-sat14-strips:d-3-6.pddl', - 'hiking-sat14-strips:ptesting-1-2-7.pddl', - #'maintenance-sat14-adl:maintenance-1-3-060-180-5-000.pddl', - #'tetris-sat14-strips:p020.pddl', - #'thoughtful-sat14-strips:bootstrap-typed-01.pddl', - #'transport-sat14-strips:p01.pddl', - ], - processes=4, - email='silvan.sievers@unibas.ch', - ) - - exp.add_absolute_report_step() - - exp() - -main(revisions=['issue602-v1']) diff --git a/experiments/issue602/v1-opt.y b/experiments/issue602/v1-opt.y deleted file mode 100755 index 9ce603ac97..0000000000 --- a/experiments/issue602/v1-opt.y +++ /dev/null @@ -1,99 +0,0 @@ -#! 
/usr/bin/env python -# -*- coding: utf-8 -*- - -from downward import suites -from lab.reports import Attribute, gm - -from common_setup import IssueConfig, IssueExperiment - -SUITE_OPT14 = [ - 'barman-opt14-strips', - 'cavediving-opt14-adl', - 'childsnack-opt14-strips', - 'citycar-opt14-adl', - 'floortile-opt14-strips', - 'ged-opt14-strips', - 'hiking-opt14-strips', - 'maintenance-opt14-adl', - 'openstacks-opt14-strips', - 'parking-opt14-strips', - 'tetris-opt14-strips', - 'tidybot-opt14-strips', - 'transport-opt14-strips', - 'visitall-opt14-strips', -] - -def main(revisions=None): - suite = SUITE_OPT14 - - configs = [ - IssueConfig("astar_blind", [ - "--search", - "astar(blind)"]), - IssueConfig("astar_h2", [ - "--search", - "astar(hm(2))"]), - IssueConfig("astar_ipdb", [ - "--search", - "astar(ipdb)"]), - IssueConfig("astar_lmcount_lm_merged_rhw_hm", [ - "--search", - "astar(lmcount(lm_merged([lm_rhw(),lm_hm(m=1)]),admissible=true),mpd=true)"]), - IssueConfig("astar_lmcut", [ - "--search", - "astar(lmcut)"]), - IssueConfig("astar_hmax", [ - "--search", - "astar(hmax)"]), - IssueConfig("astar_merge_and_shrink_rl_fh", [ - "--search", - "astar(merge_and_shrink(" - "merge_strategy=merge_linear(variable_order=reverse_level)," - "shrink_strategy=shrink_fh(max_states=50000)," - "label_reduction=exact(before_shrinking=false," - "before_merging=true)))"]), - IssueConfig("astar_merge_and_shrink_dfp_bisim", [ - "--search", - "astar(merge_and_shrink(merge_strategy=merge_dfp," - "shrink_strategy=shrink_bisimulation(max_states=50000,threshold=1," - "greedy=false)," - "label_reduction=exact(before_shrinking=true," - "before_merging=false)))"]), - IssueConfig("astar_merge_and_shrink_dfp_greedy_bisim", [ - "--search", - "astar(merge_and_shrink(merge_strategy=merge_dfp," - "shrink_strategy=shrink_bisimulation(max_states=infinity,threshold=1," - "greedy=true)," - "label_reduction=exact(before_shrinking=true," - "before_merging=false)))"]), - IssueConfig("seq_opt_merge_and_shrink", 
[], driver_options=[ - "--alias", "seq-opt-merge-and-shrink"]), - IssueConfig("seq_opt_fdss_1", [], driver_options=[ - "--alias", "seq-opt-merge-and-shrink"]), - IssueConfig("seq_opt_fdss_2", [], driver_options=[ - "--alias", "seq-opt-fdss-2"]), - ] - - exp = IssueExperiment( - revisions=revisions, - configs=configs, - suite=suite, - test_suite=[ - #'cavediving-opt14-adl:testing01_easy.pddl', - #'childsnack-opt14-strips:child-snack_pfile01.pddl', - #'citycar-opt14-adl:p2-2-2-1-2.pddl', - #'ged-opt14-strips:d-1-2.pddl', - 'hiking-opt14-strips:ptesting-1-2-3.pddl', - #'maintenance-opt14-adl:maintenance-1-3-010-010-2-000.pddl', - #'tetris-opt14-strips:p01-6.pddl', - #'transport-opt14-strips:p01.pddl', - ], - processes=4, - email='silvan.sievers@unibas.ch', - ) - - exp.add_absolute_report_step() - - exp() - -main(revisions=['issue602-v1']) diff --git a/experiments/issue602/v1-sat.py b/experiments/issue602/v1-sat.py deleted file mode 100755 index a20777d2c3..0000000000 --- a/experiments/issue602/v1-sat.py +++ /dev/null @@ -1,99 +0,0 @@ -#! 
/usr/bin/env python -# -*- coding: utf-8 -*- - -from downward import suites -from lab.reports import Attribute, gm - -from common_setup import IssueConfig, IssueExperiment - -SUITE_SAT14 = [ - 'barman-sat14-strips', - 'cavediving-sat14-adl', - 'childsnack-sat14-strips', - 'citycar-sat14-adl', - 'floortile-sat14-strips', - 'ged-sat14-strips', - 'hiking-sat14-strips', - 'maintenance-sat14-adl', - 'openstacks-sat14-strips', - 'parking-sat14-strips', - 'tetris-sat14-strips', - 'thoughtful-sat14-strips', - 'transport-sat14-strips', - 'visitall-sat14-strips', -] - -def main(revisions=None): - suite = SUITE_SAT14 - - configs = [ - IssueConfig("astar_goalcount", [ - "--search", - "astar(goalcount)"]), - IssueConfig("eager_greedy_ff", [ - "--heuristic", - "h=ff()", - "--search", - "eager_greedy(h, preferred=h)"]), - IssueConfig("eager_greedy_add", [ - "--heuristic", - "h=add()", - "--search", - "eager_greedy(h, preferred=h)"]), - IssueConfig("eager_greedy_cg", [ - "--heuristic", - "h=cg()", - "--search", - "eager_greedy(h, preferred=h)"]), - IssueConfig("eager_greedy_cea", [ - "--heuristic", - "h=cea()", - "--search", - "eager_greedy(h, preferred=h)"]), - IssueConfig("lazy_greedy_ff", [ - "--heuristic", - "h=ff()", - "--search", - "lazy_greedy(h, preferred=h)"]), - IssueConfig("lazy_greedy_add", [ - "--heuristic", - "h=add()", - "--search", - "lazy_greedy(h, preferred=h)"]), - IssueConfig("lazy_greedy_cg", [ - "--heuristic", - "h=cg()", - "--search", - "lazy_greedy(h, preferred=h)"]), - IssueConfig("seq_sat_lama_2011", [], driver_options=[ - "--alias", "seq-sat-lama-2011"]), - IssueConfig("seq_sat_fdss_1", [], driver_options=[ - "--alias", "seq-sat-fdss-1"]), - IssueConfig("seq_sat_fdss_2", [], driver_options=[ - "--alias", "seq-sat-fdss-2"]), - ] - - exp = IssueExperiment( - revisions=revisions, - configs=configs, - suite=suite, - test_suite=[ - #'cavediving-sat14-adl:testing01_easy.pddl', - #'childsnack-sat14-strips:child-snack_pfile05.pddl', - 
#'citycar-sat14-adl:p3-2-2-0-1.pddl', - #'ged-sat14-strips:d-3-6.pddl', - 'hiking-sat14-strips:ptesting-1-2-7.pddl', - #'maintenance-sat14-adl:maintenance-1-3-060-180-5-000.pddl', - #'tetris-sat14-strips:p020.pddl', - #'thoughtful-sat14-strips:bootstrap-typed-01.pddl', - #'transport-sat14-strips:p01.pddl', - ], - processes=4, - email='silvan.sievers@unibas.ch', - ) - - exp.add_absolute_report_step() - - exp() - -main(revisions=['issue602-v1']) diff --git a/experiments/issue604/common_setup.py b/experiments/issue604/common_setup.py deleted file mode 100644 index 0b2eebe0ff..0000000000 --- a/experiments/issue604/common_setup.py +++ /dev/null @@ -1,357 +0,0 @@ -# -*- coding: utf-8 -*- - -import itertools -import os -import platform -import subprocess -import sys - -from lab.environments import LocalEnvironment, MaiaEnvironment -from lab.experiment import ARGPARSER -from lab.steps import Step -from lab import tools - -from downward.experiment import FastDownwardExperiment -from downward.reports.absolute import AbsoluteReport -from downward.reports.compare import CompareConfigsReport -from downward.reports.scatter import ScatterPlotReport - - -def parse_args(): - ARGPARSER.add_argument( - "--test", - choices=["yes", "no", "auto"], - default="auto", - dest="test_run", - help="test experiment locally on a small suite if --test=yes or " - "--test=auto and we are not on a cluster") - return ARGPARSER.parse_args() - -ARGS = parse_args() - - -def get_script(): - """Get file name of main script.""" - return tools.get_script_path() - - -def get_script_dir(): - """Get directory of main script. - - Usually a relative directory (depends on how it was called by the user.)""" - return os.path.dirname(get_script()) - - -def get_experiment_name(): - """Get name for experiment. - - Derived from the absolute filename of the main script, e.g. 
- "/ham/spam/eggs.py" => "spam-eggs".""" - script = os.path.abspath(get_script()) - script_dir = os.path.basename(os.path.dirname(script)) - script_base = os.path.splitext(os.path.basename(script))[0] - return "%s-%s" % (script_dir, script_base) - - -def get_data_dir(): - """Get data dir for the experiment. - - This is the subdirectory "data" of the directory containing - the main script.""" - return os.path.join(get_script_dir(), "data", get_experiment_name()) - - -def get_repo_base(): - """Get base directory of the repository, as an absolute path. - - Search upwards in the directory tree from the main script until a - directory with a subdirectory named ".hg" is found. - - Abort if the repo base cannot be found.""" - path = os.path.abspath(get_script_dir()) - while os.path.dirname(path) != path: - if os.path.exists(os.path.join(path, ".hg")): - return path - path = os.path.dirname(path) - sys.exit("repo base could not be found") - - -def is_running_on_cluster(): - node = platform.node() - return ( - "cluster" in node or - node.startswith("gkigrid") or - node in ["habakuk", "turtur"]) - - -def is_test_run(): - return ARGS.test_run == "yes" or ( - ARGS.test_run == "auto" and not is_running_on_cluster()) - - -def get_algo_nick(revision, config_nick): - return "{revision}-{config_nick}".format(**locals()) - - -class IssueConfig(object): - """Hold information about a planner configuration. - - See FastDownwardExperiment.add_algorithm() for documentation of the - constructor's options. 
- - """ - def __init__(self, nick, component_options, - build_options=None, driver_options=None): - self.nick = nick - self.component_options = component_options - self.build_options = build_options - self.driver_options = driver_options - - -class IssueExperiment(FastDownwardExperiment): - """Wrapper for FastDownwardExperiment with a few convenience features.""" - - DEFAULT_TEST_SUITE = "gripper:prob01.pddl" - - DEFAULT_TABLE_ATTRIBUTES = [ - "cost", - "coverage", - "error", - "evaluations", - "expansions", - "expansions_until_last_jump", - "generated", - "memory", - "quality", - "run_dir", - "score_evaluations", - "score_expansions", - "score_generated", - "score_memory", - "score_search_time", - "score_total_time", - "search_time", - "total_time", - ] - - DEFAULT_SCATTER_PLOT_ATTRIBUTES = [ - "evaluations", - "expansions", - "expansions_until_last_jump", - "initial_h_value", - "memory", - "search_time", - "total_time", - ] - - PORTFOLIO_ATTRIBUTES = [ - "cost", - "coverage", - "error", - "plan_length", - "run_dir", - ] - - def __init__(self, suite, revisions=[], configs={}, grid_priority=None, - path=None, test_suite=None, email=None, processes=1, - **kwargs): - """Create a DownwardExperiment with some convenience features. - - If *revisions* is specified, it should be a non-empty - list of revisions, which specify which planner versions to use - in the experiment. The same versions are used for translator, - preprocessor and search. :: - - IssueExperiment(revisions=["issue123", "4b3d581643"], ...) - - *configs* must be a non-empty list of IssueConfig objects. :: - - IssueExperiment(..., configs=[ - IssueConfig("ff", ["--search", "eager_greedy(ff())"]), - IssueConfig( - "lama", [], - driver_options=["--alias", "seq-sat-lama-2011"]), - ]) - - *suite* sets the benchmarks for the experiment. It must be a - single string or a list of strings specifying domains or - tasks. The downward.suites module has many predefined - suites. 
:: - - IssueExperiment(..., suite=["grid", "gripper:prob01.pddl"]) - - from downward import suites - IssueExperiment(..., suite=suites.suite_all()) - IssueExperiment(..., suite=suites.suite_satisficing_with_ipc11()) - IssueExperiment(..., suite=suites.suite_optimal()) - - Use *grid_priority* to set the job priority for cluster - experiments. It must be in the range [-1023, 0] where 0 is the - highest priority. By default the priority is 0. :: - - IssueExperiment(..., grid_priority=-500) - - If *path* is specified, it must be the path to where the - experiment should be built (e.g. - /home/john/experiments/issue123/exp01/). If omitted, the - experiment path is derived automatically from the main - script's filename. Example:: - - script = experiments/issue123/exp01.py --> - path = experiments/issue123/data/issue123-exp01/ - - Specify *test_suite* to set the benchmarks for experiment test - runs. By default the first gripper task is used. - - IssueExperiment(..., test_suite=["depot:pfile1", "tpp:p01.pddl"]) - - If *email* is specified, it should be an email address. This - email address will be notified upon completion of the experiments - if it is run on the cluster. 
- """ - - if is_test_run(): - kwargs["environment"] = LocalEnvironment(processes=processes) - suite = test_suite or self.DEFAULT_TEST_SUITE - elif "environment" not in kwargs: - kwargs["environment"] = MaiaEnvironment( - priority=grid_priority, email=email) - - path = path or get_data_dir() - - FastDownwardExperiment.__init__(self, path=path, **kwargs) - - repo = get_repo_base() - for rev in revisions: - for config in configs: - self.add_algorithm( - get_algo_nick(rev, config.nick), - repo, - rev, - config.component_options, - build_options=config.build_options, - driver_options=config.driver_options) - - self.add_suite(os.path.join(repo, "benchmarks"), suite) - - self._revisions = revisions - self._configs = configs - - @classmethod - def _is_portfolio(cls, config_nick): - return "fdss" in config_nick - - @classmethod - def get_supported_attributes(cls, config_nick, attributes): - if cls._is_portfolio(config_nick): - return [attr for attr in attributes - if attr in cls.PORTFOLIO_ATTRIBUTES] - return attributes - - def add_absolute_report_step(self, **kwargs): - """Add step that makes an absolute report. - - Absolute reports are useful for experiments that don't - compare revisions. - - The report is written to the experiment evaluation directory. - - All *kwargs* will be passed to the AbsoluteReport class. If - the keyword argument *attributes* is not specified, a - default list of attributes is used. :: - - exp.add_absolute_report_step(attributes=["coverage"]) - - """ - kwargs.setdefault("attributes", self.DEFAULT_TABLE_ATTRIBUTES) - report = AbsoluteReport(**kwargs) - outfile = os.path.join(self.eval_dir, - get_experiment_name() + "." + - report.output_format) - self.add_report(report, outfile=outfile) - self.add_step(Step('publish-absolute-report', - subprocess.call, - ['publish', outfile])) - - def add_comparison_table_step(self, **kwargs): - """Add a step that makes pairwise revision comparisons. 
- - Create comparative reports for all pairs of Fast Downward - revisions. Each report pairs up the runs of the same config and - lists the two absolute attribute values and their difference - for all attributes in kwargs["attributes"]. - - All *kwargs* will be passed to the CompareConfigsReport class. - If the keyword argument *attributes* is not specified, a - default list of attributes is used. :: - - exp.add_comparison_table_step(attributes=["coverage"]) - - """ - kwargs.setdefault("attributes", self.DEFAULT_TABLE_ATTRIBUTES) - - def make_comparison_tables(): - for rev1, rev2 in itertools.combinations(self._revisions, 2): - compared_configs = [] - for config in self._configs: - config_nick = config.nick - compared_configs.append( - ("%s-%s" % (rev1, config_nick), - "%s-%s" % (rev2, config_nick), - "Diff (%s)" % config_nick)) - report = CompareConfigsReport(compared_configs, **kwargs) - outfile = os.path.join( - self.eval_dir, - "%s-%s-%s-compare" % (self.name, rev1, rev2) - + "." + report.output_format) - report(self.eval_dir, outfile) - - def publish_comparison_tables(): - for rev1, rev2 in itertools.combinations(self._revisions, 2): - outfile = os.path.join( - self.eval_dir, - "%s-%s-%s-compare" % (self.name, rev1, rev2) - + ".html") - subprocess.call(['publish', outfile]) - - self.add_step(Step("make-comparison-tables", make_comparison_tables)) - self.add_step(Step("publish-comparison-tables", publish_comparison_tables)) - - def add_scatter_plot_step(self, attributes=None): - """Add a step that creates scatter plots for all revision pairs. - - Create a scatter plot for each combination of attribute, - configuration and revisions pair. If *attributes* is not - specified, a list of common scatter plot attributes is used. - For portfolios all attributes except "cost", "coverage" and - "plan_length" will be ignored. 
:: - - exp.add_scatter_plot_step(attributes=["expansions"]) - - """ - if attributes is None: - attributes = self.DEFAULT_SCATTER_PLOT_ATTRIBUTES - scatter_dir = os.path.join(self.eval_dir, "scatter") - - def make_scatter_plot(config_nick, rev1, rev2, attribute): - name = "-".join([self.name, rev1, rev2, attribute, config_nick]) - print "Make scatter plot for", name - algo1 = "%s-%s" % (rev1, config_nick) - algo2 = "%s-%s" % (rev2, config_nick) - report = ScatterPlotReport( - filter_config=[algo1, algo2], - attributes=[attribute], - get_category=lambda run1, run2: run1["domain"], - legend_location=(1.3, 0.5)) - report( - self.eval_dir, - os.path.join(scatter_dir, rev1 + "-" + rev2, name)) - - def make_scatter_plots(): - for config in self._configs: - for rev1, rev2 in itertools.combinations(self._revisions, 2): - for attribute in self.get_supported_attributes( - config.nick, attributes): - make_scatter_plot(config.nick, rev1, rev2, attribute) - - self.add_step(Step("make-scatter-plots", make_scatter_plots)) diff --git a/experiments/issue604/ms-parser.py b/experiments/issue604/ms-parser.py deleted file mode 100755 index 46d0123552..0000000000 --- a/experiments/issue604/ms-parser.py +++ /dev/null @@ -1,71 +0,0 @@ -#! 
/usr/bin/env python - -from lab.parser import Parser - -parser = Parser() -parser.add_pattern('ms_final_size', 'Final transition system size: (\d+)', required=False, type=int) -parser.add_pattern('ms_construction_time', 'Done initializing merge-and-shrink heuristic \[(.+)s\]', required=False, type=float) -parser.add_pattern('ms_memory_delta', 'Final peak memory increase of merge-and-shrink computation: (\d+) KB', required=False, type=int) - -def check_ms_constructed(content, props): - ms_construction_time = props.get('ms_construction_time') - abstraction_constructed = False - if ms_construction_time is not None: - abstraction_constructed = True - props['ms_abstraction_constructed'] = abstraction_constructed - -parser.add_function(check_ms_constructed) - -def check_planner_exit_reason(content, props): - ms_abstraction_constructed = props.get('ms_abstraction_constructed') - error = props.get('error') - if error != 'none' and error != 'timeout' and error != 'out-of-memory': - print 'error: %s' % error - return - - # Check whether merge-and-shrink computation or search ran out of - # time or memory. 
- ms_out_of_time = False - ms_out_of_memory = False - search_out_of_time = False - search_out_of_memory = False - if ms_abstraction_constructed == False: - if error == 'timeout': - ms_out_of_time = True - elif error == 'out-of-memory': - ms_out_of_memory = True - elif ms_abstraction_constructed == True: - if error == 'timeout': - search_out_of_time = True - elif error == 'out-of-memory': - search_out_of_memory = True - props['ms_out_of_time'] = ms_out_of_time - props['ms_out_of_memory'] = ms_out_of_memory - props['search_out_of_time'] = search_out_of_time - props['search_out_of_memory'] = search_out_of_memory - -parser.add_function(check_planner_exit_reason) - -def check_perfect_heuristic(content, props): - plan_length = props.get('plan_length') - expansions = props.get('expansions') - if plan_length != None: - perfect_heuristic = False - if plan_length + 1 == expansions: - perfect_heuristic = True - props['perfect_heuristic'] = perfect_heuristic - -parser.add_function(check_perfect_heuristic) - -def check_proved_unsolvability(content, props): - proved_unsolvability = False - if props['coverage'] == 0: - for line in content.splitlines(): - if line == 'Completely explored state space -- no solution!': - proved_unsolvability = True - break - props['proved_unsolvability'] = proved_unsolvability - -parser.add_function(check_proved_unsolvability) - -parser.parse() diff --git a/experiments/issue604/relativescatter.py b/experiments/issue604/relativescatter.py deleted file mode 100644 index 41a8385a87..0000000000 --- a/experiments/issue604/relativescatter.py +++ /dev/null @@ -1,126 +0,0 @@ -# -*- coding: utf-8 -*- -# -# downward uses the lab package to conduct experiments with the -# Fast Downward planning system. -# -# This program is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. 
-# -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with this program. If not, see . - -from collections import defaultdict -import os - -from lab import tools - -from matplotlib import ticker - -from downward.reports.scatter import ScatterPlotReport -from downward.reports.plot import PlotReport, Matplotlib, MatplotlibPlot - - -# TODO: handle outliers - -# TODO: this is mostly copied from ScatterMatplotlib (scatter.py) -class RelativeScatterMatplotlib(Matplotlib): - @classmethod - def _plot(cls, report, axes, categories, styles): - # Display grid - axes.grid(b=True, linestyle='-', color='0.75') - - has_points = False - # Generate the scatter plots - for category, coords in sorted(categories.items()): - X, Y = zip(*coords) - axes.scatter(X, Y, s=42, label=category, **styles[category]) - if X and Y: - has_points = True - - if report.xscale == 'linear' or report.yscale == 'linear': - plot_size = report.missing_val * 1.01 - else: - plot_size = report.missing_val * 1.25 - - # make 5 ticks above and below 1 - yticks = [] - tick_step = report.ylim_top**(1/5.0) - for i in xrange(-5, 6): - yticks.append(tick_step**i) - axes.set_yticks(yticks) - axes.get_yaxis().set_major_formatter(ticker.ScalarFormatter()) - - axes.set_xlim(report.xlim_left or -1, report.xlim_right or plot_size) - axes.set_ylim(report.ylim_bottom or -1, report.ylim_top or plot_size) - - for axis in [axes.xaxis, axes.yaxis]: - MatplotlibPlot.change_axis_formatter(axis, - report.missing_val if report.show_missing else None) - return has_points - - - -class RelativeScatterPlotReport(ScatterPlotReport): - """ - Generate a scatter plot that shows how a specific attribute in two - configurations. 
The attribute value in config 1 is shown on the - x-axis and the relation to the value in config 2 on the y-axis. - """ - - def __init__(self, show_missing=True, get_category=None, **kwargs): - ScatterPlotReport.__init__(self, show_missing, get_category, **kwargs) - if self.output_format == 'tex': - raise "not supported" - else: - self.writer = RelativeScatterMatplotlib - - - def _fill_categories(self, runs): - # We discard the *runs* parameter. - # Map category names to value tuples - categories = defaultdict(list) - self.ylim_bottom = 2 - self.ylim_top = 0.5 - self.xlim_left = float("inf") - for (domain, problem), runs in self.problem_runs.items(): - if len(runs) != 2: - continue - run1, run2 = runs - assert (run1['config'] == self.configs[0] and - run2['config'] == self.configs[1]) - val1 = run1.get(self.attribute) - val2 = run2.get(self.attribute) - if val1 is None or val2 is None: - continue - category = self.get_category(run1, run2) - assert val1 > 0, (domain, problem, self.configs[0], val1) - assert val2 > 0, (domain, problem, self.configs[1], val2) - x = val1 - y = val2 / float(val1) - - categories[category].append((x, y)) - - self.ylim_top = max(self.ylim_top, y) - self.ylim_bottom = min(self.ylim_bottom, y) - self.xlim_left = min(self.xlim_left, x) - - # center around 1 - if self.ylim_bottom < 1: - self.ylim_top = max(self.ylim_top, 1 / float(self.ylim_bottom)) - if self.ylim_top > 1: - self.ylim_bottom = min(self.ylim_bottom, 1 / float(self.ylim_top)) - return categories - - def _set_scales(self, xscale, yscale): - # ScatterPlots use log-scaling on the x-axis by default. - default_xscale = 'log' - if self.attribute and self.attribute in self.LINEAR: - default_xscale = 'linear' - PlotReport._set_scales(self, xscale or default_xscale, 'log') diff --git a/experiments/issue604/v1.py b/experiments/issue604/v1.py deleted file mode 100755 index e1bc749086..0000000000 --- a/experiments/issue604/v1.py +++ /dev/null @@ -1,88 +0,0 @@ -#! 
/usr/bin/env python -# -*- coding: utf-8 -*- - -from downward import suites -from lab.reports import Attribute, gm - -from common_setup import IssueConfig, IssueExperiment -from relativescatter import RelativeScatterPlotReport - -def main(revisions=None): - suite = suites.suite_optimal_with_ipc11() - - configs = { - IssueConfig('rl-b50k', ['--search', 'astar(merge_and_shrink(merge_strategy=merge_linear(variable_order=reverse_level),shrink_strategy=shrink_bisimulation(max_states=50000,threshold=1,greedy=false),label_reduction=exact(before_shrinking=true,before_merging=false)))']), - IssueConfig('cggl-b50k', ['--search', 'astar(merge_and_shrink(merge_strategy=merge_linear(variable_order=cg_goal_level),shrink_strategy=shrink_bisimulation(max_states=50000,threshold=1,greedy=false),label_reduction=exact(before_shrinking=true,before_merging=false)))']), - IssueConfig('dfp-b50k', ['--search', 'astar(merge_and_shrink(merge_strategy=merge_dfp,shrink_strategy=shrink_bisimulation(max_states=50000,threshold=1,greedy=false),label_reduction=exact(before_shrinking=true,before_merging=false)))']), - IssueConfig('rl-ginf', ['--search', 'astar(merge_and_shrink(merge_strategy=merge_linear(variable_order=reverse_level),shrink_strategy=shrink_bisimulation(max_states=infinity,threshold=1,greedy=true),label_reduction=exact(before_shrinking=true,before_merging=false)))']), - IssueConfig('cggl-ginf', ['--search', 'astar(merge_and_shrink(merge_strategy=merge_linear(variable_order=cg_goal_level),shrink_strategy=shrink_bisimulation(max_states=infinity,threshold=1,greedy=true),label_reduction=exact(before_shrinking=true,before_merging=false)))']), - IssueConfig('dfp-ginf', ['--search', 'astar(merge_and_shrink(merge_strategy=merge_dfp,shrink_strategy=shrink_bisimulation(max_states=infinity,threshold=1,greedy=true),label_reduction=exact(before_shrinking=true,before_merging=false)))']), - IssueConfig('rl-f50k', ['--search', 
'astar(merge_and_shrink(merge_strategy=merge_linear(variable_order=reverse_level),shrink_strategy=shrink_fh(max_states=50000),label_reduction=exact(before_shrinking=false,before_merging=true)))']), - IssueConfig('cggl-f50k', ['--search', 'astar(merge_and_shrink(merge_strategy=merge_linear(variable_order=cg_goal_level),shrink_strategy=shrink_fh(max_states=50000),label_reduction=exact(before_shrinking=false,before_merging=true)))']), - IssueConfig('dfp-f50k', ['--search', 'astar(merge_and_shrink(merge_strategy=merge_dfp,shrink_strategy=shrink_fh(max_states=50000),label_reduction=exact(before_shrinking=false,before_merging=true)))']), - } - - exp = IssueExperiment( - revisions=revisions, - configs=configs, - suite=suite, - test_suite=['depot:pfile1'], - processes=4, - email='silvan.sievers@unibas.ch', - ) - exp.add_resource('ms_parser', 'ms-parser.py', dest='ms-parser.py') - exp.add_command('ms-parser', ['ms_parser']) - - # planner outcome attributes - perfect_heuristic = Attribute('perfect_heuristic', absolute=True, min_wins=False) - proved_unsolvability = Attribute('proved_unsolvability', absolute=True, min_wins=False) - actual_search_time = Attribute('actual_search_time', absolute=False, min_wins=True, functions=[gm]) - - # m&s attributes - ms_construction_time = Attribute('ms_construction_time', absolute=False, min_wins=True, functions=[gm]) - ms_abstraction_constructed = Attribute('ms_abstraction_constructed', absolute=True, min_wins=False) - ms_final_size = Attribute('ms_final_size', absolute=False, min_wins=True) - ms_out_of_memory = Attribute('ms_out_of_memory', absolute=True, min_wins=True) - ms_out_of_time = Attribute('ms_out_of_time', absolute=True, min_wins=True) - search_out_of_memory = Attribute('search_out_of_memory', absolute=True, min_wins=True) - search_out_of_time = Attribute('search_out_of_time', absolute=True, min_wins=True) - - extra_attributes = [ - perfect_heuristic, - proved_unsolvability, - actual_search_time, - - ms_construction_time, - 
ms_abstraction_constructed, - ms_final_size, - ms_out_of_memory, - ms_out_of_time, - search_out_of_memory, - search_out_of_time, - ] - attributes = exp.DEFAULT_TABLE_ATTRIBUTES - attributes.extend(extra_attributes) - - exp.add_comparison_table_step() - - exp.add_report( - RelativeScatterPlotReport( - attributes=["memory"], - filter_config=["issue604-base-dfp-ginf", "issue604-v1-dfp-ginf"], - get_category=lambda run1, run2: run1.get("domain"), - ), - outfile='issue604_base_v1_memory_dfp.png' - ) - - exp.add_report( - RelativeScatterPlotReport( - attributes=["memory"], - filter_config=["issue604-base-rl-ginf", "issue604-v1-rl-ginf"], - get_category=lambda run1, run2: run1.get("domain"), - ), - outfile='issue604_base_v1_memory_rl.png' - ) - - exp() - -main(revisions=['issue604-base', 'issue604-v1']) diff --git a/experiments/issue604/v2.py b/experiments/issue604/v2.py deleted file mode 100755 index 02472df1b5..0000000000 --- a/experiments/issue604/v2.py +++ /dev/null @@ -1,69 +0,0 @@ -#! /usr/bin/env python -# -*- coding: utf-8 -*- - -from downward import suites -from lab.reports import Attribute, gm - -from common_setup import IssueConfig, IssueExperiment - -def main(revisions=None): - suite = suites.suite_optimal_with_ipc11() - - configs = { - IssueConfig('rl-b50k', ['--search', 'astar(merge_and_shrink(merge_strategy=merge_linear(variable_order=reverse_level),shrink_strategy=shrink_bisimulation(max_states=50000,threshold=1,greedy=false),label_reduction=exact(before_shrinking=true,before_merging=false)))']), - IssueConfig('cggl-b50k', ['--search', 'astar(merge_and_shrink(merge_strategy=merge_linear(variable_order=cg_goal_level),shrink_strategy=shrink_bisimulation(max_states=50000,threshold=1,greedy=false),label_reduction=exact(before_shrinking=true,before_merging=false)))']), - IssueConfig('dfp-b50k', ['--search', 
'astar(merge_and_shrink(merge_strategy=merge_dfp,shrink_strategy=shrink_bisimulation(max_states=50000,threshold=1,greedy=false),label_reduction=exact(before_shrinking=true,before_merging=false)))']), - IssueConfig('rl-ginf', ['--search', 'astar(merge_and_shrink(merge_strategy=merge_linear(variable_order=reverse_level),shrink_strategy=shrink_bisimulation(max_states=infinity,threshold=1,greedy=true),label_reduction=exact(before_shrinking=true,before_merging=false)))']), - IssueConfig('cggl-ginf', ['--search', 'astar(merge_and_shrink(merge_strategy=merge_linear(variable_order=cg_goal_level),shrink_strategy=shrink_bisimulation(max_states=infinity,threshold=1,greedy=true),label_reduction=exact(before_shrinking=true,before_merging=false)))']), - IssueConfig('dfp-ginf', ['--search', 'astar(merge_and_shrink(merge_strategy=merge_dfp,shrink_strategy=shrink_bisimulation(max_states=infinity,threshold=1,greedy=true),label_reduction=exact(before_shrinking=true,before_merging=false)))']), - IssueConfig('rl-f50k', ['--search', 'astar(merge_and_shrink(merge_strategy=merge_linear(variable_order=reverse_level),shrink_strategy=shrink_fh(max_states=50000),label_reduction=exact(before_shrinking=false,before_merging=true)))']), - IssueConfig('cggl-f50k', ['--search', 'astar(merge_and_shrink(merge_strategy=merge_linear(variable_order=cg_goal_level),shrink_strategy=shrink_fh(max_states=50000),label_reduction=exact(before_shrinking=false,before_merging=true)))']), - IssueConfig('dfp-f50k', ['--search', 'astar(merge_and_shrink(merge_strategy=merge_dfp,shrink_strategy=shrink_fh(max_states=50000),label_reduction=exact(before_shrinking=false,before_merging=true)))']), - } - - exp = IssueExperiment( - revisions=revisions, - configs=configs, - suite=suite, - test_suite=['depot:pfile1'], - processes=4, - email='silvan.sievers@unibas.ch', - ) - exp.add_resource('ms_parser', 'ms-parser.py', dest='ms-parser.py') - exp.add_command('ms-parser', ['ms_parser']) - - # planner outcome attributes - 
search_out_of_memory = Attribute('search_out_of_memory', absolute=True, min_wins=True) - search_out_of_time = Attribute('search_out_of_time', absolute=True, min_wins=True) - perfect_heuristic = Attribute('perfect_heuristic', absolute=True, min_wins=False) - proved_unsolvability = Attribute('proved_unsolvability', absolute=True, min_wins=False) - - # m&s attributes - ms_construction_time = Attribute('ms_construction_time', absolute=False, min_wins=True, functions=[gm]) - ms_abstraction_constructed = Attribute('ms_abstraction_constructed', absolute=True, min_wins=False) - ms_final_size = Attribute('ms_final_size', absolute=False, min_wins=True) - ms_out_of_memory = Attribute('ms_out_of_memory', absolute=True, min_wins=True) - ms_out_of_time = Attribute('ms_out_of_time', absolute=True, min_wins=True) - ms_memory_delta = Attribute('ms_memory_delta', absolute=False, min_wins=True) - - extra_attributes = [ - search_out_of_memory, - search_out_of_time, - perfect_heuristic, - proved_unsolvability, - - ms_construction_time, - ms_abstraction_constructed, - ms_final_size, - ms_out_of_memory, - ms_out_of_time, - ms_memory_delta, - ] - attributes = exp.DEFAULT_TABLE_ATTRIBUTES - attributes.extend(extra_attributes) - - exp.add_comparison_table_step() - - exp() - -main(revisions=['issue604-v1', 'issue604-v2']) diff --git a/experiments/issue604/v3.py b/experiments/issue604/v3.py deleted file mode 100755 index 4c68fdeafd..0000000000 --- a/experiments/issue604/v3.py +++ /dev/null @@ -1,94 +0,0 @@ -#! 
/usr/bin/env python -# -*- coding: utf-8 -*- - -from downward import suites -from lab.reports import Attribute, gm -from downward.reports.compare import CompareConfigsReport - -from common_setup import IssueConfig, IssueExperiment - -import os - -def main(revisions=None): - suite = suites.suite_optimal_with_ipc11() - - configs = { - IssueConfig('rl-b50k', ['--search', 'astar(merge_and_shrink(merge_strategy=merge_linear(variable_order=reverse_level),shrink_strategy=shrink_bisimulation(max_states=50000,threshold=1,greedy=false),label_reduction=exact(before_shrinking=true,before_merging=false)))']), - IssueConfig('cggl-b50k', ['--search', 'astar(merge_and_shrink(merge_strategy=merge_linear(variable_order=cg_goal_level),shrink_strategy=shrink_bisimulation(max_states=50000,threshold=1,greedy=false),label_reduction=exact(before_shrinking=true,before_merging=false)))']), - IssueConfig('dfp-b50k', ['--search', 'astar(merge_and_shrink(merge_strategy=merge_dfp,shrink_strategy=shrink_bisimulation(max_states=50000,threshold=1,greedy=false),label_reduction=exact(before_shrinking=true,before_merging=false)))']), - IssueConfig('rl-ginf', ['--search', 'astar(merge_and_shrink(merge_strategy=merge_linear(variable_order=reverse_level),shrink_strategy=shrink_bisimulation(max_states=infinity,threshold=1,greedy=true),label_reduction=exact(before_shrinking=true,before_merging=false)))']), - IssueConfig('cggl-ginf', ['--search', 'astar(merge_and_shrink(merge_strategy=merge_linear(variable_order=cg_goal_level),shrink_strategy=shrink_bisimulation(max_states=infinity,threshold=1,greedy=true),label_reduction=exact(before_shrinking=true,before_merging=false)))']), - IssueConfig('dfp-ginf', ['--search', 'astar(merge_and_shrink(merge_strategy=merge_dfp,shrink_strategy=shrink_bisimulation(max_states=infinity,threshold=1,greedy=true),label_reduction=exact(before_shrinking=true,before_merging=false)))']), - IssueConfig('rl-f50k', ['--search', 
'astar(merge_and_shrink(merge_strategy=merge_linear(variable_order=reverse_level),shrink_strategy=shrink_fh(max_states=50000),label_reduction=exact(before_shrinking=false,before_merging=true)))']), - IssueConfig('cggl-f50k', ['--search', 'astar(merge_and_shrink(merge_strategy=merge_linear(variable_order=cg_goal_level),shrink_strategy=shrink_fh(max_states=50000),label_reduction=exact(before_shrinking=false,before_merging=true)))']), - IssueConfig('dfp-f50k', ['--search', 'astar(merge_and_shrink(merge_strategy=merge_dfp,shrink_strategy=shrink_fh(max_states=50000),label_reduction=exact(before_shrinking=false,before_merging=true)))']), - } - - exp = IssueExperiment( - revisions=revisions, - configs=configs, - suite=suite, - test_suite=['depot:pfile1'], - processes=4, - email='silvan.sievers@unibas.ch', - ) - exp.add_resource('ms_parser', 'ms-parser.py', dest='ms-parser.py') - exp.add_command('ms-parser', ['ms_parser']) - - # planner outcome attributes - search_out_of_memory = Attribute('search_out_of_memory', absolute=True, min_wins=True) - search_out_of_time = Attribute('search_out_of_time', absolute=True, min_wins=True) - perfect_heuristic = Attribute('perfect_heuristic', absolute=True, min_wins=False) - proved_unsolvability = Attribute('proved_unsolvability', absolute=True, min_wins=False) - - # m&s attributes - ms_construction_time = Attribute('ms_construction_time', absolute=False, min_wins=True, functions=[gm]) - ms_abstraction_constructed = Attribute('ms_abstraction_constructed', absolute=True, min_wins=False) - ms_final_size = Attribute('ms_final_size', absolute=False, min_wins=True) - ms_out_of_memory = Attribute('ms_out_of_memory', absolute=True, min_wins=True) - ms_out_of_time = Attribute('ms_out_of_time', absolute=True, min_wins=True) - ms_memory_delta = Attribute('ms_memory_delta', absolute=False, min_wins=True) - - extra_attributes = [ - search_out_of_memory, - search_out_of_time, - perfect_heuristic, - proved_unsolvability, - - ms_construction_time, - 
ms_abstraction_constructed, - ms_final_size, - ms_out_of_memory, - ms_out_of_time, - ms_memory_delta, - ] - attributes = exp.DEFAULT_TABLE_ATTRIBUTES - attributes.extend(extra_attributes) - - exp.add_fetcher('data/issue604-v1-eval', filter_config=[ - 'issue604-v1-rl-b50k', - 'issue604-v1-cggl-b50k', - 'issue604-v1-dfp-b50k', - 'issue604-v1-rl-ginf', - 'issue604-v1-cggl-ginf', - 'issue604-v1-dfp-ginf', - 'issue604-v1-rl-f50k', - 'issue604-v1-cggl-f50k', - 'issue604-v1-dfp-f50k', - ]) - - exp.add_report(CompareConfigsReport(compared_configs=[ - ('issue604-v1-rl-b50k', 'issue604-v3-rl-b50k'), - ('issue604-v1-cggl-b50k', 'issue604-v3-cggl-b50k'), - ('issue604-v1-dfp-b50k', 'issue604-v3-dfp-b50k'), - ('issue604-v1-rl-ginf', 'issue604-v3-rl-ginf'), - ('issue604-v1-cggl-ginf', 'issue604-v3-cggl-ginf'), - ('issue604-v1-dfp-ginf', 'issue604-v3-dfp-ginf'), - ('issue604-v1-rl-f50k', 'issue604-v3-rl-f50k'), - ('issue604-v1-cggl-f50k', 'issue604-v3-cggl-f50k'), - ('issue604-v1-dfp-f50k', 'issue604-v3-dfp-f50k'), - ],attributes=attributes),outfile=os.path.join(exp.eval_dir, 'issue604-v1-v3-comparison.html')) - - exp() - -main(revisions=['issue604-v3']) diff --git a/experiments/issue604/v4.py b/experiments/issue604/v4.py deleted file mode 100755 index 560e9f5187..0000000000 --- a/experiments/issue604/v4.py +++ /dev/null @@ -1,69 +0,0 @@ -#! 
/usr/bin/env python -# -*- coding: utf-8 -*- - -from downward import suites -from lab.reports import Attribute, gm - -from common_setup import IssueConfig, IssueExperiment - -def main(revisions=None): - suite = suites.suite_optimal_with_ipc11() - - configs = { - IssueConfig('rl-b50k', ['--search', 'astar(merge_and_shrink(merge_strategy=merge_linear(variable_order=reverse_level),shrink_strategy=shrink_bisimulation(max_states=50000,threshold=1,greedy=false),label_reduction=exact(before_shrinking=true,before_merging=false)))']), - IssueConfig('cggl-b50k', ['--search', 'astar(merge_and_shrink(merge_strategy=merge_linear(variable_order=cg_goal_level),shrink_strategy=shrink_bisimulation(max_states=50000,threshold=1,greedy=false),label_reduction=exact(before_shrinking=true,before_merging=false)))']), - IssueConfig('dfp-b50k', ['--search', 'astar(merge_and_shrink(merge_strategy=merge_dfp,shrink_strategy=shrink_bisimulation(max_states=50000,threshold=1,greedy=false),label_reduction=exact(before_shrinking=true,before_merging=false)))']), - IssueConfig('rl-ginf', ['--search', 'astar(merge_and_shrink(merge_strategy=merge_linear(variable_order=reverse_level),shrink_strategy=shrink_bisimulation(max_states=infinity,threshold=1,greedy=true),label_reduction=exact(before_shrinking=true,before_merging=false)))']), - IssueConfig('cggl-ginf', ['--search', 'astar(merge_and_shrink(merge_strategy=merge_linear(variable_order=cg_goal_level),shrink_strategy=shrink_bisimulation(max_states=infinity,threshold=1,greedy=true),label_reduction=exact(before_shrinking=true,before_merging=false)))']), - IssueConfig('dfp-ginf', ['--search', 'astar(merge_and_shrink(merge_strategy=merge_dfp,shrink_strategy=shrink_bisimulation(max_states=infinity,threshold=1,greedy=true),label_reduction=exact(before_shrinking=true,before_merging=false)))']), - IssueConfig('rl-f50k', ['--search', 
'astar(merge_and_shrink(merge_strategy=merge_linear(variable_order=reverse_level),shrink_strategy=shrink_fh(max_states=50000),label_reduction=exact(before_shrinking=false,before_merging=true)))']), - IssueConfig('cggl-f50k', ['--search', 'astar(merge_and_shrink(merge_strategy=merge_linear(variable_order=cg_goal_level),shrink_strategy=shrink_fh(max_states=50000),label_reduction=exact(before_shrinking=false,before_merging=true)))']), - IssueConfig('dfp-f50k', ['--search', 'astar(merge_and_shrink(merge_strategy=merge_dfp,shrink_strategy=shrink_fh(max_states=50000),label_reduction=exact(before_shrinking=false,before_merging=true)))']), - } - - exp = IssueExperiment( - revisions=revisions, - configs=configs, - suite=suite, - test_suite=['depot:pfile1'], - processes=4, - email='silvan.sievers@unibas.ch', - ) - exp.add_resource('ms_parser', 'ms-parser.py', dest='ms-parser.py') - exp.add_command('ms-parser', ['ms_parser']) - - # planner outcome attributes - search_out_of_memory = Attribute('search_out_of_memory', absolute=True, min_wins=True) - search_out_of_time = Attribute('search_out_of_time', absolute=True, min_wins=True) - perfect_heuristic = Attribute('perfect_heuristic', absolute=True, min_wins=False) - proved_unsolvability = Attribute('proved_unsolvability', absolute=True, min_wins=False) - - # m&s attributes - ms_construction_time = Attribute('ms_construction_time', absolute=False, min_wins=True, functions=[gm]) - ms_abstraction_constructed = Attribute('ms_abstraction_constructed', absolute=True, min_wins=False) - ms_final_size = Attribute('ms_final_size', absolute=False, min_wins=True) - ms_out_of_memory = Attribute('ms_out_of_memory', absolute=True, min_wins=True) - ms_out_of_time = Attribute('ms_out_of_time', absolute=True, min_wins=True) - ms_memory_delta = Attribute('ms_memory_delta', absolute=False, min_wins=True) - - extra_attributes = [ - search_out_of_memory, - search_out_of_time, - perfect_heuristic, - proved_unsolvability, - - ms_construction_time, - 
ms_abstraction_constructed, - ms_final_size, - ms_out_of_memory, - ms_out_of_time, - ms_memory_delta, - ] - attributes = exp.DEFAULT_TABLE_ATTRIBUTES - attributes.extend(extra_attributes) - - exp.add_comparison_table_step() - - exp() - -main(revisions=['issue604-v3', 'issue604-v4']) diff --git a/experiments/issue604/v5.py b/experiments/issue604/v5.py deleted file mode 100755 index 590e5c38e1..0000000000 --- a/experiments/issue604/v5.py +++ /dev/null @@ -1,98 +0,0 @@ -#! /usr/bin/env python -# -*- coding: utf-8 -*- - -from downward import suites -from lab.reports import Attribute, gm -from downward.reports.compare import CompareConfigsReport - -from common_setup import IssueConfig, IssueExperiment - -import os - -def main(revisions=None): - suite = suites.suite_optimal_with_ipc11() - - configs = { - IssueConfig('rl-b50k', ['--search', 'astar(merge_and_shrink(merge_strategy=merge_linear(variable_order=reverse_level),shrink_strategy=shrink_bisimulation(max_states=50000,threshold=1,greedy=false),label_reduction=exact(before_shrinking=true,before_merging=false)))']), - IssueConfig('cggl-b50k', ['--search', 'astar(merge_and_shrink(merge_strategy=merge_linear(variable_order=cg_goal_level),shrink_strategy=shrink_bisimulation(max_states=50000,threshold=1,greedy=false),label_reduction=exact(before_shrinking=true,before_merging=false)))']), - IssueConfig('dfp-b50k', ['--search', 'astar(merge_and_shrink(merge_strategy=merge_dfp,shrink_strategy=shrink_bisimulation(max_states=50000,threshold=1,greedy=false),label_reduction=exact(before_shrinking=true,before_merging=false)))']), - IssueConfig('rl-ginf', ['--search', 'astar(merge_and_shrink(merge_strategy=merge_linear(variable_order=reverse_level),shrink_strategy=shrink_bisimulation(max_states=infinity,threshold=1,greedy=true),label_reduction=exact(before_shrinking=true,before_merging=false)))']), - IssueConfig('cggl-ginf', ['--search', 
'astar(merge_and_shrink(merge_strategy=merge_linear(variable_order=cg_goal_level),shrink_strategy=shrink_bisimulation(max_states=infinity,threshold=1,greedy=true),label_reduction=exact(before_shrinking=true,before_merging=false)))']), - IssueConfig('dfp-ginf', ['--search', 'astar(merge_and_shrink(merge_strategy=merge_dfp,shrink_strategy=shrink_bisimulation(max_states=infinity,threshold=1,greedy=true),label_reduction=exact(before_shrinking=true,before_merging=false)))']), - IssueConfig('rl-f50k', ['--search', 'astar(merge_and_shrink(merge_strategy=merge_linear(variable_order=reverse_level),shrink_strategy=shrink_fh(max_states=50000),label_reduction=exact(before_shrinking=false,before_merging=true)))']), - IssueConfig('cggl-f50k', ['--search', 'astar(merge_and_shrink(merge_strategy=merge_linear(variable_order=cg_goal_level),shrink_strategy=shrink_fh(max_states=50000),label_reduction=exact(before_shrinking=false,before_merging=true)))']), - IssueConfig('dfp-f50k', ['--search', 'astar(merge_and_shrink(merge_strategy=merge_dfp,shrink_strategy=shrink_fh(max_states=50000),label_reduction=exact(before_shrinking=false,before_merging=true)))']), - } - - exp = IssueExperiment( - revisions=revisions, - configs=configs, - suite=suite, - test_suite=['depot:pfile1'], - processes=4, - email='silvan.sievers@unibas.ch', - ) - exp.add_resource('ms_parser', 'ms-parser.py', dest='ms-parser.py') - exp.add_command('ms-parser', ['ms_parser']) - - # planner outcome attributes - search_out_of_memory = Attribute('search_out_of_memory', absolute=True, min_wins=True) - search_out_of_time = Attribute('search_out_of_time', absolute=True, min_wins=True) - perfect_heuristic = Attribute('perfect_heuristic', absolute=True, min_wins=False) - proved_unsolvability = Attribute('proved_unsolvability', absolute=True, min_wins=False) - - # m&s attributes - ms_construction_time = Attribute('ms_construction_time', absolute=False, min_wins=True, functions=[gm]) - ms_abstraction_constructed = 
Attribute('ms_abstraction_constructed', absolute=True, min_wins=False) - ms_final_size = Attribute('ms_final_size', absolute=False, min_wins=True) - ms_out_of_memory = Attribute('ms_out_of_memory', absolute=True, min_wins=True) - ms_out_of_time = Attribute('ms_out_of_time', absolute=True, min_wins=True) - ms_memory_delta = Attribute('ms_memory_delta', absolute=False, min_wins=True) - - extra_attributes = [ - search_out_of_memory, - search_out_of_time, - perfect_heuristic, - proved_unsolvability, - - ms_construction_time, - ms_abstraction_constructed, - ms_final_size, - ms_out_of_memory, - ms_out_of_time, - ms_memory_delta, - ] - attributes = exp.DEFAULT_TABLE_ATTRIBUTES - attributes.extend(extra_attributes) - - exp.add_fetcher('data/issue604-v4-eval') - - exp.add_report(CompareConfigsReport(compared_configs=[ - ('issue604-v3-rl-b50k', 'issue604-v5-rl-b50k'), - ('issue604-v3-cggl-b50k', 'issue604-v5-cggl-b50k'), - ('issue604-v3-dfp-b50k', 'issue604-v5-dfp-b50k'), - ('issue604-v3-rl-ginf', 'issue604-v5-rl-ginf'), - ('issue604-v3-cggl-ginf', 'issue604-v5-cggl-ginf'), - ('issue604-v3-dfp-ginf', 'issue604-v5-dfp-ginf'), - ('issue604-v3-rl-f50k', 'issue604-v5-rl-f50k'), - ('issue604-v3-cggl-f50k', 'issue604-v5-cggl-f50k'), - ('issue604-v3-dfp-f50k', 'issue604-v5-dfp-f50k'), - ],attributes=attributes),outfile=os.path.join( - exp.eval_dir, 'issue604-v3-v5-comparison.html')) - - exp.add_report(CompareConfigsReport(compared_configs=[ - ('issue604-v4-rl-b50k', 'issue604-v5-rl-b50k'), - ('issue604-v4-cggl-b50k', 'issue604-v5-cggl-b50k'), - ('issue604-v4-dfp-b50k', 'issue604-v5-dfp-b50k'), - ('issue604-v4-rl-ginf', 'issue604-v5-rl-ginf'), - ('issue604-v4-cggl-ginf', 'issue604-v5-cggl-ginf'), - ('issue604-v4-dfp-ginf', 'issue604-v5-dfp-ginf'), - ('issue604-v4-rl-f50k', 'issue604-v5-rl-f50k'), - ('issue604-v4-cggl-f50k', 'issue604-v5-cggl-f50k'), - ('issue604-v4-dfp-f50k', 'issue604-v5-dfp-f50k'), - ],attributes=attributes),outfile=os.path.join( - exp.eval_dir, 
'issue604-v4-v5-comparison.html')) - - exp() - -main(revisions=['issue604-v5']) diff --git a/experiments/issue604/v6.py b/experiments/issue604/v6.py deleted file mode 100755 index 5b77ea7389..0000000000 --- a/experiments/issue604/v6.py +++ /dev/null @@ -1,69 +0,0 @@ -#! /usr/bin/env python -# -*- coding: utf-8 -*- - -from downward import suites -from lab.reports import Attribute, gm - -from common_setup import IssueConfig, IssueExperiment - -def main(revisions=None): - suite = suites.suite_optimal_with_ipc11() - - configs = { - IssueConfig('rl-b50k', ['--search', 'astar(merge_and_shrink(merge_strategy=merge_linear(variable_order=reverse_level),shrink_strategy=shrink_bisimulation(max_states=50000,threshold=1,greedy=false),label_reduction=exact(before_shrinking=true,before_merging=false)))']), - #IssueConfig('cggl-b50k', ['--search', 'astar(merge_and_shrink(merge_strategy=merge_linear(variable_order=cg_goal_level),shrink_strategy=shrink_bisimulation(max_states=50000,threshold=1,greedy=false),label_reduction=exact(before_shrinking=true,before_merging=false)))']), - IssueConfig('dfp-b50k', ['--search', 'astar(merge_and_shrink(merge_strategy=merge_dfp,shrink_strategy=shrink_bisimulation(max_states=50000,threshold=1,greedy=false),label_reduction=exact(before_shrinking=true,before_merging=false)))']), - #IssueConfig('rl-ginf', ['--search', 'astar(merge_and_shrink(merge_strategy=merge_linear(variable_order=reverse_level),shrink_strategy=shrink_bisimulation(max_states=infinity,threshold=1,greedy=true),label_reduction=exact(before_shrinking=true,before_merging=false)))']), - IssueConfig('cggl-ginf', ['--search', 'astar(merge_and_shrink(merge_strategy=merge_linear(variable_order=cg_goal_level),shrink_strategy=shrink_bisimulation(max_states=infinity,threshold=1,greedy=true),label_reduction=exact(before_shrinking=true,before_merging=false)))']), - #IssueConfig('dfp-ginf', ['--search', 
'astar(merge_and_shrink(merge_strategy=merge_dfp,shrink_strategy=shrink_bisimulation(max_states=infinity,threshold=1,greedy=true),label_reduction=exact(before_shrinking=true,before_merging=false)))']), - #IssueConfig('rl-f50k', ['--search', 'astar(merge_and_shrink(merge_strategy=merge_linear(variable_order=reverse_level),shrink_strategy=shrink_fh(max_states=50000),label_reduction=exact(before_shrinking=false,before_merging=true)))']), - #IssueConfig('cggl-f50k', ['--search', 'astar(merge_and_shrink(merge_strategy=merge_linear(variable_order=cg_goal_level),shrink_strategy=shrink_fh(max_states=50000),label_reduction=exact(before_shrinking=false,before_merging=true)))']), - IssueConfig('dfp-f50k', ['--search', 'astar(merge_and_shrink(merge_strategy=merge_dfp,shrink_strategy=shrink_fh(max_states=50000),label_reduction=exact(before_shrinking=false,before_merging=true)))']), - } - - exp = IssueExperiment( - revisions=revisions, - configs=configs, - suite=suite, - test_suite=['depot:pfile1'], - processes=4, - email='silvan.sievers@unibas.ch', - ) - exp.add_resource('ms_parser', 'ms-parser.py', dest='ms-parser.py') - exp.add_command('ms-parser', ['ms_parser']) - - # planner outcome attributes - search_out_of_memory = Attribute('search_out_of_memory', absolute=True, min_wins=True) - search_out_of_time = Attribute('search_out_of_time', absolute=True, min_wins=True) - perfect_heuristic = Attribute('perfect_heuristic', absolute=True, min_wins=False) - proved_unsolvability = Attribute('proved_unsolvability', absolute=True, min_wins=False) - - # m&s attributes - ms_construction_time = Attribute('ms_construction_time', absolute=False, min_wins=True, functions=[gm]) - ms_abstraction_constructed = Attribute('ms_abstraction_constructed', absolute=True, min_wins=False) - ms_final_size = Attribute('ms_final_size', absolute=False, min_wins=True) - ms_out_of_memory = Attribute('ms_out_of_memory', absolute=True, min_wins=True) - ms_out_of_time = Attribute('ms_out_of_time', absolute=True, 
min_wins=True) - ms_memory_delta = Attribute('ms_memory_delta', absolute=False, min_wins=True) - - extra_attributes = [ - search_out_of_memory, - search_out_of_time, - perfect_heuristic, - proved_unsolvability, - - ms_construction_time, - ms_abstraction_constructed, - ms_final_size, - ms_out_of_memory, - ms_out_of_time, - ms_memory_delta, - ] - attributes = exp.DEFAULT_TABLE_ATTRIBUTES - attributes.extend(extra_attributes) - - exp.add_comparison_table_step() - - exp() - -main(revisions=['issue604-v5', 'issue604-v6']) diff --git a/experiments/issue604/v7-base.py b/experiments/issue604/v7-base.py deleted file mode 100755 index 6d196f9482..0000000000 --- a/experiments/issue604/v7-base.py +++ /dev/null @@ -1,110 +0,0 @@ -#! /usr/bin/env python -# -*- coding: utf-8 -*- - -from downward import suites -from lab.reports import Attribute, gm -from downward.reports.compare import CompareConfigsReport - -from common_setup import IssueConfig, IssueExperiment - -import os - -def main(revisions=[]): - suite = suites.suite_optimal_with_ipc11() - - configs = { - } - - exp = IssueExperiment( - revisions=revisions, - configs=configs, - suite=suite, - test_suite=['depot:pfile1'], - processes=4, - email='silvan.sievers@unibas.ch', - ) - exp.add_resource('ms_parser', 'ms-parser.py', dest='ms-parser.py') - exp.add_command('ms-parser', ['ms_parser']) - - # planner outcome attributes - search_out_of_memory = Attribute('search_out_of_memory', absolute=True, min_wins=True) - search_out_of_time = Attribute('search_out_of_time', absolute=True, min_wins=True) - perfect_heuristic = Attribute('perfect_heuristic', absolute=True, min_wins=False) - proved_unsolvability = Attribute('proved_unsolvability', absolute=True, min_wins=False) - - # m&s attributes - ms_construction_time = Attribute('ms_construction_time', absolute=False, min_wins=True, functions=[gm]) - ms_abstraction_constructed = Attribute('ms_abstraction_constructed', absolute=True, min_wins=False) - ms_final_size = 
Attribute('ms_final_size', absolute=False, min_wins=True) - ms_out_of_memory = Attribute('ms_out_of_memory', absolute=True, min_wins=True) - ms_out_of_time = Attribute('ms_out_of_time', absolute=True, min_wins=True) - ms_memory_delta = Attribute('ms_memory_delta', absolute=False, min_wins=True) - - extra_attributes = [ - search_out_of_memory, - search_out_of_time, - perfect_heuristic, - proved_unsolvability, - - ms_construction_time, - ms_abstraction_constructed, - ms_final_size, - ms_out_of_memory, - ms_out_of_time, - ms_memory_delta, - ] - attributes = exp.DEFAULT_TABLE_ATTRIBUTES - attributes.extend(extra_attributes) - - exp.add_fetcher('data/issue604-v1-eval',filter_config=[ - 'issue604-base-rl-b50k', - 'issue604-base-cggl-b50k', - 'issue604-base-dfp-b50k', - 'issue604-base-rl-ginf', - 'issue604-base-cggl-ginf', - 'issue604-base-dfp-ginf', - 'issue604-base-rl-f50k', - 'issue604-base-cggl-f50k', - 'issue604-base-dfp-f50k', - ]) - - exp.add_fetcher('data/issue604-v7-eval',filter_config=[ - 'issue604-v7-rl-b50k', - 'issue604-v7-cggl-b50k', - 'issue604-v7-dfp-b50k', - 'issue604-v7-rl-ginf', - 'issue604-v7-cggl-ginf', - 'issue604-v7-dfp-ginf', - 'issue604-v7-rl-f50k', - 'issue604-v7-cggl-f50k', - 'issue604-v7-dfp-f50k', - ]) - - exp.add_fetcher('data/issue604-v7-rest-eval',filter_config=[ - 'issue604-v7-rl-b50k', - 'issue604-v7-cggl-b50k', - 'issue604-v7-dfp-b50k', - 'issue604-v7-rl-ginf', - 'issue604-v7-cggl-ginf', - 'issue604-v7-dfp-ginf', - 'issue604-v7-rl-f50k', - 'issue604-v7-cggl-f50k', - 'issue604-v7-dfp-f50k', - ]) - - exp.add_report(CompareConfigsReport(compared_configs=[ - ('issue604-base-rl-b50k', 'issue604-v7-rl-b50k'), - ('issue604-base-cggl-b50k', 'issue604-v7-cggl-b50k'), - ('issue604-base-dfp-b50k', 'issue604-v7-dfp-b50k'), - ('issue604-base-rl-ginf', 'issue604-v7-rl-ginf'), - ('issue604-base-cggl-ginf', 'issue604-v7-cggl-ginf'), - ('issue604-base-dfp-ginf', 'issue604-v7-dfp-ginf'), - ('issue604-base-rl-f50k', 'issue604-v7-rl-f50k'), - 
('issue604-base-cggl-f50k', 'issue604-v7-cggl-f50k'), - ('issue604-base-dfp-f50k', 'issue604-v7-dfp-f50k'), - ],attributes=attributes),outfile=os.path.join( - exp.eval_dir, 'issue604-base-v7-comparison.html')) - - exp() - -main() diff --git a/experiments/issue604/v7-rest.py b/experiments/issue604/v7-rest.py deleted file mode 100755 index ff316e3974..0000000000 --- a/experiments/issue604/v7-rest.py +++ /dev/null @@ -1,69 +0,0 @@ -#! /usr/bin/env python -# -*- coding: utf-8 -*- - -from downward import suites -from lab.reports import Attribute, gm - -from common_setup import IssueConfig, IssueExperiment - -def main(revisions=None): - suite = suites.suite_optimal_with_ipc11() - - configs = { - #IssueConfig('rl-b50k', ['--search', 'astar(merge_and_shrink(merge_strategy=merge_linear(variable_order=reverse_level),shrink_strategy=shrink_bisimulation(max_states=50000,threshold=1,greedy=false),label_reduction=exact(before_shrinking=true,before_merging=false)))']), - IssueConfig('cggl-b50k', ['--search', 'astar(merge_and_shrink(merge_strategy=merge_linear(variable_order=cg_goal_level),shrink_strategy=shrink_bisimulation(max_states=50000,threshold=1,greedy=false),label_reduction=exact(before_shrinking=true,before_merging=false)))']), - #IssueConfig('dfp-b50k', ['--search', 'astar(merge_and_shrink(merge_strategy=merge_dfp,shrink_strategy=shrink_bisimulation(max_states=50000,threshold=1,greedy=false),label_reduction=exact(before_shrinking=true,before_merging=false)))']), - IssueConfig('rl-ginf', ['--search', 'astar(merge_and_shrink(merge_strategy=merge_linear(variable_order=reverse_level),shrink_strategy=shrink_bisimulation(max_states=infinity,threshold=1,greedy=true),label_reduction=exact(before_shrinking=true,before_merging=false)))']), - #IssueConfig('cggl-ginf', ['--search', 
'astar(merge_and_shrink(merge_strategy=merge_linear(variable_order=cg_goal_level),shrink_strategy=shrink_bisimulation(max_states=infinity,threshold=1,greedy=true),label_reduction=exact(before_shrinking=true,before_merging=false)))']), - IssueConfig('dfp-ginf', ['--search', 'astar(merge_and_shrink(merge_strategy=merge_dfp,shrink_strategy=shrink_bisimulation(max_states=infinity,threshold=1,greedy=true),label_reduction=exact(before_shrinking=true,before_merging=false)))']), - IssueConfig('rl-f50k', ['--search', 'astar(merge_and_shrink(merge_strategy=merge_linear(variable_order=reverse_level),shrink_strategy=shrink_fh(max_states=50000),label_reduction=exact(before_shrinking=false,before_merging=true)))']), - IssueConfig('cggl-f50k', ['--search', 'astar(merge_and_shrink(merge_strategy=merge_linear(variable_order=cg_goal_level),shrink_strategy=shrink_fh(max_states=50000),label_reduction=exact(before_shrinking=false,before_merging=true)))']), - #IssueConfig('dfp-f50k', ['--search', 'astar(merge_and_shrink(merge_strategy=merge_dfp,shrink_strategy=shrink_fh(max_states=50000),label_reduction=exact(before_shrinking=false,before_merging=true)))']), - } - - exp = IssueExperiment( - revisions=revisions, - configs=configs, - suite=suite, - test_suite=['depot:pfile1'], - processes=4, - email='silvan.sievers@unibas.ch', - ) - exp.add_resource('ms_parser', 'ms-parser.py', dest='ms-parser.py') - exp.add_command('ms-parser', ['ms_parser']) - - # planner outcome attributes - search_out_of_memory = Attribute('search_out_of_memory', absolute=True, min_wins=True) - search_out_of_time = Attribute('search_out_of_time', absolute=True, min_wins=True) - perfect_heuristic = Attribute('perfect_heuristic', absolute=True, min_wins=False) - proved_unsolvability = Attribute('proved_unsolvability', absolute=True, min_wins=False) - - # m&s attributes - ms_construction_time = Attribute('ms_construction_time', absolute=False, min_wins=True, functions=[gm]) - ms_abstraction_constructed = 
Attribute('ms_abstraction_constructed', absolute=True, min_wins=False) - ms_final_size = Attribute('ms_final_size', absolute=False, min_wins=True) - ms_out_of_memory = Attribute('ms_out_of_memory', absolute=True, min_wins=True) - ms_out_of_time = Attribute('ms_out_of_time', absolute=True, min_wins=True) - ms_memory_delta = Attribute('ms_memory_delta', absolute=False, min_wins=True) - - extra_attributes = [ - search_out_of_memory, - search_out_of_time, - perfect_heuristic, - proved_unsolvability, - - ms_construction_time, - ms_abstraction_constructed, - ms_final_size, - ms_out_of_memory, - ms_out_of_time, - ms_memory_delta, - ] - attributes = exp.DEFAULT_TABLE_ATTRIBUTES - attributes.extend(extra_attributes) - - exp.add_comparison_table_step() - - exp() - -main(revisions=['issue604-v6', 'issue604-v7']) diff --git a/experiments/issue604/v7.py b/experiments/issue604/v7.py deleted file mode 100755 index 742f64887c..0000000000 --- a/experiments/issue604/v7.py +++ /dev/null @@ -1,69 +0,0 @@ -#! 
/usr/bin/env python -# -*- coding: utf-8 -*- - -from downward import suites -from lab.reports import Attribute, gm - -from common_setup import IssueConfig, IssueExperiment - -def main(revisions=None): - suite = suites.suite_optimal_with_ipc11() - - configs = { - IssueConfig('rl-b50k', ['--search', 'astar(merge_and_shrink(merge_strategy=merge_linear(variable_order=reverse_level),shrink_strategy=shrink_bisimulation(max_states=50000,threshold=1,greedy=false),label_reduction=exact(before_shrinking=true,before_merging=false)))']), - #IssueConfig('cggl-b50k', ['--search', 'astar(merge_and_shrink(merge_strategy=merge_linear(variable_order=cg_goal_level),shrink_strategy=shrink_bisimulation(max_states=50000,threshold=1,greedy=false),label_reduction=exact(before_shrinking=true,before_merging=false)))']), - IssueConfig('dfp-b50k', ['--search', 'astar(merge_and_shrink(merge_strategy=merge_dfp,shrink_strategy=shrink_bisimulation(max_states=50000,threshold=1,greedy=false),label_reduction=exact(before_shrinking=true,before_merging=false)))']), - #IssueConfig('rl-ginf', ['--search', 'astar(merge_and_shrink(merge_strategy=merge_linear(variable_order=reverse_level),shrink_strategy=shrink_bisimulation(max_states=infinity,threshold=1,greedy=true),label_reduction=exact(before_shrinking=true,before_merging=false)))']), - IssueConfig('cggl-ginf', ['--search', 'astar(merge_and_shrink(merge_strategy=merge_linear(variable_order=cg_goal_level),shrink_strategy=shrink_bisimulation(max_states=infinity,threshold=1,greedy=true),label_reduction=exact(before_shrinking=true,before_merging=false)))']), - #IssueConfig('dfp-ginf', ['--search', 'astar(merge_and_shrink(merge_strategy=merge_dfp,shrink_strategy=shrink_bisimulation(max_states=infinity,threshold=1,greedy=true),label_reduction=exact(before_shrinking=true,before_merging=false)))']), - #IssueConfig('rl-f50k', ['--search', 
'astar(merge_and_shrink(merge_strategy=merge_linear(variable_order=reverse_level),shrink_strategy=shrink_fh(max_states=50000),label_reduction=exact(before_shrinking=false,before_merging=true)))']), - #IssueConfig('cggl-f50k', ['--search', 'astar(merge_and_shrink(merge_strategy=merge_linear(variable_order=cg_goal_level),shrink_strategy=shrink_fh(max_states=50000),label_reduction=exact(before_shrinking=false,before_merging=true)))']), - IssueConfig('dfp-f50k', ['--search', 'astar(merge_and_shrink(merge_strategy=merge_dfp,shrink_strategy=shrink_fh(max_states=50000),label_reduction=exact(before_shrinking=false,before_merging=true)))']), - } - - exp = IssueExperiment( - revisions=revisions, - configs=configs, - suite=suite, - test_suite=['depot:pfile1'], - processes=4, - email='silvan.sievers@unibas.ch', - ) - exp.add_resource('ms_parser', 'ms-parser.py', dest='ms-parser.py') - exp.add_command('ms-parser', ['ms_parser']) - - # planner outcome attributes - search_out_of_memory = Attribute('search_out_of_memory', absolute=True, min_wins=True) - search_out_of_time = Attribute('search_out_of_time', absolute=True, min_wins=True) - perfect_heuristic = Attribute('perfect_heuristic', absolute=True, min_wins=False) - proved_unsolvability = Attribute('proved_unsolvability', absolute=True, min_wins=False) - - # m&s attributes - ms_construction_time = Attribute('ms_construction_time', absolute=False, min_wins=True, functions=[gm]) - ms_abstraction_constructed = Attribute('ms_abstraction_constructed', absolute=True, min_wins=False) - ms_final_size = Attribute('ms_final_size', absolute=False, min_wins=True) - ms_out_of_memory = Attribute('ms_out_of_memory', absolute=True, min_wins=True) - ms_out_of_time = Attribute('ms_out_of_time', absolute=True, min_wins=True) - ms_memory_delta = Attribute('ms_memory_delta', absolute=False, min_wins=True) - - extra_attributes = [ - search_out_of_memory, - search_out_of_time, - perfect_heuristic, - proved_unsolvability, - - ms_construction_time, - 
ms_abstraction_constructed, - ms_final_size, - ms_out_of_memory, - ms_out_of_time, - ms_memory_delta, - ] - attributes = exp.DEFAULT_TABLE_ATTRIBUTES - attributes.extend(extra_attributes) - - exp.add_comparison_table_step() - - exp() - -main(revisions=['issue604-v6', 'issue604-v7']) diff --git a/experiments/issue611/peak-memory-microbenchmark/.gitignore b/experiments/issue611/peak-memory-microbenchmark/.gitignore deleted file mode 100644 index 10e7a1e57c..0000000000 --- a/experiments/issue611/peak-memory-microbenchmark/.gitignore +++ /dev/null @@ -1,3 +0,0 @@ -/.obj/ -/benchmark -/Makefile.depend diff --git a/experiments/issue611/peak-memory-microbenchmark/Makefile b/experiments/issue611/peak-memory-microbenchmark/Makefile deleted file mode 100644 index 7682214f17..0000000000 --- a/experiments/issue611/peak-memory-microbenchmark/Makefile +++ /dev/null @@ -1,146 +0,0 @@ -DOWNWARD_BITWIDTH=32 - -HEADERS = \ - system.h \ - system_unix.h - -SOURCES = main.cc $(HEADERS:%.h=%.cc) -TARGET = benchmark - -default: release - -OBJECT_SUFFIX_RELEASE = .release -TARGET_SUFFIX_RELEASE = -OBJECT_SUFFIX_DEBUG = .debug -TARGET_SUFFIX_DEBUG = -debug -OBJECT_SUFFIX_PROFILE = .profile -TARGET_SUFFIX_PROFILE = -profile - -OBJECTS_RELEASE = $(SOURCES:%.cc=.obj/%$(OBJECT_SUFFIX_RELEASE).o) -TARGET_RELEASE = $(TARGET)$(TARGET_SUFFIX_RELEASE) - -OBJECTS_DEBUG = $(SOURCES:%.cc=.obj/%$(OBJECT_SUFFIX_DEBUG).o) -TARGET_DEBUG = $(TARGET)$(TARGET_SUFFIX_DEBUG) - -OBJECTS_PROFILE = $(SOURCES:%.cc=.obj/%$(OBJECT_SUFFIX_PROFILE).o) -TARGET_PROFILE = $(TARGET)$(TARGET_SUFFIX_PROFILE) - -DEPEND = $(CXX) -MM - -## CXXFLAGS, LDFLAGS, POSTLINKOPT are options for compiler and linker -## that are used for all three targets (release, debug, and profile). -## (POSTLINKOPT are options that appear *after* all object files.) 
- -ifeq ($(DOWNWARD_BITWIDTH), 32) - BITWIDTHOPT = -m32 -else ifeq ($(DOWNWARD_BITWIDTH), 64) - BITWIDTHOPT = -m64 -else ifneq ($(DOWNWARD_BITWIDTH), native) - $(error Bad value for DOWNWARD_BITWIDTH) -endif - -CXXFLAGS = -CXXFLAGS += -g -CXXFLAGS += $(BITWIDTHOPT) -# Note: we write "-std=c++0x" rather than "-std=c++11" to support gcc 4.4. -CXXFLAGS += -std=c++0x -Wall -Wextra -pedantic -Wno-deprecated -Werror - -LDFLAGS = -LDFLAGS += $(BITWIDTHOPT) -LDFLAGS += -g - -POSTLINKOPT = - -CXXFLAGS_RELEASE = -O3 -DNDEBUG -fomit-frame-pointer -CXXFLAGS_DEBUG = -O3 -CXXFLAGS_PROFILE = -O3 -pg - -LDFLAGS_RELEASE = -LDFLAGS_DEBUG = -LDFLAGS_PROFILE = -pg - -POSTLINKOPT_RELEASE = -POSTLINKOPT_DEBUG = -POSTLINKOPT_PROFILE = - -LDFLAGS_RELEASE += -static -static-libgcc - -POSTLINKOPT_RELEASE += -Wl,-Bstatic -lrt -POSTLINKOPT_DEBUG += -lrt -POSTLINKOPT_PROFILE += -lrt - -all: release debug profile - -## Build rules for the release target follow. - -release: $(TARGET_RELEASE) - -$(TARGET_RELEASE): $(OBJECTS_RELEASE) - $(CXX) $(LDFLAGS) $(LDFLAGS_RELEASE) $(OBJECTS_RELEASE) $(POSTLINKOPT) $(POSTLINKOPT_RELEASE) -o $(TARGET_RELEASE) - -$(OBJECTS_RELEASE): .obj/%$(OBJECT_SUFFIX_RELEASE).o: %.cc - @mkdir -p $$(dirname $@) - $(CXX) $(CXXFLAGS) $(CXXFLAGS_RELEASE) -c $< -o $@ - -## Build rules for the debug target follow. - -debug: $(TARGET_DEBUG) - -$(TARGET_DEBUG): $(OBJECTS_DEBUG) - $(CXX) $(LDFLAGS) $(LDFLAGS_DEBUG) $(OBJECTS_DEBUG) $(POSTLINKOPT) $(POSTLINKOPT_DEBUG) -o $(TARGET_DEBUG) - -$(OBJECTS_DEBUG): .obj/%$(OBJECT_SUFFIX_DEBUG).o: %.cc - @mkdir -p $$(dirname $@) - $(CXX) $(CXXFLAGS) $(CXXFLAGS_DEBUG) -c $< -o $@ - -## Build rules for the profile target follow. 
- -profile: $(TARGET_PROFILE) - -$(TARGET_PROFILE): $(OBJECTS_PROFILE) - $(CXX) $(LDFLAGS) $(LDFLAGS_PROFILE) $(OBJECTS_PROFILE) $(POSTLINKOPT) $(POSTLINKOPT_PROFILE) -o $(TARGET_PROFILE) - -$(OBJECTS_PROFILE): .obj/%$(OBJECT_SUFFIX_PROFILE).o: %.cc - @mkdir -p $$(dirname $@) - $(CXX) $(CXXFLAGS) $(CXXFLAGS_PROFILE) -c $< -o $@ - -## Additional targets follow. - -PROFILE: $(TARGET_PROFILE) - ./$(TARGET_PROFILE) $(ARGS_PROFILE) - gprof $(TARGET_PROFILE) | (cleanup-profile 2> /dev/null || cat) > PROFILE - -clean: - rm -rf .obj - rm -f *~ *.pyc - rm -f Makefile.depend gmon.out PROFILE core - rm -f sas_plan - -distclean: clean - rm -f $(TARGET_RELEASE) $(TARGET_DEBUG) $(TARGET_PROFILE) - -## NOTE: If we just call gcc -MM on a source file that lives within a -## subdirectory, it will strip the directory part in the output. Hence -## the for loop with the sed call. - -Makefile.depend: $(SOURCES) $(HEADERS) - rm -f Makefile.temp - for source in $(SOURCES) ; do \ - $(DEPEND) $(CXXFLAGS) $$source > Makefile.temp0; \ - objfile=$${source%%.cc}.o; \ - sed -i -e "s@^[^:]*:@$$objfile:@" Makefile.temp0; \ - cat Makefile.temp0 >> Makefile.temp; \ - done - rm -f Makefile.temp0 Makefile.depend - sed -e "s@\(.*\)\.o:\(.*\)@.obj/\1$(OBJECT_SUFFIX_RELEASE).o:\2@" Makefile.temp >> Makefile.depend - sed -e "s@\(.*\)\.o:\(.*\)@.obj/\1$(OBJECT_SUFFIX_DEBUG).o:\2@" Makefile.temp >> Makefile.depend - sed -e "s@\(.*\)\.o:\(.*\)@.obj/\1$(OBJECT_SUFFIX_PROFILE).o:\2@" Makefile.temp >> Makefile.depend - rm -f Makefile.temp - -ifneq ($(MAKECMDGOALS),clean) - ifneq ($(MAKECMDGOALS),distclean) - -include Makefile.depend - endif -endif - -.PHONY: default all release debug profile clean distclean diff --git a/experiments/issue611/peak-memory-microbenchmark/language.h b/experiments/issue611/peak-memory-microbenchmark/language.h deleted file mode 100644 index d0836c46b5..0000000000 --- a/experiments/issue611/peak-memory-microbenchmark/language.h +++ /dev/null @@ -1,17 +0,0 @@ -#ifndef UTILS_LANGUAGE_H 
-#define UTILS_LANGUAGE_H - -// TODO: this should depend on the compiler, not on the OS. -#if defined(_WIN32) -#define NO_RETURN __declspec(noreturn) -#else -#define NO_RETURN __attribute__((noreturn)) -#endif - -namespace Utils { -template -void unused_parameter(const T &) { -} -} - -#endif diff --git a/experiments/issue611/peak-memory-microbenchmark/main.cc b/experiments/issue611/peak-memory-microbenchmark/main.cc deleted file mode 100644 index 0feebf473d..0000000000 --- a/experiments/issue611/peak-memory-microbenchmark/main.cc +++ /dev/null @@ -1,38 +0,0 @@ -#include -#include -#include - -#include - -#include "system.h" - -using namespace std; -using namespace Utils; - - -void benchmark(const string &desc, int num_calls, - const function &func) { - cout << "Running " << desc << " " << num_calls << " times:" << flush; - clock_t start = clock(); - for (int i = 0; i < num_calls; ++i) - func(); - clock_t end = clock(); - double duration = static_cast(end - start) / CLOCKS_PER_SEC; - cout << " " << duration << " seconds" << endl; -} - - -int main(int, char **) { - // const int NUM_ITERATIONS = 100000000; - const int NUM_ITERATIONS = 1000000; - - benchmark("nothing", NUM_ITERATIONS, [] () {}); - benchmark("get_peak_memory_in_kb", - NUM_ITERATIONS, - [&]() {get_peak_memory_in_kb();}); - benchmark("sbrk", - NUM_ITERATIONS, - [&]() {sbrk(0);}); - cout << endl; - return 0; -} diff --git a/experiments/issue611/peak-memory-microbenchmark/system.cc b/experiments/issue611/peak-memory-microbenchmark/system.cc deleted file mode 100644 index fa4df89594..0000000000 --- a/experiments/issue611/peak-memory-microbenchmark/system.cc +++ /dev/null @@ -1,45 +0,0 @@ -#include "system.h" - - -namespace Utils { -const char *get_exit_code_message_reentrant(ExitCode exitcode) { - switch (exitcode) { - case ExitCode::PLAN_FOUND: - return "Solution found."; - case ExitCode::CRITICAL_ERROR: - return "Unexplained error occurred."; - case ExitCode::INPUT_ERROR: - return "Usage error occurred."; 
- case ExitCode::UNSUPPORTED: - return "Tried to use unsupported feature."; - case ExitCode::UNSOLVABLE: - return "Task is provably unsolvable."; - case ExitCode::UNSOLVED_INCOMPLETE: - return "Search stopped without finding a solution."; - case ExitCode::OUT_OF_MEMORY: - return "Memory limit has been reached."; - default: - return nullptr; - } -} - -bool is_exit_code_error_reentrant(ExitCode exitcode) { - switch (exitcode) { - case ExitCode::PLAN_FOUND: - case ExitCode::UNSOLVABLE: - case ExitCode::UNSOLVED_INCOMPLETE: - case ExitCode::OUT_OF_MEMORY: - return false; - case ExitCode::CRITICAL_ERROR: - case ExitCode::INPUT_ERROR: - case ExitCode::UNSUPPORTED: - default: - return true; - } -} - -void exit_with(ExitCode exitcode) { - report_exit_code_reentrant(exitcode); - exit(static_cast(exitcode)); -} -} diff --git a/experiments/issue611/peak-memory-microbenchmark/system.h b/experiments/issue611/peak-memory-microbenchmark/system.h deleted file mode 100644 index 8675138d65..0000000000 --- a/experiments/issue611/peak-memory-microbenchmark/system.h +++ /dev/null @@ -1,56 +0,0 @@ -#ifndef UTILS_SYSTEM_H -#define UTILS_SYSTEM_H - -#define LINUX 0 -#define OSX 1 -#define WINDOWS 2 - -#if defined(_WIN32) -#define OPERATING_SYSTEM WINDOWS -#include "system_windows.h" -#elif defined(__APPLE__) -#define OPERATING_SYSTEM OSX -#include "system_unix.h" -#else -#define OPERATING_SYSTEM LINUX -#include "system_unix.h" -#endif - -#include "language.h" - -#include - -#define ABORT(msg) \ - ( \ - (std::cerr << "Critical error in file " << __FILE__ \ - << ", line " << __LINE__ << ": " << std::endl \ - << (msg) << std::endl), \ - (abort()), \ - (void)0 \ - ) - - -namespace Utils { -enum class ExitCode { - PLAN_FOUND = 0, - CRITICAL_ERROR = 1, - INPUT_ERROR = 2, - UNSUPPORTED = 3, - // Task is provably unsolvable with current bound. Currently unused (see issue377). - UNSOLVABLE = 4, - // Search ended without finding a solution. 
- UNSOLVED_INCOMPLETE = 5, - OUT_OF_MEMORY = 6 -}; - -NO_RETURN extern void exit_with(ExitCode returncode); - -int get_peak_memory_in_kb(); -const char *get_exit_code_message_reentrant(ExitCode exitcode); -bool is_exit_code_error_reentrant(ExitCode exitcode); -void register_event_handlers(); -void report_exit_code_reentrant(ExitCode exitcode); -int get_process_id(); -} - -#endif diff --git a/experiments/issue611/peak-memory-microbenchmark/system_unix.cc b/experiments/issue611/peak-memory-microbenchmark/system_unix.cc deleted file mode 100644 index a7bcfdf55a..0000000000 --- a/experiments/issue611/peak-memory-microbenchmark/system_unix.cc +++ /dev/null @@ -1,250 +0,0 @@ -#include "system.h" - -#if OPERATING_SYSTEM == LINUX || OPERATING_SYSTEM == OSX -/* - NOTE: - Methods with the suffix "_reentrant" are meant to be used in event - handlers. They should all be "re-entrant", i.e. they must not use - static variables, global data, or locks. Only some system calls such as - open/read/write/close are guaranteed to be re-entrant. See - https://www.securecoding.cert.org/confluence/display/seccode/ - SIG30-C.+Call+only+asynchronous-safe+functions+within+signal+handlers - #SIG30-C.Callonlyasynchronous-safefunctionswithinsignalhandlers- - Asynchronous-Signal-SafeFunctions - for a complete list. - We also use some low level string methods where re-entrancy is not - guaranteed but very likely with most compilers. If these ever cause - any problems, we will have to replace them by re-entrant - implementations. 
- - See also: issue479 -*/ - -#include "system_unix.h" - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -using namespace std; - - -namespace Utils { -void write_reentrant(int filedescr, const char *message, int len) { - while (len > 0) { - int written = TEMP_FAILURE_RETRY(write(filedescr, message, len)); - /* - We could check the value of errno here but all errors except EINTR - are catastrophic enough to abort, so we do not need the distintion. - The error EINTR is handled by the macro TEMP_FAILURE_RETRY. - */ - if (written == -1) - abort(); - message += written; - len -= written; - } -} - -void write_reentrant_str(int filedescr, const char *message) { - write_reentrant(filedescr, message, strlen(message)); -} - -void write_reentrant_char(int filedescr, char c) { - write_reentrant(filedescr, &c, 1); -} - -void write_reentrant_int(int filedescr, int value) { - char buffer[32]; - int len = snprintf(buffer, sizeof(buffer), "%d", value); - if (len < 0) - abort(); - write_reentrant(filedescr, buffer, len); -} - -bool read_char_reentrant(int filedescr, char *c) { - int result = TEMP_FAILURE_RETRY(read(filedescr, c, 1)); - /* - We could check the value of errno here but all errors except EINTR - are catastrophic enough to abort, so we do not need the distinction. - The error EINTR is handled by the macro TEMP_FAILURE_RETRY. - */ - if (result == -1) - abort(); - return result == 1; -} - -void print_peak_memory_reentrant() { - int proc_file_descr = TEMP_FAILURE_RETRY(open("/proc/self/status", O_RDONLY)); - if (proc_file_descr == -1) { - write_reentrant_str( - STDERR_FILENO, - "critical error: could not open /proc/self/status\n"); - abort(); - } - - const char magic[] = "\nVmPeak:"; - char c; - size_t pos_magic = 0; - const size_t len_magic = sizeof(magic) - 1; - - // Find magic word. 
- while (pos_magic != len_magic && read_char_reentrant(proc_file_descr, &c)) { - if (c == magic[pos_magic]) { - ++pos_magic; - } else { - pos_magic = 0; - } - } - - if (pos_magic != len_magic) { - write_reentrant_str( - STDERR_FILENO, - "critical error: could not find VmPeak in /proc/self/status\n"); - abort(); - } - - write_reentrant_str(STDOUT_FILENO, "Peak memory: "); - - // Skip over whitespace. - while (read_char_reentrant(proc_file_descr, &c) && isspace(c)) - ; - - do { - write_reentrant_char(STDOUT_FILENO, c); - } while (read_char_reentrant(proc_file_descr, &c) && !isspace(c)); - - write_reentrant_str(STDOUT_FILENO, " KB\n"); - /* - Ignore potential errors other than EINTR (there is nothing we can do - about I/O errors or bad file descriptors here). - */ - TEMP_FAILURE_RETRY(close(proc_file_descr)); -} - -#if OPERATING_SYSTEM == LINUX -void exit_handler(int, void *) { -#elif OPERATING_SYSTEM == OSX -void exit_handler() { -#endif - print_peak_memory_reentrant(); -} - -void out_of_memory_handler() { - /* - We do not use any memory padding currently. The methods below should - only use stack memory. If we ever run into situations where the stack - memory is not sufficient, we can consider using sigaltstack to reserve - memory for the stack of the signal handler and raising a signal here. - */ - write_reentrant_str(STDOUT_FILENO, "Failed to allocate memory.\n"); - exit_with(ExitCode::OUT_OF_MEMORY); -} - -void signal_handler(int signal_number) { - print_peak_memory_reentrant(); - write_reentrant_str(STDOUT_FILENO, "caught signal "); - write_reentrant_int(STDOUT_FILENO, signal_number); - write_reentrant_str(STDOUT_FILENO, " -- exiting\n"); - raise(signal_number); -} - -/* - NOTE: we have two variants of obtaining peak memory information. - get_peak_memory_in_kb() is used during the regular execution. - print_peak_memory_in_kb_reentrant() is used in signal handlers. - The latter is slower but guarantees reentrancy. 
-*/ -int get_peak_memory_in_kb() { - // On error, produces a warning on cerr and returns -1. - int memory_in_kb = -1; - -#if OPERATING_SYSTEM == OSX - // Based on http://stackoverflow.com/questions/63166 - task_basic_info t_info; - mach_msg_type_number_t t_info_count = TASK_BASIC_INFO_COUNT; - - if (task_info(mach_task_self(), TASK_BASIC_INFO, - reinterpret_cast(&t_info), - &t_info_count) == KERN_SUCCESS) - memory_in_kb = t_info.virtual_size / 1024; -#else - ifstream procfile; - procfile.open("/proc/self/status"); - string word; - while (procfile.good()) { - procfile >> word; - if (word == "VmPeak:") { - procfile >> memory_in_kb; - break; - } - // Skip to end of line. - procfile.ignore(numeric_limits::max(), '\n'); - } - if (procfile.fail()) - memory_in_kb = -1; -#endif - - if (memory_in_kb == -1) - cerr << "warning: could not determine peak memory" << endl; - return memory_in_kb; -} - -void register_event_handlers() { - // Terminate when running out of memory. - set_new_handler(out_of_memory_handler); - - // On exit or when receiving certain signals such as SIGINT (Ctrl-C), - // print the peak memory usage. -#if OPERATING_SYSTEM == LINUX - on_exit(exit_handler, 0); -#elif OPERATING_SYSTEM == OSX - atexit(exit_handler); -#endif - struct sigaction default_signal_action; - default_signal_action.sa_handler = signal_handler; - // Block all signals we handle while one of them is handled. - sigemptyset(&default_signal_action.sa_mask); - sigaddset(&default_signal_action.sa_mask, SIGABRT); - sigaddset(&default_signal_action.sa_mask, SIGTERM); - sigaddset(&default_signal_action.sa_mask, SIGSEGV); - sigaddset(&default_signal_action.sa_mask, SIGINT); - sigaddset(&default_signal_action.sa_mask, SIGXCPU); - // Reset handler to default action after completion. 
- default_signal_action.sa_flags = SA_RESETHAND; - - sigaction(SIGABRT, &default_signal_action, 0); - sigaction(SIGTERM, &default_signal_action, 0); - sigaction(SIGSEGV, &default_signal_action, 0); - sigaction(SIGINT, &default_signal_action, 0); - sigaction(SIGXCPU, &default_signal_action, 0); -} - -void report_exit_code_reentrant(ExitCode exitcode) { - const char *message = get_exit_code_message_reentrant(exitcode); - bool is_error = is_exit_code_error_reentrant(exitcode); - if (message) { - int filedescr = is_error ? STDERR_FILENO : STDOUT_FILENO; - write_reentrant_str(filedescr, message); - write_reentrant_char(filedescr, '\n'); - } else { - write_reentrant_str(STDERR_FILENO, "Exitcode: "); - write_reentrant_int(STDERR_FILENO, static_cast(exitcode)); - write_reentrant_str(STDERR_FILENO, "\nUnknown exitcode.\n"); - abort(); - } -} - -int get_process_id() { - return getpid(); -} -} - -#endif diff --git a/experiments/issue611/peak-memory-microbenchmark/system_unix.h b/experiments/issue611/peak-memory-microbenchmark/system_unix.h deleted file mode 100644 index d71b46e275..0000000000 --- a/experiments/issue611/peak-memory-microbenchmark/system_unix.h +++ /dev/null @@ -1,4 +0,0 @@ -#ifndef UTILS_SYSTEM_UNIX_H -#define UTILS_SYSTEM_UNIX_H - -#endif diff --git a/experiments/issue621/common_setup.py b/experiments/issue621/common_setup.py deleted file mode 100644 index 6043bc4595..0000000000 --- a/experiments/issue621/common_setup.py +++ /dev/null @@ -1,357 +0,0 @@ -# -*- coding: utf-8 -*- - -import itertools -import os -import platform -import subprocess -import sys - -from lab.environments import LocalEnvironment, MaiaEnvironment -from lab.experiment import ARGPARSER -from lab.steps import Step -from lab import tools - -from downward.experiment import FastDownwardExperiment -from downward.reports.absolute import AbsoluteReport -from downward.reports.compare import CompareConfigsReport -from downward.reports.scatter import ScatterPlotReport - - -def parse_args(): - 
ARGPARSER.add_argument( - "--test", - choices=["yes", "no", "auto"], - default="auto", - dest="test_run", - help="test experiment locally on a small suite if --test=yes or " - "--test=auto and we are not on a cluster") - return ARGPARSER.parse_args() - -ARGS = parse_args() - - -def get_script(): - """Get file name of main script.""" - return tools.get_script_path() - - -def get_script_dir(): - """Get directory of main script. - - Usually a relative directory (depends on how it was called by the user.)""" - return os.path.dirname(get_script()) - - -def get_experiment_name(): - """Get name for experiment. - - Derived from the absolute filename of the main script, e.g. - "/ham/spam/eggs.py" => "spam-eggs".""" - script = os.path.abspath(get_script()) - script_dir = os.path.basename(os.path.dirname(script)) - script_base = os.path.splitext(os.path.basename(script))[0] - return "%s-%s" % (script_dir, script_base) - - -def get_data_dir(): - """Get data dir for the experiment. - - This is the subdirectory "data" of the directory containing - the main script.""" - return os.path.join(get_script_dir(), "data", get_experiment_name()) - - -def get_repo_base(): - """Get base directory of the repository, as an absolute path. - - Search upwards in the directory tree from the main script until a - directory with a subdirectory named ".hg" is found. 
- - Abort if the repo base cannot be found.""" - path = os.path.abspath(get_script_dir()) - while os.path.dirname(path) != path: - if os.path.exists(os.path.join(path, ".hg")): - return path - path = os.path.dirname(path) - sys.exit("repo base could not be found") - - -def is_running_on_cluster(): - node = platform.node() - return ( - "cluster" in node or - node.startswith("gkigrid") or - node in ["habakuk", "turtur"]) - - -def is_test_run(): - return ARGS.test_run == "yes" or ( - ARGS.test_run == "auto" and not is_running_on_cluster()) - - -def get_algo_nick(revision, config_nick): - return "{revision}-{config_nick}".format(**locals()) - - -class IssueConfig(object): - """Hold information about a planner configuration. - - See FastDownwardExperiment.add_algorithm() for documentation of the - constructor's options. - - """ - def __init__(self, nick, component_options, - build_options=None, driver_options=None): - self.nick = nick - self.component_options = component_options - self.build_options = build_options - self.driver_options = driver_options - - -class IssueExperiment(FastDownwardExperiment): - """Wrapper for FastDownwardExperiment with a few convenience features.""" - - DEFAULT_TEST_SUITE = "gripper:prob01.pddl" - - DEFAULT_TABLE_ATTRIBUTES = [ - "cost", - "coverage", - "error", - "evaluations", - "expansions", - "expansions_until_last_jump", - "generated", - "memory", - "quality", - "run_dir", - "score_evaluations", - "score_expansions", - "score_generated", - "score_memory", - "score_search_time", - "score_total_time", - "search_time", - "total_time", - ] - - DEFAULT_SCATTER_PLOT_ATTRIBUTES = [ - "evaluations", - "expansions", - "expansions_until_last_jump", - "initial_h_value", - "memory", - "search_time", - "total_time", - ] - - PORTFOLIO_ATTRIBUTES = [ - "cost", - "coverage", - "error", - "plan_length", - "run_dir", - ] - - def __init__(self, suite, revisions=[], configs={}, grid_priority=None, - path=None, test_suite=None, email=None, processes=None, 
- **kwargs): - """Create a DownwardExperiment with some convenience features. - - If *revisions* is specified, it should be a non-empty - list of revisions, which specify which planner versions to use - in the experiment. The same versions are used for translator, - preprocessor and search. :: - - IssueExperiment(revisions=["issue123", "4b3d581643"], ...) - - *configs* must be a non-empty list of IssueConfig objects. :: - - IssueExperiment(..., configs=[ - IssueConfig("ff", ["--search", "eager_greedy(ff())"]), - IssueConfig( - "lama", [], - driver_options=["--alias", "seq-sat-lama-2011"]), - ]) - - *suite* sets the benchmarks for the experiment. It must be a - single string or a list of strings specifying domains or - tasks. The downward.suites module has many predefined - suites. :: - - IssueExperiment(..., suite=["grid", "gripper:prob01.pddl"]) - - from downward import suites - IssueExperiment(..., suite=suites.suite_all()) - IssueExperiment(..., suite=suites.suite_satisficing_with_ipc11()) - IssueExperiment(..., suite=suites.suite_optimal()) - - Use *grid_priority* to set the job priority for cluster - experiments. It must be in the range [-1023, 0] where 0 is the - highest priority. By default the priority is 0. :: - - IssueExperiment(..., grid_priority=-500) - - If *path* is specified, it must be the path to where the - experiment should be built (e.g. - /home/john/experiments/issue123/exp01/). If omitted, the - experiment path is derived automatically from the main - script's filename. Example:: - - script = experiments/issue123/exp01.py --> - path = experiments/issue123/data/issue123-exp01/ - - Specify *test_suite* to set the benchmarks for experiment test - runs. By default the first gripper task is used. - - IssueExperiment(..., test_suite=["depot:pfile1", "tpp:p01.pddl"]) - - If *email* is specified, it should be an email address. This - email address will be notified upon completion of the experiments - if it is run on the cluster. 
- """ - - if is_test_run(): - kwargs["environment"] = LocalEnvironment(processes=processes) - suite = test_suite or self.DEFAULT_TEST_SUITE - elif "environment" not in kwargs: - kwargs["environment"] = MaiaEnvironment( - priority=grid_priority, email=email) - - path = path or get_data_dir() - - FastDownwardExperiment.__init__(self, path=path, **kwargs) - - repo = get_repo_base() - for rev in revisions: - for config in configs: - self.add_algorithm( - get_algo_nick(rev, config.nick), - repo, - rev, - config.component_options, - build_options=config.build_options, - driver_options=config.driver_options) - - self.add_suite(os.path.join(repo, "benchmarks"), suite) - - self._revisions = revisions - self._configs = configs - - @classmethod - def _is_portfolio(cls, config_nick): - return "fdss" in config_nick - - @classmethod - def get_supported_attributes(cls, config_nick, attributes): - if cls._is_portfolio(config_nick): - return [attr for attr in attributes - if attr in cls.PORTFOLIO_ATTRIBUTES] - return attributes - - def add_absolute_report_step(self, **kwargs): - """Add step that makes an absolute report. - - Absolute reports are useful for experiments that don't - compare revisions. - - The report is written to the experiment evaluation directory. - - All *kwargs* will be passed to the AbsoluteReport class. If - the keyword argument *attributes* is not specified, a - default list of attributes is used. :: - - exp.add_absolute_report_step(attributes=["coverage"]) - - """ - kwargs.setdefault("attributes", self.DEFAULT_TABLE_ATTRIBUTES) - report = AbsoluteReport(**kwargs) - outfile = os.path.join(self.eval_dir, - get_experiment_name() + "." + - report.output_format) - self.add_report(report, outfile=outfile) - self.add_step(Step('publish-absolute-report', - subprocess.call, - ['publish', outfile])) - - def add_comparison_table_step(self, **kwargs): - """Add a step that makes pairwise revision comparisons. 
- - Create comparative reports for all pairs of Fast Downward - revisions. Each report pairs up the runs of the same config and - lists the two absolute attribute values and their difference - for all attributes in kwargs["attributes"]. - - All *kwargs* will be passed to the CompareConfigsReport class. - If the keyword argument *attributes* is not specified, a - default list of attributes is used. :: - - exp.add_comparison_table_step(attributes=["coverage"]) - - """ - kwargs.setdefault("attributes", self.DEFAULT_TABLE_ATTRIBUTES) - - def make_comparison_tables(): - for rev1, rev2 in itertools.combinations(self._revisions, 2): - compared_configs = [] - for config in self._configs: - config_nick = config.nick - compared_configs.append( - ("%s-%s" % (rev1, config_nick), - "%s-%s" % (rev2, config_nick), - "Diff (%s)" % config_nick)) - report = CompareConfigsReport(compared_configs, **kwargs) - outfile = os.path.join( - self.eval_dir, - "%s-%s-%s-compare" % (self.name, rev1, rev2) - + "." + report.output_format) - report(self.eval_dir, outfile) - - def publish_comparison_tables(): - for rev1, rev2 in itertools.combinations(self._revisions, 2): - outfile = os.path.join( - self.eval_dir, - "%s-%s-%s-compare" % (self.name, rev1, rev2) - + ".html") - subprocess.call(['publish', outfile]) - - self.add_step(Step("make-comparison-tables", make_comparison_tables)) - self.add_step(Step("publish-comparison-tables", publish_comparison_tables)) - - def add_scatter_plot_step(self, attributes=None): - """Add a step that creates scatter plots for all revision pairs. - - Create a scatter plot for each combination of attribute, - configuration and revisions pair. If *attributes* is not - specified, a list of common scatter plot attributes is used. - For portfolios all attributes except "cost", "coverage" and - "plan_length" will be ignored. 
:: - - exp.add_scatter_plot_step(attributes=["expansions"]) - - """ - if attributes is None: - attributes = self.DEFAULT_SCATTER_PLOT_ATTRIBUTES - scatter_dir = os.path.join(self.eval_dir, "scatter") - - def make_scatter_plot(config_nick, rev1, rev2, attribute): - name = "-".join([self.name, rev1, rev2, attribute, config_nick]) - print "Make scatter plot for", name - algo1 = "%s-%s" % (rev1, config_nick) - algo2 = "%s-%s" % (rev2, config_nick) - report = ScatterPlotReport( - filter_config=[algo1, algo2], - attributes=[attribute], - get_category=lambda run1, run2: run1["domain"], - legend_location=(1.3, 0.5)) - report( - self.eval_dir, - os.path.join(scatter_dir, rev1 + "-" + rev2, name)) - - def make_scatter_plots(): - for config in self._configs: - for rev1, rev2 in itertools.combinations(self._revisions, 2): - for attribute in self.get_supported_attributes( - config.nick, attributes): - make_scatter_plot(config.nick, rev1, rev2, attribute) - - self.add_step(Step("make-scatter-plots", make_scatter_plots)) diff --git a/experiments/issue621/relativescatter.py b/experiments/issue621/relativescatter.py deleted file mode 100644 index ef06364a64..0000000000 --- a/experiments/issue621/relativescatter.py +++ /dev/null @@ -1,109 +0,0 @@ -# -*- coding: utf-8 -*- - -from collections import defaultdict -import os - -from lab import tools - -from matplotlib import ticker - -from downward.reports.scatter import ScatterPlotReport -from downward.reports.plot import PlotReport, Matplotlib, MatplotlibPlot - - -# TODO: handle outliers - -# TODO: this is mostly copied from ScatterMatplotlib (scatter.py) -class RelativeScatterMatplotlib(Matplotlib): - @classmethod - def _plot(cls, report, axes, categories, styles): - # Display grid - axes.grid(b=True, linestyle='-', color='0.75') - - has_points = False - # Generate the scatter plots - for category, coords in sorted(categories.items()): - X, Y = zip(*coords) - axes.scatter(X, Y, s=42, label=category, **styles[category]) - if X and Y: - 
has_points = True - - if report.xscale == 'linear' or report.yscale == 'linear': - plot_size = report.missing_val * 1.01 - else: - plot_size = report.missing_val * 1.25 - - # make 5 ticks above and below 1 - yticks = [] - tick_step = report.ylim_top**(1/5.0) - for i in xrange(-5, 6): - yticks.append(tick_step**i) - axes.set_yticks(yticks) - axes.get_yaxis().set_major_formatter(ticker.ScalarFormatter()) - - axes.set_xlim(report.xlim_left or -1, report.xlim_right or plot_size) - axes.set_ylim(report.ylim_bottom or -1, report.ylim_top or plot_size) - - for axis in [axes.xaxis, axes.yaxis]: - MatplotlibPlot.change_axis_formatter(axis, - report.missing_val if report.show_missing else None) - return has_points - - - -class RelativeScatterPlotReport(ScatterPlotReport): - """ - Generate a scatter plot that shows how a specific attribute in two - configurations. The attribute value in config 1 is shown on the - x-axis and the relation to the value in config 2 on the y-axis. - """ - - def __init__(self, show_missing=True, get_category=None, **kwargs): - ScatterPlotReport.__init__(self, show_missing, get_category, **kwargs) - if self.output_format == 'tex': - raise "not supported" - else: - self.writer = RelativeScatterMatplotlib - - def _fill_categories(self, runs): - # We discard the *runs* parameter. 
- # Map category names to value tuples - categories = defaultdict(list) - self.ylim_bottom = 2 - self.ylim_top = 0.5 - self.xlim_left = float("inf") - for (domain, problem), runs in self.problem_runs.items(): - if len(runs) != 2: - continue - run1, run2 = runs - assert (run1['config'] == self.configs[0] and - run2['config'] == self.configs[1]) - val1 = run1.get(self.attribute) - val2 = run2.get(self.attribute) - if val1 is None or val2 is None: - continue - category = self.get_category(run1, run2) - assert val1 > 0, (domain, problem, self.configs[0], val1) - assert val2 > 0, (domain, problem, self.configs[1], val2) - x = val1 - y = val2 / float(val1) - - categories[category].append((x, y)) - - self.ylim_top = max(self.ylim_top, y) - self.ylim_bottom = min(self.ylim_bottom, y) - self.xlim_left = min(self.xlim_left, x) - - # center around 1 - if self.ylim_bottom < 1: - self.ylim_top = max(self.ylim_top, 1 / float(self.ylim_bottom)) - if self.ylim_top > 1: - self.ylim_bottom = min(self.ylim_bottom, 1 / float(self.ylim_top)) - return categories - - def _set_scales(self, xscale, yscale): - # ScatterPlots use log-scaling on the x-axis by default. - default_xscale = 'log' - if self.attribute and self.attribute in self.LINEAR: - default_xscale = 'linear' - PlotReport._set_scales(self, xscale or default_xscale, 'log') diff --git a/experiments/issue621/v1.py b/experiments/issue621/v1.py deleted file mode 100755 index 0671882e1a..0000000000 --- a/experiments/issue621/v1.py +++ /dev/null @@ -1,39 +0,0 @@ -#! 
/usr/bin/env python -# -*- coding: utf-8 -*- - -from downward import suites - -import common_setup -from common_setup import IssueConfig, IssueExperiment -from relativescatter import RelativeScatterPlotReport - - -configs = [ - IssueConfig( - "cegar-10K-original", - ["--search", "astar(cegar(subtasks=[original()],max_states=10000,max_time=infinity))"]), -] -revisions = ["issue621-base", "issue621-v1"] - -exp = IssueExperiment( - revisions=revisions, - configs=configs, - suite=suites.suite_optimal_with_ipc11(), - test_suite=["depot:pfile1"], - email="jendrik.seipp@unibas.ch", -) - -exp.add_comparison_table_step() - -for attribute in ["memory", "total_time"]: - for config in configs: - exp.add_report( - RelativeScatterPlotReport( - attributes=[attribute], - filter_config=["{}-{}".format(rev, config.nick) for rev in revisions], - get_category=lambda run1, run2: run1.get("domain"), - ), - outfile="{}-{}-{}.png".format(exp.name, attribute, config.nick) - ) - -exp() diff --git a/experiments/issue627/common_setup.py b/experiments/issue627/common_setup.py deleted file mode 100644 index 0b2eebe0ff..0000000000 --- a/experiments/issue627/common_setup.py +++ /dev/null @@ -1,357 +0,0 @@ -# -*- coding: utf-8 -*- - -import itertools -import os -import platform -import subprocess -import sys - -from lab.environments import LocalEnvironment, MaiaEnvironment -from lab.experiment import ARGPARSER -from lab.steps import Step -from lab import tools - -from downward.experiment import FastDownwardExperiment -from downward.reports.absolute import AbsoluteReport -from downward.reports.compare import CompareConfigsReport -from downward.reports.scatter import ScatterPlotReport - - -def parse_args(): - ARGPARSER.add_argument( - "--test", - choices=["yes", "no", "auto"], - default="auto", - dest="test_run", - help="test experiment locally on a small suite if --test=yes or " - "--test=auto and we are not on a cluster") - return ARGPARSER.parse_args() - -ARGS = parse_args() - - -def 
get_script(): - """Get file name of main script.""" - return tools.get_script_path() - - -def get_script_dir(): - """Get directory of main script. - - Usually a relative directory (depends on how it was called by the user.)""" - return os.path.dirname(get_script()) - - -def get_experiment_name(): - """Get name for experiment. - - Derived from the absolute filename of the main script, e.g. - "/ham/spam/eggs.py" => "spam-eggs".""" - script = os.path.abspath(get_script()) - script_dir = os.path.basename(os.path.dirname(script)) - script_base = os.path.splitext(os.path.basename(script))[0] - return "%s-%s" % (script_dir, script_base) - - -def get_data_dir(): - """Get data dir for the experiment. - - This is the subdirectory "data" of the directory containing - the main script.""" - return os.path.join(get_script_dir(), "data", get_experiment_name()) - - -def get_repo_base(): - """Get base directory of the repository, as an absolute path. - - Search upwards in the directory tree from the main script until a - directory with a subdirectory named ".hg" is found. - - Abort if the repo base cannot be found.""" - path = os.path.abspath(get_script_dir()) - while os.path.dirname(path) != path: - if os.path.exists(os.path.join(path, ".hg")): - return path - path = os.path.dirname(path) - sys.exit("repo base could not be found") - - -def is_running_on_cluster(): - node = platform.node() - return ( - "cluster" in node or - node.startswith("gkigrid") or - node in ["habakuk", "turtur"]) - - -def is_test_run(): - return ARGS.test_run == "yes" or ( - ARGS.test_run == "auto" and not is_running_on_cluster()) - - -def get_algo_nick(revision, config_nick): - return "{revision}-{config_nick}".format(**locals()) - - -class IssueConfig(object): - """Hold information about a planner configuration. - - See FastDownwardExperiment.add_algorithm() for documentation of the - constructor's options. 
- - """ - def __init__(self, nick, component_options, - build_options=None, driver_options=None): - self.nick = nick - self.component_options = component_options - self.build_options = build_options - self.driver_options = driver_options - - -class IssueExperiment(FastDownwardExperiment): - """Wrapper for FastDownwardExperiment with a few convenience features.""" - - DEFAULT_TEST_SUITE = "gripper:prob01.pddl" - - DEFAULT_TABLE_ATTRIBUTES = [ - "cost", - "coverage", - "error", - "evaluations", - "expansions", - "expansions_until_last_jump", - "generated", - "memory", - "quality", - "run_dir", - "score_evaluations", - "score_expansions", - "score_generated", - "score_memory", - "score_search_time", - "score_total_time", - "search_time", - "total_time", - ] - - DEFAULT_SCATTER_PLOT_ATTRIBUTES = [ - "evaluations", - "expansions", - "expansions_until_last_jump", - "initial_h_value", - "memory", - "search_time", - "total_time", - ] - - PORTFOLIO_ATTRIBUTES = [ - "cost", - "coverage", - "error", - "plan_length", - "run_dir", - ] - - def __init__(self, suite, revisions=[], configs={}, grid_priority=None, - path=None, test_suite=None, email=None, processes=1, - **kwargs): - """Create a DownwardExperiment with some convenience features. - - If *revisions* is specified, it should be a non-empty - list of revisions, which specify which planner versions to use - in the experiment. The same versions are used for translator, - preprocessor and search. :: - - IssueExperiment(revisions=["issue123", "4b3d581643"], ...) - - *configs* must be a non-empty list of IssueConfig objects. :: - - IssueExperiment(..., configs=[ - IssueConfig("ff", ["--search", "eager_greedy(ff())"]), - IssueConfig( - "lama", [], - driver_options=["--alias", "seq-sat-lama-2011"]), - ]) - - *suite* sets the benchmarks for the experiment. It must be a - single string or a list of strings specifying domains or - tasks. The downward.suites module has many predefined - suites. 
:: - - IssueExperiment(..., suite=["grid", "gripper:prob01.pddl"]) - - from downward import suites - IssueExperiment(..., suite=suites.suite_all()) - IssueExperiment(..., suite=suites.suite_satisficing_with_ipc11()) - IssueExperiment(..., suite=suites.suite_optimal()) - - Use *grid_priority* to set the job priority for cluster - experiments. It must be in the range [-1023, 0] where 0 is the - highest priority. By default the priority is 0. :: - - IssueExperiment(..., grid_priority=-500) - - If *path* is specified, it must be the path to where the - experiment should be built (e.g. - /home/john/experiments/issue123/exp01/). If omitted, the - experiment path is derived automatically from the main - script's filename. Example:: - - script = experiments/issue123/exp01.py --> - path = experiments/issue123/data/issue123-exp01/ - - Specify *test_suite* to set the benchmarks for experiment test - runs. By default the first gripper task is used. - - IssueExperiment(..., test_suite=["depot:pfile1", "tpp:p01.pddl"]) - - If *email* is specified, it should be an email address. This - email address will be notified upon completion of the experiments - if it is run on the cluster. 
- """ - - if is_test_run(): - kwargs["environment"] = LocalEnvironment(processes=processes) - suite = test_suite or self.DEFAULT_TEST_SUITE - elif "environment" not in kwargs: - kwargs["environment"] = MaiaEnvironment( - priority=grid_priority, email=email) - - path = path or get_data_dir() - - FastDownwardExperiment.__init__(self, path=path, **kwargs) - - repo = get_repo_base() - for rev in revisions: - for config in configs: - self.add_algorithm( - get_algo_nick(rev, config.nick), - repo, - rev, - config.component_options, - build_options=config.build_options, - driver_options=config.driver_options) - - self.add_suite(os.path.join(repo, "benchmarks"), suite) - - self._revisions = revisions - self._configs = configs - - @classmethod - def _is_portfolio(cls, config_nick): - return "fdss" in config_nick - - @classmethod - def get_supported_attributes(cls, config_nick, attributes): - if cls._is_portfolio(config_nick): - return [attr for attr in attributes - if attr in cls.PORTFOLIO_ATTRIBUTES] - return attributes - - def add_absolute_report_step(self, **kwargs): - """Add step that makes an absolute report. - - Absolute reports are useful for experiments that don't - compare revisions. - - The report is written to the experiment evaluation directory. - - All *kwargs* will be passed to the AbsoluteReport class. If - the keyword argument *attributes* is not specified, a - default list of attributes is used. :: - - exp.add_absolute_report_step(attributes=["coverage"]) - - """ - kwargs.setdefault("attributes", self.DEFAULT_TABLE_ATTRIBUTES) - report = AbsoluteReport(**kwargs) - outfile = os.path.join(self.eval_dir, - get_experiment_name() + "." + - report.output_format) - self.add_report(report, outfile=outfile) - self.add_step(Step('publish-absolute-report', - subprocess.call, - ['publish', outfile])) - - def add_comparison_table_step(self, **kwargs): - """Add a step that makes pairwise revision comparisons. 
- - Create comparative reports for all pairs of Fast Downward - revisions. Each report pairs up the runs of the same config and - lists the two absolute attribute values and their difference - for all attributes in kwargs["attributes"]. - - All *kwargs* will be passed to the CompareConfigsReport class. - If the keyword argument *attributes* is not specified, a - default list of attributes is used. :: - - exp.add_comparison_table_step(attributes=["coverage"]) - - """ - kwargs.setdefault("attributes", self.DEFAULT_TABLE_ATTRIBUTES) - - def make_comparison_tables(): - for rev1, rev2 in itertools.combinations(self._revisions, 2): - compared_configs = [] - for config in self._configs: - config_nick = config.nick - compared_configs.append( - ("%s-%s" % (rev1, config_nick), - "%s-%s" % (rev2, config_nick), - "Diff (%s)" % config_nick)) - report = CompareConfigsReport(compared_configs, **kwargs) - outfile = os.path.join( - self.eval_dir, - "%s-%s-%s-compare" % (self.name, rev1, rev2) - + "." + report.output_format) - report(self.eval_dir, outfile) - - def publish_comparison_tables(): - for rev1, rev2 in itertools.combinations(self._revisions, 2): - outfile = os.path.join( - self.eval_dir, - "%s-%s-%s-compare" % (self.name, rev1, rev2) - + ".html") - subprocess.call(['publish', outfile]) - - self.add_step(Step("make-comparison-tables", make_comparison_tables)) - self.add_step(Step("publish-comparison-tables", publish_comparison_tables)) - - def add_scatter_plot_step(self, attributes=None): - """Add a step that creates scatter plots for all revision pairs. - - Create a scatter plot for each combination of attribute, - configuration and revisions pair. If *attributes* is not - specified, a list of common scatter plot attributes is used. - For portfolios all attributes except "cost", "coverage" and - "plan_length" will be ignored. 
:: - - exp.add_scatter_plot_step(attributes=["expansions"]) - - """ - if attributes is None: - attributes = self.DEFAULT_SCATTER_PLOT_ATTRIBUTES - scatter_dir = os.path.join(self.eval_dir, "scatter") - - def make_scatter_plot(config_nick, rev1, rev2, attribute): - name = "-".join([self.name, rev1, rev2, attribute, config_nick]) - print "Make scatter plot for", name - algo1 = "%s-%s" % (rev1, config_nick) - algo2 = "%s-%s" % (rev2, config_nick) - report = ScatterPlotReport( - filter_config=[algo1, algo2], - attributes=[attribute], - get_category=lambda run1, run2: run1["domain"], - legend_location=(1.3, 0.5)) - report( - self.eval_dir, - os.path.join(scatter_dir, rev1 + "-" + rev2, name)) - - def make_scatter_plots(): - for config in self._configs: - for rev1, rev2 in itertools.combinations(self._revisions, 2): - for attribute in self.get_supported_attributes( - config.nick, attributes): - make_scatter_plot(config.nick, rev1, rev2, attribute) - - self.add_step(Step("make-scatter-plots", make_scatter_plots)) diff --git a/experiments/issue627/common_setup_no_benchmarks.py b/experiments/issue627/common_setup_no_benchmarks.py deleted file mode 100644 index 934531f15d..0000000000 --- a/experiments/issue627/common_setup_no_benchmarks.py +++ /dev/null @@ -1,357 +0,0 @@ -# -*- coding: utf-8 -*- - -import itertools -import os -import platform -import subprocess -import sys - -from lab.environments import LocalEnvironment, MaiaEnvironment -from lab.experiment import ARGPARSER -from lab.steps import Step -from lab import tools - -from downward.experiment import FastDownwardExperiment -from downward.reports.absolute import AbsoluteReport -from downward.reports.compare import CompareConfigsReport -from downward.reports.scatter import ScatterPlotReport - - -def parse_args(): - ARGPARSER.add_argument( - "--test", - choices=["yes", "no", "auto"], - default="auto", - dest="test_run", - help="test experiment locally on a small suite if --test=yes or " - "--test=auto and we are not on 
a cluster") - return ARGPARSER.parse_args() - -ARGS = parse_args() - - -def get_script(): - """Get file name of main script.""" - return tools.get_script_path() - - -def get_script_dir(): - """Get directory of main script. - - Usually a relative directory (depends on how it was called by the user.)""" - return os.path.dirname(get_script()) - - -def get_experiment_name(): - """Get name for experiment. - - Derived from the absolute filename of the main script, e.g. - "/ham/spam/eggs.py" => "spam-eggs".""" - script = os.path.abspath(get_script()) - script_dir = os.path.basename(os.path.dirname(script)) - script_base = os.path.splitext(os.path.basename(script))[0] - return "%s-%s" % (script_dir, script_base) - - -def get_data_dir(): - """Get data dir for the experiment. - - This is the subdirectory "data" of the directory containing - the main script.""" - return os.path.join(get_script_dir(), "data", get_experiment_name()) - - -def get_repo_base(): - """Get base directory of the repository, as an absolute path. - - Search upwards in the directory tree from the main script until a - directory with a subdirectory named ".hg" is found. - - Abort if the repo base cannot be found.""" - path = os.path.abspath(get_script_dir()) - while os.path.dirname(path) != path: - if os.path.exists(os.path.join(path, ".hg")): - return path - path = os.path.dirname(path) - sys.exit("repo base could not be found") - - -def is_running_on_cluster(): - node = platform.node() - return ( - "cluster" in node or - node.startswith("gkigrid") or - node in ["habakuk", "turtur"]) - - -def is_test_run(): - return ARGS.test_run == "yes" or ( - ARGS.test_run == "auto" and not is_running_on_cluster()) - - -def get_algo_nick(revision, config_nick): - return "{revision}-{config_nick}".format(**locals()) - - -class IssueConfig(object): - """Hold information about a planner configuration. - - See FastDownwardExperiment.add_algorithm() for documentation of the - constructor's options. 
- - """ - def __init__(self, nick, component_options, - build_options=None, driver_options=None): - self.nick = nick - self.component_options = component_options - self.build_options = build_options - self.driver_options = driver_options - - -class IssueExperiment(FastDownwardExperiment): - """Subclass of FastDownwardExperiment with some convenience features.""" - - DEFAULT_TEST_SUITE = "gripper:prob01.pddl" - - DEFAULT_TABLE_ATTRIBUTES = [ - "cost", - "coverage", - "error", - "evaluations", - "expansions", - "expansions_until_last_jump", - "generated", - "memory", - "quality", - "run_dir", - "score_evaluations", - "score_expansions", - "score_generated", - "score_memory", - "score_search_time", - "score_total_time", - "search_time", - "total_time", - ] - - DEFAULT_SCATTER_PLOT_ATTRIBUTES = [ - "evaluations", - "expansions", - "expansions_until_last_jump", - "initial_h_value", - "memory", - "search_time", - "total_time", - ] - - PORTFOLIO_ATTRIBUTES = [ - "cost", - "coverage", - "error", - "plan_length", - "run_dir", - ] - - def __init__(self, benchmarks_dir, suite, revisions=[], configs={}, - grid_priority=None, path=None, test_suite=None, - email=None, processes=None, - **kwargs): - """ - If *revisions* is specified, it should be a non-empty - list of revisions, which specify which planner versions to use - in the experiment. The same versions are used for translator, - preprocessor and search. :: - - IssueExperiment(revisions=["issue123", "4b3d581643"], ...) - - *configs* must be a non-empty list of IssueConfig objects. :: - - IssueExperiment(..., configs=[ - IssueConfig("ff", ["--search", "eager_greedy(ff())"]), - IssueConfig( - "lama", [], - driver_options=["--alias", "seq-sat-lama-2011"]), - ]) - - *suite* sets the benchmarks for the experiment. It must be a - single string or a list of strings specifying domains or - tasks. The downward.suites module has many predefined - suites. 
:: - - IssueExperiment(..., suite=["grid", "gripper:prob01.pddl"]) - - from downward import suites - IssueExperiment(..., suite=suites.suite_all()) - IssueExperiment(..., suite=suites.suite_satisficing_with_ipc11()) - IssueExperiment(..., suite=suites.suite_optimal()) - - Use *grid_priority* to set the job priority for cluster - experiments. It must be in the range [-1023, 0] where 0 is the - highest priority. By default the priority is 0. :: - - IssueExperiment(..., grid_priority=-500) - - If *path* is specified, it must be the path to where the - experiment should be built (e.g. - /home/john/experiments/issue123/exp01/). If omitted, the - experiment path is derived automatically from the main - script's filename. Example:: - - script = experiments/issue123/exp01.py --> - path = experiments/issue123/data/issue123-exp01/ - - Specify *test_suite* to set the benchmarks for experiment test - runs. By default the first gripper task is used. - - IssueExperiment(..., test_suite=["depot:pfile1", "tpp:p01.pddl"]) - - If *email* is specified, it should be an email address. This - email address will be notified upon completion of the experiments - if it is run on the cluster. 
- """ - - if is_test_run(): - kwargs["environment"] = LocalEnvironment(processes=processes) - suite = test_suite or self.DEFAULT_TEST_SUITE - elif "environment" not in kwargs: - kwargs["environment"] = MaiaEnvironment( - priority=grid_priority, email=email) - - path = path or get_data_dir() - - FastDownwardExperiment.__init__(self, path=path, **kwargs) - - repo = get_repo_base() - for rev in revisions: - for config in configs: - self.add_algorithm( - get_algo_nick(rev, config.nick), - repo, - rev, - config.component_options, - build_options=config.build_options, - driver_options=config.driver_options) - - self.add_suite(benchmarks_dir, suite) - - self._revisions = revisions - self._configs = configs - - @classmethod - def _is_portfolio(cls, config_nick): - return "fdss" in config_nick - - @classmethod - def get_supported_attributes(cls, config_nick, attributes): - if cls._is_portfolio(config_nick): - return [attr for attr in attributes - if attr in cls.PORTFOLIO_ATTRIBUTES] - return attributes - - def add_absolute_report_step(self, **kwargs): - """Add step that makes an absolute report. - - Absolute reports are useful for experiments that don't - compare revisions. - - The report is written to the experiment evaluation directory. - - All *kwargs* will be passed to the AbsoluteReport class. If - the keyword argument *attributes* is not specified, a - default list of attributes is used. :: - - exp.add_absolute_report_step(attributes=["coverage"]) - - """ - kwargs.setdefault("attributes", self.DEFAULT_TABLE_ATTRIBUTES) - report = AbsoluteReport(**kwargs) - outfile = os.path.join(self.eval_dir, - get_experiment_name() + "." + - report.output_format) - self.add_report(report, outfile=outfile) - self.add_step(Step('publish-absolute-report', - subprocess.call, - ['publish', outfile])) - - def add_comparison_table_step(self, **kwargs): - """Add a step that makes pairwise revision comparisons. - - Create comparative reports for all pairs of Fast Downward - revisions. 
Each report pairs up the runs of the same config and - lists the two absolute attribute values and their difference - for all attributes in kwargs["attributes"]. - - All *kwargs* will be passed to the CompareConfigsReport class. - If the keyword argument *attributes* is not specified, a - default list of attributes is used. :: - - exp.add_comparison_table_step(attributes=["coverage"]) - - """ - kwargs.setdefault("attributes", self.DEFAULT_TABLE_ATTRIBUTES) - - def make_comparison_tables(): - for rev1, rev2 in itertools.combinations(self._revisions, 2): - compared_configs = [] - for config in self._configs: - config_nick = config.nick - compared_configs.append( - ("%s-%s" % (rev1, config_nick), - "%s-%s" % (rev2, config_nick), - "Diff (%s)" % config_nick)) - report = CompareConfigsReport(compared_configs, **kwargs) - outfile = os.path.join( - self.eval_dir, - "%s-%s-%s-compare" % (self.name, rev1, rev2) - + "." + report.output_format) - report(self.eval_dir, outfile) - - def publish_comparison_tables(): - for rev1, rev2 in itertools.combinations(self._revisions, 2): - outfile = os.path.join( - self.eval_dir, - "%s-%s-%s-compare" % (self.name, rev1, rev2) - + ".html") - subprocess.call(['publish', outfile]) - - self.add_step(Step("make-comparison-tables", make_comparison_tables)) - self.add_step(Step("publish-comparison-tables", publish_comparison_tables)) - - def add_scatter_plot_step(self, attributes=None): - """Add a step that creates scatter plots for all revision pairs. - - Create a scatter plot for each combination of attribute, - configuration and revisions pair. If *attributes* is not - specified, a list of common scatter plot attributes is used. - For portfolios all attributes except "cost", "coverage" and - "plan_length" will be ignored. 
:: - - exp.add_scatter_plot_step(attributes=["expansions"]) - - """ - if attributes is None: - attributes = self.DEFAULT_SCATTER_PLOT_ATTRIBUTES - scatter_dir = os.path.join(self.eval_dir, "scatter") - - def make_scatter_plot(config_nick, rev1, rev2, attribute): - name = "-".join([self.name, rev1, rev2, attribute, config_nick]) - print "Make scatter plot for", name - algo1 = "%s-%s" % (rev1, config_nick) - algo2 = "%s-%s" % (rev2, config_nick) - report = ScatterPlotReport( - filter_config=[algo1, algo2], - attributes=[attribute], - get_category=lambda run1, run2: run1["domain"], - legend_location=(1.3, 0.5)) - report( - self.eval_dir, - os.path.join(scatter_dir, rev1 + "-" + rev2, name)) - - def make_scatter_plots(): - for config in self._configs: - for rev1, rev2 in itertools.combinations(self._revisions, 2): - for attribute in self.get_supported_attributes( - config.nick, attributes): - make_scatter_plot(config.nick, rev1, rev2, attribute) - - self.add_step(Step("make-scatter-plots", make_scatter_plots)) diff --git a/experiments/issue627/merge-v3-v4.py b/experiments/issue627/merge-v3-v4.py deleted file mode 100755 index 772541b80e..0000000000 --- a/experiments/issue627/merge-v3-v4.py +++ /dev/null @@ -1,46 +0,0 @@ -#! 
/usr/bin/env python -# -*- coding: utf-8 -*- - -from downward import suites - -from common_setup_no_benchmarks import IssueConfig, IssueExperiment, get_script_dir -from relativescatter import RelativeScatterPlotReport - -import os - -def main(revisions=None): - exp = IssueExperiment(benchmarks_dir=".", suite=[]) - - exp.add_fetcher( - os.path.join(get_script_dir(), "data", "issue627-v3-eval"), - filter=lambda(run): "base" not in run["config"], - ) - exp.add_fetcher( - os.path.join(get_script_dir(), "data", "issue627-v4-eval"), - filter=lambda(run): "base" not in run["config"], - ) - - for config_nick in ['astar-blind', 'astar-lmcut', 'astar-ipdb', 'astar-cegar-original', 'astar-cegar-lm-goals']: - exp.add_report( - RelativeScatterPlotReport( - attributes=["memory"], - filter_config=["issue627-v3-%s" % config_nick, - "issue627-v4-%s" % config_nick], - get_category=lambda run1, run2: run1.get("domain"), - ), - outfile='issue627_v3_v4_memory_%s.png' % config_nick - ) - - exp.add_report( - RelativeScatterPlotReport( - attributes=["total_time"], - filter_config=["issue627-v3-%s" % config_nick, - "issue627-v4-%s" % config_nick], - get_category=lambda run1, run2: run1.get("domain"), - ), - outfile='issue627_v3_v4_total_time_%s.png' % config_nick - ) - - exp() - -main(revisions=['issue627-v3', 'issue627-v4']) diff --git a/experiments/issue627/merge-v3-v5.py b/experiments/issue627/merge-v3-v5.py deleted file mode 100755 index 100f1f2a11..0000000000 --- a/experiments/issue627/merge-v3-v5.py +++ /dev/null @@ -1,46 +0,0 @@ -#! 
/usr/bin/env python -# -*- coding: utf-8 -*- - -from downward import suites - -from common_setup_no_benchmarks import IssueConfig, IssueExperiment, get_script_dir -from relativescatter import RelativeScatterPlotReport - -import os - -def main(revisions=None): - exp = IssueExperiment(benchmarks_dir=".", suite=[]) - - exp.add_fetcher( - os.path.join(get_script_dir(), "data", "issue627-v3-eval"), - filter=lambda(run): "base" not in run["config"], - ) - exp.add_fetcher( - os.path.join(get_script_dir(), "data", "issue627-v5-eval"), - filter=lambda(run): "base" not in run["config"], - ) - - for config_nick in ['astar-blind', 'astar-lmcut', 'astar-ipdb', 'astar-cegar-original', 'astar-cegar-lm-goals']: - exp.add_report( - RelativeScatterPlotReport( - attributes=["memory"], - filter_config=["issue627-v3-%s" % config_nick, - "issue627-v5-%s" % config_nick], - get_category=lambda run1, run2: run1.get("domain"), - ), - outfile='issue627_v3_v5_memory_%s.png' % config_nick - ) - - exp.add_report( - RelativeScatterPlotReport( - attributes=["total_time"], - filter_config=["issue627-v3-%s" % config_nick, - "issue627-v5-%s" % config_nick], - get_category=lambda run1, run2: run1.get("domain"), - ), - outfile='issue627_v3_v5_total_time_%s.png' % config_nick - ) - - exp() - -main(revisions=['issue627-v3', 'issue627-v5']) diff --git a/experiments/issue627/relativescatter.py b/experiments/issue627/relativescatter.py deleted file mode 100644 index 41a8385a87..0000000000 --- a/experiments/issue627/relativescatter.py +++ /dev/null @@ -1,126 +0,0 @@ -# -*- coding: utf-8 -*- -# -# downward uses the lab package to conduct experiments with the -# Fast Downward planning system. -# -# This program is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. 
-# -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with this program. If not, see . - -from collections import defaultdict -import os - -from lab import tools - -from matplotlib import ticker - -from downward.reports.scatter import ScatterPlotReport -from downward.reports.plot import PlotReport, Matplotlib, MatplotlibPlot - - -# TODO: handle outliers - -# TODO: this is mostly copied from ScatterMatplotlib (scatter.py) -class RelativeScatterMatplotlib(Matplotlib): - @classmethod - def _plot(cls, report, axes, categories, styles): - # Display grid - axes.grid(b=True, linestyle='-', color='0.75') - - has_points = False - # Generate the scatter plots - for category, coords in sorted(categories.items()): - X, Y = zip(*coords) - axes.scatter(X, Y, s=42, label=category, **styles[category]) - if X and Y: - has_points = True - - if report.xscale == 'linear' or report.yscale == 'linear': - plot_size = report.missing_val * 1.01 - else: - plot_size = report.missing_val * 1.25 - - # make 5 ticks above and below 1 - yticks = [] - tick_step = report.ylim_top**(1/5.0) - for i in xrange(-5, 6): - yticks.append(tick_step**i) - axes.set_yticks(yticks) - axes.get_yaxis().set_major_formatter(ticker.ScalarFormatter()) - - axes.set_xlim(report.xlim_left or -1, report.xlim_right or plot_size) - axes.set_ylim(report.ylim_bottom or -1, report.ylim_top or plot_size) - - for axis in [axes.xaxis, axes.yaxis]: - MatplotlibPlot.change_axis_formatter(axis, - report.missing_val if report.show_missing else None) - return has_points - - - -class RelativeScatterPlotReport(ScatterPlotReport): - """ - Generate a scatter plot that shows how a specific attribute in two - configurations. 
The attribute value in config 1 is shown on the - x-axis and the relation to the value in config 2 on the y-axis. - """ - - def __init__(self, show_missing=True, get_category=None, **kwargs): - ScatterPlotReport.__init__(self, show_missing, get_category, **kwargs) - if self.output_format == 'tex': - raise "not supported" - else: - self.writer = RelativeScatterMatplotlib - - - def _fill_categories(self, runs): - # We discard the *runs* parameter. - # Map category names to value tuples - categories = defaultdict(list) - self.ylim_bottom = 2 - self.ylim_top = 0.5 - self.xlim_left = float("inf") - for (domain, problem), runs in self.problem_runs.items(): - if len(runs) != 2: - continue - run1, run2 = runs - assert (run1['config'] == self.configs[0] and - run2['config'] == self.configs[1]) - val1 = run1.get(self.attribute) - val2 = run2.get(self.attribute) - if val1 is None or val2 is None: - continue - category = self.get_category(run1, run2) - assert val1 > 0, (domain, problem, self.configs[0], val1) - assert val2 > 0, (domain, problem, self.configs[1], val2) - x = val1 - y = val2 / float(val1) - - categories[category].append((x, y)) - - self.ylim_top = max(self.ylim_top, y) - self.ylim_bottom = min(self.ylim_bottom, y) - self.xlim_left = min(self.xlim_left, x) - - # center around 1 - if self.ylim_bottom < 1: - self.ylim_top = max(self.ylim_top, 1 / float(self.ylim_bottom)) - if self.ylim_top > 1: - self.ylim_bottom = min(self.ylim_bottom, 1 / float(self.ylim_top)) - return categories - - def _set_scales(self, xscale, yscale): - # ScatterPlots use log-scaling on the x-axis by default. - default_xscale = 'log' - if self.attribute and self.attribute in self.LINEAR: - default_xscale = 'linear' - PlotReport._set_scales(self, xscale or default_xscale, 'log') diff --git a/experiments/issue627/v1-limit.py b/experiments/issue627/v1-limit.py deleted file mode 100755 index 7a2afe243b..0000000000 --- a/experiments/issue627/v1-limit.py +++ /dev/null @@ -1,51 +0,0 @@ -#! 
/usr/bin/env python -# -*- coding: utf-8 -*- - -from downward import suites - -from common_setup import IssueConfig, IssueExperiment -from relativescatter import RelativeScatterPlotReport - -def main(revisions=None): - suite = suites.suite_optimal_with_ipc11() - - configs = { - IssueConfig('astar-cegar-original-10000', ['--search', 'astar(cegar(subtasks=[original()],max_states=10000,max_time=infinity))']), - IssueConfig('astar-cegar-lm-goals-10000', ['--search', 'astar(cegar(subtasks=[landmarks(),goals()],max_states=10000,max_time=infinity))']), - } - - exp = IssueExperiment( - revisions=revisions, - configs=configs, - suite=suite, - test_suite=['depot:pfile1'], - processes=4, - email='florian.pommerening@unibas.ch', - ) - - exp.add_comparison_table_step() - - for config in configs: - exp.add_report( - RelativeScatterPlotReport( - attributes=["memory"], - filter_config=["issue627-base-%s" % config.nick, - "issue627-v1-%s" % config.nick], - get_category=lambda run1, run2: run1.get("domain"), - ), - outfile='issue627_base_v1_memory_%s.png' % config.nick - ) - - exp.add_report( - RelativeScatterPlotReport( - attributes=["total_time"], - filter_config=["issue627-base-%s" % config.nick, - "issue627-v1-%s" % config.nick], - get_category=lambda run1, run2: run1.get("domain"), - ), - outfile='issue627_base_v1_total_time_%s.png' % config.nick - ) - - exp() - -main(revisions=['issue627-base', 'issue627-v1']) diff --git a/experiments/issue627/v1-noise.py b/experiments/issue627/v1-noise.py deleted file mode 100755 index 7c6c56fe75..0000000000 --- a/experiments/issue627/v1-noise.py +++ /dev/null @@ -1,51 +0,0 @@ -#! 
/usr/bin/env python -# -*- coding: utf-8 -*- - -from downward import suites - -from common_setup import IssueConfig, IssueExperiment -from relativescatter import RelativeScatterPlotReport - -def main(revisions=None): - suite = suites.suite_optimal_with_ipc11() - - configs = { - IssueConfig('astar-cegar-original-10000', ['--search', 'astar(cegar(subtasks=[original()],max_states=10000,max_time=infinity))']), - IssueConfig('astar-cegar-lm-goals-10000', ['--search', 'astar(cegar(subtasks=[landmarks(),goals()],max_states=10000,max_time=infinity))']), - } - - exp = IssueExperiment( - revisions=revisions, - configs=configs, - suite=suite, - test_suite=['depot:pfile1'], - processes=4, - email='florian.pommerening@unibas.ch', - ) - - exp.add_comparison_table_step() - - for config in configs: - exp.add_report( - RelativeScatterPlotReport( - attributes=["memory"], - filter_config=["issue627-base-%s" % config.nick, - "4ed2abfab4ba-%s" % config.nick], - get_category=lambda run1, run2: run1.get("domain"), - ), - outfile='issue627_base_memory_%s.png' % config.nick - ) - - exp.add_report( - RelativeScatterPlotReport( - attributes=["total_time"], - filter_config=["issue627-base-%s" % config.nick, - "4ed2abfab4ba-%s" % config.nick], - get_category=lambda run1, run2: run1.get("domain"), - ), - outfile='issue627_base_total_time_%s.png' % config.nick - ) - - exp() - -main(revisions=['issue627-base', '4ed2abfab4ba']) diff --git a/experiments/issue627/v1.py b/experiments/issue627/v1.py deleted file mode 100755 index 7841e17755..0000000000 --- a/experiments/issue627/v1.py +++ /dev/null @@ -1,51 +0,0 @@ -#! 
/usr/bin/env python -# -*- coding: utf-8 -*- - -from downward import suites - -from common_setup import IssueConfig, IssueExperiment -from relativescatter import RelativeScatterPlotReport - -def main(revisions=None): - suite = suites.suite_optimal_with_ipc11() - - configs = { - IssueConfig('astar-cegar-original', ['--search', 'astar(cegar(subtasks=[original()]))']), - IssueConfig('astar-cegar-lm-goals', ['--search', 'astar(cegar(subtasks=[landmarks(),goals()]))']), - } - - exp = IssueExperiment( - revisions=revisions, - configs=configs, - suite=suite, - test_suite=['depot:pfile1'], - processes=4, - email='florian.pommerening@unibas.ch', - ) - - exp.add_comparison_table_step() - - for config in configs: - exp.add_report( - RelativeScatterPlotReport( - attributes=["memory"], - filter_config=["issue627-base-%s" % config.nick, - "issue627-v1-%s" % config.nick], - get_category=lambda run1, run2: run1.get("domain"), - ), - outfile='issue627_base_v1_memory_%s.png' % config.nick - ) - - exp.add_report( - RelativeScatterPlotReport( - attributes=["total_time"], - filter_config=["issue627-base-%s" % config.nick, - "issue627-v1-%s" % config.nick], - get_category=lambda run1, run2: run1.get("domain"), - ), - outfile='issue627_base_v1_total_time_%s.png' % config.nick - ) - - exp() - -main(revisions=['issue627-base', 'issue627-v1']) diff --git a/experiments/issue627/v2.py b/experiments/issue627/v2.py deleted file mode 100755 index 805a6a7f36..0000000000 --- a/experiments/issue627/v2.py +++ /dev/null @@ -1,54 +0,0 @@ -#! 
/usr/bin/env python -# -*- coding: utf-8 -*- - -from downward import suites - -from common_setup import IssueConfig, IssueExperiment -from relativescatter import RelativeScatterPlotReport - -def main(revisions=None): - suite = suites.suite_optimal_with_ipc11() - - configs = { - IssueConfig('astar-blind', ['--search', 'astar(blind())']), - IssueConfig('astar-lmcut', ['--search', 'astar(lmcut())']), - IssueConfig('astar-ipdb', ['--search', 'astar(ipdb())']), - IssueConfig('astar-cegar-original', ['--search', 'astar(cegar(subtasks=[original()]))']), - IssueConfig('astar-cegar-lm-goals', ['--search', 'astar(cegar(subtasks=[landmarks(),goals()]))']), - } - - exp = IssueExperiment( - revisions=revisions, - configs=configs, - suite=suite, - test_suite=['depot:pfile1'], - processes=4, - email='florian.pommerening@unibas.ch', - ) - - exp.add_comparison_table_step() - - for config in configs: - exp.add_report( - RelativeScatterPlotReport( - attributes=["memory"], - filter_config=["issue627-base-%s" % config.nick, - "issue627-v2-%s" % config.nick], - get_category=lambda run1, run2: run1.get("domain"), - ), - outfile='issue627_base_v2_memory_%s.png' % config.nick - ) - - exp.add_report( - RelativeScatterPlotReport( - attributes=["total_time"], - filter_config=["issue627-base-%s" % config.nick, - "issue627-v2-%s" % config.nick], - get_category=lambda run1, run2: run1.get("domain"), - ), - outfile='issue627_base_v2_total_time_%s.png' % config.nick - ) - - exp() - -main(revisions=['issue627-base', 'issue627-v2']) diff --git a/experiments/issue627/v3.py b/experiments/issue627/v3.py deleted file mode 100755 index f2c77adbbf..0000000000 --- a/experiments/issue627/v3.py +++ /dev/null @@ -1,55 +0,0 @@ -#! 
/usr/bin/env python -# -*- coding: utf-8 -*- - -from downward import suites - -from common_setup_no_benchmarks import IssueConfig, IssueExperiment -from relativescatter import RelativeScatterPlotReport - -def main(revisions=None): - suite = suites.suite_optimal_with_ipc11() - - configs = { - IssueConfig('astar-blind', ['--search', 'astar(blind())']), - IssueConfig('astar-lmcut', ['--search', 'astar(lmcut())']), - IssueConfig('astar-ipdb', ['--search', 'astar(ipdb())']), - IssueConfig('astar-cegar-original', ['--search', 'astar(cegar(subtasks=[original()], max_states=10000, max_time=infinity))']), - IssueConfig('astar-cegar-lm-goals', ['--search', 'astar(cegar(subtasks=[landmarks(),goals()], max_states=10000, max_time=infinity))']), - } - - exp = IssueExperiment( - benchmarks_dir="/infai/pommeren/projects/downward/benchmarks/", - revisions=revisions, - configs=configs, - suite=suite, - test_suite=['depot:pfile1'], - processes=4, - email='florian.pommerening@unibas.ch', - ) - - exp.add_comparison_table_step() - - for config in configs: - exp.add_report( - RelativeScatterPlotReport( - attributes=["memory"], - filter_config=["issue627-v3-base-%s" % config.nick, - "issue627-v3-%s" % config.nick], - get_category=lambda run1, run2: run1.get("domain"), - ), - outfile='issue627_base_v3_memory_%s.png' % config.nick - ) - - exp.add_report( - RelativeScatterPlotReport( - attributes=["total_time"], - filter_config=["issue627-v3-base-%s" % config.nick, - "issue627-v3-%s" % config.nick], - get_category=lambda run1, run2: run1.get("domain"), - ), - outfile='issue627_base_v3_total_time_%s.png' % config.nick - ) - - exp() - -main(revisions=['issue627-v3-base', 'issue627-v3']) diff --git a/experiments/issue627/v4.py b/experiments/issue627/v4.py deleted file mode 100755 index c226bca1eb..0000000000 --- a/experiments/issue627/v4.py +++ /dev/null @@ -1,55 +0,0 @@ -#! 
/usr/bin/env python -# -*- coding: utf-8 -*- - -from downward import suites - -from common_setup_no_benchmarks import IssueConfig, IssueExperiment -from relativescatter import RelativeScatterPlotReport - -def main(revisions=None): - suite = suites.suite_optimal_with_ipc11() - - configs = { - IssueConfig('astar-blind', ['--search', 'astar(blind())']), - IssueConfig('astar-lmcut', ['--search', 'astar(lmcut())']), - IssueConfig('astar-ipdb', ['--search', 'astar(ipdb())']), - IssueConfig('astar-cegar-original', ['--search', 'astar(cegar(subtasks=[original()], max_states=10000, max_time=infinity))']), - IssueConfig('astar-cegar-lm-goals', ['--search', 'astar(cegar(subtasks=[landmarks(),goals()], max_states=10000, max_time=infinity))']), - } - - exp = IssueExperiment( - benchmarks_dir="/infai/pommeren/projects/downward/benchmarks/", - revisions=revisions, - configs=configs, - suite=suite, - test_suite=['depot:pfile1'], - processes=4, - email='florian.pommerening@unibas.ch', - ) - - exp.add_comparison_table_step() - - for config in configs: - exp.add_report( - RelativeScatterPlotReport( - attributes=["memory"], - filter_config=["issue627-v3-base-%s" % config.nick, - "issue627-v4-%s" % config.nick], - get_category=lambda run1, run2: run1.get("domain"), - ), - outfile='issue627_base_v4_memory_%s.png' % config.nick - ) - - exp.add_report( - RelativeScatterPlotReport( - attributes=["total_time"], - filter_config=["issue627-v3-base-%s" % config.nick, - "issue627-v4-%s" % config.nick], - get_category=lambda run1, run2: run1.get("domain"), - ), - outfile='issue627_base_v4_total_time_%s.png' % config.nick - ) - - exp() - -main(revisions=['issue627-v3-base', 'issue627-v4']) diff --git a/experiments/issue627/v5-sat.py b/experiments/issue627/v5-sat.py deleted file mode 100755 index af5736c8ce..0000000000 --- a/experiments/issue627/v5-sat.py +++ /dev/null @@ -1,71 +0,0 @@ -#! 
/usr/bin/env python -# -*- coding: utf-8 -*- - -from downward import suites - -from common_setup_no_benchmarks import IssueConfig, IssueExperiment -from relativescatter import RelativeScatterPlotReport - -def main(revisions=None): - suite = suites.suite_satisficing_with_ipc11() - - configs = { - IssueConfig('lazy-greedy-ff', [ - '--heuristic', - 'h=ff()', - '--search', - 'lazy_greedy(h, preferred=h)' - ]), - IssueConfig('lama-first', [], - driver_options=['--alias', 'lama-first'] - ), - IssueConfig('eager_greedy_cg', [ - '--heuristic', - 'h=cg()', - '--search', - 'eager_greedy(h, preferred=h)' - ]), - IssueConfig('eager_greedy_cea', [ - '--heuristic', - 'h=cea()', - '--search', - 'eager_greedy(h, preferred=h)' - ]), - } - - exp = IssueExperiment( - benchmarks_dir="/infai/pommeren/projects/downward/benchmarks/", - revisions=revisions, - configs=configs, - suite=suite, - test_suite=['depot:pfile1'], - processes=4, - email='florian.pommerening@unibas.ch', - ) - - exp.add_comparison_table_step() - - for config in configs: - exp.add_report( - RelativeScatterPlotReport( - attributes=["memory"], - filter_config=["issue627-v3-base-%s" % config.nick, - "issue627-v5-%s" % config.nick], - get_category=lambda run1, run2: run1.get("domain"), - ), - outfile='issue627_base_v5_sat_memory_%s.png' % config.nick - ) - - exp.add_report( - RelativeScatterPlotReport( - attributes=["total_time"], - filter_config=["issue627-v3-base-%s" % config.nick, - "issue627-v5-%s" % config.nick], - get_category=lambda run1, run2: run1.get("domain"), - ), - outfile='issue627_base_v5_sat_total_time_%s.png' % config.nick - ) - - exp() - -main(revisions=['issue627-v3-base', 'issue627-v5']) diff --git a/experiments/issue627/v5.py b/experiments/issue627/v5.py deleted file mode 100755 index bac3e5dcc9..0000000000 --- a/experiments/issue627/v5.py +++ /dev/null @@ -1,55 +0,0 @@ -#! 
/usr/bin/env python -# -*- coding: utf-8 -*- - -from downward import suites - -from common_setup_no_benchmarks import IssueConfig, IssueExperiment -from relativescatter import RelativeScatterPlotReport - -def main(revisions=None): - suite = suites.suite_optimal_with_ipc11() - - configs = { - IssueConfig('astar-blind', ['--search', 'astar(blind())']), - IssueConfig('astar-lmcut', ['--search', 'astar(lmcut())']), - IssueConfig('astar-ipdb', ['--search', 'astar(ipdb())']), - IssueConfig('astar-cegar-original', ['--search', 'astar(cegar(subtasks=[original()], max_states=10000, max_time=infinity))']), - IssueConfig('astar-cegar-lm-goals', ['--search', 'astar(cegar(subtasks=[landmarks(),goals()], max_states=10000, max_time=infinity))']), - } - - exp = IssueExperiment( - benchmarks_dir="/infai/pommeren/projects/downward/benchmarks/", - revisions=revisions, - configs=configs, - suite=suite, - test_suite=['depot:pfile1'], - processes=4, - email='florian.pommerening@unibas.ch', - ) - - exp.add_comparison_table_step() - - for config in configs: - exp.add_report( - RelativeScatterPlotReport( - attributes=["memory"], - filter_config=["issue627-v3-base-%s" % config.nick, - "issue627-v5-%s" % config.nick], - get_category=lambda run1, run2: run1.get("domain"), - ), - outfile='issue627_base_v5_memory_%s.png' % config.nick - ) - - exp.add_report( - RelativeScatterPlotReport( - attributes=["total_time"], - filter_config=["issue627-v3-base-%s" % config.nick, - "issue627-v5-%s" % config.nick], - get_category=lambda run1, run2: run1.get("domain"), - ), - outfile='issue627_base_v5_total_time_%s.png' % config.nick - ) - - exp() - -main(revisions=['issue627-v3-base', 'issue627-v5']) diff --git a/experiments/issue629/common_setup.py b/experiments/issue629/common_setup.py deleted file mode 100644 index 934531f15d..0000000000 --- a/experiments/issue629/common_setup.py +++ /dev/null @@ -1,357 +0,0 @@ -# -*- coding: utf-8 -*- - -import itertools -import os -import platform -import subprocess 
-import sys - -from lab.environments import LocalEnvironment, MaiaEnvironment -from lab.experiment import ARGPARSER -from lab.steps import Step -from lab import tools - -from downward.experiment import FastDownwardExperiment -from downward.reports.absolute import AbsoluteReport -from downward.reports.compare import CompareConfigsReport -from downward.reports.scatter import ScatterPlotReport - - -def parse_args(): - ARGPARSER.add_argument( - "--test", - choices=["yes", "no", "auto"], - default="auto", - dest="test_run", - help="test experiment locally on a small suite if --test=yes or " - "--test=auto and we are not on a cluster") - return ARGPARSER.parse_args() - -ARGS = parse_args() - - -def get_script(): - """Get file name of main script.""" - return tools.get_script_path() - - -def get_script_dir(): - """Get directory of main script. - - Usually a relative directory (depends on how it was called by the user.)""" - return os.path.dirname(get_script()) - - -def get_experiment_name(): - """Get name for experiment. - - Derived from the absolute filename of the main script, e.g. - "/ham/spam/eggs.py" => "spam-eggs".""" - script = os.path.abspath(get_script()) - script_dir = os.path.basename(os.path.dirname(script)) - script_base = os.path.splitext(os.path.basename(script))[0] - return "%s-%s" % (script_dir, script_base) - - -def get_data_dir(): - """Get data dir for the experiment. - - This is the subdirectory "data" of the directory containing - the main script.""" - return os.path.join(get_script_dir(), "data", get_experiment_name()) - - -def get_repo_base(): - """Get base directory of the repository, as an absolute path. - - Search upwards in the directory tree from the main script until a - directory with a subdirectory named ".hg" is found. 
- - Abort if the repo base cannot be found.""" - path = os.path.abspath(get_script_dir()) - while os.path.dirname(path) != path: - if os.path.exists(os.path.join(path, ".hg")): - return path - path = os.path.dirname(path) - sys.exit("repo base could not be found") - - -def is_running_on_cluster(): - node = platform.node() - return ( - "cluster" in node or - node.startswith("gkigrid") or - node in ["habakuk", "turtur"]) - - -def is_test_run(): - return ARGS.test_run == "yes" or ( - ARGS.test_run == "auto" and not is_running_on_cluster()) - - -def get_algo_nick(revision, config_nick): - return "{revision}-{config_nick}".format(**locals()) - - -class IssueConfig(object): - """Hold information about a planner configuration. - - See FastDownwardExperiment.add_algorithm() for documentation of the - constructor's options. - - """ - def __init__(self, nick, component_options, - build_options=None, driver_options=None): - self.nick = nick - self.component_options = component_options - self.build_options = build_options - self.driver_options = driver_options - - -class IssueExperiment(FastDownwardExperiment): - """Subclass of FastDownwardExperiment with some convenience features.""" - - DEFAULT_TEST_SUITE = "gripper:prob01.pddl" - - DEFAULT_TABLE_ATTRIBUTES = [ - "cost", - "coverage", - "error", - "evaluations", - "expansions", - "expansions_until_last_jump", - "generated", - "memory", - "quality", - "run_dir", - "score_evaluations", - "score_expansions", - "score_generated", - "score_memory", - "score_search_time", - "score_total_time", - "search_time", - "total_time", - ] - - DEFAULT_SCATTER_PLOT_ATTRIBUTES = [ - "evaluations", - "expansions", - "expansions_until_last_jump", - "initial_h_value", - "memory", - "search_time", - "total_time", - ] - - PORTFOLIO_ATTRIBUTES = [ - "cost", - "coverage", - "error", - "plan_length", - "run_dir", - ] - - def __init__(self, benchmarks_dir, suite, revisions=[], configs={}, - grid_priority=None, path=None, test_suite=None, - email=None, 
processes=None, - **kwargs): - """ - If *revisions* is specified, it should be a non-empty - list of revisions, which specify which planner versions to use - in the experiment. The same versions are used for translator, - preprocessor and search. :: - - IssueExperiment(revisions=["issue123", "4b3d581643"], ...) - - *configs* must be a non-empty list of IssueConfig objects. :: - - IssueExperiment(..., configs=[ - IssueConfig("ff", ["--search", "eager_greedy(ff())"]), - IssueConfig( - "lama", [], - driver_options=["--alias", "seq-sat-lama-2011"]), - ]) - - *suite* sets the benchmarks for the experiment. It must be a - single string or a list of strings specifying domains or - tasks. The downward.suites module has many predefined - suites. :: - - IssueExperiment(..., suite=["grid", "gripper:prob01.pddl"]) - - from downward import suites - IssueExperiment(..., suite=suites.suite_all()) - IssueExperiment(..., suite=suites.suite_satisficing_with_ipc11()) - IssueExperiment(..., suite=suites.suite_optimal()) - - Use *grid_priority* to set the job priority for cluster - experiments. It must be in the range [-1023, 0] where 0 is the - highest priority. By default the priority is 0. :: - - IssueExperiment(..., grid_priority=-500) - - If *path* is specified, it must be the path to where the - experiment should be built (e.g. - /home/john/experiments/issue123/exp01/). If omitted, the - experiment path is derived automatically from the main - script's filename. Example:: - - script = experiments/issue123/exp01.py --> - path = experiments/issue123/data/issue123-exp01/ - - Specify *test_suite* to set the benchmarks for experiment test - runs. By default the first gripper task is used. - - IssueExperiment(..., test_suite=["depot:pfile1", "tpp:p01.pddl"]) - - If *email* is specified, it should be an email address. This - email address will be notified upon completion of the experiments - if it is run on the cluster. 
- """ - - if is_test_run(): - kwargs["environment"] = LocalEnvironment(processes=processes) - suite = test_suite or self.DEFAULT_TEST_SUITE - elif "environment" not in kwargs: - kwargs["environment"] = MaiaEnvironment( - priority=grid_priority, email=email) - - path = path or get_data_dir() - - FastDownwardExperiment.__init__(self, path=path, **kwargs) - - repo = get_repo_base() - for rev in revisions: - for config in configs: - self.add_algorithm( - get_algo_nick(rev, config.nick), - repo, - rev, - config.component_options, - build_options=config.build_options, - driver_options=config.driver_options) - - self.add_suite(benchmarks_dir, suite) - - self._revisions = revisions - self._configs = configs - - @classmethod - def _is_portfolio(cls, config_nick): - return "fdss" in config_nick - - @classmethod - def get_supported_attributes(cls, config_nick, attributes): - if cls._is_portfolio(config_nick): - return [attr for attr in attributes - if attr in cls.PORTFOLIO_ATTRIBUTES] - return attributes - - def add_absolute_report_step(self, **kwargs): - """Add step that makes an absolute report. - - Absolute reports are useful for experiments that don't - compare revisions. - - The report is written to the experiment evaluation directory. - - All *kwargs* will be passed to the AbsoluteReport class. If - the keyword argument *attributes* is not specified, a - default list of attributes is used. :: - - exp.add_absolute_report_step(attributes=["coverage"]) - - """ - kwargs.setdefault("attributes", self.DEFAULT_TABLE_ATTRIBUTES) - report = AbsoluteReport(**kwargs) - outfile = os.path.join(self.eval_dir, - get_experiment_name() + "." + - report.output_format) - self.add_report(report, outfile=outfile) - self.add_step(Step('publish-absolute-report', - subprocess.call, - ['publish', outfile])) - - def add_comparison_table_step(self, **kwargs): - """Add a step that makes pairwise revision comparisons. - - Create comparative reports for all pairs of Fast Downward - revisions. 
Each report pairs up the runs of the same config and - lists the two absolute attribute values and their difference - for all attributes in kwargs["attributes"]. - - All *kwargs* will be passed to the CompareConfigsReport class. - If the keyword argument *attributes* is not specified, a - default list of attributes is used. :: - - exp.add_comparison_table_step(attributes=["coverage"]) - - """ - kwargs.setdefault("attributes", self.DEFAULT_TABLE_ATTRIBUTES) - - def make_comparison_tables(): - for rev1, rev2 in itertools.combinations(self._revisions, 2): - compared_configs = [] - for config in self._configs: - config_nick = config.nick - compared_configs.append( - ("%s-%s" % (rev1, config_nick), - "%s-%s" % (rev2, config_nick), - "Diff (%s)" % config_nick)) - report = CompareConfigsReport(compared_configs, **kwargs) - outfile = os.path.join( - self.eval_dir, - "%s-%s-%s-compare" % (self.name, rev1, rev2) - + "." + report.output_format) - report(self.eval_dir, outfile) - - def publish_comparison_tables(): - for rev1, rev2 in itertools.combinations(self._revisions, 2): - outfile = os.path.join( - self.eval_dir, - "%s-%s-%s-compare" % (self.name, rev1, rev2) - + ".html") - subprocess.call(['publish', outfile]) - - self.add_step(Step("make-comparison-tables", make_comparison_tables)) - self.add_step(Step("publish-comparison-tables", publish_comparison_tables)) - - def add_scatter_plot_step(self, attributes=None): - """Add a step that creates scatter plots for all revision pairs. - - Create a scatter plot for each combination of attribute, - configuration and revisions pair. If *attributes* is not - specified, a list of common scatter plot attributes is used. - For portfolios all attributes except "cost", "coverage" and - "plan_length" will be ignored. 
:: - - exp.add_scatter_plot_step(attributes=["expansions"]) - - """ - if attributes is None: - attributes = self.DEFAULT_SCATTER_PLOT_ATTRIBUTES - scatter_dir = os.path.join(self.eval_dir, "scatter") - - def make_scatter_plot(config_nick, rev1, rev2, attribute): - name = "-".join([self.name, rev1, rev2, attribute, config_nick]) - print "Make scatter plot for", name - algo1 = "%s-%s" % (rev1, config_nick) - algo2 = "%s-%s" % (rev2, config_nick) - report = ScatterPlotReport( - filter_config=[algo1, algo2], - attributes=[attribute], - get_category=lambda run1, run2: run1["domain"], - legend_location=(1.3, 0.5)) - report( - self.eval_dir, - os.path.join(scatter_dir, rev1 + "-" + rev2, name)) - - def make_scatter_plots(): - for config in self._configs: - for rev1, rev2 in itertools.combinations(self._revisions, 2): - for attribute in self.get_supported_attributes( - config.nick, attributes): - make_scatter_plot(config.nick, rev1, rev2, attribute) - - self.add_step(Step("make-scatter-plots", make_scatter_plots)) diff --git a/experiments/issue629/experimental-branches.py b/experiments/issue629/experimental-branches.py deleted file mode 100755 index fd179575a0..0000000000 --- a/experiments/issue629/experimental-branches.py +++ /dev/null @@ -1,47 +0,0 @@ -#! 
/usr/bin/env python -# -*- coding: utf-8 -*- - -import os -import suites -from lab.reports import Attribute, gm - -from common_setup import IssueConfig, IssueExperiment - -def main(revisions=None): - benchmarks_dir=os.path.expanduser('~/projects/downward/benchmarks') - suite=[ - 'airport', - 'depot', - 'driverlog', - 'elevators-opt08-strips', - 'elevators-opt11-strips', - 'freecell', - 'hiking-opt14-strips', - 'pipesworld-tankage', - ] - - configs = { - IssueConfig( - 'astar-blind-ssec', - ['--search', 'astar(blind(), pruning=stubborn_sets_ec())'] - ), - } - - exp = IssueExperiment( - benchmarks_dir=benchmarks_dir, - suite=suite, - revisions=revisions, - configs=configs, - test_suite=['depot:p01.pddl'], - processes=4, - email='florian.pommerening@unibas.ch', - ) - - exp.add_comparison_table_step() - - exp() - -# issue629-experimental-base is based on issue629-v2-base and only removed the ordering of actions after pruning -# issue629-experimental is based on issue629-v4 and only removed the ordering of actions after pruning -# Both branches will not be merged. 
-main(revisions=['issue629-experimental-base', 'issue629-experimental']) diff --git a/experiments/issue629/relativescatter.py b/experiments/issue629/relativescatter.py deleted file mode 100644 index 68e2529f34..0000000000 --- a/experiments/issue629/relativescatter.py +++ /dev/null @@ -1,106 +0,0 @@ -# -*- coding: utf-8 -*- - -from collections import defaultdict - -from matplotlib import ticker - -from downward.reports.scatter import ScatterPlotReport -from downward.reports.plot import PlotReport, Matplotlib, MatplotlibPlot - - -# TODO: handle outliers - -# TODO: this is mostly copied from ScatterMatplotlib (scatter.py) -class RelativeScatterMatplotlib(Matplotlib): - @classmethod - def _plot(cls, report, axes, categories, styles): - # Display grid - axes.grid(b=True, linestyle='-', color='0.75') - - has_points = False - # Generate the scatter plots - for category, coords in sorted(categories.items()): - X, Y = zip(*coords) - axes.scatter(X, Y, s=42, label=category, **styles[category]) - if X and Y: - has_points = True - - if report.xscale == 'linear' or report.yscale == 'linear': - plot_size = report.missing_val * 1.01 - else: - plot_size = report.missing_val * 1.25 - - # make 5 ticks above and below 1 - yticks = [] - tick_step = report.ylim_top**(1/5.0) - for i in xrange(-5, 6): - yticks.append(tick_step**i) - axes.set_yticks(yticks) - axes.get_yaxis().set_major_formatter(ticker.ScalarFormatter()) - - axes.set_xlim(report.xlim_left or -1, report.xlim_right or plot_size) - axes.set_ylim(report.ylim_bottom or -1, report.ylim_top or plot_size) - - for axis in [axes.xaxis, axes.yaxis]: - MatplotlibPlot.change_axis_formatter(axis, - report.missing_val if report.show_missing else None) - return has_points - - - -class RelativeScatterPlotReport(ScatterPlotReport): - """ - Generate a scatter plot that shows how a specific attribute in two - configurations. The attribute value in config 1 is shown on the - x-axis and the relation to the value in config 2 on the y-axis. 
- """ - - def __init__(self, show_missing=True, get_category=None, **kwargs): - ScatterPlotReport.__init__(self, show_missing, get_category, **kwargs) - if self.output_format == 'tex': - raise "not supported" - else: - self.writer = RelativeScatterMatplotlib - - def _fill_categories(self, runs): - # We discard the *runs* parameter. - # Map category names to value tuples - categories = defaultdict(list) - self.ylim_bottom = 2 - self.ylim_top = 0.5 - self.xlim_left = float("inf") - for (domain, problem), runs in self.problem_runs.items(): - if len(runs) != 2: - continue - run1, run2 = runs - assert (run1['config'] == self.configs[0] and - run2['config'] == self.configs[1]) - val1 = run1.get(self.attribute) - val2 = run2.get(self.attribute) - if val1 is None or val2 is None: - continue - category = self.get_category(run1, run2) - assert val1 > 0, (domain, problem, self.configs[0], val1) - assert val2 > 0, (domain, problem, self.configs[1], val2) - x = val1 - y = val2 / float(val1) - - categories[category].append((x, y)) - - self.ylim_top = max(self.ylim_top, y) - self.ylim_bottom = min(self.ylim_bottom, y) - self.xlim_left = min(self.xlim_left, x) - - # center around 1 - if self.ylim_bottom < 1: - self.ylim_top = max(self.ylim_top, 1 / float(self.ylim_bottom)) - if self.ylim_top > 1: - self.ylim_bottom = min(self.ylim_bottom, 1 / float(self.ylim_top)) - return categories - - def _set_scales(self, xscale, yscale): - # ScatterPlots use log-scaling on the x-axis by default. - default_xscale = 'log' - if self.attribute and self.attribute in self.LINEAR: - default_xscale = 'linear' - PlotReport._set_scales(self, xscale or default_xscale, 'log') diff --git a/experiments/issue629/suites.py b/experiments/issue629/suites.py deleted file mode 100644 index 4615212cfd..0000000000 --- a/experiments/issue629/suites.py +++ /dev/null @@ -1,350 +0,0 @@ -#! 
/usr/bin/env python -# -*- coding: utf-8 -*- - -import argparse -import textwrap - - -HELP = "Convert suite name to list of domains or tasks." - - -def suite_alternative_formulations(): - return ['airport-adl', 'no-mprime', 'no-mystery'] - - -def suite_ipc98_to_ipc04_adl(): - return [ - 'assembly', 'miconic-fulladl', 'miconic-simpleadl', - 'optical-telegraphs', 'philosophers', 'psr-large', - 'psr-middle', 'schedule', - ] - - -def suite_ipc98_to_ipc04_strips(): - return [ - 'airport', 'blocks', 'depot', 'driverlog', 'freecell', 'grid', - 'gripper', 'logistics00', 'logistics98', 'miconic', 'movie', - 'mprime', 'mystery', 'pipesworld-notankage', 'psr-small', - 'satellite', 'zenotravel', - ] - - -def suite_ipc98_to_ipc04(): - # All IPC1-4 domains, including the trivial Movie. - return sorted(suite_ipc98_to_ipc04_adl() + suite_ipc98_to_ipc04_strips()) - - -def suite_ipc06_adl(): - return [ - 'openstacks', - 'pathways', - 'trucks', - ] - - -def suite_ipc06_strips_compilations(): - return [ - 'openstacks-strips', - 'pathways-noneg', - 'trucks-strips', - ] - - -def suite_ipc06_strips(): - return [ - 'pipesworld-tankage', - 'rovers', - 'storage', - 'tpp', - ] - - -def suite_ipc06(): - return sorted(suite_ipc06_adl() + suite_ipc06_strips()) - - -def suite_ipc08_common_strips(): - return [ - 'parcprinter-08-strips', - 'pegsol-08-strips', - 'scanalyzer-08-strips', - ] - - -def suite_ipc08_opt_adl(): - return ['openstacks-opt08-adl'] - - -def suite_ipc08_opt_strips(): - return sorted(suite_ipc08_common_strips() + [ - 'elevators-opt08-strips', - 'openstacks-opt08-strips', - 'sokoban-opt08-strips', - 'transport-opt08-strips', - 'woodworking-opt08-strips', - ]) - - -def suite_ipc08_opt(): - return sorted(suite_ipc08_opt_strips() + suite_ipc08_opt_adl()) - - -def suite_ipc08_sat_adl(): - return ['openstacks-sat08-adl'] - - -def suite_ipc08_sat_strips(): - return sorted(suite_ipc08_common_strips() + [ - # Note: cyber-security is missing. 
- 'elevators-sat08-strips', - 'openstacks-sat08-strips', - 'sokoban-sat08-strips', - 'transport-sat08-strips', - 'woodworking-sat08-strips', - ]) - - -def suite_ipc08_sat(): - return sorted(suite_ipc08_sat_strips() + suite_ipc08_sat_adl()) - - -def suite_ipc08(): - return sorted(set(suite_ipc08_opt() + suite_ipc08_sat())) - - -def suite_ipc11_opt(): - return [ - 'barman-opt11-strips', - 'elevators-opt11-strips', - 'floortile-opt11-strips', - 'nomystery-opt11-strips', - 'openstacks-opt11-strips', - 'parcprinter-opt11-strips', - 'parking-opt11-strips', - 'pegsol-opt11-strips', - 'scanalyzer-opt11-strips', - 'sokoban-opt11-strips', - 'tidybot-opt11-strips', - 'transport-opt11-strips', - 'visitall-opt11-strips', - 'woodworking-opt11-strips', - ] - - -def suite_ipc11_sat(): - return [ - 'barman-sat11-strips', - 'elevators-sat11-strips', - 'floortile-sat11-strips', - 'nomystery-sat11-strips', - 'openstacks-sat11-strips', - 'parcprinter-sat11-strips', - 'parking-sat11-strips', - 'pegsol-sat11-strips', - 'scanalyzer-sat11-strips', - 'sokoban-sat11-strips', - 'tidybot-sat11-strips', - 'transport-sat11-strips', - 'visitall-sat11-strips', - 'woodworking-sat11-strips', - ] - - -def suite_ipc11(): - return sorted(suite_ipc11_opt() + suite_ipc11_sat()) - - -def suite_ipc14_agl_adl(): - return [ - 'cavediving-14-adl', - 'citycar-sat14-adl', - 'maintenance-sat14-adl', - ] - - -def suite_ipc14_agl_strips(): - return [ - 'barman-sat14-strips', - 'childsnack-sat14-strips', - 'floortile-sat14-strips', - 'ged-sat14-strips', - 'hiking-agl14-strips', - 'openstacks-agl14-strips', - 'parking-sat14-strips', - 'tetris-sat14-strips', - 'thoughtful-sat14-strips', - 'transport-sat14-strips', - 'visitall-sat14-strips', - ] - - -def suite_ipc14_agl(): - return sorted(suite_ipc14_agl_adl() + suite_ipc14_agl_strips()) - - -def suite_ipc14_mco_adl(): - return [ - 'cavediving-14-adl', - 'citycar-sat14-adl', - 'maintenance-sat14-adl', - ] - - -def suite_ipc14_mco_strips(): - return [ - 
'barman-mco14-strips', - 'childsnack-sat14-strips', - 'floortile-sat14-strips', - 'ged-sat14-strips', - 'hiking-sat14-strips', - 'openstacks-sat14-strips', - 'parking-sat14-strips', - 'tetris-sat14-strips', - 'thoughtful-mco14-strips', - 'transport-sat14-strips', - 'visitall-sat14-strips', - ] - - -def suite_ipc14_mco(): - return sorted(suite_ipc14_mco_adl() + suite_ipc14_mco_strips()) - - -def suite_ipc14_opt_adl(): - return [ - 'cavediving-14-adl', - 'citycar-opt14-adl', - 'maintenance-opt14-adl', - ] - - -def suite_ipc14_opt_strips(): - return [ - 'barman-opt14-strips', - 'childsnack-opt14-strips', - 'floortile-opt14-strips', - 'ged-opt14-strips', - 'hiking-opt14-strips', - 'openstacks-opt14-strips', - 'parking-opt14-strips', - 'tetris-opt14-strips', - 'tidybot-opt14-strips', - 'transport-opt14-strips', - 'visitall-opt14-strips', - ] - - -def suite_ipc14_opt(): - return sorted(suite_ipc14_opt_adl() + suite_ipc14_opt_strips()) - - -def suite_ipc14_sat_adl(): - return [ - 'cavediving-14-adl', - 'citycar-sat14-adl', - 'maintenance-sat14-adl', - ] - - -def suite_ipc14_sat_strips(): - return [ - 'barman-sat14-strips', - 'childsnack-sat14-strips', - 'floortile-sat14-strips', - 'ged-sat14-strips', - 'hiking-sat14-strips', - 'openstacks-sat14-strips', - 'parking-sat14-strips', - 'tetris-sat14-strips', - 'thoughtful-sat14-strips', - 'transport-sat14-strips', - 'visitall-sat14-strips', - ] - - -def suite_ipc14_sat(): - return sorted(suite_ipc14_sat_adl() + suite_ipc14_sat_strips()) - - -def suite_ipc14(): - return sorted(set( - suite_ipc14_agl() + suite_ipc14_mco() + - suite_ipc14_opt() + suite_ipc14_sat())) - - -def suite_unsolvable(): - return sorted( - ['mystery:prob%02d.pddl' % index - for index in [4, 5, 7, 8, 12, 16, 18, 21, 22, 23, 24]] + - ['miconic-fulladl:f21-3.pddl', 'miconic-fulladl:f30-2.pddl']) - - -def suite_optimal_adl(): - return sorted( - suite_ipc98_to_ipc04_adl() + suite_ipc06_adl() + - suite_ipc08_opt_adl() + suite_ipc14_opt_adl()) - - -def 
suite_optimal_strips(): - return sorted( - suite_ipc98_to_ipc04_strips() + suite_ipc06_strips() + - suite_ipc06_strips_compilations() + suite_ipc08_opt_strips() + - suite_ipc11_opt() + suite_ipc14_opt_strips()) - - -def suite_optimal(): - return sorted(suite_optimal_adl() + suite_optimal_strips()) - - -def suite_satisficing_adl(): - return sorted( - suite_ipc98_to_ipc04_adl() + suite_ipc06_adl() + - suite_ipc08_sat_adl() + suite_ipc14_sat_adl()) - - -def suite_satisficing_strips(): - return sorted( - suite_ipc98_to_ipc04_strips() + suite_ipc06_strips() + - suite_ipc06_strips_compilations() + suite_ipc08_sat_strips() + - suite_ipc11_sat() + suite_ipc14_sat_strips()) - - -def suite_satisficing(): - return sorted(suite_satisficing_adl() + suite_satisficing_strips()) - - -def suite_all(): - return sorted( - suite_ipc98_to_ipc04() + suite_ipc06() + - suite_ipc06_strips_compilations() + suite_ipc08() + - suite_ipc11() + suite_ipc14() + suite_alternative_formulations()) - - -def parse_args(): - parser = argparse.ArgumentParser() - parser.add_argument("suite", help="suite name") - return parser.parse_args() - - -def main(): - prefix = "suite_" - suite_names = [ - name[len(prefix):] for name in sorted(globals().keys()) - if name.startswith(prefix)] - parser = argparse.ArgumentParser(description=HELP) - parser.add_argument("suite", choices=suite_names, help="suite name") - parser.add_argument( - "--width", default=72, type=int, - help="output line width (default: %(default)s). Use 1 for single " - "column.") - args = parser.parse_args() - suite_func = globals()[prefix + args.suite] - print(textwrap.fill( - str(suite_func()), - width=args.width, - break_long_words=False, - break_on_hyphens=False)) - - -if __name__ == "__main__": - main() diff --git a/experiments/issue629/v1.py b/experiments/issue629/v1.py deleted file mode 100755 index ecf70046c1..0000000000 --- a/experiments/issue629/v1.py +++ /dev/null @@ -1,52 +0,0 @@ -#! 
/usr/bin/env python -# -*- coding: utf-8 -*- - -import os -import suites -from lab.reports import Attribute, gm - -from common_setup import IssueConfig, IssueExperiment -try: - from relativescatter import RelativeScatterPlotReport - matplotlib = True -except ImportError: - print 'matplotlib not available, scatter plots not available' - matplotlib = False - -def main(revisions=None): - benchmarks_dir=os.path.expanduser('~/projects/downward/benchmarks') - suite=suites.suite_optimal_strips() - - configs = { - IssueConfig('astar-blind', ['--search', 'astar(blind())']), - IssueConfig('astar-blind-sss', ['--search', 'astar(blind(), pruning=stubborn_sets_simple())']), - IssueConfig('astar-blind-ssec', ['--search', 'astar(blind(), pruning=stubborn_sets_ec())']), - } - - exp = IssueExperiment( - benchmarks_dir=benchmarks_dir, - suite=suite, - revisions=revisions, - configs=configs, - test_suite=['depot:p01.pddl'], - processes=4, - email='florian.pommerening@unibas.ch', - ) - - exp.add_comparison_table_step() - - if matplotlib: - for attribute in ["memory", "total_time"]: - for config in configs: - exp.add_report( - RelativeScatterPlotReport( - attributes=[attribute], - filter_config=["{}-{}".format(rev, config.nick) for rev in revisions], - get_category=lambda run1, run2: run1.get("domain"), - ), - outfile="{}-{}-{}.png".format(exp.name, attribute, config.nick) - ) - - exp() - -main(revisions=['issue629-base', 'issue629-v1']) diff --git a/experiments/issue629/v10.py b/experiments/issue629/v10.py deleted file mode 100755 index ad54805f3d..0000000000 --- a/experiments/issue629/v10.py +++ /dev/null @@ -1,53 +0,0 @@ -#! 
/usr/bin/env python -# -*- coding: utf-8 -*- - -import os -import suites -from lab.reports import Attribute, gm - -from common_setup import IssueConfig, IssueExperiment -try: - from relativescatter import RelativeScatterPlotReport - matplotlib = True -except ImportError: - print 'matplotlib not available, scatter plots not available' - matplotlib = False - -def main(revisions=None): - benchmarks_dir=os.path.expanduser('~/projects/downward/benchmarks') - suite=suites.suite_optimal_strips() - - configs = { - IssueConfig('astar-blind', ['--search', 'astar(blind())']), - IssueConfig('astar-blind-sss', ['--search', 'astar(blind(), pruning=stubborn_sets_simple())']), - IssueConfig('astar-blind-ssec', ['--search', 'astar(blind(), pruning=stubborn_sets_ec())']), - } - - exp = IssueExperiment( - benchmarks_dir=benchmarks_dir, - suite=suite, - revisions=revisions, - configs=configs, - test_suite=['depot:p01.pddl'], - processes=4, - email='florian.pommerening@unibas.ch', - ) - - exp.add_comparison_table_step() - - if matplotlib: - for r1, r2 in zip(revisions, revisions[1:]) + [(revisions[0], revisions[-1])]: - for attribute in ["memory", "total_time"]: - for config in configs: - exp.add_report( - RelativeScatterPlotReport( - attributes=[attribute], - filter_config=["{}-{}".format(rev, config.nick) for rev in [r1, r2]], - get_category=lambda run1, run2: run1.get("domain"), - ), - outfile="{}-{}-{}-{}-{}.png".format(exp.name, attribute, config.nick, r1, r2) - ) - - exp() - -main(revisions=['issue629-v9', 'issue629-v10']) diff --git a/experiments/issue629/v2.py b/experiments/issue629/v2.py deleted file mode 100755 index 0236bd1907..0000000000 --- a/experiments/issue629/v2.py +++ /dev/null @@ -1,52 +0,0 @@ -#! 
/usr/bin/env python -# -*- coding: utf-8 -*- - -import os -import suites -from lab.reports import Attribute, gm - -from common_setup import IssueConfig, IssueExperiment -try: - from relativescatter import RelativeScatterPlotReport - matplotlib = True -except ImportError: - print 'matplotlib not available, scatter plots not available' - matplotlib = False - -def main(revisions=None): - benchmarks_dir=os.path.expanduser('~/projects/downward/benchmarks') - suite=suites.suite_optimal_strips() - - configs = { - IssueConfig('astar-blind', ['--search', 'astar(blind())']), - IssueConfig('astar-blind-sss', ['--search', 'astar(blind(), pruning=stubborn_sets_simple())']), - IssueConfig('astar-blind-ssec', ['--search', 'astar(blind(), pruning=stubborn_sets_ec())']), - } - - exp = IssueExperiment( - benchmarks_dir=benchmarks_dir, - suite=suite, - revisions=revisions, - configs=configs, - test_suite=['depot:p01.pddl'], - processes=4, - email='florian.pommerening@unibas.ch', - ) - - exp.add_comparison_table_step() - - if matplotlib: - for attribute in ["memory", "total_time"]: - for config in configs: - exp.add_report( - RelativeScatterPlotReport( - attributes=[attribute], - filter_config=["{}-{}".format(rev, config.nick) for rev in revisions], - get_category=lambda run1, run2: run1.get("domain"), - ), - outfile="{}-{}-{}.png".format(exp.name, attribute, config.nick) - ) - - exp() - -main(revisions=['issue629-v2-base', 'issue629-v2']) diff --git a/experiments/issue629/v3.py b/experiments/issue629/v3.py deleted file mode 100755 index 1ff29b4e5c..0000000000 --- a/experiments/issue629/v3.py +++ /dev/null @@ -1,52 +0,0 @@ -#! 
/usr/bin/env python -# -*- coding: utf-8 -*- - -import os -import suites -from lab.reports import Attribute, gm - -from common_setup import IssueConfig, IssueExperiment -try: - from relativescatter import RelativeScatterPlotReport - matplotlib = True -except ImportError: - print 'matplotlib not available, scatter plots not available' - matplotlib = False - -def main(revisions=None): - benchmarks_dir=os.path.expanduser('~/projects/downward/benchmarks') - suite=suites.suite_optimal_strips() - - configs = { - IssueConfig('astar-blind', ['--search', 'astar(blind())']), - IssueConfig('astar-blind-sss', ['--search', 'astar(blind(), pruning=stubborn_sets_simple())']), - IssueConfig('astar-blind-ssec', ['--search', 'astar(blind(), pruning=stubborn_sets_ec())']), - } - - exp = IssueExperiment( - benchmarks_dir=benchmarks_dir, - suite=suite, - revisions=revisions, - configs=configs, - test_suite=['depot:p01.pddl'], - processes=4, - email='florian.pommerening@unibas.ch', - ) - - exp.add_comparison_table_step() - - if matplotlib: - for attribute in ["memory", "total_time"]: - for config in configs: - exp.add_report( - RelativeScatterPlotReport( - attributes=[attribute], - filter_config=["{}-{}".format(rev, config.nick) for rev in revisions], - get_category=lambda run1, run2: run1.get("domain"), - ), - outfile="{}-{}-{}.png".format(exp.name, attribute, config.nick) - ) - - exp() - -main(revisions=['issue629-v2-base', 'issue629-v3']) diff --git a/experiments/issue629/v4.py b/experiments/issue629/v4.py deleted file mode 100755 index d4ffdabb50..0000000000 --- a/experiments/issue629/v4.py +++ /dev/null @@ -1,52 +0,0 @@ -#! 
/usr/bin/env python -# -*- coding: utf-8 -*- - -import os -import suites -from lab.reports import Attribute, gm - -from common_setup import IssueConfig, IssueExperiment -try: - from relativescatter import RelativeScatterPlotReport - matplotlib = True -except ImportError: - print 'matplotlib not available, scatter plots not available' - matplotlib = False - -def main(revisions=None): - benchmarks_dir=os.path.expanduser('~/projects/downward/benchmarks') - suite=suites.suite_optimal_strips() - - configs = { - IssueConfig('astar-blind', ['--search', 'astar(blind())']), - IssueConfig('astar-blind-sss', ['--search', 'astar(blind(), pruning=stubborn_sets_simple())']), - IssueConfig('astar-blind-ssec', ['--search', 'astar(blind(), pruning=stubborn_sets_ec())']), - } - - exp = IssueExperiment( - benchmarks_dir=benchmarks_dir, - suite=suite, - revisions=revisions, - configs=configs, - test_suite=['depot:p01.pddl'], - processes=4, - email='florian.pommerening@unibas.ch', - ) - - exp.add_comparison_table_step() - - if matplotlib: - for attribute in ["memory", "total_time"]: - for config in configs: - exp.add_report( - RelativeScatterPlotReport( - attributes=[attribute], - filter_config=["{}-{}".format(rev, config.nick) for rev in revisions], - get_category=lambda run1, run2: run1.get("domain"), - ), - outfile="{}-{}-{}.png".format(exp.name, attribute, config.nick) - ) - - exp() - -main(revisions=['issue629-v2-base', 'issue629-v4']) diff --git a/experiments/issue629/v5-v6.py b/experiments/issue629/v5-v6.py deleted file mode 100755 index ff0dc87688..0000000000 --- a/experiments/issue629/v5-v6.py +++ /dev/null @@ -1,52 +0,0 @@ -#! 
/usr/bin/env python -# -*- coding: utf-8 -*- - -import os -import suites -from lab.reports import Attribute, gm - -from common_setup import IssueConfig, IssueExperiment -try: - from relativescatter import RelativeScatterPlotReport - matplotlib = True -except ImportError: - print 'matplotlib not available, scatter plots not available' - matplotlib = False - -def main(revisions=None): - benchmarks_dir=os.path.expanduser('~/projects/downward/benchmarks') - suite=suites.suite_optimal_strips() - - configs = { - IssueConfig('astar-blind', ['--search', 'astar(blind())']), - IssueConfig('astar-blind-sss', ['--search', 'astar(blind(), pruning=stubborn_sets_simple())']), - IssueConfig('astar-blind-ssec', ['--search', 'astar(blind(), pruning=stubborn_sets_ec())']), - } - - exp = IssueExperiment( - benchmarks_dir=benchmarks_dir, - suite=suite, - revisions=revisions, - configs=configs, - test_suite=['depot:p01.pddl'], - processes=4, - email='florian.pommerening@unibas.ch', - ) - - exp.add_comparison_table_step() - - if matplotlib: - for attribute in ["memory", "total_time"]: - for config in configs: - exp.add_report( - RelativeScatterPlotReport( - attributes=[attribute], - filter_config=["{}-{}".format(rev, config.nick) for rev in revisions], - get_category=lambda run1, run2: run1.get("domain"), - ), - outfile="{}-{}-{}.png".format(exp.name, attribute, config.nick) - ) - - exp() - -main(revisions=['issue629-v5', 'issue629-v6']) diff --git a/experiments/issue629/v7-9.py b/experiments/issue629/v7-9.py deleted file mode 100755 index 8b11788c54..0000000000 --- a/experiments/issue629/v7-9.py +++ /dev/null @@ -1,53 +0,0 @@ -#! 
/usr/bin/env python -# -*- coding: utf-8 -*- - -import os -import suites -from lab.reports import Attribute, gm - -from common_setup import IssueConfig, IssueExperiment -try: - from relativescatter import RelativeScatterPlotReport - matplotlib = True -except ImportError: - print 'matplotlib not available, scatter plots not available' - matplotlib = False - -def main(revisions=None): - benchmarks_dir=os.path.expanduser('~/projects/downward/benchmarks') - suite=suites.suite_optimal_strips() - - configs = { - IssueConfig('astar-blind', ['--search', 'astar(blind())']), - IssueConfig('astar-blind-sss', ['--search', 'astar(blind(), pruning=stubborn_sets_simple())']), - IssueConfig('astar-blind-ssec', ['--search', 'astar(blind(), pruning=stubborn_sets_ec())']), - } - - exp = IssueExperiment( - benchmarks_dir=benchmarks_dir, - suite=suite, - revisions=revisions, - configs=configs, - test_suite=['depot:p01.pddl'], - processes=4, - email='florian.pommerening@unibas.ch', - ) - - exp.add_comparison_table_step() - - if matplotlib: - for r1, r2 in zip(revisions, revisions[1:]) + [(revisions[0], revisions[-1])]: - for attribute in ["memory", "total_time"]: - for config in configs: - exp.add_report( - RelativeScatterPlotReport( - attributes=[attribute], - filter_config=["{}-{}".format(rev, config.nick) for rev in [r1, r2]], - get_category=lambda run1, run2: run1.get("domain"), - ), - outfile="{}-{}-{}-{}-{}.png".format(exp.name, attribute, config.nick, r1, r2) - ) - - exp() - -main(revisions=['issue629-v7-base', 'issue629-v7', 'issue629-v8', 'issue629-v9']) diff --git a/experiments/issue632/common_setup.py b/experiments/issue632/common_setup.py deleted file mode 100644 index 953c87e27f..0000000000 --- a/experiments/issue632/common_setup.py +++ /dev/null @@ -1,356 +0,0 @@ -# -*- coding: utf-8 -*- - -import itertools -import os -import platform -import subprocess -import sys - -from lab.environments import LocalEnvironment, MaiaEnvironment -from lab.experiment import ARGPARSER -from 
lab.steps import Step -from lab import tools - -from downward.experiment import FastDownwardExperiment -from downward.reports.absolute import AbsoluteReport -from downward.reports.compare import CompareConfigsReport -from downward.reports.scatter import ScatterPlotReport - - -def parse_args(): - ARGPARSER.add_argument( - "--test", - choices=["yes", "no", "auto"], - default="auto", - dest="test_run", - help="test experiment locally on a small suite if --test=yes or " - "--test=auto and we are not on a cluster") - return ARGPARSER.parse_args() - -ARGS = parse_args() - - -def get_script(): - """Get file name of main script.""" - return tools.get_script_path() - - -def get_script_dir(): - """Get directory of main script. - - Usually a relative directory (depends on how it was called by the user.)""" - return os.path.dirname(get_script()) - - -def get_experiment_name(): - """Get name for experiment. - - Derived from the absolute filename of the main script, e.g. - "/ham/spam/eggs.py" => "spam-eggs".""" - script = os.path.abspath(get_script()) - script_dir = os.path.basename(os.path.dirname(script)) - script_base = os.path.splitext(os.path.basename(script))[0] - return "%s-%s" % (script_dir, script_base) - - -def get_data_dir(): - """Get data dir for the experiment. - - This is the subdirectory "data" of the directory containing - the main script.""" - return os.path.join(get_script_dir(), "data", get_experiment_name()) - - -def get_repo_base(): - """Get base directory of the repository, as an absolute path. - - Search upwards in the directory tree from the main script until a - directory with a subdirectory named ".hg" is found. 
- - Abort if the repo base cannot be found.""" - path = os.path.abspath(get_script_dir()) - while os.path.dirname(path) != path: - if os.path.exists(os.path.join(path, ".hg")): - return path - path = os.path.dirname(path) - sys.exit("repo base could not be found") - - -def is_running_on_cluster(): - node = platform.node() - return ( - "cluster" in node or - node.startswith("gkigrid") or - node in ["habakuk", "turtur"]) - - -def is_test_run(): - return ARGS.test_run == "yes" or ( - ARGS.test_run == "auto" and not is_running_on_cluster()) - - -def get_algo_nick(revision, config_nick): - return "{revision}-{config_nick}".format(**locals()) - - -class IssueConfig(object): - """Hold information about a planner configuration. - - See FastDownwardExperiment.add_algorithm() for documentation of the - constructor's options. - - """ - def __init__(self, nick, component_options, - build_options=None, driver_options=None): - self.nick = nick - self.component_options = component_options - self.build_options = build_options - self.driver_options = driver_options - - -class IssueExperiment(FastDownwardExperiment): - """Subclass of FastDownwardExperiment with some convenience features.""" - - DEFAULT_TEST_SUITE = "gripper:prob01.pddl" - - DEFAULT_TABLE_ATTRIBUTES = [ - "cost", - "coverage", - "error", - "evaluations", - "expansions", - "expansions_until_last_jump", - "generated", - "memory", - "quality", - "run_dir", - "score_evaluations", - "score_expansions", - "score_generated", - "score_memory", - "score_search_time", - "score_total_time", - "search_time", - "total_time", - ] - - DEFAULT_SCATTER_PLOT_ATTRIBUTES = [ - "evaluations", - "expansions", - "expansions_until_last_jump", - "initial_h_value", - "memory", - "search_time", - "total_time", - ] - - PORTFOLIO_ATTRIBUTES = [ - "cost", - "coverage", - "error", - "plan_length", - "run_dir", - ] - - def __init__(self, suite, revisions=[], configs={}, grid_priority=None, - path=None, test_suite=None, email=None, processes=None, - 
**kwargs): - """ - If *revisions* is specified, it should be a non-empty - list of revisions, which specify which planner versions to use - in the experiment. The same versions are used for translator, - preprocessor and search. :: - - IssueExperiment(revisions=["issue123", "4b3d581643"], ...) - - *configs* must be a non-empty list of IssueConfig objects. :: - - IssueExperiment(..., configs=[ - IssueConfig("ff", ["--search", "eager_greedy(ff())"]), - IssueConfig( - "lama", [], - driver_options=["--alias", "seq-sat-lama-2011"]), - ]) - - *suite* sets the benchmarks for the experiment. It must be a - single string or a list of strings specifying domains or - tasks. The downward.suites module has many predefined - suites. :: - - IssueExperiment(..., suite=["grid", "gripper:prob01.pddl"]) - - from downward import suites - IssueExperiment(..., suite=suites.suite_all()) - IssueExperiment(..., suite=suites.suite_satisficing_with_ipc11()) - IssueExperiment(..., suite=suites.suite_optimal()) - - Use *grid_priority* to set the job priority for cluster - experiments. It must be in the range [-1023, 0] where 0 is the - highest priority. By default the priority is 0. :: - - IssueExperiment(..., grid_priority=-500) - - If *path* is specified, it must be the path to where the - experiment should be built (e.g. - /home/john/experiments/issue123/exp01/). If omitted, the - experiment path is derived automatically from the main - script's filename. Example:: - - script = experiments/issue123/exp01.py --> - path = experiments/issue123/data/issue123-exp01/ - - Specify *test_suite* to set the benchmarks for experiment test - runs. By default the first gripper task is used. - - IssueExperiment(..., test_suite=["depot:pfile1", "tpp:p01.pddl"]) - - If *email* is specified, it should be an email address. This - email address will be notified upon completion of the experiments - if it is run on the cluster. 
- """ - - if is_test_run(): - kwargs["environment"] = LocalEnvironment(processes=processes) - suite = test_suite or self.DEFAULT_TEST_SUITE - elif "environment" not in kwargs: - kwargs["environment"] = MaiaEnvironment( - priority=grid_priority, email=email) - - path = path or get_data_dir() - - FastDownwardExperiment.__init__(self, path=path, **kwargs) - - repo = get_repo_base() - for rev in revisions: - for config in configs: - self.add_algorithm( - get_algo_nick(rev, config.nick), - repo, - rev, - config.component_options, - build_options=config.build_options, - driver_options=config.driver_options) - - self.add_suite(os.path.join(repo, "benchmarks"), suite) - - self._revisions = revisions - self._configs = configs - - @classmethod - def _is_portfolio(cls, config_nick): - return "fdss" in config_nick - - @classmethod - def get_supported_attributes(cls, config_nick, attributes): - if cls._is_portfolio(config_nick): - return [attr for attr in attributes - if attr in cls.PORTFOLIO_ATTRIBUTES] - return attributes - - def add_absolute_report_step(self, **kwargs): - """Add step that makes an absolute report. - - Absolute reports are useful for experiments that don't - compare revisions. - - The report is written to the experiment evaluation directory. - - All *kwargs* will be passed to the AbsoluteReport class. If - the keyword argument *attributes* is not specified, a - default list of attributes is used. :: - - exp.add_absolute_report_step(attributes=["coverage"]) - - """ - kwargs.setdefault("attributes", self.DEFAULT_TABLE_ATTRIBUTES) - report = AbsoluteReport(**kwargs) - outfile = os.path.join(self.eval_dir, - get_experiment_name() + "." + - report.output_format) - self.add_report(report, outfile=outfile) - self.add_step(Step('publish-absolute-report', - subprocess.call, - ['publish', outfile])) - - def add_comparison_table_step(self, **kwargs): - """Add a step that makes pairwise revision comparisons. 
- - Create comparative reports for all pairs of Fast Downward - revisions. Each report pairs up the runs of the same config and - lists the two absolute attribute values and their difference - for all attributes in kwargs["attributes"]. - - All *kwargs* will be passed to the CompareConfigsReport class. - If the keyword argument *attributes* is not specified, a - default list of attributes is used. :: - - exp.add_comparison_table_step(attributes=["coverage"]) - - """ - kwargs.setdefault("attributes", self.DEFAULT_TABLE_ATTRIBUTES) - - def make_comparison_tables(): - for rev1, rev2 in itertools.combinations(self._revisions, 2): - compared_configs = [] - for config in self._configs: - config_nick = config.nick - compared_configs.append( - ("%s-%s" % (rev1, config_nick), - "%s-%s" % (rev2, config_nick), - "Diff (%s)" % config_nick)) - report = CompareConfigsReport(compared_configs, **kwargs) - outfile = os.path.join( - self.eval_dir, - "%s-%s-%s-compare" % (self.name, rev1, rev2) - + "." + report.output_format) - report(self.eval_dir, outfile) - - def publish_comparison_tables(): - for rev1, rev2 in itertools.combinations(self._revisions, 2): - outfile = os.path.join( - self.eval_dir, - "%s-%s-%s-compare" % (self.name, rev1, rev2) - + ".html") - subprocess.call(['publish', outfile]) - - self.add_step(Step("make-comparison-tables", make_comparison_tables)) - self.add_step(Step("publish-comparison-tables", publish_comparison_tables)) - - def add_scatter_plot_step(self, attributes=None): - """Add a step that creates scatter plots for all revision pairs. - - Create a scatter plot for each combination of attribute, - configuration and revisions pair. If *attributes* is not - specified, a list of common scatter plot attributes is used. - For portfolios all attributes except "cost", "coverage" and - "plan_length" will be ignored. 
:: - - exp.add_scatter_plot_step(attributes=["expansions"]) - - """ - if attributes is None: - attributes = self.DEFAULT_SCATTER_PLOT_ATTRIBUTES - scatter_dir = os.path.join(self.eval_dir, "scatter") - - def make_scatter_plot(config_nick, rev1, rev2, attribute): - name = "-".join([self.name, rev1, rev2, attribute, config_nick]) - print "Make scatter plot for", name - algo1 = "%s-%s" % (rev1, config_nick) - algo2 = "%s-%s" % (rev2, config_nick) - report = ScatterPlotReport( - filter_config=[algo1, algo2], - attributes=[attribute], - get_category=lambda run1, run2: run1["domain"], - legend_location=(1.3, 0.5)) - report( - self.eval_dir, - os.path.join(scatter_dir, rev1 + "-" + rev2, name)) - - def make_scatter_plots(): - for config in self._configs: - for rev1, rev2 in itertools.combinations(self._revisions, 2): - for attribute in self.get_supported_attributes( - config.nick, attributes): - make_scatter_plot(config.nick, rev1, rev2, attribute) - - self.add_step(Step("make-scatter-plots", make_scatter_plots)) diff --git a/experiments/issue632/relativescatter.py b/experiments/issue632/relativescatter.py deleted file mode 100644 index 68e2529f34..0000000000 --- a/experiments/issue632/relativescatter.py +++ /dev/null @@ -1,106 +0,0 @@ -# -*- coding: utf-8 -*- - -from collections import defaultdict - -from matplotlib import ticker - -from downward.reports.scatter import ScatterPlotReport -from downward.reports.plot import PlotReport, Matplotlib, MatplotlibPlot - - -# TODO: handle outliers - -# TODO: this is mostly copied from ScatterMatplotlib (scatter.py) -class RelativeScatterMatplotlib(Matplotlib): - @classmethod - def _plot(cls, report, axes, categories, styles): - # Display grid - axes.grid(b=True, linestyle='-', color='0.75') - - has_points = False - # Generate the scatter plots - for category, coords in sorted(categories.items()): - X, Y = zip(*coords) - axes.scatter(X, Y, s=42, label=category, **styles[category]) - if X and Y: - has_points = True - - if 
report.xscale == 'linear' or report.yscale == 'linear': - plot_size = report.missing_val * 1.01 - else: - plot_size = report.missing_val * 1.25 - - # make 5 ticks above and below 1 - yticks = [] - tick_step = report.ylim_top**(1/5.0) - for i in xrange(-5, 6): - yticks.append(tick_step**i) - axes.set_yticks(yticks) - axes.get_yaxis().set_major_formatter(ticker.ScalarFormatter()) - - axes.set_xlim(report.xlim_left or -1, report.xlim_right or plot_size) - axes.set_ylim(report.ylim_bottom or -1, report.ylim_top or plot_size) - - for axis in [axes.xaxis, axes.yaxis]: - MatplotlibPlot.change_axis_formatter(axis, - report.missing_val if report.show_missing else None) - return has_points - - - -class RelativeScatterPlotReport(ScatterPlotReport): - """ - Generate a scatter plot that shows how a specific attribute in two - configurations. The attribute value in config 1 is shown on the - x-axis and the relation to the value in config 2 on the y-axis. - """ - - def __init__(self, show_missing=True, get_category=None, **kwargs): - ScatterPlotReport.__init__(self, show_missing, get_category, **kwargs) - if self.output_format == 'tex': - raise "not supported" - else: - self.writer = RelativeScatterMatplotlib - - def _fill_categories(self, runs): - # We discard the *runs* parameter. 
- # Map category names to value tuples - categories = defaultdict(list) - self.ylim_bottom = 2 - self.ylim_top = 0.5 - self.xlim_left = float("inf") - for (domain, problem), runs in self.problem_runs.items(): - if len(runs) != 2: - continue - run1, run2 = runs - assert (run1['config'] == self.configs[0] and - run2['config'] == self.configs[1]) - val1 = run1.get(self.attribute) - val2 = run2.get(self.attribute) - if val1 is None or val2 is None: - continue - category = self.get_category(run1, run2) - assert val1 > 0, (domain, problem, self.configs[0], val1) - assert val2 > 0, (domain, problem, self.configs[1], val2) - x = val1 - y = val2 / float(val1) - - categories[category].append((x, y)) - - self.ylim_top = max(self.ylim_top, y) - self.ylim_bottom = min(self.ylim_bottom, y) - self.xlim_left = min(self.xlim_left, x) - - # center around 1 - if self.ylim_bottom < 1: - self.ylim_top = max(self.ylim_top, 1 / float(self.ylim_bottom)) - if self.ylim_top > 1: - self.ylim_bottom = min(self.ylim_bottom, 1 / float(self.ylim_top)) - return categories - - def _set_scales(self, xscale, yscale): - # ScatterPlots use log-scaling on the x-axis by default. - default_xscale = 'log' - if self.attribute and self.attribute in self.LINEAR: - default_xscale = 'linear' - PlotReport._set_scales(self, xscale or default_xscale, 'log') diff --git a/experiments/issue632/suites.py b/experiments/issue632/suites.py deleted file mode 100644 index ec030b6d4c..0000000000 --- a/experiments/issue632/suites.py +++ /dev/null @@ -1,315 +0,0 @@ -# Benchmark suites from the Fast Downward benchmark collection. 
- -def suite_alternative_formulations(): - return ['airport-adl', 'no-mprime', 'no-mystery'] - - -def suite_ipc98_to_ipc04_adl(): - return [ - 'assembly', 'miconic-fulladl', 'miconic-simpleadl', - 'optical-telegraphs', 'philosophers', 'psr-large', - 'psr-middle', 'schedule', - ] - - -def suite_ipc98_to_ipc04_strips(): - return [ - 'airport', 'blocks', 'depot', 'driverlog', 'freecell', 'grid', - 'gripper', 'logistics00', 'logistics98', 'miconic', 'movie', - 'mprime', 'mystery', 'pipesworld-notankage', 'psr-small', - 'satellite', 'zenotravel', - ] - - -def suite_ipc98_to_ipc04(): - # All IPC1-4 domains, including the trivial Movie. - return sorted(suite_ipc98_to_ipc04_adl() + suite_ipc98_to_ipc04_strips()) - - -def suite_ipc06_adl(): - return [ - 'openstacks', - 'pathways', - 'trucks', - ] - - -def suite_ipc06_strips_compilations(): - return [ - 'openstacks-strips', - 'pathways-noneg', - 'trucks-strips', - ] - - -def suite_ipc06_strips(): - return [ - 'pipesworld-tankage', - 'rovers', - 'storage', - 'tpp', - ] - - -def suite_ipc06(): - return sorted(suite_ipc06_adl() + suite_ipc06_strips()) - - -def suite_ipc08_common_strips(): - return [ - 'parcprinter-08-strips', - 'pegsol-08-strips', - 'scanalyzer-08-strips', - ] - - -def suite_ipc08_opt_adl(): - return ['openstacks-opt08-adl'] - - -def suite_ipc08_opt_strips(): - return sorted(suite_ipc08_common_strips() + [ - 'elevators-opt08-strips', - 'openstacks-opt08-strips', - 'sokoban-opt08-strips', - 'transport-opt08-strips', - 'woodworking-opt08-strips', - ]) - - -def suite_ipc08_opt(): - return sorted(suite_ipc08_opt_strips() + suite_ipc08_opt_adl()) - - -def suite_ipc08_sat_adl(): - return ['openstacks-sat08-adl'] - - -def suite_ipc08_sat_strips(): - return sorted(suite_ipc08_common_strips() + [ - # Note: cyber-security is missing. 
- 'elevators-sat08-strips', - 'openstacks-sat08-strips', - 'sokoban-sat08-strips', - 'transport-sat08-strips', - 'woodworking-sat08-strips', - ]) - - -def suite_ipc08_sat(): - return sorted(suite_ipc08_sat_strips() + suite_ipc08_sat_adl()) - - -def suite_ipc08(): - return sorted(set(suite_ipc08_opt() + suite_ipc08_sat())) - - -def suite_ipc11_opt(): - return [ - 'barman-opt11-strips', - 'elevators-opt11-strips', - 'floortile-opt11-strips', - 'nomystery-opt11-strips', - 'openstacks-opt11-strips', - 'parcprinter-opt11-strips', - 'parking-opt11-strips', - 'pegsol-opt11-strips', - 'scanalyzer-opt11-strips', - 'sokoban-opt11-strips', - 'tidybot-opt11-strips', - 'transport-opt11-strips', - 'visitall-opt11-strips', - 'woodworking-opt11-strips', - ] - - -def suite_ipc11_sat(): - return [ - 'barman-sat11-strips', - 'elevators-sat11-strips', - 'floortile-sat11-strips', - 'nomystery-sat11-strips', - 'openstacks-sat11-strips', - 'parcprinter-sat11-strips', - 'parking-sat11-strips', - 'pegsol-sat11-strips', - 'scanalyzer-sat11-strips', - 'sokoban-sat11-strips', - 'tidybot-sat11-strips', - 'transport-sat11-strips', - 'visitall-sat11-strips', - 'woodworking-sat11-strips', - ] - - -def suite_ipc11(): - return sorted(suite_ipc11_opt() + suite_ipc11_sat()) - - -def suite_ipc14_agl_adl(): - return [ - 'cavediving-agl14-adl', - 'citycar-agl14-adl', - 'maintenance-agl14-adl', - ] - - -def suite_ipc14_agl_strips(): - return [ - 'barman-agl14-strips', - 'childsnack-agl14-strips', - 'floortile-agl14-strips', - 'ged-agl14-strips', - 'hiking-agl14-strips', - 'openstacks-agl14-strips', - 'parking-agl14-strips', - 'tetris-agl14-strips', - 'thoughtful-agl14-strips', - 'transport-agl14-strips', - 'visitall-agl14-strips', - ] - - -def suite_ipc14_agl(): - return sorted(suite_ipc14_agl_adl() + suite_ipc14_agl_strips()) - - -def suite_ipc14_mco_adl(): - return [ - 'cavediving-mco14-adl', - 'citycar-mco14-adl', - 'maintenance-mco14-adl', - ] - - -def suite_ipc14_mco_strips(): - return [ - 
'barman-mco14-strips', - 'childsnack-mco14-strips', - 'floortile-mco14-strips', - 'ged-mco14-strips', - 'hiking-mco14-strips', - 'openstacks-mco14-strips', - 'parking-mco14-strips', - 'tetris-mco14-strips', - 'thoughtful-mco14-strips', - 'transport-mco14-strips', - 'visitall-mco14-strips', - ] - - -def suite_ipc14_mco(): - return sorted(suite_ipc14_mco_adl() + suite_ipc14_mco_strips()) - - -def suite_ipc14_opt_adl(): - return [ - 'cavediving-opt14-adl', - 'citycar-opt14-adl', - 'maintenance-opt14-adl', - ] - - -def suite_ipc14_opt_strips(): - return [ - 'barman-opt14-strips', - 'childsnack-opt14-strips', - 'floortile-opt14-strips', - 'ged-opt14-strips', - 'hiking-opt14-strips', - 'openstacks-opt14-strips', - 'parking-opt14-strips', - 'tetris-opt14-strips', - 'tidybot-opt14-strips', - 'transport-opt14-strips', - 'visitall-opt14-strips', - ] - - -def suite_ipc14_opt(): - return sorted(suite_ipc14_opt_adl() + suite_ipc14_opt_strips()) - - -def suite_ipc14_sat_adl(): - return [ - 'cavediving-sat14-adl', - 'citycar-sat14-adl', - 'maintenance-sat14-adl', - ] - - -def suite_ipc14_sat_strips(): - return [ - 'barman-sat14-strips', - 'childsnack-sat14-strips', - 'floortile-sat14-strips', - 'ged-sat14-strips', - 'hiking-sat14-strips', - 'openstacks-sat14-strips', - 'parking-sat14-strips', - 'tetris-sat14-strips', - 'thoughtful-sat14-strips', - 'transport-sat14-strips', - 'visitall-sat14-strips', - ] - - -def suite_ipc14_sat(): - return sorted(suite_ipc14_sat_adl() + suite_ipc14_sat_strips()) - - -def suite_ipc14(): - return sorted( - suite_ipc14_agl() + suite_ipc14_mco() + - suite_ipc14_opt() + suite_ipc14_sat()) - - -def suite_unsolvable(): - # TODO: Add other unsolvable problems (Miconic-FullADL). - # TODO: Add 'fsc-grid-r:prize5x5_R.pddl' and 't0-uts:uts_r-02.pddl' - # if the extra-domains branch is merged. 
- return sorted( - ['mystery:prob%02d.pddl' % index - for index in [4, 5, 7, 8, 12, 16, 18, 21, 22, 23, 24]] + - ['miconic-fulladl:f21-3.pddl', 'miconic-fulladl:f30-2.pddl']) - - -def suite_optimal_adl(): - return sorted( - suite_ipc98_to_ipc04_adl() + suite_ipc06_adl() + - suite_ipc08_opt_adl()) - - -def suite_optimal_strips(): - return sorted( - suite_ipc98_to_ipc04_strips() + suite_ipc06_strips() + - suite_ipc06_strips_compilations() + suite_ipc08_opt_strips() + - suite_ipc11_opt()) - - -def suite_optimal(): - return sorted(suite_optimal_adl() + suite_optimal_strips()) - - -def suite_satisficing_adl(): - return sorted( - suite_ipc98_to_ipc04_adl() + suite_ipc06_adl() + - suite_ipc08_sat_adl()) - - -def suite_satisficing_strips(): - return sorted( - suite_ipc98_to_ipc04_strips() + suite_ipc06_strips() + - suite_ipc06_strips_compilations() + suite_ipc08_sat_strips() + - suite_ipc11_sat()) - - -def suite_satisficing(): - return sorted(suite_satisficing_adl() + suite_satisficing_strips()) - - -def suite_all(): - return sorted( - suite_ipc98_to_ipc04() + suite_ipc06() + - suite_ipc06_strips_compilations() + suite_ipc08() + - suite_ipc11() + suite_alternative_formulations()) diff --git a/experiments/issue632/v1-landmarks-goals.py b/experiments/issue632/v1-landmarks-goals.py deleted file mode 100755 index 24894d603a..0000000000 --- a/experiments/issue632/v1-landmarks-goals.py +++ /dev/null @@ -1,37 +0,0 @@ -#! 
/usr/bin/env python -# -*- coding: utf-8 -*- - -from common_setup import IssueConfig, IssueExperiment -from relativescatter import RelativeScatterPlotReport -import suites - - -configs = [ - IssueConfig( - "cegar-900s", - ["--search", "astar(cegar(subtasks=[landmarks(),goals()],max_time=900))"]), -] -revisions = ["issue632-base", "issue632-v1"] - -exp = IssueExperiment( - revisions=revisions, - configs=configs, - suite=suites.suite_optimal_with_ipc11(), - test_suite=["depot:pfile1"], - email="jendrik.seipp@unibas.ch", -) - -exp.add_comparison_table_step() - -for attribute in ["memory", "total_time"]: - for config in configs: - exp.add_report( - RelativeScatterPlotReport( - attributes=[attribute], - filter_config=["{}-{}".format(rev, config.nick) for rev in revisions], - get_category=lambda run1, run2: run1.get("domain"), - ), - outfile="{}-{}-{}.png".format(exp.name, attribute, config.nick) - ) - -exp() diff --git a/experiments/issue632/v1.py b/experiments/issue632/v1.py deleted file mode 100755 index 4977aa5641..0000000000 --- a/experiments/issue632/v1.py +++ /dev/null @@ -1,37 +0,0 @@ -#! 
/usr/bin/env python -# -*- coding: utf-8 -*- - -from common_setup import IssueConfig, IssueExperiment -from relativescatter import RelativeScatterPlotReport -import suites - - -configs = [ - IssueConfig( - "cegar-10K-original", - ["--search", "astar(cegar(subtasks=[original()],max_states=10000,max_time=infinity))"]), -] -revisions = ["issue632-base", "issue632-v1"] - -exp = IssueExperiment( - revisions=revisions, - configs=configs, - suite=suites.suite_optimal_with_ipc11(), - test_suite=["depot:pfile1"], - email="jendrik.seipp@unibas.ch", -) - -exp.add_comparison_table_step() - -for attribute in ["memory", "total_time"]: - for config in configs: - exp.add_report( - RelativeScatterPlotReport( - attributes=[attribute], - filter_config=["{}-{}".format(rev, config.nick) for rev in revisions], - get_category=lambda run1, run2: run1.get("domain"), - ), - outfile="{}-{}-{}.png".format(exp.name, attribute, config.nick) - ) - -exp() diff --git a/experiments/issue633/common_setup.py b/experiments/issue633/common_setup.py deleted file mode 100644 index 0b2eebe0ff..0000000000 --- a/experiments/issue633/common_setup.py +++ /dev/null @@ -1,357 +0,0 @@ -# -*- coding: utf-8 -*- - -import itertools -import os -import platform -import subprocess -import sys - -from lab.environments import LocalEnvironment, MaiaEnvironment -from lab.experiment import ARGPARSER -from lab.steps import Step -from lab import tools - -from downward.experiment import FastDownwardExperiment -from downward.reports.absolute import AbsoluteReport -from downward.reports.compare import CompareConfigsReport -from downward.reports.scatter import ScatterPlotReport - - -def parse_args(): - ARGPARSER.add_argument( - "--test", - choices=["yes", "no", "auto"], - default="auto", - dest="test_run", - help="test experiment locally on a small suite if --test=yes or " - "--test=auto and we are not on a cluster") - return ARGPARSER.parse_args() - -ARGS = parse_args() - - -def get_script(): - """Get file name of main 
script.""" - return tools.get_script_path() - - -def get_script_dir(): - """Get directory of main script. - - Usually a relative directory (depends on how it was called by the user.)""" - return os.path.dirname(get_script()) - - -def get_experiment_name(): - """Get name for experiment. - - Derived from the absolute filename of the main script, e.g. - "/ham/spam/eggs.py" => "spam-eggs".""" - script = os.path.abspath(get_script()) - script_dir = os.path.basename(os.path.dirname(script)) - script_base = os.path.splitext(os.path.basename(script))[0] - return "%s-%s" % (script_dir, script_base) - - -def get_data_dir(): - """Get data dir for the experiment. - - This is the subdirectory "data" of the directory containing - the main script.""" - return os.path.join(get_script_dir(), "data", get_experiment_name()) - - -def get_repo_base(): - """Get base directory of the repository, as an absolute path. - - Search upwards in the directory tree from the main script until a - directory with a subdirectory named ".hg" is found. - - Abort if the repo base cannot be found.""" - path = os.path.abspath(get_script_dir()) - while os.path.dirname(path) != path: - if os.path.exists(os.path.join(path, ".hg")): - return path - path = os.path.dirname(path) - sys.exit("repo base could not be found") - - -def is_running_on_cluster(): - node = platform.node() - return ( - "cluster" in node or - node.startswith("gkigrid") or - node in ["habakuk", "turtur"]) - - -def is_test_run(): - return ARGS.test_run == "yes" or ( - ARGS.test_run == "auto" and not is_running_on_cluster()) - - -def get_algo_nick(revision, config_nick): - return "{revision}-{config_nick}".format(**locals()) - - -class IssueConfig(object): - """Hold information about a planner configuration. - - See FastDownwardExperiment.add_algorithm() for documentation of the - constructor's options. 
- - """ - def __init__(self, nick, component_options, - build_options=None, driver_options=None): - self.nick = nick - self.component_options = component_options - self.build_options = build_options - self.driver_options = driver_options - - -class IssueExperiment(FastDownwardExperiment): - """Wrapper for FastDownwardExperiment with a few convenience features.""" - - DEFAULT_TEST_SUITE = "gripper:prob01.pddl" - - DEFAULT_TABLE_ATTRIBUTES = [ - "cost", - "coverage", - "error", - "evaluations", - "expansions", - "expansions_until_last_jump", - "generated", - "memory", - "quality", - "run_dir", - "score_evaluations", - "score_expansions", - "score_generated", - "score_memory", - "score_search_time", - "score_total_time", - "search_time", - "total_time", - ] - - DEFAULT_SCATTER_PLOT_ATTRIBUTES = [ - "evaluations", - "expansions", - "expansions_until_last_jump", - "initial_h_value", - "memory", - "search_time", - "total_time", - ] - - PORTFOLIO_ATTRIBUTES = [ - "cost", - "coverage", - "error", - "plan_length", - "run_dir", - ] - - def __init__(self, suite, revisions=[], configs={}, grid_priority=None, - path=None, test_suite=None, email=None, processes=1, - **kwargs): - """Create a DownwardExperiment with some convenience features. - - If *revisions* is specified, it should be a non-empty - list of revisions, which specify which planner versions to use - in the experiment. The same versions are used for translator, - preprocessor and search. :: - - IssueExperiment(revisions=["issue123", "4b3d581643"], ...) - - *configs* must be a non-empty list of IssueConfig objects. :: - - IssueExperiment(..., configs=[ - IssueConfig("ff", ["--search", "eager_greedy(ff())"]), - IssueConfig( - "lama", [], - driver_options=["--alias", "seq-sat-lama-2011"]), - ]) - - *suite* sets the benchmarks for the experiment. It must be a - single string or a list of strings specifying domains or - tasks. The downward.suites module has many predefined - suites. 
:: - - IssueExperiment(..., suite=["grid", "gripper:prob01.pddl"]) - - from downward import suites - IssueExperiment(..., suite=suites.suite_all()) - IssueExperiment(..., suite=suites.suite_satisficing_with_ipc11()) - IssueExperiment(..., suite=suites.suite_optimal()) - - Use *grid_priority* to set the job priority for cluster - experiments. It must be in the range [-1023, 0] where 0 is the - highest priority. By default the priority is 0. :: - - IssueExperiment(..., grid_priority=-500) - - If *path* is specified, it must be the path to where the - experiment should be built (e.g. - /home/john/experiments/issue123/exp01/). If omitted, the - experiment path is derived automatically from the main - script's filename. Example:: - - script = experiments/issue123/exp01.py --> - path = experiments/issue123/data/issue123-exp01/ - - Specify *test_suite* to set the benchmarks for experiment test - runs. By default the first gripper task is used. - - IssueExperiment(..., test_suite=["depot:pfile1", "tpp:p01.pddl"]) - - If *email* is specified, it should be an email address. This - email address will be notified upon completion of the experiments - if it is run on the cluster. 
- """ - - if is_test_run(): - kwargs["environment"] = LocalEnvironment(processes=processes) - suite = test_suite or self.DEFAULT_TEST_SUITE - elif "environment" not in kwargs: - kwargs["environment"] = MaiaEnvironment( - priority=grid_priority, email=email) - - path = path or get_data_dir() - - FastDownwardExperiment.__init__(self, path=path, **kwargs) - - repo = get_repo_base() - for rev in revisions: - for config in configs: - self.add_algorithm( - get_algo_nick(rev, config.nick), - repo, - rev, - config.component_options, - build_options=config.build_options, - driver_options=config.driver_options) - - self.add_suite(os.path.join(repo, "benchmarks"), suite) - - self._revisions = revisions - self._configs = configs - - @classmethod - def _is_portfolio(cls, config_nick): - return "fdss" in config_nick - - @classmethod - def get_supported_attributes(cls, config_nick, attributes): - if cls._is_portfolio(config_nick): - return [attr for attr in attributes - if attr in cls.PORTFOLIO_ATTRIBUTES] - return attributes - - def add_absolute_report_step(self, **kwargs): - """Add step that makes an absolute report. - - Absolute reports are useful for experiments that don't - compare revisions. - - The report is written to the experiment evaluation directory. - - All *kwargs* will be passed to the AbsoluteReport class. If - the keyword argument *attributes* is not specified, a - default list of attributes is used. :: - - exp.add_absolute_report_step(attributes=["coverage"]) - - """ - kwargs.setdefault("attributes", self.DEFAULT_TABLE_ATTRIBUTES) - report = AbsoluteReport(**kwargs) - outfile = os.path.join(self.eval_dir, - get_experiment_name() + "." + - report.output_format) - self.add_report(report, outfile=outfile) - self.add_step(Step('publish-absolute-report', - subprocess.call, - ['publish', outfile])) - - def add_comparison_table_step(self, **kwargs): - """Add a step that makes pairwise revision comparisons. 
- - Create comparative reports for all pairs of Fast Downward - revisions. Each report pairs up the runs of the same config and - lists the two absolute attribute values and their difference - for all attributes in kwargs["attributes"]. - - All *kwargs* will be passed to the CompareConfigsReport class. - If the keyword argument *attributes* is not specified, a - default list of attributes is used. :: - - exp.add_comparison_table_step(attributes=["coverage"]) - - """ - kwargs.setdefault("attributes", self.DEFAULT_TABLE_ATTRIBUTES) - - def make_comparison_tables(): - for rev1, rev2 in itertools.combinations(self._revisions, 2): - compared_configs = [] - for config in self._configs: - config_nick = config.nick - compared_configs.append( - ("%s-%s" % (rev1, config_nick), - "%s-%s" % (rev2, config_nick), - "Diff (%s)" % config_nick)) - report = CompareConfigsReport(compared_configs, **kwargs) - outfile = os.path.join( - self.eval_dir, - "%s-%s-%s-compare" % (self.name, rev1, rev2) - + "." + report.output_format) - report(self.eval_dir, outfile) - - def publish_comparison_tables(): - for rev1, rev2 in itertools.combinations(self._revisions, 2): - outfile = os.path.join( - self.eval_dir, - "%s-%s-%s-compare" % (self.name, rev1, rev2) - + ".html") - subprocess.call(['publish', outfile]) - - self.add_step(Step("make-comparison-tables", make_comparison_tables)) - self.add_step(Step("publish-comparison-tables", publish_comparison_tables)) - - def add_scatter_plot_step(self, attributes=None): - """Add a step that creates scatter plots for all revision pairs. - - Create a scatter plot for each combination of attribute, - configuration and revisions pair. If *attributes* is not - specified, a list of common scatter plot attributes is used. - For portfolios all attributes except "cost", "coverage" and - "plan_length" will be ignored. 
:: - - exp.add_scatter_plot_step(attributes=["expansions"]) - - """ - if attributes is None: - attributes = self.DEFAULT_SCATTER_PLOT_ATTRIBUTES - scatter_dir = os.path.join(self.eval_dir, "scatter") - - def make_scatter_plot(config_nick, rev1, rev2, attribute): - name = "-".join([self.name, rev1, rev2, attribute, config_nick]) - print "Make scatter plot for", name - algo1 = "%s-%s" % (rev1, config_nick) - algo2 = "%s-%s" % (rev2, config_nick) - report = ScatterPlotReport( - filter_config=[algo1, algo2], - attributes=[attribute], - get_category=lambda run1, run2: run1["domain"], - legend_location=(1.3, 0.5)) - report( - self.eval_dir, - os.path.join(scatter_dir, rev1 + "-" + rev2, name)) - - def make_scatter_plots(): - for config in self._configs: - for rev1, rev2 in itertools.combinations(self._revisions, 2): - for attribute in self.get_supported_attributes( - config.nick, attributes): - make_scatter_plot(config.nick, rev1, rev2, attribute) - - self.add_step(Step("make-scatter-plots", make_scatter_plots)) diff --git a/experiments/issue633/v1.py b/experiments/issue633/v1.py deleted file mode 100755 index e01af9f454..0000000000 --- a/experiments/issue633/v1.py +++ /dev/null @@ -1,31 +0,0 @@ -#! 
/usr/bin/env python -# -*- coding: utf-8 -*- - -from downward import suites - -from common_setup import IssueConfig, IssueExperiment - - -configs = [ - IssueConfig( - "cegar-10K-original", - ["--search", "astar(cegar(subtasks=[original()],max_states=10000,max_time=infinity))"]), - IssueConfig( - "cegar-10K-landmarks-goals", - ["--search", "astar(cegar(subtasks=[landmarks(), goals()],max_states=10000,max_time=infinity))"]), - IssueConfig( - "cegar-900s-landmarks-goals", - ["--search", "astar(cegar(subtasks=[landmarks(), goals()],max_states=infinity,max_time=900))"]), -] - -exp = IssueExperiment( - revisions=["issue633-base", "issue633-v1"], - configs=configs, - suite=suites.suite_optimal_with_ipc11(), - test_suite=["depot:pfile1"], - email="jendrik.seipp@unibas.ch", -) - -exp.add_comparison_table_step() - -exp() diff --git a/experiments/issue633/v2.py b/experiments/issue633/v2.py deleted file mode 100755 index 7889cea727..0000000000 --- a/experiments/issue633/v2.py +++ /dev/null @@ -1,31 +0,0 @@ -#! 
/usr/bin/env python -# -*- coding: utf-8 -*- - -from downward import suites - -from common_setup import IssueConfig, IssueExperiment - - -configs = [ - IssueConfig( - "cegar-10K-original", - ["--search", "astar(cegar(subtasks=[original()],max_states=10000,max_time=infinity))"]), - IssueConfig( - "cegar-10K-landmarks-goals", - ["--search", "astar(cegar(subtasks=[landmarks(), goals()],max_states=10000,max_time=infinity))"]), - IssueConfig( - "cegar-900s-landmarks-goals", - ["--search", "astar(cegar(subtasks=[landmarks(), goals()],max_states=infinity,max_time=900))"]), -] - -exp = IssueExperiment( - revisions=["issue633-v1", "issue633-v2"], - configs=configs, - suite=suites.suite_optimal_with_ipc11(), - test_suite=["depot:pfile1"], - email="jendrik.seipp@unibas.ch", -) - -exp.add_comparison_table_step() - -exp() diff --git a/experiments/issue635/common_setup.py b/experiments/issue635/common_setup.py deleted file mode 100644 index 4dff4aacfd..0000000000 --- a/experiments/issue635/common_setup.py +++ /dev/null @@ -1,335 +0,0 @@ -# -*- coding: utf-8 -*- - -import itertools -import os -import platform -import subprocess -import sys - -from lab.experiment import ARGPARSER -from lab.steps import Step -from lab import tools - -from downward.experiment import FastDownwardExperiment -from downward.reports.absolute import AbsoluteReport -from downward.reports.compare import CompareConfigsReport -from downward.reports.scatter import ScatterPlotReport - -from relativescatter import RelativeScatterPlotReport - - -def parse_args(): - ARGPARSER.add_argument( - "--test", - choices=["yes", "no", "auto"], - default="auto", - dest="test_run", - help="test experiment locally on a small suite if --test=yes or " - "--test=auto and we are not on a cluster") - return ARGPARSER.parse_args() - -ARGS = parse_args() - - -def get_script(): - """Get file name of main script.""" - return tools.get_script_path() - - -def get_script_dir(): - """Get directory of main script. 
- - Usually a relative directory (depends on how it was called by the user.)""" - return os.path.dirname(get_script()) - - -def get_experiment_name(): - """Get name for experiment. - - Derived from the absolute filename of the main script, e.g. - "/ham/spam/eggs.py" => "spam-eggs".""" - script = os.path.abspath(get_script()) - script_dir = os.path.basename(os.path.dirname(script)) - script_base = os.path.splitext(os.path.basename(script))[0] - return "%s-%s" % (script_dir, script_base) - - -def get_data_dir(): - """Get data dir for the experiment. - - This is the subdirectory "data" of the directory containing - the main script.""" - return os.path.join(get_script_dir(), "data", get_experiment_name()) - - -def get_repo_base(): - """Get base directory of the repository, as an absolute path. - - Search upwards in the directory tree from the main script until a - directory with a subdirectory named ".hg" is found. - - Abort if the repo base cannot be found.""" - path = os.path.abspath(get_script_dir()) - while os.path.dirname(path) != path: - if os.path.exists(os.path.join(path, ".hg")): - return path - path = os.path.dirname(path) - sys.exit("repo base could not be found") - - -def is_running_on_cluster(): - node = platform.node() - return ( - "cluster" in node or - node.startswith("gkigrid") or - node in ["habakuk", "turtur"]) - - -def is_test_run(): - return ARGS.test_run == "yes" or ( - ARGS.test_run == "auto" and not is_running_on_cluster()) - - -def get_algo_nick(revision, config_nick): - return "{revision}-{config_nick}".format(**locals()) - - -class IssueConfig(object): - """Hold information about a planner configuration. - - See FastDownwardExperiment.add_algorithm() for documentation of the - constructor's options. 
- - """ - def __init__(self, nick, component_options, - build_options=None, driver_options=None): - self.nick = nick - self.component_options = component_options - self.build_options = build_options - self.driver_options = driver_options - - -class IssueExperiment(FastDownwardExperiment): - """Subclass of FastDownwardExperiment with some convenience features.""" - - DEFAULT_TEST_SUITE = ["gripper:prob01.pddl"] - - DEFAULT_TABLE_ATTRIBUTES = [ - "cost", - "coverage", - "error", - "evaluations", - "expansions", - "expansions_until_last_jump", - "generated", - "memory", - "quality", - "run_dir", - "score_evaluations", - "score_expansions", - "score_generated", - "score_memory", - "score_search_time", - "score_total_time", - "search_time", - "total_time", - ] - - DEFAULT_SCATTER_PLOT_ATTRIBUTES = [ - "evaluations", - "expansions", - "expansions_until_last_jump", - "initial_h_value", - "memory", - "search_time", - "total_time", - ] - - PORTFOLIO_ATTRIBUTES = [ - "cost", - "coverage", - "error", - "plan_length", - "run_dir", - ] - - def __init__(self, revisions=None, configs=None, path=None, **kwargs): - """ - - You can either specify both *revisions* and *configs* or none - of them. If they are omitted, you will need to call - exp.add_algorithm() manually. - - If *revisions* is given, it must be a non-empty list of - revision identifiers, which specify which planner versions to - use in the experiment. The same versions are used for - translator, preprocessor and search. :: - - IssueExperiment(revisions=["issue123", "4b3d581643"], ...) - - If *configs* is given, it must be a non-empty list of - IssueConfig objects. :: - - IssueExperiment(..., configs=[ - IssueConfig("ff", ["--search", "eager_greedy(ff())"]), - IssueConfig( - "lama", [], - driver_options=["--alias", "seq-sat-lama-2011"]), - ]) - - If *path* is specified, it must be the path to where the - experiment should be built (e.g. - /home/john/experiments/issue123/exp01/). 
If omitted, the - experiment path is derived automatically from the main - script's filename. Example:: - - script = experiments/issue123/exp01.py --> - path = experiments/issue123/data/issue123-exp01/ - - """ - - path = path or get_data_dir() - - FastDownwardExperiment.__init__(self, path=path, **kwargs) - - if (revisions and not configs) or (not revisions and configs): - raise ValueError( - "please provide either both or none of revisions and configs") - - for rev in revisions: - for config in configs: - self.add_algorithm( - get_algo_nick(rev, config.nick), - get_repo_base(), - rev, - config.component_options, - build_options=config.build_options, - driver_options=config.driver_options) - - self._revisions = revisions - self._configs = configs - - @classmethod - def _is_portfolio(cls, config_nick): - return "fdss" in config_nick - - @classmethod - def get_supported_attributes(cls, config_nick, attributes): - if cls._is_portfolio(config_nick): - return [attr for attr in attributes - if attr in cls.PORTFOLIO_ATTRIBUTES] - return attributes - - def add_absolute_report_step(self, **kwargs): - """Add step that makes an absolute report. - - Absolute reports are useful for experiments that don't compare - revisions. - - The report is written to the experiment evaluation directory. - - All *kwargs* will be passed to the AbsoluteReport class. If the - keyword argument *attributes* is not specified, a default list - of attributes is used. :: - - exp.add_absolute_report_step(attributes=["coverage"]) - - """ - kwargs.setdefault("attributes", self.DEFAULT_TABLE_ATTRIBUTES) - report = AbsoluteReport(**kwargs) - outfile = os.path.join( - self.eval_dir, - get_experiment_name() + "." + report.output_format) - self.add_report(report, outfile=outfile) - self.add_step(Step( - 'publish-absolute-report', subprocess.call, ['publish', outfile])) - - def add_comparison_table_step(self, **kwargs): - """Add a step that makes pairwise revision comparisons. 
- - Create comparative reports for all pairs of Fast Downward - revisions. Each report pairs up the runs of the same config and - lists the two absolute attribute values and their difference - for all attributes in kwargs["attributes"]. - - All *kwargs* will be passed to the CompareConfigsReport class. - If the keyword argument *attributes* is not specified, a - default list of attributes is used. :: - - exp.add_comparison_table_step(attributes=["coverage"]) - - """ - kwargs.setdefault("attributes", self.DEFAULT_TABLE_ATTRIBUTES) - - def make_comparison_tables(): - for rev1, rev2 in itertools.combinations(self._revisions, 2): - compared_configs = [] - for config in self._configs: - config_nick = config.nick - compared_configs.append( - ("%s-%s" % (rev1, config_nick), - "%s-%s" % (rev2, config_nick), - "Diff (%s)" % config_nick)) - report = CompareConfigsReport(compared_configs, **kwargs) - outfile = os.path.join( - self.eval_dir, - "%s-%s-%s-compare.%s" % ( - self.name, rev1, rev2, report.output_format)) - report(self.eval_dir, outfile) - - def publish_comparison_tables(): - for rev1, rev2 in itertools.combinations(self._revisions, 2): - outfile = os.path.join( - self.eval_dir, - "%s-%s-%s-compare.html" % (self.name, rev1, rev2)) - subprocess.call(["publish", outfile]) - - self.add_step(Step("make-comparison-tables", make_comparison_tables)) - self.add_step(Step( - "publish-comparison-tables", publish_comparison_tables)) - - def add_scatter_plot_step(self, relative=False, attributes=None): - """Add step creating (relative) scatter plots for all revision pairs. - - Create a scatter plot for each combination of attribute, - configuration and revisions pair. If *attributes* is not - specified, a list of common scatter plot attributes is used. - For portfolios all attributes except "cost", "coverage" and - "plan_length" will be ignored. 
:: - - exp.add_scatter_plot_step(attributes=["expansions"]) - - """ - if relative: - report_class = RelativeScatterPlotReport - scatter_dir = os.path.join(self.eval_dir, "scatter-relative") - step_name = "make-relative-scatter-plots" - else: - report_class = ScatterPlotReport - scatter_dir = os.path.join(self.eval_dir, "scatter-absolute") - step_name = "make-absolute-scatter-plots" - if attributes is None: - attributes = self.DEFAULT_SCATTER_PLOT_ATTRIBUTES - - def make_scatter_plot(config_nick, rev1, rev2, attribute): - name = "-".join([self.name, rev1, rev2, attribute, config_nick]) - print "Make scatter plot for", name - algo1 = "{}-{}".format(rev1, config_nick) - algo2 = "{}-{}".format(rev2, config_nick) - report = report_class( - filter_config=[algo1, algo2], - attributes=[attribute], - get_category=lambda run1, run2: run1["domain"], - legend_location=(1.3, 0.5)) - report( - self.eval_dir, - os.path.join(scatter_dir, rev1 + "-" + rev2, name)) - - def make_scatter_plots(): - for config in self._configs: - for rev1, rev2 in itertools.combinations(self._revisions, 2): - for attribute in self.get_supported_attributes( - config.nick, attributes): - make_scatter_plot(config.nick, rev1, rev2, attribute) - - self.add_step(Step(step_name, make_scatter_plots)) diff --git a/experiments/issue635/relativescatter.py b/experiments/issue635/relativescatter.py deleted file mode 100644 index 14d5d42752..0000000000 --- a/experiments/issue635/relativescatter.py +++ /dev/null @@ -1,106 +0,0 @@ -# -*- coding: utf-8 -*- - -from collections import defaultdict - -from matplotlib import ticker - -from downward.reports.scatter import ScatterPlotReport -from downward.reports.plot import PlotReport, Matplotlib, MatplotlibPlot - - -# TODO: handle outliers - -# TODO: this is mostly copied from ScatterMatplotlib (scatter.py) -class RelativeScatterMatplotlib(Matplotlib): - @classmethod - def _plot(cls, report, axes, categories, styles): - # Display grid - axes.grid(b=True, linestyle='-', 
color='0.75') - - has_points = False - # Generate the scatter plots - for category, coords in sorted(categories.items()): - X, Y = zip(*coords) - axes.scatter(X, Y, s=42, label=category, **styles[category]) - if X and Y: - has_points = True - - if report.xscale == 'linear' or report.yscale == 'linear': - plot_size = report.missing_val * 1.01 - else: - plot_size = report.missing_val * 1.25 - - # make 5 ticks above and below 1 - yticks = [] - tick_step = report.ylim_top**(1/5.0) - for i in xrange(-5, 6): - yticks.append(tick_step**i) - axes.set_yticks(yticks) - axes.get_yaxis().set_major_formatter(ticker.ScalarFormatter()) - - axes.set_xlim(report.xlim_left or -1, report.xlim_right or plot_size) - axes.set_ylim(report.ylim_bottom or -1, report.ylim_top or plot_size) - - for axis in [axes.xaxis, axes.yaxis]: - MatplotlibPlot.change_axis_formatter( - axis, - report.missing_val if report.show_missing else None) - return has_points - - -class RelativeScatterPlotReport(ScatterPlotReport): - """ - Generate a scatter plot that shows how a specific attribute in two - configurations. The attribute value in config 1 is shown on the - x-axis and the relation to the value in config 2 on the y-axis. - """ - - def __init__(self, show_missing=True, get_category=None, **kwargs): - ScatterPlotReport.__init__(self, show_missing, get_category, **kwargs) - if self.output_format == 'tex': - raise "not supported" - else: - self.writer = RelativeScatterMatplotlib - - def _fill_categories(self, runs): - # We discard the *runs* parameter. 
- # Map category names to value tuples - categories = defaultdict(list) - self.ylim_bottom = 2 - self.ylim_top = 0.5 - self.xlim_left = float("inf") - for (domain, problem), runs in self.problem_runs.items(): - if len(runs) != 2: - continue - run1, run2 = runs - assert (run1['config'] == self.configs[0] and - run2['config'] == self.configs[1]) - val1 = run1.get(self.attribute) - val2 = run2.get(self.attribute) - if val1 is None or val2 is None: - continue - category = self.get_category(run1, run2) - assert val1 > 0, (domain, problem, self.configs[0], val1) - assert val2 > 0, (domain, problem, self.configs[1], val2) - x = val1 - y = val2 / float(val1) - - categories[category].append((x, y)) - - self.ylim_top = max(self.ylim_top, y) - self.ylim_bottom = min(self.ylim_bottom, y) - self.xlim_left = min(self.xlim_left, x) - - # center around 1 - if self.ylim_bottom < 1: - self.ylim_top = max(self.ylim_top, 1 / float(self.ylim_bottom)) - if self.ylim_top > 1: - self.ylim_bottom = min(self.ylim_bottom, 1 / float(self.ylim_top)) - return categories - - def _set_scales(self, xscale, yscale): - # ScatterPlots use log-scaling on the x-axis by default. - default_xscale = 'log' - if self.attribute and self.attribute in self.LINEAR: - default_xscale = 'linear' - PlotReport._set_scales(self, xscale or default_xscale, 'log') diff --git a/experiments/issue635/v1.py b/experiments/issue635/v1.py deleted file mode 100755 index 3fe72f7d7e..0000000000 --- a/experiments/issue635/v1.py +++ /dev/null @@ -1,52 +0,0 @@ -#! 
/usr/bin/env python -# -*- coding: utf-8 -*- - -import os - -from lab.environments import LocalEnvironment, MaiaEnvironment - -from common_setup import IssueConfig, IssueExperiment, is_test_run - - -BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"] -REVISIONS = ["issue635-base", "issue635-v1"] -CONFIGS = [ - IssueConfig( - heuristic, - ["--search", "astar({})".format(heuristic)], - driver_options=["--search-time-limit", "10m"]) - for heuristic in ["hm(m=2)", "ipdb()", "cea()", "cg()"] -] -SUITE = [ - 'airport', 'barman-opt14-strips', 'blocks', - 'childsnack-opt14-strips', 'depot', 'driverlog', - 'elevators-opt11-strips', 'floortile-opt14-strips', 'freecell', - 'ged-opt14-strips', 'grid', 'gripper', 'hiking-opt14-strips', - 'logistics00', 'logistics98', 'miconic', 'movie', 'mprime', 'mystery', - 'nomystery-opt11-strips', 'openstacks-opt14-strips', - 'openstacks-strips', 'parcprinter-opt11-strips', - 'parking-opt14-strips', 'pathways-noneg', 'pegsol-opt11-strips', - 'pipesworld-notankage', 'pipesworld-tankage', 'psr-small', 'rovers', - 'satellite', 'scanalyzer-opt11-strips', 'sokoban-opt11-strips', - 'storage', 'tetris-opt14-strips', 'tidybot-opt14-strips', 'tpp', - 'transport-opt14-strips', 'trucks-strips', 'visitall-opt14-strips', - 'woodworking-opt11-strips', 'zenotravel'] -ENVIRONMENT = MaiaEnvironment( - priority=0, email="jendrik.seipp@unibas.ch") - -if is_test_run(): - SUITE = IssueExperiment.DEFAULT_TEST_SUITE - ENVIRONMENT = LocalEnvironment(processes=1) - -exp = IssueExperiment( - revisions=REVISIONS, - configs=CONFIGS, - environment=ENVIRONMENT, -) -exp.add_suite(BENCHMARKS_DIR, SUITE) - -exp.add_absolute_report_step() -exp.add_comparison_table_step() -exp.add_scatter_plot_step(relative=True, attributes=["memory", "total_time"]) - -exp() diff --git a/experiments/issue635/v2.py b/experiments/issue635/v2.py deleted file mode 100755 index d5d469cc28..0000000000 --- a/experiments/issue635/v2.py +++ /dev/null @@ -1,52 +0,0 @@ -#! 
/usr/bin/env python -# -*- coding: utf-8 -*- - -import os - -from lab.environments import LocalEnvironment, MaiaEnvironment - -from common_setup import IssueConfig, IssueExperiment, is_test_run - - -BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"] -REVISIONS = ["issue635-base", "issue635-v2"] -CONFIGS = [ - IssueConfig( - heuristic, - ["--search", "astar({})".format(heuristic)], - driver_options=["--search-time-limit", "10m"]) - for heuristic in ["hm(m=2)"] -] -SUITE = [ - 'airport', 'barman-opt14-strips', 'blocks', - 'childsnack-opt14-strips', 'depot', 'driverlog', - 'elevators-opt11-strips', 'floortile-opt14-strips', 'freecell', - 'ged-opt14-strips', 'grid', 'gripper', 'hiking-opt14-strips', - 'logistics00', 'logistics98', 'miconic', 'movie', 'mprime', 'mystery', - 'nomystery-opt11-strips', 'openstacks-opt14-strips', - 'openstacks-strips', 'parcprinter-opt11-strips', - 'parking-opt14-strips', 'pathways-noneg', 'pegsol-opt11-strips', - 'pipesworld-notankage', 'pipesworld-tankage', 'psr-small', 'rovers', - 'satellite', 'scanalyzer-opt11-strips', 'sokoban-opt11-strips', - 'storage', 'tetris-opt14-strips', 'tidybot-opt14-strips', 'tpp', - 'transport-opt14-strips', 'trucks-strips', 'visitall-opt14-strips', - 'woodworking-opt11-strips', 'zenotravel'] -ENVIRONMENT = MaiaEnvironment( - priority=0, email="jendrik.seipp@unibas.ch") - -if is_test_run(): - SUITE = IssueExperiment.DEFAULT_TEST_SUITE - ENVIRONMENT = LocalEnvironment(processes=1) - -exp = IssueExperiment( - revisions=REVISIONS, - configs=CONFIGS, - environment=ENVIRONMENT, -) -exp.add_suite(BENCHMARKS_DIR, SUITE) - -exp.add_absolute_report_step() -exp.add_comparison_table_step() -exp.add_scatter_plot_step(relative=True, attributes=["memory", "total_time"]) - -exp() diff --git a/experiments/issue635/v3.py b/experiments/issue635/v3.py deleted file mode 100755 index c487f61e17..0000000000 --- a/experiments/issue635/v3.py +++ /dev/null @@ -1,52 +0,0 @@ -#! 
/usr/bin/env python -# -*- coding: utf-8 -*- - -import os - -from lab.environments import LocalEnvironment, MaiaEnvironment - -from common_setup import IssueConfig, IssueExperiment, is_test_run - - -BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"] -REVISIONS = ["issue635-base", "issue635-v3"] -CONFIGS = [ - IssueConfig( - heuristic, - ["--search", "astar({})".format(heuristic)], - driver_options=["--search-time-limit", "10m"]) - for heuristic in ["hm(m=2)"] -] -SUITE = [ - 'airport', 'barman-opt14-strips', 'blocks', - 'childsnack-opt14-strips', 'depot', 'driverlog', - 'elevators-opt11-strips', 'floortile-opt14-strips', 'freecell', - 'ged-opt14-strips', 'grid', 'gripper', 'hiking-opt14-strips', - 'logistics00', 'logistics98', 'miconic', 'movie', 'mprime', 'mystery', - 'nomystery-opt11-strips', 'openstacks-opt14-strips', - 'openstacks-strips', 'parcprinter-opt11-strips', - 'parking-opt14-strips', 'pathways-noneg', 'pegsol-opt11-strips', - 'pipesworld-notankage', 'pipesworld-tankage', 'psr-small', 'rovers', - 'satellite', 'scanalyzer-opt11-strips', 'sokoban-opt11-strips', - 'storage', 'tetris-opt14-strips', 'tidybot-opt14-strips', 'tpp', - 'transport-opt14-strips', 'trucks-strips', 'visitall-opt14-strips', - 'woodworking-opt11-strips', 'zenotravel'] -ENVIRONMENT = MaiaEnvironment( - priority=0, email="jendrik.seipp@unibas.ch") - -if is_test_run(): - SUITE = IssueExperiment.DEFAULT_TEST_SUITE - ENVIRONMENT = LocalEnvironment(processes=1) - -exp = IssueExperiment( - revisions=REVISIONS, - configs=CONFIGS, - environment=ENVIRONMENT, -) -exp.add_suite(BENCHMARKS_DIR, SUITE) - -exp.add_absolute_report_step() -exp.add_comparison_table_step() -exp.add_scatter_plot_step(relative=True, attributes=["memory", "total_time"]) - -exp() diff --git a/experiments/issue637/common_setup.py b/experiments/issue637/common_setup.py deleted file mode 100644 index 338314a650..0000000000 --- a/experiments/issue637/common_setup.py +++ /dev/null @@ -1,384 +0,0 @@ -# -*- coding: utf-8 -*- - 
-import itertools -import os -import platform -import subprocess -import sys - -from lab.experiment import ARGPARSER -from lab import tools - -from downward.experiment import FastDownwardExperiment -from downward.reports.absolute import AbsoluteReport -from downward.reports.compare import ComparativeReport -from downward.reports.scatter import ScatterPlotReport - -from relativescatter import RelativeScatterPlotReport - - -def parse_args(): - ARGPARSER.add_argument( - "--test", - choices=["yes", "no", "auto"], - default="auto", - dest="test_run", - help="test experiment locally on a small suite if --test=yes or " - "--test=auto and we are not on a cluster") - return ARGPARSER.parse_args() - -ARGS = parse_args() - - -DEFAULT_OPTIMAL_SUITE = [ - 'airport', 'barman-opt11-strips', 'barman-opt14-strips', 'blocks', - 'childsnack-opt14-strips', 'depot', 'driverlog', - 'elevators-opt08-strips', 'elevators-opt11-strips', - 'floortile-opt11-strips', 'floortile-opt14-strips', 'freecell', - 'ged-opt14-strips', 'grid', 'gripper', 'hiking-opt14-strips', - 'logistics00', 'logistics98', 'miconic', 'movie', 'mprime', - 'mystery', 'nomystery-opt11-strips', 'openstacks-opt08-strips', - 'openstacks-opt11-strips', 'openstacks-opt14-strips', - 'openstacks-strips', 'parcprinter-08-strips', - 'parcprinter-opt11-strips', 'parking-opt11-strips', - 'parking-opt14-strips', 'pathways-noneg', 'pegsol-08-strips', - 'pegsol-opt11-strips', 'pipesworld-notankage', - 'pipesworld-tankage', 'psr-small', 'rovers', 'satellite', - 'scanalyzer-08-strips', 'scanalyzer-opt11-strips', - 'sokoban-opt08-strips', 'sokoban-opt11-strips', 'storage', - 'tetris-opt14-strips', 'tidybot-opt11-strips', - 'tidybot-opt14-strips', 'tpp', 'transport-opt08-strips', - 'transport-opt11-strips', 'transport-opt14-strips', - 'trucks-strips', 'visitall-opt11-strips', 'visitall-opt14-strips', - 'woodworking-opt08-strips', 'woodworking-opt11-strips', - 'zenotravel'] - -DEFAULT_SATISFICING_SUITE = [ - 'airport', 'assembly', 
'barman-sat11-strips', - 'barman-sat14-strips', 'blocks', 'cavediving-14-adl', - 'childsnack-sat14-strips', 'citycar-sat14-adl', 'depot', - 'driverlog', 'elevators-sat08-strips', 'elevators-sat11-strips', - 'floortile-sat11-strips', 'floortile-sat14-strips', 'freecell', - 'ged-sat14-strips', 'grid', 'gripper', 'hiking-sat14-strips', - 'logistics00', 'logistics98', 'maintenance-sat14-adl', 'miconic', - 'miconic-fulladl', 'miconic-simpleadl', 'movie', 'mprime', - 'mystery', 'nomystery-sat11-strips', 'openstacks', - 'openstacks-sat08-adl', 'openstacks-sat08-strips', - 'openstacks-sat11-strips', 'openstacks-sat14-strips', - 'openstacks-strips', 'optical-telegraphs', 'parcprinter-08-strips', - 'parcprinter-sat11-strips', 'parking-sat11-strips', - 'parking-sat14-strips', 'pathways', 'pathways-noneg', - 'pegsol-08-strips', 'pegsol-sat11-strips', 'philosophers', - 'pipesworld-notankage', 'pipesworld-tankage', 'psr-large', - 'psr-middle', 'psr-small', 'rovers', 'satellite', - 'scanalyzer-08-strips', 'scanalyzer-sat11-strips', 'schedule', - 'sokoban-sat08-strips', 'sokoban-sat11-strips', 'storage', - 'tetris-sat14-strips', 'thoughtful-sat14-strips', - 'tidybot-sat11-strips', 'tpp', 'transport-sat08-strips', - 'transport-sat11-strips', 'transport-sat14-strips', 'trucks', - 'trucks-strips', 'visitall-sat11-strips', 'visitall-sat14-strips', - 'woodworking-sat08-strips', 'woodworking-sat11-strips', - 'zenotravel'] - - -def get_script(): - """Get file name of main script.""" - return tools.get_script_path() - - -def get_script_dir(): - """Get directory of main script. - - Usually a relative directory (depends on how it was called by the user.)""" - return os.path.dirname(get_script()) - - -def get_experiment_name(): - """Get name for experiment. - - Derived from the absolute filename of the main script, e.g. 
- "/ham/spam/eggs.py" => "spam-eggs".""" - script = os.path.abspath(get_script()) - script_dir = os.path.basename(os.path.dirname(script)) - script_base = os.path.splitext(os.path.basename(script))[0] - return "%s-%s" % (script_dir, script_base) - - -def get_data_dir(): - """Get data dir for the experiment. - - This is the subdirectory "data" of the directory containing - the main script.""" - return os.path.join(get_script_dir(), "data", get_experiment_name()) - - -def get_repo_base(): - """Get base directory of the repository, as an absolute path. - - Search upwards in the directory tree from the main script until a - directory with a subdirectory named ".hg" is found. - - Abort if the repo base cannot be found.""" - path = os.path.abspath(get_script_dir()) - while os.path.dirname(path) != path: - if os.path.exists(os.path.join(path, ".hg")): - return path - path = os.path.dirname(path) - sys.exit("repo base could not be found") - - -def is_running_on_cluster(): - node = platform.node() - return node.endswith(".scicore.unibas.ch") or node.endswith(".cluster.bc2.ch") - - -def is_test_run(): - return ARGS.test_run == "yes" or ( - ARGS.test_run == "auto" and not is_running_on_cluster()) - - -def get_algo_nick(revision, config_nick): - return "{revision}-{config_nick}".format(**locals()) - - -class IssueConfig(object): - """Hold information about a planner configuration. - - See FastDownwardExperiment.add_algorithm() for documentation of the - constructor's options. 
- - """ - def __init__(self, nick, component_options, - build_options=None, driver_options=None): - self.nick = nick - self.component_options = component_options - self.build_options = build_options - self.driver_options = driver_options - - -class IssueExperiment(FastDownwardExperiment): - """Subclass of FastDownwardExperiment with some convenience features.""" - - DEFAULT_TEST_SUITE = ["depot:p01.pddl", "gripper:prob01.pddl"] - - DEFAULT_TABLE_ATTRIBUTES = [ - "cost", - "coverage", - "error", - "evaluations", - "expansions", - "expansions_until_last_jump", - "generated", - "memory", - "planner_memory", - "planner_time", - "quality", - "run_dir", - "score_evaluations", - "score_expansions", - "score_generated", - "score_memory", - "score_search_time", - "score_total_time", - "search_time", - "total_time", - ] - - DEFAULT_SCATTER_PLOT_ATTRIBUTES = [ - "evaluations", - "expansions", - "expansions_until_last_jump", - "initial_h_value", - "memory", - "search_time", - "total_time", - ] - - PORTFOLIO_ATTRIBUTES = [ - "cost", - "coverage", - "error", - "plan_length", - "run_dir", - ] - - def __init__(self, revisions=None, configs=None, path=None, **kwargs): - """ - - You can either specify both *revisions* and *configs* or none - of them. If they are omitted, you will need to call - exp.add_algorithm() manually. - - If *revisions* is given, it must be a non-empty list of - revision identifiers, which specify which planner versions to - use in the experiment. The same versions are used for - translator, preprocessor and search. :: - - IssueExperiment(revisions=["issue123", "4b3d581643"], ...) - - If *configs* is given, it must be a non-empty list of - IssueConfig objects. :: - - IssueExperiment(..., configs=[ - IssueConfig("ff", ["--search", "eager_greedy(ff())"]), - IssueConfig( - "lama", [], - driver_options=["--alias", "seq-sat-lama-2011"]), - ]) - - If *path* is specified, it must be the path to where the - experiment should be built (e.g. 
- /home/john/experiments/issue123/exp01/). If omitted, the - experiment path is derived automatically from the main - script's filename. Example:: - - script = experiments/issue123/exp01.py --> - path = experiments/issue123/data/issue123-exp01/ - - """ - - path = path or get_data_dir() - - FastDownwardExperiment.__init__(self, path=path, **kwargs) - - if (revisions and not configs) or (not revisions and configs): - raise ValueError( - "please provide either both or none of revisions and configs") - - for rev in revisions: - for config in configs: - self.add_algorithm( - get_algo_nick(rev, config.nick), - get_repo_base(), - rev, - config.component_options, - build_options=config.build_options, - driver_options=config.driver_options) - - self._revisions = revisions - self._configs = configs - - @classmethod - def _is_portfolio(cls, config_nick): - return "fdss" in config_nick - - @classmethod - def get_supported_attributes(cls, config_nick, attributes): - if cls._is_portfolio(config_nick): - return [attr for attr in attributes - if attr in cls.PORTFOLIO_ATTRIBUTES] - return attributes - - def add_absolute_report_step(self, **kwargs): - """Add step that makes an absolute report. - - Absolute reports are useful for experiments that don't compare - revisions. - - The report is written to the experiment evaluation directory. - - All *kwargs* will be passed to the AbsoluteReport class. If the - keyword argument *attributes* is not specified, a default list - of attributes is used. :: - - exp.add_absolute_report_step(attributes=["coverage"]) - - """ - kwargs.setdefault("attributes", self.DEFAULT_TABLE_ATTRIBUTES) - report = AbsoluteReport(**kwargs) - outfile = os.path.join( - self.eval_dir, - get_experiment_name() + "." 
+ report.output_format) - self.add_report(report, outfile=outfile) - self.add_step( - 'publish-absolute-report', subprocess.call, ['publish', outfile]) - - def add_comparison_table_step(self, **kwargs): - """Add a step that makes pairwise revision comparisons. - - Create comparative reports for all pairs of Fast Downward - revisions. Each report pairs up the runs of the same config and - lists the two absolute attribute values and their difference - for all attributes in kwargs["attributes"]. - - All *kwargs* will be passed to the CompareConfigsReport class. - If the keyword argument *attributes* is not specified, a - default list of attributes is used. :: - - exp.add_comparison_table_step(attributes=["coverage"]) - - """ - kwargs.setdefault("attributes", self.DEFAULT_TABLE_ATTRIBUTES) - - def make_comparison_tables(): - for rev1, rev2 in itertools.combinations(self._revisions, 2): - compared_configs = [] - for config in self._configs: - config_nick = config.nick - compared_configs.append( - ("%s-%s" % (rev1, config_nick), - "%s-%s" % (rev2, config_nick), - "Diff (%s)" % config_nick)) - report = ComparativeReport(compared_configs, **kwargs) - outfile = os.path.join( - self.eval_dir, - "%s-%s-%s-compare.%s" % ( - self.name, rev1, rev2, report.output_format)) - report(self.eval_dir, outfile) - - def publish_comparison_tables(): - for rev1, rev2 in itertools.combinations(self._revisions, 2): - outfile = os.path.join( - self.eval_dir, - "%s-%s-%s-compare.html" % (self.name, rev1, rev2)) - subprocess.call(["publish", outfile]) - - self.add_step("make-comparison-tables", make_comparison_tables) - self.add_step( - "publish-comparison-tables", publish_comparison_tables) - - def add_scatter_plot_step(self, relative=False, attributes=None): - """Add step creating (relative) scatter plots for all revision pairs. - - Create a scatter plot for each combination of attribute, - configuration and revisions pair. 
If *attributes* is not - specified, a list of common scatter plot attributes is used. - For portfolios all attributes except "cost", "coverage" and - "plan_length" will be ignored. :: - - exp.add_scatter_plot_step(attributes=["expansions"]) - - """ - if relative: - report_class = RelativeScatterPlotReport - scatter_dir = os.path.join(self.eval_dir, "scatter-relative") - step_name = "make-relative-scatter-plots" - else: - report_class = ScatterPlotReport - scatter_dir = os.path.join(self.eval_dir, "scatter-absolute") - step_name = "make-absolute-scatter-plots" - if attributes is None: - attributes = self.DEFAULT_SCATTER_PLOT_ATTRIBUTES - - def make_scatter_plot(config_nick, rev1, rev2, attribute): - name = "-".join([self.name, rev1, rev2, attribute, config_nick]) - print "Make scatter plot for", name - algo1 = "{}-{}".format(rev1, config_nick) - algo2 = "{}-{}".format(rev2, config_nick) - report = report_class( - filter_config=[algo1, algo2], - attributes=[attribute], - get_category=lambda run1, run2: run1["domain"], - legend_location=(1.3, 0.5)) - report( - self.eval_dir, - os.path.join(scatter_dir, rev1 + "-" + rev2, name)) - - def make_scatter_plots(): - for config in self._configs: - for rev1, rev2 in itertools.combinations(self._revisions, 2): - for attribute in self.get_supported_attributes( - config.nick, attributes): - make_scatter_plot(config.nick, rev1, rev2, attribute) - - self.add_step(step_name, make_scatter_plots) diff --git a/experiments/issue637/parser.py b/experiments/issue637/parser.py deleted file mode 100755 index a757e26c5e..0000000000 --- a/experiments/issue637/parser.py +++ /dev/null @@ -1,74 +0,0 @@ -#! 
/usr/bin/env python - -import logging -import re - -from lab.parser import Parser - - -class CommonParser(Parser): - def add_difference(self, diff, val1, val2): - def diff_func(content, props): - if props.get(val1) is None or props.get(val2) is None: - diff_val = None - else: - diff_val = props.get(val1) - props.get(val2) - props[diff] = diff_val - self.add_function(diff_func) - - def _get_flags(self, flags_string): - flags = 0 - for char in flags_string: - flags |= getattr(re, char) - return flags - - def add_repeated_pattern( - self, name, regex, file="run.log", required=True, type=int, - flags=""): - flags += "M" - - def find_all_occurences(content, props): - matches = re.findall(regex, content, flags=self._get_flags(flags)) - if required and not matches: - logging.error("Pattern {0} not found in file {1}".format(regex, file)) - props[name] = [type(m) for m in matches] - - self.add_function(find_all_occurences, file=file) - - def add_pattern(self, name, regex, file="run.log", required=False, type=int, flags="M"): - Parser.add_pattern(self, name, regex, file=file, required=required, type=type, flags=flags) - - def add_bottom_up_pattern(self, name, regex, file="run.log", required=True, type=int, flags=""): - - def search_from_bottom(content, props): - reversed_content = "\n".join(reversed(content.splitlines())) - match = re.search(regex, reversed_content, flags=self._get_flags(flags)) - if required and not match: - logging.error("Pattern {0} not found in file {1}".format(regex, file)) - if match: - props[name] = type(match.group(1)) - - self.add_function(search_from_bottom, file=file) - - -def no_search(content, props): - if "search_start_time" not in props: - error = props.get("error") - if error is not None and error != "incomplete-search-found-no-plan": - props["error"] = "no-search-due-to-" + error - - -def main(): - parser = CommonParser() - parser.add_pattern("search_start_time", r"\[g=0, 1 evaluated, 0 expanded, t=(.+)s, \d+ KB\]", type=float, 
required=False) - parser.add_pattern("search_start_memory", r"\[g=0, 1 evaluated, 0 expanded, t=.+s, (\d+) KB\]", type=int, required=False) - parser.add_pattern("init_time", r"^Time for initializing additive Cartesian heuristic: (.+)s$", type=float, required=False) - parser.add_pattern("cartesian_states", r"^Cartesian states: (\d+)$", type=int, required=False) - - parser.add_function(no_search) - - parser.parse() - - -if __name__ == "__main__": - main() diff --git a/experiments/issue637/relativescatter.py b/experiments/issue637/relativescatter.py deleted file mode 100644 index d8033a3324..0000000000 --- a/experiments/issue637/relativescatter.py +++ /dev/null @@ -1,105 +0,0 @@ -# -*- coding: utf-8 -*- - -from collections import defaultdict - -from matplotlib import ticker - -from downward.reports.scatter import ScatterPlotReport -from downward.reports.plot import PlotReport, Matplotlib, MatplotlibPlot - - -# TODO: handle outliers - -# TODO: this is mostly copied from ScatterMatplotlib (scatter.py) -class RelativeScatterMatplotlib(Matplotlib): - @classmethod - def _plot(cls, report, axes, categories, styles): - # Display grid - axes.grid(b=True, linestyle='-', color='0.75') - - has_points = False - # Generate the scatter plots - for category, coords in sorted(categories.items()): - X, Y = zip(*coords) - axes.scatter(X, Y, s=42, label=category, **styles[category]) - if X and Y: - has_points = True - - if report.xscale == 'linear' or report.yscale == 'linear': - plot_size = report.missing_val * 1.01 - else: - plot_size = report.missing_val * 1.25 - - # make 5 ticks above and below 1 - yticks = [] - tick_step = report.ylim_top**(1/5.0) - for i in xrange(-5, 6): - yticks.append(tick_step**i) - axes.set_yticks(yticks) - axes.get_yaxis().set_major_formatter(ticker.ScalarFormatter()) - - axes.set_xlim(report.xlim_left or -1, report.xlim_right or plot_size) - axes.set_ylim(report.ylim_bottom or -1, report.ylim_top or plot_size) - - for axis in [axes.xaxis, axes.yaxis]: - 
MatplotlibPlot.change_axis_formatter( - axis, - report.missing_val if report.show_missing else None) - return has_points - - -class RelativeScatterPlotReport(ScatterPlotReport): - """ - Generate a scatter plot that shows a relative comparison of two - algorithms with regard to the given attribute. The attribute value - of algorithm 1 is shown on the x-axis and the relation to the value - of algorithm 2 on the y-axis. - """ - - def __init__(self, show_missing=True, get_category=None, **kwargs): - ScatterPlotReport.__init__(self, show_missing, get_category, **kwargs) - if self.output_format == 'tex': - raise "not supported" - else: - self.writer = RelativeScatterMatplotlib - - def _fill_categories(self, runs): - # We discard the *runs* parameter. - # Map category names to value tuples - categories = defaultdict(list) - self.ylim_bottom = 2 - self.ylim_top = 0.5 - self.xlim_left = float("inf") - for (domain, problem), runs in self.problem_runs.items(): - if len(runs) != 2: - continue - run1, run2 = runs - assert (run1['algorithm'] == self.algorithms[0] and - run2['algorithm'] == self.algorithms[1]) - val1 = run1.get(self.attribute) - val2 = run2.get(self.attribute) - if not val1 or not val2: - continue - category = self.get_category(run1, run2) - assert val1 > 0, (domain, problem, self.algorithms[0], val1) - assert val2 > 0, (domain, problem, self.algorithms[1], val2) - x = val1 - y = val2 / float(val1) - - categories[category].append((x, y)) - - self.ylim_top = max(self.ylim_top, y) - self.ylim_bottom = min(self.ylim_bottom, y) - self.xlim_left = min(self.xlim_left, x) - - # center around 1 - if self.ylim_bottom < 1: - self.ylim_top = max(self.ylim_top, 1 / float(self.ylim_bottom)) - if self.ylim_top > 1: - self.ylim_bottom = min(self.ylim_bottom, 1 / float(self.ylim_top)) - return categories - - def _set_scales(self, xscale, yscale): - # ScatterPlot uses log-scaling on the x-axis by default. 
- PlotReport._set_scales( - self, xscale or self.attribute.scale or 'log', 'log') diff --git a/experiments/issue637/v1.py b/experiments/issue637/v1.py deleted file mode 100755 index 93245a9e90..0000000000 --- a/experiments/issue637/v1.py +++ /dev/null @@ -1,66 +0,0 @@ -#! /usr/bin/env python -# -*- coding: utf-8 -*- - -import os - -from lab.environments import LocalEnvironment, BaselSlurmEnvironment - -import common_setup -from common_setup import IssueConfig, IssueExperiment -from relativescatter import RelativeScatterPlotReport - -DIR = os.path.dirname(os.path.abspath(__file__)) -BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"] -REVISIONS = ["issue637-v1-base", "issue637-v1"] -DRIVER_OPTIONS = ["--overall-time-limit", "30m"] -CONFIGS = [ - IssueConfig( - "cegar-landmarks-goals", - ["--search", "astar(cegar())"], - driver_options=DRIVER_OPTIONS), - IssueConfig( - "cegar-original", - ["--search", "astar(cegar(subtasks=[original()]))"], - driver_options=DRIVER_OPTIONS), -] -SUITE = common_setup.DEFAULT_OPTIMAL_SUITE -ENVIRONMENT = BaselSlurmEnvironment( - partition="infai_2", - email="jendrik.seipp@unibas.ch", - export=["PATH", "DOWNWARD_BENCHMARKS"]) - -if common_setup.is_test_run(): - SUITE = IssueExperiment.DEFAULT_TEST_SUITE - ENVIRONMENT = LocalEnvironment(processes=1) - -exp = IssueExperiment( - revisions=REVISIONS, - configs=CONFIGS, - environment=ENVIRONMENT, -) -exp.add_suite(BENCHMARKS_DIR, SUITE) - -exp.add_parser(exp.EXITCODE_PARSER) -exp.add_parser(exp.TRANSLATOR_PARSER) -exp.add_parser(exp.SINGLE_SEARCH_PARSER) -exp.add_parser(exp.PLANNER_PARSER) -exp.add_parser(os.path.join(DIR, "parser.py")) - -exp.add_step('build', exp.build) -exp.add_step('start', exp.start_runs) -exp.add_parse_again_step() -exp.add_fetcher(name='fetch') - -#exp.add_absolute_report_step() -exp.add_comparison_table_step() - -for attribute in ["memory", "total_time"]: - for config in CONFIGS: - exp.add_report( - RelativeScatterPlotReport( - attributes=[attribute], - 
filter_algorithm=["{}-{}".format(rev, config.nick) for rev in REVISIONS], - get_category=lambda run1, run2: run1.get("domain")), - outfile="{}-{}-{}-{}-{}.png".format(exp.name, attribute, config.nick, *REVISIONS)) - -exp.run_steps() diff --git a/experiments/issue637/v2.py b/experiments/issue637/v2.py deleted file mode 100755 index c6cf19c714..0000000000 --- a/experiments/issue637/v2.py +++ /dev/null @@ -1,67 +0,0 @@ -#! /usr/bin/env python -# -*- coding: utf-8 -*- - -import os - -from lab.environments import LocalEnvironment, BaselSlurmEnvironment - -import common_setup -from common_setup import IssueConfig, IssueExperiment -from relativescatter import RelativeScatterPlotReport - -DIR = os.path.dirname(os.path.abspath(__file__)) -BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"] -REVISIONS = ["issue637-v1", "issue637-v2"] -DRIVER_OPTIONS = ["--overall-time-limit", "30m"] -CONFIGS = [ - IssueConfig( - "cegar-landmarks-goals", - ["--search", "astar(cegar())"], - driver_options=DRIVER_OPTIONS), - IssueConfig( - "cegar-original", - ["--search", "astar(cegar(subtasks=[original()]))"], - driver_options=DRIVER_OPTIONS), -] -SUITE = common_setup.DEFAULT_OPTIMAL_SUITE -ENVIRONMENT = BaselSlurmEnvironment( - partition="infai_2", - email="jendrik.seipp@unibas.ch", - export=["PATH", "DOWNWARD_BENCHMARKS"]) - -if common_setup.is_test_run(): - SUITE = IssueExperiment.DEFAULT_TEST_SUITE - ENVIRONMENT = LocalEnvironment(processes=1) - -exp = IssueExperiment( - revisions=REVISIONS, - configs=CONFIGS, - environment=ENVIRONMENT, -) -exp.add_suite(BENCHMARKS_DIR, SUITE) - -exp.add_parser(exp.EXITCODE_PARSER) -exp.add_parser(exp.TRANSLATOR_PARSER) -exp.add_parser(exp.SINGLE_SEARCH_PARSER) -exp.add_parser(exp.PLANNER_PARSER) -exp.add_parser(os.path.join(DIR, "parser.py")) - -exp.add_step('build', exp.build) -exp.add_step('start', exp.start_runs) -exp.add_fetcher(name='fetch') - -#exp.add_absolute_report_step() -exp.add_comparison_table_step( - 
attributes=IssueExperiment.DEFAULT_TABLE_ATTRIBUTES - + ["search_start_time", "search_start_memory", "init_time"]) - -for attribute in ["memory", "total_time", "init_time", "expansions_until_last_jump"]: - for config in CONFIGS: - exp.add_report( - RelativeScatterPlotReport( - attributes=[attribute], - filter_algorithm=["{}-{}".format(rev, config.nick) for rev in REVISIONS], - get_category=lambda run1, run2: run1.get("domain")), - outfile="{}-{}-{}-{}-{}.png".format(exp.name, attribute, config.nick, *REVISIONS)) - -exp.run_steps() diff --git a/experiments/issue637/v3.py b/experiments/issue637/v3.py deleted file mode 100755 index 4d4c851315..0000000000 --- a/experiments/issue637/v3.py +++ /dev/null @@ -1,70 +0,0 @@ -#! /usr/bin/env python -# -*- coding: utf-8 -*- - -import os - -from lab.environments import LocalEnvironment, BaselSlurmEnvironment - -import common_setup -from common_setup import IssueConfig, IssueExperiment -from relativescatter import RelativeScatterPlotReport - -DIR = os.path.dirname(os.path.abspath(__file__)) -BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"] -REVISIONS = ["issue637-v1", "issue637-v2", "issue637-v3"] -REVISIONS = ["issue637-v1", "issue637-v3"] -DRIVER_OPTIONS = ["--overall-time-limit", "30m"] -CONFIGS = [ - IssueConfig( - "cegar-landmarks-goals", - ["--search", "astar(cegar())"], - driver_options=DRIVER_OPTIONS), - IssueConfig( - "cegar-original", - ["--search", "astar(cegar(subtasks=[original()]))"], - driver_options=DRIVER_OPTIONS), -] -SUITE = common_setup.DEFAULT_OPTIMAL_SUITE -ENVIRONMENT = BaselSlurmEnvironment( - partition="infai_2", - email="jendrik.seipp@unibas.ch", - export=["PATH", "DOWNWARD_BENCHMARKS"]) - -if common_setup.is_test_run(): - SUITE = IssueExperiment.DEFAULT_TEST_SUITE - ENVIRONMENT = LocalEnvironment(processes=1) - -exp = IssueExperiment( - revisions=REVISIONS, - configs=CONFIGS, - environment=ENVIRONMENT, -) -exp.add_suite(BENCHMARKS_DIR, SUITE) - -exp.add_parser(exp.EXITCODE_PARSER) 
-exp.add_parser(exp.TRANSLATOR_PARSER) -exp.add_parser(exp.SINGLE_SEARCH_PARSER) -exp.add_parser(exp.PLANNER_PARSER) -exp.add_parser(os.path.join(DIR, "parser.py")) - -exp.add_step('build', exp.build) -exp.add_step('start', exp.start_runs) -exp.add_fetcher(name='fetch') -exp.add_fetcher(os.path.join(DIR, 'data/issue637-v2-eval')) - -attributes = IssueExperiment.DEFAULT_TABLE_ATTRIBUTES + [ - "search_start_time", "search_start_memory", "init_time", "cartesian_states"] -exp.add_absolute_report_step(attributes=attributes) -exp.add_comparison_table_step(attributes=attributes) - -if len(REVISIONS) == 2: - for attribute in ["init_time", "expansions_until_last_jump"]: - for config in CONFIGS: - exp.add_report( - RelativeScatterPlotReport( - attributes=[attribute], - filter_algorithm=["{}-{}".format(rev, config.nick) for rev in REVISIONS], - get_category=lambda run1, run2: run1.get("domain")), - outfile="{}-{}-{}-{}-{}.png".format(exp.name, attribute, config.nick, *REVISIONS)) - -exp.run_steps() diff --git a/experiments/issue637/v4.py b/experiments/issue637/v4.py deleted file mode 100755 index 3a6498ea3a..0000000000 --- a/experiments/issue637/v4.py +++ /dev/null @@ -1,70 +0,0 @@ -#! 
/usr/bin/env python -# -*- coding: utf-8 -*- - -import os - -from lab.environments import LocalEnvironment, BaselSlurmEnvironment - -import common_setup -from common_setup import IssueConfig, IssueExperiment -from relativescatter import RelativeScatterPlotReport - -DIR = os.path.dirname(os.path.abspath(__file__)) -BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"] -REVISIONS = ["issue637-v3", "issue637-v4"] -#REVISIONS = ["issue637-v4"] -DRIVER_OPTIONS = ["--overall-time-limit", "30m"] -CONFIGS = [ - IssueConfig( - "cegar-landmarks-goals", - ["--search", "astar(cegar())"], - driver_options=DRIVER_OPTIONS), - IssueConfig( - "cegar-original", - ["--search", "astar(cegar(subtasks=[original()]))"], - driver_options=DRIVER_OPTIONS), -] -SUITE = common_setup.DEFAULT_OPTIMAL_SUITE -ENVIRONMENT = BaselSlurmEnvironment( - partition="infai_2", - email="jendrik.seipp@unibas.ch", - export=["PATH", "DOWNWARD_BENCHMARKS"]) - -if common_setup.is_test_run(): - SUITE = IssueExperiment.DEFAULT_TEST_SUITE - ENVIRONMENT = LocalEnvironment(processes=1) - -exp = IssueExperiment( - revisions=REVISIONS, - configs=CONFIGS, - environment=ENVIRONMENT, -) -exp.add_suite(BENCHMARKS_DIR, SUITE) - -exp.add_parser(exp.EXITCODE_PARSER) -exp.add_parser(exp.TRANSLATOR_PARSER) -exp.add_parser(exp.SINGLE_SEARCH_PARSER) -exp.add_parser(exp.PLANNER_PARSER) -exp.add_parser(os.path.join(DIR, "parser.py")) - -exp.add_step('build', exp.build) -exp.add_step('start', exp.start_runs) -exp.add_fetcher(name='fetch') -exp.add_fetcher(os.path.join(DIR, 'data/issue637-v3-eval')) - -attributes = IssueExperiment.DEFAULT_TABLE_ATTRIBUTES + [ - "search_start_time", "search_start_memory", "init_time", "cartesian_states"] -exp.add_absolute_report_step(attributes=attributes) -exp.add_comparison_table_step(attributes=attributes) - -if len(REVISIONS) == 2: - for attribute in ["init_time", "expansions_until_last_jump"]: - for config in CONFIGS: - exp.add_report( - RelativeScatterPlotReport( - attributes=[attribute], - 
filter_algorithm=["{}-{}".format(rev, config.nick) for rev in REVISIONS], - get_category=lambda run1, run2: run1.get("domain")), - outfile="{}-{}-{}-{}-{}.png".format(exp.name, attribute, config.nick, *REVISIONS)) - -exp.run_steps() diff --git a/experiments/issue637/v5.py b/experiments/issue637/v5.py deleted file mode 100755 index afdc4fa802..0000000000 --- a/experiments/issue637/v5.py +++ /dev/null @@ -1,72 +0,0 @@ -#! /usr/bin/env python -# -*- coding: utf-8 -*- - -import os - -from lab.environments import LocalEnvironment, BaselSlurmEnvironment - -import common_setup -from common_setup import IssueConfig, IssueExperiment -from relativescatter import RelativeScatterPlotReport - -DIR = os.path.dirname(os.path.abspath(__file__)) -BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"] -REVISIONS = ["issue637-v1-base", "issue637-v5"] -#REVISIONS = ["issue637-v5"] -DRIVER_OPTIONS = ["--overall-time-limit", "30m"] -CONFIGS = [ - IssueConfig( - "cegar-landmarks-goals", - ["--search", "astar(cegar())"], - driver_options=DRIVER_OPTIONS), - IssueConfig( - "cegar-original", - ["--search", "astar(cegar(subtasks=[original()]))"], - driver_options=DRIVER_OPTIONS), -] -SUITE = common_setup.DEFAULT_OPTIMAL_SUITE -ENVIRONMENT = BaselSlurmEnvironment( - partition="infai_2", - email="jendrik.seipp@unibas.ch", - export=["PATH", "DOWNWARD_BENCHMARKS"]) - -if common_setup.is_test_run(): - SUITE = IssueExperiment.DEFAULT_TEST_SUITE - ENVIRONMENT = LocalEnvironment(processes=1) - -exp = IssueExperiment( - revisions=REVISIONS, - configs=CONFIGS, - environment=ENVIRONMENT, -) -exp.add_suite(BENCHMARKS_DIR, SUITE) - -exp.add_parser(exp.EXITCODE_PARSER) -exp.add_parser(exp.TRANSLATOR_PARSER) -exp.add_parser(exp.SINGLE_SEARCH_PARSER) -exp.add_parser(exp.PLANNER_PARSER) -exp.add_parser(os.path.join(DIR, "parser.py")) - -exp.add_step('build', exp.build) -exp.add_step('start', exp.start_runs) -exp.add_fetcher(name='fetch') -exp.add_fetcher(os.path.join(DIR, "data/issue637-v1-eval")) 
-exp.add_fetcher(os.path.join(DIR, "data/issue637-v4-eval")) - - -attributes = IssueExperiment.DEFAULT_TABLE_ATTRIBUTES + [ - "search_start_time", "search_start_memory", "init_time", "cartesian_states"] -exp.add_absolute_report_step(attributes=attributes) -exp.add_comparison_table_step(attributes=attributes) - -if len(REVISIONS) == 2: - for attribute in ["init_time", "expansions_until_last_jump"]: - for config in CONFIGS: - exp.add_report( - RelativeScatterPlotReport( - attributes=[attribute], - filter_algorithm=["{}-{}".format(rev, config.nick) for rev in REVISIONS], - get_category=lambda run1, run2: run1.get("domain")), - outfile="{}-{}-{}-{}-{}.png".format(exp.name, attribute, config.nick, *REVISIONS)) - -exp.run_steps() diff --git a/experiments/issue637/v6.py b/experiments/issue637/v6.py deleted file mode 100755 index 697c2723ba..0000000000 --- a/experiments/issue637/v6.py +++ /dev/null @@ -1,69 +0,0 @@ -#! /usr/bin/env python -# -*- coding: utf-8 -*- - -import os - -from lab.environments import LocalEnvironment, BaselSlurmEnvironment - -import common_setup -from common_setup import IssueConfig, IssueExperiment -from relativescatter import RelativeScatterPlotReport - -DIR = os.path.dirname(os.path.abspath(__file__)) -BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"] -REVISIONS = ["issue637-v1-base", "issue637-v6"] -DRIVER_OPTIONS = ["--overall-time-limit", "30m"] -CONFIGS = [ - IssueConfig( - "cegar-landmarks-goals", - ["--search", "astar(cegar())"], - driver_options=DRIVER_OPTIONS), - IssueConfig( - "cegar-original", - ["--search", "astar(cegar(subtasks=[original()]))"], - driver_options=DRIVER_OPTIONS), -] -SUITE = common_setup.DEFAULT_OPTIMAL_SUITE -ENVIRONMENT = BaselSlurmEnvironment( - partition="infai_2", - email="jendrik.seipp@unibas.ch", - export=["PATH", "DOWNWARD_BENCHMARKS"]) - -if common_setup.is_test_run(): - SUITE = IssueExperiment.DEFAULT_TEST_SUITE - ENVIRONMENT = LocalEnvironment(processes=1) - -exp = IssueExperiment( - revisions=REVISIONS, - 
configs=CONFIGS, - environment=ENVIRONMENT, -) -exp.add_suite(BENCHMARKS_DIR, SUITE) - -exp.add_parser(exp.EXITCODE_PARSER) -exp.add_parser(exp.TRANSLATOR_PARSER) -exp.add_parser(exp.SINGLE_SEARCH_PARSER) -exp.add_parser(exp.PLANNER_PARSER) -exp.add_parser(os.path.join(DIR, "parser.py")) - -exp.add_step('build', exp.build) -exp.add_step('start', exp.start_runs) -exp.add_fetcher(name='fetch') - - -attributes = IssueExperiment.DEFAULT_TABLE_ATTRIBUTES + [ - "search_start_memory", "init_time"] -#exp.add_absolute_report_step(attributes=attributes) -exp.add_comparison_table_step(attributes=attributes) - -if len(REVISIONS) == 2: - for attribute in ["init_time", "expansions_until_last_jump"]: - for config in CONFIGS: - exp.add_report( - RelativeScatterPlotReport( - attributes=[attribute], - filter_algorithm=["{}-{}".format(rev, config.nick) for rev in REVISIONS], - get_category=lambda run1, run2: run1.get("domain")), - outfile="{}-{}-{}-{}-{}.png".format(exp.name, attribute, config.nick, *REVISIONS)) - -exp.run_steps() diff --git a/experiments/issue637/v7.py b/experiments/issue637/v7.py deleted file mode 100755 index 0f87ce97fd..0000000000 --- a/experiments/issue637/v7.py +++ /dev/null @@ -1,69 +0,0 @@ -#! 
/usr/bin/env python -# -*- coding: utf-8 -*- - -import os - -from lab.environments import LocalEnvironment, BaselSlurmEnvironment - -import common_setup -from common_setup import IssueConfig, IssueExperiment -from relativescatter import RelativeScatterPlotReport - -DIR = os.path.dirname(os.path.abspath(__file__)) -BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"] -REVISIONS = ["issue637-v1-base", "issue637-v6", "issue637-v7"] -DRIVER_OPTIONS = [] -CONFIGS = [ - IssueConfig( - "cegar-landmarks-goals", - ["--search", "astar(cegar())"], - driver_options=DRIVER_OPTIONS), - IssueConfig( - "cegar-original", - ["--search", "astar(cegar(subtasks=[original()]))"], - driver_options=DRIVER_OPTIONS), -] -SUITE = common_setup.DEFAULT_OPTIMAL_SUITE -ENVIRONMENT = BaselSlurmEnvironment( - partition="infai_1", - email="jendrik.seipp@unibas.ch", - export=["PATH", "DOWNWARD_BENCHMARKS"]) - -if common_setup.is_test_run(): - SUITE = IssueExperiment.DEFAULT_TEST_SUITE - ENVIRONMENT = LocalEnvironment(processes=1) - -exp = IssueExperiment( - revisions=REVISIONS, - configs=CONFIGS, - environment=ENVIRONMENT, -) -exp.add_suite(BENCHMARKS_DIR, SUITE) - -exp.add_parser(exp.EXITCODE_PARSER) -exp.add_parser(exp.TRANSLATOR_PARSER) -exp.add_parser(exp.SINGLE_SEARCH_PARSER) -exp.add_parser(exp.PLANNER_PARSER) -exp.add_parser(os.path.join(DIR, "parser.py")) - -exp.add_step('build', exp.build) -exp.add_step('start', exp.start_runs) -exp.add_fetcher(name='fetch') - - -attributes = IssueExperiment.DEFAULT_TABLE_ATTRIBUTES + [ - "search_start_memory", "init_time"] -#exp.add_absolute_report_step(attributes=attributes) -exp.add_comparison_table_step(attributes=attributes) - -if len(REVISIONS) == 2: - for attribute in ["init_time", "expansions_until_last_jump"]: - for config in CONFIGS: - exp.add_report( - RelativeScatterPlotReport( - attributes=[attribute], - filter_algorithm=["{}-{}".format(rev, config.nick) for rev in REVISIONS], - get_category=lambda run1, run2: run1.get("domain")), - 
outfile="{}-{}-{}-{}-{}.png".format(exp.name, attribute, config.nick, *REVISIONS)) - -exp.run_steps() diff --git a/experiments/issue637/v8.py b/experiments/issue637/v8.py deleted file mode 100755 index 9923f53ba2..0000000000 --- a/experiments/issue637/v8.py +++ /dev/null @@ -1,69 +0,0 @@ -#! /usr/bin/env python -# -*- coding: utf-8 -*- - -import os - -from lab.environments import LocalEnvironment, BaselSlurmEnvironment - -import common_setup -from common_setup import IssueConfig, IssueExperiment -from relativescatter import RelativeScatterPlotReport - -DIR = os.path.dirname(os.path.abspath(__file__)) -BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"] -REVISIONS = ["issue637-v8-base", "issue637-v8"] -DRIVER_OPTIONS = [] -CONFIGS = [ - IssueConfig( - "cegar-landmarks-goals", - ["--search", "astar(cegar())"], - driver_options=DRIVER_OPTIONS), - IssueConfig( - "cegar-original", - ["--search", "astar(cegar(subtasks=[original()]))"], - driver_options=DRIVER_OPTIONS), -] -SUITE = common_setup.DEFAULT_OPTIMAL_SUITE -ENVIRONMENT = BaselSlurmEnvironment( - partition="infai_1", - email="jendrik.seipp@unibas.ch", - export=["PATH", "DOWNWARD_BENCHMARKS"]) - -if common_setup.is_test_run(): - SUITE = IssueExperiment.DEFAULT_TEST_SUITE - ENVIRONMENT = LocalEnvironment(processes=1) - -exp = IssueExperiment( - revisions=REVISIONS, - configs=CONFIGS, - environment=ENVIRONMENT, -) -exp.add_suite(BENCHMARKS_DIR, SUITE) - -exp.add_parser(exp.EXITCODE_PARSER) -exp.add_parser(exp.TRANSLATOR_PARSER) -exp.add_parser(exp.SINGLE_SEARCH_PARSER) -exp.add_parser(exp.PLANNER_PARSER) -exp.add_parser(os.path.join(DIR, "parser.py")) - -exp.add_step('build', exp.build) -exp.add_step('start', exp.start_runs) -exp.add_fetcher(name='fetch') - - -attributes = IssueExperiment.DEFAULT_TABLE_ATTRIBUTES + [ - "search_start_memory", "init_time"] -#exp.add_absolute_report_step(attributes=attributes) -exp.add_comparison_table_step(attributes=attributes) - -if len(REVISIONS) == 2: - for attribute in 
["init_time", "expansions_until_last_jump"]: - for config in CONFIGS: - exp.add_report( - RelativeScatterPlotReport( - attributes=[attribute], - filter_algorithm=["{}-{}".format(rev, config.nick) for rev in REVISIONS], - get_category=lambda run1, run2: run1.get("domain")), - outfile="{}-{}-{}-{}-{}.png".format(exp.name, attribute, config.nick, *REVISIONS)) - -exp.run_steps() diff --git a/experiments/issue638/common_setup.py b/experiments/issue638/common_setup.py deleted file mode 100644 index 4dff4aacfd..0000000000 --- a/experiments/issue638/common_setup.py +++ /dev/null @@ -1,335 +0,0 @@ -# -*- coding: utf-8 -*- - -import itertools -import os -import platform -import subprocess -import sys - -from lab.experiment import ARGPARSER -from lab.steps import Step -from lab import tools - -from downward.experiment import FastDownwardExperiment -from downward.reports.absolute import AbsoluteReport -from downward.reports.compare import CompareConfigsReport -from downward.reports.scatter import ScatterPlotReport - -from relativescatter import RelativeScatterPlotReport - - -def parse_args(): - ARGPARSER.add_argument( - "--test", - choices=["yes", "no", "auto"], - default="auto", - dest="test_run", - help="test experiment locally on a small suite if --test=yes or " - "--test=auto and we are not on a cluster") - return ARGPARSER.parse_args() - -ARGS = parse_args() - - -def get_script(): - """Get file name of main script.""" - return tools.get_script_path() - - -def get_script_dir(): - """Get directory of main script. - - Usually a relative directory (depends on how it was called by the user.)""" - return os.path.dirname(get_script()) - - -def get_experiment_name(): - """Get name for experiment. - - Derived from the absolute filename of the main script, e.g. 
- "/ham/spam/eggs.py" => "spam-eggs".""" - script = os.path.abspath(get_script()) - script_dir = os.path.basename(os.path.dirname(script)) - script_base = os.path.splitext(os.path.basename(script))[0] - return "%s-%s" % (script_dir, script_base) - - -def get_data_dir(): - """Get data dir for the experiment. - - This is the subdirectory "data" of the directory containing - the main script.""" - return os.path.join(get_script_dir(), "data", get_experiment_name()) - - -def get_repo_base(): - """Get base directory of the repository, as an absolute path. - - Search upwards in the directory tree from the main script until a - directory with a subdirectory named ".hg" is found. - - Abort if the repo base cannot be found.""" - path = os.path.abspath(get_script_dir()) - while os.path.dirname(path) != path: - if os.path.exists(os.path.join(path, ".hg")): - return path - path = os.path.dirname(path) - sys.exit("repo base could not be found") - - -def is_running_on_cluster(): - node = platform.node() - return ( - "cluster" in node or - node.startswith("gkigrid") or - node in ["habakuk", "turtur"]) - - -def is_test_run(): - return ARGS.test_run == "yes" or ( - ARGS.test_run == "auto" and not is_running_on_cluster()) - - -def get_algo_nick(revision, config_nick): - return "{revision}-{config_nick}".format(**locals()) - - -class IssueConfig(object): - """Hold information about a planner configuration. - - See FastDownwardExperiment.add_algorithm() for documentation of the - constructor's options. 
- - """ - def __init__(self, nick, component_options, - build_options=None, driver_options=None): - self.nick = nick - self.component_options = component_options - self.build_options = build_options - self.driver_options = driver_options - - -class IssueExperiment(FastDownwardExperiment): - """Subclass of FastDownwardExperiment with some convenience features.""" - - DEFAULT_TEST_SUITE = ["gripper:prob01.pddl"] - - DEFAULT_TABLE_ATTRIBUTES = [ - "cost", - "coverage", - "error", - "evaluations", - "expansions", - "expansions_until_last_jump", - "generated", - "memory", - "quality", - "run_dir", - "score_evaluations", - "score_expansions", - "score_generated", - "score_memory", - "score_search_time", - "score_total_time", - "search_time", - "total_time", - ] - - DEFAULT_SCATTER_PLOT_ATTRIBUTES = [ - "evaluations", - "expansions", - "expansions_until_last_jump", - "initial_h_value", - "memory", - "search_time", - "total_time", - ] - - PORTFOLIO_ATTRIBUTES = [ - "cost", - "coverage", - "error", - "plan_length", - "run_dir", - ] - - def __init__(self, revisions=None, configs=None, path=None, **kwargs): - """ - - You can either specify both *revisions* and *configs* or none - of them. If they are omitted, you will need to call - exp.add_algorithm() manually. - - If *revisions* is given, it must be a non-empty list of - revision identifiers, which specify which planner versions to - use in the experiment. The same versions are used for - translator, preprocessor and search. :: - - IssueExperiment(revisions=["issue123", "4b3d581643"], ...) - - If *configs* is given, it must be a non-empty list of - IssueConfig objects. :: - - IssueExperiment(..., configs=[ - IssueConfig("ff", ["--search", "eager_greedy(ff())"]), - IssueConfig( - "lama", [], - driver_options=["--alias", "seq-sat-lama-2011"]), - ]) - - If *path* is specified, it must be the path to where the - experiment should be built (e.g. - /home/john/experiments/issue123/exp01/). 
If omitted, the - experiment path is derived automatically from the main - script's filename. Example:: - - script = experiments/issue123/exp01.py --> - path = experiments/issue123/data/issue123-exp01/ - - """ - - path = path or get_data_dir() - - FastDownwardExperiment.__init__(self, path=path, **kwargs) - - if (revisions and not configs) or (not revisions and configs): - raise ValueError( - "please provide either both or none of revisions and configs") - - for rev in revisions: - for config in configs: - self.add_algorithm( - get_algo_nick(rev, config.nick), - get_repo_base(), - rev, - config.component_options, - build_options=config.build_options, - driver_options=config.driver_options) - - self._revisions = revisions - self._configs = configs - - @classmethod - def _is_portfolio(cls, config_nick): - return "fdss" in config_nick - - @classmethod - def get_supported_attributes(cls, config_nick, attributes): - if cls._is_portfolio(config_nick): - return [attr for attr in attributes - if attr in cls.PORTFOLIO_ATTRIBUTES] - return attributes - - def add_absolute_report_step(self, **kwargs): - """Add step that makes an absolute report. - - Absolute reports are useful for experiments that don't compare - revisions. - - The report is written to the experiment evaluation directory. - - All *kwargs* will be passed to the AbsoluteReport class. If the - keyword argument *attributes* is not specified, a default list - of attributes is used. :: - - exp.add_absolute_report_step(attributes=["coverage"]) - - """ - kwargs.setdefault("attributes", self.DEFAULT_TABLE_ATTRIBUTES) - report = AbsoluteReport(**kwargs) - outfile = os.path.join( - self.eval_dir, - get_experiment_name() + "." + report.output_format) - self.add_report(report, outfile=outfile) - self.add_step(Step( - 'publish-absolute-report', subprocess.call, ['publish', outfile])) - - def add_comparison_table_step(self, **kwargs): - """Add a step that makes pairwise revision comparisons. 
- - Create comparative reports for all pairs of Fast Downward - revisions. Each report pairs up the runs of the same config and - lists the two absolute attribute values and their difference - for all attributes in kwargs["attributes"]. - - All *kwargs* will be passed to the CompareConfigsReport class. - If the keyword argument *attributes* is not specified, a - default list of attributes is used. :: - - exp.add_comparison_table_step(attributes=["coverage"]) - - """ - kwargs.setdefault("attributes", self.DEFAULT_TABLE_ATTRIBUTES) - - def make_comparison_tables(): - for rev1, rev2 in itertools.combinations(self._revisions, 2): - compared_configs = [] - for config in self._configs: - config_nick = config.nick - compared_configs.append( - ("%s-%s" % (rev1, config_nick), - "%s-%s" % (rev2, config_nick), - "Diff (%s)" % config_nick)) - report = CompareConfigsReport(compared_configs, **kwargs) - outfile = os.path.join( - self.eval_dir, - "%s-%s-%s-compare.%s" % ( - self.name, rev1, rev2, report.output_format)) - report(self.eval_dir, outfile) - - def publish_comparison_tables(): - for rev1, rev2 in itertools.combinations(self._revisions, 2): - outfile = os.path.join( - self.eval_dir, - "%s-%s-%s-compare.html" % (self.name, rev1, rev2)) - subprocess.call(["publish", outfile]) - - self.add_step(Step("make-comparison-tables", make_comparison_tables)) - self.add_step(Step( - "publish-comparison-tables", publish_comparison_tables)) - - def add_scatter_plot_step(self, relative=False, attributes=None): - """Add step creating (relative) scatter plots for all revision pairs. - - Create a scatter plot for each combination of attribute, - configuration and revisions pair. If *attributes* is not - specified, a list of common scatter plot attributes is used. - For portfolios all attributes except "cost", "coverage" and - "plan_length" will be ignored. 
:: - - exp.add_scatter_plot_step(attributes=["expansions"]) - - """ - if relative: - report_class = RelativeScatterPlotReport - scatter_dir = os.path.join(self.eval_dir, "scatter-relative") - step_name = "make-relative-scatter-plots" - else: - report_class = ScatterPlotReport - scatter_dir = os.path.join(self.eval_dir, "scatter-absolute") - step_name = "make-absolute-scatter-plots" - if attributes is None: - attributes = self.DEFAULT_SCATTER_PLOT_ATTRIBUTES - - def make_scatter_plot(config_nick, rev1, rev2, attribute): - name = "-".join([self.name, rev1, rev2, attribute, config_nick]) - print "Make scatter plot for", name - algo1 = "{}-{}".format(rev1, config_nick) - algo2 = "{}-{}".format(rev2, config_nick) - report = report_class( - filter_config=[algo1, algo2], - attributes=[attribute], - get_category=lambda run1, run2: run1["domain"], - legend_location=(1.3, 0.5)) - report( - self.eval_dir, - os.path.join(scatter_dir, rev1 + "-" + rev2, name)) - - def make_scatter_plots(): - for config in self._configs: - for rev1, rev2 in itertools.combinations(self._revisions, 2): - for attribute in self.get_supported_attributes( - config.nick, attributes): - make_scatter_plot(config.nick, rev1, rev2, attribute) - - self.add_step(Step(step_name, make_scatter_plots)) diff --git a/experiments/issue638/custom-parser.py b/experiments/issue638/custom-parser.py deleted file mode 100755 index 83ffb631a7..0000000000 --- a/experiments/issue638/custom-parser.py +++ /dev/null @@ -1,24 +0,0 @@ -#! 
/usr/bin/env python - -from lab.parser import Parser - - -class CustomParser(Parser): - def __init__(self): - Parser.__init__(self) - self.add_pattern( - "num_sga_patterns", - "Found (\d+) SGA patterns.", - required=False, - type=int) - self.add_pattern( - "num_interesting_patterns", - "Found (\d+) interesting patterns.", - required=False, - type=int) - - -if __name__ == "__main__": - parser = CustomParser() - print "Running custom parser" - parser.parse() diff --git a/experiments/issue638/relativescatter.py b/experiments/issue638/relativescatter.py deleted file mode 100644 index 14d5d42752..0000000000 --- a/experiments/issue638/relativescatter.py +++ /dev/null @@ -1,106 +0,0 @@ -# -*- coding: utf-8 -*- - -from collections import defaultdict - -from matplotlib import ticker - -from downward.reports.scatter import ScatterPlotReport -from downward.reports.plot import PlotReport, Matplotlib, MatplotlibPlot - - -# TODO: handle outliers - -# TODO: this is mostly copied from ScatterMatplotlib (scatter.py) -class RelativeScatterMatplotlib(Matplotlib): - @classmethod - def _plot(cls, report, axes, categories, styles): - # Display grid - axes.grid(b=True, linestyle='-', color='0.75') - - has_points = False - # Generate the scatter plots - for category, coords in sorted(categories.items()): - X, Y = zip(*coords) - axes.scatter(X, Y, s=42, label=category, **styles[category]) - if X and Y: - has_points = True - - if report.xscale == 'linear' or report.yscale == 'linear': - plot_size = report.missing_val * 1.01 - else: - plot_size = report.missing_val * 1.25 - - # make 5 ticks above and below 1 - yticks = [] - tick_step = report.ylim_top**(1/5.0) - for i in xrange(-5, 6): - yticks.append(tick_step**i) - axes.set_yticks(yticks) - axes.get_yaxis().set_major_formatter(ticker.ScalarFormatter()) - - axes.set_xlim(report.xlim_left or -1, report.xlim_right or plot_size) - axes.set_ylim(report.ylim_bottom or -1, report.ylim_top or plot_size) - - for axis in [axes.xaxis, axes.yaxis]: - 
MatplotlibPlot.change_axis_formatter( - axis, - report.missing_val if report.show_missing else None) - return has_points - - -class RelativeScatterPlotReport(ScatterPlotReport): - """ - Generate a scatter plot that shows how a specific attribute in two - configurations. The attribute value in config 1 is shown on the - x-axis and the relation to the value in config 2 on the y-axis. - """ - - def __init__(self, show_missing=True, get_category=None, **kwargs): - ScatterPlotReport.__init__(self, show_missing, get_category, **kwargs) - if self.output_format == 'tex': - raise "not supported" - else: - self.writer = RelativeScatterMatplotlib - - def _fill_categories(self, runs): - # We discard the *runs* parameter. - # Map category names to value tuples - categories = defaultdict(list) - self.ylim_bottom = 2 - self.ylim_top = 0.5 - self.xlim_left = float("inf") - for (domain, problem), runs in self.problem_runs.items(): - if len(runs) != 2: - continue - run1, run2 = runs - assert (run1['config'] == self.configs[0] and - run2['config'] == self.configs[1]) - val1 = run1.get(self.attribute) - val2 = run2.get(self.attribute) - if val1 is None or val2 is None: - continue - category = self.get_category(run1, run2) - assert val1 > 0, (domain, problem, self.configs[0], val1) - assert val2 > 0, (domain, problem, self.configs[1], val2) - x = val1 - y = val2 / float(val1) - - categories[category].append((x, y)) - - self.ylim_top = max(self.ylim_top, y) - self.ylim_bottom = min(self.ylim_bottom, y) - self.xlim_left = min(self.xlim_left, x) - - # center around 1 - if self.ylim_bottom < 1: - self.ylim_top = max(self.ylim_top, 1 / float(self.ylim_bottom)) - if self.ylim_top > 1: - self.ylim_bottom = min(self.ylim_bottom, 1 / float(self.ylim_top)) - return categories - - def _set_scales(self, xscale, yscale): - # ScatterPlots use log-scaling on the x-axis by default. 
- default_xscale = 'log' - if self.attribute and self.attribute in self.LINEAR: - default_xscale = 'linear' - PlotReport._set_scales(self, xscale or default_xscale, 'log') diff --git a/experiments/issue638/v1.py b/experiments/issue638/v1.py deleted file mode 100755 index abf630b4ec..0000000000 --- a/experiments/issue638/v1.py +++ /dev/null @@ -1,47 +0,0 @@ -#! /usr/bin/env python -# -*- coding: utf-8 -*- - -import os, sys - -from lab.environments import LocalEnvironment, MaiaEnvironment - -from common_setup import IssueConfig, IssueExperiment, is_test_run - - -BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"] -REVISIONS = ["issue638-base", "issue638-v1"] -CONFIGS = [ - IssueConfig(heuristic, ["--search", "astar({})".format(heuristic)]) - for heuristic in [ - "cpdbs(patterns=systematic(3), dominance_pruning=true)", - "cpdbs(patterns=systematic(4), dominance_pruning=true)", - "operatorcounting([pho_constraints(patterns=systematic(3))])", - "operatorcounting([pho_constraints(patterns=systematic(4))])", - ] -] - -sys.path.append(BENCHMARKS_DIR) -import suites - -SUITE = suites.suite_optimal_strips() -ENVIRONMENT = MaiaEnvironment( - priority=0, email="florian.pommerening@unibas.ch") - -if is_test_run(): - SUITE = IssueExperiment.DEFAULT_TEST_SUITE - ENVIRONMENT = LocalEnvironment(processes=1) - -exp = IssueExperiment( - revisions=REVISIONS, - configs=CONFIGS, - environment=ENVIRONMENT, -) -exp.add_suite(BENCHMARKS_DIR, SUITE) -exp.add_command("parser", ["custom-parser.py"]) - -exp.add_comparison_table_step( - attributes=exp.DEFAULT_TABLE_ATTRIBUTES + - ["num_sga_patterns", "num_interesting_patterns"]) -exp.add_scatter_plot_step(attributes=["total_time"]) - -exp() diff --git a/experiments/issue643/common_setup.py b/experiments/issue643/common_setup.py deleted file mode 100644 index 953c87e27f..0000000000 --- a/experiments/issue643/common_setup.py +++ /dev/null @@ -1,356 +0,0 @@ -# -*- coding: utf-8 -*- - -import itertools -import os -import platform -import subprocess 
-import sys - -from lab.environments import LocalEnvironment, MaiaEnvironment -from lab.experiment import ARGPARSER -from lab.steps import Step -from lab import tools - -from downward.experiment import FastDownwardExperiment -from downward.reports.absolute import AbsoluteReport -from downward.reports.compare import CompareConfigsReport -from downward.reports.scatter import ScatterPlotReport - - -def parse_args(): - ARGPARSER.add_argument( - "--test", - choices=["yes", "no", "auto"], - default="auto", - dest="test_run", - help="test experiment locally on a small suite if --test=yes or " - "--test=auto and we are not on a cluster") - return ARGPARSER.parse_args() - -ARGS = parse_args() - - -def get_script(): - """Get file name of main script.""" - return tools.get_script_path() - - -def get_script_dir(): - """Get directory of main script. - - Usually a relative directory (depends on how it was called by the user.)""" - return os.path.dirname(get_script()) - - -def get_experiment_name(): - """Get name for experiment. - - Derived from the absolute filename of the main script, e.g. - "/ham/spam/eggs.py" => "spam-eggs".""" - script = os.path.abspath(get_script()) - script_dir = os.path.basename(os.path.dirname(script)) - script_base = os.path.splitext(os.path.basename(script))[0] - return "%s-%s" % (script_dir, script_base) - - -def get_data_dir(): - """Get data dir for the experiment. - - This is the subdirectory "data" of the directory containing - the main script.""" - return os.path.join(get_script_dir(), "data", get_experiment_name()) - - -def get_repo_base(): - """Get base directory of the repository, as an absolute path. - - Search upwards in the directory tree from the main script until a - directory with a subdirectory named ".hg" is found. 
- - Abort if the repo base cannot be found.""" - path = os.path.abspath(get_script_dir()) - while os.path.dirname(path) != path: - if os.path.exists(os.path.join(path, ".hg")): - return path - path = os.path.dirname(path) - sys.exit("repo base could not be found") - - -def is_running_on_cluster(): - node = platform.node() - return ( - "cluster" in node or - node.startswith("gkigrid") or - node in ["habakuk", "turtur"]) - - -def is_test_run(): - return ARGS.test_run == "yes" or ( - ARGS.test_run == "auto" and not is_running_on_cluster()) - - -def get_algo_nick(revision, config_nick): - return "{revision}-{config_nick}".format(**locals()) - - -class IssueConfig(object): - """Hold information about a planner configuration. - - See FastDownwardExperiment.add_algorithm() for documentation of the - constructor's options. - - """ - def __init__(self, nick, component_options, - build_options=None, driver_options=None): - self.nick = nick - self.component_options = component_options - self.build_options = build_options - self.driver_options = driver_options - - -class IssueExperiment(FastDownwardExperiment): - """Subclass of FastDownwardExperiment with some convenience features.""" - - DEFAULT_TEST_SUITE = "gripper:prob01.pddl" - - DEFAULT_TABLE_ATTRIBUTES = [ - "cost", - "coverage", - "error", - "evaluations", - "expansions", - "expansions_until_last_jump", - "generated", - "memory", - "quality", - "run_dir", - "score_evaluations", - "score_expansions", - "score_generated", - "score_memory", - "score_search_time", - "score_total_time", - "search_time", - "total_time", - ] - - DEFAULT_SCATTER_PLOT_ATTRIBUTES = [ - "evaluations", - "expansions", - "expansions_until_last_jump", - "initial_h_value", - "memory", - "search_time", - "total_time", - ] - - PORTFOLIO_ATTRIBUTES = [ - "cost", - "coverage", - "error", - "plan_length", - "run_dir", - ] - - def __init__(self, suite, revisions=[], configs={}, grid_priority=None, - path=None, test_suite=None, email=None, processes=None, - 
**kwargs): - """ - If *revisions* is specified, it should be a non-empty - list of revisions, which specify which planner versions to use - in the experiment. The same versions are used for translator, - preprocessor and search. :: - - IssueExperiment(revisions=["issue123", "4b3d581643"], ...) - - *configs* must be a non-empty list of IssueConfig objects. :: - - IssueExperiment(..., configs=[ - IssueConfig("ff", ["--search", "eager_greedy(ff())"]), - IssueConfig( - "lama", [], - driver_options=["--alias", "seq-sat-lama-2011"]), - ]) - - *suite* sets the benchmarks for the experiment. It must be a - single string or a list of strings specifying domains or - tasks. The downward.suites module has many predefined - suites. :: - - IssueExperiment(..., suite=["grid", "gripper:prob01.pddl"]) - - from downward import suites - IssueExperiment(..., suite=suites.suite_all()) - IssueExperiment(..., suite=suites.suite_satisficing_with_ipc11()) - IssueExperiment(..., suite=suites.suite_optimal()) - - Use *grid_priority* to set the job priority for cluster - experiments. It must be in the range [-1023, 0] where 0 is the - highest priority. By default the priority is 0. :: - - IssueExperiment(..., grid_priority=-500) - - If *path* is specified, it must be the path to where the - experiment should be built (e.g. - /home/john/experiments/issue123/exp01/). If omitted, the - experiment path is derived automatically from the main - script's filename. Example:: - - script = experiments/issue123/exp01.py --> - path = experiments/issue123/data/issue123-exp01/ - - Specify *test_suite* to set the benchmarks for experiment test - runs. By default the first gripper task is used. - - IssueExperiment(..., test_suite=["depot:pfile1", "tpp:p01.pddl"]) - - If *email* is specified, it should be an email address. This - email address will be notified upon completion of the experiments - if it is run on the cluster. 
- """ - - if is_test_run(): - kwargs["environment"] = LocalEnvironment(processes=processes) - suite = test_suite or self.DEFAULT_TEST_SUITE - elif "environment" not in kwargs: - kwargs["environment"] = MaiaEnvironment( - priority=grid_priority, email=email) - - path = path or get_data_dir() - - FastDownwardExperiment.__init__(self, path=path, **kwargs) - - repo = get_repo_base() - for rev in revisions: - for config in configs: - self.add_algorithm( - get_algo_nick(rev, config.nick), - repo, - rev, - config.component_options, - build_options=config.build_options, - driver_options=config.driver_options) - - self.add_suite(os.path.join(repo, "benchmarks"), suite) - - self._revisions = revisions - self._configs = configs - - @classmethod - def _is_portfolio(cls, config_nick): - return "fdss" in config_nick - - @classmethod - def get_supported_attributes(cls, config_nick, attributes): - if cls._is_portfolio(config_nick): - return [attr for attr in attributes - if attr in cls.PORTFOLIO_ATTRIBUTES] - return attributes - - def add_absolute_report_step(self, **kwargs): - """Add step that makes an absolute report. - - Absolute reports are useful for experiments that don't - compare revisions. - - The report is written to the experiment evaluation directory. - - All *kwargs* will be passed to the AbsoluteReport class. If - the keyword argument *attributes* is not specified, a - default list of attributes is used. :: - - exp.add_absolute_report_step(attributes=["coverage"]) - - """ - kwargs.setdefault("attributes", self.DEFAULT_TABLE_ATTRIBUTES) - report = AbsoluteReport(**kwargs) - outfile = os.path.join(self.eval_dir, - get_experiment_name() + "." + - report.output_format) - self.add_report(report, outfile=outfile) - self.add_step(Step('publish-absolute-report', - subprocess.call, - ['publish', outfile])) - - def add_comparison_table_step(self, **kwargs): - """Add a step that makes pairwise revision comparisons. 
- - Create comparative reports for all pairs of Fast Downward - revisions. Each report pairs up the runs of the same config and - lists the two absolute attribute values and their difference - for all attributes in kwargs["attributes"]. - - All *kwargs* will be passed to the CompareConfigsReport class. - If the keyword argument *attributes* is not specified, a - default list of attributes is used. :: - - exp.add_comparison_table_step(attributes=["coverage"]) - - """ - kwargs.setdefault("attributes", self.DEFAULT_TABLE_ATTRIBUTES) - - def make_comparison_tables(): - for rev1, rev2 in itertools.combinations(self._revisions, 2): - compared_configs = [] - for config in self._configs: - config_nick = config.nick - compared_configs.append( - ("%s-%s" % (rev1, config_nick), - "%s-%s" % (rev2, config_nick), - "Diff (%s)" % config_nick)) - report = CompareConfigsReport(compared_configs, **kwargs) - outfile = os.path.join( - self.eval_dir, - "%s-%s-%s-compare" % (self.name, rev1, rev2) - + "." + report.output_format) - report(self.eval_dir, outfile) - - def publish_comparison_tables(): - for rev1, rev2 in itertools.combinations(self._revisions, 2): - outfile = os.path.join( - self.eval_dir, - "%s-%s-%s-compare" % (self.name, rev1, rev2) - + ".html") - subprocess.call(['publish', outfile]) - - self.add_step(Step("make-comparison-tables", make_comparison_tables)) - self.add_step(Step("publish-comparison-tables", publish_comparison_tables)) - - def add_scatter_plot_step(self, attributes=None): - """Add a step that creates scatter plots for all revision pairs. - - Create a scatter plot for each combination of attribute, - configuration and revisions pair. If *attributes* is not - specified, a list of common scatter plot attributes is used. - For portfolios all attributes except "cost", "coverage" and - "plan_length" will be ignored. 
:: - - exp.add_scatter_plot_step(attributes=["expansions"]) - - """ - if attributes is None: - attributes = self.DEFAULT_SCATTER_PLOT_ATTRIBUTES - scatter_dir = os.path.join(self.eval_dir, "scatter") - - def make_scatter_plot(config_nick, rev1, rev2, attribute): - name = "-".join([self.name, rev1, rev2, attribute, config_nick]) - print "Make scatter plot for", name - algo1 = "%s-%s" % (rev1, config_nick) - algo2 = "%s-%s" % (rev2, config_nick) - report = ScatterPlotReport( - filter_config=[algo1, algo2], - attributes=[attribute], - get_category=lambda run1, run2: run1["domain"], - legend_location=(1.3, 0.5)) - report( - self.eval_dir, - os.path.join(scatter_dir, rev1 + "-" + rev2, name)) - - def make_scatter_plots(): - for config in self._configs: - for rev1, rev2 in itertools.combinations(self._revisions, 2): - for attribute in self.get_supported_attributes( - config.nick, attributes): - make_scatter_plot(config.nick, rev1, rev2, attribute) - - self.add_step(Step("make-scatter-plots", make_scatter_plots)) diff --git a/experiments/issue643/relativescatter.py b/experiments/issue643/relativescatter.py deleted file mode 100644 index 68e2529f34..0000000000 --- a/experiments/issue643/relativescatter.py +++ /dev/null @@ -1,106 +0,0 @@ -# -*- coding: utf-8 -*- - -from collections import defaultdict - -from matplotlib import ticker - -from downward.reports.scatter import ScatterPlotReport -from downward.reports.plot import PlotReport, Matplotlib, MatplotlibPlot - - -# TODO: handle outliers - -# TODO: this is mostly copied from ScatterMatplotlib (scatter.py) -class RelativeScatterMatplotlib(Matplotlib): - @classmethod - def _plot(cls, report, axes, categories, styles): - # Display grid - axes.grid(b=True, linestyle='-', color='0.75') - - has_points = False - # Generate the scatter plots - for category, coords in sorted(categories.items()): - X, Y = zip(*coords) - axes.scatter(X, Y, s=42, label=category, **styles[category]) - if X and Y: - has_points = True - - if 
report.xscale == 'linear' or report.yscale == 'linear': - plot_size = report.missing_val * 1.01 - else: - plot_size = report.missing_val * 1.25 - - # make 5 ticks above and below 1 - yticks = [] - tick_step = report.ylim_top**(1/5.0) - for i in xrange(-5, 6): - yticks.append(tick_step**i) - axes.set_yticks(yticks) - axes.get_yaxis().set_major_formatter(ticker.ScalarFormatter()) - - axes.set_xlim(report.xlim_left or -1, report.xlim_right or plot_size) - axes.set_ylim(report.ylim_bottom or -1, report.ylim_top or plot_size) - - for axis in [axes.xaxis, axes.yaxis]: - MatplotlibPlot.change_axis_formatter(axis, - report.missing_val if report.show_missing else None) - return has_points - - - -class RelativeScatterPlotReport(ScatterPlotReport): - """ - Generate a scatter plot that shows how a specific attribute in two - configurations. The attribute value in config 1 is shown on the - x-axis and the relation to the value in config 2 on the y-axis. - """ - - def __init__(self, show_missing=True, get_category=None, **kwargs): - ScatterPlotReport.__init__(self, show_missing, get_category, **kwargs) - if self.output_format == 'tex': - raise "not supported" - else: - self.writer = RelativeScatterMatplotlib - - def _fill_categories(self, runs): - # We discard the *runs* parameter. 
- # Map category names to value tuples - categories = defaultdict(list) - self.ylim_bottom = 2 - self.ylim_top = 0.5 - self.xlim_left = float("inf") - for (domain, problem), runs in self.problem_runs.items(): - if len(runs) != 2: - continue - run1, run2 = runs - assert (run1['config'] == self.configs[0] and - run2['config'] == self.configs[1]) - val1 = run1.get(self.attribute) - val2 = run2.get(self.attribute) - if val1 is None or val2 is None: - continue - category = self.get_category(run1, run2) - assert val1 > 0, (domain, problem, self.configs[0], val1) - assert val2 > 0, (domain, problem, self.configs[1], val2) - x = val1 - y = val2 / float(val1) - - categories[category].append((x, y)) - - self.ylim_top = max(self.ylim_top, y) - self.ylim_bottom = min(self.ylim_bottom, y) - self.xlim_left = min(self.xlim_left, x) - - # center around 1 - if self.ylim_bottom < 1: - self.ylim_top = max(self.ylim_top, 1 / float(self.ylim_bottom)) - if self.ylim_top > 1: - self.ylim_bottom = min(self.ylim_bottom, 1 / float(self.ylim_top)) - return categories - - def _set_scales(self, xscale, yscale): - # ScatterPlots use log-scaling on the x-axis by default. - default_xscale = 'log' - if self.attribute and self.attribute in self.LINEAR: - default_xscale = 'linear' - PlotReport._set_scales(self, xscale or default_xscale, 'log') diff --git a/experiments/issue643/suites.py b/experiments/issue643/suites.py deleted file mode 100644 index ec030b6d4c..0000000000 --- a/experiments/issue643/suites.py +++ /dev/null @@ -1,315 +0,0 @@ -# Benchmark suites from the Fast Downward benchmark collection. 
- -def suite_alternative_formulations(): - return ['airport-adl', 'no-mprime', 'no-mystery'] - - -def suite_ipc98_to_ipc04_adl(): - return [ - 'assembly', 'miconic-fulladl', 'miconic-simpleadl', - 'optical-telegraphs', 'philosophers', 'psr-large', - 'psr-middle', 'schedule', - ] - - -def suite_ipc98_to_ipc04_strips(): - return [ - 'airport', 'blocks', 'depot', 'driverlog', 'freecell', 'grid', - 'gripper', 'logistics00', 'logistics98', 'miconic', 'movie', - 'mprime', 'mystery', 'pipesworld-notankage', 'psr-small', - 'satellite', 'zenotravel', - ] - - -def suite_ipc98_to_ipc04(): - # All IPC1-4 domains, including the trivial Movie. - return sorted(suite_ipc98_to_ipc04_adl() + suite_ipc98_to_ipc04_strips()) - - -def suite_ipc06_adl(): - return [ - 'openstacks', - 'pathways', - 'trucks', - ] - - -def suite_ipc06_strips_compilations(): - return [ - 'openstacks-strips', - 'pathways-noneg', - 'trucks-strips', - ] - - -def suite_ipc06_strips(): - return [ - 'pipesworld-tankage', - 'rovers', - 'storage', - 'tpp', - ] - - -def suite_ipc06(): - return sorted(suite_ipc06_adl() + suite_ipc06_strips()) - - -def suite_ipc08_common_strips(): - return [ - 'parcprinter-08-strips', - 'pegsol-08-strips', - 'scanalyzer-08-strips', - ] - - -def suite_ipc08_opt_adl(): - return ['openstacks-opt08-adl'] - - -def suite_ipc08_opt_strips(): - return sorted(suite_ipc08_common_strips() + [ - 'elevators-opt08-strips', - 'openstacks-opt08-strips', - 'sokoban-opt08-strips', - 'transport-opt08-strips', - 'woodworking-opt08-strips', - ]) - - -def suite_ipc08_opt(): - return sorted(suite_ipc08_opt_strips() + suite_ipc08_opt_adl()) - - -def suite_ipc08_sat_adl(): - return ['openstacks-sat08-adl'] - - -def suite_ipc08_sat_strips(): - return sorted(suite_ipc08_common_strips() + [ - # Note: cyber-security is missing. 
- 'elevators-sat08-strips', - 'openstacks-sat08-strips', - 'sokoban-sat08-strips', - 'transport-sat08-strips', - 'woodworking-sat08-strips', - ]) - - -def suite_ipc08_sat(): - return sorted(suite_ipc08_sat_strips() + suite_ipc08_sat_adl()) - - -def suite_ipc08(): - return sorted(set(suite_ipc08_opt() + suite_ipc08_sat())) - - -def suite_ipc11_opt(): - return [ - 'barman-opt11-strips', - 'elevators-opt11-strips', - 'floortile-opt11-strips', - 'nomystery-opt11-strips', - 'openstacks-opt11-strips', - 'parcprinter-opt11-strips', - 'parking-opt11-strips', - 'pegsol-opt11-strips', - 'scanalyzer-opt11-strips', - 'sokoban-opt11-strips', - 'tidybot-opt11-strips', - 'transport-opt11-strips', - 'visitall-opt11-strips', - 'woodworking-opt11-strips', - ] - - -def suite_ipc11_sat(): - return [ - 'barman-sat11-strips', - 'elevators-sat11-strips', - 'floortile-sat11-strips', - 'nomystery-sat11-strips', - 'openstacks-sat11-strips', - 'parcprinter-sat11-strips', - 'parking-sat11-strips', - 'pegsol-sat11-strips', - 'scanalyzer-sat11-strips', - 'sokoban-sat11-strips', - 'tidybot-sat11-strips', - 'transport-sat11-strips', - 'visitall-sat11-strips', - 'woodworking-sat11-strips', - ] - - -def suite_ipc11(): - return sorted(suite_ipc11_opt() + suite_ipc11_sat()) - - -def suite_ipc14_agl_adl(): - return [ - 'cavediving-agl14-adl', - 'citycar-agl14-adl', - 'maintenance-agl14-adl', - ] - - -def suite_ipc14_agl_strips(): - return [ - 'barman-agl14-strips', - 'childsnack-agl14-strips', - 'floortile-agl14-strips', - 'ged-agl14-strips', - 'hiking-agl14-strips', - 'openstacks-agl14-strips', - 'parking-agl14-strips', - 'tetris-agl14-strips', - 'thoughtful-agl14-strips', - 'transport-agl14-strips', - 'visitall-agl14-strips', - ] - - -def suite_ipc14_agl(): - return sorted(suite_ipc14_agl_adl() + suite_ipc14_agl_strips()) - - -def suite_ipc14_mco_adl(): - return [ - 'cavediving-mco14-adl', - 'citycar-mco14-adl', - 'maintenance-mco14-adl', - ] - - -def suite_ipc14_mco_strips(): - return [ - 
'barman-mco14-strips', - 'childsnack-mco14-strips', - 'floortile-mco14-strips', - 'ged-mco14-strips', - 'hiking-mco14-strips', - 'openstacks-mco14-strips', - 'parking-mco14-strips', - 'tetris-mco14-strips', - 'thoughtful-mco14-strips', - 'transport-mco14-strips', - 'visitall-mco14-strips', - ] - - -def suite_ipc14_mco(): - return sorted(suite_ipc14_mco_adl() + suite_ipc14_mco_strips()) - - -def suite_ipc14_opt_adl(): - return [ - 'cavediving-opt14-adl', - 'citycar-opt14-adl', - 'maintenance-opt14-adl', - ] - - -def suite_ipc14_opt_strips(): - return [ - 'barman-opt14-strips', - 'childsnack-opt14-strips', - 'floortile-opt14-strips', - 'ged-opt14-strips', - 'hiking-opt14-strips', - 'openstacks-opt14-strips', - 'parking-opt14-strips', - 'tetris-opt14-strips', - 'tidybot-opt14-strips', - 'transport-opt14-strips', - 'visitall-opt14-strips', - ] - - -def suite_ipc14_opt(): - return sorted(suite_ipc14_opt_adl() + suite_ipc14_opt_strips()) - - -def suite_ipc14_sat_adl(): - return [ - 'cavediving-sat14-adl', - 'citycar-sat14-adl', - 'maintenance-sat14-adl', - ] - - -def suite_ipc14_sat_strips(): - return [ - 'barman-sat14-strips', - 'childsnack-sat14-strips', - 'floortile-sat14-strips', - 'ged-sat14-strips', - 'hiking-sat14-strips', - 'openstacks-sat14-strips', - 'parking-sat14-strips', - 'tetris-sat14-strips', - 'thoughtful-sat14-strips', - 'transport-sat14-strips', - 'visitall-sat14-strips', - ] - - -def suite_ipc14_sat(): - return sorted(suite_ipc14_sat_adl() + suite_ipc14_sat_strips()) - - -def suite_ipc14(): - return sorted( - suite_ipc14_agl() + suite_ipc14_mco() + - suite_ipc14_opt() + suite_ipc14_sat()) - - -def suite_unsolvable(): - # TODO: Add other unsolvable problems (Miconic-FullADL). - # TODO: Add 'fsc-grid-r:prize5x5_R.pddl' and 't0-uts:uts_r-02.pddl' - # if the extra-domains branch is merged. 
- return sorted( - ['mystery:prob%02d.pddl' % index - for index in [4, 5, 7, 8, 12, 16, 18, 21, 22, 23, 24]] + - ['miconic-fulladl:f21-3.pddl', 'miconic-fulladl:f30-2.pddl']) - - -def suite_optimal_adl(): - return sorted( - suite_ipc98_to_ipc04_adl() + suite_ipc06_adl() + - suite_ipc08_opt_adl()) - - -def suite_optimal_strips(): - return sorted( - suite_ipc98_to_ipc04_strips() + suite_ipc06_strips() + - suite_ipc06_strips_compilations() + suite_ipc08_opt_strips() + - suite_ipc11_opt()) - - -def suite_optimal(): - return sorted(suite_optimal_adl() + suite_optimal_strips()) - - -def suite_satisficing_adl(): - return sorted( - suite_ipc98_to_ipc04_adl() + suite_ipc06_adl() + - suite_ipc08_sat_adl()) - - -def suite_satisficing_strips(): - return sorted( - suite_ipc98_to_ipc04_strips() + suite_ipc06_strips() + - suite_ipc06_strips_compilations() + suite_ipc08_sat_strips() + - suite_ipc11_sat()) - - -def suite_satisficing(): - return sorted(suite_satisficing_adl() + suite_satisficing_strips()) - - -def suite_all(): - return sorted( - suite_ipc98_to_ipc04() + suite_ipc06() + - suite_ipc06_strips_compilations() + suite_ipc08() + - suite_ipc11() + suite_alternative_formulations()) diff --git a/experiments/issue643/v1.py b/experiments/issue643/v1.py deleted file mode 100755 index dd232bd0a9..0000000000 --- a/experiments/issue643/v1.py +++ /dev/null @@ -1,40 +0,0 @@ -#! 
/usr/bin/env python -# -*- coding: utf-8 -*- - -from common_setup import IssueConfig, IssueExperiment -from relativescatter import RelativeScatterPlotReport -import suites - - -configs = [ - IssueConfig( - "cegar-landmarks-10k", - ["--search", "astar(cegar(subtasks=[landmarks()],max_states=10000))"]), - IssueConfig( - "cegar-landmarks-goals-900s", - ["--search", "astar(cegar(subtasks=[landmarks(),goals()],max_time=900))"]), -] -revisions = ["issue643-base", "issue643-v1"] - -exp = IssueExperiment( - revisions=revisions, - configs=configs, - suite=suites.suite_optimal_strips(), - test_suite=["depot:pfile1"], - email="jendrik.seipp@unibas.ch", -) - -exp.add_comparison_table_step() - -for attribute in ["memory", "total_time"]: - for config in configs: - exp.add_report( - RelativeScatterPlotReport( - attributes=[attribute], - filter_config=["{}-{}".format(rev, config.nick) for rev in revisions], - get_category=lambda run1, run2: run1.get("domain"), - ), - outfile="{}-{}-{}.png".format(exp.name, attribute, config.nick) - ) - -exp() diff --git a/experiments/issue644/common_setup.py b/experiments/issue644/common_setup.py deleted file mode 100644 index 934531f15d..0000000000 --- a/experiments/issue644/common_setup.py +++ /dev/null @@ -1,357 +0,0 @@ -# -*- coding: utf-8 -*- - -import itertools -import os -import platform -import subprocess -import sys - -from lab.environments import LocalEnvironment, MaiaEnvironment -from lab.experiment import ARGPARSER -from lab.steps import Step -from lab import tools - -from downward.experiment import FastDownwardExperiment -from downward.reports.absolute import AbsoluteReport -from downward.reports.compare import CompareConfigsReport -from downward.reports.scatter import ScatterPlotReport - - -def parse_args(): - ARGPARSER.add_argument( - "--test", - choices=["yes", "no", "auto"], - default="auto", - dest="test_run", - help="test experiment locally on a small suite if --test=yes or " - "--test=auto and we are not on a cluster") - return 
ARGPARSER.parse_args() - -ARGS = parse_args() - - -def get_script(): - """Get file name of main script.""" - return tools.get_script_path() - - -def get_script_dir(): - """Get directory of main script. - - Usually a relative directory (depends on how it was called by the user.)""" - return os.path.dirname(get_script()) - - -def get_experiment_name(): - """Get name for experiment. - - Derived from the absolute filename of the main script, e.g. - "/ham/spam/eggs.py" => "spam-eggs".""" - script = os.path.abspath(get_script()) - script_dir = os.path.basename(os.path.dirname(script)) - script_base = os.path.splitext(os.path.basename(script))[0] - return "%s-%s" % (script_dir, script_base) - - -def get_data_dir(): - """Get data dir for the experiment. - - This is the subdirectory "data" of the directory containing - the main script.""" - return os.path.join(get_script_dir(), "data", get_experiment_name()) - - -def get_repo_base(): - """Get base directory of the repository, as an absolute path. - - Search upwards in the directory tree from the main script until a - directory with a subdirectory named ".hg" is found. - - Abort if the repo base cannot be found.""" - path = os.path.abspath(get_script_dir()) - while os.path.dirname(path) != path: - if os.path.exists(os.path.join(path, ".hg")): - return path - path = os.path.dirname(path) - sys.exit("repo base could not be found") - - -def is_running_on_cluster(): - node = platform.node() - return ( - "cluster" in node or - node.startswith("gkigrid") or - node in ["habakuk", "turtur"]) - - -def is_test_run(): - return ARGS.test_run == "yes" or ( - ARGS.test_run == "auto" and not is_running_on_cluster()) - - -def get_algo_nick(revision, config_nick): - return "{revision}-{config_nick}".format(**locals()) - - -class IssueConfig(object): - """Hold information about a planner configuration. - - See FastDownwardExperiment.add_algorithm() for documentation of the - constructor's options. 
- - """ - def __init__(self, nick, component_options, - build_options=None, driver_options=None): - self.nick = nick - self.component_options = component_options - self.build_options = build_options - self.driver_options = driver_options - - -class IssueExperiment(FastDownwardExperiment): - """Subclass of FastDownwardExperiment with some convenience features.""" - - DEFAULT_TEST_SUITE = "gripper:prob01.pddl" - - DEFAULT_TABLE_ATTRIBUTES = [ - "cost", - "coverage", - "error", - "evaluations", - "expansions", - "expansions_until_last_jump", - "generated", - "memory", - "quality", - "run_dir", - "score_evaluations", - "score_expansions", - "score_generated", - "score_memory", - "score_search_time", - "score_total_time", - "search_time", - "total_time", - ] - - DEFAULT_SCATTER_PLOT_ATTRIBUTES = [ - "evaluations", - "expansions", - "expansions_until_last_jump", - "initial_h_value", - "memory", - "search_time", - "total_time", - ] - - PORTFOLIO_ATTRIBUTES = [ - "cost", - "coverage", - "error", - "plan_length", - "run_dir", - ] - - def __init__(self, benchmarks_dir, suite, revisions=[], configs={}, - grid_priority=None, path=None, test_suite=None, - email=None, processes=None, - **kwargs): - """ - If *revisions* is specified, it should be a non-empty - list of revisions, which specify which planner versions to use - in the experiment. The same versions are used for translator, - preprocessor and search. :: - - IssueExperiment(revisions=["issue123", "4b3d581643"], ...) - - *configs* must be a non-empty list of IssueConfig objects. :: - - IssueExperiment(..., configs=[ - IssueConfig("ff", ["--search", "eager_greedy(ff())"]), - IssueConfig( - "lama", [], - driver_options=["--alias", "seq-sat-lama-2011"]), - ]) - - *suite* sets the benchmarks for the experiment. It must be a - single string or a list of strings specifying domains or - tasks. The downward.suites module has many predefined - suites. 
:: - - IssueExperiment(..., suite=["grid", "gripper:prob01.pddl"]) - - from downward import suites - IssueExperiment(..., suite=suites.suite_all()) - IssueExperiment(..., suite=suites.suite_satisficing_with_ipc11()) - IssueExperiment(..., suite=suites.suite_optimal()) - - Use *grid_priority* to set the job priority for cluster - experiments. It must be in the range [-1023, 0] where 0 is the - highest priority. By default the priority is 0. :: - - IssueExperiment(..., grid_priority=-500) - - If *path* is specified, it must be the path to where the - experiment should be built (e.g. - /home/john/experiments/issue123/exp01/). If omitted, the - experiment path is derived automatically from the main - script's filename. Example:: - - script = experiments/issue123/exp01.py --> - path = experiments/issue123/data/issue123-exp01/ - - Specify *test_suite* to set the benchmarks for experiment test - runs. By default the first gripper task is used. - - IssueExperiment(..., test_suite=["depot:pfile1", "tpp:p01.pddl"]) - - If *email* is specified, it should be an email address. This - email address will be notified upon completion of the experiments - if it is run on the cluster. 
- """ - - if is_test_run(): - kwargs["environment"] = LocalEnvironment(processes=processes) - suite = test_suite or self.DEFAULT_TEST_SUITE - elif "environment" not in kwargs: - kwargs["environment"] = MaiaEnvironment( - priority=grid_priority, email=email) - - path = path or get_data_dir() - - FastDownwardExperiment.__init__(self, path=path, **kwargs) - - repo = get_repo_base() - for rev in revisions: - for config in configs: - self.add_algorithm( - get_algo_nick(rev, config.nick), - repo, - rev, - config.component_options, - build_options=config.build_options, - driver_options=config.driver_options) - - self.add_suite(benchmarks_dir, suite) - - self._revisions = revisions - self._configs = configs - - @classmethod - def _is_portfolio(cls, config_nick): - return "fdss" in config_nick - - @classmethod - def get_supported_attributes(cls, config_nick, attributes): - if cls._is_portfolio(config_nick): - return [attr for attr in attributes - if attr in cls.PORTFOLIO_ATTRIBUTES] - return attributes - - def add_absolute_report_step(self, **kwargs): - """Add step that makes an absolute report. - - Absolute reports are useful for experiments that don't - compare revisions. - - The report is written to the experiment evaluation directory. - - All *kwargs* will be passed to the AbsoluteReport class. If - the keyword argument *attributes* is not specified, a - default list of attributes is used. :: - - exp.add_absolute_report_step(attributes=["coverage"]) - - """ - kwargs.setdefault("attributes", self.DEFAULT_TABLE_ATTRIBUTES) - report = AbsoluteReport(**kwargs) - outfile = os.path.join(self.eval_dir, - get_experiment_name() + "." + - report.output_format) - self.add_report(report, outfile=outfile) - self.add_step(Step('publish-absolute-report', - subprocess.call, - ['publish', outfile])) - - def add_comparison_table_step(self, **kwargs): - """Add a step that makes pairwise revision comparisons. - - Create comparative reports for all pairs of Fast Downward - revisions. 
Each report pairs up the runs of the same config and - lists the two absolute attribute values and their difference - for all attributes in kwargs["attributes"]. - - All *kwargs* will be passed to the CompareConfigsReport class. - If the keyword argument *attributes* is not specified, a - default list of attributes is used. :: - - exp.add_comparison_table_step(attributes=["coverage"]) - - """ - kwargs.setdefault("attributes", self.DEFAULT_TABLE_ATTRIBUTES) - - def make_comparison_tables(): - for rev1, rev2 in itertools.combinations(self._revisions, 2): - compared_configs = [] - for config in self._configs: - config_nick = config.nick - compared_configs.append( - ("%s-%s" % (rev1, config_nick), - "%s-%s" % (rev2, config_nick), - "Diff (%s)" % config_nick)) - report = CompareConfigsReport(compared_configs, **kwargs) - outfile = os.path.join( - self.eval_dir, - "%s-%s-%s-compare" % (self.name, rev1, rev2) - + "." + report.output_format) - report(self.eval_dir, outfile) - - def publish_comparison_tables(): - for rev1, rev2 in itertools.combinations(self._revisions, 2): - outfile = os.path.join( - self.eval_dir, - "%s-%s-%s-compare" % (self.name, rev1, rev2) - + ".html") - subprocess.call(['publish', outfile]) - - self.add_step(Step("make-comparison-tables", make_comparison_tables)) - self.add_step(Step("publish-comparison-tables", publish_comparison_tables)) - - def add_scatter_plot_step(self, attributes=None): - """Add a step that creates scatter plots for all revision pairs. - - Create a scatter plot for each combination of attribute, - configuration and revisions pair. If *attributes* is not - specified, a list of common scatter plot attributes is used. - For portfolios all attributes except "cost", "coverage" and - "plan_length" will be ignored. 
:: - - exp.add_scatter_plot_step(attributes=["expansions"]) - - """ - if attributes is None: - attributes = self.DEFAULT_SCATTER_PLOT_ATTRIBUTES - scatter_dir = os.path.join(self.eval_dir, "scatter") - - def make_scatter_plot(config_nick, rev1, rev2, attribute): - name = "-".join([self.name, rev1, rev2, attribute, config_nick]) - print "Make scatter plot for", name - algo1 = "%s-%s" % (rev1, config_nick) - algo2 = "%s-%s" % (rev2, config_nick) - report = ScatterPlotReport( - filter_config=[algo1, algo2], - attributes=[attribute], - get_category=lambda run1, run2: run1["domain"], - legend_location=(1.3, 0.5)) - report( - self.eval_dir, - os.path.join(scatter_dir, rev1 + "-" + rev2, name)) - - def make_scatter_plots(): - for config in self._configs: - for rev1, rev2 in itertools.combinations(self._revisions, 2): - for attribute in self.get_supported_attributes( - config.nick, attributes): - make_scatter_plot(config.nick, rev1, rev2, attribute) - - self.add_step(Step("make-scatter-plots", make_scatter_plots)) diff --git a/experiments/issue644/ms-parser.py b/experiments/issue644/ms-parser.py deleted file mode 100755 index c219b72ba5..0000000000 --- a/experiments/issue644/ms-parser.py +++ /dev/null @@ -1,72 +0,0 @@ -#! 
/usr/bin/env python - -from lab.parser import Parser - -parser = Parser() -parser.add_pattern('ms_final_size', 'Final transition system size: (\d+)', required=False, type=int) -parser.add_pattern('ms_construction_time', 'Done initializing merge-and-shrink heuristic \[(.+)s\]', required=False, type=float) -parser.add_pattern('ms_memory_delta', 'Final peak memory increase of merge-and-shrink computation: (\d+) KB', required=False, type=int) -parser.add_pattern('actual_search_time', 'Actual search time: (.+)s \[t=.+s\]', required=False, type=float) - -def check_ms_constructed(content, props): - ms_construction_time = props.get('ms_construction_time') - abstraction_constructed = False - if ms_construction_time is not None: - abstraction_constructed = True - props['ms_abstraction_constructed'] = abstraction_constructed - -parser.add_function(check_ms_constructed) - -def check_planner_exit_reason(content, props): - ms_abstraction_constructed = props.get('ms_abstraction_constructed') - error = props.get('error') - if error != 'none' and error != 'timeout' and error != 'out-of-memory': - print 'error: %s' % error - return - - # Check whether merge-and-shrink computation or search ran out of - # time or memory. 
- ms_out_of_time = False - ms_out_of_memory = False - search_out_of_time = False - search_out_of_memory = False - if ms_abstraction_constructed == False: - if error == 'timeout': - ms_out_of_time = True - elif error == 'out-of-memory': - ms_out_of_memory = True - elif ms_abstraction_constructed == True: - if error == 'timeout': - search_out_of_time = True - elif error == 'out-of-memory': - search_out_of_memory = True - props['ms_out_of_time'] = ms_out_of_time - props['ms_out_of_memory'] = ms_out_of_memory - props['search_out_of_time'] = search_out_of_time - props['search_out_of_memory'] = search_out_of_memory - -parser.add_function(check_planner_exit_reason) - -def check_perfect_heuristic(content, props): - plan_length = props.get('plan_length') - expansions = props.get('expansions') - if plan_length != None: - perfect_heuristic = False - if plan_length + 1 == expansions: - perfect_heuristic = True - props['perfect_heuristic'] = perfect_heuristic - -parser.add_function(check_perfect_heuristic) - -def check_proved_unsolvability(content, props): - proved_unsolvability = False - if props['coverage'] == 0: - for line in content.splitlines(): - if line == 'Completely explored state space -- no solution!': - proved_unsolvability = True - break - props['proved_unsolvability'] = proved_unsolvability - -parser.add_function(check_proved_unsolvability) - -parser.parse() diff --git a/experiments/issue644/relativescatter.py b/experiments/issue644/relativescatter.py deleted file mode 100644 index 68e2529f34..0000000000 --- a/experiments/issue644/relativescatter.py +++ /dev/null @@ -1,106 +0,0 @@ -# -*- coding: utf-8 -*- - -from collections import defaultdict - -from matplotlib import ticker - -from downward.reports.scatter import ScatterPlotReport -from downward.reports.plot import PlotReport, Matplotlib, MatplotlibPlot - - -# TODO: handle outliers - -# TODO: this is mostly copied from ScatterMatplotlib (scatter.py) -class RelativeScatterMatplotlib(Matplotlib): - @classmethod - 
def _plot(cls, report, axes, categories, styles): - # Display grid - axes.grid(b=True, linestyle='-', color='0.75') - - has_points = False - # Generate the scatter plots - for category, coords in sorted(categories.items()): - X, Y = zip(*coords) - axes.scatter(X, Y, s=42, label=category, **styles[category]) - if X and Y: - has_points = True - - if report.xscale == 'linear' or report.yscale == 'linear': - plot_size = report.missing_val * 1.01 - else: - plot_size = report.missing_val * 1.25 - - # make 5 ticks above and below 1 - yticks = [] - tick_step = report.ylim_top**(1/5.0) - for i in xrange(-5, 6): - yticks.append(tick_step**i) - axes.set_yticks(yticks) - axes.get_yaxis().set_major_formatter(ticker.ScalarFormatter()) - - axes.set_xlim(report.xlim_left or -1, report.xlim_right or plot_size) - axes.set_ylim(report.ylim_bottom or -1, report.ylim_top or plot_size) - - for axis in [axes.xaxis, axes.yaxis]: - MatplotlibPlot.change_axis_formatter(axis, - report.missing_val if report.show_missing else None) - return has_points - - - -class RelativeScatterPlotReport(ScatterPlotReport): - """ - Generate a scatter plot that shows how a specific attribute in two - configurations. The attribute value in config 1 is shown on the - x-axis and the relation to the value in config 2 on the y-axis. - """ - - def __init__(self, show_missing=True, get_category=None, **kwargs): - ScatterPlotReport.__init__(self, show_missing, get_category, **kwargs) - if self.output_format == 'tex': - raise "not supported" - else: - self.writer = RelativeScatterMatplotlib - - def _fill_categories(self, runs): - # We discard the *runs* parameter. 
- # Map category names to value tuples - categories = defaultdict(list) - self.ylim_bottom = 2 - self.ylim_top = 0.5 - self.xlim_left = float("inf") - for (domain, problem), runs in self.problem_runs.items(): - if len(runs) != 2: - continue - run1, run2 = runs - assert (run1['config'] == self.configs[0] and - run2['config'] == self.configs[1]) - val1 = run1.get(self.attribute) - val2 = run2.get(self.attribute) - if val1 is None or val2 is None: - continue - category = self.get_category(run1, run2) - assert val1 > 0, (domain, problem, self.configs[0], val1) - assert val2 > 0, (domain, problem, self.configs[1], val2) - x = val1 - y = val2 / float(val1) - - categories[category].append((x, y)) - - self.ylim_top = max(self.ylim_top, y) - self.ylim_bottom = min(self.ylim_bottom, y) - self.xlim_left = min(self.xlim_left, x) - - # center around 1 - if self.ylim_bottom < 1: - self.ylim_top = max(self.ylim_top, 1 / float(self.ylim_bottom)) - if self.ylim_top > 1: - self.ylim_bottom = min(self.ylim_bottom, 1 / float(self.ylim_top)) - return categories - - def _set_scales(self, xscale, yscale): - # ScatterPlots use log-scaling on the x-axis by default. - default_xscale = 'log' - if self.attribute and self.attribute in self.LINEAR: - default_xscale = 'linear' - PlotReport._set_scales(self, xscale or default_xscale, 'log') diff --git a/experiments/issue644/suites.py b/experiments/issue644/suites.py deleted file mode 100644 index 4615212cfd..0000000000 --- a/experiments/issue644/suites.py +++ /dev/null @@ -1,350 +0,0 @@ -#! /usr/bin/env python -# -*- coding: utf-8 -*- - -import argparse -import textwrap - - -HELP = "Convert suite name to list of domains or tasks." 
- - -def suite_alternative_formulations(): - return ['airport-adl', 'no-mprime', 'no-mystery'] - - -def suite_ipc98_to_ipc04_adl(): - return [ - 'assembly', 'miconic-fulladl', 'miconic-simpleadl', - 'optical-telegraphs', 'philosophers', 'psr-large', - 'psr-middle', 'schedule', - ] - - -def suite_ipc98_to_ipc04_strips(): - return [ - 'airport', 'blocks', 'depot', 'driverlog', 'freecell', 'grid', - 'gripper', 'logistics00', 'logistics98', 'miconic', 'movie', - 'mprime', 'mystery', 'pipesworld-notankage', 'psr-small', - 'satellite', 'zenotravel', - ] - - -def suite_ipc98_to_ipc04(): - # All IPC1-4 domains, including the trivial Movie. - return sorted(suite_ipc98_to_ipc04_adl() + suite_ipc98_to_ipc04_strips()) - - -def suite_ipc06_adl(): - return [ - 'openstacks', - 'pathways', - 'trucks', - ] - - -def suite_ipc06_strips_compilations(): - return [ - 'openstacks-strips', - 'pathways-noneg', - 'trucks-strips', - ] - - -def suite_ipc06_strips(): - return [ - 'pipesworld-tankage', - 'rovers', - 'storage', - 'tpp', - ] - - -def suite_ipc06(): - return sorted(suite_ipc06_adl() + suite_ipc06_strips()) - - -def suite_ipc08_common_strips(): - return [ - 'parcprinter-08-strips', - 'pegsol-08-strips', - 'scanalyzer-08-strips', - ] - - -def suite_ipc08_opt_adl(): - return ['openstacks-opt08-adl'] - - -def suite_ipc08_opt_strips(): - return sorted(suite_ipc08_common_strips() + [ - 'elevators-opt08-strips', - 'openstacks-opt08-strips', - 'sokoban-opt08-strips', - 'transport-opt08-strips', - 'woodworking-opt08-strips', - ]) - - -def suite_ipc08_opt(): - return sorted(suite_ipc08_opt_strips() + suite_ipc08_opt_adl()) - - -def suite_ipc08_sat_adl(): - return ['openstacks-sat08-adl'] - - -def suite_ipc08_sat_strips(): - return sorted(suite_ipc08_common_strips() + [ - # Note: cyber-security is missing. 
- 'elevators-sat08-strips', - 'openstacks-sat08-strips', - 'sokoban-sat08-strips', - 'transport-sat08-strips', - 'woodworking-sat08-strips', - ]) - - -def suite_ipc08_sat(): - return sorted(suite_ipc08_sat_strips() + suite_ipc08_sat_adl()) - - -def suite_ipc08(): - return sorted(set(suite_ipc08_opt() + suite_ipc08_sat())) - - -def suite_ipc11_opt(): - return [ - 'barman-opt11-strips', - 'elevators-opt11-strips', - 'floortile-opt11-strips', - 'nomystery-opt11-strips', - 'openstacks-opt11-strips', - 'parcprinter-opt11-strips', - 'parking-opt11-strips', - 'pegsol-opt11-strips', - 'scanalyzer-opt11-strips', - 'sokoban-opt11-strips', - 'tidybot-opt11-strips', - 'transport-opt11-strips', - 'visitall-opt11-strips', - 'woodworking-opt11-strips', - ] - - -def suite_ipc11_sat(): - return [ - 'barman-sat11-strips', - 'elevators-sat11-strips', - 'floortile-sat11-strips', - 'nomystery-sat11-strips', - 'openstacks-sat11-strips', - 'parcprinter-sat11-strips', - 'parking-sat11-strips', - 'pegsol-sat11-strips', - 'scanalyzer-sat11-strips', - 'sokoban-sat11-strips', - 'tidybot-sat11-strips', - 'transport-sat11-strips', - 'visitall-sat11-strips', - 'woodworking-sat11-strips', - ] - - -def suite_ipc11(): - return sorted(suite_ipc11_opt() + suite_ipc11_sat()) - - -def suite_ipc14_agl_adl(): - return [ - 'cavediving-14-adl', - 'citycar-sat14-adl', - 'maintenance-sat14-adl', - ] - - -def suite_ipc14_agl_strips(): - return [ - 'barman-sat14-strips', - 'childsnack-sat14-strips', - 'floortile-sat14-strips', - 'ged-sat14-strips', - 'hiking-agl14-strips', - 'openstacks-agl14-strips', - 'parking-sat14-strips', - 'tetris-sat14-strips', - 'thoughtful-sat14-strips', - 'transport-sat14-strips', - 'visitall-sat14-strips', - ] - - -def suite_ipc14_agl(): - return sorted(suite_ipc14_agl_adl() + suite_ipc14_agl_strips()) - - -def suite_ipc14_mco_adl(): - return [ - 'cavediving-14-adl', - 'citycar-sat14-adl', - 'maintenance-sat14-adl', - ] - - -def suite_ipc14_mco_strips(): - return [ - 
'barman-mco14-strips', - 'childsnack-sat14-strips', - 'floortile-sat14-strips', - 'ged-sat14-strips', - 'hiking-sat14-strips', - 'openstacks-sat14-strips', - 'parking-sat14-strips', - 'tetris-sat14-strips', - 'thoughtful-mco14-strips', - 'transport-sat14-strips', - 'visitall-sat14-strips', - ] - - -def suite_ipc14_mco(): - return sorted(suite_ipc14_mco_adl() + suite_ipc14_mco_strips()) - - -def suite_ipc14_opt_adl(): - return [ - 'cavediving-14-adl', - 'citycar-opt14-adl', - 'maintenance-opt14-adl', - ] - - -def suite_ipc14_opt_strips(): - return [ - 'barman-opt14-strips', - 'childsnack-opt14-strips', - 'floortile-opt14-strips', - 'ged-opt14-strips', - 'hiking-opt14-strips', - 'openstacks-opt14-strips', - 'parking-opt14-strips', - 'tetris-opt14-strips', - 'tidybot-opt14-strips', - 'transport-opt14-strips', - 'visitall-opt14-strips', - ] - - -def suite_ipc14_opt(): - return sorted(suite_ipc14_opt_adl() + suite_ipc14_opt_strips()) - - -def suite_ipc14_sat_adl(): - return [ - 'cavediving-14-adl', - 'citycar-sat14-adl', - 'maintenance-sat14-adl', - ] - - -def suite_ipc14_sat_strips(): - return [ - 'barman-sat14-strips', - 'childsnack-sat14-strips', - 'floortile-sat14-strips', - 'ged-sat14-strips', - 'hiking-sat14-strips', - 'openstacks-sat14-strips', - 'parking-sat14-strips', - 'tetris-sat14-strips', - 'thoughtful-sat14-strips', - 'transport-sat14-strips', - 'visitall-sat14-strips', - ] - - -def suite_ipc14_sat(): - return sorted(suite_ipc14_sat_adl() + suite_ipc14_sat_strips()) - - -def suite_ipc14(): - return sorted(set( - suite_ipc14_agl() + suite_ipc14_mco() + - suite_ipc14_opt() + suite_ipc14_sat())) - - -def suite_unsolvable(): - return sorted( - ['mystery:prob%02d.pddl' % index - for index in [4, 5, 7, 8, 12, 16, 18, 21, 22, 23, 24]] + - ['miconic-fulladl:f21-3.pddl', 'miconic-fulladl:f30-2.pddl']) - - -def suite_optimal_adl(): - return sorted( - suite_ipc98_to_ipc04_adl() + suite_ipc06_adl() + - suite_ipc08_opt_adl() + suite_ipc14_opt_adl()) - - -def 
suite_optimal_strips(): - return sorted( - suite_ipc98_to_ipc04_strips() + suite_ipc06_strips() + - suite_ipc06_strips_compilations() + suite_ipc08_opt_strips() + - suite_ipc11_opt() + suite_ipc14_opt_strips()) - - -def suite_optimal(): - return sorted(suite_optimal_adl() + suite_optimal_strips()) - - -def suite_satisficing_adl(): - return sorted( - suite_ipc98_to_ipc04_adl() + suite_ipc06_adl() + - suite_ipc08_sat_adl() + suite_ipc14_sat_adl()) - - -def suite_satisficing_strips(): - return sorted( - suite_ipc98_to_ipc04_strips() + suite_ipc06_strips() + - suite_ipc06_strips_compilations() + suite_ipc08_sat_strips() + - suite_ipc11_sat() + suite_ipc14_sat_strips()) - - -def suite_satisficing(): - return sorted(suite_satisficing_adl() + suite_satisficing_strips()) - - -def suite_all(): - return sorted( - suite_ipc98_to_ipc04() + suite_ipc06() + - suite_ipc06_strips_compilations() + suite_ipc08() + - suite_ipc11() + suite_ipc14() + suite_alternative_formulations()) - - -def parse_args(): - parser = argparse.ArgumentParser() - parser.add_argument("suite", help="suite name") - return parser.parse_args() - - -def main(): - prefix = "suite_" - suite_names = [ - name[len(prefix):] for name in sorted(globals().keys()) - if name.startswith(prefix)] - parser = argparse.ArgumentParser(description=HELP) - parser.add_argument("suite", choices=suite_names, help="suite name") - parser.add_argument( - "--width", default=72, type=int, - help="output line width (default: %(default)s). 
Use 1 for single " - "column.") - args = parser.parse_args() - suite_func = globals()[prefix + args.suite] - print(textwrap.fill( - str(suite_func()), - width=args.width, - break_long_words=False, - break_on_hyphens=False)) - - -if __name__ == "__main__": - main() diff --git a/experiments/issue644/v1-dfp-tiebreaking-abp-report.py b/experiments/issue644/v1-dfp-tiebreaking-abp-report.py deleted file mode 100755 index a0393dc064..0000000000 --- a/experiments/issue644/v1-dfp-tiebreaking-abp-report.py +++ /dev/null @@ -1,121 +0,0 @@ -#! /usr/bin/env python -# -*- coding: utf-8 -*- - -import os -import suites -from lab.reports import Attribute, gm - -from common_setup import IssueConfig, IssueExperiment -try: - from relativescatter import RelativeScatterPlotReport - matplotlib = True -except ImportError: - print 'matplotlib not availabe, scatter plots not available' - matplotlib = False - -def main(revisions=None): - benchmarks_dir=os.path.expanduser('~/repos/downward/benchmarks') - suite=suites.suite_optimal_strips() - - configs = { - IssueConfig('dfp-reg-otn-abp-b50k', ['--search', 'astar(merge_and_shrink(merge_strategy=merge_dfp(atomic_ts_order=regular,product_ts_order=old_to_new,atomic_before_product=true),shrink_strategy=shrink_bisimulation(greedy=false),label_reduction=exact(before_shrinking=true,before_merging=false),max_states=50000,threshold_before_merge=1))']), - IssueConfig('dfp-reg-nto-abp-b50k', ['--search', 'astar(merge_and_shrink(merge_strategy=merge_dfp(atomic_ts_order=regular,product_ts_order=new_to_old,atomic_before_product=true),shrink_strategy=shrink_bisimulation(greedy=false),label_reduction=exact(before_shrinking=true,before_merging=false),max_states=50000,threshold_before_merge=1))']), - IssueConfig('dfp-reg-rnd-abp-b50k', ['--search', 
'astar(merge_and_shrink(merge_strategy=merge_dfp(atomic_ts_order=regular,product_ts_order=random,atomic_before_product=true),shrink_strategy=shrink_bisimulation(greedy=false),label_reduction=exact(before_shrinking=true,before_merging=false),max_states=50000,threshold_before_merge=1))']), - IssueConfig('dfp-inv-otn-abp-b50k', ['--search', 'astar(merge_and_shrink(merge_strategy=merge_dfp(atomic_ts_order=inverse,product_ts_order=old_to_new,atomic_before_product=true),shrink_strategy=shrink_bisimulation(greedy=false),label_reduction=exact(before_shrinking=true,before_merging=false),max_states=50000,threshold_before_merge=1))']), - IssueConfig('dfp-inv-nto-abp-b50k', ['--search', 'astar(merge_and_shrink(merge_strategy=merge_dfp(atomic_ts_order=inverse,product_ts_order=new_to_old,atomic_before_product=true),shrink_strategy=shrink_bisimulation(greedy=false),label_reduction=exact(before_shrinking=true,before_merging=false),max_states=50000,threshold_before_merge=1))']), - IssueConfig('dfp-inv-rnd-abp-b50k', ['--search', 'astar(merge_and_shrink(merge_strategy=merge_dfp(atomic_ts_order=inverse,product_ts_order=random,atomic_before_product=true),shrink_strategy=shrink_bisimulation(greedy=false),label_reduction=exact(before_shrinking=true,before_merging=false),max_states=50000,threshold_before_merge=1))']), - IssueConfig('dfp-rnd-otn-abp-b50k', ['--search', 'astar(merge_and_shrink(merge_strategy=merge_dfp(atomic_ts_order=random,product_ts_order=old_to_new,atomic_before_product=true),shrink_strategy=shrink_bisimulation(greedy=false),label_reduction=exact(before_shrinking=true,before_merging=false),max_states=50000,threshold_before_merge=1))']), - IssueConfig('dfp-rnd-nto-abp-b50k', ['--search', 
'astar(merge_and_shrink(merge_strategy=merge_dfp(atomic_ts_order=random,product_ts_order=new_to_old,atomic_before_product=true),shrink_strategy=shrink_bisimulation(greedy=false),label_reduction=exact(before_shrinking=true,before_merging=false),max_states=50000,threshold_before_merge=1))']), - IssueConfig('dfp-rnd-rnd-abp-b50k', ['--search', 'astar(merge_and_shrink(merge_strategy=merge_dfp(atomic_ts_order=random,product_ts_order=random,atomic_before_product=true),shrink_strategy=shrink_bisimulation(greedy=false),label_reduction=exact(before_shrinking=true,before_merging=false),max_states=50000,threshold_before_merge=1))']), - #IssueConfig('dfp-reg-otn-pba-b50k', ['--search', 'astar(merge_and_shrink(merge_strategy=merge_dfp(atomic_ts_order=regular,product_ts_order=old_to_new,atomic_before_product=false),shrink_strategy=shrink_bisimulation(greedy=false),label_reduction=exact(before_shrinking=true,before_merging=false),max_states=50000,threshold_before_merge=1))']), - #IssueConfig('dfp-reg-nto-pba-b50k', ['--search', 'astar(merge_and_shrink(merge_strategy=merge_dfp(atomic_ts_order=regular,product_ts_order=new_to_old,atomic_before_product=false),shrink_strategy=shrink_bisimulation(greedy=false),label_reduction=exact(before_shrinking=true,before_merging=false),max_states=50000,threshold_before_merge=1))']), - #IssueConfig('dfp-reg-rnd-pba-b50k', ['--search', 'astar(merge_and_shrink(merge_strategy=merge_dfp(atomic_ts_order=regular,product_ts_order=random,atomic_before_product=false),shrink_strategy=shrink_bisimulation(greedy=false),label_reduction=exact(before_shrinking=true,before_merging=false),max_states=50000,threshold_before_merge=1))']), - #IssueConfig('dfp-inv-otn-pba-b50k', ['--search', 
'astar(merge_and_shrink(merge_strategy=merge_dfp(atomic_ts_order=inverse,product_ts_order=old_to_new,atomic_before_product=false),shrink_strategy=shrink_bisimulation(greedy=false),label_reduction=exact(before_shrinking=true,before_merging=false),max_states=50000,threshold_before_merge=1))']), - #IssueConfig('dfp-inv-nto-pba-b50k', ['--search', 'astar(merge_and_shrink(merge_strategy=merge_dfp(atomic_ts_order=inverse,product_ts_order=new_to_old,atomic_before_product=false),shrink_strategy=shrink_bisimulation(greedy=false),label_reduction=exact(before_shrinking=true,before_merging=false),max_states=50000,threshold_before_merge=1))']), - #IssueConfig('dfp-inv-rnd-pba-b50k', ['--search', 'astar(merge_and_shrink(merge_strategy=merge_dfp(atomic_ts_order=inverse,product_ts_order=random,atomic_before_product=false),shrink_strategy=shrink_bisimulation(greedy=false),label_reduction=exact(before_shrinking=true,before_merging=false),max_states=50000,threshold_before_merge=1))']), - #IssueConfig('dfp-rnd-otn-pba-b50k', ['--search', 'astar(merge_and_shrink(merge_strategy=merge_dfp(atomic_ts_order=random,product_ts_order=old_to_new,atomic_before_product=false),shrink_strategy=shrink_bisimulation(greedy=false),label_reduction=exact(before_shrinking=true,before_merging=false),max_states=50000,threshold_before_merge=1))']), - #IssueConfig('dfp-rnd-nto-pba-b50k', ['--search', 'astar(merge_and_shrink(merge_strategy=merge_dfp(atomic_ts_order=random,product_ts_order=new_to_old,atomic_before_product=false),shrink_strategy=shrink_bisimulation(greedy=false),label_reduction=exact(before_shrinking=true,before_merging=false),max_states=50000,threshold_before_merge=1))']), - #IssueConfig('dfp-rnd-rnd-pba-b50k', ['--search', 
'astar(merge_and_shrink(merge_strategy=merge_dfp(atomic_ts_order=random,product_ts_order=random,atomic_before_product=false),shrink_strategy=shrink_bisimulation(greedy=false),label_reduction=exact(before_shrinking=true,before_merging=false),max_states=50000,threshold_before_merge=1))']), - } - - exp = IssueExperiment( - benchmarks_dir=benchmarks_dir, - suite=suite, - revisions=revisions, - configs=configs, - test_suite=['depot:p01.pddl'], - processes=4, - email='silvan.sievers@unibas.ch', - ) - exp.add_resource('ms_parser', 'ms-parser.py', dest='ms-parser.py') - exp.add_command('ms-parser', ['ms_parser']) - - # planner outcome attributes - perfect_heuristic = Attribute('perfect_heuristic', absolute=True, min_wins=False) - proved_unsolvability = Attribute('proved_unsolvability', absolute=True, min_wins=False) - actual_search_time = Attribute('actual_search_time', absolute=False, min_wins=True, functions=[gm]) - - # m&s attributes - ms_construction_time = Attribute('ms_construction_time', absolute=False, min_wins=True, functions=[gm]) - ms_abstraction_constructed = Attribute('ms_abstraction_constructed', absolute=True, min_wins=False) - ms_final_size = Attribute('ms_final_size', absolute=False, min_wins=True) - ms_out_of_memory = Attribute('ms_out_of_memory', absolute=True, min_wins=True) - ms_out_of_time = Attribute('ms_out_of_time', absolute=True, min_wins=True) - search_out_of_memory = Attribute('search_out_of_memory', absolute=True, min_wins=True) - search_out_of_time = Attribute('search_out_of_time', absolute=True, min_wins=True) - - extra_attributes = [ - perfect_heuristic, - proved_unsolvability, - actual_search_time, - - ms_construction_time, - ms_abstraction_constructed, - ms_final_size, - ms_out_of_memory, - ms_out_of_time, - search_out_of_memory, - search_out_of_time, - ] - attributes = exp.DEFAULT_TABLE_ATTRIBUTES - attributes.extend(extra_attributes) - - exp.add_fetcher('data/issue644-v1-dfp-tiebreaking-eval', - filter_config=[ - 
'issue644-base-dfp-reg-otn-abp-b50k', - 'issue644-base-dfp-reg-nto-abp-b50k', - 'issue644-base-dfp-reg-rnd-abp-b50k', - 'issue644-base-dfp-inv-otn-abp-b50k', - 'issue644-base-dfp-inv-nto-abp-b50k', - 'issue644-base-dfp-inv-rnd-abp-b50k', - 'issue644-base-dfp-rnd-otn-abp-b50k', - 'issue644-base-dfp-rnd-nto-abp-b50k', - 'issue644-base-dfp-rnd-rnd-abp-b50k', - 'issue644-v1-dfp-reg-otn-abp-b50k', - 'issue644-v1-dfp-reg-nto-abp-b50k', - 'issue644-v1-dfp-reg-rnd-abp-b50k', - 'issue644-v1-dfp-inv-otn-abp-b50k', - 'issue644-v1-dfp-inv-nto-abp-b50k', - 'issue644-v1-dfp-inv-rnd-abp-b50k', - 'issue644-v1-dfp-rnd-otn-abp-b50k', - 'issue644-v1-dfp-rnd-nto-abp-b50k', - 'issue644-v1-dfp-rnd-rnd-abp-b50k', - ]) - - exp.add_comparison_table_step() - - #if matplotlib: - #for attribute in ["memory", "total_time"]: - #for config in configs: - #exp.add_report( - #RelativeScatterPlotReport( - #attributes=[attribute], - #filter_config=["{}-{}".format(rev, config.nick) for rev in revisions], - #get_category=lambda run1, run2: run1.get("domain"), - #), - #outfile="{}-{}-{}.png".format(exp.name, attribute, config.nick) - #) - - exp() - -main(revisions=['issue644-base', 'issue644-v1']) diff --git a/experiments/issue644/v1-dfp-tiebreaking-pba-report.py b/experiments/issue644/v1-dfp-tiebreaking-pba-report.py deleted file mode 100755 index b4545a57a2..0000000000 --- a/experiments/issue644/v1-dfp-tiebreaking-pba-report.py +++ /dev/null @@ -1,121 +0,0 @@ -#! 
/usr/bin/env python -# -*- coding: utf-8 -*- - -import os -import suites -from lab.reports import Attribute, gm - -from common_setup import IssueConfig, IssueExperiment -try: - from relativescatter import RelativeScatterPlotReport - matplotlib = True -except ImportError: - print 'matplotlib not availabe, scatter plots not available' - matplotlib = False - -def main(revisions=None): - benchmarks_dir=os.path.expanduser('~/repos/downward/benchmarks') - suite=suites.suite_optimal_strips() - - configs = { - #IssueConfig('dfp-reg-otn-abp-b50k', ['--search', 'astar(merge_and_shrink(merge_strategy=merge_dfp(atomic_ts_order=regular,product_ts_order=old_to_new,atomic_before_product=true),shrink_strategy=shrink_bisimulation(greedy=false),label_reduction=exact(before_shrinking=true,before_merging=false),max_states=50000,threshold_before_merge=1))']), - #IssueConfig('dfp-reg-nto-abp-b50k', ['--search', 'astar(merge_and_shrink(merge_strategy=merge_dfp(atomic_ts_order=regular,product_ts_order=new_to_old,atomic_before_product=true),shrink_strategy=shrink_bisimulation(greedy=false),label_reduction=exact(before_shrinking=true,before_merging=false),max_states=50000,threshold_before_merge=1))']), - #IssueConfig('dfp-reg-rnd-abp-b50k', ['--search', 'astar(merge_and_shrink(merge_strategy=merge_dfp(atomic_ts_order=regular,product_ts_order=random,atomic_before_product=true),shrink_strategy=shrink_bisimulation(greedy=false),label_reduction=exact(before_shrinking=true,before_merging=false),max_states=50000,threshold_before_merge=1))']), - #IssueConfig('dfp-inv-otn-abp-b50k', ['--search', 'astar(merge_and_shrink(merge_strategy=merge_dfp(atomic_ts_order=inverse,product_ts_order=old_to_new,atomic_before_product=true),shrink_strategy=shrink_bisimulation(greedy=false),label_reduction=exact(before_shrinking=true,before_merging=false),max_states=50000,threshold_before_merge=1))']), - #IssueConfig('dfp-inv-nto-abp-b50k', ['--search', 
'astar(merge_and_shrink(merge_strategy=merge_dfp(atomic_ts_order=inverse,product_ts_order=new_to_old,atomic_before_product=true),shrink_strategy=shrink_bisimulation(greedy=false),label_reduction=exact(before_shrinking=true,before_merging=false),max_states=50000,threshold_before_merge=1))']), - #IssueConfig('dfp-inv-rnd-abp-b50k', ['--search', 'astar(merge_and_shrink(merge_strategy=merge_dfp(atomic_ts_order=inverse,product_ts_order=random,atomic_before_product=true),shrink_strategy=shrink_bisimulation(greedy=false),label_reduction=exact(before_shrinking=true,before_merging=false),max_states=50000,threshold_before_merge=1))']), - #IssueConfig('dfp-rnd-otn-abp-b50k', ['--search', 'astar(merge_and_shrink(merge_strategy=merge_dfp(atomic_ts_order=random,product_ts_order=old_to_new,atomic_before_product=true),shrink_strategy=shrink_bisimulation(greedy=false),label_reduction=exact(before_shrinking=true,before_merging=false),max_states=50000,threshold_before_merge=1))']), - #IssueConfig('dfp-rnd-nto-abp-b50k', ['--search', 'astar(merge_and_shrink(merge_strategy=merge_dfp(atomic_ts_order=random,product_ts_order=new_to_old,atomic_before_product=true),shrink_strategy=shrink_bisimulation(greedy=false),label_reduction=exact(before_shrinking=true,before_merging=false),max_states=50000,threshold_before_merge=1))']), - #IssueConfig('dfp-rnd-rnd-abp-b50k', ['--search', 'astar(merge_and_shrink(merge_strategy=merge_dfp(atomic_ts_order=random,product_ts_order=random,atomic_before_product=true),shrink_strategy=shrink_bisimulation(greedy=false),label_reduction=exact(before_shrinking=true,before_merging=false),max_states=50000,threshold_before_merge=1))']), - IssueConfig('dfp-reg-otn-pba-b50k', ['--search', 
'astar(merge_and_shrink(merge_strategy=merge_dfp(atomic_ts_order=regular,product_ts_order=old_to_new,atomic_before_product=false),shrink_strategy=shrink_bisimulation(greedy=false),label_reduction=exact(before_shrinking=true,before_merging=false),max_states=50000,threshold_before_merge=1))']), - IssueConfig('dfp-reg-nto-pba-b50k', ['--search', 'astar(merge_and_shrink(merge_strategy=merge_dfp(atomic_ts_order=regular,product_ts_order=new_to_old,atomic_before_product=false),shrink_strategy=shrink_bisimulation(greedy=false),label_reduction=exact(before_shrinking=true,before_merging=false),max_states=50000,threshold_before_merge=1))']), - IssueConfig('dfp-reg-rnd-pba-b50k', ['--search', 'astar(merge_and_shrink(merge_strategy=merge_dfp(atomic_ts_order=regular,product_ts_order=random,atomic_before_product=false),shrink_strategy=shrink_bisimulation(greedy=false),label_reduction=exact(before_shrinking=true,before_merging=false),max_states=50000,threshold_before_merge=1))']), - IssueConfig('dfp-inv-otn-pba-b50k', ['--search', 'astar(merge_and_shrink(merge_strategy=merge_dfp(atomic_ts_order=inverse,product_ts_order=old_to_new,atomic_before_product=false),shrink_strategy=shrink_bisimulation(greedy=false),label_reduction=exact(before_shrinking=true,before_merging=false),max_states=50000,threshold_before_merge=1))']), - IssueConfig('dfp-inv-nto-pba-b50k', ['--search', 'astar(merge_and_shrink(merge_strategy=merge_dfp(atomic_ts_order=inverse,product_ts_order=new_to_old,atomic_before_product=false),shrink_strategy=shrink_bisimulation(greedy=false),label_reduction=exact(before_shrinking=true,before_merging=false),max_states=50000,threshold_before_merge=1))']), - IssueConfig('dfp-inv-rnd-pba-b50k', ['--search', 
'astar(merge_and_shrink(merge_strategy=merge_dfp(atomic_ts_order=inverse,product_ts_order=random,atomic_before_product=false),shrink_strategy=shrink_bisimulation(greedy=false),label_reduction=exact(before_shrinking=true,before_merging=false),max_states=50000,threshold_before_merge=1))']), - IssueConfig('dfp-rnd-otn-pba-b50k', ['--search', 'astar(merge_and_shrink(merge_strategy=merge_dfp(atomic_ts_order=random,product_ts_order=old_to_new,atomic_before_product=false),shrink_strategy=shrink_bisimulation(greedy=false),label_reduction=exact(before_shrinking=true,before_merging=false),max_states=50000,threshold_before_merge=1))']), - IssueConfig('dfp-rnd-nto-pba-b50k', ['--search', 'astar(merge_and_shrink(merge_strategy=merge_dfp(atomic_ts_order=random,product_ts_order=new_to_old,atomic_before_product=false),shrink_strategy=shrink_bisimulation(greedy=false),label_reduction=exact(before_shrinking=true,before_merging=false),max_states=50000,threshold_before_merge=1))']), - IssueConfig('dfp-rnd-rnd-pba-b50k', ['--search', 'astar(merge_and_shrink(merge_strategy=merge_dfp(atomic_ts_order=random,product_ts_order=random,atomic_before_product=false),shrink_strategy=shrink_bisimulation(greedy=false),label_reduction=exact(before_shrinking=true,before_merging=false),max_states=50000,threshold_before_merge=1))']), - } - - exp = IssueExperiment( - benchmarks_dir=benchmarks_dir, - suite=suite, - revisions=revisions, - configs=configs, - test_suite=['depot:p01.pddl'], - processes=4, - email='silvan.sievers@unibas.ch', - ) - exp.add_resource('ms_parser', 'ms-parser.py', dest='ms-parser.py') - exp.add_command('ms-parser', ['ms_parser']) - - # planner outcome attributes - perfect_heuristic = Attribute('perfect_heuristic', absolute=True, min_wins=False) - proved_unsolvability = Attribute('proved_unsolvability', absolute=True, min_wins=False) - actual_search_time = Attribute('actual_search_time', absolute=False, min_wins=True, functions=[gm]) - - # m&s attributes - ms_construction_time = 
Attribute('ms_construction_time', absolute=False, min_wins=True, functions=[gm]) - ms_abstraction_constructed = Attribute('ms_abstraction_constructed', absolute=True, min_wins=False) - ms_final_size = Attribute('ms_final_size', absolute=False, min_wins=True) - ms_out_of_memory = Attribute('ms_out_of_memory', absolute=True, min_wins=True) - ms_out_of_time = Attribute('ms_out_of_time', absolute=True, min_wins=True) - search_out_of_memory = Attribute('search_out_of_memory', absolute=True, min_wins=True) - search_out_of_time = Attribute('search_out_of_time', absolute=True, min_wins=True) - - extra_attributes = [ - perfect_heuristic, - proved_unsolvability, - actual_search_time, - - ms_construction_time, - ms_abstraction_constructed, - ms_final_size, - ms_out_of_memory, - ms_out_of_time, - search_out_of_memory, - search_out_of_time, - ] - attributes = exp.DEFAULT_TABLE_ATTRIBUTES - attributes.extend(extra_attributes) - - exp.add_fetcher('data/issue644-v1-dfp-tiebreaking-eval', - filter_config=[ - 'issue644-base-dfp-reg-otn-pba-b50k', - 'issue644-base-dfp-reg-nto-pba-b50k', - 'issue644-base-dfp-reg-rnd-pba-b50k', - 'issue644-base-dfp-inv-otn-pba-b50k', - 'issue644-base-dfp-inv-nto-pba-b50k', - 'issue644-base-dfp-inv-rnd-pba-b50k', - 'issue644-base-dfp-rnd-otn-pba-b50k', - 'issue644-base-dfp-rnd-nto-pba-b50k', - 'issue644-base-dfp-rnd-rnd-pba-b50k', - 'issue644-v1-dfp-reg-otn-pba-b50k', - 'issue644-v1-dfp-reg-nto-pba-b50k', - 'issue644-v1-dfp-reg-rnd-pba-b50k', - 'issue644-v1-dfp-inv-otn-pba-b50k', - 'issue644-v1-dfp-inv-nto-pba-b50k', - 'issue644-v1-dfp-inv-rnd-pba-b50k', - 'issue644-v1-dfp-rnd-otn-pba-b50k', - 'issue644-v1-dfp-rnd-nto-pba-b50k', - 'issue644-v1-dfp-rnd-rnd-pba-b50k', - ]) - - exp.add_comparison_table_step() - - #if matplotlib: - #for attribute in ["memory", "total_time"]: - #for config in configs: - #exp.add_report( - #RelativeScatterPlotReport( - #attributes=[attribute], - #filter_config=["{}-{}".format(rev, config.nick) for rev in revisions], - 
#get_category=lambda run1, run2: run1.get("domain"), - #), - #outfile="{}-{}-{}.png".format(exp.name, attribute, config.nick) - #) - - exp() - -main(revisions=['issue644-base', 'issue644-v1']) diff --git a/experiments/issue644/v1-dfp-tiebreaking.py b/experiments/issue644/v1-dfp-tiebreaking.py deleted file mode 100755 index f34c79d112..0000000000 --- a/experiments/issue644/v1-dfp-tiebreaking.py +++ /dev/null @@ -1,99 +0,0 @@ -#! /usr/bin/env python -# -*- coding: utf-8 -*- - -import os -import suites -from lab.reports import Attribute, gm - -from common_setup import IssueConfig, IssueExperiment -try: - from relativescatter import RelativeScatterPlotReport - matplotlib = True -except ImportError: - print 'matplotlib not availabe, scatter plots not available' - matplotlib = False - -def main(revisions=None): - benchmarks_dir=os.path.expanduser('~/repos/downward/benchmarks') - suite=suites.suite_optimal_strips() - - configs = { - IssueConfig('dfp-reg-otn-abp-b50k', ['--search', 'astar(merge_and_shrink(merge_strategy=merge_dfp(atomic_ts_order=regular,product_ts_order=old_to_new,atomic_before_product=true),shrink_strategy=shrink_bisimulation(greedy=false),label_reduction=exact(before_shrinking=true,before_merging=false),max_states=50000,threshold_before_merge=1))']), - IssueConfig('dfp-reg-nto-abp-b50k', ['--search', 'astar(merge_and_shrink(merge_strategy=merge_dfp(atomic_ts_order=regular,product_ts_order=new_to_old,atomic_before_product=true),shrink_strategy=shrink_bisimulation(greedy=false),label_reduction=exact(before_shrinking=true,before_merging=false),max_states=50000,threshold_before_merge=1))']), - IssueConfig('dfp-reg-rnd-abp-b50k', ['--search', 'astar(merge_and_shrink(merge_strategy=merge_dfp(atomic_ts_order=regular,product_ts_order=random,atomic_before_product=true),shrink_strategy=shrink_bisimulation(greedy=false),label_reduction=exact(before_shrinking=true,before_merging=false),max_states=50000,threshold_before_merge=1))']), - 
IssueConfig('dfp-inv-otn-abp-b50k', ['--search', 'astar(merge_and_shrink(merge_strategy=merge_dfp(atomic_ts_order=inverse,product_ts_order=old_to_new,atomic_before_product=true),shrink_strategy=shrink_bisimulation(greedy=false),label_reduction=exact(before_shrinking=true,before_merging=false),max_states=50000,threshold_before_merge=1))']), - IssueConfig('dfp-inv-nto-abp-b50k', ['--search', 'astar(merge_and_shrink(merge_strategy=merge_dfp(atomic_ts_order=inverse,product_ts_order=new_to_old,atomic_before_product=true),shrink_strategy=shrink_bisimulation(greedy=false),label_reduction=exact(before_shrinking=true,before_merging=false),max_states=50000,threshold_before_merge=1))']), - IssueConfig('dfp-inv-rnd-abp-b50k', ['--search', 'astar(merge_and_shrink(merge_strategy=merge_dfp(atomic_ts_order=inverse,product_ts_order=random,atomic_before_product=true),shrink_strategy=shrink_bisimulation(greedy=false),label_reduction=exact(before_shrinking=true,before_merging=false),max_states=50000,threshold_before_merge=1))']), - IssueConfig('dfp-rnd-otn-abp-b50k', ['--search', 'astar(merge_and_shrink(merge_strategy=merge_dfp(atomic_ts_order=random,product_ts_order=old_to_new,atomic_before_product=true),shrink_strategy=shrink_bisimulation(greedy=false),label_reduction=exact(before_shrinking=true,before_merging=false),max_states=50000,threshold_before_merge=1))']), - IssueConfig('dfp-rnd-nto-abp-b50k', ['--search', 'astar(merge_and_shrink(merge_strategy=merge_dfp(atomic_ts_order=random,product_ts_order=new_to_old,atomic_before_product=true),shrink_strategy=shrink_bisimulation(greedy=false),label_reduction=exact(before_shrinking=true,before_merging=false),max_states=50000,threshold_before_merge=1))']), - IssueConfig('dfp-rnd-rnd-abp-b50k', ['--search', 
'astar(merge_and_shrink(merge_strategy=merge_dfp(atomic_ts_order=random,product_ts_order=random,atomic_before_product=true),shrink_strategy=shrink_bisimulation(greedy=false),label_reduction=exact(before_shrinking=true,before_merging=false),max_states=50000,threshold_before_merge=1))']), - IssueConfig('dfp-reg-otn-pba-b50k', ['--search', 'astar(merge_and_shrink(merge_strategy=merge_dfp(atomic_ts_order=regular,product_ts_order=old_to_new,atomic_before_product=false),shrink_strategy=shrink_bisimulation(greedy=false),label_reduction=exact(before_shrinking=true,before_merging=false),max_states=50000,threshold_before_merge=1))']), - IssueConfig('dfp-reg-nto-pba-b50k', ['--search', 'astar(merge_and_shrink(merge_strategy=merge_dfp(atomic_ts_order=regular,product_ts_order=new_to_old,atomic_before_product=false),shrink_strategy=shrink_bisimulation(greedy=false),label_reduction=exact(before_shrinking=true,before_merging=false),max_states=50000,threshold_before_merge=1))']), - IssueConfig('dfp-reg-rnd-pba-b50k', ['--search', 'astar(merge_and_shrink(merge_strategy=merge_dfp(atomic_ts_order=regular,product_ts_order=random,atomic_before_product=false),shrink_strategy=shrink_bisimulation(greedy=false),label_reduction=exact(before_shrinking=true,before_merging=false),max_states=50000,threshold_before_merge=1))']), - IssueConfig('dfp-inv-otn-pba-b50k', ['--search', 'astar(merge_and_shrink(merge_strategy=merge_dfp(atomic_ts_order=inverse,product_ts_order=old_to_new,atomic_before_product=false),shrink_strategy=shrink_bisimulation(greedy=false),label_reduction=exact(before_shrinking=true,before_merging=false),max_states=50000,threshold_before_merge=1))']), - IssueConfig('dfp-inv-nto-pba-b50k', ['--search', 
'astar(merge_and_shrink(merge_strategy=merge_dfp(atomic_ts_order=inverse,product_ts_order=new_to_old,atomic_before_product=false),shrink_strategy=shrink_bisimulation(greedy=false),label_reduction=exact(before_shrinking=true,before_merging=false),max_states=50000,threshold_before_merge=1))']), - IssueConfig('dfp-inv-rnd-pba-b50k', ['--search', 'astar(merge_and_shrink(merge_strategy=merge_dfp(atomic_ts_order=inverse,product_ts_order=random,atomic_before_product=false),shrink_strategy=shrink_bisimulation(greedy=false),label_reduction=exact(before_shrinking=true,before_merging=false),max_states=50000,threshold_before_merge=1))']), - IssueConfig('dfp-rnd-otn-pba-b50k', ['--search', 'astar(merge_and_shrink(merge_strategy=merge_dfp(atomic_ts_order=random,product_ts_order=old_to_new,atomic_before_product=false),shrink_strategy=shrink_bisimulation(greedy=false),label_reduction=exact(before_shrinking=true,before_merging=false),max_states=50000,threshold_before_merge=1))']), - IssueConfig('dfp-rnd-nto-pba-b50k', ['--search', 'astar(merge_and_shrink(merge_strategy=merge_dfp(atomic_ts_order=random,product_ts_order=new_to_old,atomic_before_product=false),shrink_strategy=shrink_bisimulation(greedy=false),label_reduction=exact(before_shrinking=true,before_merging=false),max_states=50000,threshold_before_merge=1))']), - IssueConfig('dfp-rnd-rnd-pba-b50k', ['--search', 'astar(merge_and_shrink(merge_strategy=merge_dfp(atomic_ts_order=random,product_ts_order=random,atomic_before_product=false),shrink_strategy=shrink_bisimulation(greedy=false),label_reduction=exact(before_shrinking=true,before_merging=false),max_states=50000,threshold_before_merge=1))']), - } - - exp = IssueExperiment( - benchmarks_dir=benchmarks_dir, - suite=suite, - revisions=revisions, - configs=configs, - test_suite=['depot:p01.pddl'], - processes=4, - email='silvan.sievers@unibas.ch', - ) - exp.add_resource('ms_parser', 'ms-parser.py', dest='ms-parser.py') - exp.add_command('ms-parser', ['ms_parser']) - - # 
planner outcome attributes - perfect_heuristic = Attribute('perfect_heuristic', absolute=True, min_wins=False) - proved_unsolvability = Attribute('proved_unsolvability', absolute=True, min_wins=False) - actual_search_time = Attribute('actual_search_time', absolute=False, min_wins=True, functions=[gm]) - - # m&s attributes - ms_construction_time = Attribute('ms_construction_time', absolute=False, min_wins=True, functions=[gm]) - ms_abstraction_constructed = Attribute('ms_abstraction_constructed', absolute=True, min_wins=False) - ms_final_size = Attribute('ms_final_size', absolute=False, min_wins=True) - ms_out_of_memory = Attribute('ms_out_of_memory', absolute=True, min_wins=True) - ms_out_of_time = Attribute('ms_out_of_time', absolute=True, min_wins=True) - search_out_of_memory = Attribute('search_out_of_memory', absolute=True, min_wins=True) - search_out_of_time = Attribute('search_out_of_time', absolute=True, min_wins=True) - - extra_attributes = [ - perfect_heuristic, - proved_unsolvability, - actual_search_time, - - ms_construction_time, - ms_abstraction_constructed, - ms_final_size, - ms_out_of_memory, - ms_out_of_time, - search_out_of_memory, - search_out_of_time, - ] - attributes = exp.DEFAULT_TABLE_ATTRIBUTES - attributes.extend(extra_attributes) - - exp.add_comparison_table_step() - - #if matplotlib: - #for attribute in ["memory", "total_time"]: - #for config in configs: - #exp.add_report( - #RelativeScatterPlotReport( - #attributes=[attribute], - #filter_config=["{}-{}".format(rev, config.nick) for rev in revisions], - #get_category=lambda run1, run2: run1.get("domain"), - #), - #outfile="{}-{}-{}.png".format(exp.name, attribute, config.nick) - #) - - exp() - -main(revisions=['issue644-base', 'issue644-v1']) diff --git a/experiments/issue644/v1.py b/experiments/issue644/v1.py deleted file mode 100755 index 3f1365d7ff..0000000000 --- a/experiments/issue644/v1.py +++ /dev/null @@ -1,90 +0,0 @@ -#! 
/usr/bin/env python -# -*- coding: utf-8 -*- - -import os -import suites -from lab.reports import Attribute, gm - -from common_setup import IssueConfig, IssueExperiment -try: - from relativescatter import RelativeScatterPlotReport - matplotlib = True -except ImportError: - print 'matplotlib not availabe, scatter plots not available' - matplotlib = False - -def main(revisions=None): - benchmarks_dir=os.path.expanduser('~/repos/downward/benchmarks') - suite=suites.suite_optimal_strips() - - configs = { - IssueConfig('rl-b50k', ['--search', 'astar(merge_and_shrink(merge_strategy=merge_linear(variable_order=reverse_level),shrink_strategy=shrink_bisimulation(greedy=false),label_reduction=exact(before_shrinking=true,before_merging=false),max_states=50000,threshold_before_merge=1))']), - IssueConfig('cggl-b50k', ['--search', 'astar(merge_and_shrink(merge_strategy=merge_linear(variable_order=cg_goal_level),shrink_strategy=shrink_bisimulation(greedy=false),label_reduction=exact(before_shrinking=true,before_merging=false),max_states=50000,threshold_before_merge=1))']), - IssueConfig('dfp-b50k', ['--search', 'astar(merge_and_shrink(merge_strategy=merge_dfp,shrink_strategy=shrink_bisimulation(greedy=false),label_reduction=exact(before_shrinking=true,before_merging=false),max_states=50000,threshold_before_merge=1))']), - IssueConfig('rl-ginf', ['--search', 'astar(merge_and_shrink(merge_strategy=merge_linear(variable_order=reverse_level),shrink_strategy=shrink_bisimulation(greedy=true),label_reduction=exact(before_shrinking=true,before_merging=false),max_states=infinity,threshold_before_merge=1))']), - IssueConfig('cggl-ginf', ['--search', 'astar(merge_and_shrink(merge_strategy=merge_linear(variable_order=cg_goal_level),shrink_strategy=shrink_bisimulation(greedy=true),label_reduction=exact(before_shrinking=true,before_merging=false),max_states=infinity,threshold_before_merge=1))']), - IssueConfig('dfp-ginf', ['--search', 
'astar(merge_and_shrink(merge_strategy=merge_dfp,shrink_strategy=shrink_bisimulation(greedy=true),label_reduction=exact(before_shrinking=true,before_merging=false),max_states=infinity,threshold_before_merge=1))']), - IssueConfig('rl-f50k', ['--search', 'astar(merge_and_shrink(merge_strategy=merge_linear(variable_order=reverse_level),shrink_strategy=shrink_fh(),label_reduction=exact(before_shrinking=false,before_merging=true),max_states=50000))']), - IssueConfig('cggl-f50k', ['--search', 'astar(merge_and_shrink(merge_strategy=merge_linear(variable_order=cg_goal_level),shrink_strategy=shrink_fh(),label_reduction=exact(before_shrinking=false,before_merging=true),max_states=50000))']), - IssueConfig('dfp-f50k', ['--search', 'astar(merge_and_shrink(merge_strategy=merge_dfp,shrink_strategy=shrink_fh(),label_reduction=exact(before_shrinking=false,before_merging=true),max_states=50000))']), - } - - exp = IssueExperiment( - benchmarks_dir=benchmarks_dir, - suite=suite, - revisions=revisions, - configs=configs, - test_suite=['depot:p01.pddl'], - processes=4, - email='silvan.sievers@unibas.ch', - ) - exp.add_resource('ms_parser', 'ms-parser.py', dest='ms-parser.py') - exp.add_command('ms-parser', ['ms_parser']) - - # planner outcome attributes - perfect_heuristic = Attribute('perfect_heuristic', absolute=True, min_wins=False) - proved_unsolvability = Attribute('proved_unsolvability', absolute=True, min_wins=False) - actual_search_time = Attribute('actual_search_time', absolute=False, min_wins=True, functions=[gm]) - - # m&s attributes - ms_construction_time = Attribute('ms_construction_time', absolute=False, min_wins=True, functions=[gm]) - ms_abstraction_constructed = Attribute('ms_abstraction_constructed', absolute=True, min_wins=False) - ms_final_size = Attribute('ms_final_size', absolute=False, min_wins=True) - ms_out_of_memory = Attribute('ms_out_of_memory', absolute=True, min_wins=True) - ms_out_of_time = Attribute('ms_out_of_time', absolute=True, min_wins=True) - 
search_out_of_memory = Attribute('search_out_of_memory', absolute=True, min_wins=True) - search_out_of_time = Attribute('search_out_of_time', absolute=True, min_wins=True) - - extra_attributes = [ - perfect_heuristic, - proved_unsolvability, - actual_search_time, - - ms_construction_time, - ms_abstraction_constructed, - ms_final_size, - ms_out_of_memory, - ms_out_of_time, - search_out_of_memory, - search_out_of_time, - ] - attributes = exp.DEFAULT_TABLE_ATTRIBUTES - attributes.extend(extra_attributes) - - exp.add_comparison_table_step() - - #if matplotlib: - #for attribute in ["memory", "total_time"]: - #for config in configs: - #exp.add_report( - #RelativeScatterPlotReport( - #attributes=[attribute], - #filter_config=["{}-{}".format(rev, config.nick) for rev in revisions], - #get_category=lambda run1, run2: run1.get("domain"), - #), - #outfile="{}-{}-{}.png".format(exp.name, attribute, config.nick) - #) - - exp() - -main(revisions=['issue644-base', 'issue644-v1']) diff --git a/experiments/issue644/v2-dfp-tiebreaking-abp-report.py b/experiments/issue644/v2-dfp-tiebreaking-abp-report.py deleted file mode 100755 index 9545482510..0000000000 --- a/experiments/issue644/v2-dfp-tiebreaking-abp-report.py +++ /dev/null @@ -1,121 +0,0 @@ -#! 
/usr/bin/env python -# -*- coding: utf-8 -*- - -import os -import suites -from lab.reports import Attribute, gm - -from common_setup import IssueConfig, IssueExperiment -try: - from relativescatter import RelativeScatterPlotReport - matplotlib = True -except ImportError: - print 'matplotlib not availabe, scatter plots not available' - matplotlib = False - -def main(revisions=None): - benchmarks_dir=os.path.expanduser('~/repos/downward/benchmarks') - suite=suites.suite_optimal_strips() - - configs = { - IssueConfig('dfp-reg-otn-abp-b50k', ['--search', 'astar(merge_and_shrink(merge_strategy=merge_dfp(atomic_ts_order=regular,product_ts_order=old_to_new,atomic_before_product=true),shrink_strategy=shrink_bisimulation(greedy=false),label_reduction=exact(before_shrinking=true,before_merging=false),max_states=50000,threshold_before_merge=1))']), - IssueConfig('dfp-reg-nto-abp-b50k', ['--search', 'astar(merge_and_shrink(merge_strategy=merge_dfp(atomic_ts_order=regular,product_ts_order=new_to_old,atomic_before_product=true),shrink_strategy=shrink_bisimulation(greedy=false),label_reduction=exact(before_shrinking=true,before_merging=false),max_states=50000,threshold_before_merge=1))']), - IssueConfig('dfp-reg-rnd-abp-b50k', ['--search', 'astar(merge_and_shrink(merge_strategy=merge_dfp(atomic_ts_order=regular,product_ts_order=random,atomic_before_product=true),shrink_strategy=shrink_bisimulation(greedy=false),label_reduction=exact(before_shrinking=true,before_merging=false),max_states=50000,threshold_before_merge=1))']), - IssueConfig('dfp-inv-otn-abp-b50k', ['--search', 'astar(merge_and_shrink(merge_strategy=merge_dfp(atomic_ts_order=inverse,product_ts_order=old_to_new,atomic_before_product=true),shrink_strategy=shrink_bisimulation(greedy=false),label_reduction=exact(before_shrinking=true,before_merging=false),max_states=50000,threshold_before_merge=1))']), - IssueConfig('dfp-inv-nto-abp-b50k', ['--search', 
'astar(merge_and_shrink(merge_strategy=merge_dfp(atomic_ts_order=inverse,product_ts_order=new_to_old,atomic_before_product=true),shrink_strategy=shrink_bisimulation(greedy=false),label_reduction=exact(before_shrinking=true,before_merging=false),max_states=50000,threshold_before_merge=1))']), - IssueConfig('dfp-inv-rnd-abp-b50k', ['--search', 'astar(merge_and_shrink(merge_strategy=merge_dfp(atomic_ts_order=inverse,product_ts_order=random,atomic_before_product=true),shrink_strategy=shrink_bisimulation(greedy=false),label_reduction=exact(before_shrinking=true,before_merging=false),max_states=50000,threshold_before_merge=1))']), - IssueConfig('dfp-rnd-otn-abp-b50k', ['--search', 'astar(merge_and_shrink(merge_strategy=merge_dfp(atomic_ts_order=random,product_ts_order=old_to_new,atomic_before_product=true),shrink_strategy=shrink_bisimulation(greedy=false),label_reduction=exact(before_shrinking=true,before_merging=false),max_states=50000,threshold_before_merge=1))']), - IssueConfig('dfp-rnd-nto-abp-b50k', ['--search', 'astar(merge_and_shrink(merge_strategy=merge_dfp(atomic_ts_order=random,product_ts_order=new_to_old,atomic_before_product=true),shrink_strategy=shrink_bisimulation(greedy=false),label_reduction=exact(before_shrinking=true,before_merging=false),max_states=50000,threshold_before_merge=1))']), - IssueConfig('dfp-rnd-rnd-abp-b50k', ['--search', 'astar(merge_and_shrink(merge_strategy=merge_dfp(atomic_ts_order=random,product_ts_order=random,atomic_before_product=true),shrink_strategy=shrink_bisimulation(greedy=false),label_reduction=exact(before_shrinking=true,before_merging=false),max_states=50000,threshold_before_merge=1))']), - #IssueConfig('dfp-reg-otn-pba-b50k', ['--search', 
'astar(merge_and_shrink(merge_strategy=merge_dfp(atomic_ts_order=regular,product_ts_order=old_to_new,atomic_before_product=false),shrink_strategy=shrink_bisimulation(greedy=false),label_reduction=exact(before_shrinking=true,before_merging=false),max_states=50000,threshold_before_merge=1))']), - #IssueConfig('dfp-reg-nto-pba-b50k', ['--search', 'astar(merge_and_shrink(merge_strategy=merge_dfp(atomic_ts_order=regular,product_ts_order=new_to_old,atomic_before_product=false),shrink_strategy=shrink_bisimulation(greedy=false),label_reduction=exact(before_shrinking=true,before_merging=false),max_states=50000,threshold_before_merge=1))']), - #IssueConfig('dfp-reg-rnd-pba-b50k', ['--search', 'astar(merge_and_shrink(merge_strategy=merge_dfp(atomic_ts_order=regular,product_ts_order=random,atomic_before_product=false),shrink_strategy=shrink_bisimulation(greedy=false),label_reduction=exact(before_shrinking=true,before_merging=false),max_states=50000,threshold_before_merge=1))']), - #IssueConfig('dfp-inv-otn-pba-b50k', ['--search', 'astar(merge_and_shrink(merge_strategy=merge_dfp(atomic_ts_order=inverse,product_ts_order=old_to_new,atomic_before_product=false),shrink_strategy=shrink_bisimulation(greedy=false),label_reduction=exact(before_shrinking=true,before_merging=false),max_states=50000,threshold_before_merge=1))']), - #IssueConfig('dfp-inv-nto-pba-b50k', ['--search', 'astar(merge_and_shrink(merge_strategy=merge_dfp(atomic_ts_order=inverse,product_ts_order=new_to_old,atomic_before_product=false),shrink_strategy=shrink_bisimulation(greedy=false),label_reduction=exact(before_shrinking=true,before_merging=false),max_states=50000,threshold_before_merge=1))']), - #IssueConfig('dfp-inv-rnd-pba-b50k', ['--search', 
'astar(merge_and_shrink(merge_strategy=merge_dfp(atomic_ts_order=inverse,product_ts_order=random,atomic_before_product=false),shrink_strategy=shrink_bisimulation(greedy=false),label_reduction=exact(before_shrinking=true,before_merging=false),max_states=50000,threshold_before_merge=1))']), - #IssueConfig('dfp-rnd-otn-pba-b50k', ['--search', 'astar(merge_and_shrink(merge_strategy=merge_dfp(atomic_ts_order=random,product_ts_order=old_to_new,atomic_before_product=false),shrink_strategy=shrink_bisimulation(greedy=false),label_reduction=exact(before_shrinking=true,before_merging=false),max_states=50000,threshold_before_merge=1))']), - #IssueConfig('dfp-rnd-nto-pba-b50k', ['--search', 'astar(merge_and_shrink(merge_strategy=merge_dfp(atomic_ts_order=random,product_ts_order=new_to_old,atomic_before_product=false),shrink_strategy=shrink_bisimulation(greedy=false),label_reduction=exact(before_shrinking=true,before_merging=false),max_states=50000,threshold_before_merge=1))']), - #IssueConfig('dfp-rnd-rnd-pba-b50k', ['--search', 'astar(merge_and_shrink(merge_strategy=merge_dfp(atomic_ts_order=random,product_ts_order=random,atomic_before_product=false),shrink_strategy=shrink_bisimulation(greedy=false),label_reduction=exact(before_shrinking=true,before_merging=false),max_states=50000,threshold_before_merge=1))']), - } - - exp = IssueExperiment( - benchmarks_dir=benchmarks_dir, - suite=suite, - revisions=revisions, - configs=configs, - test_suite=['depot:p01.pddl'], - processes=4, - email='silvan.sievers@unibas.ch', - ) - exp.add_resource('ms_parser', 'ms-parser.py', dest='ms-parser.py') - exp.add_command('ms-parser', ['ms_parser']) - - # planner outcome attributes - perfect_heuristic = Attribute('perfect_heuristic', absolute=True, min_wins=False) - proved_unsolvability = Attribute('proved_unsolvability', absolute=True, min_wins=False) - actual_search_time = Attribute('actual_search_time', absolute=False, min_wins=True, functions=[gm]) - - # m&s attributes - ms_construction_time = 
Attribute('ms_construction_time', absolute=False, min_wins=True, functions=[gm]) - ms_abstraction_constructed = Attribute('ms_abstraction_constructed', absolute=True, min_wins=False) - ms_final_size = Attribute('ms_final_size', absolute=False, min_wins=True) - ms_out_of_memory = Attribute('ms_out_of_memory', absolute=True, min_wins=True) - ms_out_of_time = Attribute('ms_out_of_time', absolute=True, min_wins=True) - search_out_of_memory = Attribute('search_out_of_memory', absolute=True, min_wins=True) - search_out_of_time = Attribute('search_out_of_time', absolute=True, min_wins=True) - - extra_attributes = [ - perfect_heuristic, - proved_unsolvability, - actual_search_time, - - ms_construction_time, - ms_abstraction_constructed, - ms_final_size, - ms_out_of_memory, - ms_out_of_time, - search_out_of_memory, - search_out_of_time, - ] - attributes = exp.DEFAULT_TABLE_ATTRIBUTES - attributes.extend(extra_attributes) - - exp.add_fetcher('data/issue644-v2-dfp-tiebreaking-eval', - filter_config=[ - 'issue644-v1-dfp-reg-otn-abp-b50k', - 'issue644-v1-dfp-reg-nto-abp-b50k', - 'issue644-v1-dfp-reg-rnd-abp-b50k', - 'issue644-v1-dfp-inv-otn-abp-b50k', - 'issue644-v1-dfp-inv-nto-abp-b50k', - 'issue644-v1-dfp-inv-rnd-abp-b50k', - 'issue644-v1-dfp-rnd-otn-abp-b50k', - 'issue644-v1-dfp-rnd-nto-abp-b50k', - 'issue644-v1-dfp-rnd-rnd-abp-b50k', - 'issue644-v2-dfp-reg-otn-abp-b50k', - 'issue644-v2-dfp-reg-nto-abp-b50k', - 'issue644-v2-dfp-reg-rnd-abp-b50k', - 'issue644-v2-dfp-inv-otn-abp-b50k', - 'issue644-v2-dfp-inv-nto-abp-b50k', - 'issue644-v2-dfp-inv-rnd-abp-b50k', - 'issue644-v2-dfp-rnd-otn-abp-b50k', - 'issue644-v2-dfp-rnd-nto-abp-b50k', - 'issue644-v2-dfp-rnd-rnd-abp-b50k', - ]) - - exp.add_comparison_table_step() - - #if matplotlib: - #for attribute in ["memory", "total_time"]: - #for config in configs: - #exp.add_report( - #RelativeScatterPlotReport( - #attributes=[attribute], - #filter_config=["{}-{}".format(rev, config.nick) for rev in revisions], - #get_category=lambda 
run1, run2: run1.get("domain"), - #), - #outfile="{}-{}-{}.png".format(exp.name, attribute, config.nick) - #) - - exp() - -main(revisions=['issue644-v1', 'issue644-v2']) diff --git a/experiments/issue644/v2-dfp-tiebreaking-pba-report.py b/experiments/issue644/v2-dfp-tiebreaking-pba-report.py deleted file mode 100755 index 548f7a4080..0000000000 --- a/experiments/issue644/v2-dfp-tiebreaking-pba-report.py +++ /dev/null @@ -1,121 +0,0 @@ -#! /usr/bin/env python -# -*- coding: utf-8 -*- - -import os -import suites -from lab.reports import Attribute, gm - -from common_setup import IssueConfig, IssueExperiment -try: - from relativescatter import RelativeScatterPlotReport - matplotlib = True -except ImportError: - print 'matplotlib not availabe, scatter plots not available' - matplotlib = False - -def main(revisions=None): - benchmarks_dir=os.path.expanduser('~/repos/downward/benchmarks') - suite=suites.suite_optimal_strips() - - configs = { - #IssueConfig('dfp-reg-otn-abp-b50k', ['--search', 'astar(merge_and_shrink(merge_strategy=merge_dfp(atomic_ts_order=regular,product_ts_order=old_to_new,atomic_before_product=true),shrink_strategy=shrink_bisimulation(greedy=false),label_reduction=exact(before_shrinking=true,before_merging=false),max_states=50000,threshold_before_merge=1))']), - #IssueConfig('dfp-reg-nto-abp-b50k', ['--search', 'astar(merge_and_shrink(merge_strategy=merge_dfp(atomic_ts_order=regular,product_ts_order=new_to_old,atomic_before_product=true),shrink_strategy=shrink_bisimulation(greedy=false),label_reduction=exact(before_shrinking=true,before_merging=false),max_states=50000,threshold_before_merge=1))']), - #IssueConfig('dfp-reg-rnd-abp-b50k', ['--search', 'astar(merge_and_shrink(merge_strategy=merge_dfp(atomic_ts_order=regular,product_ts_order=random,atomic_before_product=true),shrink_strategy=shrink_bisimulation(greedy=false),label_reduction=exact(before_shrinking=true,before_merging=false),max_states=50000,threshold_before_merge=1))']), - 
#IssueConfig('dfp-inv-otn-abp-b50k', ['--search', 'astar(merge_and_shrink(merge_strategy=merge_dfp(atomic_ts_order=inverse,product_ts_order=old_to_new,atomic_before_product=true),shrink_strategy=shrink_bisimulation(greedy=false),label_reduction=exact(before_shrinking=true,before_merging=false),max_states=50000,threshold_before_merge=1))']), - #IssueConfig('dfp-inv-nto-abp-b50k', ['--search', 'astar(merge_and_shrink(merge_strategy=merge_dfp(atomic_ts_order=inverse,product_ts_order=new_to_old,atomic_before_product=true),shrink_strategy=shrink_bisimulation(greedy=false),label_reduction=exact(before_shrinking=true,before_merging=false),max_states=50000,threshold_before_merge=1))']), - #IssueConfig('dfp-inv-rnd-abp-b50k', ['--search', 'astar(merge_and_shrink(merge_strategy=merge_dfp(atomic_ts_order=inverse,product_ts_order=random,atomic_before_product=true),shrink_strategy=shrink_bisimulation(greedy=false),label_reduction=exact(before_shrinking=true,before_merging=false),max_states=50000,threshold_before_merge=1))']), - #IssueConfig('dfp-rnd-otn-abp-b50k', ['--search', 'astar(merge_and_shrink(merge_strategy=merge_dfp(atomic_ts_order=random,product_ts_order=old_to_new,atomic_before_product=true),shrink_strategy=shrink_bisimulation(greedy=false),label_reduction=exact(before_shrinking=true,before_merging=false),max_states=50000,threshold_before_merge=1))']), - #IssueConfig('dfp-rnd-nto-abp-b50k', ['--search', 'astar(merge_and_shrink(merge_strategy=merge_dfp(atomic_ts_order=random,product_ts_order=new_to_old,atomic_before_product=true),shrink_strategy=shrink_bisimulation(greedy=false),label_reduction=exact(before_shrinking=true,before_merging=false),max_states=50000,threshold_before_merge=1))']), - #IssueConfig('dfp-rnd-rnd-abp-b50k', ['--search', 
'astar(merge_and_shrink(merge_strategy=merge_dfp(atomic_ts_order=random,product_ts_order=random,atomic_before_product=true),shrink_strategy=shrink_bisimulation(greedy=false),label_reduction=exact(before_shrinking=true,before_merging=false),max_states=50000,threshold_before_merge=1))']), - IssueConfig('dfp-reg-otn-pba-b50k', ['--search', 'astar(merge_and_shrink(merge_strategy=merge_dfp(atomic_ts_order=regular,product_ts_order=old_to_new,atomic_before_product=false),shrink_strategy=shrink_bisimulation(greedy=false),label_reduction=exact(before_shrinking=true,before_merging=false),max_states=50000,threshold_before_merge=1))']), - IssueConfig('dfp-reg-nto-pba-b50k', ['--search', 'astar(merge_and_shrink(merge_strategy=merge_dfp(atomic_ts_order=regular,product_ts_order=new_to_old,atomic_before_product=false),shrink_strategy=shrink_bisimulation(greedy=false),label_reduction=exact(before_shrinking=true,before_merging=false),max_states=50000,threshold_before_merge=1))']), - IssueConfig('dfp-reg-rnd-pba-b50k', ['--search', 'astar(merge_and_shrink(merge_strategy=merge_dfp(atomic_ts_order=regular,product_ts_order=random,atomic_before_product=false),shrink_strategy=shrink_bisimulation(greedy=false),label_reduction=exact(before_shrinking=true,before_merging=false),max_states=50000,threshold_before_merge=1))']), - IssueConfig('dfp-inv-otn-pba-b50k', ['--search', 'astar(merge_and_shrink(merge_strategy=merge_dfp(atomic_ts_order=inverse,product_ts_order=old_to_new,atomic_before_product=false),shrink_strategy=shrink_bisimulation(greedy=false),label_reduction=exact(before_shrinking=true,before_merging=false),max_states=50000,threshold_before_merge=1))']), - IssueConfig('dfp-inv-nto-pba-b50k', ['--search', 
'astar(merge_and_shrink(merge_strategy=merge_dfp(atomic_ts_order=inverse,product_ts_order=new_to_old,atomic_before_product=false),shrink_strategy=shrink_bisimulation(greedy=false),label_reduction=exact(before_shrinking=true,before_merging=false),max_states=50000,threshold_before_merge=1))']), - IssueConfig('dfp-inv-rnd-pba-b50k', ['--search', 'astar(merge_and_shrink(merge_strategy=merge_dfp(atomic_ts_order=inverse,product_ts_order=random,atomic_before_product=false),shrink_strategy=shrink_bisimulation(greedy=false),label_reduction=exact(before_shrinking=true,before_merging=false),max_states=50000,threshold_before_merge=1))']), - IssueConfig('dfp-rnd-otn-pba-b50k', ['--search', 'astar(merge_and_shrink(merge_strategy=merge_dfp(atomic_ts_order=random,product_ts_order=old_to_new,atomic_before_product=false),shrink_strategy=shrink_bisimulation(greedy=false),label_reduction=exact(before_shrinking=true,before_merging=false),max_states=50000,threshold_before_merge=1))']), - IssueConfig('dfp-rnd-nto-pba-b50k', ['--search', 'astar(merge_and_shrink(merge_strategy=merge_dfp(atomic_ts_order=random,product_ts_order=new_to_old,atomic_before_product=false),shrink_strategy=shrink_bisimulation(greedy=false),label_reduction=exact(before_shrinking=true,before_merging=false),max_states=50000,threshold_before_merge=1))']), - IssueConfig('dfp-rnd-rnd-pba-b50k', ['--search', 'astar(merge_and_shrink(merge_strategy=merge_dfp(atomic_ts_order=random,product_ts_order=random,atomic_before_product=false),shrink_strategy=shrink_bisimulation(greedy=false),label_reduction=exact(before_shrinking=true,before_merging=false),max_states=50000,threshold_before_merge=1))']), - } - - exp = IssueExperiment( - benchmarks_dir=benchmarks_dir, - suite=suite, - revisions=revisions, - configs=configs, - test_suite=['depot:p01.pddl'], - processes=4, - email='silvan.sievers@unibas.ch', - ) - exp.add_resource('ms_parser', 'ms-parser.py', dest='ms-parser.py') - exp.add_command('ms-parser', ['ms_parser']) - - # 
planner outcome attributes - perfect_heuristic = Attribute('perfect_heuristic', absolute=True, min_wins=False) - proved_unsolvability = Attribute('proved_unsolvability', absolute=True, min_wins=False) - actual_search_time = Attribute('actual_search_time', absolute=False, min_wins=True, functions=[gm]) - - # m&s attributes - ms_construction_time = Attribute('ms_construction_time', absolute=False, min_wins=True, functions=[gm]) - ms_abstraction_constructed = Attribute('ms_abstraction_constructed', absolute=True, min_wins=False) - ms_final_size = Attribute('ms_final_size', absolute=False, min_wins=True) - ms_out_of_memory = Attribute('ms_out_of_memory', absolute=True, min_wins=True) - ms_out_of_time = Attribute('ms_out_of_time', absolute=True, min_wins=True) - search_out_of_memory = Attribute('search_out_of_memory', absolute=True, min_wins=True) - search_out_of_time = Attribute('search_out_of_time', absolute=True, min_wins=True) - - extra_attributes = [ - perfect_heuristic, - proved_unsolvability, - actual_search_time, - - ms_construction_time, - ms_abstraction_constructed, - ms_final_size, - ms_out_of_memory, - ms_out_of_time, - search_out_of_memory, - search_out_of_time, - ] - attributes = exp.DEFAULT_TABLE_ATTRIBUTES - attributes.extend(extra_attributes) - - exp.add_fetcher('data/issue644-v2-dfp-tiebreaking-eval', - filter_config=[ - 'issue644-v1-dfp-reg-otn-pba-b50k', - 'issue644-v1-dfp-reg-nto-pba-b50k', - 'issue644-v1-dfp-reg-rnd-pba-b50k', - 'issue644-v1-dfp-inv-otn-pba-b50k', - 'issue644-v1-dfp-inv-nto-pba-b50k', - 'issue644-v1-dfp-inv-rnd-pba-b50k', - 'issue644-v1-dfp-rnd-otn-pba-b50k', - 'issue644-v1-dfp-rnd-nto-pba-b50k', - 'issue644-v1-dfp-rnd-rnd-pba-b50k', - 'issue644-v2-dfp-reg-otn-pba-b50k', - 'issue644-v2-dfp-reg-nto-pba-b50k', - 'issue644-v2-dfp-reg-rnd-pba-b50k', - 'issue644-v2-dfp-inv-otn-pba-b50k', - 'issue644-v2-dfp-inv-nto-pba-b50k', - 'issue644-v2-dfp-inv-rnd-pba-b50k', - 'issue644-v2-dfp-rnd-otn-pba-b50k', - 'issue644-v2-dfp-rnd-nto-pba-b50k', 
- 'issue644-v2-dfp-rnd-rnd-pba-b50k', - ]) - - exp.add_comparison_table_step() - - #if matplotlib: - #for attribute in ["memory", "total_time"]: - #for config in configs: - #exp.add_report( - #RelativeScatterPlotReport( - #attributes=[attribute], - #filter_config=["{}-{}".format(rev, config.nick) for rev in revisions], - #get_category=lambda run1, run2: run1.get("domain"), - #), - #outfile="{}-{}-{}.png".format(exp.name, attribute, config.nick) - #) - - exp() - -main(revisions=['issue644-v1', 'issue644-v2']) diff --git a/experiments/issue644/v2-dfp-tiebreaking.py b/experiments/issue644/v2-dfp-tiebreaking.py deleted file mode 100755 index 31d7bfe78c..0000000000 --- a/experiments/issue644/v2-dfp-tiebreaking.py +++ /dev/null @@ -1,99 +0,0 @@ -#! /usr/bin/env python -# -*- coding: utf-8 -*- - -import os -import suites -from lab.reports import Attribute, gm - -from common_setup import IssueConfig, IssueExperiment -try: - from relativescatter import RelativeScatterPlotReport - matplotlib = True -except ImportError: - print 'matplotlib not availabe, scatter plots not available' - matplotlib = False - -def main(revisions=None): - benchmarks_dir=os.path.expanduser('~/repos/downward/benchmarks') - suite=suites.suite_optimal_strips() - - configs = { - IssueConfig('dfp-reg-otn-abp-b50k', ['--search', 'astar(merge_and_shrink(merge_strategy=merge_dfp(atomic_ts_order=regular,product_ts_order=old_to_new,atomic_before_product=true),shrink_strategy=shrink_bisimulation(greedy=false),label_reduction=exact(before_shrinking=true,before_merging=false),max_states=50000,threshold_before_merge=1))']), - IssueConfig('dfp-reg-nto-abp-b50k', ['--search', 'astar(merge_and_shrink(merge_strategy=merge_dfp(atomic_ts_order=regular,product_ts_order=new_to_old,atomic_before_product=true),shrink_strategy=shrink_bisimulation(greedy=false),label_reduction=exact(before_shrinking=true,before_merging=false),max_states=50000,threshold_before_merge=1))']), - IssueConfig('dfp-reg-rnd-abp-b50k', ['--search', 
'astar(merge_and_shrink(merge_strategy=merge_dfp(atomic_ts_order=regular,product_ts_order=random,atomic_before_product=true),shrink_strategy=shrink_bisimulation(greedy=false),label_reduction=exact(before_shrinking=true,before_merging=false),max_states=50000,threshold_before_merge=1))']), - IssueConfig('dfp-inv-otn-abp-b50k', ['--search', 'astar(merge_and_shrink(merge_strategy=merge_dfp(atomic_ts_order=inverse,product_ts_order=old_to_new,atomic_before_product=true),shrink_strategy=shrink_bisimulation(greedy=false),label_reduction=exact(before_shrinking=true,before_merging=false),max_states=50000,threshold_before_merge=1))']), - IssueConfig('dfp-inv-nto-abp-b50k', ['--search', 'astar(merge_and_shrink(merge_strategy=merge_dfp(atomic_ts_order=inverse,product_ts_order=new_to_old,atomic_before_product=true),shrink_strategy=shrink_bisimulation(greedy=false),label_reduction=exact(before_shrinking=true,before_merging=false),max_states=50000,threshold_before_merge=1))']), - IssueConfig('dfp-inv-rnd-abp-b50k', ['--search', 'astar(merge_and_shrink(merge_strategy=merge_dfp(atomic_ts_order=inverse,product_ts_order=random,atomic_before_product=true),shrink_strategy=shrink_bisimulation(greedy=false),label_reduction=exact(before_shrinking=true,before_merging=false),max_states=50000,threshold_before_merge=1))']), - IssueConfig('dfp-rnd-otn-abp-b50k', ['--search', 'astar(merge_and_shrink(merge_strategy=merge_dfp(atomic_ts_order=random,product_ts_order=old_to_new,atomic_before_product=true),shrink_strategy=shrink_bisimulation(greedy=false),label_reduction=exact(before_shrinking=true,before_merging=false),max_states=50000,threshold_before_merge=1))']), - IssueConfig('dfp-rnd-nto-abp-b50k', ['--search', 
'astar(merge_and_shrink(merge_strategy=merge_dfp(atomic_ts_order=random,product_ts_order=new_to_old,atomic_before_product=true),shrink_strategy=shrink_bisimulation(greedy=false),label_reduction=exact(before_shrinking=true,before_merging=false),max_states=50000,threshold_before_merge=1))']), - IssueConfig('dfp-rnd-rnd-abp-b50k', ['--search', 'astar(merge_and_shrink(merge_strategy=merge_dfp(atomic_ts_order=random,product_ts_order=random,atomic_before_product=true),shrink_strategy=shrink_bisimulation(greedy=false),label_reduction=exact(before_shrinking=true,before_merging=false),max_states=50000,threshold_before_merge=1))']), - IssueConfig('dfp-reg-otn-pba-b50k', ['--search', 'astar(merge_and_shrink(merge_strategy=merge_dfp(atomic_ts_order=regular,product_ts_order=old_to_new,atomic_before_product=false),shrink_strategy=shrink_bisimulation(greedy=false),label_reduction=exact(before_shrinking=true,before_merging=false),max_states=50000,threshold_before_merge=1))']), - IssueConfig('dfp-reg-nto-pba-b50k', ['--search', 'astar(merge_and_shrink(merge_strategy=merge_dfp(atomic_ts_order=regular,product_ts_order=new_to_old,atomic_before_product=false),shrink_strategy=shrink_bisimulation(greedy=false),label_reduction=exact(before_shrinking=true,before_merging=false),max_states=50000,threshold_before_merge=1))']), - IssueConfig('dfp-reg-rnd-pba-b50k', ['--search', 'astar(merge_and_shrink(merge_strategy=merge_dfp(atomic_ts_order=regular,product_ts_order=random,atomic_before_product=false),shrink_strategy=shrink_bisimulation(greedy=false),label_reduction=exact(before_shrinking=true,before_merging=false),max_states=50000,threshold_before_merge=1))']), - IssueConfig('dfp-inv-otn-pba-b50k', ['--search', 
'astar(merge_and_shrink(merge_strategy=merge_dfp(atomic_ts_order=inverse,product_ts_order=old_to_new,atomic_before_product=false),shrink_strategy=shrink_bisimulation(greedy=false),label_reduction=exact(before_shrinking=true,before_merging=false),max_states=50000,threshold_before_merge=1))']), - IssueConfig('dfp-inv-nto-pba-b50k', ['--search', 'astar(merge_and_shrink(merge_strategy=merge_dfp(atomic_ts_order=inverse,product_ts_order=new_to_old,atomic_before_product=false),shrink_strategy=shrink_bisimulation(greedy=false),label_reduction=exact(before_shrinking=true,before_merging=false),max_states=50000,threshold_before_merge=1))']), - IssueConfig('dfp-inv-rnd-pba-b50k', ['--search', 'astar(merge_and_shrink(merge_strategy=merge_dfp(atomic_ts_order=inverse,product_ts_order=random,atomic_before_product=false),shrink_strategy=shrink_bisimulation(greedy=false),label_reduction=exact(before_shrinking=true,before_merging=false),max_states=50000,threshold_before_merge=1))']), - IssueConfig('dfp-rnd-otn-pba-b50k', ['--search', 'astar(merge_and_shrink(merge_strategy=merge_dfp(atomic_ts_order=random,product_ts_order=old_to_new,atomic_before_product=false),shrink_strategy=shrink_bisimulation(greedy=false),label_reduction=exact(before_shrinking=true,before_merging=false),max_states=50000,threshold_before_merge=1))']), - IssueConfig('dfp-rnd-nto-pba-b50k', ['--search', 'astar(merge_and_shrink(merge_strategy=merge_dfp(atomic_ts_order=random,product_ts_order=new_to_old,atomic_before_product=false),shrink_strategy=shrink_bisimulation(greedy=false),label_reduction=exact(before_shrinking=true,before_merging=false),max_states=50000,threshold_before_merge=1))']), - IssueConfig('dfp-rnd-rnd-pba-b50k', ['--search', 
'astar(merge_and_shrink(merge_strategy=merge_dfp(atomic_ts_order=random,product_ts_order=random,atomic_before_product=false),shrink_strategy=shrink_bisimulation(greedy=false),label_reduction=exact(before_shrinking=true,before_merging=false),max_states=50000,threshold_before_merge=1))']), - } - - exp = IssueExperiment( - benchmarks_dir=benchmarks_dir, - suite=suite, - revisions=revisions, - configs=configs, - test_suite=['depot:p01.pddl'], - processes=4, - email='silvan.sievers@unibas.ch', - ) - exp.add_resource('ms_parser', 'ms-parser.py', dest='ms-parser.py') - exp.add_command('ms-parser', ['ms_parser']) - - # planner outcome attributes - perfect_heuristic = Attribute('perfect_heuristic', absolute=True, min_wins=False) - proved_unsolvability = Attribute('proved_unsolvability', absolute=True, min_wins=False) - actual_search_time = Attribute('actual_search_time', absolute=False, min_wins=True, functions=[gm]) - - # m&s attributes - ms_construction_time = Attribute('ms_construction_time', absolute=False, min_wins=True, functions=[gm]) - ms_abstraction_constructed = Attribute('ms_abstraction_constructed', absolute=True, min_wins=False) - ms_final_size = Attribute('ms_final_size', absolute=False, min_wins=True) - ms_out_of_memory = Attribute('ms_out_of_memory', absolute=True, min_wins=True) - ms_out_of_time = Attribute('ms_out_of_time', absolute=True, min_wins=True) - search_out_of_memory = Attribute('search_out_of_memory', absolute=True, min_wins=True) - search_out_of_time = Attribute('search_out_of_time', absolute=True, min_wins=True) - - extra_attributes = [ - perfect_heuristic, - proved_unsolvability, - actual_search_time, - - ms_construction_time, - ms_abstraction_constructed, - ms_final_size, - ms_out_of_memory, - ms_out_of_time, - search_out_of_memory, - search_out_of_time, - ] - attributes = exp.DEFAULT_TABLE_ATTRIBUTES - attributes.extend(extra_attributes) - - exp.add_comparison_table_step() - - #if matplotlib: - #for attribute in ["memory", "total_time"]: - 
#for config in configs: - #exp.add_report( - #RelativeScatterPlotReport( - #attributes=[attribute], - #filter_config=["{}-{}".format(rev, config.nick) for rev in revisions], - #get_category=lambda run1, run2: run1.get("domain"), - #), - #outfile="{}-{}-{}.png".format(exp.name, attribute, config.nick) - #) - - exp() - -main(revisions=['issue644-v1', 'issue644-v2']) diff --git a/experiments/issue644/v2.py b/experiments/issue644/v2.py deleted file mode 100755 index 3e207f5d7a..0000000000 --- a/experiments/issue644/v2.py +++ /dev/null @@ -1,90 +0,0 @@ -#! /usr/bin/env python -# -*- coding: utf-8 -*- - -import os -import suites -from lab.reports import Attribute, gm - -from common_setup import IssueConfig, IssueExperiment -try: - from relativescatter import RelativeScatterPlotReport - matplotlib = True -except ImportError: - print 'matplotlib not availabe, scatter plots not available' - matplotlib = False - -def main(revisions=None): - benchmarks_dir=os.path.expanduser('~/repos/downward/benchmarks') - suite=suites.suite_optimal_strips() - - configs = { - IssueConfig('rl-b50k', ['--search', 'astar(merge_and_shrink(merge_strategy=merge_linear(variable_order=reverse_level),shrink_strategy=shrink_bisimulation(greedy=false),label_reduction=exact(before_shrinking=true,before_merging=false),max_states=50000,threshold_before_merge=1))']), - IssueConfig('cggl-b50k', ['--search', 'astar(merge_and_shrink(merge_strategy=merge_linear(variable_order=cg_goal_level),shrink_strategy=shrink_bisimulation(greedy=false),label_reduction=exact(before_shrinking=true,before_merging=false),max_states=50000,threshold_before_merge=1))']), - IssueConfig('dfp-b50k', ['--search', 'astar(merge_and_shrink(merge_strategy=merge_dfp,shrink_strategy=shrink_bisimulation(greedy=false),label_reduction=exact(before_shrinking=true,before_merging=false),max_states=50000,threshold_before_merge=1))']), - IssueConfig('rl-ginf', ['--search', 
'astar(merge_and_shrink(merge_strategy=merge_linear(variable_order=reverse_level),shrink_strategy=shrink_bisimulation(greedy=true),label_reduction=exact(before_shrinking=true,before_merging=false),max_states=infinity,threshold_before_merge=1))']), - IssueConfig('cggl-ginf', ['--search', 'astar(merge_and_shrink(merge_strategy=merge_linear(variable_order=cg_goal_level),shrink_strategy=shrink_bisimulation(greedy=true),label_reduction=exact(before_shrinking=true,before_merging=false),max_states=infinity,threshold_before_merge=1))']), - IssueConfig('dfp-ginf', ['--search', 'astar(merge_and_shrink(merge_strategy=merge_dfp,shrink_strategy=shrink_bisimulation(greedy=true),label_reduction=exact(before_shrinking=true,before_merging=false),max_states=infinity,threshold_before_merge=1))']), - IssueConfig('rl-f50k', ['--search', 'astar(merge_and_shrink(merge_strategy=merge_linear(variable_order=reverse_level),shrink_strategy=shrink_fh(),label_reduction=exact(before_shrinking=false,before_merging=true),max_states=50000))']), - IssueConfig('cggl-f50k', ['--search', 'astar(merge_and_shrink(merge_strategy=merge_linear(variable_order=cg_goal_level),shrink_strategy=shrink_fh(),label_reduction=exact(before_shrinking=false,before_merging=true),max_states=50000))']), - IssueConfig('dfp-f50k', ['--search', 'astar(merge_and_shrink(merge_strategy=merge_dfp,shrink_strategy=shrink_fh(),label_reduction=exact(before_shrinking=false,before_merging=true),max_states=50000))']), - } - - exp = IssueExperiment( - benchmarks_dir=benchmarks_dir, - suite=suite, - revisions=revisions, - configs=configs, - test_suite=['depot:p01.pddl'], - processes=4, - email='silvan.sievers@unibas.ch', - ) - exp.add_resource('ms_parser', 'ms-parser.py', dest='ms-parser.py') - exp.add_command('ms-parser', ['ms_parser']) - - # planner outcome attributes - perfect_heuristic = Attribute('perfect_heuristic', absolute=True, min_wins=False) - proved_unsolvability = Attribute('proved_unsolvability', absolute=True, 
min_wins=False) - actual_search_time = Attribute('actual_search_time', absolute=False, min_wins=True, functions=[gm]) - - # m&s attributes - ms_construction_time = Attribute('ms_construction_time', absolute=False, min_wins=True, functions=[gm]) - ms_abstraction_constructed = Attribute('ms_abstraction_constructed', absolute=True, min_wins=False) - ms_final_size = Attribute('ms_final_size', absolute=False, min_wins=True) - ms_out_of_memory = Attribute('ms_out_of_memory', absolute=True, min_wins=True) - ms_out_of_time = Attribute('ms_out_of_time', absolute=True, min_wins=True) - search_out_of_memory = Attribute('search_out_of_memory', absolute=True, min_wins=True) - search_out_of_time = Attribute('search_out_of_time', absolute=True, min_wins=True) - - extra_attributes = [ - perfect_heuristic, - proved_unsolvability, - actual_search_time, - - ms_construction_time, - ms_abstraction_constructed, - ms_final_size, - ms_out_of_memory, - ms_out_of_time, - search_out_of_memory, - search_out_of_time, - ] - attributes = exp.DEFAULT_TABLE_ATTRIBUTES - attributes.extend(extra_attributes) - - exp.add_comparison_table_step() - - #if matplotlib: - #for attribute in ["memory", "total_time"]: - #for config in configs: - #exp.add_report( - #RelativeScatterPlotReport( - #attributes=[attribute], - #filter_config=["{}-{}".format(rev, config.nick) for rev in revisions], - #get_category=lambda run1, run2: run1.get("domain"), - #), - #outfile="{}-{}-{}.png".format(exp.name, attribute, config.nick) - #) - - exp() - -main(revisions=['issue644-v1', 'issue644-v2']) diff --git a/experiments/issue644/v3.py b/experiments/issue644/v3.py deleted file mode 100755 index 1cf04eff8d..0000000000 --- a/experiments/issue644/v3.py +++ /dev/null @@ -1,90 +0,0 @@ -#! 
/usr/bin/env python -# -*- coding: utf-8 -*- - -import os -import suites -from lab.reports import Attribute, gm - -from common_setup import IssueConfig, IssueExperiment -try: - from relativescatter import RelativeScatterPlotReport - matplotlib = True -except ImportError: - print 'matplotlib not availabe, scatter plots not available' - matplotlib = False - -def main(revisions=None): - benchmarks_dir=os.path.expanduser('~/repos/downward/benchmarks') - suite=suites.suite_optimal_strips() - - configs = { - IssueConfig('rl-b50k', ['--search', 'astar(merge_and_shrink(merge_strategy=merge_linear(variable_order=reverse_level),shrink_strategy=shrink_bisimulation(greedy=false),label_reduction=exact(before_shrinking=true,before_merging=false),max_states=50000,threshold_before_merge=1))']), - IssueConfig('cggl-b50k', ['--search', 'astar(merge_and_shrink(merge_strategy=merge_linear(variable_order=cg_goal_level),shrink_strategy=shrink_bisimulation(greedy=false),label_reduction=exact(before_shrinking=true,before_merging=false),max_states=50000,threshold_before_merge=1))']), - IssueConfig('dfp-b50k', ['--search', 'astar(merge_and_shrink(merge_strategy=merge_dfp,shrink_strategy=shrink_bisimulation(greedy=false),label_reduction=exact(before_shrinking=true,before_merging=false),max_states=50000,threshold_before_merge=1))']), - IssueConfig('rl-ginf', ['--search', 'astar(merge_and_shrink(merge_strategy=merge_linear(variable_order=reverse_level),shrink_strategy=shrink_bisimulation(greedy=true),label_reduction=exact(before_shrinking=true,before_merging=false),max_states=infinity,threshold_before_merge=1))']), - IssueConfig('cggl-ginf', ['--search', 'astar(merge_and_shrink(merge_strategy=merge_linear(variable_order=cg_goal_level),shrink_strategy=shrink_bisimulation(greedy=true),label_reduction=exact(before_shrinking=true,before_merging=false),max_states=infinity,threshold_before_merge=1))']), - IssueConfig('dfp-ginf', ['--search', 
'astar(merge_and_shrink(merge_strategy=merge_dfp,shrink_strategy=shrink_bisimulation(greedy=true),label_reduction=exact(before_shrinking=true,before_merging=false),max_states=infinity,threshold_before_merge=1))']), - IssueConfig('rl-f50k', ['--search', 'astar(merge_and_shrink(merge_strategy=merge_linear(variable_order=reverse_level),shrink_strategy=shrink_fh(),label_reduction=exact(before_shrinking=false,before_merging=true),max_states=50000))']), - IssueConfig('cggl-f50k', ['--search', 'astar(merge_and_shrink(merge_strategy=merge_linear(variable_order=cg_goal_level),shrink_strategy=shrink_fh(),label_reduction=exact(before_shrinking=false,before_merging=true),max_states=50000))']), - IssueConfig('dfp-f50k', ['--search', 'astar(merge_and_shrink(merge_strategy=merge_dfp,shrink_strategy=shrink_fh(),label_reduction=exact(before_shrinking=false,before_merging=true),max_states=50000))']), - } - - exp = IssueExperiment( - benchmarks_dir=benchmarks_dir, - suite=suite, - revisions=revisions, - configs=configs, - test_suite=['depot:p01.pddl'], - processes=4, - email='silvan.sievers@unibas.ch', - ) - exp.add_resource('ms_parser', 'ms-parser.py', dest='ms-parser.py') - exp.add_command('ms-parser', ['ms_parser']) - - # planner outcome attributes - perfect_heuristic = Attribute('perfect_heuristic', absolute=True, min_wins=False) - proved_unsolvability = Attribute('proved_unsolvability', absolute=True, min_wins=False) - actual_search_time = Attribute('actual_search_time', absolute=False, min_wins=True, functions=[gm]) - - # m&s attributes - ms_construction_time = Attribute('ms_construction_time', absolute=False, min_wins=True, functions=[gm]) - ms_abstraction_constructed = Attribute('ms_abstraction_constructed', absolute=True, min_wins=False) - ms_final_size = Attribute('ms_final_size', absolute=False, min_wins=True) - ms_out_of_memory = Attribute('ms_out_of_memory', absolute=True, min_wins=True) - ms_out_of_time = Attribute('ms_out_of_time', absolute=True, min_wins=True) - 
search_out_of_memory = Attribute('search_out_of_memory', absolute=True, min_wins=True) - search_out_of_time = Attribute('search_out_of_time', absolute=True, min_wins=True) - - extra_attributes = [ - perfect_heuristic, - proved_unsolvability, - actual_search_time, - - ms_construction_time, - ms_abstraction_constructed, - ms_final_size, - ms_out_of_memory, - ms_out_of_time, - search_out_of_memory, - search_out_of_time, - ] - attributes = exp.DEFAULT_TABLE_ATTRIBUTES - attributes.extend(extra_attributes) - - exp.add_comparison_table_step() - - #if matplotlib: - #for attribute in ["memory", "total_time"]: - #for config in configs: - #exp.add_report( - #RelativeScatterPlotReport( - #attributes=[attribute], - #filter_config=["{}-{}".format(rev, config.nick) for rev in revisions], - #get_category=lambda run1, run2: run1.get("domain"), - #), - #outfile="{}-{}-{}.png".format(exp.name, attribute, config.nick) - #) - - exp() - -main(revisions=['issue644-v3-base', 'issue644-v3']) diff --git a/experiments/issue645/common_setup.py b/experiments/issue645/common_setup.py deleted file mode 100644 index 953c87e27f..0000000000 --- a/experiments/issue645/common_setup.py +++ /dev/null @@ -1,356 +0,0 @@ -# -*- coding: utf-8 -*- - -import itertools -import os -import platform -import subprocess -import sys - -from lab.environments import LocalEnvironment, MaiaEnvironment -from lab.experiment import ARGPARSER -from lab.steps import Step -from lab import tools - -from downward.experiment import FastDownwardExperiment -from downward.reports.absolute import AbsoluteReport -from downward.reports.compare import CompareConfigsReport -from downward.reports.scatter import ScatterPlotReport - - -def parse_args(): - ARGPARSER.add_argument( - "--test", - choices=["yes", "no", "auto"], - default="auto", - dest="test_run", - help="test experiment locally on a small suite if --test=yes or " - "--test=auto and we are not on a cluster") - return ARGPARSER.parse_args() - -ARGS = parse_args() - - -def 
get_script(): - """Get file name of main script.""" - return tools.get_script_path() - - -def get_script_dir(): - """Get directory of main script. - - Usually a relative directory (depends on how it was called by the user.)""" - return os.path.dirname(get_script()) - - -def get_experiment_name(): - """Get name for experiment. - - Derived from the absolute filename of the main script, e.g. - "/ham/spam/eggs.py" => "spam-eggs".""" - script = os.path.abspath(get_script()) - script_dir = os.path.basename(os.path.dirname(script)) - script_base = os.path.splitext(os.path.basename(script))[0] - return "%s-%s" % (script_dir, script_base) - - -def get_data_dir(): - """Get data dir for the experiment. - - This is the subdirectory "data" of the directory containing - the main script.""" - return os.path.join(get_script_dir(), "data", get_experiment_name()) - - -def get_repo_base(): - """Get base directory of the repository, as an absolute path. - - Search upwards in the directory tree from the main script until a - directory with a subdirectory named ".hg" is found. - - Abort if the repo base cannot be found.""" - path = os.path.abspath(get_script_dir()) - while os.path.dirname(path) != path: - if os.path.exists(os.path.join(path, ".hg")): - return path - path = os.path.dirname(path) - sys.exit("repo base could not be found") - - -def is_running_on_cluster(): - node = platform.node() - return ( - "cluster" in node or - node.startswith("gkigrid") or - node in ["habakuk", "turtur"]) - - -def is_test_run(): - return ARGS.test_run == "yes" or ( - ARGS.test_run == "auto" and not is_running_on_cluster()) - - -def get_algo_nick(revision, config_nick): - return "{revision}-{config_nick}".format(**locals()) - - -class IssueConfig(object): - """Hold information about a planner configuration. - - See FastDownwardExperiment.add_algorithm() for documentation of the - constructor's options. 
- - """ - def __init__(self, nick, component_options, - build_options=None, driver_options=None): - self.nick = nick - self.component_options = component_options - self.build_options = build_options - self.driver_options = driver_options - - -class IssueExperiment(FastDownwardExperiment): - """Subclass of FastDownwardExperiment with some convenience features.""" - - DEFAULT_TEST_SUITE = "gripper:prob01.pddl" - - DEFAULT_TABLE_ATTRIBUTES = [ - "cost", - "coverage", - "error", - "evaluations", - "expansions", - "expansions_until_last_jump", - "generated", - "memory", - "quality", - "run_dir", - "score_evaluations", - "score_expansions", - "score_generated", - "score_memory", - "score_search_time", - "score_total_time", - "search_time", - "total_time", - ] - - DEFAULT_SCATTER_PLOT_ATTRIBUTES = [ - "evaluations", - "expansions", - "expansions_until_last_jump", - "initial_h_value", - "memory", - "search_time", - "total_time", - ] - - PORTFOLIO_ATTRIBUTES = [ - "cost", - "coverage", - "error", - "plan_length", - "run_dir", - ] - - def __init__(self, suite, revisions=[], configs={}, grid_priority=None, - path=None, test_suite=None, email=None, processes=None, - **kwargs): - """ - If *revisions* is specified, it should be a non-empty - list of revisions, which specify which planner versions to use - in the experiment. The same versions are used for translator, - preprocessor and search. :: - - IssueExperiment(revisions=["issue123", "4b3d581643"], ...) - - *configs* must be a non-empty list of IssueConfig objects. :: - - IssueExperiment(..., configs=[ - IssueConfig("ff", ["--search", "eager_greedy(ff())"]), - IssueConfig( - "lama", [], - driver_options=["--alias", "seq-sat-lama-2011"]), - ]) - - *suite* sets the benchmarks for the experiment. It must be a - single string or a list of strings specifying domains or - tasks. The downward.suites module has many predefined - suites. 
:: - - IssueExperiment(..., suite=["grid", "gripper:prob01.pddl"]) - - from downward import suites - IssueExperiment(..., suite=suites.suite_all()) - IssueExperiment(..., suite=suites.suite_satisficing_with_ipc11()) - IssueExperiment(..., suite=suites.suite_optimal()) - - Use *grid_priority* to set the job priority for cluster - experiments. It must be in the range [-1023, 0] where 0 is the - highest priority. By default the priority is 0. :: - - IssueExperiment(..., grid_priority=-500) - - If *path* is specified, it must be the path to where the - experiment should be built (e.g. - /home/john/experiments/issue123/exp01/). If omitted, the - experiment path is derived automatically from the main - script's filename. Example:: - - script = experiments/issue123/exp01.py --> - path = experiments/issue123/data/issue123-exp01/ - - Specify *test_suite* to set the benchmarks for experiment test - runs. By default the first gripper task is used. - - IssueExperiment(..., test_suite=["depot:pfile1", "tpp:p01.pddl"]) - - If *email* is specified, it should be an email address. This - email address will be notified upon completion of the experiments - if it is run on the cluster. 
- """ - - if is_test_run(): - kwargs["environment"] = LocalEnvironment(processes=processes) - suite = test_suite or self.DEFAULT_TEST_SUITE - elif "environment" not in kwargs: - kwargs["environment"] = MaiaEnvironment( - priority=grid_priority, email=email) - - path = path or get_data_dir() - - FastDownwardExperiment.__init__(self, path=path, **kwargs) - - repo = get_repo_base() - for rev in revisions: - for config in configs: - self.add_algorithm( - get_algo_nick(rev, config.nick), - repo, - rev, - config.component_options, - build_options=config.build_options, - driver_options=config.driver_options) - - self.add_suite(os.path.join(repo, "benchmarks"), suite) - - self._revisions = revisions - self._configs = configs - - @classmethod - def _is_portfolio(cls, config_nick): - return "fdss" in config_nick - - @classmethod - def get_supported_attributes(cls, config_nick, attributes): - if cls._is_portfolio(config_nick): - return [attr for attr in attributes - if attr in cls.PORTFOLIO_ATTRIBUTES] - return attributes - - def add_absolute_report_step(self, **kwargs): - """Add step that makes an absolute report. - - Absolute reports are useful for experiments that don't - compare revisions. - - The report is written to the experiment evaluation directory. - - All *kwargs* will be passed to the AbsoluteReport class. If - the keyword argument *attributes* is not specified, a - default list of attributes is used. :: - - exp.add_absolute_report_step(attributes=["coverage"]) - - """ - kwargs.setdefault("attributes", self.DEFAULT_TABLE_ATTRIBUTES) - report = AbsoluteReport(**kwargs) - outfile = os.path.join(self.eval_dir, - get_experiment_name() + "." + - report.output_format) - self.add_report(report, outfile=outfile) - self.add_step(Step('publish-absolute-report', - subprocess.call, - ['publish', outfile])) - - def add_comparison_table_step(self, **kwargs): - """Add a step that makes pairwise revision comparisons. 
- - Create comparative reports for all pairs of Fast Downward - revisions. Each report pairs up the runs of the same config and - lists the two absolute attribute values and their difference - for all attributes in kwargs["attributes"]. - - All *kwargs* will be passed to the CompareConfigsReport class. - If the keyword argument *attributes* is not specified, a - default list of attributes is used. :: - - exp.add_comparison_table_step(attributes=["coverage"]) - - """ - kwargs.setdefault("attributes", self.DEFAULT_TABLE_ATTRIBUTES) - - def make_comparison_tables(): - for rev1, rev2 in itertools.combinations(self._revisions, 2): - compared_configs = [] - for config in self._configs: - config_nick = config.nick - compared_configs.append( - ("%s-%s" % (rev1, config_nick), - "%s-%s" % (rev2, config_nick), - "Diff (%s)" % config_nick)) - report = CompareConfigsReport(compared_configs, **kwargs) - outfile = os.path.join( - self.eval_dir, - "%s-%s-%s-compare" % (self.name, rev1, rev2) - + "." + report.output_format) - report(self.eval_dir, outfile) - - def publish_comparison_tables(): - for rev1, rev2 in itertools.combinations(self._revisions, 2): - outfile = os.path.join( - self.eval_dir, - "%s-%s-%s-compare" % (self.name, rev1, rev2) - + ".html") - subprocess.call(['publish', outfile]) - - self.add_step(Step("make-comparison-tables", make_comparison_tables)) - self.add_step(Step("publish-comparison-tables", publish_comparison_tables)) - - def add_scatter_plot_step(self, attributes=None): - """Add a step that creates scatter plots for all revision pairs. - - Create a scatter plot for each combination of attribute, - configuration and revisions pair. If *attributes* is not - specified, a list of common scatter plot attributes is used. - For portfolios all attributes except "cost", "coverage" and - "plan_length" will be ignored. 
:: - - exp.add_scatter_plot_step(attributes=["expansions"]) - - """ - if attributes is None: - attributes = self.DEFAULT_SCATTER_PLOT_ATTRIBUTES - scatter_dir = os.path.join(self.eval_dir, "scatter") - - def make_scatter_plot(config_nick, rev1, rev2, attribute): - name = "-".join([self.name, rev1, rev2, attribute, config_nick]) - print "Make scatter plot for", name - algo1 = "%s-%s" % (rev1, config_nick) - algo2 = "%s-%s" % (rev2, config_nick) - report = ScatterPlotReport( - filter_config=[algo1, algo2], - attributes=[attribute], - get_category=lambda run1, run2: run1["domain"], - legend_location=(1.3, 0.5)) - report( - self.eval_dir, - os.path.join(scatter_dir, rev1 + "-" + rev2, name)) - - def make_scatter_plots(): - for config in self._configs: - for rev1, rev2 in itertools.combinations(self._revisions, 2): - for attribute in self.get_supported_attributes( - config.nick, attributes): - make_scatter_plot(config.nick, rev1, rev2, attribute) - - self.add_step(Step("make-scatter-plots", make_scatter_plots)) diff --git a/experiments/issue645/ms-parser.py b/experiments/issue645/ms-parser.py deleted file mode 100755 index 49649e2780..0000000000 --- a/experiments/issue645/ms-parser.py +++ /dev/null @@ -1,81 +0,0 @@ -#! 
/usr/bin/env python - -from lab.parser import Parser - -parser = Parser() -parser.add_pattern('ms_final_size', 'Final transition system size: (\d+)', required=False, type=int) -parser.add_pattern('ms_construction_time', 'Done initializing merge-and-shrink heuristic \[(.+)s\]', required=False, type=float) -parser.add_pattern('ms_memory_delta', 'Final peak memory increase of merge-and-shrink computation: (\d+) KB', required=False, type=int) -parser.add_pattern('actual_search_time', 'Actual search time: (.+)s \[t=.+s\]', required=False, type=float) - -def check_ms_constructed(content, props): - ms_construction_time = props.get('ms_construction_time') - abstraction_constructed = False - if ms_construction_time is not None: - abstraction_constructed = True - props['ms_abstraction_constructed'] = abstraction_constructed - -parser.add_function(check_ms_constructed) - -def check_planner_exit_reason(content, props): - ms_abstraction_constructed = props.get('ms_abstraction_constructed') - error = props.get('error') - if error != 'none' and error != 'timeout' and error != 'out-of-memory': - print 'error: %s' % error - return - - # Check whether merge-and-shrink computation or search ran out of - # time or memory. 
- ms_out_of_time = False - ms_out_of_memory = False - search_out_of_time = False - search_out_of_memory = False - if ms_abstraction_constructed == False: - if error == 'timeout': - ms_out_of_time = True - elif error == 'out-of-memory': - ms_out_of_memory = True - elif ms_abstraction_constructed == True: - if error == 'timeout': - search_out_of_time = True - elif error == 'out-of-memory': - search_out_of_memory = True - props['ms_out_of_time'] = ms_out_of_time - props['ms_out_of_memory'] = ms_out_of_memory - props['search_out_of_time'] = search_out_of_time - props['search_out_of_memory'] = search_out_of_memory - -parser.add_function(check_planner_exit_reason) - -def check_perfect_heuristic(content, props): - plan_length = props.get('plan_length') - expansions = props.get('expansions') - if plan_length != None: - perfect_heuristic = False - if plan_length + 1 == expansions: - perfect_heuristic = True - props['perfect_heuristic'] = perfect_heuristic - -parser.add_function(check_perfect_heuristic) - -def check_proved_unsolvability(content, props): - proved_unsolvability = False - if props['coverage'] == 0: - for line in content.splitlines(): - if line == 'Completely explored state space -- no solution!': - proved_unsolvability = True - break - props['proved_unsolvability'] = proved_unsolvability - -parser.add_function(check_proved_unsolvability) - -def count_dfp_no_goal_relevant_ts(content, props): - counter = 0 - for line in content.splitlines(): - if line == 'found no goal relevant pair': - counter += 1 - props['ms_dfp_nogoalrelevantpair_counter'] = counter - -parser.add_function(count_dfp_no_goal_relevant_ts) - -parser.parse() diff --git a/experiments/issue645/relativescatter.py b/experiments/issue645/relativescatter.py deleted file mode 100644 index 68e2529f34..0000000000 --- a/experiments/issue645/relativescatter.py +++ /dev/null @@ -1,106 +0,0 @@ -# -*- coding: utf-8 -*- - -from collections import defaultdict - -from matplotlib import ticker - -from 
downward.reports.scatter import ScatterPlotReport -from downward.reports.plot import PlotReport, Matplotlib, MatplotlibPlot - - -# TODO: handle outliers - -# TODO: this is mostly copied from ScatterMatplotlib (scatter.py) -class RelativeScatterMatplotlib(Matplotlib): - @classmethod - def _plot(cls, report, axes, categories, styles): - # Display grid - axes.grid(b=True, linestyle='-', color='0.75') - - has_points = False - # Generate the scatter plots - for category, coords in sorted(categories.items()): - X, Y = zip(*coords) - axes.scatter(X, Y, s=42, label=category, **styles[category]) - if X and Y: - has_points = True - - if report.xscale == 'linear' or report.yscale == 'linear': - plot_size = report.missing_val * 1.01 - else: - plot_size = report.missing_val * 1.25 - - # make 5 ticks above and below 1 - yticks = [] - tick_step = report.ylim_top**(1/5.0) - for i in xrange(-5, 6): - yticks.append(tick_step**i) - axes.set_yticks(yticks) - axes.get_yaxis().set_major_formatter(ticker.ScalarFormatter()) - - axes.set_xlim(report.xlim_left or -1, report.xlim_right or plot_size) - axes.set_ylim(report.ylim_bottom or -1, report.ylim_top or plot_size) - - for axis in [axes.xaxis, axes.yaxis]: - MatplotlibPlot.change_axis_formatter(axis, - report.missing_val if report.show_missing else None) - return has_points - - - -class RelativeScatterPlotReport(ScatterPlotReport): - """ - Generate a scatter plot that shows how a specific attribute in two - configurations. The attribute value in config 1 is shown on the - x-axis and the relation to the value in config 2 on the y-axis. - """ - - def __init__(self, show_missing=True, get_category=None, **kwargs): - ScatterPlotReport.__init__(self, show_missing, get_category, **kwargs) - if self.output_format == 'tex': - raise "not supported" - else: - self.writer = RelativeScatterMatplotlib - - def _fill_categories(self, runs): - # We discard the *runs* parameter. 
- # Map category names to value tuples - categories = defaultdict(list) - self.ylim_bottom = 2 - self.ylim_top = 0.5 - self.xlim_left = float("inf") - for (domain, problem), runs in self.problem_runs.items(): - if len(runs) != 2: - continue - run1, run2 = runs - assert (run1['config'] == self.configs[0] and - run2['config'] == self.configs[1]) - val1 = run1.get(self.attribute) - val2 = run2.get(self.attribute) - if val1 is None or val2 is None: - continue - category = self.get_category(run1, run2) - assert val1 > 0, (domain, problem, self.configs[0], val1) - assert val2 > 0, (domain, problem, self.configs[1], val2) - x = val1 - y = val2 / float(val1) - - categories[category].append((x, y)) - - self.ylim_top = max(self.ylim_top, y) - self.ylim_bottom = min(self.ylim_bottom, y) - self.xlim_left = min(self.xlim_left, x) - - # center around 1 - if self.ylim_bottom < 1: - self.ylim_top = max(self.ylim_top, 1 / float(self.ylim_bottom)) - if self.ylim_top > 1: - self.ylim_bottom = min(self.ylim_bottom, 1 / float(self.ylim_top)) - return categories - - def _set_scales(self, xscale, yscale): - # ScatterPlots use log-scaling on the x-axis by default. - default_xscale = 'log' - if self.attribute and self.attribute in self.LINEAR: - default_xscale = 'linear' - PlotReport._set_scales(self, xscale or default_xscale, 'log') diff --git a/experiments/issue645/suites.py b/experiments/issue645/suites.py deleted file mode 100644 index ec030b6d4c..0000000000 --- a/experiments/issue645/suites.py +++ /dev/null @@ -1,315 +0,0 @@ -# Benchmark suites from the Fast Downward benchmark collection. 
- -def suite_alternative_formulations(): - return ['airport-adl', 'no-mprime', 'no-mystery'] - - -def suite_ipc98_to_ipc04_adl(): - return [ - 'assembly', 'miconic-fulladl', 'miconic-simpleadl', - 'optical-telegraphs', 'philosophers', 'psr-large', - 'psr-middle', 'schedule', - ] - - -def suite_ipc98_to_ipc04_strips(): - return [ - 'airport', 'blocks', 'depot', 'driverlog', 'freecell', 'grid', - 'gripper', 'logistics00', 'logistics98', 'miconic', 'movie', - 'mprime', 'mystery', 'pipesworld-notankage', 'psr-small', - 'satellite', 'zenotravel', - ] - - -def suite_ipc98_to_ipc04(): - # All IPC1-4 domains, including the trivial Movie. - return sorted(suite_ipc98_to_ipc04_adl() + suite_ipc98_to_ipc04_strips()) - - -def suite_ipc06_adl(): - return [ - 'openstacks', - 'pathways', - 'trucks', - ] - - -def suite_ipc06_strips_compilations(): - return [ - 'openstacks-strips', - 'pathways-noneg', - 'trucks-strips', - ] - - -def suite_ipc06_strips(): - return [ - 'pipesworld-tankage', - 'rovers', - 'storage', - 'tpp', - ] - - -def suite_ipc06(): - return sorted(suite_ipc06_adl() + suite_ipc06_strips()) - - -def suite_ipc08_common_strips(): - return [ - 'parcprinter-08-strips', - 'pegsol-08-strips', - 'scanalyzer-08-strips', - ] - - -def suite_ipc08_opt_adl(): - return ['openstacks-opt08-adl'] - - -def suite_ipc08_opt_strips(): - return sorted(suite_ipc08_common_strips() + [ - 'elevators-opt08-strips', - 'openstacks-opt08-strips', - 'sokoban-opt08-strips', - 'transport-opt08-strips', - 'woodworking-opt08-strips', - ]) - - -def suite_ipc08_opt(): - return sorted(suite_ipc08_opt_strips() + suite_ipc08_opt_adl()) - - -def suite_ipc08_sat_adl(): - return ['openstacks-sat08-adl'] - - -def suite_ipc08_sat_strips(): - return sorted(suite_ipc08_common_strips() + [ - # Note: cyber-security is missing. 
- 'elevators-sat08-strips', - 'openstacks-sat08-strips', - 'sokoban-sat08-strips', - 'transport-sat08-strips', - 'woodworking-sat08-strips', - ]) - - -def suite_ipc08_sat(): - return sorted(suite_ipc08_sat_strips() + suite_ipc08_sat_adl()) - - -def suite_ipc08(): - return sorted(set(suite_ipc08_opt() + suite_ipc08_sat())) - - -def suite_ipc11_opt(): - return [ - 'barman-opt11-strips', - 'elevators-opt11-strips', - 'floortile-opt11-strips', - 'nomystery-opt11-strips', - 'openstacks-opt11-strips', - 'parcprinter-opt11-strips', - 'parking-opt11-strips', - 'pegsol-opt11-strips', - 'scanalyzer-opt11-strips', - 'sokoban-opt11-strips', - 'tidybot-opt11-strips', - 'transport-opt11-strips', - 'visitall-opt11-strips', - 'woodworking-opt11-strips', - ] - - -def suite_ipc11_sat(): - return [ - 'barman-sat11-strips', - 'elevators-sat11-strips', - 'floortile-sat11-strips', - 'nomystery-sat11-strips', - 'openstacks-sat11-strips', - 'parcprinter-sat11-strips', - 'parking-sat11-strips', - 'pegsol-sat11-strips', - 'scanalyzer-sat11-strips', - 'sokoban-sat11-strips', - 'tidybot-sat11-strips', - 'transport-sat11-strips', - 'visitall-sat11-strips', - 'woodworking-sat11-strips', - ] - - -def suite_ipc11(): - return sorted(suite_ipc11_opt() + suite_ipc11_sat()) - - -def suite_ipc14_agl_adl(): - return [ - 'cavediving-agl14-adl', - 'citycar-agl14-adl', - 'maintenance-agl14-adl', - ] - - -def suite_ipc14_agl_strips(): - return [ - 'barman-agl14-strips', - 'childsnack-agl14-strips', - 'floortile-agl14-strips', - 'ged-agl14-strips', - 'hiking-agl14-strips', - 'openstacks-agl14-strips', - 'parking-agl14-strips', - 'tetris-agl14-strips', - 'thoughtful-agl14-strips', - 'transport-agl14-strips', - 'visitall-agl14-strips', - ] - - -def suite_ipc14_agl(): - return sorted(suite_ipc14_agl_adl() + suite_ipc14_agl_strips()) - - -def suite_ipc14_mco_adl(): - return [ - 'cavediving-mco14-adl', - 'citycar-mco14-adl', - 'maintenance-mco14-adl', - ] - - -def suite_ipc14_mco_strips(): - return [ - 
'barman-mco14-strips', - 'childsnack-mco14-strips', - 'floortile-mco14-strips', - 'ged-mco14-strips', - 'hiking-mco14-strips', - 'openstacks-mco14-strips', - 'parking-mco14-strips', - 'tetris-mco14-strips', - 'thoughtful-mco14-strips', - 'transport-mco14-strips', - 'visitall-mco14-strips', - ] - - -def suite_ipc14_mco(): - return sorted(suite_ipc14_mco_adl() + suite_ipc14_mco_strips()) - - -def suite_ipc14_opt_adl(): - return [ - 'cavediving-opt14-adl', - 'citycar-opt14-adl', - 'maintenance-opt14-adl', - ] - - -def suite_ipc14_opt_strips(): - return [ - 'barman-opt14-strips', - 'childsnack-opt14-strips', - 'floortile-opt14-strips', - 'ged-opt14-strips', - 'hiking-opt14-strips', - 'openstacks-opt14-strips', - 'parking-opt14-strips', - 'tetris-opt14-strips', - 'tidybot-opt14-strips', - 'transport-opt14-strips', - 'visitall-opt14-strips', - ] - - -def suite_ipc14_opt(): - return sorted(suite_ipc14_opt_adl() + suite_ipc14_opt_strips()) - - -def suite_ipc14_sat_adl(): - return [ - 'cavediving-sat14-adl', - 'citycar-sat14-adl', - 'maintenance-sat14-adl', - ] - - -def suite_ipc14_sat_strips(): - return [ - 'barman-sat14-strips', - 'childsnack-sat14-strips', - 'floortile-sat14-strips', - 'ged-sat14-strips', - 'hiking-sat14-strips', - 'openstacks-sat14-strips', - 'parking-sat14-strips', - 'tetris-sat14-strips', - 'thoughtful-sat14-strips', - 'transport-sat14-strips', - 'visitall-sat14-strips', - ] - - -def suite_ipc14_sat(): - return sorted(suite_ipc14_sat_adl() + suite_ipc14_sat_strips()) - - -def suite_ipc14(): - return sorted( - suite_ipc14_agl() + suite_ipc14_mco() + - suite_ipc14_opt() + suite_ipc14_sat()) - - -def suite_unsolvable(): - # TODO: Add other unsolvable problems (Miconic-FullADL). - # TODO: Add 'fsc-grid-r:prize5x5_R.pddl' and 't0-uts:uts_r-02.pddl' - # if the extra-domains branch is merged. 
- return sorted( - ['mystery:prob%02d.pddl' % index - for index in [4, 5, 7, 8, 12, 16, 18, 21, 22, 23, 24]] + - ['miconic-fulladl:f21-3.pddl', 'miconic-fulladl:f30-2.pddl']) - - -def suite_optimal_adl(): - return sorted( - suite_ipc98_to_ipc04_adl() + suite_ipc06_adl() + - suite_ipc08_opt_adl()) - - -def suite_optimal_strips(): - return sorted( - suite_ipc98_to_ipc04_strips() + suite_ipc06_strips() + - suite_ipc06_strips_compilations() + suite_ipc08_opt_strips() + - suite_ipc11_opt()) - - -def suite_optimal(): - return sorted(suite_optimal_adl() + suite_optimal_strips()) - - -def suite_satisficing_adl(): - return sorted( - suite_ipc98_to_ipc04_adl() + suite_ipc06_adl() + - suite_ipc08_sat_adl()) - - -def suite_satisficing_strips(): - return sorted( - suite_ipc98_to_ipc04_strips() + suite_ipc06_strips() + - suite_ipc06_strips_compilations() + suite_ipc08_sat_strips() + - suite_ipc11_sat()) - - -def suite_satisficing(): - return sorted(suite_satisficing_adl() + suite_satisficing_strips()) - - -def suite_all(): - return sorted( - suite_ipc98_to_ipc04() + suite_ipc06() + - suite_ipc06_strips_compilations() + suite_ipc08() + - suite_ipc11() + suite_alternative_formulations()) diff --git a/experiments/issue645/v1.py b/experiments/issue645/v1.py deleted file mode 100755 index d88542b958..0000000000 --- a/experiments/issue645/v1.py +++ /dev/null @@ -1,82 +0,0 @@ -#! 
/usr/bin/env python -# -*- coding: utf-8 -*- - -from downward import suites -from lab.reports import Attribute, gm - -from common_setup import IssueConfig, IssueExperiment -from relativescatter import RelativeScatterPlotReport - -def main(revisions=None): - suite=suites.suite_optimal_strips() - suite.extend(suites.suite_ipc14_opt_strips()) - - configs = { - IssueConfig('rl-b50k', ['--search', 'astar(merge_and_shrink(merge_strategy=merge_linear(variable_order=reverse_level),shrink_strategy=shrink_bisimulation(max_states=50000,threshold=1,greedy=false),label_reduction=exact(before_shrinking=true,before_merging=false)))']), - IssueConfig('cggl-b50k', ['--search', 'astar(merge_and_shrink(merge_strategy=merge_linear(variable_order=cg_goal_level),shrink_strategy=shrink_bisimulation(max_states=50000,threshold=1,greedy=false),label_reduction=exact(before_shrinking=true,before_merging=false)))']), - IssueConfig('dfp-b50k', ['--search', 'astar(merge_and_shrink(merge_strategy=merge_dfp,shrink_strategy=shrink_bisimulation(max_states=50000,threshold=1,greedy=false),label_reduction=exact(before_shrinking=true,before_merging=false)))']), - IssueConfig('rl-ginf', ['--search', 'astar(merge_and_shrink(merge_strategy=merge_linear(variable_order=reverse_level),shrink_strategy=shrink_bisimulation(max_states=infinity,threshold=1,greedy=true),label_reduction=exact(before_shrinking=true,before_merging=false)))']), - IssueConfig('cggl-ginf', ['--search', 'astar(merge_and_shrink(merge_strategy=merge_linear(variable_order=cg_goal_level),shrink_strategy=shrink_bisimulation(max_states=infinity,threshold=1,greedy=true),label_reduction=exact(before_shrinking=true,before_merging=false)))']), - IssueConfig('dfp-ginf', ['--search', 'astar(merge_and_shrink(merge_strategy=merge_dfp,shrink_strategy=shrink_bisimulation(max_states=infinity,threshold=1,greedy=true),label_reduction=exact(before_shrinking=true,before_merging=false)))']), - IssueConfig('rl-f50k', ['--search', 
'astar(merge_and_shrink(merge_strategy=merge_linear(variable_order=reverse_level),shrink_strategy=shrink_fh(max_states=50000),label_reduction=exact(before_shrinking=false,before_merging=true)))']), - IssueConfig('cggl-f50k', ['--search', 'astar(merge_and_shrink(merge_strategy=merge_linear(variable_order=cg_goal_level),shrink_strategy=shrink_fh(max_states=50000),label_reduction=exact(before_shrinking=false,before_merging=true)))']), - IssueConfig('dfp-f50k', ['--search', 'astar(merge_and_shrink(merge_strategy=merge_dfp,shrink_strategy=shrink_fh(max_states=50000),label_reduction=exact(before_shrinking=false,before_merging=true)))']), - } - - exp = IssueExperiment( - revisions=revisions, - configs=configs, - suite=suite, - test_suite=['depot:pfile1'], - processes=4, - email='silvan.sievers@unibas.ch', - ) - exp.add_resource('ms_parser', 'ms-parser.py', dest='ms-parser.py') - exp.add_command('ms-parser', ['ms_parser']) - - # planner outcome attributes - perfect_heuristic = Attribute('perfect_heuristic', absolute=True, min_wins=False) - proved_unsolvability = Attribute('proved_unsolvability', absolute=True, min_wins=False) - actual_search_time = Attribute('actual_search_time', absolute=False, min_wins=True, functions=[gm]) - - # m&s attributes - ms_construction_time = Attribute('ms_construction_time', absolute=False, min_wins=True, functions=[gm]) - ms_abstraction_constructed = Attribute('ms_abstraction_constructed', absolute=True, min_wins=False) - ms_final_size = Attribute('ms_final_size', absolute=False, min_wins=True) - ms_out_of_memory = Attribute('ms_out_of_memory', absolute=True, min_wins=True) - ms_out_of_time = Attribute('ms_out_of_time', absolute=True, min_wins=True) - search_out_of_memory = Attribute('search_out_of_memory', absolute=True, min_wins=True) - search_out_of_time = Attribute('search_out_of_time', absolute=True, min_wins=True) - - extra_attributes = [ - perfect_heuristic, - proved_unsolvability, - actual_search_time, - - ms_construction_time, - 
ms_abstraction_constructed, - ms_final_size, - ms_out_of_memory, - ms_out_of_time, - search_out_of_memory, - search_out_of_time, - ] - attributes = exp.DEFAULT_TABLE_ATTRIBUTES - attributes.extend(extra_attributes) - - exp.add_comparison_table_step() - - for attribute in ["memory", "total_time"]: - for config in configs: - exp.add_report( - RelativeScatterPlotReport( - attributes=[attribute], - filter_config=["{}-{}".format(rev, config.nick) for rev in revisions], - get_category=lambda run1, run2: run1.get("domain"), - ), - outfile="{}-{}-{}.png".format(exp.name, attribute, config.nick) - ) - - exp() - -main(revisions=['issue645-base', 'issue645-v1']) diff --git a/experiments/issue645/v2.py b/experiments/issue645/v2.py deleted file mode 100755 index 873786a41e..0000000000 --- a/experiments/issue645/v2.py +++ /dev/null @@ -1,76 +0,0 @@ -#! /usr/bin/env python -# -*- coding: utf-8 -*- - -from downward import suites -from lab.reports import Attribute, gm - -from common_setup import IssueConfig, IssueExperiment -from relativescatter import RelativeScatterPlotReport - -def main(revisions=None): - suite=suites.suite_optimal_strips() - suite.extend(suites.suite_ipc14_opt_strips()) - - configs = { - IssueConfig('dfp-b50k', ['--search', 'astar(merge_and_shrink(merge_strategy=merge_dfp,shrink_strategy=shrink_bisimulation(max_states=50000,threshold=1,greedy=false),label_reduction=exact(before_shrinking=true,before_merging=false)))']), - IssueConfig('dfp-ginf', ['--search', 'astar(merge_and_shrink(merge_strategy=merge_dfp,shrink_strategy=shrink_bisimulation(max_states=infinity,threshold=1,greedy=true),label_reduction=exact(before_shrinking=true,before_merging=false)))']), - IssueConfig('dfp-f50k', ['--search', 'astar(merge_and_shrink(merge_strategy=merge_dfp,shrink_strategy=shrink_fh(max_states=50000),label_reduction=exact(before_shrinking=false,before_merging=true)))']), - } - - exp = IssueExperiment( - revisions=revisions, - configs=configs, - suite=suite, - 
test_suite=['depot:pfile1'], - processes=4, - email='silvan.sievers@unibas.ch', - ) - exp.add_resource('ms_parser', 'ms-parser.py', dest='ms-parser.py') - exp.add_command('ms-parser', ['ms_parser']) - - # planner outcome attributes - perfect_heuristic = Attribute('perfect_heuristic', absolute=True, min_wins=False) - proved_unsolvability = Attribute('proved_unsolvability', absolute=True, min_wins=False) - actual_search_time = Attribute('actual_search_time', absolute=False, min_wins=True, functions=[gm]) - - # m&s attributes - ms_construction_time = Attribute('ms_construction_time', absolute=False, min_wins=True, functions=[gm]) - ms_abstraction_constructed = Attribute('ms_abstraction_constructed', absolute=True, min_wins=False) - ms_final_size = Attribute('ms_final_size', absolute=False, min_wins=True) - ms_out_of_memory = Attribute('ms_out_of_memory', absolute=True, min_wins=True) - ms_out_of_time = Attribute('ms_out_of_time', absolute=True, min_wins=True) - search_out_of_memory = Attribute('search_out_of_memory', absolute=True, min_wins=True) - search_out_of_time = Attribute('search_out_of_time', absolute=True, min_wins=True) - - extra_attributes = [ - perfect_heuristic, - proved_unsolvability, - actual_search_time, - - ms_construction_time, - ms_abstraction_constructed, - ms_final_size, - ms_out_of_memory, - ms_out_of_time, - search_out_of_memory, - search_out_of_time, - ] - attributes = exp.DEFAULT_TABLE_ATTRIBUTES - attributes.extend(extra_attributes) - - exp.add_comparison_table_step() - - for attribute in ["memory", "total_time"]: - for config in configs: - exp.add_report( - RelativeScatterPlotReport( - attributes=[attribute], - filter_config=["{}-{}".format(rev, config.nick) for rev in revisions], - get_category=lambda run1, run2: run1.get("domain"), - ), - outfile="{}-{}-{}.png".format(exp.name, attribute, config.nick) - ) - - exp() - -main(revisions=['issue645-v1', 'issue645-v2']) diff --git a/experiments/issue645/v3.py b/experiments/issue645/v3.py deleted 
file mode 100755 index 579bc398d4..0000000000 --- a/experiments/issue645/v3.py +++ /dev/null @@ -1,78 +0,0 @@ -#! /usr/bin/env python -# -*- coding: utf-8 -*- - -from downward import suites -from lab.reports import Attribute, gm - -from common_setup import IssueConfig, IssueExperiment -#from relativescatter import RelativeScatterPlotReport - -def main(revisions=None): - suite=suites.suite_optimal_strips() - suite.extend(suites.suite_ipc14_opt_strips()) - - configs = { - IssueConfig('dfp-b50k', ['--search', 'astar(merge_and_shrink(merge_strategy=merge_dfp,shrink_strategy=shrink_bisimulation(max_states=50000,threshold=1,greedy=false),label_reduction=exact(before_shrinking=true,before_merging=false)))']), - IssueConfig('dfp-ginf', ['--search', 'astar(merge_and_shrink(merge_strategy=merge_dfp,shrink_strategy=shrink_bisimulation(max_states=infinity,threshold=1,greedy=true),label_reduction=exact(before_shrinking=true,before_merging=false)))']), - IssueConfig('dfp-f50k', ['--search', 'astar(merge_and_shrink(merge_strategy=merge_dfp,shrink_strategy=shrink_fh(max_states=50000),label_reduction=exact(before_shrinking=false,before_merging=true)))']), - } - - exp = IssueExperiment( - revisions=revisions, - configs=configs, - suite=suite, - test_suite=['depot:pfile1'], - processes=4, - email='silvan.sievers@unibas.ch', - ) - exp.add_resource('ms_parser', 'ms-parser.py', dest='ms-parser.py') - exp.add_command('ms-parser', ['ms_parser']) - - # planner outcome attributes - perfect_heuristic = Attribute('perfect_heuristic', absolute=True, min_wins=False) - proved_unsolvability = Attribute('proved_unsolvability', absolute=True, min_wins=False) - actual_search_time = Attribute('actual_search_time', absolute=False, min_wins=True, functions=[gm]) - - # m&s attributes - ms_construction_time = Attribute('ms_construction_time', absolute=False, min_wins=True, functions=[gm]) - ms_abstraction_constructed = Attribute('ms_abstraction_constructed', absolute=True, min_wins=False) - ms_final_size 
= Attribute('ms_final_size', absolute=False, min_wins=True) - ms_out_of_memory = Attribute('ms_out_of_memory', absolute=True, min_wins=True) - ms_out_of_time = Attribute('ms_out_of_time', absolute=True, min_wins=True) - search_out_of_memory = Attribute('search_out_of_memory', absolute=True, min_wins=True) - search_out_of_time = Attribute('search_out_of_time', absolute=True, min_wins=True) - ms_dfp_nogoalrelevantpair_counter = Attribute('ms_dfp_nogoalrelevantpair_counter', absolute=True, min_wins=True) - - extra_attributes = [ - perfect_heuristic, - proved_unsolvability, - actual_search_time, - - ms_construction_time, - ms_abstraction_constructed, - ms_final_size, - ms_out_of_memory, - ms_out_of_time, - search_out_of_memory, - search_out_of_time, - ms_dfp_nogoalrelevantpair_counter, - ] - attributes = exp.DEFAULT_TABLE_ATTRIBUTES - attributes.extend(extra_attributes) - - exp.add_comparison_table_step() - - #for attribute in ["memory", "total_time"]: - #for config in configs: - #exp.add_report( - #RelativeScatterPlotReport( - #attributes=[attribute], - #filter_config=["{}-{}".format(rev, config.nick) for rev in revisions], - #get_category=lambda run1, run2: run1.get("domain"), - #), - #outfile="{}-{}-{}.png".format(exp.name, attribute, config.nick) - #) - - exp() - -main(revisions=['issue645-v2', 'issue645-v3']) diff --git a/experiments/issue645/v4-random-seeds.py b/experiments/issue645/v4-random-seeds.py deleted file mode 100755 index 406aa2c96c..0000000000 --- a/experiments/issue645/v4-random-seeds.py +++ /dev/null @@ -1,74 +0,0 @@ -#! 
/usr/bin/env python -# -*- coding: utf-8 -*- - -from downward import suites -from lab.reports import Attribute, gm - -from common_setup import IssueConfig, IssueExperiment - -def main(revisions=None): - suite=suites.suite_optimal_strips() - suite.extend(suites.suite_ipc14_opt_strips()) - - # only DFP configs - configs = { - # label reduction with seed 2016 - IssueConfig('dfp-b50k-lrs2016', ['--search', 'astar(merge_and_shrink(merge_strategy=merge_dfp,shrink_strategy=shrink_bisimulation(max_states=50000,threshold=1,greedy=false),label_reduction=exact(before_shrinking=true,before_merging=false,random_seed=2016)))']), - IssueConfig('dfp-ginf-lrs2016', ['--search', 'astar(merge_and_shrink(merge_strategy=merge_dfp,shrink_strategy=shrink_bisimulation(max_states=infinity,threshold=1,greedy=true),label_reduction=exact(before_shrinking=true,before_merging=false,random_seed=2016)))']), - IssueConfig('dfp-f50k-lrs2016', ['--search', 'astar(merge_and_shrink(merge_strategy=merge_dfp,shrink_strategy=shrink_fh(max_states=50000),label_reduction=exact(before_shrinking=false,before_merging=true,random_seed=2016)))']), - - # shrink fh/rnd with seed 2016 - IssueConfig('dfp-f50ks2016', ['--search', 'astar(merge_and_shrink(merge_strategy=merge_dfp,shrink_strategy=shrink_fh(max_states=50000,random_seed=2016),label_reduction=exact(before_shrinking=false,before_merging=true)))']), - IssueConfig('dfp-rnd50ks2016', ['--search', 'astar(merge_and_shrink(merge_strategy=merge_dfp,shrink_strategy=shrink_random(max_states=50000,random_seed=2016),label_reduction=exact(before_shrinking=false,before_merging=true)))']), - - # shrink fh/rnd with seed 2016 and with label reduction with seed 2016 - IssueConfig('dfp-f50ks2016-lrs2016', ['--search', 'astar(merge_and_shrink(merge_strategy=merge_dfp,shrink_strategy=shrink_fh(max_states=50000,random_seed=2016),label_reduction=exact(before_shrinking=false,before_merging=true,random_seed=2016)))']), - IssueConfig('dfp-rnd50ks2016-lrs2016', ['--search', 
'astar(merge_and_shrink(merge_strategy=merge_dfp,shrink_strategy=shrink_random(max_states=50000,random_seed=2016),label_reduction=exact(before_shrinking=false,before_merging=true,random_seed=2016)))']), - } - - exp = IssueExperiment( - revisions=revisions, - configs=configs, - suite=suite, - test_suite=['depot:pfile1'], - processes=4, - email='silvan.sievers@unibas.ch', - ) - exp.add_resource('ms_parser', 'ms-parser.py', dest='ms-parser.py') - exp.add_command('ms-parser', ['ms_parser']) - - # planner outcome attributes - perfect_heuristic = Attribute('perfect_heuristic', absolute=True, min_wins=False) - proved_unsolvability = Attribute('proved_unsolvability', absolute=True, min_wins=False) - actual_search_time = Attribute('actual_search_time', absolute=False, min_wins=True, functions=[gm]) - - # m&s attributes - ms_construction_time = Attribute('ms_construction_time', absolute=False, min_wins=True, functions=[gm]) - ms_abstraction_constructed = Attribute('ms_abstraction_constructed', absolute=True, min_wins=False) - ms_final_size = Attribute('ms_final_size', absolute=False, min_wins=True) - ms_out_of_memory = Attribute('ms_out_of_memory', absolute=True, min_wins=True) - ms_out_of_time = Attribute('ms_out_of_time', absolute=True, min_wins=True) - search_out_of_memory = Attribute('search_out_of_memory', absolute=True, min_wins=True) - search_out_of_time = Attribute('search_out_of_time', absolute=True, min_wins=True) - - extra_attributes = [ - perfect_heuristic, - proved_unsolvability, - actual_search_time, - - ms_construction_time, - ms_abstraction_constructed, - ms_final_size, - ms_out_of_memory, - ms_out_of_time, - search_out_of_memory, - search_out_of_time, - ] - attributes = exp.DEFAULT_TABLE_ATTRIBUTES - attributes.extend(extra_attributes) - - exp.add_absolute_report_step() - - exp() - -main(revisions=['issue645-v4']) diff --git a/experiments/issue645/v4.py b/experiments/issue645/v4.py deleted file mode 100755 index ae6f2a6761..0000000000 --- 
a/experiments/issue645/v4.py +++ /dev/null @@ -1,88 +0,0 @@ -#! /usr/bin/env python -# -*- coding: utf-8 -*- - -from downward import suites -from lab.reports import Attribute, gm - -from common_setup import IssueConfig, IssueExperiment -try: - from relativescatter import RelativeScatterPlotReport - matplotlib = True -except ImportError: - print 'matplotlib not availabe, scatter plots not available' - matplotlib = False - -def main(revisions=None): - suite=suites.suite_optimal_strips() - suite.extend(suites.suite_ipc14_opt_strips()) - - configs = { - IssueConfig('rl-b50k', ['--search', 'astar(merge_and_shrink(merge_strategy=merge_linear(variable_order=reverse_level),shrink_strategy=shrink_bisimulation(max_states=50000,threshold=1,greedy=false),label_reduction=exact(before_shrinking=true,before_merging=false)))']), - IssueConfig('cggl-b50k', ['--search', 'astar(merge_and_shrink(merge_strategy=merge_linear(variable_order=cg_goal_level),shrink_strategy=shrink_bisimulation(max_states=50000,threshold=1,greedy=false),label_reduction=exact(before_shrinking=true,before_merging=false)))']), - IssueConfig('dfp-b50k', ['--search', 'astar(merge_and_shrink(merge_strategy=merge_dfp,shrink_strategy=shrink_bisimulation(max_states=50000,threshold=1,greedy=false),label_reduction=exact(before_shrinking=true,before_merging=false)))']), - IssueConfig('rl-ginf', ['--search', 'astar(merge_and_shrink(merge_strategy=merge_linear(variable_order=reverse_level),shrink_strategy=shrink_bisimulation(max_states=infinity,threshold=1,greedy=true),label_reduction=exact(before_shrinking=true,before_merging=false)))']), - IssueConfig('cggl-ginf', ['--search', 'astar(merge_and_shrink(merge_strategy=merge_linear(variable_order=cg_goal_level),shrink_strategy=shrink_bisimulation(max_states=infinity,threshold=1,greedy=true),label_reduction=exact(before_shrinking=true,before_merging=false)))']), - IssueConfig('dfp-ginf', ['--search', 
'astar(merge_and_shrink(merge_strategy=merge_dfp,shrink_strategy=shrink_bisimulation(max_states=infinity,threshold=1,greedy=true),label_reduction=exact(before_shrinking=true,before_merging=false)))']), - IssueConfig('rl-f50k', ['--search', 'astar(merge_and_shrink(merge_strategy=merge_linear(variable_order=reverse_level),shrink_strategy=shrink_fh(max_states=50000),label_reduction=exact(before_shrinking=false,before_merging=true)))']), - IssueConfig('cggl-f50k', ['--search', 'astar(merge_and_shrink(merge_strategy=merge_linear(variable_order=cg_goal_level),shrink_strategy=shrink_fh(max_states=50000),label_reduction=exact(before_shrinking=false,before_merging=true)))']), - IssueConfig('dfp-f50k', ['--search', 'astar(merge_and_shrink(merge_strategy=merge_dfp,shrink_strategy=shrink_fh(max_states=50000),label_reduction=exact(before_shrinking=false,before_merging=true)))']), - } - - exp = IssueExperiment( - revisions=revisions, - configs=configs, - suite=suite, - test_suite=['depot:pfile1'], - processes=4, - email='silvan.sievers@unibas.ch', - ) - exp.add_resource('ms_parser', 'ms-parser.py', dest='ms-parser.py') - exp.add_command('ms-parser', ['ms_parser']) - - # planner outcome attributes - perfect_heuristic = Attribute('perfect_heuristic', absolute=True, min_wins=False) - proved_unsolvability = Attribute('proved_unsolvability', absolute=True, min_wins=False) - actual_search_time = Attribute('actual_search_time', absolute=False, min_wins=True, functions=[gm]) - - # m&s attributes - ms_construction_time = Attribute('ms_construction_time', absolute=False, min_wins=True, functions=[gm]) - ms_abstraction_constructed = Attribute('ms_abstraction_constructed', absolute=True, min_wins=False) - ms_final_size = Attribute('ms_final_size', absolute=False, min_wins=True) - ms_out_of_memory = Attribute('ms_out_of_memory', absolute=True, min_wins=True) - ms_out_of_time = Attribute('ms_out_of_time', absolute=True, min_wins=True) - search_out_of_memory = Attribute('search_out_of_memory', 
absolute=True, min_wins=True) - search_out_of_time = Attribute('search_out_of_time', absolute=True, min_wins=True) - - extra_attributes = [ - perfect_heuristic, - proved_unsolvability, - actual_search_time, - - ms_construction_time, - ms_abstraction_constructed, - ms_final_size, - ms_out_of_memory, - ms_out_of_time, - search_out_of_memory, - search_out_of_time, - ] - attributes = exp.DEFAULT_TABLE_ATTRIBUTES - attributes.extend(extra_attributes) - - exp.add_comparison_table_step() - - if matplotlib: - for attribute in ["memory", "total_time"]: - for config in configs: - exp.add_report( - RelativeScatterPlotReport( - attributes=[attribute], - filter_config=["{}-{}".format(rev, config.nick) for rev in revisions], - get_category=lambda run1, run2: run1.get("domain"), - ), - outfile="{}-{}-{}.png".format(exp.name, attribute, config.nick) - ) - - exp() - -main(revisions=['issue645-v3', 'issue645-v4']) diff --git a/experiments/issue648/common_setup.py b/experiments/issue648/common_setup.py deleted file mode 100644 index 953c87e27f..0000000000 --- a/experiments/issue648/common_setup.py +++ /dev/null @@ -1,356 +0,0 @@ -# -*- coding: utf-8 -*- - -import itertools -import os -import platform -import subprocess -import sys - -from lab.environments import LocalEnvironment, MaiaEnvironment -from lab.experiment import ARGPARSER -from lab.steps import Step -from lab import tools - -from downward.experiment import FastDownwardExperiment -from downward.reports.absolute import AbsoluteReport -from downward.reports.compare import CompareConfigsReport -from downward.reports.scatter import ScatterPlotReport - - -def parse_args(): - ARGPARSER.add_argument( - "--test", - choices=["yes", "no", "auto"], - default="auto", - dest="test_run", - help="test experiment locally on a small suite if --test=yes or " - "--test=auto and we are not on a cluster") - return ARGPARSER.parse_args() - -ARGS = parse_args() - - -def get_script(): - """Get file name of main script.""" - return 
tools.get_script_path() - - -def get_script_dir(): - """Get directory of main script. - - Usually a relative directory (depends on how it was called by the user.)""" - return os.path.dirname(get_script()) - - -def get_experiment_name(): - """Get name for experiment. - - Derived from the absolute filename of the main script, e.g. - "/ham/spam/eggs.py" => "spam-eggs".""" - script = os.path.abspath(get_script()) - script_dir = os.path.basename(os.path.dirname(script)) - script_base = os.path.splitext(os.path.basename(script))[0] - return "%s-%s" % (script_dir, script_base) - - -def get_data_dir(): - """Get data dir for the experiment. - - This is the subdirectory "data" of the directory containing - the main script.""" - return os.path.join(get_script_dir(), "data", get_experiment_name()) - - -def get_repo_base(): - """Get base directory of the repository, as an absolute path. - - Search upwards in the directory tree from the main script until a - directory with a subdirectory named ".hg" is found. - - Abort if the repo base cannot be found.""" - path = os.path.abspath(get_script_dir()) - while os.path.dirname(path) != path: - if os.path.exists(os.path.join(path, ".hg")): - return path - path = os.path.dirname(path) - sys.exit("repo base could not be found") - - -def is_running_on_cluster(): - node = platform.node() - return ( - "cluster" in node or - node.startswith("gkigrid") or - node in ["habakuk", "turtur"]) - - -def is_test_run(): - return ARGS.test_run == "yes" or ( - ARGS.test_run == "auto" and not is_running_on_cluster()) - - -def get_algo_nick(revision, config_nick): - return "{revision}-{config_nick}".format(**locals()) - - -class IssueConfig(object): - """Hold information about a planner configuration. - - See FastDownwardExperiment.add_algorithm() for documentation of the - constructor's options. 
- - """ - def __init__(self, nick, component_options, - build_options=None, driver_options=None): - self.nick = nick - self.component_options = component_options - self.build_options = build_options - self.driver_options = driver_options - - -class IssueExperiment(FastDownwardExperiment): - """Subclass of FastDownwardExperiment with some convenience features.""" - - DEFAULT_TEST_SUITE = "gripper:prob01.pddl" - - DEFAULT_TABLE_ATTRIBUTES = [ - "cost", - "coverage", - "error", - "evaluations", - "expansions", - "expansions_until_last_jump", - "generated", - "memory", - "quality", - "run_dir", - "score_evaluations", - "score_expansions", - "score_generated", - "score_memory", - "score_search_time", - "score_total_time", - "search_time", - "total_time", - ] - - DEFAULT_SCATTER_PLOT_ATTRIBUTES = [ - "evaluations", - "expansions", - "expansions_until_last_jump", - "initial_h_value", - "memory", - "search_time", - "total_time", - ] - - PORTFOLIO_ATTRIBUTES = [ - "cost", - "coverage", - "error", - "plan_length", - "run_dir", - ] - - def __init__(self, suite, revisions=[], configs={}, grid_priority=None, - path=None, test_suite=None, email=None, processes=None, - **kwargs): - """ - If *revisions* is specified, it should be a non-empty - list of revisions, which specify which planner versions to use - in the experiment. The same versions are used for translator, - preprocessor and search. :: - - IssueExperiment(revisions=["issue123", "4b3d581643"], ...) - - *configs* must be a non-empty list of IssueConfig objects. :: - - IssueExperiment(..., configs=[ - IssueConfig("ff", ["--search", "eager_greedy(ff())"]), - IssueConfig( - "lama", [], - driver_options=["--alias", "seq-sat-lama-2011"]), - ]) - - *suite* sets the benchmarks for the experiment. It must be a - single string or a list of strings specifying domains or - tasks. The downward.suites module has many predefined - suites. 
:: - - IssueExperiment(..., suite=["grid", "gripper:prob01.pddl"]) - - from downward import suites - IssueExperiment(..., suite=suites.suite_all()) - IssueExperiment(..., suite=suites.suite_satisficing_with_ipc11()) - IssueExperiment(..., suite=suites.suite_optimal()) - - Use *grid_priority* to set the job priority for cluster - experiments. It must be in the range [-1023, 0] where 0 is the - highest priority. By default the priority is 0. :: - - IssueExperiment(..., grid_priority=-500) - - If *path* is specified, it must be the path to where the - experiment should be built (e.g. - /home/john/experiments/issue123/exp01/). If omitted, the - experiment path is derived automatically from the main - script's filename. Example:: - - script = experiments/issue123/exp01.py --> - path = experiments/issue123/data/issue123-exp01/ - - Specify *test_suite* to set the benchmarks for experiment test - runs. By default the first gripper task is used. - - IssueExperiment(..., test_suite=["depot:pfile1", "tpp:p01.pddl"]) - - If *email* is specified, it should be an email address. This - email address will be notified upon completion of the experiments - if it is run on the cluster. 
- """ - - if is_test_run(): - kwargs["environment"] = LocalEnvironment(processes=processes) - suite = test_suite or self.DEFAULT_TEST_SUITE - elif "environment" not in kwargs: - kwargs["environment"] = MaiaEnvironment( - priority=grid_priority, email=email) - - path = path or get_data_dir() - - FastDownwardExperiment.__init__(self, path=path, **kwargs) - - repo = get_repo_base() - for rev in revisions: - for config in configs: - self.add_algorithm( - get_algo_nick(rev, config.nick), - repo, - rev, - config.component_options, - build_options=config.build_options, - driver_options=config.driver_options) - - self.add_suite(os.path.join(repo, "benchmarks"), suite) - - self._revisions = revisions - self._configs = configs - - @classmethod - def _is_portfolio(cls, config_nick): - return "fdss" in config_nick - - @classmethod - def get_supported_attributes(cls, config_nick, attributes): - if cls._is_portfolio(config_nick): - return [attr for attr in attributes - if attr in cls.PORTFOLIO_ATTRIBUTES] - return attributes - - def add_absolute_report_step(self, **kwargs): - """Add step that makes an absolute report. - - Absolute reports are useful for experiments that don't - compare revisions. - - The report is written to the experiment evaluation directory. - - All *kwargs* will be passed to the AbsoluteReport class. If - the keyword argument *attributes* is not specified, a - default list of attributes is used. :: - - exp.add_absolute_report_step(attributes=["coverage"]) - - """ - kwargs.setdefault("attributes", self.DEFAULT_TABLE_ATTRIBUTES) - report = AbsoluteReport(**kwargs) - outfile = os.path.join(self.eval_dir, - get_experiment_name() + "." + - report.output_format) - self.add_report(report, outfile=outfile) - self.add_step(Step('publish-absolute-report', - subprocess.call, - ['publish', outfile])) - - def add_comparison_table_step(self, **kwargs): - """Add a step that makes pairwise revision comparisons. 
- - Create comparative reports for all pairs of Fast Downward - revisions. Each report pairs up the runs of the same config and - lists the two absolute attribute values and their difference - for all attributes in kwargs["attributes"]. - - All *kwargs* will be passed to the CompareConfigsReport class. - If the keyword argument *attributes* is not specified, a - default list of attributes is used. :: - - exp.add_comparison_table_step(attributes=["coverage"]) - - """ - kwargs.setdefault("attributes", self.DEFAULT_TABLE_ATTRIBUTES) - - def make_comparison_tables(): - for rev1, rev2 in itertools.combinations(self._revisions, 2): - compared_configs = [] - for config in self._configs: - config_nick = config.nick - compared_configs.append( - ("%s-%s" % (rev1, config_nick), - "%s-%s" % (rev2, config_nick), - "Diff (%s)" % config_nick)) - report = CompareConfigsReport(compared_configs, **kwargs) - outfile = os.path.join( - self.eval_dir, - "%s-%s-%s-compare" % (self.name, rev1, rev2) - + "." + report.output_format) - report(self.eval_dir, outfile) - - def publish_comparison_tables(): - for rev1, rev2 in itertools.combinations(self._revisions, 2): - outfile = os.path.join( - self.eval_dir, - "%s-%s-%s-compare" % (self.name, rev1, rev2) - + ".html") - subprocess.call(['publish', outfile]) - - self.add_step(Step("make-comparison-tables", make_comparison_tables)) - self.add_step(Step("publish-comparison-tables", publish_comparison_tables)) - - def add_scatter_plot_step(self, attributes=None): - """Add a step that creates scatter plots for all revision pairs. - - Create a scatter plot for each combination of attribute, - configuration and revisions pair. If *attributes* is not - specified, a list of common scatter plot attributes is used. - For portfolios all attributes except "cost", "coverage" and - "plan_length" will be ignored. 
:: - - exp.add_scatter_plot_step(attributes=["expansions"]) - - """ - if attributes is None: - attributes = self.DEFAULT_SCATTER_PLOT_ATTRIBUTES - scatter_dir = os.path.join(self.eval_dir, "scatter") - - def make_scatter_plot(config_nick, rev1, rev2, attribute): - name = "-".join([self.name, rev1, rev2, attribute, config_nick]) - print "Make scatter plot for", name - algo1 = "%s-%s" % (rev1, config_nick) - algo2 = "%s-%s" % (rev2, config_nick) - report = ScatterPlotReport( - filter_config=[algo1, algo2], - attributes=[attribute], - get_category=lambda run1, run2: run1["domain"], - legend_location=(1.3, 0.5)) - report( - self.eval_dir, - os.path.join(scatter_dir, rev1 + "-" + rev2, name)) - - def make_scatter_plots(): - for config in self._configs: - for rev1, rev2 in itertools.combinations(self._revisions, 2): - for attribute in self.get_supported_attributes( - config.nick, attributes): - make_scatter_plot(config.nick, rev1, rev2, attribute) - - self.add_step(Step("make-scatter-plots", make_scatter_plots)) diff --git a/experiments/issue648/parser.py b/experiments/issue648/parser.py deleted file mode 100755 index 7878cb32c2..0000000000 --- a/experiments/issue648/parser.py +++ /dev/null @@ -1,46 +0,0 @@ -#! 
/usr/bin/env python - -from lab.parser import Parser - -parser = Parser() - -def check_planner_exit_reason(content, props): - error = props.get('error') - if error != 'none' and error != 'timeout' and error != 'out-of-memory': - print 'error: %s' % error - return - - out_of_time = False - out_of_memory = False - if error == 'timeout': - out_of_time = True - elif error == 'out-of-memory': - out_of_memory = True - props['out_of_time'] = out_of_time - props['out_of_memory'] = out_of_memory - -parser.add_function(check_planner_exit_reason) - -def check_perfect_heuristic(content, props): - plan_length = props.get('plan_length') - expansions = props.get('expansions') - if plan_length != None: - perfect_heuristic = False - if plan_length + 1 == expansions: - perfect_heuristic = True - props['perfect_heuristic'] = perfect_heuristic - -parser.add_function(check_perfect_heuristic) - -def check_proved_unsolvability(content, props): - proved_unsolvability = False - if props['coverage'] == 0: - for line in content.splitlines(): - if line == 'Completely explored state space -- no solution!': - proved_unsolvability = True - break - props['proved_unsolvability'] = proved_unsolvability - -parser.add_function(check_proved_unsolvability) - -parser.parse() diff --git a/experiments/issue648/v1-opt-reparse.py b/experiments/issue648/v1-opt-reparse.py deleted file mode 100755 index 8cb103104a..0000000000 --- a/experiments/issue648/v1-opt-reparse.py +++ /dev/null @@ -1,57 +0,0 @@ -#! 
/usr/bin/env python -# -*- coding: utf-8 -*- - -from downward import suites -from lab.reports import Attribute -from common_setup import IssueConfig, IssueExperiment - - -REVS = ["issue648-base", "issue648-v1"] -SUITE=suites.suite_optimal_strips() -SUITE.extend(suites.suite_ipc14_opt_strips()) - -CONFIGS = [ - # Test label reduction, shrink_bucket_based (via shrink_fh and shrink_random) - IssueConfig('dfp-b50k', ['--search', 'astar(merge_and_shrink(merge_strategy=merge_dfp,shrink_strategy=shrink_bisimulation(max_states=50000,threshold=1,greedy=false),label_reduction=exact(before_shrinking=true,before_merging=false)))']), - IssueConfig('dfp-f50k', ['--search', 'astar(merge_and_shrink(merge_strategy=merge_dfp,shrink_strategy=shrink_fh(max_states=50000),label_reduction=exact(before_shrinking=false,before_merging=true)))']), - IssueConfig('dfp-r50k', ['--search', 'astar(merge_and_shrink(merge_strategy=merge_dfp,shrink_strategy=shrink_random(max_states=50000),label_reduction=exact(before_shrinking=false,before_merging=true)))']), - # Test sampling - IssueConfig('ipdb', ['--search', 'astar(ipdb)']), - # Test genetic pattern generation - IssueConfig('genetic', ['--search', 'astar(zopdbs(patterns=genetic))']), - # Test cegar - IssueConfig( - "cegar-10K-goals-randomorder", - ["--search", "astar(cegar(subtasks=[goals(order=random)],max_states=10000,max_time=infinity))"]), - IssueConfig( - "cegar-10K-original-randomorder", - ["--search", "astar(cegar(subtasks=[original],max_states=10000,max_time=infinity,pick=random))"]), -] - -exp = IssueExperiment( - revisions=REVS, - configs=CONFIGS, - suite=SUITE, - email="silvan.sievers@unibas.ch" -) - -exp.add_fetcher('data/issue648-v1-opt-test', parsers=['parser.py']) - -# planner outcome attributes -perfect_heuristic = Attribute('perfect_heuristic', absolute=True, min_wins=False) -proved_unsolvability = Attribute('proved_unsolvability', absolute=True, min_wins=False) -out_of_memory = Attribute('out_of_memory', absolute=True, 
min_wins=True) -out_of_time = Attribute('out_of_time', absolute=True, min_wins=True) - -extra_attributes = [ - perfect_heuristic, - proved_unsolvability, - out_of_memory, - out_of_time, -] -attributes = exp.DEFAULT_TABLE_ATTRIBUTES -attributes.extend(extra_attributes) - -exp.add_comparison_table_step(attributes=attributes) - -exp() diff --git a/experiments/issue648/v1-opt-test.py b/experiments/issue648/v1-opt-test.py deleted file mode 100755 index 24112b6eef..0000000000 --- a/experiments/issue648/v1-opt-test.py +++ /dev/null @@ -1,40 +0,0 @@ -#! /usr/bin/env python -# -*- coding: utf-8 -*- - -from downward import suites - -from common_setup import IssueConfig, IssueExperiment - - -REVS = ["issue648-base", "issue648-v1"] -SUITE=suites.suite_optimal_strips() -SUITE.extend(suites.suite_ipc14_opt_strips()) - -CONFIGS = [ - # Test label reduction, shrink_bucket_based (via shrink_fh and shrink_random) - IssueConfig('dfp-b50k', ['--search', 'astar(merge_and_shrink(merge_strategy=merge_dfp,shrink_strategy=shrink_bisimulation(max_states=50000,threshold=1,greedy=false),label_reduction=exact(before_shrinking=true,before_merging=false)))']), - IssueConfig('dfp-f50k', ['--search', 'astar(merge_and_shrink(merge_strategy=merge_dfp,shrink_strategy=shrink_fh(max_states=50000),label_reduction=exact(before_shrinking=false,before_merging=true)))']), - IssueConfig('dfp-r50k', ['--search', 'astar(merge_and_shrink(merge_strategy=merge_dfp,shrink_strategy=shrink_random(max_states=50000),label_reduction=exact(before_shrinking=false,before_merging=true)))']), - # Test sampling - IssueConfig('ipdb', ['--search', 'astar(ipdb)']), - # Test genetic pattern generation - IssueConfig('genetic', ['--search', 'astar(zopdbs(patterns=genetic))']), - # Test cegar - IssueConfig( - "cegar-10K-goals-randomorder", - ["--search", "astar(cegar(subtasks=[goals(order=random)],max_states=10000,max_time=infinity))"]), - IssueConfig( - "cegar-10K-original-randomorder", - ["--search", 
"astar(cegar(subtasks=[original],max_states=10000,max_time=infinity,pick=random))"]), -] - -exp = IssueExperiment( - revisions=REVS, - configs=CONFIGS, - suite=SUITE, - email="silvan.sievers@unibas.ch" -) - -exp.add_comparison_table_step() - -exp() diff --git a/experiments/issue648/v1-sat-reparse.py b/experiments/issue648/v1-sat-reparse.py deleted file mode 100755 index 6f42a10dd4..0000000000 --- a/experiments/issue648/v1-sat-reparse.py +++ /dev/null @@ -1,74 +0,0 @@ -#! /usr/bin/env python -# -*- coding: utf-8 -*- - -from downward import suites -from lab.reports import Attribute -from common_setup import IssueConfig, IssueExperiment - - -REVS = ["issue648-base", "issue648-v1"] -SUITE=suites.suite_satisficing() -SUITE.extend(suites.suite_ipc14_sat()) - -CONFIGS = [ - # Test lazy search with randomization - IssueConfig("lazy_greedy_ff_randomized", [ - "--heuristic", - "h=ff()", - "--search", - "lazy_greedy(h, preferred=h, randomize_successors=true)" - ]), - # Epsilon Greedy - IssueConfig("lazy_epsilon_greedy_ff", [ - "--heuristic", - "h=ff()", - "--search", - "lazy(epsilon_greedy(h))" - ]), - # Pareto - IssueConfig("lazy_pareto_ff_cea", [ - "--heuristic", - "h1=ff()", - "--heuristic", - "h2=cea()", - "--search", - "lazy(pareto([h1, h2]))" - ]), - # Type based - IssueConfig("ff-type-const", [ - "--heuristic", - "hff=ff(cost_type=one)", - "--search", - "lazy(alt([single(hff),single(hff, pref_only=true), type_based([const(1)])])," - "preferred=[hff],cost_type=one)" - ]), - -] - -exp = IssueExperiment( - revisions=REVS, - configs=CONFIGS, - suite=SUITE, - email="silvan.sievers@unibas.ch" -) - -exp.add_fetcher('data/issue648-v1-sat-test', parsers=['parser.py']) - -# planner outcome attributes -perfect_heuristic = Attribute('perfect_heuristic', absolute=True, min_wins=False) -proved_unsolvability = Attribute('proved_unsolvability', absolute=True, min_wins=False) -out_of_memory = Attribute('out_of_memory', absolute=True, min_wins=True) -out_of_time = 
Attribute('out_of_time', absolute=True, min_wins=True) - -extra_attributes = [ - perfect_heuristic, - proved_unsolvability, - out_of_memory, - out_of_time, -] -attributes = exp.DEFAULT_TABLE_ATTRIBUTES -attributes.extend(extra_attributes) - -exp.add_comparison_table_step(attributes=attributes) - -exp() diff --git a/experiments/issue648/v1-sat-test.py b/experiments/issue648/v1-sat-test.py deleted file mode 100755 index 6e51c5db2d..0000000000 --- a/experiments/issue648/v1-sat-test.py +++ /dev/null @@ -1,62 +0,0 @@ -#! /usr/bin/env python -# -*- coding: utf-8 -*- - -from downward import suites - -from common_setup import IssueConfig, IssueExperiment - - -REVS = ["issue648-base", "issue648-v1"] -SUITE=suites.suite_satisficing() -SUITE.extend(suites.suite_ipc14_sat()) - -CONFIGS = [ - # Test lazy search with randomization - IssueConfig("lazy_greedy_ff_randomized", [ - "--heuristic", - "h=ff()", - "--search", - "lazy_greedy(h, preferred=h, randomize_successors=true)" - ]), - # Epsilon Greedy - IssueConfig("lazy_epsilon_greedy_ff", [ - "--heuristic", - "h=ff()", - "--search", - "lazy(epsilon_greedy(h))" - ]), - # Pareto - IssueConfig("lazy_pareto_ff_cea", [ - "--heuristic", - "h1=ff()", - "--heuristic", - "h2=cea()", - "--search", - "lazy(pareto([h1, h2]))" - ]), - # Type based - IssueConfig("ff-type-const", [ - "--heuristic", - "hff=ff(cost_type=one)", - "--search", - "lazy(alt([single(hff),single(hff, pref_only=true), type_based([const(1)])])," - "preferred=[hff],cost_type=one)" - ]), - -] - -exp = IssueExperiment( - revisions=REVS, - configs=CONFIGS, - suite=SUITE, - email="silvan.sievers@unibas.ch" -) - -# Absolute report commented out because a comparison table is more useful for this issue. -# (It's still in this file because someone might want to use it as a basis.) -# Scatter plots commented out for now because I have no usable matplotlib available. 
-# exp.add_absolute_report_step() -exp.add_comparison_table_step() -# exp.add_scatter_plot_step() - -exp() diff --git a/experiments/issue648/v2-opt.py b/experiments/issue648/v2-opt.py deleted file mode 100755 index 9c71419367..0000000000 --- a/experiments/issue648/v2-opt.py +++ /dev/null @@ -1,57 +0,0 @@ -#! /usr/bin/env python -# -*- coding: utf-8 -*- - -from downward import suites -from lab.reports import Attribute -from common_setup import IssueConfig, IssueExperiment - - -REVS = ["issue648-base", "issue648-v2"] -SUITE=suites.suite_optimal_strips() -SUITE.extend(suites.suite_ipc14_opt_strips()) - -CONFIGS = [ - # Test label reduction, shrink_bucket_based (via shrink_fh and shrink_random) - IssueConfig('dfp-b50k', ['--search', 'astar(merge_and_shrink(merge_strategy=merge_dfp,shrink_strategy=shrink_bisimulation(max_states=50000,threshold=1,greedy=false),label_reduction=exact(before_shrinking=true,before_merging=false)))']), - IssueConfig('dfp-f50k', ['--search', 'astar(merge_and_shrink(merge_strategy=merge_dfp,shrink_strategy=shrink_fh(max_states=50000),label_reduction=exact(before_shrinking=false,before_merging=true)))']), - IssueConfig('dfp-r50k', ['--search', 'astar(merge_and_shrink(merge_strategy=merge_dfp,shrink_strategy=shrink_random(max_states=50000),label_reduction=exact(before_shrinking=false,before_merging=true)))']), - # Test sampling - IssueConfig('ipdb', ['--search', 'astar(ipdb)']), - # Test genetic pattern generation - IssueConfig('genetic', ['--search', 'astar(zopdbs(patterns=genetic))']), - # Test cegar - IssueConfig( - "cegar-10K-goals-randomorder", - ["--search", "astar(cegar(subtasks=[goals(order=random)],max_states=10000,max_time=infinity))"]), - IssueConfig( - "cegar-10K-original-randomorder", - ["--search", "astar(cegar(subtasks=[original],max_states=10000,max_time=infinity,pick=random))"]), -] - -exp = IssueExperiment( - revisions=REVS, - configs=CONFIGS, - suite=SUITE, - email="silvan.sievers@unibas.ch" -) -exp.add_resource('parser', 
'parser.py', dest='parser.py') -exp.add_command('parser', ['parser']) - -# planner outcome attributes -perfect_heuristic = Attribute('perfect_heuristic', absolute=True, min_wins=False) -proved_unsolvability = Attribute('proved_unsolvability', absolute=True, min_wins=False) -out_of_memory = Attribute('out_of_memory', absolute=True, min_wins=True) -out_of_time = Attribute('out_of_time', absolute=True, min_wins=True) - -extra_attributes = [ - perfect_heuristic, - proved_unsolvability, - out_of_memory, - out_of_time, -] -attributes = exp.DEFAULT_TABLE_ATTRIBUTES -attributes.extend(extra_attributes) - -exp.add_comparison_table_step(attributes=attributes) - -exp() diff --git a/experiments/issue648/v2-sat.py b/experiments/issue648/v2-sat.py deleted file mode 100755 index 489584e61b..0000000000 --- a/experiments/issue648/v2-sat.py +++ /dev/null @@ -1,74 +0,0 @@ -#! /usr/bin/env python -# -*- coding: utf-8 -*- - -from downward import suites -from lab.reports import Attribute -from common_setup import IssueConfig, IssueExperiment - - -REVS = ["issue648-base", "issue648-v2"] -SUITE=suites.suite_satisficing() -SUITE.extend(suites.suite_ipc14_sat()) - -CONFIGS = [ - # Test lazy search with randomization - IssueConfig("lazy_greedy_ff_randomized", [ - "--heuristic", - "h=ff()", - "--search", - "lazy_greedy(h, preferred=h, randomize_successors=true)" - ]), - # Epsilon Greedy - IssueConfig("lazy_epsilon_greedy_ff", [ - "--heuristic", - "h=ff()", - "--search", - "lazy(epsilon_greedy(h))" - ]), - # Pareto - IssueConfig("lazy_pareto_ff_cea", [ - "--heuristic", - "h1=ff()", - "--heuristic", - "h2=cea()", - "--search", - "lazy(pareto([h1, h2]))" - ]), - # Type based - IssueConfig("ff-type-const", [ - "--heuristic", - "hff=ff(cost_type=one)", - "--search", - "lazy(alt([single(hff),single(hff, pref_only=true), type_based([const(1)])])," - "preferred=[hff],cost_type=one)" - ]), - -] - -exp = IssueExperiment( - revisions=REVS, - configs=CONFIGS, - suite=SUITE, - 
email="silvan.sievers@unibas.ch" -) -exp.add_resource('parser', 'parser.py', dest='parser.py') -exp.add_command('parser', ['parser']) - -# planner outcome attributes -perfect_heuristic = Attribute('perfect_heuristic', absolute=True, min_wins=False) -proved_unsolvability = Attribute('proved_unsolvability', absolute=True, min_wins=False) -out_of_memory = Attribute('out_of_memory', absolute=True, min_wins=True) -out_of_time = Attribute('out_of_time', absolute=True, min_wins=True) - -extra_attributes = [ - perfect_heuristic, - proved_unsolvability, - out_of_memory, - out_of_time, -] -attributes = exp.DEFAULT_TABLE_ATTRIBUTES -attributes.extend(extra_attributes) - -exp.add_comparison_table_step(attributes=attributes) - -exp() diff --git a/experiments/issue650/common_setup.py b/experiments/issue650/common_setup.py deleted file mode 100644 index 953c87e27f..0000000000 --- a/experiments/issue650/common_setup.py +++ /dev/null @@ -1,356 +0,0 @@ -# -*- coding: utf-8 -*- - -import itertools -import os -import platform -import subprocess -import sys - -from lab.environments import LocalEnvironment, MaiaEnvironment -from lab.experiment import ARGPARSER -from lab.steps import Step -from lab import tools - -from downward.experiment import FastDownwardExperiment -from downward.reports.absolute import AbsoluteReport -from downward.reports.compare import CompareConfigsReport -from downward.reports.scatter import ScatterPlotReport - - -def parse_args(): - ARGPARSER.add_argument( - "--test", - choices=["yes", "no", "auto"], - default="auto", - dest="test_run", - help="test experiment locally on a small suite if --test=yes or " - "--test=auto and we are not on a cluster") - return ARGPARSER.parse_args() - -ARGS = parse_args() - - -def get_script(): - """Get file name of main script.""" - return tools.get_script_path() - - -def get_script_dir(): - """Get directory of main script. 
- - Usually a relative directory (depends on how it was called by the user.)""" - return os.path.dirname(get_script()) - - -def get_experiment_name(): - """Get name for experiment. - - Derived from the absolute filename of the main script, e.g. - "/ham/spam/eggs.py" => "spam-eggs".""" - script = os.path.abspath(get_script()) - script_dir = os.path.basename(os.path.dirname(script)) - script_base = os.path.splitext(os.path.basename(script))[0] - return "%s-%s" % (script_dir, script_base) - - -def get_data_dir(): - """Get data dir for the experiment. - - This is the subdirectory "data" of the directory containing - the main script.""" - return os.path.join(get_script_dir(), "data", get_experiment_name()) - - -def get_repo_base(): - """Get base directory of the repository, as an absolute path. - - Search upwards in the directory tree from the main script until a - directory with a subdirectory named ".hg" is found. - - Abort if the repo base cannot be found.""" - path = os.path.abspath(get_script_dir()) - while os.path.dirname(path) != path: - if os.path.exists(os.path.join(path, ".hg")): - return path - path = os.path.dirname(path) - sys.exit("repo base could not be found") - - -def is_running_on_cluster(): - node = platform.node() - return ( - "cluster" in node or - node.startswith("gkigrid") or - node in ["habakuk", "turtur"]) - - -def is_test_run(): - return ARGS.test_run == "yes" or ( - ARGS.test_run == "auto" and not is_running_on_cluster()) - - -def get_algo_nick(revision, config_nick): - return "{revision}-{config_nick}".format(**locals()) - - -class IssueConfig(object): - """Hold information about a planner configuration. - - See FastDownwardExperiment.add_algorithm() for documentation of the - constructor's options. 
- - """ - def __init__(self, nick, component_options, - build_options=None, driver_options=None): - self.nick = nick - self.component_options = component_options - self.build_options = build_options - self.driver_options = driver_options - - -class IssueExperiment(FastDownwardExperiment): - """Subclass of FastDownwardExperiment with some convenience features.""" - - DEFAULT_TEST_SUITE = "gripper:prob01.pddl" - - DEFAULT_TABLE_ATTRIBUTES = [ - "cost", - "coverage", - "error", - "evaluations", - "expansions", - "expansions_until_last_jump", - "generated", - "memory", - "quality", - "run_dir", - "score_evaluations", - "score_expansions", - "score_generated", - "score_memory", - "score_search_time", - "score_total_time", - "search_time", - "total_time", - ] - - DEFAULT_SCATTER_PLOT_ATTRIBUTES = [ - "evaluations", - "expansions", - "expansions_until_last_jump", - "initial_h_value", - "memory", - "search_time", - "total_time", - ] - - PORTFOLIO_ATTRIBUTES = [ - "cost", - "coverage", - "error", - "plan_length", - "run_dir", - ] - - def __init__(self, suite, revisions=[], configs={}, grid_priority=None, - path=None, test_suite=None, email=None, processes=None, - **kwargs): - """ - If *revisions* is specified, it should be a non-empty - list of revisions, which specify which planner versions to use - in the experiment. The same versions are used for translator, - preprocessor and search. :: - - IssueExperiment(revisions=["issue123", "4b3d581643"], ...) - - *configs* must be a non-empty list of IssueConfig objects. :: - - IssueExperiment(..., configs=[ - IssueConfig("ff", ["--search", "eager_greedy(ff())"]), - IssueConfig( - "lama", [], - driver_options=["--alias", "seq-sat-lama-2011"]), - ]) - - *suite* sets the benchmarks for the experiment. It must be a - single string or a list of strings specifying domains or - tasks. The downward.suites module has many predefined - suites. 
:: - - IssueExperiment(..., suite=["grid", "gripper:prob01.pddl"]) - - from downward import suites - IssueExperiment(..., suite=suites.suite_all()) - IssueExperiment(..., suite=suites.suite_satisficing_with_ipc11()) - IssueExperiment(..., suite=suites.suite_optimal()) - - Use *grid_priority* to set the job priority for cluster - experiments. It must be in the range [-1023, 0] where 0 is the - highest priority. By default the priority is 0. :: - - IssueExperiment(..., grid_priority=-500) - - If *path* is specified, it must be the path to where the - experiment should be built (e.g. - /home/john/experiments/issue123/exp01/). If omitted, the - experiment path is derived automatically from the main - script's filename. Example:: - - script = experiments/issue123/exp01.py --> - path = experiments/issue123/data/issue123-exp01/ - - Specify *test_suite* to set the benchmarks for experiment test - runs. By default the first gripper task is used. - - IssueExperiment(..., test_suite=["depot:pfile1", "tpp:p01.pddl"]) - - If *email* is specified, it should be an email address. This - email address will be notified upon completion of the experiments - if it is run on the cluster. 
- """ - - if is_test_run(): - kwargs["environment"] = LocalEnvironment(processes=processes) - suite = test_suite or self.DEFAULT_TEST_SUITE - elif "environment" not in kwargs: - kwargs["environment"] = MaiaEnvironment( - priority=grid_priority, email=email) - - path = path or get_data_dir() - - FastDownwardExperiment.__init__(self, path=path, **kwargs) - - repo = get_repo_base() - for rev in revisions: - for config in configs: - self.add_algorithm( - get_algo_nick(rev, config.nick), - repo, - rev, - config.component_options, - build_options=config.build_options, - driver_options=config.driver_options) - - self.add_suite(os.path.join(repo, "benchmarks"), suite) - - self._revisions = revisions - self._configs = configs - - @classmethod - def _is_portfolio(cls, config_nick): - return "fdss" in config_nick - - @classmethod - def get_supported_attributes(cls, config_nick, attributes): - if cls._is_portfolio(config_nick): - return [attr for attr in attributes - if attr in cls.PORTFOLIO_ATTRIBUTES] - return attributes - - def add_absolute_report_step(self, **kwargs): - """Add step that makes an absolute report. - - Absolute reports are useful for experiments that don't - compare revisions. - - The report is written to the experiment evaluation directory. - - All *kwargs* will be passed to the AbsoluteReport class. If - the keyword argument *attributes* is not specified, a - default list of attributes is used. :: - - exp.add_absolute_report_step(attributes=["coverage"]) - - """ - kwargs.setdefault("attributes", self.DEFAULT_TABLE_ATTRIBUTES) - report = AbsoluteReport(**kwargs) - outfile = os.path.join(self.eval_dir, - get_experiment_name() + "." + - report.output_format) - self.add_report(report, outfile=outfile) - self.add_step(Step('publish-absolute-report', - subprocess.call, - ['publish', outfile])) - - def add_comparison_table_step(self, **kwargs): - """Add a step that makes pairwise revision comparisons. 
- - Create comparative reports for all pairs of Fast Downward - revisions. Each report pairs up the runs of the same config and - lists the two absolute attribute values and their difference - for all attributes in kwargs["attributes"]. - - All *kwargs* will be passed to the CompareConfigsReport class. - If the keyword argument *attributes* is not specified, a - default list of attributes is used. :: - - exp.add_comparison_table_step(attributes=["coverage"]) - - """ - kwargs.setdefault("attributes", self.DEFAULT_TABLE_ATTRIBUTES) - - def make_comparison_tables(): - for rev1, rev2 in itertools.combinations(self._revisions, 2): - compared_configs = [] - for config in self._configs: - config_nick = config.nick - compared_configs.append( - ("%s-%s" % (rev1, config_nick), - "%s-%s" % (rev2, config_nick), - "Diff (%s)" % config_nick)) - report = CompareConfigsReport(compared_configs, **kwargs) - outfile = os.path.join( - self.eval_dir, - "%s-%s-%s-compare" % (self.name, rev1, rev2) - + "." + report.output_format) - report(self.eval_dir, outfile) - - def publish_comparison_tables(): - for rev1, rev2 in itertools.combinations(self._revisions, 2): - outfile = os.path.join( - self.eval_dir, - "%s-%s-%s-compare" % (self.name, rev1, rev2) - + ".html") - subprocess.call(['publish', outfile]) - - self.add_step(Step("make-comparison-tables", make_comparison_tables)) - self.add_step(Step("publish-comparison-tables", publish_comparison_tables)) - - def add_scatter_plot_step(self, attributes=None): - """Add a step that creates scatter plots for all revision pairs. - - Create a scatter plot for each combination of attribute, - configuration and revisions pair. If *attributes* is not - specified, a list of common scatter plot attributes is used. - For portfolios all attributes except "cost", "coverage" and - "plan_length" will be ignored. 
:: - - exp.add_scatter_plot_step(attributes=["expansions"]) - - """ - if attributes is None: - attributes = self.DEFAULT_SCATTER_PLOT_ATTRIBUTES - scatter_dir = os.path.join(self.eval_dir, "scatter") - - def make_scatter_plot(config_nick, rev1, rev2, attribute): - name = "-".join([self.name, rev1, rev2, attribute, config_nick]) - print "Make scatter plot for", name - algo1 = "%s-%s" % (rev1, config_nick) - algo2 = "%s-%s" % (rev2, config_nick) - report = ScatterPlotReport( - filter_config=[algo1, algo2], - attributes=[attribute], - get_category=lambda run1, run2: run1["domain"], - legend_location=(1.3, 0.5)) - report( - self.eval_dir, - os.path.join(scatter_dir, rev1 + "-" + rev2, name)) - - def make_scatter_plots(): - for config in self._configs: - for rev1, rev2 in itertools.combinations(self._revisions, 2): - for attribute in self.get_supported_attributes( - config.nick, attributes): - make_scatter_plot(config.nick, rev1, rev2, attribute) - - self.add_step(Step("make-scatter-plots", make_scatter_plots)) diff --git a/experiments/issue650/relativescatter.py b/experiments/issue650/relativescatter.py deleted file mode 100644 index 68e2529f34..0000000000 --- a/experiments/issue650/relativescatter.py +++ /dev/null @@ -1,106 +0,0 @@ -# -*- coding: utf-8 -*- - -from collections import defaultdict - -from matplotlib import ticker - -from downward.reports.scatter import ScatterPlotReport -from downward.reports.plot import PlotReport, Matplotlib, MatplotlibPlot - - -# TODO: handle outliers - -# TODO: this is mostly copied from ScatterMatplotlib (scatter.py) -class RelativeScatterMatplotlib(Matplotlib): - @classmethod - def _plot(cls, report, axes, categories, styles): - # Display grid - axes.grid(b=True, linestyle='-', color='0.75') - - has_points = False - # Generate the scatter plots - for category, coords in sorted(categories.items()): - X, Y = zip(*coords) - axes.scatter(X, Y, s=42, label=category, **styles[category]) - if X and Y: - has_points = True - - if 
report.xscale == 'linear' or report.yscale == 'linear': - plot_size = report.missing_val * 1.01 - else: - plot_size = report.missing_val * 1.25 - - # make 5 ticks above and below 1 - yticks = [] - tick_step = report.ylim_top**(1/5.0) - for i in xrange(-5, 6): - yticks.append(tick_step**i) - axes.set_yticks(yticks) - axes.get_yaxis().set_major_formatter(ticker.ScalarFormatter()) - - axes.set_xlim(report.xlim_left or -1, report.xlim_right or plot_size) - axes.set_ylim(report.ylim_bottom or -1, report.ylim_top or plot_size) - - for axis in [axes.xaxis, axes.yaxis]: - MatplotlibPlot.change_axis_formatter(axis, - report.missing_val if report.show_missing else None) - return has_points - - - -class RelativeScatterPlotReport(ScatterPlotReport): - """ - Generate a scatter plot that shows how a specific attribute in two - configurations. The attribute value in config 1 is shown on the - x-axis and the relation to the value in config 2 on the y-axis. - """ - - def __init__(self, show_missing=True, get_category=None, **kwargs): - ScatterPlotReport.__init__(self, show_missing, get_category, **kwargs) - if self.output_format == 'tex': - raise "not supported" - else: - self.writer = RelativeScatterMatplotlib - - def _fill_categories(self, runs): - # We discard the *runs* parameter. 
- # Map category names to value tuples - categories = defaultdict(list) - self.ylim_bottom = 2 - self.ylim_top = 0.5 - self.xlim_left = float("inf") - for (domain, problem), runs in self.problem_runs.items(): - if len(runs) != 2: - continue - run1, run2 = runs - assert (run1['config'] == self.configs[0] and - run2['config'] == self.configs[1]) - val1 = run1.get(self.attribute) - val2 = run2.get(self.attribute) - if val1 is None or val2 is None: - continue - category = self.get_category(run1, run2) - assert val1 > 0, (domain, problem, self.configs[0], val1) - assert val2 > 0, (domain, problem, self.configs[1], val2) - x = val1 - y = val2 / float(val1) - - categories[category].append((x, y)) - - self.ylim_top = max(self.ylim_top, y) - self.ylim_bottom = min(self.ylim_bottom, y) - self.xlim_left = min(self.xlim_left, x) - - # center around 1 - if self.ylim_bottom < 1: - self.ylim_top = max(self.ylim_top, 1 / float(self.ylim_bottom)) - if self.ylim_top > 1: - self.ylim_bottom = min(self.ylim_bottom, 1 / float(self.ylim_top)) - return categories - - def _set_scales(self, xscale, yscale): - # ScatterPlots use log-scaling on the x-axis by default. - default_xscale = 'log' - if self.attribute and self.attribute in self.LINEAR: - default_xscale = 'linear' - PlotReport._set_scales(self, xscale or default_xscale, 'log') diff --git a/experiments/issue650/suites.py b/experiments/issue650/suites.py deleted file mode 100644 index ec030b6d4c..0000000000 --- a/experiments/issue650/suites.py +++ /dev/null @@ -1,315 +0,0 @@ -# Benchmark suites from the Fast Downward benchmark collection. 
- -def suite_alternative_formulations(): - return ['airport-adl', 'no-mprime', 'no-mystery'] - - -def suite_ipc98_to_ipc04_adl(): - return [ - 'assembly', 'miconic-fulladl', 'miconic-simpleadl', - 'optical-telegraphs', 'philosophers', 'psr-large', - 'psr-middle', 'schedule', - ] - - -def suite_ipc98_to_ipc04_strips(): - return [ - 'airport', 'blocks', 'depot', 'driverlog', 'freecell', 'grid', - 'gripper', 'logistics00', 'logistics98', 'miconic', 'movie', - 'mprime', 'mystery', 'pipesworld-notankage', 'psr-small', - 'satellite', 'zenotravel', - ] - - -def suite_ipc98_to_ipc04(): - # All IPC1-4 domains, including the trivial Movie. - return sorted(suite_ipc98_to_ipc04_adl() + suite_ipc98_to_ipc04_strips()) - - -def suite_ipc06_adl(): - return [ - 'openstacks', - 'pathways', - 'trucks', - ] - - -def suite_ipc06_strips_compilations(): - return [ - 'openstacks-strips', - 'pathways-noneg', - 'trucks-strips', - ] - - -def suite_ipc06_strips(): - return [ - 'pipesworld-tankage', - 'rovers', - 'storage', - 'tpp', - ] - - -def suite_ipc06(): - return sorted(suite_ipc06_adl() + suite_ipc06_strips()) - - -def suite_ipc08_common_strips(): - return [ - 'parcprinter-08-strips', - 'pegsol-08-strips', - 'scanalyzer-08-strips', - ] - - -def suite_ipc08_opt_adl(): - return ['openstacks-opt08-adl'] - - -def suite_ipc08_opt_strips(): - return sorted(suite_ipc08_common_strips() + [ - 'elevators-opt08-strips', - 'openstacks-opt08-strips', - 'sokoban-opt08-strips', - 'transport-opt08-strips', - 'woodworking-opt08-strips', - ]) - - -def suite_ipc08_opt(): - return sorted(suite_ipc08_opt_strips() + suite_ipc08_opt_adl()) - - -def suite_ipc08_sat_adl(): - return ['openstacks-sat08-adl'] - - -def suite_ipc08_sat_strips(): - return sorted(suite_ipc08_common_strips() + [ - # Note: cyber-security is missing. 
- 'elevators-sat08-strips', - 'openstacks-sat08-strips', - 'sokoban-sat08-strips', - 'transport-sat08-strips', - 'woodworking-sat08-strips', - ]) - - -def suite_ipc08_sat(): - return sorted(suite_ipc08_sat_strips() + suite_ipc08_sat_adl()) - - -def suite_ipc08(): - return sorted(set(suite_ipc08_opt() + suite_ipc08_sat())) - - -def suite_ipc11_opt(): - return [ - 'barman-opt11-strips', - 'elevators-opt11-strips', - 'floortile-opt11-strips', - 'nomystery-opt11-strips', - 'openstacks-opt11-strips', - 'parcprinter-opt11-strips', - 'parking-opt11-strips', - 'pegsol-opt11-strips', - 'scanalyzer-opt11-strips', - 'sokoban-opt11-strips', - 'tidybot-opt11-strips', - 'transport-opt11-strips', - 'visitall-opt11-strips', - 'woodworking-opt11-strips', - ] - - -def suite_ipc11_sat(): - return [ - 'barman-sat11-strips', - 'elevators-sat11-strips', - 'floortile-sat11-strips', - 'nomystery-sat11-strips', - 'openstacks-sat11-strips', - 'parcprinter-sat11-strips', - 'parking-sat11-strips', - 'pegsol-sat11-strips', - 'scanalyzer-sat11-strips', - 'sokoban-sat11-strips', - 'tidybot-sat11-strips', - 'transport-sat11-strips', - 'visitall-sat11-strips', - 'woodworking-sat11-strips', - ] - - -def suite_ipc11(): - return sorted(suite_ipc11_opt() + suite_ipc11_sat()) - - -def suite_ipc14_agl_adl(): - return [ - 'cavediving-agl14-adl', - 'citycar-agl14-adl', - 'maintenance-agl14-adl', - ] - - -def suite_ipc14_agl_strips(): - return [ - 'barman-agl14-strips', - 'childsnack-agl14-strips', - 'floortile-agl14-strips', - 'ged-agl14-strips', - 'hiking-agl14-strips', - 'openstacks-agl14-strips', - 'parking-agl14-strips', - 'tetris-agl14-strips', - 'thoughtful-agl14-strips', - 'transport-agl14-strips', - 'visitall-agl14-strips', - ] - - -def suite_ipc14_agl(): - return sorted(suite_ipc14_agl_adl() + suite_ipc14_agl_strips()) - - -def suite_ipc14_mco_adl(): - return [ - 'cavediving-mco14-adl', - 'citycar-mco14-adl', - 'maintenance-mco14-adl', - ] - - -def suite_ipc14_mco_strips(): - return [ - 
'barman-mco14-strips', - 'childsnack-mco14-strips', - 'floortile-mco14-strips', - 'ged-mco14-strips', - 'hiking-mco14-strips', - 'openstacks-mco14-strips', - 'parking-mco14-strips', - 'tetris-mco14-strips', - 'thoughtful-mco14-strips', - 'transport-mco14-strips', - 'visitall-mco14-strips', - ] - - -def suite_ipc14_mco(): - return sorted(suite_ipc14_mco_adl() + suite_ipc14_mco_strips()) - - -def suite_ipc14_opt_adl(): - return [ - 'cavediving-opt14-adl', - 'citycar-opt14-adl', - 'maintenance-opt14-adl', - ] - - -def suite_ipc14_opt_strips(): - return [ - 'barman-opt14-strips', - 'childsnack-opt14-strips', - 'floortile-opt14-strips', - 'ged-opt14-strips', - 'hiking-opt14-strips', - 'openstacks-opt14-strips', - 'parking-opt14-strips', - 'tetris-opt14-strips', - 'tidybot-opt14-strips', - 'transport-opt14-strips', - 'visitall-opt14-strips', - ] - - -def suite_ipc14_opt(): - return sorted(suite_ipc14_opt_adl() + suite_ipc14_opt_strips()) - - -def suite_ipc14_sat_adl(): - return [ - 'cavediving-sat14-adl', - 'citycar-sat14-adl', - 'maintenance-sat14-adl', - ] - - -def suite_ipc14_sat_strips(): - return [ - 'barman-sat14-strips', - 'childsnack-sat14-strips', - 'floortile-sat14-strips', - 'ged-sat14-strips', - 'hiking-sat14-strips', - 'openstacks-sat14-strips', - 'parking-sat14-strips', - 'tetris-sat14-strips', - 'thoughtful-sat14-strips', - 'transport-sat14-strips', - 'visitall-sat14-strips', - ] - - -def suite_ipc14_sat(): - return sorted(suite_ipc14_sat_adl() + suite_ipc14_sat_strips()) - - -def suite_ipc14(): - return sorted( - suite_ipc14_agl() + suite_ipc14_mco() + - suite_ipc14_opt() + suite_ipc14_sat()) - - -def suite_unsolvable(): - # TODO: Add other unsolvable problems (Miconic-FullADL). - # TODO: Add 'fsc-grid-r:prize5x5_R.pddl' and 't0-uts:uts_r-02.pddl' - # if the extra-domains branch is merged. 
- return sorted( - ['mystery:prob%02d.pddl' % index - for index in [4, 5, 7, 8, 12, 16, 18, 21, 22, 23, 24]] + - ['miconic-fulladl:f21-3.pddl', 'miconic-fulladl:f30-2.pddl']) - - -def suite_optimal_adl(): - return sorted( - suite_ipc98_to_ipc04_adl() + suite_ipc06_adl() + - suite_ipc08_opt_adl()) - - -def suite_optimal_strips(): - return sorted( - suite_ipc98_to_ipc04_strips() + suite_ipc06_strips() + - suite_ipc06_strips_compilations() + suite_ipc08_opt_strips() + - suite_ipc11_opt()) - - -def suite_optimal(): - return sorted(suite_optimal_adl() + suite_optimal_strips()) - - -def suite_satisficing_adl(): - return sorted( - suite_ipc98_to_ipc04_adl() + suite_ipc06_adl() + - suite_ipc08_sat_adl()) - - -def suite_satisficing_strips(): - return sorted( - suite_ipc98_to_ipc04_strips() + suite_ipc06_strips() + - suite_ipc06_strips_compilations() + suite_ipc08_sat_strips() + - suite_ipc11_sat()) - - -def suite_satisficing(): - return sorted(suite_satisficing_adl() + suite_satisficing_strips()) - - -def suite_all(): - return sorted( - suite_ipc98_to_ipc04() + suite_ipc06() + - suite_ipc06_strips_compilations() + suite_ipc08() + - suite_ipc11() + suite_alternative_formulations()) diff --git a/experiments/issue650/v1.py b/experiments/issue650/v1.py deleted file mode 100755 index e7856b0c0a..0000000000 --- a/experiments/issue650/v1.py +++ /dev/null @@ -1,40 +0,0 @@ -#! 
/usr/bin/env python -# -*- coding: utf-8 -*- - -from common_setup import IssueConfig, IssueExperiment -from relativescatter import RelativeScatterPlotReport -import suites - - -configs = [ - IssueConfig( - "cegar-landmarks-10k", - ["--search", "astar(cegar(subtasks=[landmarks()],max_states=10000))"]), - IssueConfig( - "cegar-landmarks-goals-900s", - ["--search", "astar(cegar(subtasks=[landmarks(),goals()],max_time=900))"]), -] -revisions = ["issue650-base", "issue650-v1"] - -exp = IssueExperiment( - revisions=revisions, - configs=configs, - suite=suites.suite_optimal_strips(), - test_suite=["depot:pfile1"], - email="jendrik.seipp@unibas.ch", -) - -exp.add_comparison_table_step() - -for attribute in ["memory", "total_time"]: - for config in configs: - exp.add_report( - RelativeScatterPlotReport( - attributes=[attribute], - filter_config=["{}-{}".format(rev, config.nick) for rev in revisions], - get_category=lambda run1, run2: run1.get("domain"), - ), - outfile="{}-{}-{}.png".format(exp.name, attribute, config.nick) - ) - -exp() diff --git a/experiments/issue650/v2.py b/experiments/issue650/v2.py deleted file mode 100755 index 40cc56e66b..0000000000 --- a/experiments/issue650/v2.py +++ /dev/null @@ -1,43 +0,0 @@ -#! 
/usr/bin/env python -# -*- coding: utf-8 -*- - -from common_setup import IssueConfig, IssueExperiment -from relativescatter import RelativeScatterPlotReport -import suites - - -configs = [ - IssueConfig( - "cegar-landmarks-10k", - ["--search", "astar(cegar(subtasks=[landmarks()],max_states=10000))"]), - IssueConfig( - "cegar-landmarks-goals-900s-debug", - ["--search", "astar(cegar(subtasks=[landmarks(),goals()],max_time=900))"], - build_options=["--debug"], - driver_options=["--debug"], - ), -] -revisions = ["issue650-base", "issue650-v2"] - -exp = IssueExperiment( - revisions=revisions, - configs=configs, - suite=suites.suite_optimal_strips(), - test_suite=["depot:pfile1"], - email="jendrik.seipp@unibas.ch", -) - -exp.add_comparison_table_step() - -for attribute in ["memory", "total_time"]: - for config in configs: - exp.add_report( - RelativeScatterPlotReport( - attributes=[attribute], - filter_config=["{}-{}".format(rev, config.nick) for rev in revisions], - get_category=lambda run1, run2: run1.get("domain"), - ), - outfile="{}-{}-{}.png".format(exp.name, attribute, config.nick) - ) - -exp() diff --git a/experiments/issue655/base-v1.py b/experiments/issue655/base-v1.py deleted file mode 100755 index aaf4010a56..0000000000 --- a/experiments/issue655/base-v1.py +++ /dev/null @@ -1,94 +0,0 @@ -#! 
/usr/bin/env python -# -*- coding: utf-8 -*- - -import os -import suites -from lab.reports import Attribute, gm - -from common_setup import IssueConfig, IssueExperiment -try: - from relativescatter import RelativeScatterPlotReport - matplotlib = True -except ImportError: - print 'matplotlib not availabe, scatter plots not available' - matplotlib = False - -def main(revisions=None): - benchmarks_dir=os.path.expanduser('~/repos/downward/benchmarks') - suite=suites.suite_optimal_strips() - - # dummy configs with correct names so that comparison report works - configs = { - IssueConfig('rl-b50k', []), - IssueConfig('cggl-b50k', []), - IssueConfig('dfp-b50k', []), - IssueConfig('rl-ginf', []), - IssueConfig('cggl-ginf', []), - IssueConfig('dfp-ginf', []), - IssueConfig('rl-f50k', []), - IssueConfig('cggl-f50k', []), - IssueConfig('dfp-f50k', []), - } - - exp = IssueExperiment( - benchmarks_dir=benchmarks_dir, - suite=suite, - revisions=revisions, - configs=configs, - test_suite=['depot:p01.pddl'], - processes=4, - email='silvan.sievers@unibas.ch', - ) - exp.add_resource('ms_parser', 'ms-parser.py', dest='ms-parser.py') - exp.add_command('ms-parser', ['ms_parser']) - - # planner outcome attributes - perfect_heuristic = Attribute('perfect_heuristic', absolute=True, min_wins=False) - proved_unsolvability = Attribute('proved_unsolvability', absolute=True, min_wins=False) - actual_search_time = Attribute('actual_search_time', absolute=False, min_wins=True, functions=[gm]) - - # m&s attributes - ms_construction_time = Attribute('ms_construction_time', absolute=False, min_wins=True, functions=[gm]) - ms_abstraction_constructed = Attribute('ms_abstraction_constructed', absolute=True, min_wins=False) - ms_final_size = Attribute('ms_final_size', absolute=False, min_wins=True) - ms_out_of_memory = Attribute('ms_out_of_memory', absolute=True, min_wins=True) - ms_out_of_time = Attribute('ms_out_of_time', absolute=True, min_wins=True) - search_out_of_memory = 
Attribute('search_out_of_memory', absolute=True, min_wins=True) - search_out_of_time = Attribute('search_out_of_time', absolute=True, min_wins=True) - - extra_attributes = [ - perfect_heuristic, - proved_unsolvability, - actual_search_time, - - ms_construction_time, - ms_abstraction_constructed, - ms_final_size, - ms_out_of_memory, - ms_out_of_time, - search_out_of_memory, - search_out_of_time, - ] - attributes = exp.DEFAULT_TABLE_ATTRIBUTES - attributes.extend(extra_attributes) - - exp.add_fetcher('data/issue655-base-eval') - exp.add_fetcher('data/issue655-v1-eval') - - exp.add_comparison_table_step() - - if matplotlib: - for attribute in ["memory", "total_time"]: - for config in configs: - exp.add_report( - RelativeScatterPlotReport( - attributes=[attribute], - filter_config=["{}-{}".format(rev, config.nick) for rev in revisions], - get_category=lambda run1, run2: run1.get("domain"), - ), - outfile="{}-{}-{}.png".format(exp.name, attribute, config.nick) - ) - - exp() - -main(revisions=['issue655-base', 'issue655-v1']) diff --git a/experiments/issue655/base.py b/experiments/issue655/base.py deleted file mode 100755 index cc3988d047..0000000000 --- a/experiments/issue655/base.py +++ /dev/null @@ -1,90 +0,0 @@ -#! 
/usr/bin/env python -# -*- coding: utf-8 -*- - -import os -import suites -from lab.reports import Attribute, gm - -from common_setup import IssueConfig, IssueExperiment -try: - from relativescatter import RelativeScatterPlotReport - matplotlib = True -except ImportError: - print 'matplotlib not availabe, scatter plots not available' - matplotlib = False - -def main(revisions=None): - benchmarks_dir=os.path.expanduser('~/repos/downward/benchmarks') - suite=suites.suite_optimal_strips() - - configs = { - IssueConfig('rl-b50k', ['--search', 'astar(merge_and_shrink(merge_strategy=merge_linear(variable_order=reverse_level),shrink_strategy=shrink_bisimulation(max_states=50000,threshold=1,greedy=false),label_reduction=exact(before_shrinking=true,before_merging=false)))']), - IssueConfig('cggl-b50k', ['--search', 'astar(merge_and_shrink(merge_strategy=merge_linear(variable_order=cg_goal_level),shrink_strategy=shrink_bisimulation(max_states=50000,threshold=1,greedy=false),label_reduction=exact(before_shrinking=true,before_merging=false)))']), - IssueConfig('dfp-b50k', ['--search', 'astar(merge_and_shrink(merge_strategy=merge_dfp,shrink_strategy=shrink_bisimulation(max_states=50000,threshold=1,greedy=false),label_reduction=exact(before_shrinking=true,before_merging=false)))']), - IssueConfig('rl-ginf', ['--search', 'astar(merge_and_shrink(merge_strategy=merge_linear(variable_order=reverse_level),shrink_strategy=shrink_bisimulation(max_states=infinity,threshold=1,greedy=true),label_reduction=exact(before_shrinking=true,before_merging=false)))']), - IssueConfig('cggl-ginf', ['--search', 'astar(merge_and_shrink(merge_strategy=merge_linear(variable_order=cg_goal_level),shrink_strategy=shrink_bisimulation(max_states=infinity,threshold=1,greedy=true),label_reduction=exact(before_shrinking=true,before_merging=false)))']), - IssueConfig('dfp-ginf', ['--search', 
'astar(merge_and_shrink(merge_strategy=merge_dfp,shrink_strategy=shrink_bisimulation(max_states=infinity,threshold=1,greedy=true),label_reduction=exact(before_shrinking=true,before_merging=false)))']), - IssueConfig('rl-f50k', ['--search', 'astar(merge_and_shrink(merge_strategy=merge_linear(variable_order=reverse_level),shrink_strategy=shrink_fh(max_states=50000),label_reduction=exact(before_shrinking=false,before_merging=true)))']), - IssueConfig('cggl-f50k', ['--search', 'astar(merge_and_shrink(merge_strategy=merge_linear(variable_order=cg_goal_level),shrink_strategy=shrink_fh(max_states=50000),label_reduction=exact(before_shrinking=false,before_merging=true)))']), - IssueConfig('dfp-f50k', ['--search', 'astar(merge_and_shrink(merge_strategy=merge_dfp,shrink_strategy=shrink_fh(max_states=50000),label_reduction=exact(before_shrinking=false,before_merging=true)))']), - } - - exp = IssueExperiment( - benchmarks_dir=benchmarks_dir, - suite=suite, - revisions=revisions, - configs=configs, - test_suite=['depot:p01.pddl'], - processes=4, - email='silvan.sievers@unibas.ch', - ) - exp.add_resource('ms_parser', 'ms-parser.py', dest='ms-parser.py') - exp.add_command('ms-parser', ['ms_parser']) - - # planner outcome attributes - perfect_heuristic = Attribute('perfect_heuristic', absolute=True, min_wins=False) - proved_unsolvability = Attribute('proved_unsolvability', absolute=True, min_wins=False) - actual_search_time = Attribute('actual_search_time', absolute=False, min_wins=True, functions=[gm]) - - # m&s attributes - ms_construction_time = Attribute('ms_construction_time', absolute=False, min_wins=True, functions=[gm]) - ms_abstraction_constructed = Attribute('ms_abstraction_constructed', absolute=True, min_wins=False) - ms_final_size = Attribute('ms_final_size', absolute=False, min_wins=True) - ms_out_of_memory = Attribute('ms_out_of_memory', absolute=True, min_wins=True) - ms_out_of_time = Attribute('ms_out_of_time', absolute=True, min_wins=True) - search_out_of_memory 
= Attribute('search_out_of_memory', absolute=True, min_wins=True) - search_out_of_time = Attribute('search_out_of_time', absolute=True, min_wins=True) - - extra_attributes = [ - perfect_heuristic, - proved_unsolvability, - actual_search_time, - - ms_construction_time, - ms_abstraction_constructed, - ms_final_size, - ms_out_of_memory, - ms_out_of_time, - search_out_of_memory, - search_out_of_time, - ] - attributes = exp.DEFAULT_TABLE_ATTRIBUTES - attributes.extend(extra_attributes) - - #exp.add_comparison_table_step() - - #if matplotlib: - #for attribute in ["memory", "total_time"]: - #for config in configs: - #exp.add_report( - #RelativeScatterPlotReport( - #attributes=[attribute], - #filter_config=["{}-{}".format(rev, config.nick) for rev in revisions], - #get_category=lambda run1, run2: run1.get("domain"), - #), - #outfile="{}-{}-{}.png".format(exp.name, attribute, config.nick) - #) - - exp() - -main(revisions=['issue655-base']) diff --git a/experiments/issue655/common_setup.py b/experiments/issue655/common_setup.py deleted file mode 100644 index 934531f15d..0000000000 --- a/experiments/issue655/common_setup.py +++ /dev/null @@ -1,357 +0,0 @@ -# -*- coding: utf-8 -*- - -import itertools -import os -import platform -import subprocess -import sys - -from lab.environments import LocalEnvironment, MaiaEnvironment -from lab.experiment import ARGPARSER -from lab.steps import Step -from lab import tools - -from downward.experiment import FastDownwardExperiment -from downward.reports.absolute import AbsoluteReport -from downward.reports.compare import CompareConfigsReport -from downward.reports.scatter import ScatterPlotReport - - -def parse_args(): - ARGPARSER.add_argument( - "--test", - choices=["yes", "no", "auto"], - default="auto", - dest="test_run", - help="test experiment locally on a small suite if --test=yes or " - "--test=auto and we are not on a cluster") - return ARGPARSER.parse_args() - -ARGS = parse_args() - - -def get_script(): - """Get file name of main 
script.""" - return tools.get_script_path() - - -def get_script_dir(): - """Get directory of main script. - - Usually a relative directory (depends on how it was called by the user.)""" - return os.path.dirname(get_script()) - - -def get_experiment_name(): - """Get name for experiment. - - Derived from the absolute filename of the main script, e.g. - "/ham/spam/eggs.py" => "spam-eggs".""" - script = os.path.abspath(get_script()) - script_dir = os.path.basename(os.path.dirname(script)) - script_base = os.path.splitext(os.path.basename(script))[0] - return "%s-%s" % (script_dir, script_base) - - -def get_data_dir(): - """Get data dir for the experiment. - - This is the subdirectory "data" of the directory containing - the main script.""" - return os.path.join(get_script_dir(), "data", get_experiment_name()) - - -def get_repo_base(): - """Get base directory of the repository, as an absolute path. - - Search upwards in the directory tree from the main script until a - directory with a subdirectory named ".hg" is found. - - Abort if the repo base cannot be found.""" - path = os.path.abspath(get_script_dir()) - while os.path.dirname(path) != path: - if os.path.exists(os.path.join(path, ".hg")): - return path - path = os.path.dirname(path) - sys.exit("repo base could not be found") - - -def is_running_on_cluster(): - node = platform.node() - return ( - "cluster" in node or - node.startswith("gkigrid") or - node in ["habakuk", "turtur"]) - - -def is_test_run(): - return ARGS.test_run == "yes" or ( - ARGS.test_run == "auto" and not is_running_on_cluster()) - - -def get_algo_nick(revision, config_nick): - return "{revision}-{config_nick}".format(**locals()) - - -class IssueConfig(object): - """Hold information about a planner configuration. - - See FastDownwardExperiment.add_algorithm() for documentation of the - constructor's options. 
- - """ - def __init__(self, nick, component_options, - build_options=None, driver_options=None): - self.nick = nick - self.component_options = component_options - self.build_options = build_options - self.driver_options = driver_options - - -class IssueExperiment(FastDownwardExperiment): - """Subclass of FastDownwardExperiment with some convenience features.""" - - DEFAULT_TEST_SUITE = "gripper:prob01.pddl" - - DEFAULT_TABLE_ATTRIBUTES = [ - "cost", - "coverage", - "error", - "evaluations", - "expansions", - "expansions_until_last_jump", - "generated", - "memory", - "quality", - "run_dir", - "score_evaluations", - "score_expansions", - "score_generated", - "score_memory", - "score_search_time", - "score_total_time", - "search_time", - "total_time", - ] - - DEFAULT_SCATTER_PLOT_ATTRIBUTES = [ - "evaluations", - "expansions", - "expansions_until_last_jump", - "initial_h_value", - "memory", - "search_time", - "total_time", - ] - - PORTFOLIO_ATTRIBUTES = [ - "cost", - "coverage", - "error", - "plan_length", - "run_dir", - ] - - def __init__(self, benchmarks_dir, suite, revisions=[], configs={}, - grid_priority=None, path=None, test_suite=None, - email=None, processes=None, - **kwargs): - """ - If *revisions* is specified, it should be a non-empty - list of revisions, which specify which planner versions to use - in the experiment. The same versions are used for translator, - preprocessor and search. :: - - IssueExperiment(revisions=["issue123", "4b3d581643"], ...) - - *configs* must be a non-empty list of IssueConfig objects. :: - - IssueExperiment(..., configs=[ - IssueConfig("ff", ["--search", "eager_greedy(ff())"]), - IssueConfig( - "lama", [], - driver_options=["--alias", "seq-sat-lama-2011"]), - ]) - - *suite* sets the benchmarks for the experiment. It must be a - single string or a list of strings specifying domains or - tasks. The downward.suites module has many predefined - suites. 
:: - - IssueExperiment(..., suite=["grid", "gripper:prob01.pddl"]) - - from downward import suites - IssueExperiment(..., suite=suites.suite_all()) - IssueExperiment(..., suite=suites.suite_satisficing_with_ipc11()) - IssueExperiment(..., suite=suites.suite_optimal()) - - Use *grid_priority* to set the job priority for cluster - experiments. It must be in the range [-1023, 0] where 0 is the - highest priority. By default the priority is 0. :: - - IssueExperiment(..., grid_priority=-500) - - If *path* is specified, it must be the path to where the - experiment should be built (e.g. - /home/john/experiments/issue123/exp01/). If omitted, the - experiment path is derived automatically from the main - script's filename. Example:: - - script = experiments/issue123/exp01.py --> - path = experiments/issue123/data/issue123-exp01/ - - Specify *test_suite* to set the benchmarks for experiment test - runs. By default the first gripper task is used. - - IssueExperiment(..., test_suite=["depot:pfile1", "tpp:p01.pddl"]) - - If *email* is specified, it should be an email address. This - email address will be notified upon completion of the experiments - if it is run on the cluster. 
- """ - - if is_test_run(): - kwargs["environment"] = LocalEnvironment(processes=processes) - suite = test_suite or self.DEFAULT_TEST_SUITE - elif "environment" not in kwargs: - kwargs["environment"] = MaiaEnvironment( - priority=grid_priority, email=email) - - path = path or get_data_dir() - - FastDownwardExperiment.__init__(self, path=path, **kwargs) - - repo = get_repo_base() - for rev in revisions: - for config in configs: - self.add_algorithm( - get_algo_nick(rev, config.nick), - repo, - rev, - config.component_options, - build_options=config.build_options, - driver_options=config.driver_options) - - self.add_suite(benchmarks_dir, suite) - - self._revisions = revisions - self._configs = configs - - @classmethod - def _is_portfolio(cls, config_nick): - return "fdss" in config_nick - - @classmethod - def get_supported_attributes(cls, config_nick, attributes): - if cls._is_portfolio(config_nick): - return [attr for attr in attributes - if attr in cls.PORTFOLIO_ATTRIBUTES] - return attributes - - def add_absolute_report_step(self, **kwargs): - """Add step that makes an absolute report. - - Absolute reports are useful for experiments that don't - compare revisions. - - The report is written to the experiment evaluation directory. - - All *kwargs* will be passed to the AbsoluteReport class. If - the keyword argument *attributes* is not specified, a - default list of attributes is used. :: - - exp.add_absolute_report_step(attributes=["coverage"]) - - """ - kwargs.setdefault("attributes", self.DEFAULT_TABLE_ATTRIBUTES) - report = AbsoluteReport(**kwargs) - outfile = os.path.join(self.eval_dir, - get_experiment_name() + "." + - report.output_format) - self.add_report(report, outfile=outfile) - self.add_step(Step('publish-absolute-report', - subprocess.call, - ['publish', outfile])) - - def add_comparison_table_step(self, **kwargs): - """Add a step that makes pairwise revision comparisons. - - Create comparative reports for all pairs of Fast Downward - revisions. 
Each report pairs up the runs of the same config and - lists the two absolute attribute values and their difference - for all attributes in kwargs["attributes"]. - - All *kwargs* will be passed to the CompareConfigsReport class. - If the keyword argument *attributes* is not specified, a - default list of attributes is used. :: - - exp.add_comparison_table_step(attributes=["coverage"]) - - """ - kwargs.setdefault("attributes", self.DEFAULT_TABLE_ATTRIBUTES) - - def make_comparison_tables(): - for rev1, rev2 in itertools.combinations(self._revisions, 2): - compared_configs = [] - for config in self._configs: - config_nick = config.nick - compared_configs.append( - ("%s-%s" % (rev1, config_nick), - "%s-%s" % (rev2, config_nick), - "Diff (%s)" % config_nick)) - report = CompareConfigsReport(compared_configs, **kwargs) - outfile = os.path.join( - self.eval_dir, - "%s-%s-%s-compare" % (self.name, rev1, rev2) - + "." + report.output_format) - report(self.eval_dir, outfile) - - def publish_comparison_tables(): - for rev1, rev2 in itertools.combinations(self._revisions, 2): - outfile = os.path.join( - self.eval_dir, - "%s-%s-%s-compare" % (self.name, rev1, rev2) - + ".html") - subprocess.call(['publish', outfile]) - - self.add_step(Step("make-comparison-tables", make_comparison_tables)) - self.add_step(Step("publish-comparison-tables", publish_comparison_tables)) - - def add_scatter_plot_step(self, attributes=None): - """Add a step that creates scatter plots for all revision pairs. - - Create a scatter plot for each combination of attribute, - configuration and revisions pair. If *attributes* is not - specified, a list of common scatter plot attributes is used. - For portfolios all attributes except "cost", "coverage" and - "plan_length" will be ignored. 
:: - - exp.add_scatter_plot_step(attributes=["expansions"]) - - """ - if attributes is None: - attributes = self.DEFAULT_SCATTER_PLOT_ATTRIBUTES - scatter_dir = os.path.join(self.eval_dir, "scatter") - - def make_scatter_plot(config_nick, rev1, rev2, attribute): - name = "-".join([self.name, rev1, rev2, attribute, config_nick]) - print "Make scatter plot for", name - algo1 = "%s-%s" % (rev1, config_nick) - algo2 = "%s-%s" % (rev2, config_nick) - report = ScatterPlotReport( - filter_config=[algo1, algo2], - attributes=[attribute], - get_category=lambda run1, run2: run1["domain"], - legend_location=(1.3, 0.5)) - report( - self.eval_dir, - os.path.join(scatter_dir, rev1 + "-" + rev2, name)) - - def make_scatter_plots(): - for config in self._configs: - for rev1, rev2 in itertools.combinations(self._revisions, 2): - for attribute in self.get_supported_attributes( - config.nick, attributes): - make_scatter_plot(config.nick, rev1, rev2, attribute) - - self.add_step(Step("make-scatter-plots", make_scatter_plots)) diff --git a/experiments/issue655/ms-parser.py b/experiments/issue655/ms-parser.py deleted file mode 100755 index c219b72ba5..0000000000 --- a/experiments/issue655/ms-parser.py +++ /dev/null @@ -1,72 +0,0 @@ -#! 
/usr/bin/env python - -from lab.parser import Parser - -parser = Parser() -parser.add_pattern('ms_final_size', 'Final transition system size: (\d+)', required=False, type=int) -parser.add_pattern('ms_construction_time', 'Done initializing merge-and-shrink heuristic \[(.+)s\]', required=False, type=float) -parser.add_pattern('ms_memory_delta', 'Final peak memory increase of merge-and-shrink computation: (\d+) KB', required=False, type=int) -parser.add_pattern('actual_search_time', 'Actual search time: (.+)s \[t=.+s\]', required=False, type=float) - -def check_ms_constructed(content, props): - ms_construction_time = props.get('ms_construction_time') - abstraction_constructed = False - if ms_construction_time is not None: - abstraction_constructed = True - props['ms_abstraction_constructed'] = abstraction_constructed - -parser.add_function(check_ms_constructed) - -def check_planner_exit_reason(content, props): - ms_abstraction_constructed = props.get('ms_abstraction_constructed') - error = props.get('error') - if error != 'none' and error != 'timeout' and error != 'out-of-memory': - print 'error: %s' % error - return - - # Check whether merge-and-shrink computation or search ran out of - # time or memory. 
- ms_out_of_time = False - ms_out_of_memory = False - search_out_of_time = False - search_out_of_memory = False - if ms_abstraction_constructed == False: - if error == 'timeout': - ms_out_of_time = True - elif error == 'out-of-memory': - ms_out_of_memory = True - elif ms_abstraction_constructed == True: - if error == 'timeout': - search_out_of_time = True - elif error == 'out-of-memory': - search_out_of_memory = True - props['ms_out_of_time'] = ms_out_of_time - props['ms_out_of_memory'] = ms_out_of_memory - props['search_out_of_time'] = search_out_of_time - props['search_out_of_memory'] = search_out_of_memory - -parser.add_function(check_planner_exit_reason) - -def check_perfect_heuristic(content, props): - plan_length = props.get('plan_length') - expansions = props.get('expansions') - if plan_length != None: - perfect_heuristic = False - if plan_length + 1 == expansions: - perfect_heuristic = True - props['perfect_heuristic'] = perfect_heuristic - -parser.add_function(check_perfect_heuristic) - -def check_proved_unsolvability(content, props): - proved_unsolvability = False - if props['coverage'] == 0: - for line in content.splitlines(): - if line == 'Completely explored state space -- no solution!': - proved_unsolvability = True - break - props['proved_unsolvability'] = proved_unsolvability - -parser.add_function(check_proved_unsolvability) - -parser.parse() diff --git a/experiments/issue655/relativescatter.py b/experiments/issue655/relativescatter.py deleted file mode 100644 index 68e2529f34..0000000000 --- a/experiments/issue655/relativescatter.py +++ /dev/null @@ -1,106 +0,0 @@ -# -*- coding: utf-8 -*- - -from collections import defaultdict - -from matplotlib import ticker - -from downward.reports.scatter import ScatterPlotReport -from downward.reports.plot import PlotReport, Matplotlib, MatplotlibPlot - - -# TODO: handle outliers - -# TODO: this is mostly copied from ScatterMatplotlib (scatter.py) -class RelativeScatterMatplotlib(Matplotlib): - @classmethod - 
def _plot(cls, report, axes, categories, styles): - # Display grid - axes.grid(b=True, linestyle='-', color='0.75') - - has_points = False - # Generate the scatter plots - for category, coords in sorted(categories.items()): - X, Y = zip(*coords) - axes.scatter(X, Y, s=42, label=category, **styles[category]) - if X and Y: - has_points = True - - if report.xscale == 'linear' or report.yscale == 'linear': - plot_size = report.missing_val * 1.01 - else: - plot_size = report.missing_val * 1.25 - - # make 5 ticks above and below 1 - yticks = [] - tick_step = report.ylim_top**(1/5.0) - for i in xrange(-5, 6): - yticks.append(tick_step**i) - axes.set_yticks(yticks) - axes.get_yaxis().set_major_formatter(ticker.ScalarFormatter()) - - axes.set_xlim(report.xlim_left or -1, report.xlim_right or plot_size) - axes.set_ylim(report.ylim_bottom or -1, report.ylim_top or plot_size) - - for axis in [axes.xaxis, axes.yaxis]: - MatplotlibPlot.change_axis_formatter(axis, - report.missing_val if report.show_missing else None) - return has_points - - - -class RelativeScatterPlotReport(ScatterPlotReport): - """ - Generate a scatter plot that shows how a specific attribute in two - configurations. The attribute value in config 1 is shown on the - x-axis and the relation to the value in config 2 on the y-axis. - """ - - def __init__(self, show_missing=True, get_category=None, **kwargs): - ScatterPlotReport.__init__(self, show_missing, get_category, **kwargs) - if self.output_format == 'tex': - raise "not supported" - else: - self.writer = RelativeScatterMatplotlib - - def _fill_categories(self, runs): - # We discard the *runs* parameter. 
- # Map category names to value tuples - categories = defaultdict(list) - self.ylim_bottom = 2 - self.ylim_top = 0.5 - self.xlim_left = float("inf") - for (domain, problem), runs in self.problem_runs.items(): - if len(runs) != 2: - continue - run1, run2 = runs - assert (run1['config'] == self.configs[0] and - run2['config'] == self.configs[1]) - val1 = run1.get(self.attribute) - val2 = run2.get(self.attribute) - if val1 is None or val2 is None: - continue - category = self.get_category(run1, run2) - assert val1 > 0, (domain, problem, self.configs[0], val1) - assert val2 > 0, (domain, problem, self.configs[1], val2) - x = val1 - y = val2 / float(val1) - - categories[category].append((x, y)) - - self.ylim_top = max(self.ylim_top, y) - self.ylim_bottom = min(self.ylim_bottom, y) - self.xlim_left = min(self.xlim_left, x) - - # center around 1 - if self.ylim_bottom < 1: - self.ylim_top = max(self.ylim_top, 1 / float(self.ylim_bottom)) - if self.ylim_top > 1: - self.ylim_bottom = min(self.ylim_bottom, 1 / float(self.ylim_top)) - return categories - - def _set_scales(self, xscale, yscale): - # ScatterPlots use log-scaling on the x-axis by default. - default_xscale = 'log' - if self.attribute and self.attribute in self.LINEAR: - default_xscale = 'linear' - PlotReport._set_scales(self, xscale or default_xscale, 'log') diff --git a/experiments/issue655/suites.py b/experiments/issue655/suites.py deleted file mode 100644 index 4615212cfd..0000000000 --- a/experiments/issue655/suites.py +++ /dev/null @@ -1,350 +0,0 @@ -#! /usr/bin/env python -# -*- coding: utf-8 -*- - -import argparse -import textwrap - - -HELP = "Convert suite name to list of domains or tasks." 
- - -def suite_alternative_formulations(): - return ['airport-adl', 'no-mprime', 'no-mystery'] - - -def suite_ipc98_to_ipc04_adl(): - return [ - 'assembly', 'miconic-fulladl', 'miconic-simpleadl', - 'optical-telegraphs', 'philosophers', 'psr-large', - 'psr-middle', 'schedule', - ] - - -def suite_ipc98_to_ipc04_strips(): - return [ - 'airport', 'blocks', 'depot', 'driverlog', 'freecell', 'grid', - 'gripper', 'logistics00', 'logistics98', 'miconic', 'movie', - 'mprime', 'mystery', 'pipesworld-notankage', 'psr-small', - 'satellite', 'zenotravel', - ] - - -def suite_ipc98_to_ipc04(): - # All IPC1-4 domains, including the trivial Movie. - return sorted(suite_ipc98_to_ipc04_adl() + suite_ipc98_to_ipc04_strips()) - - -def suite_ipc06_adl(): - return [ - 'openstacks', - 'pathways', - 'trucks', - ] - - -def suite_ipc06_strips_compilations(): - return [ - 'openstacks-strips', - 'pathways-noneg', - 'trucks-strips', - ] - - -def suite_ipc06_strips(): - return [ - 'pipesworld-tankage', - 'rovers', - 'storage', - 'tpp', - ] - - -def suite_ipc06(): - return sorted(suite_ipc06_adl() + suite_ipc06_strips()) - - -def suite_ipc08_common_strips(): - return [ - 'parcprinter-08-strips', - 'pegsol-08-strips', - 'scanalyzer-08-strips', - ] - - -def suite_ipc08_opt_adl(): - return ['openstacks-opt08-adl'] - - -def suite_ipc08_opt_strips(): - return sorted(suite_ipc08_common_strips() + [ - 'elevators-opt08-strips', - 'openstacks-opt08-strips', - 'sokoban-opt08-strips', - 'transport-opt08-strips', - 'woodworking-opt08-strips', - ]) - - -def suite_ipc08_opt(): - return sorted(suite_ipc08_opt_strips() + suite_ipc08_opt_adl()) - - -def suite_ipc08_sat_adl(): - return ['openstacks-sat08-adl'] - - -def suite_ipc08_sat_strips(): - return sorted(suite_ipc08_common_strips() + [ - # Note: cyber-security is missing. 
- 'elevators-sat08-strips', - 'openstacks-sat08-strips', - 'sokoban-sat08-strips', - 'transport-sat08-strips', - 'woodworking-sat08-strips', - ]) - - -def suite_ipc08_sat(): - return sorted(suite_ipc08_sat_strips() + suite_ipc08_sat_adl()) - - -def suite_ipc08(): - return sorted(set(suite_ipc08_opt() + suite_ipc08_sat())) - - -def suite_ipc11_opt(): - return [ - 'barman-opt11-strips', - 'elevators-opt11-strips', - 'floortile-opt11-strips', - 'nomystery-opt11-strips', - 'openstacks-opt11-strips', - 'parcprinter-opt11-strips', - 'parking-opt11-strips', - 'pegsol-opt11-strips', - 'scanalyzer-opt11-strips', - 'sokoban-opt11-strips', - 'tidybot-opt11-strips', - 'transport-opt11-strips', - 'visitall-opt11-strips', - 'woodworking-opt11-strips', - ] - - -def suite_ipc11_sat(): - return [ - 'barman-sat11-strips', - 'elevators-sat11-strips', - 'floortile-sat11-strips', - 'nomystery-sat11-strips', - 'openstacks-sat11-strips', - 'parcprinter-sat11-strips', - 'parking-sat11-strips', - 'pegsol-sat11-strips', - 'scanalyzer-sat11-strips', - 'sokoban-sat11-strips', - 'tidybot-sat11-strips', - 'transport-sat11-strips', - 'visitall-sat11-strips', - 'woodworking-sat11-strips', - ] - - -def suite_ipc11(): - return sorted(suite_ipc11_opt() + suite_ipc11_sat()) - - -def suite_ipc14_agl_adl(): - return [ - 'cavediving-14-adl', - 'citycar-sat14-adl', - 'maintenance-sat14-adl', - ] - - -def suite_ipc14_agl_strips(): - return [ - 'barman-sat14-strips', - 'childsnack-sat14-strips', - 'floortile-sat14-strips', - 'ged-sat14-strips', - 'hiking-agl14-strips', - 'openstacks-agl14-strips', - 'parking-sat14-strips', - 'tetris-sat14-strips', - 'thoughtful-sat14-strips', - 'transport-sat14-strips', - 'visitall-sat14-strips', - ] - - -def suite_ipc14_agl(): - return sorted(suite_ipc14_agl_adl() + suite_ipc14_agl_strips()) - - -def suite_ipc14_mco_adl(): - return [ - 'cavediving-14-adl', - 'citycar-sat14-adl', - 'maintenance-sat14-adl', - ] - - -def suite_ipc14_mco_strips(): - return [ - 
'barman-mco14-strips', - 'childsnack-sat14-strips', - 'floortile-sat14-strips', - 'ged-sat14-strips', - 'hiking-sat14-strips', - 'openstacks-sat14-strips', - 'parking-sat14-strips', - 'tetris-sat14-strips', - 'thoughtful-mco14-strips', - 'transport-sat14-strips', - 'visitall-sat14-strips', - ] - - -def suite_ipc14_mco(): - return sorted(suite_ipc14_mco_adl() + suite_ipc14_mco_strips()) - - -def suite_ipc14_opt_adl(): - return [ - 'cavediving-14-adl', - 'citycar-opt14-adl', - 'maintenance-opt14-adl', - ] - - -def suite_ipc14_opt_strips(): - return [ - 'barman-opt14-strips', - 'childsnack-opt14-strips', - 'floortile-opt14-strips', - 'ged-opt14-strips', - 'hiking-opt14-strips', - 'openstacks-opt14-strips', - 'parking-opt14-strips', - 'tetris-opt14-strips', - 'tidybot-opt14-strips', - 'transport-opt14-strips', - 'visitall-opt14-strips', - ] - - -def suite_ipc14_opt(): - return sorted(suite_ipc14_opt_adl() + suite_ipc14_opt_strips()) - - -def suite_ipc14_sat_adl(): - return [ - 'cavediving-14-adl', - 'citycar-sat14-adl', - 'maintenance-sat14-adl', - ] - - -def suite_ipc14_sat_strips(): - return [ - 'barman-sat14-strips', - 'childsnack-sat14-strips', - 'floortile-sat14-strips', - 'ged-sat14-strips', - 'hiking-sat14-strips', - 'openstacks-sat14-strips', - 'parking-sat14-strips', - 'tetris-sat14-strips', - 'thoughtful-sat14-strips', - 'transport-sat14-strips', - 'visitall-sat14-strips', - ] - - -def suite_ipc14_sat(): - return sorted(suite_ipc14_sat_adl() + suite_ipc14_sat_strips()) - - -def suite_ipc14(): - return sorted(set( - suite_ipc14_agl() + suite_ipc14_mco() + - suite_ipc14_opt() + suite_ipc14_sat())) - - -def suite_unsolvable(): - return sorted( - ['mystery:prob%02d.pddl' % index - for index in [4, 5, 7, 8, 12, 16, 18, 21, 22, 23, 24]] + - ['miconic-fulladl:f21-3.pddl', 'miconic-fulladl:f30-2.pddl']) - - -def suite_optimal_adl(): - return sorted( - suite_ipc98_to_ipc04_adl() + suite_ipc06_adl() + - suite_ipc08_opt_adl() + suite_ipc14_opt_adl()) - - -def 
suite_optimal_strips(): - return sorted( - suite_ipc98_to_ipc04_strips() + suite_ipc06_strips() + - suite_ipc06_strips_compilations() + suite_ipc08_opt_strips() + - suite_ipc11_opt() + suite_ipc14_opt_strips()) - - -def suite_optimal(): - return sorted(suite_optimal_adl() + suite_optimal_strips()) - - -def suite_satisficing_adl(): - return sorted( - suite_ipc98_to_ipc04_adl() + suite_ipc06_adl() + - suite_ipc08_sat_adl() + suite_ipc14_sat_adl()) - - -def suite_satisficing_strips(): - return sorted( - suite_ipc98_to_ipc04_strips() + suite_ipc06_strips() + - suite_ipc06_strips_compilations() + suite_ipc08_sat_strips() + - suite_ipc11_sat() + suite_ipc14_sat_strips()) - - -def suite_satisficing(): - return sorted(suite_satisficing_adl() + suite_satisficing_strips()) - - -def suite_all(): - return sorted( - suite_ipc98_to_ipc04() + suite_ipc06() + - suite_ipc06_strips_compilations() + suite_ipc08() + - suite_ipc11() + suite_ipc14() + suite_alternative_formulations()) - - -def parse_args(): - parser = argparse.ArgumentParser() - parser.add_argument("suite", help="suite name") - return parser.parse_args() - - -def main(): - prefix = "suite_" - suite_names = [ - name[len(prefix):] for name in sorted(globals().keys()) - if name.startswith(prefix)] - parser = argparse.ArgumentParser(description=HELP) - parser.add_argument("suite", choices=suite_names, help="suite name") - parser.add_argument( - "--width", default=72, type=int, - help="output line width (default: %(default)s). Use 1 for single " - "column.") - args = parser.parse_args() - suite_func = globals()[prefix + args.suite] - print(textwrap.fill( - str(suite_func()), - width=args.width, - break_long_words=False, - break_on_hyphens=False)) - - -if __name__ == "__main__": - main() diff --git a/experiments/issue655/v1.py b/experiments/issue655/v1.py deleted file mode 100755 index c653f4f6b1..0000000000 --- a/experiments/issue655/v1.py +++ /dev/null @@ -1,90 +0,0 @@ -#! 
/usr/bin/env python -# -*- coding: utf-8 -*- - -import os -import suites -from lab.reports import Attribute, gm - -from common_setup import IssueConfig, IssueExperiment -try: - from relativescatter import RelativeScatterPlotReport - matplotlib = True -except ImportError: - print 'matplotlib not available, scatter plots not available' - matplotlib = False - -def main(revisions=None): - benchmarks_dir=os.path.expanduser('~/repos/downward/benchmarks') - suite=suites.suite_optimal_strips() - - configs = { - IssueConfig('rl-b50k', ['--search', 'astar(merge_and_shrink(merge_strategy=merge_linear(variable_order=reverse_level),shrink_strategy=shrink_bisimulation(greedy=false),label_reduction=exact(before_shrinking=true,before_merging=false),max_states=50000,threshold_before_merge=1))']), - IssueConfig('cggl-b50k', ['--search', 'astar(merge_and_shrink(merge_strategy=merge_linear(variable_order=cg_goal_level),shrink_strategy=shrink_bisimulation(greedy=false),label_reduction=exact(before_shrinking=true,before_merging=false),max_states=50000,threshold_before_merge=1))']), - IssueConfig('dfp-b50k', ['--search', 'astar(merge_and_shrink(merge_strategy=merge_dfp,shrink_strategy=shrink_bisimulation(greedy=false),label_reduction=exact(before_shrinking=true,before_merging=false),max_states=50000,threshold_before_merge=1))']), - IssueConfig('rl-ginf', ['--search', 'astar(merge_and_shrink(merge_strategy=merge_linear(variable_order=reverse_level),shrink_strategy=shrink_bisimulation(greedy=true),label_reduction=exact(before_shrinking=true,before_merging=false),max_states=infinity,threshold_before_merge=1))']), - IssueConfig('cggl-ginf', ['--search', 'astar(merge_and_shrink(merge_strategy=merge_linear(variable_order=cg_goal_level),shrink_strategy=shrink_bisimulation(greedy=true),label_reduction=exact(before_shrinking=true,before_merging=false),max_states=infinity,threshold_before_merge=1))']), - IssueConfig('dfp-ginf', ['--search', 
'astar(merge_and_shrink(merge_strategy=merge_dfp,shrink_strategy=shrink_bisimulation(greedy=true),label_reduction=exact(before_shrinking=true,before_merging=false),max_states=infinity,threshold_before_merge=1))']), - IssueConfig('rl-f50k', ['--search', 'astar(merge_and_shrink(merge_strategy=merge_linear(variable_order=reverse_level),shrink_strategy=shrink_fh(),label_reduction=exact(before_shrinking=false,before_merging=true),max_states=50000))']), - IssueConfig('cggl-f50k', ['--search', 'astar(merge_and_shrink(merge_strategy=merge_linear(variable_order=cg_goal_level),shrink_strategy=shrink_fh(),label_reduction=exact(before_shrinking=false,before_merging=true),max_states=50000))']), - IssueConfig('dfp-f50k', ['--search', 'astar(merge_and_shrink(merge_strategy=merge_dfp,shrink_strategy=shrink_fh(),label_reduction=exact(before_shrinking=false,before_merging=true),max_states=50000))']), - } - - exp = IssueExperiment( - benchmarks_dir=benchmarks_dir, - suite=suite, - revisions=revisions, - configs=configs, - test_suite=['depot:p01.pddl'], - processes=4, - email='silvan.sievers@unibas.ch', - ) - exp.add_resource('ms_parser', 'ms-parser.py', dest='ms-parser.py') - exp.add_command('ms-parser', ['ms_parser']) - - # planner outcome attributes - perfect_heuristic = Attribute('perfect_heuristic', absolute=True, min_wins=False) - proved_unsolvability = Attribute('proved_unsolvability', absolute=True, min_wins=False) - actual_search_time = Attribute('actual_search_time', absolute=False, min_wins=True, functions=[gm]) - - # m&s attributes - ms_construction_time = Attribute('ms_construction_time', absolute=False, min_wins=True, functions=[gm]) - ms_abstraction_constructed = Attribute('ms_abstraction_constructed', absolute=True, min_wins=False) - ms_final_size = Attribute('ms_final_size', absolute=False, min_wins=True) - ms_out_of_memory = Attribute('ms_out_of_memory', absolute=True, min_wins=True) - ms_out_of_time = Attribute('ms_out_of_time', absolute=True, min_wins=True) - 
search_out_of_memory = Attribute('search_out_of_memory', absolute=True, min_wins=True) - search_out_of_time = Attribute('search_out_of_time', absolute=True, min_wins=True) - - extra_attributes = [ - perfect_heuristic, - proved_unsolvability, - actual_search_time, - - ms_construction_time, - ms_abstraction_constructed, - ms_final_size, - ms_out_of_memory, - ms_out_of_time, - search_out_of_memory, - search_out_of_time, - ] - attributes = exp.DEFAULT_TABLE_ATTRIBUTES - attributes.extend(extra_attributes) - - #exp.add_comparison_table_step() - - #if matplotlib: - #for attribute in ["memory", "total_time"]: - #for config in configs: - #exp.add_report( - #RelativeScatterPlotReport( - #attributes=[attribute], - #filter_config=["{}-{}".format(rev, config.nick) for rev in revisions], - #get_category=lambda run1, run2: run1.get("domain"), - #), - #outfile="{}-{}-{}.png".format(exp.name, attribute, config.nick) - #) - - exp() - -main(revisions=['issue655-v1']) diff --git a/experiments/issue656/common_setup.py b/experiments/issue656/common_setup.py deleted file mode 100644 index 934531f15d..0000000000 --- a/experiments/issue656/common_setup.py +++ /dev/null @@ -1,357 +0,0 @@ -# -*- coding: utf-8 -*- - -import itertools -import os -import platform -import subprocess -import sys - -from lab.environments import LocalEnvironment, MaiaEnvironment -from lab.experiment import ARGPARSER -from lab.steps import Step -from lab import tools - -from downward.experiment import FastDownwardExperiment -from downward.reports.absolute import AbsoluteReport -from downward.reports.compare import CompareConfigsReport -from downward.reports.scatter import ScatterPlotReport - - -def parse_args(): - ARGPARSER.add_argument( - "--test", - choices=["yes", "no", "auto"], - default="auto", - dest="test_run", - help="test experiment locally on a small suite if --test=yes or " - "--test=auto and we are not on a cluster") - return ARGPARSER.parse_args() - -ARGS = parse_args() - - -def get_script(): - """Get 
file name of main script.""" - return tools.get_script_path() - - -def get_script_dir(): - """Get directory of main script. - - Usually a relative directory (depends on how it was called by the user.)""" - return os.path.dirname(get_script()) - - -def get_experiment_name(): - """Get name for experiment. - - Derived from the absolute filename of the main script, e.g. - "/ham/spam/eggs.py" => "spam-eggs".""" - script = os.path.abspath(get_script()) - script_dir = os.path.basename(os.path.dirname(script)) - script_base = os.path.splitext(os.path.basename(script))[0] - return "%s-%s" % (script_dir, script_base) - - -def get_data_dir(): - """Get data dir for the experiment. - - This is the subdirectory "data" of the directory containing - the main script.""" - return os.path.join(get_script_dir(), "data", get_experiment_name()) - - -def get_repo_base(): - """Get base directory of the repository, as an absolute path. - - Search upwards in the directory tree from the main script until a - directory with a subdirectory named ".hg" is found. - - Abort if the repo base cannot be found.""" - path = os.path.abspath(get_script_dir()) - while os.path.dirname(path) != path: - if os.path.exists(os.path.join(path, ".hg")): - return path - path = os.path.dirname(path) - sys.exit("repo base could not be found") - - -def is_running_on_cluster(): - node = platform.node() - return ( - "cluster" in node or - node.startswith("gkigrid") or - node in ["habakuk", "turtur"]) - - -def is_test_run(): - return ARGS.test_run == "yes" or ( - ARGS.test_run == "auto" and not is_running_on_cluster()) - - -def get_algo_nick(revision, config_nick): - return "{revision}-{config_nick}".format(**locals()) - - -class IssueConfig(object): - """Hold information about a planner configuration. - - See FastDownwardExperiment.add_algorithm() for documentation of the - constructor's options. 
- - """ - def __init__(self, nick, component_options, - build_options=None, driver_options=None): - self.nick = nick - self.component_options = component_options - self.build_options = build_options - self.driver_options = driver_options - - -class IssueExperiment(FastDownwardExperiment): - """Subclass of FastDownwardExperiment with some convenience features.""" - - DEFAULT_TEST_SUITE = "gripper:prob01.pddl" - - DEFAULT_TABLE_ATTRIBUTES = [ - "cost", - "coverage", - "error", - "evaluations", - "expansions", - "expansions_until_last_jump", - "generated", - "memory", - "quality", - "run_dir", - "score_evaluations", - "score_expansions", - "score_generated", - "score_memory", - "score_search_time", - "score_total_time", - "search_time", - "total_time", - ] - - DEFAULT_SCATTER_PLOT_ATTRIBUTES = [ - "evaluations", - "expansions", - "expansions_until_last_jump", - "initial_h_value", - "memory", - "search_time", - "total_time", - ] - - PORTFOLIO_ATTRIBUTES = [ - "cost", - "coverage", - "error", - "plan_length", - "run_dir", - ] - - def __init__(self, benchmarks_dir, suite, revisions=[], configs={}, - grid_priority=None, path=None, test_suite=None, - email=None, processes=None, - **kwargs): - """ - If *revisions* is specified, it should be a non-empty - list of revisions, which specify which planner versions to use - in the experiment. The same versions are used for translator, - preprocessor and search. :: - - IssueExperiment(revisions=["issue123", "4b3d581643"], ...) - - *configs* must be a non-empty list of IssueConfig objects. :: - - IssueExperiment(..., configs=[ - IssueConfig("ff", ["--search", "eager_greedy(ff())"]), - IssueConfig( - "lama", [], - driver_options=["--alias", "seq-sat-lama-2011"]), - ]) - - *suite* sets the benchmarks for the experiment. It must be a - single string or a list of strings specifying domains or - tasks. The downward.suites module has many predefined - suites. 
:: - - IssueExperiment(..., suite=["grid", "gripper:prob01.pddl"]) - - from downward import suites - IssueExperiment(..., suite=suites.suite_all()) - IssueExperiment(..., suite=suites.suite_satisficing_with_ipc11()) - IssueExperiment(..., suite=suites.suite_optimal()) - - Use *grid_priority* to set the job priority for cluster - experiments. It must be in the range [-1023, 0] where 0 is the - highest priority. By default the priority is 0. :: - - IssueExperiment(..., grid_priority=-500) - - If *path* is specified, it must be the path to where the - experiment should be built (e.g. - /home/john/experiments/issue123/exp01/). If omitted, the - experiment path is derived automatically from the main - script's filename. Example:: - - script = experiments/issue123/exp01.py --> - path = experiments/issue123/data/issue123-exp01/ - - Specify *test_suite* to set the benchmarks for experiment test - runs. By default the first gripper task is used. - - IssueExperiment(..., test_suite=["depot:pfile1", "tpp:p01.pddl"]) - - If *email* is specified, it should be an email address. This - email address will be notified upon completion of the experiments - if it is run on the cluster. 
- """ - - if is_test_run(): - kwargs["environment"] = LocalEnvironment(processes=processes) - suite = test_suite or self.DEFAULT_TEST_SUITE - elif "environment" not in kwargs: - kwargs["environment"] = MaiaEnvironment( - priority=grid_priority, email=email) - - path = path or get_data_dir() - - FastDownwardExperiment.__init__(self, path=path, **kwargs) - - repo = get_repo_base() - for rev in revisions: - for config in configs: - self.add_algorithm( - get_algo_nick(rev, config.nick), - repo, - rev, - config.component_options, - build_options=config.build_options, - driver_options=config.driver_options) - - self.add_suite(benchmarks_dir, suite) - - self._revisions = revisions - self._configs = configs - - @classmethod - def _is_portfolio(cls, config_nick): - return "fdss" in config_nick - - @classmethod - def get_supported_attributes(cls, config_nick, attributes): - if cls._is_portfolio(config_nick): - return [attr for attr in attributes - if attr in cls.PORTFOLIO_ATTRIBUTES] - return attributes - - def add_absolute_report_step(self, **kwargs): - """Add step that makes an absolute report. - - Absolute reports are useful for experiments that don't - compare revisions. - - The report is written to the experiment evaluation directory. - - All *kwargs* will be passed to the AbsoluteReport class. If - the keyword argument *attributes* is not specified, a - default list of attributes is used. :: - - exp.add_absolute_report_step(attributes=["coverage"]) - - """ - kwargs.setdefault("attributes", self.DEFAULT_TABLE_ATTRIBUTES) - report = AbsoluteReport(**kwargs) - outfile = os.path.join(self.eval_dir, - get_experiment_name() + "." + - report.output_format) - self.add_report(report, outfile=outfile) - self.add_step(Step('publish-absolute-report', - subprocess.call, - ['publish', outfile])) - - def add_comparison_table_step(self, **kwargs): - """Add a step that makes pairwise revision comparisons. - - Create comparative reports for all pairs of Fast Downward - revisions. 
Each report pairs up the runs of the same config and - lists the two absolute attribute values and their difference - for all attributes in kwargs["attributes"]. - - All *kwargs* will be passed to the CompareConfigsReport class. - If the keyword argument *attributes* is not specified, a - default list of attributes is used. :: - - exp.add_comparison_table_step(attributes=["coverage"]) - - """ - kwargs.setdefault("attributes", self.DEFAULT_TABLE_ATTRIBUTES) - - def make_comparison_tables(): - for rev1, rev2 in itertools.combinations(self._revisions, 2): - compared_configs = [] - for config in self._configs: - config_nick = config.nick - compared_configs.append( - ("%s-%s" % (rev1, config_nick), - "%s-%s" % (rev2, config_nick), - "Diff (%s)" % config_nick)) - report = CompareConfigsReport(compared_configs, **kwargs) - outfile = os.path.join( - self.eval_dir, - "%s-%s-%s-compare" % (self.name, rev1, rev2) - + "." + report.output_format) - report(self.eval_dir, outfile) - - def publish_comparison_tables(): - for rev1, rev2 in itertools.combinations(self._revisions, 2): - outfile = os.path.join( - self.eval_dir, - "%s-%s-%s-compare" % (self.name, rev1, rev2) - + ".html") - subprocess.call(['publish', outfile]) - - self.add_step(Step("make-comparison-tables", make_comparison_tables)) - self.add_step(Step("publish-comparison-tables", publish_comparison_tables)) - - def add_scatter_plot_step(self, attributes=None): - """Add a step that creates scatter plots for all revision pairs. - - Create a scatter plot for each combination of attribute, - configuration and revisions pair. If *attributes* is not - specified, a list of common scatter plot attributes is used. - For portfolios all attributes except "cost", "coverage" and - "plan_length" will be ignored. 
:: - - exp.add_scatter_plot_step(attributes=["expansions"]) - - """ - if attributes is None: - attributes = self.DEFAULT_SCATTER_PLOT_ATTRIBUTES - scatter_dir = os.path.join(self.eval_dir, "scatter") - - def make_scatter_plot(config_nick, rev1, rev2, attribute): - name = "-".join([self.name, rev1, rev2, attribute, config_nick]) - print "Make scatter plot for", name - algo1 = "%s-%s" % (rev1, config_nick) - algo2 = "%s-%s" % (rev2, config_nick) - report = ScatterPlotReport( - filter_config=[algo1, algo2], - attributes=[attribute], - get_category=lambda run1, run2: run1["domain"], - legend_location=(1.3, 0.5)) - report( - self.eval_dir, - os.path.join(scatter_dir, rev1 + "-" + rev2, name)) - - def make_scatter_plots(): - for config in self._configs: - for rev1, rev2 in itertools.combinations(self._revisions, 2): - for attribute in self.get_supported_attributes( - config.nick, attributes): - make_scatter_plot(config.nick, rev1, rev2, attribute) - - self.add_step(Step("make-scatter-plots", make_scatter_plots)) diff --git a/experiments/issue656/ms-parser.py b/experiments/issue656/ms-parser.py deleted file mode 100755 index c219b72ba5..0000000000 --- a/experiments/issue656/ms-parser.py +++ /dev/null @@ -1,72 +0,0 @@ -#! 
/usr/bin/env python - -from lab.parser import Parser - -parser = Parser() -parser.add_pattern('ms_final_size', 'Final transition system size: (\d+)', required=False, type=int) -parser.add_pattern('ms_construction_time', 'Done initializing merge-and-shrink heuristic \[(.+)s\]', required=False, type=float) -parser.add_pattern('ms_memory_delta', 'Final peak memory increase of merge-and-shrink computation: (\d+) KB', required=False, type=int) -parser.add_pattern('actual_search_time', 'Actual search time: (.+)s \[t=.+s\]', required=False, type=float) - -def check_ms_constructed(content, props): - ms_construction_time = props.get('ms_construction_time') - abstraction_constructed = False - if ms_construction_time is not None: - abstraction_constructed = True - props['ms_abstraction_constructed'] = abstraction_constructed - -parser.add_function(check_ms_constructed) - -def check_planner_exit_reason(content, props): - ms_abstraction_constructed = props.get('ms_abstraction_constructed') - error = props.get('error') - if error != 'none' and error != 'timeout' and error != 'out-of-memory': - print 'error: %s' % error - return - - # Check whether merge-and-shrink computation or search ran out of - # time or memory. 
- ms_out_of_time = False - ms_out_of_memory = False - search_out_of_time = False - search_out_of_memory = False - if ms_abstraction_constructed == False: - if error == 'timeout': - ms_out_of_time = True - elif error == 'out-of-memory': - ms_out_of_memory = True - elif ms_abstraction_constructed == True: - if error == 'timeout': - search_out_of_time = True - elif error == 'out-of-memory': - search_out_of_memory = True - props['ms_out_of_time'] = ms_out_of_time - props['ms_out_of_memory'] = ms_out_of_memory - props['search_out_of_time'] = search_out_of_time - props['search_out_of_memory'] = search_out_of_memory - -parser.add_function(check_planner_exit_reason) - -def check_perfect_heuristic(content, props): - plan_length = props.get('plan_length') - expansions = props.get('expansions') - if plan_length != None: - perfect_heuristic = False - if plan_length + 1 == expansions: - perfect_heuristic = True - props['perfect_heuristic'] = perfect_heuristic - -parser.add_function(check_perfect_heuristic) - -def check_proved_unsolvability(content, props): - proved_unsolvability = False - if props['coverage'] == 0: - for line in content.splitlines(): - if line == 'Completely explored state space -- no solution!': - proved_unsolvability = True - break - props['proved_unsolvability'] = proved_unsolvability - -parser.add_function(check_proved_unsolvability) - -parser.parse() diff --git a/experiments/issue656/relativescatter.py b/experiments/issue656/relativescatter.py deleted file mode 100644 index 68e2529f34..0000000000 --- a/experiments/issue656/relativescatter.py +++ /dev/null @@ -1,106 +0,0 @@ -# -*- coding: utf-8 -*- - -from collections import defaultdict - -from matplotlib import ticker - -from downward.reports.scatter import ScatterPlotReport -from downward.reports.plot import PlotReport, Matplotlib, MatplotlibPlot - - -# TODO: handle outliers - -# TODO: this is mostly copied from ScatterMatplotlib (scatter.py) -class RelativeScatterMatplotlib(Matplotlib): - @classmethod - 
def _plot(cls, report, axes, categories, styles): - # Display grid - axes.grid(b=True, linestyle='-', color='0.75') - - has_points = False - # Generate the scatter plots - for category, coords in sorted(categories.items()): - X, Y = zip(*coords) - axes.scatter(X, Y, s=42, label=category, **styles[category]) - if X and Y: - has_points = True - - if report.xscale == 'linear' or report.yscale == 'linear': - plot_size = report.missing_val * 1.01 - else: - plot_size = report.missing_val * 1.25 - - # make 5 ticks above and below 1 - yticks = [] - tick_step = report.ylim_top**(1/5.0) - for i in xrange(-5, 6): - yticks.append(tick_step**i) - axes.set_yticks(yticks) - axes.get_yaxis().set_major_formatter(ticker.ScalarFormatter()) - - axes.set_xlim(report.xlim_left or -1, report.xlim_right or plot_size) - axes.set_ylim(report.ylim_bottom or -1, report.ylim_top or plot_size) - - for axis in [axes.xaxis, axes.yaxis]: - MatplotlibPlot.change_axis_formatter(axis, - report.missing_val if report.show_missing else None) - return has_points - - - -class RelativeScatterPlotReport(ScatterPlotReport): - """ - Generate a scatter plot that shows how a specific attribute in two - configurations. The attribute value in config 1 is shown on the - x-axis and the relation to the value in config 2 on the y-axis. - """ - - def __init__(self, show_missing=True, get_category=None, **kwargs): - ScatterPlotReport.__init__(self, show_missing, get_category, **kwargs) - if self.output_format == 'tex': - raise "not supported" - else: - self.writer = RelativeScatterMatplotlib - - def _fill_categories(self, runs): - # We discard the *runs* parameter. 
- # Map category names to value tuples - categories = defaultdict(list) - self.ylim_bottom = 2 - self.ylim_top = 0.5 - self.xlim_left = float("inf") - for (domain, problem), runs in self.problem_runs.items(): - if len(runs) != 2: - continue - run1, run2 = runs - assert (run1['config'] == self.configs[0] and - run2['config'] == self.configs[1]) - val1 = run1.get(self.attribute) - val2 = run2.get(self.attribute) - if val1 is None or val2 is None: - continue - category = self.get_category(run1, run2) - assert val1 > 0, (domain, problem, self.configs[0], val1) - assert val2 > 0, (domain, problem, self.configs[1], val2) - x = val1 - y = val2 / float(val1) - - categories[category].append((x, y)) - - self.ylim_top = max(self.ylim_top, y) - self.ylim_bottom = min(self.ylim_bottom, y) - self.xlim_left = min(self.xlim_left, x) - - # center around 1 - if self.ylim_bottom < 1: - self.ylim_top = max(self.ylim_top, 1 / float(self.ylim_bottom)) - if self.ylim_top > 1: - self.ylim_bottom = min(self.ylim_bottom, 1 / float(self.ylim_top)) - return categories - - def _set_scales(self, xscale, yscale): - # ScatterPlots use log-scaling on the x-axis by default. - default_xscale = 'log' - if self.attribute and self.attribute in self.LINEAR: - default_xscale = 'linear' - PlotReport._set_scales(self, xscale or default_xscale, 'log') diff --git a/experiments/issue656/suites.py b/experiments/issue656/suites.py deleted file mode 100644 index 4615212cfd..0000000000 --- a/experiments/issue656/suites.py +++ /dev/null @@ -1,350 +0,0 @@ -#! /usr/bin/env python -# -*- coding: utf-8 -*- - -import argparse -import textwrap - - -HELP = "Convert suite name to list of domains or tasks." 
- - -def suite_alternative_formulations(): - return ['airport-adl', 'no-mprime', 'no-mystery'] - - -def suite_ipc98_to_ipc04_adl(): - return [ - 'assembly', 'miconic-fulladl', 'miconic-simpleadl', - 'optical-telegraphs', 'philosophers', 'psr-large', - 'psr-middle', 'schedule', - ] - - -def suite_ipc98_to_ipc04_strips(): - return [ - 'airport', 'blocks', 'depot', 'driverlog', 'freecell', 'grid', - 'gripper', 'logistics00', 'logistics98', 'miconic', 'movie', - 'mprime', 'mystery', 'pipesworld-notankage', 'psr-small', - 'satellite', 'zenotravel', - ] - - -def suite_ipc98_to_ipc04(): - # All IPC1-4 domains, including the trivial Movie. - return sorted(suite_ipc98_to_ipc04_adl() + suite_ipc98_to_ipc04_strips()) - - -def suite_ipc06_adl(): - return [ - 'openstacks', - 'pathways', - 'trucks', - ] - - -def suite_ipc06_strips_compilations(): - return [ - 'openstacks-strips', - 'pathways-noneg', - 'trucks-strips', - ] - - -def suite_ipc06_strips(): - return [ - 'pipesworld-tankage', - 'rovers', - 'storage', - 'tpp', - ] - - -def suite_ipc06(): - return sorted(suite_ipc06_adl() + suite_ipc06_strips()) - - -def suite_ipc08_common_strips(): - return [ - 'parcprinter-08-strips', - 'pegsol-08-strips', - 'scanalyzer-08-strips', - ] - - -def suite_ipc08_opt_adl(): - return ['openstacks-opt08-adl'] - - -def suite_ipc08_opt_strips(): - return sorted(suite_ipc08_common_strips() + [ - 'elevators-opt08-strips', - 'openstacks-opt08-strips', - 'sokoban-opt08-strips', - 'transport-opt08-strips', - 'woodworking-opt08-strips', - ]) - - -def suite_ipc08_opt(): - return sorted(suite_ipc08_opt_strips() + suite_ipc08_opt_adl()) - - -def suite_ipc08_sat_adl(): - return ['openstacks-sat08-adl'] - - -def suite_ipc08_sat_strips(): - return sorted(suite_ipc08_common_strips() + [ - # Note: cyber-security is missing. 
- 'elevators-sat08-strips', - 'openstacks-sat08-strips', - 'sokoban-sat08-strips', - 'transport-sat08-strips', - 'woodworking-sat08-strips', - ]) - - -def suite_ipc08_sat(): - return sorted(suite_ipc08_sat_strips() + suite_ipc08_sat_adl()) - - -def suite_ipc08(): - return sorted(set(suite_ipc08_opt() + suite_ipc08_sat())) - - -def suite_ipc11_opt(): - return [ - 'barman-opt11-strips', - 'elevators-opt11-strips', - 'floortile-opt11-strips', - 'nomystery-opt11-strips', - 'openstacks-opt11-strips', - 'parcprinter-opt11-strips', - 'parking-opt11-strips', - 'pegsol-opt11-strips', - 'scanalyzer-opt11-strips', - 'sokoban-opt11-strips', - 'tidybot-opt11-strips', - 'transport-opt11-strips', - 'visitall-opt11-strips', - 'woodworking-opt11-strips', - ] - - -def suite_ipc11_sat(): - return [ - 'barman-sat11-strips', - 'elevators-sat11-strips', - 'floortile-sat11-strips', - 'nomystery-sat11-strips', - 'openstacks-sat11-strips', - 'parcprinter-sat11-strips', - 'parking-sat11-strips', - 'pegsol-sat11-strips', - 'scanalyzer-sat11-strips', - 'sokoban-sat11-strips', - 'tidybot-sat11-strips', - 'transport-sat11-strips', - 'visitall-sat11-strips', - 'woodworking-sat11-strips', - ] - - -def suite_ipc11(): - return sorted(suite_ipc11_opt() + suite_ipc11_sat()) - - -def suite_ipc14_agl_adl(): - return [ - 'cavediving-14-adl', - 'citycar-sat14-adl', - 'maintenance-sat14-adl', - ] - - -def suite_ipc14_agl_strips(): - return [ - 'barman-sat14-strips', - 'childsnack-sat14-strips', - 'floortile-sat14-strips', - 'ged-sat14-strips', - 'hiking-agl14-strips', - 'openstacks-agl14-strips', - 'parking-sat14-strips', - 'tetris-sat14-strips', - 'thoughtful-sat14-strips', - 'transport-sat14-strips', - 'visitall-sat14-strips', - ] - - -def suite_ipc14_agl(): - return sorted(suite_ipc14_agl_adl() + suite_ipc14_agl_strips()) - - -def suite_ipc14_mco_adl(): - return [ - 'cavediving-14-adl', - 'citycar-sat14-adl', - 'maintenance-sat14-adl', - ] - - -def suite_ipc14_mco_strips(): - return [ - 
'barman-mco14-strips', - 'childsnack-sat14-strips', - 'floortile-sat14-strips', - 'ged-sat14-strips', - 'hiking-sat14-strips', - 'openstacks-sat14-strips', - 'parking-sat14-strips', - 'tetris-sat14-strips', - 'thoughtful-mco14-strips', - 'transport-sat14-strips', - 'visitall-sat14-strips', - ] - - -def suite_ipc14_mco(): - return sorted(suite_ipc14_mco_adl() + suite_ipc14_mco_strips()) - - -def suite_ipc14_opt_adl(): - return [ - 'cavediving-14-adl', - 'citycar-opt14-adl', - 'maintenance-opt14-adl', - ] - - -def suite_ipc14_opt_strips(): - return [ - 'barman-opt14-strips', - 'childsnack-opt14-strips', - 'floortile-opt14-strips', - 'ged-opt14-strips', - 'hiking-opt14-strips', - 'openstacks-opt14-strips', - 'parking-opt14-strips', - 'tetris-opt14-strips', - 'tidybot-opt14-strips', - 'transport-opt14-strips', - 'visitall-opt14-strips', - ] - - -def suite_ipc14_opt(): - return sorted(suite_ipc14_opt_adl() + suite_ipc14_opt_strips()) - - -def suite_ipc14_sat_adl(): - return [ - 'cavediving-14-adl', - 'citycar-sat14-adl', - 'maintenance-sat14-adl', - ] - - -def suite_ipc14_sat_strips(): - return [ - 'barman-sat14-strips', - 'childsnack-sat14-strips', - 'floortile-sat14-strips', - 'ged-sat14-strips', - 'hiking-sat14-strips', - 'openstacks-sat14-strips', - 'parking-sat14-strips', - 'tetris-sat14-strips', - 'thoughtful-sat14-strips', - 'transport-sat14-strips', - 'visitall-sat14-strips', - ] - - -def suite_ipc14_sat(): - return sorted(suite_ipc14_sat_adl() + suite_ipc14_sat_strips()) - - -def suite_ipc14(): - return sorted(set( - suite_ipc14_agl() + suite_ipc14_mco() + - suite_ipc14_opt() + suite_ipc14_sat())) - - -def suite_unsolvable(): - return sorted( - ['mystery:prob%02d.pddl' % index - for index in [4, 5, 7, 8, 12, 16, 18, 21, 22, 23, 24]] + - ['miconic-fulladl:f21-3.pddl', 'miconic-fulladl:f30-2.pddl']) - - -def suite_optimal_adl(): - return sorted( - suite_ipc98_to_ipc04_adl() + suite_ipc06_adl() + - suite_ipc08_opt_adl() + suite_ipc14_opt_adl()) - - -def 
suite_optimal_strips(): - return sorted( - suite_ipc98_to_ipc04_strips() + suite_ipc06_strips() + - suite_ipc06_strips_compilations() + suite_ipc08_opt_strips() + - suite_ipc11_opt() + suite_ipc14_opt_strips()) - - -def suite_optimal(): - return sorted(suite_optimal_adl() + suite_optimal_strips()) - - -def suite_satisficing_adl(): - return sorted( - suite_ipc98_to_ipc04_adl() + suite_ipc06_adl() + - suite_ipc08_sat_adl() + suite_ipc14_sat_adl()) - - -def suite_satisficing_strips(): - return sorted( - suite_ipc98_to_ipc04_strips() + suite_ipc06_strips() + - suite_ipc06_strips_compilations() + suite_ipc08_sat_strips() + - suite_ipc11_sat() + suite_ipc14_sat_strips()) - - -def suite_satisficing(): - return sorted(suite_satisficing_adl() + suite_satisficing_strips()) - - -def suite_all(): - return sorted( - suite_ipc98_to_ipc04() + suite_ipc06() + - suite_ipc06_strips_compilations() + suite_ipc08() + - suite_ipc11() + suite_ipc14() + suite_alternative_formulations()) - - -def parse_args(): - parser = argparse.ArgumentParser() - parser.add_argument("suite", help="suite name") - return parser.parse_args() - - -def main(): - prefix = "suite_" - suite_names = [ - name[len(prefix):] for name in sorted(globals().keys()) - if name.startswith(prefix)] - parser = argparse.ArgumentParser(description=HELP) - parser.add_argument("suite", choices=suite_names, help="suite name") - parser.add_argument( - "--width", default=72, type=int, - help="output line width (default: %(default)s). Use 1 for single " - "column.") - args = parser.parse_args() - suite_func = globals()[prefix + args.suite] - print(textwrap.fill( - str(suite_func()), - width=args.width, - break_long_words=False, - break_on_hyphens=False)) - - -if __name__ == "__main__": - main() diff --git a/experiments/issue656/v1-new-configs.py b/experiments/issue656/v1-new-configs.py deleted file mode 100755 index 9f224799ea..0000000000 --- a/experiments/issue656/v1-new-configs.py +++ /dev/null @@ -1,88 +0,0 @@ -#! 
/usr/bin/env python -# -*- coding: utf-8 -*- - -import os -import suites -from lab.reports import Attribute, gm - -from common_setup import IssueConfig, IssueExperiment -try: - from relativescatter import RelativeScatterPlotReport - matplotlib = True -except ImportError: - print 'matplotlib not availabe, scatter plots not available' - matplotlib = False - -def main(revisions=None): - benchmarks_dir=os.path.expanduser('~/repos/downward/benchmarks') - suite=suites.suite_optimal_strips() - - configs = { - IssueConfig('dfp-reg-otn-abp-b50k', ['--search', 'astar(merge_and_shrink(merge_strategy=merge_dfp(atomic_ts_order=regular,product_ts_order=old_to_new,atomic_before_product=true),shrink_strategy=shrink_bisimulation(max_states=50000,threshold=1,greedy=false),label_reduction=exact(before_shrinking=true,before_merging=false)))']), - IssueConfig('dfp-reg-nto-abp-b50k', ['--search', 'astar(merge_and_shrink(merge_strategy=merge_dfp(atomic_ts_order=regular,product_ts_order=new_to_old,atomic_before_product=true),shrink_strategy=shrink_bisimulation(max_states=50000,threshold=1,greedy=false),label_reduction=exact(before_shrinking=true,before_merging=false)))']), - IssueConfig('dfp-reg-rnd-abp-b50k', ['--search', 'astar(merge_and_shrink(merge_strategy=merge_dfp(atomic_ts_order=regular,product_ts_order=random,atomic_before_product=true),shrink_strategy=shrink_bisimulation(max_states=50000,threshold=1,greedy=false),label_reduction=exact(before_shrinking=true,before_merging=false)))']), - IssueConfig('dfp-inv-otn-abp-b50k', ['--search', 'astar(merge_and_shrink(merge_strategy=merge_dfp(atomic_ts_order=inverse,product_ts_order=old_to_new,atomic_before_product=true),shrink_strategy=shrink_bisimulation(max_states=50000,threshold=1,greedy=false),label_reduction=exact(before_shrinking=true,before_merging=false)))']), - IssueConfig('dfp-inv-nto-abp-b50k', ['--search', 
'astar(merge_and_shrink(merge_strategy=merge_dfp(atomic_ts_order=inverse,product_ts_order=new_to_old,atomic_before_product=true),shrink_strategy=shrink_bisimulation(max_states=50000,threshold=1,greedy=false),label_reduction=exact(before_shrinking=true,before_merging=false)))']), - IssueConfig('dfp-inv-rnd-abp-b50k', ['--search', 'astar(merge_and_shrink(merge_strategy=merge_dfp(atomic_ts_order=inverse,product_ts_order=random,atomic_before_product=true),shrink_strategy=shrink_bisimulation(max_states=50000,threshold=1,greedy=false),label_reduction=exact(before_shrinking=true,before_merging=false)))']), - IssueConfig('dfp-rnd-otn-abp-b50k', ['--search', 'astar(merge_and_shrink(merge_strategy=merge_dfp(atomic_ts_order=random,product_ts_order=old_to_new,atomic_before_product=true),shrink_strategy=shrink_bisimulation(max_states=50000,threshold=1,greedy=false),label_reduction=exact(before_shrinking=true,before_merging=false)))']), - IssueConfig('dfp-rnd-nto-abp-b50k', ['--search', 'astar(merge_and_shrink(merge_strategy=merge_dfp(atomic_ts_order=random,product_ts_order=new_to_old,atomic_before_product=true),shrink_strategy=shrink_bisimulation(max_states=50000,threshold=1,greedy=false),label_reduction=exact(before_shrinking=true,before_merging=false)))']), - IssueConfig('dfp-rnd-rnd-abp-b50k', ['--search', 'astar(merge_and_shrink(merge_strategy=merge_dfp(atomic_ts_order=random,product_ts_order=random,atomic_before_product=true),shrink_strategy=shrink_bisimulation(max_states=50000,threshold=1,greedy=false),label_reduction=exact(before_shrinking=true,before_merging=false)))']), - IssueConfig('dfp-reg-otn-pba-b50k', ['--search', 'astar(merge_and_shrink(merge_strategy=merge_dfp(atomic_ts_order=regular,product_ts_order=old_to_new,atomic_before_product=false),shrink_strategy=shrink_bisimulation(max_states=50000,threshold=1,greedy=false),label_reduction=exact(before_shrinking=true,before_merging=false)))']), - IssueConfig('dfp-reg-nto-pba-b50k', ['--search', 
'astar(merge_and_shrink(merge_strategy=merge_dfp(atomic_ts_order=regular,product_ts_order=new_to_old,atomic_before_product=false),shrink_strategy=shrink_bisimulation(max_states=50000,threshold=1,greedy=false),label_reduction=exact(before_shrinking=true,before_merging=false)))']), - IssueConfig('dfp-reg-rnd-pba-b50k', ['--search', 'astar(merge_and_shrink(merge_strategy=merge_dfp(atomic_ts_order=regular,product_ts_order=random,atomic_before_product=false),shrink_strategy=shrink_bisimulation(max_states=50000,threshold=1,greedy=false),label_reduction=exact(before_shrinking=true,before_merging=false)))']), - IssueConfig('dfp-inv-otn-pba-b50k', ['--search', 'astar(merge_and_shrink(merge_strategy=merge_dfp(atomic_ts_order=inverse,product_ts_order=old_to_new,atomic_before_product=false),shrink_strategy=shrink_bisimulation(max_states=50000,threshold=1,greedy=false),label_reduction=exact(before_shrinking=true,before_merging=false)))']), - IssueConfig('dfp-inv-nto-pba-b50k', ['--search', 'astar(merge_and_shrink(merge_strategy=merge_dfp(atomic_ts_order=inverse,product_ts_order=new_to_old,atomic_before_product=false),shrink_strategy=shrink_bisimulation(max_states=50000,threshold=1,greedy=false),label_reduction=exact(before_shrinking=true,before_merging=false)))']), - IssueConfig('dfp-inv-rnd-pba-b50k', ['--search', 'astar(merge_and_shrink(merge_strategy=merge_dfp(atomic_ts_order=inverse,product_ts_order=random,atomic_before_product=false),shrink_strategy=shrink_bisimulation(max_states=50000,threshold=1,greedy=false),label_reduction=exact(before_shrinking=true,before_merging=false)))']), - IssueConfig('dfp-rnd-otn-pba-b50k', ['--search', 'astar(merge_and_shrink(merge_strategy=merge_dfp(atomic_ts_order=random,product_ts_order=old_to_new,atomic_before_product=false),shrink_strategy=shrink_bisimulation(max_states=50000,threshold=1,greedy=false),label_reduction=exact(before_shrinking=true,before_merging=false)))']), - IssueConfig('dfp-rnd-nto-pba-b50k', ['--search', 
'astar(merge_and_shrink(merge_strategy=merge_dfp(atomic_ts_order=random,product_ts_order=new_to_old,atomic_before_product=false),shrink_strategy=shrink_bisimulation(max_states=50000,threshold=1,greedy=false),label_reduction=exact(before_shrinking=true,before_merging=false)))']), - IssueConfig('dfp-rnd-rnd-pba-b50k', ['--search', 'astar(merge_and_shrink(merge_strategy=merge_dfp(atomic_ts_order=random,product_ts_order=random,atomic_before_product=false),shrink_strategy=shrink_bisimulation(max_states=50000,threshold=1,greedy=false),label_reduction=exact(before_shrinking=true,before_merging=false)))']), - IssueConfig('dfp-allrnd-b50k', ['--search', 'astar(merge_and_shrink(merge_strategy=merge_dfp(randomized_order=true),shrink_strategy=shrink_bisimulation(max_states=50000,threshold=1,greedy=false),label_reduction=exact(before_shrinking=true,before_merging=false)))']), - } - - exp = IssueExperiment( - benchmarks_dir=benchmarks_dir, - suite=suite, - revisions=revisions, - configs=configs, - test_suite=['depot:p01.pddl'], - processes=4, - email='silvan.sievers@unibas.ch', - ) - exp.add_resource('ms_parser', 'ms-parser.py', dest='ms-parser.py') - exp.add_command('ms-parser', ['ms_parser']) - - # planner outcome attributes - perfect_heuristic = Attribute('perfect_heuristic', absolute=True, min_wins=False) - proved_unsolvability = Attribute('proved_unsolvability', absolute=True, min_wins=False) - actual_search_time = Attribute('actual_search_time', absolute=False, min_wins=True, functions=[gm]) - - # m&s attributes - ms_construction_time = Attribute('ms_construction_time', absolute=False, min_wins=True, functions=[gm]) - ms_abstraction_constructed = Attribute('ms_abstraction_constructed', absolute=True, min_wins=False) - ms_final_size = Attribute('ms_final_size', absolute=False, min_wins=True) - ms_out_of_memory = Attribute('ms_out_of_memory', absolute=True, min_wins=True) - ms_out_of_time = Attribute('ms_out_of_time', absolute=True, min_wins=True) - search_out_of_memory = 
Attribute('search_out_of_memory', absolute=True, min_wins=True) - search_out_of_time = Attribute('search_out_of_time', absolute=True, min_wins=True) - - extra_attributes = [ - perfect_heuristic, - proved_unsolvability, - actual_search_time, - - ms_construction_time, - ms_abstraction_constructed, - ms_final_size, - ms_out_of_memory, - ms_out_of_time, - search_out_of_memory, - search_out_of_time, - ] - attributes = exp.DEFAULT_TABLE_ATTRIBUTES - attributes.extend(extra_attributes) - - exp.add_absolute_report_step() - - exp() - -main(revisions=['issue656-v1']) diff --git a/experiments/issue656/v1.py b/experiments/issue656/v1.py deleted file mode 100755 index 0ca34128fc..0000000000 --- a/experiments/issue656/v1.py +++ /dev/null @@ -1,84 +0,0 @@ -#! /usr/bin/env python -# -*- coding: utf-8 -*- - -import os -import suites -from lab.reports import Attribute, gm - -from common_setup import IssueConfig, IssueExperiment -try: - from relativescatter import RelativeScatterPlotReport - matplotlib = True -except ImportError: - print 'matplotlib not availabe, scatter plots not available' - matplotlib = False - -def main(revisions=None): - benchmarks_dir=os.path.expanduser('~/repos/downward/benchmarks') - suite=suites.suite_optimal_strips() - - configs = { - IssueConfig('dfp-b50k', ['--search', 'astar(merge_and_shrink(merge_strategy=merge_dfp,shrink_strategy=shrink_bisimulation(max_states=50000,threshold=1,greedy=false),label_reduction=exact(before_shrinking=true,before_merging=false)))']), - IssueConfig('dfp-ginf', ['--search', 'astar(merge_and_shrink(merge_strategy=merge_dfp,shrink_strategy=shrink_bisimulation(max_states=infinity,threshold=1,greedy=true),label_reduction=exact(before_shrinking=true,before_merging=false)))']), - IssueConfig('dfp-f50k', ['--search', 'astar(merge_and_shrink(merge_strategy=merge_dfp,shrink_strategy=shrink_fh(max_states=50000),label_reduction=exact(before_shrinking=false,before_merging=true)))']), - } - - exp = IssueExperiment( - 
benchmarks_dir=benchmarks_dir, - suite=suite, - revisions=revisions, - configs=configs, - test_suite=['depot:p01.pddl'], - processes=4, - email='silvan.sievers@unibas.ch', - ) - exp.add_resource('ms_parser', 'ms-parser.py', dest='ms-parser.py') - exp.add_command('ms-parser', ['ms_parser']) - - # planner outcome attributes - perfect_heuristic = Attribute('perfect_heuristic', absolute=True, min_wins=False) - proved_unsolvability = Attribute('proved_unsolvability', absolute=True, min_wins=False) - actual_search_time = Attribute('actual_search_time', absolute=False, min_wins=True, functions=[gm]) - - # m&s attributes - ms_construction_time = Attribute('ms_construction_time', absolute=False, min_wins=True, functions=[gm]) - ms_abstraction_constructed = Attribute('ms_abstraction_constructed', absolute=True, min_wins=False) - ms_final_size = Attribute('ms_final_size', absolute=False, min_wins=True) - ms_out_of_memory = Attribute('ms_out_of_memory', absolute=True, min_wins=True) - ms_out_of_time = Attribute('ms_out_of_time', absolute=True, min_wins=True) - search_out_of_memory = Attribute('search_out_of_memory', absolute=True, min_wins=True) - search_out_of_time = Attribute('search_out_of_time', absolute=True, min_wins=True) - - extra_attributes = [ - perfect_heuristic, - proved_unsolvability, - actual_search_time, - - ms_construction_time, - ms_abstraction_constructed, - ms_final_size, - ms_out_of_memory, - ms_out_of_time, - search_out_of_memory, - search_out_of_time, - ] - attributes = exp.DEFAULT_TABLE_ATTRIBUTES - attributes.extend(extra_attributes) - - exp.add_comparison_table_step() - - if matplotlib: - for attribute in ["memory", "total_time"]: - for config in configs: - exp.add_report( - RelativeScatterPlotReport( - attributes=[attribute], - filter_config=["{}-{}".format(rev, config.nick) for rev in revisions], - get_category=lambda run1, run2: run1.get("domain"), - ), - outfile="{}-{}-{}.png".format(exp.name, attribute, config.nick) - ) - - exp() - 
-main(revisions=['issue656-base', 'issue656-v1']) diff --git a/experiments/issue657/common_setup.py b/experiments/issue657/common_setup.py deleted file mode 100644 index e8282d9863..0000000000 --- a/experiments/issue657/common_setup.py +++ /dev/null @@ -1,337 +0,0 @@ -# -*- coding: utf-8 -*- - -import itertools -import os -import platform -import subprocess -import sys - -from lab.experiment import ARGPARSER -from lab.steps import Step -from lab import tools - -from downward.experiment import FastDownwardExperiment -from downward.reports.absolute import AbsoluteReport -from downward.reports.compare import CompareConfigsReport -from downward.reports.scatter import ScatterPlotReport - -from relativescatter import RelativeScatterPlotReport - - -def parse_args(): - ARGPARSER.add_argument( - "--test", - choices=["yes", "no", "auto"], - default="auto", - dest="test_run", - help="test experiment locally on a small suite if --test=yes or " - "--test=auto and we are not on a cluster") - return ARGPARSER.parse_args() - -ARGS = parse_args() - - -def get_script(): - """Get file name of main script.""" - return tools.get_script_path() - - -def get_script_dir(): - """Get directory of main script. - - Usually a relative directory (depends on how it was called by the user.)""" - return os.path.dirname(get_script()) - - -def get_experiment_name(): - """Get name for experiment. - - Derived from the absolute filename of the main script, e.g. - "/ham/spam/eggs.py" => "spam-eggs".""" - script = os.path.abspath(get_script()) - script_dir = os.path.basename(os.path.dirname(script)) - script_base = os.path.splitext(os.path.basename(script))[0] - return "%s-%s" % (script_dir, script_base) - - -def get_data_dir(): - """Get data dir for the experiment. 
- - This is the subdirectory "data" of the directory containing - the main script.""" - return os.path.join(get_script_dir(), "data", get_experiment_name()) - - -def get_repo_base(): - """Get base directory of the repository, as an absolute path. - - Search upwards in the directory tree from the main script until a - directory with a subdirectory named ".hg" is found. - - Abort if the repo base cannot be found.""" - path = os.path.abspath(get_script_dir()) - while os.path.dirname(path) != path: - if os.path.exists(os.path.join(path, ".hg")): - return path - path = os.path.dirname(path) - sys.exit("repo base could not be found") - - -def is_running_on_cluster(): - node = platform.node() - return ( - "cluster" in node or - node.startswith("gkigrid") or - node in ["habakuk", "turtur"]) - - -def is_test_run(): - return ARGS.test_run == "yes" or ( - ARGS.test_run == "auto" and not is_running_on_cluster()) - - -def get_algo_nick(revision, config_nick): - return "{revision}-{config_nick}".format(**locals()) - - -class IssueConfig(object): - """Hold information about a planner configuration. - - See FastDownwardExperiment.add_algorithm() for documentation of the - constructor's options. 
- - """ - def __init__(self, nick, component_options, - build_options=None, driver_options=None): - self.nick = nick - self.component_options = component_options - self.build_options = build_options - self.driver_options = driver_options - - -class IssueExperiment(FastDownwardExperiment): - """Subclass of FastDownwardExperiment with some convenience features.""" - - DEFAULT_TEST_SUITE = ["depot:p01.pddl", "gripper:prob01.pddl"] - - DEFAULT_TABLE_ATTRIBUTES = [ - "cost", - "coverage", - "error", - "evaluations", - "expansions", - "expansions_until_last_jump", - "generated", - "memory", - "quality", - "run_dir", - "score_evaluations", - "score_expansions", - "score_generated", - "score_memory", - "score_search_time", - "score_total_time", - "search_time", - "total_time", - ] - - DEFAULT_SCATTER_PLOT_ATTRIBUTES = [ - "evaluations", - "expansions", - "expansions_until_last_jump", - "initial_h_value", - "memory", - "search_time", - "total_time", - ] - - PORTFOLIO_ATTRIBUTES = [ - "cost", - "coverage", - "error", - "plan_length", - "run_dir", - ] - - def __init__(self, revisions=None, configs=None, path=None, **kwargs): - """ - - You can either specify both *revisions* and *configs* or none - of them. If they are omitted, you will need to call - exp.add_algorithm() manually. - - If *revisions* is given, it must be a non-empty list of - revision identifiers, which specify which planner versions to - use in the experiment. The same versions are used for - translator, preprocessor and search. :: - - IssueExperiment(revisions=["issue123", "4b3d581643"], ...) - - If *configs* is given, it must be a non-empty list of - IssueConfig objects. :: - - IssueExperiment(..., configs=[ - IssueConfig("ff", ["--search", "eager_greedy(ff())"]), - IssueConfig( - "lama", [], - driver_options=["--alias", "seq-sat-lama-2011"]), - ]) - - If *path* is specified, it must be the path to where the - experiment should be built (e.g. - /home/john/experiments/issue123/exp01/). 
If omitted, the - experiment path is derived automatically from the main - script's filename. Example:: - - script = experiments/issue123/exp01.py --> - path = experiments/issue123/data/issue123-exp01/ - - """ - - revisions = revisions or [] - configs = configs or [] - path = path or get_data_dir() - - FastDownwardExperiment.__init__(self, path=path, **kwargs) - - if (revisions and not configs) or (not revisions and configs): - raise ValueError( - "please provide either both or none of revisions and configs") - - for rev in revisions: - for config in configs: - self.add_algorithm( - get_algo_nick(rev, config.nick), - get_repo_base(), - rev, - config.component_options, - build_options=config.build_options, - driver_options=config.driver_options) - - self._revisions = revisions - self._configs = configs - - @classmethod - def _is_portfolio(cls, config_nick): - return "fdss" in config_nick - - @classmethod - def get_supported_attributes(cls, config_nick, attributes): - if cls._is_portfolio(config_nick): - return [attr for attr in attributes - if attr in cls.PORTFOLIO_ATTRIBUTES] - return attributes - - def add_absolute_report_step(self, **kwargs): - """Add step that makes an absolute report. - - Absolute reports are useful for experiments that don't compare - revisions. - - The report is written to the experiment evaluation directory. - - All *kwargs* will be passed to the AbsoluteReport class. If the - keyword argument *attributes* is not specified, a default list - of attributes is used. :: - - exp.add_absolute_report_step(attributes=["coverage"]) - - """ - kwargs.setdefault("attributes", self.DEFAULT_TABLE_ATTRIBUTES) - report = AbsoluteReport(**kwargs) - outfile = os.path.join( - self.eval_dir, - get_experiment_name() + "." 
+ report.output_format) - self.add_report(report, outfile=outfile) - self.add_step(Step( - 'publish-absolute-report', subprocess.call, ['publish', outfile])) - - def add_comparison_table_step(self, **kwargs): - """Add a step that makes pairwise revision comparisons. - - Create comparative reports for all pairs of Fast Downward - revisions. Each report pairs up the runs of the same config and - lists the two absolute attribute values and their difference - for all attributes in kwargs["attributes"]. - - All *kwargs* will be passed to the CompareConfigsReport class. - If the keyword argument *attributes* is not specified, a - default list of attributes is used. :: - - exp.add_comparison_table_step(attributes=["coverage"]) - - """ - kwargs.setdefault("attributes", self.DEFAULT_TABLE_ATTRIBUTES) - - def make_comparison_tables(): - for rev1, rev2 in itertools.combinations(self._revisions, 2): - compared_configs = [] - for config in self._configs: - config_nick = config.nick - compared_configs.append( - ("%s-%s" % (rev1, config_nick), - "%s-%s" % (rev2, config_nick), - "Diff (%s)" % config_nick)) - report = CompareConfigsReport(compared_configs, **kwargs) - outfile = os.path.join( - self.eval_dir, - "%s-%s-%s-compare.%s" % ( - self.name, rev1, rev2, report.output_format)) - report(self.eval_dir, outfile) - - def publish_comparison_tables(): - for rev1, rev2 in itertools.combinations(self._revisions, 2): - outfile = os.path.join( - self.eval_dir, - "%s-%s-%s-compare.html" % (self.name, rev1, rev2)) - subprocess.call(["publish", outfile]) - - self.add_step(Step("make-comparison-tables", make_comparison_tables)) - self.add_step(Step( - "publish-comparison-tables", publish_comparison_tables)) - - def add_scatter_plot_step(self, relative=False, attributes=None): - """Add step creating (relative) scatter plots for all revision pairs. - - Create a scatter plot for each combination of attribute, - configuration and revisions pair. 
If *attributes* is not - specified, a list of common scatter plot attributes is used. - For portfolios all attributes except "cost", "coverage" and - "plan_length" will be ignored. :: - - exp.add_scatter_plot_step(attributes=["expansions"]) - - """ - if relative: - report_class = RelativeScatterPlotReport - scatter_dir = os.path.join(self.eval_dir, "scatter-relative") - step_name = "make-relative-scatter-plots" - else: - report_class = ScatterPlotReport - scatter_dir = os.path.join(self.eval_dir, "scatter-absolute") - step_name = "make-absolute-scatter-plots" - if attributes is None: - attributes = self.DEFAULT_SCATTER_PLOT_ATTRIBUTES - - def make_scatter_plot(config_nick, rev1, rev2, attribute): - name = "-".join([self.name, rev1, rev2, attribute, config_nick]) - print "Make scatter plot for", name - algo1 = "{}-{}".format(rev1, config_nick) - algo2 = "{}-{}".format(rev2, config_nick) - report = report_class( - filter_config=[algo1, algo2], - attributes=[attribute], - get_category=lambda run1, run2: run1["domain"], - legend_location=(1.3, 0.5)) - report( - self.eval_dir, - os.path.join(scatter_dir, rev1 + "-" + rev2, name)) - - def make_scatter_plots(): - for config in self._configs: - for rev1, rev2 in itertools.combinations(self._revisions, 2): - for attribute in self.get_supported_attributes( - config.nick, attributes): - make_scatter_plot(config.nick, rev1, rev2, attribute) - - self.add_step(Step(step_name, make_scatter_plots)) diff --git a/experiments/issue657/relativescatter.py b/experiments/issue657/relativescatter.py deleted file mode 100644 index 01fd00b1d0..0000000000 --- a/experiments/issue657/relativescatter.py +++ /dev/null @@ -1,110 +0,0 @@ -# -*- coding: utf-8 -*- - -from collections import defaultdict -import logging - -from matplotlib import ticker - -from downward.reports.scatter import ScatterPlotReport -from downward.reports.plot import PlotReport, Matplotlib, MatplotlibPlot - - -# TODO: handle outliers - -# TODO: this is mostly copied from 
ScatterMatplotlib (scatter.py) -class RelativeScatterMatplotlib(Matplotlib): - @classmethod - def _plot(cls, report, axes, categories, styles): - # Display grid - axes.grid(b=True, linestyle='-', color='0.75') - - has_points = False - # Generate the scatter plots - for category, coords in sorted(categories.items()): - X, Y = zip(*coords) - axes.scatter(X, Y, s=42, label=category, **styles[category]) - if X and Y: - has_points = True - - if report.xscale == 'linear' or report.yscale == 'linear': - plot_size = report.missing_val * 1.01 - else: - plot_size = report.missing_val * 1.25 - - # make 5 ticks above and below 1 - yticks = [] - tick_step = report.ylim_top**(1/5.0) - for i in xrange(-5, 6): - yticks.append(tick_step**i) - axes.set_yticks(yticks) - axes.get_yaxis().set_major_formatter(ticker.ScalarFormatter()) - - axes.set_xlim(report.xlim_left or -1, report.xlim_right or plot_size) - axes.set_ylim(report.ylim_bottom or -1, report.ylim_top or plot_size) - - for axis in [axes.xaxis, axes.yaxis]: - MatplotlibPlot.change_axis_formatter( - axis, - report.missing_val if report.show_missing else None) - return has_points - - -class RelativeScatterPlotReport(ScatterPlotReport): - """ - Generate a scatter plot that shows how a specific attribute in two - configurations. The attribute value in config 1 is shown on the - x-axis and the relation to the value in config 2 on the y-axis. - """ - - def __init__(self, show_missing=True, get_category=None, **kwargs): - ScatterPlotReport.__init__(self, show_missing, get_category, **kwargs) - if self.output_format == 'tex': - raise "not supported" - else: - self.writer = RelativeScatterMatplotlib - - def _fill_categories(self, runs): - # We discard the *runs* parameter. 
- # Map category names to value tuples - categories = defaultdict(list) - self.ylim_bottom = 2 - self.ylim_top = 0.5 - self.xlim_left = float("inf") - for (domain, problem), runs in self.problem_runs.items(): - if len(runs) != 2: - logging.critical("Can only compare 2 configs") - run1, run2 = runs - assert (run1['config'] == self.configs[0] and - run2['config'] == self.configs[1]) - val1 = run1.get(self.attribute) - val2 = run2.get(self.attribute) - if val1 is None or val2 is None: - continue - category = self.get_category(run1, run2) - assert val1 > 0, (domain, problem, self.configs[0], val1) - assert val2 > 0, (domain, problem, self.configs[1], val2) - x = val1 - y = val2 / float(val1) - - categories[category].append((x, y)) - - self.ylim_top = max(self.ylim_top, y) - self.ylim_bottom = min(self.ylim_bottom, y) - self.xlim_left = min(self.xlim_left, x) - - # center around 1 - if self.ylim_bottom < 1: - self.ylim_top = max(self.ylim_top, 1 / float(self.ylim_bottom)) - if self.ylim_top > 1: - self.ylim_bottom = min(self.ylim_bottom, 1 / float(self.ylim_top)) - if self.ylim_bottom == self.ylim_top: - self.ylim_bottom *= 0.95 - self.ylim_top *= 1.05 - return categories - - def _set_scales(self, xscale, yscale): - # ScatterPlots use log-scaling on the x-axis by default. - default_xscale = 'log' - if self.attribute and self.attribute in self.LINEAR: - default_xscale = 'linear' - PlotReport._set_scales(self, xscale or default_xscale, 'log') diff --git a/experiments/issue657/v1.py b/experiments/issue657/v1.py deleted file mode 100755 index 6dcfd8e262..0000000000 --- a/experiments/issue657/v1.py +++ /dev/null @@ -1,58 +0,0 @@ -#! 
/usr/bin/env python -# -*- coding: utf-8 -*- - -import os - -from lab.environments import LocalEnvironment, MaiaEnvironment - -from common_setup import IssueConfig, IssueExperiment, is_test_run - - -BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"] -REVISIONS = ["issue657-v1-base", "issue657-v1"] -CONFIGS = [ - IssueConfig(heuristic, ["--search", "astar({})".format(heuristic)]) - for heuristic in [ - "cegar(max_states=10000)", - "cegar(subtasks=[original()],max_states=10000)"] -] -SUITE = [ - 'airport', 'barman-opt11-strips', 'barman-opt14-strips', 'blocks', - 'childsnack-opt14-strips', 'depot', 'driverlog', - 'elevators-opt08-strips', 'elevators-opt11-strips', - 'floortile-opt11-strips', 'floortile-opt14-strips', 'freecell', - 'ged-opt14-strips', 'grid', 'gripper', 'hiking-opt14-strips', - 'logistics00', 'logistics98', 'miconic', 'movie', 'mprime', 'mystery', - 'nomystery-opt11-strips', 'openstacks-opt08-strips', - 'openstacks-opt11-strips', 'openstacks-opt14-strips', - 'openstacks-strips', 'parcprinter-08-strips', - 'parcprinter-opt11-strips', 'parking-opt11-strips', - 'parking-opt14-strips', 'pathways-noneg', 'pegsol-08-strips', - 'pegsol-opt11-strips', 'pipesworld-notankage', 'pipesworld-tankage', - 'psr-small', 'rovers', 'satellite', 'scanalyzer-08-strips', - 'scanalyzer-opt11-strips', 'sokoban-opt08-strips', - 'sokoban-opt11-strips', 'storage', 'tetris-opt14-strips', - 'tidybot-opt11-strips', 'tidybot-opt14-strips', 'tpp', - 'transport-opt08-strips', 'transport-opt11-strips', - 'transport-opt14-strips', 'trucks-strips', 'visitall-opt11-strips', - 'visitall-opt14-strips', 'woodworking-opt08-strips', - 'woodworking-opt11-strips', 'zenotravel'] -ENVIRONMENT = MaiaEnvironment( - priority=0, email="jendrik.seipp@unibas.ch") - -if is_test_run(): - SUITE = IssueExperiment.DEFAULT_TEST_SUITE - ENVIRONMENT = LocalEnvironment(processes=1) - -exp = IssueExperiment( - revisions=REVISIONS, - configs=CONFIGS, - environment=ENVIRONMENT, -) -exp.add_suite(BENCHMARKS_DIR, 
SUITE) - -exp.add_absolute_report_step() -exp.add_comparison_table_step() -exp.add_scatter_plot_step(attributes=["total_time"]) - -exp() diff --git a/experiments/issue657/v2.py b/experiments/issue657/v2.py deleted file mode 100755 index d34a285783..0000000000 --- a/experiments/issue657/v2.py +++ /dev/null @@ -1,65 +0,0 @@ -#! /usr/bin/env python -# -*- coding: utf-8 -*- - -import os - -from lab.environments import LocalEnvironment, MaiaEnvironment - -from common_setup import IssueConfig, IssueExperiment, is_test_run, get_repo_base -from relativescatter import RelativeScatterPlotReport - - -BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"] -REPO = get_repo_base() -SUITE = [ - 'airport', 'barman-opt11-strips', 'barman-opt14-strips', 'blocks', - 'childsnack-opt14-strips', 'depot', 'driverlog', - 'elevators-opt08-strips', 'elevators-opt11-strips', - 'floortile-opt11-strips', 'floortile-opt14-strips', 'freecell', - 'ged-opt14-strips', 'grid', 'gripper', 'hiking-opt14-strips', - 'logistics00', 'logistics98', 'miconic', 'movie', 'mprime', 'mystery', - 'nomystery-opt11-strips', 'openstacks-opt08-strips', - 'openstacks-opt11-strips', 'openstacks-opt14-strips', - 'openstacks-strips', 'parcprinter-08-strips', - 'parcprinter-opt11-strips', 'parking-opt11-strips', - 'parking-opt14-strips', 'pathways-noneg', 'pegsol-08-strips', - 'pegsol-opt11-strips', 'pipesworld-notankage', 'pipesworld-tankage', - 'psr-small', 'rovers', 'satellite', 'scanalyzer-08-strips', - 'scanalyzer-opt11-strips', 'sokoban-opt08-strips', - 'sokoban-opt11-strips', 'storage', 'tetris-opt14-strips', - 'tidybot-opt11-strips', 'tidybot-opt14-strips', 'tpp', - 'transport-opt08-strips', 'transport-opt11-strips', - 'transport-opt14-strips', 'trucks-strips', 'visitall-opt11-strips', - 'visitall-opt14-strips', 'woodworking-opt08-strips', - 'woodworking-opt11-strips', 'zenotravel'] -ENVIRONMENT = MaiaEnvironment( - priority=0, email="jendrik.seipp@unibas.ch") - -if is_test_run(): - SUITE = 
IssueExperiment.DEFAULT_TEST_SUITE - ENVIRONMENT = LocalEnvironment(processes=1) - -exp = IssueExperiment( - environment=ENVIRONMENT, -) -exp.add_suite(BENCHMARKS_DIR, SUITE) -exp.add_algorithm( - "01:issue657-v2-base:cegar", - REPO, - "issue657-v2-base", - ["--search", "astar(cegar(max_states=10000,max_time=infinity))"]) -exp.add_algorithm( - "02:issue657-v2:cegar", - REPO, - "issue657-v2", - ["--search", "astar(cegar(max_states=10000,max_time=infinity,max_transitions=infinity))"]) - -exp.add_absolute_report_step() -exp.add_report(RelativeScatterPlotReport( - filter_config=["01:issue657-v2-base:cegar", "02:issue657-v2:cegar"], - attributes=["total_time"], - get_category=lambda run1, run2: run1["domain"], - legend_location=(1.3, 0.5)), - outfile="issue657-base-vs-v2.png") - -exp() diff --git a/experiments/issue657/v3.py b/experiments/issue657/v3.py deleted file mode 100755 index 6c90bfeb5a..0000000000 --- a/experiments/issue657/v3.py +++ /dev/null @@ -1,58 +0,0 @@ -#! /usr/bin/env python -# -*- coding: utf-8 -*- - -import os - -from lab.environments import LocalEnvironment, MaiaEnvironment - -from common_setup import IssueConfig, IssueExperiment, is_test_run - - -BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"] -REVISIONS = ["issue657-v2", "issue657-v3"] -CONFIGS = [ - IssueConfig(heuristic, ["--search", "astar({})".format(heuristic)]) - for heuristic in [ - "cegar(subtasks=[landmarks(),goals()],max_transitions=1000000)", - "cegar(subtasks=[original()],max_transitions=1000000)"] -] -SUITE = [ - 'airport', 'barman-opt11-strips', 'barman-opt14-strips', 'blocks', - 'childsnack-opt14-strips', 'depot', 'driverlog', - 'elevators-opt08-strips', 'elevators-opt11-strips', - 'floortile-opt11-strips', 'floortile-opt14-strips', 'freecell', - 'ged-opt14-strips', 'grid', 'gripper', 'hiking-opt14-strips', - 'logistics00', 'logistics98', 'miconic', 'movie', 'mprime', 'mystery', - 'nomystery-opt11-strips', 'openstacks-opt08-strips', - 'openstacks-opt11-strips', 
'openstacks-opt14-strips', - 'openstacks-strips', 'parcprinter-08-strips', - 'parcprinter-opt11-strips', 'parking-opt11-strips', - 'parking-opt14-strips', 'pathways-noneg', 'pegsol-08-strips', - 'pegsol-opt11-strips', 'pipesworld-notankage', 'pipesworld-tankage', - 'psr-small', 'rovers', 'satellite', 'scanalyzer-08-strips', - 'scanalyzer-opt11-strips', 'sokoban-opt08-strips', - 'sokoban-opt11-strips', 'storage', 'tetris-opt14-strips', - 'tidybot-opt11-strips', 'tidybot-opt14-strips', 'tpp', - 'transport-opt08-strips', 'transport-opt11-strips', - 'transport-opt14-strips', 'trucks-strips', 'visitall-opt11-strips', - 'visitall-opt14-strips', 'woodworking-opt08-strips', - 'woodworking-opt11-strips', 'zenotravel'] -ENVIRONMENT = MaiaEnvironment( - priority=0, email="jendrik.seipp@unibas.ch") - -if is_test_run(): - SUITE = IssueExperiment.DEFAULT_TEST_SUITE - ENVIRONMENT = LocalEnvironment(processes=1) - -exp = IssueExperiment( - revisions=REVISIONS, - configs=CONFIGS, - environment=ENVIRONMENT, -) -exp.add_suite(BENCHMARKS_DIR, SUITE) - -exp.add_absolute_report_step() -exp.add_comparison_table_step() -exp.add_scatter_plot_step(relative=True, attributes=["total_time"]) - -exp() diff --git a/experiments/issue657/v4.py b/experiments/issue657/v4.py deleted file mode 100755 index 12372f1f18..0000000000 --- a/experiments/issue657/v4.py +++ /dev/null @@ -1,58 +0,0 @@ -#! 
/usr/bin/env python -# -*- coding: utf-8 -*- - -import os - -from lab.environments import LocalEnvironment, MaiaEnvironment - -from common_setup import IssueConfig, IssueExperiment, is_test_run - - -BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"] -REVISIONS = ["issue657-v3", "issue657-v4"] -CONFIGS = [ - IssueConfig(heuristic, ["--search", "astar({})".format(heuristic)]) - for heuristic in [ - "cegar(subtasks=[landmarks(),goals()],max_transitions=1000000)", - "cegar(subtasks=[original()],max_transitions=1000000)"] -] -SUITE = [ - 'airport', 'barman-opt11-strips', 'barman-opt14-strips', 'blocks', - 'childsnack-opt14-strips', 'depot', 'driverlog', - 'elevators-opt08-strips', 'elevators-opt11-strips', - 'floortile-opt11-strips', 'floortile-opt14-strips', 'freecell', - 'ged-opt14-strips', 'grid', 'gripper', 'hiking-opt14-strips', - 'logistics00', 'logistics98', 'miconic', 'movie', 'mprime', 'mystery', - 'nomystery-opt11-strips', 'openstacks-opt08-strips', - 'openstacks-opt11-strips', 'openstacks-opt14-strips', - 'openstacks-strips', 'parcprinter-08-strips', - 'parcprinter-opt11-strips', 'parking-opt11-strips', - 'parking-opt14-strips', 'pathways-noneg', 'pegsol-08-strips', - 'pegsol-opt11-strips', 'pipesworld-notankage', 'pipesworld-tankage', - 'psr-small', 'rovers', 'satellite', 'scanalyzer-08-strips', - 'scanalyzer-opt11-strips', 'sokoban-opt08-strips', - 'sokoban-opt11-strips', 'storage', 'tetris-opt14-strips', - 'tidybot-opt11-strips', 'tidybot-opt14-strips', 'tpp', - 'transport-opt08-strips', 'transport-opt11-strips', - 'transport-opt14-strips', 'trucks-strips', 'visitall-opt11-strips', - 'visitall-opt14-strips', 'woodworking-opt08-strips', - 'woodworking-opt11-strips', 'zenotravel'] -ENVIRONMENT = MaiaEnvironment( - priority=0, email="jendrik.seipp@unibas.ch") - -if is_test_run(): - SUITE = IssueExperiment.DEFAULT_TEST_SUITE - ENVIRONMENT = LocalEnvironment(processes=1) - -exp = IssueExperiment( - revisions=REVISIONS, - configs=CONFIGS, - 
environment=ENVIRONMENT, -) -exp.add_suite(BENCHMARKS_DIR, SUITE) - -exp.add_absolute_report_step() -exp.add_comparison_table_step() -exp.add_scatter_plot_step(relative=True, attributes=["total_time"]) - -exp() diff --git a/experiments/issue658/common_setup.py b/experiments/issue658/common_setup.py deleted file mode 100644 index 934531f15d..0000000000 --- a/experiments/issue658/common_setup.py +++ /dev/null @@ -1,357 +0,0 @@ -# -*- coding: utf-8 -*- - -import itertools -import os -import platform -import subprocess -import sys - -from lab.environments import LocalEnvironment, MaiaEnvironment -from lab.experiment import ARGPARSER -from lab.steps import Step -from lab import tools - -from downward.experiment import FastDownwardExperiment -from downward.reports.absolute import AbsoluteReport -from downward.reports.compare import CompareConfigsReport -from downward.reports.scatter import ScatterPlotReport - - -def parse_args(): - ARGPARSER.add_argument( - "--test", - choices=["yes", "no", "auto"], - default="auto", - dest="test_run", - help="test experiment locally on a small suite if --test=yes or " - "--test=auto and we are not on a cluster") - return ARGPARSER.parse_args() - -ARGS = parse_args() - - -def get_script(): - """Get file name of main script.""" - return tools.get_script_path() - - -def get_script_dir(): - """Get directory of main script. - - Usually a relative directory (depends on how it was called by the user.)""" - return os.path.dirname(get_script()) - - -def get_experiment_name(): - """Get name for experiment. - - Derived from the absolute filename of the main script, e.g. - "/ham/spam/eggs.py" => "spam-eggs".""" - script = os.path.abspath(get_script()) - script_dir = os.path.basename(os.path.dirname(script)) - script_base = os.path.splitext(os.path.basename(script))[0] - return "%s-%s" % (script_dir, script_base) - - -def get_data_dir(): - """Get data dir for the experiment. 
- - This is the subdirectory "data" of the directory containing - the main script.""" - return os.path.join(get_script_dir(), "data", get_experiment_name()) - - -def get_repo_base(): - """Get base directory of the repository, as an absolute path. - - Search upwards in the directory tree from the main script until a - directory with a subdirectory named ".hg" is found. - - Abort if the repo base cannot be found.""" - path = os.path.abspath(get_script_dir()) - while os.path.dirname(path) != path: - if os.path.exists(os.path.join(path, ".hg")): - return path - path = os.path.dirname(path) - sys.exit("repo base could not be found") - - -def is_running_on_cluster(): - node = platform.node() - return ( - "cluster" in node or - node.startswith("gkigrid") or - node in ["habakuk", "turtur"]) - - -def is_test_run(): - return ARGS.test_run == "yes" or ( - ARGS.test_run == "auto" and not is_running_on_cluster()) - - -def get_algo_nick(revision, config_nick): - return "{revision}-{config_nick}".format(**locals()) - - -class IssueConfig(object): - """Hold information about a planner configuration. - - See FastDownwardExperiment.add_algorithm() for documentation of the - constructor's options. 
- - """ - def __init__(self, nick, component_options, - build_options=None, driver_options=None): - self.nick = nick - self.component_options = component_options - self.build_options = build_options - self.driver_options = driver_options - - -class IssueExperiment(FastDownwardExperiment): - """Subclass of FastDownwardExperiment with some convenience features.""" - - DEFAULT_TEST_SUITE = "gripper:prob01.pddl" - - DEFAULT_TABLE_ATTRIBUTES = [ - "cost", - "coverage", - "error", - "evaluations", - "expansions", - "expansions_until_last_jump", - "generated", - "memory", - "quality", - "run_dir", - "score_evaluations", - "score_expansions", - "score_generated", - "score_memory", - "score_search_time", - "score_total_time", - "search_time", - "total_time", - ] - - DEFAULT_SCATTER_PLOT_ATTRIBUTES = [ - "evaluations", - "expansions", - "expansions_until_last_jump", - "initial_h_value", - "memory", - "search_time", - "total_time", - ] - - PORTFOLIO_ATTRIBUTES = [ - "cost", - "coverage", - "error", - "plan_length", - "run_dir", - ] - - def __init__(self, benchmarks_dir, suite, revisions=[], configs={}, - grid_priority=None, path=None, test_suite=None, - email=None, processes=None, - **kwargs): - """ - If *revisions* is specified, it should be a non-empty - list of revisions, which specify which planner versions to use - in the experiment. The same versions are used for translator, - preprocessor and search. :: - - IssueExperiment(revisions=["issue123", "4b3d581643"], ...) - - *configs* must be a non-empty list of IssueConfig objects. :: - - IssueExperiment(..., configs=[ - IssueConfig("ff", ["--search", "eager_greedy(ff())"]), - IssueConfig( - "lama", [], - driver_options=["--alias", "seq-sat-lama-2011"]), - ]) - - *suite* sets the benchmarks for the experiment. It must be a - single string or a list of strings specifying domains or - tasks. The downward.suites module has many predefined - suites. 
:: - - IssueExperiment(..., suite=["grid", "gripper:prob01.pddl"]) - - from downward import suites - IssueExperiment(..., suite=suites.suite_all()) - IssueExperiment(..., suite=suites.suite_satisficing_with_ipc11()) - IssueExperiment(..., suite=suites.suite_optimal()) - - Use *grid_priority* to set the job priority for cluster - experiments. It must be in the range [-1023, 0] where 0 is the - highest priority. By default the priority is 0. :: - - IssueExperiment(..., grid_priority=-500) - - If *path* is specified, it must be the path to where the - experiment should be built (e.g. - /home/john/experiments/issue123/exp01/). If omitted, the - experiment path is derived automatically from the main - script's filename. Example:: - - script = experiments/issue123/exp01.py --> - path = experiments/issue123/data/issue123-exp01/ - - Specify *test_suite* to set the benchmarks for experiment test - runs. By default the first gripper task is used. - - IssueExperiment(..., test_suite=["depot:pfile1", "tpp:p01.pddl"]) - - If *email* is specified, it should be an email address. This - email address will be notified upon completion of the experiments - if it is run on the cluster. 
- """ - - if is_test_run(): - kwargs["environment"] = LocalEnvironment(processes=processes) - suite = test_suite or self.DEFAULT_TEST_SUITE - elif "environment" not in kwargs: - kwargs["environment"] = MaiaEnvironment( - priority=grid_priority, email=email) - - path = path or get_data_dir() - - FastDownwardExperiment.__init__(self, path=path, **kwargs) - - repo = get_repo_base() - for rev in revisions: - for config in configs: - self.add_algorithm( - get_algo_nick(rev, config.nick), - repo, - rev, - config.component_options, - build_options=config.build_options, - driver_options=config.driver_options) - - self.add_suite(benchmarks_dir, suite) - - self._revisions = revisions - self._configs = configs - - @classmethod - def _is_portfolio(cls, config_nick): - return "fdss" in config_nick - - @classmethod - def get_supported_attributes(cls, config_nick, attributes): - if cls._is_portfolio(config_nick): - return [attr for attr in attributes - if attr in cls.PORTFOLIO_ATTRIBUTES] - return attributes - - def add_absolute_report_step(self, **kwargs): - """Add step that makes an absolute report. - - Absolute reports are useful for experiments that don't - compare revisions. - - The report is written to the experiment evaluation directory. - - All *kwargs* will be passed to the AbsoluteReport class. If - the keyword argument *attributes* is not specified, a - default list of attributes is used. :: - - exp.add_absolute_report_step(attributes=["coverage"]) - - """ - kwargs.setdefault("attributes", self.DEFAULT_TABLE_ATTRIBUTES) - report = AbsoluteReport(**kwargs) - outfile = os.path.join(self.eval_dir, - get_experiment_name() + "." + - report.output_format) - self.add_report(report, outfile=outfile) - self.add_step(Step('publish-absolute-report', - subprocess.call, - ['publish', outfile])) - - def add_comparison_table_step(self, **kwargs): - """Add a step that makes pairwise revision comparisons. - - Create comparative reports for all pairs of Fast Downward - revisions. 
Each report pairs up the runs of the same config and - lists the two absolute attribute values and their difference - for all attributes in kwargs["attributes"]. - - All *kwargs* will be passed to the CompareConfigsReport class. - If the keyword argument *attributes* is not specified, a - default list of attributes is used. :: - - exp.add_comparison_table_step(attributes=["coverage"]) - - """ - kwargs.setdefault("attributes", self.DEFAULT_TABLE_ATTRIBUTES) - - def make_comparison_tables(): - for rev1, rev2 in itertools.combinations(self._revisions, 2): - compared_configs = [] - for config in self._configs: - config_nick = config.nick - compared_configs.append( - ("%s-%s" % (rev1, config_nick), - "%s-%s" % (rev2, config_nick), - "Diff (%s)" % config_nick)) - report = CompareConfigsReport(compared_configs, **kwargs) - outfile = os.path.join( - self.eval_dir, - "%s-%s-%s-compare" % (self.name, rev1, rev2) - + "." + report.output_format) - report(self.eval_dir, outfile) - - def publish_comparison_tables(): - for rev1, rev2 in itertools.combinations(self._revisions, 2): - outfile = os.path.join( - self.eval_dir, - "%s-%s-%s-compare" % (self.name, rev1, rev2) - + ".html") - subprocess.call(['publish', outfile]) - - self.add_step(Step("make-comparison-tables", make_comparison_tables)) - self.add_step(Step("publish-comparison-tables", publish_comparison_tables)) - - def add_scatter_plot_step(self, attributes=None): - """Add a step that creates scatter plots for all revision pairs. - - Create a scatter plot for each combination of attribute, - configuration and revisions pair. If *attributes* is not - specified, a list of common scatter plot attributes is used. - For portfolios all attributes except "cost", "coverage" and - "plan_length" will be ignored. 
:: - - exp.add_scatter_plot_step(attributes=["expansions"]) - - """ - if attributes is None: - attributes = self.DEFAULT_SCATTER_PLOT_ATTRIBUTES - scatter_dir = os.path.join(self.eval_dir, "scatter") - - def make_scatter_plot(config_nick, rev1, rev2, attribute): - name = "-".join([self.name, rev1, rev2, attribute, config_nick]) - print "Make scatter plot for", name - algo1 = "%s-%s" % (rev1, config_nick) - algo2 = "%s-%s" % (rev2, config_nick) - report = ScatterPlotReport( - filter_config=[algo1, algo2], - attributes=[attribute], - get_category=lambda run1, run2: run1["domain"], - legend_location=(1.3, 0.5)) - report( - self.eval_dir, - os.path.join(scatter_dir, rev1 + "-" + rev2, name)) - - def make_scatter_plots(): - for config in self._configs: - for rev1, rev2 in itertools.combinations(self._revisions, 2): - for attribute in self.get_supported_attributes( - config.nick, attributes): - make_scatter_plot(config.nick, rev1, rev2, attribute) - - self.add_step(Step("make-scatter-plots", make_scatter_plots)) diff --git a/experiments/issue658/ms-parser.py b/experiments/issue658/ms-parser.py deleted file mode 100755 index c219b72ba5..0000000000 --- a/experiments/issue658/ms-parser.py +++ /dev/null @@ -1,72 +0,0 @@ -#! 
/usr/bin/env python - -from lab.parser import Parser - -parser = Parser() -parser.add_pattern('ms_final_size', 'Final transition system size: (\d+)', required=False, type=int) -parser.add_pattern('ms_construction_time', 'Done initializing merge-and-shrink heuristic \[(.+)s\]', required=False, type=float) -parser.add_pattern('ms_memory_delta', 'Final peak memory increase of merge-and-shrink computation: (\d+) KB', required=False, type=int) -parser.add_pattern('actual_search_time', 'Actual search time: (.+)s \[t=.+s\]', required=False, type=float) - -def check_ms_constructed(content, props): - ms_construction_time = props.get('ms_construction_time') - abstraction_constructed = False - if ms_construction_time is not None: - abstraction_constructed = True - props['ms_abstraction_constructed'] = abstraction_constructed - -parser.add_function(check_ms_constructed) - -def check_planner_exit_reason(content, props): - ms_abstraction_constructed = props.get('ms_abstraction_constructed') - error = props.get('error') - if error != 'none' and error != 'timeout' and error != 'out-of-memory': - print 'error: %s' % error - return - - # Check whether merge-and-shrink computation or search ran out of - # time or memory. 
- ms_out_of_time = False - ms_out_of_memory = False - search_out_of_time = False - search_out_of_memory = False - if ms_abstraction_constructed == False: - if error == 'timeout': - ms_out_of_time = True - elif error == 'out-of-memory': - ms_out_of_memory = True - elif ms_abstraction_constructed == True: - if error == 'timeout': - search_out_of_time = True - elif error == 'out-of-memory': - search_out_of_memory = True - props['ms_out_of_time'] = ms_out_of_time - props['ms_out_of_memory'] = ms_out_of_memory - props['search_out_of_time'] = search_out_of_time - props['search_out_of_memory'] = search_out_of_memory - -parser.add_function(check_planner_exit_reason) - -def check_perfect_heuristic(content, props): - plan_length = props.get('plan_length') - expansions = props.get('expansions') - if plan_length != None: - perfect_heuristic = False - if plan_length + 1 == expansions: - perfect_heuristic = True - props['perfect_heuristic'] = perfect_heuristic - -parser.add_function(check_perfect_heuristic) - -def check_proved_unsolvability(content, props): - proved_unsolvability = False - if props['coverage'] == 0: - for line in content.splitlines(): - if line == 'Completely explored state space -- no solution!': - proved_unsolvability = True - break - props['proved_unsolvability'] = proved_unsolvability - -parser.add_function(check_proved_unsolvability) - -parser.parse() diff --git a/experiments/issue658/relativescatter.py b/experiments/issue658/relativescatter.py deleted file mode 100644 index 68e2529f34..0000000000 --- a/experiments/issue658/relativescatter.py +++ /dev/null @@ -1,106 +0,0 @@ -# -*- coding: utf-8 -*- - -from collections import defaultdict - -from matplotlib import ticker - -from downward.reports.scatter import ScatterPlotReport -from downward.reports.plot import PlotReport, Matplotlib, MatplotlibPlot - - -# TODO: handle outliers - -# TODO: this is mostly copied from ScatterMatplotlib (scatter.py) -class RelativeScatterMatplotlib(Matplotlib): - @classmethod - 
def _plot(cls, report, axes, categories, styles): - # Display grid - axes.grid(b=True, linestyle='-', color='0.75') - - has_points = False - # Generate the scatter plots - for category, coords in sorted(categories.items()): - X, Y = zip(*coords) - axes.scatter(X, Y, s=42, label=category, **styles[category]) - if X and Y: - has_points = True - - if report.xscale == 'linear' or report.yscale == 'linear': - plot_size = report.missing_val * 1.01 - else: - plot_size = report.missing_val * 1.25 - - # make 5 ticks above and below 1 - yticks = [] - tick_step = report.ylim_top**(1/5.0) - for i in xrange(-5, 6): - yticks.append(tick_step**i) - axes.set_yticks(yticks) - axes.get_yaxis().set_major_formatter(ticker.ScalarFormatter()) - - axes.set_xlim(report.xlim_left or -1, report.xlim_right or plot_size) - axes.set_ylim(report.ylim_bottom or -1, report.ylim_top or plot_size) - - for axis in [axes.xaxis, axes.yaxis]: - MatplotlibPlot.change_axis_formatter(axis, - report.missing_val if report.show_missing else None) - return has_points - - - -class RelativeScatterPlotReport(ScatterPlotReport): - """ - Generate a scatter plot that shows how a specific attribute in two - configurations. The attribute value in config 1 is shown on the - x-axis and the relation to the value in config 2 on the y-axis. - """ - - def __init__(self, show_missing=True, get_category=None, **kwargs): - ScatterPlotReport.__init__(self, show_missing, get_category, **kwargs) - if self.output_format == 'tex': - raise "not supported" - else: - self.writer = RelativeScatterMatplotlib - - def _fill_categories(self, runs): - # We discard the *runs* parameter. 
- # Map category names to value tuples - categories = defaultdict(list) - self.ylim_bottom = 2 - self.ylim_top = 0.5 - self.xlim_left = float("inf") - for (domain, problem), runs in self.problem_runs.items(): - if len(runs) != 2: - continue - run1, run2 = runs - assert (run1['config'] == self.configs[0] and - run2['config'] == self.configs[1]) - val1 = run1.get(self.attribute) - val2 = run2.get(self.attribute) - if val1 is None or val2 is None: - continue - category = self.get_category(run1, run2) - assert val1 > 0, (domain, problem, self.configs[0], val1) - assert val2 > 0, (domain, problem, self.configs[1], val2) - x = val1 - y = val2 / float(val1) - - categories[category].append((x, y)) - - self.ylim_top = max(self.ylim_top, y) - self.ylim_bottom = min(self.ylim_bottom, y) - self.xlim_left = min(self.xlim_left, x) - - # center around 1 - if self.ylim_bottom < 1: - self.ylim_top = max(self.ylim_top, 1 / float(self.ylim_bottom)) - if self.ylim_top > 1: - self.ylim_bottom = min(self.ylim_bottom, 1 / float(self.ylim_top)) - return categories - - def _set_scales(self, xscale, yscale): - # ScatterPlots use log-scaling on the x-axis by default. - default_xscale = 'log' - if self.attribute and self.attribute in self.LINEAR: - default_xscale = 'linear' - PlotReport._set_scales(self, xscale or default_xscale, 'log') diff --git a/experiments/issue658/suites.py b/experiments/issue658/suites.py deleted file mode 100644 index 4615212cfd..0000000000 --- a/experiments/issue658/suites.py +++ /dev/null @@ -1,350 +0,0 @@ -#! /usr/bin/env python -# -*- coding: utf-8 -*- - -import argparse -import textwrap - - -HELP = "Convert suite name to list of domains or tasks." 
- - -def suite_alternative_formulations(): - return ['airport-adl', 'no-mprime', 'no-mystery'] - - -def suite_ipc98_to_ipc04_adl(): - return [ - 'assembly', 'miconic-fulladl', 'miconic-simpleadl', - 'optical-telegraphs', 'philosophers', 'psr-large', - 'psr-middle', 'schedule', - ] - - -def suite_ipc98_to_ipc04_strips(): - return [ - 'airport', 'blocks', 'depot', 'driverlog', 'freecell', 'grid', - 'gripper', 'logistics00', 'logistics98', 'miconic', 'movie', - 'mprime', 'mystery', 'pipesworld-notankage', 'psr-small', - 'satellite', 'zenotravel', - ] - - -def suite_ipc98_to_ipc04(): - # All IPC1-4 domains, including the trivial Movie. - return sorted(suite_ipc98_to_ipc04_adl() + suite_ipc98_to_ipc04_strips()) - - -def suite_ipc06_adl(): - return [ - 'openstacks', - 'pathways', - 'trucks', - ] - - -def suite_ipc06_strips_compilations(): - return [ - 'openstacks-strips', - 'pathways-noneg', - 'trucks-strips', - ] - - -def suite_ipc06_strips(): - return [ - 'pipesworld-tankage', - 'rovers', - 'storage', - 'tpp', - ] - - -def suite_ipc06(): - return sorted(suite_ipc06_adl() + suite_ipc06_strips()) - - -def suite_ipc08_common_strips(): - return [ - 'parcprinter-08-strips', - 'pegsol-08-strips', - 'scanalyzer-08-strips', - ] - - -def suite_ipc08_opt_adl(): - return ['openstacks-opt08-adl'] - - -def suite_ipc08_opt_strips(): - return sorted(suite_ipc08_common_strips() + [ - 'elevators-opt08-strips', - 'openstacks-opt08-strips', - 'sokoban-opt08-strips', - 'transport-opt08-strips', - 'woodworking-opt08-strips', - ]) - - -def suite_ipc08_opt(): - return sorted(suite_ipc08_opt_strips() + suite_ipc08_opt_adl()) - - -def suite_ipc08_sat_adl(): - return ['openstacks-sat08-adl'] - - -def suite_ipc08_sat_strips(): - return sorted(suite_ipc08_common_strips() + [ - # Note: cyber-security is missing. 
- 'elevators-sat08-strips', - 'openstacks-sat08-strips', - 'sokoban-sat08-strips', - 'transport-sat08-strips', - 'woodworking-sat08-strips', - ]) - - -def suite_ipc08_sat(): - return sorted(suite_ipc08_sat_strips() + suite_ipc08_sat_adl()) - - -def suite_ipc08(): - return sorted(set(suite_ipc08_opt() + suite_ipc08_sat())) - - -def suite_ipc11_opt(): - return [ - 'barman-opt11-strips', - 'elevators-opt11-strips', - 'floortile-opt11-strips', - 'nomystery-opt11-strips', - 'openstacks-opt11-strips', - 'parcprinter-opt11-strips', - 'parking-opt11-strips', - 'pegsol-opt11-strips', - 'scanalyzer-opt11-strips', - 'sokoban-opt11-strips', - 'tidybot-opt11-strips', - 'transport-opt11-strips', - 'visitall-opt11-strips', - 'woodworking-opt11-strips', - ] - - -def suite_ipc11_sat(): - return [ - 'barman-sat11-strips', - 'elevators-sat11-strips', - 'floortile-sat11-strips', - 'nomystery-sat11-strips', - 'openstacks-sat11-strips', - 'parcprinter-sat11-strips', - 'parking-sat11-strips', - 'pegsol-sat11-strips', - 'scanalyzer-sat11-strips', - 'sokoban-sat11-strips', - 'tidybot-sat11-strips', - 'transport-sat11-strips', - 'visitall-sat11-strips', - 'woodworking-sat11-strips', - ] - - -def suite_ipc11(): - return sorted(suite_ipc11_opt() + suite_ipc11_sat()) - - -def suite_ipc14_agl_adl(): - return [ - 'cavediving-14-adl', - 'citycar-sat14-adl', - 'maintenance-sat14-adl', - ] - - -def suite_ipc14_agl_strips(): - return [ - 'barman-sat14-strips', - 'childsnack-sat14-strips', - 'floortile-sat14-strips', - 'ged-sat14-strips', - 'hiking-agl14-strips', - 'openstacks-agl14-strips', - 'parking-sat14-strips', - 'tetris-sat14-strips', - 'thoughtful-sat14-strips', - 'transport-sat14-strips', - 'visitall-sat14-strips', - ] - - -def suite_ipc14_agl(): - return sorted(suite_ipc14_agl_adl() + suite_ipc14_agl_strips()) - - -def suite_ipc14_mco_adl(): - return [ - 'cavediving-14-adl', - 'citycar-sat14-adl', - 'maintenance-sat14-adl', - ] - - -def suite_ipc14_mco_strips(): - return [ - 
'barman-mco14-strips', - 'childsnack-sat14-strips', - 'floortile-sat14-strips', - 'ged-sat14-strips', - 'hiking-sat14-strips', - 'openstacks-sat14-strips', - 'parking-sat14-strips', - 'tetris-sat14-strips', - 'thoughtful-mco14-strips', - 'transport-sat14-strips', - 'visitall-sat14-strips', - ] - - -def suite_ipc14_mco(): - return sorted(suite_ipc14_mco_adl() + suite_ipc14_mco_strips()) - - -def suite_ipc14_opt_adl(): - return [ - 'cavediving-14-adl', - 'citycar-opt14-adl', - 'maintenance-opt14-adl', - ] - - -def suite_ipc14_opt_strips(): - return [ - 'barman-opt14-strips', - 'childsnack-opt14-strips', - 'floortile-opt14-strips', - 'ged-opt14-strips', - 'hiking-opt14-strips', - 'openstacks-opt14-strips', - 'parking-opt14-strips', - 'tetris-opt14-strips', - 'tidybot-opt14-strips', - 'transport-opt14-strips', - 'visitall-opt14-strips', - ] - - -def suite_ipc14_opt(): - return sorted(suite_ipc14_opt_adl() + suite_ipc14_opt_strips()) - - -def suite_ipc14_sat_adl(): - return [ - 'cavediving-14-adl', - 'citycar-sat14-adl', - 'maintenance-sat14-adl', - ] - - -def suite_ipc14_sat_strips(): - return [ - 'barman-sat14-strips', - 'childsnack-sat14-strips', - 'floortile-sat14-strips', - 'ged-sat14-strips', - 'hiking-sat14-strips', - 'openstacks-sat14-strips', - 'parking-sat14-strips', - 'tetris-sat14-strips', - 'thoughtful-sat14-strips', - 'transport-sat14-strips', - 'visitall-sat14-strips', - ] - - -def suite_ipc14_sat(): - return sorted(suite_ipc14_sat_adl() + suite_ipc14_sat_strips()) - - -def suite_ipc14(): - return sorted(set( - suite_ipc14_agl() + suite_ipc14_mco() + - suite_ipc14_opt() + suite_ipc14_sat())) - - -def suite_unsolvable(): - return sorted( - ['mystery:prob%02d.pddl' % index - for index in [4, 5, 7, 8, 12, 16, 18, 21, 22, 23, 24]] + - ['miconic-fulladl:f21-3.pddl', 'miconic-fulladl:f30-2.pddl']) - - -def suite_optimal_adl(): - return sorted( - suite_ipc98_to_ipc04_adl() + suite_ipc06_adl() + - suite_ipc08_opt_adl() + suite_ipc14_opt_adl()) - - -def 
suite_optimal_strips(): - return sorted( - suite_ipc98_to_ipc04_strips() + suite_ipc06_strips() + - suite_ipc06_strips_compilations() + suite_ipc08_opt_strips() + - suite_ipc11_opt() + suite_ipc14_opt_strips()) - - -def suite_optimal(): - return sorted(suite_optimal_adl() + suite_optimal_strips()) - - -def suite_satisficing_adl(): - return sorted( - suite_ipc98_to_ipc04_adl() + suite_ipc06_adl() + - suite_ipc08_sat_adl() + suite_ipc14_sat_adl()) - - -def suite_satisficing_strips(): - return sorted( - suite_ipc98_to_ipc04_strips() + suite_ipc06_strips() + - suite_ipc06_strips_compilations() + suite_ipc08_sat_strips() + - suite_ipc11_sat() + suite_ipc14_sat_strips()) - - -def suite_satisficing(): - return sorted(suite_satisficing_adl() + suite_satisficing_strips()) - - -def suite_all(): - return sorted( - suite_ipc98_to_ipc04() + suite_ipc06() + - suite_ipc06_strips_compilations() + suite_ipc08() + - suite_ipc11() + suite_ipc14() + suite_alternative_formulations()) - - -def parse_args(): - parser = argparse.ArgumentParser() - parser.add_argument("suite", help="suite name") - return parser.parse_args() - - -def main(): - prefix = "suite_" - suite_names = [ - name[len(prefix):] for name in sorted(globals().keys()) - if name.startswith(prefix)] - parser = argparse.ArgumentParser(description=HELP) - parser.add_argument("suite", choices=suite_names, help="suite name") - parser.add_argument( - "--width", default=72, type=int, - help="output line width (default: %(default)s). Use 1 for single " - "column.") - args = parser.parse_args() - suite_func = globals()[prefix + args.suite] - print(textwrap.fill( - str(suite_func()), - width=args.width, - break_long_words=False, - break_on_hyphens=False)) - - -if __name__ == "__main__": - main() diff --git a/experiments/issue658/v1.py b/experiments/issue658/v1.py deleted file mode 100755 index 4df2198006..0000000000 --- a/experiments/issue658/v1.py +++ /dev/null @@ -1,90 +0,0 @@ -#! 
/usr/bin/env python -# -*- coding: utf-8 -*- - -import os -import suites -from lab.reports import Attribute, gm - -from common_setup import IssueConfig, IssueExperiment -try: - from relativescatter import RelativeScatterPlotReport - matplotlib = True -except ImportError: - print 'matplotlib not availabe, scatter plots not available' - matplotlib = False - -def main(revisions=None): - benchmarks_dir=os.path.expanduser('~/repos/downward/benchmarks') - suite=suites.suite_optimal_strips() - - configs = { - IssueConfig('rl-b50k', ['--search', 'astar(merge_and_shrink(merge_strategy=merge_linear(variable_order=reverse_level),shrink_strategy=shrink_bisimulation(greedy=false),label_reduction=exact(before_shrinking=true,before_merging=false),max_states=50000,threshold_before_merge=1))']), - IssueConfig('cggl-b50k', ['--search', 'astar(merge_and_shrink(merge_strategy=merge_linear(variable_order=cg_goal_level),shrink_strategy=shrink_bisimulation(greedy=false),label_reduction=exact(before_shrinking=true,before_merging=false),max_states=50000,threshold_before_merge=1))']), - IssueConfig('dfp-b50k', ['--search', 'astar(merge_and_shrink(merge_strategy=merge_dfp,shrink_strategy=shrink_bisimulation(greedy=false),label_reduction=exact(before_shrinking=true,before_merging=false),max_states=50000,threshold_before_merge=1))']), - IssueConfig('rl-ginf', ['--search', 'astar(merge_and_shrink(merge_strategy=merge_linear(variable_order=reverse_level),shrink_strategy=shrink_bisimulation(greedy=true),label_reduction=exact(before_shrinking=true,before_merging=false),max_states=infinity,threshold_before_merge=1))']), - IssueConfig('cggl-ginf', ['--search', 'astar(merge_and_shrink(merge_strategy=merge_linear(variable_order=cg_goal_level),shrink_strategy=shrink_bisimulation(greedy=true),label_reduction=exact(before_shrinking=true,before_merging=false),max_states=infinity,threshold_before_merge=1))']), - IssueConfig('dfp-ginf', ['--search', 
'astar(merge_and_shrink(merge_strategy=merge_dfp,shrink_strategy=shrink_bisimulation(greedy=true),label_reduction=exact(before_shrinking=true,before_merging=false),max_states=infinity,threshold_before_merge=1))']), - IssueConfig('rl-f50k', ['--search', 'astar(merge_and_shrink(merge_strategy=merge_linear(variable_order=reverse_level),shrink_strategy=shrink_fh(),label_reduction=exact(before_shrinking=false,before_merging=true),max_states=50000))']), - IssueConfig('cggl-f50k', ['--search', 'astar(merge_and_shrink(merge_strategy=merge_linear(variable_order=cg_goal_level),shrink_strategy=shrink_fh(),label_reduction=exact(before_shrinking=false,before_merging=true),max_states=50000))']), - IssueConfig('dfp-f50k', ['--search', 'astar(merge_and_shrink(merge_strategy=merge_dfp,shrink_strategy=shrink_fh(),label_reduction=exact(before_shrinking=false,before_merging=true),max_states=50000))']), - } - - exp = IssueExperiment( - benchmarks_dir=benchmarks_dir, - suite=suite, - revisions=revisions, - configs=configs, - test_suite=['depot:p01.pddl'], - processes=4, - email='silvan.sievers@unibas.ch', - ) - exp.add_resource('ms_parser', 'ms-parser.py', dest='ms-parser.py') - exp.add_command('ms-parser', ['ms_parser']) - - # planner outcome attributes - perfect_heuristic = Attribute('perfect_heuristic', absolute=True, min_wins=False) - proved_unsolvability = Attribute('proved_unsolvability', absolute=True, min_wins=False) - actual_search_time = Attribute('actual_search_time', absolute=False, min_wins=True, functions=[gm]) - - # m&s attributes - ms_construction_time = Attribute('ms_construction_time', absolute=False, min_wins=True, functions=[gm]) - ms_abstraction_constructed = Attribute('ms_abstraction_constructed', absolute=True, min_wins=False) - ms_final_size = Attribute('ms_final_size', absolute=False, min_wins=True) - ms_out_of_memory = Attribute('ms_out_of_memory', absolute=True, min_wins=True) - ms_out_of_time = Attribute('ms_out_of_time', absolute=True, min_wins=True) - 
search_out_of_memory = Attribute('search_out_of_memory', absolute=True, min_wins=True) - search_out_of_time = Attribute('search_out_of_time', absolute=True, min_wins=True) - - extra_attributes = [ - perfect_heuristic, - proved_unsolvability, - actual_search_time, - - ms_construction_time, - ms_abstraction_constructed, - ms_final_size, - ms_out_of_memory, - ms_out_of_time, - search_out_of_memory, - search_out_of_time, - ] - attributes = exp.DEFAULT_TABLE_ATTRIBUTES - attributes.extend(extra_attributes) - - exp.add_comparison_table_step() - - if matplotlib: - for attribute in ["memory", "total_time"]: - for config in configs: - exp.add_report( - RelativeScatterPlotReport( - attributes=[attribute], - filter_config=["{}-{}".format(rev, config.nick) for rev in revisions], - get_category=lambda run1, run2: run1.get("domain"), - ), - outfile="{}-{}-{}.png".format(exp.name, attribute, config.nick) - ) - - exp() - -main(revisions=['issue658-base', 'issue658-v1']) diff --git a/experiments/issue658/v2-dfp.py b/experiments/issue658/v2-dfp.py deleted file mode 100755 index 16376a1358..0000000000 --- a/experiments/issue658/v2-dfp.py +++ /dev/null @@ -1,84 +0,0 @@ -#! 
/usr/bin/env python -# -*- coding: utf-8 -*- - -import os -import suites -from lab.reports import Attribute, gm - -from common_setup import IssueConfig, IssueExperiment -try: - from relativescatter import RelativeScatterPlotReport - matplotlib = True -except ImportError: - print 'matplotlib not availabe, scatter plots not available' - matplotlib = False - -def main(revisions=None): - benchmarks_dir=os.path.expanduser('~/repos/downward/benchmarks') - suite=suites.suite_optimal_strips() - - configs = { - IssueConfig('dfp-b50k', ['--search', 'astar(merge_and_shrink(merge_strategy=merge_dfp,shrink_strategy=shrink_bisimulation(greedy=false),label_reduction=exact(before_shrinking=true,before_merging=false),max_states=50000,threshold_before_merge=1))']), - IssueConfig('dfp-ginf', ['--search', 'astar(merge_and_shrink(merge_strategy=merge_dfp,shrink_strategy=shrink_bisimulation(greedy=true),label_reduction=exact(before_shrinking=true,before_merging=false),max_states=infinity,threshold_before_merge=1))']), - IssueConfig('dfp-f50k', ['--search', 'astar(merge_and_shrink(merge_strategy=merge_dfp,shrink_strategy=shrink_fh(),label_reduction=exact(before_shrinking=false,before_merging=true),max_states=50000))']), - } - - exp = IssueExperiment( - benchmarks_dir=benchmarks_dir, - suite=suite, - revisions=revisions, - configs=configs, - test_suite=['depot:p01.pddl'], - processes=4, - email='silvan.sievers@unibas.ch', - ) - exp.add_resource('ms_parser', 'ms-parser.py', dest='ms-parser.py') - exp.add_command('ms-parser', ['ms_parser']) - - # planner outcome attributes - perfect_heuristic = Attribute('perfect_heuristic', absolute=True, min_wins=False) - proved_unsolvability = Attribute('proved_unsolvability', absolute=True, min_wins=False) - actual_search_time = Attribute('actual_search_time', absolute=False, min_wins=True, functions=[gm]) - - # m&s attributes - ms_construction_time = Attribute('ms_construction_time', absolute=False, min_wins=True, functions=[gm]) - 
ms_abstraction_constructed = Attribute('ms_abstraction_constructed', absolute=True, min_wins=False) - ms_final_size = Attribute('ms_final_size', absolute=False, min_wins=True) - ms_out_of_memory = Attribute('ms_out_of_memory', absolute=True, min_wins=True) - ms_out_of_time = Attribute('ms_out_of_time', absolute=True, min_wins=True) - search_out_of_memory = Attribute('search_out_of_memory', absolute=True, min_wins=True) - search_out_of_time = Attribute('search_out_of_time', absolute=True, min_wins=True) - - extra_attributes = [ - perfect_heuristic, - proved_unsolvability, - actual_search_time, - - ms_construction_time, - ms_abstraction_constructed, - ms_final_size, - ms_out_of_memory, - ms_out_of_time, - search_out_of_memory, - search_out_of_time, - ] - attributes = exp.DEFAULT_TABLE_ATTRIBUTES - attributes.extend(extra_attributes) - - exp.add_comparison_table_step() - - if matplotlib: - for attribute in ["memory", "total_time"]: - for config in configs: - exp.add_report( - RelativeScatterPlotReport( - attributes=[attribute], - filter_config=["{}-{}".format(rev, config.nick) for rev in revisions], - get_category=lambda run1, run2: run1.get("domain"), - ), - outfile="{}-{}-{}.png".format(exp.name, attribute, config.nick) - ) - - exp() - -main(revisions=['issue658-base', 'issue658-v2']) diff --git a/experiments/issue659/common_setup.py b/experiments/issue659/common_setup.py deleted file mode 100644 index 4dff4aacfd..0000000000 --- a/experiments/issue659/common_setup.py +++ /dev/null @@ -1,335 +0,0 @@ -# -*- coding: utf-8 -*- - -import itertools -import os -import platform -import subprocess -import sys - -from lab.experiment import ARGPARSER -from lab.steps import Step -from lab import tools - -from downward.experiment import FastDownwardExperiment -from downward.reports.absolute import AbsoluteReport -from downward.reports.compare import CompareConfigsReport -from downward.reports.scatter import ScatterPlotReport - -from relativescatter import 
RelativeScatterPlotReport - - -def parse_args(): - ARGPARSER.add_argument( - "--test", - choices=["yes", "no", "auto"], - default="auto", - dest="test_run", - help="test experiment locally on a small suite if --test=yes or " - "--test=auto and we are not on a cluster") - return ARGPARSER.parse_args() - -ARGS = parse_args() - - -def get_script(): - """Get file name of main script.""" - return tools.get_script_path() - - -def get_script_dir(): - """Get directory of main script. - - Usually a relative directory (depends on how it was called by the user.)""" - return os.path.dirname(get_script()) - - -def get_experiment_name(): - """Get name for experiment. - - Derived from the absolute filename of the main script, e.g. - "/ham/spam/eggs.py" => "spam-eggs".""" - script = os.path.abspath(get_script()) - script_dir = os.path.basename(os.path.dirname(script)) - script_base = os.path.splitext(os.path.basename(script))[0] - return "%s-%s" % (script_dir, script_base) - - -def get_data_dir(): - """Get data dir for the experiment. - - This is the subdirectory "data" of the directory containing - the main script.""" - return os.path.join(get_script_dir(), "data", get_experiment_name()) - - -def get_repo_base(): - """Get base directory of the repository, as an absolute path. - - Search upwards in the directory tree from the main script until a - directory with a subdirectory named ".hg" is found. 
- - Abort if the repo base cannot be found.""" - path = os.path.abspath(get_script_dir()) - while os.path.dirname(path) != path: - if os.path.exists(os.path.join(path, ".hg")): - return path - path = os.path.dirname(path) - sys.exit("repo base could not be found") - - -def is_running_on_cluster(): - node = platform.node() - return ( - "cluster" in node or - node.startswith("gkigrid") or - node in ["habakuk", "turtur"]) - - -def is_test_run(): - return ARGS.test_run == "yes" or ( - ARGS.test_run == "auto" and not is_running_on_cluster()) - - -def get_algo_nick(revision, config_nick): - return "{revision}-{config_nick}".format(**locals()) - - -class IssueConfig(object): - """Hold information about a planner configuration. - - See FastDownwardExperiment.add_algorithm() for documentation of the - constructor's options. - - """ - def __init__(self, nick, component_options, - build_options=None, driver_options=None): - self.nick = nick - self.component_options = component_options - self.build_options = build_options - self.driver_options = driver_options - - -class IssueExperiment(FastDownwardExperiment): - """Subclass of FastDownwardExperiment with some convenience features.""" - - DEFAULT_TEST_SUITE = ["gripper:prob01.pddl"] - - DEFAULT_TABLE_ATTRIBUTES = [ - "cost", - "coverage", - "error", - "evaluations", - "expansions", - "expansions_until_last_jump", - "generated", - "memory", - "quality", - "run_dir", - "score_evaluations", - "score_expansions", - "score_generated", - "score_memory", - "score_search_time", - "score_total_time", - "search_time", - "total_time", - ] - - DEFAULT_SCATTER_PLOT_ATTRIBUTES = [ - "evaluations", - "expansions", - "expansions_until_last_jump", - "initial_h_value", - "memory", - "search_time", - "total_time", - ] - - PORTFOLIO_ATTRIBUTES = [ - "cost", - "coverage", - "error", - "plan_length", - "run_dir", - ] - - def __init__(self, revisions=None, configs=None, path=None, **kwargs): - """ - - You can either specify both *revisions* and 
*configs* or none - of them. If they are omitted, you will need to call - exp.add_algorithm() manually. - - If *revisions* is given, it must be a non-empty list of - revision identifiers, which specify which planner versions to - use in the experiment. The same versions are used for - translator, preprocessor and search. :: - - IssueExperiment(revisions=["issue123", "4b3d581643"], ...) - - If *configs* is given, it must be a non-empty list of - IssueConfig objects. :: - - IssueExperiment(..., configs=[ - IssueConfig("ff", ["--search", "eager_greedy(ff())"]), - IssueConfig( - "lama", [], - driver_options=["--alias", "seq-sat-lama-2011"]), - ]) - - If *path* is specified, it must be the path to where the - experiment should be built (e.g. - /home/john/experiments/issue123/exp01/). If omitted, the - experiment path is derived automatically from the main - script's filename. Example:: - - script = experiments/issue123/exp01.py --> - path = experiments/issue123/data/issue123-exp01/ - - """ - - path = path or get_data_dir() - - FastDownwardExperiment.__init__(self, path=path, **kwargs) - - if (revisions and not configs) or (not revisions and configs): - raise ValueError( - "please provide either both or none of revisions and configs") - - for rev in revisions: - for config in configs: - self.add_algorithm( - get_algo_nick(rev, config.nick), - get_repo_base(), - rev, - config.component_options, - build_options=config.build_options, - driver_options=config.driver_options) - - self._revisions = revisions - self._configs = configs - - @classmethod - def _is_portfolio(cls, config_nick): - return "fdss" in config_nick - - @classmethod - def get_supported_attributes(cls, config_nick, attributes): - if cls._is_portfolio(config_nick): - return [attr for attr in attributes - if attr in cls.PORTFOLIO_ATTRIBUTES] - return attributes - - def add_absolute_report_step(self, **kwargs): - """Add step that makes an absolute report. 
- - Absolute reports are useful for experiments that don't compare - revisions. - - The report is written to the experiment evaluation directory. - - All *kwargs* will be passed to the AbsoluteReport class. If the - keyword argument *attributes* is not specified, a default list - of attributes is used. :: - - exp.add_absolute_report_step(attributes=["coverage"]) - - """ - kwargs.setdefault("attributes", self.DEFAULT_TABLE_ATTRIBUTES) - report = AbsoluteReport(**kwargs) - outfile = os.path.join( - self.eval_dir, - get_experiment_name() + "." + report.output_format) - self.add_report(report, outfile=outfile) - self.add_step(Step( - 'publish-absolute-report', subprocess.call, ['publish', outfile])) - - def add_comparison_table_step(self, **kwargs): - """Add a step that makes pairwise revision comparisons. - - Create comparative reports for all pairs of Fast Downward - revisions. Each report pairs up the runs of the same config and - lists the two absolute attribute values and their difference - for all attributes in kwargs["attributes"]. - - All *kwargs* will be passed to the CompareConfigsReport class. - If the keyword argument *attributes* is not specified, a - default list of attributes is used. 
:: - - exp.add_comparison_table_step(attributes=["coverage"]) - - """ - kwargs.setdefault("attributes", self.DEFAULT_TABLE_ATTRIBUTES) - - def make_comparison_tables(): - for rev1, rev2 in itertools.combinations(self._revisions, 2): - compared_configs = [] - for config in self._configs: - config_nick = config.nick - compared_configs.append( - ("%s-%s" % (rev1, config_nick), - "%s-%s" % (rev2, config_nick), - "Diff (%s)" % config_nick)) - report = CompareConfigsReport(compared_configs, **kwargs) - outfile = os.path.join( - self.eval_dir, - "%s-%s-%s-compare.%s" % ( - self.name, rev1, rev2, report.output_format)) - report(self.eval_dir, outfile) - - def publish_comparison_tables(): - for rev1, rev2 in itertools.combinations(self._revisions, 2): - outfile = os.path.join( - self.eval_dir, - "%s-%s-%s-compare.html" % (self.name, rev1, rev2)) - subprocess.call(["publish", outfile]) - - self.add_step(Step("make-comparison-tables", make_comparison_tables)) - self.add_step(Step( - "publish-comparison-tables", publish_comparison_tables)) - - def add_scatter_plot_step(self, relative=False, attributes=None): - """Add step creating (relative) scatter plots for all revision pairs. - - Create a scatter plot for each combination of attribute, - configuration and revisions pair. If *attributes* is not - specified, a list of common scatter plot attributes is used. - For portfolios all attributes except "cost", "coverage" and - "plan_length" will be ignored. 
:: - - exp.add_scatter_plot_step(attributes=["expansions"]) - - """ - if relative: - report_class = RelativeScatterPlotReport - scatter_dir = os.path.join(self.eval_dir, "scatter-relative") - step_name = "make-relative-scatter-plots" - else: - report_class = ScatterPlotReport - scatter_dir = os.path.join(self.eval_dir, "scatter-absolute") - step_name = "make-absolute-scatter-plots" - if attributes is None: - attributes = self.DEFAULT_SCATTER_PLOT_ATTRIBUTES - - def make_scatter_plot(config_nick, rev1, rev2, attribute): - name = "-".join([self.name, rev1, rev2, attribute, config_nick]) - print "Make scatter plot for", name - algo1 = "{}-{}".format(rev1, config_nick) - algo2 = "{}-{}".format(rev2, config_nick) - report = report_class( - filter_config=[algo1, algo2], - attributes=[attribute], - get_category=lambda run1, run2: run1["domain"], - legend_location=(1.3, 0.5)) - report( - self.eval_dir, - os.path.join(scatter_dir, rev1 + "-" + rev2, name)) - - def make_scatter_plots(): - for config in self._configs: - for rev1, rev2 in itertools.combinations(self._revisions, 2): - for attribute in self.get_supported_attributes( - config.nick, attributes): - make_scatter_plot(config.nick, rev1, rev2, attribute) - - self.add_step(Step(step_name, make_scatter_plots)) diff --git a/experiments/issue659/relativescatter.py b/experiments/issue659/relativescatter.py deleted file mode 100644 index 14d5d42752..0000000000 --- a/experiments/issue659/relativescatter.py +++ /dev/null @@ -1,106 +0,0 @@ -# -*- coding: utf-8 -*- - -from collections import defaultdict - -from matplotlib import ticker - -from downward.reports.scatter import ScatterPlotReport -from downward.reports.plot import PlotReport, Matplotlib, MatplotlibPlot - - -# TODO: handle outliers - -# TODO: this is mostly copied from ScatterMatplotlib (scatter.py) -class RelativeScatterMatplotlib(Matplotlib): - @classmethod - def _plot(cls, report, axes, categories, styles): - # Display grid - axes.grid(b=True, linestyle='-', 
color='0.75') - - has_points = False - # Generate the scatter plots - for category, coords in sorted(categories.items()): - X, Y = zip(*coords) - axes.scatter(X, Y, s=42, label=category, **styles[category]) - if X and Y: - has_points = True - - if report.xscale == 'linear' or report.yscale == 'linear': - plot_size = report.missing_val * 1.01 - else: - plot_size = report.missing_val * 1.25 - - # make 5 ticks above and below 1 - yticks = [] - tick_step = report.ylim_top**(1/5.0) - for i in xrange(-5, 6): - yticks.append(tick_step**i) - axes.set_yticks(yticks) - axes.get_yaxis().set_major_formatter(ticker.ScalarFormatter()) - - axes.set_xlim(report.xlim_left or -1, report.xlim_right or plot_size) - axes.set_ylim(report.ylim_bottom or -1, report.ylim_top or plot_size) - - for axis in [axes.xaxis, axes.yaxis]: - MatplotlibPlot.change_axis_formatter( - axis, - report.missing_val if report.show_missing else None) - return has_points - - -class RelativeScatterPlotReport(ScatterPlotReport): - """ - Generate a scatter plot that shows how a specific attribute in two - configurations. The attribute value in config 1 is shown on the - x-axis and the relation to the value in config 2 on the y-axis. - """ - - def __init__(self, show_missing=True, get_category=None, **kwargs): - ScatterPlotReport.__init__(self, show_missing, get_category, **kwargs) - if self.output_format == 'tex': - raise "not supported" - else: - self.writer = RelativeScatterMatplotlib - - def _fill_categories(self, runs): - # We discard the *runs* parameter. 
- # Map category names to value tuples - categories = defaultdict(list) - self.ylim_bottom = 2 - self.ylim_top = 0.5 - self.xlim_left = float("inf") - for (domain, problem), runs in self.problem_runs.items(): - if len(runs) != 2: - continue - run1, run2 = runs - assert (run1['config'] == self.configs[0] and - run2['config'] == self.configs[1]) - val1 = run1.get(self.attribute) - val2 = run2.get(self.attribute) - if val1 is None or val2 is None: - continue - category = self.get_category(run1, run2) - assert val1 > 0, (domain, problem, self.configs[0], val1) - assert val2 > 0, (domain, problem, self.configs[1], val2) - x = val1 - y = val2 / float(val1) - - categories[category].append((x, y)) - - self.ylim_top = max(self.ylim_top, y) - self.ylim_bottom = min(self.ylim_bottom, y) - self.xlim_left = min(self.xlim_left, x) - - # center around 1 - if self.ylim_bottom < 1: - self.ylim_top = max(self.ylim_top, 1 / float(self.ylim_bottom)) - if self.ylim_top > 1: - self.ylim_bottom = min(self.ylim_bottom, 1 / float(self.ylim_top)) - return categories - - def _set_scales(self, xscale, yscale): - # ScatterPlots use log-scaling on the x-axis by default. - default_xscale = 'log' - if self.attribute and self.attribute in self.LINEAR: - default_xscale = 'linear' - PlotReport._set_scales(self, xscale or default_xscale, 'log') diff --git a/experiments/issue659/v1.py b/experiments/issue659/v1.py deleted file mode 100755 index 9f9b88fe2b..0000000000 --- a/experiments/issue659/v1.py +++ /dev/null @@ -1,57 +0,0 @@ -#! 
/usr/bin/env python -# -*- coding: utf-8 -*- - -import os - -from lab.environments import LocalEnvironment, MaiaEnvironment - -from common_setup import IssueConfig, IssueExperiment, is_test_run - - -BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"] -REVISIONS = ["issue659-v1-base", "issue659-v1"] -CONFIGS = [ - IssueConfig(heuristic, ["--search", "astar({})".format(heuristic)]) - for heuristic in [ - "cegar(max_states=10000,max_time=infinity)"] -] -SUITE = [ - 'airport', 'barman-opt11-strips', 'barman-opt14-strips', 'blocks', - 'childsnack-opt14-strips', 'depot', 'driverlog', - 'elevators-opt08-strips', 'elevators-opt11-strips', - 'floortile-opt11-strips', 'floortile-opt14-strips', 'freecell', - 'ged-opt14-strips', 'grid', 'gripper', 'hiking-opt14-strips', - 'logistics00', 'logistics98', 'miconic', 'movie', 'mprime', 'mystery', - 'nomystery-opt11-strips', 'openstacks-opt08-strips', - 'openstacks-opt11-strips', 'openstacks-opt14-strips', - 'openstacks-strips', 'parcprinter-08-strips', - 'parcprinter-opt11-strips', 'parking-opt11-strips', - 'parking-opt14-strips', 'pathways-noneg', 'pegsol-08-strips', - 'pegsol-opt11-strips', 'pipesworld-notankage', 'pipesworld-tankage', - 'psr-small', 'rovers', 'satellite', 'scanalyzer-08-strips', - 'scanalyzer-opt11-strips', 'sokoban-opt08-strips', - 'sokoban-opt11-strips', 'storage', 'tetris-opt14-strips', - 'tidybot-opt11-strips', 'tidybot-opt14-strips', 'tpp', - 'transport-opt08-strips', 'transport-opt11-strips', - 'transport-opt14-strips', 'trucks-strips', 'visitall-opt11-strips', - 'visitall-opt14-strips', 'woodworking-opt08-strips', - 'woodworking-opt11-strips', 'zenotravel'] -ENVIRONMENT = MaiaEnvironment( - priority=0, email="jendrik.seipp@unibas.ch") - -if is_test_run(): - SUITE = IssueExperiment.DEFAULT_TEST_SUITE - ENVIRONMENT = LocalEnvironment(processes=1) - -exp = IssueExperiment( - revisions=REVISIONS, - configs=CONFIGS, - environment=ENVIRONMENT, -) -exp.add_suite(BENCHMARKS_DIR, SUITE) - 
-exp.add_absolute_report_step() -exp.add_comparison_table_step() -exp.add_scatter_plot_step(attributes=["total_time"]) - -exp() diff --git a/experiments/issue660/common_setup.py b/experiments/issue660/common_setup.py deleted file mode 100644 index 934531f15d..0000000000 --- a/experiments/issue660/common_setup.py +++ /dev/null @@ -1,357 +0,0 @@ -# -*- coding: utf-8 -*- - -import itertools -import os -import platform -import subprocess -import sys - -from lab.environments import LocalEnvironment, MaiaEnvironment -from lab.experiment import ARGPARSER -from lab.steps import Step -from lab import tools - -from downward.experiment import FastDownwardExperiment -from downward.reports.absolute import AbsoluteReport -from downward.reports.compare import CompareConfigsReport -from downward.reports.scatter import ScatterPlotReport - - -def parse_args(): - ARGPARSER.add_argument( - "--test", - choices=["yes", "no", "auto"], - default="auto", - dest="test_run", - help="test experiment locally on a small suite if --test=yes or " - "--test=auto and we are not on a cluster") - return ARGPARSER.parse_args() - -ARGS = parse_args() - - -def get_script(): - """Get file name of main script.""" - return tools.get_script_path() - - -def get_script_dir(): - """Get directory of main script. - - Usually a relative directory (depends on how it was called by the user.)""" - return os.path.dirname(get_script()) - - -def get_experiment_name(): - """Get name for experiment. - - Derived from the absolute filename of the main script, e.g. - "/ham/spam/eggs.py" => "spam-eggs".""" - script = os.path.abspath(get_script()) - script_dir = os.path.basename(os.path.dirname(script)) - script_base = os.path.splitext(os.path.basename(script))[0] - return "%s-%s" % (script_dir, script_base) - - -def get_data_dir(): - """Get data dir for the experiment. 
- - This is the subdirectory "data" of the directory containing - the main script.""" - return os.path.join(get_script_dir(), "data", get_experiment_name()) - - -def get_repo_base(): - """Get base directory of the repository, as an absolute path. - - Search upwards in the directory tree from the main script until a - directory with a subdirectory named ".hg" is found. - - Abort if the repo base cannot be found.""" - path = os.path.abspath(get_script_dir()) - while os.path.dirname(path) != path: - if os.path.exists(os.path.join(path, ".hg")): - return path - path = os.path.dirname(path) - sys.exit("repo base could not be found") - - -def is_running_on_cluster(): - node = platform.node() - return ( - "cluster" in node or - node.startswith("gkigrid") or - node in ["habakuk", "turtur"]) - - -def is_test_run(): - return ARGS.test_run == "yes" or ( - ARGS.test_run == "auto" and not is_running_on_cluster()) - - -def get_algo_nick(revision, config_nick): - return "{revision}-{config_nick}".format(**locals()) - - -class IssueConfig(object): - """Hold information about a planner configuration. - - See FastDownwardExperiment.add_algorithm() for documentation of the - constructor's options. 
- - """ - def __init__(self, nick, component_options, - build_options=None, driver_options=None): - self.nick = nick - self.component_options = component_options - self.build_options = build_options - self.driver_options = driver_options - - -class IssueExperiment(FastDownwardExperiment): - """Subclass of FastDownwardExperiment with some convenience features.""" - - DEFAULT_TEST_SUITE = "gripper:prob01.pddl" - - DEFAULT_TABLE_ATTRIBUTES = [ - "cost", - "coverage", - "error", - "evaluations", - "expansions", - "expansions_until_last_jump", - "generated", - "memory", - "quality", - "run_dir", - "score_evaluations", - "score_expansions", - "score_generated", - "score_memory", - "score_search_time", - "score_total_time", - "search_time", - "total_time", - ] - - DEFAULT_SCATTER_PLOT_ATTRIBUTES = [ - "evaluations", - "expansions", - "expansions_until_last_jump", - "initial_h_value", - "memory", - "search_time", - "total_time", - ] - - PORTFOLIO_ATTRIBUTES = [ - "cost", - "coverage", - "error", - "plan_length", - "run_dir", - ] - - def __init__(self, benchmarks_dir, suite, revisions=[], configs={}, - grid_priority=None, path=None, test_suite=None, - email=None, processes=None, - **kwargs): - """ - If *revisions* is specified, it should be a non-empty - list of revisions, which specify which planner versions to use - in the experiment. The same versions are used for translator, - preprocessor and search. :: - - IssueExperiment(revisions=["issue123", "4b3d581643"], ...) - - *configs* must be a non-empty list of IssueConfig objects. :: - - IssueExperiment(..., configs=[ - IssueConfig("ff", ["--search", "eager_greedy(ff())"]), - IssueConfig( - "lama", [], - driver_options=["--alias", "seq-sat-lama-2011"]), - ]) - - *suite* sets the benchmarks for the experiment. It must be a - single string or a list of strings specifying domains or - tasks. The downward.suites module has many predefined - suites. 
:: - - IssueExperiment(..., suite=["grid", "gripper:prob01.pddl"]) - - from downward import suites - IssueExperiment(..., suite=suites.suite_all()) - IssueExperiment(..., suite=suites.suite_satisficing_with_ipc11()) - IssueExperiment(..., suite=suites.suite_optimal()) - - Use *grid_priority* to set the job priority for cluster - experiments. It must be in the range [-1023, 0] where 0 is the - highest priority. By default the priority is 0. :: - - IssueExperiment(..., grid_priority=-500) - - If *path* is specified, it must be the path to where the - experiment should be built (e.g. - /home/john/experiments/issue123/exp01/). If omitted, the - experiment path is derived automatically from the main - script's filename. Example:: - - script = experiments/issue123/exp01.py --> - path = experiments/issue123/data/issue123-exp01/ - - Specify *test_suite* to set the benchmarks for experiment test - runs. By default the first gripper task is used. - - IssueExperiment(..., test_suite=["depot:pfile1", "tpp:p01.pddl"]) - - If *email* is specified, it should be an email address. This - email address will be notified upon completion of the experiments - if it is run on the cluster. 
- """ - - if is_test_run(): - kwargs["environment"] = LocalEnvironment(processes=processes) - suite = test_suite or self.DEFAULT_TEST_SUITE - elif "environment" not in kwargs: - kwargs["environment"] = MaiaEnvironment( - priority=grid_priority, email=email) - - path = path or get_data_dir() - - FastDownwardExperiment.__init__(self, path=path, **kwargs) - - repo = get_repo_base() - for rev in revisions: - for config in configs: - self.add_algorithm( - get_algo_nick(rev, config.nick), - repo, - rev, - config.component_options, - build_options=config.build_options, - driver_options=config.driver_options) - - self.add_suite(benchmarks_dir, suite) - - self._revisions = revisions - self._configs = configs - - @classmethod - def _is_portfolio(cls, config_nick): - return "fdss" in config_nick - - @classmethod - def get_supported_attributes(cls, config_nick, attributes): - if cls._is_portfolio(config_nick): - return [attr for attr in attributes - if attr in cls.PORTFOLIO_ATTRIBUTES] - return attributes - - def add_absolute_report_step(self, **kwargs): - """Add step that makes an absolute report. - - Absolute reports are useful for experiments that don't - compare revisions. - - The report is written to the experiment evaluation directory. - - All *kwargs* will be passed to the AbsoluteReport class. If - the keyword argument *attributes* is not specified, a - default list of attributes is used. :: - - exp.add_absolute_report_step(attributes=["coverage"]) - - """ - kwargs.setdefault("attributes", self.DEFAULT_TABLE_ATTRIBUTES) - report = AbsoluteReport(**kwargs) - outfile = os.path.join(self.eval_dir, - get_experiment_name() + "." + - report.output_format) - self.add_report(report, outfile=outfile) - self.add_step(Step('publish-absolute-report', - subprocess.call, - ['publish', outfile])) - - def add_comparison_table_step(self, **kwargs): - """Add a step that makes pairwise revision comparisons. - - Create comparative reports for all pairs of Fast Downward - revisions. 
Each report pairs up the runs of the same config and - lists the two absolute attribute values and their difference - for all attributes in kwargs["attributes"]. - - All *kwargs* will be passed to the CompareConfigsReport class. - If the keyword argument *attributes* is not specified, a - default list of attributes is used. :: - - exp.add_comparison_table_step(attributes=["coverage"]) - - """ - kwargs.setdefault("attributes", self.DEFAULT_TABLE_ATTRIBUTES) - - def make_comparison_tables(): - for rev1, rev2 in itertools.combinations(self._revisions, 2): - compared_configs = [] - for config in self._configs: - config_nick = config.nick - compared_configs.append( - ("%s-%s" % (rev1, config_nick), - "%s-%s" % (rev2, config_nick), - "Diff (%s)" % config_nick)) - report = CompareConfigsReport(compared_configs, **kwargs) - outfile = os.path.join( - self.eval_dir, - "%s-%s-%s-compare" % (self.name, rev1, rev2) - + "." + report.output_format) - report(self.eval_dir, outfile) - - def publish_comparison_tables(): - for rev1, rev2 in itertools.combinations(self._revisions, 2): - outfile = os.path.join( - self.eval_dir, - "%s-%s-%s-compare" % (self.name, rev1, rev2) - + ".html") - subprocess.call(['publish', outfile]) - - self.add_step(Step("make-comparison-tables", make_comparison_tables)) - self.add_step(Step("publish-comparison-tables", publish_comparison_tables)) - - def add_scatter_plot_step(self, attributes=None): - """Add a step that creates scatter plots for all revision pairs. - - Create a scatter plot for each combination of attribute, - configuration and revisions pair. If *attributes* is not - specified, a list of common scatter plot attributes is used. - For portfolios all attributes except "cost", "coverage" and - "plan_length" will be ignored. 
:: - - exp.add_scatter_plot_step(attributes=["expansions"]) - - """ - if attributes is None: - attributes = self.DEFAULT_SCATTER_PLOT_ATTRIBUTES - scatter_dir = os.path.join(self.eval_dir, "scatter") - - def make_scatter_plot(config_nick, rev1, rev2, attribute): - name = "-".join([self.name, rev1, rev2, attribute, config_nick]) - print "Make scatter plot for", name - algo1 = "%s-%s" % (rev1, config_nick) - algo2 = "%s-%s" % (rev2, config_nick) - report = ScatterPlotReport( - filter_config=[algo1, algo2], - attributes=[attribute], - get_category=lambda run1, run2: run1["domain"], - legend_location=(1.3, 0.5)) - report( - self.eval_dir, - os.path.join(scatter_dir, rev1 + "-" + rev2, name)) - - def make_scatter_plots(): - for config in self._configs: - for rev1, rev2 in itertools.combinations(self._revisions, 2): - for attribute in self.get_supported_attributes( - config.nick, attributes): - make_scatter_plot(config.nick, rev1, rev2, attribute) - - self.add_step(Step("make-scatter-plots", make_scatter_plots)) diff --git a/experiments/issue660/relativescatter.py b/experiments/issue660/relativescatter.py deleted file mode 100644 index 68e2529f34..0000000000 --- a/experiments/issue660/relativescatter.py +++ /dev/null @@ -1,106 +0,0 @@ -# -*- coding: utf-8 -*- - -from collections import defaultdict - -from matplotlib import ticker - -from downward.reports.scatter import ScatterPlotReport -from downward.reports.plot import PlotReport, Matplotlib, MatplotlibPlot - - -# TODO: handle outliers - -# TODO: this is mostly copied from ScatterMatplotlib (scatter.py) -class RelativeScatterMatplotlib(Matplotlib): - @classmethod - def _plot(cls, report, axes, categories, styles): - # Display grid - axes.grid(b=True, linestyle='-', color='0.75') - - has_points = False - # Generate the scatter plots - for category, coords in sorted(categories.items()): - X, Y = zip(*coords) - axes.scatter(X, Y, s=42, label=category, **styles[category]) - if X and Y: - has_points = True - - if 
report.xscale == 'linear' or report.yscale == 'linear': - plot_size = report.missing_val * 1.01 - else: - plot_size = report.missing_val * 1.25 - - # make 5 ticks above and below 1 - yticks = [] - tick_step = report.ylim_top**(1/5.0) - for i in xrange(-5, 6): - yticks.append(tick_step**i) - axes.set_yticks(yticks) - axes.get_yaxis().set_major_formatter(ticker.ScalarFormatter()) - - axes.set_xlim(report.xlim_left or -1, report.xlim_right or plot_size) - axes.set_ylim(report.ylim_bottom or -1, report.ylim_top or plot_size) - - for axis in [axes.xaxis, axes.yaxis]: - MatplotlibPlot.change_axis_formatter(axis, - report.missing_val if report.show_missing else None) - return has_points - - - -class RelativeScatterPlotReport(ScatterPlotReport): - """ - Generate a scatter plot that shows how a specific attribute in two - configurations. The attribute value in config 1 is shown on the - x-axis and the relation to the value in config 2 on the y-axis. - """ - - def __init__(self, show_missing=True, get_category=None, **kwargs): - ScatterPlotReport.__init__(self, show_missing, get_category, **kwargs) - if self.output_format == 'tex': - raise "not supported" - else: - self.writer = RelativeScatterMatplotlib - - def _fill_categories(self, runs): - # We discard the *runs* parameter. 
- # Map category names to value tuples - categories = defaultdict(list) - self.ylim_bottom = 2 - self.ylim_top = 0.5 - self.xlim_left = float("inf") - for (domain, problem), runs in self.problem_runs.items(): - if len(runs) != 2: - continue - run1, run2 = runs - assert (run1['config'] == self.configs[0] and - run2['config'] == self.configs[1]) - val1 = run1.get(self.attribute) - val2 = run2.get(self.attribute) - if val1 is None or val2 is None: - continue - category = self.get_category(run1, run2) - assert val1 > 0, (domain, problem, self.configs[0], val1) - assert val2 > 0, (domain, problem, self.configs[1], val2) - x = val1 - y = val2 / float(val1) - - categories[category].append((x, y)) - - self.ylim_top = max(self.ylim_top, y) - self.ylim_bottom = min(self.ylim_bottom, y) - self.xlim_left = min(self.xlim_left, x) - - # center around 1 - if self.ylim_bottom < 1: - self.ylim_top = max(self.ylim_top, 1 / float(self.ylim_bottom)) - if self.ylim_top > 1: - self.ylim_bottom = min(self.ylim_bottom, 1 / float(self.ylim_top)) - return categories - - def _set_scales(self, xscale, yscale): - # ScatterPlots use log-scaling on the x-axis by default. - default_xscale = 'log' - if self.attribute and self.attribute in self.LINEAR: - default_xscale = 'linear' - PlotReport._set_scales(self, xscale or default_xscale, 'log') diff --git a/experiments/issue660/suites.py b/experiments/issue660/suites.py deleted file mode 100644 index 4615212cfd..0000000000 --- a/experiments/issue660/suites.py +++ /dev/null @@ -1,350 +0,0 @@ -#! /usr/bin/env python -# -*- coding: utf-8 -*- - -import argparse -import textwrap - - -HELP = "Convert suite name to list of domains or tasks." 
- - -def suite_alternative_formulations(): - return ['airport-adl', 'no-mprime', 'no-mystery'] - - -def suite_ipc98_to_ipc04_adl(): - return [ - 'assembly', 'miconic-fulladl', 'miconic-simpleadl', - 'optical-telegraphs', 'philosophers', 'psr-large', - 'psr-middle', 'schedule', - ] - - -def suite_ipc98_to_ipc04_strips(): - return [ - 'airport', 'blocks', 'depot', 'driverlog', 'freecell', 'grid', - 'gripper', 'logistics00', 'logistics98', 'miconic', 'movie', - 'mprime', 'mystery', 'pipesworld-notankage', 'psr-small', - 'satellite', 'zenotravel', - ] - - -def suite_ipc98_to_ipc04(): - # All IPC1-4 domains, including the trivial Movie. - return sorted(suite_ipc98_to_ipc04_adl() + suite_ipc98_to_ipc04_strips()) - - -def suite_ipc06_adl(): - return [ - 'openstacks', - 'pathways', - 'trucks', - ] - - -def suite_ipc06_strips_compilations(): - return [ - 'openstacks-strips', - 'pathways-noneg', - 'trucks-strips', - ] - - -def suite_ipc06_strips(): - return [ - 'pipesworld-tankage', - 'rovers', - 'storage', - 'tpp', - ] - - -def suite_ipc06(): - return sorted(suite_ipc06_adl() + suite_ipc06_strips()) - - -def suite_ipc08_common_strips(): - return [ - 'parcprinter-08-strips', - 'pegsol-08-strips', - 'scanalyzer-08-strips', - ] - - -def suite_ipc08_opt_adl(): - return ['openstacks-opt08-adl'] - - -def suite_ipc08_opt_strips(): - return sorted(suite_ipc08_common_strips() + [ - 'elevators-opt08-strips', - 'openstacks-opt08-strips', - 'sokoban-opt08-strips', - 'transport-opt08-strips', - 'woodworking-opt08-strips', - ]) - - -def suite_ipc08_opt(): - return sorted(suite_ipc08_opt_strips() + suite_ipc08_opt_adl()) - - -def suite_ipc08_sat_adl(): - return ['openstacks-sat08-adl'] - - -def suite_ipc08_sat_strips(): - return sorted(suite_ipc08_common_strips() + [ - # Note: cyber-security is missing. 
- 'elevators-sat08-strips', - 'openstacks-sat08-strips', - 'sokoban-sat08-strips', - 'transport-sat08-strips', - 'woodworking-sat08-strips', - ]) - - -def suite_ipc08_sat(): - return sorted(suite_ipc08_sat_strips() + suite_ipc08_sat_adl()) - - -def suite_ipc08(): - return sorted(set(suite_ipc08_opt() + suite_ipc08_sat())) - - -def suite_ipc11_opt(): - return [ - 'barman-opt11-strips', - 'elevators-opt11-strips', - 'floortile-opt11-strips', - 'nomystery-opt11-strips', - 'openstacks-opt11-strips', - 'parcprinter-opt11-strips', - 'parking-opt11-strips', - 'pegsol-opt11-strips', - 'scanalyzer-opt11-strips', - 'sokoban-opt11-strips', - 'tidybot-opt11-strips', - 'transport-opt11-strips', - 'visitall-opt11-strips', - 'woodworking-opt11-strips', - ] - - -def suite_ipc11_sat(): - return [ - 'barman-sat11-strips', - 'elevators-sat11-strips', - 'floortile-sat11-strips', - 'nomystery-sat11-strips', - 'openstacks-sat11-strips', - 'parcprinter-sat11-strips', - 'parking-sat11-strips', - 'pegsol-sat11-strips', - 'scanalyzer-sat11-strips', - 'sokoban-sat11-strips', - 'tidybot-sat11-strips', - 'transport-sat11-strips', - 'visitall-sat11-strips', - 'woodworking-sat11-strips', - ] - - -def suite_ipc11(): - return sorted(suite_ipc11_opt() + suite_ipc11_sat()) - - -def suite_ipc14_agl_adl(): - return [ - 'cavediving-14-adl', - 'citycar-sat14-adl', - 'maintenance-sat14-adl', - ] - - -def suite_ipc14_agl_strips(): - return [ - 'barman-sat14-strips', - 'childsnack-sat14-strips', - 'floortile-sat14-strips', - 'ged-sat14-strips', - 'hiking-agl14-strips', - 'openstacks-agl14-strips', - 'parking-sat14-strips', - 'tetris-sat14-strips', - 'thoughtful-sat14-strips', - 'transport-sat14-strips', - 'visitall-sat14-strips', - ] - - -def suite_ipc14_agl(): - return sorted(suite_ipc14_agl_adl() + suite_ipc14_agl_strips()) - - -def suite_ipc14_mco_adl(): - return [ - 'cavediving-14-adl', - 'citycar-sat14-adl', - 'maintenance-sat14-adl', - ] - - -def suite_ipc14_mco_strips(): - return [ - 
'barman-mco14-strips', - 'childsnack-sat14-strips', - 'floortile-sat14-strips', - 'ged-sat14-strips', - 'hiking-sat14-strips', - 'openstacks-sat14-strips', - 'parking-sat14-strips', - 'tetris-sat14-strips', - 'thoughtful-mco14-strips', - 'transport-sat14-strips', - 'visitall-sat14-strips', - ] - - -def suite_ipc14_mco(): - return sorted(suite_ipc14_mco_adl() + suite_ipc14_mco_strips()) - - -def suite_ipc14_opt_adl(): - return [ - 'cavediving-14-adl', - 'citycar-opt14-adl', - 'maintenance-opt14-adl', - ] - - -def suite_ipc14_opt_strips(): - return [ - 'barman-opt14-strips', - 'childsnack-opt14-strips', - 'floortile-opt14-strips', - 'ged-opt14-strips', - 'hiking-opt14-strips', - 'openstacks-opt14-strips', - 'parking-opt14-strips', - 'tetris-opt14-strips', - 'tidybot-opt14-strips', - 'transport-opt14-strips', - 'visitall-opt14-strips', - ] - - -def suite_ipc14_opt(): - return sorted(suite_ipc14_opt_adl() + suite_ipc14_opt_strips()) - - -def suite_ipc14_sat_adl(): - return [ - 'cavediving-14-adl', - 'citycar-sat14-adl', - 'maintenance-sat14-adl', - ] - - -def suite_ipc14_sat_strips(): - return [ - 'barman-sat14-strips', - 'childsnack-sat14-strips', - 'floortile-sat14-strips', - 'ged-sat14-strips', - 'hiking-sat14-strips', - 'openstacks-sat14-strips', - 'parking-sat14-strips', - 'tetris-sat14-strips', - 'thoughtful-sat14-strips', - 'transport-sat14-strips', - 'visitall-sat14-strips', - ] - - -def suite_ipc14_sat(): - return sorted(suite_ipc14_sat_adl() + suite_ipc14_sat_strips()) - - -def suite_ipc14(): - return sorted(set( - suite_ipc14_agl() + suite_ipc14_mco() + - suite_ipc14_opt() + suite_ipc14_sat())) - - -def suite_unsolvable(): - return sorted( - ['mystery:prob%02d.pddl' % index - for index in [4, 5, 7, 8, 12, 16, 18, 21, 22, 23, 24]] + - ['miconic-fulladl:f21-3.pddl', 'miconic-fulladl:f30-2.pddl']) - - -def suite_optimal_adl(): - return sorted( - suite_ipc98_to_ipc04_adl() + suite_ipc06_adl() + - suite_ipc08_opt_adl() + suite_ipc14_opt_adl()) - - -def 
suite_optimal_strips(): - return sorted( - suite_ipc98_to_ipc04_strips() + suite_ipc06_strips() + - suite_ipc06_strips_compilations() + suite_ipc08_opt_strips() + - suite_ipc11_opt() + suite_ipc14_opt_strips()) - - -def suite_optimal(): - return sorted(suite_optimal_adl() + suite_optimal_strips()) - - -def suite_satisficing_adl(): - return sorted( - suite_ipc98_to_ipc04_adl() + suite_ipc06_adl() + - suite_ipc08_sat_adl() + suite_ipc14_sat_adl()) - - -def suite_satisficing_strips(): - return sorted( - suite_ipc98_to_ipc04_strips() + suite_ipc06_strips() + - suite_ipc06_strips_compilations() + suite_ipc08_sat_strips() + - suite_ipc11_sat() + suite_ipc14_sat_strips()) - - -def suite_satisficing(): - return sorted(suite_satisficing_adl() + suite_satisficing_strips()) - - -def suite_all(): - return sorted( - suite_ipc98_to_ipc04() + suite_ipc06() + - suite_ipc06_strips_compilations() + suite_ipc08() + - suite_ipc11() + suite_ipc14() + suite_alternative_formulations()) - - -def parse_args(): - parser = argparse.ArgumentParser() - parser.add_argument("suite", help="suite name") - return parser.parse_args() - - -def main(): - prefix = "suite_" - suite_names = [ - name[len(prefix):] for name in sorted(globals().keys()) - if name.startswith(prefix)] - parser = argparse.ArgumentParser(description=HELP) - parser.add_argument("suite", choices=suite_names, help="suite name") - parser.add_argument( - "--width", default=72, type=int, - help="output line width (default: %(default)s). Use 1 for single " - "column.") - args = parser.parse_args() - suite_func = globals()[prefix + args.suite] - print(textwrap.fill( - str(suite_func()), - width=args.width, - break_long_words=False, - break_on_hyphens=False)) - - -if __name__ == "__main__": - main() diff --git a/experiments/issue660/v1.py b/experiments/issue660/v1.py deleted file mode 100755 index 1d5c1814f5..0000000000 --- a/experiments/issue660/v1.py +++ /dev/null @@ -1,82 +0,0 @@ -#! 
/usr/bin/env python -# -*- coding: utf-8 -*- - -import os -import suites -from lab.reports import Attribute, gm - -from common_setup import IssueConfig, IssueExperiment -try: - from relativescatter import RelativeScatterPlotReport - matplotlib = True -except ImportError: - print 'matplotlib not available, scatter plots not available' - matplotlib = False - -def main(revisions=None): - benchmarks_dir=os.path.expanduser('~/projects/downward/benchmarks') - suite=suites.suite_satisficing() - - configs = { - IssueConfig('lazy-greedy-ff', [ - '--heuristic', - 'h=ff()', - '--search', - 'lazy_greedy(h, preferred=h)' - ]), - IssueConfig('lama-first', [], - driver_options=['--alias', 'lama-first'] - ), - IssueConfig('eager_greedy_cg', [ - '--heuristic', - 'h=cg()', - '--search', - 'eager_greedy(h, preferred=h)' - ]), - IssueConfig('eager_greedy_cea', [ - '--heuristic', - 'h=cea()', - '--search', - 'eager_greedy(h, preferred=h)' - ]), - } - - exp = IssueExperiment( - benchmarks_dir=benchmarks_dir, - suite=suite, - revisions=revisions, - configs=configs, - test_suite=['depot:p01.pddl'], - processes=4, - email='florian.pommerening@unibas.ch', - ) - - exp.add_comparison_table_step( - filter_domain=[ - 'assembly', - 'miconic-fulladl', - 'openstacks', - 'openstacks-sat08-adl', - 'optical-telegraphs', - 'philosophers', - 'psr-large', - 'psr-middle', - 'trucks', - ], - ) - - if matplotlib: - for attribute in ["memory", "total_time"]: - for config in configs: - exp.add_report( - RelativeScatterPlotReport( - attributes=[attribute], - filter_config=["{}-{}".format(rev, config.nick) for rev in revisions], - get_category=lambda run1, run2: run1.get("domain"), - ), - outfile="{}-{}-{}.png".format(exp.name, attribute, config.nick) - ) - - exp() - -main(revisions=['issue660-base', 'issue660-v1']) diff --git a/experiments/issue660/v2.py b/experiments/issue660/v2.py deleted file mode 100755 index d1bbf06e9b..0000000000 --- a/experiments/issue660/v2.py +++ /dev/null @@ -1,80 +0,0 @@ -#! 
/usr/bin/env python -# -*- coding: utf-8 -*- - -import os -import suites -from lab.reports import Attribute, gm - -from common_setup import IssueConfig, IssueExperiment -try: - from relativescatter import RelativeScatterPlotReport - matplotlib = True -except ImportError: - print 'matplotlib not available, scatter plots not available' - matplotlib = False - -def main(revisions=None): - benchmarks_dir=os.path.expanduser('~/projects/downward/benchmarks') - suite = [ - 'assembly', - 'miconic-fulladl', - 'openstacks', - 'openstacks-sat08-adl', - 'optical-telegraphs', - 'philosophers', - 'psr-large', - 'psr-middle', - 'trucks', - ] - - configs = { - IssueConfig('lazy-greedy-ff', [ - '--heuristic', - 'h=ff()', - '--search', - 'lazy_greedy(h, preferred=h)' - ]), - IssueConfig('lama-first', [], - driver_options=['--alias', 'lama-first'] - ), - IssueConfig('eager_greedy_cg', [ - '--heuristic', - 'h=cg()', - '--search', - 'eager_greedy(h, preferred=h)' - ]), - IssueConfig('eager_greedy_cea', [ - '--heuristic', - 'h=cea()', - '--search', - 'eager_greedy(h, preferred=h)' - ]), - } - - exp = IssueExperiment( - benchmarks_dir=benchmarks_dir, - suite=suite, - revisions=revisions, - configs=configs, - test_suite=['depot:p01.pddl'], - processes=4, - email='florian.pommerening@unibas.ch', - ) - - exp.add_comparison_table_step() - - if matplotlib: - for attribute in ["memory", "total_time"]: - for config in configs: - exp.add_report( - RelativeScatterPlotReport( - attributes=[attribute], - filter_config=["{}-{}".format(rev, config.nick) for rev in revisions], - get_category=lambda run1, run2: run1.get("domain"), - ), - outfile="{}-{}-{}.png".format(exp.name, attribute, config.nick) - ) - - exp() - -main(revisions=['issue660-v2-base', 'issue660-v2']) diff --git a/experiments/issue662/common_setup.py b/experiments/issue662/common_setup.py deleted file mode 100644 index 5d2b40b61c..0000000000 --- a/experiments/issue662/common_setup.py +++ /dev/null @@ -1,385 +0,0 @@ -# -*- coding: utf-8 
-*- - -import itertools -import os -import platform -import subprocess -import sys - -from lab.experiment import ARGPARSER -from lab import tools - -from downward.experiment import FastDownwardExperiment -from downward.reports.absolute import AbsoluteReport -from downward.reports.compare import ComparativeReport -from downward.reports.scatter import ScatterPlotReport - -from relativescatter import RelativeScatterPlotReport - - -def parse_args(): - ARGPARSER.add_argument( - "--test", - choices=["yes", "no", "auto"], - default="auto", - dest="test_run", - help="test experiment locally on a small suite if --test=yes or " - "--test=auto and we are not on a cluster") - return ARGPARSER.parse_args() - -ARGS = parse_args() - - -DEFAULT_OPTIMAL_SUITE = [ - 'airport', 'barman-opt11-strips', 'barman-opt14-strips', 'blocks', - 'childsnack-opt14-strips', 'depot', 'driverlog', - 'elevators-opt08-strips', 'elevators-opt11-strips', - 'floortile-opt11-strips', 'floortile-opt14-strips', 'freecell', - 'ged-opt14-strips', 'grid', 'gripper', 'hiking-opt14-strips', - 'logistics00', 'logistics98', 'miconic', 'movie', 'mprime', - 'mystery', 'nomystery-opt11-strips', 'openstacks-opt08-strips', - 'openstacks-opt11-strips', 'openstacks-opt14-strips', - 'openstacks-strips', 'parcprinter-08-strips', - 'parcprinter-opt11-strips', 'parking-opt11-strips', - 'parking-opt14-strips', 'pathways-noneg', 'pegsol-08-strips', - 'pegsol-opt11-strips', 'pipesworld-notankage', - 'pipesworld-tankage', 'psr-small', 'rovers', 'satellite', - 'scanalyzer-08-strips', 'scanalyzer-opt11-strips', - 'sokoban-opt08-strips', 'sokoban-opt11-strips', 'storage', - 'tetris-opt14-strips', 'tidybot-opt11-strips', - 'tidybot-opt14-strips', 'tpp', 'transport-opt08-strips', - 'transport-opt11-strips', 'transport-opt14-strips', - 'trucks-strips', 'visitall-opt11-strips', 'visitall-opt14-strips', - 'woodworking-opt08-strips', 'woodworking-opt11-strips', - 'zenotravel'] - -DEFAULT_SATISFICING_SUITE = [ - 'airport', 'assembly', 
'barman-sat11-strips', - 'barman-sat14-strips', 'blocks', 'cavediving-14-adl', - 'childsnack-sat14-strips', 'citycar-sat14-adl', 'depot', - 'driverlog', 'elevators-sat08-strips', 'elevators-sat11-strips', - 'floortile-sat11-strips', 'floortile-sat14-strips', 'freecell', - 'ged-sat14-strips', 'grid', 'gripper', 'hiking-sat14-strips', - 'logistics00', 'logistics98', 'maintenance-sat14-adl', 'miconic', - 'miconic-fulladl', 'miconic-simpleadl', 'movie', 'mprime', - 'mystery', 'nomystery-sat11-strips', 'openstacks', - 'openstacks-sat08-adl', 'openstacks-sat08-strips', - 'openstacks-sat11-strips', 'openstacks-sat14-strips', - 'openstacks-strips', 'optical-telegraphs', 'parcprinter-08-strips', - 'parcprinter-sat11-strips', 'parking-sat11-strips', - 'parking-sat14-strips', 'pathways', 'pathways-noneg', - 'pegsol-08-strips', 'pegsol-sat11-strips', 'philosophers', - 'pipesworld-notankage', 'pipesworld-tankage', 'psr-large', - 'psr-middle', 'psr-small', 'rovers', 'satellite', - 'scanalyzer-08-strips', 'scanalyzer-sat11-strips', 'schedule', - 'sokoban-sat08-strips', 'sokoban-sat11-strips', 'storage', - 'tetris-sat14-strips', 'thoughtful-sat14-strips', - 'tidybot-sat11-strips', 'tpp', 'transport-sat08-strips', - 'transport-sat11-strips', 'transport-sat14-strips', 'trucks', - 'trucks-strips', 'visitall-sat11-strips', 'visitall-sat14-strips', - 'woodworking-sat08-strips', 'woodworking-sat11-strips', - 'zenotravel'] - - -def get_script(): - """Get file name of main script.""" - return tools.get_script_path() - - -def get_script_dir(): - """Get directory of main script. - - Usually a relative directory (depends on how it was called by the user.)""" - return os.path.dirname(get_script()) - - -def get_experiment_name(): - """Get name for experiment. - - Derived from the absolute filename of the main script, e.g. 
- "/ham/spam/eggs.py" => "spam-eggs".""" - script = os.path.abspath(get_script()) - script_dir = os.path.basename(os.path.dirname(script)) - script_base = os.path.splitext(os.path.basename(script))[0] - return "%s-%s" % (script_dir, script_base) - - -def get_data_dir(): - """Get data dir for the experiment. - - This is the subdirectory "data" of the directory containing - the main script.""" - return os.path.join(get_script_dir(), "data", get_experiment_name()) - - -def get_repo_base(): - """Get base directory of the repository, as an absolute path. - - Search upwards in the directory tree from the main script until a - directory with a subdirectory named ".hg" is found. - - Abort if the repo base cannot be found.""" - path = os.path.abspath(get_script_dir()) - while os.path.dirname(path) != path: - if os.path.exists(os.path.join(path, ".hg")): - return path - path = os.path.dirname(path) - sys.exit("repo base could not be found") - - -def is_running_on_cluster(): - node = platform.node() - return ( - "cluster" in node or - node.startswith("gkigrid") or - node in ["habakuk", "turtur"]) - - -def is_test_run(): - return ARGS.test_run == "yes" or ( - ARGS.test_run == "auto" and not is_running_on_cluster()) - - -def get_algo_nick(revision, config_nick): - return "{revision}-{config_nick}".format(**locals()) - - -class IssueConfig(object): - """Hold information about a planner configuration. - - See FastDownwardExperiment.add_algorithm() for documentation of the - constructor's options. 
- - """ - def __init__(self, nick, component_options, - build_options=None, driver_options=None): - self.nick = nick - self.component_options = component_options - self.build_options = build_options - self.driver_options = driver_options - - -class IssueExperiment(FastDownwardExperiment): - """Subclass of FastDownwardExperiment with some convenience features.""" - - DEFAULT_TEST_SUITE = ["gripper:prob01.pddl"] - - DEFAULT_TABLE_ATTRIBUTES = [ - "cost", - "coverage", - "error", - "evaluations", - "expansions", - "expansions_until_last_jump", - "generated", - "memory", - "quality", - "run_dir", - "score_evaluations", - "score_expansions", - "score_generated", - "score_memory", - "score_search_time", - "score_total_time", - "search_time", - "total_time", - ] - - DEFAULT_SCATTER_PLOT_ATTRIBUTES = [ - "evaluations", - "expansions", - "expansions_until_last_jump", - "initial_h_value", - "memory", - "search_time", - "total_time", - ] - - PORTFOLIO_ATTRIBUTES = [ - "cost", - "coverage", - "error", - "plan_length", - "run_dir", - ] - - def __init__(self, revisions=None, configs=None, path=None, **kwargs): - """ - - You can either specify both *revisions* and *configs* or none - of them. If they are omitted, you will need to call - exp.add_algorithm() manually. - - If *revisions* is given, it must be a non-empty list of - revision identifiers, which specify which planner versions to - use in the experiment. The same versions are used for - translator, preprocessor and search. :: - - IssueExperiment(revisions=["issue123", "4b3d581643"], ...) - - If *configs* is given, it must be a non-empty list of - IssueConfig objects. :: - - IssueExperiment(..., configs=[ - IssueConfig("ff", ["--search", "eager_greedy(ff())"]), - IssueConfig( - "lama", [], - driver_options=["--alias", "seq-sat-lama-2011"]), - ]) - - If *path* is specified, it must be the path to where the - experiment should be built (e.g. - /home/john/experiments/issue123/exp01/). 
If omitted, the - experiment path is derived automatically from the main - script's filename. Example:: - - script = experiments/issue123/exp01.py --> - path = experiments/issue123/data/issue123-exp01/ - - """ - - path = path or get_data_dir() - - FastDownwardExperiment.__init__(self, path=path, **kwargs) - - if (revisions and not configs) or (not revisions and configs): - raise ValueError( - "please provide either both or none of revisions and configs") - - for rev in revisions: - for config in configs: - self.add_algorithm( - get_algo_nick(rev, config.nick), - get_repo_base(), - rev, - config.component_options, - build_options=config.build_options, - driver_options=config.driver_options) - - self._revisions = revisions - self._configs = configs - - @classmethod - def _is_portfolio(cls, config_nick): - return "fdss" in config_nick - - @classmethod - def get_supported_attributes(cls, config_nick, attributes): - if cls._is_portfolio(config_nick): - return [attr for attr in attributes - if attr in cls.PORTFOLIO_ATTRIBUTES] - return attributes - - def add_absolute_report_step(self, **kwargs): - """Add step that makes an absolute report. - - Absolute reports are useful for experiments that don't compare - revisions. - - The report is written to the experiment evaluation directory. - - All *kwargs* will be passed to the AbsoluteReport class. If the - keyword argument *attributes* is not specified, a default list - of attributes is used. :: - - exp.add_absolute_report_step(attributes=["coverage"]) - - """ - kwargs.setdefault("attributes", self.DEFAULT_TABLE_ATTRIBUTES) - report = AbsoluteReport(**kwargs) - outfile = os.path.join( - self.eval_dir, - get_experiment_name() + "." + report.output_format) - self.add_report(report, outfile=outfile) - self.add_step( - 'publish-absolute-report', subprocess.call, ['publish', outfile]) - - def add_comparison_table_step(self, **kwargs): - """Add a step that makes pairwise revision comparisons. 
- - Create comparative reports for all pairs of Fast Downward - revisions. Each report pairs up the runs of the same config and - lists the two absolute attribute values and their difference - for all attributes in kwargs["attributes"]. - - All *kwargs* will be passed to the CompareConfigsReport class. - If the keyword argument *attributes* is not specified, a - default list of attributes is used. :: - - exp.add_comparison_table_step(attributes=["coverage"]) - - """ - kwargs.setdefault("attributes", self.DEFAULT_TABLE_ATTRIBUTES) - - def make_comparison_tables(): - for rev1, rev2 in itertools.combinations(self._revisions, 2): - compared_configs = [] - for config in self._configs: - config_nick = config.nick - compared_configs.append( - ("%s-%s" % (rev1, config_nick), - "%s-%s" % (rev2, config_nick), - "Diff (%s)" % config_nick)) - report = ComparativeReport(compared_configs, **kwargs) - outfile = os.path.join( - self.eval_dir, - "%s-%s-%s-compare.%s" % ( - self.name, rev1, rev2, report.output_format)) - report(self.eval_dir, outfile) - - def publish_comparison_tables(): - for rev1, rev2 in itertools.combinations(self._revisions, 2): - outfile = os.path.join( - self.eval_dir, - "%s-%s-%s-compare.html" % (self.name, rev1, rev2)) - subprocess.call(["publish", outfile]) - - self.add_step("make-comparison-tables", make_comparison_tables) - self.add_step( - "publish-comparison-tables", publish_comparison_tables) - - def add_scatter_plot_step(self, relative=False, attributes=None): - """Add step creating (relative) scatter plots for all revision pairs. - - Create a scatter plot for each combination of attribute, - configuration and revisions pair. If *attributes* is not - specified, a list of common scatter plot attributes is used. - For portfolios all attributes except "cost", "coverage" and - "plan_length" will be ignored. 
:: - - exp.add_scatter_plot_step(attributes=["expansions"]) - - """ - if relative: - report_class = RelativeScatterPlotReport - scatter_dir = os.path.join(self.eval_dir, "scatter-relative") - step_name = "make-relative-scatter-plots" - else: - report_class = ScatterPlotReport - scatter_dir = os.path.join(self.eval_dir, "scatter-absolute") - step_name = "make-absolute-scatter-plots" - if attributes is None: - attributes = self.DEFAULT_SCATTER_PLOT_ATTRIBUTES - - def make_scatter_plot(config_nick, rev1, rev2, attribute): - name = "-".join([self.name, rev1, rev2, attribute, config_nick]) - print "Make scatter plot for", name - algo1 = "{}-{}".format(rev1, config_nick) - algo2 = "{}-{}".format(rev2, config_nick) - report = report_class( - filter_config=[algo1, algo2], - attributes=[attribute], - get_category=lambda run1, run2: run1["domain"], - legend_location=(1.3, 0.5)) - report( - self.eval_dir, - os.path.join(scatter_dir, rev1 + "-" + rev2, name)) - - def make_scatter_plots(): - for config in self._configs: - for rev1, rev2 in itertools.combinations(self._revisions, 2): - for attribute in self.get_supported_attributes( - config.nick, attributes): - make_scatter_plot(config.nick, rev1, rev2, attribute) - - self.add_step(step_name, make_scatter_plots) diff --git a/experiments/issue662/relativescatter.py b/experiments/issue662/relativescatter.py deleted file mode 100644 index f74cb6e721..0000000000 --- a/experiments/issue662/relativescatter.py +++ /dev/null @@ -1,105 +0,0 @@ -# -*- coding: utf-8 -*- - -from collections import defaultdict - -from matplotlib import ticker - -from downward.reports.scatter import ScatterPlotReport -from downward.reports.plot import PlotReport, Matplotlib, MatplotlibPlot - - -# TODO: handle outliers - -# TODO: this is mostly copied from ScatterMatplotlib (scatter.py) -class RelativeScatterMatplotlib(Matplotlib): - @classmethod - def _plot(cls, report, axes, categories, styles): - # Display grid - axes.grid(b=True, linestyle='-', 
color='0.75') - - has_points = False - # Generate the scatter plots - for category, coords in sorted(categories.items()): - X, Y = zip(*coords) - axes.scatter(X, Y, s=42, label=category, **styles[category]) - if X and Y: - has_points = True - - if report.xscale == 'linear' or report.yscale == 'linear': - plot_size = report.missing_val * 1.01 - else: - plot_size = report.missing_val * 1.25 - - # make 5 ticks above and below 1 - yticks = [] - tick_step = report.ylim_top**(1/5.0) - for i in xrange(-5, 6): - yticks.append(tick_step**i) - axes.set_yticks(yticks) - axes.get_yaxis().set_major_formatter(ticker.ScalarFormatter()) - - axes.set_xlim(report.xlim_left or -1, report.xlim_right or plot_size) - axes.set_ylim(report.ylim_bottom or -1, report.ylim_top or plot_size) - - for axis in [axes.xaxis, axes.yaxis]: - MatplotlibPlot.change_axis_formatter( - axis, - report.missing_val if report.show_missing else None) - return has_points - - -class RelativeScatterPlotReport(ScatterPlotReport): - """ - Generate a scatter plot that shows a relative comparison of two - algorithms with regard to the given attribute. The attribute value - of algorithm 1 is shown on the x-axis and the relation to the value - of algorithm 2 on the y-axis. - """ - - def __init__(self, show_missing=True, get_category=None, **kwargs): - ScatterPlotReport.__init__(self, show_missing, get_category, **kwargs) - if self.output_format == 'tex': - raise "not supported" - else: - self.writer = RelativeScatterMatplotlib - - def _fill_categories(self, runs): - # We discard the *runs* parameter. 
- # Map category names to value tuples - categories = defaultdict(list) - self.ylim_bottom = 2 - self.ylim_top = 0.5 - self.xlim_left = float("inf") - for (domain, problem), runs in self.problem_runs.items(): - if len(runs) != 2: - continue - run1, run2 = runs - assert (run1['algorithm'] == self.algorithms[0] and - run2['algorithm'] == self.algorithms[1]) - val1 = run1.get(self.attribute) - val2 = run2.get(self.attribute) - if val1 is None or val2 is None: - continue - category = self.get_category(run1, run2) - assert val1 > 0, (domain, problem, self.algorithms[0], val1) - assert val2 > 0, (domain, problem, self.algorithms[1], val2) - x = val1 - y = val2 / float(val1) - - categories[category].append((x, y)) - - self.ylim_top = max(self.ylim_top, y) - self.ylim_bottom = min(self.ylim_bottom, y) - self.xlim_left = min(self.xlim_left, x) - - # center around 1 - if self.ylim_bottom < 1: - self.ylim_top = max(self.ylim_top, 1 / float(self.ylim_bottom)) - if self.ylim_top > 1: - self.ylim_bottom = min(self.ylim_bottom, 1 / float(self.ylim_top)) - return categories - - def _set_scales(self, xscale, yscale): - # ScatterPlot uses log-scaling on the x-axis by default. - PlotReport._set_scales( - self, xscale or self.attribute.scale or 'log', 'log') diff --git a/experiments/issue662/v1.py b/experiments/issue662/v1.py deleted file mode 100755 index 00485c72a1..0000000000 --- a/experiments/issue662/v1.py +++ /dev/null @@ -1,77 +0,0 @@ -#! /usr/bin/env python -# -*- coding: utf-8 -*- - -#! 
/usr/bin/env python -# -*- coding: utf-8 -*- - -import os - -from lab.environments import LocalEnvironment, MaiaEnvironment - -import common_setup -from common_setup import IssueConfig, IssueExperiment, get_algo_nick, get_repo_base -from relativescatter import RelativeScatterPlotReport - -DIR = os.path.dirname(os.path.abspath(__file__)) -BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"] -REVISIONS = ["issue662-base", "issue662-v1"] -CONFIGS = [ - IssueConfig( - 'astar-lmcut-static', - ['--search', 'astar(lmcut())'], - build_options=["release32"], - driver_options=["--build=release32", "--search-time-limit", "60s"] - ), - IssueConfig( - 'astar-lmcut-dynamic', - ['--search', 'astar(lmcut())'], - build_options=["release32dynamic"], - driver_options=["--build=release32dynamic", "--search-time-limit", "60s"] - ) -] -SUITE = common_setup.DEFAULT_OPTIMAL_SUITE -ENVIRONMENT = MaiaEnvironment( - priority=0, email="florian.pommerening@unibas.ch") - -if common_setup.is_test_run(): - SUITE = IssueExperiment.DEFAULT_TEST_SUITE - ENVIRONMENT = LocalEnvironment(processes=1) - -exp = IssueExperiment( - revisions=[], - configs=[], - environment=ENVIRONMENT, -) - -for rev in REVISIONS: - for config in CONFIGS: - if rev.endswith("base") and config.nick.endswith("dynamic"): - continue - exp.add_algorithm( - get_algo_nick(rev, config.nick), - get_repo_base(), - rev, - config.component_options, - build_options=config.build_options, - driver_options=config.driver_options) - - -exp.add_suite(BENCHMARKS_DIR, SUITE) -exp.add_absolute_report_step() -exp.add_comparison_table_step() - -for attribute in ["total_time"]: - for algo1, algo2 in [("issue662-base-astar-lmcut-static", - "issue662-v1-astar-lmcut-static"), - ("issue662-v1-astar-lmcut-static", - "issue662-v1-astar-lmcut-dynamic")]: - exp.add_report( - RelativeScatterPlotReport( - attributes=[attribute], - filter_algorithm=[algo1, algo2], - get_category=lambda run1, run2: run1.get("domain"), - ), - 
outfile="{}-{}-{}-{}-{}.png".format(exp.name, attribute, config.nick, algo1, algo2) - ) - -exp.run_steps() diff --git a/experiments/issue665/common_setup.py b/experiments/issue665/common_setup.py deleted file mode 100644 index 934531f15d..0000000000 --- a/experiments/issue665/common_setup.py +++ /dev/null @@ -1,357 +0,0 @@ -# -*- coding: utf-8 -*- - -import itertools -import os -import platform -import subprocess -import sys - -from lab.environments import LocalEnvironment, MaiaEnvironment -from lab.experiment import ARGPARSER -from lab.steps import Step -from lab import tools - -from downward.experiment import FastDownwardExperiment -from downward.reports.absolute import AbsoluteReport -from downward.reports.compare import CompareConfigsReport -from downward.reports.scatter import ScatterPlotReport - - -def parse_args(): - ARGPARSER.add_argument( - "--test", - choices=["yes", "no", "auto"], - default="auto", - dest="test_run", - help="test experiment locally on a small suite if --test=yes or " - "--test=auto and we are not on a cluster") - return ARGPARSER.parse_args() - -ARGS = parse_args() - - -def get_script(): - """Get file name of main script.""" - return tools.get_script_path() - - -def get_script_dir(): - """Get directory of main script. - - Usually a relative directory (depends on how it was called by the user.)""" - return os.path.dirname(get_script()) - - -def get_experiment_name(): - """Get name for experiment. - - Derived from the absolute filename of the main script, e.g. - "/ham/spam/eggs.py" => "spam-eggs".""" - script = os.path.abspath(get_script()) - script_dir = os.path.basename(os.path.dirname(script)) - script_base = os.path.splitext(os.path.basename(script))[0] - return "%s-%s" % (script_dir, script_base) - - -def get_data_dir(): - """Get data dir for the experiment. 
- - This is the subdirectory "data" of the directory containing - the main script.""" - return os.path.join(get_script_dir(), "data", get_experiment_name()) - - -def get_repo_base(): - """Get base directory of the repository, as an absolute path. - - Search upwards in the directory tree from the main script until a - directory with a subdirectory named ".hg" is found. - - Abort if the repo base cannot be found.""" - path = os.path.abspath(get_script_dir()) - while os.path.dirname(path) != path: - if os.path.exists(os.path.join(path, ".hg")): - return path - path = os.path.dirname(path) - sys.exit("repo base could not be found") - - -def is_running_on_cluster(): - node = platform.node() - return ( - "cluster" in node or - node.startswith("gkigrid") or - node in ["habakuk", "turtur"]) - - -def is_test_run(): - return ARGS.test_run == "yes" or ( - ARGS.test_run == "auto" and not is_running_on_cluster()) - - -def get_algo_nick(revision, config_nick): - return "{revision}-{config_nick}".format(**locals()) - - -class IssueConfig(object): - """Hold information about a planner configuration. - - See FastDownwardExperiment.add_algorithm() for documentation of the - constructor's options. 
- - """ - def __init__(self, nick, component_options, - build_options=None, driver_options=None): - self.nick = nick - self.component_options = component_options - self.build_options = build_options - self.driver_options = driver_options - - -class IssueExperiment(FastDownwardExperiment): - """Subclass of FastDownwardExperiment with some convenience features.""" - - DEFAULT_TEST_SUITE = "gripper:prob01.pddl" - - DEFAULT_TABLE_ATTRIBUTES = [ - "cost", - "coverage", - "error", - "evaluations", - "expansions", - "expansions_until_last_jump", - "generated", - "memory", - "quality", - "run_dir", - "score_evaluations", - "score_expansions", - "score_generated", - "score_memory", - "score_search_time", - "score_total_time", - "search_time", - "total_time", - ] - - DEFAULT_SCATTER_PLOT_ATTRIBUTES = [ - "evaluations", - "expansions", - "expansions_until_last_jump", - "initial_h_value", - "memory", - "search_time", - "total_time", - ] - - PORTFOLIO_ATTRIBUTES = [ - "cost", - "coverage", - "error", - "plan_length", - "run_dir", - ] - - def __init__(self, benchmarks_dir, suite, revisions=[], configs={}, - grid_priority=None, path=None, test_suite=None, - email=None, processes=None, - **kwargs): - """ - If *revisions* is specified, it should be a non-empty - list of revisions, which specify which planner versions to use - in the experiment. The same versions are used for translator, - preprocessor and search. :: - - IssueExperiment(revisions=["issue123", "4b3d581643"], ...) - - *configs* must be a non-empty list of IssueConfig objects. :: - - IssueExperiment(..., configs=[ - IssueConfig("ff", ["--search", "eager_greedy(ff())"]), - IssueConfig( - "lama", [], - driver_options=["--alias", "seq-sat-lama-2011"]), - ]) - - *suite* sets the benchmarks for the experiment. It must be a - single string or a list of strings specifying domains or - tasks. The downward.suites module has many predefined - suites. 
:: - - IssueExperiment(..., suite=["grid", "gripper:prob01.pddl"]) - - from downward import suites - IssueExperiment(..., suite=suites.suite_all()) - IssueExperiment(..., suite=suites.suite_satisficing_with_ipc11()) - IssueExperiment(..., suite=suites.suite_optimal()) - - Use *grid_priority* to set the job priority for cluster - experiments. It must be in the range [-1023, 0] where 0 is the - highest priority. By default the priority is 0. :: - - IssueExperiment(..., grid_priority=-500) - - If *path* is specified, it must be the path to where the - experiment should be built (e.g. - /home/john/experiments/issue123/exp01/). If omitted, the - experiment path is derived automatically from the main - script's filename. Example:: - - script = experiments/issue123/exp01.py --> - path = experiments/issue123/data/issue123-exp01/ - - Specify *test_suite* to set the benchmarks for experiment test - runs. By default the first gripper task is used. - - IssueExperiment(..., test_suite=["depot:pfile1", "tpp:p01.pddl"]) - - If *email* is specified, it should be an email address. This - email address will be notified upon completion of the experiments - if it is run on the cluster. 
- """ - - if is_test_run(): - kwargs["environment"] = LocalEnvironment(processes=processes) - suite = test_suite or self.DEFAULT_TEST_SUITE - elif "environment" not in kwargs: - kwargs["environment"] = MaiaEnvironment( - priority=grid_priority, email=email) - - path = path or get_data_dir() - - FastDownwardExperiment.__init__(self, path=path, **kwargs) - - repo = get_repo_base() - for rev in revisions: - for config in configs: - self.add_algorithm( - get_algo_nick(rev, config.nick), - repo, - rev, - config.component_options, - build_options=config.build_options, - driver_options=config.driver_options) - - self.add_suite(benchmarks_dir, suite) - - self._revisions = revisions - self._configs = configs - - @classmethod - def _is_portfolio(cls, config_nick): - return "fdss" in config_nick - - @classmethod - def get_supported_attributes(cls, config_nick, attributes): - if cls._is_portfolio(config_nick): - return [attr for attr in attributes - if attr in cls.PORTFOLIO_ATTRIBUTES] - return attributes - - def add_absolute_report_step(self, **kwargs): - """Add step that makes an absolute report. - - Absolute reports are useful for experiments that don't - compare revisions. - - The report is written to the experiment evaluation directory. - - All *kwargs* will be passed to the AbsoluteReport class. If - the keyword argument *attributes* is not specified, a - default list of attributes is used. :: - - exp.add_absolute_report_step(attributes=["coverage"]) - - """ - kwargs.setdefault("attributes", self.DEFAULT_TABLE_ATTRIBUTES) - report = AbsoluteReport(**kwargs) - outfile = os.path.join(self.eval_dir, - get_experiment_name() + "." + - report.output_format) - self.add_report(report, outfile=outfile) - self.add_step(Step('publish-absolute-report', - subprocess.call, - ['publish', outfile])) - - def add_comparison_table_step(self, **kwargs): - """Add a step that makes pairwise revision comparisons. - - Create comparative reports for all pairs of Fast Downward - revisions. 
Each report pairs up the runs of the same config and - lists the two absolute attribute values and their difference - for all attributes in kwargs["attributes"]. - - All *kwargs* will be passed to the CompareConfigsReport class. - If the keyword argument *attributes* is not specified, a - default list of attributes is used. :: - - exp.add_comparison_table_step(attributes=["coverage"]) - - """ - kwargs.setdefault("attributes", self.DEFAULT_TABLE_ATTRIBUTES) - - def make_comparison_tables(): - for rev1, rev2 in itertools.combinations(self._revisions, 2): - compared_configs = [] - for config in self._configs: - config_nick = config.nick - compared_configs.append( - ("%s-%s" % (rev1, config_nick), - "%s-%s" % (rev2, config_nick), - "Diff (%s)" % config_nick)) - report = CompareConfigsReport(compared_configs, **kwargs) - outfile = os.path.join( - self.eval_dir, - "%s-%s-%s-compare" % (self.name, rev1, rev2) - + "." + report.output_format) - report(self.eval_dir, outfile) - - def publish_comparison_tables(): - for rev1, rev2 in itertools.combinations(self._revisions, 2): - outfile = os.path.join( - self.eval_dir, - "%s-%s-%s-compare" % (self.name, rev1, rev2) - + ".html") - subprocess.call(['publish', outfile]) - - self.add_step(Step("make-comparison-tables", make_comparison_tables)) - self.add_step(Step("publish-comparison-tables", publish_comparison_tables)) - - def add_scatter_plot_step(self, attributes=None): - """Add a step that creates scatter plots for all revision pairs. - - Create a scatter plot for each combination of attribute, - configuration and revisions pair. If *attributes* is not - specified, a list of common scatter plot attributes is used. - For portfolios all attributes except "cost", "coverage" and - "plan_length" will be ignored. 
:: - - exp.add_scatter_plot_step(attributes=["expansions"]) - - """ - if attributes is None: - attributes = self.DEFAULT_SCATTER_PLOT_ATTRIBUTES - scatter_dir = os.path.join(self.eval_dir, "scatter") - - def make_scatter_plot(config_nick, rev1, rev2, attribute): - name = "-".join([self.name, rev1, rev2, attribute, config_nick]) - print "Make scatter plot for", name - algo1 = "%s-%s" % (rev1, config_nick) - algo2 = "%s-%s" % (rev2, config_nick) - report = ScatterPlotReport( - filter_config=[algo1, algo2], - attributes=[attribute], - get_category=lambda run1, run2: run1["domain"], - legend_location=(1.3, 0.5)) - report( - self.eval_dir, - os.path.join(scatter_dir, rev1 + "-" + rev2, name)) - - def make_scatter_plots(): - for config in self._configs: - for rev1, rev2 in itertools.combinations(self._revisions, 2): - for attribute in self.get_supported_attributes( - config.nick, attributes): - make_scatter_plot(config.nick, rev1, rev2, attribute) - - self.add_step(Step("make-scatter-plots", make_scatter_plots)) diff --git a/experiments/issue665/relativescatter.py b/experiments/issue665/relativescatter.py deleted file mode 100644 index 41a8385a87..0000000000 --- a/experiments/issue665/relativescatter.py +++ /dev/null @@ -1,126 +0,0 @@ -# -*- coding: utf-8 -*- -# -# downward uses the lab package to conduct experiments with the -# Fast Downward planning system. -# -# This program is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with this program. If not, see . 
- -from collections import defaultdict -import os - -from lab import tools - -from matplotlib import ticker - -from downward.reports.scatter import ScatterPlotReport -from downward.reports.plot import PlotReport, Matplotlib, MatplotlibPlot - - -# TODO: handle outliers - -# TODO: this is mostly copied from ScatterMatplotlib (scatter.py) -class RelativeScatterMatplotlib(Matplotlib): - @classmethod - def _plot(cls, report, axes, categories, styles): - # Display grid - axes.grid(b=True, linestyle='-', color='0.75') - - has_points = False - # Generate the scatter plots - for category, coords in sorted(categories.items()): - X, Y = zip(*coords) - axes.scatter(X, Y, s=42, label=category, **styles[category]) - if X and Y: - has_points = True - - if report.xscale == 'linear' or report.yscale == 'linear': - plot_size = report.missing_val * 1.01 - else: - plot_size = report.missing_val * 1.25 - - # make 5 ticks above and below 1 - yticks = [] - tick_step = report.ylim_top**(1/5.0) - for i in xrange(-5, 6): - yticks.append(tick_step**i) - axes.set_yticks(yticks) - axes.get_yaxis().set_major_formatter(ticker.ScalarFormatter()) - - axes.set_xlim(report.xlim_left or -1, report.xlim_right or plot_size) - axes.set_ylim(report.ylim_bottom or -1, report.ylim_top or plot_size) - - for axis in [axes.xaxis, axes.yaxis]: - MatplotlibPlot.change_axis_formatter(axis, - report.missing_val if report.show_missing else None) - return has_points - - - -class RelativeScatterPlotReport(ScatterPlotReport): - """ - Generate a scatter plot that shows how a specific attribute in two - configurations. The attribute value in config 1 is shown on the - x-axis and the relation to the value in config 2 on the y-axis. 
- """ - - def __init__(self, show_missing=True, get_category=None, **kwargs): - ScatterPlotReport.__init__(self, show_missing, get_category, **kwargs) - if self.output_format == 'tex': - raise "not supported" - else: - self.writer = RelativeScatterMatplotlib - - - def _fill_categories(self, runs): - # We discard the *runs* parameter. - # Map category names to value tuples - categories = defaultdict(list) - self.ylim_bottom = 2 - self.ylim_top = 0.5 - self.xlim_left = float("inf") - for (domain, problem), runs in self.problem_runs.items(): - if len(runs) != 2: - continue - run1, run2 = runs - assert (run1['config'] == self.configs[0] and - run2['config'] == self.configs[1]) - val1 = run1.get(self.attribute) - val2 = run2.get(self.attribute) - if val1 is None or val2 is None: - continue - category = self.get_category(run1, run2) - assert val1 > 0, (domain, problem, self.configs[0], val1) - assert val2 > 0, (domain, problem, self.configs[1], val2) - x = val1 - y = val2 / float(val1) - - categories[category].append((x, y)) - - self.ylim_top = max(self.ylim_top, y) - self.ylim_bottom = min(self.ylim_bottom, y) - self.xlim_left = min(self.xlim_left, x) - - # center around 1 - if self.ylim_bottom < 1: - self.ylim_top = max(self.ylim_top, 1 / float(self.ylim_bottom)) - if self.ylim_top > 1: - self.ylim_bottom = min(self.ylim_bottom, 1 / float(self.ylim_top)) - return categories - - def _set_scales(self, xscale, yscale): - # ScatterPlots use log-scaling on the x-axis by default. - default_xscale = 'log' - if self.attribute and self.attribute in self.LINEAR: - default_xscale = 'linear' - PlotReport._set_scales(self, xscale or default_xscale, 'log') diff --git a/experiments/issue665/v1.py b/experiments/issue665/v1.py deleted file mode 100755 index f2725f4b90..0000000000 --- a/experiments/issue665/v1.py +++ /dev/null @@ -1,45 +0,0 @@ -#! 
/usr/bin/env python -# -*- coding: utf-8 -*- - -from downward import suites -from lab.reports import Attribute, gm - -import os - -from common_setup import IssueConfig, IssueExperiment -from relativescatter import RelativeScatterPlotReport - -def main(revisions=None): - suite = suites.suite_optimal_with_ipc11() - - configs = { - IssueConfig('astar-blind', ['--search', 'astar(blind())'], - driver_options=['--search-time-limit', '5m']), - } - - exp = IssueExperiment( - benchmarks_dir=os.path.expanduser('~/projects/downward/benchmarks'), - revisions=revisions, - configs=configs, - suite=suite, - test_suite=['depot:pfile1'], - processes=4, - email='florian.pommerening@unibas.ch', - ) - - exp.add_comparison_table_step() - - attribute = "total_time" - config_nick = 'astar-blind' - exp.add_report( - RelativeScatterPlotReport( - attributes=[attribute], - filter_config=["{}-{}".format(rev, config_nick) for rev in revisions], - get_category=lambda run1, run2: run1.get("domain"), - ), - outfile="{}-{}-{}.png".format(exp.name, attribute, config_nick) - ) - - exp() - -main(revisions=['issue665-base', 'issue665-v1']) diff --git a/experiments/issue665/v2.py b/experiments/issue665/v2.py deleted file mode 100755 index d740f12235..0000000000 --- a/experiments/issue665/v2.py +++ /dev/null @@ -1,45 +0,0 @@ -#! 
/usr/bin/env python -# -*- coding: utf-8 -*- - -from downward import suites -from lab.reports import Attribute, gm - -import os - -from common_setup import IssueConfig, IssueExperiment -from relativescatter import RelativeScatterPlotReport - -def main(revisions=None): - suite = suites.suite_optimal_with_ipc11() - - configs = { - IssueConfig('astar-blind', ['--search', 'astar(blind())'], - driver_options=['--search-time-limit', '5m']), - } - - exp = IssueExperiment( - benchmarks_dir=os.path.expanduser('~/projects/downward/benchmarks'), - revisions=revisions, - configs=configs, - suite=suite, - test_suite=['depot:pfile1'], - processes=4, - email='florian.pommerening@unibas.ch', - ) - - exp.add_comparison_table_step() - - attribute = "total_time" - config_nick = 'astar-blind' - exp.add_report( - RelativeScatterPlotReport( - attributes=[attribute], - filter_config=["{}-{}".format(rev, config_nick) for rev in revisions], - get_category=lambda run1, run2: run1.get("domain"), - ), - outfile="{}-{}-{}.png".format(exp.name, attribute, config_nick) - ) - - exp() - -main(revisions=['issue665-base', 'issue665-v2']) diff --git a/experiments/issue666/common_setup.py b/experiments/issue666/common_setup.py deleted file mode 100644 index 934531f15d..0000000000 --- a/experiments/issue666/common_setup.py +++ /dev/null @@ -1,357 +0,0 @@ -# -*- coding: utf-8 -*- - -import itertools -import os -import platform -import subprocess -import sys - -from lab.environments import LocalEnvironment, MaiaEnvironment -from lab.experiment import ARGPARSER -from lab.steps import Step -from lab import tools - -from downward.experiment import FastDownwardExperiment -from downward.reports.absolute import AbsoluteReport -from downward.reports.compare import CompareConfigsReport -from downward.reports.scatter import ScatterPlotReport - - -def parse_args(): - ARGPARSER.add_argument( - "--test", - choices=["yes", "no", "auto"], - default="auto", - dest="test_run", - help="test experiment locally on a small 
suite if --test=yes or " - "--test=auto and we are not on a cluster") - return ARGPARSER.parse_args() - -ARGS = parse_args() - - -def get_script(): - """Get file name of main script.""" - return tools.get_script_path() - - -def get_script_dir(): - """Get directory of main script. - - Usually a relative directory (depends on how it was called by the user.)""" - return os.path.dirname(get_script()) - - -def get_experiment_name(): - """Get name for experiment. - - Derived from the absolute filename of the main script, e.g. - "/ham/spam/eggs.py" => "spam-eggs".""" - script = os.path.abspath(get_script()) - script_dir = os.path.basename(os.path.dirname(script)) - script_base = os.path.splitext(os.path.basename(script))[0] - return "%s-%s" % (script_dir, script_base) - - -def get_data_dir(): - """Get data dir for the experiment. - - This is the subdirectory "data" of the directory containing - the main script.""" - return os.path.join(get_script_dir(), "data", get_experiment_name()) - - -def get_repo_base(): - """Get base directory of the repository, as an absolute path. - - Search upwards in the directory tree from the main script until a - directory with a subdirectory named ".hg" is found. - - Abort if the repo base cannot be found.""" - path = os.path.abspath(get_script_dir()) - while os.path.dirname(path) != path: - if os.path.exists(os.path.join(path, ".hg")): - return path - path = os.path.dirname(path) - sys.exit("repo base could not be found") - - -def is_running_on_cluster(): - node = platform.node() - return ( - "cluster" in node or - node.startswith("gkigrid") or - node in ["habakuk", "turtur"]) - - -def is_test_run(): - return ARGS.test_run == "yes" or ( - ARGS.test_run == "auto" and not is_running_on_cluster()) - - -def get_algo_nick(revision, config_nick): - return "{revision}-{config_nick}".format(**locals()) - - -class IssueConfig(object): - """Hold information about a planner configuration. 
- - See FastDownwardExperiment.add_algorithm() for documentation of the - constructor's options. - - """ - def __init__(self, nick, component_options, - build_options=None, driver_options=None): - self.nick = nick - self.component_options = component_options - self.build_options = build_options - self.driver_options = driver_options - - -class IssueExperiment(FastDownwardExperiment): - """Subclass of FastDownwardExperiment with some convenience features.""" - - DEFAULT_TEST_SUITE = "gripper:prob01.pddl" - - DEFAULT_TABLE_ATTRIBUTES = [ - "cost", - "coverage", - "error", - "evaluations", - "expansions", - "expansions_until_last_jump", - "generated", - "memory", - "quality", - "run_dir", - "score_evaluations", - "score_expansions", - "score_generated", - "score_memory", - "score_search_time", - "score_total_time", - "search_time", - "total_time", - ] - - DEFAULT_SCATTER_PLOT_ATTRIBUTES = [ - "evaluations", - "expansions", - "expansions_until_last_jump", - "initial_h_value", - "memory", - "search_time", - "total_time", - ] - - PORTFOLIO_ATTRIBUTES = [ - "cost", - "coverage", - "error", - "plan_length", - "run_dir", - ] - - def __init__(self, benchmarks_dir, suite, revisions=[], configs={}, - grid_priority=None, path=None, test_suite=None, - email=None, processes=None, - **kwargs): - """ - If *revisions* is specified, it should be a non-empty - list of revisions, which specify which planner versions to use - in the experiment. The same versions are used for translator, - preprocessor and search. :: - - IssueExperiment(revisions=["issue123", "4b3d581643"], ...) - - *configs* must be a non-empty list of IssueConfig objects. :: - - IssueExperiment(..., configs=[ - IssueConfig("ff", ["--search", "eager_greedy(ff())"]), - IssueConfig( - "lama", [], - driver_options=["--alias", "seq-sat-lama-2011"]), - ]) - - *suite* sets the benchmarks for the experiment. It must be a - single string or a list of strings specifying domains or - tasks. 
The downward.suites module has many predefined - suites. :: - - IssueExperiment(..., suite=["grid", "gripper:prob01.pddl"]) - - from downward import suites - IssueExperiment(..., suite=suites.suite_all()) - IssueExperiment(..., suite=suites.suite_satisficing_with_ipc11()) - IssueExperiment(..., suite=suites.suite_optimal()) - - Use *grid_priority* to set the job priority for cluster - experiments. It must be in the range [-1023, 0] where 0 is the - highest priority. By default the priority is 0. :: - - IssueExperiment(..., grid_priority=-500) - - If *path* is specified, it must be the path to where the - experiment should be built (e.g. - /home/john/experiments/issue123/exp01/). If omitted, the - experiment path is derived automatically from the main - script's filename. Example:: - - script = experiments/issue123/exp01.py --> - path = experiments/issue123/data/issue123-exp01/ - - Specify *test_suite* to set the benchmarks for experiment test - runs. By default the first gripper task is used. - - IssueExperiment(..., test_suite=["depot:pfile1", "tpp:p01.pddl"]) - - If *email* is specified, it should be an email address. This - email address will be notified upon completion of the experiments - if it is run on the cluster. 
- """ - - if is_test_run(): - kwargs["environment"] = LocalEnvironment(processes=processes) - suite = test_suite or self.DEFAULT_TEST_SUITE - elif "environment" not in kwargs: - kwargs["environment"] = MaiaEnvironment( - priority=grid_priority, email=email) - - path = path or get_data_dir() - - FastDownwardExperiment.__init__(self, path=path, **kwargs) - - repo = get_repo_base() - for rev in revisions: - for config in configs: - self.add_algorithm( - get_algo_nick(rev, config.nick), - repo, - rev, - config.component_options, - build_options=config.build_options, - driver_options=config.driver_options) - - self.add_suite(benchmarks_dir, suite) - - self._revisions = revisions - self._configs = configs - - @classmethod - def _is_portfolio(cls, config_nick): - return "fdss" in config_nick - - @classmethod - def get_supported_attributes(cls, config_nick, attributes): - if cls._is_portfolio(config_nick): - return [attr for attr in attributes - if attr in cls.PORTFOLIO_ATTRIBUTES] - return attributes - - def add_absolute_report_step(self, **kwargs): - """Add step that makes an absolute report. - - Absolute reports are useful for experiments that don't - compare revisions. - - The report is written to the experiment evaluation directory. - - All *kwargs* will be passed to the AbsoluteReport class. If - the keyword argument *attributes* is not specified, a - default list of attributes is used. :: - - exp.add_absolute_report_step(attributes=["coverage"]) - - """ - kwargs.setdefault("attributes", self.DEFAULT_TABLE_ATTRIBUTES) - report = AbsoluteReport(**kwargs) - outfile = os.path.join(self.eval_dir, - get_experiment_name() + "." + - report.output_format) - self.add_report(report, outfile=outfile) - self.add_step(Step('publish-absolute-report', - subprocess.call, - ['publish', outfile])) - - def add_comparison_table_step(self, **kwargs): - """Add a step that makes pairwise revision comparisons. - - Create comparative reports for all pairs of Fast Downward - revisions. 
Each report pairs up the runs of the same config and - lists the two absolute attribute values and their difference - for all attributes in kwargs["attributes"]. - - All *kwargs* will be passed to the CompareConfigsReport class. - If the keyword argument *attributes* is not specified, a - default list of attributes is used. :: - - exp.add_comparison_table_step(attributes=["coverage"]) - - """ - kwargs.setdefault("attributes", self.DEFAULT_TABLE_ATTRIBUTES) - - def make_comparison_tables(): - for rev1, rev2 in itertools.combinations(self._revisions, 2): - compared_configs = [] - for config in self._configs: - config_nick = config.nick - compared_configs.append( - ("%s-%s" % (rev1, config_nick), - "%s-%s" % (rev2, config_nick), - "Diff (%s)" % config_nick)) - report = CompareConfigsReport(compared_configs, **kwargs) - outfile = os.path.join( - self.eval_dir, - "%s-%s-%s-compare" % (self.name, rev1, rev2) - + "." + report.output_format) - report(self.eval_dir, outfile) - - def publish_comparison_tables(): - for rev1, rev2 in itertools.combinations(self._revisions, 2): - outfile = os.path.join( - self.eval_dir, - "%s-%s-%s-compare" % (self.name, rev1, rev2) - + ".html") - subprocess.call(['publish', outfile]) - - self.add_step(Step("make-comparison-tables", make_comparison_tables)) - self.add_step(Step("publish-comparison-tables", publish_comparison_tables)) - - def add_scatter_plot_step(self, attributes=None): - """Add a step that creates scatter plots for all revision pairs. - - Create a scatter plot for each combination of attribute, - configuration and revisions pair. If *attributes* is not - specified, a list of common scatter plot attributes is used. - For portfolios all attributes except "cost", "coverage" and - "plan_length" will be ignored. 
:: - - exp.add_scatter_plot_step(attributes=["expansions"]) - - """ - if attributes is None: - attributes = self.DEFAULT_SCATTER_PLOT_ATTRIBUTES - scatter_dir = os.path.join(self.eval_dir, "scatter") - - def make_scatter_plot(config_nick, rev1, rev2, attribute): - name = "-".join([self.name, rev1, rev2, attribute, config_nick]) - print "Make scatter plot for", name - algo1 = "%s-%s" % (rev1, config_nick) - algo2 = "%s-%s" % (rev2, config_nick) - report = ScatterPlotReport( - filter_config=[algo1, algo2], - attributes=[attribute], - get_category=lambda run1, run2: run1["domain"], - legend_location=(1.3, 0.5)) - report( - self.eval_dir, - os.path.join(scatter_dir, rev1 + "-" + rev2, name)) - - def make_scatter_plots(): - for config in self._configs: - for rev1, rev2 in itertools.combinations(self._revisions, 2): - for attribute in self.get_supported_attributes( - config.nick, attributes): - make_scatter_plot(config.nick, rev1, rev2, attribute) - - self.add_step(Step("make-scatter-plots", make_scatter_plots)) diff --git a/experiments/issue666/ms-parser.py b/experiments/issue666/ms-parser.py deleted file mode 100755 index c219b72ba5..0000000000 --- a/experiments/issue666/ms-parser.py +++ /dev/null @@ -1,72 +0,0 @@ -#! 
/usr/bin/env python - -from lab.parser import Parser - -parser = Parser() -parser.add_pattern('ms_final_size', 'Final transition system size: (\d+)', required=False, type=int) -parser.add_pattern('ms_construction_time', 'Done initializing merge-and-shrink heuristic \[(.+)s\]', required=False, type=float) -parser.add_pattern('ms_memory_delta', 'Final peak memory increase of merge-and-shrink computation: (\d+) KB', required=False, type=int) -parser.add_pattern('actual_search_time', 'Actual search time: (.+)s \[t=.+s\]', required=False, type=float) - -def check_ms_constructed(content, props): - ms_construction_time = props.get('ms_construction_time') - abstraction_constructed = False - if ms_construction_time is not None: - abstraction_constructed = True - props['ms_abstraction_constructed'] = abstraction_constructed - -parser.add_function(check_ms_constructed) - -def check_planner_exit_reason(content, props): - ms_abstraction_constructed = props.get('ms_abstraction_constructed') - error = props.get('error') - if error != 'none' and error != 'timeout' and error != 'out-of-memory': - print 'error: %s' % error - return - - # Check whether merge-and-shrink computation or search ran out of - # time or memory. 
- ms_out_of_time = False - ms_out_of_memory = False - search_out_of_time = False - search_out_of_memory = False - if ms_abstraction_constructed == False: - if error == 'timeout': - ms_out_of_time = True - elif error == 'out-of-memory': - ms_out_of_memory = True - elif ms_abstraction_constructed == True: - if error == 'timeout': - search_out_of_time = True - elif error == 'out-of-memory': - search_out_of_memory = True - props['ms_out_of_time'] = ms_out_of_time - props['ms_out_of_memory'] = ms_out_of_memory - props['search_out_of_time'] = search_out_of_time - props['search_out_of_memory'] = search_out_of_memory - -parser.add_function(check_planner_exit_reason) - -def check_perfect_heuristic(content, props): - plan_length = props.get('plan_length') - expansions = props.get('expansions') - if plan_length != None: - perfect_heuristic = False - if plan_length + 1 == expansions: - perfect_heuristic = True - props['perfect_heuristic'] = perfect_heuristic - -parser.add_function(check_perfect_heuristic) - -def check_proved_unsolvability(content, props): - proved_unsolvability = False - if props['coverage'] == 0: - for line in content.splitlines(): - if line == 'Completely explored state space -- no solution!': - proved_unsolvability = True - break - props['proved_unsolvability'] = proved_unsolvability - -parser.add_function(check_proved_unsolvability) - -parser.parse() diff --git a/experiments/issue666/relativescatter.py b/experiments/issue666/relativescatter.py deleted file mode 100644 index 68e2529f34..0000000000 --- a/experiments/issue666/relativescatter.py +++ /dev/null @@ -1,106 +0,0 @@ -# -*- coding: utf-8 -*- - -from collections import defaultdict - -from matplotlib import ticker - -from downward.reports.scatter import ScatterPlotReport -from downward.reports.plot import PlotReport, Matplotlib, MatplotlibPlot - - -# TODO: handle outliers - -# TODO: this is mostly copied from ScatterMatplotlib (scatter.py) -class RelativeScatterMatplotlib(Matplotlib): - @classmethod - 
def _plot(cls, report, axes, categories, styles): - # Display grid - axes.grid(b=True, linestyle='-', color='0.75') - - has_points = False - # Generate the scatter plots - for category, coords in sorted(categories.items()): - X, Y = zip(*coords) - axes.scatter(X, Y, s=42, label=category, **styles[category]) - if X and Y: - has_points = True - - if report.xscale == 'linear' or report.yscale == 'linear': - plot_size = report.missing_val * 1.01 - else: - plot_size = report.missing_val * 1.25 - - # make 5 ticks above and below 1 - yticks = [] - tick_step = report.ylim_top**(1/5.0) - for i in xrange(-5, 6): - yticks.append(tick_step**i) - axes.set_yticks(yticks) - axes.get_yaxis().set_major_formatter(ticker.ScalarFormatter()) - - axes.set_xlim(report.xlim_left or -1, report.xlim_right or plot_size) - axes.set_ylim(report.ylim_bottom or -1, report.ylim_top or plot_size) - - for axis in [axes.xaxis, axes.yaxis]: - MatplotlibPlot.change_axis_formatter(axis, - report.missing_val if report.show_missing else None) - return has_points - - - -class RelativeScatterPlotReport(ScatterPlotReport): - """ - Generate a scatter plot that shows how a specific attribute in two - configurations. The attribute value in config 1 is shown on the - x-axis and the relation to the value in config 2 on the y-axis. - """ - - def __init__(self, show_missing=True, get_category=None, **kwargs): - ScatterPlotReport.__init__(self, show_missing, get_category, **kwargs) - if self.output_format == 'tex': - raise "not supported" - else: - self.writer = RelativeScatterMatplotlib - - def _fill_categories(self, runs): - # We discard the *runs* parameter. 
- # Map category names to value tuples - categories = defaultdict(list) - self.ylim_bottom = 2 - self.ylim_top = 0.5 - self.xlim_left = float("inf") - for (domain, problem), runs in self.problem_runs.items(): - if len(runs) != 2: - continue - run1, run2 = runs - assert (run1['config'] == self.configs[0] and - run2['config'] == self.configs[1]) - val1 = run1.get(self.attribute) - val2 = run2.get(self.attribute) - if val1 is None or val2 is None: - continue - category = self.get_category(run1, run2) - assert val1 > 0, (domain, problem, self.configs[0], val1) - assert val2 > 0, (domain, problem, self.configs[1], val2) - x = val1 - y = val2 / float(val1) - - categories[category].append((x, y)) - - self.ylim_top = max(self.ylim_top, y) - self.ylim_bottom = min(self.ylim_bottom, y) - self.xlim_left = min(self.xlim_left, x) - - # center around 1 - if self.ylim_bottom < 1: - self.ylim_top = max(self.ylim_top, 1 / float(self.ylim_bottom)) - if self.ylim_top > 1: - self.ylim_bottom = min(self.ylim_bottom, 1 / float(self.ylim_top)) - return categories - - def _set_scales(self, xscale, yscale): - # ScatterPlots use log-scaling on the x-axis by default. - default_xscale = 'log' - if self.attribute and self.attribute in self.LINEAR: - default_xscale = 'linear' - PlotReport._set_scales(self, xscale or default_xscale, 'log') diff --git a/experiments/issue666/suites.py b/experiments/issue666/suites.py deleted file mode 100644 index 4615212cfd..0000000000 --- a/experiments/issue666/suites.py +++ /dev/null @@ -1,350 +0,0 @@ -#! /usr/bin/env python -# -*- coding: utf-8 -*- - -import argparse -import textwrap - - -HELP = "Convert suite name to list of domains or tasks." 
- - -def suite_alternative_formulations(): - return ['airport-adl', 'no-mprime', 'no-mystery'] - - -def suite_ipc98_to_ipc04_adl(): - return [ - 'assembly', 'miconic-fulladl', 'miconic-simpleadl', - 'optical-telegraphs', 'philosophers', 'psr-large', - 'psr-middle', 'schedule', - ] - - -def suite_ipc98_to_ipc04_strips(): - return [ - 'airport', 'blocks', 'depot', 'driverlog', 'freecell', 'grid', - 'gripper', 'logistics00', 'logistics98', 'miconic', 'movie', - 'mprime', 'mystery', 'pipesworld-notankage', 'psr-small', - 'satellite', 'zenotravel', - ] - - -def suite_ipc98_to_ipc04(): - # All IPC1-4 domains, including the trivial Movie. - return sorted(suite_ipc98_to_ipc04_adl() + suite_ipc98_to_ipc04_strips()) - - -def suite_ipc06_adl(): - return [ - 'openstacks', - 'pathways', - 'trucks', - ] - - -def suite_ipc06_strips_compilations(): - return [ - 'openstacks-strips', - 'pathways-noneg', - 'trucks-strips', - ] - - -def suite_ipc06_strips(): - return [ - 'pipesworld-tankage', - 'rovers', - 'storage', - 'tpp', - ] - - -def suite_ipc06(): - return sorted(suite_ipc06_adl() + suite_ipc06_strips()) - - -def suite_ipc08_common_strips(): - return [ - 'parcprinter-08-strips', - 'pegsol-08-strips', - 'scanalyzer-08-strips', - ] - - -def suite_ipc08_opt_adl(): - return ['openstacks-opt08-adl'] - - -def suite_ipc08_opt_strips(): - return sorted(suite_ipc08_common_strips() + [ - 'elevators-opt08-strips', - 'openstacks-opt08-strips', - 'sokoban-opt08-strips', - 'transport-opt08-strips', - 'woodworking-opt08-strips', - ]) - - -def suite_ipc08_opt(): - return sorted(suite_ipc08_opt_strips() + suite_ipc08_opt_adl()) - - -def suite_ipc08_sat_adl(): - return ['openstacks-sat08-adl'] - - -def suite_ipc08_sat_strips(): - return sorted(suite_ipc08_common_strips() + [ - # Note: cyber-security is missing. 
- 'elevators-sat08-strips', - 'openstacks-sat08-strips', - 'sokoban-sat08-strips', - 'transport-sat08-strips', - 'woodworking-sat08-strips', - ]) - - -def suite_ipc08_sat(): - return sorted(suite_ipc08_sat_strips() + suite_ipc08_sat_adl()) - - -def suite_ipc08(): - return sorted(set(suite_ipc08_opt() + suite_ipc08_sat())) - - -def suite_ipc11_opt(): - return [ - 'barman-opt11-strips', - 'elevators-opt11-strips', - 'floortile-opt11-strips', - 'nomystery-opt11-strips', - 'openstacks-opt11-strips', - 'parcprinter-opt11-strips', - 'parking-opt11-strips', - 'pegsol-opt11-strips', - 'scanalyzer-opt11-strips', - 'sokoban-opt11-strips', - 'tidybot-opt11-strips', - 'transport-opt11-strips', - 'visitall-opt11-strips', - 'woodworking-opt11-strips', - ] - - -def suite_ipc11_sat(): - return [ - 'barman-sat11-strips', - 'elevators-sat11-strips', - 'floortile-sat11-strips', - 'nomystery-sat11-strips', - 'openstacks-sat11-strips', - 'parcprinter-sat11-strips', - 'parking-sat11-strips', - 'pegsol-sat11-strips', - 'scanalyzer-sat11-strips', - 'sokoban-sat11-strips', - 'tidybot-sat11-strips', - 'transport-sat11-strips', - 'visitall-sat11-strips', - 'woodworking-sat11-strips', - ] - - -def suite_ipc11(): - return sorted(suite_ipc11_opt() + suite_ipc11_sat()) - - -def suite_ipc14_agl_adl(): - return [ - 'cavediving-14-adl', - 'citycar-sat14-adl', - 'maintenance-sat14-adl', - ] - - -def suite_ipc14_agl_strips(): - return [ - 'barman-sat14-strips', - 'childsnack-sat14-strips', - 'floortile-sat14-strips', - 'ged-sat14-strips', - 'hiking-agl14-strips', - 'openstacks-agl14-strips', - 'parking-sat14-strips', - 'tetris-sat14-strips', - 'thoughtful-sat14-strips', - 'transport-sat14-strips', - 'visitall-sat14-strips', - ] - - -def suite_ipc14_agl(): - return sorted(suite_ipc14_agl_adl() + suite_ipc14_agl_strips()) - - -def suite_ipc14_mco_adl(): - return [ - 'cavediving-14-adl', - 'citycar-sat14-adl', - 'maintenance-sat14-adl', - ] - - -def suite_ipc14_mco_strips(): - return [ - 
'barman-mco14-strips', - 'childsnack-sat14-strips', - 'floortile-sat14-strips', - 'ged-sat14-strips', - 'hiking-sat14-strips', - 'openstacks-sat14-strips', - 'parking-sat14-strips', - 'tetris-sat14-strips', - 'thoughtful-mco14-strips', - 'transport-sat14-strips', - 'visitall-sat14-strips', - ] - - -def suite_ipc14_mco(): - return sorted(suite_ipc14_mco_adl() + suite_ipc14_mco_strips()) - - -def suite_ipc14_opt_adl(): - return [ - 'cavediving-14-adl', - 'citycar-opt14-adl', - 'maintenance-opt14-adl', - ] - - -def suite_ipc14_opt_strips(): - return [ - 'barman-opt14-strips', - 'childsnack-opt14-strips', - 'floortile-opt14-strips', - 'ged-opt14-strips', - 'hiking-opt14-strips', - 'openstacks-opt14-strips', - 'parking-opt14-strips', - 'tetris-opt14-strips', - 'tidybot-opt14-strips', - 'transport-opt14-strips', - 'visitall-opt14-strips', - ] - - -def suite_ipc14_opt(): - return sorted(suite_ipc14_opt_adl() + suite_ipc14_opt_strips()) - - -def suite_ipc14_sat_adl(): - return [ - 'cavediving-14-adl', - 'citycar-sat14-adl', - 'maintenance-sat14-adl', - ] - - -def suite_ipc14_sat_strips(): - return [ - 'barman-sat14-strips', - 'childsnack-sat14-strips', - 'floortile-sat14-strips', - 'ged-sat14-strips', - 'hiking-sat14-strips', - 'openstacks-sat14-strips', - 'parking-sat14-strips', - 'tetris-sat14-strips', - 'thoughtful-sat14-strips', - 'transport-sat14-strips', - 'visitall-sat14-strips', - ] - - -def suite_ipc14_sat(): - return sorted(suite_ipc14_sat_adl() + suite_ipc14_sat_strips()) - - -def suite_ipc14(): - return sorted(set( - suite_ipc14_agl() + suite_ipc14_mco() + - suite_ipc14_opt() + suite_ipc14_sat())) - - -def suite_unsolvable(): - return sorted( - ['mystery:prob%02d.pddl' % index - for index in [4, 5, 7, 8, 12, 16, 18, 21, 22, 23, 24]] + - ['miconic-fulladl:f21-3.pddl', 'miconic-fulladl:f30-2.pddl']) - - -def suite_optimal_adl(): - return sorted( - suite_ipc98_to_ipc04_adl() + suite_ipc06_adl() + - suite_ipc08_opt_adl() + suite_ipc14_opt_adl()) - - -def 
suite_optimal_strips(): - return sorted( - suite_ipc98_to_ipc04_strips() + suite_ipc06_strips() + - suite_ipc06_strips_compilations() + suite_ipc08_opt_strips() + - suite_ipc11_opt() + suite_ipc14_opt_strips()) - - -def suite_optimal(): - return sorted(suite_optimal_adl() + suite_optimal_strips()) - - -def suite_satisficing_adl(): - return sorted( - suite_ipc98_to_ipc04_adl() + suite_ipc06_adl() + - suite_ipc08_sat_adl() + suite_ipc14_sat_adl()) - - -def suite_satisficing_strips(): - return sorted( - suite_ipc98_to_ipc04_strips() + suite_ipc06_strips() + - suite_ipc06_strips_compilations() + suite_ipc08_sat_strips() + - suite_ipc11_sat() + suite_ipc14_sat_strips()) - - -def suite_satisficing(): - return sorted(suite_satisficing_adl() + suite_satisficing_strips()) - - -def suite_all(): - return sorted( - suite_ipc98_to_ipc04() + suite_ipc06() + - suite_ipc06_strips_compilations() + suite_ipc08() + - suite_ipc11() + suite_ipc14() + suite_alternative_formulations()) - - -def parse_args(): - parser = argparse.ArgumentParser() - parser.add_argument("suite", help="suite name") - return parser.parse_args() - - -def main(): - prefix = "suite_" - suite_names = [ - name[len(prefix):] for name in sorted(globals().keys()) - if name.startswith(prefix)] - parser = argparse.ArgumentParser(description=HELP) - parser.add_argument("suite", choices=suite_names, help="suite name") - parser.add_argument( - "--width", default=72, type=int, - help="output line width (default: %(default)s). Use 1 for single " - "column.") - args = parser.parse_args() - suite_func = globals()[prefix + args.suite] - print(textwrap.fill( - str(suite_func()), - width=args.width, - break_long_words=False, - break_on_hyphens=False)) - - -if __name__ == "__main__": - main() diff --git a/experiments/issue666/v1.py b/experiments/issue666/v1.py deleted file mode 100755 index 2dc304ec66..0000000000 --- a/experiments/issue666/v1.py +++ /dev/null @@ -1,90 +0,0 @@ -#! 
/usr/bin/env python -# -*- coding: utf-8 -*- - -import os -import suites -from lab.reports import Attribute, gm - -from common_setup import IssueConfig, IssueExperiment -try: - from relativescatter import RelativeScatterPlotReport - matplotlib = True -except ImportError: - print 'matplotlib not availabe, scatter plots not available' - matplotlib = False - -def main(revisions=None): - benchmarks_dir=os.path.expanduser('~/repos/downward/benchmarks') - suite=suites.suite_optimal_strips() - - configs = { - IssueConfig('rl-b50k', ['--search', 'astar(merge_and_shrink(merge_strategy=merge_precomputed(merge_tree=linear(variable_order=reverse_level)),shrink_strategy=shrink_bisimulation(greedy=false),label_reduction=exact(before_shrinking=true,before_merging=false),max_states=50000,threshold_before_merge=1))']), - IssueConfig('cggl-b50k', ['--search', 'astar(merge_and_shrink(merge_strategy=merge_precomputed(merge_tree=linear(variable_order=cg_goal_level)),shrink_strategy=shrink_bisimulation(greedy=false),label_reduction=exact(before_shrinking=true,before_merging=false),max_states=50000,threshold_before_merge=1))']), - IssueConfig('dfp-b50k', ['--search', 'astar(merge_and_shrink(merge_strategy=merge_stateless(merge_selector=score_based_filtering(scoring_functions=[goal_relevance,dfp,total_order])),shrink_strategy=shrink_bisimulation(greedy=false),label_reduction=exact(before_shrinking=true,before_merging=false),max_states=50000,threshold_before_merge=1))']), - IssueConfig('rl-ginf', ['--search', 'astar(merge_and_shrink(merge_strategy=merge_precomputed(merge_tree=linear(variable_order=reverse_level)),shrink_strategy=shrink_bisimulation(greedy=true),label_reduction=exact(before_shrinking=true,before_merging=false),max_states=infinity,threshold_before_merge=1))']), - IssueConfig('cggl-ginf', ['--search', 
'astar(merge_and_shrink(merge_strategy=merge_precomputed(merge_tree=linear(variable_order=cg_goal_level)),shrink_strategy=shrink_bisimulation(greedy=true),label_reduction=exact(before_shrinking=true,before_merging=false),max_states=infinity,threshold_before_merge=1))']), - IssueConfig('dfp-ginf', ['--search', 'astar(merge_and_shrink(merge_strategy=merge_stateless(merge_selector=score_based_filtering(scoring_functions=[goal_relevance,dfp,total_order])),shrink_strategy=shrink_bisimulation(greedy=true),label_reduction=exact(before_shrinking=true,before_merging=false),max_states=infinity,threshold_before_merge=1))']), - IssueConfig('rl-f50k', ['--search', 'astar(merge_and_shrink(merge_strategy=merge_precomputed(merge_tree=linear(variable_order=reverse_level)),shrink_strategy=shrink_fh(),label_reduction=exact(before_shrinking=false,before_merging=true),max_states=50000))']), - IssueConfig('cggl-f50k', ['--search', 'astar(merge_and_shrink(merge_strategy=merge_precomputed(merge_tree=linear(variable_order=cg_goal_level)),shrink_strategy=shrink_fh(),label_reduction=exact(before_shrinking=false,before_merging=true),max_states=50000))']), - IssueConfig('dfp-f50k', ['--search', 'astar(merge_and_shrink(merge_strategy=merge_stateless(merge_selector=score_based_filtering(scoring_functions=[goal_relevance,dfp,total_order])),shrink_strategy=shrink_fh(),label_reduction=exact(before_shrinking=false,before_merging=true),max_states=50000))']), - } - - exp = IssueExperiment( - benchmarks_dir=benchmarks_dir, - suite=suite, - revisions=revisions, - configs=configs, - test_suite=['depot:p01.pddl'], - processes=4, - email='silvan.sievers@unibas.ch', - ) - exp.add_resource('ms_parser', 'ms-parser.py', dest='ms-parser.py') - exp.add_command('ms-parser', ['ms_parser']) - - # planner outcome attributes - perfect_heuristic = Attribute('perfect_heuristic', absolute=True, min_wins=False) - proved_unsolvability = Attribute('proved_unsolvability', absolute=True, min_wins=False) - actual_search_time 
= Attribute('actual_search_time', absolute=False, min_wins=True, functions=[gm]) - - # m&s attributes - ms_construction_time = Attribute('ms_construction_time', absolute=False, min_wins=True, functions=[gm]) - ms_abstraction_constructed = Attribute('ms_abstraction_constructed', absolute=True, min_wins=False) - ms_final_size = Attribute('ms_final_size', absolute=False, min_wins=True) - ms_out_of_memory = Attribute('ms_out_of_memory', absolute=True, min_wins=True) - ms_out_of_time = Attribute('ms_out_of_time', absolute=True, min_wins=True) - search_out_of_memory = Attribute('search_out_of_memory', absolute=True, min_wins=True) - search_out_of_time = Attribute('search_out_of_time', absolute=True, min_wins=True) - - extra_attributes = [ - perfect_heuristic, - proved_unsolvability, - actual_search_time, - - ms_construction_time, - ms_abstraction_constructed, - ms_final_size, - ms_out_of_memory, - ms_out_of_time, - search_out_of_memory, - search_out_of_time, - ] - attributes = exp.DEFAULT_TABLE_ATTRIBUTES - attributes.extend(extra_attributes) - - exp.add_comparison_table_step() - - #if matplotlib: - #for attribute in ["memory", "total_time"]: - #for config in configs: - #exp.add_report( - #RelativeScatterPlotReport( - #attributes=[attribute], - #filter_config=["{}-{}".format(rev, config.nick) for rev in revisions], - #get_category=lambda run1, run2: run1.get("domain"), - #), - #outfile="{}-{}-{}.png".format(exp.name, attribute, config.nick) - #) - - exp() - -main(revisions=['issue666-base', 'issue666-v1']) diff --git a/experiments/issue667/common_setup.py b/experiments/issue667/common_setup.py deleted file mode 100644 index b3e8877e77..0000000000 --- a/experiments/issue667/common_setup.py +++ /dev/null @@ -1,386 +0,0 @@ -# -*- coding: utf-8 -*- - -import itertools -import os -import platform -import subprocess -import sys - -from lab.environments import LocalEnvironment, MaiaEnvironment -from lab.experiment import ARGPARSER -from lab.steps import Step -from lab import 
tools - -from downward.experiment import FastDownwardExperiment -from downward.reports.absolute import AbsoluteReport -from downward.reports.compare import CompareConfigsReport -from downward.reports.scatter import ScatterPlotReport - - -def parse_args(): - ARGPARSER.add_argument( - "--test", - choices=["yes", "no", "auto"], - default="auto", - dest="test_run", - help="test experiment locally on a small suite if --test=yes or " - "--test=auto and we are not on a cluster") - return ARGPARSER.parse_args() - -ARGS = parse_args() - - -def get_script(): - """Get file name of main script.""" - return tools.get_script_path() - - -def get_script_dir(): - """Get directory of main script. - - Usually a relative directory (depends on how it was called by the user.)""" - return os.path.dirname(get_script()) - - -def get_experiment_name(): - """Get name for experiment. - - Derived from the absolute filename of the main script, e.g. - "/ham/spam/eggs.py" => "spam-eggs".""" - script = os.path.abspath(get_script()) - script_dir = os.path.basename(os.path.dirname(script)) - script_base = os.path.splitext(os.path.basename(script))[0] - return "%s-%s" % (script_dir, script_base) - - -def get_data_dir(): - """Get data dir for the experiment. - - This is the subdirectory "data" of the directory containing - the main script.""" - return os.path.join(get_script_dir(), "data", get_experiment_name()) - - -def get_repo_base(): - """Get base directory of the repository, as an absolute path. - - Search upwards in the directory tree from the main script until a - directory with a subdirectory named ".hg" is found. 
- - Abort if the repo base cannot be found.""" - path = os.path.abspath(get_script_dir()) - while os.path.dirname(path) != path: - if os.path.exists(os.path.join(path, ".hg")): - return path - path = os.path.dirname(path) - sys.exit("repo base could not be found") - - -def is_running_on_cluster(): - node = platform.node() - return ( - "cluster" in node or - node.startswith("gkigrid") or - node in ["habakuk", "turtur"]) - - -def is_test_run(): - return ARGS.test_run == "yes" or ( - ARGS.test_run == "auto" and not is_running_on_cluster()) - - -def get_algo_nick(revision, config_nick): - return "{revision}-{config_nick}".format(**locals()) - - -class IssueConfig(object): - """Hold information about a planner configuration. - - See FastDownwardExperiment.add_algorithm() for documentation of the - constructor's options. - - """ - def __init__(self, nick, component_options, - build_options=None, driver_options=None): - self.nick = nick - self.component_options = component_options - self.build_options = build_options - self.driver_options = driver_options - - -class IssueExperiment(FastDownwardExperiment): - """Subclass of FastDownwardExperiment with some convenience features.""" - - DEFAULT_TEST_SUITE = "gripper:prob01.pddl" - - DEFAULT_TABLE_ATTRIBUTES = [ - "cost", - "coverage", - "error", - "evaluations", - "expansions", - "expansions_until_last_jump", - "generated", - "memory", - "quality", - "run_dir", - "score_evaluations", - "score_expansions", - "score_generated", - "score_memory", - "score_search_time", - "score_total_time", - "search_time", - "total_time", - ] - - DEFAULT_SCATTER_PLOT_ATTRIBUTES = [ - "evaluations", - "expansions", - "expansions_until_last_jump", - "initial_h_value", - "memory", - "search_time", - "total_time", - ] - - PORTFOLIO_ATTRIBUTES = [ - "cost", - "coverage", - "error", - "plan_length", - "run_dir", - ] - - def __init__(self, benchmarks_dir, suite, revisions=[], configs={}, - grid_priority=None, path=None, test_suite=None, - email=None, 
processes=None, - **kwargs): - """ - If *revisions* is specified, it should be a non-empty - list of revisions, which specify which planner versions to use - in the experiment. The same versions are used for translator, - preprocessor and search. :: - - IssueExperiment(revisions=["issue123", "4b3d581643"], ...) - - *configs* must be a non-empty list of IssueConfig objects. :: - - IssueExperiment(..., configs=[ - IssueConfig("ff", ["--search", "eager_greedy(ff())"]), - IssueConfig( - "lama", [], - driver_options=["--alias", "seq-sat-lama-2011"]), - ]) - - *suite* sets the benchmarks for the experiment. It must be a - single string or a list of strings specifying domains or - tasks. The downward.suites module has many predefined - suites. :: - - IssueExperiment(..., suite=["grid", "gripper:prob01.pddl"]) - - from downward import suites - IssueExperiment(..., suite=suites.suite_all()) - IssueExperiment(..., suite=suites.suite_satisficing_with_ipc11()) - IssueExperiment(..., suite=suites.suite_optimal()) - - Use *grid_priority* to set the job priority for cluster - experiments. It must be in the range [-1023, 0] where 0 is the - highest priority. By default the priority is 0. :: - - IssueExperiment(..., grid_priority=-500) - - If *path* is specified, it must be the path to where the - experiment should be built (e.g. - /home/john/experiments/issue123/exp01/). If omitted, the - experiment path is derived automatically from the main - script's filename. Example:: - - script = experiments/issue123/exp01.py --> - path = experiments/issue123/data/issue123-exp01/ - - Specify *test_suite* to set the benchmarks for experiment test - runs. By default the first gripper task is used. - - IssueExperiment(..., test_suite=["depot:pfile1", "tpp:p01.pddl"]) - - If *email* is specified, it should be an email address. This - email address will be notified upon completion of the experiments - if it is run on the cluster. 
- """ - - if is_test_run(): - kwargs["environment"] = LocalEnvironment(processes=processes) - suite = test_suite or self.DEFAULT_TEST_SUITE - elif "environment" not in kwargs: - kwargs["environment"] = MaiaEnvironment( - priority=grid_priority, email=email) - - path = path or get_data_dir() - - FastDownwardExperiment.__init__(self, path=path, **kwargs) - - repo = get_repo_base() - for rev in revisions: - for config in configs: - self.add_algorithm( - get_algo_nick(rev, config.nick), - repo, - rev, - config.component_options, - build_options=config.build_options, - driver_options=config.driver_options) - - self.add_suite(benchmarks_dir, suite) - - self._revisions = revisions - self._configs = configs - - @classmethod - def _is_portfolio(cls, config_nick): - return "fdss" in config_nick - - @classmethod - def get_supported_attributes(cls, config_nick, attributes): - if cls._is_portfolio(config_nick): - return [attr for attr in attributes - if attr in cls.PORTFOLIO_ATTRIBUTES] - return attributes - - def add_absolute_report_step(self, name='', **kwargs): - """Add step that makes an absolute report. - - Absolute reports are useful for experiments that don't - compare revisions. - - The report is written to the experiment evaluation directory. - - *name* is a custom name for the report. - - All *kwargs* will be passed to the AbsoluteReport class. If - the keyword argument *attributes* is not specified, a - default list of attributes is used. :: - - exp.add_absolute_report_step(attributes=["coverage"]) - - """ - kwargs.setdefault("attributes", self.DEFAULT_TABLE_ATTRIBUTES) - if name == '': - name = get_experiment_name() - report = AbsoluteReport(**kwargs) - outfile = os.path.join(self.eval_dir, - name + "." 
+ - report.output_format) - self.add_report(report, outfile=outfile) - self.add_step(Step('publish-absolute-report', - subprocess.call, - ['publish', outfile])) - - def add_comparison_table_step(self, **kwargs): - """Add a step that makes pairwise revision comparisons. - - Create comparative reports for all pairs of Fast Downward - revisions. Each report pairs up the runs of the same config and - lists the two absolute attribute values and their difference - for all attributes in kwargs["attributes"]. - - All *kwargs* will be passed to the CompareConfigsReport class. - If the keyword argument *attributes* is not specified, a - default list of attributes is used. :: - - exp.add_comparison_table_step(attributes=["coverage"]) - - """ - kwargs.setdefault("attributes", self.DEFAULT_TABLE_ATTRIBUTES) - - def make_comparison_tables(): - for rev1, rev2 in itertools.combinations(self._revisions, 2): - compared_configs = [] - for config in self._configs: - config_nick = config.nick - compared_configs.append( - ("%s-%s" % (rev1, config_nick), - "%s-%s" % (rev2, config_nick), - "Diff (%s)" % config_nick)) - report = CompareConfigsReport(compared_configs, **kwargs) - outfile = os.path.join( - self.eval_dir, - "%s-%s-%s-compare" % (self.name, rev1, rev2) - + "." + report.output_format) - report(self.eval_dir, outfile) - - def publish_comparison_tables(): - for rev1, rev2 in itertools.combinations(self._revisions, 2): - outfile = os.path.join( - self.eval_dir, - "%s-%s-%s-compare" % (self.name, rev1, rev2) - + ".html") - subprocess.call(['publish', outfile]) - - self.add_step(Step("make-comparison-tables", make_comparison_tables)) - self.add_step(Step("publish-comparison-tables", publish_comparison_tables)) - - def add_custom_comparison_table_step(self, name, **kwargs): - """Add a step that compares the configurations given in - *compared_configs*. - - *compared_configs* must be specified. See CompareConfigsReport class. - - *name* is a custom name for the report. 
- - All *kwargs* will be passed to the CompareConfigsReport class. - If the keyword argument *attributes* is not specified, a - default list of attributes is used. :: - - exp.add_comparison_table_step(attributes=["coverage"]) - - """ - kwargs.setdefault("attributes", self.DEFAULT_TABLE_ATTRIBUTES) - report = CompareConfigsReport(**kwargs) - outfile = os.path.join( - self.eval_dir, - name + "." + report.output_format) - self.add_report(report, outfile=outfile) - self.add_step(Step('publish-custom-comparison-report', - subprocess.call, - ['publish', outfile])) - - def add_scatter_plot_step(self, attributes=None): - """Add a step that creates scatter plots for all revision pairs. - - Create a scatter plot for each combination of attribute, - configuration and revisions pair. If *attributes* is not - specified, a list of common scatter plot attributes is used. - For portfolios all attributes except "cost", "coverage" and - "plan_length" will be ignored. :: - - exp.add_scatter_plot_step(attributes=["expansions"]) - - """ - if attributes is None: - attributes = self.DEFAULT_SCATTER_PLOT_ATTRIBUTES - scatter_dir = os.path.join(self.eval_dir, "scatter") - - def make_scatter_plot(config_nick, rev1, rev2, attribute): - name = "-".join([self.name, rev1, rev2, attribute, config_nick]) - print "Make scatter plot for", name - algo1 = "%s-%s" % (rev1, config_nick) - algo2 = "%s-%s" % (rev2, config_nick) - report = ScatterPlotReport( - filter_config=[algo1, algo2], - attributes=[attribute], - get_category=lambda run1, run2: run1["domain"], - legend_location=(1.3, 0.5)) - report( - self.eval_dir, - os.path.join(scatter_dir, rev1 + "-" + rev2, name)) - - def make_scatter_plots(): - for config in self._configs: - for rev1, rev2 in itertools.combinations(self._revisions, 2): - for attribute in self.get_supported_attributes( - config.nick, attributes): - make_scatter_plot(config.nick, rev1, rev2, attribute) - - self.add_step(Step("make-scatter-plots", make_scatter_plots)) diff --git 
a/experiments/issue667/ms-parser.py b/experiments/issue667/ms-parser.py deleted file mode 100755 index c219b72ba5..0000000000 --- a/experiments/issue667/ms-parser.py +++ /dev/null @@ -1,72 +0,0 @@ -#! /usr/bin/env python - -from lab.parser import Parser - -parser = Parser() -parser.add_pattern('ms_final_size', 'Final transition system size: (\d+)', required=False, type=int) -parser.add_pattern('ms_construction_time', 'Done initializing merge-and-shrink heuristic \[(.+)s\]', required=False, type=float) -parser.add_pattern('ms_memory_delta', 'Final peak memory increase of merge-and-shrink computation: (\d+) KB', required=False, type=int) -parser.add_pattern('actual_search_time', 'Actual search time: (.+)s \[t=.+s\]', required=False, type=float) - -def check_ms_constructed(content, props): - ms_construction_time = props.get('ms_construction_time') - abstraction_constructed = False - if ms_construction_time is not None: - abstraction_constructed = True - props['ms_abstraction_constructed'] = abstraction_constructed - -parser.add_function(check_ms_constructed) - -def check_planner_exit_reason(content, props): - ms_abstraction_constructed = props.get('ms_abstraction_constructed') - error = props.get('error') - if error != 'none' and error != 'timeout' and error != 'out-of-memory': - print 'error: %s' % error - return - - # Check whether merge-and-shrink computation or search ran out of - # time or memory. 
- ms_out_of_time = False - ms_out_of_memory = False - search_out_of_time = False - search_out_of_memory = False - if ms_abstraction_constructed == False: - if error == 'timeout': - ms_out_of_time = True - elif error == 'out-of-memory': - ms_out_of_memory = True - elif ms_abstraction_constructed == True: - if error == 'timeout': - search_out_of_time = True - elif error == 'out-of-memory': - search_out_of_memory = True - props['ms_out_of_time'] = ms_out_of_time - props['ms_out_of_memory'] = ms_out_of_memory - props['search_out_of_time'] = search_out_of_time - props['search_out_of_memory'] = search_out_of_memory - -parser.add_function(check_planner_exit_reason) - -def check_perfect_heuristic(content, props): - plan_length = props.get('plan_length') - expansions = props.get('expansions') - if plan_length != None: - perfect_heuristic = False - if plan_length + 1 == expansions: - perfect_heuristic = True - props['perfect_heuristic'] = perfect_heuristic - -parser.add_function(check_perfect_heuristic) - -def check_proved_unsolvability(content, props): - proved_unsolvability = False - if props['coverage'] == 0: - for line in content.splitlines(): - if line == 'Completely explored state space -- no solution!': - proved_unsolvability = True - break - props['proved_unsolvability'] = proved_unsolvability - -parser.add_function(check_proved_unsolvability) - -parser.parse() diff --git a/experiments/issue667/relativescatter.py b/experiments/issue667/relativescatter.py deleted file mode 100644 index 68e2529f34..0000000000 --- a/experiments/issue667/relativescatter.py +++ /dev/null @@ -1,106 +0,0 @@ -# -*- coding: utf-8 -*- - -from collections import defaultdict - -from matplotlib import ticker - -from downward.reports.scatter import ScatterPlotReport -from downward.reports.plot import PlotReport, Matplotlib, MatplotlibPlot - - -# TODO: handle outliers - -# TODO: this is mostly copied from ScatterMatplotlib (scatter.py) -class RelativeScatterMatplotlib(Matplotlib): - @classmethod - 
def _plot(cls, report, axes, categories, styles): - # Display grid - axes.grid(b=True, linestyle='-', color='0.75') - - has_points = False - # Generate the scatter plots - for category, coords in sorted(categories.items()): - X, Y = zip(*coords) - axes.scatter(X, Y, s=42, label=category, **styles[category]) - if X and Y: - has_points = True - - if report.xscale == 'linear' or report.yscale == 'linear': - plot_size = report.missing_val * 1.01 - else: - plot_size = report.missing_val * 1.25 - - # make 5 ticks above and below 1 - yticks = [] - tick_step = report.ylim_top**(1/5.0) - for i in xrange(-5, 6): - yticks.append(tick_step**i) - axes.set_yticks(yticks) - axes.get_yaxis().set_major_formatter(ticker.ScalarFormatter()) - - axes.set_xlim(report.xlim_left or -1, report.xlim_right or plot_size) - axes.set_ylim(report.ylim_bottom or -1, report.ylim_top or plot_size) - - for axis in [axes.xaxis, axes.yaxis]: - MatplotlibPlot.change_axis_formatter(axis, - report.missing_val if report.show_missing else None) - return has_points - - - -class RelativeScatterPlotReport(ScatterPlotReport): - """ - Generate a scatter plot that shows how a specific attribute in two - configurations. The attribute value in config 1 is shown on the - x-axis and the relation to the value in config 2 on the y-axis. - """ - - def __init__(self, show_missing=True, get_category=None, **kwargs): - ScatterPlotReport.__init__(self, show_missing, get_category, **kwargs) - if self.output_format == 'tex': - raise "not supported" - else: - self.writer = RelativeScatterMatplotlib - - def _fill_categories(self, runs): - # We discard the *runs* parameter. 
- # Map category names to value tuples - categories = defaultdict(list) - self.ylim_bottom = 2 - self.ylim_top = 0.5 - self.xlim_left = float("inf") - for (domain, problem), runs in self.problem_runs.items(): - if len(runs) != 2: - continue - run1, run2 = runs - assert (run1['config'] == self.configs[0] and - run2['config'] == self.configs[1]) - val1 = run1.get(self.attribute) - val2 = run2.get(self.attribute) - if val1 is None or val2 is None: - continue - category = self.get_category(run1, run2) - assert val1 > 0, (domain, problem, self.configs[0], val1) - assert val2 > 0, (domain, problem, self.configs[1], val2) - x = val1 - y = val2 / float(val1) - - categories[category].append((x, y)) - - self.ylim_top = max(self.ylim_top, y) - self.ylim_bottom = min(self.ylim_bottom, y) - self.xlim_left = min(self.xlim_left, x) - - # center around 1 - if self.ylim_bottom < 1: - self.ylim_top = max(self.ylim_top, 1 / float(self.ylim_bottom)) - if self.ylim_top > 1: - self.ylim_bottom = min(self.ylim_bottom, 1 / float(self.ylim_top)) - return categories - - def _set_scales(self, xscale, yscale): - # ScatterPlots use log-scaling on the x-axis by default. - default_xscale = 'log' - if self.attribute and self.attribute in self.LINEAR: - default_xscale = 'linear' - PlotReport._set_scales(self, xscale or default_xscale, 'log') diff --git a/experiments/issue667/suites.py b/experiments/issue667/suites.py deleted file mode 100644 index 4615212cfd..0000000000 --- a/experiments/issue667/suites.py +++ /dev/null @@ -1,350 +0,0 @@ -#! /usr/bin/env python -# -*- coding: utf-8 -*- - -import argparse -import textwrap - - -HELP = "Convert suite name to list of domains or tasks." 
- - -def suite_alternative_formulations(): - return ['airport-adl', 'no-mprime', 'no-mystery'] - - -def suite_ipc98_to_ipc04_adl(): - return [ - 'assembly', 'miconic-fulladl', 'miconic-simpleadl', - 'optical-telegraphs', 'philosophers', 'psr-large', - 'psr-middle', 'schedule', - ] - - -def suite_ipc98_to_ipc04_strips(): - return [ - 'airport', 'blocks', 'depot', 'driverlog', 'freecell', 'grid', - 'gripper', 'logistics00', 'logistics98', 'miconic', 'movie', - 'mprime', 'mystery', 'pipesworld-notankage', 'psr-small', - 'satellite', 'zenotravel', - ] - - -def suite_ipc98_to_ipc04(): - # All IPC1-4 domains, including the trivial Movie. - return sorted(suite_ipc98_to_ipc04_adl() + suite_ipc98_to_ipc04_strips()) - - -def suite_ipc06_adl(): - return [ - 'openstacks', - 'pathways', - 'trucks', - ] - - -def suite_ipc06_strips_compilations(): - return [ - 'openstacks-strips', - 'pathways-noneg', - 'trucks-strips', - ] - - -def suite_ipc06_strips(): - return [ - 'pipesworld-tankage', - 'rovers', - 'storage', - 'tpp', - ] - - -def suite_ipc06(): - return sorted(suite_ipc06_adl() + suite_ipc06_strips()) - - -def suite_ipc08_common_strips(): - return [ - 'parcprinter-08-strips', - 'pegsol-08-strips', - 'scanalyzer-08-strips', - ] - - -def suite_ipc08_opt_adl(): - return ['openstacks-opt08-adl'] - - -def suite_ipc08_opt_strips(): - return sorted(suite_ipc08_common_strips() + [ - 'elevators-opt08-strips', - 'openstacks-opt08-strips', - 'sokoban-opt08-strips', - 'transport-opt08-strips', - 'woodworking-opt08-strips', - ]) - - -def suite_ipc08_opt(): - return sorted(suite_ipc08_opt_strips() + suite_ipc08_opt_adl()) - - -def suite_ipc08_sat_adl(): - return ['openstacks-sat08-adl'] - - -def suite_ipc08_sat_strips(): - return sorted(suite_ipc08_common_strips() + [ - # Note: cyber-security is missing. 
- 'elevators-sat08-strips', - 'openstacks-sat08-strips', - 'sokoban-sat08-strips', - 'transport-sat08-strips', - 'woodworking-sat08-strips', - ]) - - -def suite_ipc08_sat(): - return sorted(suite_ipc08_sat_strips() + suite_ipc08_sat_adl()) - - -def suite_ipc08(): - return sorted(set(suite_ipc08_opt() + suite_ipc08_sat())) - - -def suite_ipc11_opt(): - return [ - 'barman-opt11-strips', - 'elevators-opt11-strips', - 'floortile-opt11-strips', - 'nomystery-opt11-strips', - 'openstacks-opt11-strips', - 'parcprinter-opt11-strips', - 'parking-opt11-strips', - 'pegsol-opt11-strips', - 'scanalyzer-opt11-strips', - 'sokoban-opt11-strips', - 'tidybot-opt11-strips', - 'transport-opt11-strips', - 'visitall-opt11-strips', - 'woodworking-opt11-strips', - ] - - -def suite_ipc11_sat(): - return [ - 'barman-sat11-strips', - 'elevators-sat11-strips', - 'floortile-sat11-strips', - 'nomystery-sat11-strips', - 'openstacks-sat11-strips', - 'parcprinter-sat11-strips', - 'parking-sat11-strips', - 'pegsol-sat11-strips', - 'scanalyzer-sat11-strips', - 'sokoban-sat11-strips', - 'tidybot-sat11-strips', - 'transport-sat11-strips', - 'visitall-sat11-strips', - 'woodworking-sat11-strips', - ] - - -def suite_ipc11(): - return sorted(suite_ipc11_opt() + suite_ipc11_sat()) - - -def suite_ipc14_agl_adl(): - return [ - 'cavediving-14-adl', - 'citycar-sat14-adl', - 'maintenance-sat14-adl', - ] - - -def suite_ipc14_agl_strips(): - return [ - 'barman-sat14-strips', - 'childsnack-sat14-strips', - 'floortile-sat14-strips', - 'ged-sat14-strips', - 'hiking-agl14-strips', - 'openstacks-agl14-strips', - 'parking-sat14-strips', - 'tetris-sat14-strips', - 'thoughtful-sat14-strips', - 'transport-sat14-strips', - 'visitall-sat14-strips', - ] - - -def suite_ipc14_agl(): - return sorted(suite_ipc14_agl_adl() + suite_ipc14_agl_strips()) - - -def suite_ipc14_mco_adl(): - return [ - 'cavediving-14-adl', - 'citycar-sat14-adl', - 'maintenance-sat14-adl', - ] - - -def suite_ipc14_mco_strips(): - return [ - 
'barman-mco14-strips', - 'childsnack-sat14-strips', - 'floortile-sat14-strips', - 'ged-sat14-strips', - 'hiking-sat14-strips', - 'openstacks-sat14-strips', - 'parking-sat14-strips', - 'tetris-sat14-strips', - 'thoughtful-mco14-strips', - 'transport-sat14-strips', - 'visitall-sat14-strips', - ] - - -def suite_ipc14_mco(): - return sorted(suite_ipc14_mco_adl() + suite_ipc14_mco_strips()) - - -def suite_ipc14_opt_adl(): - return [ - 'cavediving-14-adl', - 'citycar-opt14-adl', - 'maintenance-opt14-adl', - ] - - -def suite_ipc14_opt_strips(): - return [ - 'barman-opt14-strips', - 'childsnack-opt14-strips', - 'floortile-opt14-strips', - 'ged-opt14-strips', - 'hiking-opt14-strips', - 'openstacks-opt14-strips', - 'parking-opt14-strips', - 'tetris-opt14-strips', - 'tidybot-opt14-strips', - 'transport-opt14-strips', - 'visitall-opt14-strips', - ] - - -def suite_ipc14_opt(): - return sorted(suite_ipc14_opt_adl() + suite_ipc14_opt_strips()) - - -def suite_ipc14_sat_adl(): - return [ - 'cavediving-14-adl', - 'citycar-sat14-adl', - 'maintenance-sat14-adl', - ] - - -def suite_ipc14_sat_strips(): - return [ - 'barman-sat14-strips', - 'childsnack-sat14-strips', - 'floortile-sat14-strips', - 'ged-sat14-strips', - 'hiking-sat14-strips', - 'openstacks-sat14-strips', - 'parking-sat14-strips', - 'tetris-sat14-strips', - 'thoughtful-sat14-strips', - 'transport-sat14-strips', - 'visitall-sat14-strips', - ] - - -def suite_ipc14_sat(): - return sorted(suite_ipc14_sat_adl() + suite_ipc14_sat_strips()) - - -def suite_ipc14(): - return sorted(set( - suite_ipc14_agl() + suite_ipc14_mco() + - suite_ipc14_opt() + suite_ipc14_sat())) - - -def suite_unsolvable(): - return sorted( - ['mystery:prob%02d.pddl' % index - for index in [4, 5, 7, 8, 12, 16, 18, 21, 22, 23, 24]] + - ['miconic-fulladl:f21-3.pddl', 'miconic-fulladl:f30-2.pddl']) - - -def suite_optimal_adl(): - return sorted( - suite_ipc98_to_ipc04_adl() + suite_ipc06_adl() + - suite_ipc08_opt_adl() + suite_ipc14_opt_adl()) - - -def 
suite_optimal_strips(): - return sorted( - suite_ipc98_to_ipc04_strips() + suite_ipc06_strips() + - suite_ipc06_strips_compilations() + suite_ipc08_opt_strips() + - suite_ipc11_opt() + suite_ipc14_opt_strips()) - - -def suite_optimal(): - return sorted(suite_optimal_adl() + suite_optimal_strips()) - - -def suite_satisficing_adl(): - return sorted( - suite_ipc98_to_ipc04_adl() + suite_ipc06_adl() + - suite_ipc08_sat_adl() + suite_ipc14_sat_adl()) - - -def suite_satisficing_strips(): - return sorted( - suite_ipc98_to_ipc04_strips() + suite_ipc06_strips() + - suite_ipc06_strips_compilations() + suite_ipc08_sat_strips() + - suite_ipc11_sat() + suite_ipc14_sat_strips()) - - -def suite_satisficing(): - return sorted(suite_satisficing_adl() + suite_satisficing_strips()) - - -def suite_all(): - return sorted( - suite_ipc98_to_ipc04() + suite_ipc06() + - suite_ipc06_strips_compilations() + suite_ipc08() + - suite_ipc11() + suite_ipc14() + suite_alternative_formulations()) - - -def parse_args(): - parser = argparse.ArgumentParser() - parser.add_argument("suite", help="suite name") - return parser.parse_args() - - -def main(): - prefix = "suite_" - suite_names = [ - name[len(prefix):] for name in sorted(globals().keys()) - if name.startswith(prefix)] - parser = argparse.ArgumentParser(description=HELP) - parser.add_argument("suite", choices=suite_names, help="suite name") - parser.add_argument( - "--width", default=72, type=int, - help="output line width (default: %(default)s). Use 1 for single " - "column.") - args = parser.parse_args() - suite_func = globals()[prefix + args.suite] - print(textwrap.fill( - str(suite_func()), - width=args.width, - break_long_words=False, - break_on_hyphens=False)) - - -if __name__ == "__main__": - main() diff --git a/experiments/issue667/v1-v2.py b/experiments/issue667/v1-v2.py deleted file mode 100755 index a940f85b54..0000000000 --- a/experiments/issue667/v1-v2.py +++ /dev/null @@ -1,192 +0,0 @@ -#! 
/usr/bin/env python -# -*- coding: utf-8 -*- - -import os -import suites -from lab.reports import Attribute, gm - -from common_setup import IssueConfig, IssueExperiment -try: - from relativescatter import RelativeScatterPlotReport - matplotlib = True -except ImportError: - print 'matplotlib not availabe, scatter plots not available' - matplotlib = False - -def main(revisions=None): - benchmarks_dir=os.path.expanduser('~/repos/downward/benchmarks') - suite=suites.suite_optimal_strips() - - configs = { - IssueConfig('sccs-top-dfp-rl-otn-abp-b50k', ['--search', 'astar(merge_and_shrink(merge_strategy=merge_sccs(order_of_sccs=topological,merge_selector=score_based_filtering(scoring_functions=[goal_relevance,dfp,total_order(atomic_ts_order=reverse_level,product_ts_order=old_to_new,atomic_before_product=true)])),shrink_strategy=shrink_bisimulation(greedy=false),label_reduction=exact(before_shrinking=true,before_merging=false),max_states=50000,threshold_before_merge=1))']), - IssueConfig('sccs-top-dfp-rl-nto-abp-b50k', ['--search', 'astar(merge_and_shrink(merge_strategy=merge_sccs(order_of_sccs=topological,merge_selector=score_based_filtering(scoring_functions=[goal_relevance,dfp,total_order(atomic_ts_order=reverse_level,product_ts_order=new_to_old,atomic_before_product=true)])),shrink_strategy=shrink_bisimulation(greedy=false),label_reduction=exact(before_shrinking=true,before_merging=false),max_states=50000,threshold_before_merge=1))']), - IssueConfig('sccs-top-dfp-rl-rnd-abp-b50k', ['--search', 'astar(merge_and_shrink(merge_strategy=merge_sccs(order_of_sccs=topological,merge_selector=score_based_filtering(scoring_functions=[goal_relevance,dfp,total_order(atomic_ts_order=reverse_level,product_ts_order=random,atomic_before_product=true)])),shrink_strategy=shrink_bisimulation(greedy=false),label_reduction=exact(before_shrinking=true,before_merging=false),max_states=50000,threshold_before_merge=1))']), - IssueConfig('sccs-top-dfp-l-otn-abp-b50k', ['--search', 
'astar(merge_and_shrink(merge_strategy=merge_sccs(order_of_sccs=topological,merge_selector=score_based_filtering(scoring_functions=[goal_relevance,dfp,total_order(atomic_ts_order=level,product_ts_order=old_to_new,atomic_before_product=true)])),shrink_strategy=shrink_bisimulation(greedy=false),label_reduction=exact(before_shrinking=true,before_merging=false),max_states=50000,threshold_before_merge=1))']), - IssueConfig('sccs-top-dfp-l-nto-abp-b50k', ['--search', 'astar(merge_and_shrink(merge_strategy=merge_sccs(order_of_sccs=topological,merge_selector=score_based_filtering(scoring_functions=[goal_relevance,dfp,total_order(atomic_ts_order=level,product_ts_order=new_to_old,atomic_before_product=true)])),shrink_strategy=shrink_bisimulation(greedy=false),label_reduction=exact(before_shrinking=true,before_merging=false),max_states=50000,threshold_before_merge=1))']), - IssueConfig('sccs-top-dfp-l-rnd-abp-b50k', ['--search', 'astar(merge_and_shrink(merge_strategy=merge_sccs(order_of_sccs=topological,merge_selector=score_based_filtering(scoring_functions=[goal_relevance,dfp,total_order(atomic_ts_order=level,product_ts_order=random,atomic_before_product=true)])),shrink_strategy=shrink_bisimulation(greedy=false),label_reduction=exact(before_shrinking=true,before_merging=false),max_states=50000,threshold_before_merge=1))']), - IssueConfig('sccs-top-dfp-rnd-otn-abp-b50k', ['--search', 'astar(merge_and_shrink(merge_strategy=merge_sccs(order_of_sccs=topological,merge_selector=score_based_filtering(scoring_functions=[goal_relevance,dfp,total_order(atomic_ts_order=random,product_ts_order=old_to_new,atomic_before_product=true)])),shrink_strategy=shrink_bisimulation(greedy=false),label_reduction=exact(before_shrinking=true,before_merging=false),max_states=50000,threshold_before_merge=1))']), - IssueConfig('sccs-top-dfp-rnd-nto-abp-b50k', ['--search', 
'astar(merge_and_shrink(merge_strategy=merge_sccs(order_of_sccs=topological,merge_selector=score_based_filtering(scoring_functions=[goal_relevance,dfp,total_order(atomic_ts_order=random,product_ts_order=new_to_old,atomic_before_product=true)])),shrink_strategy=shrink_bisimulation(greedy=false),label_reduction=exact(before_shrinking=true,before_merging=false),max_states=50000,threshold_before_merge=1))']), - IssueConfig('sccs-top-dfp-rnd-rnd-abp-b50k', ['--search', 'astar(merge_and_shrink(merge_strategy=merge_sccs(order_of_sccs=topological,merge_selector=score_based_filtering(scoring_functions=[goal_relevance,dfp,total_order(atomic_ts_order=random,product_ts_order=random,atomic_before_product=true)])),shrink_strategy=shrink_bisimulation(greedy=false),label_reduction=exact(before_shrinking=true,before_merging=false),max_states=50000,threshold_before_merge=1))']), - IssueConfig('sccs-top-dfp-rl-otn-pba-b50k', ['--search', 'astar(merge_and_shrink(merge_strategy=merge_sccs(order_of_sccs=topological,merge_selector=score_based_filtering(scoring_functions=[goal_relevance,dfp,total_order(atomic_ts_order=reverse_level,product_ts_order=old_to_new,atomic_before_product=false)])),shrink_strategy=shrink_bisimulation(greedy=false),label_reduction=exact(before_shrinking=true,before_merging=false),max_states=50000,threshold_before_merge=1))']), - IssueConfig('sccs-top-dfp-rl-nto-pba-b50k', ['--search', 'astar(merge_and_shrink(merge_strategy=merge_sccs(order_of_sccs=topological,merge_selector=score_based_filtering(scoring_functions=[goal_relevance,dfp,total_order(atomic_ts_order=reverse_level,product_ts_order=new_to_old,atomic_before_product=false)])),shrink_strategy=shrink_bisimulation(greedy=false),label_reduction=exact(before_shrinking=true,before_merging=false),max_states=50000,threshold_before_merge=1))']), - IssueConfig('sccs-top-dfp-rl-rnd-pba-b50k', ['--search', 
'astar(merge_and_shrink(merge_strategy=merge_sccs(order_of_sccs=topological,merge_selector=score_based_filtering(scoring_functions=[goal_relevance,dfp,total_order(atomic_ts_order=reverse_level,product_ts_order=random,atomic_before_product=false)])),shrink_strategy=shrink_bisimulation(greedy=false),label_reduction=exact(before_shrinking=true,before_merging=false),max_states=50000,threshold_before_merge=1))']), - IssueConfig('sccs-top-dfp-l-otn-pba-b50k', ['--search', 'astar(merge_and_shrink(merge_strategy=merge_sccs(order_of_sccs=topological,merge_selector=score_based_filtering(scoring_functions=[goal_relevance,dfp,total_order(atomic_ts_order=level,product_ts_order=old_to_new,atomic_before_product=false)])),shrink_strategy=shrink_bisimulation(greedy=false),label_reduction=exact(before_shrinking=true,before_merging=false),max_states=50000,threshold_before_merge=1))']), - IssueConfig('sccs-top-dfp-l-nto-pba-b50k', ['--search', 'astar(merge_and_shrink(merge_strategy=merge_sccs(order_of_sccs=topological,merge_selector=score_based_filtering(scoring_functions=[goal_relevance,dfp,total_order(atomic_ts_order=level,product_ts_order=new_to_old,atomic_before_product=false)])),shrink_strategy=shrink_bisimulation(greedy=false),label_reduction=exact(before_shrinking=true,before_merging=false),max_states=50000,threshold_before_merge=1))']), - IssueConfig('sccs-top-dfp-l-rnd-pba-b50k', ['--search', 'astar(merge_and_shrink(merge_strategy=merge_sccs(order_of_sccs=topological,merge_selector=score_based_filtering(scoring_functions=[goal_relevance,dfp,total_order(atomic_ts_order=level,product_ts_order=random,atomic_before_product=false)])),shrink_strategy=shrink_bisimulation(greedy=false),label_reduction=exact(before_shrinking=true,before_merging=false),max_states=50000,threshold_before_merge=1))']), - IssueConfig('sccs-top-dfp-rnd-otn-pba-b50k', ['--search', 
'astar(merge_and_shrink(merge_strategy=merge_sccs(order_of_sccs=topological,merge_selector=score_based_filtering(scoring_functions=[goal_relevance,dfp,total_order(atomic_ts_order=random,product_ts_order=old_to_new,atomic_before_product=false)])),shrink_strategy=shrink_bisimulation(greedy=false),label_reduction=exact(before_shrinking=true,before_merging=false),max_states=50000,threshold_before_merge=1))']), - IssueConfig('sccs-top-dfp-rnd-nto-pba-b50k', ['--search', 'astar(merge_and_shrink(merge_strategy=merge_sccs(order_of_sccs=topological,merge_selector=score_based_filtering(scoring_functions=[goal_relevance,dfp,total_order(atomic_ts_order=random,product_ts_order=new_to_old,atomic_before_product=false)])),shrink_strategy=shrink_bisimulation(greedy=false),label_reduction=exact(before_shrinking=true,before_merging=false),max_states=50000,threshold_before_merge=1))']), - IssueConfig('sccs-top-dfp-rnd-rnd-pba-b50k', ['--search', 'astar(merge_and_shrink(merge_strategy=merge_sccs(order_of_sccs=topological,merge_selector=score_based_filtering(scoring_functions=[goal_relevance,dfp,total_order(atomic_ts_order=random,product_ts_order=random,atomic_before_product=false)])),shrink_strategy=shrink_bisimulation(greedy=false),label_reduction=exact(before_shrinking=true,before_merging=false),max_states=50000,threshold_before_merge=1))']), - IssueConfig('sccs-top-dfp-allrnd-b50k', ['--search', 'astar(merge_and_shrink(merge_strategy=merge_sccs(order_of_sccs=topological,merge_selector=score_based_filtering(scoring_functions=[goal_relevance,dfp,single_random])),shrink_strategy=shrink_bisimulation(greedy=false),label_reduction=exact(before_shrinking=true,before_merging=false),max_states=50000,threshold_before_merge=1))']), - } - - exp = IssueExperiment( - benchmarks_dir=benchmarks_dir, - suite=suite, - revisions=revisions, - configs=configs, - test_suite=['depot:p01.pddl'], - processes=4, - email='silvan.sievers@unibas.ch', - ) - exp.add_resource('ms_parser', 'ms-parser.py', 
dest='ms-parser.py') - exp.add_command('ms-parser', ['ms_parser']) - - # planner outcome attributes - perfect_heuristic = Attribute('perfect_heuristic', absolute=True, min_wins=False) - proved_unsolvability = Attribute('proved_unsolvability', absolute=True, min_wins=False) - actual_search_time = Attribute('actual_search_time', absolute=False, min_wins=True, functions=[gm]) - - # m&s attributes - ms_construction_time = Attribute('ms_construction_time', absolute=False, min_wins=True, functions=[gm]) - ms_abstraction_constructed = Attribute('ms_abstraction_constructed', absolute=True, min_wins=False) - ms_final_size = Attribute('ms_final_size', absolute=False, min_wins=True) - ms_out_of_memory = Attribute('ms_out_of_memory', absolute=True, min_wins=True) - ms_out_of_time = Attribute('ms_out_of_time', absolute=True, min_wins=True) - search_out_of_memory = Attribute('search_out_of_memory', absolute=True, min_wins=True) - search_out_of_time = Attribute('search_out_of_time', absolute=True, min_wins=True) - - extra_attributes = [ - perfect_heuristic, - proved_unsolvability, - actual_search_time, - - ms_construction_time, - ms_abstraction_constructed, - ms_final_size, - ms_out_of_memory, - ms_out_of_time, - search_out_of_memory, - search_out_of_time, - ] - attributes = exp.DEFAULT_TABLE_ATTRIBUTES - attributes.extend(extra_attributes) - - exp.add_absolute_report_step(name='issue667-v1-abp',filter_config=[ - '%s-sccs-top-dfp-rl-otn-abp-b50k' % 'issue667-v1', - '%s-sccs-top-dfp-rl-nto-abp-b50k' % 'issue667-v1', - '%s-sccs-top-dfp-rl-rnd-abp-b50k' % 'issue667-v1', - '%s-sccs-top-dfp-l-otn-abp-b50k' % 'issue667-v1', - '%s-sccs-top-dfp-l-nto-abp-b50k' % 'issue667-v1', - '%s-sccs-top-dfp-l-rnd-abp-b50k' % 'issue667-v1', - '%s-sccs-top-dfp-rnd-otn-abp-b50k' % 'issue667-v1', - '%s-sccs-top-dfp-rnd-nto-abp-b50k' % 'issue667-v1', - '%s-sccs-top-dfp-rnd-rnd-abp-b50k' % 'issue667-v1', - ]) - exp.add_absolute_report_step(name='issue667-v1-pba',filter_config=[ - 
'%s-sccs-top-dfp-rl-otn-pba-b50k' % 'issue667-v1', - '%s-sccs-top-dfp-rl-nto-pba-b50k' % 'issue667-v1', - '%s-sccs-top-dfp-rl-rnd-pba-b50k' % 'issue667-v1', - '%s-sccs-top-dfp-l-otn-pba-b50k' % 'issue667-v1', - '%s-sccs-top-dfp-l-nto-pba-b50k' % 'issue667-v1', - '%s-sccs-top-dfp-l-rnd-pba-b50k' % 'issue667-v1', - '%s-sccs-top-dfp-rnd-otn-pba-b50k' % 'issue667-v1', - '%s-sccs-top-dfp-rnd-nto-pba-b50k' % 'issue667-v1', - '%s-sccs-top-dfp-rnd-rnd-pba-b50k' % 'issue667-v1', - ]) - exp.add_absolute_report_step(name='issue667-v2-abp',filter_config=[ - '%s-sccs-top-dfp-rl-otn-abp-b50k' % 'issue667-v2', - '%s-sccs-top-dfp-rl-nto-abp-b50k' % 'issue667-v2', - '%s-sccs-top-dfp-rl-rnd-abp-b50k' % 'issue667-v2', - '%s-sccs-top-dfp-l-otn-abp-b50k' % 'issue667-v2', - '%s-sccs-top-dfp-l-nto-abp-b50k' % 'issue667-v2', - '%s-sccs-top-dfp-l-rnd-abp-b50k' % 'issue667-v2', - '%s-sccs-top-dfp-rnd-otn-abp-b50k' % 'issue667-v2', - '%s-sccs-top-dfp-rnd-nto-abp-b50k' % 'issue667-v2', - '%s-sccs-top-dfp-rnd-rnd-abp-b50k' % 'issue667-v2', - ]) - exp.add_absolute_report_step(name='issue667-v2-pba',filter_config=[ - '%s-sccs-top-dfp-rl-otn-pba-b50k' % 'issue667-v2', - '%s-sccs-top-dfp-rl-nto-pba-b50k' % 'issue667-v2', - '%s-sccs-top-dfp-rl-rnd-pba-b50k' % 'issue667-v2', - '%s-sccs-top-dfp-l-otn-pba-b50k' % 'issue667-v2', - '%s-sccs-top-dfp-l-nto-pba-b50k' % 'issue667-v2', - '%s-sccs-top-dfp-l-rnd-pba-b50k' % 'issue667-v2', - '%s-sccs-top-dfp-rnd-otn-pba-b50k' % 'issue667-v2', - '%s-sccs-top-dfp-rnd-nto-pba-b50k' % 'issue667-v2', - '%s-sccs-top-dfp-rnd-rnd-pba-b50k' % 'issue667-v2', - ]) - exp.add_custom_comparison_table_step(name='issue667-compare-v1-v2-abp',compared_configs=[ - ('%s-sccs-top-dfp-rl-otn-abp-b50k' % 'issue667-v1', '%s-sccs-top-dfp-rl-otn-abp-b50k' % 'issue667-v2'), - ('%s-sccs-top-dfp-rl-nto-abp-b50k' % 'issue667-v1', '%s-sccs-top-dfp-rl-nto-abp-b50k' % 'issue667-v2'), - ('%s-sccs-top-dfp-rl-rnd-abp-b50k' % 'issue667-v1', '%s-sccs-top-dfp-rl-rnd-abp-b50k' % 'issue667-v2'), - 
('%s-sccs-top-dfp-l-otn-abp-b50k' % 'issue667-v1', '%s-sccs-top-dfp-l-otn-abp-b50k' % 'issue667-v2'), - ('%s-sccs-top-dfp-l-nto-abp-b50k' % 'issue667-v1', '%s-sccs-top-dfp-l-nto-abp-b50k' % 'issue667-v2'), - ('%s-sccs-top-dfp-l-rnd-abp-b50k' % 'issue667-v1', '%s-sccs-top-dfp-l-rnd-abp-b50k' % 'issue667-v2'), - ('%s-sccs-top-dfp-rnd-otn-abp-b50k' % 'issue667-v1', '%s-sccs-top-dfp-rnd-otn-abp-b50k' % 'issue667-v2'), - ('%s-sccs-top-dfp-rnd-nto-abp-b50k' % 'issue667-v1', '%s-sccs-top-dfp-rnd-nto-abp-b50k' % 'issue667-v2'), - ('%s-sccs-top-dfp-rnd-rnd-abp-b50k' % 'issue667-v1', '%s-sccs-top-dfp-rnd-rnd-abp-b50k' % 'issue667-v2'), - ]) - exp.add_custom_comparison_table_step(name='issue667-compare-v1-v2-pba',compared_configs=[ - ('%s-sccs-top-dfp-rl-otn-pba-b50k' % 'issue667-v1', '%s-sccs-top-dfp-rl-otn-pba-b50k' % 'issue667-v2'), - ('%s-sccs-top-dfp-rl-nto-pba-b50k' % 'issue667-v1', '%s-sccs-top-dfp-rl-nto-pba-b50k' % 'issue667-v2'), - ('%s-sccs-top-dfp-rl-rnd-pba-b50k' % 'issue667-v1', '%s-sccs-top-dfp-rl-rnd-pba-b50k' % 'issue667-v2'), - ('%s-sccs-top-dfp-l-otn-pba-b50k' % 'issue667-v1', '%s-sccs-top-dfp-l-otn-pba-b50k' % 'issue667-v2'), - ('%s-sccs-top-dfp-l-nto-pba-b50k' % 'issue667-v1', '%s-sccs-top-dfp-l-nto-pba-b50k' % 'issue667-v2'), - ('%s-sccs-top-dfp-l-rnd-pba-b50k' % 'issue667-v1', '%s-sccs-top-dfp-l-rnd-pba-b50k' % 'issue667-v2'), - ('%s-sccs-top-dfp-rnd-otn-pba-b50k' % 'issue667-v1', '%s-sccs-top-dfp-rnd-otn-pba-b50k' % 'issue667-v2'), - ('%s-sccs-top-dfp-rnd-nto-pba-b50k' % 'issue667-v1', '%s-sccs-top-dfp-rnd-nto-pba-b50k' % 'issue667-v2'), - ('%s-sccs-top-dfp-rnd-rnd-pba-b50k' % 'issue667-v1', '%s-sccs-top-dfp-rnd-rnd-pba-b50k' % 'issue667-v2'), - ]) - exp.add_absolute_report_step(name='issue667-v1-paper',filter_config=[ - '%s-sccs-top-dfp-rl-nto-abp-b50k' % 'issue667-v1', - '%s-sccs-top-dfp-l-nto-abp-b50k' % 'issue667-v1', - '%s-sccs-top-dfp-rnd-nto-abp-b50k' % 'issue667-v1', - '%s-sccs-top-dfp-rl-nto-pba-b50k' % 'issue667-v1', - 
'%s-sccs-top-dfp-l-nto-pba-b50k' % 'issue667-v1', - '%s-sccs-top-dfp-rnd-nto-pba-b50k' % 'issue667-v1', - '%s-sccs-top-dfp-allrnd-b50k' % 'issue667-v1', - ]) - exp.add_absolute_report_step(name='issue667-v2-paper',filter_config=[ - '%s-sccs-top-dfp-rl-nto-abp-b50k' % 'issue667-v2', - '%s-sccs-top-dfp-l-nto-abp-b50k' % 'issue667-v2', - '%s-sccs-top-dfp-rnd-nto-abp-b50k' % 'issue667-v2', - '%s-sccs-top-dfp-rl-nto-pba-b50k' % 'issue667-v2', - '%s-sccs-top-dfp-l-nto-pba-b50k' % 'issue667-v2', - '%s-sccs-top-dfp-rnd-nto-pba-b50k' % 'issue667-v2', - '%s-sccs-top-dfp-allrnd-b50k' % 'issue667-v2', - ]) - exp.add_custom_comparison_table_step(name='issue667-compare-v1-v2-paper',compared_configs=[ - ('%s-sccs-top-dfp-rl-nto-abp-b50k' % 'issue667-v1', '%s-sccs-top-dfp-rl-nto-abp-b50k' % 'issue667-v2'), - ('%s-sccs-top-dfp-l-nto-abp-b50k' % 'issue667-v1', '%s-sccs-top-dfp-l-nto-abp-b50k' % 'issue667-v2'), - ('%s-sccs-top-dfp-rnd-nto-abp-b50k' % 'issue667-v1', '%s-sccs-top-dfp-rnd-nto-abp-b50k' % 'issue667-v2'), - ('%s-sccs-top-dfp-rl-nto-pba-b50k' % 'issue667-v1', '%s-sccs-top-dfp-rl-nto-pba-b50k' % 'issue667-v2'), - ('%s-sccs-top-dfp-l-nto-pba-b50k' % 'issue667-v1', '%s-sccs-top-dfp-l-nto-pba-b50k' % 'issue667-v2'), - ('%s-sccs-top-dfp-rnd-nto-pba-b50k' % 'issue667-v1', '%s-sccs-top-dfp-rnd-nto-pba-b50k' % 'issue667-v2'), - ('%s-sccs-top-dfp-allrnd-b50k' % 'issue667-v1', '%s-sccs-top-dfp-allrnd-b50k' % 'issue667-v2'), - ]) - - #if matplotlib: - #for attribute in ["memory", "total_time"]: - #for config in configs: - #exp.add_report( - #RelativeScatterPlotReport( - #attributes=[attribute], - #filter_config=["{}-{}".format(rev, config.nick) for rev in revisions], - #get_category=lambda run1, run2: run1.get("domain"), - #), - #outfile="{}-{}-{}.png".format(exp.name, attribute, config.nick) - #) - - exp() - -main(revisions=['issue667-v1', 'issue667-v2']) diff --git a/experiments/issue668/common_setup.py b/experiments/issue668/common_setup.py deleted file mode 100644 index 
9dae4a11e0..0000000000 --- a/experiments/issue668/common_setup.py +++ /dev/null @@ -1,389 +0,0 @@ -# -*- coding: utf-8 -*- - -import itertools -import os -import platform -import subprocess -import sys - -from lab.experiment import ARGPARSER -from lab import tools - -from downward.experiment import FastDownwardExperiment -from downward.reports.absolute import AbsoluteReport -from downward.reports.compare import ComparativeReport -from downward.reports.scatter import ScatterPlotReport - -try: - from relativescatter import RelativeScatterPlotReport - matplotlib = True -except ImportError: - print 'matplotlib not availabe, scatter plots not available' - matplotlib = False - - -def parse_args(): - ARGPARSER.add_argument( - "--test", - choices=["yes", "no", "auto"], - default="auto", - dest="test_run", - help="test experiment locally on a small suite if --test=yes or " - "--test=auto and we are not on a cluster") - return ARGPARSER.parse_args() - -ARGS = parse_args() - - -DEFAULT_OPTIMAL_SUITE = [ - 'airport', 'barman-opt11-strips', 'barman-opt14-strips', 'blocks', - 'childsnack-opt14-strips', 'depot', 'driverlog', - 'elevators-opt08-strips', 'elevators-opt11-strips', - 'floortile-opt11-strips', 'floortile-opt14-strips', 'freecell', - 'ged-opt14-strips', 'grid', 'gripper', 'hiking-opt14-strips', - 'logistics00', 'logistics98', 'miconic', 'movie', 'mprime', - 'mystery', 'nomystery-opt11-strips', 'openstacks-opt08-strips', - 'openstacks-opt11-strips', 'openstacks-opt14-strips', - 'openstacks-strips', 'parcprinter-08-strips', - 'parcprinter-opt11-strips', 'parking-opt11-strips', - 'parking-opt14-strips', 'pathways-noneg', 'pegsol-08-strips', - 'pegsol-opt11-strips', 'pipesworld-notankage', - 'pipesworld-tankage', 'psr-small', 'rovers', 'satellite', - 'scanalyzer-08-strips', 'scanalyzer-opt11-strips', - 'sokoban-opt08-strips', 'sokoban-opt11-strips', 'storage', - 'tetris-opt14-strips', 'tidybot-opt11-strips', - 'tidybot-opt14-strips', 'tpp', 'transport-opt08-strips', - 
'transport-opt11-strips', 'transport-opt14-strips', - 'trucks-strips', 'visitall-opt11-strips', 'visitall-opt14-strips', - 'woodworking-opt08-strips', 'woodworking-opt11-strips', - 'zenotravel'] - -DEFAULT_SATISFICING_SUITE = [ - 'airport', 'assembly', 'barman-sat11-strips', - 'barman-sat14-strips', 'blocks', 'cavediving-14-adl', - 'childsnack-sat14-strips', 'citycar-sat14-adl', 'depot', - 'driverlog', 'elevators-sat08-strips', 'elevators-sat11-strips', - 'floortile-sat11-strips', 'floortile-sat14-strips', 'freecell', - 'ged-sat14-strips', 'grid', 'gripper', 'hiking-sat14-strips', - 'logistics00', 'logistics98', 'maintenance-sat14-adl', 'miconic', - 'miconic-fulladl', 'miconic-simpleadl', 'movie', 'mprime', - 'mystery', 'nomystery-sat11-strips', 'openstacks', - 'openstacks-sat08-adl', 'openstacks-sat08-strips', - 'openstacks-sat11-strips', 'openstacks-sat14-strips', - 'openstacks-strips', 'optical-telegraphs', 'parcprinter-08-strips', - 'parcprinter-sat11-strips', 'parking-sat11-strips', - 'parking-sat14-strips', 'pathways', 'pathways-noneg', - 'pegsol-08-strips', 'pegsol-sat11-strips', 'philosophers', - 'pipesworld-notankage', 'pipesworld-tankage', 'psr-large', - 'psr-middle', 'psr-small', 'rovers', 'satellite', - 'scanalyzer-08-strips', 'scanalyzer-sat11-strips', 'schedule', - 'sokoban-sat08-strips', 'sokoban-sat11-strips', 'storage', - 'tetris-sat14-strips', 'thoughtful-sat14-strips', - 'tidybot-sat11-strips', 'tpp', 'transport-sat08-strips', - 'transport-sat11-strips', 'transport-sat14-strips', 'trucks', - 'trucks-strips', 'visitall-sat11-strips', 'visitall-sat14-strips', - 'woodworking-sat08-strips', 'woodworking-sat11-strips', - 'zenotravel'] - - -def get_script(): - """Get file name of main script.""" - return tools.get_script_path() - - -def get_script_dir(): - """Get directory of main script. 
- - Usually a relative directory (depends on how it was called by the user.)""" - return os.path.dirname(get_script()) - - -def get_experiment_name(): - """Get name for experiment. - - Derived from the absolute filename of the main script, e.g. - "/ham/spam/eggs.py" => "spam-eggs".""" - script = os.path.abspath(get_script()) - script_dir = os.path.basename(os.path.dirname(script)) - script_base = os.path.splitext(os.path.basename(script))[0] - return "%s-%s" % (script_dir, script_base) - - -def get_data_dir(): - """Get data dir for the experiment. - - This is the subdirectory "data" of the directory containing - the main script.""" - return os.path.join(get_script_dir(), "data", get_experiment_name()) - - -def get_repo_base(): - """Get base directory of the repository, as an absolute path. - - Search upwards in the directory tree from the main script until a - directory with a subdirectory named ".hg" is found. - - Abort if the repo base cannot be found.""" - path = os.path.abspath(get_script_dir()) - while os.path.dirname(path) != path: - if os.path.exists(os.path.join(path, ".hg")): - return path - path = os.path.dirname(path) - sys.exit("repo base could not be found") - - -def is_running_on_cluster(): - node = platform.node() - return node.endswith(".scicore.unibas.ch") or node.endswith(".cluster.bc2.ch") - - -def is_test_run(): - return ARGS.test_run == "yes" or ( - ARGS.test_run == "auto" and not is_running_on_cluster()) - - -def get_algo_nick(revision, config_nick): - return "{revision}-{config_nick}".format(**locals()) - - -class IssueConfig(object): - """Hold information about a planner configuration. - - See FastDownwardExperiment.add_algorithm() for documentation of the - constructor's options. 
- - """ - def __init__(self, nick, component_options, - build_options=None, driver_options=None): - self.nick = nick - self.component_options = component_options - self.build_options = build_options - self.driver_options = driver_options - - -class IssueExperiment(FastDownwardExperiment): - """Subclass of FastDownwardExperiment with some convenience features.""" - - DEFAULT_TEST_SUITE = ["depot:p01.pddl", "gripper:prob01.pddl"] - - DEFAULT_TABLE_ATTRIBUTES = [ - "cost", - "coverage", - "error", - "evaluations", - "expansions", - "expansions_until_last_jump", - "generated", - "memory", - "quality", - "run_dir", - "score_evaluations", - "score_expansions", - "score_generated", - "score_memory", - "score_search_time", - "score_total_time", - "search_time", - "total_time", - "unsolvable", - ] - - DEFAULT_SCATTER_PLOT_ATTRIBUTES = [ - "evaluations", - "expansions", - "expansions_until_last_jump", - "initial_h_value", - "memory", - "search_time", - "total_time", - ] - - PORTFOLIO_ATTRIBUTES = [ - "cost", - "coverage", - "error", - "plan_length", - "run_dir", - ] - - def __init__(self, revisions=None, configs=None, path=None, **kwargs): - """ - - You can either specify both *revisions* and *configs* or none - of them. If they are omitted, you will need to call - exp.add_algorithm() manually. - - If *revisions* is given, it must be a non-empty list of - revision identifiers, which specify which planner versions to - use in the experiment. The same versions are used for - translator, preprocessor and search. :: - - IssueExperiment(revisions=["issue123", "4b3d581643"], ...) - - If *configs* is given, it must be a non-empty list of - IssueConfig objects. :: - - IssueExperiment(..., configs=[ - IssueConfig("ff", ["--search", "eager_greedy(ff())"]), - IssueConfig( - "lama", [], - driver_options=["--alias", "seq-sat-lama-2011"]), - ]) - - If *path* is specified, it must be the path to where the - experiment should be built (e.g. - /home/john/experiments/issue123/exp01/). 
If omitted, the - experiment path is derived automatically from the main - script's filename. Example:: - - script = experiments/issue123/exp01.py --> - path = experiments/issue123/data/issue123-exp01/ - - """ - - path = path or get_data_dir() - - FastDownwardExperiment.__init__(self, path=path, **kwargs) - - if (revisions and not configs) or (not revisions and configs): - raise ValueError( - "please provide either both or none of revisions and configs") - - for rev in revisions: - for config in configs: - self.add_algorithm( - get_algo_nick(rev, config.nick), - get_repo_base(), - rev, - config.component_options, - build_options=config.build_options, - driver_options=config.driver_options) - - self._revisions = revisions - self._configs = configs - - @classmethod - def _is_portfolio(cls, config_nick): - return "fdss" in config_nick - - @classmethod - def get_supported_attributes(cls, config_nick, attributes): - if cls._is_portfolio(config_nick): - return [attr for attr in attributes - if attr in cls.PORTFOLIO_ATTRIBUTES] - return attributes - - def add_absolute_report_step(self, **kwargs): - """Add step that makes an absolute report. - - Absolute reports are useful for experiments that don't compare - revisions. - - The report is written to the experiment evaluation directory. - - All *kwargs* will be passed to the AbsoluteReport class. If the - keyword argument *attributes* is not specified, a default list - of attributes is used. :: - - exp.add_absolute_report_step(attributes=["coverage"]) - - """ - kwargs.setdefault("attributes", self.DEFAULT_TABLE_ATTRIBUTES) - report = AbsoluteReport(**kwargs) - outfile = os.path.join( - self.eval_dir, - get_experiment_name() + "." + report.output_format) - self.add_report(report, outfile=outfile) - self.add_step( - 'publish-absolute-report', subprocess.call, ['publish', outfile]) - - def add_comparison_table_step(self, **kwargs): - """Add a step that makes pairwise revision comparisons. 
- - Create comparative reports for all pairs of Fast Downward - revisions. Each report pairs up the runs of the same config and - lists the two absolute attribute values and their difference - for all attributes in kwargs["attributes"]. - - All *kwargs* will be passed to the CompareConfigsReport class. - If the keyword argument *attributes* is not specified, a - default list of attributes is used. :: - - exp.add_comparison_table_step(attributes=["coverage"]) - - """ - kwargs.setdefault("attributes", self.DEFAULT_TABLE_ATTRIBUTES) - - def make_comparison_tables(): - for rev1, rev2 in itertools.combinations(self._revisions, 2): - compared_configs = [] - for config in self._configs: - config_nick = config.nick - compared_configs.append( - ("%s-%s" % (rev1, config_nick), - "%s-%s" % (rev2, config_nick), - "Diff (%s)" % config_nick)) - report = ComparativeReport(compared_configs, **kwargs) - outfile = os.path.join( - self.eval_dir, - "%s-%s-%s-compare.%s" % ( - self.name, rev1, rev2, report.output_format)) - report(self.eval_dir, outfile) - - def publish_comparison_tables(): - for rev1, rev2 in itertools.combinations(self._revisions, 2): - outfile = os.path.join( - self.eval_dir, - "%s-%s-%s-compare.html" % (self.name, rev1, rev2)) - subprocess.call(["publish", outfile]) - - self.add_step("make-comparison-tables", make_comparison_tables) - self.add_step( - "publish-comparison-tables", publish_comparison_tables) - - def add_scatter_plot_step(self, relative=False, attributes=None): - """Add step creating (relative) scatter plots for all revision pairs. - - Create a scatter plot for each combination of attribute, - configuration and revisions pair. If *attributes* is not - specified, a list of common scatter plot attributes is used. - For portfolios all attributes except "cost", "coverage" and - "plan_length" will be ignored. 
:: - - exp.add_scatter_plot_step(attributes=["expansions"]) - - """ - if matplotlib: - if relative: - report_class = RelativeScatterPlotReport - scatter_dir = os.path.join(self.eval_dir, "scatter-relative") - step_name = "make-relative-scatter-plots" - else: - report_class = ScatterPlotReport - scatter_dir = os.path.join(self.eval_dir, "scatter-absolute") - step_name = "make-absolute-scatter-plots" - if attributes is None: - attributes = self.DEFAULT_SCATTER_PLOT_ATTRIBUTES - - def make_scatter_plot(config_nick, rev1, rev2, attribute): - name = "-".join([self.name, rev1, rev2, attribute, config_nick]) - print "Make scatter plot for", name - algo1 = "{}-{}".format(rev1, config_nick) - algo2 = "{}-{}".format(rev2, config_nick) - report = report_class( - filter_config=[algo1, algo2], - attributes=[attribute], - get_category=lambda run1, run2: run1["domain"], - legend_location=(1.3, 0.5)) - report( - self.eval_dir, - os.path.join(scatter_dir, rev1 + "-" + rev2, name)) - - def make_scatter_plots(): - for config in self._configs: - for rev1, rev2 in itertools.combinations(self._revisions, 2): - for attribute in self.get_supported_attributes( - config.nick, attributes): - make_scatter_plot(config.nick, rev1, rev2, attribute) - - self.add_step(step_name, make_scatter_plots) diff --git a/experiments/issue668/ms-parser.py b/experiments/issue668/ms-parser.py deleted file mode 100755 index a5491399d7..0000000000 --- a/experiments/issue668/ms-parser.py +++ /dev/null @@ -1,72 +0,0 @@ -#! 
/usr/bin/env python - -from lab.parser import Parser - -parser = Parser() -parser.add_pattern('ms_final_size', 'Final transition system size: (\d+)', required=False, type=int) -parser.add_pattern('ms_construction_time', 'Done initializing merge-and-shrink heuristic \[(.+)s\]', required=False, type=float) -parser.add_pattern('ms_atomic_construction_time', 't=(.+)s \(after computation of atomic transition systems\)', required=False, type=float) -parser.add_pattern('ms_memory_delta', 'Final peak memory increase of merge-and-shrink computation: (\d+) KB', required=False, type=int) - -def check_ms_constructed(content, props): - ms_construction_time = props.get('ms_construction_time') - abstraction_constructed = False - if ms_construction_time is not None: - abstraction_constructed = True - props['ms_abstraction_constructed'] = abstraction_constructed - -parser.add_function(check_ms_constructed) - -def check_planner_exit_reason(content, props): - ms_abstraction_constructed = props.get('ms_abstraction_constructed') - error = props.get('error') - if error != 'none' and error != 'timeout' and error != 'out-of-memory': - print 'error: %s' % error - return - - # Check whether merge-and-shrink computation or search ran out of - # time or memory. 
- ms_out_of_time = False - ms_out_of_memory = False - search_out_of_time = False - search_out_of_memory = False - if ms_abstraction_constructed == False: - if error == 'timeout': - ms_out_of_time = True - elif error == 'out-of-memory': - ms_out_of_memory = True - elif ms_abstraction_constructed == True: - if error == 'timeout': - search_out_of_time = True - elif error == 'out-of-memory': - search_out_of_memory = True - props['ms_out_of_time'] = ms_out_of_time - props['ms_out_of_memory'] = ms_out_of_memory - props['search_out_of_time'] = search_out_of_time - props['search_out_of_memory'] = search_out_of_memory - -parser.add_function(check_planner_exit_reason) - -def check_perfect_heuristic(content, props): - plan_length = props.get('plan_length') - expansions = props.get('expansions') - if plan_length != None: - perfect_heuristic = False - if plan_length + 1 == expansions: - perfect_heuristic = True - props['perfect_heuristic'] = perfect_heuristic - -parser.add_function(check_perfect_heuristic) - -def check_proved_unsolvability(content, props): - proved_unsolvability = False - if props['coverage'] == 0: - for line in content.splitlines(): - if line == 'Completely explored state space -- no solution!': - proved_unsolvability = True - break - props['proved_unsolvability'] = proved_unsolvability - -parser.add_function(check_proved_unsolvability) - -parser.parse() diff --git a/experiments/issue668/relativescatter.py b/experiments/issue668/relativescatter.py deleted file mode 100644 index 68e2529f34..0000000000 --- a/experiments/issue668/relativescatter.py +++ /dev/null @@ -1,106 +0,0 @@ -# -*- coding: utf-8 -*- - -from collections import defaultdict - -from matplotlib import ticker - -from downward.reports.scatter import ScatterPlotReport -from downward.reports.plot import PlotReport, Matplotlib, MatplotlibPlot - - -# TODO: handle outliers - -# TODO: this is mostly copied from ScatterMatplotlib (scatter.py) -class RelativeScatterMatplotlib(Matplotlib): - @classmethod - 
def _plot(cls, report, axes, categories, styles): - # Display grid - axes.grid(b=True, linestyle='-', color='0.75') - - has_points = False - # Generate the scatter plots - for category, coords in sorted(categories.items()): - X, Y = zip(*coords) - axes.scatter(X, Y, s=42, label=category, **styles[category]) - if X and Y: - has_points = True - - if report.xscale == 'linear' or report.yscale == 'linear': - plot_size = report.missing_val * 1.01 - else: - plot_size = report.missing_val * 1.25 - - # make 5 ticks above and below 1 - yticks = [] - tick_step = report.ylim_top**(1/5.0) - for i in xrange(-5, 6): - yticks.append(tick_step**i) - axes.set_yticks(yticks) - axes.get_yaxis().set_major_formatter(ticker.ScalarFormatter()) - - axes.set_xlim(report.xlim_left or -1, report.xlim_right or plot_size) - axes.set_ylim(report.ylim_bottom or -1, report.ylim_top or plot_size) - - for axis in [axes.xaxis, axes.yaxis]: - MatplotlibPlot.change_axis_formatter(axis, - report.missing_val if report.show_missing else None) - return has_points - - - -class RelativeScatterPlotReport(ScatterPlotReport): - """ - Generate a scatter plot that shows how a specific attribute in two - configurations. The attribute value in config 1 is shown on the - x-axis and the relation to the value in config 2 on the y-axis. - """ - - def __init__(self, show_missing=True, get_category=None, **kwargs): - ScatterPlotReport.__init__(self, show_missing, get_category, **kwargs) - if self.output_format == 'tex': - raise "not supported" - else: - self.writer = RelativeScatterMatplotlib - - def _fill_categories(self, runs): - # We discard the *runs* parameter. 
- # Map category names to value tuples - categories = defaultdict(list) - self.ylim_bottom = 2 - self.ylim_top = 0.5 - self.xlim_left = float("inf") - for (domain, problem), runs in self.problem_runs.items(): - if len(runs) != 2: - continue - run1, run2 = runs - assert (run1['config'] == self.configs[0] and - run2['config'] == self.configs[1]) - val1 = run1.get(self.attribute) - val2 = run2.get(self.attribute) - if val1 is None or val2 is None: - continue - category = self.get_category(run1, run2) - assert val1 > 0, (domain, problem, self.configs[0], val1) - assert val2 > 0, (domain, problem, self.configs[1], val2) - x = val1 - y = val2 / float(val1) - - categories[category].append((x, y)) - - self.ylim_top = max(self.ylim_top, y) - self.ylim_bottom = min(self.ylim_bottom, y) - self.xlim_left = min(self.xlim_left, x) - - # center around 1 - if self.ylim_bottom < 1: - self.ylim_top = max(self.ylim_top, 1 / float(self.ylim_bottom)) - if self.ylim_top > 1: - self.ylim_bottom = min(self.ylim_bottom, 1 / float(self.ylim_top)) - return categories - - def _set_scales(self, xscale, yscale): - # ScatterPlots use log-scaling on the x-axis by default. - default_xscale = 'log' - if self.attribute and self.attribute in self.LINEAR: - default_xscale = 'linear' - PlotReport._set_scales(self, xscale or default_xscale, 'log') diff --git a/experiments/issue668/suites.py b/experiments/issue668/suites.py deleted file mode 100644 index 4615212cfd..0000000000 --- a/experiments/issue668/suites.py +++ /dev/null @@ -1,350 +0,0 @@ -#! /usr/bin/env python -# -*- coding: utf-8 -*- - -import argparse -import textwrap - - -HELP = "Convert suite name to list of domains or tasks." 
- - -def suite_alternative_formulations(): - return ['airport-adl', 'no-mprime', 'no-mystery'] - - -def suite_ipc98_to_ipc04_adl(): - return [ - 'assembly', 'miconic-fulladl', 'miconic-simpleadl', - 'optical-telegraphs', 'philosophers', 'psr-large', - 'psr-middle', 'schedule', - ] - - -def suite_ipc98_to_ipc04_strips(): - return [ - 'airport', 'blocks', 'depot', 'driverlog', 'freecell', 'grid', - 'gripper', 'logistics00', 'logistics98', 'miconic', 'movie', - 'mprime', 'mystery', 'pipesworld-notankage', 'psr-small', - 'satellite', 'zenotravel', - ] - - -def suite_ipc98_to_ipc04(): - # All IPC1-4 domains, including the trivial Movie. - return sorted(suite_ipc98_to_ipc04_adl() + suite_ipc98_to_ipc04_strips()) - - -def suite_ipc06_adl(): - return [ - 'openstacks', - 'pathways', - 'trucks', - ] - - -def suite_ipc06_strips_compilations(): - return [ - 'openstacks-strips', - 'pathways-noneg', - 'trucks-strips', - ] - - -def suite_ipc06_strips(): - return [ - 'pipesworld-tankage', - 'rovers', - 'storage', - 'tpp', - ] - - -def suite_ipc06(): - return sorted(suite_ipc06_adl() + suite_ipc06_strips()) - - -def suite_ipc08_common_strips(): - return [ - 'parcprinter-08-strips', - 'pegsol-08-strips', - 'scanalyzer-08-strips', - ] - - -def suite_ipc08_opt_adl(): - return ['openstacks-opt08-adl'] - - -def suite_ipc08_opt_strips(): - return sorted(suite_ipc08_common_strips() + [ - 'elevators-opt08-strips', - 'openstacks-opt08-strips', - 'sokoban-opt08-strips', - 'transport-opt08-strips', - 'woodworking-opt08-strips', - ]) - - -def suite_ipc08_opt(): - return sorted(suite_ipc08_opt_strips() + suite_ipc08_opt_adl()) - - -def suite_ipc08_sat_adl(): - return ['openstacks-sat08-adl'] - - -def suite_ipc08_sat_strips(): - return sorted(suite_ipc08_common_strips() + [ - # Note: cyber-security is missing. 
- 'elevators-sat08-strips', - 'openstacks-sat08-strips', - 'sokoban-sat08-strips', - 'transport-sat08-strips', - 'woodworking-sat08-strips', - ]) - - -def suite_ipc08_sat(): - return sorted(suite_ipc08_sat_strips() + suite_ipc08_sat_adl()) - - -def suite_ipc08(): - return sorted(set(suite_ipc08_opt() + suite_ipc08_sat())) - - -def suite_ipc11_opt(): - return [ - 'barman-opt11-strips', - 'elevators-opt11-strips', - 'floortile-opt11-strips', - 'nomystery-opt11-strips', - 'openstacks-opt11-strips', - 'parcprinter-opt11-strips', - 'parking-opt11-strips', - 'pegsol-opt11-strips', - 'scanalyzer-opt11-strips', - 'sokoban-opt11-strips', - 'tidybot-opt11-strips', - 'transport-opt11-strips', - 'visitall-opt11-strips', - 'woodworking-opt11-strips', - ] - - -def suite_ipc11_sat(): - return [ - 'barman-sat11-strips', - 'elevators-sat11-strips', - 'floortile-sat11-strips', - 'nomystery-sat11-strips', - 'openstacks-sat11-strips', - 'parcprinter-sat11-strips', - 'parking-sat11-strips', - 'pegsol-sat11-strips', - 'scanalyzer-sat11-strips', - 'sokoban-sat11-strips', - 'tidybot-sat11-strips', - 'transport-sat11-strips', - 'visitall-sat11-strips', - 'woodworking-sat11-strips', - ] - - -def suite_ipc11(): - return sorted(suite_ipc11_opt() + suite_ipc11_sat()) - - -def suite_ipc14_agl_adl(): - return [ - 'cavediving-14-adl', - 'citycar-sat14-adl', - 'maintenance-sat14-adl', - ] - - -def suite_ipc14_agl_strips(): - return [ - 'barman-sat14-strips', - 'childsnack-sat14-strips', - 'floortile-sat14-strips', - 'ged-sat14-strips', - 'hiking-agl14-strips', - 'openstacks-agl14-strips', - 'parking-sat14-strips', - 'tetris-sat14-strips', - 'thoughtful-sat14-strips', - 'transport-sat14-strips', - 'visitall-sat14-strips', - ] - - -def suite_ipc14_agl(): - return sorted(suite_ipc14_agl_adl() + suite_ipc14_agl_strips()) - - -def suite_ipc14_mco_adl(): - return [ - 'cavediving-14-adl', - 'citycar-sat14-adl', - 'maintenance-sat14-adl', - ] - - -def suite_ipc14_mco_strips(): - return [ - 
'barman-mco14-strips', - 'childsnack-sat14-strips', - 'floortile-sat14-strips', - 'ged-sat14-strips', - 'hiking-sat14-strips', - 'openstacks-sat14-strips', - 'parking-sat14-strips', - 'tetris-sat14-strips', - 'thoughtful-mco14-strips', - 'transport-sat14-strips', - 'visitall-sat14-strips', - ] - - -def suite_ipc14_mco(): - return sorted(suite_ipc14_mco_adl() + suite_ipc14_mco_strips()) - - -def suite_ipc14_opt_adl(): - return [ - 'cavediving-14-adl', - 'citycar-opt14-adl', - 'maintenance-opt14-adl', - ] - - -def suite_ipc14_opt_strips(): - return [ - 'barman-opt14-strips', - 'childsnack-opt14-strips', - 'floortile-opt14-strips', - 'ged-opt14-strips', - 'hiking-opt14-strips', - 'openstacks-opt14-strips', - 'parking-opt14-strips', - 'tetris-opt14-strips', - 'tidybot-opt14-strips', - 'transport-opt14-strips', - 'visitall-opt14-strips', - ] - - -def suite_ipc14_opt(): - return sorted(suite_ipc14_opt_adl() + suite_ipc14_opt_strips()) - - -def suite_ipc14_sat_adl(): - return [ - 'cavediving-14-adl', - 'citycar-sat14-adl', - 'maintenance-sat14-adl', - ] - - -def suite_ipc14_sat_strips(): - return [ - 'barman-sat14-strips', - 'childsnack-sat14-strips', - 'floortile-sat14-strips', - 'ged-sat14-strips', - 'hiking-sat14-strips', - 'openstacks-sat14-strips', - 'parking-sat14-strips', - 'tetris-sat14-strips', - 'thoughtful-sat14-strips', - 'transport-sat14-strips', - 'visitall-sat14-strips', - ] - - -def suite_ipc14_sat(): - return sorted(suite_ipc14_sat_adl() + suite_ipc14_sat_strips()) - - -def suite_ipc14(): - return sorted(set( - suite_ipc14_agl() + suite_ipc14_mco() + - suite_ipc14_opt() + suite_ipc14_sat())) - - -def suite_unsolvable(): - return sorted( - ['mystery:prob%02d.pddl' % index - for index in [4, 5, 7, 8, 12, 16, 18, 21, 22, 23, 24]] + - ['miconic-fulladl:f21-3.pddl', 'miconic-fulladl:f30-2.pddl']) - - -def suite_optimal_adl(): - return sorted( - suite_ipc98_to_ipc04_adl() + suite_ipc06_adl() + - suite_ipc08_opt_adl() + suite_ipc14_opt_adl()) - - -def 
suite_optimal_strips(): - return sorted( - suite_ipc98_to_ipc04_strips() + suite_ipc06_strips() + - suite_ipc06_strips_compilations() + suite_ipc08_opt_strips() + - suite_ipc11_opt() + suite_ipc14_opt_strips()) - - -def suite_optimal(): - return sorted(suite_optimal_adl() + suite_optimal_strips()) - - -def suite_satisficing_adl(): - return sorted( - suite_ipc98_to_ipc04_adl() + suite_ipc06_adl() + - suite_ipc08_sat_adl() + suite_ipc14_sat_adl()) - - -def suite_satisficing_strips(): - return sorted( - suite_ipc98_to_ipc04_strips() + suite_ipc06_strips() + - suite_ipc06_strips_compilations() + suite_ipc08_sat_strips() + - suite_ipc11_sat() + suite_ipc14_sat_strips()) - - -def suite_satisficing(): - return sorted(suite_satisficing_adl() + suite_satisficing_strips()) - - -def suite_all(): - return sorted( - suite_ipc98_to_ipc04() + suite_ipc06() + - suite_ipc06_strips_compilations() + suite_ipc08() + - suite_ipc11() + suite_ipc14() + suite_alternative_formulations()) - - -def parse_args(): - parser = argparse.ArgumentParser() - parser.add_argument("suite", help="suite name") - return parser.parse_args() - - -def main(): - prefix = "suite_" - suite_names = [ - name[len(prefix):] for name in sorted(globals().keys()) - if name.startswith(prefix)] - parser = argparse.ArgumentParser(description=HELP) - parser.add_argument("suite", choices=suite_names, help="suite name") - parser.add_argument( - "--width", default=72, type=int, - help="output line width (default: %(default)s). Use 1 for single " - "column.") - args = parser.parse_args() - suite_func = globals()[prefix + args.suite] - print(textwrap.fill( - str(suite_func()), - width=args.width, - break_long_words=False, - break_on_hyphens=False)) - - -if __name__ == "__main__": - main() diff --git a/experiments/issue668/v1.py b/experiments/issue668/v1.py deleted file mode 100755 index 289c5229fa..0000000000 --- a/experiments/issue668/v1.py +++ /dev/null @@ -1,130 +0,0 @@ -#! 
/usr/bin/env python -# -*- coding: utf-8 -*- - -import os -import suites -from lab.reports import Attribute, gm - -from common_setup import IssueConfig, IssueExperiment -try: - from relativescatter import RelativeScatterPlotReport - matplotlib = True -except ImportError: - print 'matplotlib not availabe, scatter plots not available' - matplotlib = False - -def main(revisions=None): - benchmarks_dir=os.path.expanduser('~/repos/downward/benchmarks') - suite=suites.suite_optimal_strips() - - configs = { - IssueConfig('sbf-miasm-rl-otn-abp-b50k', ['--search', 'astar(merge_and_shrink(merge_strategy=merge_stateless(merge_selector=score_based_filtering(scoring_functions=[miasm(shrink_strategy=shrink_bisimulation(greedy=false),max_states=50000,threshold_before_merge=1),total_order(atomic_ts_order=reverse_level,product_ts_order=old_to_new,atomic_before_product=true)])),shrink_strategy=shrink_bisimulation(greedy=false),label_reduction=exact(before_shrinking=true,before_merging=false),max_states=50000,threshold_before_merge=1))']), - IssueConfig('sbf-miasm-rl-nto-abp-b50k', ['--search', 'astar(merge_and_shrink(merge_strategy=merge_stateless(merge_selector=score_based_filtering(scoring_functions=[miasm(shrink_strategy=shrink_bisimulation(greedy=false),max_states=50000,threshold_before_merge=1),total_order(atomic_ts_order=reverse_level,product_ts_order=new_to_old,atomic_before_product=true)])),shrink_strategy=shrink_bisimulation(greedy=false),label_reduction=exact(before_shrinking=true,before_merging=false),max_states=50000,threshold_before_merge=1))']), - IssueConfig('sbf-miasm-rl-rnd-abp-b50k', ['--search', 
'astar(merge_and_shrink(merge_strategy=merge_stateless(merge_selector=score_based_filtering(scoring_functions=[miasm(shrink_strategy=shrink_bisimulation(greedy=false),max_states=50000,threshold_before_merge=1),total_order(atomic_ts_order=reverse_level,product_ts_order=random,atomic_before_product=true)])),shrink_strategy=shrink_bisimulation(greedy=false),label_reduction=exact(before_shrinking=true,before_merging=false),max_states=50000,threshold_before_merge=1))']), - IssueConfig('sbf-miasm-l-otn-abp-b50k', ['--search', 'astar(merge_and_shrink(merge_strategy=merge_stateless(merge_selector=score_based_filtering(scoring_functions=[miasm(shrink_strategy=shrink_bisimulation(greedy=false),max_states=50000,threshold_before_merge=1),total_order(atomic_ts_order=level,product_ts_order=old_to_new,atomic_before_product=true)])),shrink_strategy=shrink_bisimulation(greedy=false),label_reduction=exact(before_shrinking=true,before_merging=false),max_states=50000,threshold_before_merge=1))']), - IssueConfig('sbf-miasm-l-nto-abp-b50k', ['--search', 'astar(merge_and_shrink(merge_strategy=merge_stateless(merge_selector=score_based_filtering(scoring_functions=[miasm(shrink_strategy=shrink_bisimulation(greedy=false),max_states=50000,threshold_before_merge=1),total_order(atomic_ts_order=level,product_ts_order=new_to_old,atomic_before_product=true)])),shrink_strategy=shrink_bisimulation(greedy=false),label_reduction=exact(before_shrinking=true,before_merging=false),max_states=50000,threshold_before_merge=1))']), - IssueConfig('sbf-miasm-l-rnd-abp-b50k', ['--search', 
'astar(merge_and_shrink(merge_strategy=merge_stateless(merge_selector=score_based_filtering(scoring_functions=[miasm(shrink_strategy=shrink_bisimulation(greedy=false),max_states=50000,threshold_before_merge=1),total_order(atomic_ts_order=level,product_ts_order=random,atomic_before_product=true)])),shrink_strategy=shrink_bisimulation(greedy=false),label_reduction=exact(before_shrinking=true,before_merging=false),max_states=50000,threshold_before_merge=1))']), - IssueConfig('sbf-miasm-rnd-otn-abp-b50k', ['--search', 'astar(merge_and_shrink(merge_strategy=merge_stateless(merge_selector=score_based_filtering(scoring_functions=[miasm(shrink_strategy=shrink_bisimulation(greedy=false),max_states=50000,threshold_before_merge=1),total_order(atomic_ts_order=random,product_ts_order=old_to_new,atomic_before_product=true)])),shrink_strategy=shrink_bisimulation(greedy=false),label_reduction=exact(before_shrinking=true,before_merging=false),max_states=50000,threshold_before_merge=1))']), - IssueConfig('sbf-miasm-rnd-nto-abp-b50k', ['--search', 'astar(merge_and_shrink(merge_strategy=merge_stateless(merge_selector=score_based_filtering(scoring_functions=[miasm(shrink_strategy=shrink_bisimulation(greedy=false),max_states=50000,threshold_before_merge=1),total_order(atomic_ts_order=random,product_ts_order=new_to_old,atomic_before_product=true)])),shrink_strategy=shrink_bisimulation(greedy=false),label_reduction=exact(before_shrinking=true,before_merging=false),max_states=50000,threshold_before_merge=1))']), - IssueConfig('sbf-miasm-rnd-rnd-abp-b50k', ['--search', 
'astar(merge_and_shrink(merge_strategy=merge_stateless(merge_selector=score_based_filtering(scoring_functions=[miasm(shrink_strategy=shrink_bisimulation(greedy=false),max_states=50000,threshold_before_merge=1),total_order(atomic_ts_order=random,product_ts_order=random,atomic_before_product=true)])),shrink_strategy=shrink_bisimulation(greedy=false),label_reduction=exact(before_shrinking=true,before_merging=false),max_states=50000,threshold_before_merge=1))']), - IssueConfig('sbf-miasm-rl-otn-pba-b50k', ['--search', 'astar(merge_and_shrink(merge_strategy=merge_stateless(merge_selector=score_based_filtering(scoring_functions=[miasm(shrink_strategy=shrink_bisimulation(greedy=false),max_states=50000,threshold_before_merge=1),total_order(atomic_ts_order=reverse_level,product_ts_order=old_to_new,atomic_before_product=false)])),shrink_strategy=shrink_bisimulation(greedy=false),label_reduction=exact(before_shrinking=true,before_merging=false),max_states=50000,threshold_before_merge=1))']), - IssueConfig('sbf-miasm-rl-nto-pba-b50k', ['--search', 'astar(merge_and_shrink(merge_strategy=merge_stateless(merge_selector=score_based_filtering(scoring_functions=[miasm(shrink_strategy=shrink_bisimulation(greedy=false),max_states=50000,threshold_before_merge=1),total_order(atomic_ts_order=reverse_level,product_ts_order=new_to_old,atomic_before_product=false)])),shrink_strategy=shrink_bisimulation(greedy=false),label_reduction=exact(before_shrinking=true,before_merging=false),max_states=50000,threshold_before_merge=1))']), - IssueConfig('sbf-miasm-rl-rnd-pba-b50k', ['--search', 
'astar(merge_and_shrink(merge_strategy=merge_stateless(merge_selector=score_based_filtering(scoring_functions=[miasm(shrink_strategy=shrink_bisimulation(greedy=false),max_states=50000,threshold_before_merge=1),total_order(atomic_ts_order=reverse_level,product_ts_order=random,atomic_before_product=false)])),shrink_strategy=shrink_bisimulation(greedy=false),label_reduction=exact(before_shrinking=true,before_merging=false),max_states=50000,threshold_before_merge=1))']), - IssueConfig('sbf-miasm-l-otn-pba-b50k', ['--search', 'astar(merge_and_shrink(merge_strategy=merge_stateless(merge_selector=score_based_filtering(scoring_functions=[miasm(shrink_strategy=shrink_bisimulation(greedy=false),max_states=50000,threshold_before_merge=1),total_order(atomic_ts_order=level,product_ts_order=old_to_new,atomic_before_product=false)])),shrink_strategy=shrink_bisimulation(greedy=false),label_reduction=exact(before_shrinking=true,before_merging=false),max_states=50000,threshold_before_merge=1))']), - IssueConfig('sbf-miasm-l-nto-pba-b50k', ['--search', 'astar(merge_and_shrink(merge_strategy=merge_stateless(merge_selector=score_based_filtering(scoring_functions=[miasm(shrink_strategy=shrink_bisimulation(greedy=false),max_states=50000,threshold_before_merge=1),total_order(atomic_ts_order=level,product_ts_order=new_to_old,atomic_before_product=false)])),shrink_strategy=shrink_bisimulation(greedy=false),label_reduction=exact(before_shrinking=true,before_merging=false),max_states=50000,threshold_before_merge=1))']), - IssueConfig('sbf-miasm-l-rnd-pba-b50k', ['--search', 
'astar(merge_and_shrink(merge_strategy=merge_stateless(merge_selector=score_based_filtering(scoring_functions=[miasm(shrink_strategy=shrink_bisimulation(greedy=false),max_states=50000,threshold_before_merge=1),total_order(atomic_ts_order=level,product_ts_order=random,atomic_before_product=false)])),shrink_strategy=shrink_bisimulation(greedy=false),label_reduction=exact(before_shrinking=true,before_merging=false),max_states=50000,threshold_before_merge=1))']), - IssueConfig('sbf-miasm-rnd-otn-pba-b50k', ['--search', 'astar(merge_and_shrink(merge_strategy=merge_stateless(merge_selector=score_based_filtering(scoring_functions=[miasm(shrink_strategy=shrink_bisimulation(greedy=false),max_states=50000,threshold_before_merge=1),total_order(atomic_ts_order=random,product_ts_order=old_to_new,atomic_before_product=false)])),shrink_strategy=shrink_bisimulation(greedy=false),label_reduction=exact(before_shrinking=true,before_merging=false),max_states=50000,threshold_before_merge=1))']), - IssueConfig('sbf-miasm-rnd-nto-pba-b50k', ['--search', 'astar(merge_and_shrink(merge_strategy=merge_stateless(merge_selector=score_based_filtering(scoring_functions=[miasm(shrink_strategy=shrink_bisimulation(greedy=false),max_states=50000,threshold_before_merge=1),total_order(atomic_ts_order=random,product_ts_order=new_to_old,atomic_before_product=false)])),shrink_strategy=shrink_bisimulation(greedy=false),label_reduction=exact(before_shrinking=true,before_merging=false),max_states=50000,threshold_before_merge=1))']), - IssueConfig('sbf-miasm-rnd-rnd-pba-b50k', ['--search', 
'astar(merge_and_shrink(merge_strategy=merge_stateless(merge_selector=score_based_filtering(scoring_functions=[miasm(shrink_strategy=shrink_bisimulation(greedy=false),max_states=50000,threshold_before_merge=1),total_order(atomic_ts_order=random,product_ts_order=random,atomic_before_product=false)])),shrink_strategy=shrink_bisimulation(greedy=false),label_reduction=exact(before_shrinking=true,before_merging=false),max_states=50000,threshold_before_merge=1))']), - IssueConfig('sbf-miasm-allrnd-b50k', ['--search', 'astar(merge_and_shrink(merge_strategy=merge_stateless(merge_selector=score_based_filtering(scoring_functions=[miasm(shrink_strategy=shrink_bisimulation(greedy=false),max_states=50000,threshold_before_merge=1),single_random])),shrink_strategy=shrink_bisimulation(greedy=false),label_reduction=exact(before_shrinking=true,before_merging=false),max_states=50000,threshold_before_merge=1))']), - } - - exp = IssueExperiment( - benchmarks_dir=benchmarks_dir, - suite=suite, - revisions=revisions, - configs=configs, - test_suite=['depot:p01.pddl'], - processes=4, - email='silvan.sievers@unibas.ch', - ) - exp.add_resource('ms_parser', 'ms-parser.py', dest='ms-parser.py') - exp.add_command('ms-parser', ['ms_parser']) - - # planner outcome attributes - perfect_heuristic = Attribute('perfect_heuristic', absolute=True, min_wins=False) - proved_unsolvability = Attribute('proved_unsolvability', absolute=True, min_wins=False) - actual_search_time = Attribute('actual_search_time', absolute=False, min_wins=True, functions=[gm]) - - # m&s attributes - ms_construction_time = Attribute('ms_construction_time', absolute=False, min_wins=True, functions=[gm]) - ms_abstraction_constructed = Attribute('ms_abstraction_constructed', absolute=True, min_wins=False) - ms_final_size = Attribute('ms_final_size', absolute=False, min_wins=True) - ms_out_of_memory = Attribute('ms_out_of_memory', absolute=True, min_wins=True) - ms_out_of_time = Attribute('ms_out_of_time', absolute=True, 
min_wins=True) - search_out_of_memory = Attribute('search_out_of_memory', absolute=True, min_wins=True) - search_out_of_time = Attribute('search_out_of_time', absolute=True, min_wins=True) - - extra_attributes = [ - perfect_heuristic, - proved_unsolvability, - actual_search_time, - - ms_construction_time, - ms_abstraction_constructed, - ms_final_size, - ms_out_of_memory, - ms_out_of_time, - search_out_of_memory, - search_out_of_time, - ] - attributes = exp.DEFAULT_TABLE_ATTRIBUTES - attributes.extend(extra_attributes) - - exp.add_absolute_report_step(name='issue668-v1-abp',filter_config=[ - '%s-sbf-miasm-rl-otn-abp-b50k' % 'issue668-v1', - '%s-sbf-miasm-rl-nto-abp-b50k' % 'issue668-v1', - '%s-sbf-miasm-rl-rnd-abp-b50k' % 'issue668-v1', - '%s-sbf-miasm-l-otn-abp-b50k' % 'issue668-v1', - '%s-sbf-miasm-l-nto-abp-b50k' % 'issue668-v1', - '%s-sbf-miasm-l-rnd-abp-b50k' % 'issue668-v1', - '%s-sbf-miasm-rnd-otn-abp-b50k' % 'issue668-v1', - '%s-sbf-miasm-rnd-nto-abp-b50k' % 'issue668-v1', - '%s-sbf-miasm-rnd-rnd-abp-b50k' % 'issue668-v1', - ]) - exp.add_absolute_report_step(name='issue668-v1-pba',filter_config=[ - '%s-sbf-miasm-rl-otn-pba-b50k' % 'issue668-v1', - '%s-sbf-miasm-rl-nto-pba-b50k' % 'issue668-v1', - '%s-sbf-miasm-rl-rnd-pba-b50k' % 'issue668-v1', - '%s-sbf-miasm-l-otn-pba-b50k' % 'issue668-v1', - '%s-sbf-miasm-l-nto-pba-b50k' % 'issue668-v1', - '%s-sbf-miasm-l-rnd-pba-b50k' % 'issue668-v1', - '%s-sbf-miasm-rnd-otn-pba-b50k' % 'issue668-v1', - '%s-sbf-miasm-rnd-nto-pba-b50k' % 'issue668-v1', - '%s-sbf-miasm-rnd-rnd-pba-b50k' % 'issue668-v1', - ]) - exp.add_absolute_report_step(name='issue668-v1-paper',filter_config=[ - '%s-sbf-miasm-rl-nto-abp-b50k' % 'issue668-v1', - '%s-sbf-miasm-l-nto-abp-b50k' % 'issue668-v1', - '%s-sbf-miasm-rnd-nto-abp-b50k' % 'issue668-v1', - '%s-sbf-miasm-rl-nto-pba-b50k' % 'issue668-v1', - '%s-sbf-miasm-l-nto-pba-b50k' % 'issue668-v1', - '%s-sbf-miasm-rnd-nto-pba-b50k' % 'issue668-v1', - '%s-sbf-miasm-allrnd-b50k' % 'issue668-v1', - ]) 
- - #if matplotlib: - #for attribute in ["memory", "total_time"]: - #for config in configs: - #exp.add_report( - #RelativeScatterPlotReport( - #attributes=[attribute], - #filter_config=["{}-{}".format(rev, config.nick) for rev in revisions], - #get_category=lambda run1, run2: run1.get("domain"), - #), - #outfile="{}-{}-{}.png".format(exp.name, attribute, config.nick) - #) - - exp() - -main(revisions=['issue668-v1']) diff --git a/experiments/issue668/v2-v4-compare.py b/experiments/issue668/v2-v4-compare.py deleted file mode 100755 index 4eabb2480e..0000000000 --- a/experiments/issue668/v2-v4-compare.py +++ /dev/null @@ -1,100 +0,0 @@ -#! /usr/bin/env python -# -*- coding: utf-8 -*- - -import os - -from lab.environments import LocalEnvironment, MaiaEnvironment -from lab.reports import Attribute, geometric_mean - -from downward.reports.absolute import AbsoluteReport -from downward.reports.compare import ComparativeReport - -from common_setup import IssueConfig, IssueExperiment, DEFAULT_OPTIMAL_SUITE, is_test_run - -BENCHMARKS_DIR=os.path.expanduser('~/repos/downward/benchmarks') -REVISIONS = [] -CONFIGS = [] -SUITE = DEFAULT_OPTIMAL_SUITE -ENVIRONMENT = MaiaEnvironment( - priority=0, email='silvan.sievers@unibas.ch') - -if is_test_run(): - SUITE = ['depot:p01.pddl', 'depot:p02.pddl', 'parcprinter-opt11-strips:p01.pddl', 'parcprinter-opt11-strips:p02.pddl', 'mystery:prob07.pddl'] - ENVIRONMENT = LocalEnvironment(processes=4) - -exp = IssueExperiment( - revisions=REVISIONS, - configs=CONFIGS, - environment=ENVIRONMENT, -) - -# planner outcome attributes -perfect_heuristic = Attribute('perfect_heuristic', absolute=True, min_wins=False) - -# m&s attributes -ms_construction_time = Attribute('ms_construction_time', absolute=False, min_wins=True, functions=[geometric_mean]) -ms_atomic_construction_time = Attribute('ms_atomic_construction_time', absolute=False, min_wins=True, functions=[geometric_mean]) -ms_abstraction_constructed = Attribute('ms_abstraction_constructed', 
absolute=True, min_wins=False) -ms_final_size = Attribute('ms_final_size', absolute=False, min_wins=True) -ms_out_of_memory = Attribute('ms_out_of_memory', absolute=True, min_wins=True) -ms_out_of_time = Attribute('ms_out_of_time', absolute=True, min_wins=True) -search_out_of_memory = Attribute('search_out_of_memory', absolute=True, min_wins=True) -search_out_of_time = Attribute('search_out_of_time', absolute=True, min_wins=True) - -extra_attributes = [ - perfect_heuristic, - - ms_construction_time, - ms_atomic_construction_time, - ms_abstraction_constructed, - ms_final_size, - ms_out_of_memory, - ms_out_of_time, - search_out_of_memory, - search_out_of_time, -] -attributes = exp.DEFAULT_TABLE_ATTRIBUTES -attributes.extend(extra_attributes) - -exp.add_fetcher('data/issue668-v2-eval') -exp.add_fetcher('data/issue668-v4-eval') - -exp.add_report(ComparativeReport(attributes=attributes,algorithm_pairs=[ - ('%s-sbf-miasm-rl-otn-abp-b50k' % 'issue668-v2','%s-sbf-miasm-rl-otn-abp-b50k' % 'issue668-v4'), - ('%s-sbf-miasm-rl-nto-abp-b50k' % 'issue668-v2','%s-sbf-miasm-rl-nto-abp-b50k' % 'issue668-v4'), - ('%s-sbf-miasm-rl-rnd-abp-b50k' % 'issue668-v2','%s-sbf-miasm-rl-rnd-abp-b50k' % 'issue668-v4'), - ('%s-sbf-miasm-l-otn-abp-b50k' % 'issue668-v2','%s-sbf-miasm-l-otn-abp-b50k' % 'issue668-v4'), - ('%s-sbf-miasm-l-nto-abp-b50k' % 'issue668-v2','%s-sbf-miasm-l-nto-abp-b50k' % 'issue668-v4'), - ('%s-sbf-miasm-l-rnd-abp-b50k' % 'issue668-v2','%s-sbf-miasm-l-rnd-abp-b50k' % 'issue668-v4'), - ('%s-sbf-miasm-rnd-otn-abp-b50k' % 'issue668-v2','%s-sbf-miasm-rnd-otn-abp-b50k' % 'issue668-v4'), - ('%s-sbf-miasm-rnd-nto-abp-b50k' % 'issue668-v2','%s-sbf-miasm-rnd-nto-abp-b50k' % 'issue668-v4'), - ('%s-sbf-miasm-rnd-rnd-abp-b50k' % 'issue668-v2','%s-sbf-miasm-rnd-rnd-abp-b50k' % 'issue668-v4'), -]),outfile='issue668-v2-v4-compare-abp.html') -exp.add_report(ComparativeReport(attributes=attributes,algorithm_pairs=[ - ('%s-sbf-miasm-rl-otn-pba-b50k' % 
'issue668-v2','%s-sbf-miasm-rl-otn-pba-b50k' % 'issue668-v4'), - ('%s-sbf-miasm-rl-nto-pba-b50k' % 'issue668-v2','%s-sbf-miasm-rl-nto-pba-b50k' % 'issue668-v4'), - ('%s-sbf-miasm-rl-rnd-pba-b50k' % 'issue668-v2','%s-sbf-miasm-rl-rnd-pba-b50k' % 'issue668-v4'), - ('%s-sbf-miasm-l-otn-pba-b50k' % 'issue668-v2','%s-sbf-miasm-l-otn-pba-b50k' % 'issue668-v4'), - ('%s-sbf-miasm-l-nto-pba-b50k' % 'issue668-v2','%s-sbf-miasm-l-nto-pba-b50k' % 'issue668-v4'), - ('%s-sbf-miasm-l-rnd-pba-b50k' % 'issue668-v2','%s-sbf-miasm-l-rnd-pba-b50k' % 'issue668-v4'), - ('%s-sbf-miasm-rnd-otn-pba-b50k' % 'issue668-v2','%s-sbf-miasm-rnd-otn-pba-b50k' % 'issue668-v4'), - ('%s-sbf-miasm-rnd-nto-pba-b50k' % 'issue668-v2','%s-sbf-miasm-rnd-nto-pba-b50k' % 'issue668-v4'), - ('%s-sbf-miasm-rnd-rnd-pba-b50k' % 'issue668-v2','%s-sbf-miasm-rnd-rnd-pba-b50k' % 'issue668-v4'), -]),name='issue668-v2-v4-compare-pba.html') -exp.add_report(ComparativeReport(attributes=attributes,algorithm_pairs=[ - ('%s-sbf-miasm-rl-nto-abp-b50k' % 'issue668-v2','%s-sbf-miasm-rl-nto-abp-b50k' % 'issue668-v4'), - ('%s-sbf-miasm-l-nto-abp-b50k' % 'issue668-v2','%s-sbf-miasm-l-nto-abp-b50k' % 'issue668-v4'), - ('%s-sbf-miasm-rnd-nto-abp-b50k' % 'issue668-v2','%s-sbf-miasm-rnd-nto-abp-b50k' % 'issue668-v4'), - ('%s-sbf-miasm-rl-nto-pba-b50k' % 'issue668-v2','%s-sbf-miasm-rl-nto-pba-b50k' % 'issue668-v4'), - ('%s-sbf-miasm-l-nto-pba-b50k' % 'issue668-v2','%s-sbf-miasm-l-nto-pba-b50k' % 'issue668-v4'), - ('%s-sbf-miasm-rnd-nto-pba-b50k' % 'issue668-v2','%s-sbf-miasm-rnd-nto-pba-b50k' % 'issue668-v4'), - ('%s-sbf-miasm-allrnd-b50k' % 'issue668-v2','%s-sbf-miasm-allrnd-b50k' % 'issue668-v4'), -]),name='issue668-v2-v4-compare-paper.html') - -exp.run_steps() - - - - - - diff --git a/experiments/issue668/v2.py b/experiments/issue668/v2.py deleted file mode 100755 index 4979b4e480..0000000000 --- a/experiments/issue668/v2.py +++ /dev/null @@ -1,119 +0,0 @@ -#! 
/usr/bin/env python -# -*- coding: utf-8 -*- - -import os - -from lab.environments import LocalEnvironment, MaiaEnvironment -from lab.reports import Attribute, geometric_mean - -from downward.reports.absolute import AbsoluteReport - -from common_setup import IssueConfig, IssueExperiment, DEFAULT_OPTIMAL_SUITE, is_test_run - -BENCHMARKS_DIR=os.path.expanduser('~/repos/downward/benchmarks') -REVISIONS = ["issue668-v2"] -CONFIGS = [ - IssueConfig('sbf-miasm-rl-otn-abp-b50k', ['--search', 'astar(merge_and_shrink(merge_strategy=merge_stateless(merge_selector=score_based_filtering(scoring_functions=[miasm(shrink_strategy=shrink_bisimulation(greedy=false),max_states=50000,threshold_before_merge=1),total_order(atomic_ts_order=reverse_level,product_ts_order=old_to_new,atomic_before_product=true)])),shrink_strategy=shrink_bisimulation(greedy=false),label_reduction=exact(before_shrinking=true,before_merging=false),max_states=50000,threshold_before_merge=1))']), - IssueConfig('sbf-miasm-rl-nto-abp-b50k', ['--search', 'astar(merge_and_shrink(merge_strategy=merge_stateless(merge_selector=score_based_filtering(scoring_functions=[miasm(shrink_strategy=shrink_bisimulation(greedy=false),max_states=50000,threshold_before_merge=1),total_order(atomic_ts_order=reverse_level,product_ts_order=new_to_old,atomic_before_product=true)])),shrink_strategy=shrink_bisimulation(greedy=false),label_reduction=exact(before_shrinking=true,before_merging=false),max_states=50000,threshold_before_merge=1))']), - IssueConfig('sbf-miasm-rl-rnd-abp-b50k', ['--search', 
'astar(merge_and_shrink(merge_strategy=merge_stateless(merge_selector=score_based_filtering(scoring_functions=[miasm(shrink_strategy=shrink_bisimulation(greedy=false),max_states=50000,threshold_before_merge=1),total_order(atomic_ts_order=reverse_level,product_ts_order=random,atomic_before_product=true)])),shrink_strategy=shrink_bisimulation(greedy=false),label_reduction=exact(before_shrinking=true,before_merging=false),max_states=50000,threshold_before_merge=1))']), - IssueConfig('sbf-miasm-l-otn-abp-b50k', ['--search', 'astar(merge_and_shrink(merge_strategy=merge_stateless(merge_selector=score_based_filtering(scoring_functions=[miasm(shrink_strategy=shrink_bisimulation(greedy=false),max_states=50000,threshold_before_merge=1),total_order(atomic_ts_order=level,product_ts_order=old_to_new,atomic_before_product=true)])),shrink_strategy=shrink_bisimulation(greedy=false),label_reduction=exact(before_shrinking=true,before_merging=false),max_states=50000,threshold_before_merge=1))']), - IssueConfig('sbf-miasm-l-nto-abp-b50k', ['--search', 'astar(merge_and_shrink(merge_strategy=merge_stateless(merge_selector=score_based_filtering(scoring_functions=[miasm(shrink_strategy=shrink_bisimulation(greedy=false),max_states=50000,threshold_before_merge=1),total_order(atomic_ts_order=level,product_ts_order=new_to_old,atomic_before_product=true)])),shrink_strategy=shrink_bisimulation(greedy=false),label_reduction=exact(before_shrinking=true,before_merging=false),max_states=50000,threshold_before_merge=1))']), - IssueConfig('sbf-miasm-l-rnd-abp-b50k', ['--search', 
'astar(merge_and_shrink(merge_strategy=merge_stateless(merge_selector=score_based_filtering(scoring_functions=[miasm(shrink_strategy=shrink_bisimulation(greedy=false),max_states=50000,threshold_before_merge=1),total_order(atomic_ts_order=level,product_ts_order=random,atomic_before_product=true)])),shrink_strategy=shrink_bisimulation(greedy=false),label_reduction=exact(before_shrinking=true,before_merging=false),max_states=50000,threshold_before_merge=1))']), - IssueConfig('sbf-miasm-rnd-otn-abp-b50k', ['--search', 'astar(merge_and_shrink(merge_strategy=merge_stateless(merge_selector=score_based_filtering(scoring_functions=[miasm(shrink_strategy=shrink_bisimulation(greedy=false),max_states=50000,threshold_before_merge=1),total_order(atomic_ts_order=random,product_ts_order=old_to_new,atomic_before_product=true)])),shrink_strategy=shrink_bisimulation(greedy=false),label_reduction=exact(before_shrinking=true,before_merging=false),max_states=50000,threshold_before_merge=1))']), - IssueConfig('sbf-miasm-rnd-nto-abp-b50k', ['--search', 'astar(merge_and_shrink(merge_strategy=merge_stateless(merge_selector=score_based_filtering(scoring_functions=[miasm(shrink_strategy=shrink_bisimulation(greedy=false),max_states=50000,threshold_before_merge=1),total_order(atomic_ts_order=random,product_ts_order=new_to_old,atomic_before_product=true)])),shrink_strategy=shrink_bisimulation(greedy=false),label_reduction=exact(before_shrinking=true,before_merging=false),max_states=50000,threshold_before_merge=1))']), - IssueConfig('sbf-miasm-rnd-rnd-abp-b50k', ['--search', 
'astar(merge_and_shrink(merge_strategy=merge_stateless(merge_selector=score_based_filtering(scoring_functions=[miasm(shrink_strategy=shrink_bisimulation(greedy=false),max_states=50000,threshold_before_merge=1),total_order(atomic_ts_order=random,product_ts_order=random,atomic_before_product=true)])),shrink_strategy=shrink_bisimulation(greedy=false),label_reduction=exact(before_shrinking=true,before_merging=false),max_states=50000,threshold_before_merge=1))']), - IssueConfig('sbf-miasm-rl-otn-pba-b50k', ['--search', 'astar(merge_and_shrink(merge_strategy=merge_stateless(merge_selector=score_based_filtering(scoring_functions=[miasm(shrink_strategy=shrink_bisimulation(greedy=false),max_states=50000,threshold_before_merge=1),total_order(atomic_ts_order=reverse_level,product_ts_order=old_to_new,atomic_before_product=false)])),shrink_strategy=shrink_bisimulation(greedy=false),label_reduction=exact(before_shrinking=true,before_merging=false),max_states=50000,threshold_before_merge=1))']), - IssueConfig('sbf-miasm-rl-nto-pba-b50k', ['--search', 'astar(merge_and_shrink(merge_strategy=merge_stateless(merge_selector=score_based_filtering(scoring_functions=[miasm(shrink_strategy=shrink_bisimulation(greedy=false),max_states=50000,threshold_before_merge=1),total_order(atomic_ts_order=reverse_level,product_ts_order=new_to_old,atomic_before_product=false)])),shrink_strategy=shrink_bisimulation(greedy=false),label_reduction=exact(before_shrinking=true,before_merging=false),max_states=50000,threshold_before_merge=1))']), - IssueConfig('sbf-miasm-rl-rnd-pba-b50k', ['--search', 
'astar(merge_and_shrink(merge_strategy=merge_stateless(merge_selector=score_based_filtering(scoring_functions=[miasm(shrink_strategy=shrink_bisimulation(greedy=false),max_states=50000,threshold_before_merge=1),total_order(atomic_ts_order=reverse_level,product_ts_order=random,atomic_before_product=false)])),shrink_strategy=shrink_bisimulation(greedy=false),label_reduction=exact(before_shrinking=true,before_merging=false),max_states=50000,threshold_before_merge=1))']), - IssueConfig('sbf-miasm-l-otn-pba-b50k', ['--search', 'astar(merge_and_shrink(merge_strategy=merge_stateless(merge_selector=score_based_filtering(scoring_functions=[miasm(shrink_strategy=shrink_bisimulation(greedy=false),max_states=50000,threshold_before_merge=1),total_order(atomic_ts_order=level,product_ts_order=old_to_new,atomic_before_product=false)])),shrink_strategy=shrink_bisimulation(greedy=false),label_reduction=exact(before_shrinking=true,before_merging=false),max_states=50000,threshold_before_merge=1))']), - IssueConfig('sbf-miasm-l-nto-pba-b50k', ['--search', 'astar(merge_and_shrink(merge_strategy=merge_stateless(merge_selector=score_based_filtering(scoring_functions=[miasm(shrink_strategy=shrink_bisimulation(greedy=false),max_states=50000,threshold_before_merge=1),total_order(atomic_ts_order=level,product_ts_order=new_to_old,atomic_before_product=false)])),shrink_strategy=shrink_bisimulation(greedy=false),label_reduction=exact(before_shrinking=true,before_merging=false),max_states=50000,threshold_before_merge=1))']), - IssueConfig('sbf-miasm-l-rnd-pba-b50k', ['--search', 
'astar(merge_and_shrink(merge_strategy=merge_stateless(merge_selector=score_based_filtering(scoring_functions=[miasm(shrink_strategy=shrink_bisimulation(greedy=false),max_states=50000,threshold_before_merge=1),total_order(atomic_ts_order=level,product_ts_order=random,atomic_before_product=false)])),shrink_strategy=shrink_bisimulation(greedy=false),label_reduction=exact(before_shrinking=true,before_merging=false),max_states=50000,threshold_before_merge=1))']), - IssueConfig('sbf-miasm-rnd-otn-pba-b50k', ['--search', 'astar(merge_and_shrink(merge_strategy=merge_stateless(merge_selector=score_based_filtering(scoring_functions=[miasm(shrink_strategy=shrink_bisimulation(greedy=false),max_states=50000,threshold_before_merge=1),total_order(atomic_ts_order=random,product_ts_order=old_to_new,atomic_before_product=false)])),shrink_strategy=shrink_bisimulation(greedy=false),label_reduction=exact(before_shrinking=true,before_merging=false),max_states=50000,threshold_before_merge=1))']), - IssueConfig('sbf-miasm-rnd-nto-pba-b50k', ['--search', 'astar(merge_and_shrink(merge_strategy=merge_stateless(merge_selector=score_based_filtering(scoring_functions=[miasm(shrink_strategy=shrink_bisimulation(greedy=false),max_states=50000,threshold_before_merge=1),total_order(atomic_ts_order=random,product_ts_order=new_to_old,atomic_before_product=false)])),shrink_strategy=shrink_bisimulation(greedy=false),label_reduction=exact(before_shrinking=true,before_merging=false),max_states=50000,threshold_before_merge=1))']), - IssueConfig('sbf-miasm-rnd-rnd-pba-b50k', ['--search', 
'astar(merge_and_shrink(merge_strategy=merge_stateless(merge_selector=score_based_filtering(scoring_functions=[miasm(shrink_strategy=shrink_bisimulation(greedy=false),max_states=50000,threshold_before_merge=1),total_order(atomic_ts_order=random,product_ts_order=random,atomic_before_product=false)])),shrink_strategy=shrink_bisimulation(greedy=false),label_reduction=exact(before_shrinking=true,before_merging=false),max_states=50000,threshold_before_merge=1))']), - IssueConfig('sbf-miasm-allrnd-b50k', ['--search', 'astar(merge_and_shrink(merge_strategy=merge_stateless(merge_selector=score_based_filtering(scoring_functions=[miasm(shrink_strategy=shrink_bisimulation(greedy=false),max_states=50000,threshold_before_merge=1),single_random])),shrink_strategy=shrink_bisimulation(greedy=false),label_reduction=exact(before_shrinking=true,before_merging=false),max_states=50000,threshold_before_merge=1))']), -] -SUITE = DEFAULT_OPTIMAL_SUITE -ENVIRONMENT = MaiaEnvironment( - priority=0, email='silvan.sievers@unibas.ch') - -if is_test_run(): - SUITE = ['depot:p01.pddl', 'depot:p02.pddl', 'parcprinter-opt11-strips:p01.pddl', 'parcprinter-opt11-strips:p02.pddl', 'mystery:prob07.pddl'] - ENVIRONMENT = LocalEnvironment(processes=4) - -exp = IssueExperiment( - revisions=REVISIONS, - configs=CONFIGS, - environment=ENVIRONMENT, -) -exp.add_resource('ms_parser', 'ms-parser.py', dest='ms-parser.py') -exp.add_command('ms-parser', ['{ms_parser}']) -exp.add_suite(BENCHMARKS_DIR, SUITE) - -# planner outcome attributes -perfect_heuristic = Attribute('perfect_heuristic', absolute=True, min_wins=False) - -# m&s attributes -ms_construction_time = Attribute('ms_construction_time', absolute=False, min_wins=True, functions=[geometric_mean]) -ms_atomic_construction_time = Attribute('ms_atomic_construction_time', absolute=False, min_wins=True, functions=[geometric_mean]) -ms_abstraction_constructed = Attribute('ms_abstraction_constructed', absolute=True, min_wins=False) -ms_final_size = 
Attribute('ms_final_size', absolute=False, min_wins=True) -ms_out_of_memory = Attribute('ms_out_of_memory', absolute=True, min_wins=True) -ms_out_of_time = Attribute('ms_out_of_time', absolute=True, min_wins=True) -search_out_of_memory = Attribute('search_out_of_memory', absolute=True, min_wins=True) -search_out_of_time = Attribute('search_out_of_time', absolute=True, min_wins=True) - -extra_attributes = [ - perfect_heuristic, - - ms_construction_time, - ms_atomic_construction_time, - ms_abstraction_constructed, - ms_final_size, - ms_out_of_memory, - ms_out_of_time, - search_out_of_memory, - search_out_of_time, -] -attributes = exp.DEFAULT_TABLE_ATTRIBUTES -attributes.extend(extra_attributes) - -exp.add_report(AbsoluteReport(attributes=attributes,filter_algorithm=[ - '%s-sbf-miasm-rl-otn-abp-b50k' % 'issue668-v2', - '%s-sbf-miasm-rl-nto-abp-b50k' % 'issue668-v2', - '%s-sbf-miasm-rl-rnd-abp-b50k' % 'issue668-v2', - '%s-sbf-miasm-l-otn-abp-b50k' % 'issue668-v2', - '%s-sbf-miasm-l-nto-abp-b50k' % 'issue668-v2', - '%s-sbf-miasm-l-rnd-abp-b50k' % 'issue668-v2', - '%s-sbf-miasm-rnd-otn-abp-b50k' % 'issue668-v2', - '%s-sbf-miasm-rnd-nto-abp-b50k' % 'issue668-v2', - '%s-sbf-miasm-rnd-rnd-abp-b50k' % 'issue668-v2', -]),outfile='issue668-v2-abp.html') -exp.add_report(AbsoluteReport(attributes=attributes,filter_algorithm=[ - '%s-sbf-miasm-rl-otn-pba-b50k' % 'issue668-v2', - '%s-sbf-miasm-rl-nto-pba-b50k' % 'issue668-v2', - '%s-sbf-miasm-rl-rnd-pba-b50k' % 'issue668-v2', - '%s-sbf-miasm-l-otn-pba-b50k' % 'issue668-v2', - '%s-sbf-miasm-l-nto-pba-b50k' % 'issue668-v2', - '%s-sbf-miasm-l-rnd-pba-b50k' % 'issue668-v2', - '%s-sbf-miasm-rnd-otn-pba-b50k' % 'issue668-v2', - '%s-sbf-miasm-rnd-nto-pba-b50k' % 'issue668-v2', - '%s-sbf-miasm-rnd-rnd-pba-b50k' % 'issue668-v2', -]),name='issue668-v2-pba.html') -exp.add_report(AbsoluteReport(attributes=attributes,filter_algorithm=[ - '%s-sbf-miasm-rl-nto-abp-b50k' % 'issue668-v2', - '%s-sbf-miasm-l-nto-abp-b50k' % 'issue668-v2', - 
'%s-sbf-miasm-rnd-nto-abp-b50k' % 'issue668-v2', - '%s-sbf-miasm-rl-nto-pba-b50k' % 'issue668-v2', - '%s-sbf-miasm-l-nto-pba-b50k' % 'issue668-v2', - '%s-sbf-miasm-rnd-nto-pba-b50k' % 'issue668-v2', - '%s-sbf-miasm-allrnd-b50k' % 'issue668-v2', -]),name='issue668-v2-paper.html') - -exp.run_steps() - - - - - - diff --git a/experiments/issue668/v3.py b/experiments/issue668/v3.py deleted file mode 100755 index fe837f1603..0000000000 --- a/experiments/issue668/v3.py +++ /dev/null @@ -1,119 +0,0 @@ -#! /usr/bin/env python -# -*- coding: utf-8 -*- - -import os - -from lab.environments import LocalEnvironment, MaiaEnvironment -from lab.reports import Attribute, geometric_mean - -from downward.reports.absolute import AbsoluteReport - -from common_setup import IssueConfig, IssueExperiment, DEFAULT_OPTIMAL_SUITE, is_test_run - -BENCHMARKS_DIR=os.path.expanduser('~/repos/downward/benchmarks') -REVISIONS = ["issue668-v3"] -CONFIGS = [ - IssueConfig('sbf-miasm-rl-otn-abp-b50k', ['--search', 'astar(merge_and_shrink(merge_strategy=merge_stateless(merge_selector=score_based_filtering(scoring_functions=[miasm(shrink_strategy=shrink_bisimulation(greedy=false),max_states=50000,threshold_before_merge=1),total_order(atomic_ts_order=reverse_level,product_ts_order=old_to_new,atomic_before_product=true)])),shrink_strategy=shrink_bisimulation(greedy=false),label_reduction=exact(before_shrinking=true,before_merging=false),max_states=50000,threshold_before_merge=1))']), - IssueConfig('sbf-miasm-rl-nto-abp-b50k', ['--search', 'astar(merge_and_shrink(merge_strategy=merge_stateless(merge_selector=score_based_filtering(scoring_functions=[miasm(shrink_strategy=shrink_bisimulation(greedy=false),max_states=50000,threshold_before_merge=1),total_order(atomic_ts_order=reverse_level,product_ts_order=new_to_old,atomic_before_product=true)])),shrink_strategy=shrink_bisimulation(greedy=false),label_reduction=exact(before_shrinking=true,before_merging=false),max_states=50000,threshold_before_merge=1))']), 
- IssueConfig('sbf-miasm-rl-rnd-abp-b50k', ['--search', 'astar(merge_and_shrink(merge_strategy=merge_stateless(merge_selector=score_based_filtering(scoring_functions=[miasm(shrink_strategy=shrink_bisimulation(greedy=false),max_states=50000,threshold_before_merge=1),total_order(atomic_ts_order=reverse_level,product_ts_order=random,atomic_before_product=true)])),shrink_strategy=shrink_bisimulation(greedy=false),label_reduction=exact(before_shrinking=true,before_merging=false),max_states=50000,threshold_before_merge=1))']), - IssueConfig('sbf-miasm-l-otn-abp-b50k', ['--search', 'astar(merge_and_shrink(merge_strategy=merge_stateless(merge_selector=score_based_filtering(scoring_functions=[miasm(shrink_strategy=shrink_bisimulation(greedy=false),max_states=50000,threshold_before_merge=1),total_order(atomic_ts_order=level,product_ts_order=old_to_new,atomic_before_product=true)])),shrink_strategy=shrink_bisimulation(greedy=false),label_reduction=exact(before_shrinking=true,before_merging=false),max_states=50000,threshold_before_merge=1))']), - IssueConfig('sbf-miasm-l-nto-abp-b50k', ['--search', 'astar(merge_and_shrink(merge_strategy=merge_stateless(merge_selector=score_based_filtering(scoring_functions=[miasm(shrink_strategy=shrink_bisimulation(greedy=false),max_states=50000,threshold_before_merge=1),total_order(atomic_ts_order=level,product_ts_order=new_to_old,atomic_before_product=true)])),shrink_strategy=shrink_bisimulation(greedy=false),label_reduction=exact(before_shrinking=true,before_merging=false),max_states=50000,threshold_before_merge=1))']), - IssueConfig('sbf-miasm-l-rnd-abp-b50k', ['--search', 
'astar(merge_and_shrink(merge_strategy=merge_stateless(merge_selector=score_based_filtering(scoring_functions=[miasm(shrink_strategy=shrink_bisimulation(greedy=false),max_states=50000,threshold_before_merge=1),total_order(atomic_ts_order=level,product_ts_order=random,atomic_before_product=true)])),shrink_strategy=shrink_bisimulation(greedy=false),label_reduction=exact(before_shrinking=true,before_merging=false),max_states=50000,threshold_before_merge=1))']), - IssueConfig('sbf-miasm-rnd-otn-abp-b50k', ['--search', 'astar(merge_and_shrink(merge_strategy=merge_stateless(merge_selector=score_based_filtering(scoring_functions=[miasm(shrink_strategy=shrink_bisimulation(greedy=false),max_states=50000,threshold_before_merge=1),total_order(atomic_ts_order=random,product_ts_order=old_to_new,atomic_before_product=true)])),shrink_strategy=shrink_bisimulation(greedy=false),label_reduction=exact(before_shrinking=true,before_merging=false),max_states=50000,threshold_before_merge=1))']), - IssueConfig('sbf-miasm-rnd-nto-abp-b50k', ['--search', 'astar(merge_and_shrink(merge_strategy=merge_stateless(merge_selector=score_based_filtering(scoring_functions=[miasm(shrink_strategy=shrink_bisimulation(greedy=false),max_states=50000,threshold_before_merge=1),total_order(atomic_ts_order=random,product_ts_order=new_to_old,atomic_before_product=true)])),shrink_strategy=shrink_bisimulation(greedy=false),label_reduction=exact(before_shrinking=true,before_merging=false),max_states=50000,threshold_before_merge=1))']), - IssueConfig('sbf-miasm-rnd-rnd-abp-b50k', ['--search', 
'astar(merge_and_shrink(merge_strategy=merge_stateless(merge_selector=score_based_filtering(scoring_functions=[miasm(shrink_strategy=shrink_bisimulation(greedy=false),max_states=50000,threshold_before_merge=1),total_order(atomic_ts_order=random,product_ts_order=random,atomic_before_product=true)])),shrink_strategy=shrink_bisimulation(greedy=false),label_reduction=exact(before_shrinking=true,before_merging=false),max_states=50000,threshold_before_merge=1))']), - IssueConfig('sbf-miasm-rl-otn-pba-b50k', ['--search', 'astar(merge_and_shrink(merge_strategy=merge_stateless(merge_selector=score_based_filtering(scoring_functions=[miasm(shrink_strategy=shrink_bisimulation(greedy=false),max_states=50000,threshold_before_merge=1),total_order(atomic_ts_order=reverse_level,product_ts_order=old_to_new,atomic_before_product=false)])),shrink_strategy=shrink_bisimulation(greedy=false),label_reduction=exact(before_shrinking=true,before_merging=false),max_states=50000,threshold_before_merge=1))']), - IssueConfig('sbf-miasm-rl-nto-pba-b50k', ['--search', 'astar(merge_and_shrink(merge_strategy=merge_stateless(merge_selector=score_based_filtering(scoring_functions=[miasm(shrink_strategy=shrink_bisimulation(greedy=false),max_states=50000,threshold_before_merge=1),total_order(atomic_ts_order=reverse_level,product_ts_order=new_to_old,atomic_before_product=false)])),shrink_strategy=shrink_bisimulation(greedy=false),label_reduction=exact(before_shrinking=true,before_merging=false),max_states=50000,threshold_before_merge=1))']), - IssueConfig('sbf-miasm-rl-rnd-pba-b50k', ['--search', 
'astar(merge_and_shrink(merge_strategy=merge_stateless(merge_selector=score_based_filtering(scoring_functions=[miasm(shrink_strategy=shrink_bisimulation(greedy=false),max_states=50000,threshold_before_merge=1),total_order(atomic_ts_order=reverse_level,product_ts_order=random,atomic_before_product=false)])),shrink_strategy=shrink_bisimulation(greedy=false),label_reduction=exact(before_shrinking=true,before_merging=false),max_states=50000,threshold_before_merge=1))']), - IssueConfig('sbf-miasm-l-otn-pba-b50k', ['--search', 'astar(merge_and_shrink(merge_strategy=merge_stateless(merge_selector=score_based_filtering(scoring_functions=[miasm(shrink_strategy=shrink_bisimulation(greedy=false),max_states=50000,threshold_before_merge=1),total_order(atomic_ts_order=level,product_ts_order=old_to_new,atomic_before_product=false)])),shrink_strategy=shrink_bisimulation(greedy=false),label_reduction=exact(before_shrinking=true,before_merging=false),max_states=50000,threshold_before_merge=1))']), - IssueConfig('sbf-miasm-l-nto-pba-b50k', ['--search', 'astar(merge_and_shrink(merge_strategy=merge_stateless(merge_selector=score_based_filtering(scoring_functions=[miasm(shrink_strategy=shrink_bisimulation(greedy=false),max_states=50000,threshold_before_merge=1),total_order(atomic_ts_order=level,product_ts_order=new_to_old,atomic_before_product=false)])),shrink_strategy=shrink_bisimulation(greedy=false),label_reduction=exact(before_shrinking=true,before_merging=false),max_states=50000,threshold_before_merge=1))']), - IssueConfig('sbf-miasm-l-rnd-pba-b50k', ['--search', 
'astar(merge_and_shrink(merge_strategy=merge_stateless(merge_selector=score_based_filtering(scoring_functions=[miasm(shrink_strategy=shrink_bisimulation(greedy=false),max_states=50000,threshold_before_merge=1),total_order(atomic_ts_order=level,product_ts_order=random,atomic_before_product=false)])),shrink_strategy=shrink_bisimulation(greedy=false),label_reduction=exact(before_shrinking=true,before_merging=false),max_states=50000,threshold_before_merge=1))']), - IssueConfig('sbf-miasm-rnd-otn-pba-b50k', ['--search', 'astar(merge_and_shrink(merge_strategy=merge_stateless(merge_selector=score_based_filtering(scoring_functions=[miasm(shrink_strategy=shrink_bisimulation(greedy=false),max_states=50000,threshold_before_merge=1),total_order(atomic_ts_order=random,product_ts_order=old_to_new,atomic_before_product=false)])),shrink_strategy=shrink_bisimulation(greedy=false),label_reduction=exact(before_shrinking=true,before_merging=false),max_states=50000,threshold_before_merge=1))']), - IssueConfig('sbf-miasm-rnd-nto-pba-b50k', ['--search', 'astar(merge_and_shrink(merge_strategy=merge_stateless(merge_selector=score_based_filtering(scoring_functions=[miasm(shrink_strategy=shrink_bisimulation(greedy=false),max_states=50000,threshold_before_merge=1),total_order(atomic_ts_order=random,product_ts_order=new_to_old,atomic_before_product=false)])),shrink_strategy=shrink_bisimulation(greedy=false),label_reduction=exact(before_shrinking=true,before_merging=false),max_states=50000,threshold_before_merge=1))']), - IssueConfig('sbf-miasm-rnd-rnd-pba-b50k', ['--search', 
'astar(merge_and_shrink(merge_strategy=merge_stateless(merge_selector=score_based_filtering(scoring_functions=[miasm(shrink_strategy=shrink_bisimulation(greedy=false),max_states=50000,threshold_before_merge=1),total_order(atomic_ts_order=random,product_ts_order=random,atomic_before_product=false)])),shrink_strategy=shrink_bisimulation(greedy=false),label_reduction=exact(before_shrinking=true,before_merging=false),max_states=50000,threshold_before_merge=1))']), - IssueConfig('sbf-miasm-allrnd-b50k', ['--search', 'astar(merge_and_shrink(merge_strategy=merge_stateless(merge_selector=score_based_filtering(scoring_functions=[miasm(shrink_strategy=shrink_bisimulation(greedy=false),max_states=50000,threshold_before_merge=1),single_random])),shrink_strategy=shrink_bisimulation(greedy=false),label_reduction=exact(before_shrinking=true,before_merging=false),max_states=50000,threshold_before_merge=1))']), -] -SUITE = DEFAULT_OPTIMAL_SUITE -ENVIRONMENT = MaiaEnvironment( - priority=0, email='silvan.sievers@unibas.ch') - -if is_test_run(): - SUITE = ['depot:p01.pddl', 'depot:p02.pddl', 'parcprinter-opt11-strips:p01.pddl', 'parcprinter-opt11-strips:p02.pddl', 'mystery:prob07.pddl'] - ENVIRONMENT = LocalEnvironment(processes=4) - -exp = IssueExperiment( - revisions=REVISIONS, - configs=CONFIGS, - environment=ENVIRONMENT, -) -exp.add_resource('ms_parser', 'ms-parser.py', dest='ms-parser.py') -exp.add_command('ms-parser', ['{ms_parser}']) -exp.add_suite(BENCHMARKS_DIR, SUITE) - -# planner outcome attributes -perfect_heuristic = Attribute('perfect_heuristic', absolute=True, min_wins=False) - -# m&s attributes -ms_construction_time = Attribute('ms_construction_time', absolute=False, min_wins=True, functions=[geometric_mean]) -ms_atomic_construction_time = Attribute('ms_atomic_construction_time', absolute=False, min_wins=True, functions=[geometric_mean]) -ms_abstraction_constructed = Attribute('ms_abstraction_constructed', absolute=True, min_wins=False) -ms_final_size = 
Attribute('ms_final_size', absolute=False, min_wins=True) -ms_out_of_memory = Attribute('ms_out_of_memory', absolute=True, min_wins=True) -ms_out_of_time = Attribute('ms_out_of_time', absolute=True, min_wins=True) -search_out_of_memory = Attribute('search_out_of_memory', absolute=True, min_wins=True) -search_out_of_time = Attribute('search_out_of_time', absolute=True, min_wins=True) - -extra_attributes = [ - perfect_heuristic, - - ms_construction_time, - ms_atomic_construction_time, - ms_abstraction_constructed, - ms_final_size, - ms_out_of_memory, - ms_out_of_time, - search_out_of_memory, - search_out_of_time, -] -attributes = exp.DEFAULT_TABLE_ATTRIBUTES -attributes.extend(extra_attributes) - -exp.add_report(AbsoluteReport(attributes=attributes,filter_algorithm=[ - '%s-sbf-miasm-rl-otn-abp-b50k' % 'issue668-v3', - '%s-sbf-miasm-rl-nto-abp-b50k' % 'issue668-v3', - '%s-sbf-miasm-rl-rnd-abp-b50k' % 'issue668-v3', - '%s-sbf-miasm-l-otn-abp-b50k' % 'issue668-v3', - '%s-sbf-miasm-l-nto-abp-b50k' % 'issue668-v3', - '%s-sbf-miasm-l-rnd-abp-b50k' % 'issue668-v3', - '%s-sbf-miasm-rnd-otn-abp-b50k' % 'issue668-v3', - '%s-sbf-miasm-rnd-nto-abp-b50k' % 'issue668-v3', - '%s-sbf-miasm-rnd-rnd-abp-b50k' % 'issue668-v3', -]),outfile='issue668-v3-abp.html') -exp.add_report(AbsoluteReport(attributes=attributes,filter_algorithm=[ - '%s-sbf-miasm-rl-otn-pba-b50k' % 'issue668-v3', - '%s-sbf-miasm-rl-nto-pba-b50k' % 'issue668-v3', - '%s-sbf-miasm-rl-rnd-pba-b50k' % 'issue668-v3', - '%s-sbf-miasm-l-otn-pba-b50k' % 'issue668-v3', - '%s-sbf-miasm-l-nto-pba-b50k' % 'issue668-v3', - '%s-sbf-miasm-l-rnd-pba-b50k' % 'issue668-v3', - '%s-sbf-miasm-rnd-otn-pba-b50k' % 'issue668-v3', - '%s-sbf-miasm-rnd-nto-pba-b50k' % 'issue668-v3', - '%s-sbf-miasm-rnd-rnd-pba-b50k' % 'issue668-v3', -]),name='issue668-v3-pba.html') -exp.add_report(AbsoluteReport(attributes=attributes,filter_algorithm=[ - '%s-sbf-miasm-rl-nto-abp-b50k' % 'issue668-v3', - '%s-sbf-miasm-l-nto-abp-b50k' % 'issue668-v3', - 
'%s-sbf-miasm-rnd-nto-abp-b50k' % 'issue668-v3', - '%s-sbf-miasm-rl-nto-pba-b50k' % 'issue668-v3', - '%s-sbf-miasm-l-nto-pba-b50k' % 'issue668-v3', - '%s-sbf-miasm-rnd-nto-pba-b50k' % 'issue668-v3', - '%s-sbf-miasm-allrnd-b50k' % 'issue668-v3', -]),name='issue668-v3-paper.html') - -exp.run_steps() - - - - - - diff --git a/experiments/issue668/v4.py b/experiments/issue668/v4.py deleted file mode 100755 index 4a3ba5fd40..0000000000 --- a/experiments/issue668/v4.py +++ /dev/null @@ -1,119 +0,0 @@ -#! /usr/bin/env python -# -*- coding: utf-8 -*- - -import os - -from lab.environments import LocalEnvironment, MaiaEnvironment -from lab.reports import Attribute, geometric_mean - -from downward.reports.absolute import AbsoluteReport - -from common_setup import IssueConfig, IssueExperiment, DEFAULT_OPTIMAL_SUITE, is_test_run - -BENCHMARKS_DIR=os.path.expanduser('~/repos/downward/benchmarks') -REVISIONS = ["issue668-v4"] -CONFIGS = [ - IssueConfig('sbf-miasm-rl-otn-abp-b50k', ['--search', 'astar(merge_and_shrink(merge_strategy=merge_stateless(merge_selector=score_based_filtering(scoring_functions=[sf_miasm(shrink_strategy=shrink_bisimulation(greedy=false),max_states=50000,threshold_before_merge=1),total_order(atomic_ts_order=reverse_level,product_ts_order=old_to_new,atomic_before_product=true)])),shrink_strategy=shrink_bisimulation(greedy=false),label_reduction=exact(before_shrinking=true,before_merging=false),max_states=50000,threshold_before_merge=1))']), - IssueConfig('sbf-miasm-rl-nto-abp-b50k', ['--search', 
'astar(merge_and_shrink(merge_strategy=merge_stateless(merge_selector=score_based_filtering(scoring_functions=[sf_miasm(shrink_strategy=shrink_bisimulation(greedy=false),max_states=50000,threshold_before_merge=1),total_order(atomic_ts_order=reverse_level,product_ts_order=new_to_old,atomic_before_product=true)])),shrink_strategy=shrink_bisimulation(greedy=false),label_reduction=exact(before_shrinking=true,before_merging=false),max_states=50000,threshold_before_merge=1))']), - IssueConfig('sbf-miasm-rl-rnd-abp-b50k', ['--search', 'astar(merge_and_shrink(merge_strategy=merge_stateless(merge_selector=score_based_filtering(scoring_functions=[sf_miasm(shrink_strategy=shrink_bisimulation(greedy=false),max_states=50000,threshold_before_merge=1),total_order(atomic_ts_order=reverse_level,product_ts_order=random,atomic_before_product=true)])),shrink_strategy=shrink_bisimulation(greedy=false),label_reduction=exact(before_shrinking=true,before_merging=false),max_states=50000,threshold_before_merge=1))']), - IssueConfig('sbf-miasm-l-otn-abp-b50k', ['--search', 'astar(merge_and_shrink(merge_strategy=merge_stateless(merge_selector=score_based_filtering(scoring_functions=[sf_miasm(shrink_strategy=shrink_bisimulation(greedy=false),max_states=50000,threshold_before_merge=1),total_order(atomic_ts_order=level,product_ts_order=old_to_new,atomic_before_product=true)])),shrink_strategy=shrink_bisimulation(greedy=false),label_reduction=exact(before_shrinking=true,before_merging=false),max_states=50000,threshold_before_merge=1))']), - IssueConfig('sbf-miasm-l-nto-abp-b50k', ['--search', 
'astar(merge_and_shrink(merge_strategy=merge_stateless(merge_selector=score_based_filtering(scoring_functions=[sf_miasm(shrink_strategy=shrink_bisimulation(greedy=false),max_states=50000,threshold_before_merge=1),total_order(atomic_ts_order=level,product_ts_order=new_to_old,atomic_before_product=true)])),shrink_strategy=shrink_bisimulation(greedy=false),label_reduction=exact(before_shrinking=true,before_merging=false),max_states=50000,threshold_before_merge=1))']), - IssueConfig('sbf-miasm-l-rnd-abp-b50k', ['--search', 'astar(merge_and_shrink(merge_strategy=merge_stateless(merge_selector=score_based_filtering(scoring_functions=[sf_miasm(shrink_strategy=shrink_bisimulation(greedy=false),max_states=50000,threshold_before_merge=1),total_order(atomic_ts_order=level,product_ts_order=random,atomic_before_product=true)])),shrink_strategy=shrink_bisimulation(greedy=false),label_reduction=exact(before_shrinking=true,before_merging=false),max_states=50000,threshold_before_merge=1))']), - IssueConfig('sbf-miasm-rnd-otn-abp-b50k', ['--search', 'astar(merge_and_shrink(merge_strategy=merge_stateless(merge_selector=score_based_filtering(scoring_functions=[sf_miasm(shrink_strategy=shrink_bisimulation(greedy=false),max_states=50000,threshold_before_merge=1),total_order(atomic_ts_order=random,product_ts_order=old_to_new,atomic_before_product=true)])),shrink_strategy=shrink_bisimulation(greedy=false),label_reduction=exact(before_shrinking=true,before_merging=false),max_states=50000,threshold_before_merge=1))']), - IssueConfig('sbf-miasm-rnd-nto-abp-b50k', ['--search', 
'astar(merge_and_shrink(merge_strategy=merge_stateless(merge_selector=score_based_filtering(scoring_functions=[sf_miasm(shrink_strategy=shrink_bisimulation(greedy=false),max_states=50000,threshold_before_merge=1),total_order(atomic_ts_order=random,product_ts_order=new_to_old,atomic_before_product=true)])),shrink_strategy=shrink_bisimulation(greedy=false),label_reduction=exact(before_shrinking=true,before_merging=false),max_states=50000,threshold_before_merge=1))']), - IssueConfig('sbf-miasm-rnd-rnd-abp-b50k', ['--search', 'astar(merge_and_shrink(merge_strategy=merge_stateless(merge_selector=score_based_filtering(scoring_functions=[sf_miasm(shrink_strategy=shrink_bisimulation(greedy=false),max_states=50000,threshold_before_merge=1),total_order(atomic_ts_order=random,product_ts_order=random,atomic_before_product=true)])),shrink_strategy=shrink_bisimulation(greedy=false),label_reduction=exact(before_shrinking=true,before_merging=false),max_states=50000,threshold_before_merge=1))']), - IssueConfig('sbf-miasm-rl-otn-pba-b50k', ['--search', 'astar(merge_and_shrink(merge_strategy=merge_stateless(merge_selector=score_based_filtering(scoring_functions=[sf_miasm(shrink_strategy=shrink_bisimulation(greedy=false),max_states=50000,threshold_before_merge=1),total_order(atomic_ts_order=reverse_level,product_ts_order=old_to_new,atomic_before_product=false)])),shrink_strategy=shrink_bisimulation(greedy=false),label_reduction=exact(before_shrinking=true,before_merging=false),max_states=50000,threshold_before_merge=1))']), - IssueConfig('sbf-miasm-rl-nto-pba-b50k', ['--search', 
'astar(merge_and_shrink(merge_strategy=merge_stateless(merge_selector=score_based_filtering(scoring_functions=[sf_miasm(shrink_strategy=shrink_bisimulation(greedy=false),max_states=50000,threshold_before_merge=1),total_order(atomic_ts_order=reverse_level,product_ts_order=new_to_old,atomic_before_product=false)])),shrink_strategy=shrink_bisimulation(greedy=false),label_reduction=exact(before_shrinking=true,before_merging=false),max_states=50000,threshold_before_merge=1))']), - IssueConfig('sbf-miasm-rl-rnd-pba-b50k', ['--search', 'astar(merge_and_shrink(merge_strategy=merge_stateless(merge_selector=score_based_filtering(scoring_functions=[sf_miasm(shrink_strategy=shrink_bisimulation(greedy=false),max_states=50000,threshold_before_merge=1),total_order(atomic_ts_order=reverse_level,product_ts_order=random,atomic_before_product=false)])),shrink_strategy=shrink_bisimulation(greedy=false),label_reduction=exact(before_shrinking=true,before_merging=false),max_states=50000,threshold_before_merge=1))']), - IssueConfig('sbf-miasm-l-otn-pba-b50k', ['--search', 'astar(merge_and_shrink(merge_strategy=merge_stateless(merge_selector=score_based_filtering(scoring_functions=[sf_miasm(shrink_strategy=shrink_bisimulation(greedy=false),max_states=50000,threshold_before_merge=1),total_order(atomic_ts_order=level,product_ts_order=old_to_new,atomic_before_product=false)])),shrink_strategy=shrink_bisimulation(greedy=false),label_reduction=exact(before_shrinking=true,before_merging=false),max_states=50000,threshold_before_merge=1))']), - IssueConfig('sbf-miasm-l-nto-pba-b50k', ['--search', 
'astar(merge_and_shrink(merge_strategy=merge_stateless(merge_selector=score_based_filtering(scoring_functions=[sf_miasm(shrink_strategy=shrink_bisimulation(greedy=false),max_states=50000,threshold_before_merge=1),total_order(atomic_ts_order=level,product_ts_order=new_to_old,atomic_before_product=false)])),shrink_strategy=shrink_bisimulation(greedy=false),label_reduction=exact(before_shrinking=true,before_merging=false),max_states=50000,threshold_before_merge=1))']), - IssueConfig('sbf-miasm-l-rnd-pba-b50k', ['--search', 'astar(merge_and_shrink(merge_strategy=merge_stateless(merge_selector=score_based_filtering(scoring_functions=[sf_miasm(shrink_strategy=shrink_bisimulation(greedy=false),max_states=50000,threshold_before_merge=1),total_order(atomic_ts_order=level,product_ts_order=random,atomic_before_product=false)])),shrink_strategy=shrink_bisimulation(greedy=false),label_reduction=exact(before_shrinking=true,before_merging=false),max_states=50000,threshold_before_merge=1))']), - IssueConfig('sbf-miasm-rnd-otn-pba-b50k', ['--search', 'astar(merge_and_shrink(merge_strategy=merge_stateless(merge_selector=score_based_filtering(scoring_functions=[sf_miasm(shrink_strategy=shrink_bisimulation(greedy=false),max_states=50000,threshold_before_merge=1),total_order(atomic_ts_order=random,product_ts_order=old_to_new,atomic_before_product=false)])),shrink_strategy=shrink_bisimulation(greedy=false),label_reduction=exact(before_shrinking=true,before_merging=false),max_states=50000,threshold_before_merge=1))']), - IssueConfig('sbf-miasm-rnd-nto-pba-b50k', ['--search', 
'astar(merge_and_shrink(merge_strategy=merge_stateless(merge_selector=score_based_filtering(scoring_functions=[sf_miasm(shrink_strategy=shrink_bisimulation(greedy=false),max_states=50000,threshold_before_merge=1),total_order(atomic_ts_order=random,product_ts_order=new_to_old,atomic_before_product=false)])),shrink_strategy=shrink_bisimulation(greedy=false),label_reduction=exact(before_shrinking=true,before_merging=false),max_states=50000,threshold_before_merge=1))']), - IssueConfig('sbf-miasm-rnd-rnd-pba-b50k', ['--search', 'astar(merge_and_shrink(merge_strategy=merge_stateless(merge_selector=score_based_filtering(scoring_functions=[sf_miasm(shrink_strategy=shrink_bisimulation(greedy=false),max_states=50000,threshold_before_merge=1),total_order(atomic_ts_order=random,product_ts_order=random,atomic_before_product=false)])),shrink_strategy=shrink_bisimulation(greedy=false),label_reduction=exact(before_shrinking=true,before_merging=false),max_states=50000,threshold_before_merge=1))']), - IssueConfig('sbf-miasm-allrnd-b50k', ['--search', 'astar(merge_and_shrink(merge_strategy=merge_stateless(merge_selector=score_based_filtering(scoring_functions=[sf_miasm(shrink_strategy=shrink_bisimulation(greedy=false),max_states=50000,threshold_before_merge=1),single_random])),shrink_strategy=shrink_bisimulation(greedy=false),label_reduction=exact(before_shrinking=true,before_merging=false),max_states=50000,threshold_before_merge=1))']), -] -SUITE = DEFAULT_OPTIMAL_SUITE -ENVIRONMENT = MaiaEnvironment( - priority=0, email='silvan.sievers@unibas.ch') - -if is_test_run(): - SUITE = ['depot:p01.pddl', 'depot:p02.pddl', 'parcprinter-opt11-strips:p01.pddl', 'parcprinter-opt11-strips:p02.pddl', 'mystery:prob07.pddl'] - ENVIRONMENT = LocalEnvironment(processes=4) - -exp = IssueExperiment( - revisions=REVISIONS, - configs=CONFIGS, - environment=ENVIRONMENT, -) -exp.add_resource('ms_parser', 'ms-parser.py', dest='ms-parser.py') -exp.add_command('ms-parser', ['{ms_parser}']) 
-exp.add_suite(BENCHMARKS_DIR, SUITE) - -# planner outcome attributes -perfect_heuristic = Attribute('perfect_heuristic', absolute=True, min_wins=False) - -# m&s attributes -ms_construction_time = Attribute('ms_construction_time', absolute=False, min_wins=True, functions=[geometric_mean]) -ms_atomic_construction_time = Attribute('ms_atomic_construction_time', absolute=False, min_wins=True, functions=[geometric_mean]) -ms_abstraction_constructed = Attribute('ms_abstraction_constructed', absolute=True, min_wins=False) -ms_final_size = Attribute('ms_final_size', absolute=False, min_wins=True) -ms_out_of_memory = Attribute('ms_out_of_memory', absolute=True, min_wins=True) -ms_out_of_time = Attribute('ms_out_of_time', absolute=True, min_wins=True) -search_out_of_memory = Attribute('search_out_of_memory', absolute=True, min_wins=True) -search_out_of_time = Attribute('search_out_of_time', absolute=True, min_wins=True) - -extra_attributes = [ - perfect_heuristic, - - ms_construction_time, - ms_atomic_construction_time, - ms_abstraction_constructed, - ms_final_size, - ms_out_of_memory, - ms_out_of_time, - search_out_of_memory, - search_out_of_time, -] -attributes = exp.DEFAULT_TABLE_ATTRIBUTES -attributes.extend(extra_attributes) - -exp.add_report(AbsoluteReport(attributes=attributes,filter_algorithm=[ - '%s-sbf-miasm-rl-otn-abp-b50k' % 'issue668-v4', - '%s-sbf-miasm-rl-nto-abp-b50k' % 'issue668-v4', - '%s-sbf-miasm-rl-rnd-abp-b50k' % 'issue668-v4', - '%s-sbf-miasm-l-otn-abp-b50k' % 'issue668-v4', - '%s-sbf-miasm-l-nto-abp-b50k' % 'issue668-v4', - '%s-sbf-miasm-l-rnd-abp-b50k' % 'issue668-v4', - '%s-sbf-miasm-rnd-otn-abp-b50k' % 'issue668-v4', - '%s-sbf-miasm-rnd-nto-abp-b50k' % 'issue668-v4', - '%s-sbf-miasm-rnd-rnd-abp-b50k' % 'issue668-v4', -]),outfile='issue668-v4-abp.html') -exp.add_report(AbsoluteReport(attributes=attributes,filter_algorithm=[ - '%s-sbf-miasm-rl-otn-pba-b50k' % 'issue668-v4', - '%s-sbf-miasm-rl-nto-pba-b50k' % 'issue668-v4', - 
'%s-sbf-miasm-rl-rnd-pba-b50k' % 'issue668-v4', - '%s-sbf-miasm-l-otn-pba-b50k' % 'issue668-v4', - '%s-sbf-miasm-l-nto-pba-b50k' % 'issue668-v4', - '%s-sbf-miasm-l-rnd-pba-b50k' % 'issue668-v4', - '%s-sbf-miasm-rnd-otn-pba-b50k' % 'issue668-v4', - '%s-sbf-miasm-rnd-nto-pba-b50k' % 'issue668-v4', - '%s-sbf-miasm-rnd-rnd-pba-b50k' % 'issue668-v4', -]),name='issue668-v4-pba.html') -exp.add_report(AbsoluteReport(attributes=attributes,filter_algorithm=[ - '%s-sbf-miasm-rl-nto-abp-b50k' % 'issue668-v4', - '%s-sbf-miasm-l-nto-abp-b50k' % 'issue668-v4', - '%s-sbf-miasm-rnd-nto-abp-b50k' % 'issue668-v4', - '%s-sbf-miasm-rl-nto-pba-b50k' % 'issue668-v4', - '%s-sbf-miasm-l-nto-pba-b50k' % 'issue668-v4', - '%s-sbf-miasm-rnd-nto-pba-b50k' % 'issue668-v4', - '%s-sbf-miasm-allrnd-b50k' % 'issue668-v4', -]),name='issue668-v4-paper.html') - -exp.run_steps() - - - - - - diff --git a/experiments/issue668/v5-clean.py b/experiments/issue668/v5-clean.py deleted file mode 100755 index afb56551a0..0000000000 --- a/experiments/issue668/v5-clean.py +++ /dev/null @@ -1,121 +0,0 @@ -#! 
/usr/bin/env python -# -*- coding: utf-8 -*- - -import os - -from lab.environments import LocalEnvironment, BaselSlurmEnvironment -from lab.reports import Attribute, geometric_mean - -from downward.reports.absolute import AbsoluteReport - -from common_setup import IssueConfig, IssueExperiment, DEFAULT_OPTIMAL_SUITE, is_test_run - -BENCHMARKS_DIR=os.path.expanduser('~/repos/downward/benchmarks') -REVISIONS = ["issue668-v5-clean"] -CONFIGS = [ - IssueConfig('sbf-miasm-rl-otn-abp-b50k', ['--search', 'astar(merge_and_shrink(merge_strategy=merge_stateless(merge_selector=score_based_filtering(scoring_functions=[sf_miasm(shrink_strategy=shrink_bisimulation(greedy=false),max_states=50000,threshold_before_merge=1),total_order(atomic_ts_order=reverse_level,product_ts_order=old_to_new,atomic_before_product=true)])),shrink_strategy=shrink_bisimulation(greedy=false),label_reduction=exact(before_shrinking=true,before_merging=false),max_states=50000,threshold_before_merge=1))']), - IssueConfig('sbf-miasm-rl-nto-abp-b50k', ['--search', 'astar(merge_and_shrink(merge_strategy=merge_stateless(merge_selector=score_based_filtering(scoring_functions=[sf_miasm(shrink_strategy=shrink_bisimulation(greedy=false),max_states=50000,threshold_before_merge=1),total_order(atomic_ts_order=reverse_level,product_ts_order=new_to_old,atomic_before_product=true)])),shrink_strategy=shrink_bisimulation(greedy=false),label_reduction=exact(before_shrinking=true,before_merging=false),max_states=50000,threshold_before_merge=1))']), - IssueConfig('sbf-miasm-rl-rnd-abp-b50k', ['--search', 
'astar(merge_and_shrink(merge_strategy=merge_stateless(merge_selector=score_based_filtering(scoring_functions=[sf_miasm(shrink_strategy=shrink_bisimulation(greedy=false),max_states=50000,threshold_before_merge=1),total_order(atomic_ts_order=reverse_level,product_ts_order=random,atomic_before_product=true)])),shrink_strategy=shrink_bisimulation(greedy=false),label_reduction=exact(before_shrinking=true,before_merging=false),max_states=50000,threshold_before_merge=1))']), - IssueConfig('sbf-miasm-l-otn-abp-b50k', ['--search', 'astar(merge_and_shrink(merge_strategy=merge_stateless(merge_selector=score_based_filtering(scoring_functions=[sf_miasm(shrink_strategy=shrink_bisimulation(greedy=false),max_states=50000,threshold_before_merge=1),total_order(atomic_ts_order=level,product_ts_order=old_to_new,atomic_before_product=true)])),shrink_strategy=shrink_bisimulation(greedy=false),label_reduction=exact(before_shrinking=true,before_merging=false),max_states=50000,threshold_before_merge=1))']), - IssueConfig('sbf-miasm-l-nto-abp-b50k', ['--search', 'astar(merge_and_shrink(merge_strategy=merge_stateless(merge_selector=score_based_filtering(scoring_functions=[sf_miasm(shrink_strategy=shrink_bisimulation(greedy=false),max_states=50000,threshold_before_merge=1),total_order(atomic_ts_order=level,product_ts_order=new_to_old,atomic_before_product=true)])),shrink_strategy=shrink_bisimulation(greedy=false),label_reduction=exact(before_shrinking=true,before_merging=false),max_states=50000,threshold_before_merge=1))']), - IssueConfig('sbf-miasm-l-rnd-abp-b50k', ['--search', 
'astar(merge_and_shrink(merge_strategy=merge_stateless(merge_selector=score_based_filtering(scoring_functions=[sf_miasm(shrink_strategy=shrink_bisimulation(greedy=false),max_states=50000,threshold_before_merge=1),total_order(atomic_ts_order=level,product_ts_order=random,atomic_before_product=true)])),shrink_strategy=shrink_bisimulation(greedy=false),label_reduction=exact(before_shrinking=true,before_merging=false),max_states=50000,threshold_before_merge=1))']), - IssueConfig('sbf-miasm-rnd-otn-abp-b50k', ['--search', 'astar(merge_and_shrink(merge_strategy=merge_stateless(merge_selector=score_based_filtering(scoring_functions=[sf_miasm(shrink_strategy=shrink_bisimulation(greedy=false),max_states=50000,threshold_before_merge=1),total_order(atomic_ts_order=random,product_ts_order=old_to_new,atomic_before_product=true)])),shrink_strategy=shrink_bisimulation(greedy=false),label_reduction=exact(before_shrinking=true,before_merging=false),max_states=50000,threshold_before_merge=1))']), - IssueConfig('sbf-miasm-rnd-nto-abp-b50k', ['--search', 'astar(merge_and_shrink(merge_strategy=merge_stateless(merge_selector=score_based_filtering(scoring_functions=[sf_miasm(shrink_strategy=shrink_bisimulation(greedy=false),max_states=50000,threshold_before_merge=1),total_order(atomic_ts_order=random,product_ts_order=new_to_old,atomic_before_product=true)])),shrink_strategy=shrink_bisimulation(greedy=false),label_reduction=exact(before_shrinking=true,before_merging=false),max_states=50000,threshold_before_merge=1))']), - IssueConfig('sbf-miasm-rnd-rnd-abp-b50k', ['--search', 
'astar(merge_and_shrink(merge_strategy=merge_stateless(merge_selector=score_based_filtering(scoring_functions=[sf_miasm(shrink_strategy=shrink_bisimulation(greedy=false),max_states=50000,threshold_before_merge=1),total_order(atomic_ts_order=random,product_ts_order=random,atomic_before_product=true)])),shrink_strategy=shrink_bisimulation(greedy=false),label_reduction=exact(before_shrinking=true,before_merging=false),max_states=50000,threshold_before_merge=1))']), - IssueConfig('sbf-miasm-rl-otn-pba-b50k', ['--search', 'astar(merge_and_shrink(merge_strategy=merge_stateless(merge_selector=score_based_filtering(scoring_functions=[sf_miasm(shrink_strategy=shrink_bisimulation(greedy=false),max_states=50000,threshold_before_merge=1),total_order(atomic_ts_order=reverse_level,product_ts_order=old_to_new,atomic_before_product=false)])),shrink_strategy=shrink_bisimulation(greedy=false),label_reduction=exact(before_shrinking=true,before_merging=false),max_states=50000,threshold_before_merge=1))']), - IssueConfig('sbf-miasm-rl-nto-pba-b50k', ['--search', 'astar(merge_and_shrink(merge_strategy=merge_stateless(merge_selector=score_based_filtering(scoring_functions=[sf_miasm(shrink_strategy=shrink_bisimulation(greedy=false),max_states=50000,threshold_before_merge=1),total_order(atomic_ts_order=reverse_level,product_ts_order=new_to_old,atomic_before_product=false)])),shrink_strategy=shrink_bisimulation(greedy=false),label_reduction=exact(before_shrinking=true,before_merging=false),max_states=50000,threshold_before_merge=1))']), - IssueConfig('sbf-miasm-rl-rnd-pba-b50k', ['--search', 
'astar(merge_and_shrink(merge_strategy=merge_stateless(merge_selector=score_based_filtering(scoring_functions=[sf_miasm(shrink_strategy=shrink_bisimulation(greedy=false),max_states=50000,threshold_before_merge=1),total_order(atomic_ts_order=reverse_level,product_ts_order=random,atomic_before_product=false)])),shrink_strategy=shrink_bisimulation(greedy=false),label_reduction=exact(before_shrinking=true,before_merging=false),max_states=50000,threshold_before_merge=1))']), - IssueConfig('sbf-miasm-l-otn-pba-b50k', ['--search', 'astar(merge_and_shrink(merge_strategy=merge_stateless(merge_selector=score_based_filtering(scoring_functions=[sf_miasm(shrink_strategy=shrink_bisimulation(greedy=false),max_states=50000,threshold_before_merge=1),total_order(atomic_ts_order=level,product_ts_order=old_to_new,atomic_before_product=false)])),shrink_strategy=shrink_bisimulation(greedy=false),label_reduction=exact(before_shrinking=true,before_merging=false),max_states=50000,threshold_before_merge=1))']), - IssueConfig('sbf-miasm-l-nto-pba-b50k', ['--search', 'astar(merge_and_shrink(merge_strategy=merge_stateless(merge_selector=score_based_filtering(scoring_functions=[sf_miasm(shrink_strategy=shrink_bisimulation(greedy=false),max_states=50000,threshold_before_merge=1),total_order(atomic_ts_order=level,product_ts_order=new_to_old,atomic_before_product=false)])),shrink_strategy=shrink_bisimulation(greedy=false),label_reduction=exact(before_shrinking=true,before_merging=false),max_states=50000,threshold_before_merge=1))']), - IssueConfig('sbf-miasm-l-rnd-pba-b50k', ['--search', 
'astar(merge_and_shrink(merge_strategy=merge_stateless(merge_selector=score_based_filtering(scoring_functions=[sf_miasm(shrink_strategy=shrink_bisimulation(greedy=false),max_states=50000,threshold_before_merge=1),total_order(atomic_ts_order=level,product_ts_order=random,atomic_before_product=false)])),shrink_strategy=shrink_bisimulation(greedy=false),label_reduction=exact(before_shrinking=true,before_merging=false),max_states=50000,threshold_before_merge=1))']), - IssueConfig('sbf-miasm-rnd-otn-pba-b50k', ['--search', 'astar(merge_and_shrink(merge_strategy=merge_stateless(merge_selector=score_based_filtering(scoring_functions=[sf_miasm(shrink_strategy=shrink_bisimulation(greedy=false),max_states=50000,threshold_before_merge=1),total_order(atomic_ts_order=random,product_ts_order=old_to_new,atomic_before_product=false)])),shrink_strategy=shrink_bisimulation(greedy=false),label_reduction=exact(before_shrinking=true,before_merging=false),max_states=50000,threshold_before_merge=1))']), - IssueConfig('sbf-miasm-rnd-nto-pba-b50k', ['--search', 'astar(merge_and_shrink(merge_strategy=merge_stateless(merge_selector=score_based_filtering(scoring_functions=[sf_miasm(shrink_strategy=shrink_bisimulation(greedy=false),max_states=50000,threshold_before_merge=1),total_order(atomic_ts_order=random,product_ts_order=new_to_old,atomic_before_product=false)])),shrink_strategy=shrink_bisimulation(greedy=false),label_reduction=exact(before_shrinking=true,before_merging=false),max_states=50000,threshold_before_merge=1))']), - IssueConfig('sbf-miasm-rnd-rnd-pba-b50k', ['--search', 
'astar(merge_and_shrink(merge_strategy=merge_stateless(merge_selector=score_based_filtering(scoring_functions=[sf_miasm(shrink_strategy=shrink_bisimulation(greedy=false),max_states=50000,threshold_before_merge=1),total_order(atomic_ts_order=random,product_ts_order=random,atomic_before_product=false)])),shrink_strategy=shrink_bisimulation(greedy=false),label_reduction=exact(before_shrinking=true,before_merging=false),max_states=50000,threshold_before_merge=1))']), - IssueConfig('sbf-miasm-allrnd-b50k', ['--search', 'astar(merge_and_shrink(merge_strategy=merge_stateless(merge_selector=score_based_filtering(scoring_functions=[sf_miasm(shrink_strategy=shrink_bisimulation(greedy=false),max_states=50000,threshold_before_merge=1),single_random])),shrink_strategy=shrink_bisimulation(greedy=false),label_reduction=exact(before_shrinking=true,before_merging=false),max_states=50000,threshold_before_merge=1))']), -] -SUITE = DEFAULT_OPTIMAL_SUITE -ENVIRONMENT = BaselSlurmEnvironment(email="silvan.sievers@unibas.ch", export=["PATH"]) - -if is_test_run(): - SUITE = ['depot:p01.pddl', 'parcprinter-opt11-strips:p01.pddl', 'mystery:prob07.pddl'] - ENVIRONMENT = LocalEnvironment(processes=4) - -exp = IssueExperiment( - revisions=REVISIONS, - configs=CONFIGS, - environment=ENVIRONMENT, -) -exp.add_suite(BENCHMARKS_DIR, SUITE) -exp.add_parser('lab_driver_parser', exp.LAB_DRIVER_PARSER) -exp.add_parser('exitcode_parser', exp.EXITCODE_PARSER) -exp.add_parser('translator_parser', exp.TRANSLATOR_PARSER) -exp.add_parser('single_search_parser', exp.SINGLE_SEARCH_PARSER) -exp.add_parser('ms_parser', 'ms-parser.py') - -# planner outcome attributes -perfect_heuristic = Attribute('perfect_heuristic', absolute=True, min_wins=False) - -# m&s attributes -ms_construction_time = Attribute('ms_construction_time', absolute=False, min_wins=True, functions=[geometric_mean]) -ms_atomic_construction_time = Attribute('ms_atomic_construction_time', absolute=False, min_wins=True, functions=[geometric_mean]) 
-ms_abstraction_constructed = Attribute('ms_abstraction_constructed', absolute=True, min_wins=False) -ms_final_size = Attribute('ms_final_size', absolute=False, min_wins=True) -ms_out_of_memory = Attribute('ms_out_of_memory', absolute=True, min_wins=True) -ms_out_of_time = Attribute('ms_out_of_time', absolute=True, min_wins=True) -search_out_of_memory = Attribute('search_out_of_memory', absolute=True, min_wins=True) -search_out_of_time = Attribute('search_out_of_time', absolute=True, min_wins=True) - -extra_attributes = [ - perfect_heuristic, - - ms_construction_time, - ms_atomic_construction_time, - ms_abstraction_constructed, - ms_final_size, - ms_out_of_memory, - ms_out_of_time, - search_out_of_memory, - search_out_of_time, -] -attributes = exp.DEFAULT_TABLE_ATTRIBUTES -attributes.extend(extra_attributes) - -exp.add_report(AbsoluteReport(attributes=attributes,filter_algorithm=[ - '%s-sbf-miasm-rl-otn-abp-b50k' % 'issue668-v5-clean', - '%s-sbf-miasm-rl-nto-abp-b50k' % 'issue668-v5-clean', - '%s-sbf-miasm-rl-rnd-abp-b50k' % 'issue668-v5-clean', - '%s-sbf-miasm-l-otn-abp-b50k' % 'issue668-v5-clean', - '%s-sbf-miasm-l-nto-abp-b50k' % 'issue668-v5-clean', - '%s-sbf-miasm-l-rnd-abp-b50k' % 'issue668-v5-clean', - '%s-sbf-miasm-rnd-otn-abp-b50k' % 'issue668-v5-clean', - '%s-sbf-miasm-rnd-nto-abp-b50k' % 'issue668-v5-clean', - '%s-sbf-miasm-rnd-rnd-abp-b50k' % 'issue668-v5-clean', -]),outfile='issue668-v5-clean-abp.html') -exp.add_report(AbsoluteReport(attributes=attributes,filter_algorithm=[ - '%s-sbf-miasm-rl-otn-pba-b50k' % 'issue668-v5-clean', - '%s-sbf-miasm-rl-nto-pba-b50k' % 'issue668-v5-clean', - '%s-sbf-miasm-rl-rnd-pba-b50k' % 'issue668-v5-clean', - '%s-sbf-miasm-l-otn-pba-b50k' % 'issue668-v5-clean', - '%s-sbf-miasm-l-nto-pba-b50k' % 'issue668-v5-clean', - '%s-sbf-miasm-l-rnd-pba-b50k' % 'issue668-v5-clean', - '%s-sbf-miasm-rnd-otn-pba-b50k' % 'issue668-v5-clean', - '%s-sbf-miasm-rnd-nto-pba-b50k' % 'issue668-v5-clean', - '%s-sbf-miasm-rnd-rnd-pba-b50k' % 
'issue668-v5-clean', -]),outfile='issue668-v5-clean-pba.html') -exp.add_report(AbsoluteReport(attributes=attributes,filter_algorithm=[ - '%s-sbf-miasm-rl-nto-abp-b50k' % 'issue668-v5-clean', - '%s-sbf-miasm-l-nto-abp-b50k' % 'issue668-v5-clean', - '%s-sbf-miasm-rnd-nto-abp-b50k' % 'issue668-v5-clean', - '%s-sbf-miasm-rl-nto-pba-b50k' % 'issue668-v5-clean', - '%s-sbf-miasm-l-nto-pba-b50k' % 'issue668-v5-clean', - '%s-sbf-miasm-rnd-nto-pba-b50k' % 'issue668-v5-clean', - '%s-sbf-miasm-allrnd-b50k' % 'issue668-v5-clean', -]),outfile='issue668-v5-clean-paper.html') - -exp.run_steps() - - - - - - diff --git a/experiments/issue668/v5-compare.py b/experiments/issue668/v5-compare.py deleted file mode 100755 index 71957dfee7..0000000000 --- a/experiments/issue668/v5-compare.py +++ /dev/null @@ -1,99 +0,0 @@ -#! /usr/bin/env python -# -*- coding: utf-8 -*- - -import os - -from lab.environments import LocalEnvironment, BaselSlurmEnvironment -from lab.reports import Attribute, geometric_mean - -from downward.reports.absolute import AbsoluteReport -from downward.reports.compare import ComparativeReport - -from common_setup import IssueConfig, IssueExperiment, DEFAULT_OPTIMAL_SUITE, is_test_run - -BENCHMARKS_DIR=os.path.expanduser('~/repos/downward/benchmarks') -REVISIONS = [] -CONFIGS = [] -SUITE = DEFAULT_OPTIMAL_SUITE -ENVIRONMENT = BaselSlurmEnvironment(email="silvan.sievers@unibas.ch", export=["PATH"]) - -if is_test_run(): - SUITE = ['depot:p01.pddl', 'parcprinter-opt11-strips:p01.pddl', 'mystery:prob07.pddl'] - ENVIRONMENT = LocalEnvironment(processes=4) - -exp = IssueExperiment( - revisions=REVISIONS, - configs=CONFIGS, - environment=ENVIRONMENT, -) - -# planner outcome attributes -perfect_heuristic = Attribute('perfect_heuristic', absolute=True, min_wins=False) - -# m&s attributes -ms_construction_time = Attribute('ms_construction_time', absolute=False, min_wins=True, functions=[geometric_mean]) -ms_atomic_construction_time = Attribute('ms_atomic_construction_time', 
absolute=False, min_wins=True, functions=[geometric_mean]) -ms_abstraction_constructed = Attribute('ms_abstraction_constructed', absolute=True, min_wins=False) -ms_final_size = Attribute('ms_final_size', absolute=False, min_wins=True) -ms_out_of_memory = Attribute('ms_out_of_memory', absolute=True, min_wins=True) -ms_out_of_time = Attribute('ms_out_of_time', absolute=True, min_wins=True) -search_out_of_memory = Attribute('search_out_of_memory', absolute=True, min_wins=True) -search_out_of_time = Attribute('search_out_of_time', absolute=True, min_wins=True) - -extra_attributes = [ - perfect_heuristic, - - ms_construction_time, - ms_atomic_construction_time, - ms_abstraction_constructed, - ms_final_size, - ms_out_of_memory, - ms_out_of_time, - search_out_of_memory, - search_out_of_time, -] -attributes = exp.DEFAULT_TABLE_ATTRIBUTES -attributes.extend(extra_attributes) - -exp.add_fetcher('data/issue668-v5-hack-eval') -exp.add_fetcher('data/issue668-v5-clean-eval') - -exp.add_report(ComparativeReport(attributes=attributes,algorithm_pairs=[ - ('%s-sbf-miasm-rl-otn-abp-b50k' % 'issue668-v5-hack','%s-sbf-miasm-rl-otn-abp-b50k' % 'issue668-v5-clean'), - ('%s-sbf-miasm-rl-nto-abp-b50k' % 'issue668-v5-hack','%s-sbf-miasm-rl-nto-abp-b50k' % 'issue668-v5-clean'), - ('%s-sbf-miasm-rl-rnd-abp-b50k' % 'issue668-v5-hack','%s-sbf-miasm-rl-rnd-abp-b50k' % 'issue668-v5-clean'), - ('%s-sbf-miasm-l-otn-abp-b50k' % 'issue668-v5-hack','%s-sbf-miasm-l-otn-abp-b50k' % 'issue668-v5-clean'), - ('%s-sbf-miasm-l-nto-abp-b50k' % 'issue668-v5-hack','%s-sbf-miasm-l-nto-abp-b50k' % 'issue668-v5-clean'), - ('%s-sbf-miasm-l-rnd-abp-b50k' % 'issue668-v5-hack','%s-sbf-miasm-l-rnd-abp-b50k' % 'issue668-v5-clean'), - ('%s-sbf-miasm-rnd-otn-abp-b50k' % 'issue668-v5-hack','%s-sbf-miasm-rnd-otn-abp-b50k' % 'issue668-v5-clean'), - ('%s-sbf-miasm-rnd-nto-abp-b50k' % 'issue668-v5-hack','%s-sbf-miasm-rnd-nto-abp-b50k' % 'issue668-v5-clean'), - ('%s-sbf-miasm-rnd-rnd-abp-b50k' % 
'issue668-v5-hack','%s-sbf-miasm-rnd-rnd-abp-b50k' % 'issue668-v5-clean'), -]),outfile='issue668-v5-hack-vs-clean-abp.html') -exp.add_report(ComparativeReport(attributes=attributes,algorithm_pairs=[ - ('%s-sbf-miasm-rl-otn-pba-b50k' % 'issue668-v5-hack','%s-sbf-miasm-rl-otn-pba-b50k' % 'issue668-v5-clean'), - ('%s-sbf-miasm-rl-nto-pba-b50k' % 'issue668-v5-hack','%s-sbf-miasm-rl-nto-pba-b50k' % 'issue668-v5-clean'), - ('%s-sbf-miasm-rl-rnd-pba-b50k' % 'issue668-v5-hack','%s-sbf-miasm-rl-rnd-pba-b50k' % 'issue668-v5-clean'), - ('%s-sbf-miasm-l-otn-pba-b50k' % 'issue668-v5-hack','%s-sbf-miasm-l-otn-pba-b50k' % 'issue668-v5-clean'), - ('%s-sbf-miasm-l-nto-pba-b50k' % 'issue668-v5-hack','%s-sbf-miasm-l-nto-pba-b50k' % 'issue668-v5-clean'), - ('%s-sbf-miasm-l-rnd-pba-b50k' % 'issue668-v5-hack','%s-sbf-miasm-l-rnd-pba-b50k' % 'issue668-v5-clean'), - ('%s-sbf-miasm-rnd-otn-pba-b50k' % 'issue668-v5-hack','%s-sbf-miasm-rnd-otn-pba-b50k' % 'issue668-v5-clean'), - ('%s-sbf-miasm-rnd-nto-pba-b50k' % 'issue668-v5-hack','%s-sbf-miasm-rnd-nto-pba-b50k' % 'issue668-v5-clean'), - ('%s-sbf-miasm-rnd-rnd-pba-b50k' % 'issue668-v5-hack','%s-sbf-miasm-rnd-rnd-pba-b50k' % 'issue668-v5-clean'), -]),outfile='issue668-v5-hack-vs-clean-pba.html') -exp.add_report(ComparativeReport(attributes=attributes,algorithm_pairs=[ - ('%s-sbf-miasm-rl-nto-abp-b50k' % 'issue668-v5-hack','%s-sbf-miasm-rl-nto-abp-b50k' % 'issue668-v5-clean'), - ('%s-sbf-miasm-l-nto-abp-b50k' % 'issue668-v5-hack','%s-sbf-miasm-l-nto-abp-b50k' % 'issue668-v5-clean'), - ('%s-sbf-miasm-rnd-nto-abp-b50k' % 'issue668-v5-hack','%s-sbf-miasm-rnd-nto-abp-b50k' % 'issue668-v5-clean'), - ('%s-sbf-miasm-rl-nto-pba-b50k' % 'issue668-v5-hack','%s-sbf-miasm-rl-nto-pba-b50k' % 'issue668-v5-clean'), - ('%s-sbf-miasm-l-nto-pba-b50k' % 'issue668-v5-hack','%s-sbf-miasm-l-nto-pba-b50k' % 'issue668-v5-clean'), - ('%s-sbf-miasm-rnd-nto-pba-b50k' % 'issue668-v5-hack','%s-sbf-miasm-rnd-nto-pba-b50k' % 'issue668-v5-clean'), - 
('%s-sbf-miasm-allrnd-b50k' % 'issue668-v5-hack','%s-sbf-miasm-allrnd-b50k' % 'issue668-v5-clean'), -]),outfile='issue668-v5-hack-vs-clean-paper.html') - -exp.run_steps() - - - - - - diff --git a/experiments/issue668/v5-hack.py b/experiments/issue668/v5-hack.py deleted file mode 100755 index ade3fe54fa..0000000000 --- a/experiments/issue668/v5-hack.py +++ /dev/null @@ -1,122 +0,0 @@ -#! /usr/bin/env python -# -*- coding: utf-8 -*- - -import os - -from lab.environments import LocalEnvironment, BaselSlurmEnvironment -from lab.reports import Attribute, geometric_mean - -from downward.reports.absolute import AbsoluteReport - -from common_setup import IssueConfig, IssueExperiment, DEFAULT_OPTIMAL_SUITE, is_test_run - -BENCHMARKS_DIR=os.path.expanduser('~/repos/downward/benchmarks') -REVISIONS = ["issue668-v5-hack"] -CONFIGS = [ - IssueConfig('sbf-miasm-rl-otn-abp-b50k', ['--search', 'astar(merge_and_shrink(merge_strategy=merge_stateless(merge_selector=score_based_filtering(scoring_functions=[sf_miasm(shrink_strategy=shrink_bisimulation(greedy=false),max_states=50000,threshold_before_merge=1),total_order(atomic_ts_order=reverse_level,product_ts_order=old_to_new,atomic_before_product=true)])),shrink_strategy=shrink_bisimulation(greedy=false),label_reduction=exact(before_shrinking=true,before_merging=false),max_states=50000,threshold_before_merge=1))']), - IssueConfig('sbf-miasm-rl-nto-abp-b50k', ['--search', 'astar(merge_and_shrink(merge_strategy=merge_stateless(merge_selector=score_based_filtering(scoring_functions=[sf_miasm(shrink_strategy=shrink_bisimulation(greedy=false),max_states=50000,threshold_before_merge=1),total_order(atomic_ts_order=reverse_level,product_ts_order=new_to_old,atomic_before_product=true)])),shrink_strategy=shrink_bisimulation(greedy=false),label_reduction=exact(before_shrinking=true,before_merging=false),max_states=50000,threshold_before_merge=1))']), - IssueConfig('sbf-miasm-rl-rnd-abp-b50k', ['--search', 
'astar(merge_and_shrink(merge_strategy=merge_stateless(merge_selector=score_based_filtering(scoring_functions=[sf_miasm(shrink_strategy=shrink_bisimulation(greedy=false),max_states=50000,threshold_before_merge=1),total_order(atomic_ts_order=reverse_level,product_ts_order=random,atomic_before_product=true)])),shrink_strategy=shrink_bisimulation(greedy=false),label_reduction=exact(before_shrinking=true,before_merging=false),max_states=50000,threshold_before_merge=1))']), - IssueConfig('sbf-miasm-l-otn-abp-b50k', ['--search', 'astar(merge_and_shrink(merge_strategy=merge_stateless(merge_selector=score_based_filtering(scoring_functions=[sf_miasm(shrink_strategy=shrink_bisimulation(greedy=false),max_states=50000,threshold_before_merge=1),total_order(atomic_ts_order=level,product_ts_order=old_to_new,atomic_before_product=true)])),shrink_strategy=shrink_bisimulation(greedy=false),label_reduction=exact(before_shrinking=true,before_merging=false),max_states=50000,threshold_before_merge=1))']), - IssueConfig('sbf-miasm-l-nto-abp-b50k', ['--search', 'astar(merge_and_shrink(merge_strategy=merge_stateless(merge_selector=score_based_filtering(scoring_functions=[sf_miasm(shrink_strategy=shrink_bisimulation(greedy=false),max_states=50000,threshold_before_merge=1),total_order(atomic_ts_order=level,product_ts_order=new_to_old,atomic_before_product=true)])),shrink_strategy=shrink_bisimulation(greedy=false),label_reduction=exact(before_shrinking=true,before_merging=false),max_states=50000,threshold_before_merge=1))']), - IssueConfig('sbf-miasm-l-rnd-abp-b50k', ['--search', 
'astar(merge_and_shrink(merge_strategy=merge_stateless(merge_selector=score_based_filtering(scoring_functions=[sf_miasm(shrink_strategy=shrink_bisimulation(greedy=false),max_states=50000,threshold_before_merge=1),total_order(atomic_ts_order=level,product_ts_order=random,atomic_before_product=true)])),shrink_strategy=shrink_bisimulation(greedy=false),label_reduction=exact(before_shrinking=true,before_merging=false),max_states=50000,threshold_before_merge=1))']), - IssueConfig('sbf-miasm-rnd-otn-abp-b50k', ['--search', 'astar(merge_and_shrink(merge_strategy=merge_stateless(merge_selector=score_based_filtering(scoring_functions=[sf_miasm(shrink_strategy=shrink_bisimulation(greedy=false),max_states=50000,threshold_before_merge=1),total_order(atomic_ts_order=random,product_ts_order=old_to_new,atomic_before_product=true)])),shrink_strategy=shrink_bisimulation(greedy=false),label_reduction=exact(before_shrinking=true,before_merging=false),max_states=50000,threshold_before_merge=1))']), - IssueConfig('sbf-miasm-rnd-nto-abp-b50k', ['--search', 'astar(merge_and_shrink(merge_strategy=merge_stateless(merge_selector=score_based_filtering(scoring_functions=[sf_miasm(shrink_strategy=shrink_bisimulation(greedy=false),max_states=50000,threshold_before_merge=1),total_order(atomic_ts_order=random,product_ts_order=new_to_old,atomic_before_product=true)])),shrink_strategy=shrink_bisimulation(greedy=false),label_reduction=exact(before_shrinking=true,before_merging=false),max_states=50000,threshold_before_merge=1))']), - IssueConfig('sbf-miasm-rnd-rnd-abp-b50k', ['--search', 
'astar(merge_and_shrink(merge_strategy=merge_stateless(merge_selector=score_based_filtering(scoring_functions=[sf_miasm(shrink_strategy=shrink_bisimulation(greedy=false),max_states=50000,threshold_before_merge=1),total_order(atomic_ts_order=random,product_ts_order=random,atomic_before_product=true)])),shrink_strategy=shrink_bisimulation(greedy=false),label_reduction=exact(before_shrinking=true,before_merging=false),max_states=50000,threshold_before_merge=1))']), - IssueConfig('sbf-miasm-rl-otn-pba-b50k', ['--search', 'astar(merge_and_shrink(merge_strategy=merge_stateless(merge_selector=score_based_filtering(scoring_functions=[sf_miasm(shrink_strategy=shrink_bisimulation(greedy=false),max_states=50000,threshold_before_merge=1),total_order(atomic_ts_order=reverse_level,product_ts_order=old_to_new,atomic_before_product=false)])),shrink_strategy=shrink_bisimulation(greedy=false),label_reduction=exact(before_shrinking=true,before_merging=false),max_states=50000,threshold_before_merge=1))']), - IssueConfig('sbf-miasm-rl-nto-pba-b50k', ['--search', 'astar(merge_and_shrink(merge_strategy=merge_stateless(merge_selector=score_based_filtering(scoring_functions=[sf_miasm(shrink_strategy=shrink_bisimulation(greedy=false),max_states=50000,threshold_before_merge=1),total_order(atomic_ts_order=reverse_level,product_ts_order=new_to_old,atomic_before_product=false)])),shrink_strategy=shrink_bisimulation(greedy=false),label_reduction=exact(before_shrinking=true,before_merging=false),max_states=50000,threshold_before_merge=1))']), - IssueConfig('sbf-miasm-rl-rnd-pba-b50k', ['--search', 
'astar(merge_and_shrink(merge_strategy=merge_stateless(merge_selector=score_based_filtering(scoring_functions=[sf_miasm(shrink_strategy=shrink_bisimulation(greedy=false),max_states=50000,threshold_before_merge=1),total_order(atomic_ts_order=reverse_level,product_ts_order=random,atomic_before_product=false)])),shrink_strategy=shrink_bisimulation(greedy=false),label_reduction=exact(before_shrinking=true,before_merging=false),max_states=50000,threshold_before_merge=1))']), - IssueConfig('sbf-miasm-l-otn-pba-b50k', ['--search', 'astar(merge_and_shrink(merge_strategy=merge_stateless(merge_selector=score_based_filtering(scoring_functions=[sf_miasm(shrink_strategy=shrink_bisimulation(greedy=false),max_states=50000,threshold_before_merge=1),total_order(atomic_ts_order=level,product_ts_order=old_to_new,atomic_before_product=false)])),shrink_strategy=shrink_bisimulation(greedy=false),label_reduction=exact(before_shrinking=true,before_merging=false),max_states=50000,threshold_before_merge=1))']), - IssueConfig('sbf-miasm-l-nto-pba-b50k', ['--search', 'astar(merge_and_shrink(merge_strategy=merge_stateless(merge_selector=score_based_filtering(scoring_functions=[sf_miasm(shrink_strategy=shrink_bisimulation(greedy=false),max_states=50000,threshold_before_merge=1),total_order(atomic_ts_order=level,product_ts_order=new_to_old,atomic_before_product=false)])),shrink_strategy=shrink_bisimulation(greedy=false),label_reduction=exact(before_shrinking=true,before_merging=false),max_states=50000,threshold_before_merge=1))']), - IssueConfig('sbf-miasm-l-rnd-pba-b50k', ['--search', 
'astar(merge_and_shrink(merge_strategy=merge_stateless(merge_selector=score_based_filtering(scoring_functions=[sf_miasm(shrink_strategy=shrink_bisimulation(greedy=false),max_states=50000,threshold_before_merge=1),total_order(atomic_ts_order=level,product_ts_order=random,atomic_before_product=false)])),shrink_strategy=shrink_bisimulation(greedy=false),label_reduction=exact(before_shrinking=true,before_merging=false),max_states=50000,threshold_before_merge=1))']), - IssueConfig('sbf-miasm-rnd-otn-pba-b50k', ['--search', 'astar(merge_and_shrink(merge_strategy=merge_stateless(merge_selector=score_based_filtering(scoring_functions=[sf_miasm(shrink_strategy=shrink_bisimulation(greedy=false),max_states=50000,threshold_before_merge=1),total_order(atomic_ts_order=random,product_ts_order=old_to_new,atomic_before_product=false)])),shrink_strategy=shrink_bisimulation(greedy=false),label_reduction=exact(before_shrinking=true,before_merging=false),max_states=50000,threshold_before_merge=1))']), - IssueConfig('sbf-miasm-rnd-nto-pba-b50k', ['--search', 'astar(merge_and_shrink(merge_strategy=merge_stateless(merge_selector=score_based_filtering(scoring_functions=[sf_miasm(shrink_strategy=shrink_bisimulation(greedy=false),max_states=50000,threshold_before_merge=1),total_order(atomic_ts_order=random,product_ts_order=new_to_old,atomic_before_product=false)])),shrink_strategy=shrink_bisimulation(greedy=false),label_reduction=exact(before_shrinking=true,before_merging=false),max_states=50000,threshold_before_merge=1))']), - IssueConfig('sbf-miasm-rnd-rnd-pba-b50k', ['--search', 
'astar(merge_and_shrink(merge_strategy=merge_stateless(merge_selector=score_based_filtering(scoring_functions=[sf_miasm(shrink_strategy=shrink_bisimulation(greedy=false),max_states=50000,threshold_before_merge=1),total_order(atomic_ts_order=random,product_ts_order=random,atomic_before_product=false)])),shrink_strategy=shrink_bisimulation(greedy=false),label_reduction=exact(before_shrinking=true,before_merging=false),max_states=50000,threshold_before_merge=1))']), - IssueConfig('sbf-miasm-allrnd-b50k', ['--search', 'astar(merge_and_shrink(merge_strategy=merge_stateless(merge_selector=score_based_filtering(scoring_functions=[sf_miasm(shrink_strategy=shrink_bisimulation(greedy=false),max_states=50000,threshold_before_merge=1),single_random])),shrink_strategy=shrink_bisimulation(greedy=false),label_reduction=exact(before_shrinking=true,before_merging=false),max_states=50000,threshold_before_merge=1))']), -] -SUITE = DEFAULT_OPTIMAL_SUITE -ENVIRONMENT = BaselSlurmEnvironment(email="silvan.sievers@unibas.ch", export=["PATH"]) - -if is_test_run(): - SUITE = ['depot:p01.pddl', 'parcprinter-opt11-strips:p01.pddl', 'mystery:prob07.pddl'] - ENVIRONMENT = LocalEnvironment(processes=4) - -exp = IssueExperiment( - revisions=REVISIONS, - configs=CONFIGS, - environment=ENVIRONMENT, -) -exp.add_suite(BENCHMARKS_DIR, SUITE) -exp.add_parser('lab_driver_parser', exp.LAB_DRIVER_PARSER) -exp.add_parser('exitcode_parser', exp.EXITCODE_PARSER) -exp.add_parser('translator_parser', exp.TRANSLATOR_PARSER) -exp.add_parser('single_search_parser', exp.SINGLE_SEARCH_PARSER) -exp.add_parser('ms_parser', 'ms-parser.py') - - -# planner outcome attributes -perfect_heuristic = Attribute('perfect_heuristic', absolute=True, min_wins=False) - -# m&s attributes -ms_construction_time = Attribute('ms_construction_time', absolute=False, min_wins=True, functions=[geometric_mean]) -ms_atomic_construction_time = Attribute('ms_atomic_construction_time', absolute=False, min_wins=True, functions=[geometric_mean]) 
-ms_abstraction_constructed = Attribute('ms_abstraction_constructed', absolute=True, min_wins=False) -ms_final_size = Attribute('ms_final_size', absolute=False, min_wins=True) -ms_out_of_memory = Attribute('ms_out_of_memory', absolute=True, min_wins=True) -ms_out_of_time = Attribute('ms_out_of_time', absolute=True, min_wins=True) -search_out_of_memory = Attribute('search_out_of_memory', absolute=True, min_wins=True) -search_out_of_time = Attribute('search_out_of_time', absolute=True, min_wins=True) - -extra_attributes = [ - perfect_heuristic, - - ms_construction_time, - ms_atomic_construction_time, - ms_abstraction_constructed, - ms_final_size, - ms_out_of_memory, - ms_out_of_time, - search_out_of_memory, - search_out_of_time, -] -attributes = exp.DEFAULT_TABLE_ATTRIBUTES -attributes.extend(extra_attributes) - -exp.add_report(AbsoluteReport(attributes=attributes,filter_algorithm=[ - '%s-sbf-miasm-rl-otn-abp-b50k' % 'issue668-v5-hack', - '%s-sbf-miasm-rl-nto-abp-b50k' % 'issue668-v5-hack', - '%s-sbf-miasm-rl-rnd-abp-b50k' % 'issue668-v5-hack', - '%s-sbf-miasm-l-otn-abp-b50k' % 'issue668-v5-hack', - '%s-sbf-miasm-l-nto-abp-b50k' % 'issue668-v5-hack', - '%s-sbf-miasm-l-rnd-abp-b50k' % 'issue668-v5-hack', - '%s-sbf-miasm-rnd-otn-abp-b50k' % 'issue668-v5-hack', - '%s-sbf-miasm-rnd-nto-abp-b50k' % 'issue668-v5-hack', - '%s-sbf-miasm-rnd-rnd-abp-b50k' % 'issue668-v5-hack', -]),outfile='issue668-v5-hack-abp.html') -exp.add_report(AbsoluteReport(attributes=attributes,filter_algorithm=[ - '%s-sbf-miasm-rl-otn-pba-b50k' % 'issue668-v5-hack', - '%s-sbf-miasm-rl-nto-pba-b50k' % 'issue668-v5-hack', - '%s-sbf-miasm-rl-rnd-pba-b50k' % 'issue668-v5-hack', - '%s-sbf-miasm-l-otn-pba-b50k' % 'issue668-v5-hack', - '%s-sbf-miasm-l-nto-pba-b50k' % 'issue668-v5-hack', - '%s-sbf-miasm-l-rnd-pba-b50k' % 'issue668-v5-hack', - '%s-sbf-miasm-rnd-otn-pba-b50k' % 'issue668-v5-hack', - '%s-sbf-miasm-rnd-nto-pba-b50k' % 'issue668-v5-hack', - '%s-sbf-miasm-rnd-rnd-pba-b50k' % 'issue668-v5-hack', 
-]),outfile='issue668-v5-hack-pba.html') -exp.add_report(AbsoluteReport(attributes=attributes,filter_algorithm=[ - '%s-sbf-miasm-rl-nto-abp-b50k' % 'issue668-v5-hack', - '%s-sbf-miasm-l-nto-abp-b50k' % 'issue668-v5-hack', - '%s-sbf-miasm-rnd-nto-abp-b50k' % 'issue668-v5-hack', - '%s-sbf-miasm-rl-nto-pba-b50k' % 'issue668-v5-hack', - '%s-sbf-miasm-l-nto-pba-b50k' % 'issue668-v5-hack', - '%s-sbf-miasm-rnd-nto-pba-b50k' % 'issue668-v5-hack', - '%s-sbf-miasm-allrnd-b50k' % 'issue668-v5-hack', -]),outfile='issue668-v5-hack-paper.html') - -exp.run_steps() - - - - - - diff --git a/experiments/issue668/v5-paper.py b/experiments/issue668/v5-paper.py deleted file mode 100755 index df59a20c4c..0000000000 --- a/experiments/issue668/v5-paper.py +++ /dev/null @@ -1,92 +0,0 @@ -#! /usr/bin/env python -# -*- coding: utf-8 -*- - -import os - -from lab.environments import LocalEnvironment, BaselSlurmEnvironment -from lab.reports import Attribute, geometric_mean - -from downward.reports.absolute import AbsoluteReport - -from common_setup import IssueConfig, IssueExperiment, DEFAULT_OPTIMAL_SUITE, is_test_run - -BENCHMARKS_DIR=os.path.expanduser('~/repos/downward/benchmarks') -REVISIONS = ["issue668-v5-hack"] -CONFIGS = [ - IssueConfig('sbf-miasm-rl-nto-abp-b50k', ['--search', 'astar(merge_and_shrink(merge_strategy=merge_stateless(merge_selector=score_based_filtering(scoring_functions=[sf_miasm(shrink_strategy=shrink_bisimulation(greedy=false),max_states=50000,threshold_before_merge=1),total_order(atomic_ts_order=reverse_level,product_ts_order=new_to_old,atomic_before_product=true)])),shrink_strategy=shrink_bisimulation(greedy=false),label_reduction=exact(before_shrinking=true,before_merging=false),max_states=50000,threshold_before_merge=1))'], driver_options=['--search-memory-limit', '2048M']), - IssueConfig('sbf-miasm-l-nto-abp-b50k', ['--search', 
'astar(merge_and_shrink(merge_strategy=merge_stateless(merge_selector=score_based_filtering(scoring_functions=[sf_miasm(shrink_strategy=shrink_bisimulation(greedy=false),max_states=50000,threshold_before_merge=1),total_order(atomic_ts_order=level,product_ts_order=new_to_old,atomic_before_product=true)])),shrink_strategy=shrink_bisimulation(greedy=false),label_reduction=exact(before_shrinking=true,before_merging=false),max_states=50000,threshold_before_merge=1))'], driver_options=['--search-memory-limit', '2048M']), - IssueConfig('sbf-miasm-rnd-nto-abp-b50k', ['--search', 'astar(merge_and_shrink(merge_strategy=merge_stateless(merge_selector=score_based_filtering(scoring_functions=[sf_miasm(shrink_strategy=shrink_bisimulation(greedy=false),max_states=50000,threshold_before_merge=1),total_order(atomic_ts_order=random,product_ts_order=new_to_old,atomic_before_product=true)])),shrink_strategy=shrink_bisimulation(greedy=false),label_reduction=exact(before_shrinking=true,before_merging=false),max_states=50000,threshold_before_merge=1))'], driver_options=['--search-memory-limit', '2048M']), - IssueConfig('sbf-miasm-rl-nto-pba-b50k', ['--search', 'astar(merge_and_shrink(merge_strategy=merge_stateless(merge_selector=score_based_filtering(scoring_functions=[sf_miasm(shrink_strategy=shrink_bisimulation(greedy=false),max_states=50000,threshold_before_merge=1),total_order(atomic_ts_order=reverse_level,product_ts_order=new_to_old,atomic_before_product=false)])),shrink_strategy=shrink_bisimulation(greedy=false),label_reduction=exact(before_shrinking=true,before_merging=false),max_states=50000,threshold_before_merge=1))'], driver_options=['--search-memory-limit', '2048M']), - IssueConfig('sbf-miasm-l-nto-pba-b50k', ['--search', 
'astar(merge_and_shrink(merge_strategy=merge_stateless(merge_selector=score_based_filtering(scoring_functions=[sf_miasm(shrink_strategy=shrink_bisimulation(greedy=false),max_states=50000,threshold_before_merge=1),total_order(atomic_ts_order=level,product_ts_order=new_to_old,atomic_before_product=false)])),shrink_strategy=shrink_bisimulation(greedy=false),label_reduction=exact(before_shrinking=true,before_merging=false),max_states=50000,threshold_before_merge=1))'], driver_options=['--search-memory-limit', '2048M']), - IssueConfig('sbf-miasm-rnd-nto-pba-b50k', ['--search', 'astar(merge_and_shrink(merge_strategy=merge_stateless(merge_selector=score_based_filtering(scoring_functions=[sf_miasm(shrink_strategy=shrink_bisimulation(greedy=false),max_states=50000,threshold_before_merge=1),total_order(atomic_ts_order=random,product_ts_order=new_to_old,atomic_before_product=false)])),shrink_strategy=shrink_bisimulation(greedy=false),label_reduction=exact(before_shrinking=true,before_merging=false),max_states=50000,threshold_before_merge=1))'], driver_options=['--search-memory-limit', '2048M']), - IssueConfig('sbf-miasm-allrnd-b50k', ['--search', 'astar(merge_and_shrink(merge_strategy=merge_stateless(merge_selector=score_based_filtering(scoring_functions=[sf_miasm(shrink_strategy=shrink_bisimulation(greedy=false),max_states=50000,threshold_before_merge=1),single_random])),shrink_strategy=shrink_bisimulation(greedy=false),label_reduction=exact(before_shrinking=true,before_merging=false),max_states=50000,threshold_before_merge=1))'], driver_options=['--search-memory-limit', '2048M']), -] -SUITE = DEFAULT_OPTIMAL_SUITE -ENVIRONMENT = BaselSlurmEnvironment(email="silvan.sievers@unibas.ch", export=["PATH"], partition='infai_1') - -if is_test_run(): - SUITE = ['depot:p01.pddl', 'parcprinter-opt11-strips:p01.pddl', 'mystery:prob07.pddl'] - ENVIRONMENT = LocalEnvironment(processes=4) - -exp = IssueExperiment( - revisions=REVISIONS, - configs=CONFIGS, - environment=ENVIRONMENT, -) 
-exp.add_suite(BENCHMARKS_DIR, SUITE) -exp.add_parser('lab_driver_parser', exp.LAB_DRIVER_PARSER) -exp.add_parser('exitcode_parser', exp.EXITCODE_PARSER) -exp.add_parser('translator_parser', exp.TRANSLATOR_PARSER) -exp.add_parser('single_search_parser', exp.SINGLE_SEARCH_PARSER) -exp.add_parser('ms_parser', 'ms-parser.py') - -exp.add_step('build', exp.build) -exp.add_step('start', exp.start_runs) -exp.add_fetcher(name='fetch') - - -# planner outcome attributes -perfect_heuristic = Attribute('perfect_heuristic', absolute=True, min_wins=False) - -# m&s attributes -ms_construction_time = Attribute('ms_construction_time', absolute=False, min_wins=True, functions=[geometric_mean]) -ms_atomic_construction_time = Attribute('ms_atomic_construction_time', absolute=False, min_wins=True, functions=[geometric_mean]) -ms_abstraction_constructed = Attribute('ms_abstraction_constructed', absolute=True, min_wins=False) -ms_final_size = Attribute('ms_final_size', absolute=False, min_wins=True) -ms_out_of_memory = Attribute('ms_out_of_memory', absolute=True, min_wins=True) -ms_out_of_time = Attribute('ms_out_of_time', absolute=True, min_wins=True) -search_out_of_memory = Attribute('search_out_of_memory', absolute=True, min_wins=True) -search_out_of_time = Attribute('search_out_of_time', absolute=True, min_wins=True) - -extra_attributes = [ - perfect_heuristic, - - ms_construction_time, - ms_atomic_construction_time, - ms_abstraction_constructed, - ms_final_size, - ms_out_of_memory, - ms_out_of_time, - search_out_of_memory, - search_out_of_time, -] -attributes = exp.DEFAULT_TABLE_ATTRIBUTES -attributes.extend(extra_attributes) - -exp.add_report(AbsoluteReport(attributes=attributes,filter_algorithm=[ - '%s-sbf-miasm-rl-nto-abp-b50k' % 'issue668-v5-hack', - '%s-sbf-miasm-l-nto-abp-b50k' % 'issue668-v5-hack', - '%s-sbf-miasm-rnd-nto-abp-b50k' % 'issue668-v5-hack', - '%s-sbf-miasm-rl-nto-pba-b50k' % 'issue668-v5-hack', - '%s-sbf-miasm-l-nto-pba-b50k' % 'issue668-v5-hack', - 
'%s-sbf-miasm-rnd-nto-pba-b50k' % 'issue668-v5-hack', - '%s-sbf-miasm-allrnd-b50k' % 'issue668-v5-hack', -]),outfile='issue668-v5-paper.html') - -exp.run_steps() - - - - - - diff --git a/experiments/issue67/common_setup.py b/experiments/issue67/common_setup.py deleted file mode 100644 index 6989ef8686..0000000000 --- a/experiments/issue67/common_setup.py +++ /dev/null @@ -1,357 +0,0 @@ -# -*- coding: utf-8 -*- - -import itertools -import os -import platform -import sys - -from lab.environments import LocalEnvironment, MaiaEnvironment -from lab.experiment import ARGPARSER -from lab.steps import Step - -from downward.experiments import DownwardExperiment, _get_rev_nick -from downward.checkouts import Translator, Preprocessor, Planner -from downward.reports.absolute import AbsoluteReport -from downward.reports.compare import CompareRevisionsReport -from downward.reports.scatter import ScatterPlotReport - - -def parse_args(): - ARGPARSER.add_argument( - "--test", - choices=["yes", "no", "auto"], - default="auto", - dest="test_run", - help="test experiment locally on a small suite if --test=yes or " - "--test=auto and we are not on a cluster") - return ARGPARSER.parse_args() - -ARGS = parse_args() - - -def get_script(): - """Get file name of main script.""" - import __main__ - return __main__.__file__ - - -def get_script_dir(): - """Get directory of main script. - - Usually a relative directory (depends on how it was called by the user.)""" - return os.path.dirname(get_script()) - - -def get_experiment_name(): - """Get name for experiment. - - Derived from the absolute filename of the main script, e.g. - "/ham/spam/eggs.py" => "spam-eggs".""" - script = os.path.abspath(get_script()) - script_dir = os.path.basename(os.path.dirname(script)) - script_base = os.path.splitext(os.path.basename(script))[0] - return "%s-%s" % (script_dir, script_base) - - -def get_data_dir(): - """Get data dir for the experiment. 
- - This is the subdirectory "data" of the directory containing - the main script.""" - return os.path.join(get_script_dir(), "data", get_experiment_name()) - - -def get_repo_base(): - """Get base directory of the repository, as an absolute path. - - Search upwards in the directory tree from the main script until a - directory with a subdirectory named ".hg" is found. - - Abort if the repo base cannot be found.""" - path = os.path.abspath(get_script_dir()) - while os.path.dirname(path) != path: - if os.path.exists(os.path.join(path, ".hg")): - return path - path = os.path.dirname(path) - sys.exit("repo base could not be found") - - -def is_running_on_cluster(): - node = platform.node() - return ("cluster" in node or - node.startswith("gkigrid") or - node in ["habakuk", "turtur"]) - - -def is_test_run(): - return ARGS.test_run == "yes" or (ARGS.test_run == "auto" and - not is_running_on_cluster()) - - -class IssueExperiment(DownwardExperiment): - """Wrapper for DownwardExperiment with a few convenience features.""" - - DEFAULT_TEST_SUITE = "gripper:prob01.pddl" - - DEFAULT_TABLE_ATTRIBUTES = [ - "cost", - "coverage", - "error", - "evaluations", - "expansions", - "expansions_until_last_jump", - "generated", - "memory", - "quality", - "run_dir", - "score_evaluations", - "score_expansions", - "score_generated", - "score_memory", - "score_search_time", - "score_total_time", - "search_time", - "total_time", - ] - - DEFAULT_SCATTER_PLOT_ATTRIBUTES = [ - "evaluations", - "expansions", - "expansions_until_last_jump", - "initial_h_value", - "memory", - "search_time", - "total_time", - ] - - PORTFOLIO_ATTRIBUTES = [ - "cost", - "coverage", - "error", - "plan_length", - "run_dir", - ] - - def __init__(self, configs, suite, grid_priority=None, path=None, - repo=None, revisions=None, search_revisions=None, - test_suite=None, **kwargs): - """Create a DownwardExperiment with some convenience features. 
- - *configs* must be a non-empty dict of {nick: cmdline} pairs - that sets the planner configurations to test. :: - - IssueExperiment(configs={ - "lmcut": ["--search", "astar(lmcut())"], - "ipdb": ["--search", "astar(ipdb())"]}) - - *suite* sets the benchmarks for the experiment. It must be a - single string or a list of strings specifying domains or - tasks. The downward.suites module has many predefined - suites. :: - - IssueExperiment(suite=["grid", "gripper:prob01.pddl"]) - - from downward import suites - IssueExperiment(suite=suites.suite_all()) - IssueExperiment(suite=suites.suite_satisficing_with_ipc11()) - IssueExperiment(suite=suites.suite_optimal()) - - Use *grid_priority* to set the job priority for cluster - experiments. It must be in the range [-1023, 0] where 0 is the - highest priority. By default the priority is 0. :: - - IssueExperiment(grid_priority=-500) - - If *path* is specified, it must be the path to where the - experiment should be built (e.g. - /home/john/experiments/issue123/exp01/). If omitted, the - experiment path is derived automatically from the main - script's filename. Example:: - - script = experiments/issue123/exp01.py --> - path = experiments/issue123/data/issue123-exp01/ - - If *repo* is specified, it must be the path to the root of a - local Fast Downward repository. If omitted, the repository - is derived automatically from the main script's path. Example:: - - script = /path/to/fd-repo/experiments/issue123/exp01.py --> - repo = /path/to/fd-repo - - If *revisions* is specified, it should be a non-empty - list of revisions, which specify which planner versions to use - in the experiment. The same versions are used for translator, - preprocessor and search. :: - - IssueExperiment(revisions=["issue123", "4b3d581643"]) - - If *search_revisions* is specified, it should be a non-empty - list of revisions, which specify which search component - versions to use in the experiment. 
All runs use the - translator and preprocessor component of the first - revision. :: - - IssueExperiment(search_revisions=["default", "issue123"]) - - If you really need to specify the (translator, preprocessor, - planner) triples manually, use the *combinations* parameter - from the base class (might be deprecated soon). The options - *revisions*, *search_revisions* and *combinations* can be - freely mixed, but at least one of them must be given. - - Specify *test_suite* to set the benchmarks for experiment test - runs. By default the first gripper task is used. - - IssueExperiment(test_suite=["depot:pfile1", "tpp:p01.pddl"]) - - """ - - if is_test_run(): - kwargs["environment"] = LocalEnvironment() - suite = test_suite or self.DEFAULT_TEST_SUITE - elif "environment" not in kwargs: - kwargs["environment"] = MaiaEnvironment(priority=grid_priority) - - if path is None: - path = get_data_dir() - - if repo is None: - repo = get_repo_base() - - kwargs.setdefault("combinations", []) - - if not any([revisions, search_revisions, kwargs["combinations"]]): - raise ValueError('At least one of "revisions", "search_revisions" ' - 'or "combinations" must be given') - - if revisions: - kwargs["combinations"].extend([ - (Translator(repo, rev), - Preprocessor(repo, rev), - Planner(repo, rev)) - for rev in revisions]) - - if search_revisions: - base_rev = search_revisions[0] - # Use the same nick for all parts to get short revision nick. 
- kwargs["combinations"].extend([ - (Translator(repo, base_rev, nick=rev), - Preprocessor(repo, base_rev, nick=rev), - Planner(repo, rev, nick=rev)) - for rev in search_revisions]) - - DownwardExperiment.__init__(self, path=path, repo=repo, **kwargs) - - self._config_nicks = [] - for nick, config in configs.items(): - self.add_config(nick, config) - - self.add_suite(suite) - - @property - def revision_nicks(self): - # TODO: Once the add_algorithm() API is available we should get - # rid of the call to _get_rev_nick() and avoid inspecting the - # list of combinations by setting and saving the algorithm nicks. - return [_get_rev_nick(*combo) for combo in self.combinations] - - @classmethod - def _is_portfolio(cls, config_nick): - return "fdss" in config_nick - - @classmethod - def get_supported_attributes(cls, config_nick, attributes): - if cls._is_portfolio(config_nick): - return [attr for attr in attributes - if attr in cls.PORTFOLIO_ATTRIBUTES] - return attributes - - def add_config(self, nick, config, timeout=None): - DownwardExperiment.add_config(self, nick, config, timeout=timeout) - self._config_nicks.append(nick) - - def add_absolute_report_step(self, **kwargs): - """Add step that makes an absolute report. - - Absolute reports are useful for experiments that don't - compare revisions. - - The report is written to the experiment evaluation directory. - - All *kwargs* will be passed to the AbsoluteReport class. If - the keyword argument *attributes* is not specified, a - default list of attributes is used. :: - - exp.add_absolute_report_step(attributes=["coverage"]) - - """ - kwargs.setdefault("attributes", self.DEFAULT_TABLE_ATTRIBUTES) - report = AbsoluteReport(**kwargs) - outfile = get_experiment_name() + "." + report.output_format - self.add_report(report, outfile=outfile) - - def add_comparison_table_step(self, **kwargs): - """Add a step that makes pairwise revision comparisons. 
- - Create comparative reports for all pairs of Fast Downward - revision triples. Each report pairs up the runs of the same - config and lists the two absolute attribute values and their - difference for all attributes in kwargs["attributes"]. - - All *kwargs* will be passed to the CompareRevisionsReport - class. If the keyword argument *attributes* is not - specified, a default list of attributes is used. :: - - exp.add_comparison_table_step(attributes=["coverage"]) - - """ - kwargs.setdefault("attributes", self.DEFAULT_TABLE_ATTRIBUTES) - - def make_comparison_tables(): - for rev1, rev2 in itertools.combinations(self.revision_nicks, 2): - report = CompareRevisionsReport(rev1, rev2, **kwargs) - outfile = os.path.join(self.eval_dir, - "%s-%s-%s-compare.html" % - (self.name, rev1, rev2)) - report(self.eval_dir, outfile) - - self.add_step(Step("make-comparison-tables", make_comparison_tables)) - - def add_scatter_plot_step(self, attributes=None): - """Add a step that creates scatter plots for all revision pairs. - - Create a scatter plot for each combination of attribute, - configuration and revision pair. If *attributes* is not - specified, a list of common scatter plot attributes is used. - For portfolios all attributes except "cost", "coverage" and - "plan_length" will be ignored. 
:: - - exp.add_scatter_plot_step(attributes=["expansions"]) - - """ - if attributes is None: - attributes = self.DEFAULT_SCATTER_PLOT_ATTRIBUTES - scatter_dir = os.path.join(self.eval_dir, "scatter") - - def make_scatter_plot(config_nick, rev1, rev2, attribute): - name = "-".join([self.name, rev1, rev2, attribute, config_nick]) - print "Make scatter plot for", name - algo1 = "%s-%s" % (rev1, config_nick) - algo2 = "%s-%s" % (rev2, config_nick) - report = ScatterPlotReport( - filter_config=[algo1, algo2], - attributes=[attribute], - get_category=lambda run1, run2: run1["domain"], - legend_location=(1.3, 0.5)) - report(self.eval_dir, - os.path.join(scatter_dir, rev1 + "-" + rev2, name)) - - def make_scatter_plots(): - for config_nick in self._config_nicks: - for rev1, rev2 in itertools.combinations( - self.revision_nicks, 2): - for attribute in self.get_supported_attributes( - config_nick, attributes): - make_scatter_plot(config_nick, rev1, rev2, attribute) - - self.add_step(Step("make-scatter-plots", make_scatter_plots)) diff --git a/experiments/issue67/issue67.py b/experiments/issue67/issue67.py deleted file mode 100755 index e36fc36d1e..0000000000 --- a/experiments/issue67/issue67.py +++ /dev/null @@ -1,30 +0,0 @@ -#! 
/usr/bin/env python -# -*- coding: utf-8 -*- - -from downward import suites -import common_setup - - -REVS = ["issue67-v1-base", "issue67-v1"] -SUITE = suites.suite_optimal_with_ipc11() - -CONFIGS = { - "astar_blind": [ - "--search", - "astar(blind())"], - "astar_lmcut": [ - "--search", - "astar(lmcut())"], - "astar_lm_zg": [ - "--search", - "astar(lmcount(lm_zg(), admissible=true, optimal=true))"], -} - -exp = common_setup.IssueExperiment( - search_revisions=REVS, - configs=CONFIGS, - suite=SUITE, - ) -exp.add_comparison_table_step() - -exp() diff --git a/experiments/issue67/relativescatter.py b/experiments/issue67/relativescatter.py deleted file mode 100644 index 41a8385a87..0000000000 --- a/experiments/issue67/relativescatter.py +++ /dev/null @@ -1,126 +0,0 @@ -# -*- coding: utf-8 -*- -# -# downward uses the lab package to conduct experiments with the -# Fast Downward planning system. -# -# This program is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with this program. If not, see . 
- -from collections import defaultdict -import os - -from lab import tools - -from matplotlib import ticker - -from downward.reports.scatter import ScatterPlotReport -from downward.reports.plot import PlotReport, Matplotlib, MatplotlibPlot - - -# TODO: handle outliers - -# TODO: this is mostly copied from ScatterMatplotlib (scatter.py) -class RelativeScatterMatplotlib(Matplotlib): - @classmethod - def _plot(cls, report, axes, categories, styles): - # Display grid - axes.grid(b=True, linestyle='-', color='0.75') - - has_points = False - # Generate the scatter plots - for category, coords in sorted(categories.items()): - X, Y = zip(*coords) - axes.scatter(X, Y, s=42, label=category, **styles[category]) - if X and Y: - has_points = True - - if report.xscale == 'linear' or report.yscale == 'linear': - plot_size = report.missing_val * 1.01 - else: - plot_size = report.missing_val * 1.25 - - # make 5 ticks above and below 1 - yticks = [] - tick_step = report.ylim_top**(1/5.0) - for i in xrange(-5, 6): - yticks.append(tick_step**i) - axes.set_yticks(yticks) - axes.get_yaxis().set_major_formatter(ticker.ScalarFormatter()) - - axes.set_xlim(report.xlim_left or -1, report.xlim_right or plot_size) - axes.set_ylim(report.ylim_bottom or -1, report.ylim_top or plot_size) - - for axis in [axes.xaxis, axes.yaxis]: - MatplotlibPlot.change_axis_formatter(axis, - report.missing_val if report.show_missing else None) - return has_points - - - -class RelativeScatterPlotReport(ScatterPlotReport): - """ - Generate a scatter plot that shows how a specific attribute in two - configurations. The attribute value in config 1 is shown on the - x-axis and the relation to the value in config 2 on the y-axis. 
- """ - - def __init__(self, show_missing=True, get_category=None, **kwargs): - ScatterPlotReport.__init__(self, show_missing, get_category, **kwargs) - if self.output_format == 'tex': - raise "not supported" - else: - self.writer = RelativeScatterMatplotlib - - - def _fill_categories(self, runs): - # We discard the *runs* parameter. - # Map category names to value tuples - categories = defaultdict(list) - self.ylim_bottom = 2 - self.ylim_top = 0.5 - self.xlim_left = float("inf") - for (domain, problem), runs in self.problem_runs.items(): - if len(runs) != 2: - continue - run1, run2 = runs - assert (run1['config'] == self.configs[0] and - run2['config'] == self.configs[1]) - val1 = run1.get(self.attribute) - val2 = run2.get(self.attribute) - if val1 is None or val2 is None: - continue - category = self.get_category(run1, run2) - assert val1 > 0, (domain, problem, self.configs[0], val1) - assert val2 > 0, (domain, problem, self.configs[1], val2) - x = val1 - y = val2 / float(val1) - - categories[category].append((x, y)) - - self.ylim_top = max(self.ylim_top, y) - self.ylim_bottom = min(self.ylim_bottom, y) - self.xlim_left = min(self.xlim_left, x) - - # center around 1 - if self.ylim_bottom < 1: - self.ylim_top = max(self.ylim_top, 1 / float(self.ylim_bottom)) - if self.ylim_top > 1: - self.ylim_bottom = min(self.ylim_bottom, 1 / float(self.ylim_top)) - return categories - - def _set_scales(self, xscale, yscale): - # ScatterPlots use log-scaling on the x-axis by default. - default_xscale = 'log' - if self.attribute and self.attribute in self.LINEAR: - default_xscale = 'linear' - PlotReport._set_scales(self, xscale or default_xscale, 'log') diff --git a/experiments/issue67/v4.py b/experiments/issue67/v4.py deleted file mode 100755 index 4f68429d3c..0000000000 --- a/experiments/issue67/v4.py +++ /dev/null @@ -1,39 +0,0 @@ -#! 
/usr/bin/env python -# -*- coding: utf-8 -*- - -from downward import suites -import common_setup -from relativescatter import RelativeScatterPlotReport - - -REVS = ["issue67-v4-base", "issue67-v4"] -SUITE = suites.suite_optimal_with_ipc11() - -CONFIGS = { - "astar_blind": [ - "--search", - "astar(blind())"], - "astar_lmcut": [ - "--search", - "astar(lmcut())"], - "astar_lm_zg": [ - "--search", - "astar(lmcount(lm_zg(), admissible=true, optimal=true))"], -} - -exp = common_setup.IssueExperiment( - search_revisions=REVS, - configs=CONFIGS, - suite=SUITE, - ) -exp.add_comparison_table_step() - -exp.add_report( - RelativeScatterPlotReport( - attributes=["total_time"], - get_category=lambda run1, run2: run1.get("domain"), - ), - outfile='issue67-v4-total-time.png' -) - -exp() diff --git a/experiments/issue671/common_setup.py b/experiments/issue671/common_setup.py deleted file mode 100644 index 934531f15d..0000000000 --- a/experiments/issue671/common_setup.py +++ /dev/null @@ -1,357 +0,0 @@ -# -*- coding: utf-8 -*- - -import itertools -import os -import platform -import subprocess -import sys - -from lab.environments import LocalEnvironment, MaiaEnvironment -from lab.experiment import ARGPARSER -from lab.steps import Step -from lab import tools - -from downward.experiment import FastDownwardExperiment -from downward.reports.absolute import AbsoluteReport -from downward.reports.compare import CompareConfigsReport -from downward.reports.scatter import ScatterPlotReport - - -def parse_args(): - ARGPARSER.add_argument( - "--test", - choices=["yes", "no", "auto"], - default="auto", - dest="test_run", - help="test experiment locally on a small suite if --test=yes or " - "--test=auto and we are not on a cluster") - return ARGPARSER.parse_args() - -ARGS = parse_args() - - -def get_script(): - """Get file name of main script.""" - return tools.get_script_path() - - -def get_script_dir(): - """Get directory of main script. 
- - Usually a relative directory (depends on how it was called by the user.)""" - return os.path.dirname(get_script()) - - -def get_experiment_name(): - """Get name for experiment. - - Derived from the absolute filename of the main script, e.g. - "/ham/spam/eggs.py" => "spam-eggs".""" - script = os.path.abspath(get_script()) - script_dir = os.path.basename(os.path.dirname(script)) - script_base = os.path.splitext(os.path.basename(script))[0] - return "%s-%s" % (script_dir, script_base) - - -def get_data_dir(): - """Get data dir for the experiment. - - This is the subdirectory "data" of the directory containing - the main script.""" - return os.path.join(get_script_dir(), "data", get_experiment_name()) - - -def get_repo_base(): - """Get base directory of the repository, as an absolute path. - - Search upwards in the directory tree from the main script until a - directory with a subdirectory named ".hg" is found. - - Abort if the repo base cannot be found.""" - path = os.path.abspath(get_script_dir()) - while os.path.dirname(path) != path: - if os.path.exists(os.path.join(path, ".hg")): - return path - path = os.path.dirname(path) - sys.exit("repo base could not be found") - - -def is_running_on_cluster(): - node = platform.node() - return ( - "cluster" in node or - node.startswith("gkigrid") or - node in ["habakuk", "turtur"]) - - -def is_test_run(): - return ARGS.test_run == "yes" or ( - ARGS.test_run == "auto" and not is_running_on_cluster()) - - -def get_algo_nick(revision, config_nick): - return "{revision}-{config_nick}".format(**locals()) - - -class IssueConfig(object): - """Hold information about a planner configuration. - - See FastDownwardExperiment.add_algorithm() for documentation of the - constructor's options. 
- - """ - def __init__(self, nick, component_options, - build_options=None, driver_options=None): - self.nick = nick - self.component_options = component_options - self.build_options = build_options - self.driver_options = driver_options - - -class IssueExperiment(FastDownwardExperiment): - """Subclass of FastDownwardExperiment with some convenience features.""" - - DEFAULT_TEST_SUITE = "gripper:prob01.pddl" - - DEFAULT_TABLE_ATTRIBUTES = [ - "cost", - "coverage", - "error", - "evaluations", - "expansions", - "expansions_until_last_jump", - "generated", - "memory", - "quality", - "run_dir", - "score_evaluations", - "score_expansions", - "score_generated", - "score_memory", - "score_search_time", - "score_total_time", - "search_time", - "total_time", - ] - - DEFAULT_SCATTER_PLOT_ATTRIBUTES = [ - "evaluations", - "expansions", - "expansions_until_last_jump", - "initial_h_value", - "memory", - "search_time", - "total_time", - ] - - PORTFOLIO_ATTRIBUTES = [ - "cost", - "coverage", - "error", - "plan_length", - "run_dir", - ] - - def __init__(self, benchmarks_dir, suite, revisions=[], configs={}, - grid_priority=None, path=None, test_suite=None, - email=None, processes=None, - **kwargs): - """ - If *revisions* is specified, it should be a non-empty - list of revisions, which specify which planner versions to use - in the experiment. The same versions are used for translator, - preprocessor and search. :: - - IssueExperiment(revisions=["issue123", "4b3d581643"], ...) - - *configs* must be a non-empty list of IssueConfig objects. :: - - IssueExperiment(..., configs=[ - IssueConfig("ff", ["--search", "eager_greedy(ff())"]), - IssueConfig( - "lama", [], - driver_options=["--alias", "seq-sat-lama-2011"]), - ]) - - *suite* sets the benchmarks for the experiment. It must be a - single string or a list of strings specifying domains or - tasks. The downward.suites module has many predefined - suites. 
:: - - IssueExperiment(..., suite=["grid", "gripper:prob01.pddl"]) - - from downward import suites - IssueExperiment(..., suite=suites.suite_all()) - IssueExperiment(..., suite=suites.suite_satisficing_with_ipc11()) - IssueExperiment(..., suite=suites.suite_optimal()) - - Use *grid_priority* to set the job priority for cluster - experiments. It must be in the range [-1023, 0] where 0 is the - highest priority. By default the priority is 0. :: - - IssueExperiment(..., grid_priority=-500) - - If *path* is specified, it must be the path to where the - experiment should be built (e.g. - /home/john/experiments/issue123/exp01/). If omitted, the - experiment path is derived automatically from the main - script's filename. Example:: - - script = experiments/issue123/exp01.py --> - path = experiments/issue123/data/issue123-exp01/ - - Specify *test_suite* to set the benchmarks for experiment test - runs. By default the first gripper task is used. - - IssueExperiment(..., test_suite=["depot:pfile1", "tpp:p01.pddl"]) - - If *email* is specified, it should be an email address. This - email address will be notified upon completion of the experiments - if it is run on the cluster. 
- """ - - if is_test_run(): - kwargs["environment"] = LocalEnvironment(processes=processes) - suite = test_suite or self.DEFAULT_TEST_SUITE - elif "environment" not in kwargs: - kwargs["environment"] = MaiaEnvironment( - priority=grid_priority, email=email) - - path = path or get_data_dir() - - FastDownwardExperiment.__init__(self, path=path, **kwargs) - - repo = get_repo_base() - for rev in revisions: - for config in configs: - self.add_algorithm( - get_algo_nick(rev, config.nick), - repo, - rev, - config.component_options, - build_options=config.build_options, - driver_options=config.driver_options) - - self.add_suite(benchmarks_dir, suite) - - self._revisions = revisions - self._configs = configs - - @classmethod - def _is_portfolio(cls, config_nick): - return "fdss" in config_nick - - @classmethod - def get_supported_attributes(cls, config_nick, attributes): - if cls._is_portfolio(config_nick): - return [attr for attr in attributes - if attr in cls.PORTFOLIO_ATTRIBUTES] - return attributes - - def add_absolute_report_step(self, **kwargs): - """Add step that makes an absolute report. - - Absolute reports are useful for experiments that don't - compare revisions. - - The report is written to the experiment evaluation directory. - - All *kwargs* will be passed to the AbsoluteReport class. If - the keyword argument *attributes* is not specified, a - default list of attributes is used. :: - - exp.add_absolute_report_step(attributes=["coverage"]) - - """ - kwargs.setdefault("attributes", self.DEFAULT_TABLE_ATTRIBUTES) - report = AbsoluteReport(**kwargs) - outfile = os.path.join(self.eval_dir, - get_experiment_name() + "." + - report.output_format) - self.add_report(report, outfile=outfile) - self.add_step(Step('publish-absolute-report', - subprocess.call, - ['publish', outfile])) - - def add_comparison_table_step(self, **kwargs): - """Add a step that makes pairwise revision comparisons. - - Create comparative reports for all pairs of Fast Downward - revisions. 
Each report pairs up the runs of the same config and - lists the two absolute attribute values and their difference - for all attributes in kwargs["attributes"]. - - All *kwargs* will be passed to the CompareConfigsReport class. - If the keyword argument *attributes* is not specified, a - default list of attributes is used. :: - - exp.add_comparison_table_step(attributes=["coverage"]) - - """ - kwargs.setdefault("attributes", self.DEFAULT_TABLE_ATTRIBUTES) - - def make_comparison_tables(): - for rev1, rev2 in itertools.combinations(self._revisions, 2): - compared_configs = [] - for config in self._configs: - config_nick = config.nick - compared_configs.append( - ("%s-%s" % (rev1, config_nick), - "%s-%s" % (rev2, config_nick), - "Diff (%s)" % config_nick)) - report = CompareConfigsReport(compared_configs, **kwargs) - outfile = os.path.join( - self.eval_dir, - "%s-%s-%s-compare" % (self.name, rev1, rev2) - + "." + report.output_format) - report(self.eval_dir, outfile) - - def publish_comparison_tables(): - for rev1, rev2 in itertools.combinations(self._revisions, 2): - outfile = os.path.join( - self.eval_dir, - "%s-%s-%s-compare" % (self.name, rev1, rev2) - + ".html") - subprocess.call(['publish', outfile]) - - self.add_step(Step("make-comparison-tables", make_comparison_tables)) - self.add_step(Step("publish-comparison-tables", publish_comparison_tables)) - - def add_scatter_plot_step(self, attributes=None): - """Add a step that creates scatter plots for all revision pairs. - - Create a scatter plot for each combination of attribute, - configuration and revisions pair. If *attributes* is not - specified, a list of common scatter plot attributes is used. - For portfolios all attributes except "cost", "coverage" and - "plan_length" will be ignored. 
:: - - exp.add_scatter_plot_step(attributes=["expansions"]) - - """ - if attributes is None: - attributes = self.DEFAULT_SCATTER_PLOT_ATTRIBUTES - scatter_dir = os.path.join(self.eval_dir, "scatter") - - def make_scatter_plot(config_nick, rev1, rev2, attribute): - name = "-".join([self.name, rev1, rev2, attribute, config_nick]) - print "Make scatter plot for", name - algo1 = "%s-%s" % (rev1, config_nick) - algo2 = "%s-%s" % (rev2, config_nick) - report = ScatterPlotReport( - filter_config=[algo1, algo2], - attributes=[attribute], - get_category=lambda run1, run2: run1["domain"], - legend_location=(1.3, 0.5)) - report( - self.eval_dir, - os.path.join(scatter_dir, rev1 + "-" + rev2, name)) - - def make_scatter_plots(): - for config in self._configs: - for rev1, rev2 in itertools.combinations(self._revisions, 2): - for attribute in self.get_supported_attributes( - config.nick, attributes): - make_scatter_plot(config.nick, rev1, rev2, attribute) - - self.add_step(Step("make-scatter-plots", make_scatter_plots)) diff --git a/experiments/issue671/relativescatter.py b/experiments/issue671/relativescatter.py deleted file mode 100644 index 68e2529f34..0000000000 --- a/experiments/issue671/relativescatter.py +++ /dev/null @@ -1,106 +0,0 @@ -# -*- coding: utf-8 -*- - -from collections import defaultdict - -from matplotlib import ticker - -from downward.reports.scatter import ScatterPlotReport -from downward.reports.plot import PlotReport, Matplotlib, MatplotlibPlot - - -# TODO: handle outliers - -# TODO: this is mostly copied from ScatterMatplotlib (scatter.py) -class RelativeScatterMatplotlib(Matplotlib): - @classmethod - def _plot(cls, report, axes, categories, styles): - # Display grid - axes.grid(b=True, linestyle='-', color='0.75') - - has_points = False - # Generate the scatter plots - for category, coords in sorted(categories.items()): - X, Y = zip(*coords) - axes.scatter(X, Y, s=42, label=category, **styles[category]) - if X and Y: - has_points = True - - if 
report.xscale == 'linear' or report.yscale == 'linear': - plot_size = report.missing_val * 1.01 - else: - plot_size = report.missing_val * 1.25 - - # make 5 ticks above and below 1 - yticks = [] - tick_step = report.ylim_top**(1/5.0) - for i in xrange(-5, 6): - yticks.append(tick_step**i) - axes.set_yticks(yticks) - axes.get_yaxis().set_major_formatter(ticker.ScalarFormatter()) - - axes.set_xlim(report.xlim_left or -1, report.xlim_right or plot_size) - axes.set_ylim(report.ylim_bottom or -1, report.ylim_top or plot_size) - - for axis in [axes.xaxis, axes.yaxis]: - MatplotlibPlot.change_axis_formatter(axis, - report.missing_val if report.show_missing else None) - return has_points - - - -class RelativeScatterPlotReport(ScatterPlotReport): - """ - Generate a scatter plot that shows how a specific attribute in two - configurations. The attribute value in config 1 is shown on the - x-axis and the relation to the value in config 2 on the y-axis. - """ - - def __init__(self, show_missing=True, get_category=None, **kwargs): - ScatterPlotReport.__init__(self, show_missing, get_category, **kwargs) - if self.output_format == 'tex': - raise "not supported" - else: - self.writer = RelativeScatterMatplotlib - - def _fill_categories(self, runs): - # We discard the *runs* parameter. 
- # Map category names to value tuples - categories = defaultdict(list) - self.ylim_bottom = 2 - self.ylim_top = 0.5 - self.xlim_left = float("inf") - for (domain, problem), runs in self.problem_runs.items(): - if len(runs) != 2: - continue - run1, run2 = runs - assert (run1['config'] == self.configs[0] and - run2['config'] == self.configs[1]) - val1 = run1.get(self.attribute) - val2 = run2.get(self.attribute) - if val1 is None or val2 is None: - continue - category = self.get_category(run1, run2) - assert val1 > 0, (domain, problem, self.configs[0], val1) - assert val2 > 0, (domain, problem, self.configs[1], val2) - x = val1 - y = val2 / float(val1) - - categories[category].append((x, y)) - - self.ylim_top = max(self.ylim_top, y) - self.ylim_bottom = min(self.ylim_bottom, y) - self.xlim_left = min(self.xlim_left, x) - - # center around 1 - if self.ylim_bottom < 1: - self.ylim_top = max(self.ylim_top, 1 / float(self.ylim_bottom)) - if self.ylim_top > 1: - self.ylim_bottom = min(self.ylim_bottom, 1 / float(self.ylim_top)) - return categories - - def _set_scales(self, xscale, yscale): - # ScatterPlots use log-scaling on the x-axis by default. - default_xscale = 'log' - if self.attribute and self.attribute in self.LINEAR: - default_xscale = 'linear' - PlotReport._set_scales(self, xscale or default_xscale, 'log') diff --git a/experiments/issue671/suites.py b/experiments/issue671/suites.py deleted file mode 100755 index 4615212cfd..0000000000 --- a/experiments/issue671/suites.py +++ /dev/null @@ -1,350 +0,0 @@ -#! /usr/bin/env python -# -*- coding: utf-8 -*- - -import argparse -import textwrap - - -HELP = "Convert suite name to list of domains or tasks." 
- - -def suite_alternative_formulations(): - return ['airport-adl', 'no-mprime', 'no-mystery'] - - -def suite_ipc98_to_ipc04_adl(): - return [ - 'assembly', 'miconic-fulladl', 'miconic-simpleadl', - 'optical-telegraphs', 'philosophers', 'psr-large', - 'psr-middle', 'schedule', - ] - - -def suite_ipc98_to_ipc04_strips(): - return [ - 'airport', 'blocks', 'depot', 'driverlog', 'freecell', 'grid', - 'gripper', 'logistics00', 'logistics98', 'miconic', 'movie', - 'mprime', 'mystery', 'pipesworld-notankage', 'psr-small', - 'satellite', 'zenotravel', - ] - - -def suite_ipc98_to_ipc04(): - # All IPC1-4 domains, including the trivial Movie. - return sorted(suite_ipc98_to_ipc04_adl() + suite_ipc98_to_ipc04_strips()) - - -def suite_ipc06_adl(): - return [ - 'openstacks', - 'pathways', - 'trucks', - ] - - -def suite_ipc06_strips_compilations(): - return [ - 'openstacks-strips', - 'pathways-noneg', - 'trucks-strips', - ] - - -def suite_ipc06_strips(): - return [ - 'pipesworld-tankage', - 'rovers', - 'storage', - 'tpp', - ] - - -def suite_ipc06(): - return sorted(suite_ipc06_adl() + suite_ipc06_strips()) - - -def suite_ipc08_common_strips(): - return [ - 'parcprinter-08-strips', - 'pegsol-08-strips', - 'scanalyzer-08-strips', - ] - - -def suite_ipc08_opt_adl(): - return ['openstacks-opt08-adl'] - - -def suite_ipc08_opt_strips(): - return sorted(suite_ipc08_common_strips() + [ - 'elevators-opt08-strips', - 'openstacks-opt08-strips', - 'sokoban-opt08-strips', - 'transport-opt08-strips', - 'woodworking-opt08-strips', - ]) - - -def suite_ipc08_opt(): - return sorted(suite_ipc08_opt_strips() + suite_ipc08_opt_adl()) - - -def suite_ipc08_sat_adl(): - return ['openstacks-sat08-adl'] - - -def suite_ipc08_sat_strips(): - return sorted(suite_ipc08_common_strips() + [ - # Note: cyber-security is missing. 
- 'elevators-sat08-strips', - 'openstacks-sat08-strips', - 'sokoban-sat08-strips', - 'transport-sat08-strips', - 'woodworking-sat08-strips', - ]) - - -def suite_ipc08_sat(): - return sorted(suite_ipc08_sat_strips() + suite_ipc08_sat_adl()) - - -def suite_ipc08(): - return sorted(set(suite_ipc08_opt() + suite_ipc08_sat())) - - -def suite_ipc11_opt(): - return [ - 'barman-opt11-strips', - 'elevators-opt11-strips', - 'floortile-opt11-strips', - 'nomystery-opt11-strips', - 'openstacks-opt11-strips', - 'parcprinter-opt11-strips', - 'parking-opt11-strips', - 'pegsol-opt11-strips', - 'scanalyzer-opt11-strips', - 'sokoban-opt11-strips', - 'tidybot-opt11-strips', - 'transport-opt11-strips', - 'visitall-opt11-strips', - 'woodworking-opt11-strips', - ] - - -def suite_ipc11_sat(): - return [ - 'barman-sat11-strips', - 'elevators-sat11-strips', - 'floortile-sat11-strips', - 'nomystery-sat11-strips', - 'openstacks-sat11-strips', - 'parcprinter-sat11-strips', - 'parking-sat11-strips', - 'pegsol-sat11-strips', - 'scanalyzer-sat11-strips', - 'sokoban-sat11-strips', - 'tidybot-sat11-strips', - 'transport-sat11-strips', - 'visitall-sat11-strips', - 'woodworking-sat11-strips', - ] - - -def suite_ipc11(): - return sorted(suite_ipc11_opt() + suite_ipc11_sat()) - - -def suite_ipc14_agl_adl(): - return [ - 'cavediving-14-adl', - 'citycar-sat14-adl', - 'maintenance-sat14-adl', - ] - - -def suite_ipc14_agl_strips(): - return [ - 'barman-sat14-strips', - 'childsnack-sat14-strips', - 'floortile-sat14-strips', - 'ged-sat14-strips', - 'hiking-agl14-strips', - 'openstacks-agl14-strips', - 'parking-sat14-strips', - 'tetris-sat14-strips', - 'thoughtful-sat14-strips', - 'transport-sat14-strips', - 'visitall-sat14-strips', - ] - - -def suite_ipc14_agl(): - return sorted(suite_ipc14_agl_adl() + suite_ipc14_agl_strips()) - - -def suite_ipc14_mco_adl(): - return [ - 'cavediving-14-adl', - 'citycar-sat14-adl', - 'maintenance-sat14-adl', - ] - - -def suite_ipc14_mco_strips(): - return [ - 
'barman-mco14-strips', - 'childsnack-sat14-strips', - 'floortile-sat14-strips', - 'ged-sat14-strips', - 'hiking-sat14-strips', - 'openstacks-sat14-strips', - 'parking-sat14-strips', - 'tetris-sat14-strips', - 'thoughtful-mco14-strips', - 'transport-sat14-strips', - 'visitall-sat14-strips', - ] - - -def suite_ipc14_mco(): - return sorted(suite_ipc14_mco_adl() + suite_ipc14_mco_strips()) - - -def suite_ipc14_opt_adl(): - return [ - 'cavediving-14-adl', - 'citycar-opt14-adl', - 'maintenance-opt14-adl', - ] - - -def suite_ipc14_opt_strips(): - return [ - 'barman-opt14-strips', - 'childsnack-opt14-strips', - 'floortile-opt14-strips', - 'ged-opt14-strips', - 'hiking-opt14-strips', - 'openstacks-opt14-strips', - 'parking-opt14-strips', - 'tetris-opt14-strips', - 'tidybot-opt14-strips', - 'transport-opt14-strips', - 'visitall-opt14-strips', - ] - - -def suite_ipc14_opt(): - return sorted(suite_ipc14_opt_adl() + suite_ipc14_opt_strips()) - - -def suite_ipc14_sat_adl(): - return [ - 'cavediving-14-adl', - 'citycar-sat14-adl', - 'maintenance-sat14-adl', - ] - - -def suite_ipc14_sat_strips(): - return [ - 'barman-sat14-strips', - 'childsnack-sat14-strips', - 'floortile-sat14-strips', - 'ged-sat14-strips', - 'hiking-sat14-strips', - 'openstacks-sat14-strips', - 'parking-sat14-strips', - 'tetris-sat14-strips', - 'thoughtful-sat14-strips', - 'transport-sat14-strips', - 'visitall-sat14-strips', - ] - - -def suite_ipc14_sat(): - return sorted(suite_ipc14_sat_adl() + suite_ipc14_sat_strips()) - - -def suite_ipc14(): - return sorted(set( - suite_ipc14_agl() + suite_ipc14_mco() + - suite_ipc14_opt() + suite_ipc14_sat())) - - -def suite_unsolvable(): - return sorted( - ['mystery:prob%02d.pddl' % index - for index in [4, 5, 7, 8, 12, 16, 18, 21, 22, 23, 24]] + - ['miconic-fulladl:f21-3.pddl', 'miconic-fulladl:f30-2.pddl']) - - -def suite_optimal_adl(): - return sorted( - suite_ipc98_to_ipc04_adl() + suite_ipc06_adl() + - suite_ipc08_opt_adl() + suite_ipc14_opt_adl()) - - -def 
suite_optimal_strips(): - return sorted( - suite_ipc98_to_ipc04_strips() + suite_ipc06_strips() + - suite_ipc06_strips_compilations() + suite_ipc08_opt_strips() + - suite_ipc11_opt() + suite_ipc14_opt_strips()) - - -def suite_optimal(): - return sorted(suite_optimal_adl() + suite_optimal_strips()) - - -def suite_satisficing_adl(): - return sorted( - suite_ipc98_to_ipc04_adl() + suite_ipc06_adl() + - suite_ipc08_sat_adl() + suite_ipc14_sat_adl()) - - -def suite_satisficing_strips(): - return sorted( - suite_ipc98_to_ipc04_strips() + suite_ipc06_strips() + - suite_ipc06_strips_compilations() + suite_ipc08_sat_strips() + - suite_ipc11_sat() + suite_ipc14_sat_strips()) - - -def suite_satisficing(): - return sorted(suite_satisficing_adl() + suite_satisficing_strips()) - - -def suite_all(): - return sorted( - suite_ipc98_to_ipc04() + suite_ipc06() + - suite_ipc06_strips_compilations() + suite_ipc08() + - suite_ipc11() + suite_ipc14() + suite_alternative_formulations()) - - -def parse_args(): - parser = argparse.ArgumentParser() - parser.add_argument("suite", help="suite name") - return parser.parse_args() - - -def main(): - prefix = "suite_" - suite_names = [ - name[len(prefix):] for name in sorted(globals().keys()) - if name.startswith(prefix)] - parser = argparse.ArgumentParser(description=HELP) - parser.add_argument("suite", choices=suite_names, help="suite name") - parser.add_argument( - "--width", default=72, type=int, - help="output line width (default: %(default)s). Use 1 for single " - "column.") - args = parser.parse_args() - suite_func = globals()[prefix + args.suite] - print(textwrap.fill( - str(suite_func()), - width=args.width, - break_long_words=False, - break_on_hyphens=False)) - - -if __name__ == "__main__": - main() diff --git a/experiments/issue671/v1.py b/experiments/issue671/v1.py deleted file mode 100755 index 3ebcedd942..0000000000 --- a/experiments/issue671/v1.py +++ /dev/null @@ -1,54 +0,0 @@ -#! 
/usr/bin/env python -# -*- coding: utf-8 -*- - -import os -import suites -from lab.reports import Attribute, gm - -from common_setup import IssueConfig, IssueExperiment -try: - from relativescatter import RelativeScatterPlotReport - matplotlib = True -except ImportError: - print 'matplotlib not availabe, scatter plots not available' - matplotlib = False - -def main(revisions=None): - benchmarks_dir=os.path.expanduser('~/repos/downward/benchmarks') - suite=suites.suite_all() - - configs = { - IssueConfig('blind', ['--search', 'astar(blind())'], driver_options=['--search-time-limit', '60s']), - IssueConfig('lama-first', [], driver_options=['--alias', 'lama-first', '--search-time-limit', '60s']), - } - - exp = IssueExperiment( - benchmarks_dir=benchmarks_dir, - suite=suite, - revisions=revisions, - configs=configs, - test_suite=['depot:p01.pddl', 'gripper:prob01.pddl'], - processes=4, - email='silvan.sievers@unibas.ch', - ) - - attributes = exp.DEFAULT_TABLE_ATTRIBUTES - attributes.append('translator_*') - - exp.add_comparison_table_step() - - if matplotlib: - for attribute in ["memory", "total_time"]: - for config in configs: - exp.add_report( - RelativeScatterPlotReport( - attributes=[attribute], - filter_config=["{}-{}".format(rev, config.nick) for rev in revisions], - get_category=lambda run1, run2: run1.get("domain"), - ), - outfile="{}-{}-{}.png".format(exp.name, attribute, config.nick) - ) - - exp() - -main(revisions=['issue671-base', 'issue671-v1']) diff --git a/experiments/issue680/common_setup.py b/experiments/issue680/common_setup.py deleted file mode 100644 index 934531f15d..0000000000 --- a/experiments/issue680/common_setup.py +++ /dev/null @@ -1,357 +0,0 @@ -# -*- coding: utf-8 -*- - -import itertools -import os -import platform -import subprocess -import sys - -from lab.environments import LocalEnvironment, MaiaEnvironment -from lab.experiment import ARGPARSER -from lab.steps import Step -from lab import tools - -from downward.experiment import 
FastDownwardExperiment -from downward.reports.absolute import AbsoluteReport -from downward.reports.compare import CompareConfigsReport -from downward.reports.scatter import ScatterPlotReport - - -def parse_args(): - ARGPARSER.add_argument( - "--test", - choices=["yes", "no", "auto"], - default="auto", - dest="test_run", - help="test experiment locally on a small suite if --test=yes or " - "--test=auto and we are not on a cluster") - return ARGPARSER.parse_args() - -ARGS = parse_args() - - -def get_script(): - """Get file name of main script.""" - return tools.get_script_path() - - -def get_script_dir(): - """Get directory of main script. - - Usually a relative directory (depends on how it was called by the user.)""" - return os.path.dirname(get_script()) - - -def get_experiment_name(): - """Get name for experiment. - - Derived from the absolute filename of the main script, e.g. - "/ham/spam/eggs.py" => "spam-eggs".""" - script = os.path.abspath(get_script()) - script_dir = os.path.basename(os.path.dirname(script)) - script_base = os.path.splitext(os.path.basename(script))[0] - return "%s-%s" % (script_dir, script_base) - - -def get_data_dir(): - """Get data dir for the experiment. - - This is the subdirectory "data" of the directory containing - the main script.""" - return os.path.join(get_script_dir(), "data", get_experiment_name()) - - -def get_repo_base(): - """Get base directory of the repository, as an absolute path. - - Search upwards in the directory tree from the main script until a - directory with a subdirectory named ".hg" is found. 
- - Abort if the repo base cannot be found.""" - path = os.path.abspath(get_script_dir()) - while os.path.dirname(path) != path: - if os.path.exists(os.path.join(path, ".hg")): - return path - path = os.path.dirname(path) - sys.exit("repo base could not be found") - - -def is_running_on_cluster(): - node = platform.node() - return ( - "cluster" in node or - node.startswith("gkigrid") or - node in ["habakuk", "turtur"]) - - -def is_test_run(): - return ARGS.test_run == "yes" or ( - ARGS.test_run == "auto" and not is_running_on_cluster()) - - -def get_algo_nick(revision, config_nick): - return "{revision}-{config_nick}".format(**locals()) - - -class IssueConfig(object): - """Hold information about a planner configuration. - - See FastDownwardExperiment.add_algorithm() for documentation of the - constructor's options. - - """ - def __init__(self, nick, component_options, - build_options=None, driver_options=None): - self.nick = nick - self.component_options = component_options - self.build_options = build_options - self.driver_options = driver_options - - -class IssueExperiment(FastDownwardExperiment): - """Subclass of FastDownwardExperiment with some convenience features.""" - - DEFAULT_TEST_SUITE = "gripper:prob01.pddl" - - DEFAULT_TABLE_ATTRIBUTES = [ - "cost", - "coverage", - "error", - "evaluations", - "expansions", - "expansions_until_last_jump", - "generated", - "memory", - "quality", - "run_dir", - "score_evaluations", - "score_expansions", - "score_generated", - "score_memory", - "score_search_time", - "score_total_time", - "search_time", - "total_time", - ] - - DEFAULT_SCATTER_PLOT_ATTRIBUTES = [ - "evaluations", - "expansions", - "expansions_until_last_jump", - "initial_h_value", - "memory", - "search_time", - "total_time", - ] - - PORTFOLIO_ATTRIBUTES = [ - "cost", - "coverage", - "error", - "plan_length", - "run_dir", - ] - - def __init__(self, benchmarks_dir, suite, revisions=[], configs={}, - grid_priority=None, path=None, test_suite=None, - email=None, 
processes=None, - **kwargs): - """ - If *revisions* is specified, it should be a non-empty - list of revisions, which specify which planner versions to use - in the experiment. The same versions are used for translator, - preprocessor and search. :: - - IssueExperiment(revisions=["issue123", "4b3d581643"], ...) - - *configs* must be a non-empty list of IssueConfig objects. :: - - IssueExperiment(..., configs=[ - IssueConfig("ff", ["--search", "eager_greedy(ff())"]), - IssueConfig( - "lama", [], - driver_options=["--alias", "seq-sat-lama-2011"]), - ]) - - *suite* sets the benchmarks for the experiment. It must be a - single string or a list of strings specifying domains or - tasks. The downward.suites module has many predefined - suites. :: - - IssueExperiment(..., suite=["grid", "gripper:prob01.pddl"]) - - from downward import suites - IssueExperiment(..., suite=suites.suite_all()) - IssueExperiment(..., suite=suites.suite_satisficing_with_ipc11()) - IssueExperiment(..., suite=suites.suite_optimal()) - - Use *grid_priority* to set the job priority for cluster - experiments. It must be in the range [-1023, 0] where 0 is the - highest priority. By default the priority is 0. :: - - IssueExperiment(..., grid_priority=-500) - - If *path* is specified, it must be the path to where the - experiment should be built (e.g. - /home/john/experiments/issue123/exp01/). If omitted, the - experiment path is derived automatically from the main - script's filename. Example:: - - script = experiments/issue123/exp01.py --> - path = experiments/issue123/data/issue123-exp01/ - - Specify *test_suite* to set the benchmarks for experiment test - runs. By default the first gripper task is used. - - IssueExperiment(..., test_suite=["depot:pfile1", "tpp:p01.pddl"]) - - If *email* is specified, it should be an email address. This - email address will be notified upon completion of the experiments - if it is run on the cluster. 
- """ - - if is_test_run(): - kwargs["environment"] = LocalEnvironment(processes=processes) - suite = test_suite or self.DEFAULT_TEST_SUITE - elif "environment" not in kwargs: - kwargs["environment"] = MaiaEnvironment( - priority=grid_priority, email=email) - - path = path or get_data_dir() - - FastDownwardExperiment.__init__(self, path=path, **kwargs) - - repo = get_repo_base() - for rev in revisions: - for config in configs: - self.add_algorithm( - get_algo_nick(rev, config.nick), - repo, - rev, - config.component_options, - build_options=config.build_options, - driver_options=config.driver_options) - - self.add_suite(benchmarks_dir, suite) - - self._revisions = revisions - self._configs = configs - - @classmethod - def _is_portfolio(cls, config_nick): - return "fdss" in config_nick - - @classmethod - def get_supported_attributes(cls, config_nick, attributes): - if cls._is_portfolio(config_nick): - return [attr for attr in attributes - if attr in cls.PORTFOLIO_ATTRIBUTES] - return attributes - - def add_absolute_report_step(self, **kwargs): - """Add step that makes an absolute report. - - Absolute reports are useful for experiments that don't - compare revisions. - - The report is written to the experiment evaluation directory. - - All *kwargs* will be passed to the AbsoluteReport class. If - the keyword argument *attributes* is not specified, a - default list of attributes is used. :: - - exp.add_absolute_report_step(attributes=["coverage"]) - - """ - kwargs.setdefault("attributes", self.DEFAULT_TABLE_ATTRIBUTES) - report = AbsoluteReport(**kwargs) - outfile = os.path.join(self.eval_dir, - get_experiment_name() + "." + - report.output_format) - self.add_report(report, outfile=outfile) - self.add_step(Step('publish-absolute-report', - subprocess.call, - ['publish', outfile])) - - def add_comparison_table_step(self, **kwargs): - """Add a step that makes pairwise revision comparisons. - - Create comparative reports for all pairs of Fast Downward - revisions. 
Each report pairs up the runs of the same config and - lists the two absolute attribute values and their difference - for all attributes in kwargs["attributes"]. - - All *kwargs* will be passed to the CompareConfigsReport class. - If the keyword argument *attributes* is not specified, a - default list of attributes is used. :: - - exp.add_comparison_table_step(attributes=["coverage"]) - - """ - kwargs.setdefault("attributes", self.DEFAULT_TABLE_ATTRIBUTES) - - def make_comparison_tables(): - for rev1, rev2 in itertools.combinations(self._revisions, 2): - compared_configs = [] - for config in self._configs: - config_nick = config.nick - compared_configs.append( - ("%s-%s" % (rev1, config_nick), - "%s-%s" % (rev2, config_nick), - "Diff (%s)" % config_nick)) - report = CompareConfigsReport(compared_configs, **kwargs) - outfile = os.path.join( - self.eval_dir, - "%s-%s-%s-compare" % (self.name, rev1, rev2) - + "." + report.output_format) - report(self.eval_dir, outfile) - - def publish_comparison_tables(): - for rev1, rev2 in itertools.combinations(self._revisions, 2): - outfile = os.path.join( - self.eval_dir, - "%s-%s-%s-compare" % (self.name, rev1, rev2) - + ".html") - subprocess.call(['publish', outfile]) - - self.add_step(Step("make-comparison-tables", make_comparison_tables)) - self.add_step(Step("publish-comparison-tables", publish_comparison_tables)) - - def add_scatter_plot_step(self, attributes=None): - """Add a step that creates scatter plots for all revision pairs. - - Create a scatter plot for each combination of attribute, - configuration and revisions pair. If *attributes* is not - specified, a list of common scatter plot attributes is used. - For portfolios all attributes except "cost", "coverage" and - "plan_length" will be ignored. 
:: - - exp.add_scatter_plot_step(attributes=["expansions"]) - - """ - if attributes is None: - attributes = self.DEFAULT_SCATTER_PLOT_ATTRIBUTES - scatter_dir = os.path.join(self.eval_dir, "scatter") - - def make_scatter_plot(config_nick, rev1, rev2, attribute): - name = "-".join([self.name, rev1, rev2, attribute, config_nick]) - print "Make scatter plot for", name - algo1 = "%s-%s" % (rev1, config_nick) - algo2 = "%s-%s" % (rev2, config_nick) - report = ScatterPlotReport( - filter_config=[algo1, algo2], - attributes=[attribute], - get_category=lambda run1, run2: run1["domain"], - legend_location=(1.3, 0.5)) - report( - self.eval_dir, - os.path.join(scatter_dir, rev1 + "-" + rev2, name)) - - def make_scatter_plots(): - for config in self._configs: - for rev1, rev2 in itertools.combinations(self._revisions, 2): - for attribute in self.get_supported_attributes( - config.nick, attributes): - make_scatter_plot(config.nick, rev1, rev2, attribute) - - self.add_step(Step("make-scatter-plots", make_scatter_plots)) diff --git a/experiments/issue680/relativescatter.py b/experiments/issue680/relativescatter.py deleted file mode 100644 index 68e2529f34..0000000000 --- a/experiments/issue680/relativescatter.py +++ /dev/null @@ -1,106 +0,0 @@ -# -*- coding: utf-8 -*- - -from collections import defaultdict - -from matplotlib import ticker - -from downward.reports.scatter import ScatterPlotReport -from downward.reports.plot import PlotReport, Matplotlib, MatplotlibPlot - - -# TODO: handle outliers - -# TODO: this is mostly copied from ScatterMatplotlib (scatter.py) -class RelativeScatterMatplotlib(Matplotlib): - @classmethod - def _plot(cls, report, axes, categories, styles): - # Display grid - axes.grid(b=True, linestyle='-', color='0.75') - - has_points = False - # Generate the scatter plots - for category, coords in sorted(categories.items()): - X, Y = zip(*coords) - axes.scatter(X, Y, s=42, label=category, **styles[category]) - if X and Y: - has_points = True - - if 
report.xscale == 'linear' or report.yscale == 'linear': - plot_size = report.missing_val * 1.01 - else: - plot_size = report.missing_val * 1.25 - - # make 5 ticks above and below 1 - yticks = [] - tick_step = report.ylim_top**(1/5.0) - for i in xrange(-5, 6): - yticks.append(tick_step**i) - axes.set_yticks(yticks) - axes.get_yaxis().set_major_formatter(ticker.ScalarFormatter()) - - axes.set_xlim(report.xlim_left or -1, report.xlim_right or plot_size) - axes.set_ylim(report.ylim_bottom or -1, report.ylim_top or plot_size) - - for axis in [axes.xaxis, axes.yaxis]: - MatplotlibPlot.change_axis_formatter(axis, - report.missing_val if report.show_missing else None) - return has_points - - - -class RelativeScatterPlotReport(ScatterPlotReport): - """ - Generate a scatter plot that shows how a specific attribute in two - configurations. The attribute value in config 1 is shown on the - x-axis and the relation to the value in config 2 on the y-axis. - """ - - def __init__(self, show_missing=True, get_category=None, **kwargs): - ScatterPlotReport.__init__(self, show_missing, get_category, **kwargs) - if self.output_format == 'tex': - raise "not supported" - else: - self.writer = RelativeScatterMatplotlib - - def _fill_categories(self, runs): - # We discard the *runs* parameter. 
- # Map category names to value tuples - categories = defaultdict(list) - self.ylim_bottom = 2 - self.ylim_top = 0.5 - self.xlim_left = float("inf") - for (domain, problem), runs in self.problem_runs.items(): - if len(runs) != 2: - continue - run1, run2 = runs - assert (run1['config'] == self.configs[0] and - run2['config'] == self.configs[1]) - val1 = run1.get(self.attribute) - val2 = run2.get(self.attribute) - if val1 is None or val2 is None: - continue - category = self.get_category(run1, run2) - assert val1 > 0, (domain, problem, self.configs[0], val1) - assert val2 > 0, (domain, problem, self.configs[1], val2) - x = val1 - y = val2 / float(val1) - - categories[category].append((x, y)) - - self.ylim_top = max(self.ylim_top, y) - self.ylim_bottom = min(self.ylim_bottom, y) - self.xlim_left = min(self.xlim_left, x) - - # center around 1 - if self.ylim_bottom < 1: - self.ylim_top = max(self.ylim_top, 1 / float(self.ylim_bottom)) - if self.ylim_top > 1: - self.ylim_bottom = min(self.ylim_bottom, 1 / float(self.ylim_top)) - return categories - - def _set_scales(self, xscale, yscale): - # ScatterPlots use log-scaling on the x-axis by default. - default_xscale = 'log' - if self.attribute and self.attribute in self.LINEAR: - default_xscale = 'linear' - PlotReport._set_scales(self, xscale or default_xscale, 'log') diff --git a/experiments/issue680/suites.py b/experiments/issue680/suites.py deleted file mode 100755 index 4615212cfd..0000000000 --- a/experiments/issue680/suites.py +++ /dev/null @@ -1,350 +0,0 @@ -#! /usr/bin/env python -# -*- coding: utf-8 -*- - -import argparse -import textwrap - - -HELP = "Convert suite name to list of domains or tasks." 
- - -def suite_alternative_formulations(): - return ['airport-adl', 'no-mprime', 'no-mystery'] - - -def suite_ipc98_to_ipc04_adl(): - return [ - 'assembly', 'miconic-fulladl', 'miconic-simpleadl', - 'optical-telegraphs', 'philosophers', 'psr-large', - 'psr-middle', 'schedule', - ] - - -def suite_ipc98_to_ipc04_strips(): - return [ - 'airport', 'blocks', 'depot', 'driverlog', 'freecell', 'grid', - 'gripper', 'logistics00', 'logistics98', 'miconic', 'movie', - 'mprime', 'mystery', 'pipesworld-notankage', 'psr-small', - 'satellite', 'zenotravel', - ] - - -def suite_ipc98_to_ipc04(): - # All IPC1-4 domains, including the trivial Movie. - return sorted(suite_ipc98_to_ipc04_adl() + suite_ipc98_to_ipc04_strips()) - - -def suite_ipc06_adl(): - return [ - 'openstacks', - 'pathways', - 'trucks', - ] - - -def suite_ipc06_strips_compilations(): - return [ - 'openstacks-strips', - 'pathways-noneg', - 'trucks-strips', - ] - - -def suite_ipc06_strips(): - return [ - 'pipesworld-tankage', - 'rovers', - 'storage', - 'tpp', - ] - - -def suite_ipc06(): - return sorted(suite_ipc06_adl() + suite_ipc06_strips()) - - -def suite_ipc08_common_strips(): - return [ - 'parcprinter-08-strips', - 'pegsol-08-strips', - 'scanalyzer-08-strips', - ] - - -def suite_ipc08_opt_adl(): - return ['openstacks-opt08-adl'] - - -def suite_ipc08_opt_strips(): - return sorted(suite_ipc08_common_strips() + [ - 'elevators-opt08-strips', - 'openstacks-opt08-strips', - 'sokoban-opt08-strips', - 'transport-opt08-strips', - 'woodworking-opt08-strips', - ]) - - -def suite_ipc08_opt(): - return sorted(suite_ipc08_opt_strips() + suite_ipc08_opt_adl()) - - -def suite_ipc08_sat_adl(): - return ['openstacks-sat08-adl'] - - -def suite_ipc08_sat_strips(): - return sorted(suite_ipc08_common_strips() + [ - # Note: cyber-security is missing. 
- 'elevators-sat08-strips', - 'openstacks-sat08-strips', - 'sokoban-sat08-strips', - 'transport-sat08-strips', - 'woodworking-sat08-strips', - ]) - - -def suite_ipc08_sat(): - return sorted(suite_ipc08_sat_strips() + suite_ipc08_sat_adl()) - - -def suite_ipc08(): - return sorted(set(suite_ipc08_opt() + suite_ipc08_sat())) - - -def suite_ipc11_opt(): - return [ - 'barman-opt11-strips', - 'elevators-opt11-strips', - 'floortile-opt11-strips', - 'nomystery-opt11-strips', - 'openstacks-opt11-strips', - 'parcprinter-opt11-strips', - 'parking-opt11-strips', - 'pegsol-opt11-strips', - 'scanalyzer-opt11-strips', - 'sokoban-opt11-strips', - 'tidybot-opt11-strips', - 'transport-opt11-strips', - 'visitall-opt11-strips', - 'woodworking-opt11-strips', - ] - - -def suite_ipc11_sat(): - return [ - 'barman-sat11-strips', - 'elevators-sat11-strips', - 'floortile-sat11-strips', - 'nomystery-sat11-strips', - 'openstacks-sat11-strips', - 'parcprinter-sat11-strips', - 'parking-sat11-strips', - 'pegsol-sat11-strips', - 'scanalyzer-sat11-strips', - 'sokoban-sat11-strips', - 'tidybot-sat11-strips', - 'transport-sat11-strips', - 'visitall-sat11-strips', - 'woodworking-sat11-strips', - ] - - -def suite_ipc11(): - return sorted(suite_ipc11_opt() + suite_ipc11_sat()) - - -def suite_ipc14_agl_adl(): - return [ - 'cavediving-14-adl', - 'citycar-sat14-adl', - 'maintenance-sat14-adl', - ] - - -def suite_ipc14_agl_strips(): - return [ - 'barman-sat14-strips', - 'childsnack-sat14-strips', - 'floortile-sat14-strips', - 'ged-sat14-strips', - 'hiking-agl14-strips', - 'openstacks-agl14-strips', - 'parking-sat14-strips', - 'tetris-sat14-strips', - 'thoughtful-sat14-strips', - 'transport-sat14-strips', - 'visitall-sat14-strips', - ] - - -def suite_ipc14_agl(): - return sorted(suite_ipc14_agl_adl() + suite_ipc14_agl_strips()) - - -def suite_ipc14_mco_adl(): - return [ - 'cavediving-14-adl', - 'citycar-sat14-adl', - 'maintenance-sat14-adl', - ] - - -def suite_ipc14_mco_strips(): - return [ - 
'barman-mco14-strips', - 'childsnack-sat14-strips', - 'floortile-sat14-strips', - 'ged-sat14-strips', - 'hiking-sat14-strips', - 'openstacks-sat14-strips', - 'parking-sat14-strips', - 'tetris-sat14-strips', - 'thoughtful-mco14-strips', - 'transport-sat14-strips', - 'visitall-sat14-strips', - ] - - -def suite_ipc14_mco(): - return sorted(suite_ipc14_mco_adl() + suite_ipc14_mco_strips()) - - -def suite_ipc14_opt_adl(): - return [ - 'cavediving-14-adl', - 'citycar-opt14-adl', - 'maintenance-opt14-adl', - ] - - -def suite_ipc14_opt_strips(): - return [ - 'barman-opt14-strips', - 'childsnack-opt14-strips', - 'floortile-opt14-strips', - 'ged-opt14-strips', - 'hiking-opt14-strips', - 'openstacks-opt14-strips', - 'parking-opt14-strips', - 'tetris-opt14-strips', - 'tidybot-opt14-strips', - 'transport-opt14-strips', - 'visitall-opt14-strips', - ] - - -def suite_ipc14_opt(): - return sorted(suite_ipc14_opt_adl() + suite_ipc14_opt_strips()) - - -def suite_ipc14_sat_adl(): - return [ - 'cavediving-14-adl', - 'citycar-sat14-adl', - 'maintenance-sat14-adl', - ] - - -def suite_ipc14_sat_strips(): - return [ - 'barman-sat14-strips', - 'childsnack-sat14-strips', - 'floortile-sat14-strips', - 'ged-sat14-strips', - 'hiking-sat14-strips', - 'openstacks-sat14-strips', - 'parking-sat14-strips', - 'tetris-sat14-strips', - 'thoughtful-sat14-strips', - 'transport-sat14-strips', - 'visitall-sat14-strips', - ] - - -def suite_ipc14_sat(): - return sorted(suite_ipc14_sat_adl() + suite_ipc14_sat_strips()) - - -def suite_ipc14(): - return sorted(set( - suite_ipc14_agl() + suite_ipc14_mco() + - suite_ipc14_opt() + suite_ipc14_sat())) - - -def suite_unsolvable(): - return sorted( - ['mystery:prob%02d.pddl' % index - for index in [4, 5, 7, 8, 12, 16, 18, 21, 22, 23, 24]] + - ['miconic-fulladl:f21-3.pddl', 'miconic-fulladl:f30-2.pddl']) - - -def suite_optimal_adl(): - return sorted( - suite_ipc98_to_ipc04_adl() + suite_ipc06_adl() + - suite_ipc08_opt_adl() + suite_ipc14_opt_adl()) - - -def 
suite_optimal_strips(): - return sorted( - suite_ipc98_to_ipc04_strips() + suite_ipc06_strips() + - suite_ipc06_strips_compilations() + suite_ipc08_opt_strips() + - suite_ipc11_opt() + suite_ipc14_opt_strips()) - - -def suite_optimal(): - return sorted(suite_optimal_adl() + suite_optimal_strips()) - - -def suite_satisficing_adl(): - return sorted( - suite_ipc98_to_ipc04_adl() + suite_ipc06_adl() + - suite_ipc08_sat_adl() + suite_ipc14_sat_adl()) - - -def suite_satisficing_strips(): - return sorted( - suite_ipc98_to_ipc04_strips() + suite_ipc06_strips() + - suite_ipc06_strips_compilations() + suite_ipc08_sat_strips() + - suite_ipc11_sat() + suite_ipc14_sat_strips()) - - -def suite_satisficing(): - return sorted(suite_satisficing_adl() + suite_satisficing_strips()) - - -def suite_all(): - return sorted( - suite_ipc98_to_ipc04() + suite_ipc06() + - suite_ipc06_strips_compilations() + suite_ipc08() + - suite_ipc11() + suite_ipc14() + suite_alternative_formulations()) - - -def parse_args(): - parser = argparse.ArgumentParser() - parser.add_argument("suite", help="suite name") - return parser.parse_args() - - -def main(): - prefix = "suite_" - suite_names = [ - name[len(prefix):] for name in sorted(globals().keys()) - if name.startswith(prefix)] - parser = argparse.ArgumentParser(description=HELP) - parser.add_argument("suite", choices=suite_names, help="suite name") - parser.add_argument( - "--width", default=72, type=int, - help="output line width (default: %(default)s). Use 1 for single " - "column.") - args = parser.parse_args() - suite_func = globals()[prefix + args.suite] - print(textwrap.fill( - str(suite_func()), - width=args.width, - break_long_words=False, - break_on_hyphens=False)) - - -if __name__ == "__main__": - main() diff --git a/experiments/issue680/v1-potential.py b/experiments/issue680/v1-potential.py deleted file mode 100755 index 783fb403bf..0000000000 --- a/experiments/issue680/v1-potential.py +++ /dev/null @@ -1,82 +0,0 @@ -#! 
/usr/bin/env python -# -*- coding: utf-8 -*- - -import os -import suites -from lab.reports import Attribute, gm - -from common_setup import IssueConfig, IssueExperiment -from relativescatter import RelativeScatterPlotReport - -def main(revisions=None): - benchmarks_dir=os.path.expanduser('~/projects/downward/benchmarks') - suite=suites.suite_optimal() - - configs = [] - - for osi in ['103', '107']: - for cplex in ['1251', '1263']: - if osi == '107' and cplex == '1251': - # incompatible versions - continue - configs += [ - IssueConfig( - 'astar_initial_state_potential_OSI%s_CPLEX%s' % (osi, cplex), - ['--search', 'astar(initial_state_potential())'], - build_options=['issue680_OSI%s_CPLEX%s' % (osi, cplex)], - driver_options=['--build=issue680_OSI%s_CPLEX%s' % (osi, cplex)] - ), - IssueConfig( - 'astar_sample_based_potentials_OSI%s_CPLEX%s' % (osi, cplex), - ['--search', 'astar(sample_based_potentials())'], - build_options=['issue680_OSI%s_CPLEX%s' % (osi, cplex)], - driver_options=['--build=issue680_OSI%s_CPLEX%s' % (osi, cplex)] - ), - IssueConfig( - 'astar_all_states_potential_OSI%s_CPLEX%s' % (osi, cplex), - ['--search', 'astar(all_states_potential())'], - build_options=['issue680_OSI%s_CPLEX%s' % (osi, cplex)], - driver_options=['--build=issue680_OSI%s_CPLEX%s' % (osi, cplex)] - ), - ] - - exp = IssueExperiment( - benchmarks_dir=benchmarks_dir, - suite=suite, - revisions=revisions, - configs=configs, - test_suite=['depot:p01.pddl', 'gripper:prob01.pddl'], - processes=4, - email='florian.pommerening@unibas.ch', - ) - - attributes = exp.DEFAULT_TABLE_ATTRIBUTES - - domains = suites.suite_optimal_strips() - - exp.add_absolute_report_step(filter_domain=domains) - - for attribute in ["memory", "total_time"]: - for config in ['astar_initial_state_potential', 'astar_sample_based_potentials', 'astar_all_states_potential']: - exp.add_report( - RelativeScatterPlotReport( - attributes=[attribute], - filter_config=["{}-{}_OSI{}_CPLEX1263".format(revisions[0], config, osi) 
for osi in ['103', '107']], - filter_domain=domains, - get_category=lambda run1, run2: run1.get("domain"), - ), - outfile="{}-{}-{}_CPLEX1263.png".format(exp.name, attribute, config) - ) - exp.add_report( - RelativeScatterPlotReport( - attributes=[attribute], - filter_config=["{}-{}_OSI103_CPLEX{}".format(revisions[0], config, cplex) for cplex in ['1251', '1263']], - filter_domain=domains, - get_category=lambda run1, run2: run1.get("domain"), - ), - outfile="{}-{}-{}_OSI103.png".format(exp.name, attribute, config) - ) - - exp() - -main(revisions=['issue680-v1']) diff --git a/experiments/issue680/v1.py b/experiments/issue680/v1.py deleted file mode 100755 index 5fabc6a629..0000000000 --- a/experiments/issue680/v1.py +++ /dev/null @@ -1,82 +0,0 @@ -#! /usr/bin/env python -# -*- coding: utf-8 -*- - -import os -import suites -from lab.reports import Attribute, gm - -from common_setup import IssueConfig, IssueExperiment -from relativescatter import RelativeScatterPlotReport - -def main(revisions=None): - benchmarks_dir=os.path.expanduser('~/projects/downward/benchmarks') - suite=suites.suite_optimal() - - configs = [] - - for osi in ['103', '107']: - for cplex in ['1251', '1263']: - if osi == '107' and cplex == '1251': - # incompatible versions - continue - configs += [ - IssueConfig( - 'astar_seq_landmarks_OSI%s_CPLEX%s' % (osi, cplex), - ['--search', 'astar(operatorcounting([state_equation_constraints(), lmcut_constraints()]))'], - build_options=['issue680_OSI%s_CPLEX%s' % (osi, cplex)], - driver_options=['--build=issue680_OSI%s_CPLEX%s' % (osi, cplex)] - ), - IssueConfig( - 'astar_diverse_potentials_OSI%s_CPLEX%s' % (osi, cplex), - ['--search', 'astar(diverse_potentials())'], - build_options=['issue680_OSI%s_CPLEX%s' % (osi, cplex)], - driver_options=['--build=issue680_OSI%s_CPLEX%s' % (osi, cplex)] - ), - IssueConfig( - 'astar_lmcount_OSI%s_CPLEX%s' % (osi, cplex), - ['--search', 
'astar(lmcount(lm_merged([lm_rhw(),lm_hm(m=1)]),admissible=true,optimal=true),mpd=true)'], - build_options=['issue680_OSI%s_CPLEX%s' % (osi, cplex)], - driver_options=['--build=issue680_OSI%s_CPLEX%s' % (osi, cplex)] - ), - ] - - exp = IssueExperiment( - benchmarks_dir=benchmarks_dir, - suite=suite, - revisions=revisions, - configs=configs, - test_suite=['depot:p01.pddl', 'gripper:prob01.pddl'], - processes=4, - email='florian.pommerening@unibas.ch', - ) - - attributes = exp.DEFAULT_TABLE_ATTRIBUTES - - domains = suites.suite_optimal_strips() - - exp.add_absolute_report_step(filter_domain=domains) - - for attribute in ["memory", "total_time"]: - for config in ['astar_seq_landmarks', 'astar_diverse_potentials', 'astar_lmcount']: - exp.add_report( - RelativeScatterPlotReport( - attributes=[attribute], - filter_config=["{}-{}_OSI{}_CPLEX1263".format(revisions[0], config, osi) for osi in ['103', '107']], - filter_domain=domains, - get_category=lambda run1, run2: run1.get("domain"), - ), - outfile="{}-{}-{}_CPLEX1263.png".format(exp.name, attribute, config) - ) - exp.add_report( - RelativeScatterPlotReport( - attributes=[attribute], - filter_config=["{}-{}_OSI103_CPLEX{}".format(revisions[0], config, cplex) for cplex in ['1251', '1263']], - filter_domain=domains, - get_category=lambda run1, run2: run1.get("domain"), - ), - outfile="{}-{}-{}_OSI103.png".format(exp.name, attribute, config) - ) - - exp() - -main(revisions=['issue680-v1']) diff --git a/experiments/issue680/v2-potential.py b/experiments/issue680/v2-potential.py deleted file mode 100755 index bd40c0c751..0000000000 --- a/experiments/issue680/v2-potential.py +++ /dev/null @@ -1,82 +0,0 @@ -#! 
/usr/bin/env python -# -*- coding: utf-8 -*- - -import os -import suites -from lab.reports import Attribute, gm - -from common_setup import IssueConfig, IssueExperiment -from relativescatter import RelativeScatterPlotReport - -def main(revisions=None): - benchmarks_dir=os.path.expanduser('~/projects/downward/benchmarks') - suite=suites.suite_optimal() - - configs = [] - - for osi in ['103', '107']: - for cplex in ['1251', '1263']: - if osi == '107' and cplex == '1251': - # incompatible versions - continue - configs += [ - IssueConfig( - 'astar_initial_state_potential_OSI%s_CPLEX%s' % (osi, cplex), - ['--search', 'astar(initial_state_potential())'], - build_options=['issue680_OSI%s_CPLEX%s' % (osi, cplex)], - driver_options=['--build=issue680_OSI%s_CPLEX%s' % (osi, cplex)] - ), - IssueConfig( - 'astar_sample_based_potentials_OSI%s_CPLEX%s' % (osi, cplex), - ['--search', 'astar(sample_based_potentials())'], - build_options=['issue680_OSI%s_CPLEX%s' % (osi, cplex)], - driver_options=['--build=issue680_OSI%s_CPLEX%s' % (osi, cplex)] - ), - IssueConfig( - 'astar_all_states_potential_OSI%s_CPLEX%s' % (osi, cplex), - ['--search', 'astar(all_states_potential())'], - build_options=['issue680_OSI%s_CPLEX%s' % (osi, cplex)], - driver_options=['--build=issue680_OSI%s_CPLEX%s' % (osi, cplex)] - ), - ] - - exp = IssueExperiment( - benchmarks_dir=benchmarks_dir, - suite=suite, - revisions=revisions, - configs=configs, - test_suite=['depot:p01.pddl', 'gripper:prob01.pddl'], - processes=4, - email='florian.pommerening@unibas.ch', - ) - - attributes = exp.DEFAULT_TABLE_ATTRIBUTES - - domains = suites.suite_optimal_strips() - - exp.add_absolute_report_step(filter_domain=domains) - - for attribute in ["memory", "total_time"]: - for config in ['astar_initial_state_potential', 'astar_sample_based_potentials', 'astar_all_states_potential']: - exp.add_report( - RelativeScatterPlotReport( - attributes=[attribute], - filter_config=["{}-{}_OSI{}_CPLEX1263".format(revisions[0], config, osi) 
for osi in ['103', '107']], - filter_domain=domains, - get_category=lambda run1, run2: run1.get("domain"), - ), - outfile="{}-{}-{}_CPLEX1263.png".format(exp.name, attribute, config) - ) - exp.add_report( - RelativeScatterPlotReport( - attributes=[attribute], - filter_config=["{}-{}_OSI103_CPLEX{}".format(revisions[0], config, cplex) for cplex in ['1251', '1263']], - filter_domain=domains, - get_category=lambda run1, run2: run1.get("domain"), - ), - outfile="{}-{}-{}_OSI103.png".format(exp.name, attribute, config) - ) - - exp() - -main(revisions=['issue680-v2']) diff --git a/experiments/issue680/v2.py b/experiments/issue680/v2.py deleted file mode 100755 index 86c95b06fb..0000000000 --- a/experiments/issue680/v2.py +++ /dev/null @@ -1,82 +0,0 @@ -#! /usr/bin/env python -# -*- coding: utf-8 -*- - -import os -import suites -from lab.reports import Attribute, gm - -from common_setup import IssueConfig, IssueExperiment -from relativescatter import RelativeScatterPlotReport - -def main(revisions=None): - benchmarks_dir=os.path.expanduser('~/projects/downward/benchmarks') - suite=suites.suite_optimal() - - configs = [] - - for osi in ['103', '107']: - for cplex in ['1251', '1263']: - if osi == '107' and cplex == '1251': - # incompatible versions - continue - configs += [ - IssueConfig( - 'astar_seq_landmarks_OSI%s_CPLEX%s' % (osi, cplex), - ['--search', 'astar(operatorcounting([state_equation_constraints(), lmcut_constraints()]))'], - build_options=['issue680_OSI%s_CPLEX%s' % (osi, cplex)], - driver_options=['--build=issue680_OSI%s_CPLEX%s' % (osi, cplex)] - ), - IssueConfig( - 'astar_diverse_potentials_OSI%s_CPLEX%s' % (osi, cplex), - ['--search', 'astar(diverse_potentials())'], - build_options=['issue680_OSI%s_CPLEX%s' % (osi, cplex)], - driver_options=['--build=issue680_OSI%s_CPLEX%s' % (osi, cplex)] - ), - IssueConfig( - 'astar_lmcount_OSI%s_CPLEX%s' % (osi, cplex), - ['--search', 
'astar(lmcount(lm_merged([lm_rhw(),lm_hm(m=1)]),admissible=true,optimal=true),mpd=true)'], - build_options=['issue680_OSI%s_CPLEX%s' % (osi, cplex)], - driver_options=['--build=issue680_OSI%s_CPLEX%s' % (osi, cplex)] - ), - ] - - exp = IssueExperiment( - benchmarks_dir=benchmarks_dir, - suite=suite, - revisions=revisions, - configs=configs, - test_suite=['depot:p01.pddl', 'gripper:prob01.pddl'], - processes=4, - email='florian.pommerening@unibas.ch', - ) - - attributes = exp.DEFAULT_TABLE_ATTRIBUTES - - domains = suites.suite_optimal_strips() - - exp.add_absolute_report_step(filter_domain=domains) - - for attribute in ["memory", "total_time"]: - for config in ['astar_seq_landmarks', 'astar_diverse_potentials', 'astar_lmcount']: - exp.add_report( - RelativeScatterPlotReport( - attributes=[attribute], - filter_config=["{}-{}_OSI{}_CPLEX1263".format(revisions[0], config, osi) for osi in ['103', '107']], - filter_domain=domains, - get_category=lambda run1, run2: run1.get("domain"), - ), - outfile="{}-{}-{}_CPLEX1263.png".format(exp.name, attribute, config) - ) - exp.add_report( - RelativeScatterPlotReport( - attributes=[attribute], - filter_config=["{}-{}_OSI103_CPLEX{}".format(revisions[0], config, cplex) for cplex in ['1251', '1263']], - filter_domain=domains, - get_category=lambda run1, run2: run1.get("domain"), - ), - outfile="{}-{}-{}_OSI103.png".format(exp.name, attribute, config) - ) - - exp() - -main(revisions=['issue680-v2']) diff --git a/experiments/issue682/common_setup.py b/experiments/issue682/common_setup.py deleted file mode 100644 index 934531f15d..0000000000 --- a/experiments/issue682/common_setup.py +++ /dev/null @@ -1,357 +0,0 @@ -# -*- coding: utf-8 -*- - -import itertools -import os -import platform -import subprocess -import sys - -from lab.environments import LocalEnvironment, MaiaEnvironment -from lab.experiment import ARGPARSER -from lab.steps import Step -from lab import tools - -from downward.experiment import FastDownwardExperiment -from 
downward.reports.absolute import AbsoluteReport -from downward.reports.compare import CompareConfigsReport -from downward.reports.scatter import ScatterPlotReport - - -def parse_args(): - ARGPARSER.add_argument( - "--test", - choices=["yes", "no", "auto"], - default="auto", - dest="test_run", - help="test experiment locally on a small suite if --test=yes or " - "--test=auto and we are not on a cluster") - return ARGPARSER.parse_args() - -ARGS = parse_args() - - -def get_script(): - """Get file name of main script.""" - return tools.get_script_path() - - -def get_script_dir(): - """Get directory of main script. - - Usually a relative directory (depends on how it was called by the user.)""" - return os.path.dirname(get_script()) - - -def get_experiment_name(): - """Get name for experiment. - - Derived from the absolute filename of the main script, e.g. - "/ham/spam/eggs.py" => "spam-eggs".""" - script = os.path.abspath(get_script()) - script_dir = os.path.basename(os.path.dirname(script)) - script_base = os.path.splitext(os.path.basename(script))[0] - return "%s-%s" % (script_dir, script_base) - - -def get_data_dir(): - """Get data dir for the experiment. - - This is the subdirectory "data" of the directory containing - the main script.""" - return os.path.join(get_script_dir(), "data", get_experiment_name()) - - -def get_repo_base(): - """Get base directory of the repository, as an absolute path. - - Search upwards in the directory tree from the main script until a - directory with a subdirectory named ".hg" is found. 
- - Abort if the repo base cannot be found.""" - path = os.path.abspath(get_script_dir()) - while os.path.dirname(path) != path: - if os.path.exists(os.path.join(path, ".hg")): - return path - path = os.path.dirname(path) - sys.exit("repo base could not be found") - - -def is_running_on_cluster(): - node = platform.node() - return ( - "cluster" in node or - node.startswith("gkigrid") or - node in ["habakuk", "turtur"]) - - -def is_test_run(): - return ARGS.test_run == "yes" or ( - ARGS.test_run == "auto" and not is_running_on_cluster()) - - -def get_algo_nick(revision, config_nick): - return "{revision}-{config_nick}".format(**locals()) - - -class IssueConfig(object): - """Hold information about a planner configuration. - - See FastDownwardExperiment.add_algorithm() for documentation of the - constructor's options. - - """ - def __init__(self, nick, component_options, - build_options=None, driver_options=None): - self.nick = nick - self.component_options = component_options - self.build_options = build_options - self.driver_options = driver_options - - -class IssueExperiment(FastDownwardExperiment): - """Subclass of FastDownwardExperiment with some convenience features.""" - - DEFAULT_TEST_SUITE = "gripper:prob01.pddl" - - DEFAULT_TABLE_ATTRIBUTES = [ - "cost", - "coverage", - "error", - "evaluations", - "expansions", - "expansions_until_last_jump", - "generated", - "memory", - "quality", - "run_dir", - "score_evaluations", - "score_expansions", - "score_generated", - "score_memory", - "score_search_time", - "score_total_time", - "search_time", - "total_time", - ] - - DEFAULT_SCATTER_PLOT_ATTRIBUTES = [ - "evaluations", - "expansions", - "expansions_until_last_jump", - "initial_h_value", - "memory", - "search_time", - "total_time", - ] - - PORTFOLIO_ATTRIBUTES = [ - "cost", - "coverage", - "error", - "plan_length", - "run_dir", - ] - - def __init__(self, benchmarks_dir, suite, revisions=[], configs={}, - grid_priority=None, path=None, test_suite=None, - email=None, 
processes=None, - **kwargs): - """ - If *revisions* is specified, it should be a non-empty - list of revisions, which specify which planner versions to use - in the experiment. The same versions are used for translator, - preprocessor and search. :: - - IssueExperiment(revisions=["issue123", "4b3d581643"], ...) - - *configs* must be a non-empty list of IssueConfig objects. :: - - IssueExperiment(..., configs=[ - IssueConfig("ff", ["--search", "eager_greedy(ff())"]), - IssueConfig( - "lama", [], - driver_options=["--alias", "seq-sat-lama-2011"]), - ]) - - *suite* sets the benchmarks for the experiment. It must be a - single string or a list of strings specifying domains or - tasks. The downward.suites module has many predefined - suites. :: - - IssueExperiment(..., suite=["grid", "gripper:prob01.pddl"]) - - from downward import suites - IssueExperiment(..., suite=suites.suite_all()) - IssueExperiment(..., suite=suites.suite_satisficing_with_ipc11()) - IssueExperiment(..., suite=suites.suite_optimal()) - - Use *grid_priority* to set the job priority for cluster - experiments. It must be in the range [-1023, 0] where 0 is the - highest priority. By default the priority is 0. :: - - IssueExperiment(..., grid_priority=-500) - - If *path* is specified, it must be the path to where the - experiment should be built (e.g. - /home/john/experiments/issue123/exp01/). If omitted, the - experiment path is derived automatically from the main - script's filename. Example:: - - script = experiments/issue123/exp01.py --> - path = experiments/issue123/data/issue123-exp01/ - - Specify *test_suite* to set the benchmarks for experiment test - runs. By default the first gripper task is used. - - IssueExperiment(..., test_suite=["depot:pfile1", "tpp:p01.pddl"]) - - If *email* is specified, it should be an email address. This - email address will be notified upon completion of the experiments - if it is run on the cluster. 
- """ - - if is_test_run(): - kwargs["environment"] = LocalEnvironment(processes=processes) - suite = test_suite or self.DEFAULT_TEST_SUITE - elif "environment" not in kwargs: - kwargs["environment"] = MaiaEnvironment( - priority=grid_priority, email=email) - - path = path or get_data_dir() - - FastDownwardExperiment.__init__(self, path=path, **kwargs) - - repo = get_repo_base() - for rev in revisions: - for config in configs: - self.add_algorithm( - get_algo_nick(rev, config.nick), - repo, - rev, - config.component_options, - build_options=config.build_options, - driver_options=config.driver_options) - - self.add_suite(benchmarks_dir, suite) - - self._revisions = revisions - self._configs = configs - - @classmethod - def _is_portfolio(cls, config_nick): - return "fdss" in config_nick - - @classmethod - def get_supported_attributes(cls, config_nick, attributes): - if cls._is_portfolio(config_nick): - return [attr for attr in attributes - if attr in cls.PORTFOLIO_ATTRIBUTES] - return attributes - - def add_absolute_report_step(self, **kwargs): - """Add step that makes an absolute report. - - Absolute reports are useful for experiments that don't - compare revisions. - - The report is written to the experiment evaluation directory. - - All *kwargs* will be passed to the AbsoluteReport class. If - the keyword argument *attributes* is not specified, a - default list of attributes is used. :: - - exp.add_absolute_report_step(attributes=["coverage"]) - - """ - kwargs.setdefault("attributes", self.DEFAULT_TABLE_ATTRIBUTES) - report = AbsoluteReport(**kwargs) - outfile = os.path.join(self.eval_dir, - get_experiment_name() + "." + - report.output_format) - self.add_report(report, outfile=outfile) - self.add_step(Step('publish-absolute-report', - subprocess.call, - ['publish', outfile])) - - def add_comparison_table_step(self, **kwargs): - """Add a step that makes pairwise revision comparisons. - - Create comparative reports for all pairs of Fast Downward - revisions. 
Each report pairs up the runs of the same config and - lists the two absolute attribute values and their difference - for all attributes in kwargs["attributes"]. - - All *kwargs* will be passed to the CompareConfigsReport class. - If the keyword argument *attributes* is not specified, a - default list of attributes is used. :: - - exp.add_comparison_table_step(attributes=["coverage"]) - - """ - kwargs.setdefault("attributes", self.DEFAULT_TABLE_ATTRIBUTES) - - def make_comparison_tables(): - for rev1, rev2 in itertools.combinations(self._revisions, 2): - compared_configs = [] - for config in self._configs: - config_nick = config.nick - compared_configs.append( - ("%s-%s" % (rev1, config_nick), - "%s-%s" % (rev2, config_nick), - "Diff (%s)" % config_nick)) - report = CompareConfigsReport(compared_configs, **kwargs) - outfile = os.path.join( - self.eval_dir, - "%s-%s-%s-compare" % (self.name, rev1, rev2) - + "." + report.output_format) - report(self.eval_dir, outfile) - - def publish_comparison_tables(): - for rev1, rev2 in itertools.combinations(self._revisions, 2): - outfile = os.path.join( - self.eval_dir, - "%s-%s-%s-compare" % (self.name, rev1, rev2) - + ".html") - subprocess.call(['publish', outfile]) - - self.add_step(Step("make-comparison-tables", make_comparison_tables)) - self.add_step(Step("publish-comparison-tables", publish_comparison_tables)) - - def add_scatter_plot_step(self, attributes=None): - """Add a step that creates scatter plots for all revision pairs. - - Create a scatter plot for each combination of attribute, - configuration and revisions pair. If *attributes* is not - specified, a list of common scatter plot attributes is used. - For portfolios all attributes except "cost", "coverage" and - "plan_length" will be ignored. 
:: - - exp.add_scatter_plot_step(attributes=["expansions"]) - - """ - if attributes is None: - attributes = self.DEFAULT_SCATTER_PLOT_ATTRIBUTES - scatter_dir = os.path.join(self.eval_dir, "scatter") - - def make_scatter_plot(config_nick, rev1, rev2, attribute): - name = "-".join([self.name, rev1, rev2, attribute, config_nick]) - print "Make scatter plot for", name - algo1 = "%s-%s" % (rev1, config_nick) - algo2 = "%s-%s" % (rev2, config_nick) - report = ScatterPlotReport( - filter_config=[algo1, algo2], - attributes=[attribute], - get_category=lambda run1, run2: run1["domain"], - legend_location=(1.3, 0.5)) - report( - self.eval_dir, - os.path.join(scatter_dir, rev1 + "-" + rev2, name)) - - def make_scatter_plots(): - for config in self._configs: - for rev1, rev2 in itertools.combinations(self._revisions, 2): - for attribute in self.get_supported_attributes( - config.nick, attributes): - make_scatter_plot(config.nick, rev1, rev2, attribute) - - self.add_step(Step("make-scatter-plots", make_scatter_plots)) diff --git a/experiments/issue682/ms-parser.py b/experiments/issue682/ms-parser.py deleted file mode 100755 index 380eb818d8..0000000000 --- a/experiments/issue682/ms-parser.py +++ /dev/null @@ -1,73 +0,0 @@ -#! 
/usr/bin/env python - -from lab.parser import Parser - -parser = Parser() -parser.add_pattern('ms_final_size', 'Final transition system size: (\d+)', required=False, type=int) -parser.add_pattern('ms_construction_time', 'Done initializing merge-and-shrink heuristic \[(.+)s\]', required=False, type=float) -parser.add_pattern('ms_atomic_construction_time', 't=(.+)s \(after computation of atomic transition systems\)', required=False, type=float) -parser.add_pattern('ms_memory_delta', 'Final peak memory increase of merge-and-shrink computation: (\d+) KB', required=False, type=int) -parser.add_pattern('actual_search_time', 'Actual search time: (.+)s \[t=.+s\]', required=False, type=float) - -def check_ms_constructed(content, props): - ms_construction_time = props.get('ms_construction_time') - abstraction_constructed = False - if ms_construction_time is not None: - abstraction_constructed = True - props['ms_abstraction_constructed'] = abstraction_constructed - -parser.add_function(check_ms_constructed) - -def check_planner_exit_reason(content, props): - ms_abstraction_constructed = props.get('ms_abstraction_constructed') - error = props.get('error') - if error != 'none' and error != 'timeout' and error != 'out-of-memory': - print 'error: %s' % error - return - - # Check whether merge-and-shrink computation or search ran out of - # time or memory. 
- ms_out_of_time = False - ms_out_of_memory = False - search_out_of_time = False - search_out_of_memory = False - if ms_abstraction_constructed == False: - if error == 'timeout': - ms_out_of_time = True - elif error == 'out-of-memory': - ms_out_of_memory = True - elif ms_abstraction_constructed == True: - if error == 'timeout': - search_out_of_time = True - elif error == 'out-of-memory': - search_out_of_memory = True - props['ms_out_of_time'] = ms_out_of_time - props['ms_out_of_memory'] = ms_out_of_memory - props['search_out_of_time'] = search_out_of_time - props['search_out_of_memory'] = search_out_of_memory - -parser.add_function(check_planner_exit_reason) - -def check_perfect_heuristic(content, props): - plan_length = props.get('plan_length') - expansions = props.get('expansions') - if plan_length != None: - perfect_heuristic = False - if plan_length + 1 == expansions: - perfect_heuristic = True - props['perfect_heuristic'] = perfect_heuristic - -parser.add_function(check_perfect_heuristic) - -def check_proved_unsolvability(content, props): - proved_unsolvability = False - if props['coverage'] == 0: - for line in content.splitlines(): - if line == 'Completely explored state space -- no solution!': - proved_unsolvability = True - break - props['proved_unsolvability'] = proved_unsolvability - -parser.add_function(check_proved_unsolvability) - -parser.parse() diff --git a/experiments/issue682/relativescatter.py b/experiments/issue682/relativescatter.py deleted file mode 100644 index 68e2529f34..0000000000 --- a/experiments/issue682/relativescatter.py +++ /dev/null @@ -1,106 +0,0 @@ -# -*- coding: utf-8 -*- - -from collections import defaultdict - -from matplotlib import ticker - -from downward.reports.scatter import ScatterPlotReport -from downward.reports.plot import PlotReport, Matplotlib, MatplotlibPlot - - -# TODO: handle outliers - -# TODO: this is mostly copied from ScatterMatplotlib (scatter.py) -class RelativeScatterMatplotlib(Matplotlib): - @classmethod - 
def _plot(cls, report, axes, categories, styles): - # Display grid - axes.grid(b=True, linestyle='-', color='0.75') - - has_points = False - # Generate the scatter plots - for category, coords in sorted(categories.items()): - X, Y = zip(*coords) - axes.scatter(X, Y, s=42, label=category, **styles[category]) - if X and Y: - has_points = True - - if report.xscale == 'linear' or report.yscale == 'linear': - plot_size = report.missing_val * 1.01 - else: - plot_size = report.missing_val * 1.25 - - # make 5 ticks above and below 1 - yticks = [] - tick_step = report.ylim_top**(1/5.0) - for i in xrange(-5, 6): - yticks.append(tick_step**i) - axes.set_yticks(yticks) - axes.get_yaxis().set_major_formatter(ticker.ScalarFormatter()) - - axes.set_xlim(report.xlim_left or -1, report.xlim_right or plot_size) - axes.set_ylim(report.ylim_bottom or -1, report.ylim_top or plot_size) - - for axis in [axes.xaxis, axes.yaxis]: - MatplotlibPlot.change_axis_formatter(axis, - report.missing_val if report.show_missing else None) - return has_points - - - -class RelativeScatterPlotReport(ScatterPlotReport): - """ - Generate a scatter plot that shows how a specific attribute in two - configurations. The attribute value in config 1 is shown on the - x-axis and the relation to the value in config 2 on the y-axis. - """ - - def __init__(self, show_missing=True, get_category=None, **kwargs): - ScatterPlotReport.__init__(self, show_missing, get_category, **kwargs) - if self.output_format == 'tex': - raise "not supported" - else: - self.writer = RelativeScatterMatplotlib - - def _fill_categories(self, runs): - # We discard the *runs* parameter. 
- # Map category names to value tuples - categories = defaultdict(list) - self.ylim_bottom = 2 - self.ylim_top = 0.5 - self.xlim_left = float("inf") - for (domain, problem), runs in self.problem_runs.items(): - if len(runs) != 2: - continue - run1, run2 = runs - assert (run1['config'] == self.configs[0] and - run2['config'] == self.configs[1]) - val1 = run1.get(self.attribute) - val2 = run2.get(self.attribute) - if val1 is None or val2 is None: - continue - category = self.get_category(run1, run2) - assert val1 > 0, (domain, problem, self.configs[0], val1) - assert val2 > 0, (domain, problem, self.configs[1], val2) - x = val1 - y = val2 / float(val1) - - categories[category].append((x, y)) - - self.ylim_top = max(self.ylim_top, y) - self.ylim_bottom = min(self.ylim_bottom, y) - self.xlim_left = min(self.xlim_left, x) - - # center around 1 - if self.ylim_bottom < 1: - self.ylim_top = max(self.ylim_top, 1 / float(self.ylim_bottom)) - if self.ylim_top > 1: - self.ylim_bottom = min(self.ylim_bottom, 1 / float(self.ylim_top)) - return categories - - def _set_scales(self, xscale, yscale): - # ScatterPlots use log-scaling on the x-axis by default. - default_xscale = 'log' - if self.attribute and self.attribute in self.LINEAR: - default_xscale = 'linear' - PlotReport._set_scales(self, xscale or default_xscale, 'log') diff --git a/experiments/issue682/suites.py b/experiments/issue682/suites.py deleted file mode 100644 index 4615212cfd..0000000000 --- a/experiments/issue682/suites.py +++ /dev/null @@ -1,350 +0,0 @@ -#! /usr/bin/env python -# -*- coding: utf-8 -*- - -import argparse -import textwrap - - -HELP = "Convert suite name to list of domains or tasks." 
- - -def suite_alternative_formulations(): - return ['airport-adl', 'no-mprime', 'no-mystery'] - - -def suite_ipc98_to_ipc04_adl(): - return [ - 'assembly', 'miconic-fulladl', 'miconic-simpleadl', - 'optical-telegraphs', 'philosophers', 'psr-large', - 'psr-middle', 'schedule', - ] - - -def suite_ipc98_to_ipc04_strips(): - return [ - 'airport', 'blocks', 'depot', 'driverlog', 'freecell', 'grid', - 'gripper', 'logistics00', 'logistics98', 'miconic', 'movie', - 'mprime', 'mystery', 'pipesworld-notankage', 'psr-small', - 'satellite', 'zenotravel', - ] - - -def suite_ipc98_to_ipc04(): - # All IPC1-4 domains, including the trivial Movie. - return sorted(suite_ipc98_to_ipc04_adl() + suite_ipc98_to_ipc04_strips()) - - -def suite_ipc06_adl(): - return [ - 'openstacks', - 'pathways', - 'trucks', - ] - - -def suite_ipc06_strips_compilations(): - return [ - 'openstacks-strips', - 'pathways-noneg', - 'trucks-strips', - ] - - -def suite_ipc06_strips(): - return [ - 'pipesworld-tankage', - 'rovers', - 'storage', - 'tpp', - ] - - -def suite_ipc06(): - return sorted(suite_ipc06_adl() + suite_ipc06_strips()) - - -def suite_ipc08_common_strips(): - return [ - 'parcprinter-08-strips', - 'pegsol-08-strips', - 'scanalyzer-08-strips', - ] - - -def suite_ipc08_opt_adl(): - return ['openstacks-opt08-adl'] - - -def suite_ipc08_opt_strips(): - return sorted(suite_ipc08_common_strips() + [ - 'elevators-opt08-strips', - 'openstacks-opt08-strips', - 'sokoban-opt08-strips', - 'transport-opt08-strips', - 'woodworking-opt08-strips', - ]) - - -def suite_ipc08_opt(): - return sorted(suite_ipc08_opt_strips() + suite_ipc08_opt_adl()) - - -def suite_ipc08_sat_adl(): - return ['openstacks-sat08-adl'] - - -def suite_ipc08_sat_strips(): - return sorted(suite_ipc08_common_strips() + [ - # Note: cyber-security is missing. 
- 'elevators-sat08-strips', - 'openstacks-sat08-strips', - 'sokoban-sat08-strips', - 'transport-sat08-strips', - 'woodworking-sat08-strips', - ]) - - -def suite_ipc08_sat(): - return sorted(suite_ipc08_sat_strips() + suite_ipc08_sat_adl()) - - -def suite_ipc08(): - return sorted(set(suite_ipc08_opt() + suite_ipc08_sat())) - - -def suite_ipc11_opt(): - return [ - 'barman-opt11-strips', - 'elevators-opt11-strips', - 'floortile-opt11-strips', - 'nomystery-opt11-strips', - 'openstacks-opt11-strips', - 'parcprinter-opt11-strips', - 'parking-opt11-strips', - 'pegsol-opt11-strips', - 'scanalyzer-opt11-strips', - 'sokoban-opt11-strips', - 'tidybot-opt11-strips', - 'transport-opt11-strips', - 'visitall-opt11-strips', - 'woodworking-opt11-strips', - ] - - -def suite_ipc11_sat(): - return [ - 'barman-sat11-strips', - 'elevators-sat11-strips', - 'floortile-sat11-strips', - 'nomystery-sat11-strips', - 'openstacks-sat11-strips', - 'parcprinter-sat11-strips', - 'parking-sat11-strips', - 'pegsol-sat11-strips', - 'scanalyzer-sat11-strips', - 'sokoban-sat11-strips', - 'tidybot-sat11-strips', - 'transport-sat11-strips', - 'visitall-sat11-strips', - 'woodworking-sat11-strips', - ] - - -def suite_ipc11(): - return sorted(suite_ipc11_opt() + suite_ipc11_sat()) - - -def suite_ipc14_agl_adl(): - return [ - 'cavediving-14-adl', - 'citycar-sat14-adl', - 'maintenance-sat14-adl', - ] - - -def suite_ipc14_agl_strips(): - return [ - 'barman-sat14-strips', - 'childsnack-sat14-strips', - 'floortile-sat14-strips', - 'ged-sat14-strips', - 'hiking-agl14-strips', - 'openstacks-agl14-strips', - 'parking-sat14-strips', - 'tetris-sat14-strips', - 'thoughtful-sat14-strips', - 'transport-sat14-strips', - 'visitall-sat14-strips', - ] - - -def suite_ipc14_agl(): - return sorted(suite_ipc14_agl_adl() + suite_ipc14_agl_strips()) - - -def suite_ipc14_mco_adl(): - return [ - 'cavediving-14-adl', - 'citycar-sat14-adl', - 'maintenance-sat14-adl', - ] - - -def suite_ipc14_mco_strips(): - return [ - 
'barman-mco14-strips', - 'childsnack-sat14-strips', - 'floortile-sat14-strips', - 'ged-sat14-strips', - 'hiking-sat14-strips', - 'openstacks-sat14-strips', - 'parking-sat14-strips', - 'tetris-sat14-strips', - 'thoughtful-mco14-strips', - 'transport-sat14-strips', - 'visitall-sat14-strips', - ] - - -def suite_ipc14_mco(): - return sorted(suite_ipc14_mco_adl() + suite_ipc14_mco_strips()) - - -def suite_ipc14_opt_adl(): - return [ - 'cavediving-14-adl', - 'citycar-opt14-adl', - 'maintenance-opt14-adl', - ] - - -def suite_ipc14_opt_strips(): - return [ - 'barman-opt14-strips', - 'childsnack-opt14-strips', - 'floortile-opt14-strips', - 'ged-opt14-strips', - 'hiking-opt14-strips', - 'openstacks-opt14-strips', - 'parking-opt14-strips', - 'tetris-opt14-strips', - 'tidybot-opt14-strips', - 'transport-opt14-strips', - 'visitall-opt14-strips', - ] - - -def suite_ipc14_opt(): - return sorted(suite_ipc14_opt_adl() + suite_ipc14_opt_strips()) - - -def suite_ipc14_sat_adl(): - return [ - 'cavediving-14-adl', - 'citycar-sat14-adl', - 'maintenance-sat14-adl', - ] - - -def suite_ipc14_sat_strips(): - return [ - 'barman-sat14-strips', - 'childsnack-sat14-strips', - 'floortile-sat14-strips', - 'ged-sat14-strips', - 'hiking-sat14-strips', - 'openstacks-sat14-strips', - 'parking-sat14-strips', - 'tetris-sat14-strips', - 'thoughtful-sat14-strips', - 'transport-sat14-strips', - 'visitall-sat14-strips', - ] - - -def suite_ipc14_sat(): - return sorted(suite_ipc14_sat_adl() + suite_ipc14_sat_strips()) - - -def suite_ipc14(): - return sorted(set( - suite_ipc14_agl() + suite_ipc14_mco() + - suite_ipc14_opt() + suite_ipc14_sat())) - - -def suite_unsolvable(): - return sorted( - ['mystery:prob%02d.pddl' % index - for index in [4, 5, 7, 8, 12, 16, 18, 21, 22, 23, 24]] + - ['miconic-fulladl:f21-3.pddl', 'miconic-fulladl:f30-2.pddl']) - - -def suite_optimal_adl(): - return sorted( - suite_ipc98_to_ipc04_adl() + suite_ipc06_adl() + - suite_ipc08_opt_adl() + suite_ipc14_opt_adl()) - - -def 
suite_optimal_strips(): - return sorted( - suite_ipc98_to_ipc04_strips() + suite_ipc06_strips() + - suite_ipc06_strips_compilations() + suite_ipc08_opt_strips() + - suite_ipc11_opt() + suite_ipc14_opt_strips()) - - -def suite_optimal(): - return sorted(suite_optimal_adl() + suite_optimal_strips()) - - -def suite_satisficing_adl(): - return sorted( - suite_ipc98_to_ipc04_adl() + suite_ipc06_adl() + - suite_ipc08_sat_adl() + suite_ipc14_sat_adl()) - - -def suite_satisficing_strips(): - return sorted( - suite_ipc98_to_ipc04_strips() + suite_ipc06_strips() + - suite_ipc06_strips_compilations() + suite_ipc08_sat_strips() + - suite_ipc11_sat() + suite_ipc14_sat_strips()) - - -def suite_satisficing(): - return sorted(suite_satisficing_adl() + suite_satisficing_strips()) - - -def suite_all(): - return sorted( - suite_ipc98_to_ipc04() + suite_ipc06() + - suite_ipc06_strips_compilations() + suite_ipc08() + - suite_ipc11() + suite_ipc14() + suite_alternative_formulations()) - - -def parse_args(): - parser = argparse.ArgumentParser() - parser.add_argument("suite", help="suite name") - return parser.parse_args() - - -def main(): - prefix = "suite_" - suite_names = [ - name[len(prefix):] for name in sorted(globals().keys()) - if name.startswith(prefix)] - parser = argparse.ArgumentParser(description=HELP) - parser.add_argument("suite", choices=suite_names, help="suite name") - parser.add_argument( - "--width", default=72, type=int, - help="output line width (default: %(default)s). Use 1 for single " - "column.") - args = parser.parse_args() - suite_func = globals()[prefix + args.suite] - print(textwrap.fill( - str(suite_func()), - width=args.width, - break_long_words=False, - break_on_hyphens=False)) - - -if __name__ == "__main__": - main() diff --git a/experiments/issue682/v1.py b/experiments/issue682/v1.py deleted file mode 100755 index c620fb2f3f..0000000000 --- a/experiments/issue682/v1.py +++ /dev/null @@ -1,92 +0,0 @@ -#! 
/usr/bin/env python -# -*- coding: utf-8 -*- - -import os -import suites -from lab.reports import Attribute, gm - -from common_setup import IssueConfig, IssueExperiment -try: - from relativescatter import RelativeScatterPlotReport - matplotlib = True -except ImportError: - print 'matplotlib not availabe, scatter plots not available' - matplotlib = False - -def main(revisions=None): - benchmarks_dir=os.path.expanduser('~/repos/downward/benchmarks') - suite=suites.suite_optimal_strips() - - configs = { - IssueConfig('rl-b50k', ['--search', 'astar(merge_and_shrink(merge_strategy=merge_precomputed(merge_tree=linear(variable_order=reverse_level)),shrink_strategy=shrink_bisimulation(greedy=false),label_reduction=exact(before_shrinking=true,before_merging=false),max_states=50000,threshold_before_merge=1))']), - IssueConfig('cggl-b50k', ['--search', 'astar(merge_and_shrink(merge_strategy=merge_precomputed(merge_tree=linear(variable_order=cg_goal_level)),shrink_strategy=shrink_bisimulation(greedy=false),label_reduction=exact(before_shrinking=true,before_merging=false),max_states=50000,threshold_before_merge=1))']), - IssueConfig('dfp-b50k', ['--search', 'astar(merge_and_shrink(merge_strategy=merge_stateless(merge_selector=score_based_filtering(scoring_functions=[goal_relevance,dfp,total_order])),shrink_strategy=shrink_bisimulation(greedy=false),label_reduction=exact(before_shrinking=true,before_merging=false),max_states=50000,threshold_before_merge=1))']), - IssueConfig('rl-ginf', ['--search', 'astar(merge_and_shrink(merge_strategy=merge_precomputed(merge_tree=linear(variable_order=reverse_level)),shrink_strategy=shrink_bisimulation(greedy=true),label_reduction=exact(before_shrinking=true,before_merging=false),max_states=infinity,threshold_before_merge=1))']), - IssueConfig('cggl-ginf', ['--search', 
'astar(merge_and_shrink(merge_strategy=merge_precomputed(merge_tree=linear(variable_order=cg_goal_level)),shrink_strategy=shrink_bisimulation(greedy=true),label_reduction=exact(before_shrinking=true,before_merging=false),max_states=infinity,threshold_before_merge=1))']), - IssueConfig('dfp-ginf', ['--search', 'astar(merge_and_shrink(merge_strategy=merge_stateless(merge_selector=score_based_filtering(scoring_functions=[goal_relevance,dfp,total_order])),shrink_strategy=shrink_bisimulation(greedy=true),label_reduction=exact(before_shrinking=true,before_merging=false),max_states=infinity,threshold_before_merge=1))']), - IssueConfig('rl-f50k', ['--search', 'astar(merge_and_shrink(merge_strategy=merge_precomputed(merge_tree=linear(variable_order=reverse_level)),shrink_strategy=shrink_fh(),label_reduction=exact(before_shrinking=false,before_merging=true),max_states=50000))']), - IssueConfig('cggl-f50k', ['--search', 'astar(merge_and_shrink(merge_strategy=merge_precomputed(merge_tree=linear(variable_order=cg_goal_level)),shrink_strategy=shrink_fh(),label_reduction=exact(before_shrinking=false,before_merging=true),max_states=50000))']), - IssueConfig('dfp-f50k', ['--search', 'astar(merge_and_shrink(merge_strategy=merge_stateless(merge_selector=score_based_filtering(scoring_functions=[goal_relevance,dfp,total_order])),shrink_strategy=shrink_fh(),label_reduction=exact(before_shrinking=false,before_merging=true),max_states=50000))']), - } - - exp = IssueExperiment( - benchmarks_dir=benchmarks_dir, - suite=suite, - revisions=revisions, - configs=configs, - test_suite=['depot:p01.pddl'], - processes=4, - email='silvan.sievers@unibas.ch', - ) - exp.add_resource('ms_parser', 'ms-parser.py', dest='ms-parser.py') - exp.add_command('ms-parser', ['ms_parser']) - - # planner outcome attributes - perfect_heuristic = Attribute('perfect_heuristic', absolute=True, min_wins=False) - proved_unsolvability = Attribute('proved_unsolvability', absolute=True, min_wins=False) - actual_search_time 
= Attribute('actual_search_time', absolute=False, min_wins=True, functions=[gm]) - - # m&s attributes - ms_construction_time = Attribute('ms_construction_time', absolute=False, min_wins=True, functions=[gm]) - ms_atomic_construction_time = Attribute('ms_atomic_construction_time', absolute=False, min_wins=True, functions=[gm]) - ms_abstraction_constructed = Attribute('ms_abstraction_constructed', absolute=True, min_wins=False) - ms_final_size = Attribute('ms_final_size', absolute=False, min_wins=True) - ms_out_of_memory = Attribute('ms_out_of_memory', absolute=True, min_wins=True) - ms_out_of_time = Attribute('ms_out_of_time', absolute=True, min_wins=True) - search_out_of_memory = Attribute('search_out_of_memory', absolute=True, min_wins=True) - search_out_of_time = Attribute('search_out_of_time', absolute=True, min_wins=True) - - extra_attributes = [ - perfect_heuristic, - proved_unsolvability, - actual_search_time, - - ms_construction_time, - ms_atomic_construction_time, - ms_abstraction_constructed, - ms_final_size, - ms_out_of_memory, - ms_out_of_time, - search_out_of_memory, - search_out_of_time, - ] - attributes = exp.DEFAULT_TABLE_ATTRIBUTES - attributes.extend(extra_attributes) - - exp.add_comparison_table_step() - - #if matplotlib: - #for attribute in ["memory", "total_time"]: - #for config in configs: - #exp.add_report( - #RelativeScatterPlotReport( - #attributes=[attribute], - #filter_config=["{}-{}".format(rev, config.nick) for rev in revisions], - #get_category=lambda run1, run2: run1.get("domain"), - #), - #outfile="{}-{}-{}.png".format(exp.name, attribute, config.nick) - #) - - exp() - -main(revisions=['issue682-base', 'issue682-v1']) diff --git a/experiments/issue684/common_setup.py b/experiments/issue684/common_setup.py deleted file mode 100644 index 4dff4aacfd..0000000000 --- a/experiments/issue684/common_setup.py +++ /dev/null @@ -1,335 +0,0 @@ -# -*- coding: utf-8 -*- - -import itertools -import os -import platform -import subprocess -import sys 
- -from lab.experiment import ARGPARSER -from lab.steps import Step -from lab import tools - -from downward.experiment import FastDownwardExperiment -from downward.reports.absolute import AbsoluteReport -from downward.reports.compare import CompareConfigsReport -from downward.reports.scatter import ScatterPlotReport - -from relativescatter import RelativeScatterPlotReport - - -def parse_args(): - ARGPARSER.add_argument( - "--test", - choices=["yes", "no", "auto"], - default="auto", - dest="test_run", - help="test experiment locally on a small suite if --test=yes or " - "--test=auto and we are not on a cluster") - return ARGPARSER.parse_args() - -ARGS = parse_args() - - -def get_script(): - """Get file name of main script.""" - return tools.get_script_path() - - -def get_script_dir(): - """Get directory of main script. - - Usually a relative directory (depends on how it was called by the user.)""" - return os.path.dirname(get_script()) - - -def get_experiment_name(): - """Get name for experiment. - - Derived from the absolute filename of the main script, e.g. - "/ham/spam/eggs.py" => "spam-eggs".""" - script = os.path.abspath(get_script()) - script_dir = os.path.basename(os.path.dirname(script)) - script_base = os.path.splitext(os.path.basename(script))[0] - return "%s-%s" % (script_dir, script_base) - - -def get_data_dir(): - """Get data dir for the experiment. - - This is the subdirectory "data" of the directory containing - the main script.""" - return os.path.join(get_script_dir(), "data", get_experiment_name()) - - -def get_repo_base(): - """Get base directory of the repository, as an absolute path. - - Search upwards in the directory tree from the main script until a - directory with a subdirectory named ".hg" is found. 
- - Abort if the repo base cannot be found.""" - path = os.path.abspath(get_script_dir()) - while os.path.dirname(path) != path: - if os.path.exists(os.path.join(path, ".hg")): - return path - path = os.path.dirname(path) - sys.exit("repo base could not be found") - - -def is_running_on_cluster(): - node = platform.node() - return ( - "cluster" in node or - node.startswith("gkigrid") or - node in ["habakuk", "turtur"]) - - -def is_test_run(): - return ARGS.test_run == "yes" or ( - ARGS.test_run == "auto" and not is_running_on_cluster()) - - -def get_algo_nick(revision, config_nick): - return "{revision}-{config_nick}".format(**locals()) - - -class IssueConfig(object): - """Hold information about a planner configuration. - - See FastDownwardExperiment.add_algorithm() for documentation of the - constructor's options. - - """ - def __init__(self, nick, component_options, - build_options=None, driver_options=None): - self.nick = nick - self.component_options = component_options - self.build_options = build_options - self.driver_options = driver_options - - -class IssueExperiment(FastDownwardExperiment): - """Subclass of FastDownwardExperiment with some convenience features.""" - - DEFAULT_TEST_SUITE = ["gripper:prob01.pddl"] - - DEFAULT_TABLE_ATTRIBUTES = [ - "cost", - "coverage", - "error", - "evaluations", - "expansions", - "expansions_until_last_jump", - "generated", - "memory", - "quality", - "run_dir", - "score_evaluations", - "score_expansions", - "score_generated", - "score_memory", - "score_search_time", - "score_total_time", - "search_time", - "total_time", - ] - - DEFAULT_SCATTER_PLOT_ATTRIBUTES = [ - "evaluations", - "expansions", - "expansions_until_last_jump", - "initial_h_value", - "memory", - "search_time", - "total_time", - ] - - PORTFOLIO_ATTRIBUTES = [ - "cost", - "coverage", - "error", - "plan_length", - "run_dir", - ] - - def __init__(self, revisions=None, configs=None, path=None, **kwargs): - """ - - You can either specify both *revisions* and 
*configs* or none - of them. If they are omitted, you will need to call - exp.add_algorithm() manually. - - If *revisions* is given, it must be a non-empty list of - revision identifiers, which specify which planner versions to - use in the experiment. The same versions are used for - translator, preprocessor and search. :: - - IssueExperiment(revisions=["issue123", "4b3d581643"], ...) - - If *configs* is given, it must be a non-empty list of - IssueConfig objects. :: - - IssueExperiment(..., configs=[ - IssueConfig("ff", ["--search", "eager_greedy(ff())"]), - IssueConfig( - "lama", [], - driver_options=["--alias", "seq-sat-lama-2011"]), - ]) - - If *path* is specified, it must be the path to where the - experiment should be built (e.g. - /home/john/experiments/issue123/exp01/). If omitted, the - experiment path is derived automatically from the main - script's filename. Example:: - - script = experiments/issue123/exp01.py --> - path = experiments/issue123/data/issue123-exp01/ - - """ - - path = path or get_data_dir() - - FastDownwardExperiment.__init__(self, path=path, **kwargs) - - if (revisions and not configs) or (not revisions and configs): - raise ValueError( - "please provide either both or none of revisions and configs") - - for rev in revisions: - for config in configs: - self.add_algorithm( - get_algo_nick(rev, config.nick), - get_repo_base(), - rev, - config.component_options, - build_options=config.build_options, - driver_options=config.driver_options) - - self._revisions = revisions - self._configs = configs - - @classmethod - def _is_portfolio(cls, config_nick): - return "fdss" in config_nick - - @classmethod - def get_supported_attributes(cls, config_nick, attributes): - if cls._is_portfolio(config_nick): - return [attr for attr in attributes - if attr in cls.PORTFOLIO_ATTRIBUTES] - return attributes - - def add_absolute_report_step(self, **kwargs): - """Add step that makes an absolute report. 
- - Absolute reports are useful for experiments that don't compare - revisions. - - The report is written to the experiment evaluation directory. - - All *kwargs* will be passed to the AbsoluteReport class. If the - keyword argument *attributes* is not specified, a default list - of attributes is used. :: - - exp.add_absolute_report_step(attributes=["coverage"]) - - """ - kwargs.setdefault("attributes", self.DEFAULT_TABLE_ATTRIBUTES) - report = AbsoluteReport(**kwargs) - outfile = os.path.join( - self.eval_dir, - get_experiment_name() + "." + report.output_format) - self.add_report(report, outfile=outfile) - self.add_step(Step( - 'publish-absolute-report', subprocess.call, ['publish', outfile])) - - def add_comparison_table_step(self, **kwargs): - """Add a step that makes pairwise revision comparisons. - - Create comparative reports for all pairs of Fast Downward - revisions. Each report pairs up the runs of the same config and - lists the two absolute attribute values and their difference - for all attributes in kwargs["attributes"]. - - All *kwargs* will be passed to the CompareConfigsReport class. - If the keyword argument *attributes* is not specified, a - default list of attributes is used. 
:: - - exp.add_comparison_table_step(attributes=["coverage"]) - - """ - kwargs.setdefault("attributes", self.DEFAULT_TABLE_ATTRIBUTES) - - def make_comparison_tables(): - for rev1, rev2 in itertools.combinations(self._revisions, 2): - compared_configs = [] - for config in self._configs: - config_nick = config.nick - compared_configs.append( - ("%s-%s" % (rev1, config_nick), - "%s-%s" % (rev2, config_nick), - "Diff (%s)" % config_nick)) - report = CompareConfigsReport(compared_configs, **kwargs) - outfile = os.path.join( - self.eval_dir, - "%s-%s-%s-compare.%s" % ( - self.name, rev1, rev2, report.output_format)) - report(self.eval_dir, outfile) - - def publish_comparison_tables(): - for rev1, rev2 in itertools.combinations(self._revisions, 2): - outfile = os.path.join( - self.eval_dir, - "%s-%s-%s-compare.html" % (self.name, rev1, rev2)) - subprocess.call(["publish", outfile]) - - self.add_step(Step("make-comparison-tables", make_comparison_tables)) - self.add_step(Step( - "publish-comparison-tables", publish_comparison_tables)) - - def add_scatter_plot_step(self, relative=False, attributes=None): - """Add step creating (relative) scatter plots for all revision pairs. - - Create a scatter plot for each combination of attribute, - configuration and revisions pair. If *attributes* is not - specified, a list of common scatter plot attributes is used. - For portfolios all attributes except "cost", "coverage" and - "plan_length" will be ignored. 
:: - - exp.add_scatter_plot_step(attributes=["expansions"]) - - """ - if relative: - report_class = RelativeScatterPlotReport - scatter_dir = os.path.join(self.eval_dir, "scatter-relative") - step_name = "make-relative-scatter-plots" - else: - report_class = ScatterPlotReport - scatter_dir = os.path.join(self.eval_dir, "scatter-absolute") - step_name = "make-absolute-scatter-plots" - if attributes is None: - attributes = self.DEFAULT_SCATTER_PLOT_ATTRIBUTES - - def make_scatter_plot(config_nick, rev1, rev2, attribute): - name = "-".join([self.name, rev1, rev2, attribute, config_nick]) - print "Make scatter plot for", name - algo1 = "{}-{}".format(rev1, config_nick) - algo2 = "{}-{}".format(rev2, config_nick) - report = report_class( - filter_config=[algo1, algo2], - attributes=[attribute], - get_category=lambda run1, run2: run1["domain"], - legend_location=(1.3, 0.5)) - report( - self.eval_dir, - os.path.join(scatter_dir, rev1 + "-" + rev2, name)) - - def make_scatter_plots(): - for config in self._configs: - for rev1, rev2 in itertools.combinations(self._revisions, 2): - for attribute in self.get_supported_attributes( - config.nick, attributes): - make_scatter_plot(config.nick, rev1, rev2, attribute) - - self.add_step(Step(step_name, make_scatter_plots)) diff --git a/experiments/issue684/relativescatter.py b/experiments/issue684/relativescatter.py deleted file mode 100644 index 14d5d42752..0000000000 --- a/experiments/issue684/relativescatter.py +++ /dev/null @@ -1,106 +0,0 @@ -# -*- coding: utf-8 -*- - -from collections import defaultdict - -from matplotlib import ticker - -from downward.reports.scatter import ScatterPlotReport -from downward.reports.plot import PlotReport, Matplotlib, MatplotlibPlot - - -# TODO: handle outliers - -# TODO: this is mostly copied from ScatterMatplotlib (scatter.py) -class RelativeScatterMatplotlib(Matplotlib): - @classmethod - def _plot(cls, report, axes, categories, styles): - # Display grid - axes.grid(b=True, linestyle='-', 
color='0.75') - - has_points = False - # Generate the scatter plots - for category, coords in sorted(categories.items()): - X, Y = zip(*coords) - axes.scatter(X, Y, s=42, label=category, **styles[category]) - if X and Y: - has_points = True - - if report.xscale == 'linear' or report.yscale == 'linear': - plot_size = report.missing_val * 1.01 - else: - plot_size = report.missing_val * 1.25 - - # make 5 ticks above and below 1 - yticks = [] - tick_step = report.ylim_top**(1/5.0) - for i in xrange(-5, 6): - yticks.append(tick_step**i) - axes.set_yticks(yticks) - axes.get_yaxis().set_major_formatter(ticker.ScalarFormatter()) - - axes.set_xlim(report.xlim_left or -1, report.xlim_right or plot_size) - axes.set_ylim(report.ylim_bottom or -1, report.ylim_top or plot_size) - - for axis in [axes.xaxis, axes.yaxis]: - MatplotlibPlot.change_axis_formatter( - axis, - report.missing_val if report.show_missing else None) - return has_points - - -class RelativeScatterPlotReport(ScatterPlotReport): - """ - Generate a scatter plot that shows how a specific attribute in two - configurations. The attribute value in config 1 is shown on the - x-axis and the relation to the value in config 2 on the y-axis. - """ - - def __init__(self, show_missing=True, get_category=None, **kwargs): - ScatterPlotReport.__init__(self, show_missing, get_category, **kwargs) - if self.output_format == 'tex': - raise "not supported" - else: - self.writer = RelativeScatterMatplotlib - - def _fill_categories(self, runs): - # We discard the *runs* parameter. 
- # Map category names to value tuples - categories = defaultdict(list) - self.ylim_bottom = 2 - self.ylim_top = 0.5 - self.xlim_left = float("inf") - for (domain, problem), runs in self.problem_runs.items(): - if len(runs) != 2: - continue - run1, run2 = runs - assert (run1['config'] == self.configs[0] and - run2['config'] == self.configs[1]) - val1 = run1.get(self.attribute) - val2 = run2.get(self.attribute) - if val1 is None or val2 is None: - continue - category = self.get_category(run1, run2) - assert val1 > 0, (domain, problem, self.configs[0], val1) - assert val2 > 0, (domain, problem, self.configs[1], val2) - x = val1 - y = val2 / float(val1) - - categories[category].append((x, y)) - - self.ylim_top = max(self.ylim_top, y) - self.ylim_bottom = min(self.ylim_bottom, y) - self.xlim_left = min(self.xlim_left, x) - - # center around 1 - if self.ylim_bottom < 1: - self.ylim_top = max(self.ylim_top, 1 / float(self.ylim_bottom)) - if self.ylim_top > 1: - self.ylim_bottom = min(self.ylim_bottom, 1 / float(self.ylim_top)) - return categories - - def _set_scales(self, xscale, yscale): - # ScatterPlots use log-scaling on the x-axis by default. - default_xscale = 'log' - if self.attribute and self.attribute in self.LINEAR: - default_xscale = 'linear' - PlotReport._set_scales(self, xscale or default_xscale, 'log') diff --git a/experiments/issue684/v1.py b/experiments/issue684/v1.py deleted file mode 100755 index aee446ffab..0000000000 --- a/experiments/issue684/v1.py +++ /dev/null @@ -1,64 +0,0 @@ -#! 
/usr/bin/env python -# -*- coding: utf-8 -*- - -import os - -from lab.environments import LocalEnvironment, MaiaEnvironment - -from common_setup import IssueConfig, IssueExperiment, is_test_run - - -BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"] -REVISIONS = ["issue684-base", "issue684-v1"] -CONFIGS = [ - IssueConfig( - alias, [], driver_options=["--alias", alias, "--search-time-limit", "60s"]) - for alias in [ - "seq-sat-fd-autotune-1", "seq-sat-fd-autotune-2", - "seq-sat-fdss-1", "seq-sat-fdss-2", "seq-sat-fdss-2014", - "seq-sat-lama-2011", "lama-first"] -] -SUITE = [ - 'airport', 'assembly', 'barman-sat11-strips', - 'barman-sat14-strips', 'blocks', 'cavediving-14-adl', - 'childsnack-sat14-strips', 'citycar-sat14-adl', 'depot', - 'driverlog', 'elevators-sat08-strips', 'elevators-sat11-strips', - 'floortile-sat11-strips', 'floortile-sat14-strips', 'freecell', - 'ged-sat14-strips', 'grid', 'gripper', 'hiking-sat14-strips', - 'logistics00', 'logistics98', 'maintenance-sat14-adl', 'miconic', - 'miconic-fulladl', 'miconic-simpleadl', 'movie', 'mprime', - 'mystery', 'nomystery-sat11-strips', 'openstacks', - 'openstacks-sat08-adl', 'openstacks-sat08-strips', - 'openstacks-sat11-strips', 'openstacks-sat14-strips', - 'openstacks-strips', 'optical-telegraphs', 'parcprinter-08-strips', - 'parcprinter-sat11-strips', 'parking-sat11-strips', - 'parking-sat14-strips', 'pathways', 'pathways-noneg', - 'pegsol-08-strips', 'pegsol-sat11-strips', 'philosophers', - 'pipesworld-notankage', 'pipesworld-tankage', 'psr-large', - 'psr-middle', 'psr-small', 'rovers', 'satellite', - 'scanalyzer-08-strips', 'scanalyzer-sat11-strips', 'schedule', - 'sokoban-sat08-strips', 'sokoban-sat11-strips', 'storage', - 'tetris-sat14-strips', 'thoughtful-sat14-strips', - 'tidybot-sat11-strips', 'tpp', 'transport-sat08-strips', - 'transport-sat11-strips', 'transport-sat14-strips', 'trucks', - 'trucks-strips', 'visitall-sat11-strips', 'visitall-sat14-strips', - 'woodworking-sat08-strips', 
'woodworking-sat11-strips', - 'zenotravel'] -ENVIRONMENT = MaiaEnvironment( - priority=0, email="jendrik.seipp@unibas.ch") - -if is_test_run(): - SUITE = IssueExperiment.DEFAULT_TEST_SUITE - ENVIRONMENT = LocalEnvironment(processes=1) - -exp = IssueExperiment( - revisions=REVISIONS, - configs=CONFIGS, - environment=ENVIRONMENT, -) -exp.add_suite(BENCHMARKS_DIR, SUITE) - -exp.add_absolute_report_step() -exp.add_comparison_table_step() - -exp() diff --git a/experiments/issue688/common_setup.py b/experiments/issue688/common_setup.py deleted file mode 100644 index 5d2b40b61c..0000000000 --- a/experiments/issue688/common_setup.py +++ /dev/null @@ -1,385 +0,0 @@ -# -*- coding: utf-8 -*- - -import itertools -import os -import platform -import subprocess -import sys - -from lab.experiment import ARGPARSER -from lab import tools - -from downward.experiment import FastDownwardExperiment -from downward.reports.absolute import AbsoluteReport -from downward.reports.compare import ComparativeReport -from downward.reports.scatter import ScatterPlotReport - -from relativescatter import RelativeScatterPlotReport - - -def parse_args(): - ARGPARSER.add_argument( - "--test", - choices=["yes", "no", "auto"], - default="auto", - dest="test_run", - help="test experiment locally on a small suite if --test=yes or " - "--test=auto and we are not on a cluster") - return ARGPARSER.parse_args() - -ARGS = parse_args() - - -DEFAULT_OPTIMAL_SUITE = [ - 'airport', 'barman-opt11-strips', 'barman-opt14-strips', 'blocks', - 'childsnack-opt14-strips', 'depot', 'driverlog', - 'elevators-opt08-strips', 'elevators-opt11-strips', - 'floortile-opt11-strips', 'floortile-opt14-strips', 'freecell', - 'ged-opt14-strips', 'grid', 'gripper', 'hiking-opt14-strips', - 'logistics00', 'logistics98', 'miconic', 'movie', 'mprime', - 'mystery', 'nomystery-opt11-strips', 'openstacks-opt08-strips', - 'openstacks-opt11-strips', 'openstacks-opt14-strips', - 'openstacks-strips', 'parcprinter-08-strips', - 
'parcprinter-opt11-strips', 'parking-opt11-strips', - 'parking-opt14-strips', 'pathways-noneg', 'pegsol-08-strips', - 'pegsol-opt11-strips', 'pipesworld-notankage', - 'pipesworld-tankage', 'psr-small', 'rovers', 'satellite', - 'scanalyzer-08-strips', 'scanalyzer-opt11-strips', - 'sokoban-opt08-strips', 'sokoban-opt11-strips', 'storage', - 'tetris-opt14-strips', 'tidybot-opt11-strips', - 'tidybot-opt14-strips', 'tpp', 'transport-opt08-strips', - 'transport-opt11-strips', 'transport-opt14-strips', - 'trucks-strips', 'visitall-opt11-strips', 'visitall-opt14-strips', - 'woodworking-opt08-strips', 'woodworking-opt11-strips', - 'zenotravel'] - -DEFAULT_SATISFICING_SUITE = [ - 'airport', 'assembly', 'barman-sat11-strips', - 'barman-sat14-strips', 'blocks', 'cavediving-14-adl', - 'childsnack-sat14-strips', 'citycar-sat14-adl', 'depot', - 'driverlog', 'elevators-sat08-strips', 'elevators-sat11-strips', - 'floortile-sat11-strips', 'floortile-sat14-strips', 'freecell', - 'ged-sat14-strips', 'grid', 'gripper', 'hiking-sat14-strips', - 'logistics00', 'logistics98', 'maintenance-sat14-adl', 'miconic', - 'miconic-fulladl', 'miconic-simpleadl', 'movie', 'mprime', - 'mystery', 'nomystery-sat11-strips', 'openstacks', - 'openstacks-sat08-adl', 'openstacks-sat08-strips', - 'openstacks-sat11-strips', 'openstacks-sat14-strips', - 'openstacks-strips', 'optical-telegraphs', 'parcprinter-08-strips', - 'parcprinter-sat11-strips', 'parking-sat11-strips', - 'parking-sat14-strips', 'pathways', 'pathways-noneg', - 'pegsol-08-strips', 'pegsol-sat11-strips', 'philosophers', - 'pipesworld-notankage', 'pipesworld-tankage', 'psr-large', - 'psr-middle', 'psr-small', 'rovers', 'satellite', - 'scanalyzer-08-strips', 'scanalyzer-sat11-strips', 'schedule', - 'sokoban-sat08-strips', 'sokoban-sat11-strips', 'storage', - 'tetris-sat14-strips', 'thoughtful-sat14-strips', - 'tidybot-sat11-strips', 'tpp', 'transport-sat08-strips', - 'transport-sat11-strips', 'transport-sat14-strips', 'trucks', - 
'trucks-strips', 'visitall-sat11-strips', 'visitall-sat14-strips', - 'woodworking-sat08-strips', 'woodworking-sat11-strips', - 'zenotravel'] - - -def get_script(): - """Get file name of main script.""" - return tools.get_script_path() - - -def get_script_dir(): - """Get directory of main script. - - Usually a relative directory (depends on how it was called by the user.)""" - return os.path.dirname(get_script()) - - -def get_experiment_name(): - """Get name for experiment. - - Derived from the absolute filename of the main script, e.g. - "/ham/spam/eggs.py" => "spam-eggs".""" - script = os.path.abspath(get_script()) - script_dir = os.path.basename(os.path.dirname(script)) - script_base = os.path.splitext(os.path.basename(script))[0] - return "%s-%s" % (script_dir, script_base) - - -def get_data_dir(): - """Get data dir for the experiment. - - This is the subdirectory "data" of the directory containing - the main script.""" - return os.path.join(get_script_dir(), "data", get_experiment_name()) - - -def get_repo_base(): - """Get base directory of the repository, as an absolute path. - - Search upwards in the directory tree from the main script until a - directory with a subdirectory named ".hg" is found. - - Abort if the repo base cannot be found.""" - path = os.path.abspath(get_script_dir()) - while os.path.dirname(path) != path: - if os.path.exists(os.path.join(path, ".hg")): - return path - path = os.path.dirname(path) - sys.exit("repo base could not be found") - - -def is_running_on_cluster(): - node = platform.node() - return ( - "cluster" in node or - node.startswith("gkigrid") or - node in ["habakuk", "turtur"]) - - -def is_test_run(): - return ARGS.test_run == "yes" or ( - ARGS.test_run == "auto" and not is_running_on_cluster()) - - -def get_algo_nick(revision, config_nick): - return "{revision}-{config_nick}".format(**locals()) - - -class IssueConfig(object): - """Hold information about a planner configuration. 
- - See FastDownwardExperiment.add_algorithm() for documentation of the - constructor's options. - - """ - def __init__(self, nick, component_options, - build_options=None, driver_options=None): - self.nick = nick - self.component_options = component_options - self.build_options = build_options - self.driver_options = driver_options - - -class IssueExperiment(FastDownwardExperiment): - """Subclass of FastDownwardExperiment with some convenience features.""" - - DEFAULT_TEST_SUITE = ["gripper:prob01.pddl"] - - DEFAULT_TABLE_ATTRIBUTES = [ - "cost", - "coverage", - "error", - "evaluations", - "expansions", - "expansions_until_last_jump", - "generated", - "memory", - "quality", - "run_dir", - "score_evaluations", - "score_expansions", - "score_generated", - "score_memory", - "score_search_time", - "score_total_time", - "search_time", - "total_time", - ] - - DEFAULT_SCATTER_PLOT_ATTRIBUTES = [ - "evaluations", - "expansions", - "expansions_until_last_jump", - "initial_h_value", - "memory", - "search_time", - "total_time", - ] - - PORTFOLIO_ATTRIBUTES = [ - "cost", - "coverage", - "error", - "plan_length", - "run_dir", - ] - - def __init__(self, revisions=None, configs=None, path=None, **kwargs): - """ - - You can either specify both *revisions* and *configs* or none - of them. If they are omitted, you will need to call - exp.add_algorithm() manually. - - If *revisions* is given, it must be a non-empty list of - revision identifiers, which specify which planner versions to - use in the experiment. The same versions are used for - translator, preprocessor and search. :: - - IssueExperiment(revisions=["issue123", "4b3d581643"], ...) - - If *configs* is given, it must be a non-empty list of - IssueConfig objects. 
:: - - IssueExperiment(..., configs=[ - IssueConfig("ff", ["--search", "eager_greedy(ff())"]), - IssueConfig( - "lama", [], - driver_options=["--alias", "seq-sat-lama-2011"]), - ]) - - If *path* is specified, it must be the path to where the - experiment should be built (e.g. - /home/john/experiments/issue123/exp01/). If omitted, the - experiment path is derived automatically from the main - script's filename. Example:: - - script = experiments/issue123/exp01.py --> - path = experiments/issue123/data/issue123-exp01/ - - """ - - path = path or get_data_dir() - - FastDownwardExperiment.__init__(self, path=path, **kwargs) - - if (revisions and not configs) or (not revisions and configs): - raise ValueError( - "please provide either both or none of revisions and configs") - - for rev in revisions: - for config in configs: - self.add_algorithm( - get_algo_nick(rev, config.nick), - get_repo_base(), - rev, - config.component_options, - build_options=config.build_options, - driver_options=config.driver_options) - - self._revisions = revisions - self._configs = configs - - @classmethod - def _is_portfolio(cls, config_nick): - return "fdss" in config_nick - - @classmethod - def get_supported_attributes(cls, config_nick, attributes): - if cls._is_portfolio(config_nick): - return [attr for attr in attributes - if attr in cls.PORTFOLIO_ATTRIBUTES] - return attributes - - def add_absolute_report_step(self, **kwargs): - """Add step that makes an absolute report. - - Absolute reports are useful for experiments that don't compare - revisions. - - The report is written to the experiment evaluation directory. - - All *kwargs* will be passed to the AbsoluteReport class. If the - keyword argument *attributes* is not specified, a default list - of attributes is used. 
:: - - exp.add_absolute_report_step(attributes=["coverage"]) - - """ - kwargs.setdefault("attributes", self.DEFAULT_TABLE_ATTRIBUTES) - report = AbsoluteReport(**kwargs) - outfile = os.path.join( - self.eval_dir, - get_experiment_name() + "." + report.output_format) - self.add_report(report, outfile=outfile) - self.add_step( - 'publish-absolute-report', subprocess.call, ['publish', outfile]) - - def add_comparison_table_step(self, **kwargs): - """Add a step that makes pairwise revision comparisons. - - Create comparative reports for all pairs of Fast Downward - revisions. Each report pairs up the runs of the same config and - lists the two absolute attribute values and their difference - for all attributes in kwargs["attributes"]. - - All *kwargs* will be passed to the CompareConfigsReport class. - If the keyword argument *attributes* is not specified, a - default list of attributes is used. :: - - exp.add_comparison_table_step(attributes=["coverage"]) - - """ - kwargs.setdefault("attributes", self.DEFAULT_TABLE_ATTRIBUTES) - - def make_comparison_tables(): - for rev1, rev2 in itertools.combinations(self._revisions, 2): - compared_configs = [] - for config in self._configs: - config_nick = config.nick - compared_configs.append( - ("%s-%s" % (rev1, config_nick), - "%s-%s" % (rev2, config_nick), - "Diff (%s)" % config_nick)) - report = ComparativeReport(compared_configs, **kwargs) - outfile = os.path.join( - self.eval_dir, - "%s-%s-%s-compare.%s" % ( - self.name, rev1, rev2, report.output_format)) - report(self.eval_dir, outfile) - - def publish_comparison_tables(): - for rev1, rev2 in itertools.combinations(self._revisions, 2): - outfile = os.path.join( - self.eval_dir, - "%s-%s-%s-compare.html" % (self.name, rev1, rev2)) - subprocess.call(["publish", outfile]) - - self.add_step("make-comparison-tables", make_comparison_tables) - self.add_step( - "publish-comparison-tables", publish_comparison_tables) - - def add_scatter_plot_step(self, relative=False, 
attributes=None): - """Add step creating (relative) scatter plots for all revision pairs. - - Create a scatter plot for each combination of attribute, - configuration and revisions pair. If *attributes* is not - specified, a list of common scatter plot attributes is used. - For portfolios all attributes except "cost", "coverage" and - "plan_length" will be ignored. :: - - exp.add_scatter_plot_step(attributes=["expansions"]) - - """ - if relative: - report_class = RelativeScatterPlotReport - scatter_dir = os.path.join(self.eval_dir, "scatter-relative") - step_name = "make-relative-scatter-plots" - else: - report_class = ScatterPlotReport - scatter_dir = os.path.join(self.eval_dir, "scatter-absolute") - step_name = "make-absolute-scatter-plots" - if attributes is None: - attributes = self.DEFAULT_SCATTER_PLOT_ATTRIBUTES - - def make_scatter_plot(config_nick, rev1, rev2, attribute): - name = "-".join([self.name, rev1, rev2, attribute, config_nick]) - print "Make scatter plot for", name - algo1 = "{}-{}".format(rev1, config_nick) - algo2 = "{}-{}".format(rev2, config_nick) - report = report_class( - filter_config=[algo1, algo2], - attributes=[attribute], - get_category=lambda run1, run2: run1["domain"], - legend_location=(1.3, 0.5)) - report( - self.eval_dir, - os.path.join(scatter_dir, rev1 + "-" + rev2, name)) - - def make_scatter_plots(): - for config in self._configs: - for rev1, rev2 in itertools.combinations(self._revisions, 2): - for attribute in self.get_supported_attributes( - config.nick, attributes): - make_scatter_plot(config.nick, rev1, rev2, attribute) - - self.add_step(step_name, make_scatter_plots) diff --git a/experiments/issue688/relativescatter.py b/experiments/issue688/relativescatter.py deleted file mode 100644 index f74cb6e721..0000000000 --- a/experiments/issue688/relativescatter.py +++ /dev/null @@ -1,105 +0,0 @@ -# -*- coding: utf-8 -*- - -from collections import defaultdict - -from matplotlib import ticker - -from downward.reports.scatter 
import ScatterPlotReport -from downward.reports.plot import PlotReport, Matplotlib, MatplotlibPlot - - -# TODO: handle outliers - -# TODO: this is mostly copied from ScatterMatplotlib (scatter.py) -class RelativeScatterMatplotlib(Matplotlib): - @classmethod - def _plot(cls, report, axes, categories, styles): - # Display grid - axes.grid(b=True, linestyle='-', color='0.75') - - has_points = False - # Generate the scatter plots - for category, coords in sorted(categories.items()): - X, Y = zip(*coords) - axes.scatter(X, Y, s=42, label=category, **styles[category]) - if X and Y: - has_points = True - - if report.xscale == 'linear' or report.yscale == 'linear': - plot_size = report.missing_val * 1.01 - else: - plot_size = report.missing_val * 1.25 - - # make 5 ticks above and below 1 - yticks = [] - tick_step = report.ylim_top**(1/5.0) - for i in xrange(-5, 6): - yticks.append(tick_step**i) - axes.set_yticks(yticks) - axes.get_yaxis().set_major_formatter(ticker.ScalarFormatter()) - - axes.set_xlim(report.xlim_left or -1, report.xlim_right or plot_size) - axes.set_ylim(report.ylim_bottom or -1, report.ylim_top or plot_size) - - for axis in [axes.xaxis, axes.yaxis]: - MatplotlibPlot.change_axis_formatter( - axis, - report.missing_val if report.show_missing else None) - return has_points - - -class RelativeScatterPlotReport(ScatterPlotReport): - """ - Generate a scatter plot that shows a relative comparison of two - algorithms with regard to the given attribute. The attribute value - of algorithm 1 is shown on the x-axis and the relation to the value - of algorithm 2 on the y-axis. - """ - - def __init__(self, show_missing=True, get_category=None, **kwargs): - ScatterPlotReport.__init__(self, show_missing, get_category, **kwargs) - if self.output_format == 'tex': - raise "not supported" - else: - self.writer = RelativeScatterMatplotlib - - def _fill_categories(self, runs): - # We discard the *runs* parameter. 
- # Map category names to value tuples - categories = defaultdict(list) - self.ylim_bottom = 2 - self.ylim_top = 0.5 - self.xlim_left = float("inf") - for (domain, problem), runs in self.problem_runs.items(): - if len(runs) != 2: - continue - run1, run2 = runs - assert (run1['algorithm'] == self.algorithms[0] and - run2['algorithm'] == self.algorithms[1]) - val1 = run1.get(self.attribute) - val2 = run2.get(self.attribute) - if val1 is None or val2 is None: - continue - category = self.get_category(run1, run2) - assert val1 > 0, (domain, problem, self.algorithms[0], val1) - assert val2 > 0, (domain, problem, self.algorithms[1], val2) - x = val1 - y = val2 / float(val1) - - categories[category].append((x, y)) - - self.ylim_top = max(self.ylim_top, y) - self.ylim_bottom = min(self.ylim_bottom, y) - self.xlim_left = min(self.xlim_left, x) - - # center around 1 - if self.ylim_bottom < 1: - self.ylim_top = max(self.ylim_top, 1 / float(self.ylim_bottom)) - if self.ylim_top > 1: - self.ylim_bottom = min(self.ylim_bottom, 1 / float(self.ylim_top)) - return categories - - def _set_scales(self, xscale, yscale): - # ScatterPlot uses log-scaling on the x-axis by default. - PlotReport._set_scales( - self, xscale or self.attribute.scale or 'log', 'log') diff --git a/experiments/issue688/v1-opt.py b/experiments/issue688/v1-opt.py deleted file mode 100755 index bc41b32b04..0000000000 --- a/experiments/issue688/v1-opt.py +++ /dev/null @@ -1,47 +0,0 @@ -#! 
/usr/bin/env python -# -*- coding: utf-8 -*- - -import os - -from lab.environments import LocalEnvironment, MaiaEnvironment - -import common_setup -from common_setup import IssueConfig, IssueExperiment - - -DIR = os.path.dirname(os.path.abspath(__file__)) -BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"] -REVISIONS = ["issue688-v1-base", "issue688-v1"] -BUILDS = ["release32"] -SEARCHES = [ - ("blind", ["--search", "astar(blind())"]), - ("ipdb", ["--search", "astar(ipdb())"]), - ("divpot", ["--search", "astar(diverse_potentials())"]), -] -CONFIGS = [ - IssueConfig( - "{nick}-{build}".format(**locals()), - search, - build_options=[build], - driver_options=["--build", build]) - for nick, search in SEARCHES - for build in BUILDS -] -SUITE = common_setup.DEFAULT_OPTIMAL_SUITE -ENVIRONMENT = MaiaEnvironment( - priority=0, email="jendrik.seipp@unibas.ch") - -if common_setup.is_test_run(): - SUITE = IssueExperiment.DEFAULT_TEST_SUITE - ENVIRONMENT = LocalEnvironment(processes=1) - -exp = IssueExperiment( - revisions=REVISIONS, - configs=CONFIGS, - environment=ENVIRONMENT, -) -exp.add_suite(BENCHMARKS_DIR, SUITE) -exp.add_absolute_report_step() -exp.add_comparison_table_step() - -exp.run_steps() diff --git a/experiments/issue688/v1-sat.py b/experiments/issue688/v1-sat.py deleted file mode 100755 index 69a2a5085c..0000000000 --- a/experiments/issue688/v1-sat.py +++ /dev/null @@ -1,53 +0,0 @@ -#! 
/usr/bin/env python -# -*- coding: utf-8 -*- - -import os - -from lab.environments import LocalEnvironment, MaiaEnvironment - -import common_setup -from common_setup import IssueConfig, IssueExperiment - - -DIR = os.path.dirname(os.path.abspath(__file__)) -BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"] -REVISIONS = ["issue688-v1-base", "issue688-v1"] -BUILDS = ["release32"] -SEARCHES = [ - ("eager_ff", ["--heuristic", "h=ff()", "--search", "eager_greedy(h, preferred=h)"]), - ("lazy_add", ["--heuristic", "h=add()", "--search", "lazy_greedy(h, preferred=h)"]), - ("ehc_ff", ["--heuristic", "h=ff()", "--search", "ehc(h, preferred=h)"]), -] -CONFIGS = [ - IssueConfig( - "{nick}-{build}".format(**locals()), - search, - build_options=[build], - driver_options=["--build", build]) - for nick, search in SEARCHES - for build in BUILDS -] + [ - IssueConfig( - "lama-first-{build}".format(**locals()), - [], - build_options=[build], - driver_options=["--build", build, "--alias", "lama-first"]) -] -SUITE = common_setup.DEFAULT_SATISFICING_SUITE -ENVIRONMENT = MaiaEnvironment( - priority=0, email="jendrik.seipp@unibas.ch") - -if common_setup.is_test_run(): - SUITE = IssueExperiment.DEFAULT_TEST_SUITE - ENVIRONMENT = LocalEnvironment(processes=1) - -exp = IssueExperiment( - revisions=REVISIONS, - configs=CONFIGS, - environment=ENVIRONMENT, -) -exp.add_suite(BENCHMARKS_DIR, SUITE) -exp.add_absolute_report_step() -exp.add_comparison_table_step() - -exp.run_steps() diff --git a/experiments/issue688/v2-opt.py b/experiments/issue688/v2-opt.py deleted file mode 100755 index abffcc79f1..0000000000 --- a/experiments/issue688/v2-opt.py +++ /dev/null @@ -1,54 +0,0 @@ -#! 
/usr/bin/env python -# -*- coding: utf-8 -*- - -import os - -from lab.environments import LocalEnvironment, MaiaEnvironment - -import common_setup -from common_setup import IssueConfig, IssueExperiment -from relativescatter import RelativeScatterPlotReport - - -DIR = os.path.dirname(os.path.abspath(__file__)) -BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"] -REVISIONS = ["issue688-v2-base", "issue688-v2"] -BUILDS = ["release32"] -SEARCHES = [ - ("blind", ["--search", "astar(blind())"]), - ("ipdb", ["--search", "astar(ipdb())"]), - ("divpot", ["--search", "astar(diverse_potentials())"]), -] -CONFIGS = [ - IssueConfig( - "{nick}-{build}".format(**locals()), - search, - build_options=[build], - driver_options=["--build", build]) - for nick, search in SEARCHES - for build in BUILDS -] -SUITE = common_setup.DEFAULT_OPTIMAL_SUITE -ENVIRONMENT = MaiaEnvironment( - priority=0, email="florian.pommerening@unibas.ch") - -if common_setup.is_test_run(): - SUITE = IssueExperiment.DEFAULT_TEST_SUITE - ENVIRONMENT = LocalEnvironment(processes=1) - -exp = IssueExperiment( - revisions=REVISIONS, - configs=CONFIGS, - environment=ENVIRONMENT, -) -exp.add_suite(BENCHMARKS_DIR, SUITE) -exp.add_absolute_report_step() -exp.add_comparison_table_step() - -exp.add_report(RelativeScatterPlotReport( - attributes=["search_time"], - filter_algorithm=["issue688-v2-base-blind-release32", "issue688-v2-blind-release32"], - get_category=lambda run1, run2: run1.get("domain"), -), outfile="{}-blind-search_time.png".format(exp.name)) - -exp.run_steps() diff --git a/experiments/issue688/v2-sat.py b/experiments/issue688/v2-sat.py deleted file mode 100755 index 7df4df74f0..0000000000 --- a/experiments/issue688/v2-sat.py +++ /dev/null @@ -1,53 +0,0 @@ -#! 
/usr/bin/env python -# -*- coding: utf-8 -*- - -import os - -from lab.environments import LocalEnvironment, MaiaEnvironment - -import common_setup -from common_setup import IssueConfig, IssueExperiment - - -DIR = os.path.dirname(os.path.abspath(__file__)) -BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"] -REVISIONS = ["issue688-v2-base", "issue688-v2"] -BUILDS = ["release32"] -SEARCHES = [ - ("eager_ff", ["--heuristic", "h=ff()", "--search", "eager_greedy(h, preferred=h)"]), - ("lazy_add", ["--heuristic", "h=add()", "--search", "lazy_greedy(h, preferred=h)"]), - ("ehc_ff", ["--heuristic", "h=ff()", "--search", "ehc(h, preferred=h)"]), -] -CONFIGS = [ - IssueConfig( - "{nick}-{build}".format(**locals()), - search, - build_options=[build], - driver_options=["--build", build]) - for nick, search in SEARCHES - for build in BUILDS -] + [ - IssueConfig( - "lama-first-{build}".format(**locals()), - [], - build_options=[build], - driver_options=["--build", build, "--alias", "lama-first"]) -] -SUITE = common_setup.DEFAULT_SATISFICING_SUITE -ENVIRONMENT = MaiaEnvironment( - priority=0, email="florian.pommerening@unibas.ch") - -if common_setup.is_test_run(): - SUITE = IssueExperiment.DEFAULT_TEST_SUITE - ENVIRONMENT = LocalEnvironment(processes=1) - -exp = IssueExperiment( - revisions=REVISIONS, - configs=CONFIGS, - environment=ENVIRONMENT, -) -exp.add_suite(BENCHMARKS_DIR, SUITE) -exp.add_absolute_report_step() -exp.add_comparison_table_step() - -exp.run_steps() diff --git a/experiments/issue688/v3-opt.py b/experiments/issue688/v3-opt.py deleted file mode 100755 index 10121b9b3b..0000000000 --- a/experiments/issue688/v3-opt.py +++ /dev/null @@ -1,54 +0,0 @@ -#! 
/usr/bin/env python -# -*- coding: utf-8 -*- - -import os - -from lab.environments import LocalEnvironment, MaiaEnvironment - -import common_setup -from common_setup import IssueConfig, IssueExperiment -from relativescatter import RelativeScatterPlotReport - - -DIR = os.path.dirname(os.path.abspath(__file__)) -BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"] -REVISIONS = ["issue688-v3-base", "issue688-v3"] -BUILDS = ["release32"] -SEARCHES = [ - ("blind", ["--search", "astar(blind())"]), - ("ipdb", ["--search", "astar(ipdb())"]), - ("divpot", ["--search", "astar(diverse_potentials())"]), -] -CONFIGS = [ - IssueConfig( - "{nick}-{build}".format(**locals()), - search, - build_options=[build], - driver_options=["--build", build]) - for nick, search in SEARCHES - for build in BUILDS -] -SUITE = common_setup.DEFAULT_OPTIMAL_SUITE -ENVIRONMENT = MaiaEnvironment( - priority=0, email="florian.pommerening@unibas.ch") - -if common_setup.is_test_run(): - SUITE = IssueExperiment.DEFAULT_TEST_SUITE - ENVIRONMENT = LocalEnvironment(processes=1) - -exp = IssueExperiment( - revisions=REVISIONS, - configs=CONFIGS, - environment=ENVIRONMENT, -) -exp.add_suite(BENCHMARKS_DIR, SUITE) -exp.add_absolute_report_step() -exp.add_comparison_table_step() - -exp.add_report(RelativeScatterPlotReport( - attributes=["search_time"], - filter_algorithm=["issue688-v3-base-blind-release32", "issue688-v3-blind-release32"], - get_category=lambda run1, run2: run1.get("domain"), -), outfile="{}-blind-search_time.png".format(exp.name)) - -exp.run_steps() diff --git a/experiments/issue688/v3-sat.py b/experiments/issue688/v3-sat.py deleted file mode 100755 index 8268d27b1d..0000000000 --- a/experiments/issue688/v3-sat.py +++ /dev/null @@ -1,53 +0,0 @@ -#! 
/usr/bin/env python -# -*- coding: utf-8 -*- - -import os - -from lab.environments import LocalEnvironment, MaiaEnvironment - -import common_setup -from common_setup import IssueConfig, IssueExperiment - - -DIR = os.path.dirname(os.path.abspath(__file__)) -BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"] -REVISIONS = ["issue688-v3-base", "issue688-v3"] -BUILDS = ["release32"] -SEARCHES = [ - ("eager_ff", ["--heuristic", "h=ff()", "--search", "eager_greedy(h, preferred=h)"]), - ("lazy_add", ["--heuristic", "h=add()", "--search", "lazy_greedy(h, preferred=h)"]), - ("ehc_ff", ["--heuristic", "h=ff()", "--search", "ehc(h, preferred=h)"]), -] -CONFIGS = [ - IssueConfig( - "{nick}-{build}".format(**locals()), - search, - build_options=[build], - driver_options=["--build", build]) - for nick, search in SEARCHES - for build in BUILDS -] + [ - IssueConfig( - "lama-first-{build}".format(**locals()), - [], - build_options=[build], - driver_options=["--build", build, "--alias", "lama-first"]) -] -SUITE = common_setup.DEFAULT_SATISFICING_SUITE -ENVIRONMENT = MaiaEnvironment( - priority=0, email="florian.pommerening@unibas.ch") - -if common_setup.is_test_run(): - SUITE = IssueExperiment.DEFAULT_TEST_SUITE - ENVIRONMENT = LocalEnvironment(processes=1) - -exp = IssueExperiment( - revisions=REVISIONS, - configs=CONFIGS, - environment=ENVIRONMENT, -) -exp.add_suite(BENCHMARKS_DIR, SUITE) -exp.add_absolute_report_step() -exp.add_comparison_table_step() - -exp.run_steps() diff --git a/experiments/issue693/common_setup.py b/experiments/issue693/common_setup.py deleted file mode 100644 index 53b62ab252..0000000000 --- a/experiments/issue693/common_setup.py +++ /dev/null @@ -1,386 +0,0 @@ -# -*- coding: utf-8 -*- - -import itertools -import os -import platform -import subprocess -import sys - -from lab.experiment import ARGPARSER -from lab.steps import Step -from lab import tools - -from downward.experiment import FastDownwardExperiment -from downward.reports.absolute import 
AbsoluteReport -from downward.reports.compare import ComparativeReport -from downward.reports.scatter import ScatterPlotReport - -from relativescatter import RelativeScatterPlotReport - - -def parse_args(): - ARGPARSER.add_argument( - "--test", - choices=["yes", "no", "auto"], - default="auto", - dest="test_run", - help="test experiment locally on a small suite if --test=yes or " - "--test=auto and we are not on a cluster") - return ARGPARSER.parse_args() - -ARGS = parse_args() - - -DEFAULT_OPTIMAL_SUITE = [ - 'airport', 'barman-opt11-strips', 'barman-opt14-strips', 'blocks', - 'childsnack-opt14-strips', 'depot', 'driverlog', - 'elevators-opt08-strips', 'elevators-opt11-strips', - 'floortile-opt11-strips', 'floortile-opt14-strips', 'freecell', - 'ged-opt14-strips', 'grid', 'gripper', 'hiking-opt14-strips', - 'logistics00', 'logistics98', 'miconic', 'movie', 'mprime', - 'mystery', 'nomystery-opt11-strips', 'openstacks-opt08-strips', - 'openstacks-opt11-strips', 'openstacks-opt14-strips', - 'openstacks-strips', 'parcprinter-08-strips', - 'parcprinter-opt11-strips', 'parking-opt11-strips', - 'parking-opt14-strips', 'pathways-noneg', 'pegsol-08-strips', - 'pegsol-opt11-strips', 'pipesworld-notankage', - 'pipesworld-tankage', 'psr-small', 'rovers', 'satellite', - 'scanalyzer-08-strips', 'scanalyzer-opt11-strips', - 'sokoban-opt08-strips', 'sokoban-opt11-strips', 'storage', - 'tetris-opt14-strips', 'tidybot-opt11-strips', - 'tidybot-opt14-strips', 'tpp', 'transport-opt08-strips', - 'transport-opt11-strips', 'transport-opt14-strips', - 'trucks-strips', 'visitall-opt11-strips', 'visitall-opt14-strips', - 'woodworking-opt08-strips', 'woodworking-opt11-strips', - 'zenotravel'] - -DEFAULT_SATISFICING_SUITE = [ - 'airport', 'assembly', 'barman-sat11-strips', - 'barman-sat14-strips', 'blocks', 'cavediving-14-adl', - 'childsnack-sat14-strips', 'citycar-sat14-adl', 'depot', - 'driverlog', 'elevators-sat08-strips', 'elevators-sat11-strips', - 'floortile-sat11-strips', 
'floortile-sat14-strips', 'freecell', - 'ged-sat14-strips', 'grid', 'gripper', 'hiking-sat14-strips', - 'logistics00', 'logistics98', 'maintenance-sat14-adl', 'miconic', - 'miconic-fulladl', 'miconic-simpleadl', 'movie', 'mprime', - 'mystery', 'nomystery-sat11-strips', 'openstacks', - 'openstacks-sat08-adl', 'openstacks-sat08-strips', - 'openstacks-sat11-strips', 'openstacks-sat14-strips', - 'openstacks-strips', 'optical-telegraphs', 'parcprinter-08-strips', - 'parcprinter-sat11-strips', 'parking-sat11-strips', - 'parking-sat14-strips', 'pathways', 'pathways-noneg', - 'pegsol-08-strips', 'pegsol-sat11-strips', 'philosophers', - 'pipesworld-notankage', 'pipesworld-tankage', 'psr-large', - 'psr-middle', 'psr-small', 'rovers', 'satellite', - 'scanalyzer-08-strips', 'scanalyzer-sat11-strips', 'schedule', - 'sokoban-sat08-strips', 'sokoban-sat11-strips', 'storage', - 'tetris-sat14-strips', 'thoughtful-sat14-strips', - 'tidybot-sat11-strips', 'tpp', 'transport-sat08-strips', - 'transport-sat11-strips', 'transport-sat14-strips', 'trucks', - 'trucks-strips', 'visitall-sat11-strips', 'visitall-sat14-strips', - 'woodworking-sat08-strips', 'woodworking-sat11-strips', - 'zenotravel'] - - -def get_script(): - """Get file name of main script.""" - return tools.get_script_path() - - -def get_script_dir(): - """Get directory of main script. - - Usually a relative directory (depends on how it was called by the user.)""" - return os.path.dirname(get_script()) - - -def get_experiment_name(): - """Get name for experiment. - - Derived from the absolute filename of the main script, e.g. - "/ham/spam/eggs.py" => "spam-eggs".""" - script = os.path.abspath(get_script()) - script_dir = os.path.basename(os.path.dirname(script)) - script_base = os.path.splitext(os.path.basename(script))[0] - return "%s-%s" % (script_dir, script_base) - - -def get_data_dir(): - """Get data dir for the experiment. 
- - This is the subdirectory "data" of the directory containing - the main script.""" - return os.path.join(get_script_dir(), "data", get_experiment_name()) - - -def get_repo_base(): - """Get base directory of the repository, as an absolute path. - - Search upwards in the directory tree from the main script until a - directory with a subdirectory named ".hg" is found. - - Abort if the repo base cannot be found.""" - path = os.path.abspath(get_script_dir()) - while os.path.dirname(path) != path: - if os.path.exists(os.path.join(path, ".hg")): - return path - path = os.path.dirname(path) - sys.exit("repo base could not be found") - - -def is_running_on_cluster(): - node = platform.node() - return ( - "cluster" in node or - node.startswith("gkigrid") or - node in ["habakuk", "turtur"]) - - -def is_test_run(): - return ARGS.test_run == "yes" or ( - ARGS.test_run == "auto" and not is_running_on_cluster()) - - -def get_algo_nick(revision, config_nick): - return "{revision}-{config_nick}".format(**locals()) - - -class IssueConfig(object): - """Hold information about a planner configuration. - - See FastDownwardExperiment.add_algorithm() for documentation of the - constructor's options. 
- - """ - def __init__(self, nick, component_options, - build_options=None, driver_options=None): - self.nick = nick - self.component_options = component_options - self.build_options = build_options - self.driver_options = driver_options - - -class IssueExperiment(FastDownwardExperiment): - """Subclass of FastDownwardExperiment with some convenience features.""" - - DEFAULT_TEST_SUITE = ["gripper:prob01.pddl"] - - DEFAULT_TABLE_ATTRIBUTES = [ - "cost", - "coverage", - "error", - "evaluations", - "expansions", - "expansions_until_last_jump", - "generated", - "memory", - "quality", - "run_dir", - "score_evaluations", - "score_expansions", - "score_generated", - "score_memory", - "score_search_time", - "score_total_time", - "search_time", - "total_time", - ] - - DEFAULT_SCATTER_PLOT_ATTRIBUTES = [ - "evaluations", - "expansions", - "expansions_until_last_jump", - "initial_h_value", - "memory", - "search_time", - "total_time", - ] - - PORTFOLIO_ATTRIBUTES = [ - "cost", - "coverage", - "error", - "plan_length", - "run_dir", - ] - - def __init__(self, revisions=None, configs=None, path=None, **kwargs): - """ - - You can either specify both *revisions* and *configs* or none - of them. If they are omitted, you will need to call - exp.add_algorithm() manually. - - If *revisions* is given, it must be a non-empty list of - revision identifiers, which specify which planner versions to - use in the experiment. The same versions are used for - translator, preprocessor and search. :: - - IssueExperiment(revisions=["issue123", "4b3d581643"], ...) - - If *configs* is given, it must be a non-empty list of - IssueConfig objects. :: - - IssueExperiment(..., configs=[ - IssueConfig("ff", ["--search", "eager_greedy(ff())"]), - IssueConfig( - "lama", [], - driver_options=["--alias", "seq-sat-lama-2011"]), - ]) - - If *path* is specified, it must be the path to where the - experiment should be built (e.g. - /home/john/experiments/issue123/exp01/). 
If omitted, the - experiment path is derived automatically from the main - script's filename. Example:: - - script = experiments/issue123/exp01.py --> - path = experiments/issue123/data/issue123-exp01/ - - """ - - path = path or get_data_dir() - - FastDownwardExperiment.__init__(self, path=path, **kwargs) - - if (revisions and not configs) or (not revisions and configs): - raise ValueError( - "please provide either both or none of revisions and configs") - - for rev in revisions: - for config in configs: - self.add_algorithm( - get_algo_nick(rev, config.nick), - get_repo_base(), - rev, - config.component_options, - build_options=config.build_options, - driver_options=config.driver_options) - - self._revisions = revisions - self._configs = configs - - @classmethod - def _is_portfolio(cls, config_nick): - return "fdss" in config_nick - - @classmethod - def get_supported_attributes(cls, config_nick, attributes): - if cls._is_portfolio(config_nick): - return [attr for attr in attributes - if attr in cls.PORTFOLIO_ATTRIBUTES] - return attributes - - def add_absolute_report_step(self, **kwargs): - """Add step that makes an absolute report. - - Absolute reports are useful for experiments that don't compare - revisions. - - The report is written to the experiment evaluation directory. - - All *kwargs* will be passed to the AbsoluteReport class. If the - keyword argument *attributes* is not specified, a default list - of attributes is used. :: - - exp.add_absolute_report_step(attributes=["coverage"]) - - """ - kwargs.setdefault("attributes", self.DEFAULT_TABLE_ATTRIBUTES) - report = AbsoluteReport(**kwargs) - outfile = os.path.join( - self.eval_dir, - get_experiment_name() + "." + report.output_format) - self.add_report(report, outfile=outfile) - self.add_step( - 'publish-absolute-report', subprocess.call, ['publish', outfile]) - - def add_comparison_table_step(self, **kwargs): - """Add a step that makes pairwise revision comparisons. 
- - Create comparative reports for all pairs of Fast Downward - revisions. Each report pairs up the runs of the same config and - lists the two absolute attribute values and their difference - for all attributes in kwargs["attributes"]. - - All *kwargs* will be passed to the CompareConfigsReport class. - If the keyword argument *attributes* is not specified, a - default list of attributes is used. :: - - exp.add_comparison_table_step(attributes=["coverage"]) - - """ - kwargs.setdefault("attributes", self.DEFAULT_TABLE_ATTRIBUTES) - - def make_comparison_tables(): - for rev1, rev2 in itertools.combinations(self._revisions, 2): - compared_configs = [] - for config in self._configs: - config_nick = config.nick - compared_configs.append( - ("%s-%s" % (rev1, config_nick), - "%s-%s" % (rev2, config_nick), - "Diff (%s)" % config_nick)) - report = ComparativeReport(compared_configs, **kwargs) - outfile = os.path.join( - self.eval_dir, - "%s-%s-%s-compare.%s" % ( - self.name, rev1, rev2, report.output_format)) - report(self.eval_dir, outfile) - - def publish_comparison_tables(): - for rev1, rev2 in itertools.combinations(self._revisions, 2): - outfile = os.path.join( - self.eval_dir, - "%s-%s-%s-compare.html" % (self.name, rev1, rev2)) - subprocess.call(["publish", outfile]) - - self.add_step("make-comparison-tables", make_comparison_tables) - self.add_step( - "publish-comparison-tables", publish_comparison_tables) - - def add_scatter_plot_step(self, relative=False, attributes=None): - """Add step creating (relative) scatter plots for all revision pairs. - - Create a scatter plot for each combination of attribute, - configuration and revisions pair. If *attributes* is not - specified, a list of common scatter plot attributes is used. - For portfolios all attributes except "cost", "coverage" and - "plan_length" will be ignored. 
:: - - exp.add_scatter_plot_step(attributes=["expansions"]) - - """ - if relative: - report_class = RelativeScatterPlotReport - scatter_dir = os.path.join(self.eval_dir, "scatter-relative") - step_name = "make-relative-scatter-plots" - else: - report_class = ScatterPlotReport - scatter_dir = os.path.join(self.eval_dir, "scatter-absolute") - step_name = "make-absolute-scatter-plots" - if attributes is None: - attributes = self.DEFAULT_SCATTER_PLOT_ATTRIBUTES - - def make_scatter_plot(config_nick, rev1, rev2, attribute): - name = "-".join([self.name, rev1, rev2, attribute, config_nick]) - print "Make scatter plot for", name - algo1 = "{}-{}".format(rev1, config_nick) - algo2 = "{}-{}".format(rev2, config_nick) - report = report_class( - filter_config=[algo1, algo2], - attributes=[attribute], - get_category=lambda run1, run2: run1["domain"], - legend_location=(1.3, 0.5)) - report( - self.eval_dir, - os.path.join(scatter_dir, rev1 + "-" + rev2, name)) - - def make_scatter_plots(): - for config in self._configs: - for rev1, rev2 in itertools.combinations(self._revisions, 2): - for attribute in self.get_supported_attributes( - config.nick, attributes): - make_scatter_plot(config.nick, rev1, rev2, attribute) - - self.add_step(step_name, make_scatter_plots) diff --git a/experiments/issue693/hash-microbenchmark/.gitignore b/experiments/issue693/hash-microbenchmark/.gitignore deleted file mode 100644 index 44e0458dfa..0000000000 --- a/experiments/issue693/hash-microbenchmark/.gitignore +++ /dev/null @@ -1,4 +0,0 @@ -/.obj/ -/benchmark32 -/benchmark64 -/Makefile.depend diff --git a/experiments/issue693/hash-microbenchmark/Makefile b/experiments/issue693/hash-microbenchmark/Makefile deleted file mode 100644 index 58d5c758ef..0000000000 --- a/experiments/issue693/hash-microbenchmark/Makefile +++ /dev/null @@ -1,147 +0,0 @@ -DOWNWARD_BITWIDTH ?= 32 - -HEADERS = \ - fast_hash.h \ - hash.h \ - SpookyV2.h \ - -SOURCES = main.cc SpookyV2.cc -TARGET = benchmark - -default: release - 
-OBJECT_SUFFIX_RELEASE = .release$(DOWNWARD_BITWIDTH) -TARGET_SUFFIX_RELEASE = $(DOWNWARD_BITWIDTH) -OBJECT_SUFFIX_DEBUG = .debug$(DOWNWARD_BITWIDTH) -TARGET_SUFFIX_DEBUG = -debug$(DOWNWARD_BITWIDTH) -OBJECT_SUFFIX_PROFILE = .profile$(DOWNWARD_BITWIDTH) -TARGET_SUFFIX_PROFILE = -profile$(DOWNWARD_BITWIDTH) - -OBJECTS_RELEASE = $(SOURCES:%.cc=.obj/%$(OBJECT_SUFFIX_RELEASE).o) -TARGET_RELEASE = $(TARGET)$(TARGET_SUFFIX_RELEASE) - -OBJECTS_DEBUG = $(SOURCES:%.cc=.obj/%$(OBJECT_SUFFIX_DEBUG).o) -TARGET_DEBUG = $(TARGET)$(TARGET_SUFFIX_DEBUG) - -OBJECTS_PROFILE = $(SOURCES:%.cc=.obj/%$(OBJECT_SUFFIX_PROFILE).o) -TARGET_PROFILE = $(TARGET)$(TARGET_SUFFIX_PROFILE) - -DEPEND = $(CXX) -MM - -## CXXFLAGS, LDFLAGS, POSTLINKOPT are options for compiler and linker -## that are used for all three targets (release, debug, and profile). -## (POSTLINKOPT are options that appear *after* all object files.) - -ifeq ($(DOWNWARD_BITWIDTH), 32) - BITWIDTHOPT = -m32 -else ifeq ($(DOWNWARD_BITWIDTH), 64) - BITWIDTHOPT = -m64 -else - $(error Bad value for DOWNWARD_BITWIDTH) -endif - -CXXFLAGS = -CXXFLAGS += -g -CXXFLAGS += $(BITWIDTHOPT) -# Note: we write "-std=c++0x" rather than "-std=c++11" to support gcc 4.4. -CXXFLAGS += -std=c++0x -Wall -Wextra -pedantic -Wno-deprecated -Werror - -LDFLAGS = -LDFLAGS += $(BITWIDTHOPT) -LDFLAGS += -g - -POSTLINKOPT = - -CXXFLAGS_RELEASE = -O3 -DNDEBUG -fomit-frame-pointer -CXXFLAGS_DEBUG = -O3 -CXXFLAGS_PROFILE = -O3 -pg - -LDFLAGS_RELEASE = -LDFLAGS_DEBUG = -LDFLAGS_PROFILE = -pg - -POSTLINKOPT_RELEASE = -POSTLINKOPT_DEBUG = -POSTLINKOPT_PROFILE = - -LDFLAGS_RELEASE += -static -static-libgcc - -POSTLINKOPT_RELEASE += -Wl,-Bstatic -lrt -POSTLINKOPT_DEBUG += -lrt -POSTLINKOPT_PROFILE += -lrt - -all: release debug profile - -## Build rules for the release target follow. 
- -release: $(TARGET_RELEASE) - -$(TARGET_RELEASE): $(OBJECTS_RELEASE) - $(CXX) $(LDFLAGS) $(LDFLAGS_RELEASE) $(OBJECTS_RELEASE) $(POSTLINKOPT) $(POSTLINKOPT_RELEASE) -o $(TARGET_RELEASE) - -$(OBJECTS_RELEASE): .obj/%$(OBJECT_SUFFIX_RELEASE).o: %.cc - @mkdir -p $$(dirname $@) - $(CXX) $(CXXFLAGS) $(CXXFLAGS_RELEASE) -c $< -o $@ - -## Build rules for the debug target follow. - -debug: $(TARGET_DEBUG) - -$(TARGET_DEBUG): $(OBJECTS_DEBUG) - $(CXX) $(LDFLAGS) $(LDFLAGS_DEBUG) $(OBJECTS_DEBUG) $(POSTLINKOPT) $(POSTLINKOPT_DEBUG) -o $(TARGET_DEBUG) - -$(OBJECTS_DEBUG): .obj/%$(OBJECT_SUFFIX_DEBUG).o: %.cc - @mkdir -p $$(dirname $@) - $(CXX) $(CXXFLAGS) $(CXXFLAGS_DEBUG) -c $< -o $@ - -## Build rules for the profile target follow. - -profile: $(TARGET_PROFILE) - -$(TARGET_PROFILE): $(OBJECTS_PROFILE) - $(CXX) $(LDFLAGS) $(LDFLAGS_PROFILE) $(OBJECTS_PROFILE) $(POSTLINKOPT) $(POSTLINKOPT_PROFILE) -o $(TARGET_PROFILE) - -$(OBJECTS_PROFILE): .obj/%$(OBJECT_SUFFIX_PROFILE).o: %.cc - @mkdir -p $$(dirname $@) - $(CXX) $(CXXFLAGS) $(CXXFLAGS_PROFILE) -c $< -o $@ - -## Additional targets follow. - -PROFILE: $(TARGET_PROFILE) - ./$(TARGET_PROFILE) $(ARGS_PROFILE) - gprof $(TARGET_PROFILE) | (cleanup-profile 2> /dev/null || cat) > PROFILE - -clean: - rm -rf .obj - rm -f *~ *.pyc - rm -f Makefile.depend gmon.out PROFILE core - rm -f sas_plan - -distclean: clean - rm -f $(TARGET_RELEASE) $(TARGET_DEBUG) $(TARGET_PROFILE) - -## NOTE: If we just call gcc -MM on a source file that lives within a -## subdirectory, it will strip the directory part in the output. Hence -## the for loop with the sed call. 
- -Makefile.depend: $(SOURCES) $(HEADERS) - rm -f Makefile.temp - for source in $(SOURCES) ; do \ - $(DEPEND) $(CXXFLAGS) $$source > Makefile.temp0; \ - objfile=$${source%%.cc}.o; \ - sed -i -e "s@^[^:]*:@$$objfile:@" Makefile.temp0; \ - cat Makefile.temp0 >> Makefile.temp; \ - done - rm -f Makefile.temp0 Makefile.depend - sed -e "s@\(.*\)\.o:\(.*\)@.obj/\1$(OBJECT_SUFFIX_RELEASE).o:\2@" Makefile.temp >> Makefile.depend - sed -e "s@\(.*\)\.o:\(.*\)@.obj/\1$(OBJECT_SUFFIX_DEBUG).o:\2@" Makefile.temp >> Makefile.depend - sed -e "s@\(.*\)\.o:\(.*\)@.obj/\1$(OBJECT_SUFFIX_PROFILE).o:\2@" Makefile.temp >> Makefile.depend - rm -f Makefile.temp - -ifneq ($(MAKECMDGOALS),clean) - ifneq ($(MAKECMDGOALS),distclean) - -include Makefile.depend - endif -endif - -.PHONY: default all release debug profile clean distclean diff --git a/experiments/issue693/hash-microbenchmark/SpookyV2.cc b/experiments/issue693/hash-microbenchmark/SpookyV2.cc deleted file mode 100644 index b60c2de1fc..0000000000 --- a/experiments/issue693/hash-microbenchmark/SpookyV2.cc +++ /dev/null @@ -1,320 +0,0 @@ -// Spooky Hash -// A 128-bit noncryptographic hash, for checksums and table lookup -// By Bob Jenkins. Public domain. -// Oct 31 2010: published framework, disclaimer ShortHash isn't right -// Nov 7 2010: disabled ShortHash -// Oct 31 2011: replace End, ShortMix, ShortEnd, enable ShortHash again -// April 10 2012: buffer overflow on platforms without unaligned reads -// July 12 2012: was passing out variables in final to in/out in short -// July 30 2012: I reintroduced the buffer overflow -// August 5 2012: SpookyV2: d = should be d += in short hash, and remove extra mix from long hash - -#include -#include "SpookyV2.h" - -#define ALLOW_UNALIGNED_READS 1 - -// -// short hash ... it could be used on any message, -// but it's used by Spooky just for short messages. 
-// -void SpookyHash::Short( - const void *message, - size_t length, - uint64 *hash1, - uint64 *hash2) { - uint64 buf[2 * sc_numVars]; - union { - const uint8 *p8; - uint32 *p32; - uint64 *p64; - size_t i; - } - u; - - u.p8 = (const uint8 *)message; - - if (!ALLOW_UNALIGNED_READS && (u.i & 0x7)) { - memcpy(buf, message, length); - u.p64 = buf; - } - - size_t remainder = length % 32; - uint64 a = *hash1; - uint64 b = *hash2; - uint64 c = sc_const; - uint64 d = sc_const; - - if (length > 15) { - const uint64 *end = u.p64 + (length / 32) * 4; - - // handle all complete sets of 32 bytes - for (; u.p64 < end; u.p64 += 4) { - c += u.p64[0]; - d += u.p64[1]; - ShortMix(a, b, c, d); - a += u.p64[2]; - b += u.p64[3]; - } - - //Handle the case of 16+ remaining bytes. - if (remainder >= 16) { - c += u.p64[0]; - d += u.p64[1]; - ShortMix(a, b, c, d); - u.p64 += 2; - remainder -= 16; - } - } - - // Handle the last 0..15 bytes, and its length - d += ((uint64)length) << 56; - switch (remainder) { - case 15: - d += ((uint64)u.p8[14]) << 48; - case 14: - d += ((uint64)u.p8[13]) << 40; - case 13: - d += ((uint64)u.p8[12]) << 32; - case 12: - d += u.p32[2]; - c += u.p64[0]; - break; - case 11: - d += ((uint64)u.p8[10]) << 16; - case 10: - d += ((uint64)u.p8[9]) << 8; - case 9: - d += (uint64)u.p8[8]; - case 8: - c += u.p64[0]; - break; - case 7: - c += ((uint64)u.p8[6]) << 48; - case 6: - c += ((uint64)u.p8[5]) << 40; - case 5: - c += ((uint64)u.p8[4]) << 32; - case 4: - c += u.p32[0]; - break; - case 3: - c += ((uint64)u.p8[2]) << 16; - case 2: - c += ((uint64)u.p8[1]) << 8; - case 1: - c += (uint64)u.p8[0]; - break; - case 0: - c += sc_const; - d += sc_const; - } - ShortEnd(a, b, c, d); - *hash1 = a; - *hash2 = b; -} - - - - -// do the whole hash in one call -void SpookyHash::Hash128( - const void *message, - size_t length, - uint64 *hash1, - uint64 *hash2) { - if (length < sc_bufSize) { - Short(message, length, hash1, hash2); - return; - } - - uint64 h0, h1, h2, h3, h4, h5, h6, 
h7, h8, h9, h10, h11; - uint64 buf[sc_numVars]; - uint64 *end; - union { - const uint8 *p8; - uint64 *p64; - size_t i; - } - u; - size_t remainder; - - h0 = h3 = h6 = h9 = *hash1; - h1 = h4 = h7 = h10 = *hash2; - h2 = h5 = h8 = h11 = sc_const; - - u.p8 = (const uint8 *)message; - end = u.p64 + (length / sc_blockSize) * sc_numVars; - - // handle all whole sc_blockSize blocks of bytes - if (ALLOW_UNALIGNED_READS || ((u.i & 0x7) == 0)) { - while (u.p64 < end) { - Mix(u.p64, h0, h1, h2, h3, h4, h5, h6, h7, h8, h9, h10, h11); - u.p64 += sc_numVars; - } - } else { - while (u.p64 < end) { - memcpy(buf, u.p64, sc_blockSize); - Mix(buf, h0, h1, h2, h3, h4, h5, h6, h7, h8, h9, h10, h11); - u.p64 += sc_numVars; - } - } - - // handle the last partial block of sc_blockSize bytes - remainder = (length - ((const uint8 *)end - (const uint8 *)message)); - memcpy(buf, end, remainder); - memset(((uint8 *)buf) + remainder, 0, sc_blockSize - remainder); - ((uint8 *)buf)[sc_blockSize - 1] = remainder; - - // do some final mixing - End(buf, h0, h1, h2, h3, h4, h5, h6, h7, h8, h9, h10, h11); - *hash1 = h0; - *hash2 = h1; -} - - - -// init spooky state -void SpookyHash::Init(uint64 seed1, uint64 seed2) { - m_length = 0; - m_remainder = 0; - m_state[0] = seed1; - m_state[1] = seed2; -} - - -// add a message fragment to the state -void SpookyHash::Update(const void *message, size_t length) { - uint64 h0, h1, h2, h3, h4, h5, h6, h7, h8, h9, h10, h11; - size_t newLength = length + m_remainder; - uint8 remainder; - union { - const uint8 *p8; - uint64 *p64; - size_t i; - } - u; - const uint64 *end; - - // Is this message fragment too short? If it is, stuff it away. 
- if (newLength < sc_bufSize) { - memcpy(&((uint8 *)m_data)[m_remainder], message, length); - m_length = length + m_length; - m_remainder = (uint8)newLength; - return; - } - - // init the variables - if (m_length < sc_bufSize) { - h0 = h3 = h6 = h9 = m_state[0]; - h1 = h4 = h7 = h10 = m_state[1]; - h2 = h5 = h8 = h11 = sc_const; - } else { - h0 = m_state[0]; - h1 = m_state[1]; - h2 = m_state[2]; - h3 = m_state[3]; - h4 = m_state[4]; - h5 = m_state[5]; - h6 = m_state[6]; - h7 = m_state[7]; - h8 = m_state[8]; - h9 = m_state[9]; - h10 = m_state[10]; - h11 = m_state[11]; - } - m_length = length + m_length; - - // if we've got anything stuffed away, use it now - if (m_remainder) { - uint8 prefix = sc_bufSize - m_remainder; - memcpy(&(((uint8 *)m_data)[m_remainder]), message, prefix); - u.p64 = m_data; - Mix(u.p64, h0, h1, h2, h3, h4, h5, h6, h7, h8, h9, h10, h11); - Mix(&u.p64[sc_numVars], h0, h1, h2, h3, h4, h5, h6, h7, h8, h9, h10, h11); - u.p8 = ((const uint8 *)message) + prefix; - length -= prefix; - } else { - u.p8 = (const uint8 *)message; - } - - // handle all whole blocks of sc_blockSize bytes - end = u.p64 + (length / sc_blockSize) * sc_numVars; - remainder = (uint8)(length - ((const uint8 *)end - u.p8)); - if (ALLOW_UNALIGNED_READS || (u.i & 0x7) == 0) { - while (u.p64 < end) { - Mix(u.p64, h0, h1, h2, h3, h4, h5, h6, h7, h8, h9, h10, h11); - u.p64 += sc_numVars; - } - } else { - while (u.p64 < end) { - memcpy(m_data, u.p8, sc_blockSize); - Mix(m_data, h0, h1, h2, h3, h4, h5, h6, h7, h8, h9, h10, h11); - u.p64 += sc_numVars; - } - } - - // stuff away the last few bytes - m_remainder = remainder; - memcpy(m_data, end, remainder); - - // stuff away the variables - m_state[0] = h0; - m_state[1] = h1; - m_state[2] = h2; - m_state[3] = h3; - m_state[4] = h4; - m_state[5] = h5; - m_state[6] = h6; - m_state[7] = h7; - m_state[8] = h8; - m_state[9] = h9; - m_state[10] = h10; - m_state[11] = h11; -} - - -// report the hash for the concatenation of all message fragments 
so far -void SpookyHash::Final(uint64 *hash1, uint64 *hash2) { - // init the variables - if (m_length < sc_bufSize) { - *hash1 = m_state[0]; - *hash2 = m_state[1]; - Short(m_data, m_length, hash1, hash2); - return; - } - - const uint64 *data = (const uint64 *)m_data; - uint8 remainder = m_remainder; - - uint64 h0 = m_state[0]; - uint64 h1 = m_state[1]; - uint64 h2 = m_state[2]; - uint64 h3 = m_state[3]; - uint64 h4 = m_state[4]; - uint64 h5 = m_state[5]; - uint64 h6 = m_state[6]; - uint64 h7 = m_state[7]; - uint64 h8 = m_state[8]; - uint64 h9 = m_state[9]; - uint64 h10 = m_state[10]; - uint64 h11 = m_state[11]; - - if (remainder >= sc_blockSize) { - // m_data can contain two blocks; handle any whole first block - Mix(data, h0, h1, h2, h3, h4, h5, h6, h7, h8, h9, h10, h11); - data += sc_numVars; - remainder -= sc_blockSize; - } - - // mix in the last partial block, and the length mod sc_blockSize - memset(&((uint8 *)data)[remainder], 0, (sc_blockSize - remainder)); - - ((uint8 *)data)[sc_blockSize - 1] = remainder; - - // do some final mixing - End(data, h0, h1, h2, h3, h4, h5, h6, h7, h8, h9, h10, h11); - - *hash1 = h0; - *hash2 = h1; -} diff --git a/experiments/issue693/hash-microbenchmark/SpookyV2.h b/experiments/issue693/hash-microbenchmark/SpookyV2.h deleted file mode 100644 index 87facbf964..0000000000 --- a/experiments/issue693/hash-microbenchmark/SpookyV2.h +++ /dev/null @@ -1,414 +0,0 @@ -// -// SpookyHash: a 128-bit noncryptographic hash function -// By Bob Jenkins, public domain -// Oct 31 2010: alpha, framework + SpookyHash::Mix appears right -// Oct 31 2011: alpha again, Mix only good to 2^^69 but rest appears right -// Dec 31 2011: beta, improved Mix, tested it for 2-bit deltas -// Feb 2 2012: production, same bits as beta -// Feb 5 2012: adjusted definitions of uint* to be more portable -// Mar 30 2012: 3 bytes/cycle, not 4. Alpha was 4 but wasn't thorough enough. 
-// August 5 2012: SpookyV2 (different results) -// -// Up to 3 bytes/cycle for long messages. Reasonably fast for short messages. -// All 1 or 2 bit deltas achieve avalanche within 1% bias per output bit. -// -// This was developed for and tested on 64-bit x86-compatible processors. -// It assumes the processor is little-endian. There is a macro -// controlling whether unaligned reads are allowed (by default they are). -// This should be an equally good hash on big-endian machines, but it will -// compute different results on them than on little-endian machines. -// -// Google's CityHash has similar specs to SpookyHash, and CityHash is faster -// on new Intel boxes. MD4 and MD5 also have similar specs, but they are orders -// of magnitude slower. CRCs are two or more times slower, but unlike -// SpookyHash, they have nice math for combining the CRCs of pieces to form -// the CRCs of wholes. There are also cryptographic hashes, but those are even -// slower than MD5. -// - -#include - -#ifdef _MSC_VER -# define INLINE __forceinline -typedef unsigned __int64 uint64; -typedef unsigned __int32 uint32; -typedef unsigned __int16 uint16; -typedef unsigned __int8 uint8; -#else -# include -# define INLINE inline -typedef uint64_t uint64; -typedef uint32_t uint32; -typedef uint16_t uint16; -typedef uint8_t uint8; -#endif - - -class SpookyHash { -public: - // - // SpookyHash: hash a single message in one call, produce 128-bit output - // - static void Hash128( - const void *message, // message to hash - size_t length, // length of message in bytes - uint64 *hash1, // in/out: in seed 1, out hash value 1 - uint64 *hash2); // in/out: in seed 2, out hash value 2 - - // - // Hash64: hash a single message in one call, return 64-bit output - // - static uint64 Hash64( - const void *message, // message to hash - size_t length, // length of message in bytes - uint64 seed) { // seed - uint64 hash1 = seed; - Hash128(message, length, &hash1, &seed); - return hash1; - } - - // - // 
Hash32: hash a single message in one call, produce 32-bit output - // - static uint32 Hash32( - const void *message, // message to hash - size_t length, // length of message in bytes - uint32 seed) { // seed - uint64 hash1 = seed, hash2 = seed; - Hash128(message, length, &hash1, &hash2); - return (uint32)hash1; - } - - // - // Init: initialize the context of a SpookyHash - // - void Init( - uint64 seed1, // any 64-bit value will do, including 0 - uint64 seed2); // different seeds produce independent hashes - - // - // Update: add a piece of a message to a SpookyHash state - // - void Update( - const void *message, // message fragment - size_t length); // length of message fragment in bytes - - - // - // Final: compute the hash for the current SpookyHash state - // - // This does not modify the state; you can keep updating it afterward - // - // The result is the same as if SpookyHash() had been called with - // all the pieces concatenated into one message. - // - void Final( - uint64 *hash1, // out only: first 64 bits of hash value. - uint64 *hash2); // out only: second 64 bits of hash value. - - // - // left rotate a 64-bit value by k bytes - // - static INLINE uint64 Rot64(uint64 x, int k) { - return (x << k) | (x >> (64 - k)); - } - - // - // This is used if the input is 96 bytes long or longer. - // - // The internal state is fully overwritten every 96 bytes. - // Every input bit appears to cause at least 128 bits of entropy - // before 96 other bytes are combined, when run forward or backward - // For every input bit, - // Two inputs differing in just that input bit - // Where "differ" means xor or subtraction - // And the base value is random - // When run forward or backwards one Mix - // I tried 3 pairs of each; they all differed by at least 212 bits. 
- // - static INLINE void Mix( - const uint64 *data, - uint64 &s0, uint64 &s1, uint64 &s2, uint64 &s3, - uint64 &s4, uint64 &s5, uint64 &s6, uint64 &s7, - uint64 &s8, uint64 &s9, uint64 &s10, uint64 &s11) { - s0 += data[0]; - s2 ^= s10; - s11 ^= s0; - s0 = Rot64(s0, 11); - s11 += s1; - s1 += data[1]; - s3 ^= s11; - s0 ^= s1; - s1 = Rot64(s1, 32); - s0 += s2; - s2 += data[2]; - s4 ^= s0; - s1 ^= s2; - s2 = Rot64(s2, 43); - s1 += s3; - s3 += data[3]; - s5 ^= s1; - s2 ^= s3; - s3 = Rot64(s3, 31); - s2 += s4; - s4 += data[4]; - s6 ^= s2; - s3 ^= s4; - s4 = Rot64(s4, 17); - s3 += s5; - s5 += data[5]; - s7 ^= s3; - s4 ^= s5; - s5 = Rot64(s5, 28); - s4 += s6; - s6 += data[6]; - s8 ^= s4; - s5 ^= s6; - s6 = Rot64(s6, 39); - s5 += s7; - s7 += data[7]; - s9 ^= s5; - s6 ^= s7; - s7 = Rot64(s7, 57); - s6 += s8; - s8 += data[8]; - s10 ^= s6; - s7 ^= s8; - s8 = Rot64(s8, 55); - s7 += s9; - s9 += data[9]; - s11 ^= s7; - s8 ^= s9; - s9 = Rot64(s9, 54); - s8 += s10; - s10 += data[10]; - s0 ^= s8; - s9 ^= s10; - s10 = Rot64(s10, 22); - s9 += s11; - s11 += data[11]; - s1 ^= s9; - s10 ^= s11; - s11 = Rot64(s11, 46); - s10 += s0; - } - - // - // Mix all 12 inputs together so that h0, h1 are a hash of them all. - // - // For two inputs differing in just the input bits - // Where "differ" means xor or subtraction - // And the base value is random, or a counting value starting at that bit - // The final result will have each bit of h0, h1 flip - // For every input bit, - // with probability 50 +- .3% - // For every pair of input bits, - // with probability 50 +- 3% - // - // This does not rely on the last Mix() call having already mixed some. - // Two iterations was almost good enough for a 64-bit result, but a - // 128-bit result is reported, so End() does three iterations. 
- // - static INLINE void EndPartial( - uint64 &h0, uint64 &h1, uint64 &h2, uint64 &h3, - uint64 &h4, uint64 &h5, uint64 &h6, uint64 &h7, - uint64 &h8, uint64 &h9, uint64 &h10, uint64 &h11) { - h11 += h1; - h2 ^= h11; - h1 = Rot64(h1, 44); - h0 += h2; - h3 ^= h0; - h2 = Rot64(h2, 15); - h1 += h3; - h4 ^= h1; - h3 = Rot64(h3, 34); - h2 += h4; - h5 ^= h2; - h4 = Rot64(h4, 21); - h3 += h5; - h6 ^= h3; - h5 = Rot64(h5, 38); - h4 += h6; - h7 ^= h4; - h6 = Rot64(h6, 33); - h5 += h7; - h8 ^= h5; - h7 = Rot64(h7, 10); - h6 += h8; - h9 ^= h6; - h8 = Rot64(h8, 13); - h7 += h9; - h10 ^= h7; - h9 = Rot64(h9, 38); - h8 += h10; - h11 ^= h8; - h10 = Rot64(h10, 53); - h9 += h11; - h0 ^= h9; - h11 = Rot64(h11, 42); - h10 += h0; - h1 ^= h10; - h0 = Rot64(h0, 54); - } - - static INLINE void End( - const uint64 *data, - uint64 &h0, uint64 &h1, uint64 &h2, uint64 &h3, - uint64 &h4, uint64 &h5, uint64 &h6, uint64 &h7, - uint64 &h8, uint64 &h9, uint64 &h10, uint64 &h11) { - h0 += data[0]; - h1 += data[1]; - h2 += data[2]; - h3 += data[3]; - h4 += data[4]; - h5 += data[5]; - h6 += data[6]; - h7 += data[7]; - h8 += data[8]; - h9 += data[9]; - h10 += data[10]; - h11 += data[11]; - EndPartial(h0, h1, h2, h3, h4, h5, h6, h7, h8, h9, h10, h11); - EndPartial(h0, h1, h2, h3, h4, h5, h6, h7, h8, h9, h10, h11); - EndPartial(h0, h1, h2, h3, h4, h5, h6, h7, h8, h9, h10, h11); - } - - // - // The goal is for each bit of the input to expand into 128 bits of - // apparent entropy before it is fully overwritten. 
- // n trials both set and cleared at least m bits of h0 h1 h2 h3 - // n: 2 m: 29 - // n: 3 m: 46 - // n: 4 m: 57 - // n: 5 m: 107 - // n: 6 m: 146 - // n: 7 m: 152 - // when run forwards or backwards - // for all 1-bit and 2-bit diffs - // with diffs defined by either xor or subtraction - // with a base of all zeros plus a counter, or plus another bit, or random - // - static INLINE void ShortMix(uint64 &h0, uint64 &h1, uint64 &h2, uint64 &h3) { - h2 = Rot64(h2, 50); - h2 += h3; - h0 ^= h2; - h3 = Rot64(h3, 52); - h3 += h0; - h1 ^= h3; - h0 = Rot64(h0, 30); - h0 += h1; - h2 ^= h0; - h1 = Rot64(h1, 41); - h1 += h2; - h3 ^= h1; - h2 = Rot64(h2, 54); - h2 += h3; - h0 ^= h2; - h3 = Rot64(h3, 48); - h3 += h0; - h1 ^= h3; - h0 = Rot64(h0, 38); - h0 += h1; - h2 ^= h0; - h1 = Rot64(h1, 37); - h1 += h2; - h3 ^= h1; - h2 = Rot64(h2, 62); - h2 += h3; - h0 ^= h2; - h3 = Rot64(h3, 34); - h3 += h0; - h1 ^= h3; - h0 = Rot64(h0, 5); - h0 += h1; - h2 ^= h0; - h1 = Rot64(h1, 36); - h1 += h2; - h3 ^= h1; - } - - // - // Mix all 4 inputs together so that h0, h1 are a hash of them all. 
- // - // For two inputs differing in just the input bits - // Where "differ" means xor or subtraction - // And the base value is random, or a counting value starting at that bit - // The final result will have each bit of h0, h1 flip - // For every input bit, - // with probability 50 +- .3% (it is probably better than that) - // For every pair of input bits, - // with probability 50 +- .75% (the worst case is approximately that) - // - static INLINE void ShortEnd(uint64 &h0, uint64 &h1, uint64 &h2, uint64 &h3) { - h3 ^= h2; - h2 = Rot64(h2, 15); - h3 += h2; - h0 ^= h3; - h3 = Rot64(h3, 52); - h0 += h3; - h1 ^= h0; - h0 = Rot64(h0, 26); - h1 += h0; - h2 ^= h1; - h1 = Rot64(h1, 51); - h2 += h1; - h3 ^= h2; - h2 = Rot64(h2, 28); - h3 += h2; - h0 ^= h3; - h3 = Rot64(h3, 9); - h0 += h3; - h1 ^= h0; - h0 = Rot64(h0, 47); - h1 += h0; - h2 ^= h1; - h1 = Rot64(h1, 54); - h2 += h1; - h3 ^= h2; - h2 = Rot64(h2, 32); - h3 += h2; - h0 ^= h3; - h3 = Rot64(h3, 25); - h0 += h3; - h1 ^= h0; - h0 = Rot64(h0, 63); - h1 += h0; - } - -private: - - // - // Short is used for messages under 192 bytes in length - // Short has a low startup cost, the normal mode is good for long - // keys, the cost crossover is at about 192 bytes. The two modes were - // held to the same quality bar. 
- // - static void Short( - const void *message, // message (array of bytes, not necessarily aligned) - size_t length, // length of message (in bytes) - uint64 *hash1, // in/out: in the seed, out the hash value - uint64 *hash2); // in/out: in the seed, out the hash value - - // number of uint64's in internal state - static const size_t sc_numVars = 12; - - // size of the internal state - static const size_t sc_blockSize = sc_numVars * 8; - - // size of buffer of unhashed data, in bytes - static const size_t sc_bufSize = 2 * sc_blockSize; - - // - // sc_const: a constant which: - // * is not zero - // * is odd - // * is a not-very-regular mix of 1's and 0's - // * does not need any other special mathematical properties - // - static const uint64 sc_const = 0xdeadbeefdeadbeefLL; - - uint64 m_data[2 * sc_numVars]; // unhashed data, for partial messages - uint64 m_state[sc_numVars]; // internal state of the hash - size_t m_length; // total length of the input so far - uint8 m_remainder; // length of unhashed data stashed in m_data -}; diff --git a/experiments/issue693/hash-microbenchmark/fast_hash.h b/experiments/issue693/hash-microbenchmark/fast_hash.h deleted file mode 100644 index 694ab372c8..0000000000 --- a/experiments/issue693/hash-microbenchmark/fast_hash.h +++ /dev/null @@ -1,149 +0,0 @@ -#ifndef FAST_HASH_H -#define FAST_HASH_H - -#include -#include -#include -#include -#include -#include -#include - -namespace fast_hash { -static_assert(sizeof(unsigned int) == 4, "unsigned int has unexpected size"); - -/* - Internal class storing the state of the hashing process. It should only be - instantiated by functions in this file. 
-*/ -class HashState { - std::uint32_t hash; - -public: - HashState() - : hash(0xdeadbeef) { - } - - void feed(std::uint32_t value) { - hash ^= value + 0x9e3779b9 + (hash << 6) + (hash >> 2); - } - - std::uint32_t get_hash32() { - return hash; - } - - std::uint64_t get_hash64() { - return (static_cast(hash) << 32) | hash; - } -}; - - -/* - These functions add a new object to an existing HashState object. - - To add hashing support for a user type X, provide an override - for utils::feed(HashState &hash_state, const X &value). -*/ -static_assert( - sizeof(int) == sizeof(std::uint32_t), - "int and uint32_t have different sizes"); -inline void feed(HashState &hash_state, int value) { - hash_state.feed(static_cast(value)); -} - -static_assert( - sizeof(unsigned int) == sizeof(std::uint32_t), - "unsigned int and uint32_t have different sizes"); -inline void feed(HashState &hash_state, unsigned int value) { - hash_state.feed(static_cast(value)); -} - -inline void feed(HashState &hash_state, std::uint64_t value) { - hash_state.feed(static_cast(value)); - value >>= 32; - hash_state.feed(static_cast(value)); -} - -template -void feed(HashState &hash_state, const T *p) { - // This is wasteful in 32-bit mode, but we plan to discontinue 32-bit compiles anyway. - feed(hash_state, reinterpret_cast(p)); -} - -template -void feed(HashState &hash_state, const std::pair &p) { - feed(hash_state, p.first); - feed(hash_state, p.second); -} - -template -void feed(HashState &hash_state, const std::vector &vec) { - /* - Feed vector size to ensure that no two different vectors of the same type - have the same code prefix. - */ - feed(hash_state, vec.size()); - for (const T &item : vec) { - feed(hash_state, item); - } -} - - -/* - Public hash functions. - - get_hash() is used internally by the HashMap and HashSet classes below. In - more exotic use cases, such as implementing a custom hash table, you can also - use `get_hash32()`, `get_hash64()` and `get_hash()` directly. 
-*/ -template -std::uint32_t get_hash32(const T &value) { - HashState hash_state; - feed(hash_state, value); - return hash_state.get_hash32(); -} - -template -std::uint64_t get_hash64(const T &value) { - HashState hash_state; - feed(hash_state, value); - return hash_state.get_hash64(); -} - -template -std::size_t get_hash(const T &value) { - return static_cast(get_hash64(value)); -} - - -// This struct should only be used by HashMap and HashSet below. -template -struct Hash { - std::size_t operator()(const T &val) const { - return get_hash(val); - } -}; - -/* - Aliases for hash sets and hash maps in user code. All user code should use - utils::UnorderedSet and utils::UnorderedMap instead of std::unordered_set and - std::unordered_map. - - To hash types that are not supported out of the box, implement utils::feed. -*/ -template -using HashMap = std::unordered_map>; - -template -using HashSet = std::unordered_set>; - - -/* Transitional aliases and functions */ -template -using UnorderedMap = std::unordered_map; - -template -using UnorderedSet = std::unordered_set; -} - -#endif diff --git a/experiments/issue693/hash-microbenchmark/hash.h b/experiments/issue693/hash-microbenchmark/hash.h deleted file mode 100644 index 3838a95bf1..0000000000 --- a/experiments/issue693/hash-microbenchmark/hash.h +++ /dev/null @@ -1,373 +0,0 @@ -#ifndef UTILS_HASH_H -#define UTILS_HASH_H - -#include -#include -#include -#include -#include -#include -#include - -namespace utils { -/* - We provide a family of hash functions that are supposedly higher - quality than what is guaranteed by the standard library. Changing a - single bit in the input should typically change around half of the - bits in the final hash value. The hash functions we previously used - turned out to cluster when we tried hash tables with open addressing - for state registries. - - The low-level hash functions are based on lookup3.c by Bob Jenkins, - May 2006, public domain. 
See http://www.burtleburtle.net/bob/c/lookup3.c. - - To hash an object x, it is represented as a sequence of 32-bit - pieces (called the "code" for x, written code(x) in the following) - that are "fed" to the main hashing function (implemented in class - HashState) one by one. This allows a compositional approach to - hashing. For example, the code for a pair p is the concatenation of - code(x.first) and code(x.second). - - A simpler compositional approach to hashing would first hash the - components of an object and then combine the hash values, and this - is what a previous version of our code did. The approach with an - explicit HashState object is stronger because the internal hash - state is larger (96 bits) than the final hash value and hence pairs - and where x and x' have the same hash value don't - necessarily collide. Another advantage of our approach is that we - can use the same overall hashing approach to generate hash values of - different types (e.g. 32-bit vs. 64-bit unsigned integers). - - To extend the hashing mechanism to further classes, provide a - template specialization for the "feed" function. This must satisfy - the following requirements: - - A) If x and y are objects of the same type, they should have code(x) - = code(y) iff x = y. That is, the code sequence should uniquely - describe each logically distinct object. - - This requirement avoids unnecessary hash collisions. Of course, - there will still be "necessary" hash collisions because different - code sequences can collide in the low-level hash function. - - B) To play nicely with composition, we additionally require that feed - implements a prefix code, i.e., for objects x != y of the same - type, code(x) must not be a prefix of code(y). 
- - This requirement makes it much easier to define non-colliding - code sequences for composite objects such as pairs via - concatenation: if != , then code(a) != code(a') - and code(b) != code(b') is *not* sufficient for concat(code(a), - code(b)) != concat(code(a'), code(b')). However, if we require a - prefix code, it *is* sufficient and the resulting code will again - be a prefix code. - - Note that objects "of the same type" is meant as "logical type" - rather than C++ type. - - For example, for objects such as vectors where we expect - different-length vectors to be combined in the same containers (= - have the same logical type), we include the length of the vector as - the first element in the code to ensure the prefix code property. - - In contrast, for integer arrays encoding states, we *do not* include - the length as a prefix because states of different sizes are - considered to be different logical types and should not be mixed in - the same container, even though they are represented by the same C++ - type. -*/ - -static_assert(sizeof(unsigned int) == 4, "unsigned int has unexpected size"); - -/* - Circular rotation (http://stackoverflow.com/a/31488147/224132). -*/ -inline unsigned int rotate(unsigned int value, unsigned int offset) { - return (value << offset) | (value >> (32 - offset)); -} - -/* - Internal class storing the state of the hashing process. It should only be - instantiated by functions in this file. -*/ -class HashState { - std::uint32_t a, b, c; - int pending_values; - - /* - Mix the three 32-bit values bijectively. - - Any information in (a, b, c) before mix() is still in (a, b, c) after - mix(). - */ - void mix() { - a -= c; - a ^= rotate(c, 4); - c += b; - b -= a; - b ^= rotate(a, 6); - a += c; - c -= b; - c ^= rotate(b, 8); - b += a; - a -= c; - a ^= rotate(c, 16); - c += b; - b -= a; - b ^= rotate(a, 19); - a += c; - c -= b; - c ^= rotate(b, 4); - b += a; - } - - /* - Final mixing of the three 32-bit values (a, b, c) into c. 
- - Triples of (a, b, c) differing in only a few bits will usually produce - values of c that look totally different. - */ - void final_mix() { - c ^= b; - c -= rotate(b, 14); - a ^= c; - a -= rotate(c, 11); - b ^= a; - b -= rotate(a, 25); - c ^= b; - c -= rotate(b, 16); - a ^= c; - a -= rotate(c, 4); - b ^= a; - b -= rotate(a, 14); - c ^= b; - c -= rotate(b, 24); - } - -public: - HashState() - : a(0xdeadbeef), - b(a), - c(a), - pending_values(0) { - } - - void feed_ints(const int *values, int length) { - // Handle most of the key. - while (length > 3) { - a += values[0]; - b += values[1]; - c += values[2]; - mix(); - length -= 3; - values += 3; - } - - // Handle the last 3 unsigned ints. All case statements fall through. - switch (length) { - case 3: - c += values[2]; - case 2: - b += values[1]; - case 1: - a += values[0]; - final_mix(); - // case 0: nothing left to add. - case 0: - break; - } - } - - void feed(std::uint32_t value) { - assert(pending_values != -1); - if (pending_values == 3) { - mix(); - pending_values = 0; - } - if (pending_values == 0) { - a += value; - ++pending_values; - } else if (pending_values == 1) { - b += value; - ++pending_values; - } else if (pending_values == 2) { - c += value; - ++pending_values; - } - } - - std::uint32_t get_hash32() { - assert(pending_values != -1); - if (pending_values) { - /* - pending_values == 0 can only hold if we never called - feed(), i.e., if we are hashing an empty sequence. - In this case we don't call final_mix for compatibility - with the original hash function by Jenkins. - */ - final_mix(); - } - pending_values = -1; - return c; - } - - std::uint64_t get_hash64() { - assert(pending_values != -1); - if (pending_values) { - // See comment for get_hash32. - final_mix(); - } - pending_values = -1; - return (static_cast(b) << 32) | c; - } -}; - - -/* - These functions add a new object to an existing HashState object. 
- - To add hashing support for a user type X, provide an override - for utils::feed(HashState &hash_state, const X &value). -*/ -static_assert( - sizeof(int) == sizeof(std::uint32_t), - "int and uint32_t have different sizes"); -inline void feed(HashState &hash_state, int value) { - hash_state.feed(static_cast(value)); -} - -static_assert( - sizeof(unsigned int) == sizeof(std::uint32_t), - "unsigned int and uint32_t have different sizes"); -inline void feed(HashState &hash_state, unsigned int value) { - hash_state.feed(static_cast(value)); -} - -inline void feed(HashState &hash_state, std::uint64_t value) { - hash_state.feed(static_cast(value)); - value >>= 32; - hash_state.feed(static_cast(value)); -} - -template -void feed(HashState &hash_state, const T *p) { - // This is wasteful in 32-bit mode, but we plan to discontinue 32-bit compiles anyway. - feed(hash_state, reinterpret_cast(p)); -} - -template -void feed(HashState &hash_state, const std::pair &p) { - feed(hash_state, p.first); - feed(hash_state, p.second); -} - -template -void feed(HashState &hash_state, const std::vector &vec) { - /* - Feed vector size to ensure that no two different vectors of the same type - have the same code prefix. - */ - feed(hash_state, vec.size()); - for (const T &item : vec) { - feed(hash_state, item); - } -} - - -/* - Public hash functions. - - get_hash() is used internally by the HashMap and HashSet classes below. In - more exotic use cases, such as implementing a custom hash table, you can also - use `get_hash32()`, `get_hash64()` and `get_hash()` directly. 
-*/ -template -std::uint32_t get_hash32(const T &value) { - HashState hash_state; - feed(hash_state, value); - return hash_state.get_hash32(); -} - -template -std::uint64_t get_hash64(const T &value) { - HashState hash_state; - feed(hash_state, value); - return hash_state.get_hash64(); -} - -template -std::size_t get_hash(const T &value) { - return static_cast(get_hash64(value)); -} - - -// This struct should only be used by HashMap and HashSet below. -template -struct Hash { - std::size_t operator()(const T &val) const { - return get_hash(val); - } -}; - -/* - Aliases for hash sets and hash maps in user code. All user code should use - utils::UnorderedSet and utils::UnorderedMap instead of std::unordered_set and - std::unordered_map. - - To hash types that are not supported out of the box, implement utils::feed. -*/ -template -using HashMap = std::unordered_map>; - -template -using HashSet = std::unordered_set>; - - -/* Transitional aliases and functions */ -template -using UnorderedMap = std::unordered_map; - -template -using UnorderedSet = std::unordered_set; - -template -inline void hash_combine(size_t &hash, const T &value) { - std::hash hasher; - /* - The combination of hash values is based on issue 6.18 in - http://www.open-std.org/JTC1/SC22/WG21/docs/papers/2005/n1756.pdf. - Boost combines hash values in the same way. 
- */ - hash ^= hasher(value) + 0x9e3779b9 + (hash << 6) + (hash >> 2); -} - -template -size_t hash_sequence(const Sequence &data, size_t length) { - size_t hash = 0; - for (size_t i = 0; i < length; ++i) { - hash_combine(hash, data[i]); - } - return hash; -} -} - -namespace std { -template -struct hash> { - size_t operator()(const std::vector &vec) const { - return utils::hash_sequence(vec, vec.size()); - } -}; - -template -struct hash> { - size_t operator()(const std::pair &pair) const { - size_t hash = 0; - utils::hash_combine(hash, pair.first); - utils::hash_combine(hash, pair.second); - return hash; - } -}; -} - -#endif diff --git a/experiments/issue693/hash-microbenchmark/main.cc b/experiments/issue693/hash-microbenchmark/main.cc deleted file mode 100644 index cee5c19742..0000000000 --- a/experiments/issue693/hash-microbenchmark/main.cc +++ /dev/null @@ -1,274 +0,0 @@ -#include -#include -#include -#include -#include -#include - -#include "fast_hash.h" -#include "hash.h" -#include "SpookyV2.h" - -using namespace std; - - -static void benchmark(const string &desc, int num_calls, - const function &func) { - cout << "Running " << desc << " " << num_calls << " times:" << flush; - - clock_t start = clock(); - for (int j = 0; j < num_calls; ++j) - func(); - clock_t end = clock(); - double duration = static_cast(end - start) / CLOCKS_PER_SEC; - cout << " " << duration << "s" << endl; -} - - -static int scramble(int i) { - return (0xdeadbeef * i) ^ 0xfeedcafe; -} - - -#define rot32(x, k) (((x) << (k)) | ((x) >> (32 - (k)))) - -#define mix32(a, b, c) \ - { \ - a -= c; a ^= rot32(c, 4); c += b; \ - b -= a; b ^= rot32(a, 6); a += c; \ - c -= b; c ^= rot32(b, 8); b += a; \ - a -= c; a ^= rot32(c, 16); c += b; \ - b -= a; b ^= rot32(a, 19); a += c; \ - c -= b; c ^= rot32(b, 4); b += a; \ - } - -#define final32(a, b, c) \ - { \ - c ^= b; c -= rot32(b, 14); \ - a ^= c; a -= rot32(c, 11); \ - b ^= a; b -= rot32(a, 25); \ - c ^= b; c -= rot32(b, 16); \ - a ^= c; a -= rot32(c, 
4); \ - b ^= a; b -= rot32(a, 14); \ - c ^= b; c -= rot32(b, 24); \ - } - -inline unsigned int hash_unsigned_int_sequence( - const int *k, unsigned int length, unsigned int initval) { - unsigned int a, b, c; - - // Set up the internal state. - a = b = c = 0xdeadbeef + (length << 2) + initval; - - // Handle most of the key. - while (length > 3) { - a += k[0]; - b += k[1]; - c += k[2]; - mix32(a, b, c); - length -= 3; - k += 3; - } - - // Handle the last 3 unsigned ints. All case statements fall through. - switch (length) { - case 3: - c += k[2]; - case 2: - b += k[1]; - case 1: - a += k[0]; - final32(a, b, c); - // case 0: nothing left to add. - case 0: - break; - } - - return c; -} - -struct BurtleBurtleHash { - std::size_t operator()(const std::vector &vec) const { - return hash_unsigned_int_sequence(vec.data(), vec.size(), 2016); - } -}; - -using BurtleBurtleHashSet = std::unordered_set, BurtleBurtleHash>; - - -struct HashWordHash { - std::size_t operator()(const std::vector &vec) const { - utils::HashState hash_state; - hash_state.feed_ints(vec.data(), vec.size()); - return hash_state.get_hash64(); - } -}; - - -struct SpookyV2Hash { - std::size_t operator()(const std::vector &vec) const { - return SpookyHash::Hash64(vec.data(), vec.size() * 4, 2016); - } -}; - -struct SpookyV2HashInt { - std::size_t operator()(int i) const { - return SpookyHash::Hash64(&i, sizeof(int), 2016); - } -}; - - -int main(int, char **) { - const int REPETITIONS = 2; - const int NUM_CALLS = 100000; - const int NUM_INSERTIONS = 100; - - for (int i = 0; i < REPETITIONS; ++i) { - benchmark("nothing", NUM_CALLS, [] () {}); - cout << endl; - benchmark("insert int with BoostHash", NUM_CALLS, - [&]() { - unordered_set s; - for (int i = 0; i < NUM_INSERTIONS; ++i) { - s.insert(scramble(i)); - } - }); - benchmark("insert int with BoostHashFeed", NUM_CALLS, - [&]() { - fast_hash::HashSet s; - for (int i = 0; i < NUM_INSERTIONS; ++i) { - s.insert(scramble(i)); - } - }); - benchmark("insert int with 
BurtleFeed", NUM_CALLS, - [&]() { - utils::HashSet s; - for (int i = 0; i < NUM_INSERTIONS; ++i) { - s.insert(scramble(i)); - } - }); - benchmark("insert int with SpookyHash", NUM_CALLS, - [&]() { - std::unordered_set s; - for (int i = 0; i < NUM_INSERTIONS; ++i) { - s.insert(scramble(i)); - } - }); - cout << endl; - - benchmark("insert pair with BoostHash", NUM_CALLS, - [&]() { - unordered_set> s; - for (int i = 0; i < NUM_INSERTIONS; ++i) { - s.insert(make_pair(scramble(i), scramble(i + 1))); - } - }); - benchmark("insert pair with BoostHashFeed", NUM_CALLS, - [&]() { - fast_hash::HashSet> s; - for (int i = 0; i < NUM_INSERTIONS; ++i) { - s.insert(make_pair(scramble(i), scramble(i + 1))); - } - }); - benchmark("insert pair with BurtleFeed", NUM_CALLS, - [&]() { - utils::HashSet> s; - for (int i = 0; i < NUM_INSERTIONS; ++i) { - s.insert(make_pair(scramble(i), scramble(i + 1))); - } - }); - cout << endl; - - for (int length : {1, 10, 100} - ) { - benchmark( - "insert vector of size " + to_string(length) + - " with BoostHash", NUM_CALLS, - [&]() { - unordered_set> s; - for (int i = 0; i < NUM_INSERTIONS; ++i) { - vector v; - v.reserve(length); - for (int j = 0; j < length; ++j) { - v.push_back(scramble(NUM_INSERTIONS * length + j)); - } - s.insert(v); - } - }); - benchmark( - "insert vector of size " + to_string(length) + - " with BoostHashFeed", NUM_CALLS, - [&]() { - fast_hash::HashSet> s; - for (int i = 0; i < NUM_INSERTIONS; ++i) { - vector v; - v.reserve(length); - for (int j = 0; j < length; ++j) { - v.push_back(scramble(NUM_INSERTIONS * length + j)); - } - s.insert(v); - } - }); - benchmark( - "insert vector of size " + to_string(length) + - " with BurtleVector", NUM_CALLS, - [&]() { - BurtleBurtleHashSet s; - for (int i = 0; i < NUM_INSERTIONS; ++i) { - vector v; - v.reserve(length); - for (int j = 0; j < length; ++j) { - v.push_back(scramble(NUM_INSERTIONS * length + j)); - } - s.insert(v); - } - }); - benchmark( - "insert vector of size " + 
to_string(length) + - " with BurtleFeed", NUM_CALLS, - [&]() { - utils::HashSet> s; - for (int i = 0; i < NUM_INSERTIONS; ++i) { - vector v; - v.reserve(length); - for (int j = 0; j < length; ++j) { - v.push_back(scramble(NUM_INSERTIONS * length + j)); - } - s.insert(v); - } - }); - benchmark( - "insert vector of size " + to_string(length) + - " with BurtleFeedVector", NUM_CALLS, - [&]() { - std::unordered_set, HashWordHash> s; - for (int i = 0; i < NUM_INSERTIONS; ++i) { - vector v; - v.reserve(length); - for (int j = 0; j < length; ++j) { - v.push_back(scramble(NUM_INSERTIONS * length + j)); - } - s.insert(v); - } - }); - benchmark( - "insert vector of size " + to_string(length) + - " with SpookyHash", NUM_CALLS, - [&]() { - std::unordered_set, SpookyV2Hash> s; - for (int i = 0; i < NUM_INSERTIONS; ++i) { - vector v; - v.reserve(length); - for (int j = 0; j < length; ++j) { - v.push_back(scramble(NUM_INSERTIONS * length + j)); - } - s.insert(v); - } - }); - cout << endl; - } - cout << endl; - } - - return 0; -} diff --git a/experiments/issue693/relativescatter.py b/experiments/issue693/relativescatter.py deleted file mode 100644 index f74cb6e721..0000000000 --- a/experiments/issue693/relativescatter.py +++ /dev/null @@ -1,105 +0,0 @@ -# -*- coding: utf-8 -*- - -from collections import defaultdict - -from matplotlib import ticker - -from downward.reports.scatter import ScatterPlotReport -from downward.reports.plot import PlotReport, Matplotlib, MatplotlibPlot - - -# TODO: handle outliers - -# TODO: this is mostly copied from ScatterMatplotlib (scatter.py) -class RelativeScatterMatplotlib(Matplotlib): - @classmethod - def _plot(cls, report, axes, categories, styles): - # Display grid - axes.grid(b=True, linestyle='-', color='0.75') - - has_points = False - # Generate the scatter plots - for category, coords in sorted(categories.items()): - X, Y = zip(*coords) - axes.scatter(X, Y, s=42, label=category, **styles[category]) - if X and Y: - has_points = True - - if 
report.xscale == 'linear' or report.yscale == 'linear': - plot_size = report.missing_val * 1.01 - else: - plot_size = report.missing_val * 1.25 - - # make 5 ticks above and below 1 - yticks = [] - tick_step = report.ylim_top**(1/5.0) - for i in xrange(-5, 6): - yticks.append(tick_step**i) - axes.set_yticks(yticks) - axes.get_yaxis().set_major_formatter(ticker.ScalarFormatter()) - - axes.set_xlim(report.xlim_left or -1, report.xlim_right or plot_size) - axes.set_ylim(report.ylim_bottom or -1, report.ylim_top or plot_size) - - for axis in [axes.xaxis, axes.yaxis]: - MatplotlibPlot.change_axis_formatter( - axis, - report.missing_val if report.show_missing else None) - return has_points - - -class RelativeScatterPlotReport(ScatterPlotReport): - """ - Generate a scatter plot that shows a relative comparison of two - algorithms with regard to the given attribute. The attribute value - of algorithm 1 is shown on the x-axis and the relation to the value - of algorithm 2 on the y-axis. - """ - - def __init__(self, show_missing=True, get_category=None, **kwargs): - ScatterPlotReport.__init__(self, show_missing, get_category, **kwargs) - if self.output_format == 'tex': - raise "not supported" - else: - self.writer = RelativeScatterMatplotlib - - def _fill_categories(self, runs): - # We discard the *runs* parameter. 
- # Map category names to value tuples - categories = defaultdict(list) - self.ylim_bottom = 2 - self.ylim_top = 0.5 - self.xlim_left = float("inf") - for (domain, problem), runs in self.problem_runs.items(): - if len(runs) != 2: - continue - run1, run2 = runs - assert (run1['algorithm'] == self.algorithms[0] and - run2['algorithm'] == self.algorithms[1]) - val1 = run1.get(self.attribute) - val2 = run2.get(self.attribute) - if val1 is None or val2 is None: - continue - category = self.get_category(run1, run2) - assert val1 > 0, (domain, problem, self.algorithms[0], val1) - assert val2 > 0, (domain, problem, self.algorithms[1], val2) - x = val1 - y = val2 / float(val1) - - categories[category].append((x, y)) - - self.ylim_top = max(self.ylim_top, y) - self.ylim_bottom = min(self.ylim_bottom, y) - self.xlim_left = min(self.xlim_left, x) - - # center around 1 - if self.ylim_bottom < 1: - self.ylim_top = max(self.ylim_top, 1 / float(self.ylim_bottom)) - if self.ylim_top > 1: - self.ylim_bottom = min(self.ylim_bottom, 1 / float(self.ylim_top)) - return categories - - def _set_scales(self, xscale, yscale): - # ScatterPlot uses log-scaling on the x-axis by default. - PlotReport._set_scales( - self, xscale or self.attribute.scale or 'log', 'log') diff --git a/experiments/issue693/v2-blind.py b/experiments/issue693/v2-blind.py deleted file mode 100755 index 0bd4143220..0000000000 --- a/experiments/issue693/v2-blind.py +++ /dev/null @@ -1,48 +0,0 @@ -#! 
/usr/bin/env python -# -*- coding: utf-8 -*- - -import itertools -import os - -from lab.environments import LocalEnvironment, MaiaEnvironment - -from downward.reports.compare import ComparativeReport - -import common_setup -from common_setup import IssueConfig, IssueExperiment, RelativeScatterPlotReport - - -DIR = os.path.dirname(os.path.abspath(__file__)) -BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"] -REVISIONS = ["issue693-base", "issue693-v1", "issue693-v2"] -BUILDS = ["release32"] -SEARCHES = [ - ("blind", "astar(blind())"), -] -CONFIGS = [ - IssueConfig( - "{nick}-{build}".format(**locals()), - ["--search", search], - build_options=[build], - driver_options=["--build", build]) - for nick, search in SEARCHES - for build in BUILDS -] -SUITE = common_setup.DEFAULT_OPTIMAL_SUITE -ENVIRONMENT = MaiaEnvironment( - priority=0, email="jendrik.seipp@unibas.ch") - -if common_setup.is_test_run(): - SUITE = IssueExperiment.DEFAULT_TEST_SUITE - ENVIRONMENT = LocalEnvironment(processes=1) - -exp = IssueExperiment( - revisions=REVISIONS, - configs=CONFIGS, - environment=ENVIRONMENT, -) -exp.add_suite(BENCHMARKS_DIR, SUITE) -exp.add_absolute_report_step() -exp.add_comparison_table_step() - -exp.run_steps() diff --git a/experiments/issue693/v3-opt.py b/experiments/issue693/v3-opt.py deleted file mode 100755 index f13bab9695..0000000000 --- a/experiments/issue693/v3-opt.py +++ /dev/null @@ -1,70 +0,0 @@ -#! 
/usr/bin/env python -# -*- coding: utf-8 -*- - -import itertools -import os - -from lab.environments import LocalEnvironment, MaiaEnvironment - -from downward.reports.compare import ComparativeReport - -import common_setup -from common_setup import IssueConfig, IssueExperiment, RelativeScatterPlotReport - - -DIR = os.path.dirname(os.path.abspath(__file__)) -BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"] -REVISIONS = ["issue693-v2", "issue693-v3"] -BUILDS = ["release32", "release64"] -SEARCHES = [ - ("blind", "astar(blind())"), - ("divpot", "astar(diverse_potentials())"), - ("lmcut", "astar(lmcut())"), - ("cegar", "astar(cegar())"), - ("systematic2", "astar(cpdbs(systematic(2)))"), -] -CONFIGS = [ - IssueConfig( - "{nick}-{build}".format(**locals()), - ["--search", search], - build_options=[build], - driver_options=["--build", build]) - for nick, search in SEARCHES - for build in BUILDS -] -SUITE = common_setup.DEFAULT_OPTIMAL_SUITE -ENVIRONMENT = MaiaEnvironment( - priority=0, email="jendrik.seipp@unibas.ch") - -if common_setup.is_test_run(): - SUITE = IssueExperiment.DEFAULT_TEST_SUITE - ENVIRONMENT = LocalEnvironment(processes=1) - -exp = IssueExperiment( - revisions=REVISIONS, - configs=CONFIGS, - environment=ENVIRONMENT, -) -exp.add_suite(BENCHMARKS_DIR, SUITE) -exp.add_absolute_report_step() -exp.add_comparison_table_step() - -attributes = [ - "coverage", "error", "expansions_until_last_jump", "memory", - "score_memory", "total_time", "score_total_time"] - -# Compare revisions. -# lmcut-base-32 vs. lmcut-v1-32 vs. lmcut-v3-32 -# lmcut-base-64 vs. lmcut-v1-64 vs. 
lmcut-v3-64 -for build in BUILDS: - for rev1, rev2 in itertools.combinations(REVISIONS, 2): - algorithm_pairs = [ - ("{rev1}-{config_nick}-{build}".format(**locals()), - "{rev2}-{config_nick}-{build}".format(**locals()), - "Diff ({config_nick}-{build})".format(**locals())) - for config_nick, search in SEARCHES] - exp.add_report( - ComparativeReport(algorithm_pairs, attributes=attributes), - name="issue693-opt-{rev1}-vs-{rev2}-{build}".format(**locals())) - -exp.run_steps() diff --git a/experiments/issue693/v3-sat.py b/experiments/issue693/v3-sat.py deleted file mode 100755 index 0366c7af4a..0000000000 --- a/experiments/issue693/v3-sat.py +++ /dev/null @@ -1,68 +0,0 @@ -#! /usr/bin/env python -# -*- coding: utf-8 -*- - -import itertools -import os - -from lab.environments import LocalEnvironment, MaiaEnvironment - -from downward.reports.compare import ComparativeReport - -import common_setup -from common_setup import IssueConfig, IssueExperiment, RelativeScatterPlotReport - - -DIR = os.path.dirname(os.path.abspath(__file__)) -BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"] -REVISIONS = ["issue693-v2", "issue693-v3"] -BUILDS = ["release32", "release64"] -SEARCHES = [ - ("ff_lazy", "lazy_greedy(ff())"), - ("cg_eager", "eager_greedy(cg())"), - ("lm_rhw", "lazy_greedy(lmcount(lm_rhw()))"), -] -CONFIGS = [ - IssueConfig( - "{nick}-{build}".format(**locals()), - ["--search", search], - build_options=[build], - driver_options=["--build", build]) - for nick, search in SEARCHES - for build in BUILDS -] -SUITE = common_setup.DEFAULT_SATISFICING_SUITE -ENVIRONMENT = MaiaEnvironment( - priority=0, email="jendrik.seipp@unibas.ch") - -if common_setup.is_test_run(): - SUITE = IssueExperiment.DEFAULT_TEST_SUITE - ENVIRONMENT = LocalEnvironment(processes=1) - -exp = IssueExperiment( - revisions=REVISIONS, - configs=CONFIGS, - environment=ENVIRONMENT, -) -exp.add_suite(BENCHMARKS_DIR, SUITE) -exp.add_absolute_report_step() -exp.add_comparison_table_step() - -attributes = [ - 
"coverage", "error", "expansions_until_last_jump", "memory", - "score_memory", "total_time", "score_total_time"] - -# Compare revisions. -# lmcut-base-32 vs. lmcut-v1-32 vs. lmcut-v3-32 -# lmcut-base-64 vs. lmcut-v1-64 vs. lmcut-v3-64 -for build in BUILDS: - for rev1, rev2 in itertools.combinations(REVISIONS, 2): - algorithm_pairs = [ - ("{rev1}-{config_nick}-{build}".format(**locals()), - "{rev2}-{config_nick}-{build}".format(**locals()), - "Diff ({config_nick}-{build})".format(**locals())) - for config_nick, search in SEARCHES] - exp.add_report( - ComparativeReport(algorithm_pairs, attributes=attributes), - name="issue693-sat-{rev1}-vs-{rev2}-{build}".format(**locals())) - -exp.run_steps() diff --git a/experiments/issue693/v4-opt.py b/experiments/issue693/v4-opt.py deleted file mode 100755 index b91b87ecda..0000000000 --- a/experiments/issue693/v4-opt.py +++ /dev/null @@ -1,74 +0,0 @@ -#! /usr/bin/env python -# -*- coding: utf-8 -*- - -import itertools -import os - -from lab.environments import LocalEnvironment, MaiaEnvironment - -from downward.reports.compare import ComparativeReport - -import common_setup -from common_setup import IssueConfig, IssueExperiment, RelativeScatterPlotReport - - -DIR = os.path.dirname(os.path.abspath(__file__)) -BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"] -REVISIONS = ["issue693-v4-base", "issue693-v4"] -BUILDS = ["release32", "release64"] -SEARCHES = [ - ("blind", "astar(blind())"), - ("divpot", "astar(diverse_potentials())"), - ("lmcut", "astar(lmcut())"), - ("cegar", "astar(cegar())"), - ("systematic2", "astar(cpdbs(systematic(2)))"), - ("mas", - "astar(merge_and_shrink(" - "shrink_strategy=shrink_bisimulation(greedy=false)," - "merge_strategy=merge_stateless(" - "merge_selector=score_based_filtering(" - "scoring_functions=[goal_relevance,dfp,total_order]))," - "label_reduction=exact(before_shrinking=true,before_merging=false)," - "max_states=50000,threshold_before_merge=1))") -] -CONFIGS = [ - IssueConfig( - 
"{nick}-{build}".format(**locals()), - ["--search", search], - build_options=[build], - driver_options=["--build", build]) - for nick, search in SEARCHES - for build in BUILDS -] -SUITE = common_setup.DEFAULT_OPTIMAL_SUITE -ENVIRONMENT = MaiaEnvironment( - priority=0, email="jendrik.seipp@unibas.ch") - -if common_setup.is_test_run(): - SUITE = IssueExperiment.DEFAULT_TEST_SUITE - ENVIRONMENT = LocalEnvironment(processes=1) - -exp = IssueExperiment( - revisions=REVISIONS, - configs=CONFIGS, - environment=ENVIRONMENT, -) -exp.add_suite(BENCHMARKS_DIR, SUITE) -exp.add_absolute_report_step() -exp.add_comparison_table_step() - -attributes = IssueExperiment.DEFAULT_TABLE_ATTRIBUTES - -# Compare revisions. -for build in BUILDS: - for rev1, rev2 in itertools.combinations(REVISIONS, 2): - algorithm_pairs = [ - ("{rev1}-{config_nick}-{build}".format(**locals()), - "{rev2}-{config_nick}-{build}".format(**locals()), - "Diff ({config_nick}-{build})".format(**locals())) - for config_nick, search in SEARCHES] - exp.add_report( - ComparativeReport(algorithm_pairs, attributes=attributes), - name="issue693-opt-{rev1}-vs-{rev2}-{build}".format(**locals())) - -exp.run_steps() diff --git a/experiments/issue693/v4-sat.py b/experiments/issue693/v4-sat.py deleted file mode 100755 index 58323a45ff..0000000000 --- a/experiments/issue693/v4-sat.py +++ /dev/null @@ -1,71 +0,0 @@ -#! 
/usr/bin/env python -# -*- coding: utf-8 -*- - -import itertools -import os - -from lab.environments import LocalEnvironment, MaiaEnvironment - -from downward.reports.compare import ComparativeReport - -import common_setup -from common_setup import IssueConfig, IssueExperiment, RelativeScatterPlotReport - - -DIR = os.path.dirname(os.path.abspath(__file__)) -BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"] -REVISIONS = ["issue693-v4-base", "issue693-v4"] -BUILDS = ["release32", "release64"] -SEARCHES = [ - ("ff_lazy", ["--heuristic", "h=ff()", "--search", "lazy_greedy(h, preferred=h)"]), - ("add_lazy", ["--heuristic", "h=add()", "--search", "lazy_greedy(h, preferred=h)"]), - ("ff_eager", ["--heuristic", "h=ff()", "--search", "eager_greedy(h, preferred=h)"]), -] -CONFIGS = [ - IssueConfig( - "{nick}-{build}".format(**locals()), - search, - build_options=[build], - driver_options=["--build", build]) - for nick, search in SEARCHES - for build in BUILDS -] + [ - IssueConfig( - "lama-first-{build}".format(**locals()), - [], - build_options=[build], - driver_options=["--build", build, "--alias", "lama-first"]) - for build in BUILDS -] -SUITE = common_setup.DEFAULT_SATISFICING_SUITE -ENVIRONMENT = MaiaEnvironment( - priority=0, email="jendrik.seipp@unibas.ch") - -if common_setup.is_test_run(): - SUITE = IssueExperiment.DEFAULT_TEST_SUITE - ENVIRONMENT = LocalEnvironment(processes=1) - -exp = IssueExperiment( - revisions=REVISIONS, - configs=CONFIGS, - environment=ENVIRONMENT, -) -exp.add_suite(BENCHMARKS_DIR, SUITE) -exp.add_absolute_report_step() -exp.add_comparison_table_step() - -attributes = IssueExperiment.DEFAULT_TABLE_ATTRIBUTES - -# Compare revisions. 
-for build in BUILDS: - for rev1, rev2 in itertools.combinations(REVISIONS, 2): - algorithm_pairs = [ - ("{rev1}-{config_nick}-{build}".format(**locals()), - "{rev2}-{config_nick}-{build}".format(**locals()), - "Diff ({config_nick}-{build})".format(**locals())) - for config_nick in [nick for nick, _ in SEARCHES] + ["lama-first"]] - exp.add_report( - ComparativeReport(algorithm_pairs, attributes=attributes), - name="issue693-sat-{rev1}-vs-{rev2}-{build}".format(**locals())) - -exp.run_steps() diff --git a/experiments/issue693/v5-opt.py b/experiments/issue693/v5-opt.py deleted file mode 100755 index 344acf1aa4..0000000000 --- a/experiments/issue693/v5-opt.py +++ /dev/null @@ -1,74 +0,0 @@ -#! /usr/bin/env python -# -*- coding: utf-8 -*- - -import itertools -import os - -from lab.environments import LocalEnvironment, MaiaEnvironment - -from downward.reports.compare import ComparativeReport - -import common_setup -from common_setup import IssueConfig, IssueExperiment, RelativeScatterPlotReport - - -DIR = os.path.dirname(os.path.abspath(__file__)) -BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"] -REVISIONS = ["issue693-v5-base", "issue693-v5"] -BUILDS = ["release32"] -SEARCHES = [ - ("blind", "astar(blind())"), - ("divpot", "astar(diverse_potentials())"), - ("lmcut", "astar(lmcut())"), - ("cegar", "astar(cegar())"), - ("systematic2", "astar(cpdbs(systematic(2)))"), - ("mas", - "astar(merge_and_shrink(" - "shrink_strategy=shrink_bisimulation(greedy=false)," - "merge_strategy=merge_stateless(" - "merge_selector=score_based_filtering(" - "scoring_functions=[goal_relevance,dfp,total_order]))," - "label_reduction=exact(before_shrinking=true,before_merging=false)," - "max_states=50000,threshold_before_merge=1))") -] -CONFIGS = [ - IssueConfig( - "{nick}-{build}".format(**locals()), - ["--search", search], - build_options=[build], - driver_options=["--build", build, "--search-time-limit", "5m"]) - for nick, search in SEARCHES - for build in BUILDS -] -SUITE = 
common_setup.DEFAULT_OPTIMAL_SUITE -ENVIRONMENT = MaiaEnvironment( - priority=0, email="jendrik.seipp@unibas.ch") - -if common_setup.is_test_run(): - SUITE = IssueExperiment.DEFAULT_TEST_SUITE - ENVIRONMENT = LocalEnvironment(processes=1) - -exp = IssueExperiment( - revisions=REVISIONS, - configs=CONFIGS, - environment=ENVIRONMENT, -) -exp.add_suite(BENCHMARKS_DIR, SUITE) -exp.add_absolute_report_step() -exp.add_comparison_table_step() - -attributes = IssueExperiment.DEFAULT_TABLE_ATTRIBUTES - -# Compare revisions. -for build in BUILDS: - for rev1, rev2 in itertools.combinations(REVISIONS, 2): - algorithm_pairs = [ - ("{rev1}-{config_nick}-{build}".format(**locals()), - "{rev2}-{config_nick}-{build}".format(**locals()), - "Diff ({config_nick}-{build})".format(**locals())) - for config_nick, search in SEARCHES] - exp.add_report( - ComparativeReport(algorithm_pairs, attributes=attributes), - name="issue693-opt-{rev1}-vs-{rev2}-{build}".format(**locals())) - -exp.run_steps() diff --git a/experiments/issue693/v6-blind.py b/experiments/issue693/v6-blind.py deleted file mode 100755 index c59af2d633..0000000000 --- a/experiments/issue693/v6-blind.py +++ /dev/null @@ -1,72 +0,0 @@ -#! 
/usr/bin/env python -# -*- coding: utf-8 -*- - -import itertools -import os - -from lab.environments import LocalEnvironment, MaiaEnvironment - -from downward.reports.compare import ComparativeReport - -import common_setup -from common_setup import IssueConfig, IssueExperiment, RelativeScatterPlotReport - - -DIR = os.path.dirname(os.path.abspath(__file__)) -BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"] -REVISIONS = ["issue693-v5", "issue693-v6"] -BUILDS = ["release32", "release64"] -SEARCHES = [ - ("blind", "astar(blind())"), -] -CONFIGS = [ - IssueConfig( - "{nick}-{build}".format(**locals()), - ["--search", search], - build_options=[build], - driver_options=["--build", build, "--search-time-limit", "1m"]) - for nick, search in SEARCHES - for build in BUILDS -] -SUITE = common_setup.DEFAULT_OPTIMAL_SUITE -ENVIRONMENT = MaiaEnvironment( - priority=0, email="jendrik.seipp@unibas.ch") - -if common_setup.is_test_run(): - SUITE = IssueExperiment.DEFAULT_TEST_SUITE - ENVIRONMENT = LocalEnvironment(processes=1) - -exp = IssueExperiment( - revisions=REVISIONS, - configs=CONFIGS, - environment=ENVIRONMENT, -) -exp.add_suite(BENCHMARKS_DIR, SUITE) - -# Compare revisions. 
-for build in BUILDS: - for rev1, rev2 in itertools.combinations(REVISIONS, 2): - algorithm_pairs = [ - ("{rev1}-{config_nick}-{build}".format(**locals()), - "{rev2}-{config_nick}-{build}".format(**locals()), - "Diff ({config_nick}-{build})".format(**locals())) - for config_nick, search in SEARCHES] - exp.add_report( - ComparativeReport( - algorithm_pairs, - attributes=IssueExperiment.DEFAULT_TABLE_ATTRIBUTES), - name="issue693-opt-{rev1}-vs-{rev2}-{build}".format(**locals())) - - for config_nick, search in SEARCHES: - algorithms = [ - "{rev1}-{config_nick}-{build}".format(**locals()), - "{rev2}-{config_nick}-{build}".format(**locals())] - for attribute in ["total_time", "memory"]: - exp.add_report( - RelativeScatterPlotReport( - attributes=[attribute], - filter_algorithm=algorithms, - get_category=lambda run1, run2: run1["domain"]), - name="issue693-relative-scatter-{config_nick}-{build}-{rev1}-vs-{rev2}-{attribute}".format(**locals())) - -exp.run_steps() diff --git a/experiments/issue693/v7-opt.py b/experiments/issue693/v7-opt.py deleted file mode 100755 index abb7ad32d3..0000000000 --- a/experiments/issue693/v7-opt.py +++ /dev/null @@ -1,48 +0,0 @@ -#! 
/usr/bin/env python -# -*- coding: utf-8 -*- - -import itertools -import os - -from lab.environments import LocalEnvironment, MaiaEnvironment - -from downward.reports.compare import ComparativeReport - -import common_setup -from common_setup import IssueConfig, IssueExperiment, RelativeScatterPlotReport - - -DIR = os.path.dirname(os.path.abspath(__file__)) -BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"] -REVISIONS = ["issue693-v7-base", "issue693-v7"] -BUILDS = ["release32", "release64"] -SEARCHES = [ - ("blind", "astar(blind())"), - ("lmcut", "astar(lmcut())"), - ("cegar", "astar(cegar())"), -] -CONFIGS = [ - IssueConfig( - "{nick}-{build}".format(**locals()), - ["--search", search], - build_options=[build], - driver_options=["--build", build]) - for nick, search in SEARCHES - for build in BUILDS -] -SUITE = common_setup.DEFAULT_OPTIMAL_SUITE -ENVIRONMENT = MaiaEnvironment( - priority=0, email="jendrik.seipp@unibas.ch") - -if common_setup.is_test_run(): - SUITE = IssueExperiment.DEFAULT_TEST_SUITE - ENVIRONMENT = LocalEnvironment(processes=1) - -exp = IssueExperiment( - revisions=REVISIONS, - configs=CONFIGS, - environment=ENVIRONMENT, -) -exp.add_suite(BENCHMARKS_DIR, SUITE) -exp.add_comparison_table_step() -exp.run_steps() diff --git a/experiments/issue693/v7-sat.py b/experiments/issue693/v7-sat.py deleted file mode 100755 index fa66113610..0000000000 --- a/experiments/issue693/v7-sat.py +++ /dev/null @@ -1,42 +0,0 @@ -#! 
/usr/bin/env python -# -*- coding: utf-8 -*- - -import itertools -import os - -from lab.environments import LocalEnvironment, MaiaEnvironment - -from downward.reports.compare import ComparativeReport - -import common_setup -from common_setup import IssueConfig, IssueExperiment, RelativeScatterPlotReport - - -DIR = os.path.dirname(os.path.abspath(__file__)) -BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"] -REVISIONS = ["issue693-v7-base", "issue693-v7"] -BUILDS = ["release32", "release64"] -CONFIGS = [ - IssueConfig( - "lama-first-{build}".format(**locals()), - [], - build_options=[build], - driver_options=["--build", build, "--alias", "lama-first"]) - for build in BUILDS -] -SUITE = common_setup.DEFAULT_SATISFICING_SUITE -ENVIRONMENT = MaiaEnvironment( - priority=0, email="jendrik.seipp@unibas.ch") - -if common_setup.is_test_run(): - SUITE = IssueExperiment.DEFAULT_TEST_SUITE - ENVIRONMENT = LocalEnvironment(processes=1) - -exp = IssueExperiment( - revisions=REVISIONS, - configs=CONFIGS, - environment=ENVIRONMENT, -) -exp.add_suite(BENCHMARKS_DIR, SUITE) -exp.add_comparison_table_step() -exp.run_steps() diff --git a/experiments/issue693/v8-blind.py b/experiments/issue693/v8-blind.py deleted file mode 100755 index 9b6b380103..0000000000 --- a/experiments/issue693/v8-blind.py +++ /dev/null @@ -1,46 +0,0 @@ -#! 
/usr/bin/env python -# -*- coding: utf-8 -*- - -import itertools -import os - -from lab.environments import LocalEnvironment, MaiaEnvironment - -from downward.reports.compare import ComparativeReport - -import common_setup -from common_setup import IssueConfig, IssueExperiment, RelativeScatterPlotReport - - -DIR = os.path.dirname(os.path.abspath(__file__)) -BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"] -REVISIONS = ["issue693-v7-base", "issue693-v7", "issue693-v8"] -BUILDS = ["release32", "release64"] -SEARCHES = [ - ("blind", "astar(blind())"), -] -CONFIGS = [ - IssueConfig( - "{nick}-{build}".format(**locals()), - ["--search", search], - build_options=[build], - driver_options=["--build", build]) - for nick, search in SEARCHES - for build in BUILDS -] -SUITE = common_setup.DEFAULT_OPTIMAL_SUITE -ENVIRONMENT = MaiaEnvironment( - priority=0, email="jendrik.seipp@unibas.ch") - -if common_setup.is_test_run(): - SUITE = IssueExperiment.DEFAULT_TEST_SUITE - ENVIRONMENT = LocalEnvironment(processes=1) - -exp = IssueExperiment( - revisions=REVISIONS, - configs=CONFIGS, - environment=ENVIRONMENT, -) -exp.add_suite(BENCHMARKS_DIR, SUITE) -exp.add_comparison_table_step() -exp.run_steps() diff --git a/experiments/issue694/common_setup.py b/experiments/issue694/common_setup.py deleted file mode 100644 index 7c2f99d29a..0000000000 --- a/experiments/issue694/common_setup.py +++ /dev/null @@ -1,385 +0,0 @@ -# -*- coding: utf-8 -*- - -import itertools -import os -import platform -import subprocess -import sys - -from lab.experiment import ARGPARSER -from lab.steps import Step -from lab import tools - -from downward.experiment import FastDownwardExperiment -from downward.reports.absolute import AbsoluteReport -from downward.reports.compare import ComparativeReport -from downward.reports.scatter import ScatterPlotReport - -from relativescatter import RelativeScatterPlotReport - - -def parse_args(): - ARGPARSER.add_argument( - "--test", - choices=["yes", "no", "auto"], - 
default="auto", - dest="test_run", - help="test experiment locally on a small suite if --test=yes or " - "--test=auto and we are not on a cluster") - return ARGPARSER.parse_args() - -ARGS = parse_args() - - -DEFAULT_OPTIMAL_SUITE = [ - 'airport', 'barman-opt11-strips', 'barman-opt14-strips', 'blocks', - 'childsnack-opt14-strips', 'depot', 'driverlog', - 'elevators-opt08-strips', 'elevators-opt11-strips', - 'floortile-opt11-strips', 'floortile-opt14-strips', 'freecell', - 'ged-opt14-strips', 'grid', 'gripper', 'hiking-opt14-strips', - 'logistics00', 'logistics98', 'miconic', 'movie', 'mprime', - 'mystery', 'nomystery-opt11-strips', 'openstacks-opt08-strips', - 'openstacks-opt11-strips', 'openstacks-opt14-strips', - 'openstacks-strips', 'parcprinter-08-strips', - 'parcprinter-opt11-strips', 'parking-opt11-strips', - 'parking-opt14-strips', 'pathways-noneg', 'pegsol-08-strips', - 'pegsol-opt11-strips', 'pipesworld-notankage', - 'pipesworld-tankage', 'psr-small', 'rovers', 'satellite', - 'scanalyzer-08-strips', 'scanalyzer-opt11-strips', - 'sokoban-opt08-strips', 'sokoban-opt11-strips', 'storage', - 'tetris-opt14-strips', 'tidybot-opt11-strips', - 'tidybot-opt14-strips', 'tpp', 'transport-opt08-strips', - 'transport-opt11-strips', 'transport-opt14-strips', - 'trucks-strips', 'visitall-opt11-strips', 'visitall-opt14-strips', - 'woodworking-opt08-strips', 'woodworking-opt11-strips', - 'zenotravel'] - -DEFAULT_SATISFICING_SUITE = [ - 'airport', 'assembly', 'barman-sat11-strips', - 'barman-sat14-strips', 'blocks', 'cavediving-14-adl', - 'childsnack-sat14-strips', 'citycar-sat14-adl', 'depot', - 'driverlog', 'elevators-sat08-strips', 'elevators-sat11-strips', - 'floortile-sat11-strips', 'floortile-sat14-strips', 'freecell', - 'ged-sat14-strips', 'grid', 'gripper', 'hiking-sat14-strips', - 'logistics00', 'logistics98', 'maintenance-sat14-adl', 'miconic', - 'miconic-fulladl', 'miconic-simpleadl', 'movie', 'mprime', - 'mystery', 'nomystery-sat11-strips', 'openstacks', - 
'openstacks-sat08-adl', 'openstacks-sat08-strips', - 'openstacks-sat11-strips', 'openstacks-sat14-strips', - 'openstacks-strips', 'optical-telegraphs', 'parcprinter-08-strips', - 'parcprinter-sat11-strips', 'parking-sat11-strips', - 'parking-sat14-strips', 'pathways', 'pathways-noneg', - 'pegsol-08-strips', 'pegsol-sat11-strips', 'philosophers', - 'pipesworld-notankage', 'pipesworld-tankage', 'psr-large', - 'psr-middle', 'psr-small', 'rovers', 'satellite', - 'scanalyzer-08-strips', 'scanalyzer-sat11-strips', 'schedule', - 'sokoban-sat08-strips', 'sokoban-sat11-strips', 'storage', - 'tetris-sat14-strips', 'thoughtful-sat14-strips', - 'tidybot-sat11-strips', 'tpp', 'transport-sat08-strips', - 'transport-sat11-strips', 'transport-sat14-strips', 'trucks', - 'trucks-strips', 'visitall-sat11-strips', 'visitall-sat14-strips', - 'woodworking-sat08-strips', 'woodworking-sat11-strips', - 'zenotravel'] - - -def get_script(): - """Get file name of main script.""" - return tools.get_script_path() - - -def get_script_dir(): - """Get directory of main script. - - Usually a relative directory (depends on how it was called by the user.)""" - return os.path.dirname(get_script()) - - -def get_experiment_name(): - """Get name for experiment. - - Derived from the absolute filename of the main script, e.g. - "/ham/spam/eggs.py" => "spam-eggs".""" - script = os.path.abspath(get_script()) - script_dir = os.path.basename(os.path.dirname(script)) - script_base = os.path.splitext(os.path.basename(script))[0] - return "%s-%s" % (script_dir, script_base) - - -def get_data_dir(): - """Get data dir for the experiment. - - This is the subdirectory "data" of the directory containing - the main script.""" - return os.path.join(get_script_dir(), "data", get_experiment_name()) - - -def get_repo_base(): - """Get base directory of the repository, as an absolute path. - - Search upwards in the directory tree from the main script until a - directory with a subdirectory named ".hg" is found. 
- - Abort if the repo base cannot be found.""" - path = os.path.abspath(get_script_dir()) - while os.path.dirname(path) != path: - if os.path.exists(os.path.join(path, ".hg")): - return path - path = os.path.dirname(path) - sys.exit("repo base could not be found") - - -def is_running_on_cluster(): - node = platform.node() - return ( - "cluster" in node or - node.startswith("gkigrid") or - node in ["habakuk", "turtur"]) - - -def is_test_run(): - return ARGS.test_run == "yes" or ( - ARGS.test_run == "auto" and not is_running_on_cluster()) - - -def get_algo_nick(revision, config_nick): - return "{revision}-{config_nick}".format(**locals()) - - -class IssueConfig(object): - """Hold information about a planner configuration. - - See FastDownwardExperiment.add_algorithm() for documentation of the - constructor's options. - - """ - def __init__(self, nick, component_options, - build_options=None, driver_options=None): - self.nick = nick - self.component_options = component_options - self.build_options = build_options - self.driver_options = driver_options - - -class IssueExperiment(FastDownwardExperiment): - """Subclass of FastDownwardExperiment with some convenience features.""" - - DEFAULT_TEST_SUITE = ["gripper:prob01.pddl"] - - DEFAULT_TABLE_ATTRIBUTES = [ - "cost", - "coverage", - "error", - "evaluations", - "expansions", - "expansions_until_last_jump", - "generated", - "memory", - "quality", - "run_dir", - "score_evaluations", - "score_expansions", - "score_generated", - "score_memory", - "score_search_time", - "score_total_time", - "search_time", - "total_time", - ] - - DEFAULT_SCATTER_PLOT_ATTRIBUTES = [ - "evaluations", - "expansions", - "expansions_until_last_jump", - "initial_h_value", - "memory", - "search_time", - "total_time", - ] - - PORTFOLIO_ATTRIBUTES = [ - "cost", - "coverage", - "error", - "plan_length", - "run_dir", - ] - - def __init__(self, revisions=None, configs=None, path=None, **kwargs): - """ - - You can either specify both *revisions* and 
*configs* or none - of them. If they are omitted, you will need to call - exp.add_algorithm() manually. - - If *revisions* is given, it must be a non-empty list of - revision identifiers, which specify which planner versions to - use in the experiment. The same versions are used for - translator, preprocessor and search. :: - - IssueExperiment(revisions=["issue123", "4b3d581643"], ...) - - If *configs* is given, it must be a non-empty list of - IssueConfig objects. :: - - IssueExperiment(..., configs=[ - IssueConfig("ff", ["--search", "eager_greedy(ff())"]), - IssueConfig( - "lama", [], - driver_options=["--alias", "seq-sat-lama-2011"]), - ]) - - If *path* is specified, it must be the path to where the - experiment should be built (e.g. - /home/john/experiments/issue123/exp01/). If omitted, the - experiment path is derived automatically from the main - script's filename. Example:: - - script = experiments/issue123/exp01.py --> - path = experiments/issue123/data/issue123-exp01/ - - """ - - path = path or get_data_dir() - - FastDownwardExperiment.__init__(self, path=path, **kwargs) - - if (revisions and not configs) or (not revisions and configs): - raise ValueError( - "please provide either both or none of revisions and configs") - - for rev in revisions: - for config in configs: - self.add_algorithm( - get_algo_nick(rev, config.nick), - get_repo_base(), - rev, - config.component_options, - build_options=config.build_options, - driver_options=config.driver_options) - - self._revisions = revisions - self._configs = configs - - @classmethod - def _is_portfolio(cls, config_nick): - return "fdss" in config_nick - - @classmethod - def get_supported_attributes(cls, config_nick, attributes): - if cls._is_portfolio(config_nick): - return [attr for attr in attributes - if attr in cls.PORTFOLIO_ATTRIBUTES] - return attributes - - def add_absolute_report_step(self, **kwargs): - """Add step that makes an absolute report. 
- - Absolute reports are useful for experiments that don't compare - revisions. - - The report is written to the experiment evaluation directory. - - All *kwargs* will be passed to the AbsoluteReport class. If the - keyword argument *attributes* is not specified, a default list - of attributes is used. :: - - exp.add_absolute_report_step(attributes=["coverage"]) - - """ - kwargs.setdefault("attributes", self.DEFAULT_TABLE_ATTRIBUTES) - report = AbsoluteReport(**kwargs) - outfile = os.path.join( - self.eval_dir, - get_experiment_name() + "." + report.output_format) - self.add_report(report, outfile=outfile) - self.add_step( - 'publish-absolute-report', subprocess.call, ['publish', outfile]) - - def add_comparison_table_step(self, **kwargs): - """Add a step that makes pairwise revision comparisons. - - Create comparative reports for all pairs of Fast Downward - revisions. Each report pairs up the runs of the same config and - lists the two absolute attribute values and their difference - for all attributes in kwargs["attributes"]. - - All *kwargs* will be passed to the CompareConfigsReport class. - If the keyword argument *attributes* is not specified, a - default list of attributes is used. 
:: - - exp.add_comparison_table_step(attributes=["coverage"]) - - """ - kwargs.setdefault("attributes", self.DEFAULT_TABLE_ATTRIBUTES) - - def make_comparison_tables(): - for rev1, rev2 in itertools.combinations(self._revisions, 2): - compared_configs = [] - for config in self._configs: - config_nick = config.nick - compared_configs.append( - ("%s-%s" % (rev1, config_nick), - "%s-%s" % (rev2, config_nick), - "Diff (%s)" % config_nick)) - report = ComparativeReport(compared_configs, **kwargs) - outfile = os.path.join( - self.eval_dir, - "%s-%s-%s-compare.%s" % ( - self.name, rev1, rev2, report.output_format)) - report(self.eval_dir, outfile) - - def publish_comparison_tables(): - for rev1, rev2 in itertools.combinations(self._revisions, 2): - outfile = os.path.join( - self.eval_dir, - "%s-%s-%s-compare.html" % (self.name, rev1, rev2)) - subprocess.call(["publish", outfile]) - - self.add_step("make-comparison-tables", make_comparison_tables) - self.add_step( - "publish-comparison-tables", publish_comparison_tables) - - def add_scatter_plot_step(self, relative=False, attributes=None): - """Add step creating (relative) scatter plots for all revision pairs. - - Create a scatter plot for each combination of attribute, - configuration and revisions pair. If *attributes* is not - specified, a list of common scatter plot attributes is used. - For portfolios all attributes except "cost", "coverage" and - "plan_length" will be ignored. 
:: - - exp.add_scatter_plot_step(attributes=["expansions"]) - - """ - if relative: - report_class = RelativeScatterPlotReport - scatter_dir = os.path.join(self.eval_dir, "scatter-relative") - step_name = "make-relative-scatter-plots" - else: - report_class = ScatterPlotReport - scatter_dir = os.path.join(self.eval_dir, "scatter-absolute") - step_name = "make-absolute-scatter-plots" - if attributes is None: - attributes = self.DEFAULT_SCATTER_PLOT_ATTRIBUTES - - def make_scatter_plot(config_nick, rev1, rev2, attribute): - name = "-".join([self.name, rev1, rev2, attribute, config_nick]) - print "Make scatter plot for", name - algo1 = "{}-{}".format(rev1, config_nick) - algo2 = "{}-{}".format(rev2, config_nick) - report = report_class( - filter_algorithm=[algo1, algo2], - attributes=[attribute], - get_category=lambda run1, run2: run1["domain"]) - report( - self.eval_dir, - os.path.join(scatter_dir, rev1 + "-" + rev2, name)) - - def make_scatter_plots(): - for config in self._configs: - for rev1, rev2 in itertools.combinations(self._revisions, 2): - for attribute in self.get_supported_attributes( - config.nick, attributes): - make_scatter_plot(config.nick, rev1, rev2, attribute) - - self.add_step(step_name, make_scatter_plots) diff --git a/experiments/issue694/custom-parser.py b/experiments/issue694/custom-parser.py deleted file mode 100755 index e6bcc40f06..0000000000 --- a/experiments/issue694/custom-parser.py +++ /dev/null @@ -1,21 +0,0 @@ -#! 
/usr/bin/env python - -from lab.parser import Parser - - -def main(): - parser = Parser() - parser.add_pattern( - "int_hash_set_load_factor", - "Int hash set load factor: \d+/\d+ = (.+)", - required=False, - type=float) - parser.add_pattern( - "int_hash_set_resizes", - "Int hash set resizes: (\d+)", - required=False, - type=int) - print "Running custom parser" - parser.parse() - -main() diff --git a/experiments/issue694/relativescatter.py b/experiments/issue694/relativescatter.py deleted file mode 100644 index f74cb6e721..0000000000 --- a/experiments/issue694/relativescatter.py +++ /dev/null @@ -1,105 +0,0 @@ -# -*- coding: utf-8 -*- - -from collections import defaultdict - -from matplotlib import ticker - -from downward.reports.scatter import ScatterPlotReport -from downward.reports.plot import PlotReport, Matplotlib, MatplotlibPlot - - -# TODO: handle outliers - -# TODO: this is mostly copied from ScatterMatplotlib (scatter.py) -class RelativeScatterMatplotlib(Matplotlib): - @classmethod - def _plot(cls, report, axes, categories, styles): - # Display grid - axes.grid(b=True, linestyle='-', color='0.75') - - has_points = False - # Generate the scatter plots - for category, coords in sorted(categories.items()): - X, Y = zip(*coords) - axes.scatter(X, Y, s=42, label=category, **styles[category]) - if X and Y: - has_points = True - - if report.xscale == 'linear' or report.yscale == 'linear': - plot_size = report.missing_val * 1.01 - else: - plot_size = report.missing_val * 1.25 - - # make 5 ticks above and below 1 - yticks = [] - tick_step = report.ylim_top**(1/5.0) - for i in xrange(-5, 6): - yticks.append(tick_step**i) - axes.set_yticks(yticks) - axes.get_yaxis().set_major_formatter(ticker.ScalarFormatter()) - - axes.set_xlim(report.xlim_left or -1, report.xlim_right or plot_size) - axes.set_ylim(report.ylim_bottom or -1, report.ylim_top or plot_size) - - for axis in [axes.xaxis, axes.yaxis]: - MatplotlibPlot.change_axis_formatter( - axis, - report.missing_val if 
report.show_missing else None) - return has_points - - -class RelativeScatterPlotReport(ScatterPlotReport): - """ - Generate a scatter plot that shows a relative comparison of two - algorithms with regard to the given attribute. The attribute value - of algorithm 1 is shown on the x-axis and the relation to the value - of algorithm 2 on the y-axis. - """ - - def __init__(self, show_missing=True, get_category=None, **kwargs): - ScatterPlotReport.__init__(self, show_missing, get_category, **kwargs) - if self.output_format == 'tex': - raise "not supported" - else: - self.writer = RelativeScatterMatplotlib - - def _fill_categories(self, runs): - # We discard the *runs* parameter. - # Map category names to value tuples - categories = defaultdict(list) - self.ylim_bottom = 2 - self.ylim_top = 0.5 - self.xlim_left = float("inf") - for (domain, problem), runs in self.problem_runs.items(): - if len(runs) != 2: - continue - run1, run2 = runs - assert (run1['algorithm'] == self.algorithms[0] and - run2['algorithm'] == self.algorithms[1]) - val1 = run1.get(self.attribute) - val2 = run2.get(self.attribute) - if val1 is None or val2 is None: - continue - category = self.get_category(run1, run2) - assert val1 > 0, (domain, problem, self.algorithms[0], val1) - assert val2 > 0, (domain, problem, self.algorithms[1], val2) - x = val1 - y = val2 / float(val1) - - categories[category].append((x, y)) - - self.ylim_top = max(self.ylim_top, y) - self.ylim_bottom = min(self.ylim_bottom, y) - self.xlim_left = min(self.xlim_left, x) - - # center around 1 - if self.ylim_bottom < 1: - self.ylim_top = max(self.ylim_top, 1 / float(self.ylim_bottom)) - if self.ylim_top > 1: - self.ylim_bottom = min(self.ylim_bottom, 1 / float(self.ylim_top)) - return categories - - def _set_scales(self, xscale, yscale): - # ScatterPlot uses log-scaling on the x-axis by default. 
- PlotReport._set_scales( - self, xscale or self.attribute.scale or 'log', 'log') diff --git a/experiments/issue694/v1-opt.py b/experiments/issue694/v1-opt.py deleted file mode 100755 index 7b124dba98..0000000000 --- a/experiments/issue694/v1-opt.py +++ /dev/null @@ -1,55 +0,0 @@ -#! /usr/bin/env python -# -*- coding: utf-8 -*- - -import itertools -import os - -from lab.environments import LocalEnvironment, MaiaEnvironment - -from downward.reports.compare import ComparativeReport - -import common_setup -from common_setup import IssueConfig, IssueExperiment, RelativeScatterPlotReport - - -DIR = os.path.dirname(os.path.abspath(__file__)) -BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"] -REVISIONS = ["issue694-v1-base", "issue694-v1"] -BUILDS = ["release32", "release64"] -SEARCHES = [ - ("blind", "astar(blind())"), - ("lmcut", "astar(lmcut())"), -] -CONFIGS = [ - IssueConfig( - "{nick}-{build}".format(**locals()), - ["--search", search], - build_options=[build], - driver_options=["--build", build]) - for nick, search in SEARCHES - for build in BUILDS -] -ATTRIBUTES = [ - "coverage", "error", "expansions_until_last_jump", "memory", - "score_memory", "total_time", "score_total_time", - "int_hash_set_load_factor", "int_hash_set_resizes"] -SUITE = common_setup.DEFAULT_OPTIMAL_SUITE -ENVIRONMENT = MaiaEnvironment( - priority=0, email="jendrik.seipp@unibas.ch") - -if common_setup.is_test_run(): - SUITE = IssueExperiment.DEFAULT_TEST_SUITE - ENVIRONMENT = LocalEnvironment(processes=1) - -exp = IssueExperiment( - revisions=REVISIONS, - configs=CONFIGS, - environment=ENVIRONMENT, -) -exp.add_suite(BENCHMARKS_DIR, SUITE) -exp.add_command('run-custom-parser', [os.path.join(DIR, 'custom-parser.py')]) -exp.add_comparison_table_step(attributes=ATTRIBUTES) -for relative in [False, True]: - exp.add_scatter_plot_step(relative=relative, attributes=["memory", "total_time"]) - -exp.run_steps() diff --git a/experiments/issue694/v2-opt.py b/experiments/issue694/v2-opt.py deleted file 
mode 100755 index 63489ac742..0000000000 --- a/experiments/issue694/v2-opt.py +++ /dev/null @@ -1,55 +0,0 @@ -#! /usr/bin/env python -# -*- coding: utf-8 -*- - -import itertools -import os - -from lab.environments import LocalEnvironment, MaiaEnvironment - -from downward.reports.compare import ComparativeReport - -import common_setup -from common_setup import IssueConfig, IssueExperiment, RelativeScatterPlotReport - - -DIR = os.path.dirname(os.path.abspath(__file__)) -BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"] -REVISIONS = ["issue694-v2-base", "issue694-v2"] -BUILDS = ["release32", "release64"] -SEARCHES = [ - ("blind", "astar(blind())"), - ("lmcut", "astar(lmcut())"), -] -CONFIGS = [ - IssueConfig( - "{nick}-{build}".format(**locals()), - ["--search", search], - build_options=[build], - driver_options=["--build", build]) - for nick, search in SEARCHES - for build in BUILDS -] -ATTRIBUTES = [ - "coverage", "error", "expansions_until_last_jump", "memory", - "score_memory", "total_time", "score_total_time", - "int_hash_set_load_factor", "int_hash_set_resizes"] -SUITE = common_setup.DEFAULT_OPTIMAL_SUITE -ENVIRONMENT = MaiaEnvironment( - priority=0, email="jendrik.seipp@unibas.ch") - -if common_setup.is_test_run(): - SUITE = IssueExperiment.DEFAULT_TEST_SUITE - ENVIRONMENT = LocalEnvironment(processes=1) - -exp = IssueExperiment( - revisions=REVISIONS, - configs=CONFIGS, - environment=ENVIRONMENT, -) -exp.add_suite(BENCHMARKS_DIR, SUITE) -exp.add_command('run-custom-parser', [os.path.join(DIR, 'custom-parser.py')]) -exp.add_comparison_table_step(attributes=ATTRIBUTES) -for relative in [False, True]: - exp.add_scatter_plot_step(relative=relative, attributes=["memory", "total_time"]) - -exp.run_steps() diff --git a/experiments/issue694/v3-opt.py b/experiments/issue694/v3-opt.py deleted file mode 100755 index 3413368585..0000000000 --- a/experiments/issue694/v3-opt.py +++ /dev/null @@ -1,52 +0,0 @@ -#! 
/usr/bin/env python -# -*- coding: utf-8 -*- - -import os - -from lab.environments import LocalEnvironment, BaselSlurmEnvironment - -from downward.reports.compare import ComparativeReport - -import common_setup -from common_setup import IssueConfig, IssueExperiment, RelativeScatterPlotReport - - -DIR = os.path.dirname(os.path.abspath(__file__)) -BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"] -REVISIONS = ["issue694-v2", "issue694-v3"] -BUILDS = ["release32", "release64"] -SEARCHES = [ - ("blind", "astar(blind())"), -] -CONFIGS = [ - IssueConfig( - "{nick}-{build}".format(**locals()), - ["--search", search], - build_options=[build], - driver_options=["--build", build]) - for nick, search in SEARCHES - for build in BUILDS -] -ATTRIBUTES = [ - "coverage", "error", "expansions_until_last_jump", "memory", - "score_memory", "total_time", "score_total_time", - "int_hash_set_load_factor", "int_hash_set_resizes"] -SUITE = common_setup.DEFAULT_OPTIMAL_SUITE -ENVIRONMENT = BaselSlurmEnvironment(email="jendrik.seipp@unibas.ch") - -if common_setup.is_test_run(): - SUITE = IssueExperiment.DEFAULT_TEST_SUITE - ENVIRONMENT = LocalEnvironment(processes=1) - -exp = IssueExperiment( - revisions=REVISIONS, - configs=CONFIGS, - environment=ENVIRONMENT, -) -exp.add_suite(BENCHMARKS_DIR, SUITE) -exp.add_command('run-custom-parser', [os.path.join(DIR, 'custom-parser.py')]) -exp.add_comparison_table_step(attributes=ATTRIBUTES) -for relative in [False, True]: - exp.add_scatter_plot_step(relative=relative, attributes=["memory", "total_time"]) - -exp.run_steps() diff --git a/experiments/issue695/common_setup.py b/experiments/issue695/common_setup.py deleted file mode 100755 index 687019c482..0000000000 --- a/experiments/issue695/common_setup.py +++ /dev/null @@ -1,382 +0,0 @@ -# -*- coding: utf-8 -*- - -import itertools -import os -import platform -import subprocess -import sys - -from lab.experiment import ARGPARSER -from lab import tools - -from downward.experiment import 
FastDownwardExperiment -from downward.reports.absolute import AbsoluteReport -from downward.reports.compare import ComparativeReport -from downward.reports.scatter import ScatterPlotReport - -from relativescatter import RelativeScatterPlotReport - - -def parse_args(): - ARGPARSER.add_argument( - "--test", - choices=["yes", "no", "auto"], - default="auto", - dest="test_run", - help="test experiment locally on a small suite if --test=yes or " - "--test=auto and we are not on a cluster") - return ARGPARSER.parse_args() - -ARGS = parse_args() - - -DEFAULT_OPTIMAL_SUITE = [ - 'airport', 'barman-opt11-strips', 'barman-opt14-strips', 'blocks', - 'childsnack-opt14-strips', 'depot', 'driverlog', - 'elevators-opt08-strips', 'elevators-opt11-strips', - 'floortile-opt11-strips', 'floortile-opt14-strips', 'freecell', - 'ged-opt14-strips', 'grid', 'gripper', 'hiking-opt14-strips', - 'logistics00', 'logistics98', 'miconic', 'movie', 'mprime', - 'mystery', 'nomystery-opt11-strips', 'openstacks-opt08-strips', - 'openstacks-opt11-strips', 'openstacks-opt14-strips', - 'openstacks-strips', 'parcprinter-08-strips', - 'parcprinter-opt11-strips', 'parking-opt11-strips', - 'parking-opt14-strips', 'pathways-noneg', 'pegsol-08-strips', - 'pegsol-opt11-strips', 'pipesworld-notankage', - 'pipesworld-tankage', 'psr-small', 'rovers', 'satellite', - 'scanalyzer-08-strips', 'scanalyzer-opt11-strips', - 'sokoban-opt08-strips', 'sokoban-opt11-strips', 'storage', - 'tetris-opt14-strips', 'tidybot-opt11-strips', - 'tidybot-opt14-strips', 'tpp', 'transport-opt08-strips', - 'transport-opt11-strips', 'transport-opt14-strips', - 'trucks-strips', 'visitall-opt11-strips', 'visitall-opt14-strips', - 'woodworking-opt08-strips', 'woodworking-opt11-strips', - 'zenotravel'] - -DEFAULT_SATISFICING_SUITE = [ - 'airport', 'assembly', 'barman-sat11-strips', - 'barman-sat14-strips', 'blocks', 'cavediving-14-adl', - 'childsnack-sat14-strips', 'citycar-sat14-adl', 'depot', - 'driverlog', 'elevators-sat08-strips', 
'elevators-sat11-strips', - 'floortile-sat11-strips', 'floortile-sat14-strips', 'freecell', - 'ged-sat14-strips', 'grid', 'gripper', 'hiking-sat14-strips', - 'logistics00', 'logistics98', 'maintenance-sat14-adl', 'miconic', - 'miconic-fulladl', 'miconic-simpleadl', 'movie', 'mprime', - 'mystery', 'nomystery-sat11-strips', 'openstacks', - 'openstacks-sat08-adl', 'openstacks-sat08-strips', - 'openstacks-sat11-strips', 'openstacks-sat14-strips', - 'openstacks-strips', 'optical-telegraphs', 'parcprinter-08-strips', - 'parcprinter-sat11-strips', 'parking-sat11-strips', - 'parking-sat14-strips', 'pathways', 'pathways-noneg', - 'pegsol-08-strips', 'pegsol-sat11-strips', 'philosophers', - 'pipesworld-notankage', 'pipesworld-tankage', 'psr-large', - 'psr-middle', 'psr-small', 'rovers', 'satellite', - 'scanalyzer-08-strips', 'scanalyzer-sat11-strips', 'schedule', - 'sokoban-sat08-strips', 'sokoban-sat11-strips', 'storage', - 'tetris-sat14-strips', 'thoughtful-sat14-strips', - 'tidybot-sat11-strips', 'tpp', 'transport-sat08-strips', - 'transport-sat11-strips', 'transport-sat14-strips', 'trucks', - 'trucks-strips', 'visitall-sat11-strips', 'visitall-sat14-strips', - 'woodworking-sat08-strips', 'woodworking-sat11-strips', - 'zenotravel'] - - -def get_script(): - """Get file name of main script.""" - return tools.get_script_path() - - -def get_script_dir(): - """Get directory of main script. - - Usually a relative directory (depends on how it was called by the user.)""" - return os.path.dirname(get_script()) - - -def get_experiment_name(): - """Get name for experiment. - - Derived from the absolute filename of the main script, e.g. - "/ham/spam/eggs.py" => "spam-eggs".""" - script = os.path.abspath(get_script()) - script_dir = os.path.basename(os.path.dirname(script)) - script_base = os.path.splitext(os.path.basename(script))[0] - return "%s-%s" % (script_dir, script_base) - - -def get_data_dir(): - """Get data dir for the experiment. 
- - This is the subdirectory "data" of the directory containing - the main script.""" - return os.path.join(get_script_dir(), "data", get_experiment_name()) - - -def get_repo_base(): - """Get base directory of the repository, as an absolute path. - - Search upwards in the directory tree from the main script until a - directory with a subdirectory named ".hg" is found. - - Abort if the repo base cannot be found.""" - path = os.path.abspath(get_script_dir()) - while os.path.dirname(path) != path: - if os.path.exists(os.path.join(path, ".hg")): - return path - path = os.path.dirname(path) - sys.exit("repo base could not be found") - - -def is_running_on_cluster(): - node = platform.node() - return node.endswith(".scicore.unibas.ch") or node.endswith(".cluster.bc2.ch") - - -def is_test_run(): - return ARGS.test_run == "yes" or ( - ARGS.test_run == "auto" and not is_running_on_cluster()) - - -def get_algo_nick(revision, config_nick): - return "{revision}-{config_nick}".format(**locals()) - - -class IssueConfig(object): - """Hold information about a planner configuration. - - See FastDownwardExperiment.add_algorithm() for documentation of the - constructor's options. 
- - """ - def __init__(self, nick, component_options, - build_options=None, driver_options=None): - self.nick = nick - self.component_options = component_options - self.build_options = build_options - self.driver_options = driver_options - - -class IssueExperiment(FastDownwardExperiment): - """Subclass of FastDownwardExperiment with some convenience features.""" - - DEFAULT_TEST_SUITE = ["depot:p01.pddl", "gripper:prob01.pddl"] - - DEFAULT_TABLE_ATTRIBUTES = [ - "cost", - "coverage", - "error", - "evaluations", - "expansions", - "expansions_until_last_jump", - "generated", - "memory", - "quality", - "run_dir", - "score_evaluations", - "score_expansions", - "score_generated", - "score_memory", - "score_search_time", - "score_total_time", - "search_time", - "total_time", - ] - - DEFAULT_SCATTER_PLOT_ATTRIBUTES = [ - "evaluations", - "expansions", - "expansions_until_last_jump", - "initial_h_value", - "memory", - "search_time", - "total_time", - ] - - PORTFOLIO_ATTRIBUTES = [ - "cost", - "coverage", - "error", - "plan_length", - "run_dir", - ] - - def __init__(self, revisions=None, configs=None, path=None, **kwargs): - """ - - You can either specify both *revisions* and *configs* or none - of them. If they are omitted, you will need to call - exp.add_algorithm() manually. - - If *revisions* is given, it must be a non-empty list of - revision identifiers, which specify which planner versions to - use in the experiment. The same versions are used for - translator, preprocessor and search. :: - - IssueExperiment(revisions=["issue123", "4b3d581643"], ...) - - If *configs* is given, it must be a non-empty list of - IssueConfig objects. :: - - IssueExperiment(..., configs=[ - IssueConfig("ff", ["--search", "eager_greedy(ff())"]), - IssueConfig( - "lama", [], - driver_options=["--alias", "seq-sat-lama-2011"]), - ]) - - If *path* is specified, it must be the path to where the - experiment should be built (e.g. - /home/john/experiments/issue123/exp01/). 
If omitted, the - experiment path is derived automatically from the main - script's filename. Example:: - - script = experiments/issue123/exp01.py --> - path = experiments/issue123/data/issue123-exp01/ - - """ - - path = path or get_data_dir() - - FastDownwardExperiment.__init__(self, path=path, **kwargs) - - if (revisions and not configs) or (not revisions and configs): - raise ValueError( - "please provide either both or none of revisions and configs") - - for rev in revisions: - for config in configs: - self.add_algorithm( - get_algo_nick(rev, config.nick), - get_repo_base(), - rev, - config.component_options, - build_options=config.build_options, - driver_options=config.driver_options) - - self._revisions = revisions - self._configs = configs - - @classmethod - def _is_portfolio(cls, config_nick): - return "fdss" in config_nick - - @classmethod - def get_supported_attributes(cls, config_nick, attributes): - if cls._is_portfolio(config_nick): - return [attr for attr in attributes - if attr in cls.PORTFOLIO_ATTRIBUTES] - return attributes - - def add_absolute_report_step(self, **kwargs): - """Add step that makes an absolute report. - - Absolute reports are useful for experiments that don't compare - revisions. - - The report is written to the experiment evaluation directory. - - All *kwargs* will be passed to the AbsoluteReport class. If the - keyword argument *attributes* is not specified, a default list - of attributes is used. :: - - exp.add_absolute_report_step(attributes=["coverage"]) - - """ - kwargs.setdefault("attributes", self.DEFAULT_TABLE_ATTRIBUTES) - report = AbsoluteReport(**kwargs) - outfile = os.path.join( - self.eval_dir, - get_experiment_name() + "." + report.output_format) - self.add_report(report, outfile=outfile) - self.add_step( - 'publish-absolute-report', subprocess.call, ['publish', outfile]) - - def add_comparison_table_step(self, **kwargs): - """Add a step that makes pairwise revision comparisons. 
- - Create comparative reports for all pairs of Fast Downward - revisions. Each report pairs up the runs of the same config and - lists the two absolute attribute values and their difference - for all attributes in kwargs["attributes"]. - - All *kwargs* will be passed to the CompareConfigsReport class. - If the keyword argument *attributes* is not specified, a - default list of attributes is used. :: - - exp.add_comparison_table_step(attributes=["coverage"]) - - """ - kwargs.setdefault("attributes", self.DEFAULT_TABLE_ATTRIBUTES) - - def make_comparison_tables(): - for rev1, rev2 in itertools.combinations(self._revisions, 2): - compared_configs = [] - for config in self._configs: - config_nick = config.nick - compared_configs.append( - ("%s-%s" % (rev1, config_nick), - "%s-%s" % (rev2, config_nick), - "Diff (%s)" % config_nick)) - report = ComparativeReport(compared_configs, **kwargs) - outfile = os.path.join( - self.eval_dir, - "%s-%s-%s-compare.%s" % ( - self.name, rev1, rev2, report.output_format)) - report(self.eval_dir, outfile) - - def publish_comparison_tables(): - for rev1, rev2 in itertools.combinations(self._revisions, 2): - outfile = os.path.join( - self.eval_dir, - "%s-%s-%s-compare.html" % (self.name, rev1, rev2)) - subprocess.call(["publish", outfile]) - - self.add_step("make-comparison-tables", make_comparison_tables) - self.add_step( - "publish-comparison-tables", publish_comparison_tables) - - def add_scatter_plot_step(self, relative=False, attributes=None): - """Add step creating (relative) scatter plots for all revision pairs. - - Create a scatter plot for each combination of attribute, - configuration and revisions pair. If *attributes* is not - specified, a list of common scatter plot attributes is used. - For portfolios all attributes except "cost", "coverage" and - "plan_length" will be ignored. 
:: - - exp.add_scatter_plot_step(attributes=["expansions"]) - - """ - if relative: - report_class = RelativeScatterPlotReport - scatter_dir = os.path.join(self.eval_dir, "scatter-relative") - step_name = "make-relative-scatter-plots" - else: - report_class = ScatterPlotReport - scatter_dir = os.path.join(self.eval_dir, "scatter-absolute") - step_name = "make-absolute-scatter-plots" - if attributes is None: - attributes = self.DEFAULT_SCATTER_PLOT_ATTRIBUTES - - def make_scatter_plot(config_nick, rev1, rev2, attribute): - name = "-".join([self.name, rev1, rev2, attribute, config_nick]) - print "Make scatter plot for", name - algo1 = "{}-{}".format(rev1, config_nick) - algo2 = "{}-{}".format(rev2, config_nick) - report = report_class( - filter_config=[algo1, algo2], - attributes=[attribute], - get_category=lambda run1, run2: run1["domain"], - legend_location=(1.3, 0.5)) - report( - self.eval_dir, - os.path.join(scatter_dir, rev1 + "-" + rev2, name)) - - def make_scatter_plots(): - for config in self._configs: - for rev1, rev2 in itertools.combinations(self._revisions, 2): - for attribute in self.get_supported_attributes( - config.nick, attributes): - make_scatter_plot(config.nick, rev1, rev2, attribute) - - self.add_step(step_name, make_scatter_plots) diff --git a/experiments/issue695/relativescatter.py b/experiments/issue695/relativescatter.py deleted file mode 100755 index f74cb6e721..0000000000 --- a/experiments/issue695/relativescatter.py +++ /dev/null @@ -1,105 +0,0 @@ -# -*- coding: utf-8 -*- - -from collections import defaultdict - -from matplotlib import ticker - -from downward.reports.scatter import ScatterPlotReport -from downward.reports.plot import PlotReport, Matplotlib, MatplotlibPlot - - -# TODO: handle outliers - -# TODO: this is mostly copied from ScatterMatplotlib (scatter.py) -class RelativeScatterMatplotlib(Matplotlib): - @classmethod - def _plot(cls, report, axes, categories, styles): - # Display grid - axes.grid(b=True, linestyle='-', 
color='0.75') - - has_points = False - # Generate the scatter plots - for category, coords in sorted(categories.items()): - X, Y = zip(*coords) - axes.scatter(X, Y, s=42, label=category, **styles[category]) - if X and Y: - has_points = True - - if report.xscale == 'linear' or report.yscale == 'linear': - plot_size = report.missing_val * 1.01 - else: - plot_size = report.missing_val * 1.25 - - # make 5 ticks above and below 1 - yticks = [] - tick_step = report.ylim_top**(1/5.0) - for i in xrange(-5, 6): - yticks.append(tick_step**i) - axes.set_yticks(yticks) - axes.get_yaxis().set_major_formatter(ticker.ScalarFormatter()) - - axes.set_xlim(report.xlim_left or -1, report.xlim_right or plot_size) - axes.set_ylim(report.ylim_bottom or -1, report.ylim_top or plot_size) - - for axis in [axes.xaxis, axes.yaxis]: - MatplotlibPlot.change_axis_formatter( - axis, - report.missing_val if report.show_missing else None) - return has_points - - -class RelativeScatterPlotReport(ScatterPlotReport): - """ - Generate a scatter plot that shows a relative comparison of two - algorithms with regard to the given attribute. The attribute value - of algorithm 1 is shown on the x-axis and the relation to the value - of algorithm 2 on the y-axis. - """ - - def __init__(self, show_missing=True, get_category=None, **kwargs): - ScatterPlotReport.__init__(self, show_missing, get_category, **kwargs) - if self.output_format == 'tex': - raise "not supported" - else: - self.writer = RelativeScatterMatplotlib - - def _fill_categories(self, runs): - # We discard the *runs* parameter. 
- # Map category names to value tuples - categories = defaultdict(list) - self.ylim_bottom = 2 - self.ylim_top = 0.5 - self.xlim_left = float("inf") - for (domain, problem), runs in self.problem_runs.items(): - if len(runs) != 2: - continue - run1, run2 = runs - assert (run1['algorithm'] == self.algorithms[0] and - run2['algorithm'] == self.algorithms[1]) - val1 = run1.get(self.attribute) - val2 = run2.get(self.attribute) - if val1 is None or val2 is None: - continue - category = self.get_category(run1, run2) - assert val1 > 0, (domain, problem, self.algorithms[0], val1) - assert val2 > 0, (domain, problem, self.algorithms[1], val2) - x = val1 - y = val2 / float(val1) - - categories[category].append((x, y)) - - self.ylim_top = max(self.ylim_top, y) - self.ylim_bottom = min(self.ylim_bottom, y) - self.xlim_left = min(self.xlim_left, x) - - # center around 1 - if self.ylim_bottom < 1: - self.ylim_top = max(self.ylim_top, 1 / float(self.ylim_bottom)) - if self.ylim_top > 1: - self.ylim_bottom = min(self.ylim_bottom, 1 / float(self.ylim_top)) - return categories - - def _set_scales(self, xscale, yscale): - # ScatterPlot uses log-scaling on the x-axis by default. - PlotReport._set_scales( - self, xscale or self.attribute.scale or 'log', 'log') diff --git a/experiments/issue695/v1-opt-64bit.py b/experiments/issue695/v1-opt-64bit.py deleted file mode 100755 index 69b1745360..0000000000 --- a/experiments/issue695/v1-opt-64bit.py +++ /dev/null @@ -1,62 +0,0 @@ -#! 
/usr/bin/env python -# -*- coding: utf-8 -*- - -import os - -from lab.environments import LocalEnvironment, BaselSlurmEnvironment - -import common_setup -from common_setup import IssueConfig, IssueExperiment -from relativescatter import RelativeScatterPlotReport - -DIR = os.path.dirname(os.path.abspath(__file__)) -BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"] -REVISIONS = ["issue695-base", "issue695-v1"] -SUITE = common_setup.DEFAULT_OPTIMAL_SUITE -ENVIRONMENT = BaselSlurmEnvironment( - partition="infai_1", - email="salome.eriksson@unibas.ch", - export=["PATH", "DOWNWARD_BENCHMARKS"]) - -CONFIGS = [ - IssueConfig("astar_blind", [ - "--search", - "astar(blind())" - ], build_options = ["release64"], driver_options = ["--build", "release64"]), - IssueConfig("seq-opt-bjolp", [], build_options = ["release64"], - driver_options=["--build", "release64", "--alias", "seq-opt-bjolp"]), - -] - - -if common_setup.is_test_run(): - SUITE = IssueExperiment.DEFAULT_TEST_SUITE - ENVIRONMENT = LocalEnvironment(processes=1) - -exp = IssueExperiment( - revisions=REVISIONS, - configs=CONFIGS, - environment=ENVIRONMENT, -) - -exp.add_parser('lab_driver_parser', exp.LAB_DRIVER_PARSER) -exp.add_parser('exitcode_parser', exp.EXITCODE_PARSER) -#exp.add_parser('translator_parser', exp.TRANSLATOR_PARSER) -exp.add_parser('single_search_parser', exp.SINGLE_SEARCH_PARSER) - -exp.add_suite(BENCHMARKS_DIR, SUITE) -exp.add_absolute_report_step() -exp.add_comparison_table_step() - -for attribute in ["total_time"]: - for config in CONFIGS: - exp.add_report( - RelativeScatterPlotReport( - attributes=[attribute], - filter_algorithm=["{}-{}".format(rev, config.nick) for rev in REVISIONS], - get_category=lambda run1, run2: run1.get("domain"), - ), - outfile="{}-{}-{}-{}-{}.png".format(exp.name, attribute, config.nick, *REVISIONS) - ) - -exp.run_steps() diff --git a/experiments/issue695/v1-opt.py b/experiments/issue695/v1-opt.py deleted file mode 100755 index 6c9db1c215..0000000000 --- 
a/experiments/issue695/v1-opt.py +++ /dev/null @@ -1,61 +0,0 @@ -#! /usr/bin/env python -# -*- coding: utf-8 -*- - -import os - -from lab.environments import LocalEnvironment, BaselSlurmEnvironment - -import common_setup -from common_setup import IssueConfig, IssueExperiment -from relativescatter import RelativeScatterPlotReport - -DIR = os.path.dirname(os.path.abspath(__file__)) -BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"] -REVISIONS = ["issue695-base", "issue695-v1"] -SUITE = common_setup.DEFAULT_OPTIMAL_SUITE -ENVIRONMENT = BaselSlurmEnvironment( - partition="infai_1", - email="salome.eriksson@unibas.ch", - export=["PATH", "DOWNWARD_BENCHMARKS"]) - -CONFIGS = [ - IssueConfig("astar_blind", [ - "--search", - "astar(blind())" - ]), - IssueConfig("seq-opt-bjolp", [], driver_options=["--alias", "seq-opt-bjolp"]), - -] - - -if common_setup.is_test_run(): - SUITE = IssueExperiment.DEFAULT_TEST_SUITE - ENVIRONMENT = LocalEnvironment(processes=1) - -exp = IssueExperiment( - revisions=REVISIONS, - configs=CONFIGS, - environment=ENVIRONMENT, -) - -exp.add_parser('lab_driver_parser', exp.LAB_DRIVER_PARSER) -exp.add_parser('exitcode_parser', exp.EXITCODE_PARSER) -#exp.add_parser('translator_parser', exp.TRANSLATOR_PARSER) -exp.add_parser('single_search_parser', exp.SINGLE_SEARCH_PARSER) - -exp.add_suite(BENCHMARKS_DIR, SUITE) -exp.add_absolute_report_step() -exp.add_comparison_table_step() - -for attribute in ["total_time"]: - for config in CONFIGS: - exp.add_report( - RelativeScatterPlotReport( - attributes=[attribute], - filter_algorithm=["{}-{}".format(rev, config.nick) for rev in REVISIONS], - get_category=lambda run1, run2: run1.get("domain"), - ), - outfile="{}-{}-{}-{}-{}.png".format(exp.name, attribute, config.nick, *REVISIONS) - ) - -exp.run_steps() diff --git a/experiments/issue695/v2-opt.py b/experiments/issue695/v2-opt.py deleted file mode 100755 index 7d7155366f..0000000000 --- a/experiments/issue695/v2-opt.py +++ /dev/null @@ -1,77 +0,0 @@ -#! 
/usr/bin/env python -# -*- coding: utf-8 -*- - -import os - -from lab.environments import LocalEnvironment, BaselSlurmEnvironment - -import common_setup -from common_setup import IssueConfig, IssueExperiment -from relativescatter import RelativeScatterPlotReport - -DIR = os.path.dirname(os.path.abspath(__file__)) -BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"] -REVISIONS = ["issue695-base", "issue695-v2"] -SUITE = common_setup.DEFAULT_OPTIMAL_SUITE -ENVIRONMENT = BaselSlurmEnvironment( - partition="infai_1", - email="salome.eriksson@unibas.ch", - export=["PATH", "DOWNWARD_BENCHMARKS"]) - -CONFIGS = [ - IssueConfig("astar_blind", [ - "--search", - "astar(blind())" - ]), - IssueConfig("astar_blind-64", [ - "--search", - "astar(blind())" - ], build_options = ["release64"], driver_options = ["--build", "release64"]), - - IssueConfig("seq-opt-bjolp", [], driver_options=["--alias", "seq-opt-bjolp"]), - IssueConfig("seq-opt-bjolp-64", [], build_options = ["release64"], - driver_options=["--build", "release64", "--alias", "seq-opt-bjolp"]), - -] - - -if common_setup.is_test_run(): - SUITE = IssueExperiment.DEFAULT_TEST_SUITE - ENVIRONMENT = LocalEnvironment(processes=1) - -exp = IssueExperiment( - revisions=REVISIONS, - configs=CONFIGS, - environment=ENVIRONMENT, -) - -exp.add_parser(exp.EXITCODE_PARSER) -exp.add_parser(exp.TRANSLATOR_PARSER) -exp.add_parser(exp.SINGLE_SEARCH_PARSER) -exp.add_parser(exp.PLANNER_PARSER) - -exp.add_step('build', exp.build) -exp.add_step('start', exp.start_runs) -exp.add_fetcher(name='fetch') - -#exp.add_parser('lab_driver_parser', exp.LAB_DRIVER_PARSER) -#exp.add_parser('exitcode_parser', exp.EXITCODE_PARSER) -#exp.add_parser('translator_parser', exp.TRANSLATOR_PARSER) -#exp.add_parser('single_search_parser', exp.SINGLE_SEARCH_PARSER) - -exp.add_suite(BENCHMARKS_DIR, SUITE) -exp.add_absolute_report_step() -exp.add_comparison_table_step() - -for attribute in ["total_time"]: - for config in CONFIGS: - exp.add_report( - 
RelativeScatterPlotReport( - attributes=[attribute], - filter_algorithm=["{}-{}".format(rev, config.nick) for rev in REVISIONS], - get_category=lambda run1, run2: run1.get("domain"), - ), - outfile="{}-{}-{}-{}-{}.png".format(exp.name, attribute, config.nick, *REVISIONS) - ) - -exp.run_steps() diff --git a/experiments/issue695/v3-lama.py b/experiments/issue695/v3-lama.py deleted file mode 100755 index 5a7a244cf1..0000000000 --- a/experiments/issue695/v3-lama.py +++ /dev/null @@ -1,60 +0,0 @@ -#! /usr/bin/env python -# -*- coding: utf-8 -*- - -import os - -from lab.environments import LocalEnvironment, BaselSlurmEnvironment - -import common_setup -from common_setup import IssueConfig, IssueExperiment -from relativescatter import RelativeScatterPlotReport - -DIR = os.path.dirname(os.path.abspath(__file__)) -BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"] -REVISIONS = ["issue695-v3-base", "issue695-v3"] -SUITE = common_setup.DEFAULT_SATISFICING_SUITE -ENVIRONMENT = BaselSlurmEnvironment( - partition="infai_1", - email="salome.eriksson@unibas.ch", - export=["PATH", "DOWNWARD_BENCHMARKS"]) - -CONFIGS = [ - IssueConfig("lama-first", [], driver_options=["--alias", "lama-first", "--overall-time-limit", "1m",]), -] - - -if common_setup.is_test_run(): - SUITE = IssueExperiment.DEFAULT_TEST_SUITE - ENVIRONMENT = LocalEnvironment(processes=1) - -exp = IssueExperiment( - revisions=REVISIONS, - configs=CONFIGS, - environment=ENVIRONMENT, -) - -exp.add_parser(exp.EXITCODE_PARSER) -exp.add_parser(exp.TRANSLATOR_PARSER) -exp.add_parser(exp.SINGLE_SEARCH_PARSER) -exp.add_parser(exp.PLANNER_PARSER) - -exp.add_step('build', exp.build) -exp.add_step('start', exp.start_runs) -exp.add_fetcher(name='fetch') - -exp.add_suite(BENCHMARKS_DIR, SUITE) -exp.add_absolute_report_step() -exp.add_comparison_table_step() - -for attribute in ["total_time"]: - for config in CONFIGS: - exp.add_report( - RelativeScatterPlotReport( - attributes=[attribute], - filter_algorithm=["{}-{}".format(rev, 
config.nick) for rev in REVISIONS], - get_category=lambda run1, run2: run1.get("domain"), - ), - outfile="{}-{}-{}-{}-{}.png".format(exp.name, attribute, config.nick, *REVISIONS) - ) - -exp.run_steps() diff --git a/experiments/issue696/common_setup.py b/experiments/issue696/common_setup.py deleted file mode 100644 index 4dff4aacfd..0000000000 --- a/experiments/issue696/common_setup.py +++ /dev/null @@ -1,335 +0,0 @@ -# -*- coding: utf-8 -*- - -import itertools -import os -import platform -import subprocess -import sys - -from lab.experiment import ARGPARSER -from lab.steps import Step -from lab import tools - -from downward.experiment import FastDownwardExperiment -from downward.reports.absolute import AbsoluteReport -from downward.reports.compare import CompareConfigsReport -from downward.reports.scatter import ScatterPlotReport - -from relativescatter import RelativeScatterPlotReport - - -def parse_args(): - ARGPARSER.add_argument( - "--test", - choices=["yes", "no", "auto"], - default="auto", - dest="test_run", - help="test experiment locally on a small suite if --test=yes or " - "--test=auto and we are not on a cluster") - return ARGPARSER.parse_args() - -ARGS = parse_args() - - -def get_script(): - """Get file name of main script.""" - return tools.get_script_path() - - -def get_script_dir(): - """Get directory of main script. - - Usually a relative directory (depends on how it was called by the user.)""" - return os.path.dirname(get_script()) - - -def get_experiment_name(): - """Get name for experiment. - - Derived from the absolute filename of the main script, e.g. - "/ham/spam/eggs.py" => "spam-eggs".""" - script = os.path.abspath(get_script()) - script_dir = os.path.basename(os.path.dirname(script)) - script_base = os.path.splitext(os.path.basename(script))[0] - return "%s-%s" % (script_dir, script_base) - - -def get_data_dir(): - """Get data dir for the experiment. 
- - This is the subdirectory "data" of the directory containing - the main script.""" - return os.path.join(get_script_dir(), "data", get_experiment_name()) - - -def get_repo_base(): - """Get base directory of the repository, as an absolute path. - - Search upwards in the directory tree from the main script until a - directory with a subdirectory named ".hg" is found. - - Abort if the repo base cannot be found.""" - path = os.path.abspath(get_script_dir()) - while os.path.dirname(path) != path: - if os.path.exists(os.path.join(path, ".hg")): - return path - path = os.path.dirname(path) - sys.exit("repo base could not be found") - - -def is_running_on_cluster(): - node = platform.node() - return ( - "cluster" in node or - node.startswith("gkigrid") or - node in ["habakuk", "turtur"]) - - -def is_test_run(): - return ARGS.test_run == "yes" or ( - ARGS.test_run == "auto" and not is_running_on_cluster()) - - -def get_algo_nick(revision, config_nick): - return "{revision}-{config_nick}".format(**locals()) - - -class IssueConfig(object): - """Hold information about a planner configuration. - - See FastDownwardExperiment.add_algorithm() for documentation of the - constructor's options. 
- - """ - def __init__(self, nick, component_options, - build_options=None, driver_options=None): - self.nick = nick - self.component_options = component_options - self.build_options = build_options - self.driver_options = driver_options - - -class IssueExperiment(FastDownwardExperiment): - """Subclass of FastDownwardExperiment with some convenience features.""" - - DEFAULT_TEST_SUITE = ["gripper:prob01.pddl"] - - DEFAULT_TABLE_ATTRIBUTES = [ - "cost", - "coverage", - "error", - "evaluations", - "expansions", - "expansions_until_last_jump", - "generated", - "memory", - "quality", - "run_dir", - "score_evaluations", - "score_expansions", - "score_generated", - "score_memory", - "score_search_time", - "score_total_time", - "search_time", - "total_time", - ] - - DEFAULT_SCATTER_PLOT_ATTRIBUTES = [ - "evaluations", - "expansions", - "expansions_until_last_jump", - "initial_h_value", - "memory", - "search_time", - "total_time", - ] - - PORTFOLIO_ATTRIBUTES = [ - "cost", - "coverage", - "error", - "plan_length", - "run_dir", - ] - - def __init__(self, revisions=None, configs=None, path=None, **kwargs): - """ - - You can either specify both *revisions* and *configs* or none - of them. If they are omitted, you will need to call - exp.add_algorithm() manually. - - If *revisions* is given, it must be a non-empty list of - revision identifiers, which specify which planner versions to - use in the experiment. The same versions are used for - translator, preprocessor and search. :: - - IssueExperiment(revisions=["issue123", "4b3d581643"], ...) - - If *configs* is given, it must be a non-empty list of - IssueConfig objects. :: - - IssueExperiment(..., configs=[ - IssueConfig("ff", ["--search", "eager_greedy(ff())"]), - IssueConfig( - "lama", [], - driver_options=["--alias", "seq-sat-lama-2011"]), - ]) - - If *path* is specified, it must be the path to where the - experiment should be built (e.g. - /home/john/experiments/issue123/exp01/). 
If omitted, the - experiment path is derived automatically from the main - script's filename. Example:: - - script = experiments/issue123/exp01.py --> - path = experiments/issue123/data/issue123-exp01/ - - """ - - path = path or get_data_dir() - - FastDownwardExperiment.__init__(self, path=path, **kwargs) - - if (revisions and not configs) or (not revisions and configs): - raise ValueError( - "please provide either both or none of revisions and configs") - - for rev in revisions: - for config in configs: - self.add_algorithm( - get_algo_nick(rev, config.nick), - get_repo_base(), - rev, - config.component_options, - build_options=config.build_options, - driver_options=config.driver_options) - - self._revisions = revisions - self._configs = configs - - @classmethod - def _is_portfolio(cls, config_nick): - return "fdss" in config_nick - - @classmethod - def get_supported_attributes(cls, config_nick, attributes): - if cls._is_portfolio(config_nick): - return [attr for attr in attributes - if attr in cls.PORTFOLIO_ATTRIBUTES] - return attributes - - def add_absolute_report_step(self, **kwargs): - """Add step that makes an absolute report. - - Absolute reports are useful for experiments that don't compare - revisions. - - The report is written to the experiment evaluation directory. - - All *kwargs* will be passed to the AbsoluteReport class. If the - keyword argument *attributes* is not specified, a default list - of attributes is used. :: - - exp.add_absolute_report_step(attributes=["coverage"]) - - """ - kwargs.setdefault("attributes", self.DEFAULT_TABLE_ATTRIBUTES) - report = AbsoluteReport(**kwargs) - outfile = os.path.join( - self.eval_dir, - get_experiment_name() + "." + report.output_format) - self.add_report(report, outfile=outfile) - self.add_step(Step( - 'publish-absolute-report', subprocess.call, ['publish', outfile])) - - def add_comparison_table_step(self, **kwargs): - """Add a step that makes pairwise revision comparisons. 
- - Create comparative reports for all pairs of Fast Downward - revisions. Each report pairs up the runs of the same config and - lists the two absolute attribute values and their difference - for all attributes in kwargs["attributes"]. - - All *kwargs* will be passed to the CompareConfigsReport class. - If the keyword argument *attributes* is not specified, a - default list of attributes is used. :: - - exp.add_comparison_table_step(attributes=["coverage"]) - - """ - kwargs.setdefault("attributes", self.DEFAULT_TABLE_ATTRIBUTES) - - def make_comparison_tables(): - for rev1, rev2 in itertools.combinations(self._revisions, 2): - compared_configs = [] - for config in self._configs: - config_nick = config.nick - compared_configs.append( - ("%s-%s" % (rev1, config_nick), - "%s-%s" % (rev2, config_nick), - "Diff (%s)" % config_nick)) - report = CompareConfigsReport(compared_configs, **kwargs) - outfile = os.path.join( - self.eval_dir, - "%s-%s-%s-compare.%s" % ( - self.name, rev1, rev2, report.output_format)) - report(self.eval_dir, outfile) - - def publish_comparison_tables(): - for rev1, rev2 in itertools.combinations(self._revisions, 2): - outfile = os.path.join( - self.eval_dir, - "%s-%s-%s-compare.html" % (self.name, rev1, rev2)) - subprocess.call(["publish", outfile]) - - self.add_step(Step("make-comparison-tables", make_comparison_tables)) - self.add_step(Step( - "publish-comparison-tables", publish_comparison_tables)) - - def add_scatter_plot_step(self, relative=False, attributes=None): - """Add step creating (relative) scatter plots for all revision pairs. - - Create a scatter plot for each combination of attribute, - configuration and revisions pair. If *attributes* is not - specified, a list of common scatter plot attributes is used. - For portfolios all attributes except "cost", "coverage" and - "plan_length" will be ignored. 
:: - - exp.add_scatter_plot_step(attributes=["expansions"]) - - """ - if relative: - report_class = RelativeScatterPlotReport - scatter_dir = os.path.join(self.eval_dir, "scatter-relative") - step_name = "make-relative-scatter-plots" - else: - report_class = ScatterPlotReport - scatter_dir = os.path.join(self.eval_dir, "scatter-absolute") - step_name = "make-absolute-scatter-plots" - if attributes is None: - attributes = self.DEFAULT_SCATTER_PLOT_ATTRIBUTES - - def make_scatter_plot(config_nick, rev1, rev2, attribute): - name = "-".join([self.name, rev1, rev2, attribute, config_nick]) - print "Make scatter plot for", name - algo1 = "{}-{}".format(rev1, config_nick) - algo2 = "{}-{}".format(rev2, config_nick) - report = report_class( - filter_config=[algo1, algo2], - attributes=[attribute], - get_category=lambda run1, run2: run1["domain"], - legend_location=(1.3, 0.5)) - report( - self.eval_dir, - os.path.join(scatter_dir, rev1 + "-" + rev2, name)) - - def make_scatter_plots(): - for config in self._configs: - for rev1, rev2 in itertools.combinations(self._revisions, 2): - for attribute in self.get_supported_attributes( - config.nick, attributes): - make_scatter_plot(config.nick, rev1, rev2, attribute) - - self.add_step(Step(step_name, make_scatter_plots)) diff --git a/experiments/issue696/relativescatter.py b/experiments/issue696/relativescatter.py deleted file mode 100644 index 14d5d42752..0000000000 --- a/experiments/issue696/relativescatter.py +++ /dev/null @@ -1,106 +0,0 @@ -# -*- coding: utf-8 -*- - -from collections import defaultdict - -from matplotlib import ticker - -from downward.reports.scatter import ScatterPlotReport -from downward.reports.plot import PlotReport, Matplotlib, MatplotlibPlot - - -# TODO: handle outliers - -# TODO: this is mostly copied from ScatterMatplotlib (scatter.py) -class RelativeScatterMatplotlib(Matplotlib): - @classmethod - def _plot(cls, report, axes, categories, styles): - # Display grid - axes.grid(b=True, linestyle='-', 
color='0.75') - - has_points = False - # Generate the scatter plots - for category, coords in sorted(categories.items()): - X, Y = zip(*coords) - axes.scatter(X, Y, s=42, label=category, **styles[category]) - if X and Y: - has_points = True - - if report.xscale == 'linear' or report.yscale == 'linear': - plot_size = report.missing_val * 1.01 - else: - plot_size = report.missing_val * 1.25 - - # make 5 ticks above and below 1 - yticks = [] - tick_step = report.ylim_top**(1/5.0) - for i in xrange(-5, 6): - yticks.append(tick_step**i) - axes.set_yticks(yticks) - axes.get_yaxis().set_major_formatter(ticker.ScalarFormatter()) - - axes.set_xlim(report.xlim_left or -1, report.xlim_right or plot_size) - axes.set_ylim(report.ylim_bottom or -1, report.ylim_top or plot_size) - - for axis in [axes.xaxis, axes.yaxis]: - MatplotlibPlot.change_axis_formatter( - axis, - report.missing_val if report.show_missing else None) - return has_points - - -class RelativeScatterPlotReport(ScatterPlotReport): - """ - Generate a scatter plot that shows how a specific attribute in two - configurations. The attribute value in config 1 is shown on the - x-axis and the relation to the value in config 2 on the y-axis. - """ - - def __init__(self, show_missing=True, get_category=None, **kwargs): - ScatterPlotReport.__init__(self, show_missing, get_category, **kwargs) - if self.output_format == 'tex': - raise "not supported" - else: - self.writer = RelativeScatterMatplotlib - - def _fill_categories(self, runs): - # We discard the *runs* parameter. 
- # Map category names to value tuples - categories = defaultdict(list) - self.ylim_bottom = 2 - self.ylim_top = 0.5 - self.xlim_left = float("inf") - for (domain, problem), runs in self.problem_runs.items(): - if len(runs) != 2: - continue - run1, run2 = runs - assert (run1['config'] == self.configs[0] and - run2['config'] == self.configs[1]) - val1 = run1.get(self.attribute) - val2 = run2.get(self.attribute) - if val1 is None or val2 is None: - continue - category = self.get_category(run1, run2) - assert val1 > 0, (domain, problem, self.configs[0], val1) - assert val2 > 0, (domain, problem, self.configs[1], val2) - x = val1 - y = val2 / float(val1) - - categories[category].append((x, y)) - - self.ylim_top = max(self.ylim_top, y) - self.ylim_bottom = min(self.ylim_bottom, y) - self.xlim_left = min(self.xlim_left, x) - - # center around 1 - if self.ylim_bottom < 1: - self.ylim_top = max(self.ylim_top, 1 / float(self.ylim_bottom)) - if self.ylim_top > 1: - self.ylim_bottom = min(self.ylim_bottom, 1 / float(self.ylim_top)) - return categories - - def _set_scales(self, xscale, yscale): - # ScatterPlots use log-scaling on the x-axis by default. - default_xscale = 'log' - if self.attribute and self.attribute in self.LINEAR: - default_xscale = 'linear' - PlotReport._set_scales(self, xscale or default_xscale, 'log') diff --git a/experiments/issue696/v1-sat-m64.py b/experiments/issue696/v1-sat-m64.py deleted file mode 100755 index 1aacff2226..0000000000 --- a/experiments/issue696/v1-sat-m64.py +++ /dev/null @@ -1,71 +0,0 @@ -#! 
/usr/bin/env python -# -*- coding: utf-8 -*- - -import os - -from lab.environments import LocalEnvironment, MaiaEnvironment - -from common_setup import IssueConfig, IssueExperiment, is_test_run - - -BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"] -REVISIONS = ["issue696-base", "issue696-v1"] -CONFIGS = [ - IssueConfig( - "lazy_greedy_{}".format(heuristic), - ["--heuristic", "h={}()".format(heuristic), - "--search", "lazy_greedy(h, preferred=h)"], - build_options=["release64"], driver_options=["--build", "release64"]) - for heuristic in ["add", "cea", "cg", "ff"] -] + [ - IssueConfig( - "ehc_{}".format(heuristic), - ["--heuristic", "h={}()".format(heuristic), - "--search", "ehc(h, preferred=h)"], - build_options=["release64"], driver_options=["--build", "release64"]) - for heuristic in ["ff"] -] -SUITE = [ - 'airport', 'assembly', 'barman-sat11-strips', 'barman-sat14-strips', - 'blocks', 'cavediving-14-adl', 'childsnack-sat14-strips', - 'citycar-sat14-adl', 'depot', 'driverlog', 'elevators-sat08-strips', - 'elevators-sat11-strips', 'floortile-sat11-strips', - 'floortile-sat14-strips', 'freecell', 'ged-sat14-strips', 'grid', - 'gripper', 'hiking-sat14-strips', 'logistics00', 'logistics98', - 'maintenance-sat14-adl', 'miconic', 'miconic-fulladl', - 'miconic-simpleadl', 'movie', 'mprime', 'mystery', - 'nomystery-sat11-strips', 'openstacks', 'openstacks-sat08-adl', - 'openstacks-sat08-strips', 'openstacks-sat11-strips', - 'openstacks-sat14-strips', 'openstacks-strips', 'optical-telegraphs', - 'parcprinter-08-strips', 'parcprinter-sat11-strips', - 'parking-sat11-strips', 'parking-sat14-strips', 'pathways', - 'pathways-noneg', 'pegsol-08-strips', 'pegsol-sat11-strips', - 'philosophers', 'pipesworld-notankage', 'pipesworld-tankage', - 'psr-large', 'psr-middle', 'psr-small', 'rovers', 'satellite', - 'scanalyzer-08-strips', 'scanalyzer-sat11-strips', 'schedule', - 'sokoban-sat08-strips', 'sokoban-sat11-strips', 'storage', - 'tetris-sat14-strips', 
'thoughtful-sat14-strips', - 'tidybot-sat11-strips', 'tpp', 'transport-sat08-strips', - 'transport-sat11-strips', 'transport-sat14-strips', 'trucks', - 'trucks-strips', 'visitall-sat11-strips', 'visitall-sat14-strips', - 'woodworking-sat08-strips', 'woodworking-sat11-strips', 'zenotravel'] - -ENVIRONMENT = MaiaEnvironment( - priority=0, email="cedric.geissmann@unibas.ch") - -if is_test_run(): - SUITE = IssueExperiment.DEFAULT_TEST_SUITE - ENVIRONMENT = LocalEnvironment(processes=1) - -exp = IssueExperiment( - revisions=REVISIONS, - configs=CONFIGS, - environment=ENVIRONMENT, -) -exp.add_suite(BENCHMARKS_DIR, SUITE) - -exp.add_absolute_report_step() -exp.add_comparison_table_step() -exp.add_scatter_plot_step(attributes=["total_time", "memory"]) - -exp() diff --git a/experiments/issue696/v1-sat.py b/experiments/issue696/v1-sat.py deleted file mode 100755 index e597718b9a..0000000000 --- a/experiments/issue696/v1-sat.py +++ /dev/null @@ -1,69 +0,0 @@ -#! /usr/bin/env python -# -*- coding: utf-8 -*- - -import os - -from lab.environments import LocalEnvironment, MaiaEnvironment - -from common_setup import IssueConfig, IssueExperiment, is_test_run - - -BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"] -REVISIONS = ["issue696-base", "issue696-v1"] -CONFIGS = [ - IssueConfig( - "lazy_greedy_{}".format(heuristic), - ["--heuristic", "h={}()".format(heuristic), - "--search", "lazy_greedy(h, preferred=h)"]) - for heuristic in ["add", "cea", "cg", "ff"] -] + [ - IssueConfig( - "ehc_{}".format(heuristic), - ["--heuristic", "h={}()".format(heuristic), - "--search", "ehc(h, preferred=h)"]) - for heuristic in ["ff"] -] -SUITE = [ - 'airport', 'assembly', 'barman-sat11-strips', 'barman-sat14-strips', - 'blocks', 'cavediving-14-adl', 'childsnack-sat14-strips', - 'citycar-sat14-adl', 'depot', 'driverlog', 'elevators-sat08-strips', - 'elevators-sat11-strips', 'floortile-sat11-strips', - 'floortile-sat14-strips', 'freecell', 'ged-sat14-strips', 'grid', - 'gripper', 
'hiking-sat14-strips', 'logistics00', 'logistics98', - 'maintenance-sat14-adl', 'miconic', 'miconic-fulladl', - 'miconic-simpleadl', 'movie', 'mprime', 'mystery', - 'nomystery-sat11-strips', 'openstacks', 'openstacks-sat08-adl', - 'openstacks-sat08-strips', 'openstacks-sat11-strips', - 'openstacks-sat14-strips', 'openstacks-strips', 'optical-telegraphs', - 'parcprinter-08-strips', 'parcprinter-sat11-strips', - 'parking-sat11-strips', 'parking-sat14-strips', 'pathways', - 'pathways-noneg', 'pegsol-08-strips', 'pegsol-sat11-strips', - 'philosophers', 'pipesworld-notankage', 'pipesworld-tankage', - 'psr-large', 'psr-middle', 'psr-small', 'rovers', 'satellite', - 'scanalyzer-08-strips', 'scanalyzer-sat11-strips', 'schedule', - 'sokoban-sat08-strips', 'sokoban-sat11-strips', 'storage', - 'tetris-sat14-strips', 'thoughtful-sat14-strips', - 'tidybot-sat11-strips', 'tpp', 'transport-sat08-strips', - 'transport-sat11-strips', 'transport-sat14-strips', 'trucks', - 'trucks-strips', 'visitall-sat11-strips', 'visitall-sat14-strips', - 'woodworking-sat08-strips', 'woodworking-sat11-strips', 'zenotravel'] - -ENVIRONMENT = MaiaEnvironment( - priority=0, email="cedric.geissmann@unibas.ch") - -if is_test_run(): - SUITE = IssueExperiment.DEFAULT_TEST_SUITE - ENVIRONMENT = LocalEnvironment(processes=1) - -exp = IssueExperiment( - revisions=REVISIONS, - configs=CONFIGS, - environment=ENVIRONMENT, -) -exp.add_suite(BENCHMARKS_DIR, SUITE) - -exp.add_absolute_report_step() -exp.add_comparison_table_step() -exp.add_scatter_plot_step(attributes=["total_time", "memory"]) - -exp() diff --git a/experiments/issue698/common_setup.py b/experiments/issue698/common_setup.py deleted file mode 100644 index 4dff4aacfd..0000000000 --- a/experiments/issue698/common_setup.py +++ /dev/null @@ -1,335 +0,0 @@ -# -*- coding: utf-8 -*- - -import itertools -import os -import platform -import subprocess -import sys - -from lab.experiment import ARGPARSER -from lab.steps import Step -from lab import tools - 
-from downward.experiment import FastDownwardExperiment -from downward.reports.absolute import AbsoluteReport -from downward.reports.compare import CompareConfigsReport -from downward.reports.scatter import ScatterPlotReport - -from relativescatter import RelativeScatterPlotReport - - -def parse_args(): - ARGPARSER.add_argument( - "--test", - choices=["yes", "no", "auto"], - default="auto", - dest="test_run", - help="test experiment locally on a small suite if --test=yes or " - "--test=auto and we are not on a cluster") - return ARGPARSER.parse_args() - -ARGS = parse_args() - - -def get_script(): - """Get file name of main script.""" - return tools.get_script_path() - - -def get_script_dir(): - """Get directory of main script. - - Usually a relative directory (depends on how it was called by the user.)""" - return os.path.dirname(get_script()) - - -def get_experiment_name(): - """Get name for experiment. - - Derived from the absolute filename of the main script, e.g. - "/ham/spam/eggs.py" => "spam-eggs".""" - script = os.path.abspath(get_script()) - script_dir = os.path.basename(os.path.dirname(script)) - script_base = os.path.splitext(os.path.basename(script))[0] - return "%s-%s" % (script_dir, script_base) - - -def get_data_dir(): - """Get data dir for the experiment. - - This is the subdirectory "data" of the directory containing - the main script.""" - return os.path.join(get_script_dir(), "data", get_experiment_name()) - - -def get_repo_base(): - """Get base directory of the repository, as an absolute path. - - Search upwards in the directory tree from the main script until a - directory with a subdirectory named ".hg" is found. 
- - Abort if the repo base cannot be found.""" - path = os.path.abspath(get_script_dir()) - while os.path.dirname(path) != path: - if os.path.exists(os.path.join(path, ".hg")): - return path - path = os.path.dirname(path) - sys.exit("repo base could not be found") - - -def is_running_on_cluster(): - node = platform.node() - return ( - "cluster" in node or - node.startswith("gkigrid") or - node in ["habakuk", "turtur"]) - - -def is_test_run(): - return ARGS.test_run == "yes" or ( - ARGS.test_run == "auto" and not is_running_on_cluster()) - - -def get_algo_nick(revision, config_nick): - return "{revision}-{config_nick}".format(**locals()) - - -class IssueConfig(object): - """Hold information about a planner configuration. - - See FastDownwardExperiment.add_algorithm() for documentation of the - constructor's options. - - """ - def __init__(self, nick, component_options, - build_options=None, driver_options=None): - self.nick = nick - self.component_options = component_options - self.build_options = build_options - self.driver_options = driver_options - - -class IssueExperiment(FastDownwardExperiment): - """Subclass of FastDownwardExperiment with some convenience features.""" - - DEFAULT_TEST_SUITE = ["gripper:prob01.pddl"] - - DEFAULT_TABLE_ATTRIBUTES = [ - "cost", - "coverage", - "error", - "evaluations", - "expansions", - "expansions_until_last_jump", - "generated", - "memory", - "quality", - "run_dir", - "score_evaluations", - "score_expansions", - "score_generated", - "score_memory", - "score_search_time", - "score_total_time", - "search_time", - "total_time", - ] - - DEFAULT_SCATTER_PLOT_ATTRIBUTES = [ - "evaluations", - "expansions", - "expansions_until_last_jump", - "initial_h_value", - "memory", - "search_time", - "total_time", - ] - - PORTFOLIO_ATTRIBUTES = [ - "cost", - "coverage", - "error", - "plan_length", - "run_dir", - ] - - def __init__(self, revisions=None, configs=None, path=None, **kwargs): - """ - - You can either specify both *revisions* and 
*configs* or none - of them. If they are omitted, you will need to call - exp.add_algorithm() manually. - - If *revisions* is given, it must be a non-empty list of - revision identifiers, which specify which planner versions to - use in the experiment. The same versions are used for - translator, preprocessor and search. :: - - IssueExperiment(revisions=["issue123", "4b3d581643"], ...) - - If *configs* is given, it must be a non-empty list of - IssueConfig objects. :: - - IssueExperiment(..., configs=[ - IssueConfig("ff", ["--search", "eager_greedy(ff())"]), - IssueConfig( - "lama", [], - driver_options=["--alias", "seq-sat-lama-2011"]), - ]) - - If *path* is specified, it must be the path to where the - experiment should be built (e.g. - /home/john/experiments/issue123/exp01/). If omitted, the - experiment path is derived automatically from the main - script's filename. Example:: - - script = experiments/issue123/exp01.py --> - path = experiments/issue123/data/issue123-exp01/ - - """ - - path = path or get_data_dir() - - FastDownwardExperiment.__init__(self, path=path, **kwargs) - - if (revisions and not configs) or (not revisions and configs): - raise ValueError( - "please provide either both or none of revisions and configs") - - for rev in revisions: - for config in configs: - self.add_algorithm( - get_algo_nick(rev, config.nick), - get_repo_base(), - rev, - config.component_options, - build_options=config.build_options, - driver_options=config.driver_options) - - self._revisions = revisions - self._configs = configs - - @classmethod - def _is_portfolio(cls, config_nick): - return "fdss" in config_nick - - @classmethod - def get_supported_attributes(cls, config_nick, attributes): - if cls._is_portfolio(config_nick): - return [attr for attr in attributes - if attr in cls.PORTFOLIO_ATTRIBUTES] - return attributes - - def add_absolute_report_step(self, **kwargs): - """Add step that makes an absolute report. 
- - Absolute reports are useful for experiments that don't compare - revisions. - - The report is written to the experiment evaluation directory. - - All *kwargs* will be passed to the AbsoluteReport class. If the - keyword argument *attributes* is not specified, a default list - of attributes is used. :: - - exp.add_absolute_report_step(attributes=["coverage"]) - - """ - kwargs.setdefault("attributes", self.DEFAULT_TABLE_ATTRIBUTES) - report = AbsoluteReport(**kwargs) - outfile = os.path.join( - self.eval_dir, - get_experiment_name() + "." + report.output_format) - self.add_report(report, outfile=outfile) - self.add_step(Step( - 'publish-absolute-report', subprocess.call, ['publish', outfile])) - - def add_comparison_table_step(self, **kwargs): - """Add a step that makes pairwise revision comparisons. - - Create comparative reports for all pairs of Fast Downward - revisions. Each report pairs up the runs of the same config and - lists the two absolute attribute values and their difference - for all attributes in kwargs["attributes"]. - - All *kwargs* will be passed to the CompareConfigsReport class. - If the keyword argument *attributes* is not specified, a - default list of attributes is used. 
:: - - exp.add_comparison_table_step(attributes=["coverage"]) - - """ - kwargs.setdefault("attributes", self.DEFAULT_TABLE_ATTRIBUTES) - - def make_comparison_tables(): - for rev1, rev2 in itertools.combinations(self._revisions, 2): - compared_configs = [] - for config in self._configs: - config_nick = config.nick - compared_configs.append( - ("%s-%s" % (rev1, config_nick), - "%s-%s" % (rev2, config_nick), - "Diff (%s)" % config_nick)) - report = CompareConfigsReport(compared_configs, **kwargs) - outfile = os.path.join( - self.eval_dir, - "%s-%s-%s-compare.%s" % ( - self.name, rev1, rev2, report.output_format)) - report(self.eval_dir, outfile) - - def publish_comparison_tables(): - for rev1, rev2 in itertools.combinations(self._revisions, 2): - outfile = os.path.join( - self.eval_dir, - "%s-%s-%s-compare.html" % (self.name, rev1, rev2)) - subprocess.call(["publish", outfile]) - - self.add_step(Step("make-comparison-tables", make_comparison_tables)) - self.add_step(Step( - "publish-comparison-tables", publish_comparison_tables)) - - def add_scatter_plot_step(self, relative=False, attributes=None): - """Add step creating (relative) scatter plots for all revision pairs. - - Create a scatter plot for each combination of attribute, - configuration and revisions pair. If *attributes* is not - specified, a list of common scatter plot attributes is used. - For portfolios all attributes except "cost", "coverage" and - "plan_length" will be ignored. 
:: - - exp.add_scatter_plot_step(attributes=["expansions"]) - - """ - if relative: - report_class = RelativeScatterPlotReport - scatter_dir = os.path.join(self.eval_dir, "scatter-relative") - step_name = "make-relative-scatter-plots" - else: - report_class = ScatterPlotReport - scatter_dir = os.path.join(self.eval_dir, "scatter-absolute") - step_name = "make-absolute-scatter-plots" - if attributes is None: - attributes = self.DEFAULT_SCATTER_PLOT_ATTRIBUTES - - def make_scatter_plot(config_nick, rev1, rev2, attribute): - name = "-".join([self.name, rev1, rev2, attribute, config_nick]) - print "Make scatter plot for", name - algo1 = "{}-{}".format(rev1, config_nick) - algo2 = "{}-{}".format(rev2, config_nick) - report = report_class( - filter_config=[algo1, algo2], - attributes=[attribute], - get_category=lambda run1, run2: run1["domain"], - legend_location=(1.3, 0.5)) - report( - self.eval_dir, - os.path.join(scatter_dir, rev1 + "-" + rev2, name)) - - def make_scatter_plots(): - for config in self._configs: - for rev1, rev2 in itertools.combinations(self._revisions, 2): - for attribute in self.get_supported_attributes( - config.nick, attributes): - make_scatter_plot(config.nick, rev1, rev2, attribute) - - self.add_step(Step(step_name, make_scatter_plots)) diff --git a/experiments/issue698/custom-parser.py b/experiments/issue698/custom-parser.py deleted file mode 100755 index 24745c7d39..0000000000 --- a/experiments/issue698/custom-parser.py +++ /dev/null @@ -1,19 +0,0 @@ -#! /usr/bin/env python - -from lab.parser import Parser - - -class CustomParser(Parser): - def __init__(self): - Parser.__init__(self) - self.add_pattern( - "successor_generator_time", - "Building successor generator...done! 
\[t=(.+)s\]", - required=False, - type=float) - - -if __name__ == "__main__": - parser = CustomParser() - print "Running custom parser" - parser.parse() diff --git a/experiments/issue698/relativescatter.py b/experiments/issue698/relativescatter.py deleted file mode 100644 index 14d5d42752..0000000000 --- a/experiments/issue698/relativescatter.py +++ /dev/null @@ -1,106 +0,0 @@ -# -*- coding: utf-8 -*- - -from collections import defaultdict - -from matplotlib import ticker - -from downward.reports.scatter import ScatterPlotReport -from downward.reports.plot import PlotReport, Matplotlib, MatplotlibPlot - - -# TODO: handle outliers - -# TODO: this is mostly copied from ScatterMatplotlib (scatter.py) -class RelativeScatterMatplotlib(Matplotlib): - @classmethod - def _plot(cls, report, axes, categories, styles): - # Display grid - axes.grid(b=True, linestyle='-', color='0.75') - - has_points = False - # Generate the scatter plots - for category, coords in sorted(categories.items()): - X, Y = zip(*coords) - axes.scatter(X, Y, s=42, label=category, **styles[category]) - if X and Y: - has_points = True - - if report.xscale == 'linear' or report.yscale == 'linear': - plot_size = report.missing_val * 1.01 - else: - plot_size = report.missing_val * 1.25 - - # make 5 ticks above and below 1 - yticks = [] - tick_step = report.ylim_top**(1/5.0) - for i in xrange(-5, 6): - yticks.append(tick_step**i) - axes.set_yticks(yticks) - axes.get_yaxis().set_major_formatter(ticker.ScalarFormatter()) - - axes.set_xlim(report.xlim_left or -1, report.xlim_right or plot_size) - axes.set_ylim(report.ylim_bottom or -1, report.ylim_top or plot_size) - - for axis in [axes.xaxis, axes.yaxis]: - MatplotlibPlot.change_axis_formatter( - axis, - report.missing_val if report.show_missing else None) - return has_points - - -class RelativeScatterPlotReport(ScatterPlotReport): - """ - Generate a scatter plot that shows how a specific attribute in two - configurations. 
The attribute value in config 1 is shown on the - x-axis and the relation to the value in config 2 on the y-axis. - """ - - def __init__(self, show_missing=True, get_category=None, **kwargs): - ScatterPlotReport.__init__(self, show_missing, get_category, **kwargs) - if self.output_format == 'tex': - raise "not supported" - else: - self.writer = RelativeScatterMatplotlib - - def _fill_categories(self, runs): - # We discard the *runs* parameter. - # Map category names to value tuples - categories = defaultdict(list) - self.ylim_bottom = 2 - self.ylim_top = 0.5 - self.xlim_left = float("inf") - for (domain, problem), runs in self.problem_runs.items(): - if len(runs) != 2: - continue - run1, run2 = runs - assert (run1['config'] == self.configs[0] and - run2['config'] == self.configs[1]) - val1 = run1.get(self.attribute) - val2 = run2.get(self.attribute) - if val1 is None or val2 is None: - continue - category = self.get_category(run1, run2) - assert val1 > 0, (domain, problem, self.configs[0], val1) - assert val2 > 0, (domain, problem, self.configs[1], val2) - x = val1 - y = val2 / float(val1) - - categories[category].append((x, y)) - - self.ylim_top = max(self.ylim_top, y) - self.ylim_bottom = min(self.ylim_bottom, y) - self.xlim_left = min(self.xlim_left, x) - - # center around 1 - if self.ylim_bottom < 1: - self.ylim_top = max(self.ylim_top, 1 / float(self.ylim_bottom)) - if self.ylim_top > 1: - self.ylim_bottom = min(self.ylim_bottom, 1 / float(self.ylim_top)) - return categories - - def _set_scales(self, xscale, yscale): - # ScatterPlots use log-scaling on the x-axis by default. - default_xscale = 'log' - if self.attribute and self.attribute in self.LINEAR: - default_xscale = 'linear' - PlotReport._set_scales(self, xscale or default_xscale, 'log') diff --git a/experiments/issue698/v1.py b/experiments/issue698/v1.py deleted file mode 100755 index 04d52863ce..0000000000 --- a/experiments/issue698/v1.py +++ /dev/null @@ -1,45 +0,0 @@ -#! 
/usr/bin/env python -# -*- coding: utf-8 -*- - -import os, sys - -from lab.environments import LocalEnvironment, MaiaEnvironment - -from common_setup import IssueConfig, IssueExperiment, is_test_run - - -BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"] -REVISIONS = ["issue698-base", "issue698-v1"] -CONFIGS = [ - IssueConfig( - "blind", - ["--search", "astar(blind())"], - driver_options=["--search-time-limit", "60s"] - ) -] - -sys.path.append(BENCHMARKS_DIR) -import suites - -SUITE = suites.suite_optimal_strips() -ENVIRONMENT = MaiaEnvironment( - priority=0, email="florian.pommerening@unibas.ch") - -if is_test_run(): - SUITE = IssueExperiment.DEFAULT_TEST_SUITE - ENVIRONMENT = LocalEnvironment(processes=1) - -exp = IssueExperiment( - revisions=REVISIONS, - configs=CONFIGS, - environment=ENVIRONMENT, -) -exp.add_suite(BENCHMARKS_DIR, SUITE) -exp.add_command("parser", ["custom-parser.py"]) - -exp.add_comparison_table_step( - attributes=exp.DEFAULT_TABLE_ATTRIBUTES + - ["successor_generator_time", "reopened_until_last_jump"]) -exp.add_scatter_plot_step(attributes=["successor_generator_time"]) - -exp() diff --git a/experiments/issue700/common_setup.py b/experiments/issue700/common_setup.py deleted file mode 100644 index 5d2b40b61c..0000000000 --- a/experiments/issue700/common_setup.py +++ /dev/null @@ -1,385 +0,0 @@ -# -*- coding: utf-8 -*- - -import itertools -import os -import platform -import subprocess -import sys - -from lab.experiment import ARGPARSER -from lab import tools - -from downward.experiment import FastDownwardExperiment -from downward.reports.absolute import AbsoluteReport -from downward.reports.compare import ComparativeReport -from downward.reports.scatter import ScatterPlotReport - -from relativescatter import RelativeScatterPlotReport - - -def parse_args(): - ARGPARSER.add_argument( - "--test", - choices=["yes", "no", "auto"], - default="auto", - dest="test_run", - help="test experiment locally on a small suite if --test=yes or " - "--test=auto 
and we are not on a cluster") - return ARGPARSER.parse_args() - -ARGS = parse_args() - - -DEFAULT_OPTIMAL_SUITE = [ - 'airport', 'barman-opt11-strips', 'barman-opt14-strips', 'blocks', - 'childsnack-opt14-strips', 'depot', 'driverlog', - 'elevators-opt08-strips', 'elevators-opt11-strips', - 'floortile-opt11-strips', 'floortile-opt14-strips', 'freecell', - 'ged-opt14-strips', 'grid', 'gripper', 'hiking-opt14-strips', - 'logistics00', 'logistics98', 'miconic', 'movie', 'mprime', - 'mystery', 'nomystery-opt11-strips', 'openstacks-opt08-strips', - 'openstacks-opt11-strips', 'openstacks-opt14-strips', - 'openstacks-strips', 'parcprinter-08-strips', - 'parcprinter-opt11-strips', 'parking-opt11-strips', - 'parking-opt14-strips', 'pathways-noneg', 'pegsol-08-strips', - 'pegsol-opt11-strips', 'pipesworld-notankage', - 'pipesworld-tankage', 'psr-small', 'rovers', 'satellite', - 'scanalyzer-08-strips', 'scanalyzer-opt11-strips', - 'sokoban-opt08-strips', 'sokoban-opt11-strips', 'storage', - 'tetris-opt14-strips', 'tidybot-opt11-strips', - 'tidybot-opt14-strips', 'tpp', 'transport-opt08-strips', - 'transport-opt11-strips', 'transport-opt14-strips', - 'trucks-strips', 'visitall-opt11-strips', 'visitall-opt14-strips', - 'woodworking-opt08-strips', 'woodworking-opt11-strips', - 'zenotravel'] - -DEFAULT_SATISFICING_SUITE = [ - 'airport', 'assembly', 'barman-sat11-strips', - 'barman-sat14-strips', 'blocks', 'cavediving-14-adl', - 'childsnack-sat14-strips', 'citycar-sat14-adl', 'depot', - 'driverlog', 'elevators-sat08-strips', 'elevators-sat11-strips', - 'floortile-sat11-strips', 'floortile-sat14-strips', 'freecell', - 'ged-sat14-strips', 'grid', 'gripper', 'hiking-sat14-strips', - 'logistics00', 'logistics98', 'maintenance-sat14-adl', 'miconic', - 'miconic-fulladl', 'miconic-simpleadl', 'movie', 'mprime', - 'mystery', 'nomystery-sat11-strips', 'openstacks', - 'openstacks-sat08-adl', 'openstacks-sat08-strips', - 'openstacks-sat11-strips', 'openstacks-sat14-strips', - 
'openstacks-strips', 'optical-telegraphs', 'parcprinter-08-strips', - 'parcprinter-sat11-strips', 'parking-sat11-strips', - 'parking-sat14-strips', 'pathways', 'pathways-noneg', - 'pegsol-08-strips', 'pegsol-sat11-strips', 'philosophers', - 'pipesworld-notankage', 'pipesworld-tankage', 'psr-large', - 'psr-middle', 'psr-small', 'rovers', 'satellite', - 'scanalyzer-08-strips', 'scanalyzer-sat11-strips', 'schedule', - 'sokoban-sat08-strips', 'sokoban-sat11-strips', 'storage', - 'tetris-sat14-strips', 'thoughtful-sat14-strips', - 'tidybot-sat11-strips', 'tpp', 'transport-sat08-strips', - 'transport-sat11-strips', 'transport-sat14-strips', 'trucks', - 'trucks-strips', 'visitall-sat11-strips', 'visitall-sat14-strips', - 'woodworking-sat08-strips', 'woodworking-sat11-strips', - 'zenotravel'] - - -def get_script(): - """Get file name of main script.""" - return tools.get_script_path() - - -def get_script_dir(): - """Get directory of main script. - - Usually a relative directory (depends on how it was called by the user.)""" - return os.path.dirname(get_script()) - - -def get_experiment_name(): - """Get name for experiment. - - Derived from the absolute filename of the main script, e.g. - "/ham/spam/eggs.py" => "spam-eggs".""" - script = os.path.abspath(get_script()) - script_dir = os.path.basename(os.path.dirname(script)) - script_base = os.path.splitext(os.path.basename(script))[0] - return "%s-%s" % (script_dir, script_base) - - -def get_data_dir(): - """Get data dir for the experiment. - - This is the subdirectory "data" of the directory containing - the main script.""" - return os.path.join(get_script_dir(), "data", get_experiment_name()) - - -def get_repo_base(): - """Get base directory of the repository, as an absolute path. - - Search upwards in the directory tree from the main script until a - directory with a subdirectory named ".hg" is found. 
- - Abort if the repo base cannot be found.""" - path = os.path.abspath(get_script_dir()) - while os.path.dirname(path) != path: - if os.path.exists(os.path.join(path, ".hg")): - return path - path = os.path.dirname(path) - sys.exit("repo base could not be found") - - -def is_running_on_cluster(): - node = platform.node() - return ( - "cluster" in node or - node.startswith("gkigrid") or - node in ["habakuk", "turtur"]) - - -def is_test_run(): - return ARGS.test_run == "yes" or ( - ARGS.test_run == "auto" and not is_running_on_cluster()) - - -def get_algo_nick(revision, config_nick): - return "{revision}-{config_nick}".format(**locals()) - - -class IssueConfig(object): - """Hold information about a planner configuration. - - See FastDownwardExperiment.add_algorithm() for documentation of the - constructor's options. - - """ - def __init__(self, nick, component_options, - build_options=None, driver_options=None): - self.nick = nick - self.component_options = component_options - self.build_options = build_options - self.driver_options = driver_options - - -class IssueExperiment(FastDownwardExperiment): - """Subclass of FastDownwardExperiment with some convenience features.""" - - DEFAULT_TEST_SUITE = ["gripper:prob01.pddl"] - - DEFAULT_TABLE_ATTRIBUTES = [ - "cost", - "coverage", - "error", - "evaluations", - "expansions", - "expansions_until_last_jump", - "generated", - "memory", - "quality", - "run_dir", - "score_evaluations", - "score_expansions", - "score_generated", - "score_memory", - "score_search_time", - "score_total_time", - "search_time", - "total_time", - ] - - DEFAULT_SCATTER_PLOT_ATTRIBUTES = [ - "evaluations", - "expansions", - "expansions_until_last_jump", - "initial_h_value", - "memory", - "search_time", - "total_time", - ] - - PORTFOLIO_ATTRIBUTES = [ - "cost", - "coverage", - "error", - "plan_length", - "run_dir", - ] - - def __init__(self, revisions=None, configs=None, path=None, **kwargs): - """ - - You can either specify both *revisions* and 
*configs* or none - of them. If they are omitted, you will need to call - exp.add_algorithm() manually. - - If *revisions* is given, it must be a non-empty list of - revision identifiers, which specify which planner versions to - use in the experiment. The same versions are used for - translator, preprocessor and search. :: - - IssueExperiment(revisions=["issue123", "4b3d581643"], ...) - - If *configs* is given, it must be a non-empty list of - IssueConfig objects. :: - - IssueExperiment(..., configs=[ - IssueConfig("ff", ["--search", "eager_greedy(ff())"]), - IssueConfig( - "lama", [], - driver_options=["--alias", "seq-sat-lama-2011"]), - ]) - - If *path* is specified, it must be the path to where the - experiment should be built (e.g. - /home/john/experiments/issue123/exp01/). If omitted, the - experiment path is derived automatically from the main - script's filename. Example:: - - script = experiments/issue123/exp01.py --> - path = experiments/issue123/data/issue123-exp01/ - - """ - - path = path or get_data_dir() - - FastDownwardExperiment.__init__(self, path=path, **kwargs) - - if (revisions and not configs) or (not revisions and configs): - raise ValueError( - "please provide either both or none of revisions and configs") - - for rev in revisions: - for config in configs: - self.add_algorithm( - get_algo_nick(rev, config.nick), - get_repo_base(), - rev, - config.component_options, - build_options=config.build_options, - driver_options=config.driver_options) - - self._revisions = revisions - self._configs = configs - - @classmethod - def _is_portfolio(cls, config_nick): - return "fdss" in config_nick - - @classmethod - def get_supported_attributes(cls, config_nick, attributes): - if cls._is_portfolio(config_nick): - return [attr for attr in attributes - if attr in cls.PORTFOLIO_ATTRIBUTES] - return attributes - - def add_absolute_report_step(self, **kwargs): - """Add step that makes an absolute report. 
- - Absolute reports are useful for experiments that don't compare - revisions. - - The report is written to the experiment evaluation directory. - - All *kwargs* will be passed to the AbsoluteReport class. If the - keyword argument *attributes* is not specified, a default list - of attributes is used. :: - - exp.add_absolute_report_step(attributes=["coverage"]) - - """ - kwargs.setdefault("attributes", self.DEFAULT_TABLE_ATTRIBUTES) - report = AbsoluteReport(**kwargs) - outfile = os.path.join( - self.eval_dir, - get_experiment_name() + "." + report.output_format) - self.add_report(report, outfile=outfile) - self.add_step( - 'publish-absolute-report', subprocess.call, ['publish', outfile]) - - def add_comparison_table_step(self, **kwargs): - """Add a step that makes pairwise revision comparisons. - - Create comparative reports for all pairs of Fast Downward - revisions. Each report pairs up the runs of the same config and - lists the two absolute attribute values and their difference - for all attributes in kwargs["attributes"]. - - All *kwargs* will be passed to the CompareConfigsReport class. - If the keyword argument *attributes* is not specified, a - default list of attributes is used. 
:: - - exp.add_comparison_table_step(attributes=["coverage"]) - - """ - kwargs.setdefault("attributes", self.DEFAULT_TABLE_ATTRIBUTES) - - def make_comparison_tables(): - for rev1, rev2 in itertools.combinations(self._revisions, 2): - compared_configs = [] - for config in self._configs: - config_nick = config.nick - compared_configs.append( - ("%s-%s" % (rev1, config_nick), - "%s-%s" % (rev2, config_nick), - "Diff (%s)" % config_nick)) - report = ComparativeReport(compared_configs, **kwargs) - outfile = os.path.join( - self.eval_dir, - "%s-%s-%s-compare.%s" % ( - self.name, rev1, rev2, report.output_format)) - report(self.eval_dir, outfile) - - def publish_comparison_tables(): - for rev1, rev2 in itertools.combinations(self._revisions, 2): - outfile = os.path.join( - self.eval_dir, - "%s-%s-%s-compare.html" % (self.name, rev1, rev2)) - subprocess.call(["publish", outfile]) - - self.add_step("make-comparison-tables", make_comparison_tables) - self.add_step( - "publish-comparison-tables", publish_comparison_tables) - - def add_scatter_plot_step(self, relative=False, attributes=None): - """Add step creating (relative) scatter plots for all revision pairs. - - Create a scatter plot for each combination of attribute, - configuration and revisions pair. If *attributes* is not - specified, a list of common scatter plot attributes is used. - For portfolios all attributes except "cost", "coverage" and - "plan_length" will be ignored. 
:: - - exp.add_scatter_plot_step(attributes=["expansions"]) - - """ - if relative: - report_class = RelativeScatterPlotReport - scatter_dir = os.path.join(self.eval_dir, "scatter-relative") - step_name = "make-relative-scatter-plots" - else: - report_class = ScatterPlotReport - scatter_dir = os.path.join(self.eval_dir, "scatter-absolute") - step_name = "make-absolute-scatter-plots" - if attributes is None: - attributes = self.DEFAULT_SCATTER_PLOT_ATTRIBUTES - - def make_scatter_plot(config_nick, rev1, rev2, attribute): - name = "-".join([self.name, rev1, rev2, attribute, config_nick]) - print "Make scatter plot for", name - algo1 = "{}-{}".format(rev1, config_nick) - algo2 = "{}-{}".format(rev2, config_nick) - report = report_class( - filter_config=[algo1, algo2], - attributes=[attribute], - get_category=lambda run1, run2: run1["domain"], - legend_location=(1.3, 0.5)) - report( - self.eval_dir, - os.path.join(scatter_dir, rev1 + "-" + rev2, name)) - - def make_scatter_plots(): - for config in self._configs: - for rev1, rev2 in itertools.combinations(self._revisions, 2): - for attribute in self.get_supported_attributes( - config.nick, attributes): - make_scatter_plot(config.nick, rev1, rev2, attribute) - - self.add_step(step_name, make_scatter_plots) diff --git a/experiments/issue700/relativescatter.py b/experiments/issue700/relativescatter.py deleted file mode 100644 index f74cb6e721..0000000000 --- a/experiments/issue700/relativescatter.py +++ /dev/null @@ -1,105 +0,0 @@ -# -*- coding: utf-8 -*- - -from collections import defaultdict - -from matplotlib import ticker - -from downward.reports.scatter import ScatterPlotReport -from downward.reports.plot import PlotReport, Matplotlib, MatplotlibPlot - - -# TODO: handle outliers - -# TODO: this is mostly copied from ScatterMatplotlib (scatter.py) -class RelativeScatterMatplotlib(Matplotlib): - @classmethod - def _plot(cls, report, axes, categories, styles): - # Display grid - axes.grid(b=True, linestyle='-', 
color='0.75') - - has_points = False - # Generate the scatter plots - for category, coords in sorted(categories.items()): - X, Y = zip(*coords) - axes.scatter(X, Y, s=42, label=category, **styles[category]) - if X and Y: - has_points = True - - if report.xscale == 'linear' or report.yscale == 'linear': - plot_size = report.missing_val * 1.01 - else: - plot_size = report.missing_val * 1.25 - - # make 5 ticks above and below 1 - yticks = [] - tick_step = report.ylim_top**(1/5.0) - for i in xrange(-5, 6): - yticks.append(tick_step**i) - axes.set_yticks(yticks) - axes.get_yaxis().set_major_formatter(ticker.ScalarFormatter()) - - axes.set_xlim(report.xlim_left or -1, report.xlim_right or plot_size) - axes.set_ylim(report.ylim_bottom or -1, report.ylim_top or plot_size) - - for axis in [axes.xaxis, axes.yaxis]: - MatplotlibPlot.change_axis_formatter( - axis, - report.missing_val if report.show_missing else None) - return has_points - - -class RelativeScatterPlotReport(ScatterPlotReport): - """ - Generate a scatter plot that shows a relative comparison of two - algorithms with regard to the given attribute. The attribute value - of algorithm 1 is shown on the x-axis and the relation to the value - of algorithm 2 on the y-axis. - """ - - def __init__(self, show_missing=True, get_category=None, **kwargs): - ScatterPlotReport.__init__(self, show_missing, get_category, **kwargs) - if self.output_format == 'tex': - raise "not supported" - else: - self.writer = RelativeScatterMatplotlib - - def _fill_categories(self, runs): - # We discard the *runs* parameter. 
- # Map category names to value tuples - categories = defaultdict(list) - self.ylim_bottom = 2 - self.ylim_top = 0.5 - self.xlim_left = float("inf") - for (domain, problem), runs in self.problem_runs.items(): - if len(runs) != 2: - continue - run1, run2 = runs - assert (run1['algorithm'] == self.algorithms[0] and - run2['algorithm'] == self.algorithms[1]) - val1 = run1.get(self.attribute) - val2 = run2.get(self.attribute) - if val1 is None or val2 is None: - continue - category = self.get_category(run1, run2) - assert val1 > 0, (domain, problem, self.algorithms[0], val1) - assert val2 > 0, (domain, problem, self.algorithms[1], val2) - x = val1 - y = val2 / float(val1) - - categories[category].append((x, y)) - - self.ylim_top = max(self.ylim_top, y) - self.ylim_bottom = min(self.ylim_bottom, y) - self.xlim_left = min(self.xlim_left, x) - - # center around 1 - if self.ylim_bottom < 1: - self.ylim_top = max(self.ylim_top, 1 / float(self.ylim_bottom)) - if self.ylim_top > 1: - self.ylim_bottom = min(self.ylim_bottom, 1 / float(self.ylim_top)) - return categories - - def _set_scales(self, xscale, yscale): - # ScatterPlot uses log-scaling on the x-axis by default. - PlotReport._set_scales( - self, xscale or self.attribute.scale or 'log', 'log') diff --git a/experiments/issue700/v1-opt.py b/experiments/issue700/v1-opt.py deleted file mode 100755 index 1f04b9741c..0000000000 --- a/experiments/issue700/v1-opt.py +++ /dev/null @@ -1,50 +0,0 @@ -#! 
/usr/bin/env python -# -*- coding: utf-8 -*- - -import os - -from lab.environments import LocalEnvironment, BaselSlurmEnvironment - -import common_setup -from common_setup import IssueConfig, IssueExperiment -from relativescatter import RelativeScatterPlotReport - - -DIR = os.path.dirname(os.path.abspath(__file__)) -BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"] -REVISIONS = ["issue700-base", "issue700-v1"] -CONFIGS = [ - IssueConfig("blind", ["--search", "astar(blind())"]), - IssueConfig("lmcut", ["--search", "astar(lmcut())"]), -] -SUITE = common_setup.DEFAULT_OPTIMAL_SUITE -ENVIRONMENT = BaselSlurmEnvironment( - email="florian.pommerening@unibas.ch", - export=["PATH", "DOWNWARD_BENCHMARKS"]) - -if common_setup.is_test_run(): - SUITE = IssueExperiment.DEFAULT_TEST_SUITE - ENVIRONMENT = LocalEnvironment(processes=1) - -exp = IssueExperiment( - revisions=REVISIONS, - configs=CONFIGS, - environment=ENVIRONMENT, -) -exp.add_suite(BENCHMARKS_DIR, SUITE) - -exp.add_absolute_report_step() -exp.add_comparison_table_step() - -for attr in ["total_time", "search_time", "memory"]: - for rev1, rev2 in [("base", "v1")]: - for config_nick in ["blind", "lmcut"]: - exp.add_report(RelativeScatterPlotReport( - attributes=[attr], - filter_algorithm=["issue700-%s-%s" % (rev1, config_nick), - "issue700-%s-%s" % (rev2, config_nick)], - get_category=lambda r1, r2: r1["domain"], - ), - outfile="issue700-%s-%s-%s-%s.png" % (config_nick, attr, rev1, rev2)) - -exp.run_steps() diff --git a/experiments/issue700/v1-opt2.py b/experiments/issue700/v1-opt2.py deleted file mode 100755 index e22b4d2de4..0000000000 --- a/experiments/issue700/v1-opt2.py +++ /dev/null @@ -1,50 +0,0 @@ -#! 
/usr/bin/env python -# -*- coding: utf-8 -*- - -import os - -from lab.environments import LocalEnvironment, BaselSlurmEnvironment - -import common_setup -from common_setup import IssueConfig, IssueExperiment -from relativescatter import RelativeScatterPlotReport - - -DIR = os.path.dirname(os.path.abspath(__file__)) -BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"] -REVISIONS = ["issue700-base", "issue700-v1"] -CONFIGS = [ - IssueConfig("h2", ["--search", "astar(hm(2))"]), - IssueConfig("ipdb", ["--search", "astar(ipdb())"]), -] -SUITE = common_setup.DEFAULT_OPTIMAL_SUITE -ENVIRONMENT = BaselSlurmEnvironment( - email="florian.pommerening@unibas.ch", - export=["PATH", "DOWNWARD_BENCHMARKS"]) - -if common_setup.is_test_run(): - SUITE = IssueExperiment.DEFAULT_TEST_SUITE - ENVIRONMENT = LocalEnvironment(processes=1) - -exp = IssueExperiment( - revisions=REVISIONS, - configs=CONFIGS, - environment=ENVIRONMENT, -) -exp.add_suite(BENCHMARKS_DIR, SUITE) - -exp.add_absolute_report_step() -exp.add_comparison_table_step() - -for attr in ["total_time", "search_time", "memory"]: - for rev1, rev2 in [("base", "v1")]: - for config_nick in ["h2", "ipdb"]: - exp.add_report(RelativeScatterPlotReport( - attributes=[attr], - filter_algorithm=["issue700-%s-%s" % (rev1, config_nick), - "issue700-%s-%s" % (rev2, config_nick)], - get_category=lambda r1, r2: r1["domain"], - ), - outfile="issue700-%s-%s-%s-%s.png" % (config_nick, attr, rev1, rev2)) - -exp.run_steps() diff --git a/experiments/issue700/v1-sat.py b/experiments/issue700/v1-sat.py deleted file mode 100755 index 3bb2aa1c5d..0000000000 --- a/experiments/issue700/v1-sat.py +++ /dev/null @@ -1,58 +0,0 @@ -#! 
/usr/bin/env python -# -*- coding: utf-8 -*- - -import os - -from lab.environments import LocalEnvironment, BaselSlurmEnvironment - -import common_setup -from common_setup import IssueConfig, IssueExperiment -from relativescatter import RelativeScatterPlotReport - - -DIR = os.path.dirname(os.path.abspath(__file__)) -BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"] -REVISIONS = ["issue700-base", "issue700-v1"] -CONFIGS = [ - IssueConfig( - "lama-first", - [], - driver_options=["--alias", "lama-first"]), - IssueConfig( - "lama", - [], - driver_options=["--alias", "seq-sat-lama-2011"]), - IssueConfig("ehc_ff", ["--heuristic", "h=ff()", "--search", "ehc(h, preferred=[h])"]), -] -SUITE = common_setup.DEFAULT_SATISFICING_SUITE -ENVIRONMENT = BaselSlurmEnvironment( - email="florian.pommerening@unibas.ch", - export=["PATH", "DOWNWARD_BENCHMARKS"]) - -if common_setup.is_test_run(): - SUITE = IssueExperiment.DEFAULT_TEST_SUITE - ENVIRONMENT = LocalEnvironment(processes=1) - -exp = IssueExperiment( - revisions=REVISIONS, - configs=CONFIGS, - environment=ENVIRONMENT, -) -exp.add_suite(BENCHMARKS_DIR, SUITE) - -exp.add_absolute_report_step(filter_algorithm=["lama-first"]) -exp.add_comparison_table_step() - -for attr in ["total_time", "search_time", "memory"]: - for rev1, rev2 in [("base", "v1")]: - for config_nick in ["lama-first", "ehc_ff"]: - exp.add_report(RelativeScatterPlotReport( - attributes=[attr], - filter_algorithm=["issue700-%s-%s" % (rev1, config_nick), - "issue700-%s-%s" % (rev2, config_nick)], - get_category=lambda r1, r2: r1["domain"], - ), - outfile="issue700-%s-%s-%s-%s.png" % (config_nick, attr, rev1, rev2)) - - -exp.run_steps() diff --git a/experiments/issue700/v1-sat2.py b/experiments/issue700/v1-sat2.py deleted file mode 100755 index 5a98d5462c..0000000000 --- a/experiments/issue700/v1-sat2.py +++ /dev/null @@ -1,50 +0,0 @@ -#! 
/usr/bin/env python -# -*- coding: utf-8 -*- - -import os - -from lab.environments import LocalEnvironment, BaselSlurmEnvironment - -import common_setup -from common_setup import IssueConfig, IssueExperiment -from relativescatter import RelativeScatterPlotReport - - -DIR = os.path.dirname(os.path.abspath(__file__)) -BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"] -REVISIONS = ["issue700-base", "issue700-v1"] -CONFIGS = [ - IssueConfig("ehc_ff", ["--heuristic", "h=ff()", "--search", "ehc(h, preferred=[h])"]), -] -SUITE = common_setup.DEFAULT_SATISFICING_SUITE -ENVIRONMENT = BaselSlurmEnvironment( - email="florian.pommerening@unibas.ch", - export=["PATH", "DOWNWARD_BENCHMARKS"]) - -if common_setup.is_test_run(): - SUITE = IssueExperiment.DEFAULT_TEST_SUITE - ENVIRONMENT = LocalEnvironment(processes=1) - -exp = IssueExperiment( - revisions=REVISIONS, - configs=CONFIGS, - environment=ENVIRONMENT, -) -exp.add_suite(BENCHMARKS_DIR, SUITE) - -exp.add_absolute_report_step() -exp.add_comparison_table_step() - -for attr in ["total_time", "search_time", "memory"]: - for rev1, rev2 in [("base", "v1")]: - for config_nick in ["ehc_ff"]: - exp.add_report(RelativeScatterPlotReport( - attributes=[attr], - filter_algorithm=["issue700-%s-%s" % (rev1, config_nick), - "issue700-%s-%s" % (rev2, config_nick)], - get_category=lambda r1, r2: r1["domain"], - ), - outfile="issue700-%s-%s-%s-%s.png" % (config_nick, attr, rev1, rev2)) - - -exp.run_steps() diff --git a/experiments/issue700/v2-opt.py b/experiments/issue700/v2-opt.py deleted file mode 100755 index abd9eec551..0000000000 --- a/experiments/issue700/v2-opt.py +++ /dev/null @@ -1,52 +0,0 @@ -#! 
/usr/bin/env python -# -*- coding: utf-8 -*- - -import os - -from lab.environments import LocalEnvironment, BaselSlurmEnvironment - -import common_setup -from common_setup import IssueConfig, IssueExperiment -from relativescatter import RelativeScatterPlotReport - - -DIR = os.path.dirname(os.path.abspath(__file__)) -BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"] -REVISIONS = ["issue700-v2-base", "issue700-v2"] -CONFIGS = [ - IssueConfig("h2", ["--search", "astar(hm(2))"]), - IssueConfig("ipdb", ["--search", "astar(ipdb())"]), - IssueConfig("blind", ["--search", "astar(blind())"]), - IssueConfig("lmcut", ["--search", "astar(lmcut())"]), -] -SUITE = common_setup.DEFAULT_OPTIMAL_SUITE -ENVIRONMENT = BaselSlurmEnvironment( - email="florian.pommerening@unibas.ch", - export=["PATH", "DOWNWARD_BENCHMARKS"]) - -if common_setup.is_test_run(): - SUITE = IssueExperiment.DEFAULT_TEST_SUITE - ENVIRONMENT = LocalEnvironment(processes=1) - -exp = IssueExperiment( - revisions=REVISIONS, - configs=CONFIGS, - environment=ENVIRONMENT, -) -exp.add_suite(BENCHMARKS_DIR, SUITE) - -exp.add_absolute_report_step() -exp.add_comparison_table_step() - -for attr in ["total_time", "search_time", "memory"]: - for rev1, rev2 in [("v2-base", "v2")]: - for config_nick in ["h2", "ipdb", "blind", "lmcut"]: - exp.add_report(RelativeScatterPlotReport( - attributes=[attr], - filter_algorithm=["issue700-%s-%s" % (rev1, config_nick), - "issue700-%s-%s" % (rev2, config_nick)], - get_category=lambda r1, r2: r1["domain"], - ), - outfile="issue700-%s-%s-%s-%s.png" % (config_nick, attr, rev1, rev2)) - -exp.run_steps() diff --git a/experiments/issue700/v2-sat.py b/experiments/issue700/v2-sat.py deleted file mode 100755 index 5c085ca1af..0000000000 --- a/experiments/issue700/v2-sat.py +++ /dev/null @@ -1,54 +0,0 @@ -#! 
/usr/bin/env python -# -*- coding: utf-8 -*- - -import os - -from lab.environments import LocalEnvironment, BaselSlurmEnvironment - -import common_setup -from common_setup import IssueConfig, IssueExperiment -from relativescatter import RelativeScatterPlotReport - - -DIR = os.path.dirname(os.path.abspath(__file__)) -BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"] -REVISIONS = ["issue700-v2-base", "issue700-v2"] -CONFIGS = [ - IssueConfig("ehc_ff", ["--heuristic", "h=ff()", "--search", "ehc(h, preferred=[h])"]), - IssueConfig( - "lama-first", - [], - driver_options=["--alias", "lama-first"]), -] -SUITE = common_setup.DEFAULT_SATISFICING_SUITE -ENVIRONMENT = BaselSlurmEnvironment( - email="florian.pommerening@unibas.ch", - export=["PATH", "DOWNWARD_BENCHMARKS"]) - -if common_setup.is_test_run(): - SUITE = IssueExperiment.DEFAULT_TEST_SUITE - ENVIRONMENT = LocalEnvironment(processes=1) - -exp = IssueExperiment( - revisions=REVISIONS, - configs=CONFIGS, - environment=ENVIRONMENT, -) -exp.add_suite(BENCHMARKS_DIR, SUITE) - -exp.add_absolute_report_step() -exp.add_comparison_table_step() - -for attr in ["total_time", "search_time", "memory"]: - for rev1, rev2 in [("v2-base", "v2")]: - for config_nick in ["ehc_ff", "lama-first"]: - exp.add_report(RelativeScatterPlotReport( - attributes=[attr], - filter_algorithm=["issue700-%s-%s" % (rev1, config_nick), - "issue700-%s-%s" % (rev2, config_nick)], - get_category=lambda r1, r2: r1["domain"], - ), - outfile="issue700-%s-%s-%s-%s.png" % (config_nick, attr, rev1, rev2)) - - -exp.run_steps() diff --git a/experiments/issue700/v3-opt.py b/experiments/issue700/v3-opt.py deleted file mode 100755 index aa5915fd85..0000000000 --- a/experiments/issue700/v3-opt.py +++ /dev/null @@ -1,49 +0,0 @@ -#! 
/usr/bin/env python -# -*- coding: utf-8 -*- - -import os - -from lab.environments import LocalEnvironment, BaselSlurmEnvironment - -import common_setup -from common_setup import IssueConfig, IssueExperiment -from relativescatter import RelativeScatterPlotReport - - -DIR = os.path.dirname(os.path.abspath(__file__)) -BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"] -REVISIONS = ["issue700-v2", "issue700-v3"] -CONFIGS = [ - IssueConfig("h2", ["--search", "astar(hm(2))"]), -] -SUITE = common_setup.DEFAULT_OPTIMAL_SUITE -ENVIRONMENT = BaselSlurmEnvironment( - email="florian.pommerening@unibas.ch", - export=["PATH", "DOWNWARD_BENCHMARKS"]) - -if common_setup.is_test_run(): - SUITE = IssueExperiment.DEFAULT_TEST_SUITE - ENVIRONMENT = LocalEnvironment(processes=1) - -exp = IssueExperiment( - revisions=REVISIONS, - configs=CONFIGS, - environment=ENVIRONMENT, -) -exp.add_suite(BENCHMARKS_DIR, SUITE) - -exp.add_absolute_report_step() -exp.add_comparison_table_step() - -for attr in ["total_time", "search_time", "memory"]: - for rev1, rev2 in [("v2", "v3")]: - for config_nick in ["h2"]: - exp.add_report(RelativeScatterPlotReport( - attributes=[attr], - filter_algorithm=["issue700-%s-%s" % (rev1, config_nick), - "issue700-%s-%s" % (rev2, config_nick)], - get_category=lambda r1, r2: r1["domain"], - ), - outfile="issue700-%s-%s-%s-%s.png" % (config_nick, attr, rev1, rev2)) - -exp.run_steps() diff --git a/experiments/issue701/common_setup.py b/experiments/issue701/common_setup.py deleted file mode 100644 index 4dff4aacfd..0000000000 --- a/experiments/issue701/common_setup.py +++ /dev/null @@ -1,335 +0,0 @@ -# -*- coding: utf-8 -*- - -import itertools -import os -import platform -import subprocess -import sys - -from lab.experiment import ARGPARSER -from lab.steps import Step -from lab import tools - -from downward.experiment import FastDownwardExperiment -from downward.reports.absolute import AbsoluteReport -from downward.reports.compare import CompareConfigsReport -from 
downward.reports.scatter import ScatterPlotReport - -from relativescatter import RelativeScatterPlotReport - - -def parse_args(): - ARGPARSER.add_argument( - "--test", - choices=["yes", "no", "auto"], - default="auto", - dest="test_run", - help="test experiment locally on a small suite if --test=yes or " - "--test=auto and we are not on a cluster") - return ARGPARSER.parse_args() - -ARGS = parse_args() - - -def get_script(): - """Get file name of main script.""" - return tools.get_script_path() - - -def get_script_dir(): - """Get directory of main script. - - Usually a relative directory (depends on how it was called by the user.)""" - return os.path.dirname(get_script()) - - -def get_experiment_name(): - """Get name for experiment. - - Derived from the absolute filename of the main script, e.g. - "/ham/spam/eggs.py" => "spam-eggs".""" - script = os.path.abspath(get_script()) - script_dir = os.path.basename(os.path.dirname(script)) - script_base = os.path.splitext(os.path.basename(script))[0] - return "%s-%s" % (script_dir, script_base) - - -def get_data_dir(): - """Get data dir for the experiment. - - This is the subdirectory "data" of the directory containing - the main script.""" - return os.path.join(get_script_dir(), "data", get_experiment_name()) - - -def get_repo_base(): - """Get base directory of the repository, as an absolute path. - - Search upwards in the directory tree from the main script until a - directory with a subdirectory named ".hg" is found. 
- - Abort if the repo base cannot be found.""" - path = os.path.abspath(get_script_dir()) - while os.path.dirname(path) != path: - if os.path.exists(os.path.join(path, ".hg")): - return path - path = os.path.dirname(path) - sys.exit("repo base could not be found") - - -def is_running_on_cluster(): - node = platform.node() - return ( - "cluster" in node or - node.startswith("gkigrid") or - node in ["habakuk", "turtur"]) - - -def is_test_run(): - return ARGS.test_run == "yes" or ( - ARGS.test_run == "auto" and not is_running_on_cluster()) - - -def get_algo_nick(revision, config_nick): - return "{revision}-{config_nick}".format(**locals()) - - -class IssueConfig(object): - """Hold information about a planner configuration. - - See FastDownwardExperiment.add_algorithm() for documentation of the - constructor's options. - - """ - def __init__(self, nick, component_options, - build_options=None, driver_options=None): - self.nick = nick - self.component_options = component_options - self.build_options = build_options - self.driver_options = driver_options - - -class IssueExperiment(FastDownwardExperiment): - """Subclass of FastDownwardExperiment with some convenience features.""" - - DEFAULT_TEST_SUITE = ["gripper:prob01.pddl"] - - DEFAULT_TABLE_ATTRIBUTES = [ - "cost", - "coverage", - "error", - "evaluations", - "expansions", - "expansions_until_last_jump", - "generated", - "memory", - "quality", - "run_dir", - "score_evaluations", - "score_expansions", - "score_generated", - "score_memory", - "score_search_time", - "score_total_time", - "search_time", - "total_time", - ] - - DEFAULT_SCATTER_PLOT_ATTRIBUTES = [ - "evaluations", - "expansions", - "expansions_until_last_jump", - "initial_h_value", - "memory", - "search_time", - "total_time", - ] - - PORTFOLIO_ATTRIBUTES = [ - "cost", - "coverage", - "error", - "plan_length", - "run_dir", - ] - - def __init__(self, revisions=None, configs=None, path=None, **kwargs): - """ - - You can either specify both *revisions* and 
*configs* or none - of them. If they are omitted, you will need to call - exp.add_algorithm() manually. - - If *revisions* is given, it must be a non-empty list of - revision identifiers, which specify which planner versions to - use in the experiment. The same versions are used for - translator, preprocessor and search. :: - - IssueExperiment(revisions=["issue123", "4b3d581643"], ...) - - If *configs* is given, it must be a non-empty list of - IssueConfig objects. :: - - IssueExperiment(..., configs=[ - IssueConfig("ff", ["--search", "eager_greedy(ff())"]), - IssueConfig( - "lama", [], - driver_options=["--alias", "seq-sat-lama-2011"]), - ]) - - If *path* is specified, it must be the path to where the - experiment should be built (e.g. - /home/john/experiments/issue123/exp01/). If omitted, the - experiment path is derived automatically from the main - script's filename. Example:: - - script = experiments/issue123/exp01.py --> - path = experiments/issue123/data/issue123-exp01/ - - """ - - path = path or get_data_dir() - - FastDownwardExperiment.__init__(self, path=path, **kwargs) - - if (revisions and not configs) or (not revisions and configs): - raise ValueError( - "please provide either both or none of revisions and configs") - - for rev in revisions: - for config in configs: - self.add_algorithm( - get_algo_nick(rev, config.nick), - get_repo_base(), - rev, - config.component_options, - build_options=config.build_options, - driver_options=config.driver_options) - - self._revisions = revisions - self._configs = configs - - @classmethod - def _is_portfolio(cls, config_nick): - return "fdss" in config_nick - - @classmethod - def get_supported_attributes(cls, config_nick, attributes): - if cls._is_portfolio(config_nick): - return [attr for attr in attributes - if attr in cls.PORTFOLIO_ATTRIBUTES] - return attributes - - def add_absolute_report_step(self, **kwargs): - """Add step that makes an absolute report. 
- - Absolute reports are useful for experiments that don't compare - revisions. - - The report is written to the experiment evaluation directory. - - All *kwargs* will be passed to the AbsoluteReport class. If the - keyword argument *attributes* is not specified, a default list - of attributes is used. :: - - exp.add_absolute_report_step(attributes=["coverage"]) - - """ - kwargs.setdefault("attributes", self.DEFAULT_TABLE_ATTRIBUTES) - report = AbsoluteReport(**kwargs) - outfile = os.path.join( - self.eval_dir, - get_experiment_name() + "." + report.output_format) - self.add_report(report, outfile=outfile) - self.add_step(Step( - 'publish-absolute-report', subprocess.call, ['publish', outfile])) - - def add_comparison_table_step(self, **kwargs): - """Add a step that makes pairwise revision comparisons. - - Create comparative reports for all pairs of Fast Downward - revisions. Each report pairs up the runs of the same config and - lists the two absolute attribute values and their difference - for all attributes in kwargs["attributes"]. - - All *kwargs* will be passed to the CompareConfigsReport class. - If the keyword argument *attributes* is not specified, a - default list of attributes is used. 
:: - - exp.add_comparison_table_step(attributes=["coverage"]) - - """ - kwargs.setdefault("attributes", self.DEFAULT_TABLE_ATTRIBUTES) - - def make_comparison_tables(): - for rev1, rev2 in itertools.combinations(self._revisions, 2): - compared_configs = [] - for config in self._configs: - config_nick = config.nick - compared_configs.append( - ("%s-%s" % (rev1, config_nick), - "%s-%s" % (rev2, config_nick), - "Diff (%s)" % config_nick)) - report = CompareConfigsReport(compared_configs, **kwargs) - outfile = os.path.join( - self.eval_dir, - "%s-%s-%s-compare.%s" % ( - self.name, rev1, rev2, report.output_format)) - report(self.eval_dir, outfile) - - def publish_comparison_tables(): - for rev1, rev2 in itertools.combinations(self._revisions, 2): - outfile = os.path.join( - self.eval_dir, - "%s-%s-%s-compare.html" % (self.name, rev1, rev2)) - subprocess.call(["publish", outfile]) - - self.add_step(Step("make-comparison-tables", make_comparison_tables)) - self.add_step(Step( - "publish-comparison-tables", publish_comparison_tables)) - - def add_scatter_plot_step(self, relative=False, attributes=None): - """Add step creating (relative) scatter plots for all revision pairs. - - Create a scatter plot for each combination of attribute, - configuration and revisions pair. If *attributes* is not - specified, a list of common scatter plot attributes is used. - For portfolios all attributes except "cost", "coverage" and - "plan_length" will be ignored. 
:: - - exp.add_scatter_plot_step(attributes=["expansions"]) - - """ - if relative: - report_class = RelativeScatterPlotReport - scatter_dir = os.path.join(self.eval_dir, "scatter-relative") - step_name = "make-relative-scatter-plots" - else: - report_class = ScatterPlotReport - scatter_dir = os.path.join(self.eval_dir, "scatter-absolute") - step_name = "make-absolute-scatter-plots" - if attributes is None: - attributes = self.DEFAULT_SCATTER_PLOT_ATTRIBUTES - - def make_scatter_plot(config_nick, rev1, rev2, attribute): - name = "-".join([self.name, rev1, rev2, attribute, config_nick]) - print "Make scatter plot for", name - algo1 = "{}-{}".format(rev1, config_nick) - algo2 = "{}-{}".format(rev2, config_nick) - report = report_class( - filter_config=[algo1, algo2], - attributes=[attribute], - get_category=lambda run1, run2: run1["domain"], - legend_location=(1.3, 0.5)) - report( - self.eval_dir, - os.path.join(scatter_dir, rev1 + "-" + rev2, name)) - - def make_scatter_plots(): - for config in self._configs: - for rev1, rev2 in itertools.combinations(self._revisions, 2): - for attribute in self.get_supported_attributes( - config.nick, attributes): - make_scatter_plot(config.nick, rev1, rev2, attribute) - - self.add_step(Step(step_name, make_scatter_plots)) diff --git a/experiments/issue701/relativescatter.py b/experiments/issue701/relativescatter.py deleted file mode 100644 index 14d5d42752..0000000000 --- a/experiments/issue701/relativescatter.py +++ /dev/null @@ -1,106 +0,0 @@ -# -*- coding: utf-8 -*- - -from collections import defaultdict - -from matplotlib import ticker - -from downward.reports.scatter import ScatterPlotReport -from downward.reports.plot import PlotReport, Matplotlib, MatplotlibPlot - - -# TODO: handle outliers - -# TODO: this is mostly copied from ScatterMatplotlib (scatter.py) -class RelativeScatterMatplotlib(Matplotlib): - @classmethod - def _plot(cls, report, axes, categories, styles): - # Display grid - axes.grid(b=True, linestyle='-', 
color='0.75') - - has_points = False - # Generate the scatter plots - for category, coords in sorted(categories.items()): - X, Y = zip(*coords) - axes.scatter(X, Y, s=42, label=category, **styles[category]) - if X and Y: - has_points = True - - if report.xscale == 'linear' or report.yscale == 'linear': - plot_size = report.missing_val * 1.01 - else: - plot_size = report.missing_val * 1.25 - - # make 5 ticks above and below 1 - yticks = [] - tick_step = report.ylim_top**(1/5.0) - for i in xrange(-5, 6): - yticks.append(tick_step**i) - axes.set_yticks(yticks) - axes.get_yaxis().set_major_formatter(ticker.ScalarFormatter()) - - axes.set_xlim(report.xlim_left or -1, report.xlim_right or plot_size) - axes.set_ylim(report.ylim_bottom or -1, report.ylim_top or plot_size) - - for axis in [axes.xaxis, axes.yaxis]: - MatplotlibPlot.change_axis_formatter( - axis, - report.missing_val if report.show_missing else None) - return has_points - - -class RelativeScatterPlotReport(ScatterPlotReport): - """ - Generate a scatter plot that shows how a specific attribute in two - configurations. The attribute value in config 1 is shown on the - x-axis and the relation to the value in config 2 on the y-axis. - """ - - def __init__(self, show_missing=True, get_category=None, **kwargs): - ScatterPlotReport.__init__(self, show_missing, get_category, **kwargs) - if self.output_format == 'tex': - raise "not supported" - else: - self.writer = RelativeScatterMatplotlib - - def _fill_categories(self, runs): - # We discard the *runs* parameter. 
- # Map category names to value tuples - categories = defaultdict(list) - self.ylim_bottom = 2 - self.ylim_top = 0.5 - self.xlim_left = float("inf") - for (domain, problem), runs in self.problem_runs.items(): - if len(runs) != 2: - continue - run1, run2 = runs - assert (run1['config'] == self.configs[0] and - run2['config'] == self.configs[1]) - val1 = run1.get(self.attribute) - val2 = run2.get(self.attribute) - if val1 is None or val2 is None: - continue - category = self.get_category(run1, run2) - assert val1 > 0, (domain, problem, self.configs[0], val1) - assert val2 > 0, (domain, problem, self.configs[1], val2) - x = val1 - y = val2 / float(val1) - - categories[category].append((x, y)) - - self.ylim_top = max(self.ylim_top, y) - self.ylim_bottom = min(self.ylim_bottom, y) - self.xlim_left = min(self.xlim_left, x) - - # center around 1 - if self.ylim_bottom < 1: - self.ylim_top = max(self.ylim_top, 1 / float(self.ylim_bottom)) - if self.ylim_top > 1: - self.ylim_bottom = min(self.ylim_bottom, 1 / float(self.ylim_top)) - return categories - - def _set_scales(self, xscale, yscale): - # ScatterPlots use log-scaling on the x-axis by default. - default_xscale = 'log' - if self.attribute and self.attribute in self.LINEAR: - default_xscale = 'linear' - PlotReport._set_scales(self, xscale or default_xscale, 'log') diff --git a/experiments/issue701/v1.py b/experiments/issue701/v1.py deleted file mode 100755 index ae07ddea24..0000000000 --- a/experiments/issue701/v1.py +++ /dev/null @@ -1,40 +0,0 @@ -#! 
/usr/bin/env python -# -*- coding: utf-8 -*- - -import os - -from lab.environments import LocalEnvironment, MaiaEnvironment - -from downward import suites - -from common_setup import IssueConfig, IssueExperiment, is_test_run - - -BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"] -REVISIONS = ["issue701-base", "issue701-v1"] -CONFIGS = [ - IssueConfig( - alias, [], driver_options=["--alias", alias]) - for alias in [ - "seq-sat-fd-autotune-1", "seq-sat-fd-autotune-2", - "seq-sat-lama-2011", "seq-sat-fdss-2014"] -] -SUITE = suites.suite_all() -ENVIRONMENT = MaiaEnvironment( - priority=-10, email="manuel.heusner@unibas.ch") - -if is_test_run(): - SUITE = IssueExperiment.DEFAULT_TEST_SUITE - ENVIRONMENT = LocalEnvironment(processes=1) - -exp = IssueExperiment( - revisions=REVISIONS, - configs=CONFIGS, - environment=ENVIRONMENT, -) -exp.add_suite(BENCHMARKS_DIR, SUITE) - -exp.add_absolute_report_step() -exp.add_comparison_table_step() - -exp() diff --git a/experiments/issue704/common_setup.py b/experiments/issue704/common_setup.py deleted file mode 100644 index 5d2b40b61c..0000000000 --- a/experiments/issue704/common_setup.py +++ /dev/null @@ -1,385 +0,0 @@ -# -*- coding: utf-8 -*- - -import itertools -import os -import platform -import subprocess -import sys - -from lab.experiment import ARGPARSER -from lab import tools - -from downward.experiment import FastDownwardExperiment -from downward.reports.absolute import AbsoluteReport -from downward.reports.compare import ComparativeReport -from downward.reports.scatter import ScatterPlotReport - -from relativescatter import RelativeScatterPlotReport - - -def parse_args(): - ARGPARSER.add_argument( - "--test", - choices=["yes", "no", "auto"], - default="auto", - dest="test_run", - help="test experiment locally on a small suite if --test=yes or " - "--test=auto and we are not on a cluster") - return ARGPARSER.parse_args() - -ARGS = parse_args() - - -DEFAULT_OPTIMAL_SUITE = [ - 'airport', 'barman-opt11-strips', 
'barman-opt14-strips', 'blocks', - 'childsnack-opt14-strips', 'depot', 'driverlog', - 'elevators-opt08-strips', 'elevators-opt11-strips', - 'floortile-opt11-strips', 'floortile-opt14-strips', 'freecell', - 'ged-opt14-strips', 'grid', 'gripper', 'hiking-opt14-strips', - 'logistics00', 'logistics98', 'miconic', 'movie', 'mprime', - 'mystery', 'nomystery-opt11-strips', 'openstacks-opt08-strips', - 'openstacks-opt11-strips', 'openstacks-opt14-strips', - 'openstacks-strips', 'parcprinter-08-strips', - 'parcprinter-opt11-strips', 'parking-opt11-strips', - 'parking-opt14-strips', 'pathways-noneg', 'pegsol-08-strips', - 'pegsol-opt11-strips', 'pipesworld-notankage', - 'pipesworld-tankage', 'psr-small', 'rovers', 'satellite', - 'scanalyzer-08-strips', 'scanalyzer-opt11-strips', - 'sokoban-opt08-strips', 'sokoban-opt11-strips', 'storage', - 'tetris-opt14-strips', 'tidybot-opt11-strips', - 'tidybot-opt14-strips', 'tpp', 'transport-opt08-strips', - 'transport-opt11-strips', 'transport-opt14-strips', - 'trucks-strips', 'visitall-opt11-strips', 'visitall-opt14-strips', - 'woodworking-opt08-strips', 'woodworking-opt11-strips', - 'zenotravel'] - -DEFAULT_SATISFICING_SUITE = [ - 'airport', 'assembly', 'barman-sat11-strips', - 'barman-sat14-strips', 'blocks', 'cavediving-14-adl', - 'childsnack-sat14-strips', 'citycar-sat14-adl', 'depot', - 'driverlog', 'elevators-sat08-strips', 'elevators-sat11-strips', - 'floortile-sat11-strips', 'floortile-sat14-strips', 'freecell', - 'ged-sat14-strips', 'grid', 'gripper', 'hiking-sat14-strips', - 'logistics00', 'logistics98', 'maintenance-sat14-adl', 'miconic', - 'miconic-fulladl', 'miconic-simpleadl', 'movie', 'mprime', - 'mystery', 'nomystery-sat11-strips', 'openstacks', - 'openstacks-sat08-adl', 'openstacks-sat08-strips', - 'openstacks-sat11-strips', 'openstacks-sat14-strips', - 'openstacks-strips', 'optical-telegraphs', 'parcprinter-08-strips', - 'parcprinter-sat11-strips', 'parking-sat11-strips', - 'parking-sat14-strips', 'pathways', 
'pathways-noneg', - 'pegsol-08-strips', 'pegsol-sat11-strips', 'philosophers', - 'pipesworld-notankage', 'pipesworld-tankage', 'psr-large', - 'psr-middle', 'psr-small', 'rovers', 'satellite', - 'scanalyzer-08-strips', 'scanalyzer-sat11-strips', 'schedule', - 'sokoban-sat08-strips', 'sokoban-sat11-strips', 'storage', - 'tetris-sat14-strips', 'thoughtful-sat14-strips', - 'tidybot-sat11-strips', 'tpp', 'transport-sat08-strips', - 'transport-sat11-strips', 'transport-sat14-strips', 'trucks', - 'trucks-strips', 'visitall-sat11-strips', 'visitall-sat14-strips', - 'woodworking-sat08-strips', 'woodworking-sat11-strips', - 'zenotravel'] - - -def get_script(): - """Get file name of main script.""" - return tools.get_script_path() - - -def get_script_dir(): - """Get directory of main script. - - Usually a relative directory (depends on how it was called by the user.)""" - return os.path.dirname(get_script()) - - -def get_experiment_name(): - """Get name for experiment. - - Derived from the absolute filename of the main script, e.g. - "/ham/spam/eggs.py" => "spam-eggs".""" - script = os.path.abspath(get_script()) - script_dir = os.path.basename(os.path.dirname(script)) - script_base = os.path.splitext(os.path.basename(script))[0] - return "%s-%s" % (script_dir, script_base) - - -def get_data_dir(): - """Get data dir for the experiment. - - This is the subdirectory "data" of the directory containing - the main script.""" - return os.path.join(get_script_dir(), "data", get_experiment_name()) - - -def get_repo_base(): - """Get base directory of the repository, as an absolute path. - - Search upwards in the directory tree from the main script until a - directory with a subdirectory named ".hg" is found. 
- - Abort if the repo base cannot be found.""" - path = os.path.abspath(get_script_dir()) - while os.path.dirname(path) != path: - if os.path.exists(os.path.join(path, ".hg")): - return path - path = os.path.dirname(path) - sys.exit("repo base could not be found") - - -def is_running_on_cluster(): - node = platform.node() - return ( - "cluster" in node or - node.startswith("gkigrid") or - node in ["habakuk", "turtur"]) - - -def is_test_run(): - return ARGS.test_run == "yes" or ( - ARGS.test_run == "auto" and not is_running_on_cluster()) - - -def get_algo_nick(revision, config_nick): - return "{revision}-{config_nick}".format(**locals()) - - -class IssueConfig(object): - """Hold information about a planner configuration. - - See FastDownwardExperiment.add_algorithm() for documentation of the - constructor's options. - - """ - def __init__(self, nick, component_options, - build_options=None, driver_options=None): - self.nick = nick - self.component_options = component_options - self.build_options = build_options - self.driver_options = driver_options - - -class IssueExperiment(FastDownwardExperiment): - """Subclass of FastDownwardExperiment with some convenience features.""" - - DEFAULT_TEST_SUITE = ["gripper:prob01.pddl"] - - DEFAULT_TABLE_ATTRIBUTES = [ - "cost", - "coverage", - "error", - "evaluations", - "expansions", - "expansions_until_last_jump", - "generated", - "memory", - "quality", - "run_dir", - "score_evaluations", - "score_expansions", - "score_generated", - "score_memory", - "score_search_time", - "score_total_time", - "search_time", - "total_time", - ] - - DEFAULT_SCATTER_PLOT_ATTRIBUTES = [ - "evaluations", - "expansions", - "expansions_until_last_jump", - "initial_h_value", - "memory", - "search_time", - "total_time", - ] - - PORTFOLIO_ATTRIBUTES = [ - "cost", - "coverage", - "error", - "plan_length", - "run_dir", - ] - - def __init__(self, revisions=None, configs=None, path=None, **kwargs): - """ - - You can either specify both *revisions* and 
*configs* or none - of them. If they are omitted, you will need to call - exp.add_algorithm() manually. - - If *revisions* is given, it must be a non-empty list of - revision identifiers, which specify which planner versions to - use in the experiment. The same versions are used for - translator, preprocessor and search. :: - - IssueExperiment(revisions=["issue123", "4b3d581643"], ...) - - If *configs* is given, it must be a non-empty list of - IssueConfig objects. :: - - IssueExperiment(..., configs=[ - IssueConfig("ff", ["--search", "eager_greedy(ff())"]), - IssueConfig( - "lama", [], - driver_options=["--alias", "seq-sat-lama-2011"]), - ]) - - If *path* is specified, it must be the path to where the - experiment should be built (e.g. - /home/john/experiments/issue123/exp01/). If omitted, the - experiment path is derived automatically from the main - script's filename. Example:: - - script = experiments/issue123/exp01.py --> - path = experiments/issue123/data/issue123-exp01/ - - """ - - path = path or get_data_dir() - - FastDownwardExperiment.__init__(self, path=path, **kwargs) - - if (revisions and not configs) or (not revisions and configs): - raise ValueError( - "please provide either both or none of revisions and configs") - - for rev in revisions: - for config in configs: - self.add_algorithm( - get_algo_nick(rev, config.nick), - get_repo_base(), - rev, - config.component_options, - build_options=config.build_options, - driver_options=config.driver_options) - - self._revisions = revisions - self._configs = configs - - @classmethod - def _is_portfolio(cls, config_nick): - return "fdss" in config_nick - - @classmethod - def get_supported_attributes(cls, config_nick, attributes): - if cls._is_portfolio(config_nick): - return [attr for attr in attributes - if attr in cls.PORTFOLIO_ATTRIBUTES] - return attributes - - def add_absolute_report_step(self, **kwargs): - """Add step that makes an absolute report. 
- - Absolute reports are useful for experiments that don't compare - revisions. - - The report is written to the experiment evaluation directory. - - All *kwargs* will be passed to the AbsoluteReport class. If the - keyword argument *attributes* is not specified, a default list - of attributes is used. :: - - exp.add_absolute_report_step(attributes=["coverage"]) - - """ - kwargs.setdefault("attributes", self.DEFAULT_TABLE_ATTRIBUTES) - report = AbsoluteReport(**kwargs) - outfile = os.path.join( - self.eval_dir, - get_experiment_name() + "." + report.output_format) - self.add_report(report, outfile=outfile) - self.add_step( - 'publish-absolute-report', subprocess.call, ['publish', outfile]) - - def add_comparison_table_step(self, **kwargs): - """Add a step that makes pairwise revision comparisons. - - Create comparative reports for all pairs of Fast Downward - revisions. Each report pairs up the runs of the same config and - lists the two absolute attribute values and their difference - for all attributes in kwargs["attributes"]. - - All *kwargs* will be passed to the CompareConfigsReport class. - If the keyword argument *attributes* is not specified, a - default list of attributes is used. 
:: - - exp.add_comparison_table_step(attributes=["coverage"]) - - """ - kwargs.setdefault("attributes", self.DEFAULT_TABLE_ATTRIBUTES) - - def make_comparison_tables(): - for rev1, rev2 in itertools.combinations(self._revisions, 2): - compared_configs = [] - for config in self._configs: - config_nick = config.nick - compared_configs.append( - ("%s-%s" % (rev1, config_nick), - "%s-%s" % (rev2, config_nick), - "Diff (%s)" % config_nick)) - report = ComparativeReport(compared_configs, **kwargs) - outfile = os.path.join( - self.eval_dir, - "%s-%s-%s-compare.%s" % ( - self.name, rev1, rev2, report.output_format)) - report(self.eval_dir, outfile) - - def publish_comparison_tables(): - for rev1, rev2 in itertools.combinations(self._revisions, 2): - outfile = os.path.join( - self.eval_dir, - "%s-%s-%s-compare.html" % (self.name, rev1, rev2)) - subprocess.call(["publish", outfile]) - - self.add_step("make-comparison-tables", make_comparison_tables) - self.add_step( - "publish-comparison-tables", publish_comparison_tables) - - def add_scatter_plot_step(self, relative=False, attributes=None): - """Add step creating (relative) scatter plots for all revision pairs. - - Create a scatter plot for each combination of attribute, - configuration and revisions pair. If *attributes* is not - specified, a list of common scatter plot attributes is used. - For portfolios all attributes except "cost", "coverage" and - "plan_length" will be ignored. 
:: - - exp.add_scatter_plot_step(attributes=["expansions"]) - - """ - if relative: - report_class = RelativeScatterPlotReport - scatter_dir = os.path.join(self.eval_dir, "scatter-relative") - step_name = "make-relative-scatter-plots" - else: - report_class = ScatterPlotReport - scatter_dir = os.path.join(self.eval_dir, "scatter-absolute") - step_name = "make-absolute-scatter-plots" - if attributes is None: - attributes = self.DEFAULT_SCATTER_PLOT_ATTRIBUTES - - def make_scatter_plot(config_nick, rev1, rev2, attribute): - name = "-".join([self.name, rev1, rev2, attribute, config_nick]) - print "Make scatter plot for", name - algo1 = "{}-{}".format(rev1, config_nick) - algo2 = "{}-{}".format(rev2, config_nick) - report = report_class( - filter_config=[algo1, algo2], - attributes=[attribute], - get_category=lambda run1, run2: run1["domain"], - legend_location=(1.3, 0.5)) - report( - self.eval_dir, - os.path.join(scatter_dir, rev1 + "-" + rev2, name)) - - def make_scatter_plots(): - for config in self._configs: - for rev1, rev2 in itertools.combinations(self._revisions, 2): - for attribute in self.get_supported_attributes( - config.nick, attributes): - make_scatter_plot(config.nick, rev1, rev2, attribute) - - self.add_step(step_name, make_scatter_plots) diff --git a/experiments/issue704/relativescatter.py b/experiments/issue704/relativescatter.py deleted file mode 100644 index f74cb6e721..0000000000 --- a/experiments/issue704/relativescatter.py +++ /dev/null @@ -1,105 +0,0 @@ -# -*- coding: utf-8 -*- - -from collections import defaultdict - -from matplotlib import ticker - -from downward.reports.scatter import ScatterPlotReport -from downward.reports.plot import PlotReport, Matplotlib, MatplotlibPlot - - -# TODO: handle outliers - -# TODO: this is mostly copied from ScatterMatplotlib (scatter.py) -class RelativeScatterMatplotlib(Matplotlib): - @classmethod - def _plot(cls, report, axes, categories, styles): - # Display grid - axes.grid(b=True, linestyle='-', 
color='0.75') - - has_points = False - # Generate the scatter plots - for category, coords in sorted(categories.items()): - X, Y = zip(*coords) - axes.scatter(X, Y, s=42, label=category, **styles[category]) - if X and Y: - has_points = True - - if report.xscale == 'linear' or report.yscale == 'linear': - plot_size = report.missing_val * 1.01 - else: - plot_size = report.missing_val * 1.25 - - # make 5 ticks above and below 1 - yticks = [] - tick_step = report.ylim_top**(1/5.0) - for i in xrange(-5, 6): - yticks.append(tick_step**i) - axes.set_yticks(yticks) - axes.get_yaxis().set_major_formatter(ticker.ScalarFormatter()) - - axes.set_xlim(report.xlim_left or -1, report.xlim_right or plot_size) - axes.set_ylim(report.ylim_bottom or -1, report.ylim_top or plot_size) - - for axis in [axes.xaxis, axes.yaxis]: - MatplotlibPlot.change_axis_formatter( - axis, - report.missing_val if report.show_missing else None) - return has_points - - -class RelativeScatterPlotReport(ScatterPlotReport): - """ - Generate a scatter plot that shows a relative comparison of two - algorithms with regard to the given attribute. The attribute value - of algorithm 1 is shown on the x-axis and the relation to the value - of algorithm 2 on the y-axis. - """ - - def __init__(self, show_missing=True, get_category=None, **kwargs): - ScatterPlotReport.__init__(self, show_missing, get_category, **kwargs) - if self.output_format == 'tex': - raise "not supported" - else: - self.writer = RelativeScatterMatplotlib - - def _fill_categories(self, runs): - # We discard the *runs* parameter. 
- # Map category names to value tuples - categories = defaultdict(list) - self.ylim_bottom = 2 - self.ylim_top = 0.5 - self.xlim_left = float("inf") - for (domain, problem), runs in self.problem_runs.items(): - if len(runs) != 2: - continue - run1, run2 = runs - assert (run1['algorithm'] == self.algorithms[0] and - run2['algorithm'] == self.algorithms[1]) - val1 = run1.get(self.attribute) - val2 = run2.get(self.attribute) - if val1 is None or val2 is None: - continue - category = self.get_category(run1, run2) - assert val1 > 0, (domain, problem, self.algorithms[0], val1) - assert val2 > 0, (domain, problem, self.algorithms[1], val2) - x = val1 - y = val2 / float(val1) - - categories[category].append((x, y)) - - self.ylim_top = max(self.ylim_top, y) - self.ylim_bottom = min(self.ylim_bottom, y) - self.xlim_left = min(self.xlim_left, x) - - # center around 1 - if self.ylim_bottom < 1: - self.ylim_top = max(self.ylim_top, 1 / float(self.ylim_bottom)) - if self.ylim_top > 1: - self.ylim_bottom = min(self.ylim_bottom, 1 / float(self.ylim_top)) - return categories - - def _set_scales(self, xscale, yscale): - # ScatterPlot uses log-scaling on the x-axis by default. - PlotReport._set_scales( - self, xscale or self.attribute.scale or 'log', 'log') diff --git a/experiments/issue704/suites.py b/experiments/issue704/suites.py deleted file mode 100644 index 4615212cfd..0000000000 --- a/experiments/issue704/suites.py +++ /dev/null @@ -1,350 +0,0 @@ -#! /usr/bin/env python -# -*- coding: utf-8 -*- - -import argparse -import textwrap - - -HELP = "Convert suite name to list of domains or tasks." 
- - -def suite_alternative_formulations(): - return ['airport-adl', 'no-mprime', 'no-mystery'] - - -def suite_ipc98_to_ipc04_adl(): - return [ - 'assembly', 'miconic-fulladl', 'miconic-simpleadl', - 'optical-telegraphs', 'philosophers', 'psr-large', - 'psr-middle', 'schedule', - ] - - -def suite_ipc98_to_ipc04_strips(): - return [ - 'airport', 'blocks', 'depot', 'driverlog', 'freecell', 'grid', - 'gripper', 'logistics00', 'logistics98', 'miconic', 'movie', - 'mprime', 'mystery', 'pipesworld-notankage', 'psr-small', - 'satellite', 'zenotravel', - ] - - -def suite_ipc98_to_ipc04(): - # All IPC1-4 domains, including the trivial Movie. - return sorted(suite_ipc98_to_ipc04_adl() + suite_ipc98_to_ipc04_strips()) - - -def suite_ipc06_adl(): - return [ - 'openstacks', - 'pathways', - 'trucks', - ] - - -def suite_ipc06_strips_compilations(): - return [ - 'openstacks-strips', - 'pathways-noneg', - 'trucks-strips', - ] - - -def suite_ipc06_strips(): - return [ - 'pipesworld-tankage', - 'rovers', - 'storage', - 'tpp', - ] - - -def suite_ipc06(): - return sorted(suite_ipc06_adl() + suite_ipc06_strips()) - - -def suite_ipc08_common_strips(): - return [ - 'parcprinter-08-strips', - 'pegsol-08-strips', - 'scanalyzer-08-strips', - ] - - -def suite_ipc08_opt_adl(): - return ['openstacks-opt08-adl'] - - -def suite_ipc08_opt_strips(): - return sorted(suite_ipc08_common_strips() + [ - 'elevators-opt08-strips', - 'openstacks-opt08-strips', - 'sokoban-opt08-strips', - 'transport-opt08-strips', - 'woodworking-opt08-strips', - ]) - - -def suite_ipc08_opt(): - return sorted(suite_ipc08_opt_strips() + suite_ipc08_opt_adl()) - - -def suite_ipc08_sat_adl(): - return ['openstacks-sat08-adl'] - - -def suite_ipc08_sat_strips(): - return sorted(suite_ipc08_common_strips() + [ - # Note: cyber-security is missing. 
- 'elevators-sat08-strips', - 'openstacks-sat08-strips', - 'sokoban-sat08-strips', - 'transport-sat08-strips', - 'woodworking-sat08-strips', - ]) - - -def suite_ipc08_sat(): - return sorted(suite_ipc08_sat_strips() + suite_ipc08_sat_adl()) - - -def suite_ipc08(): - return sorted(set(suite_ipc08_opt() + suite_ipc08_sat())) - - -def suite_ipc11_opt(): - return [ - 'barman-opt11-strips', - 'elevators-opt11-strips', - 'floortile-opt11-strips', - 'nomystery-opt11-strips', - 'openstacks-opt11-strips', - 'parcprinter-opt11-strips', - 'parking-opt11-strips', - 'pegsol-opt11-strips', - 'scanalyzer-opt11-strips', - 'sokoban-opt11-strips', - 'tidybot-opt11-strips', - 'transport-opt11-strips', - 'visitall-opt11-strips', - 'woodworking-opt11-strips', - ] - - -def suite_ipc11_sat(): - return [ - 'barman-sat11-strips', - 'elevators-sat11-strips', - 'floortile-sat11-strips', - 'nomystery-sat11-strips', - 'openstacks-sat11-strips', - 'parcprinter-sat11-strips', - 'parking-sat11-strips', - 'pegsol-sat11-strips', - 'scanalyzer-sat11-strips', - 'sokoban-sat11-strips', - 'tidybot-sat11-strips', - 'transport-sat11-strips', - 'visitall-sat11-strips', - 'woodworking-sat11-strips', - ] - - -def suite_ipc11(): - return sorted(suite_ipc11_opt() + suite_ipc11_sat()) - - -def suite_ipc14_agl_adl(): - return [ - 'cavediving-14-adl', - 'citycar-sat14-adl', - 'maintenance-sat14-adl', - ] - - -def suite_ipc14_agl_strips(): - return [ - 'barman-sat14-strips', - 'childsnack-sat14-strips', - 'floortile-sat14-strips', - 'ged-sat14-strips', - 'hiking-agl14-strips', - 'openstacks-agl14-strips', - 'parking-sat14-strips', - 'tetris-sat14-strips', - 'thoughtful-sat14-strips', - 'transport-sat14-strips', - 'visitall-sat14-strips', - ] - - -def suite_ipc14_agl(): - return sorted(suite_ipc14_agl_adl() + suite_ipc14_agl_strips()) - - -def suite_ipc14_mco_adl(): - return [ - 'cavediving-14-adl', - 'citycar-sat14-adl', - 'maintenance-sat14-adl', - ] - - -def suite_ipc14_mco_strips(): - return [ - 
'barman-mco14-strips', - 'childsnack-sat14-strips', - 'floortile-sat14-strips', - 'ged-sat14-strips', - 'hiking-sat14-strips', - 'openstacks-sat14-strips', - 'parking-sat14-strips', - 'tetris-sat14-strips', - 'thoughtful-mco14-strips', - 'transport-sat14-strips', - 'visitall-sat14-strips', - ] - - -def suite_ipc14_mco(): - return sorted(suite_ipc14_mco_adl() + suite_ipc14_mco_strips()) - - -def suite_ipc14_opt_adl(): - return [ - 'cavediving-14-adl', - 'citycar-opt14-adl', - 'maintenance-opt14-adl', - ] - - -def suite_ipc14_opt_strips(): - return [ - 'barman-opt14-strips', - 'childsnack-opt14-strips', - 'floortile-opt14-strips', - 'ged-opt14-strips', - 'hiking-opt14-strips', - 'openstacks-opt14-strips', - 'parking-opt14-strips', - 'tetris-opt14-strips', - 'tidybot-opt14-strips', - 'transport-opt14-strips', - 'visitall-opt14-strips', - ] - - -def suite_ipc14_opt(): - return sorted(suite_ipc14_opt_adl() + suite_ipc14_opt_strips()) - - -def suite_ipc14_sat_adl(): - return [ - 'cavediving-14-adl', - 'citycar-sat14-adl', - 'maintenance-sat14-adl', - ] - - -def suite_ipc14_sat_strips(): - return [ - 'barman-sat14-strips', - 'childsnack-sat14-strips', - 'floortile-sat14-strips', - 'ged-sat14-strips', - 'hiking-sat14-strips', - 'openstacks-sat14-strips', - 'parking-sat14-strips', - 'tetris-sat14-strips', - 'thoughtful-sat14-strips', - 'transport-sat14-strips', - 'visitall-sat14-strips', - ] - - -def suite_ipc14_sat(): - return sorted(suite_ipc14_sat_adl() + suite_ipc14_sat_strips()) - - -def suite_ipc14(): - return sorted(set( - suite_ipc14_agl() + suite_ipc14_mco() + - suite_ipc14_opt() + suite_ipc14_sat())) - - -def suite_unsolvable(): - return sorted( - ['mystery:prob%02d.pddl' % index - for index in [4, 5, 7, 8, 12, 16, 18, 21, 22, 23, 24]] + - ['miconic-fulladl:f21-3.pddl', 'miconic-fulladl:f30-2.pddl']) - - -def suite_optimal_adl(): - return sorted( - suite_ipc98_to_ipc04_adl() + suite_ipc06_adl() + - suite_ipc08_opt_adl() + suite_ipc14_opt_adl()) - - -def 
suite_optimal_strips(): - return sorted( - suite_ipc98_to_ipc04_strips() + suite_ipc06_strips() + - suite_ipc06_strips_compilations() + suite_ipc08_opt_strips() + - suite_ipc11_opt() + suite_ipc14_opt_strips()) - - -def suite_optimal(): - return sorted(suite_optimal_adl() + suite_optimal_strips()) - - -def suite_satisficing_adl(): - return sorted( - suite_ipc98_to_ipc04_adl() + suite_ipc06_adl() + - suite_ipc08_sat_adl() + suite_ipc14_sat_adl()) - - -def suite_satisficing_strips(): - return sorted( - suite_ipc98_to_ipc04_strips() + suite_ipc06_strips() + - suite_ipc06_strips_compilations() + suite_ipc08_sat_strips() + - suite_ipc11_sat() + suite_ipc14_sat_strips()) - - -def suite_satisficing(): - return sorted(suite_satisficing_adl() + suite_satisficing_strips()) - - -def suite_all(): - return sorted( - suite_ipc98_to_ipc04() + suite_ipc06() + - suite_ipc06_strips_compilations() + suite_ipc08() + - suite_ipc11() + suite_ipc14() + suite_alternative_formulations()) - - -def parse_args(): - parser = argparse.ArgumentParser() - parser.add_argument("suite", help="suite name") - return parser.parse_args() - - -def main(): - prefix = "suite_" - suite_names = [ - name[len(prefix):] for name in sorted(globals().keys()) - if name.startswith(prefix)] - parser = argparse.ArgumentParser(description=HELP) - parser.add_argument("suite", choices=suite_names, help="suite name") - parser.add_argument( - "--width", default=72, type=int, - help="output line width (default: %(default)s). Use 1 for single " - "column.") - args = parser.parse_args() - suite_func = globals()[prefix + args.suite] - print(textwrap.fill( - str(suite_func()), - width=args.width, - break_long_words=False, - break_on_hyphens=False)) - - -if __name__ == "__main__": - main() diff --git a/experiments/issue704/v1.py b/experiments/issue704/v1.py deleted file mode 100755 index 113c07adcf..0000000000 --- a/experiments/issue704/v1.py +++ /dev/null @@ -1,49 +0,0 @@ -#! /usr/bin/env python -# -*- coding: utf-8 -*- - -#! 
/usr/bin/env python -# -*- coding: utf-8 -*- - -import os - -from lab.environments import LocalEnvironment, MaiaEnvironment - -import common_setup -from common_setup import IssueConfig, IssueExperiment -from relativescatter import RelativeScatterPlotReport - -DIR = os.path.dirname(os.path.abspath(__file__)) -BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"] -REVISIONS = ["issue704-base", "issue704-v1"] -CONFIGS = [ - IssueConfig('astar-blind-ssec', ['--search', 'astar(blind(), pruning=stubborn_sets_ec())']) -] -SUITE = common_setup.DEFAULT_OPTIMAL_SUITE -ENVIRONMENT = MaiaEnvironment( - priority=0, email="florian.pommerening@unibas.ch") - -if common_setup.is_test_run(): - SUITE = IssueExperiment.DEFAULT_TEST_SUITE - ENVIRONMENT = LocalEnvironment(processes=1) - -exp = IssueExperiment( - revisions=REVISIONS, - configs=CONFIGS, - environment=ENVIRONMENT, -) -exp.add_suite(BENCHMARKS_DIR, SUITE) -exp.add_absolute_report_step() -exp.add_comparison_table_step() - -for attribute in ["total_time"]: - for config in CONFIGS: - exp.add_report( - RelativeScatterPlotReport( - attributes=[attribute], - filter_algorithm=["{}-{}".format(rev, config.nick) for rev in REVISIONS], - get_category=lambda run1, run2: run1.get("domain"), - ), - outfile="{}-{}-{}-{}-{}.png".format(exp.name, attribute, config.nick, *REVISIONS) - ) - -exp.run_steps() diff --git a/experiments/issue705/common_setup.py b/experiments/issue705/common_setup.py deleted file mode 100644 index 5d2b40b61c..0000000000 --- a/experiments/issue705/common_setup.py +++ /dev/null @@ -1,385 +0,0 @@ -# -*- coding: utf-8 -*- - -import itertools -import os -import platform -import subprocess -import sys - -from lab.experiment import ARGPARSER -from lab import tools - -from downward.experiment import FastDownwardExperiment -from downward.reports.absolute import AbsoluteReport -from downward.reports.compare import ComparativeReport -from downward.reports.scatter import ScatterPlotReport - -from relativescatter import 
RelativeScatterPlotReport - - -def parse_args(): - ARGPARSER.add_argument( - "--test", - choices=["yes", "no", "auto"], - default="auto", - dest="test_run", - help="test experiment locally on a small suite if --test=yes or " - "--test=auto and we are not on a cluster") - return ARGPARSER.parse_args() - -ARGS = parse_args() - - -DEFAULT_OPTIMAL_SUITE = [ - 'airport', 'barman-opt11-strips', 'barman-opt14-strips', 'blocks', - 'childsnack-opt14-strips', 'depot', 'driverlog', - 'elevators-opt08-strips', 'elevators-opt11-strips', - 'floortile-opt11-strips', 'floortile-opt14-strips', 'freecell', - 'ged-opt14-strips', 'grid', 'gripper', 'hiking-opt14-strips', - 'logistics00', 'logistics98', 'miconic', 'movie', 'mprime', - 'mystery', 'nomystery-opt11-strips', 'openstacks-opt08-strips', - 'openstacks-opt11-strips', 'openstacks-opt14-strips', - 'openstacks-strips', 'parcprinter-08-strips', - 'parcprinter-opt11-strips', 'parking-opt11-strips', - 'parking-opt14-strips', 'pathways-noneg', 'pegsol-08-strips', - 'pegsol-opt11-strips', 'pipesworld-notankage', - 'pipesworld-tankage', 'psr-small', 'rovers', 'satellite', - 'scanalyzer-08-strips', 'scanalyzer-opt11-strips', - 'sokoban-opt08-strips', 'sokoban-opt11-strips', 'storage', - 'tetris-opt14-strips', 'tidybot-opt11-strips', - 'tidybot-opt14-strips', 'tpp', 'transport-opt08-strips', - 'transport-opt11-strips', 'transport-opt14-strips', - 'trucks-strips', 'visitall-opt11-strips', 'visitall-opt14-strips', - 'woodworking-opt08-strips', 'woodworking-opt11-strips', - 'zenotravel'] - -DEFAULT_SATISFICING_SUITE = [ - 'airport', 'assembly', 'barman-sat11-strips', - 'barman-sat14-strips', 'blocks', 'cavediving-14-adl', - 'childsnack-sat14-strips', 'citycar-sat14-adl', 'depot', - 'driverlog', 'elevators-sat08-strips', 'elevators-sat11-strips', - 'floortile-sat11-strips', 'floortile-sat14-strips', 'freecell', - 'ged-sat14-strips', 'grid', 'gripper', 'hiking-sat14-strips', - 'logistics00', 'logistics98', 'maintenance-sat14-adl', 'miconic', 
- 'miconic-fulladl', 'miconic-simpleadl', 'movie', 'mprime', - 'mystery', 'nomystery-sat11-strips', 'openstacks', - 'openstacks-sat08-adl', 'openstacks-sat08-strips', - 'openstacks-sat11-strips', 'openstacks-sat14-strips', - 'openstacks-strips', 'optical-telegraphs', 'parcprinter-08-strips', - 'parcprinter-sat11-strips', 'parking-sat11-strips', - 'parking-sat14-strips', 'pathways', 'pathways-noneg', - 'pegsol-08-strips', 'pegsol-sat11-strips', 'philosophers', - 'pipesworld-notankage', 'pipesworld-tankage', 'psr-large', - 'psr-middle', 'psr-small', 'rovers', 'satellite', - 'scanalyzer-08-strips', 'scanalyzer-sat11-strips', 'schedule', - 'sokoban-sat08-strips', 'sokoban-sat11-strips', 'storage', - 'tetris-sat14-strips', 'thoughtful-sat14-strips', - 'tidybot-sat11-strips', 'tpp', 'transport-sat08-strips', - 'transport-sat11-strips', 'transport-sat14-strips', 'trucks', - 'trucks-strips', 'visitall-sat11-strips', 'visitall-sat14-strips', - 'woodworking-sat08-strips', 'woodworking-sat11-strips', - 'zenotravel'] - - -def get_script(): - """Get file name of main script.""" - return tools.get_script_path() - - -def get_script_dir(): - """Get directory of main script. - - Usually a relative directory (depends on how it was called by the user.)""" - return os.path.dirname(get_script()) - - -def get_experiment_name(): - """Get name for experiment. - - Derived from the absolute filename of the main script, e.g. - "/ham/spam/eggs.py" => "spam-eggs".""" - script = os.path.abspath(get_script()) - script_dir = os.path.basename(os.path.dirname(script)) - script_base = os.path.splitext(os.path.basename(script))[0] - return "%s-%s" % (script_dir, script_base) - - -def get_data_dir(): - """Get data dir for the experiment. - - This is the subdirectory "data" of the directory containing - the main script.""" - return os.path.join(get_script_dir(), "data", get_experiment_name()) - - -def get_repo_base(): - """Get base directory of the repository, as an absolute path. 
- - Search upwards in the directory tree from the main script until a - directory with a subdirectory named ".hg" is found. - - Abort if the repo base cannot be found.""" - path = os.path.abspath(get_script_dir()) - while os.path.dirname(path) != path: - if os.path.exists(os.path.join(path, ".hg")): - return path - path = os.path.dirname(path) - sys.exit("repo base could not be found") - - -def is_running_on_cluster(): - node = platform.node() - return ( - "cluster" in node or - node.startswith("gkigrid") or - node in ["habakuk", "turtur"]) - - -def is_test_run(): - return ARGS.test_run == "yes" or ( - ARGS.test_run == "auto" and not is_running_on_cluster()) - - -def get_algo_nick(revision, config_nick): - return "{revision}-{config_nick}".format(**locals()) - - -class IssueConfig(object): - """Hold information about a planner configuration. - - See FastDownwardExperiment.add_algorithm() for documentation of the - constructor's options. - - """ - def __init__(self, nick, component_options, - build_options=None, driver_options=None): - self.nick = nick - self.component_options = component_options - self.build_options = build_options - self.driver_options = driver_options - - -class IssueExperiment(FastDownwardExperiment): - """Subclass of FastDownwardExperiment with some convenience features.""" - - DEFAULT_TEST_SUITE = ["gripper:prob01.pddl"] - - DEFAULT_TABLE_ATTRIBUTES = [ - "cost", - "coverage", - "error", - "evaluations", - "expansions", - "expansions_until_last_jump", - "generated", - "memory", - "quality", - "run_dir", - "score_evaluations", - "score_expansions", - "score_generated", - "score_memory", - "score_search_time", - "score_total_time", - "search_time", - "total_time", - ] - - DEFAULT_SCATTER_PLOT_ATTRIBUTES = [ - "evaluations", - "expansions", - "expansions_until_last_jump", - "initial_h_value", - "memory", - "search_time", - "total_time", - ] - - PORTFOLIO_ATTRIBUTES = [ - "cost", - "coverage", - "error", - "plan_length", - "run_dir", - ] - - def 
__init__(self, revisions=None, configs=None, path=None, **kwargs): - """ - - You can either specify both *revisions* and *configs* or none - of them. If they are omitted, you will need to call - exp.add_algorithm() manually. - - If *revisions* is given, it must be a non-empty list of - revision identifiers, which specify which planner versions to - use in the experiment. The same versions are used for - translator, preprocessor and search. :: - - IssueExperiment(revisions=["issue123", "4b3d581643"], ...) - - If *configs* is given, it must be a non-empty list of - IssueConfig objects. :: - - IssueExperiment(..., configs=[ - IssueConfig("ff", ["--search", "eager_greedy(ff())"]), - IssueConfig( - "lama", [], - driver_options=["--alias", "seq-sat-lama-2011"]), - ]) - - If *path* is specified, it must be the path to where the - experiment should be built (e.g. - /home/john/experiments/issue123/exp01/). If omitted, the - experiment path is derived automatically from the main - script's filename. 
Example:: - - script = experiments/issue123/exp01.py --> - path = experiments/issue123/data/issue123-exp01/ - - """ - - path = path or get_data_dir() - - FastDownwardExperiment.__init__(self, path=path, **kwargs) - - if (revisions and not configs) or (not revisions and configs): - raise ValueError( - "please provide either both or none of revisions and configs") - - for rev in revisions: - for config in configs: - self.add_algorithm( - get_algo_nick(rev, config.nick), - get_repo_base(), - rev, - config.component_options, - build_options=config.build_options, - driver_options=config.driver_options) - - self._revisions = revisions - self._configs = configs - - @classmethod - def _is_portfolio(cls, config_nick): - return "fdss" in config_nick - - @classmethod - def get_supported_attributes(cls, config_nick, attributes): - if cls._is_portfolio(config_nick): - return [attr for attr in attributes - if attr in cls.PORTFOLIO_ATTRIBUTES] - return attributes - - def add_absolute_report_step(self, **kwargs): - """Add step that makes an absolute report. - - Absolute reports are useful for experiments that don't compare - revisions. - - The report is written to the experiment evaluation directory. - - All *kwargs* will be passed to the AbsoluteReport class. If the - keyword argument *attributes* is not specified, a default list - of attributes is used. :: - - exp.add_absolute_report_step(attributes=["coverage"]) - - """ - kwargs.setdefault("attributes", self.DEFAULT_TABLE_ATTRIBUTES) - report = AbsoluteReport(**kwargs) - outfile = os.path.join( - self.eval_dir, - get_experiment_name() + "." + report.output_format) - self.add_report(report, outfile=outfile) - self.add_step( - 'publish-absolute-report', subprocess.call, ['publish', outfile]) - - def add_comparison_table_step(self, **kwargs): - """Add a step that makes pairwise revision comparisons. - - Create comparative reports for all pairs of Fast Downward - revisions. 
Each report pairs up the runs of the same config and - lists the two absolute attribute values and their difference - for all attributes in kwargs["attributes"]. - - All *kwargs* will be passed to the CompareConfigsReport class. - If the keyword argument *attributes* is not specified, a - default list of attributes is used. :: - - exp.add_comparison_table_step(attributes=["coverage"]) - - """ - kwargs.setdefault("attributes", self.DEFAULT_TABLE_ATTRIBUTES) - - def make_comparison_tables(): - for rev1, rev2 in itertools.combinations(self._revisions, 2): - compared_configs = [] - for config in self._configs: - config_nick = config.nick - compared_configs.append( - ("%s-%s" % (rev1, config_nick), - "%s-%s" % (rev2, config_nick), - "Diff (%s)" % config_nick)) - report = ComparativeReport(compared_configs, **kwargs) - outfile = os.path.join( - self.eval_dir, - "%s-%s-%s-compare.%s" % ( - self.name, rev1, rev2, report.output_format)) - report(self.eval_dir, outfile) - - def publish_comparison_tables(): - for rev1, rev2 in itertools.combinations(self._revisions, 2): - outfile = os.path.join( - self.eval_dir, - "%s-%s-%s-compare.html" % (self.name, rev1, rev2)) - subprocess.call(["publish", outfile]) - - self.add_step("make-comparison-tables", make_comparison_tables) - self.add_step( - "publish-comparison-tables", publish_comparison_tables) - - def add_scatter_plot_step(self, relative=False, attributes=None): - """Add step creating (relative) scatter plots for all revision pairs. - - Create a scatter plot for each combination of attribute, - configuration and revisions pair. If *attributes* is not - specified, a list of common scatter plot attributes is used. - For portfolios all attributes except "cost", "coverage" and - "plan_length" will be ignored. 
:: - - exp.add_scatter_plot_step(attributes=["expansions"]) - - """ - if relative: - report_class = RelativeScatterPlotReport - scatter_dir = os.path.join(self.eval_dir, "scatter-relative") - step_name = "make-relative-scatter-plots" - else: - report_class = ScatterPlotReport - scatter_dir = os.path.join(self.eval_dir, "scatter-absolute") - step_name = "make-absolute-scatter-plots" - if attributes is None: - attributes = self.DEFAULT_SCATTER_PLOT_ATTRIBUTES - - def make_scatter_plot(config_nick, rev1, rev2, attribute): - name = "-".join([self.name, rev1, rev2, attribute, config_nick]) - print "Make scatter plot for", name - algo1 = "{}-{}".format(rev1, config_nick) - algo2 = "{}-{}".format(rev2, config_nick) - report = report_class( - filter_config=[algo1, algo2], - attributes=[attribute], - get_category=lambda run1, run2: run1["domain"], - legend_location=(1.3, 0.5)) - report( - self.eval_dir, - os.path.join(scatter_dir, rev1 + "-" + rev2, name)) - - def make_scatter_plots(): - for config in self._configs: - for rev1, rev2 in itertools.combinations(self._revisions, 2): - for attribute in self.get_supported_attributes( - config.nick, attributes): - make_scatter_plot(config.nick, rev1, rev2, attribute) - - self.add_step(step_name, make_scatter_plots) diff --git a/experiments/issue705/csv_report.py b/experiments/issue705/csv_report.py deleted file mode 100644 index 6696c4504b..0000000000 --- a/experiments/issue705/csv_report.py +++ /dev/null @@ -1,11 +0,0 @@ -from downward.reports import PlanningReport - -class CSVReport(PlanningReport): - def get_text(self): - sep = " " - lines = [sep.join(self.attributes)] - for runs in self.problem_runs.values(): - for run in runs: - lines.append(sep.join([str(run.get(attribute, "nan")) - for attribute in self.attributes])) - return "\n".join(lines) diff --git a/experiments/issue705/relativescatter.py b/experiments/issue705/relativescatter.py deleted file mode 100644 index f74cb6e721..0000000000 --- 
a/experiments/issue705/relativescatter.py +++ /dev/null @@ -1,105 +0,0 @@ -# -*- coding: utf-8 -*- - -from collections import defaultdict - -from matplotlib import ticker - -from downward.reports.scatter import ScatterPlotReport -from downward.reports.plot import PlotReport, Matplotlib, MatplotlibPlot - - -# TODO: handle outliers - -# TODO: this is mostly copied from ScatterMatplotlib (scatter.py) -class RelativeScatterMatplotlib(Matplotlib): - @classmethod - def _plot(cls, report, axes, categories, styles): - # Display grid - axes.grid(b=True, linestyle='-', color='0.75') - - has_points = False - # Generate the scatter plots - for category, coords in sorted(categories.items()): - X, Y = zip(*coords) - axes.scatter(X, Y, s=42, label=category, **styles[category]) - if X and Y: - has_points = True - - if report.xscale == 'linear' or report.yscale == 'linear': - plot_size = report.missing_val * 1.01 - else: - plot_size = report.missing_val * 1.25 - - # make 5 ticks above and below 1 - yticks = [] - tick_step = report.ylim_top**(1/5.0) - for i in xrange(-5, 6): - yticks.append(tick_step**i) - axes.set_yticks(yticks) - axes.get_yaxis().set_major_formatter(ticker.ScalarFormatter()) - - axes.set_xlim(report.xlim_left or -1, report.xlim_right or plot_size) - axes.set_ylim(report.ylim_bottom or -1, report.ylim_top or plot_size) - - for axis in [axes.xaxis, axes.yaxis]: - MatplotlibPlot.change_axis_formatter( - axis, - report.missing_val if report.show_missing else None) - return has_points - - -class RelativeScatterPlotReport(ScatterPlotReport): - """ - Generate a scatter plot that shows a relative comparison of two - algorithms with regard to the given attribute. The attribute value - of algorithm 1 is shown on the x-axis and the relation to the value - of algorithm 2 on the y-axis. 
- """ - - def __init__(self, show_missing=True, get_category=None, **kwargs): - ScatterPlotReport.__init__(self, show_missing, get_category, **kwargs) - if self.output_format == 'tex': - raise "not supported" - else: - self.writer = RelativeScatterMatplotlib - - def _fill_categories(self, runs): - # We discard the *runs* parameter. - # Map category names to value tuples - categories = defaultdict(list) - self.ylim_bottom = 2 - self.ylim_top = 0.5 - self.xlim_left = float("inf") - for (domain, problem), runs in self.problem_runs.items(): - if len(runs) != 2: - continue - run1, run2 = runs - assert (run1['algorithm'] == self.algorithms[0] and - run2['algorithm'] == self.algorithms[1]) - val1 = run1.get(self.attribute) - val2 = run2.get(self.attribute) - if val1 is None or val2 is None: - continue - category = self.get_category(run1, run2) - assert val1 > 0, (domain, problem, self.algorithms[0], val1) - assert val2 > 0, (domain, problem, self.algorithms[1], val2) - x = val1 - y = val2 / float(val1) - - categories[category].append((x, y)) - - self.ylim_top = max(self.ylim_top, y) - self.ylim_bottom = min(self.ylim_bottom, y) - self.xlim_left = min(self.xlim_left, x) - - # center around 1 - if self.ylim_bottom < 1: - self.ylim_top = max(self.ylim_top, 1 / float(self.ylim_bottom)) - if self.ylim_top > 1: - self.ylim_bottom = min(self.ylim_bottom, 1 / float(self.ylim_top)) - return categories - - def _set_scales(self, xscale, yscale): - # ScatterPlot uses log-scaling on the x-axis by default. - PlotReport._set_scales( - self, xscale or self.attribute.scale or 'log', 'log') diff --git a/experiments/issue705/sg-parser.py b/experiments/issue705/sg-parser.py deleted file mode 100755 index 1e4e3db4bb..0000000000 --- a/experiments/issue705/sg-parser.py +++ /dev/null @@ -1,41 +0,0 @@ -#! 
/usr/bin/env python - -from lab.parser import Parser - -def add_absolute_and_relative(parser, attribute, pattern): - parser.add_pattern(attribute, pattern + ' (\d+) .+', required=False, type=int) - parser.add_pattern(attribute + '_rel', pattern + ' \d+ \((.+)\)', required=False, type=float) - - -parser = Parser() - -parser.add_pattern('sg_construction_time', 'SG construction time: (.+)s', required=False, type=float) -parser.add_pattern('sg_peak_mem_diff', 'SG construction peak memory difference: (\d+)', required=False, type=int) -parser.add_pattern('sg_size_estimate_total', 'SG size estimates: total: (\d+)', required=False, type=int) -add_absolute_and_relative(parser, 'sg_size_estimate_overhead', 'SG size estimates: object overhead:') -add_absolute_and_relative(parser, 'sg_size_estimate_operators', 'SG size estimates: operators:') -add_absolute_and_relative(parser, 'sg_size_estimate_switch_var', 'SG size estimates: switch var:') -add_absolute_and_relative(parser, 'sg_size_estimate_value_generator', 'SG size estimates: generator for value:') -add_absolute_and_relative(parser, 'sg_size_estimate_default_generator', 'SG size estimates: default generator:') -add_absolute_and_relative(parser, 'sg_size_estimate_next_generator', 'SG size estimates: next generator:') - -add_absolute_and_relative(parser, 'sg_counts_immediates', 'SG object counts: immediates:') -add_absolute_and_relative(parser, 'sg_counts_forks', 'SG object counts: forks:') -add_absolute_and_relative(parser, 'sg_counts_switches', 'SG object counts: switches:') -add_absolute_and_relative(parser, 'sg_counts_leaves', 'SG object counts: leaves:') -add_absolute_and_relative(parser, 'sg_counts_empty', 'SG object counts: empty:') - -add_absolute_and_relative(parser, 'sg_counts_switch_empty', 'SG switch statistics: immediate ops empty:') -add_absolute_and_relative(parser, 'sg_counts_switch_single', 'SG switch statistics: single immediate op:') -add_absolute_and_relative(parser, 'sg_counts_switch_more', 'SG switch 
statistics: more immediate ops:') - -add_absolute_and_relative(parser, 'sg_counts_leaf_empty', 'SG leaf statistics: applicable ops empty:') -add_absolute_and_relative(parser, 'sg_counts_leaf_single', 'SG leaf statistics: single applicable op:') -add_absolute_and_relative(parser, 'sg_counts_leaf_more', 'SG leaf statistics: more applicable ops:') - -add_absolute_and_relative(parser, 'sg_counts_switch_vector_single', 'SG switch statistics: vector single:') -add_absolute_and_relative(parser, 'sg_counts_switch_vector_small', 'SG switch statistics: vector small:') -add_absolute_and_relative(parser, 'sg_counts_switch_vector_large', 'SG switch statistics: vector large:') -add_absolute_and_relative(parser, 'sg_counts_switch_vector_full', 'SG switch statistics: vector full:') - -parser.parse() diff --git a/experiments/issue705/v1.py b/experiments/issue705/v1.py deleted file mode 100755 index 022cdb96cb..0000000000 --- a/experiments/issue705/v1.py +++ /dev/null @@ -1,110 +0,0 @@ -#! /usr/bin/env python -# -*- coding: utf-8 -*- - -#! 
/usr/bin/env python -# -*- coding: utf-8 -*- - -import os - -from lab.environments import LocalEnvironment, MaiaEnvironment -from lab.reports import Attribute, arithmetic_mean, geometric_mean - -import common_setup -from common_setup import IssueConfig, IssueExperiment -from relativescatter import RelativeScatterPlotReport -from csv_report import CSVReport - -DIR = os.path.dirname(os.path.abspath(__file__)) -BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"] -REVISIONS = ["issue705-base", "issue705-v1", "issue705-v2", "issue705-v3"] -CONFIGS = [ - IssueConfig( - 'bounded-blind', - ['--search', 'astar(blind(), bound=0)'], - ) -] -SUITE = list(sorted(set(common_setup.DEFAULT_OPTIMAL_SUITE) | - set(common_setup.DEFAULT_SATISFICING_SUITE))) -ENVIRONMENT = MaiaEnvironment( - priority=0, email="florian.pommerening@unibas.ch") - -if common_setup.is_test_run(): - SUITE = IssueExperiment.DEFAULT_TEST_SUITE - ENVIRONMENT = LocalEnvironment(processes=1) - -exp = IssueExperiment( - revisions=REVISIONS, - configs=CONFIGS, - environment=ENVIRONMENT, -) - -exp.add_suite(BENCHMARKS_DIR, SUITE) -exp.add_resource('sg_parser', 'sg-parser.py', dest='sg-parser.py') -exp.add_command('sg-parser', ['{sg_parser}']) - -exp.add_absolute_report_step(attributes=[ - Attribute("sg_construction_time", functions=[arithmetic_mean], min_wins=True), - Attribute("sg_peak_mem_diff", functions=[arithmetic_mean], min_wins=True), - - Attribute("sg_counts_empty", functions=[arithmetic_mean], min_wins=True), - Attribute("sg_counts_leaf_empty", functions=[arithmetic_mean], min_wins=True), - Attribute("sg_counts_leaf_more", functions=[arithmetic_mean], min_wins=True), - Attribute("sg_counts_leaf_single", functions=[arithmetic_mean], min_wins=True), - Attribute("sg_counts_leaves", functions=[arithmetic_mean], min_wins=True), - Attribute("sg_counts_switch_empty", functions=[arithmetic_mean], min_wins=True), - Attribute("sg_counts_switch_more", functions=[arithmetic_mean], min_wins=True), - 
Attribute("sg_counts_switch_single", functions=[arithmetic_mean], min_wins=True), - Attribute("sg_counts_switches", functions=[arithmetic_mean], min_wins=True), - Attribute("sg_counts_forks", functions=[arithmetic_mean], min_wins=True), - Attribute("sg_counts_immediates", functions=[arithmetic_mean], min_wins=True), - - Attribute("sg_size_estimate_default_generator", functions=[arithmetic_mean], min_wins=True), - Attribute("sg_size_estimate_operators", functions=[arithmetic_mean], min_wins=True), - Attribute("sg_size_estimate_overhead", functions=[arithmetic_mean], min_wins=True), - Attribute("sg_size_estimate_switch_var", functions=[arithmetic_mean], min_wins=True), - Attribute("sg_size_estimate_total", functions=[arithmetic_mean], min_wins=True), - Attribute("sg_size_estimate_value_generator", functions=[arithmetic_mean], min_wins=True), - Attribute("sg_size_estimate_next_generator", functions=[arithmetic_mean], min_wins=True), - - Attribute("sg_counts_empty_rel", functions=[geometric_mean], min_wins=True), - Attribute("sg_counts_leaf_empty_rel", functions=[geometric_mean], min_wins=True), - Attribute("sg_counts_leaf_more_rel", functions=[geometric_mean], min_wins=True), - Attribute("sg_counts_leaf_single_rel", functions=[geometric_mean], min_wins=True), - Attribute("sg_counts_leaves_rel", functions=[geometric_mean], min_wins=True), - Attribute("sg_counts_switch_empty_rel", functions=[geometric_mean], min_wins=True), - Attribute("sg_counts_switch_more_rel", functions=[geometric_mean], min_wins=True), - Attribute("sg_counts_switch_single_rel", functions=[geometric_mean], min_wins=True), - Attribute("sg_counts_switches_rel", functions=[geometric_mean], min_wins=True), - Attribute("sg_counts_forks_rel", functions=[geometric_mean], min_wins=True), - Attribute("sg_counts_immediates_rel", functions=[geometric_mean], min_wins=True), - - Attribute("sg_size_estimate_default_generator_rel", functions=[geometric_mean], min_wins=True), - 
Attribute("sg_size_estimate_operators_rel", functions=[geometric_mean], min_wins=True), - Attribute("sg_size_estimate_overhead_rel", functions=[geometric_mean], min_wins=True), - Attribute("sg_size_estimate_switch_var_rel", functions=[geometric_mean], min_wins=True), - Attribute("sg_size_estimate_value_generator_rel", functions=[geometric_mean], min_wins=True), - Attribute("sg_size_estimate_next_generator_rel", functions=[geometric_mean], min_wins=True), - - "error", - "run_dir", -]) - -exp.add_report(CSVReport(attributes=["algorithm", "domain", "sg_*", "translator_task_size"]), outfile="csvreport.csv") - -def add_sg_peak_mem_diff_per_task_size(run): - mem = run.get("sg_peak_mem_diff") - size = run.get("translator_task_size") - if mem and size: - run["sg_peak_mem_diff_per_task_size"] = mem / float(size) - return run - -for rev1, rev2 in [("base", "v1"), ("base", "v2"), ("base", "v3")]: - exp.add_report(RelativeScatterPlotReport( - attributes=["sg_peak_mem_diff_per_task_size"], - filter=add_sg_peak_mem_diff_per_task_size, - filter_algorithm=["issue705-%s-bounded-blind" % rev1, "issue705-%s-bounded-blind" % rev2], - get_category=lambda r1, r2: r1["domain"], - ), - outfile="issue705-%s-%s.png" % (rev1, rev2)) - -exp.run_steps() diff --git a/experiments/issue705/v2.py b/experiments/issue705/v2.py deleted file mode 100755 index efecf4c61d..0000000000 --- a/experiments/issue705/v2.py +++ /dev/null @@ -1,120 +0,0 @@ -#! /usr/bin/env python -# -*- coding: utf-8 -*- - -#! 
/usr/bin/env python -# -*- coding: utf-8 -*- - -import os - -from lab.environments import LocalEnvironment, MaiaEnvironment -from lab.reports import Attribute, arithmetic_mean, geometric_mean - -import common_setup -from common_setup import IssueConfig, IssueExperiment -from relativescatter import RelativeScatterPlotReport -from csv_report import CSVReport - -DIR = os.path.dirname(os.path.abspath(__file__)) -BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"] -REVISIONS = ["issue705-base", "issue705-v3", "issue705-v4", "issue705-v5"] -CONFIGS = [ - IssueConfig( - 'bounded-blind', - ['--search', 'astar(blind(), bound=0)'], - ) -] -SUITE = list(sorted(set(common_setup.DEFAULT_OPTIMAL_SUITE) | - set(common_setup.DEFAULT_SATISFICING_SUITE))) -ENVIRONMENT = MaiaEnvironment( - priority=0, email="florian.pommerening@unibas.ch") - -if common_setup.is_test_run(): - SUITE = IssueExperiment.DEFAULT_TEST_SUITE - ENVIRONMENT = LocalEnvironment(processes=1) - -exp = IssueExperiment( - revisions=REVISIONS, - configs=CONFIGS, - environment=ENVIRONMENT, -) - -exp.add_suite(BENCHMARKS_DIR, SUITE) -exp.add_resource('sg_parser', 'sg-parser.py', dest='sg-parser.py') -exp.add_command('sg-parser', ['{sg_parser}']) - -exp.add_absolute_report_step(attributes=[ - Attribute("sg_construction_time", functions=[arithmetic_mean], min_wins=True), - Attribute("sg_peak_mem_diff", functions=[arithmetic_mean], min_wins=True), - - Attribute("sg_counts_empty", functions=[arithmetic_mean], min_wins=True), - Attribute("sg_counts_leaf_empty", functions=[arithmetic_mean], min_wins=True), - Attribute("sg_counts_leaf_more", functions=[arithmetic_mean], min_wins=True), - Attribute("sg_counts_leaf_single", functions=[arithmetic_mean], min_wins=True), - Attribute("sg_counts_leaves", functions=[arithmetic_mean], min_wins=True), - Attribute("sg_counts_switch_empty", functions=[arithmetic_mean], min_wins=True), - Attribute("sg_counts_switch_more", functions=[arithmetic_mean], min_wins=True), - 
Attribute("sg_counts_switch_single", functions=[arithmetic_mean], min_wins=True), - Attribute("sg_counts_switches", functions=[arithmetic_mean], min_wins=True), - Attribute("sg_counts_forks", functions=[arithmetic_mean], min_wins=True), - Attribute("sg_counts_immediates", functions=[arithmetic_mean], min_wins=True), - - Attribute("sg_size_estimate_default_generator", functions=[arithmetic_mean], min_wins=True), - Attribute("sg_size_estimate_operators", functions=[arithmetic_mean], min_wins=True), - Attribute("sg_size_estimate_overhead", functions=[arithmetic_mean], min_wins=True), - Attribute("sg_size_estimate_switch_var", functions=[arithmetic_mean], min_wins=True), - Attribute("sg_size_estimate_total", functions=[arithmetic_mean], min_wins=True), - Attribute("sg_size_estimate_value_generator", functions=[arithmetic_mean], min_wins=True), - Attribute("sg_size_estimate_next_generator", functions=[arithmetic_mean], min_wins=True), - - Attribute("sg_counts_switch_vector_single", functions=[arithmetic_mean], min_wins=True), - Attribute("sg_counts_switch_vector_small", functions=[arithmetic_mean], min_wins=True), - Attribute("sg_counts_switch_vector_large", functions=[arithmetic_mean], min_wins=True), - Attribute("sg_counts_switch_vector_full", functions=[arithmetic_mean], min_wins=True), - - Attribute("sg_counts_empty_rel", functions=[geometric_mean], min_wins=True), - Attribute("sg_counts_leaf_empty_rel", functions=[geometric_mean], min_wins=True), - Attribute("sg_counts_leaf_more_rel", functions=[geometric_mean], min_wins=True), - Attribute("sg_counts_leaf_single_rel", functions=[geometric_mean], min_wins=True), - Attribute("sg_counts_leaves_rel", functions=[geometric_mean], min_wins=True), - Attribute("sg_counts_switch_empty_rel", functions=[geometric_mean], min_wins=True), - Attribute("sg_counts_switch_more_rel", functions=[geometric_mean], min_wins=True), - Attribute("sg_counts_switch_single_rel", functions=[geometric_mean], min_wins=True), - 
Attribute("sg_counts_switches_rel", functions=[geometric_mean], min_wins=True), - Attribute("sg_counts_forks_rel", functions=[geometric_mean], min_wins=True), - Attribute("sg_counts_immediates_rel", functions=[geometric_mean], min_wins=True), - - Attribute("sg_size_estimate_default_generator_rel", functions=[geometric_mean], min_wins=True), - Attribute("sg_size_estimate_operators_rel", functions=[geometric_mean], min_wins=True), - Attribute("sg_size_estimate_overhead_rel", functions=[geometric_mean], min_wins=True), - Attribute("sg_size_estimate_switch_var_rel", functions=[geometric_mean], min_wins=True), - Attribute("sg_size_estimate_value_generator_rel", functions=[geometric_mean], min_wins=True), - Attribute("sg_size_estimate_next_generator_rel", functions=[geometric_mean], min_wins=True), - - Attribute("sg_counts_switch_vector_single_rel", functions=[geometric_mean], min_wins=True), - Attribute("sg_counts_switch_vector_small_rel", functions=[geometric_mean], min_wins=True), - Attribute("sg_counts_switch_vector_large_rel", functions=[geometric_mean], min_wins=True), - Attribute("sg_counts_switch_vector_full_rel", functions=[geometric_mean], min_wins=True), - - "error", - "run_dir", -]) - -exp.add_report(CSVReport(attributes=["algorithm", "domain", "sg_*", "translator_task_size"]), outfile="csvreport.csv") - -def add_sg_peak_mem_diff_per_task_size(run): - mem = run.get("sg_peak_mem_diff") - size = run.get("translator_task_size") - if mem and size: - run["sg_peak_mem_diff_per_task_size"] = mem / float(size) - return run - -for rev1, rev2 in [("base", "v3"), ("base", "v4"), ("base", "v5"), ("v3", "v4"), ("v4", "v5")]: - exp.add_report(RelativeScatterPlotReport( - attributes=["sg_peak_mem_diff_per_task_size"], - filter=add_sg_peak_mem_diff_per_task_size, - filter_algorithm=["issue705-%s-bounded-blind" % rev1, "issue705-%s-bounded-blind" % rev2], - get_category=lambda r1, r2: r1["domain"], - ), - outfile="issue705-%s-%s.png" % (rev1, rev2)) - -exp.run_steps() diff 
--git a/experiments/issue705/v3.py b/experiments/issue705/v3.py deleted file mode 100755 index ef111239a9..0000000000 --- a/experiments/issue705/v3.py +++ /dev/null @@ -1,120 +0,0 @@ -#! /usr/bin/env python -# -*- coding: utf-8 -*- - -#! /usr/bin/env python -# -*- coding: utf-8 -*- - -import os - -from lab.environments import LocalEnvironment, MaiaEnvironment -from lab.reports import Attribute, arithmetic_mean, geometric_mean - -import common_setup -from common_setup import IssueConfig, IssueExperiment -from relativescatter import RelativeScatterPlotReport -from csv_report import CSVReport - -DIR = os.path.dirname(os.path.abspath(__file__)) -BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"] -REVISIONS = ["issue705-base", "issue705-v5", "issue705-v6"] -CONFIGS = [ - IssueConfig( - 'bounded-blind', - ['--search', 'astar(blind(), bound=0)'], - ) -] -SUITE = list(sorted(set(common_setup.DEFAULT_OPTIMAL_SUITE) | - set(common_setup.DEFAULT_SATISFICING_SUITE))) -ENVIRONMENT = MaiaEnvironment( - priority=0, email="florian.pommerening@unibas.ch") - -if common_setup.is_test_run(): - SUITE = IssueExperiment.DEFAULT_TEST_SUITE - ENVIRONMENT = LocalEnvironment(processes=1) - -exp = IssueExperiment( - revisions=REVISIONS, - configs=CONFIGS, - environment=ENVIRONMENT, -) - -exp.add_suite(BENCHMARKS_DIR, SUITE) -exp.add_resource('sg_parser', 'sg-parser.py', dest='sg-parser.py') -exp.add_command('sg-parser', ['{sg_parser}']) - -exp.add_absolute_report_step(attributes=[ - Attribute("sg_construction_time", functions=[arithmetic_mean], min_wins=True), - Attribute("sg_peak_mem_diff", functions=[arithmetic_mean], min_wins=True), - - Attribute("sg_counts_empty", functions=[arithmetic_mean], min_wins=True), - Attribute("sg_counts_leaf_empty", functions=[arithmetic_mean], min_wins=True), - Attribute("sg_counts_leaf_more", functions=[arithmetic_mean], min_wins=True), - Attribute("sg_counts_leaf_single", functions=[arithmetic_mean], min_wins=True), - Attribute("sg_counts_leaves", 
functions=[arithmetic_mean], min_wins=True), - Attribute("sg_counts_switch_empty", functions=[arithmetic_mean], min_wins=True), - Attribute("sg_counts_switch_more", functions=[arithmetic_mean], min_wins=True), - Attribute("sg_counts_switch_single", functions=[arithmetic_mean], min_wins=True), - Attribute("sg_counts_switches", functions=[arithmetic_mean], min_wins=True), - Attribute("sg_counts_forks", functions=[arithmetic_mean], min_wins=True), - Attribute("sg_counts_immediates", functions=[arithmetic_mean], min_wins=True), - - Attribute("sg_size_estimate_default_generator", functions=[arithmetic_mean], min_wins=True), - Attribute("sg_size_estimate_operators", functions=[arithmetic_mean], min_wins=True), - Attribute("sg_size_estimate_overhead", functions=[arithmetic_mean], min_wins=True), - Attribute("sg_size_estimate_switch_var", functions=[arithmetic_mean], min_wins=True), - Attribute("sg_size_estimate_total", functions=[arithmetic_mean], min_wins=True), - Attribute("sg_size_estimate_value_generator", functions=[arithmetic_mean], min_wins=True), - Attribute("sg_size_estimate_next_generator", functions=[arithmetic_mean], min_wins=True), - - Attribute("sg_counts_switch_vector_single", functions=[arithmetic_mean], min_wins=True), - Attribute("sg_counts_switch_vector_small", functions=[arithmetic_mean], min_wins=True), - Attribute("sg_counts_switch_vector_large", functions=[arithmetic_mean], min_wins=True), - Attribute("sg_counts_switch_vector_full", functions=[arithmetic_mean], min_wins=True), - - Attribute("sg_counts_empty_rel", functions=[geometric_mean], min_wins=True), - Attribute("sg_counts_leaf_empty_rel", functions=[geometric_mean], min_wins=True), - Attribute("sg_counts_leaf_more_rel", functions=[geometric_mean], min_wins=True), - Attribute("sg_counts_leaf_single_rel", functions=[geometric_mean], min_wins=True), - Attribute("sg_counts_leaves_rel", functions=[geometric_mean], min_wins=True), - Attribute("sg_counts_switch_empty_rel", 
functions=[geometric_mean], min_wins=True), - Attribute("sg_counts_switch_more_rel", functions=[geometric_mean], min_wins=True), - Attribute("sg_counts_switch_single_rel", functions=[geometric_mean], min_wins=True), - Attribute("sg_counts_switches_rel", functions=[geometric_mean], min_wins=True), - Attribute("sg_counts_forks_rel", functions=[geometric_mean], min_wins=True), - Attribute("sg_counts_immediates_rel", functions=[geometric_mean], min_wins=True), - - Attribute("sg_size_estimate_default_generator_rel", functions=[geometric_mean], min_wins=True), - Attribute("sg_size_estimate_operators_rel", functions=[geometric_mean], min_wins=True), - Attribute("sg_size_estimate_overhead_rel", functions=[geometric_mean], min_wins=True), - Attribute("sg_size_estimate_switch_var_rel", functions=[geometric_mean], min_wins=True), - Attribute("sg_size_estimate_value_generator_rel", functions=[geometric_mean], min_wins=True), - Attribute("sg_size_estimate_next_generator_rel", functions=[geometric_mean], min_wins=True), - - Attribute("sg_counts_switch_vector_single_rel", functions=[geometric_mean], min_wins=True), - Attribute("sg_counts_switch_vector_small_rel", functions=[geometric_mean], min_wins=True), - Attribute("sg_counts_switch_vector_large_rel", functions=[geometric_mean], min_wins=True), - Attribute("sg_counts_switch_vector_full_rel", functions=[geometric_mean], min_wins=True), - - "error", - "run_dir", -]) - -exp.add_report(CSVReport(attributes=["algorithm", "domain", "sg_*", "translator_task_size"]), outfile="csvreport.csv") - -def add_sg_peak_mem_diff_per_task_size(run): - mem = run.get("sg_peak_mem_diff") - size = run.get("translator_task_size") - if mem and size: - run["sg_peak_mem_diff_per_task_size"] = mem / float(size) - return run - -for rev1, rev2 in [("base", "v6"), ("v5", "v6")]: - exp.add_report(RelativeScatterPlotReport( - attributes=["sg_peak_mem_diff_per_task_size"], - filter=add_sg_peak_mem_diff_per_task_size, - 
filter_algorithm=["issue705-%s-bounded-blind" % rev1, "issue705-%s-bounded-blind" % rev2], - get_category=lambda r1, r2: r1["domain"], - ), - outfile="issue705-%s-%s.png" % (rev1, rev2)) - -exp.run_steps() diff --git a/experiments/issue705/v4.py b/experiments/issue705/v4.py deleted file mode 100755 index 155ffa37fd..0000000000 --- a/experiments/issue705/v4.py +++ /dev/null @@ -1,56 +0,0 @@ -#! /usr/bin/env python -# -*- coding: utf-8 -*- - -#! /usr/bin/env python -# -*- coding: utf-8 -*- - -import os - -from lab.environments import LocalEnvironment, MaiaEnvironment -from lab.reports import Attribute, arithmetic_mean, geometric_mean - -import common_setup -from common_setup import IssueConfig, IssueExperiment -from relativescatter import RelativeScatterPlotReport -from csv_report import CSVReport - -DIR = os.path.dirname(os.path.abspath(__file__)) -BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"] -REVISIONS = ["issue705-base", "issue705-v3", "issue705-v5", "issue705-v6"] -CONFIGS = [ - IssueConfig( - 'astar-blind', - ['--search', 'astar(blind())'], - ) -] -SUITE = list(sorted(set(common_setup.DEFAULT_OPTIMAL_SUITE) | - set(common_setup.DEFAULT_SATISFICING_SUITE))) -ENVIRONMENT = MaiaEnvironment( - priority=0, email="florian.pommerening@unibas.ch") - -if common_setup.is_test_run(): - SUITE = IssueExperiment.DEFAULT_TEST_SUITE - ENVIRONMENT = LocalEnvironment(processes=1) - -exp = IssueExperiment( - revisions=REVISIONS, - configs=CONFIGS, - environment=ENVIRONMENT, -) - -exp.add_suite(BENCHMARKS_DIR, SUITE) -exp.add_resource('sg_parser', 'sg-parser.py', dest='sg-parser.py') -exp.add_command('sg-parser', ['{sg_parser}']) - -exp.add_comparison_table_step() - -for attr in ["total_time", "search_time", "sg_construction_time", "memory"]: - for rev1, rev2 in [("base", "v3"), ("base", "v5"), ("base", "v6")]: - exp.add_report(RelativeScatterPlotReport( - attributes=[attr], - filter_algorithm=["issue705-%s-astar-blind" % rev1, "issue705-%s-astar-blind" % rev2], - 
get_category=lambda r1, r2: r1["domain"], - ), - outfile="issue705-%s-%s-%s.png" % (attr, rev1, rev2)) - -exp.run_steps() diff --git a/experiments/issue705/v5.py b/experiments/issue705/v5.py deleted file mode 100755 index 87b0a77dc3..0000000000 --- a/experiments/issue705/v5.py +++ /dev/null @@ -1,67 +0,0 @@ -#! /usr/bin/env python -# -*- coding: utf-8 -*- - -#! /usr/bin/env python -# -*- coding: utf-8 -*- - -import os - -from lab.environments import LocalEnvironment, MaiaEnvironment -from lab.reports import Attribute, arithmetic_mean, geometric_mean - -import common_setup -from common_setup import IssueConfig, IssueExperiment -from relativescatter import RelativeScatterPlotReport -from csv_report import CSVReport - -DIR = os.path.dirname(os.path.abspath(__file__)) -BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"] -REVISIONS = ["issue705-base", "issue705-v7"] -CONFIGS = [ - IssueConfig( - 'astar-blind', - ['--search', 'astar(blind())'], - ) -] -SUITE = list(sorted(set(common_setup.DEFAULT_OPTIMAL_SUITE) | - set(common_setup.DEFAULT_SATISFICING_SUITE))) -ENVIRONMENT = MaiaEnvironment( - priority=0, email="florian.pommerening@unibas.ch") - -if common_setup.is_test_run(): - SUITE = IssueExperiment.DEFAULT_TEST_SUITE - ENVIRONMENT = LocalEnvironment(processes=1) - -exp = IssueExperiment( - revisions=REVISIONS, - configs=CONFIGS, - environment=ENVIRONMENT, -) - -exp.add_suite(BENCHMARKS_DIR, SUITE) -exp.add_resource('sg_parser', 'sg-parser.py', dest='sg-parser.py') -exp.add_command('sg-parser', ['{sg_parser}']) - -exp.add_fetcher('data/issue705-v4-eval') - -exp.add_comparison_table_step() - -def add_sg_peak_mem_diff_per_task_size(run): - mem = run.get("sg_peak_mem_diff") - size = run.get("translator_task_size") - if mem and size: - run["sg_peak_mem_diff_per_task_size"] = mem / float(size) - return run - - -for attr in ["total_time", "search_time", "sg_construction_time", "memory", "sg_peak_mem_diff_per_task_size"]: - for rev1, rev2 in [("base", "v7"), ("v6", "v7")]: - 
exp.add_report(RelativeScatterPlotReport( - attributes=[attr], - filter_algorithm=["issue705-%s-astar-blind" % rev1, "issue705-%s-astar-blind" % rev2], - filter=add_sg_peak_mem_diff_per_task_size, - get_category=lambda r1, r2: r1["domain"], - ), - outfile="issue705-%s-%s-%s.png" % (attr, rev1, rev2)) - -exp.run_steps() diff --git a/experiments/issue705/v6.py b/experiments/issue705/v6.py deleted file mode 100755 index 4e1c5133e8..0000000000 --- a/experiments/issue705/v6.py +++ /dev/null @@ -1,67 +0,0 @@ -#! /usr/bin/env python -# -*- coding: utf-8 -*- - -#! /usr/bin/env python -# -*- coding: utf-8 -*- - -import os - -from lab.environments import LocalEnvironment, MaiaEnvironment -from lab.reports import Attribute, arithmetic_mean, geometric_mean - -import common_setup -from common_setup import IssueConfig, IssueExperiment -from relativescatter import RelativeScatterPlotReport -from csv_report import CSVReport - -DIR = os.path.dirname(os.path.abspath(__file__)) -BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"] -REVISIONS = ["issue705-base", "issue705-v7", "issue705-v8"] -CONFIGS = [ - IssueConfig( - 'astar-blind', - ['--search', 'astar(blind())'], - ) -] -SUITE = list(sorted(set(common_setup.DEFAULT_OPTIMAL_SUITE) | - set(common_setup.DEFAULT_SATISFICING_SUITE))) -ENVIRONMENT = MaiaEnvironment( - priority=0, email="florian.pommerening@unibas.ch") - -if common_setup.is_test_run(): - SUITE = IssueExperiment.DEFAULT_TEST_SUITE - ENVIRONMENT = LocalEnvironment(processes=1) - -exp = IssueExperiment( - revisions=REVISIONS, - configs=CONFIGS, - environment=ENVIRONMENT, -) - -exp.add_suite(BENCHMARKS_DIR, SUITE) -exp.add_resource('sg_parser', 'sg-parser.py', dest='sg-parser.py') -exp.add_command('sg-parser', ['{sg_parser}']) - -exp.add_fetcher('data/issue705-v4-eval') - -exp.add_comparison_table_step() - -def add_sg_peak_mem_diff_per_task_size(run): - mem = run.get("sg_peak_mem_diff") - size = run.get("translator_task_size") - if mem and size: - 
run["sg_peak_mem_diff_per_task_size"] = mem / float(size) - return run - - -for attr in ["total_time", "search_time", "sg_construction_time", "memory", "sg_peak_mem_diff_per_task_size"]: - for rev1, rev2 in [("base", "v8"), ("v7", "v8")]: - exp.add_report(RelativeScatterPlotReport( - attributes=[attr], - filter_algorithm=["issue705-%s-astar-blind" % rev1, "issue705-%s-astar-blind" % rev2], - filter=add_sg_peak_mem_diff_per_task_size, - get_category=lambda r1, r2: r1["domain"], - ), - outfile="issue705-%s-%s-%s.png" % (attr, rev1, rev2)) - -exp.run_steps() diff --git a/experiments/issue705/v7.py b/experiments/issue705/v7.py deleted file mode 100755 index 16fee35ebd..0000000000 --- a/experiments/issue705/v7.py +++ /dev/null @@ -1,72 +0,0 @@ -#! /usr/bin/env python -# -*- coding: utf-8 -*- - -#! /usr/bin/env python -# -*- coding: utf-8 -*- - -import os - -from lab.environments import LocalEnvironment, MaiaEnvironment -from lab.reports import Attribute, arithmetic_mean, geometric_mean - -import common_setup -from common_setup import IssueConfig, IssueExperiment -from relativescatter import RelativeScatterPlotReport -from csv_report import CSVReport - -DIR = os.path.dirname(os.path.abspath(__file__)) -BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"] -REVISIONS = ["issue705-base", "issue705-v8", "issue705-v9", "issue705-v10", "issue705-v11"] -CONFIGS = [ - IssueConfig( - 'astar-blind', - ['--search', 'astar(blind())'], - ) -] -SUITE = list(sorted(set(common_setup.DEFAULT_OPTIMAL_SUITE) | - set(common_setup.DEFAULT_SATISFICING_SUITE))) -ENVIRONMENT = MaiaEnvironment( - priority=0, email="florian.pommerening@unibas.ch") - -if common_setup.is_test_run(): - SUITE = IssueExperiment.DEFAULT_TEST_SUITE - ENVIRONMENT = LocalEnvironment(processes=1) - -exp = IssueExperiment( - revisions=REVISIONS, - configs=CONFIGS, - environment=ENVIRONMENT, -) - -exp.add_suite(BENCHMARKS_DIR, SUITE) -exp.add_resource('sg_parser', 'sg-parser.py', dest='sg-parser.py') 
-exp.add_command('sg-parser', ['{sg_parser}']) - -exp.add_fetcher('data/issue705-v4-eval') - -exp.add_comparison_table_step() - -def add_sg_peak_mem_diff_per_task_size(run): - mem = run.get("sg_peak_mem_diff") - size = run.get("translator_task_size") - if mem and size: - run["sg_peak_mem_diff_per_task_size"] = mem / float(size) - return run - - -for attr in ["total_time", "search_time", "sg_construction_time", "memory", "sg_peak_mem_diff_per_task_size"]: - for rev1, rev2 in [("base", "v11"), ("v8", "v9"), ("v9", "v10"), ("v10", "v11")]: - exp.add_report(RelativeScatterPlotReport( - attributes=[attr], - filter_algorithm=["issue705-%s-astar-blind" % rev1, "issue705-%s-astar-blind" % rev2], - filter=add_sg_peak_mem_diff_per_task_size, - get_category=lambda r1, r2: r1["domain"], - ), - outfile="issue705-%s-%s-%s.png" % (attr, rev1, rev2)) - -exp.add_report(CSVReport( - filter_algorithm="issue705-v11-astar-blind", - attributes=["algorithm", "domain", "sg_*", "translator_task_size"]), - outfile="csvreport.csv") - -exp.run_steps() diff --git a/experiments/issue705/v8.py b/experiments/issue705/v8.py deleted file mode 100755 index 2ec67d80db..0000000000 --- a/experiments/issue705/v8.py +++ /dev/null @@ -1,70 +0,0 @@ -#! 
/usr/bin/env python -# -*- coding: utf-8 -*- - -import os - -from lab.environments import LocalEnvironment, MaiaEnvironment -from lab.reports import Attribute, arithmetic_mean, geometric_mean - -import common_setup -from common_setup import IssueConfig, IssueExperiment -from relativescatter import RelativeScatterPlotReport -from csv_report import CSVReport - -DIR = os.path.dirname(os.path.abspath(__file__)) -BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"] -REVISIONS = ["issue705-base", "issue705-v12"] -CONFIGS = [ - IssueConfig( - 'astar-blind', - ['--search', 'astar(blind())'], - ), - IssueConfig( - 'astar-lmcut', - ['--search', 'astar(lmcut())'], - ), - IssueConfig( - 'astar-cegar', - ['--search', 'astar(cegar())'], - ), - IssueConfig( - 'astar-ipdb', - ['--search', 'astar(ipdb())'], - ), - IssueConfig( - 'astar-lama-first', - [], - driver_options=['--alias', 'lama-first'], - ), -] -SUITE = list(sorted(set(common_setup.DEFAULT_OPTIMAL_SUITE) | - set(common_setup.DEFAULT_SATISFICING_SUITE))) -ENVIRONMENT = MaiaEnvironment( - priority=0, email="florian.pommerening@unibas.ch") - -if common_setup.is_test_run(): - SUITE = IssueExperiment.DEFAULT_TEST_SUITE - ENVIRONMENT = LocalEnvironment(processes=1) - -exp = IssueExperiment( - revisions=REVISIONS, - configs=CONFIGS, - environment=ENVIRONMENT, -) - -exp.add_suite(BENCHMARKS_DIR, SUITE) -exp.add_resource('sg_parser', 'sg-parser.py', dest='sg-parser.py') -exp.add_command('sg-parser', ['{sg_parser}']) - -exp.add_comparison_table_step() - -for attr in ["total_time", "search_time", "memory"]: - for rev1, rev2 in [("base", "v12")]: - exp.add_report(RelativeScatterPlotReport( - attributes=[attr], - filter_algorithm=["issue705-%s-astar-blind" % rev1, "issue705-%s-astar-blind" % rev2], - get_category=lambda r1, r2: r1["domain"], - ), - outfile="issue705-%s-%s-%s.png" % (attr, rev1, rev2)) - -exp.run_steps() diff --git a/experiments/issue707/common_setup.py b/experiments/issue707/common_setup.py deleted file mode 100644 
index df2613bf87..0000000000 --- a/experiments/issue707/common_setup.py +++ /dev/null @@ -1,392 +0,0 @@ -# -*- coding: utf-8 -*- - -import itertools -import os -import platform -import subprocess -import sys - -from lab.experiment import ARGPARSER -from lab import tools - -from downward.experiment import FastDownwardExperiment -from downward.reports.absolute import AbsoluteReport -from downward.reports.compare import ComparativeReport -from downward.reports.scatter import ScatterPlotReport - -try: - from relativescatter import RelativeScatterPlotReport - matplotlib = True -except ImportError: - print 'matplotlib not availabe, scatter plots not available' - matplotlib = False - - -def parse_args(): - ARGPARSER.add_argument( - "--test", - choices=["yes", "no", "auto"], - default="auto", - dest="test_run", - help="test experiment locally on a small suite if --test=yes or " - "--test=auto and we are not on a cluster") - return ARGPARSER.parse_args() - -ARGS = parse_args() - - -DEFAULT_OPTIMAL_SUITE = [ - 'airport', 'barman-opt11-strips', 'barman-opt14-strips', 'blocks', - 'childsnack-opt14-strips', 'depot', 'driverlog', - 'elevators-opt08-strips', 'elevators-opt11-strips', - 'floortile-opt11-strips', 'floortile-opt14-strips', 'freecell', - 'ged-opt14-strips', 'grid', 'gripper', 'hiking-opt14-strips', - 'logistics00', 'logistics98', 'miconic', 'movie', 'mprime', - 'mystery', 'nomystery-opt11-strips', 'openstacks-opt08-strips', - 'openstacks-opt11-strips', 'openstacks-opt14-strips', - 'openstacks-strips', 'parcprinter-08-strips', - 'parcprinter-opt11-strips', 'parking-opt11-strips', - 'parking-opt14-strips', 'pathways-noneg', 'pegsol-08-strips', - 'pegsol-opt11-strips', 'pipesworld-notankage', - 'pipesworld-tankage', 'psr-small', 'rovers', 'satellite', - 'scanalyzer-08-strips', 'scanalyzer-opt11-strips', - 'sokoban-opt08-strips', 'sokoban-opt11-strips', 'storage', - 'tetris-opt14-strips', 'tidybot-opt11-strips', - 'tidybot-opt14-strips', 'tpp', 'transport-opt08-strips', 
- 'transport-opt11-strips', 'transport-opt14-strips', - 'trucks-strips', 'visitall-opt11-strips', 'visitall-opt14-strips', - 'woodworking-opt08-strips', 'woodworking-opt11-strips', - 'zenotravel'] - -DEFAULT_SATISFICING_SUITE = [ - 'airport', 'assembly', 'barman-sat11-strips', - 'barman-sat14-strips', 'blocks', 'cavediving-14-adl', - 'childsnack-sat14-strips', 'citycar-sat14-adl', 'depot', - 'driverlog', 'elevators-sat08-strips', 'elevators-sat11-strips', - 'floortile-sat11-strips', 'floortile-sat14-strips', 'freecell', - 'ged-sat14-strips', 'grid', 'gripper', 'hiking-sat14-strips', - 'logistics00', 'logistics98', 'maintenance-sat14-adl', 'miconic', - 'miconic-fulladl', 'miconic-simpleadl', 'movie', 'mprime', - 'mystery', 'nomystery-sat11-strips', 'openstacks', - 'openstacks-sat08-adl', 'openstacks-sat08-strips', - 'openstacks-sat11-strips', 'openstacks-sat14-strips', - 'openstacks-strips', 'optical-telegraphs', 'parcprinter-08-strips', - 'parcprinter-sat11-strips', 'parking-sat11-strips', - 'parking-sat14-strips', 'pathways', 'pathways-noneg', - 'pegsol-08-strips', 'pegsol-sat11-strips', 'philosophers', - 'pipesworld-notankage', 'pipesworld-tankage', 'psr-large', - 'psr-middle', 'psr-small', 'rovers', 'satellite', - 'scanalyzer-08-strips', 'scanalyzer-sat11-strips', 'schedule', - 'sokoban-sat08-strips', 'sokoban-sat11-strips', 'storage', - 'tetris-sat14-strips', 'thoughtful-sat14-strips', - 'tidybot-sat11-strips', 'tpp', 'transport-sat08-strips', - 'transport-sat11-strips', 'transport-sat14-strips', 'trucks', - 'trucks-strips', 'visitall-sat11-strips', 'visitall-sat14-strips', - 'woodworking-sat08-strips', 'woodworking-sat11-strips', - 'zenotravel'] - - -def get_script(): - """Get file name of main script.""" - return tools.get_script_path() - - -def get_script_dir(): - """Get directory of main script. 
- - Usually a relative directory (depends on how it was called by the user.)""" - return os.path.dirname(get_script()) - - -def get_experiment_name(): - """Get name for experiment. - - Derived from the absolute filename of the main script, e.g. - "/ham/spam/eggs.py" => "spam-eggs".""" - script = os.path.abspath(get_script()) - script_dir = os.path.basename(os.path.dirname(script)) - script_base = os.path.splitext(os.path.basename(script))[0] - return "%s-%s" % (script_dir, script_base) - - -def get_data_dir(): - """Get data dir for the experiment. - - This is the subdirectory "data" of the directory containing - the main script.""" - return os.path.join(get_script_dir(), "data", get_experiment_name()) - - -def get_repo_base(): - """Get base directory of the repository, as an absolute path. - - Search upwards in the directory tree from the main script until a - directory with a subdirectory named ".hg" is found. - - Abort if the repo base cannot be found.""" - path = os.path.abspath(get_script_dir()) - while os.path.dirname(path) != path: - if os.path.exists(os.path.join(path, ".hg")): - return path - path = os.path.dirname(path) - sys.exit("repo base could not be found") - - -def is_running_on_cluster(): - node = platform.node() - return ( - "cluster" in node or - node.startswith("gkigrid") or - node in ["habakuk", "turtur"]) - - -def is_test_run(): - return ARGS.test_run == "yes" or ( - ARGS.test_run == "auto" and not is_running_on_cluster()) - - -def get_algo_nick(revision, config_nick): - return "{revision}-{config_nick}".format(**locals()) - - -class IssueConfig(object): - """Hold information about a planner configuration. - - See FastDownwardExperiment.add_algorithm() for documentation of the - constructor's options. 
- - """ - def __init__(self, nick, component_options, - build_options=None, driver_options=None): - self.nick = nick - self.component_options = component_options - self.build_options = build_options - self.driver_options = driver_options - - -class IssueExperiment(FastDownwardExperiment): - """Subclass of FastDownwardExperiment with some convenience features.""" - - DEFAULT_TEST_SUITE = ["gripper:prob01.pddl"] - - DEFAULT_TABLE_ATTRIBUTES = [ - "cost", - "coverage", - "error", - "evaluations", - "expansions", - "expansions_until_last_jump", - "generated", - "memory", - "quality", - "run_dir", - "score_evaluations", - "score_expansions", - "score_generated", - "score_memory", - "score_search_time", - "score_total_time", - "search_time", - "total_time", - "unsolvable", - ] - - DEFAULT_SCATTER_PLOT_ATTRIBUTES = [ - "evaluations", - "expansions", - "expansions_until_last_jump", - "initial_h_value", - "memory", - "search_time", - "total_time", - ] - - PORTFOLIO_ATTRIBUTES = [ - "cost", - "coverage", - "error", - "plan_length", - "run_dir", - ] - - def __init__(self, revisions=None, configs=None, path=None, **kwargs): - """ - - You can either specify both *revisions* and *configs* or none - of them. If they are omitted, you will need to call - exp.add_algorithm() manually. - - If *revisions* is given, it must be a non-empty list of - revision identifiers, which specify which planner versions to - use in the experiment. The same versions are used for - translator, preprocessor and search. :: - - IssueExperiment(revisions=["issue123", "4b3d581643"], ...) - - If *configs* is given, it must be a non-empty list of - IssueConfig objects. :: - - IssueExperiment(..., configs=[ - IssueConfig("ff", ["--search", "eager_greedy(ff())"]), - IssueConfig( - "lama", [], - driver_options=["--alias", "seq-sat-lama-2011"]), - ]) - - If *path* is specified, it must be the path to where the - experiment should be built (e.g. - /home/john/experiments/issue123/exp01/). 
If omitted, the - experiment path is derived automatically from the main - script's filename. Example:: - - script = experiments/issue123/exp01.py --> - path = experiments/issue123/data/issue123-exp01/ - - """ - - path = path or get_data_dir() - - FastDownwardExperiment.__init__(self, path=path, **kwargs) - - if (revisions and not configs) or (not revisions and configs): - raise ValueError( - "please provide either both or none of revisions and configs") - - for rev in revisions: - for config in configs: - self.add_algorithm( - get_algo_nick(rev, config.nick), - get_repo_base(), - rev, - config.component_options, - build_options=config.build_options, - driver_options=config.driver_options) - - self._revisions = revisions - self._configs = configs - - @classmethod - def _is_portfolio(cls, config_nick): - return "fdss" in config_nick - - @classmethod - def get_supported_attributes(cls, config_nick, attributes): - if cls._is_portfolio(config_nick): - return [attr for attr in attributes - if attr in cls.PORTFOLIO_ATTRIBUTES] - return attributes - - def add_absolute_report_step(self, **kwargs): - """Add step that makes an absolute report. - - Absolute reports are useful for experiments that don't compare - revisions. - - The report is written to the experiment evaluation directory. - - All *kwargs* will be passed to the AbsoluteReport class. If the - keyword argument *attributes* is not specified, a default list - of attributes is used. :: - - exp.add_absolute_report_step(attributes=["coverage"]) - - """ - kwargs.setdefault("attributes", self.DEFAULT_TABLE_ATTRIBUTES) - report = AbsoluteReport(**kwargs) - outfile = os.path.join( - self.eval_dir, - get_experiment_name() + "." + report.output_format) - self.add_report(report, outfile=outfile) - self.add_step( - 'publish-absolute-report', subprocess.call, ['publish', outfile]) - - def add_comparison_table_step(self, **kwargs): - """Add a step that makes pairwise revision comparisons. 
- - Create comparative reports for all pairs of Fast Downward - revisions. Each report pairs up the runs of the same config and - lists the two absolute attribute values and their difference - for all attributes in kwargs["attributes"]. - - All *kwargs* will be passed to the CompareConfigsReport class. - If the keyword argument *attributes* is not specified, a - default list of attributes is used. :: - - exp.add_comparison_table_step(attributes=["coverage"]) - - """ - kwargs.setdefault("attributes", self.DEFAULT_TABLE_ATTRIBUTES) - - def make_comparison_tables(): - for rev1, rev2 in itertools.combinations(self._revisions, 2): - compared_configs = [] - for config in self._configs: - config_nick = config.nick - compared_configs.append( - ("%s-%s" % (rev1, config_nick), - "%s-%s" % (rev2, config_nick), - "Diff (%s)" % config_nick)) - report = ComparativeReport(compared_configs, **kwargs) - outfile = os.path.join( - self.eval_dir, - "%s-%s-%s-compare.%s" % ( - self.name, rev1, rev2, report.output_format)) - report(self.eval_dir, outfile) - - def publish_comparison_tables(): - for rev1, rev2 in itertools.combinations(self._revisions, 2): - outfile = os.path.join( - self.eval_dir, - "%s-%s-%s-compare.html" % (self.name, rev1, rev2)) - subprocess.call(["publish", outfile]) - - self.add_step("make-comparison-tables", make_comparison_tables) - self.add_step( - "publish-comparison-tables", publish_comparison_tables) - - def add_scatter_plot_step(self, relative=False, attributes=None): - """Add step creating (relative) scatter plots for all revision pairs. - - Create a scatter plot for each combination of attribute, - configuration and revisions pair. If *attributes* is not - specified, a list of common scatter plot attributes is used. - For portfolios all attributes except "cost", "coverage" and - "plan_length" will be ignored. 
:: - - exp.add_scatter_plot_step(attributes=["expansions"]) - - """ - if matplotlib: - if relative: - report_class = RelativeScatterPlotReport - scatter_dir = os.path.join(self.eval_dir, "scatter-relative") - step_name = "make-relative-scatter-plots" - else: - report_class = ScatterPlotReport - scatter_dir = os.path.join(self.eval_dir, "scatter-absolute") - step_name = "make-absolute-scatter-plots" - if attributes is None: - attributes = self.DEFAULT_SCATTER_PLOT_ATTRIBUTES - - def make_scatter_plot(config_nick, rev1, rev2, attribute): - name = "-".join([self.name, rev1, rev2, attribute, config_nick]) - print "Make scatter plot for", name - algo1 = "{}-{}".format(rev1, config_nick) - algo2 = "{}-{}".format(rev2, config_nick) - report = report_class( - filter_config=[algo1, algo2], - attributes=[attribute], - get_category=lambda run1, run2: run1["domain"], - legend_location=(1.3, 0.5)) - report( - self.eval_dir, - os.path.join(scatter_dir, rev1 + "-" + rev2, name)) - - def make_scatter_plots(): - for config in self._configs: - for rev1, rev2 in itertools.combinations(self._revisions, 2): - for attribute in self.get_supported_attributes( - config.nick, attributes): - make_scatter_plot(config.nick, rev1, rev2, attribute) - - self.add_step(step_name, make_scatter_plots) diff --git a/experiments/issue707/ms-parser.py b/experiments/issue707/ms-parser.py deleted file mode 100755 index a33b1f76f3..0000000000 --- a/experiments/issue707/ms-parser.py +++ /dev/null @@ -1,61 +0,0 @@ -#! 
/usr/bin/env python - -from lab.parser import Parser - -parser = Parser() -parser.add_pattern('ms_final_size', 'Final transition system size: (\d+)', required=False, type=int) -parser.add_pattern('ms_construction_time', 'Done initializing merge-and-shrink heuristic \[(.+)s\]', required=False, type=float) -parser.add_pattern('ms_atomic_construction_time', 't=(.+)s \(after computation of atomic transition systems\)', required=False, type=float) -parser.add_pattern('ms_memory_delta', 'Final peak memory increase of merge-and-shrink computation: (\d+) KB', required=False, type=int) - -def check_ms_constructed(content, props): - ms_construction_time = props.get('ms_construction_time') - abstraction_constructed = False - if ms_construction_time is not None: - abstraction_constructed = True - props['ms_abstraction_constructed'] = abstraction_constructed - -parser.add_function(check_ms_constructed) - -def check_planner_exit_reason(content, props): - ms_abstraction_constructed = props.get('ms_abstraction_constructed') - error = props.get('error') - if error != 'none' and error != 'timeout' and error != 'out-of-memory': - print 'error: %s' % error - return - - # Check whether merge-and-shrink computation or search ran out of - # time or memory. 
- ms_out_of_time = False - ms_out_of_memory = False - search_out_of_time = False - search_out_of_memory = False - if ms_abstraction_constructed == False: - if error == 'timeout': - ms_out_of_time = True - elif error == 'out-of-memory': - ms_out_of_memory = True - elif ms_abstraction_constructed == True: - if error == 'timeout': - search_out_of_time = True - elif error == 'out-of-memory': - search_out_of_memory = True - props['ms_out_of_time'] = ms_out_of_time - props['ms_out_of_memory'] = ms_out_of_memory - props['search_out_of_time'] = search_out_of_time - props['search_out_of_memory'] = search_out_of_memory - -parser.add_function(check_planner_exit_reason) - -def check_perfect_heuristic(content, props): - plan_length = props.get('plan_length') - expansions = props.get('expansions') - if plan_length != None: - perfect_heuristic = False - if plan_length + 1 == expansions: - perfect_heuristic = True - props['perfect_heuristic'] = perfect_heuristic - -parser.add_function(check_perfect_heuristic) - -parser.parse() diff --git a/experiments/issue707/relativescatter.py b/experiments/issue707/relativescatter.py deleted file mode 100644 index f74cb6e721..0000000000 --- a/experiments/issue707/relativescatter.py +++ /dev/null @@ -1,105 +0,0 @@ -# -*- coding: utf-8 -*- - -from collections import defaultdict - -from matplotlib import ticker - -from downward.reports.scatter import ScatterPlotReport -from downward.reports.plot import PlotReport, Matplotlib, MatplotlibPlot - - -# TODO: handle outliers - -# TODO: this is mostly copied from ScatterMatplotlib (scatter.py) -class RelativeScatterMatplotlib(Matplotlib): - @classmethod - def _plot(cls, report, axes, categories, styles): - # Display grid - axes.grid(b=True, linestyle='-', color='0.75') - - has_points = False - # Generate the scatter plots - for category, coords in sorted(categories.items()): - X, Y = zip(*coords) - axes.scatter(X, Y, s=42, label=category, **styles[category]) - if X and Y: - has_points = True - - if 
report.xscale == 'linear' or report.yscale == 'linear': - plot_size = report.missing_val * 1.01 - else: - plot_size = report.missing_val * 1.25 - - # make 5 ticks above and below 1 - yticks = [] - tick_step = report.ylim_top**(1/5.0) - for i in xrange(-5, 6): - yticks.append(tick_step**i) - axes.set_yticks(yticks) - axes.get_yaxis().set_major_formatter(ticker.ScalarFormatter()) - - axes.set_xlim(report.xlim_left or -1, report.xlim_right or plot_size) - axes.set_ylim(report.ylim_bottom or -1, report.ylim_top or plot_size) - - for axis in [axes.xaxis, axes.yaxis]: - MatplotlibPlot.change_axis_formatter( - axis, - report.missing_val if report.show_missing else None) - return has_points - - -class RelativeScatterPlotReport(ScatterPlotReport): - """ - Generate a scatter plot that shows a relative comparison of two - algorithms with regard to the given attribute. The attribute value - of algorithm 1 is shown on the x-axis and the relation to the value - of algorithm 2 on the y-axis. - """ - - def __init__(self, show_missing=True, get_category=None, **kwargs): - ScatterPlotReport.__init__(self, show_missing, get_category, **kwargs) - if self.output_format == 'tex': - raise "not supported" - else: - self.writer = RelativeScatterMatplotlib - - def _fill_categories(self, runs): - # We discard the *runs* parameter. 
- # Map category names to value tuples - categories = defaultdict(list) - self.ylim_bottom = 2 - self.ylim_top = 0.5 - self.xlim_left = float("inf") - for (domain, problem), runs in self.problem_runs.items(): - if len(runs) != 2: - continue - run1, run2 = runs - assert (run1['algorithm'] == self.algorithms[0] and - run2['algorithm'] == self.algorithms[1]) - val1 = run1.get(self.attribute) - val2 = run2.get(self.attribute) - if val1 is None or val2 is None: - continue - category = self.get_category(run1, run2) - assert val1 > 0, (domain, problem, self.algorithms[0], val1) - assert val2 > 0, (domain, problem, self.algorithms[1], val2) - x = val1 - y = val2 / float(val1) - - categories[category].append((x, y)) - - self.ylim_top = max(self.ylim_top, y) - self.ylim_bottom = min(self.ylim_bottom, y) - self.xlim_left = min(self.xlim_left, x) - - # center around 1 - if self.ylim_bottom < 1: - self.ylim_top = max(self.ylim_top, 1 / float(self.ylim_bottom)) - if self.ylim_top > 1: - self.ylim_bottom = min(self.ylim_bottom, 1 / float(self.ylim_top)) - return categories - - def _set_scales(self, xscale, yscale): - # ScatterPlot uses log-scaling on the x-axis by default. - PlotReport._set_scales( - self, xscale or self.attribute.scale or 'log', 'log') diff --git a/experiments/issue707/v1-pruning-variants.py b/experiments/issue707/v1-pruning-variants.py deleted file mode 100755 index f2752d3d6e..0000000000 --- a/experiments/issue707/v1-pruning-variants.py +++ /dev/null @@ -1,85 +0,0 @@ -#! /usr/bin/env python -# -*- coding: utf-8 -*- - -#! 
/usr/bin/env python -# -*- coding: utf-8 -*- - -import os - -from lab.environments import LocalEnvironment, MaiaEnvironment -from lab.reports import Attribute, geometric_mean - -from common_setup import IssueConfig, IssueExperiment, DEFAULT_OPTIMAL_SUITE, is_test_run - -BENCHMARKS_DIR=os.path.expanduser('~/repos/downward/benchmarks') -REVISIONS = ["issue707-v1"] -CONFIGS = [ - IssueConfig('rl-b50k-pruneinit', ['--search', 'astar(merge_and_shrink(merge_strategy=merge_precomputed(merge_tree=linear(variable_order=reverse_level)),shrink_strategy=shrink_bisimulation(greedy=false),label_reduction=exact(before_shrinking=true,before_merging=false),max_states=50000,threshold_before_merge=1,prune_unreachable_states=false))']), - IssueConfig('dfp-b50k-pruneinit', ['--search', 'astar(merge_and_shrink(merge_strategy=merge_stateless(merge_selector=score_based_filtering(scoring_functions=[goal_relevance,dfp,total_order])),shrink_strategy=shrink_bisimulation(greedy=false),label_reduction=exact(before_shrinking=true,before_merging=false),max_states=50000,threshold_before_merge=1,prune_unreachable_states=false))']), - IssueConfig('rl-ginf-pruneinit', ['--search', 'astar(merge_and_shrink(merge_strategy=merge_precomputed(merge_tree=linear(variable_order=reverse_level)),shrink_strategy=shrink_bisimulation(greedy=true),label_reduction=exact(before_shrinking=true,before_merging=false),max_states=infinity,threshold_before_merge=1,prune_unreachable_states=false))']), - IssueConfig('dfp-ginf-pruneinit', ['--search', 'astar(merge_and_shrink(merge_strategy=merge_stateless(merge_selector=score_based_filtering(scoring_functions=[goal_relevance,dfp,total_order])),shrink_strategy=shrink_bisimulation(greedy=true),label_reduction=exact(before_shrinking=true,before_merging=false),max_states=infinity,threshold_before_merge=1,prune_unreachable_states=false))']), - IssueConfig('rl-f50k-pruneinit', ['--search', 
'astar(merge_and_shrink(merge_strategy=merge_precomputed(merge_tree=linear(variable_order=reverse_level)),shrink_strategy=shrink_fh(),label_reduction=exact(before_shrinking=false,before_merging=true),max_states=50000,prune_unreachable_states=false))']), - IssueConfig('dfp-f50k-pruneinit', ['--search', 'astar(merge_and_shrink(merge_strategy=merge_stateless(merge_selector=score_based_filtering(scoring_functions=[goal_relevance,dfp,total_order])),shrink_strategy=shrink_fh(),label_reduction=exact(before_shrinking=false,before_merging=true),max_states=50000,prune_unreachable_states=false))']), - - IssueConfig('rl-b50k-prunegoal', ['--search', 'astar(merge_and_shrink(merge_strategy=merge_precomputed(merge_tree=linear(variable_order=reverse_level)),shrink_strategy=shrink_bisimulation(greedy=false),label_reduction=exact(before_shrinking=true,before_merging=false),max_states=50000,threshold_before_merge=1,prune_irrelevant_states=false))']), - IssueConfig('dfp-b50k-prunegoal', ['--search', 'astar(merge_and_shrink(merge_strategy=merge_stateless(merge_selector=score_based_filtering(scoring_functions=[goal_relevance,dfp,total_order])),shrink_strategy=shrink_bisimulation(greedy=false),label_reduction=exact(before_shrinking=true,before_merging=false),max_states=50000,threshold_before_merge=1,prune_irrelevant_states=false))']), - IssueConfig('rl-ginf-prunegoal', ['--search', 'astar(merge_and_shrink(merge_strategy=merge_precomputed(merge_tree=linear(variable_order=reverse_level)),shrink_strategy=shrink_bisimulation(greedy=true),label_reduction=exact(before_shrinking=true,before_merging=false),max_states=infinity,threshold_before_merge=1,prune_irrelevant_states=false))']), - IssueConfig('dfp-ginf-prunegoal', ['--search', 
'astar(merge_and_shrink(merge_strategy=merge_stateless(merge_selector=score_based_filtering(scoring_functions=[goal_relevance,dfp,total_order])),shrink_strategy=shrink_bisimulation(greedy=true),label_reduction=exact(before_shrinking=true,before_merging=false),max_states=infinity,threshold_before_merge=1,prune_irrelevant_states=false))']), - IssueConfig('rl-f50k-prunegoal', ['--search', 'astar(merge_and_shrink(merge_strategy=merge_precomputed(merge_tree=linear(variable_order=reverse_level)),shrink_strategy=shrink_fh(),label_reduction=exact(before_shrinking=false,before_merging=true),max_states=50000,prune_irrelevant_states=false))']), - IssueConfig('dfp-f50k-prunegoal', ['--search', 'astar(merge_and_shrink(merge_strategy=merge_stateless(merge_selector=score_based_filtering(scoring_functions=[goal_relevance,dfp,total_order])),shrink_strategy=shrink_fh(),label_reduction=exact(before_shrinking=false,before_merging=true),max_states=50000,prune_irrelevant_states=false))']), - - IssueConfig('rl-b50k-noprune', ['--search', 'astar(merge_and_shrink(merge_strategy=merge_precomputed(merge_tree=linear(variable_order=reverse_level)),shrink_strategy=shrink_bisimulation(greedy=false),label_reduction=exact(before_shrinking=true,before_merging=false),max_states=50000,threshold_before_merge=1,prune_unreachable_states=false,prune_irrelevant_states=false))']), - IssueConfig('dfp-b50k-noprune', ['--search', 'astar(merge_and_shrink(merge_strategy=merge_stateless(merge_selector=score_based_filtering(scoring_functions=[goal_relevance,dfp,total_order])),shrink_strategy=shrink_bisimulation(greedy=false),label_reduction=exact(before_shrinking=true,before_merging=false),max_states=50000,threshold_before_merge=1,prune_unreachable_states=false,prune_irrelevant_states=false))']), - IssueConfig('rl-ginf-noprune', ['--search', 
'astar(merge_and_shrink(merge_strategy=merge_precomputed(merge_tree=linear(variable_order=reverse_level)),shrink_strategy=shrink_bisimulation(greedy=true),label_reduction=exact(before_shrinking=true,before_merging=false),max_states=infinity,threshold_before_merge=1,prune_unreachable_states=false,prune_irrelevant_states=false))']), - IssueConfig('dfp-ginf-noprune', ['--search', 'astar(merge_and_shrink(merge_strategy=merge_stateless(merge_selector=score_based_filtering(scoring_functions=[goal_relevance,dfp,total_order])),shrink_strategy=shrink_bisimulation(greedy=true),label_reduction=exact(before_shrinking=true,before_merging=false),max_states=infinity,threshold_before_merge=1,prune_unreachable_states=false,prune_irrelevant_states=false))']), - IssueConfig('rl-f50k-noprune', ['--search', 'astar(merge_and_shrink(merge_strategy=merge_precomputed(merge_tree=linear(variable_order=reverse_level)),shrink_strategy=shrink_fh(),label_reduction=exact(before_shrinking=false,before_merging=true),max_states=50000,prune_unreachable_states=false,prune_irrelevant_states=false))']), - IssueConfig('dfp-f50k-noprune', ['--search', 'astar(merge_and_shrink(merge_strategy=merge_stateless(merge_selector=score_based_filtering(scoring_functions=[goal_relevance,dfp,total_order])),shrink_strategy=shrink_fh(),label_reduction=exact(before_shrinking=false,before_merging=true),max_states=50000,prune_unreachable_states=false,prune_irrelevant_states=false))']), -] -SUITE = DEFAULT_OPTIMAL_SUITE -ENVIRONMENT = MaiaEnvironment( - priority=0, email='silvan.sievers@unibas.ch') - -if is_test_run(): - SUITE = ['depot:p01.pddl', 'depot:p02.pddl', 'parcprinter-opt11-strips:p01.pddl', 'parcprinter-opt11-strips:p02.pddl', 'mystery:prob07.pddl'] - ENVIRONMENT = LocalEnvironment(processes=4) - -exp = IssueExperiment( - revisions=REVISIONS, - configs=CONFIGS, - environment=ENVIRONMENT, -) -exp.add_resource('ms_parser', 'ms-parser.py', dest='ms-parser.py') -exp.add_command('ms-parser', ['{ms_parser}']) 
-exp.add_suite(BENCHMARKS_DIR, SUITE) - -# planner outcome attributes -perfect_heuristic = Attribute('perfect_heuristic', absolute=True, min_wins=False) - -# m&s attributes -ms_construction_time = Attribute('ms_construction_time', absolute=False, min_wins=True, functions=[geometric_mean]) -ms_atomic_construction_time = Attribute('ms_atomic_construction_time', absolute=False, min_wins=True, functions=[geometric_mean]) -ms_abstraction_constructed = Attribute('ms_abstraction_constructed', absolute=True, min_wins=False) -ms_final_size = Attribute('ms_final_size', absolute=False, min_wins=True) -ms_out_of_memory = Attribute('ms_out_of_memory', absolute=True, min_wins=True) -ms_out_of_time = Attribute('ms_out_of_time', absolute=True, min_wins=True) -search_out_of_memory = Attribute('search_out_of_memory', absolute=True, min_wins=True) -search_out_of_time = Attribute('search_out_of_time', absolute=True, min_wins=True) - -extra_attributes = [ - perfect_heuristic, - - ms_construction_time, - ms_atomic_construction_time, - ms_abstraction_constructed, - ms_final_size, - ms_out_of_memory, - ms_out_of_time, - search_out_of_memory, - search_out_of_time, -] -attributes = exp.DEFAULT_TABLE_ATTRIBUTES -attributes.extend(extra_attributes) - -exp.add_absolute_report_step(attributes=attributes) - -exp.run_steps() diff --git a/experiments/issue707/v1.py b/experiments/issue707/v1.py deleted file mode 100755 index 8b69b0fc11..0000000000 --- a/experiments/issue707/v1.py +++ /dev/null @@ -1,72 +0,0 @@ -#! /usr/bin/env python -# -*- coding: utf-8 -*- - -#! 
/usr/bin/env python -# -*- coding: utf-8 -*- - -import os - -from lab.environments import LocalEnvironment, MaiaEnvironment -from lab.reports import Attribute, geometric_mean - -from common_setup import IssueConfig, IssueExperiment, DEFAULT_OPTIMAL_SUITE, is_test_run - -BENCHMARKS_DIR=os.path.expanduser('~/repos/downward/benchmarks') -REVISIONS = ["issue707-base", "issue707-v1"] -CONFIGS = [ - IssueConfig('rl-b50k', ['--search', 'astar(merge_and_shrink(merge_strategy=merge_precomputed(merge_tree=linear(variable_order=reverse_level)),shrink_strategy=shrink_bisimulation(greedy=false),label_reduction=exact(before_shrinking=true,before_merging=false),max_states=50000,threshold_before_merge=1))']), - IssueConfig('dfp-b50k', ['--search', 'astar(merge_and_shrink(merge_strategy=merge_stateless(merge_selector=score_based_filtering(scoring_functions=[goal_relevance,dfp,total_order])),shrink_strategy=shrink_bisimulation(greedy=false),label_reduction=exact(before_shrinking=true,before_merging=false),max_states=50000,threshold_before_merge=1))']), - IssueConfig('rl-ginf', ['--search', 'astar(merge_and_shrink(merge_strategy=merge_precomputed(merge_tree=linear(variable_order=reverse_level)),shrink_strategy=shrink_bisimulation(greedy=true),label_reduction=exact(before_shrinking=true,before_merging=false),max_states=infinity,threshold_before_merge=1))']), - IssueConfig('dfp-ginf', ['--search', 'astar(merge_and_shrink(merge_strategy=merge_stateless(merge_selector=score_based_filtering(scoring_functions=[goal_relevance,dfp,total_order])),shrink_strategy=shrink_bisimulation(greedy=true),label_reduction=exact(before_shrinking=true,before_merging=false),max_states=infinity,threshold_before_merge=1))']), - IssueConfig('rl-f50k', ['--search', 'astar(merge_and_shrink(merge_strategy=merge_precomputed(merge_tree=linear(variable_order=reverse_level)),shrink_strategy=shrink_fh(),label_reduction=exact(before_shrinking=false,before_merging=true),max_states=50000))']), - IssueConfig('dfp-f50k', 
['--search', 'astar(merge_and_shrink(merge_strategy=merge_stateless(merge_selector=score_based_filtering(scoring_functions=[goal_relevance,dfp,total_order])),shrink_strategy=shrink_fh(),label_reduction=exact(before_shrinking=false,before_merging=true),max_states=50000))']), -] -SUITE = DEFAULT_OPTIMAL_SUITE -ENVIRONMENT = MaiaEnvironment( - priority=0, email='silvan.sievers@unibas.ch') - -if is_test_run(): - SUITE = ['depot:p01.pddl', 'depot:p02.pddl', 'parcprinter-opt11-strips:p01.pddl', 'parcprinter-opt11-strips:p02.pddl', 'mystery:prob07.pddl'] - ENVIRONMENT = LocalEnvironment(processes=4) - -exp = IssueExperiment( - revisions=REVISIONS, - configs=CONFIGS, - environment=ENVIRONMENT, -) -exp.add_resource('ms_parser', 'ms-parser.py', dest='ms-parser.py') -exp.add_command('ms-parser', ['{ms_parser}']) -exp.add_suite(BENCHMARKS_DIR, SUITE) - -# planner outcome attributes -perfect_heuristic = Attribute('perfect_heuristic', absolute=True, min_wins=False) - -# m&s attributes -ms_construction_time = Attribute('ms_construction_time', absolute=False, min_wins=True, functions=[geometric_mean]) -ms_atomic_construction_time = Attribute('ms_atomic_construction_time', absolute=False, min_wins=True, functions=[geometric_mean]) -ms_abstraction_constructed = Attribute('ms_abstraction_constructed', absolute=True, min_wins=False) -ms_final_size = Attribute('ms_final_size', absolute=False, min_wins=True) -ms_out_of_memory = Attribute('ms_out_of_memory', absolute=True, min_wins=True) -ms_out_of_time = Attribute('ms_out_of_time', absolute=True, min_wins=True) -search_out_of_memory = Attribute('search_out_of_memory', absolute=True, min_wins=True) -search_out_of_time = Attribute('search_out_of_time', absolute=True, min_wins=True) - -extra_attributes = [ - perfect_heuristic, - - ms_construction_time, - ms_atomic_construction_time, - ms_abstraction_constructed, - ms_final_size, - ms_out_of_memory, - ms_out_of_time, - search_out_of_memory, - search_out_of_time, -] -attributes = 
exp.DEFAULT_TABLE_ATTRIBUTES -attributes.extend(extra_attributes) - -exp.add_comparison_table_step(attributes=attributes) -exp.add_scatter_plot_step() - -exp.run_steps() diff --git a/experiments/issue707/v2-compare.py b/experiments/issue707/v2-compare.py deleted file mode 100755 index 8034ef1bd3..0000000000 --- a/experiments/issue707/v2-compare.py +++ /dev/null @@ -1,84 +0,0 @@ -#! /usr/bin/env python -# -*- coding: utf-8 -*- - -#! /usr/bin/env python -# -*- coding: utf-8 -*- - -import os -import subprocess - -from lab.environments import LocalEnvironment, MaiaEnvironment -from lab.reports import Attribute, geometric_mean - -from downward.reports.compare import ComparativeReport - -from common_setup import IssueConfig, IssueExperiment, DEFAULT_OPTIMAL_SUITE, is_test_run - -BENCHMARKS_DIR=os.path.expanduser('~/repos/downward/benchmarks') -REVISIONS = [] -CONFIGS = [] -SUITE = DEFAULT_OPTIMAL_SUITE -ENVIRONMENT = MaiaEnvironment( - priority=0, email='silvan.sievers@unibas.ch') - -if is_test_run(): - SUITE = ['depot:p01.pddl', 'depot:p02.pddl', 'parcprinter-opt11-strips:p01.pddl', 'parcprinter-opt11-strips:p02.pddl', 'mystery:prob07.pddl'] - ENVIRONMENT = LocalEnvironment(processes=4) - -exp = IssueExperiment( - revisions=REVISIONS, - configs=CONFIGS, - environment=ENVIRONMENT, -) -exp.add_resource('ms_parser', 'ms-parser.py', dest='ms-parser.py') -exp.add_command('ms-parser', ['{ms_parser}']) -exp.add_suite(BENCHMARKS_DIR, SUITE) - -# planner outcome attributes -perfect_heuristic = Attribute('perfect_heuristic', absolute=True, min_wins=False) - -# m&s attributes -ms_construction_time = Attribute('ms_construction_time', absolute=False, min_wins=True, functions=[geometric_mean]) -ms_atomic_construction_time = Attribute('ms_atomic_construction_time', absolute=False, min_wins=True, functions=[geometric_mean]) -ms_abstraction_constructed = Attribute('ms_abstraction_constructed', absolute=True, min_wins=False) -ms_final_size = Attribute('ms_final_size', absolute=False, 
min_wins=True) -ms_out_of_memory = Attribute('ms_out_of_memory', absolute=True, min_wins=True) -ms_out_of_time = Attribute('ms_out_of_time', absolute=True, min_wins=True) -search_out_of_memory = Attribute('search_out_of_memory', absolute=True, min_wins=True) -search_out_of_time = Attribute('search_out_of_time', absolute=True, min_wins=True) - -extra_attributes = [ - perfect_heuristic, - - ms_construction_time, - ms_atomic_construction_time, - ms_abstraction_constructed, - ms_final_size, - ms_out_of_memory, - ms_out_of_time, - search_out_of_memory, - search_out_of_time, -] -attributes = exp.DEFAULT_TABLE_ATTRIBUTES -attributes.extend(extra_attributes) - -exp.add_fetcher('data/issue707-v1-eval') -exp.add_fetcher('data/issue707-v2-pruning-variants-eval') - -outfile = os.path.join( - exp.eval_dir, - "issue707-v1-v2-dfp-compare.html") -exp.add_report(ComparativeReport(algorithm_pairs=[ - ('%s-dfp-b50k' % 'issue707-v1', '%s-dfp-b50k-nopruneunreachable' % 'issue707-v2'), - ('%s-dfp-b50k' % 'issue707-v1', '%s-dfp-b50k-nopruneirrelevant' % 'issue707-v2'), - ('%s-dfp-b50k' % 'issue707-v1', '%s-dfp-b50k-noprune' % 'issue707-v2'), - #('%s-dfp-f50k' % 'issue707-v1', '%s-dfp-f50k-nopruneunreachable' % 'issue707-v2'), - #('%s-dfp-f50k' % 'issue707-v1', '%s-dfp-f50k-nopruneirrelevant' % 'issue707-v2'), - #('%s-dfp-f50k' % 'issue707-v1', '%s-dfp-f50k-noprune' % 'issue707-v2'), - #('%s-dfp-ginf' % 'issue707-v1', '%s-dfp-ginf-nopruneunreachable' % 'issue707-v2'), - #('%s-dfp-ginf' % 'issue707-v1', '%s-dfp-ginf-nopruneirrelevant' % 'issue707-v2'), - #('%s-dfp-ginf' % 'issue707-v1', '%s-dfp-ginf-noprune' % 'issue707-v2'), -],attributes=attributes),outfile=outfile) -exp.add_step('publish-issue707-v1-v2-dfp-compare.html', subprocess.call, ['publish', outfile]) - -exp.run_steps() diff --git a/experiments/issue707/v2-pruning-variants.py b/experiments/issue707/v2-pruning-variants.py deleted file mode 100755 index 2a3df4b2af..0000000000 --- a/experiments/issue707/v2-pruning-variants.py +++ 
/dev/null @@ -1,85 +0,0 @@ -#! /usr/bin/env python -# -*- coding: utf-8 -*- - -#! /usr/bin/env python -# -*- coding: utf-8 -*- - -import os - -from lab.environments import LocalEnvironment, MaiaEnvironment -from lab.reports import Attribute, geometric_mean - -from common_setup import IssueConfig, IssueExperiment, DEFAULT_OPTIMAL_SUITE, is_test_run - -BENCHMARKS_DIR=os.path.expanduser('~/repos/downward/benchmarks') -REVISIONS = ["issue707-v2"] -CONFIGS = [ - IssueConfig('rl-b50k-nopruneunreachable', ['--search', 'astar(merge_and_shrink(merge_strategy=merge_precomputed(merge_tree=linear(variable_order=reverse_level)),shrink_strategy=shrink_bisimulation(greedy=false),label_reduction=exact(before_shrinking=true,before_merging=false),max_states=50000,threshold_before_merge=1,prune_unreachable_states=false))']), - IssueConfig('dfp-b50k-nopruneunreachable', ['--search', 'astar(merge_and_shrink(merge_strategy=merge_stateless(merge_selector=score_based_filtering(scoring_functions=[goal_relevance,dfp,total_order])),shrink_strategy=shrink_bisimulation(greedy=false),label_reduction=exact(before_shrinking=true,before_merging=false),max_states=50000,threshold_before_merge=1,prune_unreachable_states=false))']), - IssueConfig('rl-ginf-nopruneunreachable', ['--search', 'astar(merge_and_shrink(merge_strategy=merge_precomputed(merge_tree=linear(variable_order=reverse_level)),shrink_strategy=shrink_bisimulation(greedy=true),label_reduction=exact(before_shrinking=true,before_merging=false),max_states=infinity,threshold_before_merge=1,prune_unreachable_states=false))']), - IssueConfig('dfp-ginf-nopruneunreachable', ['--search', 'astar(merge_and_shrink(merge_strategy=merge_stateless(merge_selector=score_based_filtering(scoring_functions=[goal_relevance,dfp,total_order])),shrink_strategy=shrink_bisimulation(greedy=true),label_reduction=exact(before_shrinking=true,before_merging=false),max_states=infinity,threshold_before_merge=1,prune_unreachable_states=false))']), - 
IssueConfig('rl-f50k-nopruneunreachable', ['--search', 'astar(merge_and_shrink(merge_strategy=merge_precomputed(merge_tree=linear(variable_order=reverse_level)),shrink_strategy=shrink_fh(),label_reduction=exact(before_shrinking=false,before_merging=true),max_states=50000,prune_unreachable_states=false))']), - IssueConfig('dfp-f50k-nopruneunreachable', ['--search', 'astar(merge_and_shrink(merge_strategy=merge_stateless(merge_selector=score_based_filtering(scoring_functions=[goal_relevance,dfp,total_order])),shrink_strategy=shrink_fh(),label_reduction=exact(before_shrinking=false,before_merging=true),max_states=50000,prune_unreachable_states=false))']), - - IssueConfig('rl-b50k-nopruneirrelevant', ['--search', 'astar(merge_and_shrink(merge_strategy=merge_precomputed(merge_tree=linear(variable_order=reverse_level)),shrink_strategy=shrink_bisimulation(greedy=false),label_reduction=exact(before_shrinking=true,before_merging=false),max_states=50000,threshold_before_merge=1,prune_irrelevant_states=false))']), - IssueConfig('dfp-b50k-nopruneirrelevant', ['--search', 'astar(merge_and_shrink(merge_strategy=merge_stateless(merge_selector=score_based_filtering(scoring_functions=[goal_relevance,dfp,total_order])),shrink_strategy=shrink_bisimulation(greedy=false),label_reduction=exact(before_shrinking=true,before_merging=false),max_states=50000,threshold_before_merge=1,prune_irrelevant_states=false))']), - IssueConfig('rl-ginf-nopruneirrelevant', ['--search', 'astar(merge_and_shrink(merge_strategy=merge_precomputed(merge_tree=linear(variable_order=reverse_level)),shrink_strategy=shrink_bisimulation(greedy=true),label_reduction=exact(before_shrinking=true,before_merging=false),max_states=infinity,threshold_before_merge=1,prune_irrelevant_states=false))']), - IssueConfig('dfp-ginf-nopruneirrelevant', ['--search', 
'astar(merge_and_shrink(merge_strategy=merge_stateless(merge_selector=score_based_filtering(scoring_functions=[goal_relevance,dfp,total_order])),shrink_strategy=shrink_bisimulation(greedy=true),label_reduction=exact(before_shrinking=true,before_merging=false),max_states=infinity,threshold_before_merge=1,prune_irrelevant_states=false))']), - IssueConfig('rl-f50k-nopruneirrelevant', ['--search', 'astar(merge_and_shrink(merge_strategy=merge_precomputed(merge_tree=linear(variable_order=reverse_level)),shrink_strategy=shrink_fh(),label_reduction=exact(before_shrinking=false,before_merging=true),max_states=50000,prune_irrelevant_states=false))']), - IssueConfig('dfp-f50k-nopruneirrelevant', ['--search', 'astar(merge_and_shrink(merge_strategy=merge_stateless(merge_selector=score_based_filtering(scoring_functions=[goal_relevance,dfp,total_order])),shrink_strategy=shrink_fh(),label_reduction=exact(before_shrinking=false,before_merging=true),max_states=50000,prune_irrelevant_states=false))']), - - IssueConfig('rl-b50k-noprune', ['--search', 'astar(merge_and_shrink(merge_strategy=merge_precomputed(merge_tree=linear(variable_order=reverse_level)),shrink_strategy=shrink_bisimulation(greedy=false),label_reduction=exact(before_shrinking=true,before_merging=false),max_states=50000,threshold_before_merge=1,prune_unreachable_states=false,prune_irrelevant_states=false))']), - IssueConfig('dfp-b50k-noprune', ['--search', 'astar(merge_and_shrink(merge_strategy=merge_stateless(merge_selector=score_based_filtering(scoring_functions=[goal_relevance,dfp,total_order])),shrink_strategy=shrink_bisimulation(greedy=false),label_reduction=exact(before_shrinking=true,before_merging=false),max_states=50000,threshold_before_merge=1,prune_unreachable_states=false,prune_irrelevant_states=false))']), - IssueConfig('rl-ginf-noprune', ['--search', 
'astar(merge_and_shrink(merge_strategy=merge_precomputed(merge_tree=linear(variable_order=reverse_level)),shrink_strategy=shrink_bisimulation(greedy=true),label_reduction=exact(before_shrinking=true,before_merging=false),max_states=infinity,threshold_before_merge=1,prune_unreachable_states=false,prune_irrelevant_states=false))']), - IssueConfig('dfp-ginf-noprune', ['--search', 'astar(merge_and_shrink(merge_strategy=merge_stateless(merge_selector=score_based_filtering(scoring_functions=[goal_relevance,dfp,total_order])),shrink_strategy=shrink_bisimulation(greedy=true),label_reduction=exact(before_shrinking=true,before_merging=false),max_states=infinity,threshold_before_merge=1,prune_unreachable_states=false,prune_irrelevant_states=false))']), - IssueConfig('rl-f50k-noprune', ['--search', 'astar(merge_and_shrink(merge_strategy=merge_precomputed(merge_tree=linear(variable_order=reverse_level)),shrink_strategy=shrink_fh(),label_reduction=exact(before_shrinking=false,before_merging=true),max_states=50000,prune_unreachable_states=false,prune_irrelevant_states=false))']), - IssueConfig('dfp-f50k-noprune', ['--search', 'astar(merge_and_shrink(merge_strategy=merge_stateless(merge_selector=score_based_filtering(scoring_functions=[goal_relevance,dfp,total_order])),shrink_strategy=shrink_fh(),label_reduction=exact(before_shrinking=false,before_merging=true),max_states=50000,prune_unreachable_states=false,prune_irrelevant_states=false))']), -] -SUITE = DEFAULT_OPTIMAL_SUITE -ENVIRONMENT = MaiaEnvironment( - priority=0, email='silvan.sievers@unibas.ch') - -if is_test_run(): - SUITE = ['depot:p01.pddl', 'depot:p02.pddl', 'parcprinter-opt11-strips:p01.pddl', 'parcprinter-opt11-strips:p02.pddl', 'mystery:prob07.pddl'] - ENVIRONMENT = LocalEnvironment(processes=4) - -exp = IssueExperiment( - revisions=REVISIONS, - configs=CONFIGS, - environment=ENVIRONMENT, -) -exp.add_resource('ms_parser', 'ms-parser.py', dest='ms-parser.py') -exp.add_command('ms-parser', ['{ms_parser}']) 
-exp.add_suite(BENCHMARKS_DIR, SUITE) - -# planner outcome attributes -perfect_heuristic = Attribute('perfect_heuristic', absolute=True, min_wins=False) - -# m&s attributes -ms_construction_time = Attribute('ms_construction_time', absolute=False, min_wins=True, functions=[geometric_mean]) -ms_atomic_construction_time = Attribute('ms_atomic_construction_time', absolute=False, min_wins=True, functions=[geometric_mean]) -ms_abstraction_constructed = Attribute('ms_abstraction_constructed', absolute=True, min_wins=False) -ms_final_size = Attribute('ms_final_size', absolute=False, min_wins=True) -ms_out_of_memory = Attribute('ms_out_of_memory', absolute=True, min_wins=True) -ms_out_of_time = Attribute('ms_out_of_time', absolute=True, min_wins=True) -search_out_of_memory = Attribute('search_out_of_memory', absolute=True, min_wins=True) -search_out_of_time = Attribute('search_out_of_time', absolute=True, min_wins=True) - -extra_attributes = [ - perfect_heuristic, - - ms_construction_time, - ms_atomic_construction_time, - ms_abstraction_constructed, - ms_final_size, - ms_out_of_memory, - ms_out_of_time, - search_out_of_memory, - search_out_of_time, -] -attributes = exp.DEFAULT_TABLE_ATTRIBUTES -attributes.extend(extra_attributes) - -exp.add_absolute_report_step(attributes=attributes) - -exp.run_steps() diff --git a/experiments/issue707/v3-pruning-variants.py b/experiments/issue707/v3-pruning-variants.py deleted file mode 100755 index 4c30383a70..0000000000 --- a/experiments/issue707/v3-pruning-variants.py +++ /dev/null @@ -1,94 +0,0 @@ -#! /usr/bin/env python -# -*- coding: utf-8 -*- - -#! 
/usr/bin/env python -# -*- coding: utf-8 -*- - -import os - -from lab.environments import LocalEnvironment, MaiaEnvironment -from lab.reports import Attribute, geometric_mean - -from common_setup import IssueConfig, IssueExperiment, DEFAULT_OPTIMAL_SUITE, is_test_run - -BENCHMARKS_DIR=os.path.expanduser('~/repos/downward/benchmarks') -REVISIONS = ["issue707-v3"] -CONFIGS = [ - IssueConfig('rl-b50k-nopruneunreachable', ['--search', 'astar(merge_and_shrink(merge_strategy=merge_precomputed(merge_tree=linear(variable_order=reverse_level)),shrink_strategy=shrink_bisimulation(greedy=false),label_reduction=exact(before_shrinking=true,before_merging=false),max_states=50000,threshold_before_merge=1,prune_unreachable_states=false))']), - IssueConfig('dfp-b50k-nopruneunreachable', ['--search', 'astar(merge_and_shrink(merge_strategy=merge_stateless(merge_selector=score_based_filtering(scoring_functions=[goal_relevance,dfp,total_order])),shrink_strategy=shrink_bisimulation(greedy=false),label_reduction=exact(before_shrinking=true,before_merging=false),max_states=50000,threshold_before_merge=1,prune_unreachable_states=false))']), - IssueConfig('sccs-dfp-b50k-nopruneunreachable', ['--search', 'astar(merge_and_shrink(shrink_strategy=shrink_bisimulation(greedy=false),merge_strategy=merge_sccs(order_of_sccs=topological,merge_selector=score_based_filtering(scoring_functions=[goal_relevance,dfp,total_order(atomic_ts_order=reverse_level,product_ts_order=new_to_old,atomic_before_product=false)])),label_reduction=exact(before_shrinking=true,before_merging=false),max_states=50000,threshold_before_merge=1,prune_unreachable_states=false))']), - IssueConfig('rl-ginf-nopruneunreachable', ['--search', 
'astar(merge_and_shrink(merge_strategy=merge_precomputed(merge_tree=linear(variable_order=reverse_level)),shrink_strategy=shrink_bisimulation(greedy=true),label_reduction=exact(before_shrinking=true,before_merging=false),max_states=infinity,threshold_before_merge=1,prune_unreachable_states=false))']), - IssueConfig('dfp-ginf-nopruneunreachable', ['--search', 'astar(merge_and_shrink(merge_strategy=merge_stateless(merge_selector=score_based_filtering(scoring_functions=[goal_relevance,dfp,total_order])),shrink_strategy=shrink_bisimulation(greedy=true),label_reduction=exact(before_shrinking=true,before_merging=false),max_states=infinity,threshold_before_merge=1,prune_unreachable_states=false))']), - IssueConfig('sccs-dfp-ginf-nopruneunreachable', ['--search', 'astar(merge_and_shrink(shrink_strategy=shrink_bisimulation(greedy=true),merge_strategy=merge_sccs(order_of_sccs=topological,merge_selector=score_based_filtering(scoring_functions=[goal_relevance,dfp,total_order(atomic_ts_order=reverse_level,product_ts_order=new_to_old,atomic_before_product=false)])),label_reduction=exact(before_shrinking=true,before_merging=false),max_states=infinity,threshold_before_merge=1,prune_unreachable_states=false))']), - IssueConfig('rl-f50k-nopruneunreachable', ['--search', 'astar(merge_and_shrink(merge_strategy=merge_precomputed(merge_tree=linear(variable_order=reverse_level)),shrink_strategy=shrink_fh(),label_reduction=exact(before_shrinking=false,before_merging=true),max_states=50000,prune_unreachable_states=false))']), - IssueConfig('dfp-f50k-nopruneunreachable', ['--search', 'astar(merge_and_shrink(merge_strategy=merge_stateless(merge_selector=score_based_filtering(scoring_functions=[goal_relevance,dfp,total_order])),shrink_strategy=shrink_fh(),label_reduction=exact(before_shrinking=false,before_merging=true),max_states=50000,prune_unreachable_states=false))']), - IssueConfig('sccs-dfp-f50k-nopruneunreachable', ['--search', 
'astar(merge_and_shrink(shrink_strategy=shrink_fh(),merge_strategy=merge_sccs(order_of_sccs=topological,merge_selector=score_based_filtering(scoring_functions=[goal_relevance,dfp,total_order(atomic_ts_order=reverse_level,product_ts_order=new_to_old,atomic_before_product=false)])),label_reduction=exact(before_shrinking=true,before_merging=false),max_states=50000,prune_unreachable_states=false))']), - - IssueConfig('rl-b50k-nopruneirrelevant', ['--search', 'astar(merge_and_shrink(merge_strategy=merge_precomputed(merge_tree=linear(variable_order=reverse_level)),shrink_strategy=shrink_bisimulation(greedy=false),label_reduction=exact(before_shrinking=true,before_merging=false),max_states=50000,threshold_before_merge=1,prune_irrelevant_states=false))']), - IssueConfig('dfp-b50k-nopruneirrelevant', ['--search', 'astar(merge_and_shrink(merge_strategy=merge_stateless(merge_selector=score_based_filtering(scoring_functions=[goal_relevance,dfp,total_order])),shrink_strategy=shrink_bisimulation(greedy=false),label_reduction=exact(before_shrinking=true,before_merging=false),max_states=50000,threshold_before_merge=1,prune_irrelevant_states=false))']), - IssueConfig('sccs-dfp-b50k-nopruneirrelevant', ['--search', 'astar(merge_and_shrink(shrink_strategy=shrink_bisimulation(greedy=false),merge_strategy=merge_sccs(order_of_sccs=topological,merge_selector=score_based_filtering(scoring_functions=[goal_relevance,dfp,total_order(atomic_ts_order=reverse_level,product_ts_order=new_to_old,atomic_before_product=false)])),label_reduction=exact(before_shrinking=true,before_merging=false),max_states=50000,threshold_before_merge=1,prune_irrelevant_states=false))']), - IssueConfig('rl-ginf-nopruneirrelevant', ['--search', 
'astar(merge_and_shrink(merge_strategy=merge_precomputed(merge_tree=linear(variable_order=reverse_level)),shrink_strategy=shrink_bisimulation(greedy=true),label_reduction=exact(before_shrinking=true,before_merging=false),max_states=infinity,threshold_before_merge=1,prune_irrelevant_states=false))']), - IssueConfig('dfp-ginf-nopruneirrelevant', ['--search', 'astar(merge_and_shrink(merge_strategy=merge_stateless(merge_selector=score_based_filtering(scoring_functions=[goal_relevance,dfp,total_order])),shrink_strategy=shrink_bisimulation(greedy=true),label_reduction=exact(before_shrinking=true,before_merging=false),max_states=infinity,threshold_before_merge=1,prune_irrelevant_states=false))']), - IssueConfig('sccs-dfp-ginf-nopruneirrelevant', ['--search', 'astar(merge_and_shrink(shrink_strategy=shrink_bisimulation(greedy=true),merge_strategy=merge_sccs(order_of_sccs=topological,merge_selector=score_based_filtering(scoring_functions=[goal_relevance,dfp,total_order(atomic_ts_order=reverse_level,product_ts_order=new_to_old,atomic_before_product=false)])),label_reduction=exact(before_shrinking=true,before_merging=false),max_states=infinity,threshold_before_merge=1,prune_irrelevant_states=false))']), - IssueConfig('rl-f50k-nopruneirrelevant', ['--search', 'astar(merge_and_shrink(merge_strategy=merge_precomputed(merge_tree=linear(variable_order=reverse_level)),shrink_strategy=shrink_fh(),label_reduction=exact(before_shrinking=false,before_merging=true),max_states=50000,prune_irrelevant_states=false))']), - IssueConfig('dfp-f50k-nopruneirrelevant', ['--search', 'astar(merge_and_shrink(merge_strategy=merge_stateless(merge_selector=score_based_filtering(scoring_functions=[goal_relevance,dfp,total_order])),shrink_strategy=shrink_fh(),label_reduction=exact(before_shrinking=false,before_merging=true),max_states=50000,prune_irrelevant_states=false))']), - IssueConfig('sccs-dfp-f50k-nopruneirrelevant', ['--search', 
'astar(merge_and_shrink(shrink_strategy=shrink_fh(),merge_strategy=merge_sccs(order_of_sccs=topological,merge_selector=score_based_filtering(scoring_functions=[goal_relevance,dfp,total_order(atomic_ts_order=reverse_level,product_ts_order=new_to_old,atomic_before_product=false)])),label_reduction=exact(before_shrinking=true,before_merging=false),max_states=50000,prune_irrelevant_states=false))']), - - IssueConfig('rl-b50k-noprune', ['--search', 'astar(merge_and_shrink(merge_strategy=merge_precomputed(merge_tree=linear(variable_order=reverse_level)),shrink_strategy=shrink_bisimulation(greedy=false),label_reduction=exact(before_shrinking=true,before_merging=false),max_states=50000,threshold_before_merge=1,prune_unreachable_states=false,prune_irrelevant_states=false))']), - IssueConfig('dfp-b50k-noprune', ['--search', 'astar(merge_and_shrink(merge_strategy=merge_stateless(merge_selector=score_based_filtering(scoring_functions=[goal_relevance,dfp,total_order])),shrink_strategy=shrink_bisimulation(greedy=false),label_reduction=exact(before_shrinking=true,before_merging=false),max_states=50000,threshold_before_merge=1,prune_unreachable_states=false,prune_irrelevant_states=false))']), - IssueConfig('sccs-dfp-b50k-noprune', ['--search', 'astar(merge_and_shrink(shrink_strategy=shrink_bisimulation(greedy=false),merge_strategy=merge_sccs(order_of_sccs=topological,merge_selector=score_based_filtering(scoring_functions=[goal_relevance,dfp,total_order(atomic_ts_order=reverse_level,product_ts_order=new_to_old,atomic_before_product=false)])),label_reduction=exact(before_shrinking=true,before_merging=false),max_states=50000,threshold_before_merge=1,prune_unreachable_states=false,prune_irrelevant_states=false))']), - IssueConfig('rl-ginf-noprune', ['--search', 
'astar(merge_and_shrink(merge_strategy=merge_precomputed(merge_tree=linear(variable_order=reverse_level)),shrink_strategy=shrink_bisimulation(greedy=true),label_reduction=exact(before_shrinking=true,before_merging=false),max_states=infinity,threshold_before_merge=1,prune_unreachable_states=false,prune_irrelevant_states=false))']), - IssueConfig('dfp-ginf-noprune', ['--search', 'astar(merge_and_shrink(merge_strategy=merge_stateless(merge_selector=score_based_filtering(scoring_functions=[goal_relevance,dfp,total_order])),shrink_strategy=shrink_bisimulation(greedy=true),label_reduction=exact(before_shrinking=true,before_merging=false),max_states=infinity,threshold_before_merge=1,prune_unreachable_states=false,prune_irrelevant_states=false))']), - IssueConfig('sccs-dfp-ginf-noprune', ['--search', 'astar(merge_and_shrink(shrink_strategy=shrink_bisimulation(greedy=true),merge_strategy=merge_sccs(order_of_sccs=topological,merge_selector=score_based_filtering(scoring_functions=[goal_relevance,dfp,total_order(atomic_ts_order=reverse_level,product_ts_order=new_to_old,atomic_before_product=false)])),label_reduction=exact(before_shrinking=true,before_merging=false),max_states=infinity,threshold_before_merge=1,prune_unreachable_states=false,prune_irrelevant_states=false))']), - IssueConfig('rl-f50k-noprune', ['--search', 'astar(merge_and_shrink(merge_strategy=merge_precomputed(merge_tree=linear(variable_order=reverse_level)),shrink_strategy=shrink_fh(),label_reduction=exact(before_shrinking=false,before_merging=true),max_states=50000,prune_unreachable_states=false,prune_irrelevant_states=false))']), - IssueConfig('dfp-f50k-noprune', ['--search', 'astar(merge_and_shrink(merge_strategy=merge_stateless(merge_selector=score_based_filtering(scoring_functions=[goal_relevance,dfp,total_order])),shrink_strategy=shrink_fh(),label_reduction=exact(before_shrinking=false,before_merging=true),max_states=50000,prune_unreachable_states=false,prune_irrelevant_states=false))']), - 
IssueConfig('sccs-dfp-f50k-noprune', ['--search', 'astar(merge_and_shrink(shrink_strategy=shrink_fh(),merge_strategy=merge_sccs(order_of_sccs=topological,merge_selector=score_based_filtering(scoring_functions=[goal_relevance,dfp,total_order(atomic_ts_order=reverse_level,product_ts_order=new_to_old,atomic_before_product=false)])),label_reduction=exact(before_shrinking=true,before_merging=false),max_states=50000,prune_unreachable_states=false,prune_irrelevant_states=false))']), -] -SUITE = DEFAULT_OPTIMAL_SUITE -ENVIRONMENT = MaiaEnvironment( - priority=-100, email='silvan.sievers@unibas.ch') - -if is_test_run(): - SUITE = ['depot:p01.pddl', 'depot:p02.pddl', 'parcprinter-opt11-strips:p01.pddl', 'parcprinter-opt11-strips:p02.pddl', 'mystery:prob07.pddl'] - ENVIRONMENT = LocalEnvironment(processes=4) - -exp = IssueExperiment( - revisions=REVISIONS, - configs=CONFIGS, - environment=ENVIRONMENT, -) -exp.add_resource('ms_parser', 'ms-parser.py', dest='ms-parser.py') -exp.add_command('ms-parser', ['{ms_parser}']) -exp.add_suite(BENCHMARKS_DIR, SUITE) - -# planner outcome attributes -perfect_heuristic = Attribute('perfect_heuristic', absolute=True, min_wins=False) - -# m&s attributes -ms_construction_time = Attribute('ms_construction_time', absolute=False, min_wins=True, functions=[geometric_mean]) -ms_atomic_construction_time = Attribute('ms_atomic_construction_time', absolute=False, min_wins=True, functions=[geometric_mean]) -ms_abstraction_constructed = Attribute('ms_abstraction_constructed', absolute=True, min_wins=False) -ms_final_size = Attribute('ms_final_size', absolute=False, min_wins=True) -ms_out_of_memory = Attribute('ms_out_of_memory', absolute=True, min_wins=True) -ms_out_of_time = Attribute('ms_out_of_time', absolute=True, min_wins=True) -search_out_of_memory = Attribute('search_out_of_memory', absolute=True, min_wins=True) -search_out_of_time = Attribute('search_out_of_time', absolute=True, min_wins=True) - -extra_attributes = [ - perfect_heuristic, - - 
ms_construction_time, - ms_atomic_construction_time, - ms_abstraction_constructed, - ms_final_size, - ms_out_of_memory, - ms_out_of_time, - search_out_of_memory, - search_out_of_time, -] -attributes = exp.DEFAULT_TABLE_ATTRIBUTES -attributes.extend(extra_attributes) - -exp.add_absolute_report_step(attributes=attributes) - -exp.run_steps() diff --git a/experiments/issue707/v3.py b/experiments/issue707/v3.py deleted file mode 100755 index 12af697a7f..0000000000 --- a/experiments/issue707/v3.py +++ /dev/null @@ -1,77 +0,0 @@ -#! /usr/bin/env python -# -*- coding: utf-8 -*- - -#! /usr/bin/env python -# -*- coding: utf-8 -*- - -import os - -from lab.environments import LocalEnvironment, MaiaEnvironment -from lab.reports import Attribute, geometric_mean - -from common_setup import IssueConfig, IssueExperiment, DEFAULT_OPTIMAL_SUITE, is_test_run - -BENCHMARKS_DIR=os.path.expanduser('~/repos/downward/benchmarks') -REVISIONS = ["issue707-base-v2", "issue707-v3"] -CONFIGS = [ - IssueConfig('dfp-b50k', ['--search', 'astar(merge_and_shrink(merge_strategy=merge_stateless(merge_selector=score_based_filtering(scoring_functions=[goal_relevance,dfp,total_order])),shrink_strategy=shrink_bisimulation(greedy=false),label_reduction=exact(before_shrinking=true,before_merging=false),max_states=50000,threshold_before_merge=1))']), - IssueConfig('rl-b50k', ['--search', 'astar(merge_and_shrink(merge_strategy=merge_precomputed(merge_tree=linear(variable_order=reverse_level)),shrink_strategy=shrink_bisimulation(greedy=false),label_reduction=exact(before_shrinking=true,before_merging=false),max_states=50000,threshold_before_merge=1))']), - IssueConfig('sccs-dfp-b50k', ['--search', 
'astar(merge_and_shrink(shrink_strategy=shrink_bisimulation(greedy=false),merge_strategy=merge_sccs(order_of_sccs=topological,merge_selector=score_based_filtering(scoring_functions=[goal_relevance,dfp,total_order(atomic_ts_order=reverse_level,product_ts_order=new_to_old,atomic_before_product=false)])),label_reduction=exact(before_shrinking=true,before_merging=false),max_states=50000,threshold_before_merge=1))']), - - IssueConfig('dfp-ginf', ['--search', 'astar(merge_and_shrink(merge_strategy=merge_stateless(merge_selector=score_based_filtering(scoring_functions=[goal_relevance,dfp,total_order])),shrink_strategy=shrink_bisimulation(greedy=true),label_reduction=exact(before_shrinking=true,before_merging=false),max_states=infinity,threshold_before_merge=1))']), - IssueConfig('rl-ginf', ['--search', 'astar(merge_and_shrink(merge_strategy=merge_precomputed(merge_tree=linear(variable_order=reverse_level)),shrink_strategy=shrink_bisimulation(greedy=true),label_reduction=exact(before_shrinking=true,before_merging=false),max_states=infinity,threshold_before_merge=1))']), - IssueConfig('sccs-dfp-ginf', ['--search', 'astar(merge_and_shrink(shrink_strategy=shrink_bisimulation(greedy=true),merge_strategy=merge_sccs(order_of_sccs=topological,merge_selector=score_based_filtering(scoring_functions=[goal_relevance,dfp,total_order(atomic_ts_order=reverse_level,product_ts_order=new_to_old,atomic_before_product=false)])),label_reduction=exact(before_shrinking=true,before_merging=false),max_states=infinity,threshold_before_merge=1))']), - - IssueConfig('dfp-f50k', ['--search', 'astar(merge_and_shrink(merge_strategy=merge_stateless(merge_selector=score_based_filtering(scoring_functions=[goal_relevance,dfp,total_order])),shrink_strategy=shrink_fh(),label_reduction=exact(before_shrinking=false,before_merging=true),max_states=50000))']), - IssueConfig('rl-f50k', ['--search', 
'astar(merge_and_shrink(merge_strategy=merge_precomputed(merge_tree=linear(variable_order=reverse_level)),shrink_strategy=shrink_fh(),label_reduction=exact(before_shrinking=false,before_merging=true),max_states=50000))']), - IssueConfig('sccs-dfp-f50k', ['--search', 'astar(merge_and_shrink(shrink_strategy=shrink_fh(),merge_strategy=merge_sccs(order_of_sccs=topological,merge_selector=score_based_filtering(scoring_functions=[goal_relevance,dfp,total_order(atomic_ts_order=reverse_level,product_ts_order=new_to_old,atomic_before_product=false)])),label_reduction=exact(before_shrinking=true,before_merging=false),max_states=50000))']), -] -SUITE = DEFAULT_OPTIMAL_SUITE -ENVIRONMENT = MaiaEnvironment( - priority=-100, email='silvan.sievers@unibas.ch') - -if is_test_run(): - SUITE = ['depot:p01.pddl', 'depot:p02.pddl', 'parcprinter-opt11-strips:p01.pddl', 'parcprinter-opt11-strips:p02.pddl', 'mystery:prob07.pddl'] - ENVIRONMENT = LocalEnvironment(processes=4) - -exp = IssueExperiment( - revisions=REVISIONS, - configs=CONFIGS, - environment=ENVIRONMENT, -) -exp.add_resource('ms_parser', 'ms-parser.py', dest='ms-parser.py') -exp.add_command('ms-parser', ['{ms_parser}']) -exp.add_suite(BENCHMARKS_DIR, SUITE) - -# planner outcome attributes -perfect_heuristic = Attribute('perfect_heuristic', absolute=True, min_wins=False) - -# m&s attributes -ms_construction_time = Attribute('ms_construction_time', absolute=False, min_wins=True, functions=[geometric_mean]) -ms_atomic_construction_time = Attribute('ms_atomic_construction_time', absolute=False, min_wins=True, functions=[geometric_mean]) -ms_abstraction_constructed = Attribute('ms_abstraction_constructed', absolute=True, min_wins=False) -ms_final_size = Attribute('ms_final_size', absolute=False, min_wins=True) -ms_out_of_memory = Attribute('ms_out_of_memory', absolute=True, min_wins=True) -ms_out_of_time = Attribute('ms_out_of_time', absolute=True, min_wins=True) -search_out_of_memory = Attribute('search_out_of_memory', 
absolute=True, min_wins=True) -search_out_of_time = Attribute('search_out_of_time', absolute=True, min_wins=True) - -extra_attributes = [ - perfect_heuristic, - - ms_construction_time, - ms_atomic_construction_time, - ms_abstraction_constructed, - ms_final_size, - ms_out_of_memory, - ms_out_of_time, - search_out_of_memory, - search_out_of_time, -] -attributes = exp.DEFAULT_TABLE_ATTRIBUTES -attributes.extend(extra_attributes) - -exp.add_comparison_table_step(attributes=attributes) -exp.add_scatter_plot_step() - -exp.run_steps() diff --git a/experiments/issue707/v3a-pruning-variants.py b/experiments/issue707/v3a-pruning-variants.py deleted file mode 100755 index 8f4f2d8881..0000000000 --- a/experiments/issue707/v3a-pruning-variants.py +++ /dev/null @@ -1,98 +0,0 @@ -#! /usr/bin/env python -# -*- coding: utf-8 -*- - -#! /usr/bin/env python -# -*- coding: utf-8 -*- - -import os - -from lab.environments import LocalEnvironment, MaiaEnvironment -from lab.reports import Attribute, geometric_mean - -from common_setup import IssueConfig, IssueExperiment, DEFAULT_OPTIMAL_SUITE, is_test_run - -BENCHMARKS_DIR=os.path.expanduser('~/repos/downward/benchmarks') -REVISIONS = ["issue707-v3a"] -CONFIGS = [ - IssueConfig('rl-b50k-nopruneunreachable', ['--search', 'astar(merge_and_shrink(merge_strategy=merge_precomputed(merge_tree=linear(variable_order=reverse_level)),shrink_strategy=shrink_bisimulation(greedy=false),label_reduction=exact(before_shrinking=true,before_merging=false),max_states=50000,threshold_before_merge=1,prune_unreachable_states=false))']), - IssueConfig('dfp-b50k-nopruneunreachable', ['--search', 'astar(merge_and_shrink(merge_strategy=merge_stateless(merge_selector=score_based_filtering(scoring_functions=[goal_relevance,dfp,total_order])),shrink_strategy=shrink_bisimulation(greedy=false),label_reduction=exact(before_shrinking=true,before_merging=false),max_states=50000,threshold_before_merge=1,prune_unreachable_states=false))']), - 
IssueConfig('sccs-dfp-b50k-nopruneunreachable', ['--search', 'astar(merge_and_shrink(shrink_strategy=shrink_bisimulation(greedy=false),merge_strategy=merge_sccs(order_of_sccs=topological,merge_selector=score_based_filtering(scoring_functions=[goal_relevance,dfp,total_order(atomic_ts_order=reverse_level,product_ts_order=new_to_old,atomic_before_product=false)])),label_reduction=exact(before_shrinking=true,before_merging=false),max_states=50000,threshold_before_merge=1,prune_unreachable_states=false))']), - IssueConfig('rl-ginf-nopruneunreachable', ['--search', 'astar(merge_and_shrink(merge_strategy=merge_precomputed(merge_tree=linear(variable_order=reverse_level)),shrink_strategy=shrink_bisimulation(greedy=true),label_reduction=exact(before_shrinking=true,before_merging=false),max_states=infinity,threshold_before_merge=1,prune_unreachable_states=false))']), - IssueConfig('dfp-ginf-nopruneunreachable', ['--search', 'astar(merge_and_shrink(merge_strategy=merge_stateless(merge_selector=score_based_filtering(scoring_functions=[goal_relevance,dfp,total_order])),shrink_strategy=shrink_bisimulation(greedy=true),label_reduction=exact(before_shrinking=true,before_merging=false),max_states=infinity,threshold_before_merge=1,prune_unreachable_states=false))']), - IssueConfig('sccs-dfp-ginf-nopruneunreachable', ['--search', 'astar(merge_and_shrink(shrink_strategy=shrink_bisimulation(greedy=true),merge_strategy=merge_sccs(order_of_sccs=topological,merge_selector=score_based_filtering(scoring_functions=[goal_relevance,dfp,total_order(atomic_ts_order=reverse_level,product_ts_order=new_to_old,atomic_before_product=false)])),label_reduction=exact(before_shrinking=true,before_merging=false),max_states=infinity,threshold_before_merge=1,prune_unreachable_states=false))']), - IssueConfig('rl-f50k-nopruneunreachable', ['--search', 
'astar(merge_and_shrink(merge_strategy=merge_precomputed(merge_tree=linear(variable_order=reverse_level)),shrink_strategy=shrink_fh(),label_reduction=exact(before_shrinking=false,before_merging=true),max_states=50000,prune_unreachable_states=false))']), - IssueConfig('dfp-f50k-nopruneunreachable', ['--search', 'astar(merge_and_shrink(merge_strategy=merge_stateless(merge_selector=score_based_filtering(scoring_functions=[goal_relevance,dfp,total_order])),shrink_strategy=shrink_fh(),label_reduction=exact(before_shrinking=false,before_merging=true),max_states=50000,prune_unreachable_states=false))']), - IssueConfig('sccs-dfp-f50k-nopruneunreachable', ['--search', 'astar(merge_and_shrink(shrink_strategy=shrink_fh(),merge_strategy=merge_sccs(order_of_sccs=topological,merge_selector=score_based_filtering(scoring_functions=[goal_relevance,dfp,total_order(atomic_ts_order=reverse_level,product_ts_order=new_to_old,atomic_before_product=false)])),label_reduction=exact(before_shrinking=true,before_merging=false),max_states=50000,prune_unreachable_states=false))']), - - IssueConfig('rl-b50k-nopruneirrelevant', ['--search', 'astar(merge_and_shrink(merge_strategy=merge_precomputed(merge_tree=linear(variable_order=reverse_level)),shrink_strategy=shrink_bisimulation(greedy=false),label_reduction=exact(before_shrinking=true,before_merging=false),max_states=50000,threshold_before_merge=1,prune_irrelevant_states=false))']), - IssueConfig('dfp-b50k-nopruneirrelevant', ['--search', 'astar(merge_and_shrink(merge_strategy=merge_stateless(merge_selector=score_based_filtering(scoring_functions=[goal_relevance,dfp,total_order])),shrink_strategy=shrink_bisimulation(greedy=false),label_reduction=exact(before_shrinking=true,before_merging=false),max_states=50000,threshold_before_merge=1,prune_irrelevant_states=false))']), - IssueConfig('sccs-dfp-b50k-nopruneirrelevant', ['--search', 
'astar(merge_and_shrink(shrink_strategy=shrink_bisimulation(greedy=false),merge_strategy=merge_sccs(order_of_sccs=topological,merge_selector=score_based_filtering(scoring_functions=[goal_relevance,dfp,total_order(atomic_ts_order=reverse_level,product_ts_order=new_to_old,atomic_before_product=false)])),label_reduction=exact(before_shrinking=true,before_merging=false),max_states=50000,threshold_before_merge=1,prune_irrelevant_states=false))']), - IssueConfig('rl-ginf-nopruneirrelevant', ['--search', 'astar(merge_and_shrink(merge_strategy=merge_precomputed(merge_tree=linear(variable_order=reverse_level)),shrink_strategy=shrink_bisimulation(greedy=true),label_reduction=exact(before_shrinking=true,before_merging=false),max_states=infinity,threshold_before_merge=1,prune_irrelevant_states=false))']), - IssueConfig('dfp-ginf-nopruneirrelevant', ['--search', 'astar(merge_and_shrink(merge_strategy=merge_stateless(merge_selector=score_based_filtering(scoring_functions=[goal_relevance,dfp,total_order])),shrink_strategy=shrink_bisimulation(greedy=true),label_reduction=exact(before_shrinking=true,before_merging=false),max_states=infinity,threshold_before_merge=1,prune_irrelevant_states=false))']), - IssueConfig('sccs-dfp-ginf-nopruneirrelevant', ['--search', 'astar(merge_and_shrink(shrink_strategy=shrink_bisimulation(greedy=true),merge_strategy=merge_sccs(order_of_sccs=topological,merge_selector=score_based_filtering(scoring_functions=[goal_relevance,dfp,total_order(atomic_ts_order=reverse_level,product_ts_order=new_to_old,atomic_before_product=false)])),label_reduction=exact(before_shrinking=true,before_merging=false),max_states=infinity,threshold_before_merge=1,prune_irrelevant_states=false))']), - IssueConfig('rl-f50k-nopruneirrelevant', ['--search', 
'astar(merge_and_shrink(merge_strategy=merge_precomputed(merge_tree=linear(variable_order=reverse_level)),shrink_strategy=shrink_fh(),label_reduction=exact(before_shrinking=false,before_merging=true),max_states=50000,prune_irrelevant_states=false))']), - IssueConfig('dfp-f50k-nopruneirrelevant', ['--search', 'astar(merge_and_shrink(merge_strategy=merge_stateless(merge_selector=score_based_filtering(scoring_functions=[goal_relevance,dfp,total_order])),shrink_strategy=shrink_fh(),label_reduction=exact(before_shrinking=false,before_merging=true),max_states=50000,prune_irrelevant_states=false))']), - IssueConfig('sccs-dfp-f50k-nopruneirrelevant', ['--search', 'astar(merge_and_shrink(shrink_strategy=shrink_fh(),merge_strategy=merge_sccs(order_of_sccs=topological,merge_selector=score_based_filtering(scoring_functions=[goal_relevance,dfp,total_order(atomic_ts_order=reverse_level,product_ts_order=new_to_old,atomic_before_product=false)])),label_reduction=exact(before_shrinking=true,before_merging=false),max_states=50000,prune_irrelevant_states=false))']), - - IssueConfig('rl-b50k-noprune', ['--search', 'astar(merge_and_shrink(merge_strategy=merge_precomputed(merge_tree=linear(variable_order=reverse_level)),shrink_strategy=shrink_bisimulation(greedy=false),label_reduction=exact(before_shrinking=true,before_merging=false),max_states=50000,threshold_before_merge=1,prune_unreachable_states=false,prune_irrelevant_states=false))']), - IssueConfig('dfp-b50k-noprune', ['--search', 'astar(merge_and_shrink(merge_strategy=merge_stateless(merge_selector=score_based_filtering(scoring_functions=[goal_relevance,dfp,total_order])),shrink_strategy=shrink_bisimulation(greedy=false),label_reduction=exact(before_shrinking=true,before_merging=false),max_states=50000,threshold_before_merge=1,prune_unreachable_states=false,prune_irrelevant_states=false))']), - IssueConfig('sccs-dfp-b50k-noprune', ['--search', 
'astar(merge_and_shrink(shrink_strategy=shrink_bisimulation(greedy=false),merge_strategy=merge_sccs(order_of_sccs=topological,merge_selector=score_based_filtering(scoring_functions=[goal_relevance,dfp,total_order(atomic_ts_order=reverse_level,product_ts_order=new_to_old,atomic_before_product=false)])),label_reduction=exact(before_shrinking=true,before_merging=false),max_states=50000,threshold_before_merge=1,prune_unreachable_states=false,prune_irrelevant_states=false))']), - IssueConfig('rl-ginf-noprune', ['--search', 'astar(merge_and_shrink(merge_strategy=merge_precomputed(merge_tree=linear(variable_order=reverse_level)),shrink_strategy=shrink_bisimulation(greedy=true),label_reduction=exact(before_shrinking=true,before_merging=false),max_states=infinity,threshold_before_merge=1,prune_unreachable_states=false,prune_irrelevant_states=false))']), - IssueConfig('dfp-ginf-noprune', ['--search', 'astar(merge_and_shrink(merge_strategy=merge_stateless(merge_selector=score_based_filtering(scoring_functions=[goal_relevance,dfp,total_order])),shrink_strategy=shrink_bisimulation(greedy=true),label_reduction=exact(before_shrinking=true,before_merging=false),max_states=infinity,threshold_before_merge=1,prune_unreachable_states=false,prune_irrelevant_states=false))']), - IssueConfig('sccs-dfp-ginf-noprune', ['--search', 'astar(merge_and_shrink(shrink_strategy=shrink_bisimulation(greedy=true),merge_strategy=merge_sccs(order_of_sccs=topological,merge_selector=score_based_filtering(scoring_functions=[goal_relevance,dfp,total_order(atomic_ts_order=reverse_level,product_ts_order=new_to_old,atomic_before_product=false)])),label_reduction=exact(before_shrinking=true,before_merging=false),max_states=infinity,threshold_before_merge=1,prune_unreachable_states=false,prune_irrelevant_states=false))']), - IssueConfig('rl-f50k-noprune', ['--search', 
'astar(merge_and_shrink(merge_strategy=merge_precomputed(merge_tree=linear(variable_order=reverse_level)),shrink_strategy=shrink_fh(),label_reduction=exact(before_shrinking=false,before_merging=true),max_states=50000,prune_unreachable_states=false,prune_irrelevant_states=false))']), - IssueConfig('dfp-f50k-noprune', ['--search', 'astar(merge_and_shrink(merge_strategy=merge_stateless(merge_selector=score_based_filtering(scoring_functions=[goal_relevance,dfp,total_order])),shrink_strategy=shrink_fh(),label_reduction=exact(before_shrinking=false,before_merging=true),max_states=50000,prune_unreachable_states=false,prune_irrelevant_states=false))']), - IssueConfig('sccs-dfp-f50k-noprune', ['--search', 'astar(merge_and_shrink(shrink_strategy=shrink_fh(),merge_strategy=merge_sccs(order_of_sccs=topological,merge_selector=score_based_filtering(scoring_functions=[goal_relevance,dfp,total_order(atomic_ts_order=reverse_level,product_ts_order=new_to_old,atomic_before_product=false)])),label_reduction=exact(before_shrinking=true,before_merging=false),max_states=50000,prune_unreachable_states=false,prune_irrelevant_states=false))']), -] -SUITE = [ -'mystery:prob04.pddl', 'mystery:prob05.pddl', 'mystery:prob07.pddl', -'mystery:prob08.pddl', 'mystery:prob12.pddl', 'mystery:prob16.pddl', -'mystery:prob18.pddl', 'mystery:prob21.pddl', 'mystery:prob22.pddl', -'mystery:prob23.pddl', 'mystery:prob24.pddl'] -ENVIRONMENT = MaiaEnvironment( - priority=-100, email='silvan.sievers@unibas.ch') - -if is_test_run(): - SUITE = ['depot:p01.pddl', 'depot:p02.pddl', 'parcprinter-opt11-strips:p01.pddl', 'parcprinter-opt11-strips:p02.pddl', 'mystery:prob07.pddl'] - ENVIRONMENT = LocalEnvironment(processes=4) - -exp = IssueExperiment( - revisions=REVISIONS, - configs=CONFIGS, - environment=ENVIRONMENT, -) -exp.add_resource('ms_parser', 'ms-parser.py', dest='ms-parser.py') -exp.add_command('ms-parser', ['{ms_parser}']) -exp.add_suite(BENCHMARKS_DIR, SUITE) - -# planner outcome attributes 
-perfect_heuristic = Attribute('perfect_heuristic', absolute=True, min_wins=False) - -# m&s attributes -ms_construction_time = Attribute('ms_construction_time', absolute=False, min_wins=True, functions=[geometric_mean]) -ms_atomic_construction_time = Attribute('ms_atomic_construction_time', absolute=False, min_wins=True, functions=[geometric_mean]) -ms_abstraction_constructed = Attribute('ms_abstraction_constructed', absolute=True, min_wins=False) -ms_final_size = Attribute('ms_final_size', absolute=False, min_wins=True) -ms_out_of_memory = Attribute('ms_out_of_memory', absolute=True, min_wins=True) -ms_out_of_time = Attribute('ms_out_of_time', absolute=True, min_wins=True) -search_out_of_memory = Attribute('search_out_of_memory', absolute=True, min_wins=True) -search_out_of_time = Attribute('search_out_of_time', absolute=True, min_wins=True) - -extra_attributes = [ - perfect_heuristic, - - ms_construction_time, - ms_atomic_construction_time, - ms_abstraction_constructed, - ms_final_size, - ms_out_of_memory, - ms_out_of_time, - search_out_of_memory, - search_out_of_time, -] -attributes = exp.DEFAULT_TABLE_ATTRIBUTES -attributes.extend(extra_attributes) - -exp.add_absolute_report_step(attributes=attributes) - -exp.run_steps() diff --git a/experiments/issue707/v4-pruning-variants.py b/experiments/issue707/v4-pruning-variants.py deleted file mode 100755 index a504cb41cf..0000000000 --- a/experiments/issue707/v4-pruning-variants.py +++ /dev/null @@ -1,91 +0,0 @@ -#! 
/usr/bin/env python -# -*- coding: utf-8 -*- - -import os - -from lab.environments import LocalEnvironment, MaiaEnvironment -from lab.reports import Attribute, geometric_mean - -from common_setup import IssueConfig, IssueExperiment, DEFAULT_OPTIMAL_SUITE, is_test_run - -BENCHMARKS_DIR=os.path.expanduser('~/repos/downward/benchmarks') -REVISIONS = ["issue707-v4"] -CONFIGS = [ - IssueConfig('rl-b50k-nopruneunreachable', ['--search', 'astar(merge_and_shrink(merge_strategy=merge_precomputed(merge_tree=linear(variable_order=reverse_level)),shrink_strategy=shrink_bisimulation(greedy=false),label_reduction=exact(before_shrinking=true,before_merging=false),max_states=50000,threshold_before_merge=1,prune_unreachable_states=false))']), - IssueConfig('dfp-b50k-nopruneunreachable', ['--search', 'astar(merge_and_shrink(merge_strategy=merge_stateless(merge_selector=score_based_filtering(scoring_functions=[goal_relevance,dfp,total_order])),shrink_strategy=shrink_bisimulation(greedy=false),label_reduction=exact(before_shrinking=true,before_merging=false),max_states=50000,threshold_before_merge=1,prune_unreachable_states=false))']), - IssueConfig('sccs-dfp-b50k-nopruneunreachable', ['--search', 'astar(merge_and_shrink(shrink_strategy=shrink_bisimulation(greedy=false),merge_strategy=merge_sccs(order_of_sccs=topological,merge_selector=score_based_filtering(scoring_functions=[goal_relevance,dfp,total_order(atomic_ts_order=reverse_level,product_ts_order=new_to_old,atomic_before_product=false)])),label_reduction=exact(before_shrinking=true,before_merging=false),max_states=50000,threshold_before_merge=1,prune_unreachable_states=false))']), - IssueConfig('rl-ginf-nopruneunreachable', ['--search', 
'astar(merge_and_shrink(merge_strategy=merge_precomputed(merge_tree=linear(variable_order=reverse_level)),shrink_strategy=shrink_bisimulation(greedy=true),label_reduction=exact(before_shrinking=true,before_merging=false),max_states=infinity,threshold_before_merge=1,prune_unreachable_states=false))']), - IssueConfig('dfp-ginf-nopruneunreachable', ['--search', 'astar(merge_and_shrink(merge_strategy=merge_stateless(merge_selector=score_based_filtering(scoring_functions=[goal_relevance,dfp,total_order])),shrink_strategy=shrink_bisimulation(greedy=true),label_reduction=exact(before_shrinking=true,before_merging=false),max_states=infinity,threshold_before_merge=1,prune_unreachable_states=false))']), - IssueConfig('sccs-dfp-ginf-nopruneunreachable', ['--search', 'astar(merge_and_shrink(shrink_strategy=shrink_bisimulation(greedy=true),merge_strategy=merge_sccs(order_of_sccs=topological,merge_selector=score_based_filtering(scoring_functions=[goal_relevance,dfp,total_order(atomic_ts_order=reverse_level,product_ts_order=new_to_old,atomic_before_product=false)])),label_reduction=exact(before_shrinking=true,before_merging=false),max_states=infinity,threshold_before_merge=1,prune_unreachable_states=false))']), - IssueConfig('rl-f50k-nopruneunreachable', ['--search', 'astar(merge_and_shrink(merge_strategy=merge_precomputed(merge_tree=linear(variable_order=reverse_level)),shrink_strategy=shrink_fh(),label_reduction=exact(before_shrinking=false,before_merging=true),max_states=50000,prune_unreachable_states=false))']), - IssueConfig('dfp-f50k-nopruneunreachable', ['--search', 'astar(merge_and_shrink(merge_strategy=merge_stateless(merge_selector=score_based_filtering(scoring_functions=[goal_relevance,dfp,total_order])),shrink_strategy=shrink_fh(),label_reduction=exact(before_shrinking=false,before_merging=true),max_states=50000,prune_unreachable_states=false))']), - IssueConfig('sccs-dfp-f50k-nopruneunreachable', ['--search', 
'astar(merge_and_shrink(shrink_strategy=shrink_fh(),merge_strategy=merge_sccs(order_of_sccs=topological,merge_selector=score_based_filtering(scoring_functions=[goal_relevance,dfp,total_order(atomic_ts_order=reverse_level,product_ts_order=new_to_old,atomic_before_product=false)])),label_reduction=exact(before_shrinking=true,before_merging=false),max_states=50000,prune_unreachable_states=false))']), - - IssueConfig('rl-b50k-nopruneirrelevant', ['--search', 'astar(merge_and_shrink(merge_strategy=merge_precomputed(merge_tree=linear(variable_order=reverse_level)),shrink_strategy=shrink_bisimulation(greedy=false),label_reduction=exact(before_shrinking=true,before_merging=false),max_states=50000,threshold_before_merge=1,prune_irrelevant_states=false))']), - IssueConfig('dfp-b50k-nopruneirrelevant', ['--search', 'astar(merge_and_shrink(merge_strategy=merge_stateless(merge_selector=score_based_filtering(scoring_functions=[goal_relevance,dfp,total_order])),shrink_strategy=shrink_bisimulation(greedy=false),label_reduction=exact(before_shrinking=true,before_merging=false),max_states=50000,threshold_before_merge=1,prune_irrelevant_states=false))']), - IssueConfig('sccs-dfp-b50k-nopruneirrelevant', ['--search', 'astar(merge_and_shrink(shrink_strategy=shrink_bisimulation(greedy=false),merge_strategy=merge_sccs(order_of_sccs=topological,merge_selector=score_based_filtering(scoring_functions=[goal_relevance,dfp,total_order(atomic_ts_order=reverse_level,product_ts_order=new_to_old,atomic_before_product=false)])),label_reduction=exact(before_shrinking=true,before_merging=false),max_states=50000,threshold_before_merge=1,prune_irrelevant_states=false))']), - IssueConfig('rl-ginf-nopruneirrelevant', ['--search', 
'astar(merge_and_shrink(merge_strategy=merge_precomputed(merge_tree=linear(variable_order=reverse_level)),shrink_strategy=shrink_bisimulation(greedy=true),label_reduction=exact(before_shrinking=true,before_merging=false),max_states=infinity,threshold_before_merge=1,prune_irrelevant_states=false))']), - IssueConfig('dfp-ginf-nopruneirrelevant', ['--search', 'astar(merge_and_shrink(merge_strategy=merge_stateless(merge_selector=score_based_filtering(scoring_functions=[goal_relevance,dfp,total_order])),shrink_strategy=shrink_bisimulation(greedy=true),label_reduction=exact(before_shrinking=true,before_merging=false),max_states=infinity,threshold_before_merge=1,prune_irrelevant_states=false))']), - IssueConfig('sccs-dfp-ginf-nopruneirrelevant', ['--search', 'astar(merge_and_shrink(shrink_strategy=shrink_bisimulation(greedy=true),merge_strategy=merge_sccs(order_of_sccs=topological,merge_selector=score_based_filtering(scoring_functions=[goal_relevance,dfp,total_order(atomic_ts_order=reverse_level,product_ts_order=new_to_old,atomic_before_product=false)])),label_reduction=exact(before_shrinking=true,before_merging=false),max_states=infinity,threshold_before_merge=1,prune_irrelevant_states=false))']), - IssueConfig('rl-f50k-nopruneirrelevant', ['--search', 'astar(merge_and_shrink(merge_strategy=merge_precomputed(merge_tree=linear(variable_order=reverse_level)),shrink_strategy=shrink_fh(),label_reduction=exact(before_shrinking=false,before_merging=true),max_states=50000,prune_irrelevant_states=false))']), - IssueConfig('dfp-f50k-nopruneirrelevant', ['--search', 'astar(merge_and_shrink(merge_strategy=merge_stateless(merge_selector=score_based_filtering(scoring_functions=[goal_relevance,dfp,total_order])),shrink_strategy=shrink_fh(),label_reduction=exact(before_shrinking=false,before_merging=true),max_states=50000,prune_irrelevant_states=false))']), - IssueConfig('sccs-dfp-f50k-nopruneirrelevant', ['--search', 
'astar(merge_and_shrink(shrink_strategy=shrink_fh(),merge_strategy=merge_sccs(order_of_sccs=topological,merge_selector=score_based_filtering(scoring_functions=[goal_relevance,dfp,total_order(atomic_ts_order=reverse_level,product_ts_order=new_to_old,atomic_before_product=false)])),label_reduction=exact(before_shrinking=true,before_merging=false),max_states=50000,prune_irrelevant_states=false))']), - - IssueConfig('rl-b50k-noprune', ['--search', 'astar(merge_and_shrink(merge_strategy=merge_precomputed(merge_tree=linear(variable_order=reverse_level)),shrink_strategy=shrink_bisimulation(greedy=false),label_reduction=exact(before_shrinking=true,before_merging=false),max_states=50000,threshold_before_merge=1,prune_unreachable_states=false,prune_irrelevant_states=false))']), - IssueConfig('dfp-b50k-noprune', ['--search', 'astar(merge_and_shrink(merge_strategy=merge_stateless(merge_selector=score_based_filtering(scoring_functions=[goal_relevance,dfp,total_order])),shrink_strategy=shrink_bisimulation(greedy=false),label_reduction=exact(before_shrinking=true,before_merging=false),max_states=50000,threshold_before_merge=1,prune_unreachable_states=false,prune_irrelevant_states=false))']), - IssueConfig('sccs-dfp-b50k-noprune', ['--search', 'astar(merge_and_shrink(shrink_strategy=shrink_bisimulation(greedy=false),merge_strategy=merge_sccs(order_of_sccs=topological,merge_selector=score_based_filtering(scoring_functions=[goal_relevance,dfp,total_order(atomic_ts_order=reverse_level,product_ts_order=new_to_old,atomic_before_product=false)])),label_reduction=exact(before_shrinking=true,before_merging=false),max_states=50000,threshold_before_merge=1,prune_unreachable_states=false,prune_irrelevant_states=false))']), - IssueConfig('rl-ginf-noprune', ['--search', 
'astar(merge_and_shrink(merge_strategy=merge_precomputed(merge_tree=linear(variable_order=reverse_level)),shrink_strategy=shrink_bisimulation(greedy=true),label_reduction=exact(before_shrinking=true,before_merging=false),max_states=infinity,threshold_before_merge=1,prune_unreachable_states=false,prune_irrelevant_states=false))']), - IssueConfig('dfp-ginf-noprune', ['--search', 'astar(merge_and_shrink(merge_strategy=merge_stateless(merge_selector=score_based_filtering(scoring_functions=[goal_relevance,dfp,total_order])),shrink_strategy=shrink_bisimulation(greedy=true),label_reduction=exact(before_shrinking=true,before_merging=false),max_states=infinity,threshold_before_merge=1,prune_unreachable_states=false,prune_irrelevant_states=false))']), - IssueConfig('sccs-dfp-ginf-noprune', ['--search', 'astar(merge_and_shrink(shrink_strategy=shrink_bisimulation(greedy=true),merge_strategy=merge_sccs(order_of_sccs=topological,merge_selector=score_based_filtering(scoring_functions=[goal_relevance,dfp,total_order(atomic_ts_order=reverse_level,product_ts_order=new_to_old,atomic_before_product=false)])),label_reduction=exact(before_shrinking=true,before_merging=false),max_states=infinity,threshold_before_merge=1,prune_unreachable_states=false,prune_irrelevant_states=false))']), - IssueConfig('rl-f50k-noprune', ['--search', 'astar(merge_and_shrink(merge_strategy=merge_precomputed(merge_tree=linear(variable_order=reverse_level)),shrink_strategy=shrink_fh(),label_reduction=exact(before_shrinking=false,before_merging=true),max_states=50000,prune_unreachable_states=false,prune_irrelevant_states=false))']), - IssueConfig('dfp-f50k-noprune', ['--search', 'astar(merge_and_shrink(merge_strategy=merge_stateless(merge_selector=score_based_filtering(scoring_functions=[goal_relevance,dfp,total_order])),shrink_strategy=shrink_fh(),label_reduction=exact(before_shrinking=false,before_merging=true),max_states=50000,prune_unreachable_states=false,prune_irrelevant_states=false))']), - 
IssueConfig('sccs-dfp-f50k-noprune', ['--search', 'astar(merge_and_shrink(shrink_strategy=shrink_fh(),merge_strategy=merge_sccs(order_of_sccs=topological,merge_selector=score_based_filtering(scoring_functions=[goal_relevance,dfp,total_order(atomic_ts_order=reverse_level,product_ts_order=new_to_old,atomic_before_product=false)])),label_reduction=exact(before_shrinking=true,before_merging=false),max_states=50000,prune_unreachable_states=false,prune_irrelevant_states=false))']), -] -SUITE = DEFAULT_OPTIMAL_SUITE -ENVIRONMENT = MaiaEnvironment( - priority=0, email='silvan.sievers@unibas.ch') - -if is_test_run(): - SUITE = ['depot:p01.pddl', 'depot:p02.pddl', 'parcprinter-opt11-strips:p01.pddl', 'parcprinter-opt11-strips:p02.pddl', 'mystery:prob07.pddl'] - ENVIRONMENT = LocalEnvironment(processes=4) - -exp = IssueExperiment( - revisions=REVISIONS, - configs=CONFIGS, - environment=ENVIRONMENT, -) -exp.add_resource('ms_parser', 'ms-parser.py', dest='ms-parser.py') -exp.add_command('ms-parser', ['{ms_parser}']) -exp.add_suite(BENCHMARKS_DIR, SUITE) - -# planner outcome attributes -perfect_heuristic = Attribute('perfect_heuristic', absolute=True, min_wins=False) - -# m&s attributes -ms_construction_time = Attribute('ms_construction_time', absolute=False, min_wins=True, functions=[geometric_mean]) -ms_atomic_construction_time = Attribute('ms_atomic_construction_time', absolute=False, min_wins=True, functions=[geometric_mean]) -ms_abstraction_constructed = Attribute('ms_abstraction_constructed', absolute=True, min_wins=False) -ms_final_size = Attribute('ms_final_size', absolute=False, min_wins=True) -ms_out_of_memory = Attribute('ms_out_of_memory', absolute=True, min_wins=True) -ms_out_of_time = Attribute('ms_out_of_time', absolute=True, min_wins=True) -search_out_of_memory = Attribute('search_out_of_memory', absolute=True, min_wins=True) -search_out_of_time = Attribute('search_out_of_time', absolute=True, min_wins=True) - -extra_attributes = [ - perfect_heuristic, - - 
ms_construction_time, - ms_atomic_construction_time, - ms_abstraction_constructed, - ms_final_size, - ms_out_of_memory, - ms_out_of_time, - search_out_of_memory, - search_out_of_time, -] -attributes = exp.DEFAULT_TABLE_ATTRIBUTES -attributes.extend(extra_attributes) - -exp.add_absolute_report_step(attributes=attributes) - -exp.run_steps() diff --git a/experiments/issue707/v4.py b/experiments/issue707/v4.py deleted file mode 100755 index a16f037e47..0000000000 --- a/experiments/issue707/v4.py +++ /dev/null @@ -1,74 +0,0 @@ -#! /usr/bin/env python -# -*- coding: utf-8 -*- - -import os - -from lab.environments import LocalEnvironment, MaiaEnvironment -from lab.reports import Attribute, geometric_mean - -from common_setup import IssueConfig, IssueExperiment, DEFAULT_OPTIMAL_SUITE, is_test_run - -BENCHMARKS_DIR=os.path.expanduser('~/repos/downward/benchmarks') -REVISIONS = ["issue707-base-v2", "issue707-v4"] -CONFIGS = [ - IssueConfig('dfp-b50k', ['--search', 'astar(merge_and_shrink(merge_strategy=merge_stateless(merge_selector=score_based_filtering(scoring_functions=[goal_relevance,dfp,total_order])),shrink_strategy=shrink_bisimulation(greedy=false),label_reduction=exact(before_shrinking=true,before_merging=false),max_states=50000,threshold_before_merge=1))']), - IssueConfig('rl-b50k', ['--search', 'astar(merge_and_shrink(merge_strategy=merge_precomputed(merge_tree=linear(variable_order=reverse_level)),shrink_strategy=shrink_bisimulation(greedy=false),label_reduction=exact(before_shrinking=true,before_merging=false),max_states=50000,threshold_before_merge=1))']), - IssueConfig('sccs-dfp-b50k', ['--search', 
'astar(merge_and_shrink(shrink_strategy=shrink_bisimulation(greedy=false),merge_strategy=merge_sccs(order_of_sccs=topological,merge_selector=score_based_filtering(scoring_functions=[goal_relevance,dfp,total_order(atomic_ts_order=reverse_level,product_ts_order=new_to_old,atomic_before_product=false)])),label_reduction=exact(before_shrinking=true,before_merging=false),max_states=50000,threshold_before_merge=1))']), - - IssueConfig('dfp-ginf', ['--search', 'astar(merge_and_shrink(merge_strategy=merge_stateless(merge_selector=score_based_filtering(scoring_functions=[goal_relevance,dfp,total_order])),shrink_strategy=shrink_bisimulation(greedy=true),label_reduction=exact(before_shrinking=true,before_merging=false),max_states=infinity,threshold_before_merge=1))']), - IssueConfig('rl-ginf', ['--search', 'astar(merge_and_shrink(merge_strategy=merge_precomputed(merge_tree=linear(variable_order=reverse_level)),shrink_strategy=shrink_bisimulation(greedy=true),label_reduction=exact(before_shrinking=true,before_merging=false),max_states=infinity,threshold_before_merge=1))']), - IssueConfig('sccs-dfp-ginf', ['--search', 'astar(merge_and_shrink(shrink_strategy=shrink_bisimulation(greedy=true),merge_strategy=merge_sccs(order_of_sccs=topological,merge_selector=score_based_filtering(scoring_functions=[goal_relevance,dfp,total_order(atomic_ts_order=reverse_level,product_ts_order=new_to_old,atomic_before_product=false)])),label_reduction=exact(before_shrinking=true,before_merging=false),max_states=infinity,threshold_before_merge=1))']), - - IssueConfig('dfp-f50k', ['--search', 'astar(merge_and_shrink(merge_strategy=merge_stateless(merge_selector=score_based_filtering(scoring_functions=[goal_relevance,dfp,total_order])),shrink_strategy=shrink_fh(),label_reduction=exact(before_shrinking=false,before_merging=true),max_states=50000))']), - IssueConfig('rl-f50k', ['--search', 
'astar(merge_and_shrink(merge_strategy=merge_precomputed(merge_tree=linear(variable_order=reverse_level)),shrink_strategy=shrink_fh(),label_reduction=exact(before_shrinking=false,before_merging=true),max_states=50000))']), - IssueConfig('sccs-dfp-f50k', ['--search', 'astar(merge_and_shrink(shrink_strategy=shrink_fh(),merge_strategy=merge_sccs(order_of_sccs=topological,merge_selector=score_based_filtering(scoring_functions=[goal_relevance,dfp,total_order(atomic_ts_order=reverse_level,product_ts_order=new_to_old,atomic_before_product=false)])),label_reduction=exact(before_shrinking=true,before_merging=false),max_states=50000))']), -] -SUITE = DEFAULT_OPTIMAL_SUITE -ENVIRONMENT = MaiaEnvironment( - priority=0, email='silvan.sievers@unibas.ch') - -if is_test_run(): - SUITE = ['depot:p01.pddl', 'depot:p02.pddl', 'parcprinter-opt11-strips:p01.pddl', 'parcprinter-opt11-strips:p02.pddl', 'mystery:prob07.pddl'] - ENVIRONMENT = LocalEnvironment(processes=4) - -exp = IssueExperiment( - revisions=REVISIONS, - configs=CONFIGS, - environment=ENVIRONMENT, -) -exp.add_resource('ms_parser', 'ms-parser.py', dest='ms-parser.py') -exp.add_command('ms-parser', ['{ms_parser}']) -exp.add_suite(BENCHMARKS_DIR, SUITE) - -# planner outcome attributes -perfect_heuristic = Attribute('perfect_heuristic', absolute=True, min_wins=False) - -# m&s attributes -ms_construction_time = Attribute('ms_construction_time', absolute=False, min_wins=True, functions=[geometric_mean]) -ms_atomic_construction_time = Attribute('ms_atomic_construction_time', absolute=False, min_wins=True, functions=[geometric_mean]) -ms_abstraction_constructed = Attribute('ms_abstraction_constructed', absolute=True, min_wins=False) -ms_final_size = Attribute('ms_final_size', absolute=False, min_wins=True) -ms_out_of_memory = Attribute('ms_out_of_memory', absolute=True, min_wins=True) -ms_out_of_time = Attribute('ms_out_of_time', absolute=True, min_wins=True) -search_out_of_memory = Attribute('search_out_of_memory', 
absolute=True, min_wins=True) -search_out_of_time = Attribute('search_out_of_time', absolute=True, min_wins=True) - -extra_attributes = [ - perfect_heuristic, - - ms_construction_time, - ms_atomic_construction_time, - ms_abstraction_constructed, - ms_final_size, - ms_out_of_memory, - ms_out_of_time, - search_out_of_memory, - search_out_of_time, -] -attributes = exp.DEFAULT_TABLE_ATTRIBUTES -attributes.extend(extra_attributes) - -exp.add_comparison_table_step(attributes=attributes) -exp.add_scatter_plot_step() - -exp.run_steps() diff --git a/experiments/issue707/v5-debug.py b/experiments/issue707/v5-debug.py deleted file mode 100755 index 38628e9397..0000000000 --- a/experiments/issue707/v5-debug.py +++ /dev/null @@ -1,74 +0,0 @@ -#! /usr/bin/env python -# -*- coding: utf-8 -*- - -import os - -from lab.environments import LocalEnvironment, MaiaEnvironment -from lab.reports import Attribute, geometric_mean - -from common_setup import IssueConfig, IssueExperiment, DEFAULT_OPTIMAL_SUITE, is_test_run - -BENCHMARKS_DIR=os.path.expanduser('~/repos/downward/benchmarks') -REVISIONS = ["issue707-base-v2", "issue707-v5"] -CONFIGS = [ - IssueConfig('dfp-b50k', ['--search', 'astar(merge_and_shrink(merge_strategy=merge_stateless(merge_selector=score_based_filtering(scoring_functions=[goal_relevance,dfp,total_order])),shrink_strategy=shrink_bisimulation(greedy=false),label_reduction=exact(before_shrinking=true,before_merging=false),max_states=50000,threshold_before_merge=1))'],build_options=['--debug'], driver_options=['--debug', '--search-time-limit', '60s']), - IssueConfig('rl-b50k', ['--search', 'astar(merge_and_shrink(merge_strategy=merge_precomputed(merge_tree=linear(variable_order=reverse_level)),shrink_strategy=shrink_bisimulation(greedy=false),label_reduction=exact(before_shrinking=true,before_merging=false),max_states=50000,threshold_before_merge=1))'],build_options=['--debug'], driver_options=['--debug', '--search-time-limit', '60s']), - IssueConfig('sccs-dfp-b50k', 
['--search', 'astar(merge_and_shrink(shrink_strategy=shrink_bisimulation(greedy=false),merge_strategy=merge_sccs(order_of_sccs=topological,merge_selector=score_based_filtering(scoring_functions=[goal_relevance,dfp,total_order(atomic_ts_order=reverse_level,product_ts_order=new_to_old,atomic_before_product=false)])),label_reduction=exact(before_shrinking=true,before_merging=false),max_states=50000,threshold_before_merge=1))'],build_options=['--debug'], driver_options=['--debug', '--search-time-limit', '60s']), - - IssueConfig('dfp-ginf', ['--search', 'astar(merge_and_shrink(merge_strategy=merge_stateless(merge_selector=score_based_filtering(scoring_functions=[goal_relevance,dfp,total_order])),shrink_strategy=shrink_bisimulation(greedy=true),label_reduction=exact(before_shrinking=true,before_merging=false),max_states=infinity,threshold_before_merge=1))'],build_options=['--debug'], driver_options=['--debug', '--search-time-limit', '60s']), - IssueConfig('rl-ginf', ['--search', 'astar(merge_and_shrink(merge_strategy=merge_precomputed(merge_tree=linear(variable_order=reverse_level)),shrink_strategy=shrink_bisimulation(greedy=true),label_reduction=exact(before_shrinking=true,before_merging=false),max_states=infinity,threshold_before_merge=1))'],build_options=['--debug'], driver_options=['--debug', '--search-time-limit', '60s']), - IssueConfig('sccs-dfp-ginf', ['--search', 'astar(merge_and_shrink(shrink_strategy=shrink_bisimulation(greedy=true),merge_strategy=merge_sccs(order_of_sccs=topological,merge_selector=score_based_filtering(scoring_functions=[goal_relevance,dfp,total_order(atomic_ts_order=reverse_level,product_ts_order=new_to_old,atomic_before_product=false)])),label_reduction=exact(before_shrinking=true,before_merging=false),max_states=infinity,threshold_before_merge=1))'],build_options=['--debug'], driver_options=['--debug', '--search-time-limit', '60s']), - - IssueConfig('dfp-f50k', ['--search', 
'astar(merge_and_shrink(merge_strategy=merge_stateless(merge_selector=score_based_filtering(scoring_functions=[goal_relevance,dfp,total_order])),shrink_strategy=shrink_fh(),label_reduction=exact(before_shrinking=false,before_merging=true),max_states=50000))'],build_options=['--debug'], driver_options=['--debug', '--search-time-limit', '60s']), - IssueConfig('rl-f50k', ['--search', 'astar(merge_and_shrink(merge_strategy=merge_precomputed(merge_tree=linear(variable_order=reverse_level)),shrink_strategy=shrink_fh(),label_reduction=exact(before_shrinking=false,before_merging=true),max_states=50000))'],build_options=['--debug'], driver_options=['--debug', '--search-time-limit', '60s']), - IssueConfig('sccs-dfp-f50k', ['--search', 'astar(merge_and_shrink(shrink_strategy=shrink_fh(),merge_strategy=merge_sccs(order_of_sccs=topological,merge_selector=score_based_filtering(scoring_functions=[goal_relevance,dfp,total_order(atomic_ts_order=reverse_level,product_ts_order=new_to_old,atomic_before_product=false)])),label_reduction=exact(before_shrinking=true,before_merging=false),max_states=50000))'],build_options=['--debug'], driver_options=['--debug', '--search-time-limit', '60s']), -] -SUITE = DEFAULT_OPTIMAL_SUITE -ENVIRONMENT = MaiaEnvironment( - priority=0, email='silvan.sievers@unibas.ch') - -if is_test_run(): - SUITE = ['depot:p01.pddl', 'depot:p02.pddl', 'parcprinter-opt11-strips:p01.pddl', 'parcprinter-opt11-strips:p02.pddl', 'mystery:prob07.pddl'] - ENVIRONMENT = LocalEnvironment(processes=4) - -exp = IssueExperiment( - revisions=REVISIONS, - configs=CONFIGS, - environment=ENVIRONMENT, -) -exp.add_resource('ms_parser', 'ms-parser.py', dest='ms-parser.py') -exp.add_command('ms-parser', ['{ms_parser}']) -exp.add_suite(BENCHMARKS_DIR, SUITE) - -# planner outcome attributes -perfect_heuristic = Attribute('perfect_heuristic', absolute=True, min_wins=False) - -# m&s attributes -ms_construction_time = Attribute('ms_construction_time', absolute=False, min_wins=True, 
functions=[geometric_mean]) -ms_atomic_construction_time = Attribute('ms_atomic_construction_time', absolute=False, min_wins=True, functions=[geometric_mean]) -ms_abstraction_constructed = Attribute('ms_abstraction_constructed', absolute=True, min_wins=False) -ms_final_size = Attribute('ms_final_size', absolute=False, min_wins=True) -ms_out_of_memory = Attribute('ms_out_of_memory', absolute=True, min_wins=True) -ms_out_of_time = Attribute('ms_out_of_time', absolute=True, min_wins=True) -search_out_of_memory = Attribute('search_out_of_memory', absolute=True, min_wins=True) -search_out_of_time = Attribute('search_out_of_time', absolute=True, min_wins=True) - -extra_attributes = [ - perfect_heuristic, - - ms_construction_time, - ms_atomic_construction_time, - ms_abstraction_constructed, - ms_final_size, - ms_out_of_memory, - ms_out_of_time, - search_out_of_memory, - search_out_of_time, -] -attributes = exp.DEFAULT_TABLE_ATTRIBUTES -attributes.extend(extra_attributes) - -exp.add_comparison_table_step(attributes=attributes) -exp.add_scatter_plot_step() - -exp.run_steps() diff --git a/experiments/issue707/v5-pruning-variants.py b/experiments/issue707/v5-pruning-variants.py deleted file mode 100755 index 9f0e602808..0000000000 --- a/experiments/issue707/v5-pruning-variants.py +++ /dev/null @@ -1,91 +0,0 @@ -#! 
/usr/bin/env python -# -*- coding: utf-8 -*- - -import os - -from lab.environments import LocalEnvironment, MaiaEnvironment -from lab.reports import Attribute, geometric_mean - -from common_setup import IssueConfig, IssueExperiment, DEFAULT_OPTIMAL_SUITE, is_test_run - -BENCHMARKS_DIR=os.path.expanduser('~/repos/downward/benchmarks') -REVISIONS = ["issue707-v5"] -CONFIGS = [ - IssueConfig('rl-b50k-nopruneunreachable', ['--search', 'astar(merge_and_shrink(merge_strategy=merge_precomputed(merge_tree=linear(variable_order=reverse_level)),shrink_strategy=shrink_bisimulation(greedy=false),label_reduction=exact(before_shrinking=true,before_merging=false),max_states=50000,threshold_before_merge=1,prune_unreachable_states=false))']), - IssueConfig('dfp-b50k-nopruneunreachable', ['--search', 'astar(merge_and_shrink(merge_strategy=merge_stateless(merge_selector=score_based_filtering(scoring_functions=[goal_relevance,dfp,total_order])),shrink_strategy=shrink_bisimulation(greedy=false),label_reduction=exact(before_shrinking=true,before_merging=false),max_states=50000,threshold_before_merge=1,prune_unreachable_states=false))']), - IssueConfig('sccs-dfp-b50k-nopruneunreachable', ['--search', 'astar(merge_and_shrink(shrink_strategy=shrink_bisimulation(greedy=false),merge_strategy=merge_sccs(order_of_sccs=topological,merge_selector=score_based_filtering(scoring_functions=[goal_relevance,dfp,total_order(atomic_ts_order=reverse_level,product_ts_order=new_to_old,atomic_before_product=false)])),label_reduction=exact(before_shrinking=true,before_merging=false),max_states=50000,threshold_before_merge=1,prune_unreachable_states=false))']), - IssueConfig('rl-ginf-nopruneunreachable', ['--search', 
'astar(merge_and_shrink(merge_strategy=merge_precomputed(merge_tree=linear(variable_order=reverse_level)),shrink_strategy=shrink_bisimulation(greedy=true),label_reduction=exact(before_shrinking=true,before_merging=false),max_states=infinity,threshold_before_merge=1,prune_unreachable_states=false))']), - IssueConfig('dfp-ginf-nopruneunreachable', ['--search', 'astar(merge_and_shrink(merge_strategy=merge_stateless(merge_selector=score_based_filtering(scoring_functions=[goal_relevance,dfp,total_order])),shrink_strategy=shrink_bisimulation(greedy=true),label_reduction=exact(before_shrinking=true,before_merging=false),max_states=infinity,threshold_before_merge=1,prune_unreachable_states=false))']), - IssueConfig('sccs-dfp-ginf-nopruneunreachable', ['--search', 'astar(merge_and_shrink(shrink_strategy=shrink_bisimulation(greedy=true),merge_strategy=merge_sccs(order_of_sccs=topological,merge_selector=score_based_filtering(scoring_functions=[goal_relevance,dfp,total_order(atomic_ts_order=reverse_level,product_ts_order=new_to_old,atomic_before_product=false)])),label_reduction=exact(before_shrinking=true,before_merging=false),max_states=infinity,threshold_before_merge=1,prune_unreachable_states=false))']), - IssueConfig('rl-f50k-nopruneunreachable', ['--search', 'astar(merge_and_shrink(merge_strategy=merge_precomputed(merge_tree=linear(variable_order=reverse_level)),shrink_strategy=shrink_fh(),label_reduction=exact(before_shrinking=false,before_merging=true),max_states=50000,prune_unreachable_states=false))']), - IssueConfig('dfp-f50k-nopruneunreachable', ['--search', 'astar(merge_and_shrink(merge_strategy=merge_stateless(merge_selector=score_based_filtering(scoring_functions=[goal_relevance,dfp,total_order])),shrink_strategy=shrink_fh(),label_reduction=exact(before_shrinking=false,before_merging=true),max_states=50000,prune_unreachable_states=false))']), - IssueConfig('sccs-dfp-f50k-nopruneunreachable', ['--search', 
'astar(merge_and_shrink(shrink_strategy=shrink_fh(),merge_strategy=merge_sccs(order_of_sccs=topological,merge_selector=score_based_filtering(scoring_functions=[goal_relevance,dfp,total_order(atomic_ts_order=reverse_level,product_ts_order=new_to_old,atomic_before_product=false)])),label_reduction=exact(before_shrinking=true,before_merging=false),max_states=50000,prune_unreachable_states=false))']), - - IssueConfig('rl-b50k-nopruneirrelevant', ['--search', 'astar(merge_and_shrink(merge_strategy=merge_precomputed(merge_tree=linear(variable_order=reverse_level)),shrink_strategy=shrink_bisimulation(greedy=false),label_reduction=exact(before_shrinking=true,before_merging=false),max_states=50000,threshold_before_merge=1,prune_irrelevant_states=false))']), - IssueConfig('dfp-b50k-nopruneirrelevant', ['--search', 'astar(merge_and_shrink(merge_strategy=merge_stateless(merge_selector=score_based_filtering(scoring_functions=[goal_relevance,dfp,total_order])),shrink_strategy=shrink_bisimulation(greedy=false),label_reduction=exact(before_shrinking=true,before_merging=false),max_states=50000,threshold_before_merge=1,prune_irrelevant_states=false))']), - IssueConfig('sccs-dfp-b50k-nopruneirrelevant', ['--search', 'astar(merge_and_shrink(shrink_strategy=shrink_bisimulation(greedy=false),merge_strategy=merge_sccs(order_of_sccs=topological,merge_selector=score_based_filtering(scoring_functions=[goal_relevance,dfp,total_order(atomic_ts_order=reverse_level,product_ts_order=new_to_old,atomic_before_product=false)])),label_reduction=exact(before_shrinking=true,before_merging=false),max_states=50000,threshold_before_merge=1,prune_irrelevant_states=false))']), - IssueConfig('rl-ginf-nopruneirrelevant', ['--search', 
'astar(merge_and_shrink(merge_strategy=merge_precomputed(merge_tree=linear(variable_order=reverse_level)),shrink_strategy=shrink_bisimulation(greedy=true),label_reduction=exact(before_shrinking=true,before_merging=false),max_states=infinity,threshold_before_merge=1,prune_irrelevant_states=false))']), - IssueConfig('dfp-ginf-nopruneirrelevant', ['--search', 'astar(merge_and_shrink(merge_strategy=merge_stateless(merge_selector=score_based_filtering(scoring_functions=[goal_relevance,dfp,total_order])),shrink_strategy=shrink_bisimulation(greedy=true),label_reduction=exact(before_shrinking=true,before_merging=false),max_states=infinity,threshold_before_merge=1,prune_irrelevant_states=false))']), - IssueConfig('sccs-dfp-ginf-nopruneirrelevant', ['--search', 'astar(merge_and_shrink(shrink_strategy=shrink_bisimulation(greedy=true),merge_strategy=merge_sccs(order_of_sccs=topological,merge_selector=score_based_filtering(scoring_functions=[goal_relevance,dfp,total_order(atomic_ts_order=reverse_level,product_ts_order=new_to_old,atomic_before_product=false)])),label_reduction=exact(before_shrinking=true,before_merging=false),max_states=infinity,threshold_before_merge=1,prune_irrelevant_states=false))']), - IssueConfig('rl-f50k-nopruneirrelevant', ['--search', 'astar(merge_and_shrink(merge_strategy=merge_precomputed(merge_tree=linear(variable_order=reverse_level)),shrink_strategy=shrink_fh(),label_reduction=exact(before_shrinking=false,before_merging=true),max_states=50000,prune_irrelevant_states=false))']), - IssueConfig('dfp-f50k-nopruneirrelevant', ['--search', 'astar(merge_and_shrink(merge_strategy=merge_stateless(merge_selector=score_based_filtering(scoring_functions=[goal_relevance,dfp,total_order])),shrink_strategy=shrink_fh(),label_reduction=exact(before_shrinking=false,before_merging=true),max_states=50000,prune_irrelevant_states=false))']), - IssueConfig('sccs-dfp-f50k-nopruneirrelevant', ['--search', 
'astar(merge_and_shrink(shrink_strategy=shrink_fh(),merge_strategy=merge_sccs(order_of_sccs=topological,merge_selector=score_based_filtering(scoring_functions=[goal_relevance,dfp,total_order(atomic_ts_order=reverse_level,product_ts_order=new_to_old,atomic_before_product=false)])),label_reduction=exact(before_shrinking=true,before_merging=false),max_states=50000,prune_irrelevant_states=false))']), - - IssueConfig('rl-b50k-noprune', ['--search', 'astar(merge_and_shrink(merge_strategy=merge_precomputed(merge_tree=linear(variable_order=reverse_level)),shrink_strategy=shrink_bisimulation(greedy=false),label_reduction=exact(before_shrinking=true,before_merging=false),max_states=50000,threshold_before_merge=1,prune_unreachable_states=false,prune_irrelevant_states=false))']), - IssueConfig('dfp-b50k-noprune', ['--search', 'astar(merge_and_shrink(merge_strategy=merge_stateless(merge_selector=score_based_filtering(scoring_functions=[goal_relevance,dfp,total_order])),shrink_strategy=shrink_bisimulation(greedy=false),label_reduction=exact(before_shrinking=true,before_merging=false),max_states=50000,threshold_before_merge=1,prune_unreachable_states=false,prune_irrelevant_states=false))']), - IssueConfig('sccs-dfp-b50k-noprune', ['--search', 'astar(merge_and_shrink(shrink_strategy=shrink_bisimulation(greedy=false),merge_strategy=merge_sccs(order_of_sccs=topological,merge_selector=score_based_filtering(scoring_functions=[goal_relevance,dfp,total_order(atomic_ts_order=reverse_level,product_ts_order=new_to_old,atomic_before_product=false)])),label_reduction=exact(before_shrinking=true,before_merging=false),max_states=50000,threshold_before_merge=1,prune_unreachable_states=false,prune_irrelevant_states=false))']), - IssueConfig('rl-ginf-noprune', ['--search', 
'astar(merge_and_shrink(merge_strategy=merge_precomputed(merge_tree=linear(variable_order=reverse_level)),shrink_strategy=shrink_bisimulation(greedy=true),label_reduction=exact(before_shrinking=true,before_merging=false),max_states=infinity,threshold_before_merge=1,prune_unreachable_states=false,prune_irrelevant_states=false))']), - IssueConfig('dfp-ginf-noprune', ['--search', 'astar(merge_and_shrink(merge_strategy=merge_stateless(merge_selector=score_based_filtering(scoring_functions=[goal_relevance,dfp,total_order])),shrink_strategy=shrink_bisimulation(greedy=true),label_reduction=exact(before_shrinking=true,before_merging=false),max_states=infinity,threshold_before_merge=1,prune_unreachable_states=false,prune_irrelevant_states=false))']), - IssueConfig('sccs-dfp-ginf-noprune', ['--search', 'astar(merge_and_shrink(shrink_strategy=shrink_bisimulation(greedy=true),merge_strategy=merge_sccs(order_of_sccs=topological,merge_selector=score_based_filtering(scoring_functions=[goal_relevance,dfp,total_order(atomic_ts_order=reverse_level,product_ts_order=new_to_old,atomic_before_product=false)])),label_reduction=exact(before_shrinking=true,before_merging=false),max_states=infinity,threshold_before_merge=1,prune_unreachable_states=false,prune_irrelevant_states=false))']), - IssueConfig('rl-f50k-noprune', ['--search', 'astar(merge_and_shrink(merge_strategy=merge_precomputed(merge_tree=linear(variable_order=reverse_level)),shrink_strategy=shrink_fh(),label_reduction=exact(before_shrinking=false,before_merging=true),max_states=50000,prune_unreachable_states=false,prune_irrelevant_states=false))']), - IssueConfig('dfp-f50k-noprune', ['--search', 'astar(merge_and_shrink(merge_strategy=merge_stateless(merge_selector=score_based_filtering(scoring_functions=[goal_relevance,dfp,total_order])),shrink_strategy=shrink_fh(),label_reduction=exact(before_shrinking=false,before_merging=true),max_states=50000,prune_unreachable_states=false,prune_irrelevant_states=false))']), - 
IssueConfig('sccs-dfp-f50k-noprune', ['--search', 'astar(merge_and_shrink(shrink_strategy=shrink_fh(),merge_strategy=merge_sccs(order_of_sccs=topological,merge_selector=score_based_filtering(scoring_functions=[goal_relevance,dfp,total_order(atomic_ts_order=reverse_level,product_ts_order=new_to_old,atomic_before_product=false)])),label_reduction=exact(before_shrinking=true,before_merging=false),max_states=50000,prune_unreachable_states=false,prune_irrelevant_states=false))']), -] -SUITE = DEFAULT_OPTIMAL_SUITE -ENVIRONMENT = MaiaEnvironment( - priority=0, email='silvan.sievers@unibas.ch') - -if is_test_run(): - SUITE = ['depot:p01.pddl', 'depot:p02.pddl', 'parcprinter-opt11-strips:p01.pddl', 'parcprinter-opt11-strips:p02.pddl', 'mystery:prob07.pddl'] - ENVIRONMENT = LocalEnvironment(processes=4) - -exp = IssueExperiment( - revisions=REVISIONS, - configs=CONFIGS, - environment=ENVIRONMENT, -) -exp.add_resource('ms_parser', 'ms-parser.py', dest='ms-parser.py') -exp.add_command('ms-parser', ['{ms_parser}']) -exp.add_suite(BENCHMARKS_DIR, SUITE) - -# planner outcome attributes -perfect_heuristic = Attribute('perfect_heuristic', absolute=True, min_wins=False) - -# m&s attributes -ms_construction_time = Attribute('ms_construction_time', absolute=False, min_wins=True, functions=[geometric_mean]) -ms_atomic_construction_time = Attribute('ms_atomic_construction_time', absolute=False, min_wins=True, functions=[geometric_mean]) -ms_abstraction_constructed = Attribute('ms_abstraction_constructed', absolute=True, min_wins=False) -ms_final_size = Attribute('ms_final_size', absolute=False, min_wins=True) -ms_out_of_memory = Attribute('ms_out_of_memory', absolute=True, min_wins=True) -ms_out_of_time = Attribute('ms_out_of_time', absolute=True, min_wins=True) -search_out_of_memory = Attribute('search_out_of_memory', absolute=True, min_wins=True) -search_out_of_time = Attribute('search_out_of_time', absolute=True, min_wins=True) - -extra_attributes = [ - perfect_heuristic, - - 
ms_construction_time, - ms_atomic_construction_time, - ms_abstraction_constructed, - ms_final_size, - ms_out_of_memory, - ms_out_of_time, - search_out_of_memory, - search_out_of_time, -] -attributes = exp.DEFAULT_TABLE_ATTRIBUTES -attributes.extend(extra_attributes) - -exp.add_absolute_report_step(attributes=attributes) - -exp.run_steps() diff --git a/experiments/issue707/v5.py b/experiments/issue707/v5.py deleted file mode 100755 index d2602ff2f7..0000000000 --- a/experiments/issue707/v5.py +++ /dev/null @@ -1,74 +0,0 @@ -#! /usr/bin/env python -# -*- coding: utf-8 -*- - -import os - -from lab.environments import LocalEnvironment, MaiaEnvironment -from lab.reports import Attribute, geometric_mean - -from common_setup import IssueConfig, IssueExperiment, DEFAULT_OPTIMAL_SUITE, is_test_run - -BENCHMARKS_DIR=os.path.expanduser('~/repos/downward/benchmarks') -REVISIONS = ["issue707-base-v2", "issue707-v5"] -CONFIGS = [ - IssueConfig('dfp-b50k', ['--search', 'astar(merge_and_shrink(merge_strategy=merge_stateless(merge_selector=score_based_filtering(scoring_functions=[goal_relevance,dfp,total_order])),shrink_strategy=shrink_bisimulation(greedy=false),label_reduction=exact(before_shrinking=true,before_merging=false),max_states=50000,threshold_before_merge=1))']), - IssueConfig('rl-b50k', ['--search', 'astar(merge_and_shrink(merge_strategy=merge_precomputed(merge_tree=linear(variable_order=reverse_level)),shrink_strategy=shrink_bisimulation(greedy=false),label_reduction=exact(before_shrinking=true,before_merging=false),max_states=50000,threshold_before_merge=1))']), - IssueConfig('sccs-dfp-b50k', ['--search', 
'astar(merge_and_shrink(shrink_strategy=shrink_bisimulation(greedy=false),merge_strategy=merge_sccs(order_of_sccs=topological,merge_selector=score_based_filtering(scoring_functions=[goal_relevance,dfp,total_order(atomic_ts_order=reverse_level,product_ts_order=new_to_old,atomic_before_product=false)])),label_reduction=exact(before_shrinking=true,before_merging=false),max_states=50000,threshold_before_merge=1))']), - - IssueConfig('dfp-ginf', ['--search', 'astar(merge_and_shrink(merge_strategy=merge_stateless(merge_selector=score_based_filtering(scoring_functions=[goal_relevance,dfp,total_order])),shrink_strategy=shrink_bisimulation(greedy=true),label_reduction=exact(before_shrinking=true,before_merging=false),max_states=infinity,threshold_before_merge=1))']), - IssueConfig('rl-ginf', ['--search', 'astar(merge_and_shrink(merge_strategy=merge_precomputed(merge_tree=linear(variable_order=reverse_level)),shrink_strategy=shrink_bisimulation(greedy=true),label_reduction=exact(before_shrinking=true,before_merging=false),max_states=infinity,threshold_before_merge=1))']), - IssueConfig('sccs-dfp-ginf', ['--search', 'astar(merge_and_shrink(shrink_strategy=shrink_bisimulation(greedy=true),merge_strategy=merge_sccs(order_of_sccs=topological,merge_selector=score_based_filtering(scoring_functions=[goal_relevance,dfp,total_order(atomic_ts_order=reverse_level,product_ts_order=new_to_old,atomic_before_product=false)])),label_reduction=exact(before_shrinking=true,before_merging=false),max_states=infinity,threshold_before_merge=1))']), - - IssueConfig('dfp-f50k', ['--search', 'astar(merge_and_shrink(merge_strategy=merge_stateless(merge_selector=score_based_filtering(scoring_functions=[goal_relevance,dfp,total_order])),shrink_strategy=shrink_fh(),label_reduction=exact(before_shrinking=false,before_merging=true),max_states=50000))']), - IssueConfig('rl-f50k', ['--search', 
'astar(merge_and_shrink(merge_strategy=merge_precomputed(merge_tree=linear(variable_order=reverse_level)),shrink_strategy=shrink_fh(),label_reduction=exact(before_shrinking=false,before_merging=true),max_states=50000))']), - IssueConfig('sccs-dfp-f50k', ['--search', 'astar(merge_and_shrink(shrink_strategy=shrink_fh(),merge_strategy=merge_sccs(order_of_sccs=topological,merge_selector=score_based_filtering(scoring_functions=[goal_relevance,dfp,total_order(atomic_ts_order=reverse_level,product_ts_order=new_to_old,atomic_before_product=false)])),label_reduction=exact(before_shrinking=true,before_merging=false),max_states=50000))']), -] -SUITE = DEFAULT_OPTIMAL_SUITE -ENVIRONMENT = MaiaEnvironment( - priority=0, email='silvan.sievers@unibas.ch') - -if is_test_run(): - SUITE = ['depot:p01.pddl', 'depot:p02.pddl', 'parcprinter-opt11-strips:p01.pddl', 'parcprinter-opt11-strips:p02.pddl', 'mystery:prob07.pddl'] - ENVIRONMENT = LocalEnvironment(processes=4) - -exp = IssueExperiment( - revisions=REVISIONS, - configs=CONFIGS, - environment=ENVIRONMENT, -) -exp.add_resource('ms_parser', 'ms-parser.py', dest='ms-parser.py') -exp.add_command('ms-parser', ['{ms_parser}']) -exp.add_suite(BENCHMARKS_DIR, SUITE) - -# planner outcome attributes -perfect_heuristic = Attribute('perfect_heuristic', absolute=True, min_wins=False) - -# m&s attributes -ms_construction_time = Attribute('ms_construction_time', absolute=False, min_wins=True, functions=[geometric_mean]) -ms_atomic_construction_time = Attribute('ms_atomic_construction_time', absolute=False, min_wins=True, functions=[geometric_mean]) -ms_abstraction_constructed = Attribute('ms_abstraction_constructed', absolute=True, min_wins=False) -ms_final_size = Attribute('ms_final_size', absolute=False, min_wins=True) -ms_out_of_memory = Attribute('ms_out_of_memory', absolute=True, min_wins=True) -ms_out_of_time = Attribute('ms_out_of_time', absolute=True, min_wins=True) -search_out_of_memory = Attribute('search_out_of_memory', 
absolute=True, min_wins=True) -search_out_of_time = Attribute('search_out_of_time', absolute=True, min_wins=True) - -extra_attributes = [ - perfect_heuristic, - - ms_construction_time, - ms_atomic_construction_time, - ms_abstraction_constructed, - ms_final_size, - ms_out_of_memory, - ms_out_of_time, - search_out_of_memory, - search_out_of_time, -] -attributes = exp.DEFAULT_TABLE_ATTRIBUTES -attributes.extend(extra_attributes) - -exp.add_comparison_table_step(attributes=attributes) -exp.add_scatter_plot_step() - -exp.run_steps() diff --git a/experiments/issue710/common_setup.py b/experiments/issue710/common_setup.py deleted file mode 100644 index df2613bf87..0000000000 --- a/experiments/issue710/common_setup.py +++ /dev/null @@ -1,392 +0,0 @@ -# -*- coding: utf-8 -*- - -import itertools -import os -import platform -import subprocess -import sys - -from lab.experiment import ARGPARSER -from lab import tools - -from downward.experiment import FastDownwardExperiment -from downward.reports.absolute import AbsoluteReport -from downward.reports.compare import ComparativeReport -from downward.reports.scatter import ScatterPlotReport - -try: - from relativescatter import RelativeScatterPlotReport - matplotlib = True -except ImportError: - print 'matplotlib not availabe, scatter plots not available' - matplotlib = False - - -def parse_args(): - ARGPARSER.add_argument( - "--test", - choices=["yes", "no", "auto"], - default="auto", - dest="test_run", - help="test experiment locally on a small suite if --test=yes or " - "--test=auto and we are not on a cluster") - return ARGPARSER.parse_args() - -ARGS = parse_args() - - -DEFAULT_OPTIMAL_SUITE = [ - 'airport', 'barman-opt11-strips', 'barman-opt14-strips', 'blocks', - 'childsnack-opt14-strips', 'depot', 'driverlog', - 'elevators-opt08-strips', 'elevators-opt11-strips', - 'floortile-opt11-strips', 'floortile-opt14-strips', 'freecell', - 'ged-opt14-strips', 'grid', 'gripper', 'hiking-opt14-strips', - 'logistics00', 'logistics98', 
'miconic', 'movie', 'mprime', - 'mystery', 'nomystery-opt11-strips', 'openstacks-opt08-strips', - 'openstacks-opt11-strips', 'openstacks-opt14-strips', - 'openstacks-strips', 'parcprinter-08-strips', - 'parcprinter-opt11-strips', 'parking-opt11-strips', - 'parking-opt14-strips', 'pathways-noneg', 'pegsol-08-strips', - 'pegsol-opt11-strips', 'pipesworld-notankage', - 'pipesworld-tankage', 'psr-small', 'rovers', 'satellite', - 'scanalyzer-08-strips', 'scanalyzer-opt11-strips', - 'sokoban-opt08-strips', 'sokoban-opt11-strips', 'storage', - 'tetris-opt14-strips', 'tidybot-opt11-strips', - 'tidybot-opt14-strips', 'tpp', 'transport-opt08-strips', - 'transport-opt11-strips', 'transport-opt14-strips', - 'trucks-strips', 'visitall-opt11-strips', 'visitall-opt14-strips', - 'woodworking-opt08-strips', 'woodworking-opt11-strips', - 'zenotravel'] - -DEFAULT_SATISFICING_SUITE = [ - 'airport', 'assembly', 'barman-sat11-strips', - 'barman-sat14-strips', 'blocks', 'cavediving-14-adl', - 'childsnack-sat14-strips', 'citycar-sat14-adl', 'depot', - 'driverlog', 'elevators-sat08-strips', 'elevators-sat11-strips', - 'floortile-sat11-strips', 'floortile-sat14-strips', 'freecell', - 'ged-sat14-strips', 'grid', 'gripper', 'hiking-sat14-strips', - 'logistics00', 'logistics98', 'maintenance-sat14-adl', 'miconic', - 'miconic-fulladl', 'miconic-simpleadl', 'movie', 'mprime', - 'mystery', 'nomystery-sat11-strips', 'openstacks', - 'openstacks-sat08-adl', 'openstacks-sat08-strips', - 'openstacks-sat11-strips', 'openstacks-sat14-strips', - 'openstacks-strips', 'optical-telegraphs', 'parcprinter-08-strips', - 'parcprinter-sat11-strips', 'parking-sat11-strips', - 'parking-sat14-strips', 'pathways', 'pathways-noneg', - 'pegsol-08-strips', 'pegsol-sat11-strips', 'philosophers', - 'pipesworld-notankage', 'pipesworld-tankage', 'psr-large', - 'psr-middle', 'psr-small', 'rovers', 'satellite', - 'scanalyzer-08-strips', 'scanalyzer-sat11-strips', 'schedule', - 'sokoban-sat08-strips', 'sokoban-sat11-strips', 
'storage', - 'tetris-sat14-strips', 'thoughtful-sat14-strips', - 'tidybot-sat11-strips', 'tpp', 'transport-sat08-strips', - 'transport-sat11-strips', 'transport-sat14-strips', 'trucks', - 'trucks-strips', 'visitall-sat11-strips', 'visitall-sat14-strips', - 'woodworking-sat08-strips', 'woodworking-sat11-strips', - 'zenotravel'] - - -def get_script(): - """Get file name of main script.""" - return tools.get_script_path() - - -def get_script_dir(): - """Get directory of main script. - - Usually a relative directory (depends on how it was called by the user.)""" - return os.path.dirname(get_script()) - - -def get_experiment_name(): - """Get name for experiment. - - Derived from the absolute filename of the main script, e.g. - "/ham/spam/eggs.py" => "spam-eggs".""" - script = os.path.abspath(get_script()) - script_dir = os.path.basename(os.path.dirname(script)) - script_base = os.path.splitext(os.path.basename(script))[0] - return "%s-%s" % (script_dir, script_base) - - -def get_data_dir(): - """Get data dir for the experiment. - - This is the subdirectory "data" of the directory containing - the main script.""" - return os.path.join(get_script_dir(), "data", get_experiment_name()) - - -def get_repo_base(): - """Get base directory of the repository, as an absolute path. - - Search upwards in the directory tree from the main script until a - directory with a subdirectory named ".hg" is found. 
- - Abort if the repo base cannot be found.""" - path = os.path.abspath(get_script_dir()) - while os.path.dirname(path) != path: - if os.path.exists(os.path.join(path, ".hg")): - return path - path = os.path.dirname(path) - sys.exit("repo base could not be found") - - -def is_running_on_cluster(): - node = platform.node() - return ( - "cluster" in node or - node.startswith("gkigrid") or - node in ["habakuk", "turtur"]) - - -def is_test_run(): - return ARGS.test_run == "yes" or ( - ARGS.test_run == "auto" and not is_running_on_cluster()) - - -def get_algo_nick(revision, config_nick): - return "{revision}-{config_nick}".format(**locals()) - - -class IssueConfig(object): - """Hold information about a planner configuration. - - See FastDownwardExperiment.add_algorithm() for documentation of the - constructor's options. - - """ - def __init__(self, nick, component_options, - build_options=None, driver_options=None): - self.nick = nick - self.component_options = component_options - self.build_options = build_options - self.driver_options = driver_options - - -class IssueExperiment(FastDownwardExperiment): - """Subclass of FastDownwardExperiment with some convenience features.""" - - DEFAULT_TEST_SUITE = ["gripper:prob01.pddl"] - - DEFAULT_TABLE_ATTRIBUTES = [ - "cost", - "coverage", - "error", - "evaluations", - "expansions", - "expansions_until_last_jump", - "generated", - "memory", - "quality", - "run_dir", - "score_evaluations", - "score_expansions", - "score_generated", - "score_memory", - "score_search_time", - "score_total_time", - "search_time", - "total_time", - "unsolvable", - ] - - DEFAULT_SCATTER_PLOT_ATTRIBUTES = [ - "evaluations", - "expansions", - "expansions_until_last_jump", - "initial_h_value", - "memory", - "search_time", - "total_time", - ] - - PORTFOLIO_ATTRIBUTES = [ - "cost", - "coverage", - "error", - "plan_length", - "run_dir", - ] - - def __init__(self, revisions=None, configs=None, path=None, **kwargs): - """ - - You can either specify both 
*revisions* and *configs* or none - of them. If they are omitted, you will need to call - exp.add_algorithm() manually. - - If *revisions* is given, it must be a non-empty list of - revision identifiers, which specify which planner versions to - use in the experiment. The same versions are used for - translator, preprocessor and search. :: - - IssueExperiment(revisions=["issue123", "4b3d581643"], ...) - - If *configs* is given, it must be a non-empty list of - IssueConfig objects. :: - - IssueExperiment(..., configs=[ - IssueConfig("ff", ["--search", "eager_greedy(ff())"]), - IssueConfig( - "lama", [], - driver_options=["--alias", "seq-sat-lama-2011"]), - ]) - - If *path* is specified, it must be the path to where the - experiment should be built (e.g. - /home/john/experiments/issue123/exp01/). If omitted, the - experiment path is derived automatically from the main - script's filename. Example:: - - script = experiments/issue123/exp01.py --> - path = experiments/issue123/data/issue123-exp01/ - - """ - - path = path or get_data_dir() - - FastDownwardExperiment.__init__(self, path=path, **kwargs) - - if (revisions and not configs) or (not revisions and configs): - raise ValueError( - "please provide either both or none of revisions and configs") - - for rev in revisions: - for config in configs: - self.add_algorithm( - get_algo_nick(rev, config.nick), - get_repo_base(), - rev, - config.component_options, - build_options=config.build_options, - driver_options=config.driver_options) - - self._revisions = revisions - self._configs = configs - - @classmethod - def _is_portfolio(cls, config_nick): - return "fdss" in config_nick - - @classmethod - def get_supported_attributes(cls, config_nick, attributes): - if cls._is_portfolio(config_nick): - return [attr for attr in attributes - if attr in cls.PORTFOLIO_ATTRIBUTES] - return attributes - - def add_absolute_report_step(self, **kwargs): - """Add step that makes an absolute report. 
- - Absolute reports are useful for experiments that don't compare - revisions. - - The report is written to the experiment evaluation directory. - - All *kwargs* will be passed to the AbsoluteReport class. If the - keyword argument *attributes* is not specified, a default list - of attributes is used. :: - - exp.add_absolute_report_step(attributes=["coverage"]) - - """ - kwargs.setdefault("attributes", self.DEFAULT_TABLE_ATTRIBUTES) - report = AbsoluteReport(**kwargs) - outfile = os.path.join( - self.eval_dir, - get_experiment_name() + "." + report.output_format) - self.add_report(report, outfile=outfile) - self.add_step( - 'publish-absolute-report', subprocess.call, ['publish', outfile]) - - def add_comparison_table_step(self, **kwargs): - """Add a step that makes pairwise revision comparisons. - - Create comparative reports for all pairs of Fast Downward - revisions. Each report pairs up the runs of the same config and - lists the two absolute attribute values and their difference - for all attributes in kwargs["attributes"]. - - All *kwargs* will be passed to the CompareConfigsReport class. - If the keyword argument *attributes* is not specified, a - default list of attributes is used. 
:: - - exp.add_comparison_table_step(attributes=["coverage"]) - - """ - kwargs.setdefault("attributes", self.DEFAULT_TABLE_ATTRIBUTES) - - def make_comparison_tables(): - for rev1, rev2 in itertools.combinations(self._revisions, 2): - compared_configs = [] - for config in self._configs: - config_nick = config.nick - compared_configs.append( - ("%s-%s" % (rev1, config_nick), - "%s-%s" % (rev2, config_nick), - "Diff (%s)" % config_nick)) - report = ComparativeReport(compared_configs, **kwargs) - outfile = os.path.join( - self.eval_dir, - "%s-%s-%s-compare.%s" % ( - self.name, rev1, rev2, report.output_format)) - report(self.eval_dir, outfile) - - def publish_comparison_tables(): - for rev1, rev2 in itertools.combinations(self._revisions, 2): - outfile = os.path.join( - self.eval_dir, - "%s-%s-%s-compare.html" % (self.name, rev1, rev2)) - subprocess.call(["publish", outfile]) - - self.add_step("make-comparison-tables", make_comparison_tables) - self.add_step( - "publish-comparison-tables", publish_comparison_tables) - - def add_scatter_plot_step(self, relative=False, attributes=None): - """Add step creating (relative) scatter plots for all revision pairs. - - Create a scatter plot for each combination of attribute, - configuration and revisions pair. If *attributes* is not - specified, a list of common scatter plot attributes is used. - For portfolios all attributes except "cost", "coverage" and - "plan_length" will be ignored. 
:: - - exp.add_scatter_plot_step(attributes=["expansions"]) - - """ - if matplotlib: - if relative: - report_class = RelativeScatterPlotReport - scatter_dir = os.path.join(self.eval_dir, "scatter-relative") - step_name = "make-relative-scatter-plots" - else: - report_class = ScatterPlotReport - scatter_dir = os.path.join(self.eval_dir, "scatter-absolute") - step_name = "make-absolute-scatter-plots" - if attributes is None: - attributes = self.DEFAULT_SCATTER_PLOT_ATTRIBUTES - - def make_scatter_plot(config_nick, rev1, rev2, attribute): - name = "-".join([self.name, rev1, rev2, attribute, config_nick]) - print "Make scatter plot for", name - algo1 = "{}-{}".format(rev1, config_nick) - algo2 = "{}-{}".format(rev2, config_nick) - report = report_class( - filter_config=[algo1, algo2], - attributes=[attribute], - get_category=lambda run1, run2: run1["domain"], - legend_location=(1.3, 0.5)) - report( - self.eval_dir, - os.path.join(scatter_dir, rev1 + "-" + rev2, name)) - - def make_scatter_plots(): - for config in self._configs: - for rev1, rev2 in itertools.combinations(self._revisions, 2): - for attribute in self.get_supported_attributes( - config.nick, attributes): - make_scatter_plot(config.nick, rev1, rev2, attribute) - - self.add_step(step_name, make_scatter_plots) diff --git a/experiments/issue710/ipdb-parser.py b/experiments/issue710/ipdb-parser.py deleted file mode 100755 index da9837e6b9..0000000000 --- a/experiments/issue710/ipdb-parser.py +++ /dev/null @@ -1,55 +0,0 @@ -#! 
/usr/bin/env python - -from lab.parser import Parser - -parser = Parser() -parser.add_pattern('hc_iterations', 'iPDB: iterations = (\d+)', required=False, type=int) -parser.add_pattern('hc_num_patters', 'iPDB: number of patterns = (\d+)', required=False, type=int) -parser.add_pattern('hc_size', 'iPDB: size = (\d+)', required=False, type=int) -parser.add_pattern('hc_num_generated', 'iPDB: generated = (\d+)', required=False, type=int) -parser.add_pattern('hc_num_rejected', 'iPDB: rejected = (\d+)', required=False, type=int) -parser.add_pattern('hc_max_pdb_size', 'iPDB: maximum pdb size = (\d+)', required=False, type=int) -parser.add_pattern('hc_hill_climbing_time', 'iPDB: hill climbing time: (.+)s', required=False, type=float) -parser.add_pattern('hc_total_time', 'Pattern generation \(hill climbing\) time: (.+)s', required=False, type=float) -parser.add_pattern('cpdbs_time', 'PDB collection construction time: (.+)s', required=False, type=float) - -def check_hc_constructed(content, props): - hc_time = props.get('hc_total_time') - abstraction_constructed = False - if hc_time is not None: - abstraction_constructed = True - props['hc_abstraction_constructed'] = abstraction_constructed - -parser.add_function(check_hc_constructed) - -def check_planner_exit_reason(content, props): - hc_abstraction_constructed = props.get('hc_abstraction_constructed') - error = props.get('error') - if error != 'none' and error != 'timeout' and error != 'out-of-memory': - print 'error: %s' % error - return - - # Check whether hill climbing computation or search ran out of - # time or memory. 
- hc_out_of_time = False - hc_out_of_memory = False - search_out_of_time = False - search_out_of_memory = False - if hc_abstraction_constructed == False: - if error == 'timeout': - hc_out_of_time = True - elif error == 'out-of-memory': - hc_out_of_memory = True - elif hc_abstraction_constructed == True: - if error == 'timeout': - search_out_of_time = True - elif error == 'out-of-memory': - search_out_of_memory = True - props['hc_out_of_time'] = hc_out_of_time - props['hc_out_of_memory'] = hc_out_of_memory - props['search_out_of_time'] = search_out_of_time - props['search_out_of_memory'] = search_out_of_memory - -parser.add_function(check_planner_exit_reason) - -parser.parse() diff --git a/experiments/issue710/relativescatter.py b/experiments/issue710/relativescatter.py deleted file mode 100644 index f74cb6e721..0000000000 --- a/experiments/issue710/relativescatter.py +++ /dev/null @@ -1,105 +0,0 @@ -# -*- coding: utf-8 -*- - -from collections import defaultdict - -from matplotlib import ticker - -from downward.reports.scatter import ScatterPlotReport -from downward.reports.plot import PlotReport, Matplotlib, MatplotlibPlot - - -# TODO: handle outliers - -# TODO: this is mostly copied from ScatterMatplotlib (scatter.py) -class RelativeScatterMatplotlib(Matplotlib): - @classmethod - def _plot(cls, report, axes, categories, styles): - # Display grid - axes.grid(b=True, linestyle='-', color='0.75') - - has_points = False - # Generate the scatter plots - for category, coords in sorted(categories.items()): - X, Y = zip(*coords) - axes.scatter(X, Y, s=42, label=category, **styles[category]) - if X and Y: - has_points = True - - if report.xscale == 'linear' or report.yscale == 'linear': - plot_size = report.missing_val * 1.01 - else: - plot_size = report.missing_val * 1.25 - - # make 5 ticks above and below 1 - yticks = [] - tick_step = report.ylim_top**(1/5.0) - for i in xrange(-5, 6): - yticks.append(tick_step**i) - axes.set_yticks(yticks) - 
axes.get_yaxis().set_major_formatter(ticker.ScalarFormatter()) - - axes.set_xlim(report.xlim_left or -1, report.xlim_right or plot_size) - axes.set_ylim(report.ylim_bottom or -1, report.ylim_top or plot_size) - - for axis in [axes.xaxis, axes.yaxis]: - MatplotlibPlot.change_axis_formatter( - axis, - report.missing_val if report.show_missing else None) - return has_points - - -class RelativeScatterPlotReport(ScatterPlotReport): - """ - Generate a scatter plot that shows a relative comparison of two - algorithms with regard to the given attribute. The attribute value - of algorithm 1 is shown on the x-axis and the relation to the value - of algorithm 2 on the y-axis. - """ - - def __init__(self, show_missing=True, get_category=None, **kwargs): - ScatterPlotReport.__init__(self, show_missing, get_category, **kwargs) - if self.output_format == 'tex': - raise "not supported" - else: - self.writer = RelativeScatterMatplotlib - - def _fill_categories(self, runs): - # We discard the *runs* parameter. 
- # Map category names to value tuples - categories = defaultdict(list) - self.ylim_bottom = 2 - self.ylim_top = 0.5 - self.xlim_left = float("inf") - for (domain, problem), runs in self.problem_runs.items(): - if len(runs) != 2: - continue - run1, run2 = runs - assert (run1['algorithm'] == self.algorithms[0] and - run2['algorithm'] == self.algorithms[1]) - val1 = run1.get(self.attribute) - val2 = run2.get(self.attribute) - if val1 is None or val2 is None: - continue - category = self.get_category(run1, run2) - assert val1 > 0, (domain, problem, self.algorithms[0], val1) - assert val2 > 0, (domain, problem, self.algorithms[1], val2) - x = val1 - y = val2 / float(val1) - - categories[category].append((x, y)) - - self.ylim_top = max(self.ylim_top, y) - self.ylim_bottom = min(self.ylim_bottom, y) - self.xlim_left = min(self.xlim_left, x) - - # center around 1 - if self.ylim_bottom < 1: - self.ylim_top = max(self.ylim_top, 1 / float(self.ylim_bottom)) - if self.ylim_top > 1: - self.ylim_bottom = min(self.ylim_bottom, 1 / float(self.ylim_top)) - return categories - - def _set_scales(self, xscale, yscale): - # ScatterPlot uses log-scaling on the x-axis by default. - PlotReport._set_scales( - self, xscale or self.attribute.scale or 'log', 'log') diff --git a/experiments/issue710/v1.py b/experiments/issue710/v1.py deleted file mode 100755 index 63bd160132..0000000000 --- a/experiments/issue710/v1.py +++ /dev/null @@ -1,55 +0,0 @@ -#! /usr/bin/env python -# -*- coding: utf-8 -*- - -#! 
/usr/bin/env python -# -*- coding: utf-8 -*- - -import os - -from lab.environments import LocalEnvironment, MaiaEnvironment -from lab.reports import Attribute, geometric_mean - -from common_setup import IssueConfig, IssueExperiment, DEFAULT_OPTIMAL_SUITE, is_test_run - -BENCHMARKS_DIR=os.path.expanduser('~/repos/downward/benchmarks') -REVISIONS = ["issue710-base", "issue710-v1"] -CONFIGS = [ - IssueConfig('cpdbs-hc', ['--search', 'astar(cpdbs(patterns=hillclimbing()))']), - IssueConfig('cpdbs-hc900', ['--search', 'astar(cpdbs(patterns=hillclimbing(max_time=900)))']), -] -SUITE = DEFAULT_OPTIMAL_SUITE -ENVIRONMENT = MaiaEnvironment( - priority=0, email='silvan.sievers@unibas.ch') - -if is_test_run(): - SUITE = ['depot:p01.pddl', 'depot:p02.pddl', 'parcprinter-opt11-strips:p01.pddl', 'parcprinter-opt11-strips:p02.pddl', 'mystery:prob07.pddl'] - ENVIRONMENT = LocalEnvironment(processes=4) - -exp = IssueExperiment( - revisions=REVISIONS, - configs=CONFIGS, - environment=ENVIRONMENT, -) -exp.add_resource('ipdb_parser', 'ipdb-parser.py', dest='ipdb-parser.py') -exp.add_command('ipdb-parser', ['{ipdb_parser}']) -exp.add_suite(BENCHMARKS_DIR, SUITE) - -# ipdb attributes -extra_attributes = [ - Attribute('hc_iterations', absolute=True, min_wins=True), - Attribute('hc_num_patters', absolute=True, min_wins=True), - Attribute('hc_size', absolute=True, min_wins=True), - Attribute('hc_num_generated', absolute=True, min_wins=True), - Attribute('hc_num_rejected', absolute=True, min_wins=True), - Attribute('hc_max_pdb_size', absolute=True, min_wins=True), - Attribute('hc_hill_climbing_time', absolute=False, min_wins=True, functions=[geometric_mean]), - Attribute('hc_total_time', absolute=False, min_wins=True, functions=[geometric_mean]), - Attribute('cpdbs_time', absolute=False, min_wins=True, functions=[geometric_mean]), -] -attributes = exp.DEFAULT_TABLE_ATTRIBUTES -attributes.extend(extra_attributes) - -exp.add_comparison_table_step(attributes=attributes) 
-exp.add_scatter_plot_step() - -exp.run_steps() diff --git a/experiments/issue714/common_setup.py b/experiments/issue714/common_setup.py deleted file mode 100644 index 5d2b40b61c..0000000000 --- a/experiments/issue714/common_setup.py +++ /dev/null @@ -1,385 +0,0 @@ -# -*- coding: utf-8 -*- - -import itertools -import os -import platform -import subprocess -import sys - -from lab.experiment import ARGPARSER -from lab import tools - -from downward.experiment import FastDownwardExperiment -from downward.reports.absolute import AbsoluteReport -from downward.reports.compare import ComparativeReport -from downward.reports.scatter import ScatterPlotReport - -from relativescatter import RelativeScatterPlotReport - - -def parse_args(): - ARGPARSER.add_argument( - "--test", - choices=["yes", "no", "auto"], - default="auto", - dest="test_run", - help="test experiment locally on a small suite if --test=yes or " - "--test=auto and we are not on a cluster") - return ARGPARSER.parse_args() - -ARGS = parse_args() - - -DEFAULT_OPTIMAL_SUITE = [ - 'airport', 'barman-opt11-strips', 'barman-opt14-strips', 'blocks', - 'childsnack-opt14-strips', 'depot', 'driverlog', - 'elevators-opt08-strips', 'elevators-opt11-strips', - 'floortile-opt11-strips', 'floortile-opt14-strips', 'freecell', - 'ged-opt14-strips', 'grid', 'gripper', 'hiking-opt14-strips', - 'logistics00', 'logistics98', 'miconic', 'movie', 'mprime', - 'mystery', 'nomystery-opt11-strips', 'openstacks-opt08-strips', - 'openstacks-opt11-strips', 'openstacks-opt14-strips', - 'openstacks-strips', 'parcprinter-08-strips', - 'parcprinter-opt11-strips', 'parking-opt11-strips', - 'parking-opt14-strips', 'pathways-noneg', 'pegsol-08-strips', - 'pegsol-opt11-strips', 'pipesworld-notankage', - 'pipesworld-tankage', 'psr-small', 'rovers', 'satellite', - 'scanalyzer-08-strips', 'scanalyzer-opt11-strips', - 'sokoban-opt08-strips', 'sokoban-opt11-strips', 'storage', - 'tetris-opt14-strips', 'tidybot-opt11-strips', - 'tidybot-opt14-strips', 
'tpp', 'transport-opt08-strips', - 'transport-opt11-strips', 'transport-opt14-strips', - 'trucks-strips', 'visitall-opt11-strips', 'visitall-opt14-strips', - 'woodworking-opt08-strips', 'woodworking-opt11-strips', - 'zenotravel'] - -DEFAULT_SATISFICING_SUITE = [ - 'airport', 'assembly', 'barman-sat11-strips', - 'barman-sat14-strips', 'blocks', 'cavediving-14-adl', - 'childsnack-sat14-strips', 'citycar-sat14-adl', 'depot', - 'driverlog', 'elevators-sat08-strips', 'elevators-sat11-strips', - 'floortile-sat11-strips', 'floortile-sat14-strips', 'freecell', - 'ged-sat14-strips', 'grid', 'gripper', 'hiking-sat14-strips', - 'logistics00', 'logistics98', 'maintenance-sat14-adl', 'miconic', - 'miconic-fulladl', 'miconic-simpleadl', 'movie', 'mprime', - 'mystery', 'nomystery-sat11-strips', 'openstacks', - 'openstacks-sat08-adl', 'openstacks-sat08-strips', - 'openstacks-sat11-strips', 'openstacks-sat14-strips', - 'openstacks-strips', 'optical-telegraphs', 'parcprinter-08-strips', - 'parcprinter-sat11-strips', 'parking-sat11-strips', - 'parking-sat14-strips', 'pathways', 'pathways-noneg', - 'pegsol-08-strips', 'pegsol-sat11-strips', 'philosophers', - 'pipesworld-notankage', 'pipesworld-tankage', 'psr-large', - 'psr-middle', 'psr-small', 'rovers', 'satellite', - 'scanalyzer-08-strips', 'scanalyzer-sat11-strips', 'schedule', - 'sokoban-sat08-strips', 'sokoban-sat11-strips', 'storage', - 'tetris-sat14-strips', 'thoughtful-sat14-strips', - 'tidybot-sat11-strips', 'tpp', 'transport-sat08-strips', - 'transport-sat11-strips', 'transport-sat14-strips', 'trucks', - 'trucks-strips', 'visitall-sat11-strips', 'visitall-sat14-strips', - 'woodworking-sat08-strips', 'woodworking-sat11-strips', - 'zenotravel'] - - -def get_script(): - """Get file name of main script.""" - return tools.get_script_path() - - -def get_script_dir(): - """Get directory of main script. 
- - Usually a relative directory (depends on how it was called by the user.)""" - return os.path.dirname(get_script()) - - -def get_experiment_name(): - """Get name for experiment. - - Derived from the absolute filename of the main script, e.g. - "/ham/spam/eggs.py" => "spam-eggs".""" - script = os.path.abspath(get_script()) - script_dir = os.path.basename(os.path.dirname(script)) - script_base = os.path.splitext(os.path.basename(script))[0] - return "%s-%s" % (script_dir, script_base) - - -def get_data_dir(): - """Get data dir for the experiment. - - This is the subdirectory "data" of the directory containing - the main script.""" - return os.path.join(get_script_dir(), "data", get_experiment_name()) - - -def get_repo_base(): - """Get base directory of the repository, as an absolute path. - - Search upwards in the directory tree from the main script until a - directory with a subdirectory named ".hg" is found. - - Abort if the repo base cannot be found.""" - path = os.path.abspath(get_script_dir()) - while os.path.dirname(path) != path: - if os.path.exists(os.path.join(path, ".hg")): - return path - path = os.path.dirname(path) - sys.exit("repo base could not be found") - - -def is_running_on_cluster(): - node = platform.node() - return ( - "cluster" in node or - node.startswith("gkigrid") or - node in ["habakuk", "turtur"]) - - -def is_test_run(): - return ARGS.test_run == "yes" or ( - ARGS.test_run == "auto" and not is_running_on_cluster()) - - -def get_algo_nick(revision, config_nick): - return "{revision}-{config_nick}".format(**locals()) - - -class IssueConfig(object): - """Hold information about a planner configuration. - - See FastDownwardExperiment.add_algorithm() for documentation of the - constructor's options. 
- - """ - def __init__(self, nick, component_options, - build_options=None, driver_options=None): - self.nick = nick - self.component_options = component_options - self.build_options = build_options - self.driver_options = driver_options - - -class IssueExperiment(FastDownwardExperiment): - """Subclass of FastDownwardExperiment with some convenience features.""" - - DEFAULT_TEST_SUITE = ["gripper:prob01.pddl"] - - DEFAULT_TABLE_ATTRIBUTES = [ - "cost", - "coverage", - "error", - "evaluations", - "expansions", - "expansions_until_last_jump", - "generated", - "memory", - "quality", - "run_dir", - "score_evaluations", - "score_expansions", - "score_generated", - "score_memory", - "score_search_time", - "score_total_time", - "search_time", - "total_time", - ] - - DEFAULT_SCATTER_PLOT_ATTRIBUTES = [ - "evaluations", - "expansions", - "expansions_until_last_jump", - "initial_h_value", - "memory", - "search_time", - "total_time", - ] - - PORTFOLIO_ATTRIBUTES = [ - "cost", - "coverage", - "error", - "plan_length", - "run_dir", - ] - - def __init__(self, revisions=None, configs=None, path=None, **kwargs): - """ - - You can either specify both *revisions* and *configs* or none - of them. If they are omitted, you will need to call - exp.add_algorithm() manually. - - If *revisions* is given, it must be a non-empty list of - revision identifiers, which specify which planner versions to - use in the experiment. The same versions are used for - translator, preprocessor and search. :: - - IssueExperiment(revisions=["issue123", "4b3d581643"], ...) - - If *configs* is given, it must be a non-empty list of - IssueConfig objects. :: - - IssueExperiment(..., configs=[ - IssueConfig("ff", ["--search", "eager_greedy(ff())"]), - IssueConfig( - "lama", [], - driver_options=["--alias", "seq-sat-lama-2011"]), - ]) - - If *path* is specified, it must be the path to where the - experiment should be built (e.g. - /home/john/experiments/issue123/exp01/). 
If omitted, the - experiment path is derived automatically from the main - script's filename. Example:: - - script = experiments/issue123/exp01.py --> - path = experiments/issue123/data/issue123-exp01/ - - """ - - path = path or get_data_dir() - - FastDownwardExperiment.__init__(self, path=path, **kwargs) - - if (revisions and not configs) or (not revisions and configs): - raise ValueError( - "please provide either both or none of revisions and configs") - - for rev in revisions: - for config in configs: - self.add_algorithm( - get_algo_nick(rev, config.nick), - get_repo_base(), - rev, - config.component_options, - build_options=config.build_options, - driver_options=config.driver_options) - - self._revisions = revisions - self._configs = configs - - @classmethod - def _is_portfolio(cls, config_nick): - return "fdss" in config_nick - - @classmethod - def get_supported_attributes(cls, config_nick, attributes): - if cls._is_portfolio(config_nick): - return [attr for attr in attributes - if attr in cls.PORTFOLIO_ATTRIBUTES] - return attributes - - def add_absolute_report_step(self, **kwargs): - """Add step that makes an absolute report. - - Absolute reports are useful for experiments that don't compare - revisions. - - The report is written to the experiment evaluation directory. - - All *kwargs* will be passed to the AbsoluteReport class. If the - keyword argument *attributes* is not specified, a default list - of attributes is used. :: - - exp.add_absolute_report_step(attributes=["coverage"]) - - """ - kwargs.setdefault("attributes", self.DEFAULT_TABLE_ATTRIBUTES) - report = AbsoluteReport(**kwargs) - outfile = os.path.join( - self.eval_dir, - get_experiment_name() + "." + report.output_format) - self.add_report(report, outfile=outfile) - self.add_step( - 'publish-absolute-report', subprocess.call, ['publish', outfile]) - - def add_comparison_table_step(self, **kwargs): - """Add a step that makes pairwise revision comparisons. 
- - Create comparative reports for all pairs of Fast Downward - revisions. Each report pairs up the runs of the same config and - lists the two absolute attribute values and their difference - for all attributes in kwargs["attributes"]. - - All *kwargs* will be passed to the CompareConfigsReport class. - If the keyword argument *attributes* is not specified, a - default list of attributes is used. :: - - exp.add_comparison_table_step(attributes=["coverage"]) - - """ - kwargs.setdefault("attributes", self.DEFAULT_TABLE_ATTRIBUTES) - - def make_comparison_tables(): - for rev1, rev2 in itertools.combinations(self._revisions, 2): - compared_configs = [] - for config in self._configs: - config_nick = config.nick - compared_configs.append( - ("%s-%s" % (rev1, config_nick), - "%s-%s" % (rev2, config_nick), - "Diff (%s)" % config_nick)) - report = ComparativeReport(compared_configs, **kwargs) - outfile = os.path.join( - self.eval_dir, - "%s-%s-%s-compare.%s" % ( - self.name, rev1, rev2, report.output_format)) - report(self.eval_dir, outfile) - - def publish_comparison_tables(): - for rev1, rev2 in itertools.combinations(self._revisions, 2): - outfile = os.path.join( - self.eval_dir, - "%s-%s-%s-compare.html" % (self.name, rev1, rev2)) - subprocess.call(["publish", outfile]) - - self.add_step("make-comparison-tables", make_comparison_tables) - self.add_step( - "publish-comparison-tables", publish_comparison_tables) - - def add_scatter_plot_step(self, relative=False, attributes=None): - """Add step creating (relative) scatter plots for all revision pairs. - - Create a scatter plot for each combination of attribute, - configuration and revisions pair. If *attributes* is not - specified, a list of common scatter plot attributes is used. - For portfolios all attributes except "cost", "coverage" and - "plan_length" will be ignored. 
:: - - exp.add_scatter_plot_step(attributes=["expansions"]) - - """ - if relative: - report_class = RelativeScatterPlotReport - scatter_dir = os.path.join(self.eval_dir, "scatter-relative") - step_name = "make-relative-scatter-plots" - else: - report_class = ScatterPlotReport - scatter_dir = os.path.join(self.eval_dir, "scatter-absolute") - step_name = "make-absolute-scatter-plots" - if attributes is None: - attributes = self.DEFAULT_SCATTER_PLOT_ATTRIBUTES - - def make_scatter_plot(config_nick, rev1, rev2, attribute): - name = "-".join([self.name, rev1, rev2, attribute, config_nick]) - print "Make scatter plot for", name - algo1 = "{}-{}".format(rev1, config_nick) - algo2 = "{}-{}".format(rev2, config_nick) - report = report_class( - filter_config=[algo1, algo2], - attributes=[attribute], - get_category=lambda run1, run2: run1["domain"], - legend_location=(1.3, 0.5)) - report( - self.eval_dir, - os.path.join(scatter_dir, rev1 + "-" + rev2, name)) - - def make_scatter_plots(): - for config in self._configs: - for rev1, rev2 in itertools.combinations(self._revisions, 2): - for attribute in self.get_supported_attributes( - config.nick, attributes): - make_scatter_plot(config.nick, rev1, rev2, attribute) - - self.add_step(step_name, make_scatter_plots) diff --git a/experiments/issue714/relativescatter.py b/experiments/issue714/relativescatter.py deleted file mode 100644 index f74cb6e721..0000000000 --- a/experiments/issue714/relativescatter.py +++ /dev/null @@ -1,105 +0,0 @@ -# -*- coding: utf-8 -*- - -from collections import defaultdict - -from matplotlib import ticker - -from downward.reports.scatter import ScatterPlotReport -from downward.reports.plot import PlotReport, Matplotlib, MatplotlibPlot - - -# TODO: handle outliers - -# TODO: this is mostly copied from ScatterMatplotlib (scatter.py) -class RelativeScatterMatplotlib(Matplotlib): - @classmethod - def _plot(cls, report, axes, categories, styles): - # Display grid - axes.grid(b=True, linestyle='-', 
color='0.75') - - has_points = False - # Generate the scatter plots - for category, coords in sorted(categories.items()): - X, Y = zip(*coords) - axes.scatter(X, Y, s=42, label=category, **styles[category]) - if X and Y: - has_points = True - - if report.xscale == 'linear' or report.yscale == 'linear': - plot_size = report.missing_val * 1.01 - else: - plot_size = report.missing_val * 1.25 - - # make 5 ticks above and below 1 - yticks = [] - tick_step = report.ylim_top**(1/5.0) - for i in xrange(-5, 6): - yticks.append(tick_step**i) - axes.set_yticks(yticks) - axes.get_yaxis().set_major_formatter(ticker.ScalarFormatter()) - - axes.set_xlim(report.xlim_left or -1, report.xlim_right or plot_size) - axes.set_ylim(report.ylim_bottom or -1, report.ylim_top or plot_size) - - for axis in [axes.xaxis, axes.yaxis]: - MatplotlibPlot.change_axis_formatter( - axis, - report.missing_val if report.show_missing else None) - return has_points - - -class RelativeScatterPlotReport(ScatterPlotReport): - """ - Generate a scatter plot that shows a relative comparison of two - algorithms with regard to the given attribute. The attribute value - of algorithm 1 is shown on the x-axis and the relation to the value - of algorithm 2 on the y-axis. - """ - - def __init__(self, show_missing=True, get_category=None, **kwargs): - ScatterPlotReport.__init__(self, show_missing, get_category, **kwargs) - if self.output_format == 'tex': - raise "not supported" - else: - self.writer = RelativeScatterMatplotlib - - def _fill_categories(self, runs): - # We discard the *runs* parameter. 
- # Map category names to value tuples - categories = defaultdict(list) - self.ylim_bottom = 2 - self.ylim_top = 0.5 - self.xlim_left = float("inf") - for (domain, problem), runs in self.problem_runs.items(): - if len(runs) != 2: - continue - run1, run2 = runs - assert (run1['algorithm'] == self.algorithms[0] and - run2['algorithm'] == self.algorithms[1]) - val1 = run1.get(self.attribute) - val2 = run2.get(self.attribute) - if val1 is None or val2 is None: - continue - category = self.get_category(run1, run2) - assert val1 > 0, (domain, problem, self.algorithms[0], val1) - assert val2 > 0, (domain, problem, self.algorithms[1], val2) - x = val1 - y = val2 / float(val1) - - categories[category].append((x, y)) - - self.ylim_top = max(self.ylim_top, y) - self.ylim_bottom = min(self.ylim_bottom, y) - self.xlim_left = min(self.xlim_left, x) - - # center around 1 - if self.ylim_bottom < 1: - self.ylim_top = max(self.ylim_top, 1 / float(self.ylim_bottom)) - if self.ylim_top > 1: - self.ylim_bottom = min(self.ylim_bottom, 1 / float(self.ylim_top)) - return categories - - def _set_scales(self, xscale, yscale): - # ScatterPlot uses log-scaling on the x-axis by default. - PlotReport._set_scales( - self, xscale or self.attribute.scale or 'log', 'log') diff --git a/experiments/issue714/v1-configs.py b/experiments/issue714/v1-configs.py deleted file mode 100755 index a0093e1b96..0000000000 --- a/experiments/issue714/v1-configs.py +++ /dev/null @@ -1,69 +0,0 @@ -#! 
/usr/bin/env python -# -*- coding: utf-8 -*- - -import itertools -import os - -from lab.environments import LocalEnvironment, MaiaEnvironment - -from downward.reports import compare - -import common_setup -from common_setup import IssueConfig, IssueExperiment - -DIR = os.path.dirname(os.path.abspath(__file__)) -BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"] - -SUITE = common_setup.DEFAULT_SATISFICING_SUITE -ENVIRONMENT = MaiaEnvironment( - priority=0, email="jendrik.seipp@unibas.ch") - -if common_setup.is_test_run(): - SUITE = IssueExperiment.DEFAULT_TEST_SUITE - ENVIRONMENT = LocalEnvironment(processes=1) - -exp = IssueExperiment( - revisions=[], - configs=[], - environment=ENVIRONMENT, -) - -compared_algorithms = [] -for search in ["eager_greedy", "lazy_greedy"]: - for h1, h2 in itertools.permutations(["cea", "cg", "ff"], 2): - rev = "issue714-base" - config_nick = "-".join([search, h1, h2]) - algo1 = common_setup.get_algo_nick(rev, config_nick) - exp.add_algorithm( - algo1, - common_setup.get_repo_base(), - rev, - [ - "--heuristic", "h{h1}={h1}".format(**locals()), - "--heuristic", "h{h2}={h2}".format(**locals()), - "--search", "{search}(h{h1}, h{h2}, preferred=[h{h1},h{h2}])".format(**locals())], - driver_options=["--search-time-limit", "1m"]) - - rev = "issue714-v1" - config_nick = "-".join([search, h1, h2]) - algo2 = common_setup.get_algo_nick(rev, config_nick) - exp.add_algorithm( - algo2, - common_setup.get_repo_base(), - rev, - [ - "--heuristic", "h{h1}={h1}".format(**locals()), - "--heuristic", "h{h2}={h2}".format(**locals()), - "--search", "{search}([h{h1},h{h2}], preferred=[h{h1},h{h2}])".format(**locals())], - driver_options=["--search-time-limit", "1m"]) - - compared_algorithms.append([algo1, algo2, "Diff ({config_nick})".format(**locals())]) - -exp.add_suite(BENCHMARKS_DIR, SUITE) -exp.add_absolute_report_step() -exp.add_report(compare.ComparativeReport( - compared_algorithms, - attributes=IssueExperiment.DEFAULT_TABLE_ATTRIBUTES), - 
name=common_setup.get_experiment_name() + "-comparison") - -exp.run_steps() diff --git a/experiments/issue714/v1-portfolios.py b/experiments/issue714/v1-portfolios.py deleted file mode 100755 index 2efcb11c06..0000000000 --- a/experiments/issue714/v1-portfolios.py +++ /dev/null @@ -1,38 +0,0 @@ -#! /usr/bin/env python -# -*- coding: utf-8 -*- - -import os - -from lab.environments import LocalEnvironment, MaiaEnvironment - -import common_setup -from common_setup import IssueConfig, IssueExperiment - -DIR = os.path.dirname(os.path.abspath(__file__)) -BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"] -REVISIONS = ["issue714-base", "issue714-v1"] -CONFIGS = [ - IssueConfig(alias, [], driver_options=["--alias", alias]) - for alias in [ - "seq-sat-fdss-1", "seq-sat-fdss-2", "seq-sat-fdss-2014", - "seq-sat-fd-autotune-1", "seq-sat-fd-autotune-2"] -] -SUITE = common_setup.DEFAULT_SATISFICING_SUITE -ENVIRONMENT = MaiaEnvironment( - priority=0, email="jendrik.seipp@unibas.ch") - -if common_setup.is_test_run(): - SUITE = IssueExperiment.DEFAULT_TEST_SUITE - ENVIRONMENT = LocalEnvironment(processes=1) - -exp = IssueExperiment( - revisions=REVISIONS, - configs=CONFIGS, - environment=ENVIRONMENT, -) - -exp.add_suite(BENCHMARKS_DIR, SUITE) -exp.add_absolute_report_step(attributes=IssueExperiment.PORTFOLIO_ATTRIBUTES) -exp.add_comparison_table_step(attributes=IssueExperiment.PORTFOLIO_ATTRIBUTES) - -exp.run_steps() diff --git a/experiments/issue717/common_setup.py b/experiments/issue717/common_setup.py deleted file mode 100644 index 978b00fd15..0000000000 --- a/experiments/issue717/common_setup.py +++ /dev/null @@ -1,335 +0,0 @@ -# -*- coding: utf-8 -*- - -import itertools -import os -import platform -import subprocess -import sys - -from lab.experiment import ARGPARSER -from lab.steps import Step -from lab import tools - -from downward.experiment import FastDownwardExperiment -from downward.reports.absolute import AbsoluteReport -from downward.reports.compare import 
ComparativeReport as CompareConfigsReport -from downward.reports.scatter import ScatterPlotReport - -from relativescatter import RelativeScatterPlotReport - - -def parse_args(): - ARGPARSER.add_argument( - "--test", - choices=["yes", "no", "auto"], - default="auto", - dest="test_run", - help="test experiment locally on a small suite if --test=yes or " - "--test=auto and we are not on a cluster") - return ARGPARSER.parse_args() - -ARGS = parse_args() - - -def get_script(): - """Get file name of main script.""" - return tools.get_script_path() - - -def get_script_dir(): - """Get directory of main script. - - Usually a relative directory (depends on how it was called by the user.)""" - return os.path.dirname(get_script()) - - -def get_experiment_name(): - """Get name for experiment. - - Derived from the absolute filename of the main script, e.g. - "/ham/spam/eggs.py" => "spam-eggs".""" - script = os.path.abspath(get_script()) - script_dir = os.path.basename(os.path.dirname(script)) - script_base = os.path.splitext(os.path.basename(script))[0] - return "%s-%s" % (script_dir, script_base) - - -def get_data_dir(): - """Get data dir for the experiment. - - This is the subdirectory "data" of the directory containing - the main script.""" - return os.path.join(get_script_dir(), "data", get_experiment_name()) - - -def get_repo_base(): - """Get base directory of the repository, as an absolute path. - - Search upwards in the directory tree from the main script until a - directory with a subdirectory named ".hg" is found. 
- - Abort if the repo base cannot be found.""" - path = os.path.abspath(get_script_dir()) - while os.path.dirname(path) != path: - if os.path.exists(os.path.join(path, ".hg")): - return path - path = os.path.dirname(path) - sys.exit("repo base could not be found") - - -def is_running_on_cluster(): - node = platform.node() - return ( - "cluster" in node or - node.startswith("gkigrid") or - node in ["habakuk", "turtur"]) - - -def is_test_run(): - return ARGS.test_run == "yes" or ( - ARGS.test_run == "auto" and not is_running_on_cluster()) - - -def get_algo_nick(revision, config_nick): - return "{revision}-{config_nick}".format(**locals()) - - -class IssueConfig(object): - """Hold information about a planner configuration. - - See FastDownwardExperiment.add_algorithm() for documentation of the - constructor's options. - - """ - def __init__(self, nick, component_options, - build_options=None, driver_options=None): - self.nick = nick - self.component_options = component_options - self.build_options = build_options - self.driver_options = driver_options - - -class IssueExperiment(FastDownwardExperiment): - """Subclass of FastDownwardExperiment with some convenience features.""" - - DEFAULT_TEST_SUITE = ["gripper:prob01.pddl"] - - DEFAULT_TABLE_ATTRIBUTES = [ - "cost", - "coverage", - "error", - "evaluations", - "expansions", - "expansions_until_last_jump", - "generated", - "memory", - "quality", - "run_dir", - "score_evaluations", - "score_expansions", - "score_generated", - "score_memory", - "score_search_time", - "score_total_time", - "search_time", - "total_time", - ] - - DEFAULT_SCATTER_PLOT_ATTRIBUTES = [ - "evaluations", - "expansions", - "expansions_until_last_jump", - "initial_h_value", - "memory", - "search_time", - "total_time", - ] - - PORTFOLIO_ATTRIBUTES = [ - "cost", - "coverage", - "error", - "plan_length", - "run_dir", - ] - - def __init__(self, revisions=None, configs=None, path=None, **kwargs): - """ - - You can either specify both *revisions* and 
*configs* or none - of them. If they are omitted, you will need to call - exp.add_algorithm() manually. - - If *revisions* is given, it must be a non-empty list of - revision identifiers, which specify which planner versions to - use in the experiment. The same versions are used for - translator, preprocessor and search. :: - - IssueExperiment(revisions=["issue123", "4b3d581643"], ...) - - If *configs* is given, it must be a non-empty list of - IssueConfig objects. :: - - IssueExperiment(..., configs=[ - IssueConfig("ff", ["--search", "eager_greedy(ff())"]), - IssueConfig( - "lama", [], - driver_options=["--alias", "seq-sat-lama-2011"]), - ]) - - If *path* is specified, it must be the path to where the - experiment should be built (e.g. - /home/john/experiments/issue123/exp01/). If omitted, the - experiment path is derived automatically from the main - script's filename. Example:: - - script = experiments/issue123/exp01.py --> - path = experiments/issue123/data/issue123-exp01/ - - """ - - path = path or get_data_dir() - - FastDownwardExperiment.__init__(self, path=path, **kwargs) - - if (revisions and not configs) or (not revisions and configs): - raise ValueError( - "please provide either both or none of revisions and configs") - - for rev in revisions: - for config in configs: - self.add_algorithm( - get_algo_nick(rev, config.nick), - get_repo_base(), - rev, - config.component_options, - build_options=config.build_options, - driver_options=config.driver_options) - - self._revisions = revisions - self._configs = configs - - @classmethod - def _is_portfolio(cls, config_nick): - return "fdss" in config_nick - - @classmethod - def get_supported_attributes(cls, config_nick, attributes): - if cls._is_portfolio(config_nick): - return [attr for attr in attributes - if attr in cls.PORTFOLIO_ATTRIBUTES] - return attributes - - def add_absolute_report_step(self, **kwargs): - """Add step that makes an absolute report. 
- - Absolute reports are useful for experiments that don't compare - revisions. - - The report is written to the experiment evaluation directory. - - All *kwargs* will be passed to the AbsoluteReport class. If the - keyword argument *attributes* is not specified, a default list - of attributes is used. :: - - exp.add_absolute_report_step(attributes=["coverage"]) - - """ - kwargs.setdefault("attributes", self.DEFAULT_TABLE_ATTRIBUTES) - report = AbsoluteReport(**kwargs) - outfile = os.path.join( - self.eval_dir, - get_experiment_name() + "." + report.output_format) - self.add_report(report, outfile=outfile) - self.add_step(Step( - 'publish-absolute-report', subprocess.call, ['publish', outfile])) - - def add_comparison_table_step(self, **kwargs): - """Add a step that makes pairwise revision comparisons. - - Create comparative reports for all pairs of Fast Downward - revisions. Each report pairs up the runs of the same config and - lists the two absolute attribute values and their difference - for all attributes in kwargs["attributes"]. - - All *kwargs* will be passed to the CompareConfigsReport class. - If the keyword argument *attributes* is not specified, a - default list of attributes is used. 
:: - - exp.add_comparison_table_step(attributes=["coverage"]) - - """ - kwargs.setdefault("attributes", self.DEFAULT_TABLE_ATTRIBUTES) - - def make_comparison_tables(): - for rev1, rev2 in itertools.combinations(self._revisions, 2): - compared_configs = [] - for config in self._configs: - config_nick = config.nick - compared_configs.append( - ("%s-%s" % (rev1, config_nick), - "%s-%s" % (rev2, config_nick), - "Diff (%s)" % config_nick)) - report = CompareConfigsReport(compared_configs, **kwargs) - outfile = os.path.join( - self.eval_dir, - "%s-%s-%s-compare.%s" % ( - self.name, rev1, rev2, report.output_format)) - report(self.eval_dir, outfile) - - def publish_comparison_tables(): - for rev1, rev2 in itertools.combinations(self._revisions, 2): - outfile = os.path.join( - self.eval_dir, - "%s-%s-%s-compare.html" % (self.name, rev1, rev2)) - subprocess.call(["publish", outfile]) - - self.add_step(Step("make-comparison-tables", make_comparison_tables)) - self.add_step(Step( - "publish-comparison-tables", publish_comparison_tables)) - - def add_scatter_plot_step(self, relative=False, attributes=None): - """Add step creating (relative) scatter plots for all revision pairs. - - Create a scatter plot for each combination of attribute, - configuration and revisions pair. If *attributes* is not - specified, a list of common scatter plot attributes is used. - For portfolios all attributes except "cost", "coverage" and - "plan_length" will be ignored. 
:: - - exp.add_scatter_plot_step(attributes=["expansions"]) - - """ - if relative: - report_class = RelativeScatterPlotReport - scatter_dir = os.path.join(self.eval_dir, "scatter-relative") - step_name = "make-relative-scatter-plots" - else: - report_class = ScatterPlotReport - scatter_dir = os.path.join(self.eval_dir, "scatter-absolute") - step_name = "make-absolute-scatter-plots" - if attributes is None: - attributes = self.DEFAULT_SCATTER_PLOT_ATTRIBUTES - - def make_scatter_plot(config_nick, rev1, rev2, attribute): - name = "-".join([self.name, rev1, rev2, attribute, config_nick]) - print "Make scatter plot for", name - algo1 = "{}-{}".format(rev1, config_nick) - algo2 = "{}-{}".format(rev2, config_nick) - report = report_class( - filter_config=[algo1, algo2], - attributes=[attribute], - get_category=lambda run1, run2: run1["domain"], - legend_location=(1.3, 0.5)) - report( - self.eval_dir, - os.path.join(scatter_dir, rev1 + "-" + rev2, name)) - - def make_scatter_plots(): - for config in self._configs: - for rev1, rev2 in itertools.combinations(self._revisions, 2): - for attribute in self.get_supported_attributes( - config.nick, attributes): - make_scatter_plot(config.nick, rev1, rev2, attribute) - - self.add_step(Step(step_name, make_scatter_plots)) diff --git a/experiments/issue717/lama-synergy.py b/experiments/issue717/lama-synergy.py deleted file mode 100755 index 7d9cb96da2..0000000000 --- a/experiments/issue717/lama-synergy.py +++ /dev/null @@ -1,63 +0,0 @@ -#! 
/usr/bin/env python -# -*- coding: utf-8 -*- - -import os - -from lab.environments import LocalEnvironment, MaiaEnvironment - -from common_setup import IssueConfig, IssueExperiment, is_test_run - - -BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"] -REVISIONS = ["issue717-base"] -CONFIGS = [ - IssueConfig( - "lama-first-original", [], driver_options=["--alias", "lama-first"]) -] + [ - IssueConfig( - "lama-first-new", [], driver_options=["--alias", "lama-first-new"]) -] -SUITE = [ - 'airport', 'assembly', 'barman-sat11-strips', 'barman-sat14-strips', - 'blocks', 'cavediving-14-adl', 'childsnack-sat14-strips', - 'citycar-sat14-adl', 'depot', 'driverlog', 'elevators-sat08-strips', - 'elevators-sat11-strips', 'floortile-sat11-strips', - 'floortile-sat14-strips', 'freecell', 'ged-sat14-strips', 'grid', - 'gripper', 'hiking-sat14-strips', 'logistics00', 'logistics98', - 'maintenance-sat14-adl', 'miconic', 'miconic-fulladl', - 'miconic-simpleadl', 'movie', 'mprime', 'mystery', - 'nomystery-sat11-strips', 'openstacks', 'openstacks-sat08-adl', - 'openstacks-sat08-strips', 'openstacks-sat11-strips', - 'openstacks-sat14-strips', 'openstacks-strips', 'optical-telegraphs', - 'parcprinter-08-strips', 'parcprinter-sat11-strips', - 'parking-sat11-strips', 'parking-sat14-strips', 'pathways', - 'pathways-noneg', 'pegsol-08-strips', 'pegsol-sat11-strips', - 'philosophers', 'pipesworld-notankage', 'pipesworld-tankage', - 'psr-large', 'psr-middle', 'psr-small', 'rovers', 'satellite', - 'scanalyzer-08-strips', 'scanalyzer-sat11-strips', 'schedule', - 'sokoban-sat08-strips', 'sokoban-sat11-strips', 'storage', - 'tetris-sat14-strips', 'thoughtful-sat14-strips', - 'tidybot-sat11-strips', 'tpp', 'transport-sat08-strips', - 'transport-sat11-strips', 'transport-sat14-strips', 'trucks', - 'trucks-strips', 'visitall-sat11-strips', 'visitall-sat14-strips', - 'woodworking-sat08-strips', 'woodworking-sat11-strips', 'zenotravel'] - -ENVIRONMENT = MaiaEnvironment( - priority=0, 
email="cedric.geissmann@unibas.ch") - -if is_test_run(): - SUITE = IssueExperiment.DEFAULT_TEST_SUITE - ENVIRONMENT = LocalEnvironment(processes=1) - -exp = IssueExperiment( - revisions=REVISIONS, - configs=CONFIGS, - environment=ENVIRONMENT, -) -exp.add_suite(BENCHMARKS_DIR, SUITE) - -exp.add_absolute_report_step() -exp.add_comparison_table_step() -exp.add_scatter_plot_step(attributes=["total_time", "memory"]) - -exp() diff --git a/experiments/issue717/relativescatter.py b/experiments/issue717/relativescatter.py deleted file mode 100644 index 14d5d42752..0000000000 --- a/experiments/issue717/relativescatter.py +++ /dev/null @@ -1,106 +0,0 @@ -# -*- coding: utf-8 -*- - -from collections import defaultdict - -from matplotlib import ticker - -from downward.reports.scatter import ScatterPlotReport -from downward.reports.plot import PlotReport, Matplotlib, MatplotlibPlot - - -# TODO: handle outliers - -# TODO: this is mostly copied from ScatterMatplotlib (scatter.py) -class RelativeScatterMatplotlib(Matplotlib): - @classmethod - def _plot(cls, report, axes, categories, styles): - # Display grid - axes.grid(b=True, linestyle='-', color='0.75') - - has_points = False - # Generate the scatter plots - for category, coords in sorted(categories.items()): - X, Y = zip(*coords) - axes.scatter(X, Y, s=42, label=category, **styles[category]) - if X and Y: - has_points = True - - if report.xscale == 'linear' or report.yscale == 'linear': - plot_size = report.missing_val * 1.01 - else: - plot_size = report.missing_val * 1.25 - - # make 5 ticks above and below 1 - yticks = [] - tick_step = report.ylim_top**(1/5.0) - for i in xrange(-5, 6): - yticks.append(tick_step**i) - axes.set_yticks(yticks) - axes.get_yaxis().set_major_formatter(ticker.ScalarFormatter()) - - axes.set_xlim(report.xlim_left or -1, report.xlim_right or plot_size) - axes.set_ylim(report.ylim_bottom or -1, report.ylim_top or plot_size) - - for axis in [axes.xaxis, axes.yaxis]: - MatplotlibPlot.change_axis_formatter( 
- axis, - report.missing_val if report.show_missing else None) - return has_points - - -class RelativeScatterPlotReport(ScatterPlotReport): - """ - Generate a scatter plot that shows how a specific attribute in two - configurations. The attribute value in config 1 is shown on the - x-axis and the relation to the value in config 2 on the y-axis. - """ - - def __init__(self, show_missing=True, get_category=None, **kwargs): - ScatterPlotReport.__init__(self, show_missing, get_category, **kwargs) - if self.output_format == 'tex': - raise "not supported" - else: - self.writer = RelativeScatterMatplotlib - - def _fill_categories(self, runs): - # We discard the *runs* parameter. - # Map category names to value tuples - categories = defaultdict(list) - self.ylim_bottom = 2 - self.ylim_top = 0.5 - self.xlim_left = float("inf") - for (domain, problem), runs in self.problem_runs.items(): - if len(runs) != 2: - continue - run1, run2 = runs - assert (run1['config'] == self.configs[0] and - run2['config'] == self.configs[1]) - val1 = run1.get(self.attribute) - val2 = run2.get(self.attribute) - if val1 is None or val2 is None: - continue - category = self.get_category(run1, run2) - assert val1 > 0, (domain, problem, self.configs[0], val1) - assert val2 > 0, (domain, problem, self.configs[1], val2) - x = val1 - y = val2 / float(val1) - - categories[category].append((x, y)) - - self.ylim_top = max(self.ylim_top, y) - self.ylim_bottom = min(self.ylim_bottom, y) - self.xlim_left = min(self.xlim_left, x) - - # center around 1 - if self.ylim_bottom < 1: - self.ylim_top = max(self.ylim_top, 1 / float(self.ylim_bottom)) - if self.ylim_top > 1: - self.ylim_bottom = min(self.ylim_bottom, 1 / float(self.ylim_top)) - return categories - - def _set_scales(self, xscale, yscale): - # ScatterPlots use log-scaling on the x-axis by default. 
- default_xscale = 'log' - if self.attribute and self.attribute in self.LINEAR: - default_xscale = 'linear' - PlotReport._set_scales(self, xscale or default_xscale, 'log') diff --git a/experiments/issue717/v2.py b/experiments/issue717/v2.py deleted file mode 100755 index f70e8f1ee7..0000000000 --- a/experiments/issue717/v2.py +++ /dev/null @@ -1,78 +0,0 @@ -#! /usr/bin/env python -# -*- coding: utf-8 -*- - -import os - -from lab.environments import LocalEnvironment, MaiaEnvironment -from downward.reports.compare import ComparativeReport - -from common_setup import IssueConfig, IssueExperiment, is_test_run - - -BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"] -REVISIONS = ["issue717-v2"] -CONFIGS = [ - IssueConfig( - "lama-first-original", [], driver_options=["--alias", "lama-first"]) -] + [ - IssueConfig( - "lama-first-new", [], driver_options=["--alias", "lama-first-new"]) -] + [ - IssueConfig( - "lama-original", [], driver_options=["--alias", "seq-sat-lama-2011"]) -] + [ - IssueConfig( - "lama-new", [], driver_options=["--alias", "seq-sat-lama-2011-new"]) -] - -SUITE = [ - 'airport', 'assembly', 'barman-sat11-strips', 'barman-sat14-strips', - 'blocks', 'cavediving-14-adl', 'childsnack-sat14-strips', - 'citycar-sat14-adl', 'depot', 'driverlog', 'elevators-sat08-strips', - 'elevators-sat11-strips', 'floortile-sat11-strips', - 'floortile-sat14-strips', 'freecell', 'ged-sat14-strips', 'grid', - 'gripper', 'hiking-sat14-strips', 'logistics00', 'logistics98', - 'maintenance-sat14-adl', 'miconic', 'miconic-fulladl', - 'miconic-simpleadl', 'movie', 'mprime', 'mystery', - 'nomystery-sat11-strips', 'openstacks', 'openstacks-sat08-adl', - 'openstacks-sat08-strips', 'openstacks-sat11-strips', - 'openstacks-sat14-strips', 'openstacks-strips', 'optical-telegraphs', - 'parcprinter-08-strips', 'parcprinter-sat11-strips', - 'parking-sat11-strips', 'parking-sat14-strips', 'pathways', - 'pathways-noneg', 'pegsol-08-strips', 'pegsol-sat11-strips', - 'philosophers', 
'pipesworld-notankage', 'pipesworld-tankage', - 'psr-large', 'psr-middle', 'psr-small', 'rovers', 'satellite', - 'scanalyzer-08-strips', 'scanalyzer-sat11-strips', 'schedule', - 'sokoban-sat08-strips', 'sokoban-sat11-strips', 'storage', - 'tetris-sat14-strips', 'thoughtful-sat14-strips', - 'tidybot-sat11-strips', 'tpp', 'transport-sat08-strips', - 'transport-sat11-strips', 'transport-sat14-strips', 'trucks', - 'trucks-strips', 'visitall-sat11-strips', 'visitall-sat14-strips', - 'woodworking-sat08-strips', 'woodworking-sat11-strips', 'zenotravel'] - -ENVIRONMENT = MaiaEnvironment( - priority=0, email="cedric.geissmann@unibas.ch") - -if is_test_run(): - SUITE = IssueExperiment.DEFAULT_TEST_SUITE - ENVIRONMENT = LocalEnvironment(processes=4) - -exp = IssueExperiment( - revisions=REVISIONS, - configs=CONFIGS, - environment=ENVIRONMENT, -) -exp.add_suite(BENCHMARKS_DIR, SUITE) - -exp.add_absolute_report_step() - -algorithm_pairs = [ - ('issue717-v2-lama-first-original', 'issue717-v2-lama-first-new', 'Diff lama-first'), - ('issue717-v2-lama-original', 'issue717-v2-lama-new', 'Diff lama')] -exp.add_report(ComparativeReport( - algorithm_pairs, - attributes=IssueExperiment.DEFAULT_TABLE_ATTRIBUTES)) - -exp.add_scatter_plot_step(attributes=["total_time", "memory"]) - -exp.run_steps() diff --git a/experiments/issue722/common_setup.py b/experiments/issue722/common_setup.py deleted file mode 100644 index e9c532698d..0000000000 --- a/experiments/issue722/common_setup.py +++ /dev/null @@ -1,391 +0,0 @@ -# -*- coding: utf-8 -*- - -import itertools -import os -import platform -import subprocess -import sys - -from lab.experiment import ARGPARSER -from lab import tools - -from downward.experiment import FastDownwardExperiment -from downward.reports.absolute import AbsoluteReport -from downward.reports.compare import ComparativeReport -from downward.reports.scatter import ScatterPlotReport - -try: - from relativescatter import RelativeScatterPlotReport - matplotlib = True -except 
ImportError: - print 'matplotlib not availabe, scatter plots not available' - matplotlib = False - - -def parse_args(): - ARGPARSER.add_argument( - "--test", - choices=["yes", "no", "auto"], - default="auto", - dest="test_run", - help="test experiment locally on a small suite if --test=yes or " - "--test=auto and we are not on a cluster") - return ARGPARSER.parse_args() - -ARGS = parse_args() - - -DEFAULT_OPTIMAL_SUITE = [ - 'airport', 'barman-opt11-strips', 'barman-opt14-strips', 'blocks', - 'childsnack-opt14-strips', 'depot', 'driverlog', - 'elevators-opt08-strips', 'elevators-opt11-strips', - 'floortile-opt11-strips', 'floortile-opt14-strips', 'freecell', - 'ged-opt14-strips', 'grid', 'gripper', 'hiking-opt14-strips', - 'logistics00', 'logistics98', 'miconic', 'movie', 'mprime', - 'mystery', 'nomystery-opt11-strips', 'openstacks-opt08-strips', - 'openstacks-opt11-strips', 'openstacks-opt14-strips', - 'openstacks-strips', 'parcprinter-08-strips', - 'parcprinter-opt11-strips', 'parking-opt11-strips', - 'parking-opt14-strips', 'pathways-noneg', 'pegsol-08-strips', - 'pegsol-opt11-strips', 'pipesworld-notankage', - 'pipesworld-tankage', 'psr-small', 'rovers', 'satellite', - 'scanalyzer-08-strips', 'scanalyzer-opt11-strips', - 'sokoban-opt08-strips', 'sokoban-opt11-strips', 'storage', - 'tetris-opt14-strips', 'tidybot-opt11-strips', - 'tidybot-opt14-strips', 'tpp', 'transport-opt08-strips', - 'transport-opt11-strips', 'transport-opt14-strips', - 'trucks-strips', 'visitall-opt11-strips', 'visitall-opt14-strips', - 'woodworking-opt08-strips', 'woodworking-opt11-strips', - 'zenotravel'] - -DEFAULT_SATISFICING_SUITE = [ - 'airport', 'assembly', 'barman-sat11-strips', - 'barman-sat14-strips', 'blocks', 'cavediving-14-adl', - 'childsnack-sat14-strips', 'citycar-sat14-adl', 'depot', - 'driverlog', 'elevators-sat08-strips', 'elevators-sat11-strips', - 'floortile-sat11-strips', 'floortile-sat14-strips', 'freecell', - 'ged-sat14-strips', 'grid', 'gripper', 
'hiking-sat14-strips', - 'logistics00', 'logistics98', 'maintenance-sat14-adl', 'miconic', - 'miconic-fulladl', 'miconic-simpleadl', 'movie', 'mprime', - 'mystery', 'nomystery-sat11-strips', 'openstacks', - 'openstacks-sat08-adl', 'openstacks-sat08-strips', - 'openstacks-sat11-strips', 'openstacks-sat14-strips', - 'openstacks-strips', 'optical-telegraphs', 'parcprinter-08-strips', - 'parcprinter-sat11-strips', 'parking-sat11-strips', - 'parking-sat14-strips', 'pathways', 'pathways-noneg', - 'pegsol-08-strips', 'pegsol-sat11-strips', 'philosophers', - 'pipesworld-notankage', 'pipesworld-tankage', 'psr-large', - 'psr-middle', 'psr-small', 'rovers', 'satellite', - 'scanalyzer-08-strips', 'scanalyzer-sat11-strips', 'schedule', - 'sokoban-sat08-strips', 'sokoban-sat11-strips', 'storage', - 'tetris-sat14-strips', 'thoughtful-sat14-strips', - 'tidybot-sat11-strips', 'tpp', 'transport-sat08-strips', - 'transport-sat11-strips', 'transport-sat14-strips', 'trucks', - 'trucks-strips', 'visitall-sat11-strips', 'visitall-sat14-strips', - 'woodworking-sat08-strips', 'woodworking-sat11-strips', - 'zenotravel'] - - -def get_script(): - """Get file name of main script.""" - return tools.get_script_path() - - -def get_script_dir(): - """Get directory of main script. - - Usually a relative directory (depends on how it was called by the user.)""" - return os.path.dirname(get_script()) - - -def get_experiment_name(): - """Get name for experiment. - - Derived from the absolute filename of the main script, e.g. - "/ham/spam/eggs.py" => "spam-eggs".""" - script = os.path.abspath(get_script()) - script_dir = os.path.basename(os.path.dirname(script)) - script_base = os.path.splitext(os.path.basename(script))[0] - return "%s-%s" % (script_dir, script_base) - - -def get_data_dir(): - """Get data dir for the experiment. 
- - This is the subdirectory "data" of the directory containing - the main script.""" - return os.path.join(get_script_dir(), "data", get_experiment_name()) - - -def get_repo_base(): - """Get base directory of the repository, as an absolute path. - - Search upwards in the directory tree from the main script until a - directory with a subdirectory named ".hg" is found. - - Abort if the repo base cannot be found.""" - path = os.path.abspath(get_script_dir()) - while os.path.dirname(path) != path: - if os.path.exists(os.path.join(path, ".hg")): - return path - path = os.path.dirname(path) - sys.exit("repo base could not be found") - - -def is_running_on_cluster(): - node = platform.node() - return ( - "cluster" in node or - node.startswith("gkigrid") or - node in ["habakuk", "turtur"]) - - -def is_test_run(): - return ARGS.test_run == "yes" or ( - ARGS.test_run == "auto" and not is_running_on_cluster()) - - -def get_algo_nick(revision, config_nick): - return "{revision}-{config_nick}".format(**locals()) - - -class IssueConfig(object): - """Hold information about a planner configuration. - - See FastDownwardExperiment.add_algorithm() for documentation of the - constructor's options. 
- - """ - def __init__(self, nick, component_options, - build_options=None, driver_options=None): - self.nick = nick - self.component_options = component_options - self.build_options = build_options - self.driver_options = driver_options - - -class IssueExperiment(FastDownwardExperiment): - """Subclass of FastDownwardExperiment with some convenience features.""" - - DEFAULT_TEST_SUITE = ["gripper:prob01.pddl"] - - DEFAULT_TABLE_ATTRIBUTES = [ - "cost", - "coverage", - "error", - "evaluations", - "expansions", - "expansions_until_last_jump", - "generated", - "memory", - "quality", - "run_dir", - "score_evaluations", - "score_expansions", - "score_generated", - "score_memory", - "score_search_time", - "score_total_time", - "search_time", - "total_time", - ] - - DEFAULT_SCATTER_PLOT_ATTRIBUTES = [ - "evaluations", - "expansions", - "expansions_until_last_jump", - "initial_h_value", - "memory", - "search_time", - "total_time", - ] - - PORTFOLIO_ATTRIBUTES = [ - "cost", - "coverage", - "error", - "plan_length", - "run_dir", - ] - - def __init__(self, revisions=None, configs=None, path=None, **kwargs): - """ - - You can either specify both *revisions* and *configs* or none - of them. If they are omitted, you will need to call - exp.add_algorithm() manually. - - If *revisions* is given, it must be a non-empty list of - revision identifiers, which specify which planner versions to - use in the experiment. The same versions are used for - translator, preprocessor and search. :: - - IssueExperiment(revisions=["issue123", "4b3d581643"], ...) - - If *configs* is given, it must be a non-empty list of - IssueConfig objects. :: - - IssueExperiment(..., configs=[ - IssueConfig("ff", ["--search", "eager_greedy(ff())"]), - IssueConfig( - "lama", [], - driver_options=["--alias", "seq-sat-lama-2011"]), - ]) - - If *path* is specified, it must be the path to where the - experiment should be built (e.g. - /home/john/experiments/issue123/exp01/). 
If omitted, the - experiment path is derived automatically from the main - script's filename. Example:: - - script = experiments/issue123/exp01.py --> - path = experiments/issue123/data/issue123-exp01/ - - """ - - path = path or get_data_dir() - - FastDownwardExperiment.__init__(self, path=path, **kwargs) - - if (revisions and not configs) or (not revisions and configs): - raise ValueError( - "please provide either both or none of revisions and configs") - - for rev in revisions: - for config in configs: - self.add_algorithm( - get_algo_nick(rev, config.nick), - get_repo_base(), - rev, - config.component_options, - build_options=config.build_options, - driver_options=config.driver_options) - - self._revisions = revisions - self._configs = configs - - @classmethod - def _is_portfolio(cls, config_nick): - return "fdss" in config_nick - - @classmethod - def get_supported_attributes(cls, config_nick, attributes): - if cls._is_portfolio(config_nick): - return [attr for attr in attributes - if attr in cls.PORTFOLIO_ATTRIBUTES] - return attributes - - def add_absolute_report_step(self, **kwargs): - """Add step that makes an absolute report. - - Absolute reports are useful for experiments that don't compare - revisions. - - The report is written to the experiment evaluation directory. - - All *kwargs* will be passed to the AbsoluteReport class. If the - keyword argument *attributes* is not specified, a default list - of attributes is used. :: - - exp.add_absolute_report_step(attributes=["coverage"]) - - """ - kwargs.setdefault("attributes", self.DEFAULT_TABLE_ATTRIBUTES) - report = AbsoluteReport(**kwargs) - outfile = os.path.join( - self.eval_dir, - get_experiment_name() + "." + report.output_format) - self.add_report(report, outfile=outfile) - self.add_step( - 'publish-absolute-report', subprocess.call, ['publish', outfile]) - - def add_comparison_table_step(self, **kwargs): - """Add a step that makes pairwise revision comparisons. 
- - Create comparative reports for all pairs of Fast Downward - revisions. Each report pairs up the runs of the same config and - lists the two absolute attribute values and their difference - for all attributes in kwargs["attributes"]. - - All *kwargs* will be passed to the CompareConfigsReport class. - If the keyword argument *attributes* is not specified, a - default list of attributes is used. :: - - exp.add_comparison_table_step(attributes=["coverage"]) - - """ - kwargs.setdefault("attributes", self.DEFAULT_TABLE_ATTRIBUTES) - - def make_comparison_tables(): - for rev1, rev2 in itertools.combinations(self._revisions, 2): - compared_configs = [] - for config in self._configs: - config_nick = config.nick - compared_configs.append( - ("%s-%s" % (rev1, config_nick), - "%s-%s" % (rev2, config_nick), - "Diff (%s)" % config_nick)) - report = ComparativeReport(compared_configs, **kwargs) - outfile = os.path.join( - self.eval_dir, - "%s-%s-%s-compare.%s" % ( - self.name, rev1, rev2, report.output_format)) - report(self.eval_dir, outfile) - - def publish_comparison_tables(): - for rev1, rev2 in itertools.combinations(self._revisions, 2): - outfile = os.path.join( - self.eval_dir, - "%s-%s-%s-compare.html" % (self.name, rev1, rev2)) - subprocess.call(["publish", outfile]) - - self.add_step("make-comparison-tables", make_comparison_tables) - self.add_step( - "publish-comparison-tables", publish_comparison_tables) - - def add_scatter_plot_step(self, relative=False, attributes=None): - """Add step creating (relative) scatter plots for all revision pairs. - - Create a scatter plot for each combination of attribute, - configuration and revisions pair. If *attributes* is not - specified, a list of common scatter plot attributes is used. - For portfolios all attributes except "cost", "coverage" and - "plan_length" will be ignored. 
:: - - exp.add_scatter_plot_step(attributes=["expansions"]) - - """ - if matplotlib: - if relative: - report_class = RelativeScatterPlotReport - scatter_dir = os.path.join(self.eval_dir, "scatter-relative") - step_name = "make-relative-scatter-plots" - else: - report_class = ScatterPlotReport - scatter_dir = os.path.join(self.eval_dir, "scatter-absolute") - step_name = "make-absolute-scatter-plots" - if attributes is None: - attributes = self.DEFAULT_SCATTER_PLOT_ATTRIBUTES - - def make_scatter_plot(config_nick, rev1, rev2, attribute): - name = "-".join([self.name, rev1, rev2, attribute, config_nick]) - print "Make scatter plot for", name - algo1 = "{}-{}".format(rev1, config_nick) - algo2 = "{}-{}".format(rev2, config_nick) - report = report_class( - filter_config=[algo1, algo2], - attributes=[attribute], - get_category=lambda run1, run2: run1["domain"], - legend_location=(1.3, 0.5)) - report( - self.eval_dir, - os.path.join(scatter_dir, rev1 + "-" + rev2, name)) - - def make_scatter_plots(): - for config in self._configs: - for rev1, rev2 in itertools.combinations(self._revisions, 2): - for attribute in self.get_supported_attributes( - config.nick, attributes): - make_scatter_plot(config.nick, rev1, rev2, attribute) - - self.add_step(step_name, make_scatter_plots) diff --git a/experiments/issue722/ms-parser.py b/experiments/issue722/ms-parser.py deleted file mode 100755 index c219b72ba5..0000000000 --- a/experiments/issue722/ms-parser.py +++ /dev/null @@ -1,72 +0,0 @@ -#! 
/usr/bin/env python - -from lab.parser import Parser - -parser = Parser() -parser.add_pattern('ms_final_size', 'Final transition system size: (\d+)', required=False, type=int) -parser.add_pattern('ms_construction_time', 'Done initializing merge-and-shrink heuristic \[(.+)s\]', required=False, type=float) -parser.add_pattern('ms_memory_delta', 'Final peak memory increase of merge-and-shrink computation: (\d+) KB', required=False, type=int) -parser.add_pattern('actual_search_time', 'Actual search time: (.+)s \[t=.+s\]', required=False, type=float) - -def check_ms_constructed(content, props): - ms_construction_time = props.get('ms_construction_time') - abstraction_constructed = False - if ms_construction_time is not None: - abstraction_constructed = True - props['ms_abstraction_constructed'] = abstraction_constructed - -parser.add_function(check_ms_constructed) - -def check_planner_exit_reason(content, props): - ms_abstraction_constructed = props.get('ms_abstraction_constructed') - error = props.get('error') - if error != 'none' and error != 'timeout' and error != 'out-of-memory': - print 'error: %s' % error - return - - # Check whether merge-and-shrink computation or search ran out of - # time or memory. 
- ms_out_of_time = False - ms_out_of_memory = False - search_out_of_time = False - search_out_of_memory = False - if ms_abstraction_constructed == False: - if error == 'timeout': - ms_out_of_time = True - elif error == 'out-of-memory': - ms_out_of_memory = True - elif ms_abstraction_constructed == True: - if error == 'timeout': - search_out_of_time = True - elif error == 'out-of-memory': - search_out_of_memory = True - props['ms_out_of_time'] = ms_out_of_time - props['ms_out_of_memory'] = ms_out_of_memory - props['search_out_of_time'] = search_out_of_time - props['search_out_of_memory'] = search_out_of_memory - -parser.add_function(check_planner_exit_reason) - -def check_perfect_heuristic(content, props): - plan_length = props.get('plan_length') - expansions = props.get('expansions') - if plan_length != None: - perfect_heuristic = False - if plan_length + 1 == expansions: - perfect_heuristic = True - props['perfect_heuristic'] = perfect_heuristic - -parser.add_function(check_perfect_heuristic) - -def check_proved_unsolvability(content, props): - proved_unsolvability = False - if props['coverage'] == 0: - for line in content.splitlines(): - if line == 'Completely explored state space -- no solution!': - proved_unsolvability = True - break - props['proved_unsolvability'] = proved_unsolvability - -parser.add_function(check_proved_unsolvability) - -parser.parse() diff --git a/experiments/issue722/relativescatter.py b/experiments/issue722/relativescatter.py deleted file mode 100644 index f74cb6e721..0000000000 --- a/experiments/issue722/relativescatter.py +++ /dev/null @@ -1,105 +0,0 @@ -# -*- coding: utf-8 -*- - -from collections import defaultdict - -from matplotlib import ticker - -from downward.reports.scatter import ScatterPlotReport -from downward.reports.plot import PlotReport, Matplotlib, MatplotlibPlot - - -# TODO: handle outliers - -# TODO: this is mostly copied from ScatterMatplotlib (scatter.py) -class RelativeScatterMatplotlib(Matplotlib): - @classmethod - 
def _plot(cls, report, axes, categories, styles): - # Display grid - axes.grid(b=True, linestyle='-', color='0.75') - - has_points = False - # Generate the scatter plots - for category, coords in sorted(categories.items()): - X, Y = zip(*coords) - axes.scatter(X, Y, s=42, label=category, **styles[category]) - if X and Y: - has_points = True - - if report.xscale == 'linear' or report.yscale == 'linear': - plot_size = report.missing_val * 1.01 - else: - plot_size = report.missing_val * 1.25 - - # make 5 ticks above and below 1 - yticks = [] - tick_step = report.ylim_top**(1/5.0) - for i in xrange(-5, 6): - yticks.append(tick_step**i) - axes.set_yticks(yticks) - axes.get_yaxis().set_major_formatter(ticker.ScalarFormatter()) - - axes.set_xlim(report.xlim_left or -1, report.xlim_right or plot_size) - axes.set_ylim(report.ylim_bottom or -1, report.ylim_top or plot_size) - - for axis in [axes.xaxis, axes.yaxis]: - MatplotlibPlot.change_axis_formatter( - axis, - report.missing_val if report.show_missing else None) - return has_points - - -class RelativeScatterPlotReport(ScatterPlotReport): - """ - Generate a scatter plot that shows a relative comparison of two - algorithms with regard to the given attribute. The attribute value - of algorithm 1 is shown on the x-axis and the relation to the value - of algorithm 2 on the y-axis. - """ - - def __init__(self, show_missing=True, get_category=None, **kwargs): - ScatterPlotReport.__init__(self, show_missing, get_category, **kwargs) - if self.output_format == 'tex': - raise "not supported" - else: - self.writer = RelativeScatterMatplotlib - - def _fill_categories(self, runs): - # We discard the *runs* parameter. 
- # Map category names to value tuples - categories = defaultdict(list) - self.ylim_bottom = 2 - self.ylim_top = 0.5 - self.xlim_left = float("inf") - for (domain, problem), runs in self.problem_runs.items(): - if len(runs) != 2: - continue - run1, run2 = runs - assert (run1['algorithm'] == self.algorithms[0] and - run2['algorithm'] == self.algorithms[1]) - val1 = run1.get(self.attribute) - val2 = run2.get(self.attribute) - if val1 is None or val2 is None: - continue - category = self.get_category(run1, run2) - assert val1 > 0, (domain, problem, self.algorithms[0], val1) - assert val2 > 0, (domain, problem, self.algorithms[1], val2) - x = val1 - y = val2 / float(val1) - - categories[category].append((x, y)) - - self.ylim_top = max(self.ylim_top, y) - self.ylim_bottom = min(self.ylim_bottom, y) - self.xlim_left = min(self.xlim_left, x) - - # center around 1 - if self.ylim_bottom < 1: - self.ylim_top = max(self.ylim_top, 1 / float(self.ylim_bottom)) - if self.ylim_top > 1: - self.ylim_bottom = min(self.ylim_bottom, 1 / float(self.ylim_top)) - return categories - - def _set_scales(self, xscale, yscale): - # ScatterPlot uses log-scaling on the x-axis by default. - PlotReport._set_scales( - self, xscale or self.attribute.scale or 'log', 'log') diff --git a/experiments/issue722/v1.py b/experiments/issue722/v1.py deleted file mode 100755 index 98ee50dd51..0000000000 --- a/experiments/issue722/v1.py +++ /dev/null @@ -1,76 +0,0 @@ -#! 
/usr/bin/env python -# -*- coding: utf-8 -*- - -import os -from lab.reports import Attribute, geometric_mean - -from lab.environments import LocalEnvironment, MaiaEnvironment - -import common_setup -from common_setup import IssueConfig, IssueExperiment, get_algo_nick, get_repo_base - -BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"] -REVISIONS = ["issue722-base", "issue722-v1"] -CONFIGS = [ - IssueConfig('dfp-b50k', ['--search', 'astar(merge_and_shrink(merge_strategy=merge_stateless(merge_selector=score_based_filtering(scoring_functions=[goal_relevance,dfp,total_order])),shrink_strategy=shrink_bisimulation(greedy=false),label_reduction=exact(before_shrinking=true,before_merging=false),max_states=50000,threshold_before_merge=1))']), - IssueConfig('rl-b50k', ['--search', 'astar(merge_and_shrink(merge_strategy=merge_precomputed(merge_tree=linear(variable_order=reverse_level)),shrink_strategy=shrink_bisimulation(greedy=false),label_reduction=exact(before_shrinking=true,before_merging=false),max_states=50000,threshold_before_merge=1))']), - IssueConfig('sccs-dfp-b50k', ['--search', 'astar(merge_and_shrink(shrink_strategy=shrink_bisimulation(greedy=false),merge_strategy=merge_sccs(order_of_sccs=topological,merge_selector=score_based_filtering(scoring_functions=[goal_relevance,dfp,total_order(atomic_ts_order=reverse_level,product_ts_order=new_to_old,atomic_before_product=false)])),label_reduction=exact(before_shrinking=true,before_merging=false),max_states=50000,threshold_before_merge=1))']), - - #IssueConfig('dfp-ginf', ['--search', 'astar(merge_and_shrink(merge_strategy=merge_stateless(merge_selector=score_based_filtering(scoring_functions=[goal_relevance,dfp,total_order])),shrink_strategy=shrink_bisimulation(greedy=true),label_reduction=exact(before_shrinking=true,before_merging=false),max_states=infinity,threshold_before_merge=1))']), - #IssueConfig('rl-ginf', ['--search', 
'astar(merge_and_shrink(merge_strategy=merge_precomputed(merge_tree=linear(variable_order=reverse_level)),shrink_strategy=shrink_bisimulation(greedy=true),label_reduction=exact(before_shrinking=true,before_merging=false),max_states=infinity,threshold_before_merge=1))']), - #IssueConfig('sccs-dfp-ginf', ['--search', 'astar(merge_and_shrink(shrink_strategy=shrink_bisimulation(greedy=true),merge_strategy=merge_sccs(order_of_sccs=topological,merge_selector=score_based_filtering(scoring_functions=[goal_relevance,dfp,total_order(atomic_ts_order=reverse_level,product_ts_order=new_to_old,atomic_before_product=false)])),label_reduction=exact(before_shrinking=true,before_merging=false),max_states=infinity,threshold_before_merge=1))']), - - #IssueConfig('dfp-f50k', ['--search', 'astar(merge_and_shrink(merge_strategy=merge_stateless(merge_selector=score_based_filtering(scoring_functions=[goal_relevance,dfp,total_order])),shrink_strategy=shrink_fh(),label_reduction=exact(before_shrinking=false,before_merging=true),max_states=50000))']), - #IssueConfig('rl-f50k', ['--search', 'astar(merge_and_shrink(merge_strategy=merge_precomputed(merge_tree=linear(variable_order=reverse_level)),shrink_strategy=shrink_fh(),label_reduction=exact(before_shrinking=false,before_merging=true),max_states=50000))']), - #IssueConfig('sccs-dfp-f50k', ['--search', 'astar(merge_and_shrink(shrink_strategy=shrink_fh(),merge_strategy=merge_sccs(order_of_sccs=topological,merge_selector=score_based_filtering(scoring_functions=[goal_relevance,dfp,total_order(atomic_ts_order=reverse_level,product_ts_order=new_to_old,atomic_before_product=false)])),label_reduction=exact(before_shrinking=true,before_merging=false),max_states=50000))']), -] -SUITE = common_setup.DEFAULT_OPTIMAL_SUITE -ENVIRONMENT = MaiaEnvironment( - priority=0, email="silvan.sievers@unibas.ch") - -if common_setup.is_test_run(): - SUITE = IssueExperiment.DEFAULT_TEST_SUITE - ENVIRONMENT = LocalEnvironment(processes=4) - -exp = IssueExperiment( - 
revisions=REVISIONS, - configs=CONFIGS, - environment=ENVIRONMENT, -) - -exp.add_suite(BENCHMARKS_DIR, SUITE) -exp.add_resource('ms_parser', 'ms-parser.py', dest='ms-parser.py') -exp.add_command('ms-parser', ['{ms_parser}']) - -# planner outcome attributes -perfect_heuristic = Attribute('perfect_heuristic', absolute=True, min_wins=False) -proved_unsolvability = Attribute('proved_unsolvability', absolute=True, min_wins=False) - -# m&s attributes -ms_construction_time = Attribute('ms_construction_time', absolute=False, min_wins=True, functions=[geometric_mean]) -ms_abstraction_constructed = Attribute('ms_abstraction_constructed', absolute=True, min_wins=False) -ms_final_size = Attribute('ms_final_size', absolute=False, min_wins=True) -ms_out_of_memory = Attribute('ms_out_of_memory', absolute=True, min_wins=True) -ms_out_of_time = Attribute('ms_out_of_time', absolute=True, min_wins=True) -search_out_of_memory = Attribute('search_out_of_memory', absolute=True, min_wins=True) -search_out_of_time = Attribute('search_out_of_time', absolute=True, min_wins=True) - -extra_attributes = [ - perfect_heuristic, - proved_unsolvability, - - ms_construction_time, - ms_abstraction_constructed, - ms_final_size, - ms_out_of_memory, - ms_out_of_time, - search_out_of_memory, - search_out_of_time, -] -attributes = exp.DEFAULT_TABLE_ATTRIBUTES -attributes.extend(extra_attributes) - -exp.add_comparison_table_step() -exp.add_scatter_plot_step() - -exp.run_steps() diff --git a/experiments/issue724/common_setup.py b/experiments/issue724/common_setup.py deleted file mode 100644 index 5d2b40b61c..0000000000 --- a/experiments/issue724/common_setup.py +++ /dev/null @@ -1,385 +0,0 @@ -# -*- coding: utf-8 -*- - -import itertools -import os -import platform -import subprocess -import sys - -from lab.experiment import ARGPARSER -from lab import tools - -from downward.experiment import FastDownwardExperiment -from downward.reports.absolute import AbsoluteReport -from downward.reports.compare import 
ComparativeReport -from downward.reports.scatter import ScatterPlotReport - -from relativescatter import RelativeScatterPlotReport - - -def parse_args(): - ARGPARSER.add_argument( - "--test", - choices=["yes", "no", "auto"], - default="auto", - dest="test_run", - help="test experiment locally on a small suite if --test=yes or " - "--test=auto and we are not on a cluster") - return ARGPARSER.parse_args() - -ARGS = parse_args() - - -DEFAULT_OPTIMAL_SUITE = [ - 'airport', 'barman-opt11-strips', 'barman-opt14-strips', 'blocks', - 'childsnack-opt14-strips', 'depot', 'driverlog', - 'elevators-opt08-strips', 'elevators-opt11-strips', - 'floortile-opt11-strips', 'floortile-opt14-strips', 'freecell', - 'ged-opt14-strips', 'grid', 'gripper', 'hiking-opt14-strips', - 'logistics00', 'logistics98', 'miconic', 'movie', 'mprime', - 'mystery', 'nomystery-opt11-strips', 'openstacks-opt08-strips', - 'openstacks-opt11-strips', 'openstacks-opt14-strips', - 'openstacks-strips', 'parcprinter-08-strips', - 'parcprinter-opt11-strips', 'parking-opt11-strips', - 'parking-opt14-strips', 'pathways-noneg', 'pegsol-08-strips', - 'pegsol-opt11-strips', 'pipesworld-notankage', - 'pipesworld-tankage', 'psr-small', 'rovers', 'satellite', - 'scanalyzer-08-strips', 'scanalyzer-opt11-strips', - 'sokoban-opt08-strips', 'sokoban-opt11-strips', 'storage', - 'tetris-opt14-strips', 'tidybot-opt11-strips', - 'tidybot-opt14-strips', 'tpp', 'transport-opt08-strips', - 'transport-opt11-strips', 'transport-opt14-strips', - 'trucks-strips', 'visitall-opt11-strips', 'visitall-opt14-strips', - 'woodworking-opt08-strips', 'woodworking-opt11-strips', - 'zenotravel'] - -DEFAULT_SATISFICING_SUITE = [ - 'airport', 'assembly', 'barman-sat11-strips', - 'barman-sat14-strips', 'blocks', 'cavediving-14-adl', - 'childsnack-sat14-strips', 'citycar-sat14-adl', 'depot', - 'driverlog', 'elevators-sat08-strips', 'elevators-sat11-strips', - 'floortile-sat11-strips', 'floortile-sat14-strips', 'freecell', - 'ged-sat14-strips', 
'grid', 'gripper', 'hiking-sat14-strips', - 'logistics00', 'logistics98', 'maintenance-sat14-adl', 'miconic', - 'miconic-fulladl', 'miconic-simpleadl', 'movie', 'mprime', - 'mystery', 'nomystery-sat11-strips', 'openstacks', - 'openstacks-sat08-adl', 'openstacks-sat08-strips', - 'openstacks-sat11-strips', 'openstacks-sat14-strips', - 'openstacks-strips', 'optical-telegraphs', 'parcprinter-08-strips', - 'parcprinter-sat11-strips', 'parking-sat11-strips', - 'parking-sat14-strips', 'pathways', 'pathways-noneg', - 'pegsol-08-strips', 'pegsol-sat11-strips', 'philosophers', - 'pipesworld-notankage', 'pipesworld-tankage', 'psr-large', - 'psr-middle', 'psr-small', 'rovers', 'satellite', - 'scanalyzer-08-strips', 'scanalyzer-sat11-strips', 'schedule', - 'sokoban-sat08-strips', 'sokoban-sat11-strips', 'storage', - 'tetris-sat14-strips', 'thoughtful-sat14-strips', - 'tidybot-sat11-strips', 'tpp', 'transport-sat08-strips', - 'transport-sat11-strips', 'transport-sat14-strips', 'trucks', - 'trucks-strips', 'visitall-sat11-strips', 'visitall-sat14-strips', - 'woodworking-sat08-strips', 'woodworking-sat11-strips', - 'zenotravel'] - - -def get_script(): - """Get file name of main script.""" - return tools.get_script_path() - - -def get_script_dir(): - """Get directory of main script. - - Usually a relative directory (depends on how it was called by the user.)""" - return os.path.dirname(get_script()) - - -def get_experiment_name(): - """Get name for experiment. - - Derived from the absolute filename of the main script, e.g. - "/ham/spam/eggs.py" => "spam-eggs".""" - script = os.path.abspath(get_script()) - script_dir = os.path.basename(os.path.dirname(script)) - script_base = os.path.splitext(os.path.basename(script))[0] - return "%s-%s" % (script_dir, script_base) - - -def get_data_dir(): - """Get data dir for the experiment. 
- - This is the subdirectory "data" of the directory containing - the main script.""" - return os.path.join(get_script_dir(), "data", get_experiment_name()) - - -def get_repo_base(): - """Get base directory of the repository, as an absolute path. - - Search upwards in the directory tree from the main script until a - directory with a subdirectory named ".hg" is found. - - Abort if the repo base cannot be found.""" - path = os.path.abspath(get_script_dir()) - while os.path.dirname(path) != path: - if os.path.exists(os.path.join(path, ".hg")): - return path - path = os.path.dirname(path) - sys.exit("repo base could not be found") - - -def is_running_on_cluster(): - node = platform.node() - return ( - "cluster" in node or - node.startswith("gkigrid") or - node in ["habakuk", "turtur"]) - - -def is_test_run(): - return ARGS.test_run == "yes" or ( - ARGS.test_run == "auto" and not is_running_on_cluster()) - - -def get_algo_nick(revision, config_nick): - return "{revision}-{config_nick}".format(**locals()) - - -class IssueConfig(object): - """Hold information about a planner configuration. - - See FastDownwardExperiment.add_algorithm() for documentation of the - constructor's options. 
- - """ - def __init__(self, nick, component_options, - build_options=None, driver_options=None): - self.nick = nick - self.component_options = component_options - self.build_options = build_options - self.driver_options = driver_options - - -class IssueExperiment(FastDownwardExperiment): - """Subclass of FastDownwardExperiment with some convenience features.""" - - DEFAULT_TEST_SUITE = ["gripper:prob01.pddl"] - - DEFAULT_TABLE_ATTRIBUTES = [ - "cost", - "coverage", - "error", - "evaluations", - "expansions", - "expansions_until_last_jump", - "generated", - "memory", - "quality", - "run_dir", - "score_evaluations", - "score_expansions", - "score_generated", - "score_memory", - "score_search_time", - "score_total_time", - "search_time", - "total_time", - ] - - DEFAULT_SCATTER_PLOT_ATTRIBUTES = [ - "evaluations", - "expansions", - "expansions_until_last_jump", - "initial_h_value", - "memory", - "search_time", - "total_time", - ] - - PORTFOLIO_ATTRIBUTES = [ - "cost", - "coverage", - "error", - "plan_length", - "run_dir", - ] - - def __init__(self, revisions=None, configs=None, path=None, **kwargs): - """ - - You can either specify both *revisions* and *configs* or none - of them. If they are omitted, you will need to call - exp.add_algorithm() manually. - - If *revisions* is given, it must be a non-empty list of - revision identifiers, which specify which planner versions to - use in the experiment. The same versions are used for - translator, preprocessor and search. :: - - IssueExperiment(revisions=["issue123", "4b3d581643"], ...) - - If *configs* is given, it must be a non-empty list of - IssueConfig objects. :: - - IssueExperiment(..., configs=[ - IssueConfig("ff", ["--search", "eager_greedy(ff())"]), - IssueConfig( - "lama", [], - driver_options=["--alias", "seq-sat-lama-2011"]), - ]) - - If *path* is specified, it must be the path to where the - experiment should be built (e.g. - /home/john/experiments/issue123/exp01/). 
If omitted, the - experiment path is derived automatically from the main - script's filename. Example:: - - script = experiments/issue123/exp01.py --> - path = experiments/issue123/data/issue123-exp01/ - - """ - - path = path or get_data_dir() - - FastDownwardExperiment.__init__(self, path=path, **kwargs) - - if (revisions and not configs) or (not revisions and configs): - raise ValueError( - "please provide either both or none of revisions and configs") - - for rev in revisions: - for config in configs: - self.add_algorithm( - get_algo_nick(rev, config.nick), - get_repo_base(), - rev, - config.component_options, - build_options=config.build_options, - driver_options=config.driver_options) - - self._revisions = revisions - self._configs = configs - - @classmethod - def _is_portfolio(cls, config_nick): - return "fdss" in config_nick - - @classmethod - def get_supported_attributes(cls, config_nick, attributes): - if cls._is_portfolio(config_nick): - return [attr for attr in attributes - if attr in cls.PORTFOLIO_ATTRIBUTES] - return attributes - - def add_absolute_report_step(self, **kwargs): - """Add step that makes an absolute report. - - Absolute reports are useful for experiments that don't compare - revisions. - - The report is written to the experiment evaluation directory. - - All *kwargs* will be passed to the AbsoluteReport class. If the - keyword argument *attributes* is not specified, a default list - of attributes is used. :: - - exp.add_absolute_report_step(attributes=["coverage"]) - - """ - kwargs.setdefault("attributes", self.DEFAULT_TABLE_ATTRIBUTES) - report = AbsoluteReport(**kwargs) - outfile = os.path.join( - self.eval_dir, - get_experiment_name() + "." + report.output_format) - self.add_report(report, outfile=outfile) - self.add_step( - 'publish-absolute-report', subprocess.call, ['publish', outfile]) - - def add_comparison_table_step(self, **kwargs): - """Add a step that makes pairwise revision comparisons. 
- - Create comparative reports for all pairs of Fast Downward - revisions. Each report pairs up the runs of the same config and - lists the two absolute attribute values and their difference - for all attributes in kwargs["attributes"]. - - All *kwargs* will be passed to the CompareConfigsReport class. - If the keyword argument *attributes* is not specified, a - default list of attributes is used. :: - - exp.add_comparison_table_step(attributes=["coverage"]) - - """ - kwargs.setdefault("attributes", self.DEFAULT_TABLE_ATTRIBUTES) - - def make_comparison_tables(): - for rev1, rev2 in itertools.combinations(self._revisions, 2): - compared_configs = [] - for config in self._configs: - config_nick = config.nick - compared_configs.append( - ("%s-%s" % (rev1, config_nick), - "%s-%s" % (rev2, config_nick), - "Diff (%s)" % config_nick)) - report = ComparativeReport(compared_configs, **kwargs) - outfile = os.path.join( - self.eval_dir, - "%s-%s-%s-compare.%s" % ( - self.name, rev1, rev2, report.output_format)) - report(self.eval_dir, outfile) - - def publish_comparison_tables(): - for rev1, rev2 in itertools.combinations(self._revisions, 2): - outfile = os.path.join( - self.eval_dir, - "%s-%s-%s-compare.html" % (self.name, rev1, rev2)) - subprocess.call(["publish", outfile]) - - self.add_step("make-comparison-tables", make_comparison_tables) - self.add_step( - "publish-comparison-tables", publish_comparison_tables) - - def add_scatter_plot_step(self, relative=False, attributes=None): - """Add step creating (relative) scatter plots for all revision pairs. - - Create a scatter plot for each combination of attribute, - configuration and revisions pair. If *attributes* is not - specified, a list of common scatter plot attributes is used. - For portfolios all attributes except "cost", "coverage" and - "plan_length" will be ignored. 
:: - - exp.add_scatter_plot_step(attributes=["expansions"]) - - """ - if relative: - report_class = RelativeScatterPlotReport - scatter_dir = os.path.join(self.eval_dir, "scatter-relative") - step_name = "make-relative-scatter-plots" - else: - report_class = ScatterPlotReport - scatter_dir = os.path.join(self.eval_dir, "scatter-absolute") - step_name = "make-absolute-scatter-plots" - if attributes is None: - attributes = self.DEFAULT_SCATTER_PLOT_ATTRIBUTES - - def make_scatter_plot(config_nick, rev1, rev2, attribute): - name = "-".join([self.name, rev1, rev2, attribute, config_nick]) - print "Make scatter plot for", name - algo1 = "{}-{}".format(rev1, config_nick) - algo2 = "{}-{}".format(rev2, config_nick) - report = report_class( - filter_config=[algo1, algo2], - attributes=[attribute], - get_category=lambda run1, run2: run1["domain"], - legend_location=(1.3, 0.5)) - report( - self.eval_dir, - os.path.join(scatter_dir, rev1 + "-" + rev2, name)) - - def make_scatter_plots(): - for config in self._configs: - for rev1, rev2 in itertools.combinations(self._revisions, 2): - for attribute in self.get_supported_attributes( - config.nick, attributes): - make_scatter_plot(config.nick, rev1, rev2, attribute) - - self.add_step(step_name, make_scatter_plots) diff --git a/experiments/issue724/relativescatter.py b/experiments/issue724/relativescatter.py deleted file mode 100644 index f74cb6e721..0000000000 --- a/experiments/issue724/relativescatter.py +++ /dev/null @@ -1,105 +0,0 @@ -# -*- coding: utf-8 -*- - -from collections import defaultdict - -from matplotlib import ticker - -from downward.reports.scatter import ScatterPlotReport -from downward.reports.plot import PlotReport, Matplotlib, MatplotlibPlot - - -# TODO: handle outliers - -# TODO: this is mostly copied from ScatterMatplotlib (scatter.py) -class RelativeScatterMatplotlib(Matplotlib): - @classmethod - def _plot(cls, report, axes, categories, styles): - # Display grid - axes.grid(b=True, linestyle='-', 
color='0.75') - - has_points = False - # Generate the scatter plots - for category, coords in sorted(categories.items()): - X, Y = zip(*coords) - axes.scatter(X, Y, s=42, label=category, **styles[category]) - if X and Y: - has_points = True - - if report.xscale == 'linear' or report.yscale == 'linear': - plot_size = report.missing_val * 1.01 - else: - plot_size = report.missing_val * 1.25 - - # make 5 ticks above and below 1 - yticks = [] - tick_step = report.ylim_top**(1/5.0) - for i in xrange(-5, 6): - yticks.append(tick_step**i) - axes.set_yticks(yticks) - axes.get_yaxis().set_major_formatter(ticker.ScalarFormatter()) - - axes.set_xlim(report.xlim_left or -1, report.xlim_right or plot_size) - axes.set_ylim(report.ylim_bottom or -1, report.ylim_top or plot_size) - - for axis in [axes.xaxis, axes.yaxis]: - MatplotlibPlot.change_axis_formatter( - axis, - report.missing_val if report.show_missing else None) - return has_points - - -class RelativeScatterPlotReport(ScatterPlotReport): - """ - Generate a scatter plot that shows a relative comparison of two - algorithms with regard to the given attribute. The attribute value - of algorithm 1 is shown on the x-axis and the relation to the value - of algorithm 2 on the y-axis. - """ - - def __init__(self, show_missing=True, get_category=None, **kwargs): - ScatterPlotReport.__init__(self, show_missing, get_category, **kwargs) - if self.output_format == 'tex': - raise "not supported" - else: - self.writer = RelativeScatterMatplotlib - - def _fill_categories(self, runs): - # We discard the *runs* parameter. 
- # Map category names to value tuples - categories = defaultdict(list) - self.ylim_bottom = 2 - self.ylim_top = 0.5 - self.xlim_left = float("inf") - for (domain, problem), runs in self.problem_runs.items(): - if len(runs) != 2: - continue - run1, run2 = runs - assert (run1['algorithm'] == self.algorithms[0] and - run2['algorithm'] == self.algorithms[1]) - val1 = run1.get(self.attribute) - val2 = run2.get(self.attribute) - if val1 is None or val2 is None: - continue - category = self.get_category(run1, run2) - assert val1 > 0, (domain, problem, self.algorithms[0], val1) - assert val2 > 0, (domain, problem, self.algorithms[1], val2) - x = val1 - y = val2 / float(val1) - - categories[category].append((x, y)) - - self.ylim_top = max(self.ylim_top, y) - self.ylim_bottom = min(self.ylim_bottom, y) - self.xlim_left = min(self.xlim_left, x) - - # center around 1 - if self.ylim_bottom < 1: - self.ylim_top = max(self.ylim_top, 1 / float(self.ylim_bottom)) - if self.ylim_top > 1: - self.ylim_bottom = min(self.ylim_bottom, 1 / float(self.ylim_top)) - return categories - - def _set_scales(self, xscale, yscale): - # ScatterPlot uses log-scaling on the x-axis by default. - PlotReport._set_scales( - self, xscale or self.attribute.scale or 'log', 'log') diff --git a/experiments/issue724/v1-opt.py b/experiments/issue724/v1-opt.py deleted file mode 100755 index 99e36a127c..0000000000 --- a/experiments/issue724/v1-opt.py +++ /dev/null @@ -1,48 +0,0 @@ -#! 
/usr/bin/env python -# -*- coding: utf-8 -*- - -import os - -from lab.environments import LocalEnvironment, BaselSlurmEnvironment - -import common_setup -from common_setup import IssueConfig, IssueExperiment -from relativescatter import RelativeScatterPlotReport - -DIR = os.path.dirname(os.path.abspath(__file__)) -BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"] -REVISIONS = ["issue724-base", "issue724-v1"] -CONFIGS = [ - IssueConfig('astar-blind', ['--search', 'astar(blind())']), - IssueConfig('astar-lmcut', ['--search', 'astar(lmcut())']), - IssueConfig("seq-opt-bjolp", [], driver_options=["--alias", "seq-opt-bjolp"]), - -] -SUITE = common_setup.DEFAULT_OPTIMAL_SUITE -ENVIRONMENT = BaselSlurmEnvironment(email="guillem.frances@unibas.ch", export=["PATH", "DOWNWARD_BENCHMARKS", "PYTHONPATH"]) - -if common_setup.is_test_run(): - SUITE = IssueExperiment.DEFAULT_TEST_SUITE - ENVIRONMENT = LocalEnvironment(processes=1) - -exp = IssueExperiment( - revisions=REVISIONS, - configs=CONFIGS, - environment=ENVIRONMENT, -) -exp.add_suite(BENCHMARKS_DIR, SUITE) -exp.add_absolute_report_step() -exp.add_comparison_table_step() - -for attribute in ["total_time"]: - for config in CONFIGS: - exp.add_report( - RelativeScatterPlotReport( - attributes=[attribute], - filter_algorithm=["{}-{}".format(rev, config.nick) for rev in REVISIONS], - get_category=lambda run1, run2: run1.get("domain"), - ), - outfile="{}-{}-{}-{}-{}.png".format(exp.name, attribute, config.nick, *REVISIONS) - ) - -exp.run_steps() diff --git a/experiments/issue724/v1-sat.py b/experiments/issue724/v1-sat.py deleted file mode 100755 index 6ff92d10a4..0000000000 --- a/experiments/issue724/v1-sat.py +++ /dev/null @@ -1,47 +0,0 @@ -#! 
/usr/bin/env python -# -*- coding: utf-8 -*- - -import os - -from lab.environments import LocalEnvironment, BaselSlurmEnvironment - -import common_setup -from common_setup import IssueConfig, IssueExperiment -from relativescatter import RelativeScatterPlotReport - -DIR = os.path.dirname(os.path.abspath(__file__)) -BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"] -REVISIONS = ["issue724-base", "issue724-v2"] -CONFIGS = [ - IssueConfig('lama-first', [], driver_options=["--alias", "lama-first"]), - IssueConfig("ehc-ff", ["--search", "ehc(ff())"]), -] - -SUITE = common_setup.DEFAULT_SATISFICING_SUITE -ENVIRONMENT = BaselSlurmEnvironment(email="guillem.frances@unibas.ch", export=["PATH", "DOWNWARD_BENCHMARKS", "PYTHONPATH"]) - -if common_setup.is_test_run(): - SUITE = IssueExperiment.DEFAULT_TEST_SUITE - ENVIRONMENT = LocalEnvironment(processes=1) - -exp = IssueExperiment( - revisions=REVISIONS, - configs=CONFIGS, - environment=ENVIRONMENT, -) -exp.add_suite(BENCHMARKS_DIR, SUITE) -exp.add_absolute_report_step() -exp.add_comparison_table_step() - -for attribute in ["total_time"]: - for config in CONFIGS: - exp.add_report( - RelativeScatterPlotReport( - attributes=[attribute], - filter_algorithm=["{}-{}".format(rev, config.nick) for rev in REVISIONS], - get_category=lambda run1, run2: run1.get("domain"), - ), - outfile="{}-{}-{}-{}-{}.png".format(exp.name, attribute, config.nick, *REVISIONS) - ) - -exp.run_steps() diff --git a/experiments/issue725/common_setup.py b/experiments/issue725/common_setup.py deleted file mode 100644 index 5d2b40b61c..0000000000 --- a/experiments/issue725/common_setup.py +++ /dev/null @@ -1,385 +0,0 @@ -# -*- coding: utf-8 -*- - -import itertools -import os -import platform -import subprocess -import sys - -from lab.experiment import ARGPARSER -from lab import tools - -from downward.experiment import FastDownwardExperiment -from downward.reports.absolute import AbsoluteReport -from downward.reports.compare import ComparativeReport -from 
downward.reports.scatter import ScatterPlotReport - -from relativescatter import RelativeScatterPlotReport - - -def parse_args(): - ARGPARSER.add_argument( - "--test", - choices=["yes", "no", "auto"], - default="auto", - dest="test_run", - help="test experiment locally on a small suite if --test=yes or " - "--test=auto and we are not on a cluster") - return ARGPARSER.parse_args() - -ARGS = parse_args() - - -DEFAULT_OPTIMAL_SUITE = [ - 'airport', 'barman-opt11-strips', 'barman-opt14-strips', 'blocks', - 'childsnack-opt14-strips', 'depot', 'driverlog', - 'elevators-opt08-strips', 'elevators-opt11-strips', - 'floortile-opt11-strips', 'floortile-opt14-strips', 'freecell', - 'ged-opt14-strips', 'grid', 'gripper', 'hiking-opt14-strips', - 'logistics00', 'logistics98', 'miconic', 'movie', 'mprime', - 'mystery', 'nomystery-opt11-strips', 'openstacks-opt08-strips', - 'openstacks-opt11-strips', 'openstacks-opt14-strips', - 'openstacks-strips', 'parcprinter-08-strips', - 'parcprinter-opt11-strips', 'parking-opt11-strips', - 'parking-opt14-strips', 'pathways-noneg', 'pegsol-08-strips', - 'pegsol-opt11-strips', 'pipesworld-notankage', - 'pipesworld-tankage', 'psr-small', 'rovers', 'satellite', - 'scanalyzer-08-strips', 'scanalyzer-opt11-strips', - 'sokoban-opt08-strips', 'sokoban-opt11-strips', 'storage', - 'tetris-opt14-strips', 'tidybot-opt11-strips', - 'tidybot-opt14-strips', 'tpp', 'transport-opt08-strips', - 'transport-opt11-strips', 'transport-opt14-strips', - 'trucks-strips', 'visitall-opt11-strips', 'visitall-opt14-strips', - 'woodworking-opt08-strips', 'woodworking-opt11-strips', - 'zenotravel'] - -DEFAULT_SATISFICING_SUITE = [ - 'airport', 'assembly', 'barman-sat11-strips', - 'barman-sat14-strips', 'blocks', 'cavediving-14-adl', - 'childsnack-sat14-strips', 'citycar-sat14-adl', 'depot', - 'driverlog', 'elevators-sat08-strips', 'elevators-sat11-strips', - 'floortile-sat11-strips', 'floortile-sat14-strips', 'freecell', - 'ged-sat14-strips', 'grid', 'gripper', 
'hiking-sat14-strips', - 'logistics00', 'logistics98', 'maintenance-sat14-adl', 'miconic', - 'miconic-fulladl', 'miconic-simpleadl', 'movie', 'mprime', - 'mystery', 'nomystery-sat11-strips', 'openstacks', - 'openstacks-sat08-adl', 'openstacks-sat08-strips', - 'openstacks-sat11-strips', 'openstacks-sat14-strips', - 'openstacks-strips', 'optical-telegraphs', 'parcprinter-08-strips', - 'parcprinter-sat11-strips', 'parking-sat11-strips', - 'parking-sat14-strips', 'pathways', 'pathways-noneg', - 'pegsol-08-strips', 'pegsol-sat11-strips', 'philosophers', - 'pipesworld-notankage', 'pipesworld-tankage', 'psr-large', - 'psr-middle', 'psr-small', 'rovers', 'satellite', - 'scanalyzer-08-strips', 'scanalyzer-sat11-strips', 'schedule', - 'sokoban-sat08-strips', 'sokoban-sat11-strips', 'storage', - 'tetris-sat14-strips', 'thoughtful-sat14-strips', - 'tidybot-sat11-strips', 'tpp', 'transport-sat08-strips', - 'transport-sat11-strips', 'transport-sat14-strips', 'trucks', - 'trucks-strips', 'visitall-sat11-strips', 'visitall-sat14-strips', - 'woodworking-sat08-strips', 'woodworking-sat11-strips', - 'zenotravel'] - - -def get_script(): - """Get file name of main script.""" - return tools.get_script_path() - - -def get_script_dir(): - """Get directory of main script. - - Usually a relative directory (depends on how it was called by the user.)""" - return os.path.dirname(get_script()) - - -def get_experiment_name(): - """Get name for experiment. - - Derived from the absolute filename of the main script, e.g. - "/ham/spam/eggs.py" => "spam-eggs".""" - script = os.path.abspath(get_script()) - script_dir = os.path.basename(os.path.dirname(script)) - script_base = os.path.splitext(os.path.basename(script))[0] - return "%s-%s" % (script_dir, script_base) - - -def get_data_dir(): - """Get data dir for the experiment. 
- - This is the subdirectory "data" of the directory containing - the main script.""" - return os.path.join(get_script_dir(), "data", get_experiment_name()) - - -def get_repo_base(): - """Get base directory of the repository, as an absolute path. - - Search upwards in the directory tree from the main script until a - directory with a subdirectory named ".hg" is found. - - Abort if the repo base cannot be found.""" - path = os.path.abspath(get_script_dir()) - while os.path.dirname(path) != path: - if os.path.exists(os.path.join(path, ".hg")): - return path - path = os.path.dirname(path) - sys.exit("repo base could not be found") - - -def is_running_on_cluster(): - node = platform.node() - return ( - "cluster" in node or - node.startswith("gkigrid") or - node in ["habakuk", "turtur"]) - - -def is_test_run(): - return ARGS.test_run == "yes" or ( - ARGS.test_run == "auto" and not is_running_on_cluster()) - - -def get_algo_nick(revision, config_nick): - return "{revision}-{config_nick}".format(**locals()) - - -class IssueConfig(object): - """Hold information about a planner configuration. - - See FastDownwardExperiment.add_algorithm() for documentation of the - constructor's options. 
- - """ - def __init__(self, nick, component_options, - build_options=None, driver_options=None): - self.nick = nick - self.component_options = component_options - self.build_options = build_options - self.driver_options = driver_options - - -class IssueExperiment(FastDownwardExperiment): - """Subclass of FastDownwardExperiment with some convenience features.""" - - DEFAULT_TEST_SUITE = ["gripper:prob01.pddl"] - - DEFAULT_TABLE_ATTRIBUTES = [ - "cost", - "coverage", - "error", - "evaluations", - "expansions", - "expansions_until_last_jump", - "generated", - "memory", - "quality", - "run_dir", - "score_evaluations", - "score_expansions", - "score_generated", - "score_memory", - "score_search_time", - "score_total_time", - "search_time", - "total_time", - ] - - DEFAULT_SCATTER_PLOT_ATTRIBUTES = [ - "evaluations", - "expansions", - "expansions_until_last_jump", - "initial_h_value", - "memory", - "search_time", - "total_time", - ] - - PORTFOLIO_ATTRIBUTES = [ - "cost", - "coverage", - "error", - "plan_length", - "run_dir", - ] - - def __init__(self, revisions=None, configs=None, path=None, **kwargs): - """ - - You can either specify both *revisions* and *configs* or none - of them. If they are omitted, you will need to call - exp.add_algorithm() manually. - - If *revisions* is given, it must be a non-empty list of - revision identifiers, which specify which planner versions to - use in the experiment. The same versions are used for - translator, preprocessor and search. :: - - IssueExperiment(revisions=["issue123", "4b3d581643"], ...) - - If *configs* is given, it must be a non-empty list of - IssueConfig objects. :: - - IssueExperiment(..., configs=[ - IssueConfig("ff", ["--search", "eager_greedy(ff())"]), - IssueConfig( - "lama", [], - driver_options=["--alias", "seq-sat-lama-2011"]), - ]) - - If *path* is specified, it must be the path to where the - experiment should be built (e.g. - /home/john/experiments/issue123/exp01/). 
If omitted, the - experiment path is derived automatically from the main - script's filename. Example:: - - script = experiments/issue123/exp01.py --> - path = experiments/issue123/data/issue123-exp01/ - - """ - - path = path or get_data_dir() - - FastDownwardExperiment.__init__(self, path=path, **kwargs) - - if (revisions and not configs) or (not revisions and configs): - raise ValueError( - "please provide either both or none of revisions and configs") - - for rev in revisions: - for config in configs: - self.add_algorithm( - get_algo_nick(rev, config.nick), - get_repo_base(), - rev, - config.component_options, - build_options=config.build_options, - driver_options=config.driver_options) - - self._revisions = revisions - self._configs = configs - - @classmethod - def _is_portfolio(cls, config_nick): - return "fdss" in config_nick - - @classmethod - def get_supported_attributes(cls, config_nick, attributes): - if cls._is_portfolio(config_nick): - return [attr for attr in attributes - if attr in cls.PORTFOLIO_ATTRIBUTES] - return attributes - - def add_absolute_report_step(self, **kwargs): - """Add step that makes an absolute report. - - Absolute reports are useful for experiments that don't compare - revisions. - - The report is written to the experiment evaluation directory. - - All *kwargs* will be passed to the AbsoluteReport class. If the - keyword argument *attributes* is not specified, a default list - of attributes is used. :: - - exp.add_absolute_report_step(attributes=["coverage"]) - - """ - kwargs.setdefault("attributes", self.DEFAULT_TABLE_ATTRIBUTES) - report = AbsoluteReport(**kwargs) - outfile = os.path.join( - self.eval_dir, - get_experiment_name() + "." + report.output_format) - self.add_report(report, outfile=outfile) - self.add_step( - 'publish-absolute-report', subprocess.call, ['publish', outfile]) - - def add_comparison_table_step(self, **kwargs): - """Add a step that makes pairwise revision comparisons. 
- - Create comparative reports for all pairs of Fast Downward - revisions. Each report pairs up the runs of the same config and - lists the two absolute attribute values and their difference - for all attributes in kwargs["attributes"]. - - All *kwargs* will be passed to the CompareConfigsReport class. - If the keyword argument *attributes* is not specified, a - default list of attributes is used. :: - - exp.add_comparison_table_step(attributes=["coverage"]) - - """ - kwargs.setdefault("attributes", self.DEFAULT_TABLE_ATTRIBUTES) - - def make_comparison_tables(): - for rev1, rev2 in itertools.combinations(self._revisions, 2): - compared_configs = [] - for config in self._configs: - config_nick = config.nick - compared_configs.append( - ("%s-%s" % (rev1, config_nick), - "%s-%s" % (rev2, config_nick), - "Diff (%s)" % config_nick)) - report = ComparativeReport(compared_configs, **kwargs) - outfile = os.path.join( - self.eval_dir, - "%s-%s-%s-compare.%s" % ( - self.name, rev1, rev2, report.output_format)) - report(self.eval_dir, outfile) - - def publish_comparison_tables(): - for rev1, rev2 in itertools.combinations(self._revisions, 2): - outfile = os.path.join( - self.eval_dir, - "%s-%s-%s-compare.html" % (self.name, rev1, rev2)) - subprocess.call(["publish", outfile]) - - self.add_step("make-comparison-tables", make_comparison_tables) - self.add_step( - "publish-comparison-tables", publish_comparison_tables) - - def add_scatter_plot_step(self, relative=False, attributes=None): - """Add step creating (relative) scatter plots for all revision pairs. - - Create a scatter plot for each combination of attribute, - configuration and revisions pair. If *attributes* is not - specified, a list of common scatter plot attributes is used. - For portfolios all attributes except "cost", "coverage" and - "plan_length" will be ignored. 
:: - - exp.add_scatter_plot_step(attributes=["expansions"]) - - """ - if relative: - report_class = RelativeScatterPlotReport - scatter_dir = os.path.join(self.eval_dir, "scatter-relative") - step_name = "make-relative-scatter-plots" - else: - report_class = ScatterPlotReport - scatter_dir = os.path.join(self.eval_dir, "scatter-absolute") - step_name = "make-absolute-scatter-plots" - if attributes is None: - attributes = self.DEFAULT_SCATTER_PLOT_ATTRIBUTES - - def make_scatter_plot(config_nick, rev1, rev2, attribute): - name = "-".join([self.name, rev1, rev2, attribute, config_nick]) - print "Make scatter plot for", name - algo1 = "{}-{}".format(rev1, config_nick) - algo2 = "{}-{}".format(rev2, config_nick) - report = report_class( - filter_config=[algo1, algo2], - attributes=[attribute], - get_category=lambda run1, run2: run1["domain"], - legend_location=(1.3, 0.5)) - report( - self.eval_dir, - os.path.join(scatter_dir, rev1 + "-" + rev2, name)) - - def make_scatter_plots(): - for config in self._configs: - for rev1, rev2 in itertools.combinations(self._revisions, 2): - for attribute in self.get_supported_attributes( - config.nick, attributes): - make_scatter_plot(config.nick, rev1, rev2, attribute) - - self.add_step(step_name, make_scatter_plots) diff --git a/experiments/issue725/relativescatter.py b/experiments/issue725/relativescatter.py deleted file mode 100644 index f74cb6e721..0000000000 --- a/experiments/issue725/relativescatter.py +++ /dev/null @@ -1,105 +0,0 @@ -# -*- coding: utf-8 -*- - -from collections import defaultdict - -from matplotlib import ticker - -from downward.reports.scatter import ScatterPlotReport -from downward.reports.plot import PlotReport, Matplotlib, MatplotlibPlot - - -# TODO: handle outliers - -# TODO: this is mostly copied from ScatterMatplotlib (scatter.py) -class RelativeScatterMatplotlib(Matplotlib): - @classmethod - def _plot(cls, report, axes, categories, styles): - # Display grid - axes.grid(b=True, linestyle='-', 
color='0.75') - - has_points = False - # Generate the scatter plots - for category, coords in sorted(categories.items()): - X, Y = zip(*coords) - axes.scatter(X, Y, s=42, label=category, **styles[category]) - if X and Y: - has_points = True - - if report.xscale == 'linear' or report.yscale == 'linear': - plot_size = report.missing_val * 1.01 - else: - plot_size = report.missing_val * 1.25 - - # make 5 ticks above and below 1 - yticks = [] - tick_step = report.ylim_top**(1/5.0) - for i in xrange(-5, 6): - yticks.append(tick_step**i) - axes.set_yticks(yticks) - axes.get_yaxis().set_major_formatter(ticker.ScalarFormatter()) - - axes.set_xlim(report.xlim_left or -1, report.xlim_right or plot_size) - axes.set_ylim(report.ylim_bottom or -1, report.ylim_top or plot_size) - - for axis in [axes.xaxis, axes.yaxis]: - MatplotlibPlot.change_axis_formatter( - axis, - report.missing_val if report.show_missing else None) - return has_points - - -class RelativeScatterPlotReport(ScatterPlotReport): - """ - Generate a scatter plot that shows a relative comparison of two - algorithms with regard to the given attribute. The attribute value - of algorithm 1 is shown on the x-axis and the relation to the value - of algorithm 2 on the y-axis. - """ - - def __init__(self, show_missing=True, get_category=None, **kwargs): - ScatterPlotReport.__init__(self, show_missing, get_category, **kwargs) - if self.output_format == 'tex': - raise "not supported" - else: - self.writer = RelativeScatterMatplotlib - - def _fill_categories(self, runs): - # We discard the *runs* parameter. 
- # Map category names to value tuples - categories = defaultdict(list) - self.ylim_bottom = 2 - self.ylim_top = 0.5 - self.xlim_left = float("inf") - for (domain, problem), runs in self.problem_runs.items(): - if len(runs) != 2: - continue - run1, run2 = runs - assert (run1['algorithm'] == self.algorithms[0] and - run2['algorithm'] == self.algorithms[1]) - val1 = run1.get(self.attribute) - val2 = run2.get(self.attribute) - if val1 is None or val2 is None: - continue - category = self.get_category(run1, run2) - assert val1 > 0, (domain, problem, self.algorithms[0], val1) - assert val2 > 0, (domain, problem, self.algorithms[1], val2) - x = val1 - y = val2 / float(val1) - - categories[category].append((x, y)) - - self.ylim_top = max(self.ylim_top, y) - self.ylim_bottom = min(self.ylim_bottom, y) - self.xlim_left = min(self.xlim_left, x) - - # center around 1 - if self.ylim_bottom < 1: - self.ylim_top = max(self.ylim_top, 1 / float(self.ylim_bottom)) - if self.ylim_top > 1: - self.ylim_bottom = min(self.ylim_bottom, 1 / float(self.ylim_top)) - return categories - - def _set_scales(self, xscale, yscale): - # ScatterPlot uses log-scaling on the x-axis by default. - PlotReport._set_scales( - self, xscale or self.attribute.scale or 'log', 'log') diff --git a/experiments/issue725/v1-opt.py b/experiments/issue725/v1-opt.py deleted file mode 100755 index d584b9fc46..0000000000 --- a/experiments/issue725/v1-opt.py +++ /dev/null @@ -1,49 +0,0 @@ -#! 
/usr/bin/env python -# -*- coding: utf-8 -*- - -import os - -from lab.environments import LocalEnvironment, MaiaEnvironment - -import common_setup -from common_setup import IssueConfig, IssueExperiment -from relativescatter import RelativeScatterPlotReport - - -DIR = os.path.dirname(os.path.abspath(__file__)) -BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"] -REVISIONS = ["issue725-base", "issue725-v1"] -CONFIGS = [ - IssueConfig("blind", ["--search", "astar(blind())"]), - IssueConfig("lmcut", ["--search", "astar(lmcut())"]), -] -SUITE = common_setup.DEFAULT_OPTIMAL_SUITE -ENVIRONMENT = MaiaEnvironment( - priority=0, email="florian.pommerening@unibas.ch") - -if common_setup.is_test_run(): - SUITE = IssueExperiment.DEFAULT_TEST_SUITE - ENVIRONMENT = LocalEnvironment(processes=1) - -exp = IssueExperiment( - revisions=REVISIONS, - configs=CONFIGS, - environment=ENVIRONMENT, -) -exp.add_suite(BENCHMARKS_DIR, SUITE) - -exp.add_absolute_report_step() -exp.add_comparison_table_step() - -for attr in ["total_time", "search_time", "memory"]: - for rev1, rev2 in [("base", "v1")]: - for config_nick in ["blind", "lmcut"]: - exp.add_report(RelativeScatterPlotReport( - attributes=[attr], - filter_algorithm=["issue725-%s-%s" % (rev1, config_nick), - "issue725-%s-%s" % (rev2, config_nick)], - get_category=lambda r1, r2: r1["domain"], - ), - outfile="issue725-%s-%s-%s-%s.png" % (config_nick, attr, rev1, rev2)) - -exp.run_steps() diff --git a/experiments/issue725/v1-sat.py b/experiments/issue725/v1-sat.py deleted file mode 100755 index 9cfc5c5069..0000000000 --- a/experiments/issue725/v1-sat.py +++ /dev/null @@ -1,57 +0,0 @@ -#! 
/usr/bin/env python -# -*- coding: utf-8 -*- - -import os - -from lab.environments import LocalEnvironment, MaiaEnvironment - -import common_setup -from common_setup import IssueConfig, IssueExperiment -from relativescatter import RelativeScatterPlotReport - - -DIR = os.path.dirname(os.path.abspath(__file__)) -BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"] -REVISIONS = ["issue725-base", "issue725-v1"] -CONFIGS = [ - IssueConfig( - "lama-first", - [], - driver_options=["--alias", "lama-first"]), - IssueConfig( - "lama", - [], - driver_options=["--alias", "seq-sat-lama-2011"]), - IssueConfig("ehc_ff", ["--heuristic", "h=ff()", "--search", "ehc(h, preferred=h)"]), -] -SUITE = common_setup.DEFAULT_SATISFICING_SUITE -ENVIRONMENT = MaiaEnvironment( - priority=0, email="florian.pommerening@unibas.ch") - -if common_setup.is_test_run(): - SUITE = IssueExperiment.DEFAULT_TEST_SUITE - ENVIRONMENT = LocalEnvironment(processes=1) - -exp = IssueExperiment( - revisions=REVISIONS, - configs=CONFIGS, - environment=ENVIRONMENT, -) -exp.add_suite(BENCHMARKS_DIR, SUITE) - -exp.add_absolute_report_step() -exp.add_comparison_table_step() - -for attr in ["total_time", "search_time", "memory"]: - for rev1, rev2 in [("base", "v1")]: - for config_nick in ["lama-first", "ehc_ff"]: - exp.add_report(RelativeScatterPlotReport( - attributes=[attr], - filter_algorithm=["issue725-%s-%s" % (rev1, config_nick), - "issue725-%s-%s" % (rev2, config_nick)], - get_category=lambda r1, r2: r1["domain"], - ), - outfile="issue725-%s-%s-%s-%s.png" % (config_nick, attr, rev1, rev2)) - - -exp.run_steps() diff --git a/experiments/issue726/common_setup.py b/experiments/issue726/common_setup.py deleted file mode 100644 index 5d2b40b61c..0000000000 --- a/experiments/issue726/common_setup.py +++ /dev/null @@ -1,385 +0,0 @@ -# -*- coding: utf-8 -*- - -import itertools -import os -import platform -import subprocess -import sys - -from lab.experiment import ARGPARSER -from lab import tools - -from 
downward.experiment import FastDownwardExperiment -from downward.reports.absolute import AbsoluteReport -from downward.reports.compare import ComparativeReport -from downward.reports.scatter import ScatterPlotReport - -from relativescatter import RelativeScatterPlotReport - - -def parse_args(): - ARGPARSER.add_argument( - "--test", - choices=["yes", "no", "auto"], - default="auto", - dest="test_run", - help="test experiment locally on a small suite if --test=yes or " - "--test=auto and we are not on a cluster") - return ARGPARSER.parse_args() - -ARGS = parse_args() - - -DEFAULT_OPTIMAL_SUITE = [ - 'airport', 'barman-opt11-strips', 'barman-opt14-strips', 'blocks', - 'childsnack-opt14-strips', 'depot', 'driverlog', - 'elevators-opt08-strips', 'elevators-opt11-strips', - 'floortile-opt11-strips', 'floortile-opt14-strips', 'freecell', - 'ged-opt14-strips', 'grid', 'gripper', 'hiking-opt14-strips', - 'logistics00', 'logistics98', 'miconic', 'movie', 'mprime', - 'mystery', 'nomystery-opt11-strips', 'openstacks-opt08-strips', - 'openstacks-opt11-strips', 'openstacks-opt14-strips', - 'openstacks-strips', 'parcprinter-08-strips', - 'parcprinter-opt11-strips', 'parking-opt11-strips', - 'parking-opt14-strips', 'pathways-noneg', 'pegsol-08-strips', - 'pegsol-opt11-strips', 'pipesworld-notankage', - 'pipesworld-tankage', 'psr-small', 'rovers', 'satellite', - 'scanalyzer-08-strips', 'scanalyzer-opt11-strips', - 'sokoban-opt08-strips', 'sokoban-opt11-strips', 'storage', - 'tetris-opt14-strips', 'tidybot-opt11-strips', - 'tidybot-opt14-strips', 'tpp', 'transport-opt08-strips', - 'transport-opt11-strips', 'transport-opt14-strips', - 'trucks-strips', 'visitall-opt11-strips', 'visitall-opt14-strips', - 'woodworking-opt08-strips', 'woodworking-opt11-strips', - 'zenotravel'] - -DEFAULT_SATISFICING_SUITE = [ - 'airport', 'assembly', 'barman-sat11-strips', - 'barman-sat14-strips', 'blocks', 'cavediving-14-adl', - 'childsnack-sat14-strips', 'citycar-sat14-adl', 'depot', - 'driverlog', 
'elevators-sat08-strips', 'elevators-sat11-strips', - 'floortile-sat11-strips', 'floortile-sat14-strips', 'freecell', - 'ged-sat14-strips', 'grid', 'gripper', 'hiking-sat14-strips', - 'logistics00', 'logistics98', 'maintenance-sat14-adl', 'miconic', - 'miconic-fulladl', 'miconic-simpleadl', 'movie', 'mprime', - 'mystery', 'nomystery-sat11-strips', 'openstacks', - 'openstacks-sat08-adl', 'openstacks-sat08-strips', - 'openstacks-sat11-strips', 'openstacks-sat14-strips', - 'openstacks-strips', 'optical-telegraphs', 'parcprinter-08-strips', - 'parcprinter-sat11-strips', 'parking-sat11-strips', - 'parking-sat14-strips', 'pathways', 'pathways-noneg', - 'pegsol-08-strips', 'pegsol-sat11-strips', 'philosophers', - 'pipesworld-notankage', 'pipesworld-tankage', 'psr-large', - 'psr-middle', 'psr-small', 'rovers', 'satellite', - 'scanalyzer-08-strips', 'scanalyzer-sat11-strips', 'schedule', - 'sokoban-sat08-strips', 'sokoban-sat11-strips', 'storage', - 'tetris-sat14-strips', 'thoughtful-sat14-strips', - 'tidybot-sat11-strips', 'tpp', 'transport-sat08-strips', - 'transport-sat11-strips', 'transport-sat14-strips', 'trucks', - 'trucks-strips', 'visitall-sat11-strips', 'visitall-sat14-strips', - 'woodworking-sat08-strips', 'woodworking-sat11-strips', - 'zenotravel'] - - -def get_script(): - """Get file name of main script.""" - return tools.get_script_path() - - -def get_script_dir(): - """Get directory of main script. - - Usually a relative directory (depends on how it was called by the user.)""" - return os.path.dirname(get_script()) - - -def get_experiment_name(): - """Get name for experiment. - - Derived from the absolute filename of the main script, e.g. - "/ham/spam/eggs.py" => "spam-eggs".""" - script = os.path.abspath(get_script()) - script_dir = os.path.basename(os.path.dirname(script)) - script_base = os.path.splitext(os.path.basename(script))[0] - return "%s-%s" % (script_dir, script_base) - - -def get_data_dir(): - """Get data dir for the experiment. 
- - This is the subdirectory "data" of the directory containing - the main script.""" - return os.path.join(get_script_dir(), "data", get_experiment_name()) - - -def get_repo_base(): - """Get base directory of the repository, as an absolute path. - - Search upwards in the directory tree from the main script until a - directory with a subdirectory named ".hg" is found. - - Abort if the repo base cannot be found.""" - path = os.path.abspath(get_script_dir()) - while os.path.dirname(path) != path: - if os.path.exists(os.path.join(path, ".hg")): - return path - path = os.path.dirname(path) - sys.exit("repo base could not be found") - - -def is_running_on_cluster(): - node = platform.node() - return ( - "cluster" in node or - node.startswith("gkigrid") or - node in ["habakuk", "turtur"]) - - -def is_test_run(): - return ARGS.test_run == "yes" or ( - ARGS.test_run == "auto" and not is_running_on_cluster()) - - -def get_algo_nick(revision, config_nick): - return "{revision}-{config_nick}".format(**locals()) - - -class IssueConfig(object): - """Hold information about a planner configuration. - - See FastDownwardExperiment.add_algorithm() for documentation of the - constructor's options. 
- - """ - def __init__(self, nick, component_options, - build_options=None, driver_options=None): - self.nick = nick - self.component_options = component_options - self.build_options = build_options - self.driver_options = driver_options - - -class IssueExperiment(FastDownwardExperiment): - """Subclass of FastDownwardExperiment with some convenience features.""" - - DEFAULT_TEST_SUITE = ["gripper:prob01.pddl"] - - DEFAULT_TABLE_ATTRIBUTES = [ - "cost", - "coverage", - "error", - "evaluations", - "expansions", - "expansions_until_last_jump", - "generated", - "memory", - "quality", - "run_dir", - "score_evaluations", - "score_expansions", - "score_generated", - "score_memory", - "score_search_time", - "score_total_time", - "search_time", - "total_time", - ] - - DEFAULT_SCATTER_PLOT_ATTRIBUTES = [ - "evaluations", - "expansions", - "expansions_until_last_jump", - "initial_h_value", - "memory", - "search_time", - "total_time", - ] - - PORTFOLIO_ATTRIBUTES = [ - "cost", - "coverage", - "error", - "plan_length", - "run_dir", - ] - - def __init__(self, revisions=None, configs=None, path=None, **kwargs): - """ - - You can either specify both *revisions* and *configs* or none - of them. If they are omitted, you will need to call - exp.add_algorithm() manually. - - If *revisions* is given, it must be a non-empty list of - revision identifiers, which specify which planner versions to - use in the experiment. The same versions are used for - translator, preprocessor and search. :: - - IssueExperiment(revisions=["issue123", "4b3d581643"], ...) - - If *configs* is given, it must be a non-empty list of - IssueConfig objects. :: - - IssueExperiment(..., configs=[ - IssueConfig("ff", ["--search", "eager_greedy(ff())"]), - IssueConfig( - "lama", [], - driver_options=["--alias", "seq-sat-lama-2011"]), - ]) - - If *path* is specified, it must be the path to where the - experiment should be built (e.g. - /home/john/experiments/issue123/exp01/). 
If omitted, the - experiment path is derived automatically from the main - script's filename. Example:: - - script = experiments/issue123/exp01.py --> - path = experiments/issue123/data/issue123-exp01/ - - """ - - path = path or get_data_dir() - - FastDownwardExperiment.__init__(self, path=path, **kwargs) - - if (revisions and not configs) or (not revisions and configs): - raise ValueError( - "please provide either both or none of revisions and configs") - - for rev in revisions: - for config in configs: - self.add_algorithm( - get_algo_nick(rev, config.nick), - get_repo_base(), - rev, - config.component_options, - build_options=config.build_options, - driver_options=config.driver_options) - - self._revisions = revisions - self._configs = configs - - @classmethod - def _is_portfolio(cls, config_nick): - return "fdss" in config_nick - - @classmethod - def get_supported_attributes(cls, config_nick, attributes): - if cls._is_portfolio(config_nick): - return [attr for attr in attributes - if attr in cls.PORTFOLIO_ATTRIBUTES] - return attributes - - def add_absolute_report_step(self, **kwargs): - """Add step that makes an absolute report. - - Absolute reports are useful for experiments that don't compare - revisions. - - The report is written to the experiment evaluation directory. - - All *kwargs* will be passed to the AbsoluteReport class. If the - keyword argument *attributes* is not specified, a default list - of attributes is used. :: - - exp.add_absolute_report_step(attributes=["coverage"]) - - """ - kwargs.setdefault("attributes", self.DEFAULT_TABLE_ATTRIBUTES) - report = AbsoluteReport(**kwargs) - outfile = os.path.join( - self.eval_dir, - get_experiment_name() + "." + report.output_format) - self.add_report(report, outfile=outfile) - self.add_step( - 'publish-absolute-report', subprocess.call, ['publish', outfile]) - - def add_comparison_table_step(self, **kwargs): - """Add a step that makes pairwise revision comparisons. 
- - Create comparative reports for all pairs of Fast Downward - revisions. Each report pairs up the runs of the same config and - lists the two absolute attribute values and their difference - for all attributes in kwargs["attributes"]. - - All *kwargs* will be passed to the CompareConfigsReport class. - If the keyword argument *attributes* is not specified, a - default list of attributes is used. :: - - exp.add_comparison_table_step(attributes=["coverage"]) - - """ - kwargs.setdefault("attributes", self.DEFAULT_TABLE_ATTRIBUTES) - - def make_comparison_tables(): - for rev1, rev2 in itertools.combinations(self._revisions, 2): - compared_configs = [] - for config in self._configs: - config_nick = config.nick - compared_configs.append( - ("%s-%s" % (rev1, config_nick), - "%s-%s" % (rev2, config_nick), - "Diff (%s)" % config_nick)) - report = ComparativeReport(compared_configs, **kwargs) - outfile = os.path.join( - self.eval_dir, - "%s-%s-%s-compare.%s" % ( - self.name, rev1, rev2, report.output_format)) - report(self.eval_dir, outfile) - - def publish_comparison_tables(): - for rev1, rev2 in itertools.combinations(self._revisions, 2): - outfile = os.path.join( - self.eval_dir, - "%s-%s-%s-compare.html" % (self.name, rev1, rev2)) - subprocess.call(["publish", outfile]) - - self.add_step("make-comparison-tables", make_comparison_tables) - self.add_step( - "publish-comparison-tables", publish_comparison_tables) - - def add_scatter_plot_step(self, relative=False, attributes=None): - """Add step creating (relative) scatter plots for all revision pairs. - - Create a scatter plot for each combination of attribute, - configuration and revisions pair. If *attributes* is not - specified, a list of common scatter plot attributes is used. - For portfolios all attributes except "cost", "coverage" and - "plan_length" will be ignored. 
:: - - exp.add_scatter_plot_step(attributes=["expansions"]) - - """ - if relative: - report_class = RelativeScatterPlotReport - scatter_dir = os.path.join(self.eval_dir, "scatter-relative") - step_name = "make-relative-scatter-plots" - else: - report_class = ScatterPlotReport - scatter_dir = os.path.join(self.eval_dir, "scatter-absolute") - step_name = "make-absolute-scatter-plots" - if attributes is None: - attributes = self.DEFAULT_SCATTER_PLOT_ATTRIBUTES - - def make_scatter_plot(config_nick, rev1, rev2, attribute): - name = "-".join([self.name, rev1, rev2, attribute, config_nick]) - print "Make scatter plot for", name - algo1 = "{}-{}".format(rev1, config_nick) - algo2 = "{}-{}".format(rev2, config_nick) - report = report_class( - filter_config=[algo1, algo2], - attributes=[attribute], - get_category=lambda run1, run2: run1["domain"], - legend_location=(1.3, 0.5)) - report( - self.eval_dir, - os.path.join(scatter_dir, rev1 + "-" + rev2, name)) - - def make_scatter_plots(): - for config in self._configs: - for rev1, rev2 in itertools.combinations(self._revisions, 2): - for attribute in self.get_supported_attributes( - config.nick, attributes): - make_scatter_plot(config.nick, rev1, rev2, attribute) - - self.add_step(step_name, make_scatter_plots) diff --git a/experiments/issue726/relativescatter.py b/experiments/issue726/relativescatter.py deleted file mode 100644 index f74cb6e721..0000000000 --- a/experiments/issue726/relativescatter.py +++ /dev/null @@ -1,105 +0,0 @@ -# -*- coding: utf-8 -*- - -from collections import defaultdict - -from matplotlib import ticker - -from downward.reports.scatter import ScatterPlotReport -from downward.reports.plot import PlotReport, Matplotlib, MatplotlibPlot - - -# TODO: handle outliers - -# TODO: this is mostly copied from ScatterMatplotlib (scatter.py) -class RelativeScatterMatplotlib(Matplotlib): - @classmethod - def _plot(cls, report, axes, categories, styles): - # Display grid - axes.grid(b=True, linestyle='-', 
color='0.75') - - has_points = False - # Generate the scatter plots - for category, coords in sorted(categories.items()): - X, Y = zip(*coords) - axes.scatter(X, Y, s=42, label=category, **styles[category]) - if X and Y: - has_points = True - - if report.xscale == 'linear' or report.yscale == 'linear': - plot_size = report.missing_val * 1.01 - else: - plot_size = report.missing_val * 1.25 - - # make 5 ticks above and below 1 - yticks = [] - tick_step = report.ylim_top**(1/5.0) - for i in xrange(-5, 6): - yticks.append(tick_step**i) - axes.set_yticks(yticks) - axes.get_yaxis().set_major_formatter(ticker.ScalarFormatter()) - - axes.set_xlim(report.xlim_left or -1, report.xlim_right or plot_size) - axes.set_ylim(report.ylim_bottom or -1, report.ylim_top or plot_size) - - for axis in [axes.xaxis, axes.yaxis]: - MatplotlibPlot.change_axis_formatter( - axis, - report.missing_val if report.show_missing else None) - return has_points - - -class RelativeScatterPlotReport(ScatterPlotReport): - """ - Generate a scatter plot that shows a relative comparison of two - algorithms with regard to the given attribute. The attribute value - of algorithm 1 is shown on the x-axis and the relation to the value - of algorithm 2 on the y-axis. - """ - - def __init__(self, show_missing=True, get_category=None, **kwargs): - ScatterPlotReport.__init__(self, show_missing, get_category, **kwargs) - if self.output_format == 'tex': - raise "not supported" - else: - self.writer = RelativeScatterMatplotlib - - def _fill_categories(self, runs): - # We discard the *runs* parameter. 
- # Map category names to value tuples - categories = defaultdict(list) - self.ylim_bottom = 2 - self.ylim_top = 0.5 - self.xlim_left = float("inf") - for (domain, problem), runs in self.problem_runs.items(): - if len(runs) != 2: - continue - run1, run2 = runs - assert (run1['algorithm'] == self.algorithms[0] and - run2['algorithm'] == self.algorithms[1]) - val1 = run1.get(self.attribute) - val2 = run2.get(self.attribute) - if val1 is None or val2 is None: - continue - category = self.get_category(run1, run2) - assert val1 > 0, (domain, problem, self.algorithms[0], val1) - assert val2 > 0, (domain, problem, self.algorithms[1], val2) - x = val1 - y = val2 / float(val1) - - categories[category].append((x, y)) - - self.ylim_top = max(self.ylim_top, y) - self.ylim_bottom = min(self.ylim_bottom, y) - self.xlim_left = min(self.xlim_left, x) - - # center around 1 - if self.ylim_bottom < 1: - self.ylim_top = max(self.ylim_top, 1 / float(self.ylim_bottom)) - if self.ylim_top > 1: - self.ylim_bottom = min(self.ylim_bottom, 1 / float(self.ylim_top)) - return categories - - def _set_scales(self, xscale, yscale): - # ScatterPlot uses log-scaling on the x-axis by default. - PlotReport._set_scales( - self, xscale or self.attribute.scale or 'log', 'log') diff --git a/experiments/issue726/v1-opt.py b/experiments/issue726/v1-opt.py deleted file mode 100755 index 3102526da9..0000000000 --- a/experiments/issue726/v1-opt.py +++ /dev/null @@ -1,49 +0,0 @@ -#! 
/usr/bin/env python -# -*- coding: utf-8 -*- - -import os - -from lab.environments import LocalEnvironment, MaiaEnvironment - -import common_setup -from common_setup import IssueConfig, IssueExperiment -from relativescatter import RelativeScatterPlotReport - - -DIR = os.path.dirname(os.path.abspath(__file__)) -BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"] -REVISIONS = ["issue726-base", "issue726-v1"] -CONFIGS = [ - IssueConfig("blind", ["--search", "astar(blind())"]), - IssueConfig("lmcut", ["--search", "astar(lmcut())"]), -] -SUITE = common_setup.DEFAULT_OPTIMAL_SUITE -ENVIRONMENT = MaiaEnvironment( - priority=0, email="florian.pommerening@unibas.ch") - -if common_setup.is_test_run(): - SUITE = IssueExperiment.DEFAULT_TEST_SUITE - ENVIRONMENT = LocalEnvironment(processes=1) - -exp = IssueExperiment( - revisions=REVISIONS, - configs=CONFIGS, - environment=ENVIRONMENT, -) -exp.add_suite(BENCHMARKS_DIR, SUITE) - -exp.add_absolute_report_step() -exp.add_comparison_table_step() - -for attr in ["total_time", "search_time", "memory"]: - for rev1, rev2 in [("base", "v1")]: - for config_nick in ["blind", "lmcut"]: - exp.add_report(RelativeScatterPlotReport( - attributes=[attr], - filter_algorithm=["issue726-%s-%s" % (rev1, config_nick), - "issue726-%s-%s" % (rev2, config_nick)], - get_category=lambda r1, r2: r1["domain"], - ), - outfile="issue726-%s-%s-%s-%s.png" % (config_nick, attr, rev1, rev2)) - -exp.run_steps() diff --git a/experiments/issue726/v1-sat.py b/experiments/issue726/v1-sat.py deleted file mode 100755 index 9a6b5bdda6..0000000000 --- a/experiments/issue726/v1-sat.py +++ /dev/null @@ -1,56 +0,0 @@ -#! 
/usr/bin/env python -# -*- coding: utf-8 -*- - -import os - -from lab.environments import LocalEnvironment, MaiaEnvironment - -import common_setup -from common_setup import IssueConfig, IssueExperiment -from relativescatter import RelativeScatterPlotReport - - -DIR = os.path.dirname(os.path.abspath(__file__)) -BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"] -REVISIONS = ["issue726-base", "issue726-v1"] -CONFIGS = [ - IssueConfig( - "lama-first", - [], - driver_options=["--alias", "lama-first"]), - IssueConfig( - "lama", - [], - driver_options=["--alias", "seq-sat-lama-2011"]), - IssueConfig("ehc_ff", ["--heuristic", "h=ff()", "--search", "ehc(h, preferred=h)"]), -] -SUITE = common_setup.DEFAULT_SATISFICING_SUITE -ENVIRONMENT = MaiaEnvironment( - priority=0, email="florian.pommerening@unibas.ch") - -if common_setup.is_test_run(): - SUITE = IssueExperiment.DEFAULT_TEST_SUITE - ENVIRONMENT = LocalEnvironment(processes=1) - -exp = IssueExperiment( - revisions=REVISIONS, - configs=CONFIGS, - environment=ENVIRONMENT, -) -exp.add_suite(BENCHMARKS_DIR, SUITE) - -exp.add_absolute_report_step() -exp.add_comparison_table_step() - -for attr in ["total_time", "search_time", "memory"]: - for rev1, rev2 in [("base", "v1")]: - for config_nick in ["lama-first", "ehc_ff"]: - exp.add_report(RelativeScatterPlotReport( - attributes=[attr], - filter_algorithm=["issue726-%s-%s" % (rev1, config_nick), - "issue726-%s-%s" % (rev2, config_nick)], - get_category=lambda r1, r2: r1["domain"], - ), - outfile="issue726-%s-%s-%s-%s.png" % (config_nick, attr, rev1, rev2)) - -exp.run_steps() diff --git a/experiments/issue731/common_setup.py b/experiments/issue731/common_setup.py deleted file mode 100644 index fe0b9d655e..0000000000 --- a/experiments/issue731/common_setup.py +++ /dev/null @@ -1,398 +0,0 @@ -# -*- coding: utf-8 -*- - -import itertools -import os -import platform -import subprocess -import sys - -from lab.experiment import ARGPARSER -from lab import tools - -from downward.experiment 
import FastDownwardExperiment -from downward.reports.absolute import AbsoluteReport -from downward.reports.compare import ComparativeReport -from downward.reports.scatter import ScatterPlotReport - -from relativescatter import RelativeScatterPlotReport - - -def parse_args(): - ARGPARSER.add_argument( - "--test", - choices=["yes", "no", "auto"], - default="auto", - dest="test_run", - help="test experiment locally on a small suite if --test=yes or " - "--test=auto and we are not on a cluster") - return ARGPARSER.parse_args() - -ARGS = parse_args() - - -DEFAULT_OPTIMAL_SUITE = [ - 'airport', 'barman-opt11-strips', 'barman-opt14-strips', 'blocks', - 'childsnack-opt14-strips', 'depot', 'driverlog', - 'elevators-opt08-strips', 'elevators-opt11-strips', - 'floortile-opt11-strips', 'floortile-opt14-strips', 'freecell', - 'ged-opt14-strips', 'grid', 'gripper', 'hiking-opt14-strips', - 'logistics00', 'logistics98', 'miconic', 'movie', 'mprime', - 'mystery', 'nomystery-opt11-strips', 'openstacks-opt08-strips', - 'openstacks-opt11-strips', 'openstacks-opt14-strips', - 'openstacks-strips', 'parcprinter-08-strips', - 'parcprinter-opt11-strips', 'parking-opt11-strips', - 'parking-opt14-strips', 'pathways-noneg', 'pegsol-08-strips', - 'pegsol-opt11-strips', 'pipesworld-notankage', - 'pipesworld-tankage', 'psr-small', 'rovers', 'satellite', - 'scanalyzer-08-strips', 'scanalyzer-opt11-strips', - 'sokoban-opt08-strips', 'sokoban-opt11-strips', 'storage', - 'tetris-opt14-strips', 'tidybot-opt11-strips', - 'tidybot-opt14-strips', 'tpp', 'transport-opt08-strips', - 'transport-opt11-strips', 'transport-opt14-strips', - 'trucks-strips', 'visitall-opt11-strips', 'visitall-opt14-strips', - 'woodworking-opt08-strips', 'woodworking-opt11-strips', - 'zenotravel'] - -DEFAULT_SATISFICING_SUITE = [ - 'airport', 'assembly', 'barman-sat11-strips', - 'barman-sat14-strips', 'blocks', 'cavediving-14-adl', - 'childsnack-sat14-strips', 'citycar-sat14-adl', 'depot', - 'driverlog', 
'elevators-sat08-strips', 'elevators-sat11-strips', - 'floortile-sat11-strips', 'floortile-sat14-strips', 'freecell', - 'ged-sat14-strips', 'grid', 'gripper', 'hiking-sat14-strips', - 'logistics00', 'logistics98', 'maintenance-sat14-adl', 'miconic', - 'miconic-fulladl', 'miconic-simpleadl', 'movie', 'mprime', - 'mystery', 'nomystery-sat11-strips', 'openstacks', - 'openstacks-sat08-adl', 'openstacks-sat08-strips', - 'openstacks-sat11-strips', 'openstacks-sat14-strips', - 'openstacks-strips', 'optical-telegraphs', 'parcprinter-08-strips', - 'parcprinter-sat11-strips', 'parking-sat11-strips', - 'parking-sat14-strips', 'pathways', 'pathways-noneg', - 'pegsol-08-strips', 'pegsol-sat11-strips', 'philosophers', - 'pipesworld-notankage', 'pipesworld-tankage', 'psr-large', - 'psr-middle', 'psr-small', 'rovers', 'satellite', - 'scanalyzer-08-strips', 'scanalyzer-sat11-strips', 'schedule', - 'sokoban-sat08-strips', 'sokoban-sat11-strips', 'storage', - 'tetris-sat14-strips', 'thoughtful-sat14-strips', - 'tidybot-sat11-strips', 'tpp', 'transport-sat08-strips', - 'transport-sat11-strips', 'transport-sat14-strips', 'trucks', - 'trucks-strips', 'visitall-sat11-strips', 'visitall-sat14-strips', - 'woodworking-sat08-strips', 'woodworking-sat11-strips', - 'zenotravel'] - - -def get_script(): - """Get file name of main script.""" - return tools.get_script_path() - - -def get_script_dir(): - """Get directory of main script. - - Usually a relative directory (depends on how it was called by the user.)""" - return os.path.dirname(get_script()) - - -def get_experiment_name(): - """Get name for experiment. - - Derived from the absolute filename of the main script, e.g. - "/ham/spam/eggs.py" => "spam-eggs".""" - script = os.path.abspath(get_script()) - script_dir = os.path.basename(os.path.dirname(script)) - script_base = os.path.splitext(os.path.basename(script))[0] - return "%s-%s" % (script_dir, script_base) - - -def get_data_dir(): - """Get data dir for the experiment. 
- - This is the subdirectory "data" of the directory containing - the main script.""" - return os.path.join(get_script_dir(), "data", get_experiment_name()) - - -def get_repo_base(): - """Get base directory of the repository, as an absolute path. - - Search upwards in the directory tree from the main script until a - directory with a subdirectory named ".hg" is found. - - Abort if the repo base cannot be found.""" - path = os.path.abspath(get_script_dir()) - while os.path.dirname(path) != path: - if os.path.exists(os.path.join(path, ".hg")): - return path - path = os.path.dirname(path) - sys.exit("repo base could not be found") - - -def is_running_on_cluster(): - node = platform.node() - return ( - "cluster" in node or - node.startswith("gkigrid") or - node in ["habakuk", "turtur"]) - - -def is_running_on_cluster_login_node(): - return platform.node() == "login20.cluster.bc2.ch" - - -def can_publish(): - return is_running_on_cluster_login_node() or not is_running_on_cluster() - - -def publish(report_file): - if can_publish(): - subprocess.call(["publish", report_file]) - else: - print "publishing reports is not supported on this node" - - -def is_test_run(): - return ARGS.test_run == "yes" or ( - ARGS.test_run == "auto" and not is_running_on_cluster()) - - -def get_algo_nick(revision, config_nick): - return "{revision}-{config_nick}".format(**locals()) - - -class IssueConfig(object): - """Hold information about a planner configuration. - - See FastDownwardExperiment.add_algorithm() for documentation of the - constructor's options. 
- - """ - def __init__(self, nick, component_options, - build_options=None, driver_options=None): - self.nick = nick - self.component_options = component_options - self.build_options = build_options - self.driver_options = driver_options - - -class IssueExperiment(FastDownwardExperiment): - """Subclass of FastDownwardExperiment with some convenience features.""" - - DEFAULT_TEST_SUITE = ["gripper:prob01.pddl"] - - DEFAULT_TABLE_ATTRIBUTES = [ - "cost", - "coverage", - "error", - "evaluations", - "expansions", - "expansions_until_last_jump", - "generated", - "memory", - "quality", - "run_dir", - "score_evaluations", - "score_expansions", - "score_generated", - "score_memory", - "score_search_time", - "score_total_time", - "search_time", - "total_time", - ] - - DEFAULT_SCATTER_PLOT_ATTRIBUTES = [ - "evaluations", - "expansions", - "expansions_until_last_jump", - "initial_h_value", - "memory", - "search_time", - "total_time", - ] - - PORTFOLIO_ATTRIBUTES = [ - "cost", - "coverage", - "error", - "plan_length", - "run_dir", - ] - - def __init__(self, revisions=None, configs=None, path=None, **kwargs): - """ - - You can either specify both *revisions* and *configs* or none - of them. If they are omitted, you will need to call - exp.add_algorithm() manually. - - If *revisions* is given, it must be a non-empty list of - revision identifiers, which specify which planner versions to - use in the experiment. The same versions are used for - translator, preprocessor and search. :: - - IssueExperiment(revisions=["issue123", "4b3d581643"], ...) - - If *configs* is given, it must be a non-empty list of - IssueConfig objects. :: - - IssueExperiment(..., configs=[ - IssueConfig("ff", ["--search", "eager_greedy(ff())"]), - IssueConfig( - "lama", [], - driver_options=["--alias", "seq-sat-lama-2011"]), - ]) - - If *path* is specified, it must be the path to where the - experiment should be built (e.g. - /home/john/experiments/issue123/exp01/). 
If omitted, the - experiment path is derived automatically from the main - script's filename. Example:: - - script = experiments/issue123/exp01.py --> - path = experiments/issue123/data/issue123-exp01/ - - """ - - path = path or get_data_dir() - - FastDownwardExperiment.__init__(self, path=path, **kwargs) - - if (revisions and not configs) or (not revisions and configs): - raise ValueError( - "please provide either both or none of revisions and configs") - - for rev in revisions: - for config in configs: - self.add_algorithm( - get_algo_nick(rev, config.nick), - get_repo_base(), - rev, - config.component_options, - build_options=config.build_options, - driver_options=config.driver_options) - - self._revisions = revisions - self._configs = configs - - @classmethod - def _is_portfolio(cls, config_nick): - return "fdss" in config_nick - - @classmethod - def get_supported_attributes(cls, config_nick, attributes): - if cls._is_portfolio(config_nick): - return [attr for attr in attributes - if attr in cls.PORTFOLIO_ATTRIBUTES] - return attributes - - def add_absolute_report_step(self, **kwargs): - """Add step that makes an absolute report. - - Absolute reports are useful for experiments that don't compare - revisions. - - The report is written to the experiment evaluation directory. - - All *kwargs* will be passed to the AbsoluteReport class. If the - keyword argument *attributes* is not specified, a default list - of attributes is used. :: - - exp.add_absolute_report_step(attributes=["coverage"]) - - """ - kwargs.setdefault("attributes", self.DEFAULT_TABLE_ATTRIBUTES) - report = AbsoluteReport(**kwargs) - outfile = os.path.join( - self.eval_dir, - get_experiment_name() + "." + report.output_format) - self.add_report(report, name="make-absolute-report", outfile=outfile) - self.add_step("publish-absolute-report", publish, outfile) - - def add_comparison_table_step(self, **kwargs): - """Add a step that makes pairwise revision comparisons. 
- - Create comparative reports for all pairs of Fast Downward - revisions. Each report pairs up the runs of the same config and - lists the two absolute attribute values and their difference - for all attributes in kwargs["attributes"]. - - All *kwargs* will be passed to the CompareConfigsReport class. - If the keyword argument *attributes* is not specified, a - default list of attributes is used. :: - - exp.add_comparison_table_step(attributes=["coverage"]) - - """ - kwargs.setdefault("attributes", self.DEFAULT_TABLE_ATTRIBUTES) - - def get_revision_pairs_and_files(): - for rev1, rev2 in itertools.combinations(self._revisions, 2): - outfile = os.path.join( - self.eval_dir, - "%s-%s-%s-compare.html" % (self.name, rev1, rev2)) - yield (rev1, rev2, outfile) - - def make_comparison_tables(): - for rev1, rev2, outfile in get_revision_pairs_and_files(): - compared_configs = [] - for config in self._configs: - config_nick = config.nick - compared_configs.append( - ("%s-%s" % (rev1, config_nick), - "%s-%s" % (rev2, config_nick), - "Diff (%s)" % config_nick)) - report = ComparativeReport(compared_configs, **kwargs) - report(self.eval_dir, outfile) - - def publish_comparison_tables(): - for _, _, outfile in get_revision_pairs_and_files(): - publish(outfile) - - self.add_step("make-comparison-tables", make_comparison_tables) - self.add_step("publish-comparison-tables", publish_comparison_tables) - - def add_scatter_plot_step(self, relative=False, attributes=None): - """Add step creating (relative) scatter plots for all revision pairs. - - Create a scatter plot for each combination of attribute, - configuration and revisions pair. If *attributes* is not - specified, a list of common scatter plot attributes is used. - For portfolios all attributes except "cost", "coverage" and - "plan_length" will be ignored. 
:: - - exp.add_scatter_plot_step(attributes=["expansions"]) - - """ - if relative: - report_class = RelativeScatterPlotReport - scatter_dir = os.path.join(self.eval_dir, "scatter-relative") - step_name = "make-relative-scatter-plots" - else: - report_class = ScatterPlotReport - scatter_dir = os.path.join(self.eval_dir, "scatter-absolute") - step_name = "make-absolute-scatter-plots" - if attributes is None: - attributes = self.DEFAULT_SCATTER_PLOT_ATTRIBUTES - - def make_scatter_plot(config_nick, rev1, rev2, attribute): - name = "-".join([self.name, rev1, rev2, attribute, config_nick]) - print "Make scatter plot for", name - algo1 = "{}-{}".format(rev1, config_nick) - algo2 = "{}-{}".format(rev2, config_nick) - report = report_class( - filter_config=[algo1, algo2], - attributes=[attribute], - get_category=lambda run1, run2: run1["domain"], - legend_location=(1.3, 0.5)) - report( - self.eval_dir, - os.path.join(scatter_dir, rev1 + "-" + rev2, name)) - - def make_scatter_plots(): - for config in self._configs: - for rev1, rev2 in itertools.combinations(self._revisions, 2): - for attribute in self.get_supported_attributes( - config.nick, attributes): - make_scatter_plot(config.nick, rev1, rev2, attribute) - - self.add_step(step_name, make_scatter_plots) diff --git a/experiments/issue731/hash-microbenchmark/.gitignore b/experiments/issue731/hash-microbenchmark/.gitignore deleted file mode 100644 index 44e0458dfa..0000000000 --- a/experiments/issue731/hash-microbenchmark/.gitignore +++ /dev/null @@ -1,4 +0,0 @@ -/.obj/ -/benchmark32 -/benchmark64 -/Makefile.depend diff --git a/experiments/issue731/hash-microbenchmark/Makefile b/experiments/issue731/hash-microbenchmark/Makefile deleted file mode 100644 index 3a4e98985c..0000000000 --- a/experiments/issue731/hash-microbenchmark/Makefile +++ /dev/null @@ -1,145 +0,0 @@ -DOWNWARD_BITWIDTH ?= 32 - -HEADERS = \ - hash.h \ - -SOURCES = main.cc -TARGET = benchmark - -default: release - -OBJECT_SUFFIX_RELEASE = 
.release$(DOWNWARD_BITWIDTH) -TARGET_SUFFIX_RELEASE = $(DOWNWARD_BITWIDTH) -OBJECT_SUFFIX_DEBUG = .debug$(DOWNWARD_BITWIDTH) -TARGET_SUFFIX_DEBUG = -debug$(DOWNWARD_BITWIDTH) -OBJECT_SUFFIX_PROFILE = .profile$(DOWNWARD_BITWIDTH) -TARGET_SUFFIX_PROFILE = -profile$(DOWNWARD_BITWIDTH) - -OBJECTS_RELEASE = $(SOURCES:%.cc=.obj/%$(OBJECT_SUFFIX_RELEASE).o) -TARGET_RELEASE = $(TARGET)$(TARGET_SUFFIX_RELEASE) - -OBJECTS_DEBUG = $(SOURCES:%.cc=.obj/%$(OBJECT_SUFFIX_DEBUG).o) -TARGET_DEBUG = $(TARGET)$(TARGET_SUFFIX_DEBUG) - -OBJECTS_PROFILE = $(SOURCES:%.cc=.obj/%$(OBJECT_SUFFIX_PROFILE).o) -TARGET_PROFILE = $(TARGET)$(TARGET_SUFFIX_PROFILE) - -DEPEND = $(CXX) -MM - -## CXXFLAGS, LDFLAGS, POSTLINKOPT are options for compiler and linker -## that are used for all three targets (release, debug, and profile). -## (POSTLINKOPT are options that appear *after* all object files.) - -ifeq ($(DOWNWARD_BITWIDTH), 32) - BITWIDTHOPT = -m32 -else ifeq ($(DOWNWARD_BITWIDTH), 64) - BITWIDTHOPT = -m64 -else - $(error Bad value for DOWNWARD_BITWIDTH) -endif - -CXXFLAGS = -CXXFLAGS += -g -CXXFLAGS += $(BITWIDTHOPT) -# Note: we write "-std=c++0x" rather than "-std=c++11" to support gcc 4.4. -CXXFLAGS += -std=c++0x -Wall -Wextra -pedantic -Wno-deprecated -Werror - -LDFLAGS = -LDFLAGS += $(BITWIDTHOPT) -LDFLAGS += -g - -POSTLINKOPT = - -CXXFLAGS_RELEASE = -O3 -DNDEBUG -fomit-frame-pointer -CXXFLAGS_DEBUG = -O3 -CXXFLAGS_PROFILE = -O3 -pg - -LDFLAGS_RELEASE = -LDFLAGS_DEBUG = -LDFLAGS_PROFILE = -pg - -POSTLINKOPT_RELEASE = -POSTLINKOPT_DEBUG = -POSTLINKOPT_PROFILE = - -LDFLAGS_RELEASE += -static -static-libgcc - -POSTLINKOPT_RELEASE += -Wl,-Bstatic -lrt -POSTLINKOPT_DEBUG += -lrt -POSTLINKOPT_PROFILE += -lrt - -all: release debug profile - -## Build rules for the release target follow. 
- -release: $(TARGET_RELEASE) - -$(TARGET_RELEASE): $(OBJECTS_RELEASE) - $(CXX) $(LDFLAGS) $(LDFLAGS_RELEASE) $(OBJECTS_RELEASE) $(POSTLINKOPT) $(POSTLINKOPT_RELEASE) -o $(TARGET_RELEASE) - -$(OBJECTS_RELEASE): .obj/%$(OBJECT_SUFFIX_RELEASE).o: %.cc - @mkdir -p $$(dirname $@) - $(CXX) $(CXXFLAGS) $(CXXFLAGS_RELEASE) -c $< -o $@ - -## Build rules for the debug target follow. - -debug: $(TARGET_DEBUG) - -$(TARGET_DEBUG): $(OBJECTS_DEBUG) - $(CXX) $(LDFLAGS) $(LDFLAGS_DEBUG) $(OBJECTS_DEBUG) $(POSTLINKOPT) $(POSTLINKOPT_DEBUG) -o $(TARGET_DEBUG) - -$(OBJECTS_DEBUG): .obj/%$(OBJECT_SUFFIX_DEBUG).o: %.cc - @mkdir -p $$(dirname $@) - $(CXX) $(CXXFLAGS) $(CXXFLAGS_DEBUG) -c $< -o $@ - -## Build rules for the profile target follow. - -profile: $(TARGET_PROFILE) - -$(TARGET_PROFILE): $(OBJECTS_PROFILE) - $(CXX) $(LDFLAGS) $(LDFLAGS_PROFILE) $(OBJECTS_PROFILE) $(POSTLINKOPT) $(POSTLINKOPT_PROFILE) -o $(TARGET_PROFILE) - -$(OBJECTS_PROFILE): .obj/%$(OBJECT_SUFFIX_PROFILE).o: %.cc - @mkdir -p $$(dirname $@) - $(CXX) $(CXXFLAGS) $(CXXFLAGS_PROFILE) -c $< -o $@ - -## Additional targets follow. - -PROFILE: $(TARGET_PROFILE) - ./$(TARGET_PROFILE) $(ARGS_PROFILE) - gprof $(TARGET_PROFILE) | (cleanup-profile 2> /dev/null || cat) > PROFILE - -clean: - rm -rf .obj - rm -f *~ *.pyc - rm -f Makefile.depend gmon.out PROFILE core - rm -f sas_plan - -distclean: clean - rm -f $(TARGET_RELEASE) $(TARGET_DEBUG) $(TARGET_PROFILE) - -## NOTE: If we just call gcc -MM on a source file that lives within a -## subdirectory, it will strip the directory part in the output. Hence -## the for loop with the sed call. 
- -Makefile.depend: $(SOURCES) $(HEADERS) - rm -f Makefile.temp - for source in $(SOURCES) ; do \ - $(DEPEND) $(CXXFLAGS) $$source > Makefile.temp0; \ - objfile=$${source%%.cc}.o; \ - sed -i -e "s@^[^:]*:@$$objfile:@" Makefile.temp0; \ - cat Makefile.temp0 >> Makefile.temp; \ - done - rm -f Makefile.temp0 Makefile.depend - sed -e "s@\(.*\)\.o:\(.*\)@.obj/\1$(OBJECT_SUFFIX_RELEASE).o:\2@" Makefile.temp >> Makefile.depend - sed -e "s@\(.*\)\.o:\(.*\)@.obj/\1$(OBJECT_SUFFIX_DEBUG).o:\2@" Makefile.temp >> Makefile.depend - sed -e "s@\(.*\)\.o:\(.*\)@.obj/\1$(OBJECT_SUFFIX_PROFILE).o:\2@" Makefile.temp >> Makefile.depend - rm -f Makefile.temp - -ifneq ($(MAKECMDGOALS),clean) - ifneq ($(MAKECMDGOALS),distclean) - -include Makefile.depend - endif -endif - -.PHONY: default all release debug profile clean distclean diff --git a/experiments/issue731/hash-microbenchmark/hash.h b/experiments/issue731/hash-microbenchmark/hash.h deleted file mode 100644 index 615038a14e..0000000000 --- a/experiments/issue731/hash-microbenchmark/hash.h +++ /dev/null @@ -1,358 +0,0 @@ -#ifndef UTILS_HASH_H -#define UTILS_HASH_H - -#include -#include -#include -#include -#include -#include -#include - -namespace utils { -/* - We provide a family of hash functions that are supposedly higher - quality than what is guaranteed by the standard library. Changing a - single bit in the input should typically change around half of the - bits in the final hash value. The hash functions we previously used - turned out to cluster when we tried hash tables with open addressing - for state registries. - - The low-level hash functions are based on lookup3.c by Bob Jenkins, - May 2006, public domain. See http://www.burtleburtle.net/bob/c/lookup3.c. - - To hash an object x, it is represented as a sequence of 32-bit - pieces (called the "code" for x, written code(x) in the following) - that are "fed" to the main hashing function (implemented in class - HashState) one by one. 
This allows a compositional approach to - hashing. For example, the code for a pair p is the concatenation of - code(x.first) and code(x.second). - - A simpler compositional approach to hashing would first hash the - components of an object and then combine the hash values, and this - is what a previous version of our code did. The approach with an - explicit HashState object is stronger because the internal hash - state is larger (96 bits) than the final hash value and hence pairs - and where x and x' have the same hash value don't - necessarily collide. Another advantage of our approach is that we - can use the same overall hashing approach to generate hash values of - different types (e.g. 32-bit vs. 64-bit unsigned integers). - - To extend the hashing mechanism to further classes, provide a - template specialization for the "feed" function. This must satisfy - the following requirements: - - A) If x and y are objects of the same type, they should have code(x) - = code(y) iff x = y. That is, the code sequence should uniquely - describe each logically distinct object. - - This requirement avoids unnecessary hash collisions. Of course, - there will still be "necessary" hash collisions because different - code sequences can collide in the low-level hash function. - - B) To play nicely with composition, we additionally require that feed - implements a prefix code, i.e., for objects x != y of the same - type, code(x) must not be a prefix of code(y). - - This requirement makes it much easier to define non-colliding - code sequences for composite objects such as pairs via - concatenation: if != , then code(a) != code(a') - and code(b) != code(b') is *not* sufficient for concat(code(a), - code(b)) != concat(code(a'), code(b')). However, if we require a - prefix code, it *is* sufficient and the resulting code will again - be a prefix code. - - Note that objects "of the same type" is meant as "logical type" - rather than C++ type. 
- - For example, for objects such as vectors where we expect - different-length vectors to be combined in the same containers (= - have the same logical type), we include the length of the vector as - the first element in the code to ensure the prefix code property. - - In contrast, for integer arrays encoding states, we *do not* include - the length as a prefix because states of different sizes are - considered to be different logical types and should not be mixed in - the same container, even though they are represented by the same C++ - type. -*/ - -/* - Circular rotation (http://stackoverflow.com/a/31488147/224132). -*/ -inline uint32_t rotate(uint32_t value, uint32_t offset) { - return (value << offset) | (value >> (32 - offset)); -} - -/* - Store the state of the hashing process. - - This class can either be used by specializing the template function - utils::feed() (recommended, see below), or by working with it directly. -*/ -class HashState { - std::uint32_t a, b, c; - int pending_values; - - /* - Mix the three 32-bit values bijectively. - - Any information in (a, b, c) before mix() is still in (a, b, c) after - mix(). - */ - void mix() { - a -= c; - a ^= rotate(c, 4); - c += b; - b -= a; - b ^= rotate(a, 6); - a += c; - c -= b; - c ^= rotate(b, 8); - b += a; - a -= c; - a ^= rotate(c, 16); - c += b; - b -= a; - b ^= rotate(a, 19); - a += c; - c -= b; - c ^= rotate(b, 4); - b += a; - } - - /* - Final mixing of the three 32-bit values (a, b, c) into c. - - Triples of (a, b, c) differing in only a few bits will usually produce - values of c that look totally different. 
- */ - void final_mix() { - c ^= b; - c -= rotate(b, 14); - a ^= c; - a -= rotate(c, 11); - b ^= a; - b -= rotate(a, 25); - c ^= b; - c -= rotate(b, 16); - a ^= c; - a -= rotate(c, 4); - b ^= a; - b -= rotate(a, 14); - c ^= b; - c -= rotate(b, 24); - } - -public: - HashState() - : a(0xdeadbeef), - b(a), - c(a), - pending_values(0) { - } - - void feed(std::uint32_t value) { - assert(pending_values != -1); - if (pending_values == 3) { - mix(); - pending_values = 0; - } - if (pending_values == 0) { - a += value; - ++pending_values; - } else if (pending_values == 1) { - b += value; - ++pending_values; - } else if (pending_values == 2) { - c += value; - ++pending_values; - } - } - - /* - After calling this method, it is illegal to use the HashState object - further, i.e., make further calls to feed, get_hash32 or get_hash64. We - set pending_values = -1 to catch such illegal usage in debug mode. - */ - std::uint32_t get_hash32() { - assert(pending_values != -1); - if (pending_values) { - /* - pending_values == 0 can only hold if we never called - feed(), i.e., if we are hashing an empty sequence. - In this case we don't call final_mix for compatibility - with the original hash function by Jenkins. - */ - final_mix(); - } - pending_values = -1; - return c; - } - - /* - See comment for get_hash32. - */ - std::uint64_t get_hash64() { - assert(pending_values != -1); - if (pending_values) { - // See comment for get_hash32. - final_mix(); - } - pending_values = -1; - return (static_cast(b) << 32) | c; - } -}; - - -/* - These functions add a new object to an existing HashState object. - - To add hashing support for a user type X, provide an override - for utils::feed(HashState &hash_state, const X &value). 
-*/ -static_assert( - sizeof(int) == sizeof(std::uint32_t), - "int and uint32_t have different sizes"); -inline void feed(HashState &hash_state, int value) { - hash_state.feed(static_cast(value)); -} - -static_assert( - sizeof(unsigned int) == sizeof(std::uint32_t), - "unsigned int and uint32_t have different sizes"); -inline void feed(HashState &hash_state, unsigned int value) { - hash_state.feed(static_cast(value)); -} - -inline void feed(HashState &hash_state, std::uint64_t value) { - hash_state.feed(static_cast(value)); - value >>= 32; - hash_state.feed(static_cast(value)); -} - -template -void feed(HashState &hash_state, const T *p) { - // This is wasteful in 32-bit mode, but we plan to discontinue 32-bit compiles anyway. - feed(hash_state, reinterpret_cast(p)); -} - -template -void feed(HashState &hash_state, const std::pair &p) { - feed(hash_state, p.first); - feed(hash_state, p.second); -} - -template -void feed(HashState &hash_state, const std::vector &vec) { - /* - Feed vector size to ensure that no two different vectors of the same type - have the same code prefix. - */ - feed(hash_state, vec.size()); - for (const T &item : vec) { - feed(hash_state, item); - } -} - - -/* - Public hash functions. - - get_hash() is used internally by the HashMap and HashSet classes below. In - more exotic use cases, such as implementing a custom hash table, you can also - use `get_hash32()`, `get_hash64()` and `get_hash()` directly. -*/ -template -std::uint32_t get_hash32(const T &value) { - HashState hash_state; - feed(hash_state, value); - return hash_state.get_hash32(); -} - -template -std::uint64_t get_hash64(const T &value) { - HashState hash_state; - feed(hash_state, value); - return hash_state.get_hash64(); -} - -template -std::size_t get_hash(const T &value) { - return static_cast(get_hash64(value)); -} - - -// This struct should only be used by HashMap and HashSet below. 
-template -struct Hash { - std::size_t operator()(const T &val) const { - return get_hash(val); - } -}; - -/* - Aliases for hash sets and hash maps in user code. - - Use these aliases for hashing types T that don't have a standard std::hash - specialization. - - To hash types that are not supported out of the box, implement utils::feed. -*/ -template -using HashMap = std::unordered_map>; - -template -using HashSet = std::unordered_set>; - - -/* - Legacy hash functions. - - We plan to remove these legacy hash functions since implementing std::hash - for non-user-defined types T causes undefined behaviour - (http://en.cppreference.com/w/cpp/language/extending_std) and maintaining - only one set of user-defined hash functions is easier. -*/ - -template -inline void hash_combine(size_t &hash, const T &value) { - std::hash hasher; - /* - The combination of hash values is based on issue 6.18 in - http://www.open-std.org/JTC1/SC22/WG21/docs/papers/2005/n1756.pdf. - Boost combines hash values in the same way. 
- */ - hash ^= hasher(value) + 0x9e3779b9 + (hash << 6) + (hash >> 2); -} - -template -size_t hash_sequence(const Sequence &data, size_t length) { - size_t hash = 0; - for (size_t i = 0; i < length; ++i) { - hash_combine(hash, data[i]); - } - return hash; -} -} - -namespace std { -template -struct hash> { - size_t operator()(const std::vector &vec) const { - return utils::hash_sequence(vec, vec.size()); - } -}; - -template -struct hash> { - size_t operator()(const std::pair &pair) const { - size_t hash = 0; - utils::hash_combine(hash, pair.first); - utils::hash_combine(hash, pair.second); - return hash; - } -}; -} - -#endif diff --git a/experiments/issue731/hash-microbenchmark/main.cc b/experiments/issue731/hash-microbenchmark/main.cc deleted file mode 100644 index 0e9a1f8803..0000000000 --- a/experiments/issue731/hash-microbenchmark/main.cc +++ /dev/null @@ -1,127 +0,0 @@ -#include -#include -#include -#include -#include -#include - -#include "hash.h" - -using namespace std; - - -static void benchmark(const string &desc, int num_calls, - const function &func) { - cout << "Running " << desc << " " << num_calls << " times:" << flush; - - clock_t start = clock(); - for (int j = 0; j < num_calls; ++j) - func(); - clock_t end = clock(); - double duration = static_cast(end - start) / CLOCKS_PER_SEC; - cout << " " << duration << "s" << endl; -} - - -static int scramble(int i) { - return (0xdeadbeef * i) ^ 0xfeedcafe; -} - - -int main(int, char **) { - const int REPETITIONS = 2; - const int NUM_CALLS = 1; - const int NUM_INSERTIONS = 10000000; - const int NUM_READ_PASSES = 10; - - for (int i = 0; i < REPETITIONS; ++i) { - benchmark("nothing", NUM_CALLS, [] () {}); - cout << endl; - - benchmark("insert sequential int with BoostHash", NUM_CALLS, - [&]() { - unordered_set s; - for (int i = 0; i < NUM_INSERTIONS; ++i) { - s.insert(i); - } - }); - benchmark("insert sequential int with BurtleFeed", NUM_CALLS, - [&]() { - utils::HashSet s; - for (int i = 0; i < NUM_INSERTIONS; 
++i) { - s.insert(i); - } - }); - cout << endl; - - benchmark("insert scrambled int with BoostHash", NUM_CALLS, - [&]() { - unordered_set s; - for (int i = 0; i < NUM_INSERTIONS; ++i) { - s.insert(scramble(i)); - } - }); - benchmark("insert scrambled int with BurtleFeed", NUM_CALLS, - [&]() { - utils::HashSet s; - for (int i = 0; i < NUM_INSERTIONS; ++i) { - s.insert(scramble(i)); - } - }); - cout << endl; - - benchmark("insert, then read sequential int with BoostHash", NUM_CALLS, - [&]() { - unordered_set s; - for (int i = 0; i < NUM_INSERTIONS; ++i) { - s.insert(i); - } - for (int j = 0; j < NUM_READ_PASSES; ++j) { - for (int i = 0; i < NUM_INSERTIONS; ++i) { - s.count(i); - } - } - }); - benchmark("insert, then read sequential int with BurtleFeed", NUM_CALLS, - [&]() { - utils::HashSet s; - for (int i = 0; i < NUM_INSERTIONS; ++i) { - s.insert(i); - } - for (int j = 0; j < NUM_READ_PASSES; ++j) { - for (int i = 0; i < NUM_INSERTIONS; ++i) { - s.count(i); - } - } - }); - cout << endl; - - benchmark("insert, then read scrambled int with BoostHash", NUM_CALLS, - [&]() { - unordered_set s; - for (int i = 0; i < NUM_INSERTIONS; ++i) { - s.insert(scramble(i)); - } - for (int j = 0; j < NUM_READ_PASSES; ++j) { - for (int i = 0; i < NUM_INSERTIONS; ++i) { - s.count(i); - } - } - }); - benchmark("insert, then read scrambled int with BurtleFeed", NUM_CALLS, - [&]() { - utils::HashSet s; - for (int i = 0; i < NUM_INSERTIONS; ++i) { - s.insert(scramble(i)); - } - for (int j = 0; j < NUM_READ_PASSES; ++j) { - for (int i = 0; i < NUM_INSERTIONS; ++i) { - s.count(i); - } - } - }); - cout << endl; - } - - return 0; -} diff --git a/experiments/issue731/relativescatter.py b/experiments/issue731/relativescatter.py deleted file mode 100644 index f74cb6e721..0000000000 --- a/experiments/issue731/relativescatter.py +++ /dev/null @@ -1,105 +0,0 @@ -# -*- coding: utf-8 -*- - -from collections import defaultdict - -from matplotlib import ticker - -from downward.reports.scatter import 
ScatterPlotReport -from downward.reports.plot import PlotReport, Matplotlib, MatplotlibPlot - - -# TODO: handle outliers - -# TODO: this is mostly copied from ScatterMatplotlib (scatter.py) -class RelativeScatterMatplotlib(Matplotlib): - @classmethod - def _plot(cls, report, axes, categories, styles): - # Display grid - axes.grid(b=True, linestyle='-', color='0.75') - - has_points = False - # Generate the scatter plots - for category, coords in sorted(categories.items()): - X, Y = zip(*coords) - axes.scatter(X, Y, s=42, label=category, **styles[category]) - if X and Y: - has_points = True - - if report.xscale == 'linear' or report.yscale == 'linear': - plot_size = report.missing_val * 1.01 - else: - plot_size = report.missing_val * 1.25 - - # make 5 ticks above and below 1 - yticks = [] - tick_step = report.ylim_top**(1/5.0) - for i in xrange(-5, 6): - yticks.append(tick_step**i) - axes.set_yticks(yticks) - axes.get_yaxis().set_major_formatter(ticker.ScalarFormatter()) - - axes.set_xlim(report.xlim_left or -1, report.xlim_right or plot_size) - axes.set_ylim(report.ylim_bottom or -1, report.ylim_top or plot_size) - - for axis in [axes.xaxis, axes.yaxis]: - MatplotlibPlot.change_axis_formatter( - axis, - report.missing_val if report.show_missing else None) - return has_points - - -class RelativeScatterPlotReport(ScatterPlotReport): - """ - Generate a scatter plot that shows a relative comparison of two - algorithms with regard to the given attribute. The attribute value - of algorithm 1 is shown on the x-axis and the relation to the value - of algorithm 2 on the y-axis. - """ - - def __init__(self, show_missing=True, get_category=None, **kwargs): - ScatterPlotReport.__init__(self, show_missing, get_category, **kwargs) - if self.output_format == 'tex': - raise "not supported" - else: - self.writer = RelativeScatterMatplotlib - - def _fill_categories(self, runs): - # We discard the *runs* parameter. 
- # Map category names to value tuples - categories = defaultdict(list) - self.ylim_bottom = 2 - self.ylim_top = 0.5 - self.xlim_left = float("inf") - for (domain, problem), runs in self.problem_runs.items(): - if len(runs) != 2: - continue - run1, run2 = runs - assert (run1['algorithm'] == self.algorithms[0] and - run2['algorithm'] == self.algorithms[1]) - val1 = run1.get(self.attribute) - val2 = run2.get(self.attribute) - if val1 is None or val2 is None: - continue - category = self.get_category(run1, run2) - assert val1 > 0, (domain, problem, self.algorithms[0], val1) - assert val2 > 0, (domain, problem, self.algorithms[1], val2) - x = val1 - y = val2 / float(val1) - - categories[category].append((x, y)) - - self.ylim_top = max(self.ylim_top, y) - self.ylim_bottom = min(self.ylim_bottom, y) - self.xlim_left = min(self.xlim_left, x) - - # center around 1 - if self.ylim_bottom < 1: - self.ylim_top = max(self.ylim_top, 1 / float(self.ylim_bottom)) - if self.ylim_top > 1: - self.ylim_bottom = min(self.ylim_bottom, 1 / float(self.ylim_top)) - return categories - - def _set_scales(self, xscale, yscale): - # ScatterPlot uses log-scaling on the x-axis by default. - PlotReport._set_scales( - self, xscale or self.attribute.scale or 'log', 'log') diff --git a/experiments/issue731/v4-opt.py b/experiments/issue731/v4-opt.py deleted file mode 100755 index 1e840e1fb7..0000000000 --- a/experiments/issue731/v4-opt.py +++ /dev/null @@ -1,48 +0,0 @@ -#! 
/usr/bin/env python -# -*- coding: utf-8 -*- - -import os - -from lab.environments import LocalEnvironment, BaselSlurmEnvironment - -import common_setup -from common_setup import IssueConfig, IssueExperiment - - -DIR = os.path.dirname(os.path.abspath(__file__)) -BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"] -REVISIONS = ["issue731-base", "issue731-v4"] -BUILDS = ["release32"] -SEARCHES = [ - ("blind", "astar(blind())"), - ("divpot", "astar(diverse_potentials())"), - ("cegar", "astar(cegar())"), - ("systematic2", "astar(cpdbs(systematic(2)))"), - ("ipdb", "astar(ipdb())"), -] -CONFIGS = [ - IssueConfig( - "{nick}-{build}".format(**locals()), - ["--search", search], - build_options=[build], - driver_options=["--build", build]) - for nick, search in SEARCHES - for build in BUILDS -] -SUITE = common_setup.DEFAULT_OPTIMAL_SUITE -ENVIRONMENT = BaselSlurmEnvironment( - priority=0, email="jendrik.seipp@unibas.ch") - -if common_setup.is_test_run(): - SUITE = IssueExperiment.DEFAULT_TEST_SUITE - ENVIRONMENT = LocalEnvironment(processes=1) - -exp = IssueExperiment( - revisions=REVISIONS, - configs=CONFIGS, - environment=ENVIRONMENT, -) -exp.add_suite(BENCHMARKS_DIR, SUITE) -exp.add_comparison_table_step() - -exp.run_steps() diff --git a/experiments/issue731/v4-sat.py b/experiments/issue731/v4-sat.py deleted file mode 100755 index 730164925a..0000000000 --- a/experiments/issue731/v4-sat.py +++ /dev/null @@ -1,56 +0,0 @@ -#! 
/usr/bin/env python -# -*- coding: utf-8 -*- - -import os - -from lab.environments import LocalEnvironment, BaselSlurmEnvironment - -import common_setup -from common_setup import IssueConfig, IssueExperiment - - -DIR = os.path.dirname(os.path.abspath(__file__)) -BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"] -REVISIONS = ["issue731-base", "issue731-v4"] -BUILDS = ["release32"] -SEARCHES = [ - ("ff_lazy", ["--heuristic", "h=ff()", "--search", "lazy_greedy([h], preferred=[h])"]), - ("cea_lazy", ["--heuristic", "h=cea()", "--search", "lazy_greedy([h], preferred=[h])"]), - ("type_based", ["--heuristic", "h=ff()", "--search", "eager(alt([type_based([h, g()])]))"]), - ("zhu_givan", [ - "--heuristic", "hlm=lmcount(lm_zg())", - "--search", """lazy_greedy([hlm], preferred=[hlm])"""]), -] -CONFIGS = [ - IssueConfig( - "{nick}-{build}".format(**locals()), - search, - build_options=[build], - driver_options=["--build", build]) - for nick, search in SEARCHES - for build in BUILDS -] + [ - IssueConfig( - "lama-first-{build}".format(**locals()), - [], - build_options=[build], - driver_options=["--build", build, "--alias", "lama-first"]) - for build in BUILDS -] -SUITE = common_setup.DEFAULT_SATISFICING_SUITE -ENVIRONMENT = BaselSlurmEnvironment( - priority=0, email="jendrik.seipp@unibas.ch") - -if common_setup.is_test_run(): - SUITE = IssueExperiment.DEFAULT_TEST_SUITE - ENVIRONMENT = LocalEnvironment(processes=1) - -exp = IssueExperiment( - revisions=REVISIONS, - configs=CONFIGS, - environment=ENVIRONMENT, -) -exp.add_suite(BENCHMARKS_DIR, SUITE) -exp.add_comparison_table_step() - -exp.run_steps() diff --git a/experiments/issue731/v5-opt.py b/experiments/issue731/v5-opt.py deleted file mode 100755 index 5d8d82f959..0000000000 --- a/experiments/issue731/v5-opt.py +++ /dev/null @@ -1,63 +0,0 @@ -#! 
/usr/bin/env python -# -*- coding: utf-8 -*- - -import itertools -import os - -from lab.environments import LocalEnvironment, BaselSlurmEnvironment - -from downward.reports.compare import ComparativeReport - -import common_setup -from common_setup import IssueConfig, IssueExperiment - - -DIR = os.path.dirname(os.path.abspath(__file__)) -BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"] -REVISIONS = ["issue731-v5-base", "issue731-v5"] -BUILDS = ["release32", "release64"] -SEARCHES = [ - ("blind", "astar(blind())"), - ("systematic2", "astar(cpdbs(systematic(2)))"), - ("ipdb", "astar(ipdb())"), -] -CONFIGS = [ - IssueConfig( - "{nick}-{build}".format(**locals()), - ["--search", search], - build_options=[build], - driver_options=["--build", build]) - for nick, search in SEARCHES - for build in BUILDS -] -SUITE = common_setup.DEFAULT_OPTIMAL_SUITE -ENVIRONMENT = BaselSlurmEnvironment( - email="jendrik.seipp@unibas.ch") - -if common_setup.is_test_run(): - SUITE = IssueExperiment.DEFAULT_TEST_SUITE - ENVIRONMENT = LocalEnvironment(processes=1) - -exp = IssueExperiment( - revisions=REVISIONS, - configs=CONFIGS, - environment=ENVIRONMENT, -) -exp.add_suite(BENCHMARKS_DIR, SUITE) -exp.add_comparison_table_step() - -# Compare builds. 
-for build1, build2 in itertools.combinations(BUILDS, 2): - for rev in REVISIONS: - algorithm_pairs = [ - ("{rev}-{config_nick}-{build1}".format(**locals()), - "{rev}-{config_nick}-{build2}".format(**locals()), - "Diff ({config_nick}-{rev})".format(**locals())) - for config_nick, search in SEARCHES] - exp.add_report( - ComparativeReport( - algorithm_pairs, - attributes=IssueExperiment.DEFAULT_TABLE_ATTRIBUTES), - name="issue731-{build1}-vs-{build2}-{rev}".format(**locals())) - -exp.run_steps() diff --git a/experiments/issue732/common_setup.py b/experiments/issue732/common_setup.py deleted file mode 100644 index fe0b9d655e..0000000000 --- a/experiments/issue732/common_setup.py +++ /dev/null @@ -1,398 +0,0 @@ -# -*- coding: utf-8 -*- - -import itertools -import os -import platform -import subprocess -import sys - -from lab.experiment import ARGPARSER -from lab import tools - -from downward.experiment import FastDownwardExperiment -from downward.reports.absolute import AbsoluteReport -from downward.reports.compare import ComparativeReport -from downward.reports.scatter import ScatterPlotReport - -from relativescatter import RelativeScatterPlotReport - - -def parse_args(): - ARGPARSER.add_argument( - "--test", - choices=["yes", "no", "auto"], - default="auto", - dest="test_run", - help="test experiment locally on a small suite if --test=yes or " - "--test=auto and we are not on a cluster") - return ARGPARSER.parse_args() - -ARGS = parse_args() - - -DEFAULT_OPTIMAL_SUITE = [ - 'airport', 'barman-opt11-strips', 'barman-opt14-strips', 'blocks', - 'childsnack-opt14-strips', 'depot', 'driverlog', - 'elevators-opt08-strips', 'elevators-opt11-strips', - 'floortile-opt11-strips', 'floortile-opt14-strips', 'freecell', - 'ged-opt14-strips', 'grid', 'gripper', 'hiking-opt14-strips', - 'logistics00', 'logistics98', 'miconic', 'movie', 'mprime', - 'mystery', 'nomystery-opt11-strips', 'openstacks-opt08-strips', - 'openstacks-opt11-strips', 'openstacks-opt14-strips', - 
'openstacks-strips', 'parcprinter-08-strips', - 'parcprinter-opt11-strips', 'parking-opt11-strips', - 'parking-opt14-strips', 'pathways-noneg', 'pegsol-08-strips', - 'pegsol-opt11-strips', 'pipesworld-notankage', - 'pipesworld-tankage', 'psr-small', 'rovers', 'satellite', - 'scanalyzer-08-strips', 'scanalyzer-opt11-strips', - 'sokoban-opt08-strips', 'sokoban-opt11-strips', 'storage', - 'tetris-opt14-strips', 'tidybot-opt11-strips', - 'tidybot-opt14-strips', 'tpp', 'transport-opt08-strips', - 'transport-opt11-strips', 'transport-opt14-strips', - 'trucks-strips', 'visitall-opt11-strips', 'visitall-opt14-strips', - 'woodworking-opt08-strips', 'woodworking-opt11-strips', - 'zenotravel'] - -DEFAULT_SATISFICING_SUITE = [ - 'airport', 'assembly', 'barman-sat11-strips', - 'barman-sat14-strips', 'blocks', 'cavediving-14-adl', - 'childsnack-sat14-strips', 'citycar-sat14-adl', 'depot', - 'driverlog', 'elevators-sat08-strips', 'elevators-sat11-strips', - 'floortile-sat11-strips', 'floortile-sat14-strips', 'freecell', - 'ged-sat14-strips', 'grid', 'gripper', 'hiking-sat14-strips', - 'logistics00', 'logistics98', 'maintenance-sat14-adl', 'miconic', - 'miconic-fulladl', 'miconic-simpleadl', 'movie', 'mprime', - 'mystery', 'nomystery-sat11-strips', 'openstacks', - 'openstacks-sat08-adl', 'openstacks-sat08-strips', - 'openstacks-sat11-strips', 'openstacks-sat14-strips', - 'openstacks-strips', 'optical-telegraphs', 'parcprinter-08-strips', - 'parcprinter-sat11-strips', 'parking-sat11-strips', - 'parking-sat14-strips', 'pathways', 'pathways-noneg', - 'pegsol-08-strips', 'pegsol-sat11-strips', 'philosophers', - 'pipesworld-notankage', 'pipesworld-tankage', 'psr-large', - 'psr-middle', 'psr-small', 'rovers', 'satellite', - 'scanalyzer-08-strips', 'scanalyzer-sat11-strips', 'schedule', - 'sokoban-sat08-strips', 'sokoban-sat11-strips', 'storage', - 'tetris-sat14-strips', 'thoughtful-sat14-strips', - 'tidybot-sat11-strips', 'tpp', 'transport-sat08-strips', - 'transport-sat11-strips', 
'transport-sat14-strips', 'trucks', - 'trucks-strips', 'visitall-sat11-strips', 'visitall-sat14-strips', - 'woodworking-sat08-strips', 'woodworking-sat11-strips', - 'zenotravel'] - - -def get_script(): - """Get file name of main script.""" - return tools.get_script_path() - - -def get_script_dir(): - """Get directory of main script. - - Usually a relative directory (depends on how it was called by the user.)""" - return os.path.dirname(get_script()) - - -def get_experiment_name(): - """Get name for experiment. - - Derived from the absolute filename of the main script, e.g. - "/ham/spam/eggs.py" => "spam-eggs".""" - script = os.path.abspath(get_script()) - script_dir = os.path.basename(os.path.dirname(script)) - script_base = os.path.splitext(os.path.basename(script))[0] - return "%s-%s" % (script_dir, script_base) - - -def get_data_dir(): - """Get data dir for the experiment. - - This is the subdirectory "data" of the directory containing - the main script.""" - return os.path.join(get_script_dir(), "data", get_experiment_name()) - - -def get_repo_base(): - """Get base directory of the repository, as an absolute path. - - Search upwards in the directory tree from the main script until a - directory with a subdirectory named ".hg" is found. 
- - Abort if the repo base cannot be found.""" - path = os.path.abspath(get_script_dir()) - while os.path.dirname(path) != path: - if os.path.exists(os.path.join(path, ".hg")): - return path - path = os.path.dirname(path) - sys.exit("repo base could not be found") - - -def is_running_on_cluster(): - node = platform.node() - return ( - "cluster" in node or - node.startswith("gkigrid") or - node in ["habakuk", "turtur"]) - - -def is_running_on_cluster_login_node(): - return platform.node() == "login20.cluster.bc2.ch" - - -def can_publish(): - return is_running_on_cluster_login_node() or not is_running_on_cluster() - - -def publish(report_file): - if can_publish(): - subprocess.call(["publish", report_file]) - else: - print "publishing reports is not supported on this node" - - -def is_test_run(): - return ARGS.test_run == "yes" or ( - ARGS.test_run == "auto" and not is_running_on_cluster()) - - -def get_algo_nick(revision, config_nick): - return "{revision}-{config_nick}".format(**locals()) - - -class IssueConfig(object): - """Hold information about a planner configuration. - - See FastDownwardExperiment.add_algorithm() for documentation of the - constructor's options. 
- - """ - def __init__(self, nick, component_options, - build_options=None, driver_options=None): - self.nick = nick - self.component_options = component_options - self.build_options = build_options - self.driver_options = driver_options - - -class IssueExperiment(FastDownwardExperiment): - """Subclass of FastDownwardExperiment with some convenience features.""" - - DEFAULT_TEST_SUITE = ["gripper:prob01.pddl"] - - DEFAULT_TABLE_ATTRIBUTES = [ - "cost", - "coverage", - "error", - "evaluations", - "expansions", - "expansions_until_last_jump", - "generated", - "memory", - "quality", - "run_dir", - "score_evaluations", - "score_expansions", - "score_generated", - "score_memory", - "score_search_time", - "score_total_time", - "search_time", - "total_time", - ] - - DEFAULT_SCATTER_PLOT_ATTRIBUTES = [ - "evaluations", - "expansions", - "expansions_until_last_jump", - "initial_h_value", - "memory", - "search_time", - "total_time", - ] - - PORTFOLIO_ATTRIBUTES = [ - "cost", - "coverage", - "error", - "plan_length", - "run_dir", - ] - - def __init__(self, revisions=None, configs=None, path=None, **kwargs): - """ - - You can either specify both *revisions* and *configs* or none - of them. If they are omitted, you will need to call - exp.add_algorithm() manually. - - If *revisions* is given, it must be a non-empty list of - revision identifiers, which specify which planner versions to - use in the experiment. The same versions are used for - translator, preprocessor and search. :: - - IssueExperiment(revisions=["issue123", "4b3d581643"], ...) - - If *configs* is given, it must be a non-empty list of - IssueConfig objects. :: - - IssueExperiment(..., configs=[ - IssueConfig("ff", ["--search", "eager_greedy(ff())"]), - IssueConfig( - "lama", [], - driver_options=["--alias", "seq-sat-lama-2011"]), - ]) - - If *path* is specified, it must be the path to where the - experiment should be built (e.g. - /home/john/experiments/issue123/exp01/). 
If omitted, the - experiment path is derived automatically from the main - script's filename. Example:: - - script = experiments/issue123/exp01.py --> - path = experiments/issue123/data/issue123-exp01/ - - """ - - path = path or get_data_dir() - - FastDownwardExperiment.__init__(self, path=path, **kwargs) - - if (revisions and not configs) or (not revisions and configs): - raise ValueError( - "please provide either both or none of revisions and configs") - - for rev in revisions: - for config in configs: - self.add_algorithm( - get_algo_nick(rev, config.nick), - get_repo_base(), - rev, - config.component_options, - build_options=config.build_options, - driver_options=config.driver_options) - - self._revisions = revisions - self._configs = configs - - @classmethod - def _is_portfolio(cls, config_nick): - return "fdss" in config_nick - - @classmethod - def get_supported_attributes(cls, config_nick, attributes): - if cls._is_portfolio(config_nick): - return [attr for attr in attributes - if attr in cls.PORTFOLIO_ATTRIBUTES] - return attributes - - def add_absolute_report_step(self, **kwargs): - """Add step that makes an absolute report. - - Absolute reports are useful for experiments that don't compare - revisions. - - The report is written to the experiment evaluation directory. - - All *kwargs* will be passed to the AbsoluteReport class. If the - keyword argument *attributes* is not specified, a default list - of attributes is used. :: - - exp.add_absolute_report_step(attributes=["coverage"]) - - """ - kwargs.setdefault("attributes", self.DEFAULT_TABLE_ATTRIBUTES) - report = AbsoluteReport(**kwargs) - outfile = os.path.join( - self.eval_dir, - get_experiment_name() + "." + report.output_format) - self.add_report(report, name="make-absolute-report", outfile=outfile) - self.add_step("publish-absolute-report", publish, outfile) - - def add_comparison_table_step(self, **kwargs): - """Add a step that makes pairwise revision comparisons. 
- - Create comparative reports for all pairs of Fast Downward - revisions. Each report pairs up the runs of the same config and - lists the two absolute attribute values and their difference - for all attributes in kwargs["attributes"]. - - All *kwargs* will be passed to the CompareConfigsReport class. - If the keyword argument *attributes* is not specified, a - default list of attributes is used. :: - - exp.add_comparison_table_step(attributes=["coverage"]) - - """ - kwargs.setdefault("attributes", self.DEFAULT_TABLE_ATTRIBUTES) - - def get_revision_pairs_and_files(): - for rev1, rev2 in itertools.combinations(self._revisions, 2): - outfile = os.path.join( - self.eval_dir, - "%s-%s-%s-compare.html" % (self.name, rev1, rev2)) - yield (rev1, rev2, outfile) - - def make_comparison_tables(): - for rev1, rev2, outfile in get_revision_pairs_and_files(): - compared_configs = [] - for config in self._configs: - config_nick = config.nick - compared_configs.append( - ("%s-%s" % (rev1, config_nick), - "%s-%s" % (rev2, config_nick), - "Diff (%s)" % config_nick)) - report = ComparativeReport(compared_configs, **kwargs) - report(self.eval_dir, outfile) - - def publish_comparison_tables(): - for _, _, outfile in get_revision_pairs_and_files(): - publish(outfile) - - self.add_step("make-comparison-tables", make_comparison_tables) - self.add_step("publish-comparison-tables", publish_comparison_tables) - - def add_scatter_plot_step(self, relative=False, attributes=None): - """Add step creating (relative) scatter plots for all revision pairs. - - Create a scatter plot for each combination of attribute, - configuration and revisions pair. If *attributes* is not - specified, a list of common scatter plot attributes is used. - For portfolios all attributes except "cost", "coverage" and - "plan_length" will be ignored. 
:: - - exp.add_scatter_plot_step(attributes=["expansions"]) - - """ - if relative: - report_class = RelativeScatterPlotReport - scatter_dir = os.path.join(self.eval_dir, "scatter-relative") - step_name = "make-relative-scatter-plots" - else: - report_class = ScatterPlotReport - scatter_dir = os.path.join(self.eval_dir, "scatter-absolute") - step_name = "make-absolute-scatter-plots" - if attributes is None: - attributes = self.DEFAULT_SCATTER_PLOT_ATTRIBUTES - - def make_scatter_plot(config_nick, rev1, rev2, attribute): - name = "-".join([self.name, rev1, rev2, attribute, config_nick]) - print "Make scatter plot for", name - algo1 = "{}-{}".format(rev1, config_nick) - algo2 = "{}-{}".format(rev2, config_nick) - report = report_class( - filter_config=[algo1, algo2], - attributes=[attribute], - get_category=lambda run1, run2: run1["domain"], - legend_location=(1.3, 0.5)) - report( - self.eval_dir, - os.path.join(scatter_dir, rev1 + "-" + rev2, name)) - - def make_scatter_plots(): - for config in self._configs: - for rev1, rev2 in itertools.combinations(self._revisions, 2): - for attribute in self.get_supported_attributes( - config.nick, attributes): - make_scatter_plot(config.nick, rev1, rev2, attribute) - - self.add_step(step_name, make_scatter_plots) diff --git a/experiments/issue732/relativescatter.py b/experiments/issue732/relativescatter.py deleted file mode 100644 index f74cb6e721..0000000000 --- a/experiments/issue732/relativescatter.py +++ /dev/null @@ -1,105 +0,0 @@ -# -*- coding: utf-8 -*- - -from collections import defaultdict - -from matplotlib import ticker - -from downward.reports.scatter import ScatterPlotReport -from downward.reports.plot import PlotReport, Matplotlib, MatplotlibPlot - - -# TODO: handle outliers - -# TODO: this is mostly copied from ScatterMatplotlib (scatter.py) -class RelativeScatterMatplotlib(Matplotlib): - @classmethod - def _plot(cls, report, axes, categories, styles): - # Display grid - axes.grid(b=True, linestyle='-', 
color='0.75') - - has_points = False - # Generate the scatter plots - for category, coords in sorted(categories.items()): - X, Y = zip(*coords) - axes.scatter(X, Y, s=42, label=category, **styles[category]) - if X and Y: - has_points = True - - if report.xscale == 'linear' or report.yscale == 'linear': - plot_size = report.missing_val * 1.01 - else: - plot_size = report.missing_val * 1.25 - - # make 5 ticks above and below 1 - yticks = [] - tick_step = report.ylim_top**(1/5.0) - for i in xrange(-5, 6): - yticks.append(tick_step**i) - axes.set_yticks(yticks) - axes.get_yaxis().set_major_formatter(ticker.ScalarFormatter()) - - axes.set_xlim(report.xlim_left or -1, report.xlim_right or plot_size) - axes.set_ylim(report.ylim_bottom or -1, report.ylim_top or plot_size) - - for axis in [axes.xaxis, axes.yaxis]: - MatplotlibPlot.change_axis_formatter( - axis, - report.missing_val if report.show_missing else None) - return has_points - - -class RelativeScatterPlotReport(ScatterPlotReport): - """ - Generate a scatter plot that shows a relative comparison of two - algorithms with regard to the given attribute. The attribute value - of algorithm 1 is shown on the x-axis and the relation to the value - of algorithm 2 on the y-axis. - """ - - def __init__(self, show_missing=True, get_category=None, **kwargs): - ScatterPlotReport.__init__(self, show_missing, get_category, **kwargs) - if self.output_format == 'tex': - raise "not supported" - else: - self.writer = RelativeScatterMatplotlib - - def _fill_categories(self, runs): - # We discard the *runs* parameter. 
- # Map category names to value tuples - categories = defaultdict(list) - self.ylim_bottom = 2 - self.ylim_top = 0.5 - self.xlim_left = float("inf") - for (domain, problem), runs in self.problem_runs.items(): - if len(runs) != 2: - continue - run1, run2 = runs - assert (run1['algorithm'] == self.algorithms[0] and - run2['algorithm'] == self.algorithms[1]) - val1 = run1.get(self.attribute) - val2 = run2.get(self.attribute) - if val1 is None or val2 is None: - continue - category = self.get_category(run1, run2) - assert val1 > 0, (domain, problem, self.algorithms[0], val1) - assert val2 > 0, (domain, problem, self.algorithms[1], val2) - x = val1 - y = val2 / float(val1) - - categories[category].append((x, y)) - - self.ylim_top = max(self.ylim_top, y) - self.ylim_bottom = min(self.ylim_bottom, y) - self.xlim_left = min(self.xlim_left, x) - - # center around 1 - if self.ylim_bottom < 1: - self.ylim_top = max(self.ylim_top, 1 / float(self.ylim_bottom)) - if self.ylim_top > 1: - self.ylim_bottom = min(self.ylim_bottom, 1 / float(self.ylim_top)) - return categories - - def _set_scales(self, xscale, yscale): - # ScatterPlot uses log-scaling on the x-axis by default. - PlotReport._set_scales( - self, xscale or self.attribute.scale or 'log', 'log') diff --git a/experiments/issue732/sg-parser.py b/experiments/issue732/sg-parser.py deleted file mode 100755 index b017324618..0000000000 --- a/experiments/issue732/sg-parser.py +++ /dev/null @@ -1,10 +0,0 @@ -#! /usr/bin/env python - -from lab.parser import Parser - -parser = Parser() - -parser.add_pattern('sg_construction_time', 'time for root successor generation creation: (.+)s', type=float) -parser.add_pattern('sg_peak_mem_diff', 'peak memory difference for root successor generator creation: (\d+) KB', type=int) - -parser.parse() diff --git a/experiments/issue732/v1.py b/experiments/issue732/v1.py deleted file mode 100755 index 487c5d194e..0000000000 --- a/experiments/issue732/v1.py +++ /dev/null @@ -1,65 +0,0 @@ -#! 
/usr/bin/env python -# -*- coding: utf-8 -*- - -import os - -from lab.environments import LocalEnvironment, MaiaEnvironment -from lab.reports import Attribute, arithmetic_mean, finite_sum, geometric_mean - -import common_setup -from common_setup import IssueConfig, IssueExperiment - -DIR = os.path.dirname(os.path.abspath(__file__)) -BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"] -REVISIONS = ["issue732-base", "issue732-v1"] -CONFIGS = [ - IssueConfig( - 'astar-inf', - ['--search', 'astar(const(infinity))'], - ), - IssueConfig( - 'astar-blind', - ['--search', 'astar(blind())'], - ), - IssueConfig( - 'debug-astar-inf', - ['--search', 'astar(const(infinity))'], - build_options=["debug32"], - driver_options=["--build=debug32"], - ), - IssueConfig( - 'debug-astar-blind', - ['--search', 'astar(blind())'], - build_options=["debug32"], - driver_options=["--build=debug32"], - ), -] -SUITE = list(sorted(set(common_setup.DEFAULT_OPTIMAL_SUITE) | - set(common_setup.DEFAULT_SATISFICING_SUITE))) -ENVIRONMENT = MaiaEnvironment( - priority=0, email="malte.helmert@unibas.ch") - -if common_setup.is_test_run(): - SUITE = IssueExperiment.DEFAULT_TEST_SUITE - ENVIRONMENT = LocalEnvironment(processes=1) - -exp = IssueExperiment( - revisions=REVISIONS, - configs=CONFIGS, - environment=ENVIRONMENT, -) - -exp.add_suite(BENCHMARKS_DIR, SUITE) -exp.add_resource('sg_parser', 'sg-parser.py', dest='sg-parser.py') -exp.add_command('sg-parser', ['{sg_parser}']) - -attributes = IssueExperiment.DEFAULT_TABLE_ATTRIBUTES + [ - Attribute("sg_construction_time", functions=[finite_sum], min_wins=True), - Attribute("sg_peak_mem_diff", functions=[finite_sum], min_wins=True), - "error", - "run_dir", -] - -exp.add_absolute_report_step(attributes=attributes) - -exp.run_steps() diff --git a/experiments/issue732/v6-debug.py b/experiments/issue732/v6-debug.py deleted file mode 100755 index cef62e5777..0000000000 --- a/experiments/issue732/v6-debug.py +++ /dev/null @@ -1,50 +0,0 @@ -#! 
/usr/bin/env python -# -*- coding: utf-8 -*- - -import os - -from lab.environments import LocalEnvironment, BaselSlurmEnvironment -from lab.reports import Attribute, finite_sum - -import common_setup -from common_setup import IssueConfig, IssueExperiment - -DIR = os.path.dirname(os.path.abspath(__file__)) -BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"] -REVISIONS = ["issue732-base", "issue732-v6"] -BUILDS = ["debug32", "release32"] -CONFIGS = [ - IssueConfig( - "lama-first-{build}".format(**locals()), - [], - build_options=[build], - driver_options=["--alias", "lama-first", "--build", build]) - for build in BUILDS -] -SUITE = set( - common_setup.DEFAULT_OPTIMAL_SUITE + common_setup.DEFAULT_SATISFICING_SUITE) -ENVIRONMENT = BaselSlurmEnvironment( - priority=0, email="malte.helmert@unibas.ch") - -if common_setup.is_test_run(): - SUITE = IssueExperiment.DEFAULT_TEST_SUITE - ENVIRONMENT = LocalEnvironment(processes=1) - -exp = IssueExperiment( - revisions=REVISIONS, - configs=CONFIGS, - environment=ENVIRONMENT, -) - -exp.add_suite(BENCHMARKS_DIR, SUITE) -exp.add_resource('sg_parser', 'sg-parser.py', dest='sg-parser.py') -exp.add_command('sg-parser', ['{sg_parser}']) - -attributes = IssueExperiment.DEFAULT_TABLE_ATTRIBUTES + [ - Attribute("sg_construction_time", functions=[finite_sum], min_wins=True), - Attribute("sg_peak_mem_diff", functions=[finite_sum], min_wins=True), -] - -exp.add_comparison_table_step(attributes=attributes) - -exp.run_steps() diff --git a/experiments/issue732/v6.py b/experiments/issue732/v6.py deleted file mode 100755 index 6d7eb6ffbf..0000000000 --- a/experiments/issue732/v6.py +++ /dev/null @@ -1,71 +0,0 @@ -#! 
/usr/bin/env python -# -*- coding: utf-8 -*- - -import os - -from lab.environments import LocalEnvironment, BaselSlurmEnvironment -from lab.reports import Attribute, finite_sum - -from downward.reports.compare import ComparativeReport - -import common_setup -from common_setup import IssueConfig, IssueExperiment - -DIR = os.path.dirname(os.path.abspath(__file__)) -BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"] -REVISIONS = [ - "issue732-{rev}".format(**locals()) - for rev in ["base", "v1", "v2", "v3", "v4", "v5", "v6"]] -BUILDS = ["release32"] -SEARCHES = [ - ("astar-inf", ["--search", "astar(const(infinity))"]), -] -CONFIGS = [ - IssueConfig( - "{nick}-{build}".format(**locals()), - config, - build_options=[build], - driver_options=["--build", build]) - for nick, config in SEARCHES - for build in BUILDS -] -SUITE = set( - common_setup.DEFAULT_OPTIMAL_SUITE + common_setup.DEFAULT_SATISFICING_SUITE) -ENVIRONMENT = BaselSlurmEnvironment( - priority=0, email="malte.helmert@unibas.ch") - -if common_setup.is_test_run(): - SUITE = IssueExperiment.DEFAULT_TEST_SUITE - ENVIRONMENT = LocalEnvironment(processes=1) - -exp = IssueExperiment( - revisions=REVISIONS, - configs=CONFIGS, - environment=ENVIRONMENT, -) - -exp.add_suite(BENCHMARKS_DIR, SUITE) -exp.add_resource('sg_parser', 'sg-parser.py', dest='sg-parser.py') -exp.add_command('sg-parser', ['{sg_parser}']) - -attributes = IssueExperiment.DEFAULT_TABLE_ATTRIBUTES + [ - Attribute("sg_construction_time", functions=[finite_sum], min_wins=True), - Attribute("sg_peak_mem_diff", functions=[finite_sum], min_wins=True), -] - -# Instead of comparing all revision pairs in separate reports, create a -# single report comparing neighboring revisions. 
-# exp.add_comparison_table_step(attributes=attributes) -compared_configs = [] -for rev1, rev2 in zip(REVISIONS[:-1], REVISIONS[1:]): - for config in CONFIGS: - config_nick = config.nick - compared_configs.append( - ("{rev1}-{config_nick}".format(**locals()), - "{rev2}-{config_nick}".format(**locals()), - "Diff ({config_nick})".format(**locals()))) -exp.add_report( - ComparativeReport(compared_configs, attributes=attributes), - name="compare-all-tags") - -exp.run_steps() diff --git a/experiments/issue732/v7-debug.py b/experiments/issue732/v7-debug.py deleted file mode 100755 index b047549843..0000000000 --- a/experiments/issue732/v7-debug.py +++ /dev/null @@ -1,50 +0,0 @@ -#! /usr/bin/env python -# -*- coding: utf-8 -*- - -import os - -from lab.environments import LocalEnvironment, BaselSlurmEnvironment -from lab.reports import Attribute, finite_sum - -import common_setup -from common_setup import IssueConfig, IssueExperiment - -DIR = os.path.dirname(os.path.abspath(__file__)) -BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"] -REVISIONS = ["issue732-base", "issue732-v7"] -BUILDS = ["debug32", "release32"] -CONFIGS = [ - IssueConfig( - "lama-first-{build}".format(**locals()), - [], - build_options=[build], - driver_options=["--alias", "lama-first", "--build", build]) - for build in BUILDS -] -SUITE = set( - common_setup.DEFAULT_OPTIMAL_SUITE + common_setup.DEFAULT_SATISFICING_SUITE) -ENVIRONMENT = BaselSlurmEnvironment( - priority=0, email="malte.helmert@unibas.ch") - -if common_setup.is_test_run(): - SUITE = IssueExperiment.DEFAULT_TEST_SUITE - ENVIRONMENT = LocalEnvironment(processes=1) - -exp = IssueExperiment( - revisions=REVISIONS, - configs=CONFIGS, - environment=ENVIRONMENT, -) - -exp.add_suite(BENCHMARKS_DIR, SUITE) -exp.add_resource('sg_parser', 'sg-parser.py', dest='sg-parser.py') -exp.add_command('sg-parser', ['{sg_parser}']) - -attributes = IssueExperiment.DEFAULT_TABLE_ATTRIBUTES + [ - Attribute("sg_construction_time", functions=[finite_sum], 
min_wins=True), - Attribute("sg_peak_mem_diff", functions=[finite_sum], min_wins=True), -] - -exp.add_comparison_table_step(attributes=attributes) - -exp.run_steps() diff --git a/experiments/issue732/v7.py b/experiments/issue732/v7.py deleted file mode 100755 index fecd62e1a7..0000000000 --- a/experiments/issue732/v7.py +++ /dev/null @@ -1,71 +0,0 @@ -#! /usr/bin/env python -# -*- coding: utf-8 -*- - -import os - -from lab.environments import LocalEnvironment, BaselSlurmEnvironment -from lab.reports import Attribute, finite_sum - -from downward.reports.compare import ComparativeReport - -import common_setup -from common_setup import IssueConfig, IssueExperiment - -DIR = os.path.dirname(os.path.abspath(__file__)) -BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"] -REVISIONS = [ - "issue732-{rev}".format(**locals()) - for rev in ["base", "v1", "v2", "v3", "v4", "v5", "v6", "v7"]] -BUILDS = ["release32"] -SEARCHES = [ - ("astar-inf", ["--search", "astar(const(infinity))"]), -] -CONFIGS = [ - IssueConfig( - "{nick}-{build}".format(**locals()), - config, - build_options=[build], - driver_options=["--build", build]) - for nick, config in SEARCHES - for build in BUILDS -] -SUITE = set( - common_setup.DEFAULT_OPTIMAL_SUITE + common_setup.DEFAULT_SATISFICING_SUITE) -ENVIRONMENT = BaselSlurmEnvironment( - priority=0, email="malte.helmert@unibas.ch") - -if common_setup.is_test_run(): - SUITE = IssueExperiment.DEFAULT_TEST_SUITE - ENVIRONMENT = LocalEnvironment(processes=1) - -exp = IssueExperiment( - revisions=REVISIONS, - configs=CONFIGS, - environment=ENVIRONMENT, -) - -exp.add_suite(BENCHMARKS_DIR, SUITE) -exp.add_resource('sg_parser', 'sg-parser.py', dest='sg-parser.py') -exp.add_command('sg-parser', ['{sg_parser}']) - -attributes = IssueExperiment.DEFAULT_TABLE_ATTRIBUTES + [ - Attribute("sg_construction_time", functions=[finite_sum], min_wins=True), - Attribute("sg_peak_mem_diff", functions=[finite_sum], min_wins=True), -] - -# Instead of comparing all revision pairs 
in separate reports, create a -# single report comparing neighboring revisions. -# exp.add_comparison_table_step(attributes=attributes) -compared_configs = [] -for rev1, rev2 in zip(REVISIONS[:-1], REVISIONS[1:]): - for config in CONFIGS: - config_nick = config.nick - compared_configs.append( - ("{rev1}-{config_nick}".format(**locals()), - "{rev2}-{config_nick}".format(**locals()), - "Diff ({config_nick})".format(**locals()))) -exp.add_report( - ComparativeReport(compared_configs, attributes=attributes), - name="compare-all-tags") - -exp.run_steps() diff --git a/experiments/issue733/common_setup.py b/experiments/issue733/common_setup.py deleted file mode 100644 index fe0b9d655e..0000000000 --- a/experiments/issue733/common_setup.py +++ /dev/null @@ -1,398 +0,0 @@ -# -*- coding: utf-8 -*- - -import itertools -import os -import platform -import subprocess -import sys - -from lab.experiment import ARGPARSER -from lab import tools - -from downward.experiment import FastDownwardExperiment -from downward.reports.absolute import AbsoluteReport -from downward.reports.compare import ComparativeReport -from downward.reports.scatter import ScatterPlotReport - -from relativescatter import RelativeScatterPlotReport - - -def parse_args(): - ARGPARSER.add_argument( - "--test", - choices=["yes", "no", "auto"], - default="auto", - dest="test_run", - help="test experiment locally on a small suite if --test=yes or " - "--test=auto and we are not on a cluster") - return ARGPARSER.parse_args() - -ARGS = parse_args() - - -DEFAULT_OPTIMAL_SUITE = [ - 'airport', 'barman-opt11-strips', 'barman-opt14-strips', 'blocks', - 'childsnack-opt14-strips', 'depot', 'driverlog', - 'elevators-opt08-strips', 'elevators-opt11-strips', - 'floortile-opt11-strips', 'floortile-opt14-strips', 'freecell', - 'ged-opt14-strips', 'grid', 'gripper', 'hiking-opt14-strips', - 'logistics00', 'logistics98', 'miconic', 'movie', 'mprime', - 'mystery', 'nomystery-opt11-strips', 'openstacks-opt08-strips', - 
'openstacks-opt11-strips', 'openstacks-opt14-strips', - 'openstacks-strips', 'parcprinter-08-strips', - 'parcprinter-opt11-strips', 'parking-opt11-strips', - 'parking-opt14-strips', 'pathways-noneg', 'pegsol-08-strips', - 'pegsol-opt11-strips', 'pipesworld-notankage', - 'pipesworld-tankage', 'psr-small', 'rovers', 'satellite', - 'scanalyzer-08-strips', 'scanalyzer-opt11-strips', - 'sokoban-opt08-strips', 'sokoban-opt11-strips', 'storage', - 'tetris-opt14-strips', 'tidybot-opt11-strips', - 'tidybot-opt14-strips', 'tpp', 'transport-opt08-strips', - 'transport-opt11-strips', 'transport-opt14-strips', - 'trucks-strips', 'visitall-opt11-strips', 'visitall-opt14-strips', - 'woodworking-opt08-strips', 'woodworking-opt11-strips', - 'zenotravel'] - -DEFAULT_SATISFICING_SUITE = [ - 'airport', 'assembly', 'barman-sat11-strips', - 'barman-sat14-strips', 'blocks', 'cavediving-14-adl', - 'childsnack-sat14-strips', 'citycar-sat14-adl', 'depot', - 'driverlog', 'elevators-sat08-strips', 'elevators-sat11-strips', - 'floortile-sat11-strips', 'floortile-sat14-strips', 'freecell', - 'ged-sat14-strips', 'grid', 'gripper', 'hiking-sat14-strips', - 'logistics00', 'logistics98', 'maintenance-sat14-adl', 'miconic', - 'miconic-fulladl', 'miconic-simpleadl', 'movie', 'mprime', - 'mystery', 'nomystery-sat11-strips', 'openstacks', - 'openstacks-sat08-adl', 'openstacks-sat08-strips', - 'openstacks-sat11-strips', 'openstacks-sat14-strips', - 'openstacks-strips', 'optical-telegraphs', 'parcprinter-08-strips', - 'parcprinter-sat11-strips', 'parking-sat11-strips', - 'parking-sat14-strips', 'pathways', 'pathways-noneg', - 'pegsol-08-strips', 'pegsol-sat11-strips', 'philosophers', - 'pipesworld-notankage', 'pipesworld-tankage', 'psr-large', - 'psr-middle', 'psr-small', 'rovers', 'satellite', - 'scanalyzer-08-strips', 'scanalyzer-sat11-strips', 'schedule', - 'sokoban-sat08-strips', 'sokoban-sat11-strips', 'storage', - 'tetris-sat14-strips', 'thoughtful-sat14-strips', - 'tidybot-sat11-strips', 'tpp', 
'transport-sat08-strips', - 'transport-sat11-strips', 'transport-sat14-strips', 'trucks', - 'trucks-strips', 'visitall-sat11-strips', 'visitall-sat14-strips', - 'woodworking-sat08-strips', 'woodworking-sat11-strips', - 'zenotravel'] - - -def get_script(): - """Get file name of main script.""" - return tools.get_script_path() - - -def get_script_dir(): - """Get directory of main script. - - Usually a relative directory (depends on how it was called by the user.)""" - return os.path.dirname(get_script()) - - -def get_experiment_name(): - """Get name for experiment. - - Derived from the absolute filename of the main script, e.g. - "/ham/spam/eggs.py" => "spam-eggs".""" - script = os.path.abspath(get_script()) - script_dir = os.path.basename(os.path.dirname(script)) - script_base = os.path.splitext(os.path.basename(script))[0] - return "%s-%s" % (script_dir, script_base) - - -def get_data_dir(): - """Get data dir for the experiment. - - This is the subdirectory "data" of the directory containing - the main script.""" - return os.path.join(get_script_dir(), "data", get_experiment_name()) - - -def get_repo_base(): - """Get base directory of the repository, as an absolute path. - - Search upwards in the directory tree from the main script until a - directory with a subdirectory named ".hg" is found. 
- - Abort if the repo base cannot be found.""" - path = os.path.abspath(get_script_dir()) - while os.path.dirname(path) != path: - if os.path.exists(os.path.join(path, ".hg")): - return path - path = os.path.dirname(path) - sys.exit("repo base could not be found") - - -def is_running_on_cluster(): - node = platform.node() - return ( - "cluster" in node or - node.startswith("gkigrid") or - node in ["habakuk", "turtur"]) - - -def is_running_on_cluster_login_node(): - return platform.node() == "login20.cluster.bc2.ch" - - -def can_publish(): - return is_running_on_cluster_login_node() or not is_running_on_cluster() - - -def publish(report_file): - if can_publish(): - subprocess.call(["publish", report_file]) - else: - print "publishing reports is not supported on this node" - - -def is_test_run(): - return ARGS.test_run == "yes" or ( - ARGS.test_run == "auto" and not is_running_on_cluster()) - - -def get_algo_nick(revision, config_nick): - return "{revision}-{config_nick}".format(**locals()) - - -class IssueConfig(object): - """Hold information about a planner configuration. - - See FastDownwardExperiment.add_algorithm() for documentation of the - constructor's options. 
- - """ - def __init__(self, nick, component_options, - build_options=None, driver_options=None): - self.nick = nick - self.component_options = component_options - self.build_options = build_options - self.driver_options = driver_options - - -class IssueExperiment(FastDownwardExperiment): - """Subclass of FastDownwardExperiment with some convenience features.""" - - DEFAULT_TEST_SUITE = ["gripper:prob01.pddl"] - - DEFAULT_TABLE_ATTRIBUTES = [ - "cost", - "coverage", - "error", - "evaluations", - "expansions", - "expansions_until_last_jump", - "generated", - "memory", - "quality", - "run_dir", - "score_evaluations", - "score_expansions", - "score_generated", - "score_memory", - "score_search_time", - "score_total_time", - "search_time", - "total_time", - ] - - DEFAULT_SCATTER_PLOT_ATTRIBUTES = [ - "evaluations", - "expansions", - "expansions_until_last_jump", - "initial_h_value", - "memory", - "search_time", - "total_time", - ] - - PORTFOLIO_ATTRIBUTES = [ - "cost", - "coverage", - "error", - "plan_length", - "run_dir", - ] - - def __init__(self, revisions=None, configs=None, path=None, **kwargs): - """ - - You can either specify both *revisions* and *configs* or none - of them. If they are omitted, you will need to call - exp.add_algorithm() manually. - - If *revisions* is given, it must be a non-empty list of - revision identifiers, which specify which planner versions to - use in the experiment. The same versions are used for - translator, preprocessor and search. :: - - IssueExperiment(revisions=["issue123", "4b3d581643"], ...) - - If *configs* is given, it must be a non-empty list of - IssueConfig objects. :: - - IssueExperiment(..., configs=[ - IssueConfig("ff", ["--search", "eager_greedy(ff())"]), - IssueConfig( - "lama", [], - driver_options=["--alias", "seq-sat-lama-2011"]), - ]) - - If *path* is specified, it must be the path to where the - experiment should be built (e.g. - /home/john/experiments/issue123/exp01/). 
If omitted, the - experiment path is derived automatically from the main - script's filename. Example:: - - script = experiments/issue123/exp01.py --> - path = experiments/issue123/data/issue123-exp01/ - - """ - - path = path or get_data_dir() - - FastDownwardExperiment.__init__(self, path=path, **kwargs) - - if (revisions and not configs) or (not revisions and configs): - raise ValueError( - "please provide either both or none of revisions and configs") - - for rev in revisions: - for config in configs: - self.add_algorithm( - get_algo_nick(rev, config.nick), - get_repo_base(), - rev, - config.component_options, - build_options=config.build_options, - driver_options=config.driver_options) - - self._revisions = revisions - self._configs = configs - - @classmethod - def _is_portfolio(cls, config_nick): - return "fdss" in config_nick - - @classmethod - def get_supported_attributes(cls, config_nick, attributes): - if cls._is_portfolio(config_nick): - return [attr for attr in attributes - if attr in cls.PORTFOLIO_ATTRIBUTES] - return attributes - - def add_absolute_report_step(self, **kwargs): - """Add step that makes an absolute report. - - Absolute reports are useful for experiments that don't compare - revisions. - - The report is written to the experiment evaluation directory. - - All *kwargs* will be passed to the AbsoluteReport class. If the - keyword argument *attributes* is not specified, a default list - of attributes is used. :: - - exp.add_absolute_report_step(attributes=["coverage"]) - - """ - kwargs.setdefault("attributes", self.DEFAULT_TABLE_ATTRIBUTES) - report = AbsoluteReport(**kwargs) - outfile = os.path.join( - self.eval_dir, - get_experiment_name() + "." + report.output_format) - self.add_report(report, name="make-absolute-report", outfile=outfile) - self.add_step("publish-absolute-report", publish, outfile) - - def add_comparison_table_step(self, **kwargs): - """Add a step that makes pairwise revision comparisons. 
- - Create comparative reports for all pairs of Fast Downward - revisions. Each report pairs up the runs of the same config and - lists the two absolute attribute values and their difference - for all attributes in kwargs["attributes"]. - - All *kwargs* will be passed to the CompareConfigsReport class. - If the keyword argument *attributes* is not specified, a - default list of attributes is used. :: - - exp.add_comparison_table_step(attributes=["coverage"]) - - """ - kwargs.setdefault("attributes", self.DEFAULT_TABLE_ATTRIBUTES) - - def get_revision_pairs_and_files(): - for rev1, rev2 in itertools.combinations(self._revisions, 2): - outfile = os.path.join( - self.eval_dir, - "%s-%s-%s-compare.html" % (self.name, rev1, rev2)) - yield (rev1, rev2, outfile) - - def make_comparison_tables(): - for rev1, rev2, outfile in get_revision_pairs_and_files(): - compared_configs = [] - for config in self._configs: - config_nick = config.nick - compared_configs.append( - ("%s-%s" % (rev1, config_nick), - "%s-%s" % (rev2, config_nick), - "Diff (%s)" % config_nick)) - report = ComparativeReport(compared_configs, **kwargs) - report(self.eval_dir, outfile) - - def publish_comparison_tables(): - for _, _, outfile in get_revision_pairs_and_files(): - publish(outfile) - - self.add_step("make-comparison-tables", make_comparison_tables) - self.add_step("publish-comparison-tables", publish_comparison_tables) - - def add_scatter_plot_step(self, relative=False, attributes=None): - """Add step creating (relative) scatter plots for all revision pairs. - - Create a scatter plot for each combination of attribute, - configuration and revisions pair. If *attributes* is not - specified, a list of common scatter plot attributes is used. - For portfolios all attributes except "cost", "coverage" and - "plan_length" will be ignored. 
:: - - exp.add_scatter_plot_step(attributes=["expansions"]) - - """ - if relative: - report_class = RelativeScatterPlotReport - scatter_dir = os.path.join(self.eval_dir, "scatter-relative") - step_name = "make-relative-scatter-plots" - else: - report_class = ScatterPlotReport - scatter_dir = os.path.join(self.eval_dir, "scatter-absolute") - step_name = "make-absolute-scatter-plots" - if attributes is None: - attributes = self.DEFAULT_SCATTER_PLOT_ATTRIBUTES - - def make_scatter_plot(config_nick, rev1, rev2, attribute): - name = "-".join([self.name, rev1, rev2, attribute, config_nick]) - print "Make scatter plot for", name - algo1 = "{}-{}".format(rev1, config_nick) - algo2 = "{}-{}".format(rev2, config_nick) - report = report_class( - filter_config=[algo1, algo2], - attributes=[attribute], - get_category=lambda run1, run2: run1["domain"], - legend_location=(1.3, 0.5)) - report( - self.eval_dir, - os.path.join(scatter_dir, rev1 + "-" + rev2, name)) - - def make_scatter_plots(): - for config in self._configs: - for rev1, rev2 in itertools.combinations(self._revisions, 2): - for attribute in self.get_supported_attributes( - config.nick, attributes): - make_scatter_plot(config.nick, rev1, rev2, attribute) - - self.add_step(step_name, make_scatter_plots) diff --git a/experiments/issue733/relativescatter.py b/experiments/issue733/relativescatter.py deleted file mode 100644 index f74cb6e721..0000000000 --- a/experiments/issue733/relativescatter.py +++ /dev/null @@ -1,105 +0,0 @@ -# -*- coding: utf-8 -*- - -from collections import defaultdict - -from matplotlib import ticker - -from downward.reports.scatter import ScatterPlotReport -from downward.reports.plot import PlotReport, Matplotlib, MatplotlibPlot - - -# TODO: handle outliers - -# TODO: this is mostly copied from ScatterMatplotlib (scatter.py) -class RelativeScatterMatplotlib(Matplotlib): - @classmethod - def _plot(cls, report, axes, categories, styles): - # Display grid - axes.grid(b=True, linestyle='-', 
color='0.75') - - has_points = False - # Generate the scatter plots - for category, coords in sorted(categories.items()): - X, Y = zip(*coords) - axes.scatter(X, Y, s=42, label=category, **styles[category]) - if X and Y: - has_points = True - - if report.xscale == 'linear' or report.yscale == 'linear': - plot_size = report.missing_val * 1.01 - else: - plot_size = report.missing_val * 1.25 - - # make 5 ticks above and below 1 - yticks = [] - tick_step = report.ylim_top**(1/5.0) - for i in xrange(-5, 6): - yticks.append(tick_step**i) - axes.set_yticks(yticks) - axes.get_yaxis().set_major_formatter(ticker.ScalarFormatter()) - - axes.set_xlim(report.xlim_left or -1, report.xlim_right or plot_size) - axes.set_ylim(report.ylim_bottom or -1, report.ylim_top or plot_size) - - for axis in [axes.xaxis, axes.yaxis]: - MatplotlibPlot.change_axis_formatter( - axis, - report.missing_val if report.show_missing else None) - return has_points - - -class RelativeScatterPlotReport(ScatterPlotReport): - """ - Generate a scatter plot that shows a relative comparison of two - algorithms with regard to the given attribute. The attribute value - of algorithm 1 is shown on the x-axis and the relation to the value - of algorithm 2 on the y-axis. - """ - - def __init__(self, show_missing=True, get_category=None, **kwargs): - ScatterPlotReport.__init__(self, show_missing, get_category, **kwargs) - if self.output_format == 'tex': - raise "not supported" - else: - self.writer = RelativeScatterMatplotlib - - def _fill_categories(self, runs): - # We discard the *runs* parameter. 
- # Map category names to value tuples - categories = defaultdict(list) - self.ylim_bottom = 2 - self.ylim_top = 0.5 - self.xlim_left = float("inf") - for (domain, problem), runs in self.problem_runs.items(): - if len(runs) != 2: - continue - run1, run2 = runs - assert (run1['algorithm'] == self.algorithms[0] and - run2['algorithm'] == self.algorithms[1]) - val1 = run1.get(self.attribute) - val2 = run2.get(self.attribute) - if val1 is None or val2 is None: - continue - category = self.get_category(run1, run2) - assert val1 > 0, (domain, problem, self.algorithms[0], val1) - assert val2 > 0, (domain, problem, self.algorithms[1], val2) - x = val1 - y = val2 / float(val1) - - categories[category].append((x, y)) - - self.ylim_top = max(self.ylim_top, y) - self.ylim_bottom = min(self.ylim_bottom, y) - self.xlim_left = min(self.xlim_left, x) - - # center around 1 - if self.ylim_bottom < 1: - self.ylim_top = max(self.ylim_top, 1 / float(self.ylim_bottom)) - if self.ylim_top > 1: - self.ylim_bottom = min(self.ylim_bottom, 1 / float(self.ylim_top)) - return categories - - def _set_scales(self, xscale, yscale): - # ScatterPlot uses log-scaling on the x-axis by default. - PlotReport._set_scales( - self, xscale or self.attribute.scale or 'log', 'log') diff --git a/experiments/issue733/v1.py b/experiments/issue733/v1.py deleted file mode 100755 index 532149bedc..0000000000 --- a/experiments/issue733/v1.py +++ /dev/null @@ -1,64 +0,0 @@ -#! 
/usr/bin/env python2 -# -*- coding: utf-8 -*- - -import os - -from lab.environments import LocalEnvironment, BaselSlurmEnvironment -from lab import tools - -from downward.reports.compare import ComparativeReport - -import common_setup -from common_setup import IssueConfig, IssueExperiment - - -DIR = os.path.dirname(os.path.abspath(__file__)) -BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"] -REVISIONS = ["issue733-base", "issue733-v1"] -PYTHONS = ["python2.7", "python3.5"] -CONFIGS = [ - IssueConfig( - "{python}".format(**locals()), - [], - driver_options=["--translate"]) - for python in PYTHONS -] -SUITE = set(common_setup.DEFAULT_OPTIMAL_SUITE + common_setup.DEFAULT_SATISFICING_SUITE) -BaselSlurmEnvironment.ENVIRONMENT_SETUP = ( - 'module purge\n' - 'module load Python/3.5.2-goolf-1.7.20\n' - 'module load matplotlib/1.5.1-goolf-1.7.20-Python-3.5.2\n' - 'PYTHONPATH="%s:$PYTHONPATH"' % tools.get_lab_path()) -ENVIRONMENT = BaselSlurmEnvironment( - priority=0, email="jendrik.seipp@unibas.ch") - -if common_setup.is_test_run(): - SUITE = IssueExperiment.DEFAULT_TEST_SUITE - ENVIRONMENT = LocalEnvironment(processes=1) - -class PythonVersionExperiment(IssueExperiment): - def _add_runs(self): - IssueExperiment._add_runs(self) - for run in self.runs: - python = run.algo.name.split("-")[-1] - command, kwargs = run.commands["fast-downward"] - command = [python] + command - run.commands["fast-downward"] = (command, kwargs) - -exp = PythonVersionExperiment( - revisions=REVISIONS, - configs=CONFIGS, - environment=ENVIRONMENT, -) -del exp.commands["parse-search"] -exp.add_suite(BENCHMARKS_DIR, SUITE) - -attributes = ["translator_time_done", "translator_peak_memory"] -exp.add_comparison_table_step(attributes=attributes) -compared_configs = [ - ("issue733-v1-python2.7", "issue733-v1-python3.5", "Diff")] -exp.add_report( - ComparativeReport(compared_configs, attributes=attributes), - name="compare-python-versions") - -exp.run_steps() diff --git 
a/experiments/issue735/common_setup.py b/experiments/issue735/common_setup.py deleted file mode 100644 index 5d2b40b61c..0000000000 --- a/experiments/issue735/common_setup.py +++ /dev/null @@ -1,385 +0,0 @@ -# -*- coding: utf-8 -*- - -import itertools -import os -import platform -import subprocess -import sys - -from lab.experiment import ARGPARSER -from lab import tools - -from downward.experiment import FastDownwardExperiment -from downward.reports.absolute import AbsoluteReport -from downward.reports.compare import ComparativeReport -from downward.reports.scatter import ScatterPlotReport - -from relativescatter import RelativeScatterPlotReport - - -def parse_args(): - ARGPARSER.add_argument( - "--test", - choices=["yes", "no", "auto"], - default="auto", - dest="test_run", - help="test experiment locally on a small suite if --test=yes or " - "--test=auto and we are not on a cluster") - return ARGPARSER.parse_args() - -ARGS = parse_args() - - -DEFAULT_OPTIMAL_SUITE = [ - 'airport', 'barman-opt11-strips', 'barman-opt14-strips', 'blocks', - 'childsnack-opt14-strips', 'depot', 'driverlog', - 'elevators-opt08-strips', 'elevators-opt11-strips', - 'floortile-opt11-strips', 'floortile-opt14-strips', 'freecell', - 'ged-opt14-strips', 'grid', 'gripper', 'hiking-opt14-strips', - 'logistics00', 'logistics98', 'miconic', 'movie', 'mprime', - 'mystery', 'nomystery-opt11-strips', 'openstacks-opt08-strips', - 'openstacks-opt11-strips', 'openstacks-opt14-strips', - 'openstacks-strips', 'parcprinter-08-strips', - 'parcprinter-opt11-strips', 'parking-opt11-strips', - 'parking-opt14-strips', 'pathways-noneg', 'pegsol-08-strips', - 'pegsol-opt11-strips', 'pipesworld-notankage', - 'pipesworld-tankage', 'psr-small', 'rovers', 'satellite', - 'scanalyzer-08-strips', 'scanalyzer-opt11-strips', - 'sokoban-opt08-strips', 'sokoban-opt11-strips', 'storage', - 'tetris-opt14-strips', 'tidybot-opt11-strips', - 'tidybot-opt14-strips', 'tpp', 'transport-opt08-strips', - 'transport-opt11-strips', 
'transport-opt14-strips', - 'trucks-strips', 'visitall-opt11-strips', 'visitall-opt14-strips', - 'woodworking-opt08-strips', 'woodworking-opt11-strips', - 'zenotravel'] - -DEFAULT_SATISFICING_SUITE = [ - 'airport', 'assembly', 'barman-sat11-strips', - 'barman-sat14-strips', 'blocks', 'cavediving-14-adl', - 'childsnack-sat14-strips', 'citycar-sat14-adl', 'depot', - 'driverlog', 'elevators-sat08-strips', 'elevators-sat11-strips', - 'floortile-sat11-strips', 'floortile-sat14-strips', 'freecell', - 'ged-sat14-strips', 'grid', 'gripper', 'hiking-sat14-strips', - 'logistics00', 'logistics98', 'maintenance-sat14-adl', 'miconic', - 'miconic-fulladl', 'miconic-simpleadl', 'movie', 'mprime', - 'mystery', 'nomystery-sat11-strips', 'openstacks', - 'openstacks-sat08-adl', 'openstacks-sat08-strips', - 'openstacks-sat11-strips', 'openstacks-sat14-strips', - 'openstacks-strips', 'optical-telegraphs', 'parcprinter-08-strips', - 'parcprinter-sat11-strips', 'parking-sat11-strips', - 'parking-sat14-strips', 'pathways', 'pathways-noneg', - 'pegsol-08-strips', 'pegsol-sat11-strips', 'philosophers', - 'pipesworld-notankage', 'pipesworld-tankage', 'psr-large', - 'psr-middle', 'psr-small', 'rovers', 'satellite', - 'scanalyzer-08-strips', 'scanalyzer-sat11-strips', 'schedule', - 'sokoban-sat08-strips', 'sokoban-sat11-strips', 'storage', - 'tetris-sat14-strips', 'thoughtful-sat14-strips', - 'tidybot-sat11-strips', 'tpp', 'transport-sat08-strips', - 'transport-sat11-strips', 'transport-sat14-strips', 'trucks', - 'trucks-strips', 'visitall-sat11-strips', 'visitall-sat14-strips', - 'woodworking-sat08-strips', 'woodworking-sat11-strips', - 'zenotravel'] - - -def get_script(): - """Get file name of main script.""" - return tools.get_script_path() - - -def get_script_dir(): - """Get directory of main script. - - Usually a relative directory (depends on how it was called by the user.)""" - return os.path.dirname(get_script()) - - -def get_experiment_name(): - """Get name for experiment. 
- - Derived from the absolute filename of the main script, e.g. - "/ham/spam/eggs.py" => "spam-eggs".""" - script = os.path.abspath(get_script()) - script_dir = os.path.basename(os.path.dirname(script)) - script_base = os.path.splitext(os.path.basename(script))[0] - return "%s-%s" % (script_dir, script_base) - - -def get_data_dir(): - """Get data dir for the experiment. - - This is the subdirectory "data" of the directory containing - the main script.""" - return os.path.join(get_script_dir(), "data", get_experiment_name()) - - -def get_repo_base(): - """Get base directory of the repository, as an absolute path. - - Search upwards in the directory tree from the main script until a - directory with a subdirectory named ".hg" is found. - - Abort if the repo base cannot be found.""" - path = os.path.abspath(get_script_dir()) - while os.path.dirname(path) != path: - if os.path.exists(os.path.join(path, ".hg")): - return path - path = os.path.dirname(path) - sys.exit("repo base could not be found") - - -def is_running_on_cluster(): - node = platform.node() - return ( - "cluster" in node or - node.startswith("gkigrid") or - node in ["habakuk", "turtur"]) - - -def is_test_run(): - return ARGS.test_run == "yes" or ( - ARGS.test_run == "auto" and not is_running_on_cluster()) - - -def get_algo_nick(revision, config_nick): - return "{revision}-{config_nick}".format(**locals()) - - -class IssueConfig(object): - """Hold information about a planner configuration. - - See FastDownwardExperiment.add_algorithm() for documentation of the - constructor's options. 
- - """ - def __init__(self, nick, component_options, - build_options=None, driver_options=None): - self.nick = nick - self.component_options = component_options - self.build_options = build_options - self.driver_options = driver_options - - -class IssueExperiment(FastDownwardExperiment): - """Subclass of FastDownwardExperiment with some convenience features.""" - - DEFAULT_TEST_SUITE = ["gripper:prob01.pddl"] - - DEFAULT_TABLE_ATTRIBUTES = [ - "cost", - "coverage", - "error", - "evaluations", - "expansions", - "expansions_until_last_jump", - "generated", - "memory", - "quality", - "run_dir", - "score_evaluations", - "score_expansions", - "score_generated", - "score_memory", - "score_search_time", - "score_total_time", - "search_time", - "total_time", - ] - - DEFAULT_SCATTER_PLOT_ATTRIBUTES = [ - "evaluations", - "expansions", - "expansions_until_last_jump", - "initial_h_value", - "memory", - "search_time", - "total_time", - ] - - PORTFOLIO_ATTRIBUTES = [ - "cost", - "coverage", - "error", - "plan_length", - "run_dir", - ] - - def __init__(self, revisions=None, configs=None, path=None, **kwargs): - """ - - You can either specify both *revisions* and *configs* or none - of them. If they are omitted, you will need to call - exp.add_algorithm() manually. - - If *revisions* is given, it must be a non-empty list of - revision identifiers, which specify which planner versions to - use in the experiment. The same versions are used for - translator, preprocessor and search. :: - - IssueExperiment(revisions=["issue123", "4b3d581643"], ...) - - If *configs* is given, it must be a non-empty list of - IssueConfig objects. :: - - IssueExperiment(..., configs=[ - IssueConfig("ff", ["--search", "eager_greedy(ff())"]), - IssueConfig( - "lama", [], - driver_options=["--alias", "seq-sat-lama-2011"]), - ]) - - If *path* is specified, it must be the path to where the - experiment should be built (e.g. - /home/john/experiments/issue123/exp01/). 
If omitted, the - experiment path is derived automatically from the main - script's filename. Example:: - - script = experiments/issue123/exp01.py --> - path = experiments/issue123/data/issue123-exp01/ - - """ - - path = path or get_data_dir() - - FastDownwardExperiment.__init__(self, path=path, **kwargs) - - if (revisions and not configs) or (not revisions and configs): - raise ValueError( - "please provide either both or none of revisions and configs") - - for rev in revisions: - for config in configs: - self.add_algorithm( - get_algo_nick(rev, config.nick), - get_repo_base(), - rev, - config.component_options, - build_options=config.build_options, - driver_options=config.driver_options) - - self._revisions = revisions - self._configs = configs - - @classmethod - def _is_portfolio(cls, config_nick): - return "fdss" in config_nick - - @classmethod - def get_supported_attributes(cls, config_nick, attributes): - if cls._is_portfolio(config_nick): - return [attr for attr in attributes - if attr in cls.PORTFOLIO_ATTRIBUTES] - return attributes - - def add_absolute_report_step(self, **kwargs): - """Add step that makes an absolute report. - - Absolute reports are useful for experiments that don't compare - revisions. - - The report is written to the experiment evaluation directory. - - All *kwargs* will be passed to the AbsoluteReport class. If the - keyword argument *attributes* is not specified, a default list - of attributes is used. :: - - exp.add_absolute_report_step(attributes=["coverage"]) - - """ - kwargs.setdefault("attributes", self.DEFAULT_TABLE_ATTRIBUTES) - report = AbsoluteReport(**kwargs) - outfile = os.path.join( - self.eval_dir, - get_experiment_name() + "." + report.output_format) - self.add_report(report, outfile=outfile) - self.add_step( - 'publish-absolute-report', subprocess.call, ['publish', outfile]) - - def add_comparison_table_step(self, **kwargs): - """Add a step that makes pairwise revision comparisons. 
- - Create comparative reports for all pairs of Fast Downward - revisions. Each report pairs up the runs of the same config and - lists the two absolute attribute values and their difference - for all attributes in kwargs["attributes"]. - - All *kwargs* will be passed to the CompareConfigsReport class. - If the keyword argument *attributes* is not specified, a - default list of attributes is used. :: - - exp.add_comparison_table_step(attributes=["coverage"]) - - """ - kwargs.setdefault("attributes", self.DEFAULT_TABLE_ATTRIBUTES) - - def make_comparison_tables(): - for rev1, rev2 in itertools.combinations(self._revisions, 2): - compared_configs = [] - for config in self._configs: - config_nick = config.nick - compared_configs.append( - ("%s-%s" % (rev1, config_nick), - "%s-%s" % (rev2, config_nick), - "Diff (%s)" % config_nick)) - report = ComparativeReport(compared_configs, **kwargs) - outfile = os.path.join( - self.eval_dir, - "%s-%s-%s-compare.%s" % ( - self.name, rev1, rev2, report.output_format)) - report(self.eval_dir, outfile) - - def publish_comparison_tables(): - for rev1, rev2 in itertools.combinations(self._revisions, 2): - outfile = os.path.join( - self.eval_dir, - "%s-%s-%s-compare.html" % (self.name, rev1, rev2)) - subprocess.call(["publish", outfile]) - - self.add_step("make-comparison-tables", make_comparison_tables) - self.add_step( - "publish-comparison-tables", publish_comparison_tables) - - def add_scatter_plot_step(self, relative=False, attributes=None): - """Add step creating (relative) scatter plots for all revision pairs. - - Create a scatter plot for each combination of attribute, - configuration and revisions pair. If *attributes* is not - specified, a list of common scatter plot attributes is used. - For portfolios all attributes except "cost", "coverage" and - "plan_length" will be ignored. 
:: - - exp.add_scatter_plot_step(attributes=["expansions"]) - - """ - if relative: - report_class = RelativeScatterPlotReport - scatter_dir = os.path.join(self.eval_dir, "scatter-relative") - step_name = "make-relative-scatter-plots" - else: - report_class = ScatterPlotReport - scatter_dir = os.path.join(self.eval_dir, "scatter-absolute") - step_name = "make-absolute-scatter-plots" - if attributes is None: - attributes = self.DEFAULT_SCATTER_PLOT_ATTRIBUTES - - def make_scatter_plot(config_nick, rev1, rev2, attribute): - name = "-".join([self.name, rev1, rev2, attribute, config_nick]) - print "Make scatter plot for", name - algo1 = "{}-{}".format(rev1, config_nick) - algo2 = "{}-{}".format(rev2, config_nick) - report = report_class( - filter_config=[algo1, algo2], - attributes=[attribute], - get_category=lambda run1, run2: run1["domain"], - legend_location=(1.3, 0.5)) - report( - self.eval_dir, - os.path.join(scatter_dir, rev1 + "-" + rev2, name)) - - def make_scatter_plots(): - for config in self._configs: - for rev1, rev2 in itertools.combinations(self._revisions, 2): - for attribute in self.get_supported_attributes( - config.nick, attributes): - make_scatter_plot(config.nick, rev1, rev2, attribute) - - self.add_step(step_name, make_scatter_plots) diff --git a/experiments/issue735/custom-parser.py b/experiments/issue735/custom-parser.py deleted file mode 100755 index 8cdc8c5aae..0000000000 --- a/experiments/issue735/custom-parser.py +++ /dev/null @@ -1,32 +0,0 @@ -#! 
/usr/bin/env python - -from lab.parser import Parser - - -def add_dominance_pruning_failed(content, props): - if "dominance_pruning=False" in content: - failed = False - elif "pdb_collection_construction_time" not in props: - failed = False - else: - failed = "dominance_pruning_time" not in props - props["dominance_pruning_failed"] = int(failed) - - -def main(): - print "Running custom parser" - parser = Parser() - parser.add_pattern( - "pdb_collection_construction_time", "^PDB collection construction time: (.+)s$", type=float, flags="M", required=False) - parser.add_pattern( - "dominance_pruning_time", "^Dominance pruning took (.+)s$", type=float, flags="M", required=False) - parser.add_pattern( - "dominance_pruning_pruned_subsets", "Pruned (\d+) of \d+ maximal additive subsets", type=int, required=False) - parser.add_pattern( - "dominance_pruning_pruned_pdbs", "Pruned (\d+) of \d+ PDBs", type=int, required=False) - parser.add_function(add_dominance_pruning_failed) - parser.parse() - - -main() - diff --git a/experiments/issue735/relativescatter.py b/experiments/issue735/relativescatter.py deleted file mode 100644 index f74cb6e721..0000000000 --- a/experiments/issue735/relativescatter.py +++ /dev/null @@ -1,105 +0,0 @@ -# -*- coding: utf-8 -*- - -from collections import defaultdict - -from matplotlib import ticker - -from downward.reports.scatter import ScatterPlotReport -from downward.reports.plot import PlotReport, Matplotlib, MatplotlibPlot - - -# TODO: handle outliers - -# TODO: this is mostly copied from ScatterMatplotlib (scatter.py) -class RelativeScatterMatplotlib(Matplotlib): - @classmethod - def _plot(cls, report, axes, categories, styles): - # Display grid - axes.grid(b=True, linestyle='-', color='0.75') - - has_points = False - # Generate the scatter plots - for category, coords in sorted(categories.items()): - X, Y = zip(*coords) - axes.scatter(X, Y, s=42, label=category, **styles[category]) - if X and Y: - has_points = True - - if report.xscale == 
'linear' or report.yscale == 'linear': - plot_size = report.missing_val * 1.01 - else: - plot_size = report.missing_val * 1.25 - - # make 5 ticks above and below 1 - yticks = [] - tick_step = report.ylim_top**(1/5.0) - for i in xrange(-5, 6): - yticks.append(tick_step**i) - axes.set_yticks(yticks) - axes.get_yaxis().set_major_formatter(ticker.ScalarFormatter()) - - axes.set_xlim(report.xlim_left or -1, report.xlim_right or plot_size) - axes.set_ylim(report.ylim_bottom or -1, report.ylim_top or plot_size) - - for axis in [axes.xaxis, axes.yaxis]: - MatplotlibPlot.change_axis_formatter( - axis, - report.missing_val if report.show_missing else None) - return has_points - - -class RelativeScatterPlotReport(ScatterPlotReport): - """ - Generate a scatter plot that shows a relative comparison of two - algorithms with regard to the given attribute. The attribute value - of algorithm 1 is shown on the x-axis and the relation to the value - of algorithm 2 on the y-axis. - """ - - def __init__(self, show_missing=True, get_category=None, **kwargs): - ScatterPlotReport.__init__(self, show_missing, get_category, **kwargs) - if self.output_format == 'tex': - raise "not supported" - else: - self.writer = RelativeScatterMatplotlib - - def _fill_categories(self, runs): - # We discard the *runs* parameter. 
- # Map category names to value tuples - categories = defaultdict(list) - self.ylim_bottom = 2 - self.ylim_top = 0.5 - self.xlim_left = float("inf") - for (domain, problem), runs in self.problem_runs.items(): - if len(runs) != 2: - continue - run1, run2 = runs - assert (run1['algorithm'] == self.algorithms[0] and - run2['algorithm'] == self.algorithms[1]) - val1 = run1.get(self.attribute) - val2 = run2.get(self.attribute) - if val1 is None or val2 is None: - continue - category = self.get_category(run1, run2) - assert val1 > 0, (domain, problem, self.algorithms[0], val1) - assert val2 > 0, (domain, problem, self.algorithms[1], val2) - x = val1 - y = val2 / float(val1) - - categories[category].append((x, y)) - - self.ylim_top = max(self.ylim_top, y) - self.ylim_bottom = min(self.ylim_bottom, y) - self.xlim_left = min(self.xlim_left, x) - - # center around 1 - if self.ylim_bottom < 1: - self.ylim_top = max(self.ylim_top, 1 / float(self.ylim_bottom)) - if self.ylim_top > 1: - self.ylim_bottom = min(self.ylim_bottom, 1 / float(self.ylim_top)) - return categories - - def _set_scales(self, xscale, yscale): - # ScatterPlot uses log-scaling on the x-axis by default. - PlotReport._set_scales( - self, xscale or self.attribute.scale or 'log', 'log') diff --git a/experiments/issue735/v1.py b/experiments/issue735/v1.py deleted file mode 100755 index d4b116f29a..0000000000 --- a/experiments/issue735/v1.py +++ /dev/null @@ -1,63 +0,0 @@ -#! 
/usr/bin/env python -# -*- coding: utf-8 -*- - -import os - -from lab.environments import LocalEnvironment, BaselSlurmEnvironment - -import common_setup -from common_setup import IssueConfig, IssueExperiment -from relativescatter import RelativeScatterPlotReport - -DIR = os.path.dirname(os.path.abspath(__file__)) -BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"] -REVISIONS = ["issue735-base", "issue735-v1"] -BUILD_OPTIONS = ["release32nolp"] -DRIVER_OPTIONS = ["--build", "release32nolp", "--search-time-limit", "30m"] -CONFIGS = [ - IssueConfig( - "cpdbs-sys2", - ["--search", "astar(cpdbs(systematic(2)))"], - build_options=BUILD_OPTIONS, - driver_options=DRIVER_OPTIONS), -] -SUITE = common_setup.DEFAULT_OPTIMAL_SUITE -ENVIRONMENT = BaselSlurmEnvironment( - email="jendrik.seipp@unibas.ch", - export=["PATH", "DOWNWARD_BENCHMARKS"]) - -if common_setup.is_test_run(): - SUITE = IssueExperiment.DEFAULT_TEST_SUITE - ENVIRONMENT = LocalEnvironment(processes=1) - -IssueExperiment.DEFAULT_TABLE_ATTRIBUTES += [ - "dominance_pruning_failed", - "dominance_pruning_time", - "dominance_pruning_pruned_subsets", - "dominance_pruning_pruned_pdbs", - "pdb_collection_construction_time"] - -exp = IssueExperiment( - revisions=REVISIONS, - configs=CONFIGS, - environment=ENVIRONMENT, -) -exp.add_resource("custom_parser", "custom-parser.py") -exp.add_command("run-custom-parser", ["{custom_parser}"]) -exp.add_suite(BENCHMARKS_DIR, SUITE) -exp.add_fetcher(name="parse-again", parsers=["custom-parser.py"]) -exp.add_absolute_report_step() -exp.add_comparison_table_step() - -for attribute in ["total_time"]: - for config in CONFIGS: - exp.add_report( - RelativeScatterPlotReport( - attributes=[attribute], - filter_algorithm=["{}-{}".format(rev, config.nick) for rev in REVISIONS], - get_category=lambda run1, run2: run1.get("domain"), - ), - outfile="{}-{}-{}-{}-{}.png".format(exp.name, attribute, config.nick, *REVISIONS) - ) - -exp.run_steps() diff --git a/experiments/issue735/v3-no-pruning.py 
b/experiments/issue735/v3-no-pruning.py deleted file mode 100755 index 6957a9ec24..0000000000 --- a/experiments/issue735/v3-no-pruning.py +++ /dev/null @@ -1,53 +0,0 @@ -#! /usr/bin/env python -# -*- coding: utf-8 -*- - -import os - -from lab.environments import LocalEnvironment, BaselSlurmEnvironment - -import common_setup -from common_setup import IssueConfig, IssueExperiment -from relativescatter import RelativeScatterPlotReport - -DIR = os.path.dirname(os.path.abspath(__file__)) -BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"] -REVISIONS = ["issue735-v3"] -BUILD_OPTIONS = ["release32nolp"] -DRIVER_OPTIONS = ["--build", "release32nolp"] -CONFIGS = [ - IssueConfig( - "cpdbs-{nick}-pruning-{pruning}".format(**locals()), - ["--search", "astar(cpdbs({generator}, dominance_pruning={pruning}))".format(**locals())], - build_options=BUILD_OPTIONS, - driver_options=DRIVER_OPTIONS) - for nick, generator in [("sys2", "systematic(2)"), ("hc", "hillclimbing(max_time=900)")] - for pruning in [False, True] -] -SUITE = common_setup.DEFAULT_OPTIMAL_SUITE -ENVIRONMENT = BaselSlurmEnvironment( - email="jendrik.seipp@unibas.ch", - export=["PATH", "DOWNWARD_BENCHMARKS"]) - -if common_setup.is_test_run(): - SUITE = IssueExperiment.DEFAULT_TEST_SUITE - ENVIRONMENT = LocalEnvironment(processes=1) - -IssueExperiment.DEFAULT_TABLE_ATTRIBUTES += [ - "dominance_pruning_failed", - "dominance_pruning_time", - "dominance_pruning_pruned_subsets", - "dominance_pruning_pruned_pdbs", - "pdb_collection_construction_time"] - -exp = IssueExperiment( - revisions=REVISIONS, - configs=CONFIGS, - environment=ENVIRONMENT, -) -exp.add_resource("custom_parser", "custom-parser.py") -exp.add_command("run-custom-parser", ["{custom_parser}"]) -exp.add_suite(BENCHMARKS_DIR, SUITE) -exp.add_absolute_report_step() -exp.add_comparison_table_step() - -exp.run_steps() diff --git a/experiments/issue735/v3.py b/experiments/issue735/v3.py deleted file mode 100755 index bb9b3f6e87..0000000000 --- 
a/experiments/issue735/v3.py +++ /dev/null @@ -1,74 +0,0 @@ -#! /usr/bin/env python -# -*- coding: utf-8 -*- - -import os - -from lab.environments import LocalEnvironment, BaselSlurmEnvironment - -import common_setup -from common_setup import IssueConfig, IssueExperiment -from relativescatter import RelativeScatterPlotReport - -DIR = os.path.dirname(os.path.abspath(__file__)) -BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"] -REVISIONS = ["issue735-base", "issue735-v1", "issue735-v2", "issue735-v3"] -BUILD_OPTIONS = ["release32nolp"] -DRIVER_OPTIONS = ["--build", "release32nolp"] -CONFIGS = [ - IssueConfig( - "cpdbs-sys2", - ["--search", "astar(cpdbs(systematic(2)))"], - build_options=BUILD_OPTIONS, - driver_options=DRIVER_OPTIONS), - IssueConfig( - "cpdbs-hc", - ["--search", "astar(cpdbs(hillclimbing(max_time=900)))"], - build_options=BUILD_OPTIONS, - driver_options=DRIVER_OPTIONS), -] -SUITE = common_setup.DEFAULT_OPTIMAL_SUITE -ENVIRONMENT = BaselSlurmEnvironment( - email="jendrik.seipp@unibas.ch", - export=["PATH", "DOWNWARD_BENCHMARKS"]) - -if common_setup.is_test_run(): - SUITE = IssueExperiment.DEFAULT_TEST_SUITE - ENVIRONMENT = LocalEnvironment(processes=1) - -IssueExperiment.DEFAULT_TABLE_ATTRIBUTES += [ - "dominance_pruning_failed", - "dominance_pruning_time", - "dominance_pruning_pruned_subsets", - "dominance_pruning_pruned_pdbs", - "pdb_collection_construction_time"] - -exp = IssueExperiment( - revisions=REVISIONS, - configs=CONFIGS, - environment=ENVIRONMENT, -) -exp.add_algorithm( - "issue735-v3:cpdbs-sys2-debug", - common_setup.get_repo_base(), - "issue735-v3", - ["--search", "astar(cpdbs(systematic(2)))"], - build_options=["debug32nolp"], - driver_options=["--build", "debug32nolp"]) -exp.add_resource("custom_parser", "custom-parser.py") -exp.add_command("run-custom-parser", ["{custom_parser}"]) -exp.add_suite(BENCHMARKS_DIR, SUITE) -exp.add_absolute_report_step() -exp.add_comparison_table_step() - -for attribute in ["total_time"]: - for config in 
CONFIGS: - exp.add_report( - RelativeScatterPlotReport( - attributes=[attribute], - filter_algorithm=["{}-{}".format(rev, config.nick) for rev in REVISIONS], - get_category=lambda run1, run2: run1.get("domain"), - ), - outfile="{}-{}-{}-{}-{}.png".format(exp.name, attribute, config.nick, *REVISIONS) - ) - -exp.run_steps() diff --git a/experiments/issue736/common_setup.py b/experiments/issue736/common_setup.py deleted file mode 100644 index fe0b9d655e..0000000000 --- a/experiments/issue736/common_setup.py +++ /dev/null @@ -1,398 +0,0 @@ -# -*- coding: utf-8 -*- - -import itertools -import os -import platform -import subprocess -import sys - -from lab.experiment import ARGPARSER -from lab import tools - -from downward.experiment import FastDownwardExperiment -from downward.reports.absolute import AbsoluteReport -from downward.reports.compare import ComparativeReport -from downward.reports.scatter import ScatterPlotReport - -from relativescatter import RelativeScatterPlotReport - - -def parse_args(): - ARGPARSER.add_argument( - "--test", - choices=["yes", "no", "auto"], - default="auto", - dest="test_run", - help="test experiment locally on a small suite if --test=yes or " - "--test=auto and we are not on a cluster") - return ARGPARSER.parse_args() - -ARGS = parse_args() - - -DEFAULT_OPTIMAL_SUITE = [ - 'airport', 'barman-opt11-strips', 'barman-opt14-strips', 'blocks', - 'childsnack-opt14-strips', 'depot', 'driverlog', - 'elevators-opt08-strips', 'elevators-opt11-strips', - 'floortile-opt11-strips', 'floortile-opt14-strips', 'freecell', - 'ged-opt14-strips', 'grid', 'gripper', 'hiking-opt14-strips', - 'logistics00', 'logistics98', 'miconic', 'movie', 'mprime', - 'mystery', 'nomystery-opt11-strips', 'openstacks-opt08-strips', - 'openstacks-opt11-strips', 'openstacks-opt14-strips', - 'openstacks-strips', 'parcprinter-08-strips', - 'parcprinter-opt11-strips', 'parking-opt11-strips', - 'parking-opt14-strips', 'pathways-noneg', 'pegsol-08-strips', - 'pegsol-opt11-strips', 
'pipesworld-notankage', - 'pipesworld-tankage', 'psr-small', 'rovers', 'satellite', - 'scanalyzer-08-strips', 'scanalyzer-opt11-strips', - 'sokoban-opt08-strips', 'sokoban-opt11-strips', 'storage', - 'tetris-opt14-strips', 'tidybot-opt11-strips', - 'tidybot-opt14-strips', 'tpp', 'transport-opt08-strips', - 'transport-opt11-strips', 'transport-opt14-strips', - 'trucks-strips', 'visitall-opt11-strips', 'visitall-opt14-strips', - 'woodworking-opt08-strips', 'woodworking-opt11-strips', - 'zenotravel'] - -DEFAULT_SATISFICING_SUITE = [ - 'airport', 'assembly', 'barman-sat11-strips', - 'barman-sat14-strips', 'blocks', 'cavediving-14-adl', - 'childsnack-sat14-strips', 'citycar-sat14-adl', 'depot', - 'driverlog', 'elevators-sat08-strips', 'elevators-sat11-strips', - 'floortile-sat11-strips', 'floortile-sat14-strips', 'freecell', - 'ged-sat14-strips', 'grid', 'gripper', 'hiking-sat14-strips', - 'logistics00', 'logistics98', 'maintenance-sat14-adl', 'miconic', - 'miconic-fulladl', 'miconic-simpleadl', 'movie', 'mprime', - 'mystery', 'nomystery-sat11-strips', 'openstacks', - 'openstacks-sat08-adl', 'openstacks-sat08-strips', - 'openstacks-sat11-strips', 'openstacks-sat14-strips', - 'openstacks-strips', 'optical-telegraphs', 'parcprinter-08-strips', - 'parcprinter-sat11-strips', 'parking-sat11-strips', - 'parking-sat14-strips', 'pathways', 'pathways-noneg', - 'pegsol-08-strips', 'pegsol-sat11-strips', 'philosophers', - 'pipesworld-notankage', 'pipesworld-tankage', 'psr-large', - 'psr-middle', 'psr-small', 'rovers', 'satellite', - 'scanalyzer-08-strips', 'scanalyzer-sat11-strips', 'schedule', - 'sokoban-sat08-strips', 'sokoban-sat11-strips', 'storage', - 'tetris-sat14-strips', 'thoughtful-sat14-strips', - 'tidybot-sat11-strips', 'tpp', 'transport-sat08-strips', - 'transport-sat11-strips', 'transport-sat14-strips', 'trucks', - 'trucks-strips', 'visitall-sat11-strips', 'visitall-sat14-strips', - 'woodworking-sat08-strips', 'woodworking-sat11-strips', - 'zenotravel'] - - -def 
get_script(): - """Get file name of main script.""" - return tools.get_script_path() - - -def get_script_dir(): - """Get directory of main script. - - Usually a relative directory (depends on how it was called by the user.)""" - return os.path.dirname(get_script()) - - -def get_experiment_name(): - """Get name for experiment. - - Derived from the absolute filename of the main script, e.g. - "/ham/spam/eggs.py" => "spam-eggs".""" - script = os.path.abspath(get_script()) - script_dir = os.path.basename(os.path.dirname(script)) - script_base = os.path.splitext(os.path.basename(script))[0] - return "%s-%s" % (script_dir, script_base) - - -def get_data_dir(): - """Get data dir for the experiment. - - This is the subdirectory "data" of the directory containing - the main script.""" - return os.path.join(get_script_dir(), "data", get_experiment_name()) - - -def get_repo_base(): - """Get base directory of the repository, as an absolute path. - - Search upwards in the directory tree from the main script until a - directory with a subdirectory named ".hg" is found. 
- - Abort if the repo base cannot be found.""" - path = os.path.abspath(get_script_dir()) - while os.path.dirname(path) != path: - if os.path.exists(os.path.join(path, ".hg")): - return path - path = os.path.dirname(path) - sys.exit("repo base could not be found") - - -def is_running_on_cluster(): - node = platform.node() - return ( - "cluster" in node or - node.startswith("gkigrid") or - node in ["habakuk", "turtur"]) - - -def is_running_on_cluster_login_node(): - return platform.node() == "login20.cluster.bc2.ch" - - -def can_publish(): - return is_running_on_cluster_login_node() or not is_running_on_cluster() - - -def publish(report_file): - if can_publish(): - subprocess.call(["publish", report_file]) - else: - print "publishing reports is not supported on this node" - - -def is_test_run(): - return ARGS.test_run == "yes" or ( - ARGS.test_run == "auto" and not is_running_on_cluster()) - - -def get_algo_nick(revision, config_nick): - return "{revision}-{config_nick}".format(**locals()) - - -class IssueConfig(object): - """Hold information about a planner configuration. - - See FastDownwardExperiment.add_algorithm() for documentation of the - constructor's options. 
- - """ - def __init__(self, nick, component_options, - build_options=None, driver_options=None): - self.nick = nick - self.component_options = component_options - self.build_options = build_options - self.driver_options = driver_options - - -class IssueExperiment(FastDownwardExperiment): - """Subclass of FastDownwardExperiment with some convenience features.""" - - DEFAULT_TEST_SUITE = ["gripper:prob01.pddl"] - - DEFAULT_TABLE_ATTRIBUTES = [ - "cost", - "coverage", - "error", - "evaluations", - "expansions", - "expansions_until_last_jump", - "generated", - "memory", - "quality", - "run_dir", - "score_evaluations", - "score_expansions", - "score_generated", - "score_memory", - "score_search_time", - "score_total_time", - "search_time", - "total_time", - ] - - DEFAULT_SCATTER_PLOT_ATTRIBUTES = [ - "evaluations", - "expansions", - "expansions_until_last_jump", - "initial_h_value", - "memory", - "search_time", - "total_time", - ] - - PORTFOLIO_ATTRIBUTES = [ - "cost", - "coverage", - "error", - "plan_length", - "run_dir", - ] - - def __init__(self, revisions=None, configs=None, path=None, **kwargs): - """ - - You can either specify both *revisions* and *configs* or none - of them. If they are omitted, you will need to call - exp.add_algorithm() manually. - - If *revisions* is given, it must be a non-empty list of - revision identifiers, which specify which planner versions to - use in the experiment. The same versions are used for - translator, preprocessor and search. :: - - IssueExperiment(revisions=["issue123", "4b3d581643"], ...) - - If *configs* is given, it must be a non-empty list of - IssueConfig objects. :: - - IssueExperiment(..., configs=[ - IssueConfig("ff", ["--search", "eager_greedy(ff())"]), - IssueConfig( - "lama", [], - driver_options=["--alias", "seq-sat-lama-2011"]), - ]) - - If *path* is specified, it must be the path to where the - experiment should be built (e.g. - /home/john/experiments/issue123/exp01/). 
If omitted, the - experiment path is derived automatically from the main - script's filename. Example:: - - script = experiments/issue123/exp01.py --> - path = experiments/issue123/data/issue123-exp01/ - - """ - - path = path or get_data_dir() - - FastDownwardExperiment.__init__(self, path=path, **kwargs) - - if (revisions and not configs) or (not revisions and configs): - raise ValueError( - "please provide either both or none of revisions and configs") - - for rev in revisions: - for config in configs: - self.add_algorithm( - get_algo_nick(rev, config.nick), - get_repo_base(), - rev, - config.component_options, - build_options=config.build_options, - driver_options=config.driver_options) - - self._revisions = revisions - self._configs = configs - - @classmethod - def _is_portfolio(cls, config_nick): - return "fdss" in config_nick - - @classmethod - def get_supported_attributes(cls, config_nick, attributes): - if cls._is_portfolio(config_nick): - return [attr for attr in attributes - if attr in cls.PORTFOLIO_ATTRIBUTES] - return attributes - - def add_absolute_report_step(self, **kwargs): - """Add step that makes an absolute report. - - Absolute reports are useful for experiments that don't compare - revisions. - - The report is written to the experiment evaluation directory. - - All *kwargs* will be passed to the AbsoluteReport class. If the - keyword argument *attributes* is not specified, a default list - of attributes is used. :: - - exp.add_absolute_report_step(attributes=["coverage"]) - - """ - kwargs.setdefault("attributes", self.DEFAULT_TABLE_ATTRIBUTES) - report = AbsoluteReport(**kwargs) - outfile = os.path.join( - self.eval_dir, - get_experiment_name() + "." + report.output_format) - self.add_report(report, name="make-absolute-report", outfile=outfile) - self.add_step("publish-absolute-report", publish, outfile) - - def add_comparison_table_step(self, **kwargs): - """Add a step that makes pairwise revision comparisons. 
- - Create comparative reports for all pairs of Fast Downward - revisions. Each report pairs up the runs of the same config and - lists the two absolute attribute values and their difference - for all attributes in kwargs["attributes"]. - - All *kwargs* will be passed to the CompareConfigsReport class. - If the keyword argument *attributes* is not specified, a - default list of attributes is used. :: - - exp.add_comparison_table_step(attributes=["coverage"]) - - """ - kwargs.setdefault("attributes", self.DEFAULT_TABLE_ATTRIBUTES) - - def get_revision_pairs_and_files(): - for rev1, rev2 in itertools.combinations(self._revisions, 2): - outfile = os.path.join( - self.eval_dir, - "%s-%s-%s-compare.html" % (self.name, rev1, rev2)) - yield (rev1, rev2, outfile) - - def make_comparison_tables(): - for rev1, rev2, outfile in get_revision_pairs_and_files(): - compared_configs = [] - for config in self._configs: - config_nick = config.nick - compared_configs.append( - ("%s-%s" % (rev1, config_nick), - "%s-%s" % (rev2, config_nick), - "Diff (%s)" % config_nick)) - report = ComparativeReport(compared_configs, **kwargs) - report(self.eval_dir, outfile) - - def publish_comparison_tables(): - for _, _, outfile in get_revision_pairs_and_files(): - publish(outfile) - - self.add_step("make-comparison-tables", make_comparison_tables) - self.add_step("publish-comparison-tables", publish_comparison_tables) - - def add_scatter_plot_step(self, relative=False, attributes=None): - """Add step creating (relative) scatter plots for all revision pairs. - - Create a scatter plot for each combination of attribute, - configuration and revisions pair. If *attributes* is not - specified, a list of common scatter plot attributes is used. - For portfolios all attributes except "cost", "coverage" and - "plan_length" will be ignored. 
:: - - exp.add_scatter_plot_step(attributes=["expansions"]) - - """ - if relative: - report_class = RelativeScatterPlotReport - scatter_dir = os.path.join(self.eval_dir, "scatter-relative") - step_name = "make-relative-scatter-plots" - else: - report_class = ScatterPlotReport - scatter_dir = os.path.join(self.eval_dir, "scatter-absolute") - step_name = "make-absolute-scatter-plots" - if attributes is None: - attributes = self.DEFAULT_SCATTER_PLOT_ATTRIBUTES - - def make_scatter_plot(config_nick, rev1, rev2, attribute): - name = "-".join([self.name, rev1, rev2, attribute, config_nick]) - print "Make scatter plot for", name - algo1 = "{}-{}".format(rev1, config_nick) - algo2 = "{}-{}".format(rev2, config_nick) - report = report_class( - filter_config=[algo1, algo2], - attributes=[attribute], - get_category=lambda run1, run2: run1["domain"], - legend_location=(1.3, 0.5)) - report( - self.eval_dir, - os.path.join(scatter_dir, rev1 + "-" + rev2, name)) - - def make_scatter_plots(): - for config in self._configs: - for rev1, rev2 in itertools.combinations(self._revisions, 2): - for attribute in self.get_supported_attributes( - config.nick, attributes): - make_scatter_plot(config.nick, rev1, rev2, attribute) - - self.add_step(step_name, make_scatter_plots) diff --git a/experiments/issue736/relativescatter.py b/experiments/issue736/relativescatter.py deleted file mode 100644 index f74cb6e721..0000000000 --- a/experiments/issue736/relativescatter.py +++ /dev/null @@ -1,105 +0,0 @@ -# -*- coding: utf-8 -*- - -from collections import defaultdict - -from matplotlib import ticker - -from downward.reports.scatter import ScatterPlotReport -from downward.reports.plot import PlotReport, Matplotlib, MatplotlibPlot - - -# TODO: handle outliers - -# TODO: this is mostly copied from ScatterMatplotlib (scatter.py) -class RelativeScatterMatplotlib(Matplotlib): - @classmethod - def _plot(cls, report, axes, categories, styles): - # Display grid - axes.grid(b=True, linestyle='-', 
color='0.75') - - has_points = False - # Generate the scatter plots - for category, coords in sorted(categories.items()): - X, Y = zip(*coords) - axes.scatter(X, Y, s=42, label=category, **styles[category]) - if X and Y: - has_points = True - - if report.xscale == 'linear' or report.yscale == 'linear': - plot_size = report.missing_val * 1.01 - else: - plot_size = report.missing_val * 1.25 - - # make 5 ticks above and below 1 - yticks = [] - tick_step = report.ylim_top**(1/5.0) - for i in xrange(-5, 6): - yticks.append(tick_step**i) - axes.set_yticks(yticks) - axes.get_yaxis().set_major_formatter(ticker.ScalarFormatter()) - - axes.set_xlim(report.xlim_left or -1, report.xlim_right or plot_size) - axes.set_ylim(report.ylim_bottom or -1, report.ylim_top or plot_size) - - for axis in [axes.xaxis, axes.yaxis]: - MatplotlibPlot.change_axis_formatter( - axis, - report.missing_val if report.show_missing else None) - return has_points - - -class RelativeScatterPlotReport(ScatterPlotReport): - """ - Generate a scatter plot that shows a relative comparison of two - algorithms with regard to the given attribute. The attribute value - of algorithm 1 is shown on the x-axis and the relation to the value - of algorithm 2 on the y-axis. - """ - - def __init__(self, show_missing=True, get_category=None, **kwargs): - ScatterPlotReport.__init__(self, show_missing, get_category, **kwargs) - if self.output_format == 'tex': - raise "not supported" - else: - self.writer = RelativeScatterMatplotlib - - def _fill_categories(self, runs): - # We discard the *runs* parameter. 
- # Map category names to value tuples - categories = defaultdict(list) - self.ylim_bottom = 2 - self.ylim_top = 0.5 - self.xlim_left = float("inf") - for (domain, problem), runs in self.problem_runs.items(): - if len(runs) != 2: - continue - run1, run2 = runs - assert (run1['algorithm'] == self.algorithms[0] and - run2['algorithm'] == self.algorithms[1]) - val1 = run1.get(self.attribute) - val2 = run2.get(self.attribute) - if val1 is None or val2 is None: - continue - category = self.get_category(run1, run2) - assert val1 > 0, (domain, problem, self.algorithms[0], val1) - assert val2 > 0, (domain, problem, self.algorithms[1], val2) - x = val1 - y = val2 / float(val1) - - categories[category].append((x, y)) - - self.ylim_top = max(self.ylim_top, y) - self.ylim_bottom = min(self.ylim_bottom, y) - self.xlim_left = min(self.xlim_left, x) - - # center around 1 - if self.ylim_bottom < 1: - self.ylim_top = max(self.ylim_top, 1 / float(self.ylim_bottom)) - if self.ylim_top > 1: - self.ylim_bottom = min(self.ylim_bottom, 1 / float(self.ylim_top)) - return categories - - def _set_scales(self, xscale, yscale): - # ScatterPlot uses log-scaling on the x-axis by default. 
- PlotReport._set_scales( - self, xscale or self.attribute.scale or 'log', 'log') diff --git a/experiments/issue736/translator_additional_parser.py b/experiments/issue736/translator_additional_parser.py deleted file mode 100755 index 191ada9490..0000000000 --- a/experiments/issue736/translator_additional_parser.py +++ /dev/null @@ -1,12 +0,0 @@ -#!/usr/bin/env python - -import hashlib - -from lab.parser import Parser - -def add_hash_value(content, props): - props['translator_output_sas_xz_hash'] = hashlib.sha512(content).hexdigest() - -parser = Parser() -parser.add_function(add_hash_value, file="output.sas.xz") -parser.parse() diff --git a/experiments/issue736/v1.py b/experiments/issue736/v1.py deleted file mode 100755 index 59e76ca087..0000000000 --- a/experiments/issue736/v1.py +++ /dev/null @@ -1,61 +0,0 @@ -#! /usr/bin/env python2 -# -*- coding: utf-8 -*- - -import os - -from lab.environments import LocalEnvironment, BaselSlurmEnvironment -from lab import tools - -from downward.reports.compare import ComparativeReport -from downward.reports import PlanningReport - -import common_setup -from common_setup import IssueConfig, IssueExperiment - - -DIR = os.path.dirname(os.path.abspath(__file__)) -BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"] -REVISIONS = ["issue736-base", "issue736-v1"] -CONFIGS = [ - IssueConfig( - "translate-only", - [], - driver_options=["--translate"]) -] -SUITE = set(common_setup.DEFAULT_OPTIMAL_SUITE + common_setup.DEFAULT_SATISFICING_SUITE) -ENVIRONMENT = BaselSlurmEnvironment(email="florian.pommerening@unibas.ch") - -if common_setup.is_test_run(): - SUITE = IssueExperiment.DEFAULT_TEST_SUITE - ENVIRONMENT = LocalEnvironment(processes=1) - -exp = IssueExperiment( - revisions=REVISIONS, - configs=CONFIGS, - environment=ENVIRONMENT, -) -del exp.commands["parse-search"] -exp.add_suite(BENCHMARKS_DIR, SUITE) - -exp.add_resource("translator_additional_parser", "translator_additional_parser.py", dest="translator_additional_parser.py") 
-exp.add_command("translator_additional_parser", ["{translator_additional_parser}"]) - -class TranslatorDiffReport(PlanningReport): - def get_cell(self, run): - return ";".join(run.get(attr) for attr in self.attributes) - - def get_text(self): - lines = [] - for runs in self.problem_runs.values(): - hashes = set([r.get("translator_output_sas_xz_hash") for r in runs]) - if len(hashes) > 1 or None in hashes: - lines.append(";".join([self.get_cell(r) for r in runs])) - return "\n".join(lines) - - -exp.add_report(TranslatorDiffReport( - attributes=["domain", "problem", "algorithm", "run_dir"] - ), outfile="different_output_sas.csv" -) - -exp.run_steps() diff --git a/experiments/issue739/common_setup.py b/experiments/issue739/common_setup.py deleted file mode 100644 index b0a8bcf7ee..0000000000 --- a/experiments/issue739/common_setup.py +++ /dev/null @@ -1,388 +0,0 @@ -# -*- coding: utf-8 -*- - -import itertools -import os -import platform -import subprocess -import sys - -from lab.experiment import ARGPARSER -from lab import tools - -from downward.experiment import FastDownwardExperiment -from downward.reports.absolute import AbsoluteReport -from downward.reports.compare import ComparativeReport -from downward.reports.scatter import ScatterPlotReport - -try: - from relativescatter import RelativeScatterPlotReport - matplotlib = True -except ImportError: - print 'matplotlib not availabe, scatter plots not available' - matplotlib = False - - -def parse_args(): - ARGPARSER.add_argument( - "--test", - choices=["yes", "no", "auto"], - default="auto", - dest="test_run", - help="test experiment locally on a small suite if --test=yes or " - "--test=auto and we are not on a cluster") - return ARGPARSER.parse_args() - -ARGS = parse_args() - - -DEFAULT_OPTIMAL_SUITE = [ - 'airport', 'barman-opt11-strips', 'barman-opt14-strips', 'blocks', - 'childsnack-opt14-strips', 'depot', 'driverlog', - 'elevators-opt08-strips', 'elevators-opt11-strips', - 'floortile-opt11-strips', 
'floortile-opt14-strips', 'freecell', - 'ged-opt14-strips', 'grid', 'gripper', 'hiking-opt14-strips', - 'logistics00', 'logistics98', 'miconic', 'movie', 'mprime', - 'mystery', 'nomystery-opt11-strips', 'openstacks-opt08-strips', - 'openstacks-opt11-strips', 'openstacks-opt14-strips', - 'openstacks-strips', 'parcprinter-08-strips', - 'parcprinter-opt11-strips', 'parking-opt11-strips', - 'parking-opt14-strips', 'pathways-noneg', 'pegsol-08-strips', - 'pegsol-opt11-strips', 'pipesworld-notankage', - 'pipesworld-tankage', 'psr-small', 'rovers', 'satellite', - 'scanalyzer-08-strips', 'scanalyzer-opt11-strips', - 'sokoban-opt08-strips', 'sokoban-opt11-strips', 'storage', - 'tetris-opt14-strips', 'tidybot-opt11-strips', - 'tidybot-opt14-strips', 'tpp', 'transport-opt08-strips', - 'transport-opt11-strips', 'transport-opt14-strips', - 'trucks-strips', 'visitall-opt11-strips', 'visitall-opt14-strips', - 'woodworking-opt08-strips', 'woodworking-opt11-strips', - 'zenotravel'] - -DEFAULT_SATISFICING_SUITE = [ - 'airport', 'assembly', 'barman-sat11-strips', - 'barman-sat14-strips', 'blocks', 'cavediving-14-adl', - 'childsnack-sat14-strips', 'citycar-sat14-adl', 'depot', - 'driverlog', 'elevators-sat08-strips', 'elevators-sat11-strips', - 'floortile-sat11-strips', 'floortile-sat14-strips', 'freecell', - 'ged-sat14-strips', 'grid', 'gripper', 'hiking-sat14-strips', - 'logistics00', 'logistics98', 'maintenance-sat14-adl', 'miconic', - 'miconic-fulladl', 'miconic-simpleadl', 'movie', 'mprime', - 'mystery', 'nomystery-sat11-strips', 'openstacks', - 'openstacks-sat08-adl', 'openstacks-sat08-strips', - 'openstacks-sat11-strips', 'openstacks-sat14-strips', - 'openstacks-strips', 'optical-telegraphs', 'parcprinter-08-strips', - 'parcprinter-sat11-strips', 'parking-sat11-strips', - 'parking-sat14-strips', 'pathways', 'pathways-noneg', - 'pegsol-08-strips', 'pegsol-sat11-strips', 'philosophers', - 'pipesworld-notankage', 'pipesworld-tankage', 'psr-large', - 'psr-middle', 'psr-small', 
'rovers', 'satellite', - 'scanalyzer-08-strips', 'scanalyzer-sat11-strips', 'schedule', - 'sokoban-sat08-strips', 'sokoban-sat11-strips', 'storage', - 'tetris-sat14-strips', 'thoughtful-sat14-strips', - 'tidybot-sat11-strips', 'tpp', 'transport-sat08-strips', - 'transport-sat11-strips', 'transport-sat14-strips', 'trucks', - 'trucks-strips', 'visitall-sat11-strips', 'visitall-sat14-strips', - 'woodworking-sat08-strips', 'woodworking-sat11-strips', - 'zenotravel'] - - -def get_script(): - """Get file name of main script.""" - return tools.get_script_path() - - -def get_script_dir(): - """Get directory of main script. - - Usually a relative directory (depends on how it was called by the user.)""" - return os.path.dirname(get_script()) - - -def get_experiment_name(): - """Get name for experiment. - - Derived from the absolute filename of the main script, e.g. - "/ham/spam/eggs.py" => "spam-eggs".""" - script = os.path.abspath(get_script()) - script_dir = os.path.basename(os.path.dirname(script)) - script_base = os.path.splitext(os.path.basename(script))[0] - return "%s-%s" % (script_dir, script_base) - - -def get_data_dir(): - """Get data dir for the experiment. - - This is the subdirectory "data" of the directory containing - the main script.""" - return os.path.join(get_script_dir(), "data", get_experiment_name()) - - -def get_repo_base(): - """Get base directory of the repository, as an absolute path. - - Search upwards in the directory tree from the main script until a - directory with a subdirectory named ".hg" is found. 
- - Abort if the repo base cannot be found.""" - path = os.path.abspath(get_script_dir()) - while os.path.dirname(path) != path: - if os.path.exists(os.path.join(path, ".hg")): - return path - path = os.path.dirname(path) - sys.exit("repo base could not be found") - - -def is_running_on_cluster(): - node = platform.node() - return node.endswith(".scicore.unibas.ch") or node.endswith(".cluster.bc2.ch") - - -def is_test_run(): - return ARGS.test_run == "yes" or ( - ARGS.test_run == "auto" and not is_running_on_cluster()) - - -def get_algo_nick(revision, config_nick): - return "{revision}-{config_nick}".format(**locals()) - - -class IssueConfig(object): - """Hold information about a planner configuration. - - See FastDownwardExperiment.add_algorithm() for documentation of the - constructor's options. - - """ - def __init__(self, nick, component_options, - build_options=None, driver_options=None): - self.nick = nick - self.component_options = component_options - self.build_options = build_options - self.driver_options = driver_options - - -class IssueExperiment(FastDownwardExperiment): - """Subclass of FastDownwardExperiment with some convenience features.""" - - DEFAULT_TEST_SUITE = ["depot:p01.pddl", "gripper:prob01.pddl"] - - DEFAULT_TABLE_ATTRIBUTES = [ - "cost", - "coverage", - "error", - "evaluations", - "expansions", - "expansions_until_last_jump", - "generated", - "memory", - "quality", - "run_dir", - "score_evaluations", - "score_expansions", - "score_generated", - "score_memory", - "score_search_time", - "score_total_time", - "search_time", - "total_time", - ] - - DEFAULT_SCATTER_PLOT_ATTRIBUTES = [ - "evaluations", - "expansions", - "expansions_until_last_jump", - "initial_h_value", - "memory", - "search_time", - "total_time", - ] - - PORTFOLIO_ATTRIBUTES = [ - "cost", - "coverage", - "error", - "plan_length", - "run_dir", - ] - - def __init__(self, revisions=None, configs=None, path=None, **kwargs): - """ - - You can either specify both *revisions* and 
*configs* or none - of them. If they are omitted, you will need to call - exp.add_algorithm() manually. - - If *revisions* is given, it must be a non-empty list of - revision identifiers, which specify which planner versions to - use in the experiment. The same versions are used for - translator, preprocessor and search. :: - - IssueExperiment(revisions=["issue123", "4b3d581643"], ...) - - If *configs* is given, it must be a non-empty list of - IssueConfig objects. :: - - IssueExperiment(..., configs=[ - IssueConfig("ff", ["--search", "eager_greedy(ff())"]), - IssueConfig( - "lama", [], - driver_options=["--alias", "seq-sat-lama-2011"]), - ]) - - If *path* is specified, it must be the path to where the - experiment should be built (e.g. - /home/john/experiments/issue123/exp01/). If omitted, the - experiment path is derived automatically from the main - script's filename. Example:: - - script = experiments/issue123/exp01.py --> - path = experiments/issue123/data/issue123-exp01/ - - """ - - path = path or get_data_dir() - - FastDownwardExperiment.__init__(self, path=path, **kwargs) - - if (revisions and not configs) or (not revisions and configs): - raise ValueError( - "please provide either both or none of revisions and configs") - - for rev in revisions: - for config in configs: - self.add_algorithm( - get_algo_nick(rev, config.nick), - get_repo_base(), - rev, - config.component_options, - build_options=config.build_options, - driver_options=config.driver_options) - - self._revisions = revisions - self._configs = configs - - @classmethod - def _is_portfolio(cls, config_nick): - return "fdss" in config_nick - - @classmethod - def get_supported_attributes(cls, config_nick, attributes): - if cls._is_portfolio(config_nick): - return [attr for attr in attributes - if attr in cls.PORTFOLIO_ATTRIBUTES] - return attributes - - def add_absolute_report_step(self, **kwargs): - """Add step that makes an absolute report. 
- - Absolute reports are useful for experiments that don't compare - revisions. - - The report is written to the experiment evaluation directory. - - All *kwargs* will be passed to the AbsoluteReport class. If the - keyword argument *attributes* is not specified, a default list - of attributes is used. :: - - exp.add_absolute_report_step(attributes=["coverage"]) - - """ - kwargs.setdefault("attributes", self.DEFAULT_TABLE_ATTRIBUTES) - report = AbsoluteReport(**kwargs) - outfile = os.path.join( - self.eval_dir, - get_experiment_name() + "." + report.output_format) - self.add_report(report, outfile=outfile) - self.add_step( - 'publish-absolute-report', subprocess.call, ['publish', outfile]) - - def add_comparison_table_step(self, **kwargs): - """Add a step that makes pairwise revision comparisons. - - Create comparative reports for all pairs of Fast Downward - revisions. Each report pairs up the runs of the same config and - lists the two absolute attribute values and their difference - for all attributes in kwargs["attributes"]. - - All *kwargs* will be passed to the CompareConfigsReport class. - If the keyword argument *attributes* is not specified, a - default list of attributes is used. 
:: - - exp.add_comparison_table_step(attributes=["coverage"]) - - """ - kwargs.setdefault("attributes", self.DEFAULT_TABLE_ATTRIBUTES) - - def make_comparison_tables(): - for rev1, rev2 in itertools.combinations(self._revisions, 2): - compared_configs = [] - for config in self._configs: - config_nick = config.nick - compared_configs.append( - ("%s-%s" % (rev1, config_nick), - "%s-%s" % (rev2, config_nick), - "Diff (%s)" % config_nick)) - report = ComparativeReport(compared_configs, **kwargs) - outfile = os.path.join( - self.eval_dir, - "%s-%s-%s-compare.%s" % ( - self.name, rev1, rev2, report.output_format)) - report(self.eval_dir, outfile) - - def publish_comparison_tables(): - for rev1, rev2 in itertools.combinations(self._revisions, 2): - outfile = os.path.join( - self.eval_dir, - "%s-%s-%s-compare.html" % (self.name, rev1, rev2)) - subprocess.call(["publish", outfile]) - - self.add_step("make-comparison-tables", make_comparison_tables) - self.add_step( - "publish-comparison-tables", publish_comparison_tables) - - def add_scatter_plot_step(self, relative=False, attributes=None): - """Add step creating (relative) scatter plots for all revision pairs. - - Create a scatter plot for each combination of attribute, - configuration and revisions pair. If *attributes* is not - specified, a list of common scatter plot attributes is used. - For portfolios all attributes except "cost", "coverage" and - "plan_length" will be ignored. 
:: - - exp.add_scatter_plot_step(attributes=["expansions"]) - - """ - if matplotlib: - if relative: - report_class = RelativeScatterPlotReport - scatter_dir = os.path.join(self.eval_dir, "scatter-relative") - step_name = "make-relative-scatter-plots" - else: - report_class = ScatterPlotReport - scatter_dir = os.path.join(self.eval_dir, "scatter-absolute") - step_name = "make-absolute-scatter-plots" - if attributes is None: - attributes = self.DEFAULT_SCATTER_PLOT_ATTRIBUTES - - def make_scatter_plot(config_nick, rev1, rev2, attribute): - name = "-".join([self.name, rev1, rev2, attribute, config_nick]) - print "Make scatter plot for", name - algo1 = "{}-{}".format(rev1, config_nick) - algo2 = "{}-{}".format(rev2, config_nick) - report = report_class( - filter_config=[algo1, algo2], - attributes=[attribute], - get_category=lambda run1, run2: run1["domain"], - legend_location=(1.3, 0.5)) - report( - self.eval_dir, - os.path.join(scatter_dir, rev1 + "-" + rev2, name)) - - def make_scatter_plots(): - for config in self._configs: - for rev1, rev2 in itertools.combinations(self._revisions, 2): - for attribute in self.get_supported_attributes( - config.nick, attributes): - make_scatter_plot(config.nick, rev1, rev2, attribute) - - self.add_step(step_name, make_scatter_plots) diff --git a/experiments/issue739/exit-code-converter-parser.py b/experiments/issue739/exit-code-converter-parser.py deleted file mode 100755 index 36f3aacfb0..0000000000 --- a/experiments/issue739/exit-code-converter-parser.py +++ /dev/null @@ -1,25 +0,0 @@ -#! 
/usr/bin/env python - -from lab.parser import Parser - -parser = Parser() - -LEGACY_TO_NEW_EXIT_CODES = { - 'critical-error': 'search-critical-error', - 'input-error': 'search-input-error', - 'unsupported-feature-requested': 'search-unsupported', - 'unsolvable': 'search-unsolvable', - 'incomplete-search-found-no-plan': 'search-unsolvable-incomplete', - 'out-of-memory': 'search-out-of-memory', - 'timeout': 'search-out-of-time', - 'timeout-and-out-of-memory': 'search-out-of-memory-and-time', -} - -def convert_legacy_to_new_exit_codes(content, props): - error = props['error'] - if error in LEGACY_TO_NEW_EXIT_CODES: - props['error'] = LEGACY_TO_NEW_EXIT_CODES[error] - -parser.add_function(convert_legacy_to_new_exit_codes) - -parser.parse() diff --git a/experiments/issue739/relativescatter.py b/experiments/issue739/relativescatter.py deleted file mode 100644 index f74cb6e721..0000000000 --- a/experiments/issue739/relativescatter.py +++ /dev/null @@ -1,105 +0,0 @@ -# -*- coding: utf-8 -*- - -from collections import defaultdict - -from matplotlib import ticker - -from downward.reports.scatter import ScatterPlotReport -from downward.reports.plot import PlotReport, Matplotlib, MatplotlibPlot - - -# TODO: handle outliers - -# TODO: this is mostly copied from ScatterMatplotlib (scatter.py) -class RelativeScatterMatplotlib(Matplotlib): - @classmethod - def _plot(cls, report, axes, categories, styles): - # Display grid - axes.grid(b=True, linestyle='-', color='0.75') - - has_points = False - # Generate the scatter plots - for category, coords in sorted(categories.items()): - X, Y = zip(*coords) - axes.scatter(X, Y, s=42, label=category, **styles[category]) - if X and Y: - has_points = True - - if report.xscale == 'linear' or report.yscale == 'linear': - plot_size = report.missing_val * 1.01 - else: - plot_size = report.missing_val * 1.25 - - # make 5 ticks above and below 1 - yticks = [] - tick_step = report.ylim_top**(1/5.0) - for i in xrange(-5, 6): - 
yticks.append(tick_step**i) - axes.set_yticks(yticks) - axes.get_yaxis().set_major_formatter(ticker.ScalarFormatter()) - - axes.set_xlim(report.xlim_left or -1, report.xlim_right or plot_size) - axes.set_ylim(report.ylim_bottom or -1, report.ylim_top or plot_size) - - for axis in [axes.xaxis, axes.yaxis]: - MatplotlibPlot.change_axis_formatter( - axis, - report.missing_val if report.show_missing else None) - return has_points - - -class RelativeScatterPlotReport(ScatterPlotReport): - """ - Generate a scatter plot that shows a relative comparison of two - algorithms with regard to the given attribute. The attribute value - of algorithm 1 is shown on the x-axis and the relation to the value - of algorithm 2 on the y-axis. - """ - - def __init__(self, show_missing=True, get_category=None, **kwargs): - ScatterPlotReport.__init__(self, show_missing, get_category, **kwargs) - if self.output_format == 'tex': - raise "not supported" - else: - self.writer = RelativeScatterMatplotlib - - def _fill_categories(self, runs): - # We discard the *runs* parameter. 
- # Map category names to value tuples - categories = defaultdict(list) - self.ylim_bottom = 2 - self.ylim_top = 0.5 - self.xlim_left = float("inf") - for (domain, problem), runs in self.problem_runs.items(): - if len(runs) != 2: - continue - run1, run2 = runs - assert (run1['algorithm'] == self.algorithms[0] and - run2['algorithm'] == self.algorithms[1]) - val1 = run1.get(self.attribute) - val2 = run2.get(self.attribute) - if val1 is None or val2 is None: - continue - category = self.get_category(run1, run2) - assert val1 > 0, (domain, problem, self.algorithms[0], val1) - assert val2 > 0, (domain, problem, self.algorithms[1], val2) - x = val1 - y = val2 / float(val1) - - categories[category].append((x, y)) - - self.ylim_top = max(self.ylim_top, y) - self.ylim_bottom = min(self.ylim_bottom, y) - self.xlim_left = min(self.xlim_left, x) - - # center around 1 - if self.ylim_bottom < 1: - self.ylim_top = max(self.ylim_top, 1 / float(self.ylim_bottom)) - if self.ylim_top > 1: - self.ylim_bottom = min(self.ylim_bottom, 1 / float(self.ylim_top)) - return categories - - def _set_scales(self, xscale, yscale): - # ScatterPlot uses log-scaling on the x-axis by default. - PlotReport._set_scales( - self, xscale or self.attribute.scale or 'log', 'log') diff --git a/experiments/issue739/v1.py b/experiments/issue739/v1.py deleted file mode 100755 index d8c3f40ce7..0000000000 --- a/experiments/issue739/v1.py +++ /dev/null @@ -1,39 +0,0 @@ -#! 
#! /usr/bin/env python
# -*- coding: utf-8 -*-
"""issue739 v1: compare issue739-base vs. issue739-v1 under driver-imposed
time/memory limits for the translator and the search component."""

import os

from lab.environments import LocalEnvironment, BaselSlurmEnvironment

import common_setup
from common_setup import IssueConfig, IssueExperiment
from relativescatter import RelativeScatterPlotReport

DIR = os.path.dirname(os.path.abspath(__file__))
BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"]
REVISIONS = ["issue739-base", "issue739-v1"]

# Exercise each driver limit option once, plus an unlimited translate run.
CONFIGS = [
    IssueConfig('translate', [], driver_options=['--translate']),
    IssueConfig(
        'translate-time-limit', [],
        driver_options=['--translate-time-limit', '5s', '--translate']),
    IssueConfig(
        'translate-memory-limit', [],
        driver_options=['--translate-memory-limit', '100M', '--translate']),
    IssueConfig(
        'search-time-limit', ['--search', 'astar(lmcut())'],
        driver_options=['--search-time-limit', '20s']),
    IssueConfig(
        'search-memory-limit', ['--search', 'astar(lmcut())'],
        driver_options=['--search-memory-limit', '100M']),
]

SUITE = common_setup.DEFAULT_OPTIMAL_SUITE
ENVIRONMENT = BaselSlurmEnvironment(
    email="silvan.sievers@unibas.ch",
    export=["PATH", "DOWNWARD_BENCHMARKS"])

if common_setup.is_test_run():
    # Local smoke test instead of the full grid run.
    SUITE = ['gripper:prob10.pddl', 'mystery:prob07.pddl']
    ENVIRONMENT = LocalEnvironment(processes=4)

exp = IssueExperiment(
    revisions=REVISIONS,
    configs=CONFIGS,
    environment=ENVIRONMENT,
)
exp.add_suite(BENCHMARKS_DIR, SUITE)
exp.add_resource(
    'exit_code_converter_parser', 'exit-code-converter-parser.py',
    dest='exit-code-converter-parser.py')
exp.add_command('exit-code-converter-parser', ['{exit_code_converter_parser}'])
exp.add_comparison_table_step()

exp.run_steps()
#! /usr/bin/env python
# -*- coding: utf-8 -*-
"""issue739 v2 (search): compare issue739-base vs. issue739-v2 with
driver-imposed search time/memory limits on blind A*."""

import os

from lab.environments import LocalEnvironment, BaselSlurmEnvironment

import common_setup
from common_setup import IssueConfig, IssueExperiment
from relativescatter import RelativeScatterPlotReport

DIR = os.path.dirname(os.path.abspath(__file__))
BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"]
REVISIONS = ["issue739-base", "issue739-v2"]

CONFIGS = [
    IssueConfig(
        'search-time-limit', ['--search', 'astar(blind())'],
        driver_options=['--search-time-limit', '20s']),
    IssueConfig(
        'search-memory-limit', ['--search', 'astar(blind())'],
        driver_options=['--search-memory-limit', '100M']),
]

SUITE = common_setup.DEFAULT_OPTIMAL_SUITE
ENVIRONMENT = BaselSlurmEnvironment(
    email="silvan.sievers@unibas.ch",
    export=["PATH", "DOWNWARD_BENCHMARKS"])

if common_setup.is_test_run():
    # Local smoke test instead of the full grid run.
    SUITE = ['gripper:prob10.pddl', 'mystery:prob07.pddl']
    ENVIRONMENT = LocalEnvironment(processes=4)

exp = IssueExperiment(
    revisions=REVISIONS,
    configs=CONFIGS,
    environment=ENVIRONMENT,
)
exp.add_suite(BENCHMARKS_DIR, SUITE)
exp.add_resource(
    'exit_code_converter_parser', 'exit-code-converter-parser.py',
    dest='exit-code-converter-parser.py')
exp.add_command('exit-code-converter-parser', ['{exit_code_converter_parser}'])
exp.add_comparison_table_step()

exp.run_steps()
#! /usr/bin/env python
# -*- coding: utf-8 -*-
"""issue739 v2 (translate): translator-only runs of issue739-v2 under
driver time/memory limits."""

import os

from lab.environments import LocalEnvironment, BaselSlurmEnvironment

import common_setup
from common_setup import IssueConfig, IssueExperiment
from relativescatter import RelativeScatterPlotReport

DIR = os.path.dirname(os.path.abspath(__file__))
BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"]
REVISIONS = ["issue739-v2"]

CONFIGS = [
    IssueConfig('translate', [], driver_options=['--translate']),
    IssueConfig(
        'translate-time-limit', [],
        driver_options=['--translate-time-limit', '5s', '--translate']),
    IssueConfig(
        'translate-memory-limit', [],
        driver_options=['--translate-memory-limit', '100M', '--translate']),
]

SUITE = common_setup.DEFAULT_OPTIMAL_SUITE
ENVIRONMENT = BaselSlurmEnvironment(
    email="silvan.sievers@unibas.ch",
    export=["PATH", "DOWNWARD_BENCHMARKS"])

if common_setup.is_test_run():
    # Local smoke test instead of the full grid run.
    SUITE = ['gripper:prob10.pddl', 'mystery:prob07.pddl']
    ENVIRONMENT = LocalEnvironment(processes=4)

exp = IssueExperiment(
    revisions=REVISIONS,
    configs=CONFIGS,
    environment=ENVIRONMENT,
)
exp.add_suite(BENCHMARKS_DIR, SUITE)
# No search component is run, so drop the search parser.
del exp.commands['parse-search']
exp.add_absolute_report_step(attributes=['translator_*', 'error'])

exp.run_steps()
#! /usr/bin/env python
# -*- coding: utf-8 -*-
"""issue739 v3 (translate): translator-only runs of issue739-v3 under
driver time/memory limits."""

import os

from lab.environments import LocalEnvironment, BaselSlurmEnvironment

import common_setup
from common_setup import IssueConfig, IssueExperiment
from relativescatter import RelativeScatterPlotReport

DIR = os.path.dirname(os.path.abspath(__file__))
BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"]
REVISIONS = ["issue739-v3"]

CONFIGS = [
    IssueConfig('translate', [], driver_options=['--translate']),
    IssueConfig(
        'translate-time-limit', [],
        driver_options=['--translate-time-limit', '5s', '--translate']),
    IssueConfig(
        'translate-memory-limit', [],
        driver_options=['--translate-memory-limit', '100M', '--translate']),
]

SUITE = common_setup.DEFAULT_OPTIMAL_SUITE
ENVIRONMENT = BaselSlurmEnvironment(
    email="silvan.sievers@unibas.ch",
    export=["PATH", "DOWNWARD_BENCHMARKS"])

if common_setup.is_test_run():
    # Local smoke test instead of the full grid run.
    SUITE = ['gripper:prob10.pddl', 'mystery:prob07.pddl']
    ENVIRONMENT = LocalEnvironment(processes=4)

exp = IssueExperiment(
    revisions=REVISIONS,
    configs=CONFIGS,
    environment=ENVIRONMENT,
)
exp.add_suite(BENCHMARKS_DIR, SUITE)
# No search component is run, so drop the search parser.
del exp.commands['parse-search']
exp.add_absolute_report_step(attributes=['translator_*', 'error'])

exp.run_steps()
#! /usr/bin/env python
# -*- coding: utf-8 -*-
"""issue739 v4 (translate-with-options): translator-only run of issue739-v4
with translator pass-through options enabled."""

import os

from lab.environments import LocalEnvironment, BaselSlurmEnvironment

import common_setup
from common_setup import IssueConfig, IssueExperiment
from relativescatter import RelativeScatterPlotReport

DIR = os.path.dirname(os.path.abspath(__file__))
BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"]
REVISIONS = ["issue739-v4"]

CONFIGS = [
    IssueConfig(
        'translate-with-options',
        # '--translate-options' routes the following flags to the translator.
        ['--translate-options', '--keep-unreachable-facts',
         '--keep-unimportant-variables', '--full-encoding'],
        driver_options=['--translate']),
]

SUITE = common_setup.DEFAULT_OPTIMAL_SUITE
ENVIRONMENT = BaselSlurmEnvironment(
    email="silvan.sievers@unibas.ch",
    export=["PATH", "DOWNWARD_BENCHMARKS"])

if common_setup.is_test_run():
    # Local smoke test instead of the full grid run.
    SUITE = ['gripper:prob10.pddl', 'mystery:prob07.pddl']
    ENVIRONMENT = LocalEnvironment(processes=4)

exp = IssueExperiment(
    revisions=REVISIONS,
    configs=CONFIGS,
    environment=ENVIRONMENT,
)
exp.add_suite(BENCHMARKS_DIR, SUITE)
# No search component is run, so drop the search parser.
del exp.commands['parse-search']
exp.add_absolute_report_step(attributes=['translator_*', 'error'])

exp.run_steps()
#! /usr/bin/env python
# -*- coding: utf-8 -*-
"""issue739 v4 (translate): translator-only runs of issue739-v4, with and
without translator pass-through options and under driver limits."""

import os

from lab.environments import LocalEnvironment, BaselSlurmEnvironment

import common_setup
from common_setup import IssueConfig, IssueExperiment
from relativescatter import RelativeScatterPlotReport

DIR = os.path.dirname(os.path.abspath(__file__))
BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"]
REVISIONS = ["issue739-v4"]

CONFIGS = [
    IssueConfig('translate', [], driver_options=['--translate']),
    IssueConfig(
        'translate-with-options',
        # BUG FIX: the original omitted the '--translate-options' separator
        # that v4-translate-with-options.py and v5-translate.py use; without
        # it, the driver would not route these flags to the translator.
        ['--translate-options', '--keep-unreachable-facts',
         '--keep-unimportant-variables', '--full-encoding'],
        driver_options=['--translate']),
    IssueConfig(
        'translate-time-limit', [],
        driver_options=['--translate-time-limit', '5s', '--translate']),
    IssueConfig(
        'translate-memory-limit', [],
        driver_options=['--translate-memory-limit', '100M', '--translate']),
]

SUITE = common_setup.DEFAULT_OPTIMAL_SUITE
ENVIRONMENT = BaselSlurmEnvironment(
    email="silvan.sievers@unibas.ch",
    export=["PATH", "DOWNWARD_BENCHMARKS"])

if common_setup.is_test_run():
    # Local smoke test instead of the full grid run.
    SUITE = ['gripper:prob10.pddl', 'mystery:prob07.pddl']
    ENVIRONMENT = LocalEnvironment(processes=4)

exp = IssueExperiment(
    revisions=REVISIONS,
    configs=CONFIGS,
    environment=ENVIRONMENT,
)
exp.add_suite(BENCHMARKS_DIR, SUITE)
# No search component is run, so drop the search parser.
del exp.commands['parse-search']
exp.add_absolute_report_step(attributes=['translator_*', 'error'])

exp.run_steps()
#! /usr/bin/env python
# -*- coding: utf-8 -*-
"""issue739 v5 (translate): translator-only runs of issue739-v5, using an
explicit parser/step setup instead of the default pipeline."""

import os

from lab.environments import LocalEnvironment, BaselSlurmEnvironment

import common_setup
from common_setup import IssueConfig, IssueExperiment
from relativescatter import RelativeScatterPlotReport

DIR = os.path.dirname(os.path.abspath(__file__))
BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"]
REVISIONS = ["issue739-v5"]

CONFIGS = [
    IssueConfig('translate', [], driver_options=['--translate']),
    IssueConfig(
        'translate-with-options',
        ['--translate-options', '--keep-unreachable-facts',
         '--keep-unimportant-variables', '--full-encoding'],
        driver_options=['--translate']),
    IssueConfig(
        'translate-time-limit', [],
        driver_options=['--translate-time-limit', '5s', '--translate']),
    IssueConfig(
        'translate-memory-limit', [],
        driver_options=['--translate-memory-limit', '100M', '--translate']),
]

SUITE = common_setup.DEFAULT_OPTIMAL_SUITE
ENVIRONMENT = BaselSlurmEnvironment(
    email="silvan.sievers@unibas.ch",
    export=["PATH", "DOWNWARD_BENCHMARKS"])

if common_setup.is_test_run():
    # Local smoke test instead of the full grid run.
    SUITE = ['gripper:prob10.pddl', 'mystery:prob07.pddl']
    ENVIRONMENT = LocalEnvironment(processes=4)

exp = IssueExperiment(
    revisions=REVISIONS,
    configs=CONFIGS,
    environment=ENVIRONMENT,
)
exp.add_suite(BENCHMARKS_DIR, SUITE)

# Translator-only runs: attach exactly the parsers that apply.
exp.add_parser(exp.LAB_STATIC_PROPERTIES_PARSER)
exp.add_parser(exp.LAB_DRIVER_PARSER)
exp.add_parser(exp.EXITCODE_PARSER)
exp.add_parser(exp.TRANSLATOR_PARSER)

exp.add_step('build', exp.build)
exp.add_step('start', exp.start_runs)
exp.add_fetcher(name='fetch')

exp.add_absolute_report_step(attributes=['translator_*', 'error'])

exp.run_steps()
#! /usr/bin/env python
# -*- coding: utf-8 -*-
"""Shared helpers for the issue740 experiments: build planner configurations
that exercise the driver's --sas-file option and run them on a tiny suite."""

import os

from lab.environments import LocalEnvironment, BaselSlurmEnvironment

import common_setup
from common_setup import IssueConfig, IssueExperiment


DIR = os.path.dirname(os.path.abspath(__file__))
BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"]
REVISIONS = ["8e0b8e1f6edc"]
EXPORTS = ["PYTHONPATH", "PATH", "DOWNWARD_BENCHMARKS"]


def generate_configs(sas_filenames):
    """Return a list of IssueConfigs, one group per entry in *sas_filenames*.

    Each entry is either None (use the driver's default output file) or an
    explicit file name to pass via ``--sas-file``.
    """
    configs = []
    for sas_file in sas_filenames:
        common_driver_options = (
            [] if sas_file is None else ["--sas-file", sas_file])
        configs += [
            IssueConfig(
                'lazy-greedy-blind-{}'.format(sas_file),
                ['--search', 'lazy_greedy([blind()])'],
                driver_options=common_driver_options + []),

            IssueConfig(
                'lama-first-{}'.format(sas_file), [],
                driver_options=common_driver_options + [
                    "--alias", "lama-first"]),

            IssueConfig(
                "seq_sat_fdss_1-{}".format(sas_file), [],
                driver_options=common_driver_options + [
                    "--alias", "seq-sat-fdss-1"]),

            # BUG FIX: the nick was "seq_sat_fdss_-{}", dropping the "2" of
            # the fdss-2 portfolio this configuration actually runs.
            IssueConfig(
                "seq_sat_fdss_2-{}".format(sas_file), [],
                driver_options=common_driver_options + [
                    "--portfolio", "driver/portfolios/seq_sat_fdss_2.py",
                    "--overall-time-limit", "20s"]),

            IssueConfig(
                'translate-only-{}'.format(sas_file), [],
                driver_options=['--translate'] + common_driver_options),
        ]
    return configs


def generate_experiments(configs):
    """Build and run an IssueExperiment for *configs* on a fixed mini-suite."""
    SUITE = ["gripper:prob01.pddl",
             "blocks:probBLOCKS-5-0.pddl",
             "visitall-sat11-strips:problem12.pddl",
             "airport:p01-airport1-p1.pddl"]

    ENVIRONMENT = BaselSlurmEnvironment(
        email="guillem.frances@unibas.ch", export=EXPORTS)

    if common_setup.is_test_run():
        # Local smoke test instead of the full grid run.
        SUITE = IssueExperiment.DEFAULT_TEST_SUITE
        ENVIRONMENT = LocalEnvironment(processes=2)

    exp = IssueExperiment(
        revisions=REVISIONS,
        configs=configs,
        environment=ENVIRONMENT,
    )
    exp.add_suite(BENCHMARKS_DIR, SUITE)
    exp.add_absolute_report_step()
    exp.run_steps()
# -*- coding: utf-8 -*-
"""Common helper module for the issue740 experiments.

Provides benchmark-suite constants, cluster/test-run detection, and the
IssueExperiment convenience subclass of FastDownwardExperiment.
"""

import itertools
import os
import platform
import subprocess
import sys

from lab.experiment import ARGPARSER
from lab import tools

from downward.experiment import FastDownwardExperiment
from downward.reports.absolute import AbsoluteReport
from downward.reports.compare import ComparativeReport
from downward.reports.scatter import ScatterPlotReport


def parse_args():
    """Parse the Lab command line, adding the experiment's --test flag."""
    ARGPARSER.add_argument(
        "--test",
        choices=["yes", "no", "auto"],
        default="auto",
        dest="test_run",
        help="test experiment locally on a small suite if --test=yes or "
        "--test=auto and we are not on a cluster")
    return ARGPARSER.parse_args()

ARGS = parse_args()


DEFAULT_OPTIMAL_SUITE = [
    'airport', 'barman-opt11-strips', 'barman-opt14-strips', 'blocks',
    'childsnack-opt14-strips', 'depot', 'driverlog',
    'elevators-opt08-strips', 'elevators-opt11-strips',
    'floortile-opt11-strips', 'floortile-opt14-strips', 'freecell',
    'ged-opt14-strips', 'grid', 'gripper', 'hiking-opt14-strips',
    'logistics00', 'logistics98', 'miconic', 'movie', 'mprime',
    'mystery', 'nomystery-opt11-strips', 'openstacks-opt08-strips',
    'openstacks-opt11-strips', 'openstacks-opt14-strips',
    'openstacks-strips', 'parcprinter-08-strips',
    'parcprinter-opt11-strips', 'parking-opt11-strips',
    'parking-opt14-strips', 'pathways-noneg', 'pegsol-08-strips',
    'pegsol-opt11-strips', 'pipesworld-notankage',
    'pipesworld-tankage', 'psr-small', 'rovers', 'satellite',
    'scanalyzer-08-strips', 'scanalyzer-opt11-strips',
    'sokoban-opt08-strips', 'sokoban-opt11-strips', 'storage',
    'tetris-opt14-strips', 'tidybot-opt11-strips',
    'tidybot-opt14-strips', 'tpp', 'transport-opt08-strips',
    'transport-opt11-strips', 'transport-opt14-strips',
    'trucks-strips', 'visitall-opt11-strips', 'visitall-opt14-strips',
    'woodworking-opt08-strips', 'woodworking-opt11-strips',
    'zenotravel']

DEFAULT_SATISFICING_SUITE = [
    'airport', 'assembly', 'barman-sat11-strips',
    'barman-sat14-strips', 'blocks', 'cavediving-14-adl',
    'childsnack-sat14-strips', 'citycar-sat14-adl', 'depot',
    'driverlog', 'elevators-sat08-strips', 'elevators-sat11-strips',
    'floortile-sat11-strips', 'floortile-sat14-strips', 'freecell',
    'ged-sat14-strips', 'grid', 'gripper', 'hiking-sat14-strips',
    'logistics00', 'logistics98', 'maintenance-sat14-adl', 'miconic',
    'miconic-fulladl', 'miconic-simpleadl', 'movie', 'mprime',
    'mystery', 'nomystery-sat11-strips', 'openstacks',
    'openstacks-sat08-adl', 'openstacks-sat08-strips',
    'openstacks-sat11-strips', 'openstacks-sat14-strips',
    'openstacks-strips', 'optical-telegraphs', 'parcprinter-08-strips',
    'parcprinter-sat11-strips', 'parking-sat11-strips',
    'parking-sat14-strips', 'pathways', 'pathways-noneg',
    'pegsol-08-strips', 'pegsol-sat11-strips', 'philosophers',
    'pipesworld-notankage', 'pipesworld-tankage', 'psr-large',
    'psr-middle', 'psr-small', 'rovers', 'satellite',
    'scanalyzer-08-strips', 'scanalyzer-sat11-strips', 'schedule',
    'sokoban-sat08-strips', 'sokoban-sat11-strips', 'storage',
    'tetris-sat14-strips', 'thoughtful-sat14-strips',
    'tidybot-sat11-strips', 'tpp', 'transport-sat08-strips',
    'transport-sat11-strips', 'transport-sat14-strips', 'trucks',
    'trucks-strips', 'visitall-sat11-strips', 'visitall-sat14-strips',
    'woodworking-sat08-strips', 'woodworking-sat11-strips',
    'zenotravel']


def get_script():
    """Get file name of main script."""
    return tools.get_script_path()


def get_script_dir():
    """Get directory of main script.

    Usually a relative directory (depends on how it was called by the user.)"""
    return os.path.dirname(get_script())


def get_experiment_name():
    """Get name for experiment.

    Derived from the absolute filename of the main script, e.g.
    "/ham/spam/eggs.py" => "spam-eggs"."""
    script = os.path.abspath(get_script())
    script_dir = os.path.basename(os.path.dirname(script))
    script_base = os.path.splitext(os.path.basename(script))[0]
    return "%s-%s" % (script_dir, script_base)


def get_data_dir():
    """Get data dir for the experiment.

    This is the subdirectory "data" of the directory containing
    the main script."""
    return os.path.join(get_script_dir(), "data", get_experiment_name())


def get_repo_base():
    """Get base directory of the repository, as an absolute path.

    Search upwards in the directory tree from the main script until a
    directory with a subdirectory named ".hg" is found.

    Abort if the repo base cannot be found."""
    path = os.path.abspath(get_script_dir())
    while os.path.dirname(path) != path:
        if os.path.exists(os.path.join(path, ".hg")):
            return path
        path = os.path.dirname(path)
    sys.exit("repo base could not be found")


def is_running_on_cluster():
    """Heuristically detect the Basel grid from the host name."""
    node = platform.node()
    return (
        "cluster" in node or
        node.startswith("gkigrid") or
        node in ["habakuk", "turtur"])


def is_test_run():
    """True if this invocation should run the small local test suite."""
    return ARGS.test_run == "yes" or (
        ARGS.test_run == "auto" and not is_running_on_cluster())


def get_algo_nick(revision, config_nick):
    """Combine a revision and a config nick into the algorithm name."""
    # Explicit keywords instead of the original format(**locals()) magic.
    return "{revision}-{config_nick}".format(
        revision=revision, config_nick=config_nick)


class IssueConfig(object):
    """Hold information about a planner configuration.

    See FastDownwardExperiment.add_algorithm() for documentation of the
    constructor's options.

    """
    def __init__(self, nick, component_options,
                 build_options=None, driver_options=None):
        self.nick = nick
        self.component_options = component_options
        self.build_options = build_options
        self.driver_options = driver_options


class IssueExperiment(FastDownwardExperiment):
    """Subclass of FastDownwardExperiment with some convenience features."""

    DEFAULT_TEST_SUITE = ["gripper:prob01.pddl"]

    DEFAULT_TABLE_ATTRIBUTES = [
        "cost",
        "coverage",
        "error",
        "evaluations",
        "expansions",
        "expansions_until_last_jump",
        "generated",
        "memory",
        "quality",
        "run_dir",
        "score_evaluations",
        "score_expansions",
        "score_generated",
        "score_memory",
        "score_search_time",
        "score_total_time",
        "search_time",
        "total_time",
    ]

    DEFAULT_SCATTER_PLOT_ATTRIBUTES = [
        "evaluations",
        "expansions",
        "expansions_until_last_jump",
        "initial_h_value",
        "memory",
        "search_time",
        "total_time",
    ]

    PORTFOLIO_ATTRIBUTES = [
        "cost",
        "coverage",
        "error",
        "plan_length",
        "run_dir",
    ]

    def __init__(self, revisions=None, configs=None, path=None, **kwargs):
        """

        You can either specify both *revisions* and *configs* or none
        of them. If they are omitted, you will need to call
        exp.add_algorithm() manually.

        If *revisions* is given, it must be a non-empty list of
        revision identifiers, which specify which planner versions to
        use in the experiment. The same versions are used for
        translator, preprocessor and search. ::

            IssueExperiment(revisions=["issue123", "4b3d581643"], ...)

        If *configs* is given, it must be a non-empty list of
        IssueConfig objects. ::

            IssueExperiment(..., configs=[
                IssueConfig("ff", ["--search", "eager_greedy(ff())"]),
                IssueConfig(
                    "lama", [],
                    driver_options=["--alias", "seq-sat-lama-2011"]),
            ])

        If *path* is specified, it must be the path to where the
        experiment should be built (e.g.
        /home/john/experiments/issue123/exp01/). If omitted, the
        experiment path is derived automatically from the main
        script's filename. Example::

            script = experiments/issue123/exp01.py -->
            path = experiments/issue123/data/issue123-exp01/

        """

        path = path or get_data_dir()

        FastDownwardExperiment.__init__(self, path=path, **kwargs)

        if (revisions and not configs) or (not revisions and configs):
            raise ValueError(
                "please provide either both or none of revisions and configs")

        # BUG FIX: the original iterated over *revisions* unconditionally,
        # raising TypeError when both arguments were omitted, although the
        # docstring explicitly allows omitting both.
        for rev in revisions or []:
            for config in configs:
                self.add_algorithm(
                    get_algo_nick(rev, config.nick),
                    get_repo_base(),
                    rev,
                    config.component_options,
                    build_options=config.build_options,
                    driver_options=config.driver_options)

        self._revisions = revisions
        self._configs = configs

    @classmethod
    def _is_portfolio(cls, config_nick):
        return "fdss" in config_nick

    @classmethod
    def get_supported_attributes(cls, config_nick, attributes):
        # Portfolios only report a restricted attribute set.
        if cls._is_portfolio(config_nick):
            return [attr for attr in attributes
                    if attr in cls.PORTFOLIO_ATTRIBUTES]
        return attributes

    def add_absolute_report_step(self, **kwargs):
        """Add step that makes an absolute report.

        Absolute reports are useful for experiments that don't compare
        revisions.

        The report is written to the experiment evaluation directory.

        All *kwargs* will be passed to the AbsoluteReport class. If the
        keyword argument *attributes* is not specified, a default list
        of attributes is used. ::

            exp.add_absolute_report_step(attributes=["coverage"])

        """
        kwargs.setdefault("attributes", self.DEFAULT_TABLE_ATTRIBUTES)
        report = AbsoluteReport(**kwargs)
        outfile = os.path.join(
            self.eval_dir,
            get_experiment_name() + "." + report.output_format)
        self.add_report(report, outfile=outfile)
        self.add_step(
            'publish-absolute-report', subprocess.call, ['publish', outfile])

    def add_comparison_table_step(self, **kwargs):
        """Add a step that makes pairwise revision comparisons.

        Create comparative reports for all pairs of Fast Downward
        revisions. Each report pairs up the runs of the same config and
        lists the two absolute attribute values and their difference
        for all attributes in kwargs["attributes"].

        All *kwargs* will be passed to the CompareConfigsReport class.
        If the keyword argument *attributes* is not specified, a
        default list of attributes is used. ::

            exp.add_comparison_table_step(attributes=["coverage"])

        """
        kwargs.setdefault("attributes", self.DEFAULT_TABLE_ATTRIBUTES)

        def make_comparison_tables():
            for rev1, rev2 in itertools.combinations(self._revisions, 2):
                compared_configs = []
                for config in self._configs:
                    config_nick = config.nick
                    compared_configs.append(
                        ("%s-%s" % (rev1, config_nick),
                         "%s-%s" % (rev2, config_nick),
                         "Diff (%s)" % config_nick))
                report = ComparativeReport(compared_configs, **kwargs)
                outfile = os.path.join(
                    self.eval_dir,
                    "%s-%s-%s-compare.%s" % (
                        self.name, rev1, rev2, report.output_format))
                report(self.eval_dir, outfile)

        def publish_comparison_tables():
            for rev1, rev2 in itertools.combinations(self._revisions, 2):
                outfile = os.path.join(
                    self.eval_dir,
                    "%s-%s-%s-compare.html" % (self.name, rev1, rev2))
                subprocess.call(["publish", outfile])

        self.add_step("make-comparison-tables", make_comparison_tables)
        self.add_step(
            "publish-comparison-tables", publish_comparison_tables)

    def add_scatter_plot_step(self, relative=False, attributes=None):
        """Add step creating (relative) scatter plots for all revision pairs.

        Create a scatter plot for each combination of attribute,
        configuration and revisions pair. If *attributes* is not
        specified, a list of common scatter plot attributes is used.
        For portfolios all attributes except "cost", "coverage" and
        "plan_length" will be ignored. ::

            exp.add_scatter_plot_step(attributes=["expansions"])

        """
        if relative:
            from relativescatter import RelativeScatterPlotReport
            report_class = RelativeScatterPlotReport
            scatter_dir = os.path.join(self.eval_dir, "scatter-relative")
            step_name = "make-relative-scatter-plots"
        else:
            report_class = ScatterPlotReport
            scatter_dir = os.path.join(self.eval_dir, "scatter-absolute")
            step_name = "make-absolute-scatter-plots"
        if attributes is None:
            attributes = self.DEFAULT_SCATTER_PLOT_ATTRIBUTES

        def make_scatter_plot(config_nick, rev1, rev2, attribute):
            name = "-".join([self.name, rev1, rev2, attribute, config_nick])
            # FIX: parenthesized single-argument print works identically on
            # Python 2 and 3; the original used a Python-2-only statement.
            print("Make scatter plot for %s" % name)
            algo1 = "{}-{}".format(rev1, config_nick)
            algo2 = "{}-{}".format(rev2, config_nick)
            report = report_class(
                filter_config=[algo1, algo2],
                attributes=[attribute],
                get_category=lambda run1, run2: run1["domain"],
                legend_location=(1.3, 0.5))
            report(
                self.eval_dir,
                os.path.join(scatter_dir, rev1 + "-" + rev2, name))

        def make_scatter_plots():
            for config in self._configs:
                for rev1, rev2 in itertools.combinations(self._revisions, 2):
                    for attribute in self.get_supported_attributes(
                            config.nick, attributes):
                        make_scatter_plot(config.nick, rev1, rev2, attribute)

        self.add_step(step_name, make_scatter_plots)


# ---------------------------------------------------------------------------
# experiments/issue740/foobar_sas.py (a separate file in the original tree)
# ---------------------------------------------------------------------------
#! /usr/bin/env python
# -*- coding: utf-8 -*-

import common

common.generate_experiments(common.generate_configs(["foobar.sas"]))
/usr/bin/env python -# -*- coding: utf-8 -*- -import common - -# We want to test both NOT specifying the -sas-file option AND specifying the default "output.sas" value. -# The result should be the same in both cases -common.generate_experiments(common.generate_configs((None, "output.sas"))) diff --git a/experiments/issue742/common_setup.py b/experiments/issue742/common_setup.py deleted file mode 100644 index df2613bf87..0000000000 --- a/experiments/issue742/common_setup.py +++ /dev/null @@ -1,392 +0,0 @@ -# -*- coding: utf-8 -*- - -import itertools -import os -import platform -import subprocess -import sys - -from lab.experiment import ARGPARSER -from lab import tools - -from downward.experiment import FastDownwardExperiment -from downward.reports.absolute import AbsoluteReport -from downward.reports.compare import ComparativeReport -from downward.reports.scatter import ScatterPlotReport - -try: - from relativescatter import RelativeScatterPlotReport - matplotlib = True -except ImportError: - print 'matplotlib not availabe, scatter plots not available' - matplotlib = False - - -def parse_args(): - ARGPARSER.add_argument( - "--test", - choices=["yes", "no", "auto"], - default="auto", - dest="test_run", - help="test experiment locally on a small suite if --test=yes or " - "--test=auto and we are not on a cluster") - return ARGPARSER.parse_args() - -ARGS = parse_args() - - -DEFAULT_OPTIMAL_SUITE = [ - 'airport', 'barman-opt11-strips', 'barman-opt14-strips', 'blocks', - 'childsnack-opt14-strips', 'depot', 'driverlog', - 'elevators-opt08-strips', 'elevators-opt11-strips', - 'floortile-opt11-strips', 'floortile-opt14-strips', 'freecell', - 'ged-opt14-strips', 'grid', 'gripper', 'hiking-opt14-strips', - 'logistics00', 'logistics98', 'miconic', 'movie', 'mprime', - 'mystery', 'nomystery-opt11-strips', 'openstacks-opt08-strips', - 'openstacks-opt11-strips', 'openstacks-opt14-strips', - 'openstacks-strips', 'parcprinter-08-strips', - 'parcprinter-opt11-strips', 
'parking-opt11-strips', - 'parking-opt14-strips', 'pathways-noneg', 'pegsol-08-strips', - 'pegsol-opt11-strips', 'pipesworld-notankage', - 'pipesworld-tankage', 'psr-small', 'rovers', 'satellite', - 'scanalyzer-08-strips', 'scanalyzer-opt11-strips', - 'sokoban-opt08-strips', 'sokoban-opt11-strips', 'storage', - 'tetris-opt14-strips', 'tidybot-opt11-strips', - 'tidybot-opt14-strips', 'tpp', 'transport-opt08-strips', - 'transport-opt11-strips', 'transport-opt14-strips', - 'trucks-strips', 'visitall-opt11-strips', 'visitall-opt14-strips', - 'woodworking-opt08-strips', 'woodworking-opt11-strips', - 'zenotravel'] - -DEFAULT_SATISFICING_SUITE = [ - 'airport', 'assembly', 'barman-sat11-strips', - 'barman-sat14-strips', 'blocks', 'cavediving-14-adl', - 'childsnack-sat14-strips', 'citycar-sat14-adl', 'depot', - 'driverlog', 'elevators-sat08-strips', 'elevators-sat11-strips', - 'floortile-sat11-strips', 'floortile-sat14-strips', 'freecell', - 'ged-sat14-strips', 'grid', 'gripper', 'hiking-sat14-strips', - 'logistics00', 'logistics98', 'maintenance-sat14-adl', 'miconic', - 'miconic-fulladl', 'miconic-simpleadl', 'movie', 'mprime', - 'mystery', 'nomystery-sat11-strips', 'openstacks', - 'openstacks-sat08-adl', 'openstacks-sat08-strips', - 'openstacks-sat11-strips', 'openstacks-sat14-strips', - 'openstacks-strips', 'optical-telegraphs', 'parcprinter-08-strips', - 'parcprinter-sat11-strips', 'parking-sat11-strips', - 'parking-sat14-strips', 'pathways', 'pathways-noneg', - 'pegsol-08-strips', 'pegsol-sat11-strips', 'philosophers', - 'pipesworld-notankage', 'pipesworld-tankage', 'psr-large', - 'psr-middle', 'psr-small', 'rovers', 'satellite', - 'scanalyzer-08-strips', 'scanalyzer-sat11-strips', 'schedule', - 'sokoban-sat08-strips', 'sokoban-sat11-strips', 'storage', - 'tetris-sat14-strips', 'thoughtful-sat14-strips', - 'tidybot-sat11-strips', 'tpp', 'transport-sat08-strips', - 'transport-sat11-strips', 'transport-sat14-strips', 'trucks', - 'trucks-strips', 'visitall-sat11-strips', 
'visitall-sat14-strips', - 'woodworking-sat08-strips', 'woodworking-sat11-strips', - 'zenotravel'] - - -def get_script(): - """Get file name of main script.""" - return tools.get_script_path() - - -def get_script_dir(): - """Get directory of main script. - - Usually a relative directory (depends on how it was called by the user.)""" - return os.path.dirname(get_script()) - - -def get_experiment_name(): - """Get name for experiment. - - Derived from the absolute filename of the main script, e.g. - "/ham/spam/eggs.py" => "spam-eggs".""" - script = os.path.abspath(get_script()) - script_dir = os.path.basename(os.path.dirname(script)) - script_base = os.path.splitext(os.path.basename(script))[0] - return "%s-%s" % (script_dir, script_base) - - -def get_data_dir(): - """Get data dir for the experiment. - - This is the subdirectory "data" of the directory containing - the main script.""" - return os.path.join(get_script_dir(), "data", get_experiment_name()) - - -def get_repo_base(): - """Get base directory of the repository, as an absolute path. - - Search upwards in the directory tree from the main script until a - directory with a subdirectory named ".hg" is found. - - Abort if the repo base cannot be found.""" - path = os.path.abspath(get_script_dir()) - while os.path.dirname(path) != path: - if os.path.exists(os.path.join(path, ".hg")): - return path - path = os.path.dirname(path) - sys.exit("repo base could not be found") - - -def is_running_on_cluster(): - node = platform.node() - return ( - "cluster" in node or - node.startswith("gkigrid") or - node in ["habakuk", "turtur"]) - - -def is_test_run(): - return ARGS.test_run == "yes" or ( - ARGS.test_run == "auto" and not is_running_on_cluster()) - - -def get_algo_nick(revision, config_nick): - return "{revision}-{config_nick}".format(**locals()) - - -class IssueConfig(object): - """Hold information about a planner configuration. 
- - See FastDownwardExperiment.add_algorithm() for documentation of the - constructor's options. - - """ - def __init__(self, nick, component_options, - build_options=None, driver_options=None): - self.nick = nick - self.component_options = component_options - self.build_options = build_options - self.driver_options = driver_options - - -class IssueExperiment(FastDownwardExperiment): - """Subclass of FastDownwardExperiment with some convenience features.""" - - DEFAULT_TEST_SUITE = ["gripper:prob01.pddl"] - - DEFAULT_TABLE_ATTRIBUTES = [ - "cost", - "coverage", - "error", - "evaluations", - "expansions", - "expansions_until_last_jump", - "generated", - "memory", - "quality", - "run_dir", - "score_evaluations", - "score_expansions", - "score_generated", - "score_memory", - "score_search_time", - "score_total_time", - "search_time", - "total_time", - "unsolvable", - ] - - DEFAULT_SCATTER_PLOT_ATTRIBUTES = [ - "evaluations", - "expansions", - "expansions_until_last_jump", - "initial_h_value", - "memory", - "search_time", - "total_time", - ] - - PORTFOLIO_ATTRIBUTES = [ - "cost", - "coverage", - "error", - "plan_length", - "run_dir", - ] - - def __init__(self, revisions=None, configs=None, path=None, **kwargs): - """ - - You can either specify both *revisions* and *configs* or none - of them. If they are omitted, you will need to call - exp.add_algorithm() manually. - - If *revisions* is given, it must be a non-empty list of - revision identifiers, which specify which planner versions to - use in the experiment. The same versions are used for - translator, preprocessor and search. :: - - IssueExperiment(revisions=["issue123", "4b3d581643"], ...) - - If *configs* is given, it must be a non-empty list of - IssueConfig objects. 
:: - - IssueExperiment(..., configs=[ - IssueConfig("ff", ["--search", "eager_greedy(ff())"]), - IssueConfig( - "lama", [], - driver_options=["--alias", "seq-sat-lama-2011"]), - ]) - - If *path* is specified, it must be the path to where the - experiment should be built (e.g. - /home/john/experiments/issue123/exp01/). If omitted, the - experiment path is derived automatically from the main - script's filename. Example:: - - script = experiments/issue123/exp01.py --> - path = experiments/issue123/data/issue123-exp01/ - - """ - - path = path or get_data_dir() - - FastDownwardExperiment.__init__(self, path=path, **kwargs) - - if (revisions and not configs) or (not revisions and configs): - raise ValueError( - "please provide either both or none of revisions and configs") - - for rev in revisions: - for config in configs: - self.add_algorithm( - get_algo_nick(rev, config.nick), - get_repo_base(), - rev, - config.component_options, - build_options=config.build_options, - driver_options=config.driver_options) - - self._revisions = revisions - self._configs = configs - - @classmethod - def _is_portfolio(cls, config_nick): - return "fdss" in config_nick - - @classmethod - def get_supported_attributes(cls, config_nick, attributes): - if cls._is_portfolio(config_nick): - return [attr for attr in attributes - if attr in cls.PORTFOLIO_ATTRIBUTES] - return attributes - - def add_absolute_report_step(self, **kwargs): - """Add step that makes an absolute report. - - Absolute reports are useful for experiments that don't compare - revisions. - - The report is written to the experiment evaluation directory. - - All *kwargs* will be passed to the AbsoluteReport class. If the - keyword argument *attributes* is not specified, a default list - of attributes is used. 
:: - - exp.add_absolute_report_step(attributes=["coverage"]) - - """ - kwargs.setdefault("attributes", self.DEFAULT_TABLE_ATTRIBUTES) - report = AbsoluteReport(**kwargs) - outfile = os.path.join( - self.eval_dir, - get_experiment_name() + "." + report.output_format) - self.add_report(report, outfile=outfile) - self.add_step( - 'publish-absolute-report', subprocess.call, ['publish', outfile]) - - def add_comparison_table_step(self, **kwargs): - """Add a step that makes pairwise revision comparisons. - - Create comparative reports for all pairs of Fast Downward - revisions. Each report pairs up the runs of the same config and - lists the two absolute attribute values and their difference - for all attributes in kwargs["attributes"]. - - All *kwargs* will be passed to the CompareConfigsReport class. - If the keyword argument *attributes* is not specified, a - default list of attributes is used. :: - - exp.add_comparison_table_step(attributes=["coverage"]) - - """ - kwargs.setdefault("attributes", self.DEFAULT_TABLE_ATTRIBUTES) - - def make_comparison_tables(): - for rev1, rev2 in itertools.combinations(self._revisions, 2): - compared_configs = [] - for config in self._configs: - config_nick = config.nick - compared_configs.append( - ("%s-%s" % (rev1, config_nick), - "%s-%s" % (rev2, config_nick), - "Diff (%s)" % config_nick)) - report = ComparativeReport(compared_configs, **kwargs) - outfile = os.path.join( - self.eval_dir, - "%s-%s-%s-compare.%s" % ( - self.name, rev1, rev2, report.output_format)) - report(self.eval_dir, outfile) - - def publish_comparison_tables(): - for rev1, rev2 in itertools.combinations(self._revisions, 2): - outfile = os.path.join( - self.eval_dir, - "%s-%s-%s-compare.html" % (self.name, rev1, rev2)) - subprocess.call(["publish", outfile]) - - self.add_step("make-comparison-tables", make_comparison_tables) - self.add_step( - "publish-comparison-tables", publish_comparison_tables) - - def add_scatter_plot_step(self, relative=False, 
attributes=None): - """Add step creating (relative) scatter plots for all revision pairs. - - Create a scatter plot for each combination of attribute, - configuration and revisions pair. If *attributes* is not - specified, a list of common scatter plot attributes is used. - For portfolios all attributes except "cost", "coverage" and - "plan_length" will be ignored. :: - - exp.add_scatter_plot_step(attributes=["expansions"]) - - """ - if matplotlib: - if relative: - report_class = RelativeScatterPlotReport - scatter_dir = os.path.join(self.eval_dir, "scatter-relative") - step_name = "make-relative-scatter-plots" - else: - report_class = ScatterPlotReport - scatter_dir = os.path.join(self.eval_dir, "scatter-absolute") - step_name = "make-absolute-scatter-plots" - if attributes is None: - attributes = self.DEFAULT_SCATTER_PLOT_ATTRIBUTES - - def make_scatter_plot(config_nick, rev1, rev2, attribute): - name = "-".join([self.name, rev1, rev2, attribute, config_nick]) - print "Make scatter plot for", name - algo1 = "{}-{}".format(rev1, config_nick) - algo2 = "{}-{}".format(rev2, config_nick) - report = report_class( - filter_config=[algo1, algo2], - attributes=[attribute], - get_category=lambda run1, run2: run1["domain"], - legend_location=(1.3, 0.5)) - report( - self.eval_dir, - os.path.join(scatter_dir, rev1 + "-" + rev2, name)) - - def make_scatter_plots(): - for config in self._configs: - for rev1, rev2 in itertools.combinations(self._revisions, 2): - for attribute in self.get_supported_attributes( - config.nick, attributes): - make_scatter_plot(config.nick, rev1, rev2, attribute) - - self.add_step(step_name, make_scatter_plots) diff --git a/experiments/issue742/ms-parser.py b/experiments/issue742/ms-parser.py deleted file mode 100755 index a33b1f76f3..0000000000 --- a/experiments/issue742/ms-parser.py +++ /dev/null @@ -1,61 +0,0 @@ -#! 
/usr/bin/env python - -from lab.parser import Parser - -parser = Parser() -parser.add_pattern('ms_final_size', 'Final transition system size: (\d+)', required=False, type=int) -parser.add_pattern('ms_construction_time', 'Done initializing merge-and-shrink heuristic \[(.+)s\]', required=False, type=float) -parser.add_pattern('ms_atomic_construction_time', 't=(.+)s \(after computation of atomic transition systems\)', required=False, type=float) -parser.add_pattern('ms_memory_delta', 'Final peak memory increase of merge-and-shrink computation: (\d+) KB', required=False, type=int) - -def check_ms_constructed(content, props): - ms_construction_time = props.get('ms_construction_time') - abstraction_constructed = False - if ms_construction_time is not None: - abstraction_constructed = True - props['ms_abstraction_constructed'] = abstraction_constructed - -parser.add_function(check_ms_constructed) - -def check_planner_exit_reason(content, props): - ms_abstraction_constructed = props.get('ms_abstraction_constructed') - error = props.get('error') - if error != 'none' and error != 'timeout' and error != 'out-of-memory': - print 'error: %s' % error - return - - # Check whether merge-and-shrink computation or search ran out of - # time or memory. 
- ms_out_of_time = False - ms_out_of_memory = False - search_out_of_time = False - search_out_of_memory = False - if ms_abstraction_constructed == False: - if error == 'timeout': - ms_out_of_time = True - elif error == 'out-of-memory': - ms_out_of_memory = True - elif ms_abstraction_constructed == True: - if error == 'timeout': - search_out_of_time = True - elif error == 'out-of-memory': - search_out_of_memory = True - props['ms_out_of_time'] = ms_out_of_time - props['ms_out_of_memory'] = ms_out_of_memory - props['search_out_of_time'] = search_out_of_time - props['search_out_of_memory'] = search_out_of_memory - -parser.add_function(check_planner_exit_reason) - -def check_perfect_heuristic(content, props): - plan_length = props.get('plan_length') - expansions = props.get('expansions') - if plan_length != None: - perfect_heuristic = False - if plan_length + 1 == expansions: - perfect_heuristic = True - props['perfect_heuristic'] = perfect_heuristic - -parser.add_function(check_perfect_heuristic) - -parser.parse() diff --git a/experiments/issue742/v1-debug.py b/experiments/issue742/v1-debug.py deleted file mode 100755 index 8083422f3d..0000000000 --- a/experiments/issue742/v1-debug.py +++ /dev/null @@ -1,67 +0,0 @@ -#! 
/usr/bin/env python -# -*- coding: utf-8 -*- - -import os - -from lab.environments import LocalEnvironment, BaselSlurmEnvironment -from lab.reports import Attribute, geometric_mean - -from common_setup import IssueConfig, IssueExperiment, DEFAULT_OPTIMAL_SUITE, is_test_run - -BENCHMARKS_DIR=os.path.expanduser('~/repos/downward/benchmarks') -REVISIONS = ["issue742-v1"] -CONFIGS = [ - IssueConfig('rl-rnd-noprune', ['--search', 'astar(merge_and_shrink(shrink_strategy=shrink_random,merge_strategy=merge_precomputed(merge_tree=linear(variable_order=reverse_level)),label_reduction=exact(before_shrinking=false,before_merging=true),max_states=50000,prune_unreachable_states=false,prune_irrelevant_states=false))'], driver_options=['--debug'], build_options=['--debug']), - IssueConfig('rl-rnd-punr', ['--search', 'astar(merge_and_shrink(shrink_strategy=shrink_random,merge_strategy=merge_precomputed(merge_tree=linear(variable_order=reverse_level)),label_reduction=exact(before_shrinking=false,before_merging=true),max_states=50000,prune_unreachable_states=true,prune_irrelevant_states=false))'], driver_options=['--debug'], build_options=['--debug']), - IssueConfig('rl-rnd-pirr', ['--search', 'astar(merge_and_shrink(shrink_strategy=shrink_random,merge_strategy=merge_precomputed(merge_tree=linear(variable_order=reverse_level)),label_reduction=exact(before_shrinking=false,before_merging=true),max_states=50000,prune_unreachable_states=false,prune_irrelevant_states=true))'], driver_options=['--debug'], build_options=['--debug']), - IssueConfig('rl-rnd-fullprune', ['--search', 'astar(merge_and_shrink(shrink_strategy=shrink_random,merge_strategy=merge_precomputed(merge_tree=linear(variable_order=reverse_level)),label_reduction=exact(before_shrinking=false,before_merging=true),max_states=50000,prune_unreachable_states=true,prune_irrelevant_states=true))'], driver_options=['--debug'], build_options=['--debug']), -] -SUITE = DEFAULT_OPTIMAL_SUITE -ENVIRONMENT = 
BaselSlurmEnvironment(email='silvan.sievers@unibas.ch') - -if is_test_run(): - SUITE = ['depot:p01.pddl', 'depot:p02.pddl', 'parcprinter-opt11-strips:p01.pddl', 'parcprinter-opt11-strips:p02.pddl', 'mystery:prob07.pddl'] - ENVIRONMENT = LocalEnvironment(processes=4) - -exp = IssueExperiment( - revisions=REVISIONS, - configs=CONFIGS, - environment=ENVIRONMENT, -) -exp.add_resource('ms_parser', 'ms-parser.py', dest='ms-parser.py') -exp.add_command('ms-parser', ['{ms_parser}']) -exp.add_suite(BENCHMARKS_DIR, SUITE) - -# planner outcome attributes -perfect_heuristic = Attribute('perfect_heuristic', absolute=True, min_wins=False) - -# m&s attributes -ms_construction_time = Attribute('ms_construction_time', absolute=False, min_wins=True, functions=[geometric_mean]) -ms_atomic_construction_time = Attribute('ms_atomic_construction_time', absolute=False, min_wins=True, functions=[geometric_mean]) -ms_abstraction_constructed = Attribute('ms_abstraction_constructed', absolute=True, min_wins=False) -ms_final_size = Attribute('ms_final_size', absolute=False, min_wins=True) -ms_out_of_memory = Attribute('ms_out_of_memory', absolute=True, min_wins=True) -ms_out_of_time = Attribute('ms_out_of_time', absolute=True, min_wins=True) -search_out_of_memory = Attribute('search_out_of_memory', absolute=True, min_wins=True) -search_out_of_time = Attribute('search_out_of_time', absolute=True, min_wins=True) - -extra_attributes = [ - perfect_heuristic, - - ms_construction_time, - ms_atomic_construction_time, - ms_abstraction_constructed, - ms_final_size, - ms_out_of_memory, - ms_out_of_time, - search_out_of_memory, - search_out_of_time, -] -attributes = exp.DEFAULT_TABLE_ATTRIBUTES -attributes.extend(extra_attributes) - -exp.add_absolute_report_step(attributes=attributes) -exp.add_comparison_table_step(attributes=attributes) -exp.add_scatter_plot_step() - -exp.run_steps() diff --git a/experiments/issue742/v1.py b/experiments/issue742/v1.py deleted file mode 100755 index 
d5a71b2e70..0000000000 --- a/experiments/issue742/v1.py +++ /dev/null @@ -1,67 +0,0 @@ -#! /usr/bin/env python -# -*- coding: utf-8 -*- - -import os - -from lab.environments import LocalEnvironment, BaselSlurmEnvironment -from lab.reports import Attribute, geometric_mean - -from common_setup import IssueConfig, IssueExperiment, DEFAULT_OPTIMAL_SUITE, is_test_run - -BENCHMARKS_DIR=os.path.expanduser('~/repos/downward/benchmarks') -REVISIONS = ["issue742-v1"] -CONFIGS = [ - IssueConfig('rl-rnd-noprune', ['--search', 'astar(merge_and_shrink(shrink_strategy=shrink_random,merge_strategy=merge_precomputed(merge_tree=linear(variable_order=reverse_level)),label_reduction=exact(before_shrinking=false,before_merging=true),max_states=50000,prune_unreachable_states=false,prune_irrelevant_states=false))']), - IssueConfig('rl-rnd-punr', ['--search', 'astar(merge_and_shrink(shrink_strategy=shrink_random,merge_strategy=merge_precomputed(merge_tree=linear(variable_order=reverse_level)),label_reduction=exact(before_shrinking=false,before_merging=true),max_states=50000,prune_unreachable_states=true,prune_irrelevant_states=false))']), - IssueConfig('rl-rnd-pirr', ['--search', 'astar(merge_and_shrink(shrink_strategy=shrink_random,merge_strategy=merge_precomputed(merge_tree=linear(variable_order=reverse_level)),label_reduction=exact(before_shrinking=false,before_merging=true),max_states=50000,prune_unreachable_states=false,prune_irrelevant_states=true))']), - IssueConfig('rl-rnd-fullprune', ['--search', 'astar(merge_and_shrink(shrink_strategy=shrink_random,merge_strategy=merge_precomputed(merge_tree=linear(variable_order=reverse_level)),label_reduction=exact(before_shrinking=false,before_merging=true),max_states=50000,prune_unreachable_states=true,prune_irrelevant_states=true))']), -] -SUITE = DEFAULT_OPTIMAL_SUITE -ENVIRONMENT = BaselSlurmEnvironment(email='silvan.sievers@unibas.ch') - -if is_test_run(): - SUITE = ['depot:p01.pddl', 'depot:p02.pddl', 
'parcprinter-opt11-strips:p01.pddl', 'parcprinter-opt11-strips:p02.pddl', 'mystery:prob07.pddl'] - ENVIRONMENT = LocalEnvironment(processes=4) - -exp = IssueExperiment( - revisions=REVISIONS, - configs=CONFIGS, - environment=ENVIRONMENT, -) -exp.add_resource('ms_parser', 'ms-parser.py', dest='ms-parser.py') -exp.add_command('ms-parser', ['{ms_parser}']) -exp.add_suite(BENCHMARKS_DIR, SUITE) - -# planner outcome attributes -perfect_heuristic = Attribute('perfect_heuristic', absolute=True, min_wins=False) - -# m&s attributes -ms_construction_time = Attribute('ms_construction_time', absolute=False, min_wins=True, functions=[geometric_mean]) -ms_atomic_construction_time = Attribute('ms_atomic_construction_time', absolute=False, min_wins=True, functions=[geometric_mean]) -ms_abstraction_constructed = Attribute('ms_abstraction_constructed', absolute=True, min_wins=False) -ms_final_size = Attribute('ms_final_size', absolute=False, min_wins=True) -ms_out_of_memory = Attribute('ms_out_of_memory', absolute=True, min_wins=True) -ms_out_of_time = Attribute('ms_out_of_time', absolute=True, min_wins=True) -search_out_of_memory = Attribute('search_out_of_memory', absolute=True, min_wins=True) -search_out_of_time = Attribute('search_out_of_time', absolute=True, min_wins=True) - -extra_attributes = [ - perfect_heuristic, - - ms_construction_time, - ms_atomic_construction_time, - ms_abstraction_constructed, - ms_final_size, - ms_out_of_memory, - ms_out_of_time, - search_out_of_memory, - search_out_of_time, -] -attributes = exp.DEFAULT_TABLE_ATTRIBUTES -attributes.extend(extra_attributes) - -exp.add_absolute_report_step(attributes=attributes) -exp.add_comparison_table_step(attributes=attributes) -exp.add_scatter_plot_step() - -exp.run_steps() diff --git a/experiments/issue743/common_setup.py b/experiments/issue743/common_setup.py deleted file mode 100644 index fe0b9d655e..0000000000 --- a/experiments/issue743/common_setup.py +++ /dev/null @@ -1,398 +0,0 @@ -# -*- coding: utf-8 -*- - 
-import itertools -import os -import platform -import subprocess -import sys - -from lab.experiment import ARGPARSER -from lab import tools - -from downward.experiment import FastDownwardExperiment -from downward.reports.absolute import AbsoluteReport -from downward.reports.compare import ComparativeReport -from downward.reports.scatter import ScatterPlotReport - -from relativescatter import RelativeScatterPlotReport - - -def parse_args(): - ARGPARSER.add_argument( - "--test", - choices=["yes", "no", "auto"], - default="auto", - dest="test_run", - help="test experiment locally on a small suite if --test=yes or " - "--test=auto and we are not on a cluster") - return ARGPARSER.parse_args() - -ARGS = parse_args() - - -DEFAULT_OPTIMAL_SUITE = [ - 'airport', 'barman-opt11-strips', 'barman-opt14-strips', 'blocks', - 'childsnack-opt14-strips', 'depot', 'driverlog', - 'elevators-opt08-strips', 'elevators-opt11-strips', - 'floortile-opt11-strips', 'floortile-opt14-strips', 'freecell', - 'ged-opt14-strips', 'grid', 'gripper', 'hiking-opt14-strips', - 'logistics00', 'logistics98', 'miconic', 'movie', 'mprime', - 'mystery', 'nomystery-opt11-strips', 'openstacks-opt08-strips', - 'openstacks-opt11-strips', 'openstacks-opt14-strips', - 'openstacks-strips', 'parcprinter-08-strips', - 'parcprinter-opt11-strips', 'parking-opt11-strips', - 'parking-opt14-strips', 'pathways-noneg', 'pegsol-08-strips', - 'pegsol-opt11-strips', 'pipesworld-notankage', - 'pipesworld-tankage', 'psr-small', 'rovers', 'satellite', - 'scanalyzer-08-strips', 'scanalyzer-opt11-strips', - 'sokoban-opt08-strips', 'sokoban-opt11-strips', 'storage', - 'tetris-opt14-strips', 'tidybot-opt11-strips', - 'tidybot-opt14-strips', 'tpp', 'transport-opt08-strips', - 'transport-opt11-strips', 'transport-opt14-strips', - 'trucks-strips', 'visitall-opt11-strips', 'visitall-opt14-strips', - 'woodworking-opt08-strips', 'woodworking-opt11-strips', - 'zenotravel'] - -DEFAULT_SATISFICING_SUITE = [ - 'airport', 'assembly', 
'barman-sat11-strips', - 'barman-sat14-strips', 'blocks', 'cavediving-14-adl', - 'childsnack-sat14-strips', 'citycar-sat14-adl', 'depot', - 'driverlog', 'elevators-sat08-strips', 'elevators-sat11-strips', - 'floortile-sat11-strips', 'floortile-sat14-strips', 'freecell', - 'ged-sat14-strips', 'grid', 'gripper', 'hiking-sat14-strips', - 'logistics00', 'logistics98', 'maintenance-sat14-adl', 'miconic', - 'miconic-fulladl', 'miconic-simpleadl', 'movie', 'mprime', - 'mystery', 'nomystery-sat11-strips', 'openstacks', - 'openstacks-sat08-adl', 'openstacks-sat08-strips', - 'openstacks-sat11-strips', 'openstacks-sat14-strips', - 'openstacks-strips', 'optical-telegraphs', 'parcprinter-08-strips', - 'parcprinter-sat11-strips', 'parking-sat11-strips', - 'parking-sat14-strips', 'pathways', 'pathways-noneg', - 'pegsol-08-strips', 'pegsol-sat11-strips', 'philosophers', - 'pipesworld-notankage', 'pipesworld-tankage', 'psr-large', - 'psr-middle', 'psr-small', 'rovers', 'satellite', - 'scanalyzer-08-strips', 'scanalyzer-sat11-strips', 'schedule', - 'sokoban-sat08-strips', 'sokoban-sat11-strips', 'storage', - 'tetris-sat14-strips', 'thoughtful-sat14-strips', - 'tidybot-sat11-strips', 'tpp', 'transport-sat08-strips', - 'transport-sat11-strips', 'transport-sat14-strips', 'trucks', - 'trucks-strips', 'visitall-sat11-strips', 'visitall-sat14-strips', - 'woodworking-sat08-strips', 'woodworking-sat11-strips', - 'zenotravel'] - - -def get_script(): - """Get file name of main script.""" - return tools.get_script_path() - - -def get_script_dir(): - """Get directory of main script. - - Usually a relative directory (depends on how it was called by the user.)""" - return os.path.dirname(get_script()) - - -def get_experiment_name(): - """Get name for experiment. - - Derived from the absolute filename of the main script, e.g. 
- "/ham/spam/eggs.py" => "spam-eggs".""" - script = os.path.abspath(get_script()) - script_dir = os.path.basename(os.path.dirname(script)) - script_base = os.path.splitext(os.path.basename(script))[0] - return "%s-%s" % (script_dir, script_base) - - -def get_data_dir(): - """Get data dir for the experiment. - - This is the subdirectory "data" of the directory containing - the main script.""" - return os.path.join(get_script_dir(), "data", get_experiment_name()) - - -def get_repo_base(): - """Get base directory of the repository, as an absolute path. - - Search upwards in the directory tree from the main script until a - directory with a subdirectory named ".hg" is found. - - Abort if the repo base cannot be found.""" - path = os.path.abspath(get_script_dir()) - while os.path.dirname(path) != path: - if os.path.exists(os.path.join(path, ".hg")): - return path - path = os.path.dirname(path) - sys.exit("repo base could not be found") - - -def is_running_on_cluster(): - node = platform.node() - return ( - "cluster" in node or - node.startswith("gkigrid") or - node in ["habakuk", "turtur"]) - - -def is_running_on_cluster_login_node(): - return platform.node() == "login20.cluster.bc2.ch" - - -def can_publish(): - return is_running_on_cluster_login_node() or not is_running_on_cluster() - - -def publish(report_file): - if can_publish(): - subprocess.call(["publish", report_file]) - else: - print "publishing reports is not supported on this node" - - -def is_test_run(): - return ARGS.test_run == "yes" or ( - ARGS.test_run == "auto" and not is_running_on_cluster()) - - -def get_algo_nick(revision, config_nick): - return "{revision}-{config_nick}".format(**locals()) - - -class IssueConfig(object): - """Hold information about a planner configuration. - - See FastDownwardExperiment.add_algorithm() for documentation of the - constructor's options. 
- - """ - def __init__(self, nick, component_options, - build_options=None, driver_options=None): - self.nick = nick - self.component_options = component_options - self.build_options = build_options - self.driver_options = driver_options - - -class IssueExperiment(FastDownwardExperiment): - """Subclass of FastDownwardExperiment with some convenience features.""" - - DEFAULT_TEST_SUITE = ["gripper:prob01.pddl"] - - DEFAULT_TABLE_ATTRIBUTES = [ - "cost", - "coverage", - "error", - "evaluations", - "expansions", - "expansions_until_last_jump", - "generated", - "memory", - "quality", - "run_dir", - "score_evaluations", - "score_expansions", - "score_generated", - "score_memory", - "score_search_time", - "score_total_time", - "search_time", - "total_time", - ] - - DEFAULT_SCATTER_PLOT_ATTRIBUTES = [ - "evaluations", - "expansions", - "expansions_until_last_jump", - "initial_h_value", - "memory", - "search_time", - "total_time", - ] - - PORTFOLIO_ATTRIBUTES = [ - "cost", - "coverage", - "error", - "plan_length", - "run_dir", - ] - - def __init__(self, revisions=None, configs=None, path=None, **kwargs): - """ - - You can either specify both *revisions* and *configs* or none - of them. If they are omitted, you will need to call - exp.add_algorithm() manually. - - If *revisions* is given, it must be a non-empty list of - revision identifiers, which specify which planner versions to - use in the experiment. The same versions are used for - translator, preprocessor and search. :: - - IssueExperiment(revisions=["issue123", "4b3d581643"], ...) - - If *configs* is given, it must be a non-empty list of - IssueConfig objects. :: - - IssueExperiment(..., configs=[ - IssueConfig("ff", ["--search", "eager_greedy(ff())"]), - IssueConfig( - "lama", [], - driver_options=["--alias", "seq-sat-lama-2011"]), - ]) - - If *path* is specified, it must be the path to where the - experiment should be built (e.g. - /home/john/experiments/issue123/exp01/). 
If omitted, the - experiment path is derived automatically from the main - script's filename. Example:: - - script = experiments/issue123/exp01.py --> - path = experiments/issue123/data/issue123-exp01/ - - """ - - path = path or get_data_dir() - - FastDownwardExperiment.__init__(self, path=path, **kwargs) - - if (revisions and not configs) or (not revisions and configs): - raise ValueError( - "please provide either both or none of revisions and configs") - - for rev in revisions: - for config in configs: - self.add_algorithm( - get_algo_nick(rev, config.nick), - get_repo_base(), - rev, - config.component_options, - build_options=config.build_options, - driver_options=config.driver_options) - - self._revisions = revisions - self._configs = configs - - @classmethod - def _is_portfolio(cls, config_nick): - return "fdss" in config_nick - - @classmethod - def get_supported_attributes(cls, config_nick, attributes): - if cls._is_portfolio(config_nick): - return [attr for attr in attributes - if attr in cls.PORTFOLIO_ATTRIBUTES] - return attributes - - def add_absolute_report_step(self, **kwargs): - """Add step that makes an absolute report. - - Absolute reports are useful for experiments that don't compare - revisions. - - The report is written to the experiment evaluation directory. - - All *kwargs* will be passed to the AbsoluteReport class. If the - keyword argument *attributes* is not specified, a default list - of attributes is used. :: - - exp.add_absolute_report_step(attributes=["coverage"]) - - """ - kwargs.setdefault("attributes", self.DEFAULT_TABLE_ATTRIBUTES) - report = AbsoluteReport(**kwargs) - outfile = os.path.join( - self.eval_dir, - get_experiment_name() + "." + report.output_format) - self.add_report(report, name="make-absolute-report", outfile=outfile) - self.add_step("publish-absolute-report", publish, outfile) - - def add_comparison_table_step(self, **kwargs): - """Add a step that makes pairwise revision comparisons. 
- - Create comparative reports for all pairs of Fast Downward - revisions. Each report pairs up the runs of the same config and - lists the two absolute attribute values and their difference - for all attributes in kwargs["attributes"]. - - All *kwargs* will be passed to the CompareConfigsReport class. - If the keyword argument *attributes* is not specified, a - default list of attributes is used. :: - - exp.add_comparison_table_step(attributes=["coverage"]) - - """ - kwargs.setdefault("attributes", self.DEFAULT_TABLE_ATTRIBUTES) - - def get_revision_pairs_and_files(): - for rev1, rev2 in itertools.combinations(self._revisions, 2): - outfile = os.path.join( - self.eval_dir, - "%s-%s-%s-compare.html" % (self.name, rev1, rev2)) - yield (rev1, rev2, outfile) - - def make_comparison_tables(): - for rev1, rev2, outfile in get_revision_pairs_and_files(): - compared_configs = [] - for config in self._configs: - config_nick = config.nick - compared_configs.append( - ("%s-%s" % (rev1, config_nick), - "%s-%s" % (rev2, config_nick), - "Diff (%s)" % config_nick)) - report = ComparativeReport(compared_configs, **kwargs) - report(self.eval_dir, outfile) - - def publish_comparison_tables(): - for _, _, outfile in get_revision_pairs_and_files(): - publish(outfile) - - self.add_step("make-comparison-tables", make_comparison_tables) - self.add_step("publish-comparison-tables", publish_comparison_tables) - - def add_scatter_plot_step(self, relative=False, attributes=None): - """Add step creating (relative) scatter plots for all revision pairs. - - Create a scatter plot for each combination of attribute, - configuration and revisions pair. If *attributes* is not - specified, a list of common scatter plot attributes is used. - For portfolios all attributes except "cost", "coverage" and - "plan_length" will be ignored. 
:: - - exp.add_scatter_plot_step(attributes=["expansions"]) - - """ - if relative: - report_class = RelativeScatterPlotReport - scatter_dir = os.path.join(self.eval_dir, "scatter-relative") - step_name = "make-relative-scatter-plots" - else: - report_class = ScatterPlotReport - scatter_dir = os.path.join(self.eval_dir, "scatter-absolute") - step_name = "make-absolute-scatter-plots" - if attributes is None: - attributes = self.DEFAULT_SCATTER_PLOT_ATTRIBUTES - - def make_scatter_plot(config_nick, rev1, rev2, attribute): - name = "-".join([self.name, rev1, rev2, attribute, config_nick]) - print "Make scatter plot for", name - algo1 = "{}-{}".format(rev1, config_nick) - algo2 = "{}-{}".format(rev2, config_nick) - report = report_class( - filter_config=[algo1, algo2], - attributes=[attribute], - get_category=lambda run1, run2: run1["domain"], - legend_location=(1.3, 0.5)) - report( - self.eval_dir, - os.path.join(scatter_dir, rev1 + "-" + rev2, name)) - - def make_scatter_plots(): - for config in self._configs: - for rev1, rev2 in itertools.combinations(self._revisions, 2): - for attribute in self.get_supported_attributes( - config.nick, attributes): - make_scatter_plot(config.nick, rev1, rev2, attribute) - - self.add_step(step_name, make_scatter_plots) diff --git a/experiments/issue743/relativescatter.py b/experiments/issue743/relativescatter.py deleted file mode 100644 index f74cb6e721..0000000000 --- a/experiments/issue743/relativescatter.py +++ /dev/null @@ -1,105 +0,0 @@ -# -*- coding: utf-8 -*- - -from collections import defaultdict - -from matplotlib import ticker - -from downward.reports.scatter import ScatterPlotReport -from downward.reports.plot import PlotReport, Matplotlib, MatplotlibPlot - - -# TODO: handle outliers - -# TODO: this is mostly copied from ScatterMatplotlib (scatter.py) -class RelativeScatterMatplotlib(Matplotlib): - @classmethod - def _plot(cls, report, axes, categories, styles): - # Display grid - axes.grid(b=True, linestyle='-', 
color='0.75') - - has_points = False - # Generate the scatter plots - for category, coords in sorted(categories.items()): - X, Y = zip(*coords) - axes.scatter(X, Y, s=42, label=category, **styles[category]) - if X and Y: - has_points = True - - if report.xscale == 'linear' or report.yscale == 'linear': - plot_size = report.missing_val * 1.01 - else: - plot_size = report.missing_val * 1.25 - - # make 5 ticks above and below 1 - yticks = [] - tick_step = report.ylim_top**(1/5.0) - for i in xrange(-5, 6): - yticks.append(tick_step**i) - axes.set_yticks(yticks) - axes.get_yaxis().set_major_formatter(ticker.ScalarFormatter()) - - axes.set_xlim(report.xlim_left or -1, report.xlim_right or plot_size) - axes.set_ylim(report.ylim_bottom or -1, report.ylim_top or plot_size) - - for axis in [axes.xaxis, axes.yaxis]: - MatplotlibPlot.change_axis_formatter( - axis, - report.missing_val if report.show_missing else None) - return has_points - - -class RelativeScatterPlotReport(ScatterPlotReport): - """ - Generate a scatter plot that shows a relative comparison of two - algorithms with regard to the given attribute. The attribute value - of algorithm 1 is shown on the x-axis and the relation to the value - of algorithm 2 on the y-axis. - """ - - def __init__(self, show_missing=True, get_category=None, **kwargs): - ScatterPlotReport.__init__(self, show_missing, get_category, **kwargs) - if self.output_format == 'tex': - raise "not supported" - else: - self.writer = RelativeScatterMatplotlib - - def _fill_categories(self, runs): - # We discard the *runs* parameter. 
- # Map category names to value tuples - categories = defaultdict(list) - self.ylim_bottom = 2 - self.ylim_top = 0.5 - self.xlim_left = float("inf") - for (domain, problem), runs in self.problem_runs.items(): - if len(runs) != 2: - continue - run1, run2 = runs - assert (run1['algorithm'] == self.algorithms[0] and - run2['algorithm'] == self.algorithms[1]) - val1 = run1.get(self.attribute) - val2 = run2.get(self.attribute) - if val1 is None or val2 is None: - continue - category = self.get_category(run1, run2) - assert val1 > 0, (domain, problem, self.algorithms[0], val1) - assert val2 > 0, (domain, problem, self.algorithms[1], val2) - x = val1 - y = val2 / float(val1) - - categories[category].append((x, y)) - - self.ylim_top = max(self.ylim_top, y) - self.ylim_bottom = min(self.ylim_bottom, y) - self.xlim_left = min(self.xlim_left, x) - - # center around 1 - if self.ylim_bottom < 1: - self.ylim_top = max(self.ylim_top, 1 / float(self.ylim_bottom)) - if self.ylim_top > 1: - self.ylim_bottom = min(self.ylim_bottom, 1 / float(self.ylim_top)) - return categories - - def _set_scales(self, xscale, yscale): - # ScatterPlot uses log-scaling on the x-axis by default. - PlotReport._set_scales( - self, xscale or self.attribute.scale or 'log', 'log') diff --git a/experiments/issue743/v1.py b/experiments/issue743/v1.py deleted file mode 100755 index 9594caebbe..0000000000 --- a/experiments/issue743/v1.py +++ /dev/null @@ -1,39 +0,0 @@ -#! 
/usr/bin/env python -# -*- coding: utf-8 -*- - -import os - -from lab.environments import LocalEnvironment, BaselSlurmEnvironment - -from downward.reports.compare import ComparativeReport - -import common_setup -from common_setup import IssueConfig, IssueExperiment -from relativescatter import RelativeScatterPlotReport - -DIR = os.path.dirname(os.path.abspath(__file__)) -BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"] -REVISIONS = ["issue743-v1"] -CONFIGS = [ - IssueConfig( - 'ipdb-goal-vars-{goal_vars}'.format(**locals()), - ['--search', 'astar(ipdb(max_time=900, consider_co_effect_vars={goal_vars}))'.format(**locals())]) - for goal_vars in [False, True] -] -SUITE = common_setup.DEFAULT_OPTIMAL_SUITE -ENVIRONMENT = BaselSlurmEnvironment(email="jendrik.seipp@unibas.ch") - -if common_setup.is_test_run(): - SUITE = IssueExperiment.DEFAULT_TEST_SUITE - ENVIRONMENT = LocalEnvironment(processes=1) - -exp = IssueExperiment( - revisions=REVISIONS, - configs=CONFIGS, - environment=ENVIRONMENT, -) -exp.add_suite(BENCHMARKS_DIR, SUITE) -exp.add_absolute_report_step() -#exp.add_comparison_table_step() - -exp.run_steps() diff --git a/experiments/issue743/v2-vs-base.py b/experiments/issue743/v2-vs-base.py deleted file mode 100755 index 0cfcca74aa..0000000000 --- a/experiments/issue743/v2-vs-base.py +++ /dev/null @@ -1,40 +0,0 @@ -#! 
/usr/bin/env python -# -*- coding: utf-8 -*- - -import os - -from lab.environments import LocalEnvironment, BaselSlurmEnvironment - -from downward.reports.compare import ComparativeReport - -import common_setup -from common_setup import IssueConfig, IssueExperiment -from relativescatter import RelativeScatterPlotReport - -DIR = os.path.dirname(os.path.abspath(__file__)) -BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"] -REVISIONS = [] -CONFIGS = [] -SUITE = common_setup.DEFAULT_OPTIMAL_SUITE -ENVIRONMENT = BaselSlurmEnvironment(email="jendrik.seipp@unibas.ch") - -if common_setup.is_test_run(): - SUITE = IssueExperiment.DEFAULT_TEST_SUITE - ENVIRONMENT = LocalEnvironment(processes=1) - -exp = IssueExperiment( - revisions=REVISIONS, - configs=CONFIGS, - environment=ENVIRONMENT, -) -exp.add_algorithm( - "base-ipdb-no-goal-vars", common_setup.get_repo_base(), "issue743-base", - ['--search', 'astar(ipdb(max_time=900))']) -exp.add_algorithm( - "v2-ipdb-no-goal-vars", common_setup.get_repo_base(), "issue743-v2", - ['--search', 'astar(ipdb(max_time=900, use_co_effect_goal_variables=false))']) -exp.add_suite(BENCHMARKS_DIR, SUITE) -exp.add_absolute_report_step() -#exp.add_comparison_table_step() - -exp.run_steps() diff --git a/experiments/issue743/v2.py b/experiments/issue743/v2.py deleted file mode 100755 index 75339d1096..0000000000 --- a/experiments/issue743/v2.py +++ /dev/null @@ -1,39 +0,0 @@ -#! 
/usr/bin/env python -# -*- coding: utf-8 -*- - -import os - -from lab.environments import LocalEnvironment, BaselSlurmEnvironment - -from downward.reports.compare import ComparativeReport - -import common_setup -from common_setup import IssueConfig, IssueExperiment -from relativescatter import RelativeScatterPlotReport - -DIR = os.path.dirname(os.path.abspath(__file__)) -BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"] -REVISIONS = ["issue743-v2"] -CONFIGS = [ - IssueConfig( - 'ipdb-goal-vars-{goal_vars}'.format(**locals()), - ['--search', 'astar(ipdb(max_time=900, use_co_effect_goal_variables={goal_vars}))'.format(**locals())]) - for goal_vars in [False, True] -] -SUITE = common_setup.DEFAULT_OPTIMAL_SUITE -ENVIRONMENT = BaselSlurmEnvironment(email="jendrik.seipp@unibas.ch") - -if common_setup.is_test_run(): - SUITE = IssueExperiment.DEFAULT_TEST_SUITE - ENVIRONMENT = LocalEnvironment(processes=1) - -exp = IssueExperiment( - revisions=REVISIONS, - configs=CONFIGS, - environment=ENVIRONMENT, -) -exp.add_suite(BENCHMARKS_DIR, SUITE) -exp.add_absolute_report_step() -#exp.add_comparison_table_step() - -exp.run_steps() diff --git a/experiments/issue743/v3.py b/experiments/issue743/v3.py deleted file mode 100755 index 157c55285e..0000000000 --- a/experiments/issue743/v3.py +++ /dev/null @@ -1,36 +0,0 @@ -#! 
/usr/bin/env python -# -*- coding: utf-8 -*- - -import os - -from lab.environments import LocalEnvironment, BaselSlurmEnvironment - -from downward.reports.compare import ComparativeReport - -import common_setup -from common_setup import IssueConfig, IssueExperiment -from relativescatter import RelativeScatterPlotReport - -DIR = os.path.dirname(os.path.abspath(__file__)) -BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"] -REVISIONS = ["issue743-v2", "issue743-v3"] -CONFIGS = [ - IssueConfig('ipdb-900s', ['--search', 'astar(ipdb(max_time=900))']) -] -SUITE = common_setup.DEFAULT_OPTIMAL_SUITE -ENVIRONMENT = BaselSlurmEnvironment(email="jendrik.seipp@unibas.ch") - -if common_setup.is_test_run(): - SUITE = IssueExperiment.DEFAULT_TEST_SUITE - ENVIRONMENT = LocalEnvironment(processes=1) - -exp = IssueExperiment( - revisions=REVISIONS, - configs=CONFIGS, - environment=ENVIRONMENT, -) -exp.add_suite(BENCHMARKS_DIR, SUITE) -exp.add_absolute_report_step() -exp.add_comparison_table_step() - -exp.run_steps() diff --git a/experiments/issue744/base-opt-30min.py b/experiments/issue744/base-opt-30min.py deleted file mode 100755 index a5496f4ec9..0000000000 --- a/experiments/issue744/base-opt-30min.py +++ /dev/null @@ -1,83 +0,0 @@ -#! 
/usr/bin/env python -# -*- coding: utf-8 -*- - -import itertools -import os -import subprocess - -from lab.environments import LocalEnvironment, BaselSlurmEnvironment -from lab.reports import Attribute - -from downward.reports.compare import ComparativeReport - -import common_setup -from common_setup import IssueConfig, IssueExperiment -from relativescatter import RelativeScatterPlotReport - -EXPNAME = common_setup.get_experiment_name() -DIR = os.path.dirname(os.path.abspath(__file__)) -BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"] -REVISIONS = ["issue744-base"] -SEARCHES = [ - ("bjolp", [ - "--evaluator", "lmc=lmcount(lm_merged([lm_rhw(),lm_hm(m=1)]),admissible=true)", - "--search", "astar(lmc,lazy_evaluator=lmc)"]), - ("blind", ["--search", "astar(blind())"]), - ("cegar", ["--search", "astar(cegar())"]), - ("divpot", ["--search", "astar(diverse_potentials())"]), - ("ipdb", ["--search", "astar(ipdb())"]), - ("lmcut", ["--search", "astar(lmcut())"]), - ("mas", [ - "--search", "astar(merge_and_shrink(shrink_strategy=shrink_bisimulation(greedy=false)," - " merge_strategy=merge_sccs(order_of_sccs=topological," - " merge_selector=score_based_filtering(scoring_functions=[goal_relevance, dfp, total_order]))," - " label_reduction=exact(before_shrinking=true, before_merging=false)," - " max_states=50000, threshold_before_merge=1))"]), - ("seq+lmcut", ["--search", "astar(operatorcounting([state_equation_constraints(), lmcut_constraints()]))"]), - ("h2", ["--search", "astar(hm(m=2))"]), - ("hmax", ["--search", "astar(hmax())"]), -] -CONFIGS = [ - IssueConfig(search_nick, search, - driver_options=["--overall-time-limit", "30m"]) - for rev in REVISIONS - for search_nick, search in SEARCHES -] -SUITE = common_setup.DEFAULT_OPTIMAL_SUITE -ENVIRONMENT = BaselSlurmEnvironment( - partition="infai_2", - email="manuel.heusner@unibas.ch", - export=["PATH", "DOWNWARD_BENCHMARKS"]) - -if common_setup.is_test_run(): - SUITE = IssueExperiment.DEFAULT_TEST_SUITE - ENVIRONMENT = 
LocalEnvironment(processes=1) - -exp = IssueExperiment( - revisions=REVISIONS, - configs=CONFIGS, - environment=ENVIRONMENT, -) -exp.add_suite(BENCHMARKS_DIR, SUITE) - -exp.add_parser(exp.EXITCODE_PARSER) -exp.add_parser(exp.TRANSLATOR_PARSER) -exp.add_parser(exp.SINGLE_SEARCH_PARSER) -exp.add_parser(exp.PLANNER_PARSER) -exp.add_parser('custom-parser.py') - -exp.add_step("build", exp.build) -exp.add_step("start", exp.start_runs) -exp.add_fetcher(name="fetch") - -log_size = Attribute('log_size') -attributes = IssueExperiment.DEFAULT_TABLE_ATTRIBUTES + [log_size] - -exp.add_absolute_report_step(attributes=attributes) -#exp.add_comparison_table_step() - -sort_spec = [('log_size', 'desc')] -attributes = ['run_dir', 'log_size'] -exp.add_sorted_report_step(attributes=attributes, sort_spec=sort_spec) - -exp.run_steps() diff --git a/experiments/issue744/base-sat-30min.py b/experiments/issue744/base-sat-30min.py deleted file mode 100755 index fe2e610442..0000000000 --- a/experiments/issue744/base-sat-30min.py +++ /dev/null @@ -1,100 +0,0 @@ -#! 
/usr/bin/env python -# -*- coding: utf-8 -*- - -import itertools -import os -import subprocess - -from lab.environments import LocalEnvironment, BaselSlurmEnvironment -from lab.reports import Attribute - - -import common_setup -from common_setup import IssueConfig, IssueExperiment -from relativescatter import RelativeScatterPlotReport - -EXPNAME = common_setup.get_experiment_name() -DIR = os.path.dirname(os.path.abspath(__file__)) -BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"] -REVISIONS = ["issue744-base"] -CONFIG_DICT = { - "eager_greedy_ff": [ - "--evaluator", - "h=ff()", - "--search", - "eager_greedy([h], preferred=[h])"], - "eager_greedy_cea": [ - "--evaluator", - "h=cea()", - "--search", - "eager_greedy([h], preferred=[h])"], - "lazy_greedy_add": [ - "--evaluator", - "h=add()", - "--search", - "lazy_greedy([h], preferred=[h])"], - "lazy_greedy_cg": [ - "--evaluator", - "h=cg()", - "--search", - "lazy_greedy([h], preferred=[h])"], - "lama-first": [ - "--evaluator", - "hlm=lmcount(lm_factory=lm_rhw(reasonable_orders=true),transform=adapt_costs(one),pref=false)", - "--evaluator", "hff=ff(transform=adapt_costs(one))", - "--search", """lazy_greedy([hff,hlm],preferred=[hff,hlm], - cost_type=one,reopen_closed=false)"""], - "lama-first-typed": [ - "--evaluator", - "hlm=lmcount(lm_factory=lm_rhw(reasonable_orders=true),transform=adapt_costs(one),pref=false)", - "--evaluator", "hff=ff(transform=adapt_costs(one))", - "--search", - "lazy(alt([single(hff), single(hff, pref_only=true)," - "single(hlm), single(hlm, pref_only=true), type_based([hff, g()])], boost=1000)," - "preferred=[hff,hlm], cost_type=one, reopen_closed=false, randomize_successors=true," - "preferred_successors_first=false)"], -} -CONFIGS = [ - IssueConfig(config_nick, config, - driver_options=["--overall-time-limit", "30m"]) - for rev in REVISIONS - for config_nick, config in CONFIG_DICT.items() -] -SUITE = common_setup.DEFAULT_SATISFICING_SUITE -ENVIRONMENT = BaselSlurmEnvironment( - 
partition="infai_2", - email="manuel.heusner@unibas.ch", - export=["PATH", "DOWNWARD_BENCHMARKS"]) - -if common_setup.is_test_run(): - SUITE = IssueExperiment.DEFAULT_TEST_SUITE - ENVIRONMENT = LocalEnvironment(processes=1) - -exp = IssueExperiment( - revisions=REVISIONS, - configs=CONFIGS, - environment=ENVIRONMENT, -) -exp.add_suite(BENCHMARKS_DIR, SUITE) - -exp.add_parser(exp.EXITCODE_PARSER) -exp.add_parser(exp.TRANSLATOR_PARSER) -exp.add_parser(exp.SINGLE_SEARCH_PARSER) -exp.add_parser(exp.PLANNER_PARSER) -exp.add_parser('custom-parser.py') - -exp.add_step("build", exp.build) -exp.add_step("start", exp.start_runs) -exp.add_fetcher(name="fetch") - -log_size = Attribute('log_size') -attributes = IssueExperiment.DEFAULT_TABLE_ATTRIBUTES + [log_size] - -exp.add_absolute_report_step(attributes=attributes) -#exp.add_comparison_table_step() - -sort_spec = [('log_size', 'desc')] -attributes = ['run_dir', 'log_size'] -exp.add_sorted_report_step(attributes=attributes, sort_spec=sort_spec) - -exp.run_steps() diff --git a/experiments/issue744/common_setup.py b/experiments/issue744/common_setup.py deleted file mode 100644 index 13c3113df4..0000000000 --- a/experiments/issue744/common_setup.py +++ /dev/null @@ -1,410 +0,0 @@ -# -*- coding: utf-8 -*- - -import itertools -import os -import platform -import subprocess -import sys - -from lab.experiment import ARGPARSER -from lab import tools - -from downward.experiment import FastDownwardExperiment -from downward.reports.absolute import AbsoluteReport -from downward.reports.compare import ComparativeReport -from downward.reports.scatter import ScatterPlotReport - -from relativescatter import RelativeScatterPlotReport -from sortedreport import SortedReport - - -def parse_args(): - ARGPARSER.add_argument( - "--test", - choices=["yes", "no", "auto"], - default="auto", - dest="test_run", - help="test experiment locally on a small suite if --test=yes or " - "--test=auto and we are not on a cluster") - return ARGPARSER.parse_args() 
- -ARGS = parse_args() - - -DEFAULT_OPTIMAL_SUITE = [ - 'agricola-opt18-strips', 'airport', 'barman-opt11-strips', - 'barman-opt14-strips', 'blocks', 'childsnack-opt14-strips', - 'data-network-opt18-strips', 'depot', 'driverlog', - 'elevators-opt08-strips', 'elevators-opt11-strips', - 'floortile-opt11-strips', 'floortile-opt14-strips', 'freecell', - 'ged-opt14-strips', 'grid', 'gripper', 'hiking-opt14-strips', - 'logistics00', 'logistics98', 'miconic', 'movie', 'mprime', - 'mystery', 'nomystery-opt11-strips', 'openstacks-opt08-strips', - 'openstacks-opt11-strips', 'openstacks-opt14-strips', - 'openstacks-strips', 'organic-synthesis-opt18-strips', - 'organic-synthesis-split-opt18-strips', 'parcprinter-08-strips', - 'parcprinter-opt11-strips', 'parking-opt11-strips', - 'parking-opt14-strips', 'pathways-noneg', 'pegsol-08-strips', - 'pegsol-opt11-strips', 'petri-net-alignment-opt18-strips', - 'pipesworld-notankage', 'pipesworld-tankage', 'psr-small', 'rovers', - 'satellite', 'scanalyzer-08-strips', 'scanalyzer-opt11-strips', - 'snake-opt18-strips', 'sokoban-opt08-strips', - 'sokoban-opt11-strips', 'spider-opt18-strips', 'storage', - 'termes-opt18-strips', 'tetris-opt14-strips', - 'tidybot-opt11-strips', 'tidybot-opt14-strips', 'tpp', - 'transport-opt08-strips', 'transport-opt11-strips', - 'transport-opt14-strips', 'trucks-strips', 'visitall-opt11-strips', - 'visitall-opt14-strips', 'woodworking-opt08-strips', - 'woodworking-opt11-strips', 'zenotravel'] - -DEFAULT_SATISFICING_SUITE = [ - 'agricola-sat18-strips', 'airport', 'assembly', - 'barman-sat11-strips', 'barman-sat14-strips', 'blocks', - 'caldera-sat18-adl', 'caldera-split-sat18-adl', 'cavediving-14-adl', - 'childsnack-sat14-strips', 'citycar-sat14-adl', - 'data-network-sat18-strips', 'depot', 'driverlog', - 'elevators-sat08-strips', 'elevators-sat11-strips', - 'flashfill-sat18-adl', 'floortile-sat11-strips', - 'floortile-sat14-strips', 'freecell', 'ged-sat14-strips', 'grid', - 'gripper', 'hiking-sat14-strips', 
'logistics00', 'logistics98', - 'maintenance-sat14-adl', 'miconic', 'miconic-fulladl', - 'miconic-simpleadl', 'movie', 'mprime', 'mystery', - 'nomystery-sat11-strips', 'nurikabe-sat18-adl', 'openstacks', - 'openstacks-sat08-adl', 'openstacks-sat08-strips', - 'openstacks-sat11-strips', 'openstacks-sat14-strips', - 'openstacks-strips', 'optical-telegraphs', - 'organic-synthesis-sat18-strips', - 'organic-synthesis-split-sat18-strips', 'parcprinter-08-strips', - 'parcprinter-sat11-strips', 'parking-sat11-strips', - 'parking-sat14-strips', 'pathways', 'pathways-noneg', - 'pegsol-08-strips', 'pegsol-sat11-strips', 'philosophers', - 'pipesworld-notankage', 'pipesworld-tankage', 'psr-large', - 'psr-middle', 'psr-small', 'rovers', 'satellite', - 'scanalyzer-08-strips', 'scanalyzer-sat11-strips', 'schedule', - 'settlers-sat18-adl', 'snake-sat18-strips', 'sokoban-sat08-strips', - 'sokoban-sat11-strips', 'spider-sat18-strips', 'storage', - 'termes-sat18-strips', 'tetris-sat14-strips', - 'thoughtful-sat14-strips', 'tidybot-sat11-strips', 'tpp', - 'transport-sat08-strips', 'transport-sat11-strips', - 'transport-sat14-strips', 'trucks', 'trucks-strips', - 'visitall-sat11-strips', 'visitall-sat14-strips', - 'woodworking-sat08-strips', 'woodworking-sat11-strips', - 'zenotravel'] - - -def get_script(): - """Get file name of main script.""" - return tools.get_script_path() - - -def get_script_dir(): - """Get directory of main script. - - Usually a relative directory (depends on how it was called by the user.)""" - return os.path.dirname(get_script()) - - -def get_experiment_name(): - """Get name for experiment. - - Derived from the absolute filename of the main script, e.g. - "/ham/spam/eggs.py" => "spam-eggs".""" - script = os.path.abspath(get_script()) - script_dir = os.path.basename(os.path.dirname(script)) - script_base = os.path.splitext(os.path.basename(script))[0] - return "%s-%s" % (script_dir, script_base) - - -def get_data_dir(): - """Get data dir for the experiment. 
- - This is the subdirectory "data" of the directory containing - the main script.""" - return os.path.join(get_script_dir(), "data", get_experiment_name()) - - -def get_repo_base(): - """Get base directory of the repository, as an absolute path. - - Search upwards in the directory tree from the main script until a - directory with a subdirectory named ".hg" is found. - - Abort if the repo base cannot be found.""" - path = os.path.abspath(get_script_dir()) - while os.path.dirname(path) != path: - if os.path.exists(os.path.join(path, ".hg")): - return path - path = os.path.dirname(path) - sys.exit("repo base could not be found") - - -def is_running_on_cluster(): - node = platform.node() - return node.endswith(".scicore.unibas.ch") or node.endswith(".cluster.bc2.ch") - - -def is_test_run(): - return ARGS.test_run == "yes" or ( - ARGS.test_run == "auto" and not is_running_on_cluster()) - - -def get_algo_nick(revision, config_nick): - return "{revision}-{config_nick}".format(**locals()) - - -class IssueConfig(object): - """Hold information about a planner configuration. - - See FastDownwardExperiment.add_algorithm() for documentation of the - constructor's options. 
- - """ - def __init__(self, nick, component_options, - build_options=None, driver_options=None): - self.nick = nick - self.component_options = component_options - self.build_options = build_options - self.driver_options = driver_options - - -class IssueExperiment(FastDownwardExperiment): - """Subclass of FastDownwardExperiment with some convenience features.""" - - DEFAULT_TEST_SUITE = ["depot:p01.pddl", "gripper:prob01.pddl"] - - DEFAULT_TABLE_ATTRIBUTES = [ - "cost", - "coverage", - "error", - "evaluations", - "expansions", - "expansions_until_last_jump", - "generated", - "memory", - "planner_memory", - "planner_time", - "quality", - "run_dir", - "score_evaluations", - "score_expansions", - "score_generated", - "score_memory", - "score_search_time", - "score_total_time", - "search_time", - "total_time", - ] - - DEFAULT_SCATTER_PLOT_ATTRIBUTES = [ - "evaluations", - "expansions", - "expansions_until_last_jump", - "initial_h_value", - "memory", - "search_time", - "total_time", - ] - - PORTFOLIO_ATTRIBUTES = [ - "cost", - "coverage", - "error", - "quality", - "run_dir", - ] - - def __init__(self, revisions=None, configs=None, path=None, **kwargs): - """ - - You can either specify both *revisions* and *configs* or none - of them. If they are omitted, you will need to call - exp.add_algorithm() manually. - - If *revisions* is given, it must be a non-empty list of - revision identifiers, which specify which planner versions to - use in the experiment. The same versions are used for - translator, preprocessor and search. :: - - IssueExperiment(revisions=["issue123", "4b3d581643"], ...) - - If *configs* is given, it must be a non-empty list of - IssueConfig objects. :: - - IssueExperiment(..., configs=[ - IssueConfig("ff", ["--search", "eager_greedy(ff())"]), - IssueConfig( - "lama", [], - driver_options=["--alias", "seq-sat-lama-2011"]), - ]) - - If *path* is specified, it must be the path to where the - experiment should be built (e.g. 
- /home/john/experiments/issue123/exp01/). If omitted, the - experiment path is derived automatically from the main - script's filename. Example:: - - script = experiments/issue123/exp01.py --> - path = experiments/issue123/data/issue123-exp01/ - - """ - - path = path or get_data_dir() - - FastDownwardExperiment.__init__(self, path=path, **kwargs) - - if (revisions and not configs) or (not revisions and configs): - raise ValueError( - "please provide either both or none of revisions and configs") - - for rev in revisions: - for config in configs: - self.add_algorithm( - get_algo_nick(rev, config.nick), - get_repo_base(), - rev, - config.component_options, - build_options=config.build_options, - driver_options=config.driver_options) - - self._revisions = revisions - self._configs = configs - - @classmethod - def _is_portfolio(cls, config_nick): - return "fdss" in config_nick - - @classmethod - def get_supported_attributes(cls, config_nick, attributes): - if cls._is_portfolio(config_nick): - return [attr for attr in attributes - if attr in cls.PORTFOLIO_ATTRIBUTES] - return attributes - - def add_absolute_report_step(self, **kwargs): - """Add step that makes an absolute report. - - Absolute reports are useful for experiments that don't compare - revisions. - - The report is written to the experiment evaluation directory. - - All *kwargs* will be passed to the AbsoluteReport class. If the - keyword argument *attributes* is not specified, a default list - of attributes is used. :: - - exp.add_absolute_report_step(attributes=["coverage"]) - - """ - kwargs.setdefault("attributes", self.DEFAULT_TABLE_ATTRIBUTES) - report = AbsoluteReport(**kwargs) - outfile = os.path.join( - self.eval_dir, - get_experiment_name() + "." 
+ report.output_format) - self.add_report(report, outfile=outfile) - self.add_step( - 'publish-absolute-report', subprocess.call, ['publish', outfile]) - - def add_sorted_report_step(self, sort_spec, name=None, **kwargs): - """Add step that makes a sorted report. - - """ - kwargs.setdefault("attributes", self.DEFAULT_TABLE_ATTRIBUTES) - report = SortedReport(sort_spec, **kwargs) - name = name or "sorted" - name = "-" + name - outfile = os.path.join( - self.eval_dir, - get_experiment_name() + name + "." + report.output_format) - self.add_report(report, outfile=outfile) - self.add_step( - 'publish{}-report'.format(name), subprocess.call, ['publish', outfile]) - - def add_comparison_table_step(self, **kwargs): - """Add a step that makes pairwise revision comparisons. - - Create comparative reports for all pairs of Fast Downward - revisions. Each report pairs up the runs of the same config and - lists the two absolute attribute values and their difference - for all attributes in kwargs["attributes"]. - - All *kwargs* will be passed to the CompareConfigsReport class. - If the keyword argument *attributes* is not specified, a - default list of attributes is used. 
:: - - exp.add_comparison_table_step(attributes=["coverage"]) - - """ - kwargs.setdefault("attributes", self.DEFAULT_TABLE_ATTRIBUTES) - - def make_comparison_tables(): - for rev1, rev2 in itertools.combinations(self._revisions, 2): - compared_configs = [] - for config in self._configs: - config_nick = config.nick - compared_configs.append( - ("%s-%s" % (rev1, config_nick), - "%s-%s" % (rev2, config_nick), - "Diff (%s)" % config_nick)) - report = ComparativeReport(compared_configs, **kwargs) - outfile = os.path.join( - self.eval_dir, - "%s-%s-%s-compare.%s" % ( - self.name, rev1, rev2, report.output_format)) - report(self.eval_dir, outfile) - - def publish_comparison_tables(): - for rev1, rev2 in itertools.combinations(self._revisions, 2): - outfile = os.path.join( - self.eval_dir, - "%s-%s-%s-compare.html" % (self.name, rev1, rev2)) - subprocess.call(["publish", outfile]) - - self.add_step("make-comparison-tables", make_comparison_tables) - self.add_step( - "publish-comparison-tables", publish_comparison_tables) - - def add_scatter_plot_step(self, relative=False, attributes=None): - """Add step creating (relative) scatter plots for all revision pairs. - - Create a scatter plot for each combination of attribute, - configuration and revisions pair. If *attributes* is not - specified, a list of common scatter plot attributes is used. - For portfolios all attributes except "cost", "coverage" and - "plan_length" will be ignored. 
:: - - exp.add_scatter_plot_step(attributes=["expansions"]) - - """ - if relative: - report_class = RelativeScatterPlotReport - scatter_dir = os.path.join(self.eval_dir, "scatter-relative") - step_name = "make-relative-scatter-plots" - else: - report_class = ScatterPlotReport - scatter_dir = os.path.join(self.eval_dir, "scatter-absolute") - step_name = "make-absolute-scatter-plots" - if attributes is None: - attributes = self.DEFAULT_SCATTER_PLOT_ATTRIBUTES - - def make_scatter_plot(config_nick, rev1, rev2, attribute): - name = "-".join([self.name, rev1, rev2, attribute, config_nick]) - print("Make scatter plot for", name) - algo1 = "{}-{}".format(rev1, config_nick) - algo2 = "{}-{}".format(rev2, config_nick) - report = report_class( - filter_config=[algo1, algo2], - attributes=[attribute], - get_category=lambda run1, run2: run1["domain"], - legend_location=(1.3, 0.5)) - report( - self.eval_dir, - os.path.join(scatter_dir, rev1 + "-" + rev2, name)) - - def make_scatter_plots(): - for config in self._configs: - for rev1, rev2 in itertools.combinations(self._revisions, 2): - for attribute in self.get_supported_attributes( - config.nick, attributes): - make_scatter_plot(config.nick, rev1, rev2, attribute) - - self.add_step(step_name, make_scatter_plots) diff --git a/experiments/issue744/custom-parser.py b/experiments/issue744/custom-parser.py deleted file mode 100755 index 713dd060b1..0000000000 --- a/experiments/issue744/custom-parser.py +++ /dev/null @@ -1,13 +0,0 @@ -#! 
/usr/bin/env python - -from lab.parser import Parser - -def compute_log_size(content, props): - props["log_size"] = len(content) - -def main(): - parser = Parser() - parser.add_function(compute_log_size) - parser.parse() - -main() diff --git a/experiments/issue744/relativescatter.py b/experiments/issue744/relativescatter.py deleted file mode 100644 index f74cb6e721..0000000000 --- a/experiments/issue744/relativescatter.py +++ /dev/null @@ -1,105 +0,0 @@ -# -*- coding: utf-8 -*- - -from collections import defaultdict - -from matplotlib import ticker - -from downward.reports.scatter import ScatterPlotReport -from downward.reports.plot import PlotReport, Matplotlib, MatplotlibPlot - - -# TODO: handle outliers - -# TODO: this is mostly copied from ScatterMatplotlib (scatter.py) -class RelativeScatterMatplotlib(Matplotlib): - @classmethod - def _plot(cls, report, axes, categories, styles): - # Display grid - axes.grid(b=True, linestyle='-', color='0.75') - - has_points = False - # Generate the scatter plots - for category, coords in sorted(categories.items()): - X, Y = zip(*coords) - axes.scatter(X, Y, s=42, label=category, **styles[category]) - if X and Y: - has_points = True - - if report.xscale == 'linear' or report.yscale == 'linear': - plot_size = report.missing_val * 1.01 - else: - plot_size = report.missing_val * 1.25 - - # make 5 ticks above and below 1 - yticks = [] - tick_step = report.ylim_top**(1/5.0) - for i in xrange(-5, 6): - yticks.append(tick_step**i) - axes.set_yticks(yticks) - axes.get_yaxis().set_major_formatter(ticker.ScalarFormatter()) - - axes.set_xlim(report.xlim_left or -1, report.xlim_right or plot_size) - axes.set_ylim(report.ylim_bottom or -1, report.ylim_top or plot_size) - - for axis in [axes.xaxis, axes.yaxis]: - MatplotlibPlot.change_axis_formatter( - axis, - report.missing_val if report.show_missing else None) - return has_points - - -class RelativeScatterPlotReport(ScatterPlotReport): - """ - Generate a scatter plot that shows a 
relative comparison of two - algorithms with regard to the given attribute. The attribute value - of algorithm 1 is shown on the x-axis and the relation to the value - of algorithm 2 on the y-axis. - """ - - def __init__(self, show_missing=True, get_category=None, **kwargs): - ScatterPlotReport.__init__(self, show_missing, get_category, **kwargs) - if self.output_format == 'tex': - raise "not supported" - else: - self.writer = RelativeScatterMatplotlib - - def _fill_categories(self, runs): - # We discard the *runs* parameter. - # Map category names to value tuples - categories = defaultdict(list) - self.ylim_bottom = 2 - self.ylim_top = 0.5 - self.xlim_left = float("inf") - for (domain, problem), runs in self.problem_runs.items(): - if len(runs) != 2: - continue - run1, run2 = runs - assert (run1['algorithm'] == self.algorithms[0] and - run2['algorithm'] == self.algorithms[1]) - val1 = run1.get(self.attribute) - val2 = run2.get(self.attribute) - if val1 is None or val2 is None: - continue - category = self.get_category(run1, run2) - assert val1 > 0, (domain, problem, self.algorithms[0], val1) - assert val2 > 0, (domain, problem, self.algorithms[1], val2) - x = val1 - y = val2 / float(val1) - - categories[category].append((x, y)) - - self.ylim_top = max(self.ylim_top, y) - self.ylim_bottom = min(self.ylim_bottom, y) - self.xlim_left = min(self.xlim_left, x) - - # center around 1 - if self.ylim_bottom < 1: - self.ylim_top = max(self.ylim_top, 1 / float(self.ylim_bottom)) - if self.ylim_top > 1: - self.ylim_bottom = min(self.ylim_bottom, 1 / float(self.ylim_top)) - return categories - - def _set_scales(self, xscale, yscale): - # ScatterPlot uses log-scaling on the x-axis by default. 
- PlotReport._set_scales( - self, xscale or self.attribute.scale or 'log', 'log') diff --git a/experiments/issue744/sortedreport.py b/experiments/issue744/sortedreport.py deleted file mode 100644 index fb8825c22f..0000000000 --- a/experiments/issue744/sortedreport.py +++ /dev/null @@ -1,75 +0,0 @@ -# -*- coding: utf-8 -*- -# -# Downward Lab uses the Lab package to conduct experiments with the -# Fast Downward planning system. -# -# This program is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with this program. If not, see . - -from operator import itemgetter - -import logging - -from lab.reports import Table, DynamicDataModule - -from downward.reports import PlanningReport - - - -class SortedReport(PlanningReport): - def __init__(self, sort_spec, **kwargs): - PlanningReport.__init__(self, **kwargs) - self._sort_spec = sort_spec - - def get_markup(self): - """ - Return `txt2tags `_ markup for the report. 
- - """ - table = Table() - row_sort_module = RowSortModule(self._sort_spec) - table.dynamic_data_modules.append(row_sort_module) - for run_id, run in self.props.items(): - row = {} - for key, value in run.items(): - if key not in self.attributes: - continue - if isinstance(value, (list, tuple)): - key = '-'.join([str(item) for item in value]) - row[key] = value - table.add_row(run_id, row) - return str(table) - -class RowSortModule(DynamicDataModule): - def __init__(self, sort_spec): - self._sort_spec = sort_spec - - def modify_printable_row_order(self, table, row_order): - col_names = [None] + table.col_names - - entries = [] - for row_name in row_order: - if row_name == 'column names (never printed)': - continue - entry = [row_name] + table.get_row(row_name) - entries.append(tuple(entry)) - - for attribute, desc in reversed(self._sort_spec): - index = col_names.index(attribute) - reverse = desc == 'desc' - - entries.sort(key=itemgetter(index), reverse=reverse) - - new_row_order = ['column names (never printed)'] + [i[0] for i in entries] - - return new_row_order diff --git a/experiments/issue744/v1-opt-30min.py b/experiments/issue744/v1-opt-30min.py deleted file mode 100755 index 67e0863577..0000000000 --- a/experiments/issue744/v1-opt-30min.py +++ /dev/null @@ -1,121 +0,0 @@ -#! 
/usr/bin/env python -# -*- coding: utf-8 -*- - -import itertools -import os -import subprocess - -from lab.environments import LocalEnvironment, BaselSlurmEnvironment -from lab.reports import Attribute - -from downward.reports.compare import ComparativeReport - -import common_setup -from common_setup import IssueConfig, IssueExperiment -from relativescatter import RelativeScatterPlotReport - -EXPNAME = common_setup.get_experiment_name() -DIR = os.path.dirname(os.path.abspath(__file__)) -BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"] -REVISIONS = ["issue744-v1"] -SEARCHES = [ - ("bjolp-silent", [ - "--evaluator", "lmc=lmcount(lm_merged([lm_rhw(),lm_hm(m=1)]),admissible=true)", - "--search", "astar(lmc,lazy_evaluator=lmc, verbosity=silent)"]), - ("blind-silent", ["--search", "astar(blind(), verbosity=silent)"]), - ("cegar-silent", ["--search", "astar(cegar(), verbosity=silent)"]), - # ("divpot", ["--search", "astar(diverse_potentials(), verbosity=silent)"]), - ("ipdb-silent", ["--search", "astar(ipdb(), verbosity=silent)"]), - ("lmcut-silent", ["--search", "astar(lmcut(), verbosity=silent)"]), - ("mas-silent", [ - "--search", "astar(merge_and_shrink(shrink_strategy=shrink_bisimulation(greedy=false)," - " merge_strategy=merge_sccs(order_of_sccs=topological," - " merge_selector=score_based_filtering(scoring_functions=[goal_relevance, dfp, total_order]))," - " label_reduction=exact(before_shrinking=true, before_merging=false)," - " max_states=50000, threshold_before_merge=1, verbosity=normal), verbosity=silent)"]), - # ("seq+lmcut", ["--search", "astar(operatorcounting([state_equation_constraints(), lmcut_constraints()]), verbosity=silent)"]), - ("h2-silent", ["--search", "astar(hm(m=2), verbosity=silent)"]), - ("hmax-silent", ["--search", "astar(hmax(), verbosity=silent)"]), - - ("bjolp-normal", [ - "--evaluator", "lmc=lmcount(lm_merged([lm_rhw(),lm_hm(m=1)]),admissible=true)", - "--search", "astar(lmc,lazy_evaluator=lmc, verbosity=normal)"]), - ("blind-normal", 
["--search", "astar(blind(), verbosity=normal)"]), - ("cegar-normal", ["--search", "astar(cegar(), verbosity=normal)"]), - # ("divpot", ["--search", "astar(diverse_potentials(), verbosity=normal)"]), - ("ipdb-normal", ["--search", "astar(ipdb(), verbosity=normal)"]), - ("lmcut-normal", ["--search", "astar(lmcut(), verbosity=normal)"]), - ("mas-normal", [ - "--search", "astar(merge_and_shrink(shrink_strategy=shrink_bisimulation(greedy=false)," - " merge_strategy=merge_sccs(order_of_sccs=topological," - " merge_selector=score_based_filtering(scoring_functions=[goal_relevance, dfp, total_order]))," - " label_reduction=exact(before_shrinking=true, before_merging=false)," - " max_states=50000, threshold_before_merge=1, verbosity=normal), verbosity=normal)"]), - # ("seq+lmcut", ["--search", "astar(operatorcounting([state_equation_constraints(), lmcut_constraints()]), verbosity=normal)"]), - ("h2-normal", ["--search", "astar(hm(m=2), verbosity=normal)"]), - ("hmax-normal", ["--search", "astar(hmax(), verbosity=normal)"]), -] -CONFIGS = [ - IssueConfig(search_nick, search, - driver_options=["--overall-time-limit", "30m"]) - for rev in REVISIONS - for search_nick, search in SEARCHES -] -SUITE = common_setup.DEFAULT_OPTIMAL_SUITE -ENVIRONMENT = BaselSlurmEnvironment( - partition="infai_2", - email="silvan.sievers@unibas.ch", - export=["PATH", "DOWNWARD_BENCHMARKS"]) - -if common_setup.is_test_run(): - SUITE = IssueExperiment.DEFAULT_TEST_SUITE - ENVIRONMENT = LocalEnvironment(processes=1) - -exp = IssueExperiment( - revisions=REVISIONS, - configs=CONFIGS, - environment=ENVIRONMENT, -) -exp.add_suite(BENCHMARKS_DIR, SUITE) - -exp.add_parser(exp.EXITCODE_PARSER) -exp.add_parser(exp.TRANSLATOR_PARSER) -exp.add_parser(exp.SINGLE_SEARCH_PARSER) -exp.add_parser(exp.PLANNER_PARSER) -exp.add_parser('custom-parser.py') - -exp.add_step("build", exp.build) -exp.add_step("start", exp.start_runs) -exp.add_fetcher(name="fetch") -exp.add_parse_again_step() - -log_size = 
Attribute('log_size') -attributes = IssueExperiment.DEFAULT_TABLE_ATTRIBUTES + [log_size] - -exp.add_absolute_report_step(attributes=attributes) -#exp.add_comparison_table_step() - -sort_spec = [('log_size', 'desc')] -attributes = ['run_dir', 'log_size'] -exp.add_sorted_report_step(attributes=attributes, sort_spec=sort_spec,filter_algorithm=[ - "{}-bjolp-silent".format(REVISIONS[0]), - "{}-blind-silent".format(REVISIONS[0]), - "{}-cegar-silent".format(REVISIONS[0]), - "{}-ipdb-silent".format(REVISIONS[0]), - "{}-lmcut-silent".format(REVISIONS[0]), - "{}-mas-silent".format(REVISIONS[0]), - "{}-h2-silent".format(REVISIONS[0]), - "{}-hmax-silent".format(REVISIONS[0]), -],name="silent") -exp.add_sorted_report_step(attributes=attributes, sort_spec=sort_spec,filter_algorithm=[ - "{}-bjolp-normal".format(REVISIONS[0]), - "{}-blind-normal".format(REVISIONS[0]), - "{}-cegar-normal".format(REVISIONS[0]), - "{}-ipdb-normal".format(REVISIONS[0]), - "{}-lmcut-normal".format(REVISIONS[0]), - "{}-mas-normal".format(REVISIONS[0]), - "{}-h2-normal".format(REVISIONS[0]), - "{}-hmax-normal".format(REVISIONS[0]), -],name="normal") - -exp.run_steps() diff --git a/experiments/issue744/v1-sat-30min.py b/experiments/issue744/v1-sat-30min.py deleted file mode 100755 index 1550cdc18c..0000000000 --- a/experiments/issue744/v1-sat-30min.py +++ /dev/null @@ -1,152 +0,0 @@ -#! 
/usr/bin/env python -# -*- coding: utf-8 -*- - -import itertools -import os -import subprocess - -from lab.environments import LocalEnvironment, BaselSlurmEnvironment -from lab.reports import Attribute - - -import common_setup -from common_setup import IssueConfig, IssueExperiment -from relativescatter import RelativeScatterPlotReport - -EXPNAME = common_setup.get_experiment_name() -DIR = os.path.dirname(os.path.abspath(__file__)) -BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"] -REVISIONS = ["issue744-v1"] -CONFIG_DICT = { - "eager-greedy-ff-silent": [ - "--evaluator", - "h=ff()", - "--search", - "eager_greedy([h], preferred=[h], verbosity=silent)"], - "eager-greedy-cea-silent": [ - "--evaluator", - "h=cea()", - "--search", - "eager_greedy([h], preferred=[h], verbosity=silent)"], - "lazy-greedy-add-silent": [ - "--evaluator", - "h=add()", - "--search", - "lazy_greedy([h], preferred=[h], verbosity=silent)"], - "lazy-greedy-cg-silent": [ - "--evaluator", - "h=cg()", - "--search", - "lazy_greedy([h], preferred=[h], verbosity=silent)"], - "lama-first-silent": [ - "--evaluator", - "hlm=lmcount(lm_factory=lm_rhw(reasonable_orders=true),transform=adapt_costs(one),pref=false)", - "--evaluator", "hff=ff(transform=adapt_costs(one))", - "--search", """lazy_greedy([hff,hlm],preferred=[hff,hlm], - cost_type=one,reopen_closed=false, verbosity=silent)"""], - "lama-first-typed-silent": [ - "--evaluator", - "hlm=lmcount(lm_factory=lm_rhw(reasonable_orders=true),transform=adapt_costs(one),pref=false)", - "--evaluator", "hff=ff(transform=adapt_costs(one))", - "--search", - "lazy(alt([single(hff), single(hff, pref_only=true)," - "single(hlm), single(hlm, pref_only=true), type_based([hff, g()])], boost=1000)," - "preferred=[hff,hlm], cost_type=one, reopen_closed=false, randomize_successors=true," - "preferred_successors_first=false, verbosity=silent)"], - - "eager-greedy-ff-normal": [ - "--evaluator", - "h=ff()", - "--search", - "eager_greedy([h], preferred=[h], 
verbosity=normal)"], - "eager-greedy-cea-normal": [ - "--evaluator", - "h=cea()", - "--search", - "eager_greedy([h], preferred=[h], verbosity=normal)"], - "lazy-greedy-add-normal": [ - "--evaluator", - "h=add()", - "--search", - "lazy_greedy([h], preferred=[h], verbosity=normal)"], - "lazy-greedy-cg-normal": [ - "--evaluator", - "h=cg()", - "--search", - "lazy_greedy([h], preferred=[h], verbosity=normal)"], - "lama-first-normal": [ - "--evaluator", - "hlm=lmcount(lm_factory=lm_rhw(reasonable_orders=true),transform=adapt_costs(one),pref=false)", - "--evaluator", "hff=ff(transform=adapt_costs(one))", - "--search", """lazy_greedy([hff,hlm],preferred=[hff,hlm], - cost_type=one,reopen_closed=false, verbosity=normal)"""], - "lama-first-typed-normal": [ - "--evaluator", - "hlm=lmcount(lm_factory=lm_rhw(reasonable_orders=true),transform=adapt_costs(one),pref=false)", - "--evaluator", "hff=ff(transform=adapt_costs(one))", - "--search", - "lazy(alt([single(hff), single(hff, pref_only=true)," - "single(hlm), single(hlm, pref_only=true), type_based([hff, g()])], boost=1000)," - "preferred=[hff,hlm], cost_type=one, reopen_closed=false, randomize_successors=true," - "preferred_successors_first=false, verbosity=normal)"], -} -CONFIGS = [ - IssueConfig(config_nick, config, - driver_options=["--overall-time-limit", "30m"]) - for rev in REVISIONS - for config_nick, config in CONFIG_DICT.items() -] -SUITE = common_setup.DEFAULT_SATISFICING_SUITE -ENVIRONMENT = BaselSlurmEnvironment( - partition="infai_2", - email="silvan.sievers@unibas.ch", - export=["PATH", "DOWNWARD_BENCHMARKS"]) - -if common_setup.is_test_run(): - SUITE = IssueExperiment.DEFAULT_TEST_SUITE - ENVIRONMENT = LocalEnvironment(processes=1) - -exp = IssueExperiment( - revisions=REVISIONS, - configs=CONFIGS, - environment=ENVIRONMENT, -) -exp.add_suite(BENCHMARKS_DIR, SUITE) - -exp.add_parser(exp.EXITCODE_PARSER) -exp.add_parser(exp.TRANSLATOR_PARSER) -exp.add_parser(exp.SINGLE_SEARCH_PARSER) 
-exp.add_parser(exp.PLANNER_PARSER) -exp.add_parser('custom-parser.py') - -exp.add_step("build", exp.build) -exp.add_step("start", exp.start_runs) -exp.add_fetcher(name="fetch") -exp.add_parse_again_step() - -log_size = Attribute('log_size') -attributes = IssueExperiment.DEFAULT_TABLE_ATTRIBUTES + [log_size] - -exp.add_absolute_report_step(attributes=attributes) -#exp.add_comparison_table_step() - -sort_spec = [('log_size', 'desc')] -attributes = ['run_dir', 'log_size'] -exp.add_sorted_report_step(attributes=attributes, sort_spec=sort_spec,filter_algorithm=[ - "{}-eager-greedy-ff-silent".format(REVISIONS[0]), - "{}-eager-greedy-cea-silent".format(REVISIONS[0]), - "{}-lazy-greedy-add-silent".format(REVISIONS[0]), - "{}-lazy-greedy-cg-silent".format(REVISIONS[0]), - "{}-lama-first-silent".format(REVISIONS[0]), - "{}-lama-first-typed-silent".format(REVISIONS[0]), -],name="silent") -exp.add_sorted_report_step(attributes=attributes, sort_spec=sort_spec,filter_algorithm=[ - "{}-eager-greedy-ff-normal".format(REVISIONS[0]), - "{}-eager-greedy-cea-normal".format(REVISIONS[0]), - "{}-lazy-greedy-add-normal".format(REVISIONS[0]), - "{}-lazy-greedy-cg-normal".format(REVISIONS[0]), - "{}-lama-first-normal".format(REVISIONS[0]), - "{}-lama-first-typed-normal".format(REVISIONS[0]), -],name="normal") - -exp.run_steps() diff --git a/experiments/issue747/common_setup.py b/experiments/issue747/common_setup.py deleted file mode 100644 index 5d2b40b61c..0000000000 --- a/experiments/issue747/common_setup.py +++ /dev/null @@ -1,385 +0,0 @@ -# -*- coding: utf-8 -*- - -import itertools -import os -import platform -import subprocess -import sys - -from lab.experiment import ARGPARSER -from lab import tools - -from downward.experiment import FastDownwardExperiment -from downward.reports.absolute import AbsoluteReport -from downward.reports.compare import ComparativeReport -from downward.reports.scatter import ScatterPlotReport - -from relativescatter import RelativeScatterPlotReport - - 
-def parse_args(): - ARGPARSER.add_argument( - "--test", - choices=["yes", "no", "auto"], - default="auto", - dest="test_run", - help="test experiment locally on a small suite if --test=yes or " - "--test=auto and we are not on a cluster") - return ARGPARSER.parse_args() - -ARGS = parse_args() - - -DEFAULT_OPTIMAL_SUITE = [ - 'airport', 'barman-opt11-strips', 'barman-opt14-strips', 'blocks', - 'childsnack-opt14-strips', 'depot', 'driverlog', - 'elevators-opt08-strips', 'elevators-opt11-strips', - 'floortile-opt11-strips', 'floortile-opt14-strips', 'freecell', - 'ged-opt14-strips', 'grid', 'gripper', 'hiking-opt14-strips', - 'logistics00', 'logistics98', 'miconic', 'movie', 'mprime', - 'mystery', 'nomystery-opt11-strips', 'openstacks-opt08-strips', - 'openstacks-opt11-strips', 'openstacks-opt14-strips', - 'openstacks-strips', 'parcprinter-08-strips', - 'parcprinter-opt11-strips', 'parking-opt11-strips', - 'parking-opt14-strips', 'pathways-noneg', 'pegsol-08-strips', - 'pegsol-opt11-strips', 'pipesworld-notankage', - 'pipesworld-tankage', 'psr-small', 'rovers', 'satellite', - 'scanalyzer-08-strips', 'scanalyzer-opt11-strips', - 'sokoban-opt08-strips', 'sokoban-opt11-strips', 'storage', - 'tetris-opt14-strips', 'tidybot-opt11-strips', - 'tidybot-opt14-strips', 'tpp', 'transport-opt08-strips', - 'transport-opt11-strips', 'transport-opt14-strips', - 'trucks-strips', 'visitall-opt11-strips', 'visitall-opt14-strips', - 'woodworking-opt08-strips', 'woodworking-opt11-strips', - 'zenotravel'] - -DEFAULT_SATISFICING_SUITE = [ - 'airport', 'assembly', 'barman-sat11-strips', - 'barman-sat14-strips', 'blocks', 'cavediving-14-adl', - 'childsnack-sat14-strips', 'citycar-sat14-adl', 'depot', - 'driverlog', 'elevators-sat08-strips', 'elevators-sat11-strips', - 'floortile-sat11-strips', 'floortile-sat14-strips', 'freecell', - 'ged-sat14-strips', 'grid', 'gripper', 'hiking-sat14-strips', - 'logistics00', 'logistics98', 'maintenance-sat14-adl', 'miconic', - 'miconic-fulladl', 
'miconic-simpleadl', 'movie', 'mprime', - 'mystery', 'nomystery-sat11-strips', 'openstacks', - 'openstacks-sat08-adl', 'openstacks-sat08-strips', - 'openstacks-sat11-strips', 'openstacks-sat14-strips', - 'openstacks-strips', 'optical-telegraphs', 'parcprinter-08-strips', - 'parcprinter-sat11-strips', 'parking-sat11-strips', - 'parking-sat14-strips', 'pathways', 'pathways-noneg', - 'pegsol-08-strips', 'pegsol-sat11-strips', 'philosophers', - 'pipesworld-notankage', 'pipesworld-tankage', 'psr-large', - 'psr-middle', 'psr-small', 'rovers', 'satellite', - 'scanalyzer-08-strips', 'scanalyzer-sat11-strips', 'schedule', - 'sokoban-sat08-strips', 'sokoban-sat11-strips', 'storage', - 'tetris-sat14-strips', 'thoughtful-sat14-strips', - 'tidybot-sat11-strips', 'tpp', 'transport-sat08-strips', - 'transport-sat11-strips', 'transport-sat14-strips', 'trucks', - 'trucks-strips', 'visitall-sat11-strips', 'visitall-sat14-strips', - 'woodworking-sat08-strips', 'woodworking-sat11-strips', - 'zenotravel'] - - -def get_script(): - """Get file name of main script.""" - return tools.get_script_path() - - -def get_script_dir(): - """Get directory of main script. - - Usually a relative directory (depends on how it was called by the user.)""" - return os.path.dirname(get_script()) - - -def get_experiment_name(): - """Get name for experiment. - - Derived from the absolute filename of the main script, e.g. - "/ham/spam/eggs.py" => "spam-eggs".""" - script = os.path.abspath(get_script()) - script_dir = os.path.basename(os.path.dirname(script)) - script_base = os.path.splitext(os.path.basename(script))[0] - return "%s-%s" % (script_dir, script_base) - - -def get_data_dir(): - """Get data dir for the experiment. - - This is the subdirectory "data" of the directory containing - the main script.""" - return os.path.join(get_script_dir(), "data", get_experiment_name()) - - -def get_repo_base(): - """Get base directory of the repository, as an absolute path. 
- - Search upwards in the directory tree from the main script until a - directory with a subdirectory named ".hg" is found. - - Abort if the repo base cannot be found.""" - path = os.path.abspath(get_script_dir()) - while os.path.dirname(path) != path: - if os.path.exists(os.path.join(path, ".hg")): - return path - path = os.path.dirname(path) - sys.exit("repo base could not be found") - - -def is_running_on_cluster(): - node = platform.node() - return ( - "cluster" in node or - node.startswith("gkigrid") or - node in ["habakuk", "turtur"]) - - -def is_test_run(): - return ARGS.test_run == "yes" or ( - ARGS.test_run == "auto" and not is_running_on_cluster()) - - -def get_algo_nick(revision, config_nick): - return "{revision}-{config_nick}".format(**locals()) - - -class IssueConfig(object): - """Hold information about a planner configuration. - - See FastDownwardExperiment.add_algorithm() for documentation of the - constructor's options. - - """ - def __init__(self, nick, component_options, - build_options=None, driver_options=None): - self.nick = nick - self.component_options = component_options - self.build_options = build_options - self.driver_options = driver_options - - -class IssueExperiment(FastDownwardExperiment): - """Subclass of FastDownwardExperiment with some convenience features.""" - - DEFAULT_TEST_SUITE = ["gripper:prob01.pddl"] - - DEFAULT_TABLE_ATTRIBUTES = [ - "cost", - "coverage", - "error", - "evaluations", - "expansions", - "expansions_until_last_jump", - "generated", - "memory", - "quality", - "run_dir", - "score_evaluations", - "score_expansions", - "score_generated", - "score_memory", - "score_search_time", - "score_total_time", - "search_time", - "total_time", - ] - - DEFAULT_SCATTER_PLOT_ATTRIBUTES = [ - "evaluations", - "expansions", - "expansions_until_last_jump", - "initial_h_value", - "memory", - "search_time", - "total_time", - ] - - PORTFOLIO_ATTRIBUTES = [ - "cost", - "coverage", - "error", - "plan_length", - "run_dir", - ] - - def 
__init__(self, revisions=None, configs=None, path=None, **kwargs): - """ - - You can either specify both *revisions* and *configs* or none - of them. If they are omitted, you will need to call - exp.add_algorithm() manually. - - If *revisions* is given, it must be a non-empty list of - revision identifiers, which specify which planner versions to - use in the experiment. The same versions are used for - translator, preprocessor and search. :: - - IssueExperiment(revisions=["issue123", "4b3d581643"], ...) - - If *configs* is given, it must be a non-empty list of - IssueConfig objects. :: - - IssueExperiment(..., configs=[ - IssueConfig("ff", ["--search", "eager_greedy(ff())"]), - IssueConfig( - "lama", [], - driver_options=["--alias", "seq-sat-lama-2011"]), - ]) - - If *path* is specified, it must be the path to where the - experiment should be built (e.g. - /home/john/experiments/issue123/exp01/). If omitted, the - experiment path is derived automatically from the main - script's filename. 
Example:: - - script = experiments/issue123/exp01.py --> - path = experiments/issue123/data/issue123-exp01/ - - """ - - path = path or get_data_dir() - - FastDownwardExperiment.__init__(self, path=path, **kwargs) - - if (revisions and not configs) or (not revisions and configs): - raise ValueError( - "please provide either both or none of revisions and configs") - - for rev in revisions: - for config in configs: - self.add_algorithm( - get_algo_nick(rev, config.nick), - get_repo_base(), - rev, - config.component_options, - build_options=config.build_options, - driver_options=config.driver_options) - - self._revisions = revisions - self._configs = configs - - @classmethod - def _is_portfolio(cls, config_nick): - return "fdss" in config_nick - - @classmethod - def get_supported_attributes(cls, config_nick, attributes): - if cls._is_portfolio(config_nick): - return [attr for attr in attributes - if attr in cls.PORTFOLIO_ATTRIBUTES] - return attributes - - def add_absolute_report_step(self, **kwargs): - """Add step that makes an absolute report. - - Absolute reports are useful for experiments that don't compare - revisions. - - The report is written to the experiment evaluation directory. - - All *kwargs* will be passed to the AbsoluteReport class. If the - keyword argument *attributes* is not specified, a default list - of attributes is used. :: - - exp.add_absolute_report_step(attributes=["coverage"]) - - """ - kwargs.setdefault("attributes", self.DEFAULT_TABLE_ATTRIBUTES) - report = AbsoluteReport(**kwargs) - outfile = os.path.join( - self.eval_dir, - get_experiment_name() + "." + report.output_format) - self.add_report(report, outfile=outfile) - self.add_step( - 'publish-absolute-report', subprocess.call, ['publish', outfile]) - - def add_comparison_table_step(self, **kwargs): - """Add a step that makes pairwise revision comparisons. - - Create comparative reports for all pairs of Fast Downward - revisions. 
Each report pairs up the runs of the same config and - lists the two absolute attribute values and their difference - for all attributes in kwargs["attributes"]. - - All *kwargs* will be passed to the CompareConfigsReport class. - If the keyword argument *attributes* is not specified, a - default list of attributes is used. :: - - exp.add_comparison_table_step(attributes=["coverage"]) - - """ - kwargs.setdefault("attributes", self.DEFAULT_TABLE_ATTRIBUTES) - - def make_comparison_tables(): - for rev1, rev2 in itertools.combinations(self._revisions, 2): - compared_configs = [] - for config in self._configs: - config_nick = config.nick - compared_configs.append( - ("%s-%s" % (rev1, config_nick), - "%s-%s" % (rev2, config_nick), - "Diff (%s)" % config_nick)) - report = ComparativeReport(compared_configs, **kwargs) - outfile = os.path.join( - self.eval_dir, - "%s-%s-%s-compare.%s" % ( - self.name, rev1, rev2, report.output_format)) - report(self.eval_dir, outfile) - - def publish_comparison_tables(): - for rev1, rev2 in itertools.combinations(self._revisions, 2): - outfile = os.path.join( - self.eval_dir, - "%s-%s-%s-compare.html" % (self.name, rev1, rev2)) - subprocess.call(["publish", outfile]) - - self.add_step("make-comparison-tables", make_comparison_tables) - self.add_step( - "publish-comparison-tables", publish_comparison_tables) - - def add_scatter_plot_step(self, relative=False, attributes=None): - """Add step creating (relative) scatter plots for all revision pairs. - - Create a scatter plot for each combination of attribute, - configuration and revisions pair. If *attributes* is not - specified, a list of common scatter plot attributes is used. - For portfolios all attributes except "cost", "coverage" and - "plan_length" will be ignored. 
:: - - exp.add_scatter_plot_step(attributes=["expansions"]) - - """ - if relative: - report_class = RelativeScatterPlotReport - scatter_dir = os.path.join(self.eval_dir, "scatter-relative") - step_name = "make-relative-scatter-plots" - else: - report_class = ScatterPlotReport - scatter_dir = os.path.join(self.eval_dir, "scatter-absolute") - step_name = "make-absolute-scatter-plots" - if attributes is None: - attributes = self.DEFAULT_SCATTER_PLOT_ATTRIBUTES - - def make_scatter_plot(config_nick, rev1, rev2, attribute): - name = "-".join([self.name, rev1, rev2, attribute, config_nick]) - print "Make scatter plot for", name - algo1 = "{}-{}".format(rev1, config_nick) - algo2 = "{}-{}".format(rev2, config_nick) - report = report_class( - filter_config=[algo1, algo2], - attributes=[attribute], - get_category=lambda run1, run2: run1["domain"], - legend_location=(1.3, 0.5)) - report( - self.eval_dir, - os.path.join(scatter_dir, rev1 + "-" + rev2, name)) - - def make_scatter_plots(): - for config in self._configs: - for rev1, rev2 in itertools.combinations(self._revisions, 2): - for attribute in self.get_supported_attributes( - config.nick, attributes): - make_scatter_plot(config.nick, rev1, rev2, attribute) - - self.add_step(step_name, make_scatter_plots) diff --git a/experiments/issue747/relativescatter.py b/experiments/issue747/relativescatter.py deleted file mode 100644 index f74cb6e721..0000000000 --- a/experiments/issue747/relativescatter.py +++ /dev/null @@ -1,105 +0,0 @@ -# -*- coding: utf-8 -*- - -from collections import defaultdict - -from matplotlib import ticker - -from downward.reports.scatter import ScatterPlotReport -from downward.reports.plot import PlotReport, Matplotlib, MatplotlibPlot - - -# TODO: handle outliers - -# TODO: this is mostly copied from ScatterMatplotlib (scatter.py) -class RelativeScatterMatplotlib(Matplotlib): - @classmethod - def _plot(cls, report, axes, categories, styles): - # Display grid - axes.grid(b=True, linestyle='-', 
color='0.75') - - has_points = False - # Generate the scatter plots - for category, coords in sorted(categories.items()): - X, Y = zip(*coords) - axes.scatter(X, Y, s=42, label=category, **styles[category]) - if X and Y: - has_points = True - - if report.xscale == 'linear' or report.yscale == 'linear': - plot_size = report.missing_val * 1.01 - else: - plot_size = report.missing_val * 1.25 - - # make 5 ticks above and below 1 - yticks = [] - tick_step = report.ylim_top**(1/5.0) - for i in xrange(-5, 6): - yticks.append(tick_step**i) - axes.set_yticks(yticks) - axes.get_yaxis().set_major_formatter(ticker.ScalarFormatter()) - - axes.set_xlim(report.xlim_left or -1, report.xlim_right or plot_size) - axes.set_ylim(report.ylim_bottom or -1, report.ylim_top or plot_size) - - for axis in [axes.xaxis, axes.yaxis]: - MatplotlibPlot.change_axis_formatter( - axis, - report.missing_val if report.show_missing else None) - return has_points - - -class RelativeScatterPlotReport(ScatterPlotReport): - """ - Generate a scatter plot that shows a relative comparison of two - algorithms with regard to the given attribute. The attribute value - of algorithm 1 is shown on the x-axis and the relation to the value - of algorithm 2 on the y-axis. - """ - - def __init__(self, show_missing=True, get_category=None, **kwargs): - ScatterPlotReport.__init__(self, show_missing, get_category, **kwargs) - if self.output_format == 'tex': - raise "not supported" - else: - self.writer = RelativeScatterMatplotlib - - def _fill_categories(self, runs): - # We discard the *runs* parameter. 
- # Map category names to value tuples - categories = defaultdict(list) - self.ylim_bottom = 2 - self.ylim_top = 0.5 - self.xlim_left = float("inf") - for (domain, problem), runs in self.problem_runs.items(): - if len(runs) != 2: - continue - run1, run2 = runs - assert (run1['algorithm'] == self.algorithms[0] and - run2['algorithm'] == self.algorithms[1]) - val1 = run1.get(self.attribute) - val2 = run2.get(self.attribute) - if val1 is None or val2 is None: - continue - category = self.get_category(run1, run2) - assert val1 > 0, (domain, problem, self.algorithms[0], val1) - assert val2 > 0, (domain, problem, self.algorithms[1], val2) - x = val1 - y = val2 / float(val1) - - categories[category].append((x, y)) - - self.ylim_top = max(self.ylim_top, y) - self.ylim_bottom = min(self.ylim_bottom, y) - self.xlim_left = min(self.xlim_left, x) - - # center around 1 - if self.ylim_bottom < 1: - self.ylim_top = max(self.ylim_top, 1 / float(self.ylim_bottom)) - if self.ylim_top > 1: - self.ylim_bottom = min(self.ylim_bottom, 1 / float(self.ylim_top)) - return categories - - def _set_scales(self, xscale, yscale): - # ScatterPlot uses log-scaling on the x-axis by default. - PlotReport._set_scales( - self, xscale or self.attribute.scale or 'log', 'log') diff --git a/experiments/issue747/v1.py b/experiments/issue747/v1.py deleted file mode 100755 index 4c976724a7..0000000000 --- a/experiments/issue747/v1.py +++ /dev/null @@ -1,47 +0,0 @@ -#! 
/usr/bin/env python -# -*- coding: utf-8 -*- - -import os - -from lab.environments import LocalEnvironment, BaselSlurmEnvironment - -import common_setup -from common_setup import IssueConfig, IssueExperiment -from relativescatter import RelativeScatterPlotReport - -DIR = os.path.dirname(os.path.abspath(__file__)) -BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"] -REVISIONS = ["issue747-base", "issue747-v1"] -CONFIGS = [ - IssueConfig('lazy-greedy-blind', ['--search', 'lazy_greedy([blind()])']), - IssueConfig('lama-first', [], driver_options=["--alias", "lama-first"]), - IssueConfig('lwastar-ff', ["--heuristic", "h=ff()", "--search", "lazy_wastar([h],preferred=[h],w=5)"]) -] -SUITE = common_setup.DEFAULT_SATISFICING_SUITE -ENVIRONMENT = BaselSlurmEnvironment(email="florian.pommerening@unibas.ch") - -if common_setup.is_test_run(): - SUITE = IssueExperiment.DEFAULT_TEST_SUITE - ENVIRONMENT = LocalEnvironment(processes=1) - -exp = IssueExperiment( - revisions=REVISIONS, - configs=CONFIGS, - environment=ENVIRONMENT, -) -exp.add_suite(BENCHMARKS_DIR, SUITE) -exp.add_absolute_report_step() -exp.add_comparison_table_step() - -for attribute in ["total_time"]: - for config in CONFIGS: - exp.add_report( - RelativeScatterPlotReport( - attributes=[attribute], - filter_algorithm=["{}-{}".format(rev, config.nick) for rev in REVISIONS], - get_category=lambda run1, run2: run1.get("domain"), - ), - outfile="{}-{}-{}-{}-{}.png".format(exp.name, attribute, config.nick, *REVISIONS) - ) - -exp.run_steps() diff --git a/experiments/issue748/common_setup.py b/experiments/issue748/common_setup.py deleted file mode 100644 index 5d2b40b61c..0000000000 --- a/experiments/issue748/common_setup.py +++ /dev/null @@ -1,385 +0,0 @@ -# -*- coding: utf-8 -*- - -import itertools -import os -import platform -import subprocess -import sys - -from lab.experiment import ARGPARSER -from lab import tools - -from downward.experiment import FastDownwardExperiment -from downward.reports.absolute import 
AbsoluteReport -from downward.reports.compare import ComparativeReport -from downward.reports.scatter import ScatterPlotReport - -from relativescatter import RelativeScatterPlotReport - - -def parse_args(): - ARGPARSER.add_argument( - "--test", - choices=["yes", "no", "auto"], - default="auto", - dest="test_run", - help="test experiment locally on a small suite if --test=yes or " - "--test=auto and we are not on a cluster") - return ARGPARSER.parse_args() - -ARGS = parse_args() - - -DEFAULT_OPTIMAL_SUITE = [ - 'airport', 'barman-opt11-strips', 'barman-opt14-strips', 'blocks', - 'childsnack-opt14-strips', 'depot', 'driverlog', - 'elevators-opt08-strips', 'elevators-opt11-strips', - 'floortile-opt11-strips', 'floortile-opt14-strips', 'freecell', - 'ged-opt14-strips', 'grid', 'gripper', 'hiking-opt14-strips', - 'logistics00', 'logistics98', 'miconic', 'movie', 'mprime', - 'mystery', 'nomystery-opt11-strips', 'openstacks-opt08-strips', - 'openstacks-opt11-strips', 'openstacks-opt14-strips', - 'openstacks-strips', 'parcprinter-08-strips', - 'parcprinter-opt11-strips', 'parking-opt11-strips', - 'parking-opt14-strips', 'pathways-noneg', 'pegsol-08-strips', - 'pegsol-opt11-strips', 'pipesworld-notankage', - 'pipesworld-tankage', 'psr-small', 'rovers', 'satellite', - 'scanalyzer-08-strips', 'scanalyzer-opt11-strips', - 'sokoban-opt08-strips', 'sokoban-opt11-strips', 'storage', - 'tetris-opt14-strips', 'tidybot-opt11-strips', - 'tidybot-opt14-strips', 'tpp', 'transport-opt08-strips', - 'transport-opt11-strips', 'transport-opt14-strips', - 'trucks-strips', 'visitall-opt11-strips', 'visitall-opt14-strips', - 'woodworking-opt08-strips', 'woodworking-opt11-strips', - 'zenotravel'] - -DEFAULT_SATISFICING_SUITE = [ - 'airport', 'assembly', 'barman-sat11-strips', - 'barman-sat14-strips', 'blocks', 'cavediving-14-adl', - 'childsnack-sat14-strips', 'citycar-sat14-adl', 'depot', - 'driverlog', 'elevators-sat08-strips', 'elevators-sat11-strips', - 'floortile-sat11-strips', 
'floortile-sat14-strips', 'freecell', - 'ged-sat14-strips', 'grid', 'gripper', 'hiking-sat14-strips', - 'logistics00', 'logistics98', 'maintenance-sat14-adl', 'miconic', - 'miconic-fulladl', 'miconic-simpleadl', 'movie', 'mprime', - 'mystery', 'nomystery-sat11-strips', 'openstacks', - 'openstacks-sat08-adl', 'openstacks-sat08-strips', - 'openstacks-sat11-strips', 'openstacks-sat14-strips', - 'openstacks-strips', 'optical-telegraphs', 'parcprinter-08-strips', - 'parcprinter-sat11-strips', 'parking-sat11-strips', - 'parking-sat14-strips', 'pathways', 'pathways-noneg', - 'pegsol-08-strips', 'pegsol-sat11-strips', 'philosophers', - 'pipesworld-notankage', 'pipesworld-tankage', 'psr-large', - 'psr-middle', 'psr-small', 'rovers', 'satellite', - 'scanalyzer-08-strips', 'scanalyzer-sat11-strips', 'schedule', - 'sokoban-sat08-strips', 'sokoban-sat11-strips', 'storage', - 'tetris-sat14-strips', 'thoughtful-sat14-strips', - 'tidybot-sat11-strips', 'tpp', 'transport-sat08-strips', - 'transport-sat11-strips', 'transport-sat14-strips', 'trucks', - 'trucks-strips', 'visitall-sat11-strips', 'visitall-sat14-strips', - 'woodworking-sat08-strips', 'woodworking-sat11-strips', - 'zenotravel'] - - -def get_script(): - """Get file name of main script.""" - return tools.get_script_path() - - -def get_script_dir(): - """Get directory of main script. - - Usually a relative directory (depends on how it was called by the user.)""" - return os.path.dirname(get_script()) - - -def get_experiment_name(): - """Get name for experiment. - - Derived from the absolute filename of the main script, e.g. - "/ham/spam/eggs.py" => "spam-eggs".""" - script = os.path.abspath(get_script()) - script_dir = os.path.basename(os.path.dirname(script)) - script_base = os.path.splitext(os.path.basename(script))[0] - return "%s-%s" % (script_dir, script_base) - - -def get_data_dir(): - """Get data dir for the experiment. 
- - This is the subdirectory "data" of the directory containing - the main script.""" - return os.path.join(get_script_dir(), "data", get_experiment_name()) - - -def get_repo_base(): - """Get base directory of the repository, as an absolute path. - - Search upwards in the directory tree from the main script until a - directory with a subdirectory named ".hg" is found. - - Abort if the repo base cannot be found.""" - path = os.path.abspath(get_script_dir()) - while os.path.dirname(path) != path: - if os.path.exists(os.path.join(path, ".hg")): - return path - path = os.path.dirname(path) - sys.exit("repo base could not be found") - - -def is_running_on_cluster(): - node = platform.node() - return ( - "cluster" in node or - node.startswith("gkigrid") or - node in ["habakuk", "turtur"]) - - -def is_test_run(): - return ARGS.test_run == "yes" or ( - ARGS.test_run == "auto" and not is_running_on_cluster()) - - -def get_algo_nick(revision, config_nick): - return "{revision}-{config_nick}".format(**locals()) - - -class IssueConfig(object): - """Hold information about a planner configuration. - - See FastDownwardExperiment.add_algorithm() for documentation of the - constructor's options. 
- - """ - def __init__(self, nick, component_options, - build_options=None, driver_options=None): - self.nick = nick - self.component_options = component_options - self.build_options = build_options - self.driver_options = driver_options - - -class IssueExperiment(FastDownwardExperiment): - """Subclass of FastDownwardExperiment with some convenience features.""" - - DEFAULT_TEST_SUITE = ["gripper:prob01.pddl"] - - DEFAULT_TABLE_ATTRIBUTES = [ - "cost", - "coverage", - "error", - "evaluations", - "expansions", - "expansions_until_last_jump", - "generated", - "memory", - "quality", - "run_dir", - "score_evaluations", - "score_expansions", - "score_generated", - "score_memory", - "score_search_time", - "score_total_time", - "search_time", - "total_time", - ] - - DEFAULT_SCATTER_PLOT_ATTRIBUTES = [ - "evaluations", - "expansions", - "expansions_until_last_jump", - "initial_h_value", - "memory", - "search_time", - "total_time", - ] - - PORTFOLIO_ATTRIBUTES = [ - "cost", - "coverage", - "error", - "plan_length", - "run_dir", - ] - - def __init__(self, revisions=None, configs=None, path=None, **kwargs): - """ - - You can either specify both *revisions* and *configs* or none - of them. If they are omitted, you will need to call - exp.add_algorithm() manually. - - If *revisions* is given, it must be a non-empty list of - revision identifiers, which specify which planner versions to - use in the experiment. The same versions are used for - translator, preprocessor and search. :: - - IssueExperiment(revisions=["issue123", "4b3d581643"], ...) - - If *configs* is given, it must be a non-empty list of - IssueConfig objects. :: - - IssueExperiment(..., configs=[ - IssueConfig("ff", ["--search", "eager_greedy(ff())"]), - IssueConfig( - "lama", [], - driver_options=["--alias", "seq-sat-lama-2011"]), - ]) - - If *path* is specified, it must be the path to where the - experiment should be built (e.g. - /home/john/experiments/issue123/exp01/). 
If omitted, the - experiment path is derived automatically from the main - script's filename. Example:: - - script = experiments/issue123/exp01.py --> - path = experiments/issue123/data/issue123-exp01/ - - """ - - path = path or get_data_dir() - - FastDownwardExperiment.__init__(self, path=path, **kwargs) - - if (revisions and not configs) or (not revisions and configs): - raise ValueError( - "please provide either both or none of revisions and configs") - - for rev in revisions: - for config in configs: - self.add_algorithm( - get_algo_nick(rev, config.nick), - get_repo_base(), - rev, - config.component_options, - build_options=config.build_options, - driver_options=config.driver_options) - - self._revisions = revisions - self._configs = configs - - @classmethod - def _is_portfolio(cls, config_nick): - return "fdss" in config_nick - - @classmethod - def get_supported_attributes(cls, config_nick, attributes): - if cls._is_portfolio(config_nick): - return [attr for attr in attributes - if attr in cls.PORTFOLIO_ATTRIBUTES] - return attributes - - def add_absolute_report_step(self, **kwargs): - """Add step that makes an absolute report. - - Absolute reports are useful for experiments that don't compare - revisions. - - The report is written to the experiment evaluation directory. - - All *kwargs* will be passed to the AbsoluteReport class. If the - keyword argument *attributes* is not specified, a default list - of attributes is used. :: - - exp.add_absolute_report_step(attributes=["coverage"]) - - """ - kwargs.setdefault("attributes", self.DEFAULT_TABLE_ATTRIBUTES) - report = AbsoluteReport(**kwargs) - outfile = os.path.join( - self.eval_dir, - get_experiment_name() + "." + report.output_format) - self.add_report(report, outfile=outfile) - self.add_step( - 'publish-absolute-report', subprocess.call, ['publish', outfile]) - - def add_comparison_table_step(self, **kwargs): - """Add a step that makes pairwise revision comparisons. 
- - Create comparative reports for all pairs of Fast Downward - revisions. Each report pairs up the runs of the same config and - lists the two absolute attribute values and their difference - for all attributes in kwargs["attributes"]. - - All *kwargs* will be passed to the CompareConfigsReport class. - If the keyword argument *attributes* is not specified, a - default list of attributes is used. :: - - exp.add_comparison_table_step(attributes=["coverage"]) - - """ - kwargs.setdefault("attributes", self.DEFAULT_TABLE_ATTRIBUTES) - - def make_comparison_tables(): - for rev1, rev2 in itertools.combinations(self._revisions, 2): - compared_configs = [] - for config in self._configs: - config_nick = config.nick - compared_configs.append( - ("%s-%s" % (rev1, config_nick), - "%s-%s" % (rev2, config_nick), - "Diff (%s)" % config_nick)) - report = ComparativeReport(compared_configs, **kwargs) - outfile = os.path.join( - self.eval_dir, - "%s-%s-%s-compare.%s" % ( - self.name, rev1, rev2, report.output_format)) - report(self.eval_dir, outfile) - - def publish_comparison_tables(): - for rev1, rev2 in itertools.combinations(self._revisions, 2): - outfile = os.path.join( - self.eval_dir, - "%s-%s-%s-compare.html" % (self.name, rev1, rev2)) - subprocess.call(["publish", outfile]) - - self.add_step("make-comparison-tables", make_comparison_tables) - self.add_step( - "publish-comparison-tables", publish_comparison_tables) - - def add_scatter_plot_step(self, relative=False, attributes=None): - """Add step creating (relative) scatter plots for all revision pairs. - - Create a scatter plot for each combination of attribute, - configuration and revisions pair. If *attributes* is not - specified, a list of common scatter plot attributes is used. - For portfolios all attributes except "cost", "coverage" and - "plan_length" will be ignored. 
:: - - exp.add_scatter_plot_step(attributes=["expansions"]) - - """ - if relative: - report_class = RelativeScatterPlotReport - scatter_dir = os.path.join(self.eval_dir, "scatter-relative") - step_name = "make-relative-scatter-plots" - else: - report_class = ScatterPlotReport - scatter_dir = os.path.join(self.eval_dir, "scatter-absolute") - step_name = "make-absolute-scatter-plots" - if attributes is None: - attributes = self.DEFAULT_SCATTER_PLOT_ATTRIBUTES - - def make_scatter_plot(config_nick, rev1, rev2, attribute): - name = "-".join([self.name, rev1, rev2, attribute, config_nick]) - print "Make scatter plot for", name - algo1 = "{}-{}".format(rev1, config_nick) - algo2 = "{}-{}".format(rev2, config_nick) - report = report_class( - filter_config=[algo1, algo2], - attributes=[attribute], - get_category=lambda run1, run2: run1["domain"], - legend_location=(1.3, 0.5)) - report( - self.eval_dir, - os.path.join(scatter_dir, rev1 + "-" + rev2, name)) - - def make_scatter_plots(): - for config in self._configs: - for rev1, rev2 in itertools.combinations(self._revisions, 2): - for attribute in self.get_supported_attributes( - config.nick, attributes): - make_scatter_plot(config.nick, rev1, rev2, attribute) - - self.add_step(step_name, make_scatter_plots) diff --git a/experiments/issue748/relativescatter.py b/experiments/issue748/relativescatter.py deleted file mode 100644 index f74cb6e721..0000000000 --- a/experiments/issue748/relativescatter.py +++ /dev/null @@ -1,105 +0,0 @@ -# -*- coding: utf-8 -*- - -from collections import defaultdict - -from matplotlib import ticker - -from downward.reports.scatter import ScatterPlotReport -from downward.reports.plot import PlotReport, Matplotlib, MatplotlibPlot - - -# TODO: handle outliers - -# TODO: this is mostly copied from ScatterMatplotlib (scatter.py) -class RelativeScatterMatplotlib(Matplotlib): - @classmethod - def _plot(cls, report, axes, categories, styles): - # Display grid - axes.grid(b=True, linestyle='-', 
color='0.75') - - has_points = False - # Generate the scatter plots - for category, coords in sorted(categories.items()): - X, Y = zip(*coords) - axes.scatter(X, Y, s=42, label=category, **styles[category]) - if X and Y: - has_points = True - - if report.xscale == 'linear' or report.yscale == 'linear': - plot_size = report.missing_val * 1.01 - else: - plot_size = report.missing_val * 1.25 - - # make 5 ticks above and below 1 - yticks = [] - tick_step = report.ylim_top**(1/5.0) - for i in xrange(-5, 6): - yticks.append(tick_step**i) - axes.set_yticks(yticks) - axes.get_yaxis().set_major_formatter(ticker.ScalarFormatter()) - - axes.set_xlim(report.xlim_left or -1, report.xlim_right or plot_size) - axes.set_ylim(report.ylim_bottom or -1, report.ylim_top or plot_size) - - for axis in [axes.xaxis, axes.yaxis]: - MatplotlibPlot.change_axis_formatter( - axis, - report.missing_val if report.show_missing else None) - return has_points - - -class RelativeScatterPlotReport(ScatterPlotReport): - """ - Generate a scatter plot that shows a relative comparison of two - algorithms with regard to the given attribute. The attribute value - of algorithm 1 is shown on the x-axis and the relation to the value - of algorithm 2 on the y-axis. - """ - - def __init__(self, show_missing=True, get_category=None, **kwargs): - ScatterPlotReport.__init__(self, show_missing, get_category, **kwargs) - if self.output_format == 'tex': - raise "not supported" - else: - self.writer = RelativeScatterMatplotlib - - def _fill_categories(self, runs): - # We discard the *runs* parameter. 
- # Map category names to value tuples - categories = defaultdict(list) - self.ylim_bottom = 2 - self.ylim_top = 0.5 - self.xlim_left = float("inf") - for (domain, problem), runs in self.problem_runs.items(): - if len(runs) != 2: - continue - run1, run2 = runs - assert (run1['algorithm'] == self.algorithms[0] and - run2['algorithm'] == self.algorithms[1]) - val1 = run1.get(self.attribute) - val2 = run2.get(self.attribute) - if val1 is None or val2 is None: - continue - category = self.get_category(run1, run2) - assert val1 > 0, (domain, problem, self.algorithms[0], val1) - assert val2 > 0, (domain, problem, self.algorithms[1], val2) - x = val1 - y = val2 / float(val1) - - categories[category].append((x, y)) - - self.ylim_top = max(self.ylim_top, y) - self.ylim_bottom = min(self.ylim_bottom, y) - self.xlim_left = min(self.xlim_left, x) - - # center around 1 - if self.ylim_bottom < 1: - self.ylim_top = max(self.ylim_top, 1 / float(self.ylim_bottom)) - if self.ylim_top > 1: - self.ylim_bottom = min(self.ylim_bottom, 1 / float(self.ylim_top)) - return categories - - def _set_scales(self, xscale, yscale): - # ScatterPlot uses log-scaling on the x-axis by default. - PlotReport._set_scales( - self, xscale or self.attribute.scale or 'log', 'log') diff --git a/experiments/issue748/v1-opt.py b/experiments/issue748/v1-opt.py deleted file mode 100755 index f96a3c0a89..0000000000 --- a/experiments/issue748/v1-opt.py +++ /dev/null @@ -1,46 +0,0 @@ -#! 
/usr/bin/env python -# -*- coding: utf-8 -*- - -import os - -from lab.environments import LocalEnvironment, BaselSlurmEnvironment - -import common_setup -from common_setup import IssueConfig, IssueExperiment -from relativescatter import RelativeScatterPlotReport - -DIR = os.path.dirname(os.path.abspath(__file__)) -BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"] -REVISIONS = ["issue748-base", "issue748-v1"] -CONFIGS = [ - IssueConfig('astar-blind', ['--search', 'astar(blind())']), - IssueConfig('astar-lmcut', ['--search', 'astar(lmcut())']), -] -SUITE = common_setup.DEFAULT_OPTIMAL_SUITE -ENVIRONMENT = BaselSlurmEnvironment(email="florian.pommerening@unibas.ch", export=["PATH", "DOWNWARD_BENCHMARKS"]) - -if common_setup.is_test_run(): - SUITE = IssueExperiment.DEFAULT_TEST_SUITE - ENVIRONMENT = LocalEnvironment(processes=1) - -exp = IssueExperiment( - revisions=REVISIONS, - configs=CONFIGS, - environment=ENVIRONMENT, -) -exp.add_suite(BENCHMARKS_DIR, SUITE) -exp.add_absolute_report_step() -exp.add_comparison_table_step() - -for attribute in ["total_time"]: - for config in CONFIGS: - exp.add_report( - RelativeScatterPlotReport( - attributes=[attribute], - filter_algorithm=["{}-{}".format(rev, config.nick) for rev in REVISIONS], - get_category=lambda run1, run2: run1.get("domain"), - ), - outfile="{}-{}-{}-{}-{}.png".format(exp.name, attribute, config.nick, *REVISIONS) - ) - -exp.run_steps() diff --git a/experiments/issue748/v1-sat.py b/experiments/issue748/v1-sat.py deleted file mode 100755 index 1218db0c95..0000000000 --- a/experiments/issue748/v1-sat.py +++ /dev/null @@ -1,48 +0,0 @@ -#! 
/usr/bin/env python -# -*- coding: utf-8 -*- - -import os - -from lab.environments import LocalEnvironment, BaselSlurmEnvironment - -import common_setup -from common_setup import IssueConfig, IssueExperiment -from relativescatter import RelativeScatterPlotReport - -DIR = os.path.dirname(os.path.abspath(__file__)) -BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"] -REVISIONS = ["issue748-base", "issue748-v1"] -CONFIGS = [ - IssueConfig('lazy-greedy-blind', ['--search', 'lazy_greedy([blind()])']), - IssueConfig('lama-first', [], driver_options=["--alias", "lama-first"]), - IssueConfig('lwastar-ff', ["--heuristic", "h=ff()", "--search", "lazy_wastar([h],preferred=[h],w=5)"]), - IssueConfig("ehc-ff", ["--search", "ehc(ff())"]), -] -SUITE = common_setup.DEFAULT_SATISFICING_SUITE -ENVIRONMENT = BaselSlurmEnvironment(email="florian.pommerening@unibas.ch", export=["PATH", "DOWNWARD_BENCHMARKS"]) - -if common_setup.is_test_run(): - SUITE = IssueExperiment.DEFAULT_TEST_SUITE - ENVIRONMENT = LocalEnvironment(processes=1) - -exp = IssueExperiment( - revisions=REVISIONS, - configs=CONFIGS, - environment=ENVIRONMENT, -) -exp.add_suite(BENCHMARKS_DIR, SUITE) -exp.add_absolute_report_step() -exp.add_comparison_table_step() - -for attribute in ["total_time"]: - for config in CONFIGS: - exp.add_report( - RelativeScatterPlotReport( - attributes=[attribute], - filter_algorithm=["{}-{}".format(rev, config.nick) for rev in REVISIONS], - get_category=lambda run1, run2: run1.get("domain"), - ), - outfile="{}-{}-{}-{}-{}.png".format(exp.name, attribute, config.nick, *REVISIONS) - ) - -exp.run_steps() diff --git a/experiments/issue750/common_setup.py b/experiments/issue750/common_setup.py deleted file mode 100644 index 5d2b40b61c..0000000000 --- a/experiments/issue750/common_setup.py +++ /dev/null @@ -1,385 +0,0 @@ -# -*- coding: utf-8 -*- - -import itertools -import os -import platform -import subprocess -import sys - -from lab.experiment import ARGPARSER -from lab import tools - -from 
downward.experiment import FastDownwardExperiment -from downward.reports.absolute import AbsoluteReport -from downward.reports.compare import ComparativeReport -from downward.reports.scatter import ScatterPlotReport - -from relativescatter import RelativeScatterPlotReport - - -def parse_args(): - ARGPARSER.add_argument( - "--test", - choices=["yes", "no", "auto"], - default="auto", - dest="test_run", - help="test experiment locally on a small suite if --test=yes or " - "--test=auto and we are not on a cluster") - return ARGPARSER.parse_args() - -ARGS = parse_args() - - -DEFAULT_OPTIMAL_SUITE = [ - 'airport', 'barman-opt11-strips', 'barman-opt14-strips', 'blocks', - 'childsnack-opt14-strips', 'depot', 'driverlog', - 'elevators-opt08-strips', 'elevators-opt11-strips', - 'floortile-opt11-strips', 'floortile-opt14-strips', 'freecell', - 'ged-opt14-strips', 'grid', 'gripper', 'hiking-opt14-strips', - 'logistics00', 'logistics98', 'miconic', 'movie', 'mprime', - 'mystery', 'nomystery-opt11-strips', 'openstacks-opt08-strips', - 'openstacks-opt11-strips', 'openstacks-opt14-strips', - 'openstacks-strips', 'parcprinter-08-strips', - 'parcprinter-opt11-strips', 'parking-opt11-strips', - 'parking-opt14-strips', 'pathways-noneg', 'pegsol-08-strips', - 'pegsol-opt11-strips', 'pipesworld-notankage', - 'pipesworld-tankage', 'psr-small', 'rovers', 'satellite', - 'scanalyzer-08-strips', 'scanalyzer-opt11-strips', - 'sokoban-opt08-strips', 'sokoban-opt11-strips', 'storage', - 'tetris-opt14-strips', 'tidybot-opt11-strips', - 'tidybot-opt14-strips', 'tpp', 'transport-opt08-strips', - 'transport-opt11-strips', 'transport-opt14-strips', - 'trucks-strips', 'visitall-opt11-strips', 'visitall-opt14-strips', - 'woodworking-opt08-strips', 'woodworking-opt11-strips', - 'zenotravel'] - -DEFAULT_SATISFICING_SUITE = [ - 'airport', 'assembly', 'barman-sat11-strips', - 'barman-sat14-strips', 'blocks', 'cavediving-14-adl', - 'childsnack-sat14-strips', 'citycar-sat14-adl', 'depot', - 'driverlog', 
'elevators-sat08-strips', 'elevators-sat11-strips', - 'floortile-sat11-strips', 'floortile-sat14-strips', 'freecell', - 'ged-sat14-strips', 'grid', 'gripper', 'hiking-sat14-strips', - 'logistics00', 'logistics98', 'maintenance-sat14-adl', 'miconic', - 'miconic-fulladl', 'miconic-simpleadl', 'movie', 'mprime', - 'mystery', 'nomystery-sat11-strips', 'openstacks', - 'openstacks-sat08-adl', 'openstacks-sat08-strips', - 'openstacks-sat11-strips', 'openstacks-sat14-strips', - 'openstacks-strips', 'optical-telegraphs', 'parcprinter-08-strips', - 'parcprinter-sat11-strips', 'parking-sat11-strips', - 'parking-sat14-strips', 'pathways', 'pathways-noneg', - 'pegsol-08-strips', 'pegsol-sat11-strips', 'philosophers', - 'pipesworld-notankage', 'pipesworld-tankage', 'psr-large', - 'psr-middle', 'psr-small', 'rovers', 'satellite', - 'scanalyzer-08-strips', 'scanalyzer-sat11-strips', 'schedule', - 'sokoban-sat08-strips', 'sokoban-sat11-strips', 'storage', - 'tetris-sat14-strips', 'thoughtful-sat14-strips', - 'tidybot-sat11-strips', 'tpp', 'transport-sat08-strips', - 'transport-sat11-strips', 'transport-sat14-strips', 'trucks', - 'trucks-strips', 'visitall-sat11-strips', 'visitall-sat14-strips', - 'woodworking-sat08-strips', 'woodworking-sat11-strips', - 'zenotravel'] - - -def get_script(): - """Get file name of main script.""" - return tools.get_script_path() - - -def get_script_dir(): - """Get directory of main script. - - Usually a relative directory (depends on how it was called by the user.)""" - return os.path.dirname(get_script()) - - -def get_experiment_name(): - """Get name for experiment. - - Derived from the absolute filename of the main script, e.g. - "/ham/spam/eggs.py" => "spam-eggs".""" - script = os.path.abspath(get_script()) - script_dir = os.path.basename(os.path.dirname(script)) - script_base = os.path.splitext(os.path.basename(script))[0] - return "%s-%s" % (script_dir, script_base) - - -def get_data_dir(): - """Get data dir for the experiment. 
- - This is the subdirectory "data" of the directory containing - the main script.""" - return os.path.join(get_script_dir(), "data", get_experiment_name()) - - -def get_repo_base(): - """Get base directory of the repository, as an absolute path. - - Search upwards in the directory tree from the main script until a - directory with a subdirectory named ".hg" is found. - - Abort if the repo base cannot be found.""" - path = os.path.abspath(get_script_dir()) - while os.path.dirname(path) != path: - if os.path.exists(os.path.join(path, ".hg")): - return path - path = os.path.dirname(path) - sys.exit("repo base could not be found") - - -def is_running_on_cluster(): - node = platform.node() - return ( - "cluster" in node or - node.startswith("gkigrid") or - node in ["habakuk", "turtur"]) - - -def is_test_run(): - return ARGS.test_run == "yes" or ( - ARGS.test_run == "auto" and not is_running_on_cluster()) - - -def get_algo_nick(revision, config_nick): - return "{revision}-{config_nick}".format(**locals()) - - -class IssueConfig(object): - """Hold information about a planner configuration. - - See FastDownwardExperiment.add_algorithm() for documentation of the - constructor's options. 
- - """ - def __init__(self, nick, component_options, - build_options=None, driver_options=None): - self.nick = nick - self.component_options = component_options - self.build_options = build_options - self.driver_options = driver_options - - -class IssueExperiment(FastDownwardExperiment): - """Subclass of FastDownwardExperiment with some convenience features.""" - - DEFAULT_TEST_SUITE = ["gripper:prob01.pddl"] - - DEFAULT_TABLE_ATTRIBUTES = [ - "cost", - "coverage", - "error", - "evaluations", - "expansions", - "expansions_until_last_jump", - "generated", - "memory", - "quality", - "run_dir", - "score_evaluations", - "score_expansions", - "score_generated", - "score_memory", - "score_search_time", - "score_total_time", - "search_time", - "total_time", - ] - - DEFAULT_SCATTER_PLOT_ATTRIBUTES = [ - "evaluations", - "expansions", - "expansions_until_last_jump", - "initial_h_value", - "memory", - "search_time", - "total_time", - ] - - PORTFOLIO_ATTRIBUTES = [ - "cost", - "coverage", - "error", - "plan_length", - "run_dir", - ] - - def __init__(self, revisions=None, configs=None, path=None, **kwargs): - """ - - You can either specify both *revisions* and *configs* or none - of them. If they are omitted, you will need to call - exp.add_algorithm() manually. - - If *revisions* is given, it must be a non-empty list of - revision identifiers, which specify which planner versions to - use in the experiment. The same versions are used for - translator, preprocessor and search. :: - - IssueExperiment(revisions=["issue123", "4b3d581643"], ...) - - If *configs* is given, it must be a non-empty list of - IssueConfig objects. :: - - IssueExperiment(..., configs=[ - IssueConfig("ff", ["--search", "eager_greedy(ff())"]), - IssueConfig( - "lama", [], - driver_options=["--alias", "seq-sat-lama-2011"]), - ]) - - If *path* is specified, it must be the path to where the - experiment should be built (e.g. - /home/john/experiments/issue123/exp01/). 
If omitted, the - experiment path is derived automatically from the main - script's filename. Example:: - - script = experiments/issue123/exp01.py --> - path = experiments/issue123/data/issue123-exp01/ - - """ - - path = path or get_data_dir() - - FastDownwardExperiment.__init__(self, path=path, **kwargs) - - if (revisions and not configs) or (not revisions and configs): - raise ValueError( - "please provide either both or none of revisions and configs") - - for rev in revisions: - for config in configs: - self.add_algorithm( - get_algo_nick(rev, config.nick), - get_repo_base(), - rev, - config.component_options, - build_options=config.build_options, - driver_options=config.driver_options) - - self._revisions = revisions - self._configs = configs - - @classmethod - def _is_portfolio(cls, config_nick): - return "fdss" in config_nick - - @classmethod - def get_supported_attributes(cls, config_nick, attributes): - if cls._is_portfolio(config_nick): - return [attr for attr in attributes - if attr in cls.PORTFOLIO_ATTRIBUTES] - return attributes - - def add_absolute_report_step(self, **kwargs): - """Add step that makes an absolute report. - - Absolute reports are useful for experiments that don't compare - revisions. - - The report is written to the experiment evaluation directory. - - All *kwargs* will be passed to the AbsoluteReport class. If the - keyword argument *attributes* is not specified, a default list - of attributes is used. :: - - exp.add_absolute_report_step(attributes=["coverage"]) - - """ - kwargs.setdefault("attributes", self.DEFAULT_TABLE_ATTRIBUTES) - report = AbsoluteReport(**kwargs) - outfile = os.path.join( - self.eval_dir, - get_experiment_name() + "." + report.output_format) - self.add_report(report, outfile=outfile) - self.add_step( - 'publish-absolute-report', subprocess.call, ['publish', outfile]) - - def add_comparison_table_step(self, **kwargs): - """Add a step that makes pairwise revision comparisons. 
- - Create comparative reports for all pairs of Fast Downward - revisions. Each report pairs up the runs of the same config and - lists the two absolute attribute values and their difference - for all attributes in kwargs["attributes"]. - - All *kwargs* will be passed to the CompareConfigsReport class. - If the keyword argument *attributes* is not specified, a - default list of attributes is used. :: - - exp.add_comparison_table_step(attributes=["coverage"]) - - """ - kwargs.setdefault("attributes", self.DEFAULT_TABLE_ATTRIBUTES) - - def make_comparison_tables(): - for rev1, rev2 in itertools.combinations(self._revisions, 2): - compared_configs = [] - for config in self._configs: - config_nick = config.nick - compared_configs.append( - ("%s-%s" % (rev1, config_nick), - "%s-%s" % (rev2, config_nick), - "Diff (%s)" % config_nick)) - report = ComparativeReport(compared_configs, **kwargs) - outfile = os.path.join( - self.eval_dir, - "%s-%s-%s-compare.%s" % ( - self.name, rev1, rev2, report.output_format)) - report(self.eval_dir, outfile) - - def publish_comparison_tables(): - for rev1, rev2 in itertools.combinations(self._revisions, 2): - outfile = os.path.join( - self.eval_dir, - "%s-%s-%s-compare.html" % (self.name, rev1, rev2)) - subprocess.call(["publish", outfile]) - - self.add_step("make-comparison-tables", make_comparison_tables) - self.add_step( - "publish-comparison-tables", publish_comparison_tables) - - def add_scatter_plot_step(self, relative=False, attributes=None): - """Add step creating (relative) scatter plots for all revision pairs. - - Create a scatter plot for each combination of attribute, - configuration and revisions pair. If *attributes* is not - specified, a list of common scatter plot attributes is used. - For portfolios all attributes except "cost", "coverage" and - "plan_length" will be ignored. 
:: - - exp.add_scatter_plot_step(attributes=["expansions"]) - - """ - if relative: - report_class = RelativeScatterPlotReport - scatter_dir = os.path.join(self.eval_dir, "scatter-relative") - step_name = "make-relative-scatter-plots" - else: - report_class = ScatterPlotReport - scatter_dir = os.path.join(self.eval_dir, "scatter-absolute") - step_name = "make-absolute-scatter-plots" - if attributes is None: - attributes = self.DEFAULT_SCATTER_PLOT_ATTRIBUTES - - def make_scatter_plot(config_nick, rev1, rev2, attribute): - name = "-".join([self.name, rev1, rev2, attribute, config_nick]) - print "Make scatter plot for", name - algo1 = "{}-{}".format(rev1, config_nick) - algo2 = "{}-{}".format(rev2, config_nick) - report = report_class( - filter_config=[algo1, algo2], - attributes=[attribute], - get_category=lambda run1, run2: run1["domain"], - legend_location=(1.3, 0.5)) - report( - self.eval_dir, - os.path.join(scatter_dir, rev1 + "-" + rev2, name)) - - def make_scatter_plots(): - for config in self._configs: - for rev1, rev2 in itertools.combinations(self._revisions, 2): - for attribute in self.get_supported_attributes( - config.nick, attributes): - make_scatter_plot(config.nick, rev1, rev2, attribute) - - self.add_step(step_name, make_scatter_plots) diff --git a/experiments/issue750/relativescatter.py b/experiments/issue750/relativescatter.py deleted file mode 100644 index f74cb6e721..0000000000 --- a/experiments/issue750/relativescatter.py +++ /dev/null @@ -1,105 +0,0 @@ -# -*- coding: utf-8 -*- - -from collections import defaultdict - -from matplotlib import ticker - -from downward.reports.scatter import ScatterPlotReport -from downward.reports.plot import PlotReport, Matplotlib, MatplotlibPlot - - -# TODO: handle outliers - -# TODO: this is mostly copied from ScatterMatplotlib (scatter.py) -class RelativeScatterMatplotlib(Matplotlib): - @classmethod - def _plot(cls, report, axes, categories, styles): - # Display grid - axes.grid(b=True, linestyle='-', 
color='0.75') - - has_points = False - # Generate the scatter plots - for category, coords in sorted(categories.items()): - X, Y = zip(*coords) - axes.scatter(X, Y, s=42, label=category, **styles[category]) - if X and Y: - has_points = True - - if report.xscale == 'linear' or report.yscale == 'linear': - plot_size = report.missing_val * 1.01 - else: - plot_size = report.missing_val * 1.25 - - # make 5 ticks above and below 1 - yticks = [] - tick_step = report.ylim_top**(1/5.0) - for i in xrange(-5, 6): - yticks.append(tick_step**i) - axes.set_yticks(yticks) - axes.get_yaxis().set_major_formatter(ticker.ScalarFormatter()) - - axes.set_xlim(report.xlim_left or -1, report.xlim_right or plot_size) - axes.set_ylim(report.ylim_bottom or -1, report.ylim_top or plot_size) - - for axis in [axes.xaxis, axes.yaxis]: - MatplotlibPlot.change_axis_formatter( - axis, - report.missing_val if report.show_missing else None) - return has_points - - -class RelativeScatterPlotReport(ScatterPlotReport): - """ - Generate a scatter plot that shows a relative comparison of two - algorithms with regard to the given attribute. The attribute value - of algorithm 1 is shown on the x-axis and the relation to the value - of algorithm 2 on the y-axis. - """ - - def __init__(self, show_missing=True, get_category=None, **kwargs): - ScatterPlotReport.__init__(self, show_missing, get_category, **kwargs) - if self.output_format == 'tex': - raise "not supported" - else: - self.writer = RelativeScatterMatplotlib - - def _fill_categories(self, runs): - # We discard the *runs* parameter. 
- # Map category names to value tuples - categories = defaultdict(list) - self.ylim_bottom = 2 - self.ylim_top = 0.5 - self.xlim_left = float("inf") - for (domain, problem), runs in self.problem_runs.items(): - if len(runs) != 2: - continue - run1, run2 = runs - assert (run1['algorithm'] == self.algorithms[0] and - run2['algorithm'] == self.algorithms[1]) - val1 = run1.get(self.attribute) - val2 = run2.get(self.attribute) - if val1 is None or val2 is None: - continue - category = self.get_category(run1, run2) - assert val1 > 0, (domain, problem, self.algorithms[0], val1) - assert val2 > 0, (domain, problem, self.algorithms[1], val2) - x = val1 - y = val2 / float(val1) - - categories[category].append((x, y)) - - self.ylim_top = max(self.ylim_top, y) - self.ylim_bottom = min(self.ylim_bottom, y) - self.xlim_left = min(self.xlim_left, x) - - # center around 1 - if self.ylim_bottom < 1: - self.ylim_top = max(self.ylim_top, 1 / float(self.ylim_bottom)) - if self.ylim_top > 1: - self.ylim_bottom = min(self.ylim_bottom, 1 / float(self.ylim_top)) - return categories - - def _set_scales(self, xscale, yscale): - # ScatterPlot uses log-scaling on the x-axis by default. - PlotReport._set_scales( - self, xscale or self.attribute.scale or 'log', 'log') diff --git a/experiments/issue750/v1-sat.py b/experiments/issue750/v1-sat.py deleted file mode 100755 index 8b7072dd5c..0000000000 --- a/experiments/issue750/v1-sat.py +++ /dev/null @@ -1,58 +0,0 @@ -#! 
/usr/bin/env python -# -*- coding: utf-8 -*- - -import os - -from lab.environments import LocalEnvironment, BaselSlurmEnvironment - -import common_setup -from common_setup import IssueConfig, IssueExperiment -from relativescatter import RelativeScatterPlotReport - -DIR = os.path.dirname(os.path.abspath(__file__)) -BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"] -REVISIONS = ["issue750-base", "issue750-v1"] -BUILD_OPTIONS = ["release32nolp"] -DRIVER_OPTIONS = ["--build", "release32nolp", "--search-time-limit", "1m"] -CONFIGS = [ - IssueConfig( - "blind", - ["--search", "astar(blind())"], - build_options=BUILD_OPTIONS, - driver_options=DRIVER_OPTIONS), - IssueConfig( - "lama-first", - [], - build_options=BUILD_OPTIONS, - driver_options=DRIVER_OPTIONS + ["--alias", "lama-first"]), -] -SUITE = common_setup.DEFAULT_SATISFICING_SUITE -ENVIRONMENT = BaselSlurmEnvironment( - email="jendrik.seipp@unibas.ch", - export=["PATH", "DOWNWARD_BENCHMARKS"]) - -if common_setup.is_test_run(): - SUITE = IssueExperiment.DEFAULT_TEST_SUITE - ENVIRONMENT = LocalEnvironment(processes=1) - -exp = IssueExperiment( - revisions=REVISIONS, - configs=CONFIGS, - environment=ENVIRONMENT, -) -exp.add_suite(BENCHMARKS_DIR, SUITE) -exp.add_absolute_report_step() -exp.add_comparison_table_step() - -for attribute in ["total_time"]: - for config in CONFIGS: - exp.add_report( - RelativeScatterPlotReport( - attributes=[attribute], - filter_algorithm=["{}-{}".format(rev, config.nick) for rev in REVISIONS], - get_category=lambda run1, run2: run1.get("domain"), - ), - outfile="{}-{}-{}-{}-{}.png".format(exp.name, attribute, config.nick, *REVISIONS) - ) - -exp.run_steps() diff --git a/experiments/issue752/common_setup.py b/experiments/issue752/common_setup.py deleted file mode 100644 index 687019c482..0000000000 --- a/experiments/issue752/common_setup.py +++ /dev/null @@ -1,382 +0,0 @@ -# -*- coding: utf-8 -*- - -import itertools -import os -import platform -import subprocess -import sys - -from 
lab.experiment import ARGPARSER -from lab import tools - -from downward.experiment import FastDownwardExperiment -from downward.reports.absolute import AbsoluteReport -from downward.reports.compare import ComparativeReport -from downward.reports.scatter import ScatterPlotReport - -from relativescatter import RelativeScatterPlotReport - - -def parse_args(): - ARGPARSER.add_argument( - "--test", - choices=["yes", "no", "auto"], - default="auto", - dest="test_run", - help="test experiment locally on a small suite if --test=yes or " - "--test=auto and we are not on a cluster") - return ARGPARSER.parse_args() - -ARGS = parse_args() - - -DEFAULT_OPTIMAL_SUITE = [ - 'airport', 'barman-opt11-strips', 'barman-opt14-strips', 'blocks', - 'childsnack-opt14-strips', 'depot', 'driverlog', - 'elevators-opt08-strips', 'elevators-opt11-strips', - 'floortile-opt11-strips', 'floortile-opt14-strips', 'freecell', - 'ged-opt14-strips', 'grid', 'gripper', 'hiking-opt14-strips', - 'logistics00', 'logistics98', 'miconic', 'movie', 'mprime', - 'mystery', 'nomystery-opt11-strips', 'openstacks-opt08-strips', - 'openstacks-opt11-strips', 'openstacks-opt14-strips', - 'openstacks-strips', 'parcprinter-08-strips', - 'parcprinter-opt11-strips', 'parking-opt11-strips', - 'parking-opt14-strips', 'pathways-noneg', 'pegsol-08-strips', - 'pegsol-opt11-strips', 'pipesworld-notankage', - 'pipesworld-tankage', 'psr-small', 'rovers', 'satellite', - 'scanalyzer-08-strips', 'scanalyzer-opt11-strips', - 'sokoban-opt08-strips', 'sokoban-opt11-strips', 'storage', - 'tetris-opt14-strips', 'tidybot-opt11-strips', - 'tidybot-opt14-strips', 'tpp', 'transport-opt08-strips', - 'transport-opt11-strips', 'transport-opt14-strips', - 'trucks-strips', 'visitall-opt11-strips', 'visitall-opt14-strips', - 'woodworking-opt08-strips', 'woodworking-opt11-strips', - 'zenotravel'] - -DEFAULT_SATISFICING_SUITE = [ - 'airport', 'assembly', 'barman-sat11-strips', - 'barman-sat14-strips', 'blocks', 'cavediving-14-adl', - 
'childsnack-sat14-strips', 'citycar-sat14-adl', 'depot', - 'driverlog', 'elevators-sat08-strips', 'elevators-sat11-strips', - 'floortile-sat11-strips', 'floortile-sat14-strips', 'freecell', - 'ged-sat14-strips', 'grid', 'gripper', 'hiking-sat14-strips', - 'logistics00', 'logistics98', 'maintenance-sat14-adl', 'miconic', - 'miconic-fulladl', 'miconic-simpleadl', 'movie', 'mprime', - 'mystery', 'nomystery-sat11-strips', 'openstacks', - 'openstacks-sat08-adl', 'openstacks-sat08-strips', - 'openstacks-sat11-strips', 'openstacks-sat14-strips', - 'openstacks-strips', 'optical-telegraphs', 'parcprinter-08-strips', - 'parcprinter-sat11-strips', 'parking-sat11-strips', - 'parking-sat14-strips', 'pathways', 'pathways-noneg', - 'pegsol-08-strips', 'pegsol-sat11-strips', 'philosophers', - 'pipesworld-notankage', 'pipesworld-tankage', 'psr-large', - 'psr-middle', 'psr-small', 'rovers', 'satellite', - 'scanalyzer-08-strips', 'scanalyzer-sat11-strips', 'schedule', - 'sokoban-sat08-strips', 'sokoban-sat11-strips', 'storage', - 'tetris-sat14-strips', 'thoughtful-sat14-strips', - 'tidybot-sat11-strips', 'tpp', 'transport-sat08-strips', - 'transport-sat11-strips', 'transport-sat14-strips', 'trucks', - 'trucks-strips', 'visitall-sat11-strips', 'visitall-sat14-strips', - 'woodworking-sat08-strips', 'woodworking-sat11-strips', - 'zenotravel'] - - -def get_script(): - """Get file name of main script.""" - return tools.get_script_path() - - -def get_script_dir(): - """Get directory of main script. - - Usually a relative directory (depends on how it was called by the user.)""" - return os.path.dirname(get_script()) - - -def get_experiment_name(): - """Get name for experiment. - - Derived from the absolute filename of the main script, e.g. 
- "/ham/spam/eggs.py" => "spam-eggs".""" - script = os.path.abspath(get_script()) - script_dir = os.path.basename(os.path.dirname(script)) - script_base = os.path.splitext(os.path.basename(script))[0] - return "%s-%s" % (script_dir, script_base) - - -def get_data_dir(): - """Get data dir for the experiment. - - This is the subdirectory "data" of the directory containing - the main script.""" - return os.path.join(get_script_dir(), "data", get_experiment_name()) - - -def get_repo_base(): - """Get base directory of the repository, as an absolute path. - - Search upwards in the directory tree from the main script until a - directory with a subdirectory named ".hg" is found. - - Abort if the repo base cannot be found.""" - path = os.path.abspath(get_script_dir()) - while os.path.dirname(path) != path: - if os.path.exists(os.path.join(path, ".hg")): - return path - path = os.path.dirname(path) - sys.exit("repo base could not be found") - - -def is_running_on_cluster(): - node = platform.node() - return node.endswith(".scicore.unibas.ch") or node.endswith(".cluster.bc2.ch") - - -def is_test_run(): - return ARGS.test_run == "yes" or ( - ARGS.test_run == "auto" and not is_running_on_cluster()) - - -def get_algo_nick(revision, config_nick): - return "{revision}-{config_nick}".format(**locals()) - - -class IssueConfig(object): - """Hold information about a planner configuration. - - See FastDownwardExperiment.add_algorithm() for documentation of the - constructor's options. 
- - """ - def __init__(self, nick, component_options, - build_options=None, driver_options=None): - self.nick = nick - self.component_options = component_options - self.build_options = build_options - self.driver_options = driver_options - - -class IssueExperiment(FastDownwardExperiment): - """Subclass of FastDownwardExperiment with some convenience features.""" - - DEFAULT_TEST_SUITE = ["depot:p01.pddl", "gripper:prob01.pddl"] - - DEFAULT_TABLE_ATTRIBUTES = [ - "cost", - "coverage", - "error", - "evaluations", - "expansions", - "expansions_until_last_jump", - "generated", - "memory", - "quality", - "run_dir", - "score_evaluations", - "score_expansions", - "score_generated", - "score_memory", - "score_search_time", - "score_total_time", - "search_time", - "total_time", - ] - - DEFAULT_SCATTER_PLOT_ATTRIBUTES = [ - "evaluations", - "expansions", - "expansions_until_last_jump", - "initial_h_value", - "memory", - "search_time", - "total_time", - ] - - PORTFOLIO_ATTRIBUTES = [ - "cost", - "coverage", - "error", - "plan_length", - "run_dir", - ] - - def __init__(self, revisions=None, configs=None, path=None, **kwargs): - """ - - You can either specify both *revisions* and *configs* or none - of them. If they are omitted, you will need to call - exp.add_algorithm() manually. - - If *revisions* is given, it must be a non-empty list of - revision identifiers, which specify which planner versions to - use in the experiment. The same versions are used for - translator, preprocessor and search. :: - - IssueExperiment(revisions=["issue123", "4b3d581643"], ...) - - If *configs* is given, it must be a non-empty list of - IssueConfig objects. :: - - IssueExperiment(..., configs=[ - IssueConfig("ff", ["--search", "eager_greedy(ff())"]), - IssueConfig( - "lama", [], - driver_options=["--alias", "seq-sat-lama-2011"]), - ]) - - If *path* is specified, it must be the path to where the - experiment should be built (e.g. - /home/john/experiments/issue123/exp01/). 
If omitted, the - experiment path is derived automatically from the main - script's filename. Example:: - - script = experiments/issue123/exp01.py --> - path = experiments/issue123/data/issue123-exp01/ - - """ - - path = path or get_data_dir() - - FastDownwardExperiment.__init__(self, path=path, **kwargs) - - if (revisions and not configs) or (not revisions and configs): - raise ValueError( - "please provide either both or none of revisions and configs") - - for rev in revisions: - for config in configs: - self.add_algorithm( - get_algo_nick(rev, config.nick), - get_repo_base(), - rev, - config.component_options, - build_options=config.build_options, - driver_options=config.driver_options) - - self._revisions = revisions - self._configs = configs - - @classmethod - def _is_portfolio(cls, config_nick): - return "fdss" in config_nick - - @classmethod - def get_supported_attributes(cls, config_nick, attributes): - if cls._is_portfolio(config_nick): - return [attr for attr in attributes - if attr in cls.PORTFOLIO_ATTRIBUTES] - return attributes - - def add_absolute_report_step(self, **kwargs): - """Add step that makes an absolute report. - - Absolute reports are useful for experiments that don't compare - revisions. - - The report is written to the experiment evaluation directory. - - All *kwargs* will be passed to the AbsoluteReport class. If the - keyword argument *attributes* is not specified, a default list - of attributes is used. :: - - exp.add_absolute_report_step(attributes=["coverage"]) - - """ - kwargs.setdefault("attributes", self.DEFAULT_TABLE_ATTRIBUTES) - report = AbsoluteReport(**kwargs) - outfile = os.path.join( - self.eval_dir, - get_experiment_name() + "." + report.output_format) - self.add_report(report, outfile=outfile) - self.add_step( - 'publish-absolute-report', subprocess.call, ['publish', outfile]) - - def add_comparison_table_step(self, **kwargs): - """Add a step that makes pairwise revision comparisons. 
- - Create comparative reports for all pairs of Fast Downward - revisions. Each report pairs up the runs of the same config and - lists the two absolute attribute values and their difference - for all attributes in kwargs["attributes"]. - - All *kwargs* will be passed to the CompareConfigsReport class. - If the keyword argument *attributes* is not specified, a - default list of attributes is used. :: - - exp.add_comparison_table_step(attributes=["coverage"]) - - """ - kwargs.setdefault("attributes", self.DEFAULT_TABLE_ATTRIBUTES) - - def make_comparison_tables(): - for rev1, rev2 in itertools.combinations(self._revisions, 2): - compared_configs = [] - for config in self._configs: - config_nick = config.nick - compared_configs.append( - ("%s-%s" % (rev1, config_nick), - "%s-%s" % (rev2, config_nick), - "Diff (%s)" % config_nick)) - report = ComparativeReport(compared_configs, **kwargs) - outfile = os.path.join( - self.eval_dir, - "%s-%s-%s-compare.%s" % ( - self.name, rev1, rev2, report.output_format)) - report(self.eval_dir, outfile) - - def publish_comparison_tables(): - for rev1, rev2 in itertools.combinations(self._revisions, 2): - outfile = os.path.join( - self.eval_dir, - "%s-%s-%s-compare.html" % (self.name, rev1, rev2)) - subprocess.call(["publish", outfile]) - - self.add_step("make-comparison-tables", make_comparison_tables) - self.add_step( - "publish-comparison-tables", publish_comparison_tables) - - def add_scatter_plot_step(self, relative=False, attributes=None): - """Add step creating (relative) scatter plots for all revision pairs. - - Create a scatter plot for each combination of attribute, - configuration and revisions pair. If *attributes* is not - specified, a list of common scatter plot attributes is used. - For portfolios all attributes except "cost", "coverage" and - "plan_length" will be ignored. 
:: - - exp.add_scatter_plot_step(attributes=["expansions"]) - - """ - if relative: - report_class = RelativeScatterPlotReport - scatter_dir = os.path.join(self.eval_dir, "scatter-relative") - step_name = "make-relative-scatter-plots" - else: - report_class = ScatterPlotReport - scatter_dir = os.path.join(self.eval_dir, "scatter-absolute") - step_name = "make-absolute-scatter-plots" - if attributes is None: - attributes = self.DEFAULT_SCATTER_PLOT_ATTRIBUTES - - def make_scatter_plot(config_nick, rev1, rev2, attribute): - name = "-".join([self.name, rev1, rev2, attribute, config_nick]) - print "Make scatter plot for", name - algo1 = "{}-{}".format(rev1, config_nick) - algo2 = "{}-{}".format(rev2, config_nick) - report = report_class( - filter_config=[algo1, algo2], - attributes=[attribute], - get_category=lambda run1, run2: run1["domain"], - legend_location=(1.3, 0.5)) - report( - self.eval_dir, - os.path.join(scatter_dir, rev1 + "-" + rev2, name)) - - def make_scatter_plots(): - for config in self._configs: - for rev1, rev2 in itertools.combinations(self._revisions, 2): - for attribute in self.get_supported_attributes( - config.nick, attributes): - make_scatter_plot(config.nick, rev1, rev2, attribute) - - self.add_step(step_name, make_scatter_plots) diff --git a/experiments/issue752/relativescatter.py b/experiments/issue752/relativescatter.py deleted file mode 100644 index f74cb6e721..0000000000 --- a/experiments/issue752/relativescatter.py +++ /dev/null @@ -1,105 +0,0 @@ -# -*- coding: utf-8 -*- - -from collections import defaultdict - -from matplotlib import ticker - -from downward.reports.scatter import ScatterPlotReport -from downward.reports.plot import PlotReport, Matplotlib, MatplotlibPlot - - -# TODO: handle outliers - -# TODO: this is mostly copied from ScatterMatplotlib (scatter.py) -class RelativeScatterMatplotlib(Matplotlib): - @classmethod - def _plot(cls, report, axes, categories, styles): - # Display grid - axes.grid(b=True, linestyle='-', 
color='0.75') - - has_points = False - # Generate the scatter plots - for category, coords in sorted(categories.items()): - X, Y = zip(*coords) - axes.scatter(X, Y, s=42, label=category, **styles[category]) - if X and Y: - has_points = True - - if report.xscale == 'linear' or report.yscale == 'linear': - plot_size = report.missing_val * 1.01 - else: - plot_size = report.missing_val * 1.25 - - # make 5 ticks above and below 1 - yticks = [] - tick_step = report.ylim_top**(1/5.0) - for i in xrange(-5, 6): - yticks.append(tick_step**i) - axes.set_yticks(yticks) - axes.get_yaxis().set_major_formatter(ticker.ScalarFormatter()) - - axes.set_xlim(report.xlim_left or -1, report.xlim_right or plot_size) - axes.set_ylim(report.ylim_bottom or -1, report.ylim_top or plot_size) - - for axis in [axes.xaxis, axes.yaxis]: - MatplotlibPlot.change_axis_formatter( - axis, - report.missing_val if report.show_missing else None) - return has_points - - -class RelativeScatterPlotReport(ScatterPlotReport): - """ - Generate a scatter plot that shows a relative comparison of two - algorithms with regard to the given attribute. The attribute value - of algorithm 1 is shown on the x-axis and the relation to the value - of algorithm 2 on the y-axis. - """ - - def __init__(self, show_missing=True, get_category=None, **kwargs): - ScatterPlotReport.__init__(self, show_missing, get_category, **kwargs) - if self.output_format == 'tex': - raise "not supported" - else: - self.writer = RelativeScatterMatplotlib - - def _fill_categories(self, runs): - # We discard the *runs* parameter. 
- # Map category names to value tuples - categories = defaultdict(list) - self.ylim_bottom = 2 - self.ylim_top = 0.5 - self.xlim_left = float("inf") - for (domain, problem), runs in self.problem_runs.items(): - if len(runs) != 2: - continue - run1, run2 = runs - assert (run1['algorithm'] == self.algorithms[0] and - run2['algorithm'] == self.algorithms[1]) - val1 = run1.get(self.attribute) - val2 = run2.get(self.attribute) - if val1 is None or val2 is None: - continue - category = self.get_category(run1, run2) - assert val1 > 0, (domain, problem, self.algorithms[0], val1) - assert val2 > 0, (domain, problem, self.algorithms[1], val2) - x = val1 - y = val2 / float(val1) - - categories[category].append((x, y)) - - self.ylim_top = max(self.ylim_top, y) - self.ylim_bottom = min(self.ylim_bottom, y) - self.xlim_left = min(self.xlim_left, x) - - # center around 1 - if self.ylim_bottom < 1: - self.ylim_top = max(self.ylim_top, 1 / float(self.ylim_bottom)) - if self.ylim_top > 1: - self.ylim_bottom = min(self.ylim_bottom, 1 / float(self.ylim_top)) - return categories - - def _set_scales(self, xscale, yscale): - # ScatterPlot uses log-scaling on the x-axis by default. - PlotReport._set_scales( - self, xscale or self.attribute.scale or 'log', 'log') diff --git a/experiments/issue752/v1-new.py b/experiments/issue752/v1-new.py deleted file mode 100755 index 4bfdc37466..0000000000 --- a/experiments/issue752/v1-new.py +++ /dev/null @@ -1,36 +0,0 @@ -#! 
/usr/bin/env python -# -*- coding: utf-8 -*- - -import os - -from lab.environments import LocalEnvironment, BaselSlurmEnvironment - -import common_setup -from common_setup import IssueConfig, IssueExperiment -from relativescatter import RelativeScatterPlotReport - -DIR = os.path.dirname(os.path.abspath(__file__)) -BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"] -REVISIONS = ["issue752-v1"] -CONFIGS = [ - IssueConfig('astar-blind', ["--search", "astar(blind())"], - build_options=["release64"], driver_options=["--build", "release64"]), - IssueConfig('astar-seq-cplex1271', ["--search", "astar(operatorcounting([state_equation_constraints()], lpsolver=cplex))"], - build_options=["release64"], driver_options=["--build", "release64"]), -] -SUITE = common_setup.DEFAULT_OPTIMAL_SUITE -ENVIRONMENT = BaselSlurmEnvironment(email="florian.pommerening@unibas.ch", export=["PATH", "DOWNWARD_BENCHMARKS"]) - -if common_setup.is_test_run(): - SUITE = IssueExperiment.DEFAULT_TEST_SUITE - ENVIRONMENT = LocalEnvironment(processes=1) - -exp = IssueExperiment( - revisions=REVISIONS, - configs=CONFIGS, - environment=ENVIRONMENT, -) -exp.add_suite(BENCHMARKS_DIR, SUITE) -exp.add_absolute_report_step() - -exp.run_steps() diff --git a/experiments/issue752/v1-old.py b/experiments/issue752/v1-old.py deleted file mode 100755 index 122bd81e2d..0000000000 --- a/experiments/issue752/v1-old.py +++ /dev/null @@ -1,36 +0,0 @@ -#! 
/usr/bin/env python -# -*- coding: utf-8 -*- - -import os - -from lab.environments import LocalEnvironment, BaselSlurmEnvironment - -import common_setup -from common_setup import IssueConfig, IssueExperiment -from relativescatter import RelativeScatterPlotReport - -DIR = os.path.dirname(os.path.abspath(__file__)) -BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"] -REVISIONS = ["issue752-base"] -CONFIGS = [ - IssueConfig('astar-blind', ["--search", "astar(blind())"], - build_options=["release64"], driver_options=["--build", "release64"]), - IssueConfig('astar-seq-cplex1251', ["--search", "astar(operatorcounting([state_equation_constraints()], lpsolver=cplex))"], - build_options=["release64"], driver_options=["--build", "release64"]), -] -SUITE = common_setup.DEFAULT_OPTIMAL_SUITE -ENVIRONMENT = BaselSlurmEnvironment(email="florian.pommerening@unibas.ch", export=["PATH", "DOWNWARD_BENCHMARKS"]) - -if common_setup.is_test_run(): - SUITE = IssueExperiment.DEFAULT_TEST_SUITE - ENVIRONMENT = LocalEnvironment(processes=1) - -exp = IssueExperiment( - revisions=REVISIONS, - configs=CONFIGS, - environment=ENVIRONMENT, -) -exp.add_suite(BENCHMARKS_DIR, SUITE) -exp.add_absolute_report_step() - -exp.run_steps() diff --git a/experiments/issue752/v1-soplex.py b/experiments/issue752/v1-soplex.py deleted file mode 100755 index fbc30f42b2..0000000000 --- a/experiments/issue752/v1-soplex.py +++ /dev/null @@ -1,56 +0,0 @@ -#! 
/usr/bin/env python -# -*- coding: utf-8 -*- - -import os - -from lab.environments import LocalEnvironment, BaselSlurmEnvironment - -import common_setup -from common_setup import IssueConfig, IssueExperiment -from relativescatter import RelativeScatterPlotReport - -DIR = os.path.dirname(os.path.abspath(__file__)) -BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"] -REVISIONS = ["issue752-v1"] -CONFIGS = [ - IssueConfig('astar-seq-cplex', ["--search", "astar(operatorcounting([state_equation_constraints()], lpsolver=cplex))"], - build_options=["release64"], driver_options=["--build", "release64"]), - IssueConfig('astar-seq-soplex', ["--search", "astar(operatorcounting([state_equation_constraints()], lpsolver=soplex))"], - build_options=["release64"], driver_options=["--build", "release64"]), - IssueConfig('astar-seq-pho-cplex', ["--search", "astar(operatorcounting([state_equation_constraints(), lmcut_constraints()], lpsolver=cplex))"], - build_options=["release64"], driver_options=["--build", "release64"]), - IssueConfig('astar-seq-pho-soplex', ["--search", "astar(operatorcounting([state_equation_constraints(), lmcut_constraints()], lpsolver=soplex))"], - build_options=["release64"], driver_options=["--build", "release64"]), - IssueConfig('astar-seq-lmcut-cplex', ["--search", "astar(operatorcounting([state_equation_constraints(), pho_constraints(patterns=systematic(2))], lpsolver=cplex))"], - build_options=["release64"], driver_options=["--build", "release64"]), - IssueConfig('astar-seq-lmcut-soplex', ["--search", "astar(operatorcounting([state_equation_constraints(), pho_constraints(patterns=systematic(2))], lpsolver=soplex))"], - build_options=["release64"], driver_options=["--build", "release64"]), -] -SUITE = common_setup.DEFAULT_OPTIMAL_SUITE -ENVIRONMENT = BaselSlurmEnvironment(email="florian.pommerening@unibas.ch", export=["PATH", "DOWNWARD_BENCHMARKS"]) - -if common_setup.is_test_run(): - SUITE = IssueExperiment.DEFAULT_TEST_SUITE - ENVIRONMENT = 
LocalEnvironment(processes=1) - -exp = IssueExperiment( - revisions=REVISIONS, - configs=CONFIGS, - environment=ENVIRONMENT, -) -exp.add_suite(BENCHMARKS_DIR, SUITE) -exp.add_absolute_report_step() - -for attribute in ["total_time"]: - for config in ["astar-seq-pho", "astar-seq-lmcut"]: - for rev in REVISIONS: - exp.add_report( - RelativeScatterPlotReport( - attributes=[attribute], - filter_algorithm=["{}-{}-{}".format(rev, config, solver) for solver in ["cplex", "soplex"]], - get_category=lambda run1, run2: run1.get("domain"), - ), - outfile="{}-{}-{}.png".format(exp.name, attribute, config) - ) - -exp.run_steps() diff --git a/experiments/issue752/v2.py b/experiments/issue752/v2.py deleted file mode 100755 index 4bf3ba1005..0000000000 --- a/experiments/issue752/v2.py +++ /dev/null @@ -1,55 +0,0 @@ -#! /usr/bin/env python -# -*- coding: utf-8 -*- - -import os - -from lab.environments import LocalEnvironment, BaselSlurmEnvironment - -import common_setup -from common_setup import IssueConfig, IssueExperiment -from relativescatter import RelativeScatterPlotReport - -DIR = os.path.dirname(os.path.abspath(__file__)) -BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"] -REVISIONS = ["issue752-v2"] -CONFIGS = [ - IssueConfig("opcount-seq-lmcut-soplex", ["--search", "astar(operatorcounting([state_equation_constraints(), lmcut_constraints()], lpsolver=soplex))"]), - IssueConfig("diverse-potentials-soplex", ["--search", "astar(diverse_potentials(lpsolver=soplex))"]), - IssueConfig("optimal-lmcount-soplex", ["--search", "astar(lmcount(lm_merged([lm_rhw(),lm_hm(m=1)]), admissible=true, optimal=true, lpsolver=soplex))"]), - IssueConfig("opcount-seq-lmcut-cplex", ["--search", "astar(operatorcounting([state_equation_constraints(), lmcut_constraints()], lpsolver=cplex))"]), - IssueConfig("diverse-potentials-cplex", ["--search", "astar(diverse_potentials(lpsolver=cplex))"]), - IssueConfig("optimal-lmcount-cplex", ["--search", "astar(lmcount(lm_merged([lm_rhw(),lm_hm(m=1)]), 
admissible=true, optimal=true, lpsolver=cplex))"]), -] -SUITE = common_setup.DEFAULT_OPTIMAL_SUITE -ENVIRONMENT = BaselSlurmEnvironment(partition="infai_2", email="florian.pommerening@unibas.ch") - -if common_setup.is_test_run(): - SUITE = IssueExperiment.DEFAULT_TEST_SUITE - ENVIRONMENT = LocalEnvironment(processes=4) - -exp = IssueExperiment( - revisions=REVISIONS, - configs=CONFIGS, - environment=ENVIRONMENT, -) -exp.add_suite(BENCHMARKS_DIR, SUITE) - -exp.add_parser(exp.EXITCODE_PARSER) -exp.add_parser(exp.TRANSLATOR_PARSER) -exp.add_parser(exp.SINGLE_SEARCH_PARSER) -exp.add_parser(exp.PLANNER_PARSER) - -exp.add_step('build', exp.build) -exp.add_step('start', exp.start_runs) -exp.add_fetcher(name='fetch') - -exp.add_absolute_report_step() - -for nick in ["opcount-seq-lmcut", "diverse-potentials", "optimal-lmcount"]: - exp.add_report(RelativeScatterPlotReport( - attributes=["total_time"], - filter_algorithm=["issue752-v2-%s-%s" % (nick, solver) for solver in ["cplex", "soplex"]], - get_category=lambda r1, r2: r1["domain"]), - outfile="issue752-v2-scatter-total-time-%s.png" % nick) - -exp.run_steps() diff --git a/experiments/issue752/v3.py b/experiments/issue752/v3.py deleted file mode 100755 index be382c662f..0000000000 --- a/experiments/issue752/v3.py +++ /dev/null @@ -1,55 +0,0 @@ -#! 
/usr/bin/env python -# -*- coding: utf-8 -*- - -import os - -from lab.environments import LocalEnvironment, BaselSlurmEnvironment - -import common_setup -from common_setup import IssueConfig, IssueExperiment -from relativescatter import RelativeScatterPlotReport - -DIR = os.path.dirname(os.path.abspath(__file__)) -BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"] -REVISIONS = ["issue752-v3"] -CONFIGS = [ - IssueConfig("opcount-seq-lmcut-soplex", ["--search", "astar(operatorcounting([state_equation_constraints(), lmcut_constraints()], lpsolver=soplex))"]), - IssueConfig("diverse-potentials-soplex", ["--search", "astar(diverse_potentials(lpsolver=soplex))"]), - IssueConfig("optimal-lmcount-soplex", ["--search", "astar(lmcount(lm_merged([lm_rhw(),lm_hm(m=1)]), admissible=true, optimal=true, lpsolver=soplex))"]), - IssueConfig("opcount-seq-lmcut-cplex", ["--search", "astar(operatorcounting([state_equation_constraints(), lmcut_constraints()], lpsolver=cplex))"]), - IssueConfig("diverse-potentials-cplex", ["--search", "astar(diverse_potentials(lpsolver=cplex))"]), - IssueConfig("optimal-lmcount-cplex", ["--search", "astar(lmcount(lm_merged([lm_rhw(),lm_hm(m=1)]), admissible=true, optimal=true, lpsolver=cplex))"]), -] -SUITE = common_setup.DEFAULT_OPTIMAL_SUITE -ENVIRONMENT = BaselSlurmEnvironment(partition="infai_2", email="florian.pommerening@unibas.ch") - -if common_setup.is_test_run(): - SUITE = IssueExperiment.DEFAULT_TEST_SUITE - ENVIRONMENT = LocalEnvironment(processes=4) - -exp = IssueExperiment( - revisions=REVISIONS, - configs=CONFIGS, - environment=ENVIRONMENT, -) -exp.add_suite(BENCHMARKS_DIR, SUITE) - -exp.add_parser(exp.EXITCODE_PARSER) -exp.add_parser(exp.TRANSLATOR_PARSER) -exp.add_parser(exp.SINGLE_SEARCH_PARSER) -exp.add_parser(exp.PLANNER_PARSER) - -exp.add_step('build', exp.build) -exp.add_step('start', exp.start_runs) -exp.add_fetcher(name='fetch') - -exp.add_absolute_report_step() - -for nick in ["opcount-seq-lmcut", "diverse-potentials", 
"optimal-lmcount"]: - exp.add_report(RelativeScatterPlotReport( - attributes=["total_time"], - filter_algorithm=["issue752-v3-%s-%s" % (nick, solver) for solver in ["cplex", "soplex"]], - get_category=lambda r1, r2: r1["domain"]), - outfile="issue752-v3-scatter-total-time-%s.png" % nick) - -exp.run_steps() diff --git a/experiments/issue768/common_setup.py b/experiments/issue768/common_setup.py deleted file mode 100644 index 5d2b40b61c..0000000000 --- a/experiments/issue768/common_setup.py +++ /dev/null @@ -1,385 +0,0 @@ -# -*- coding: utf-8 -*- - -import itertools -import os -import platform -import subprocess -import sys - -from lab.experiment import ARGPARSER -from lab import tools - -from downward.experiment import FastDownwardExperiment -from downward.reports.absolute import AbsoluteReport -from downward.reports.compare import ComparativeReport -from downward.reports.scatter import ScatterPlotReport - -from relativescatter import RelativeScatterPlotReport - - -def parse_args(): - ARGPARSER.add_argument( - "--test", - choices=["yes", "no", "auto"], - default="auto", - dest="test_run", - help="test experiment locally on a small suite if --test=yes or " - "--test=auto and we are not on a cluster") - return ARGPARSER.parse_args() - -ARGS = parse_args() - - -DEFAULT_OPTIMAL_SUITE = [ - 'airport', 'barman-opt11-strips', 'barman-opt14-strips', 'blocks', - 'childsnack-opt14-strips', 'depot', 'driverlog', - 'elevators-opt08-strips', 'elevators-opt11-strips', - 'floortile-opt11-strips', 'floortile-opt14-strips', 'freecell', - 'ged-opt14-strips', 'grid', 'gripper', 'hiking-opt14-strips', - 'logistics00', 'logistics98', 'miconic', 'movie', 'mprime', - 'mystery', 'nomystery-opt11-strips', 'openstacks-opt08-strips', - 'openstacks-opt11-strips', 'openstacks-opt14-strips', - 'openstacks-strips', 'parcprinter-08-strips', - 'parcprinter-opt11-strips', 'parking-opt11-strips', - 'parking-opt14-strips', 'pathways-noneg', 'pegsol-08-strips', - 'pegsol-opt11-strips', 
'pipesworld-notankage', - 'pipesworld-tankage', 'psr-small', 'rovers', 'satellite', - 'scanalyzer-08-strips', 'scanalyzer-opt11-strips', - 'sokoban-opt08-strips', 'sokoban-opt11-strips', 'storage', - 'tetris-opt14-strips', 'tidybot-opt11-strips', - 'tidybot-opt14-strips', 'tpp', 'transport-opt08-strips', - 'transport-opt11-strips', 'transport-opt14-strips', - 'trucks-strips', 'visitall-opt11-strips', 'visitall-opt14-strips', - 'woodworking-opt08-strips', 'woodworking-opt11-strips', - 'zenotravel'] - -DEFAULT_SATISFICING_SUITE = [ - 'airport', 'assembly', 'barman-sat11-strips', - 'barman-sat14-strips', 'blocks', 'cavediving-14-adl', - 'childsnack-sat14-strips', 'citycar-sat14-adl', 'depot', - 'driverlog', 'elevators-sat08-strips', 'elevators-sat11-strips', - 'floortile-sat11-strips', 'floortile-sat14-strips', 'freecell', - 'ged-sat14-strips', 'grid', 'gripper', 'hiking-sat14-strips', - 'logistics00', 'logistics98', 'maintenance-sat14-adl', 'miconic', - 'miconic-fulladl', 'miconic-simpleadl', 'movie', 'mprime', - 'mystery', 'nomystery-sat11-strips', 'openstacks', - 'openstacks-sat08-adl', 'openstacks-sat08-strips', - 'openstacks-sat11-strips', 'openstacks-sat14-strips', - 'openstacks-strips', 'optical-telegraphs', 'parcprinter-08-strips', - 'parcprinter-sat11-strips', 'parking-sat11-strips', - 'parking-sat14-strips', 'pathways', 'pathways-noneg', - 'pegsol-08-strips', 'pegsol-sat11-strips', 'philosophers', - 'pipesworld-notankage', 'pipesworld-tankage', 'psr-large', - 'psr-middle', 'psr-small', 'rovers', 'satellite', - 'scanalyzer-08-strips', 'scanalyzer-sat11-strips', 'schedule', - 'sokoban-sat08-strips', 'sokoban-sat11-strips', 'storage', - 'tetris-sat14-strips', 'thoughtful-sat14-strips', - 'tidybot-sat11-strips', 'tpp', 'transport-sat08-strips', - 'transport-sat11-strips', 'transport-sat14-strips', 'trucks', - 'trucks-strips', 'visitall-sat11-strips', 'visitall-sat14-strips', - 'woodworking-sat08-strips', 'woodworking-sat11-strips', - 'zenotravel'] - - -def 
get_script(): - """Get file name of main script.""" - return tools.get_script_path() - - -def get_script_dir(): - """Get directory of main script. - - Usually a relative directory (depends on how it was called by the user.)""" - return os.path.dirname(get_script()) - - -def get_experiment_name(): - """Get name for experiment. - - Derived from the absolute filename of the main script, e.g. - "/ham/spam/eggs.py" => "spam-eggs".""" - script = os.path.abspath(get_script()) - script_dir = os.path.basename(os.path.dirname(script)) - script_base = os.path.splitext(os.path.basename(script))[0] - return "%s-%s" % (script_dir, script_base) - - -def get_data_dir(): - """Get data dir for the experiment. - - This is the subdirectory "data" of the directory containing - the main script.""" - return os.path.join(get_script_dir(), "data", get_experiment_name()) - - -def get_repo_base(): - """Get base directory of the repository, as an absolute path. - - Search upwards in the directory tree from the main script until a - directory with a subdirectory named ".hg" is found. - - Abort if the repo base cannot be found.""" - path = os.path.abspath(get_script_dir()) - while os.path.dirname(path) != path: - if os.path.exists(os.path.join(path, ".hg")): - return path - path = os.path.dirname(path) - sys.exit("repo base could not be found") - - -def is_running_on_cluster(): - node = platform.node() - return ( - "cluster" in node or - node.startswith("gkigrid") or - node in ["habakuk", "turtur"]) - - -def is_test_run(): - return ARGS.test_run == "yes" or ( - ARGS.test_run == "auto" and not is_running_on_cluster()) - - -def get_algo_nick(revision, config_nick): - return "{revision}-{config_nick}".format(**locals()) - - -class IssueConfig(object): - """Hold information about a planner configuration. - - See FastDownwardExperiment.add_algorithm() for documentation of the - constructor's options. 
- - """ - def __init__(self, nick, component_options, - build_options=None, driver_options=None): - self.nick = nick - self.component_options = component_options - self.build_options = build_options - self.driver_options = driver_options - - -class IssueExperiment(FastDownwardExperiment): - """Subclass of FastDownwardExperiment with some convenience features.""" - - DEFAULT_TEST_SUITE = ["gripper:prob01.pddl"] - - DEFAULT_TABLE_ATTRIBUTES = [ - "cost", - "coverage", - "error", - "evaluations", - "expansions", - "expansions_until_last_jump", - "generated", - "memory", - "quality", - "run_dir", - "score_evaluations", - "score_expansions", - "score_generated", - "score_memory", - "score_search_time", - "score_total_time", - "search_time", - "total_time", - ] - - DEFAULT_SCATTER_PLOT_ATTRIBUTES = [ - "evaluations", - "expansions", - "expansions_until_last_jump", - "initial_h_value", - "memory", - "search_time", - "total_time", - ] - - PORTFOLIO_ATTRIBUTES = [ - "cost", - "coverage", - "error", - "plan_length", - "run_dir", - ] - - def __init__(self, revisions=None, configs=None, path=None, **kwargs): - """ - - You can either specify both *revisions* and *configs* or none - of them. If they are omitted, you will need to call - exp.add_algorithm() manually. - - If *revisions* is given, it must be a non-empty list of - revision identifiers, which specify which planner versions to - use in the experiment. The same versions are used for - translator, preprocessor and search. :: - - IssueExperiment(revisions=["issue123", "4b3d581643"], ...) - - If *configs* is given, it must be a non-empty list of - IssueConfig objects. :: - - IssueExperiment(..., configs=[ - IssueConfig("ff", ["--search", "eager_greedy(ff())"]), - IssueConfig( - "lama", [], - driver_options=["--alias", "seq-sat-lama-2011"]), - ]) - - If *path* is specified, it must be the path to where the - experiment should be built (e.g. - /home/john/experiments/issue123/exp01/). 
If omitted, the - experiment path is derived automatically from the main - script's filename. Example:: - - script = experiments/issue123/exp01.py --> - path = experiments/issue123/data/issue123-exp01/ - - """ - - path = path or get_data_dir() - - FastDownwardExperiment.__init__(self, path=path, **kwargs) - - if (revisions and not configs) or (not revisions and configs): - raise ValueError( - "please provide either both or none of revisions and configs") - - for rev in revisions: - for config in configs: - self.add_algorithm( - get_algo_nick(rev, config.nick), - get_repo_base(), - rev, - config.component_options, - build_options=config.build_options, - driver_options=config.driver_options) - - self._revisions = revisions - self._configs = configs - - @classmethod - def _is_portfolio(cls, config_nick): - return "fdss" in config_nick - - @classmethod - def get_supported_attributes(cls, config_nick, attributes): - if cls._is_portfolio(config_nick): - return [attr for attr in attributes - if attr in cls.PORTFOLIO_ATTRIBUTES] - return attributes - - def add_absolute_report_step(self, **kwargs): - """Add step that makes an absolute report. - - Absolute reports are useful for experiments that don't compare - revisions. - - The report is written to the experiment evaluation directory. - - All *kwargs* will be passed to the AbsoluteReport class. If the - keyword argument *attributes* is not specified, a default list - of attributes is used. :: - - exp.add_absolute_report_step(attributes=["coverage"]) - - """ - kwargs.setdefault("attributes", self.DEFAULT_TABLE_ATTRIBUTES) - report = AbsoluteReport(**kwargs) - outfile = os.path.join( - self.eval_dir, - get_experiment_name() + "." + report.output_format) - self.add_report(report, outfile=outfile) - self.add_step( - 'publish-absolute-report', subprocess.call, ['publish', outfile]) - - def add_comparison_table_step(self, **kwargs): - """Add a step that makes pairwise revision comparisons. 
- - Create comparative reports for all pairs of Fast Downward - revisions. Each report pairs up the runs of the same config and - lists the two absolute attribute values and their difference - for all attributes in kwargs["attributes"]. - - All *kwargs* will be passed to the CompareConfigsReport class. - If the keyword argument *attributes* is not specified, a - default list of attributes is used. :: - - exp.add_comparison_table_step(attributes=["coverage"]) - - """ - kwargs.setdefault("attributes", self.DEFAULT_TABLE_ATTRIBUTES) - - def make_comparison_tables(): - for rev1, rev2 in itertools.combinations(self._revisions, 2): - compared_configs = [] - for config in self._configs: - config_nick = config.nick - compared_configs.append( - ("%s-%s" % (rev1, config_nick), - "%s-%s" % (rev2, config_nick), - "Diff (%s)" % config_nick)) - report = ComparativeReport(compared_configs, **kwargs) - outfile = os.path.join( - self.eval_dir, - "%s-%s-%s-compare.%s" % ( - self.name, rev1, rev2, report.output_format)) - report(self.eval_dir, outfile) - - def publish_comparison_tables(): - for rev1, rev2 in itertools.combinations(self._revisions, 2): - outfile = os.path.join( - self.eval_dir, - "%s-%s-%s-compare.html" % (self.name, rev1, rev2)) - subprocess.call(["publish", outfile]) - - self.add_step("make-comparison-tables", make_comparison_tables) - self.add_step( - "publish-comparison-tables", publish_comparison_tables) - - def add_scatter_plot_step(self, relative=False, attributes=None): - """Add step creating (relative) scatter plots for all revision pairs. - - Create a scatter plot for each combination of attribute, - configuration and revisions pair. If *attributes* is not - specified, a list of common scatter plot attributes is used. - For portfolios all attributes except "cost", "coverage" and - "plan_length" will be ignored. 
:: - - exp.add_scatter_plot_step(attributes=["expansions"]) - - """ - if relative: - report_class = RelativeScatterPlotReport - scatter_dir = os.path.join(self.eval_dir, "scatter-relative") - step_name = "make-relative-scatter-plots" - else: - report_class = ScatterPlotReport - scatter_dir = os.path.join(self.eval_dir, "scatter-absolute") - step_name = "make-absolute-scatter-plots" - if attributes is None: - attributes = self.DEFAULT_SCATTER_PLOT_ATTRIBUTES - - def make_scatter_plot(config_nick, rev1, rev2, attribute): - name = "-".join([self.name, rev1, rev2, attribute, config_nick]) - print "Make scatter plot for", name - algo1 = "{}-{}".format(rev1, config_nick) - algo2 = "{}-{}".format(rev2, config_nick) - report = report_class( - filter_config=[algo1, algo2], - attributes=[attribute], - get_category=lambda run1, run2: run1["domain"], - legend_location=(1.3, 0.5)) - report( - self.eval_dir, - os.path.join(scatter_dir, rev1 + "-" + rev2, name)) - - def make_scatter_plots(): - for config in self._configs: - for rev1, rev2 in itertools.combinations(self._revisions, 2): - for attribute in self.get_supported_attributes( - config.nick, attributes): - make_scatter_plot(config.nick, rev1, rev2, attribute) - - self.add_step(step_name, make_scatter_plots) diff --git a/experiments/issue768/relativescatter.py b/experiments/issue768/relativescatter.py deleted file mode 100644 index f74cb6e721..0000000000 --- a/experiments/issue768/relativescatter.py +++ /dev/null @@ -1,105 +0,0 @@ -# -*- coding: utf-8 -*- - -from collections import defaultdict - -from matplotlib import ticker - -from downward.reports.scatter import ScatterPlotReport -from downward.reports.plot import PlotReport, Matplotlib, MatplotlibPlot - - -# TODO: handle outliers - -# TODO: this is mostly copied from ScatterMatplotlib (scatter.py) -class RelativeScatterMatplotlib(Matplotlib): - @classmethod - def _plot(cls, report, axes, categories, styles): - # Display grid - axes.grid(b=True, linestyle='-', 
color='0.75') - - has_points = False - # Generate the scatter plots - for category, coords in sorted(categories.items()): - X, Y = zip(*coords) - axes.scatter(X, Y, s=42, label=category, **styles[category]) - if X and Y: - has_points = True - - if report.xscale == 'linear' or report.yscale == 'linear': - plot_size = report.missing_val * 1.01 - else: - plot_size = report.missing_val * 1.25 - - # make 5 ticks above and below 1 - yticks = [] - tick_step = report.ylim_top**(1/5.0) - for i in xrange(-5, 6): - yticks.append(tick_step**i) - axes.set_yticks(yticks) - axes.get_yaxis().set_major_formatter(ticker.ScalarFormatter()) - - axes.set_xlim(report.xlim_left or -1, report.xlim_right or plot_size) - axes.set_ylim(report.ylim_bottom or -1, report.ylim_top or plot_size) - - for axis in [axes.xaxis, axes.yaxis]: - MatplotlibPlot.change_axis_formatter( - axis, - report.missing_val if report.show_missing else None) - return has_points - - -class RelativeScatterPlotReport(ScatterPlotReport): - """ - Generate a scatter plot that shows a relative comparison of two - algorithms with regard to the given attribute. The attribute value - of algorithm 1 is shown on the x-axis and the relation to the value - of algorithm 2 on the y-axis. - """ - - def __init__(self, show_missing=True, get_category=None, **kwargs): - ScatterPlotReport.__init__(self, show_missing, get_category, **kwargs) - if self.output_format == 'tex': - raise "not supported" - else: - self.writer = RelativeScatterMatplotlib - - def _fill_categories(self, runs): - # We discard the *runs* parameter. 
- # Map category names to value tuples - categories = defaultdict(list) - self.ylim_bottom = 2 - self.ylim_top = 0.5 - self.xlim_left = float("inf") - for (domain, problem), runs in self.problem_runs.items(): - if len(runs) != 2: - continue - run1, run2 = runs - assert (run1['algorithm'] == self.algorithms[0] and - run2['algorithm'] == self.algorithms[1]) - val1 = run1.get(self.attribute) - val2 = run2.get(self.attribute) - if val1 is None or val2 is None: - continue - category = self.get_category(run1, run2) - assert val1 > 0, (domain, problem, self.algorithms[0], val1) - assert val2 > 0, (domain, problem, self.algorithms[1], val2) - x = val1 - y = val2 / float(val1) - - categories[category].append((x, y)) - - self.ylim_top = max(self.ylim_top, y) - self.ylim_bottom = min(self.ylim_bottom, y) - self.xlim_left = min(self.xlim_left, x) - - # center around 1 - if self.ylim_bottom < 1: - self.ylim_top = max(self.ylim_top, 1 / float(self.ylim_bottom)) - if self.ylim_top > 1: - self.ylim_bottom = min(self.ylim_bottom, 1 / float(self.ylim_top)) - return categories - - def _set_scales(self, xscale, yscale): - # ScatterPlot uses log-scaling on the x-axis by default. - PlotReport._set_scales( - self, xscale or self.attribute.scale or 'log', 'log') diff --git a/experiments/issue768/v1-opt.py b/experiments/issue768/v1-opt.py deleted file mode 100755 index 0de27541c1..0000000000 --- a/experiments/issue768/v1-opt.py +++ /dev/null @@ -1,47 +0,0 @@ -#! 
/usr/bin/env python -# -*- coding: utf-8 -*- - -import os - -from lab.environments import LocalEnvironment, BaselSlurmEnvironment - -import common_setup -from common_setup import IssueConfig, IssueExperiment -from relativescatter import RelativeScatterPlotReport - -DIR = os.path.dirname(os.path.abspath(__file__)) -BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"] -REVISIONS = ["issue768-base", "issue768-v1"] -CONFIGS = [ - IssueConfig('ipdb', ['--search', 'astar(ipdb(max_time=900))']), -] -SUITE = common_setup.DEFAULT_OPTIMAL_SUITE -ENVIRONMENT = BaselSlurmEnvironment( - email="jendrik.seipp@unibas.ch", - export=["PATH", "DOWNWARD_BENCHMARKS"]) - -if common_setup.is_test_run(): - SUITE = IssueExperiment.DEFAULT_TEST_SUITE - ENVIRONMENT = LocalEnvironment(processes=1) - -exp = IssueExperiment( - revisions=REVISIONS, - configs=CONFIGS, - environment=ENVIRONMENT, -) -exp.add_suite(BENCHMARKS_DIR, SUITE) -#exp.add_absolute_report_step() -exp.add_comparison_table_step() - -for attribute in ["memory", "total_time"]: - for config in CONFIGS: - exp.add_report( - RelativeScatterPlotReport( - attributes=[attribute], - filter_algorithm=["{}-{}".format(rev, config.nick) for rev in REVISIONS], - get_category=lambda run1, run2: run1.get("domain"), - ), - outfile="{}-{}-{}-{}-{}.png".format(exp.name, attribute, config.nick, *REVISIONS) - ) - -exp.run_steps() diff --git a/experiments/issue77/common_setup.py b/experiments/issue77/common_setup.py deleted file mode 100644 index 88167271e7..0000000000 --- a/experiments/issue77/common_setup.py +++ /dev/null @@ -1,364 +0,0 @@ -# -*- coding: utf-8 -*- - -import itertools -import os -import platform -import sys - -from lab.environments import LocalEnvironment, MaiaEnvironment -from lab.experiment import ARGPARSER -from lab.steps import Step - -from downward.experiments import DownwardExperiment, _get_rev_nick -from downward.checkouts import Translator, Preprocessor, Planner -from downward.reports.absolute import AbsoluteReport -from 
downward.reports.compare import CompareRevisionsReport -from downward.reports.scatter import ScatterPlotReport - -from relative_scatter import RelativeScatterPlotReport - - -def parse_args(): - ARGPARSER.add_argument( - "--test", - choices=["yes", "no", "auto"], - default="auto", - dest="test_run", - help="test experiment locally on a small suite if --test=yes or " - "--test=auto and we are not on a cluster") - return ARGPARSER.parse_args() - -ARGS = parse_args() - - -def get_script(): - """Get file name of main script.""" - import __main__ - return __main__.__file__ - - -def get_script_dir(): - """Get directory of main script. - - Usually a relative directory (depends on how it was called by the user.)""" - return os.path.dirname(get_script()) - - -def get_experiment_name(): - """Get name for experiment. - - Derived from the absolute filename of the main script, e.g. - "/ham/spam/eggs.py" => "spam-eggs".""" - script = os.path.abspath(get_script()) - script_dir = os.path.basename(os.path.dirname(script)) - script_base = os.path.splitext(os.path.basename(script))[0] - return "%s-%s" % (script_dir, script_base) - - -def get_data_dir(): - """Get data dir for the experiment. - - This is the subdirectory "data" of the directory containing - the main script.""" - return os.path.join(get_script_dir(), "data", get_experiment_name()) - - -def get_repo_base(): - """Get base directory of the repository, as an absolute path. - - Search upwards in the directory tree from the main script until a - directory with a subdirectory named ".hg" is found. 
- - Abort if the repo base cannot be found.""" - path = os.path.abspath(get_script_dir()) - while os.path.dirname(path) != path: - if os.path.exists(os.path.join(path, ".hg")): - return path - path = os.path.dirname(path) - sys.exit("repo base could not be found") - - -def is_running_on_cluster(): - node = platform.node() - return ("cluster" in node or - node.startswith("gkigrid") or - node in ["habakuk", "turtur"]) - - -def is_test_run(): - return ARGS.test_run == "yes" or (ARGS.test_run == "auto" and - not is_running_on_cluster()) - - -class IssueExperiment(DownwardExperiment): - """Wrapper for DownwardExperiment with a few convenience features.""" - - DEFAULT_TEST_SUITE = "gripper:prob01.pddl" - - DEFAULT_TABLE_ATTRIBUTES = [ - "cost", - "coverage", - "error", - "evaluations", - "expansions", - "expansions_until_last_jump", - "generated", - "memory", - "quality", - "run_dir", - "score_evaluations", - "score_expansions", - "score_generated", - "score_memory", - "score_search_time", - "score_total_time", - "search_time", - "total_time", - ] - - DEFAULT_SCATTER_PLOT_ATTRIBUTES = [ - "evaluations", - "expansions", - "expansions_until_last_jump", - "initial_h_value", - "memory", - "search_time", - "total_time", - ] - - PORTFOLIO_ATTRIBUTES = [ - "cost", - "coverage", - "plan_length", - ] - - def __init__(self, configs, suite, grid_priority=None, path=None, - repo=None, revisions=None, search_revisions=None, - test_suite=None, **kwargs): - """Create a DownwardExperiment with some convenience features. - - *configs* must be a non-empty dict of {nick: cmdline} pairs - that sets the planner configurations to test. :: - - IssueExperiment(configs={ - "lmcut": ["--search", "astar(lmcut())"], - "ipdb": ["--search", "astar(ipdb())"]}) - - *suite* sets the benchmarks for the experiment. It must be a - single string or a list of strings specifying domains or - tasks. The downward.suites module has many predefined - suites. 
:: - - IssueExperiment(suite=["grid", "gripper:prob01.pddl"]) - - from downward import suites - IssueExperiment(suite=suites.suite_all()) - IssueExperiment(suite=suites.suite_satisficing_with_ipc11()) - IssueExperiment(suite=suites.suite_optimal()) - - Use *grid_priority* to set the job priority for cluster - experiments. It must be in the range [-1023, 0] where 0 is the - highest priority. By default the priority is 0. :: - - IssueExperiment(grid_priority=-500) - - If *path* is specified, it must be the path to where the - experiment should be built (e.g. - /home/john/experiments/issue123/exp01/). If omitted, the - experiment path is derived automatically from the main - script's filename. Example:: - - script = experiments/issue123/exp01.py --> - path = experiments/issue123/data/issue123-exp01/ - - If *repo* is specified, it must be the path to the root of a - local Fast Downward repository. If omitted, the repository - is derived automatically from the main script's path. Example:: - - script = /path/to/fd-repo/experiments/issue123/exp01.py --> - repo = /path/to/fd-repo - - If *revisions* is specified, it should be a non-empty - list of revisions, which specify which planner versions to use - in the experiment. The same versions are used for translator, - preprocessor and search. :: - - IssueExperiment(revisions=["issue123", "4b3d581643"]) - - If *search_revisions* is specified, it should be a non-empty - list of revisions, which specify which search component - versions to use in the experiment. All runs use the - translator and preprocessor component of the first - revision. :: - - IssueExperiment(search_revisions=["default", "issue123"]) - - If you really need to specify the (translator, preprocessor, - planner) triples manually, use the *combinations* parameter - from the base class (might be deprecated soon). The options - *revisions*, *search_revisions* and *combinations* can be - freely mixed, but at least one of them must be given. 
- - Specify *test_suite* to set the benchmarks for experiment test - runs. By default the first gripper task is used. - - IssueExperiment(test_suite=["depot:pfile1", "tpp:p01.pddl"]) - - """ - - if is_test_run(): - kwargs["environment"] = LocalEnvironment() - suite = test_suite or self.DEFAULT_TEST_SUITE - elif "environment" not in kwargs: - kwargs["environment"] = MaiaEnvironment(priority=grid_priority) - - if path is None: - path = get_data_dir() - - if repo is None: - repo = get_repo_base() - - kwargs.setdefault("combinations", []) - - if not any([revisions, search_revisions, kwargs["combinations"]]): - raise ValueError('At least one of "revisions", "search_revisions" ' - 'or "combinations" must be given') - - if revisions: - kwargs["combinations"].extend([ - (Translator(repo, rev), - Preprocessor(repo, rev), - Planner(repo, rev)) - for rev in revisions]) - - if search_revisions: - base_rev = search_revisions[0] - # Use the same nick for all parts to get short revision nick. - kwargs["combinations"].extend([ - (Translator(repo, base_rev, nick=rev), - Preprocessor(repo, base_rev, nick=rev), - Planner(repo, rev, nick=rev)) - for rev in search_revisions]) - - DownwardExperiment.__init__(self, path=path, repo=repo, **kwargs) - - self._config_nicks = [] - for nick, config in configs.items(): - self.add_config(nick, config) - - self.add_suite(suite) - - @property - def revision_nicks(self): - # TODO: Once the add_algorithm() API is available we should get - # rid of the call to _get_rev_nick() and avoid inspecting the - # list of combinations by setting and saving the algorithm nicks. - return [_get_rev_nick(*combo) for combo in self.combinations] - - def add_config(self, nick, config, timeout=None): - DownwardExperiment.add_config(self, nick, config, timeout=timeout) - self._config_nicks.append(nick) - - def add_absolute_report_step(self, **kwargs): - """Add step that makes an absolute report. 
- - Absolute reports are useful for experiments that don't - compare revisions. - - The report is written to the experiment evaluation directory. - - All *kwargs* will be passed to the AbsoluteReport class. If - the keyword argument *attributes* is not specified, a - default list of attributes is used. :: - - exp.add_absolute_report_step(attributes=["coverage"]) - - """ - kwargs.setdefault("attributes", self.DEFAULT_TABLE_ATTRIBUTES) - report = AbsoluteReport(**kwargs) - outfile = get_experiment_name() + "." + report.output_format - self.add_report(report, outfile=outfile) - - def add_comparison_table_step(self, **kwargs): - """Add a step that makes pairwise revision comparisons. - - Create comparative reports for all pairs of Fast Downward - revision triples. Each report pairs up the runs of the same - config and lists the two absolute attribute values and their - difference for all attributes in kwargs["attributes"]. - - All *kwargs* will be passed to the CompareRevisionsReport - class. If the keyword argument *attributes* is not - specified, a default list of attributes is used. :: - - exp.add_comparison_table_step(attributes=["coverage"]) - - """ - kwargs.setdefault("attributes", self.DEFAULT_TABLE_ATTRIBUTES) - - def make_comparison_tables(): - for rev1, rev2 in itertools.combinations(self.revision_nicks, 2): - report = CompareRevisionsReport(rev1, rev2, **kwargs) - outfile = os.path.join(self.eval_dir, - "%s-%s-%s-compare.html" % - (self.name, rev1, rev2)) - report(self.eval_dir, outfile) - - self.add_step(Step("make-comparison-tables", make_comparison_tables)) - - def add_scatter_plot_step(self, attributes=None, relative=False): - """Add a step that creates scatter plots for all revision pairs. - - Create a scatter plot for each combination of attribute, - configuration and revision pair. If *attributes* is not - specified, a list of common scatter plot attributes is used. 
- For portfolios all attributes except "cost", "coverage" and - "plan_length" will be ignored. :: - - exp.add_scatter_plot_step(attributes=["expansions"]) - - Use `relative=True` to create relative scatter plots. :: - - exp.add_scatter_plot_step(relative=True) - - """ - if attributes is None: - attributes = self.DEFAULT_SCATTER_PLOT_ATTRIBUTES - - if relative: - scatter_plot_class = RelativeScatterPlotReport - scatter_dir = os.path.join(self.eval_dir, "relative-scatter") - else: - scatter_plot_class = ScatterPlotReport - scatter_dir = os.path.join(self.eval_dir, "scatter") - - def is_portfolio(config_nick): - return "fdss" in config_nick - - def make_scatter_plot(config_nick, rev1, rev2, attribute): - name = "-".join([self.name, rev1, rev2, attribute, config_nick]) - print "Make scatter plot for", name - algo1 = "%s-%s" % (rev1, config_nick) - algo2 = "%s-%s" % (rev2, config_nick) - report = scatter_plot_class( - filter_config=[algo1, algo2], - attributes=[attribute], - get_category=lambda run1, run2: run1["domain"], - legend_location=(1.3, 0.5)) - report(self.eval_dir, - os.path.join(scatter_dir, rev1 + "-" + rev2, name)) - - def make_scatter_plots(): - for config_nick in self._config_nicks: - if is_portfolio(config_nick): - valid_attributes = [ - attr for attr in attributes - if attr in self.PORTFOLIO_ATTRIBUTES] - else: - valid_attributes = attributes - for rev1, rev2 in itertools.combinations( - self.revision_nicks, 2): - for attribute in valid_attributes: - make_scatter_plot(config_nick, rev1, rev2, attribute) - - self.add_step(Step("make-scatter-plots", make_scatter_plots)) diff --git a/experiments/issue77/configs.py b/experiments/issue77/configs.py deleted file mode 100644 index e47acd9852..0000000000 --- a/experiments/issue77/configs.py +++ /dev/null @@ -1,222 +0,0 @@ -def configs_optimal_core(): - return { - # A* - "astar_blind": [ - "--search", - "astar(blind)"], - "astar_h2": [ - "--search", - "astar(hm(2))"], - "astar_ipdb": [ - "--search", - 
"astar(ipdb)"], - "astar_lmcount_lm_merged_rhw_hm": [ - "--search", - "astar(lmcount(lm_merged([lm_rhw(),lm_hm(m=1)]),admissible=true),mpd=true)"], - "astar_lmcut": [ - "--search", - "astar(lmcut)"], - "astar_hmax": [ - "--search", - "astar(hmax)"], - "astar_merge_and_shrink_bisim": [ - "--search", - "astar(merge_and_shrink(" - "merge_strategy=merge_linear(variable_order=reverse_level)," - "shrink_strategy=shrink_bisimulation(max_states=200000,greedy=false," - "group_by_h=true)))"], - "astar_merge_and_shrink_greedy_bisim": [ - "--search", - "astar(merge_and_shrink(" - "merge_strategy=merge_linear(variable_order=reverse_level)," - "shrink_strategy=shrink_bisimulation(max_states=infinity,threshold=1," - "greedy=true,group_by_h=false)))"], - "astar_merge_and_shrink_dfp_bisim": [ - "--search", - "astar(merge_and_shrink(merge_strategy=merge_dfp," - "shrink_strategy=shrink_bisimulation(max_states=50000,threshold=1," - "greedy=false,group_by_h=true)))"], - #"astar_selmax_lmcut_lmcount": [ - # "--search", - # "astar(selmax([lmcut(),lmcount(lm_merged([lm_hm(m=1),lm_rhw()])," - # "admissible=true)],training_set=1000),mpd=true)"], - } - - -def configs_satisficing_core(): - return { - # A* - "astar_goalcount": [ - "--search", - "astar(goalcount)"], - # eager greedy - "eager_greedy_ff": [ - "--heuristic", - "h=ff()", - "--search", - "eager_greedy(h, preferred=h)"], - "eager_greedy_add": [ - "--heuristic", - "h=add()", - "--search", - "eager_greedy(h, preferred=h)"], - "eager_greedy_cg": [ - "--heuristic", - "h=cg()", - "--search", - "eager_greedy(h, preferred=h)"], - "eager_greedy_cea": [ - "--heuristic", - "h=cea()", - "--search", - "eager_greedy(h, preferred=h)"], - # lazy greedy - "lazy_greedy_ff": [ - "--heuristic", - "h=ff()", - "--search", - "lazy_greedy(h, preferred=h)"], - "lazy_greedy_add": [ - "--heuristic", - "h=add()", - "--search", - "lazy_greedy(h, preferred=h)"], - "lazy_greedy_cg": [ - "--heuristic", - "h=cg()", - "--search", - "lazy_greedy(h, preferred=h)"], - 
} - - -def configs_optimal_ipc(): - return { - "seq_opt_merge_and_shrink": ["ipc", "seq-opt-merge-and-shrink"], - "seq_opt_fdss_1": ["ipc", "seq-opt-fdss-1"], - "seq_opt_fdss_2": ["ipc", "seq-opt-fdss-2"], - } - - -def configs_satisficing_ipc(): - return { - "seq_sat_lama_2011": ["ipc", "seq-sat-lama-2011"], - "seq_sat_fdss_1": ["ipc", "seq-sat-fdss-1"], - "seq_sat_fdss_2": ["ipc", "seq-sat-fdss-2"], - } - - -def configs_optimal_extended(): - return { - # A* - "astar_lmcount_lm_merged_rhw_hm_no_order": [ - "--search", - "astar(lmcount(lm_merged([lm_rhw(),lm_hm(m=1)]),admissible=true),mpd=true)"], - } - - -def configs_satisficing_extended(): - return { - # eager greedy - "eager_greedy_alt_ff_cg": [ - "--heuristic", - "hff=ff()", - "--heuristic", - "hcg=cg()", - "--search", - "eager_greedy(hff,hcg,preferred=[hff,hcg])"], - "eager_greedy_ff_no_pref": [ - "--search", - "eager_greedy(ff())"], - # lazy greedy - "lazy_greedy_alt_cea_cg": [ - "--heuristic", - "hcea=cea()", - "--heuristic", - "hcg=cg()", - "--search", - "lazy_greedy(hcea,hcg,preferred=[hcea,hcg])"], - "lazy_greedy_ff_no_pref": [ - "--search", - "lazy_greedy(ff())"], - "lazy_greedy_cea": [ - "--heuristic", - "h=cea()", - "--search", - "lazy_greedy(h, preferred=h)"], - # lazy wA* - "lazy_wa3_ff": [ - "--heuristic", - "h=ff()", - "--search", - "lazy_wastar(h,w=3,preferred=h)"], - # eager wA* - "eager_wa3_cg": [ - "--heuristic", - "h=cg()", - "--search", - "eager(single(sum([g(),weight(h,3)])),preferred=h)"], - # ehc - "ehc_ff": [ - "--search", - "ehc(ff())"], - # iterated - "iterated_wa_ff": [ - "--heuristic", - "h=ff()", - "--search", - "iterated([lazy_wastar(h,w=10), lazy_wastar(h,w=5), lazy_wastar(h,w=3)," - "lazy_wastar(h,w=2), lazy_wastar(h,w=1)])"], - # pareto open list - "eager_pareto_ff": [ - "--heuristic", - "h=ff()", - "--search", - "eager(pareto([sum([g(), h]), h]), reopen_closed=true," - "f_eval=sum([g(), h]))"], - # bucket-based open list - "eager_bucket_lmcut": [ - "--heuristic", - "h=lmcut()", - 
"--search", - "eager(single_buckets(h), reopen_closed=true)"], - # LAMA's first iteration - "lama_first": [ - "--if-unit-cost", - "--heuristic", - "hlm,hff=lm_ff_syn(lm_rhw(reasonable_orders=true))", - "--search", - "lazy_greedy([hff,hlm],preferred=[hff,hlm])", - "--if-non-unit-cost", - "--heuristic", - "hlm1,hff1=lm_ff_syn(lm_rhw(reasonable_orders=true," - " lm_cost_type=one,cost_type=one))", - "--heuristic", - "hlm2,hff2=lm_ff_syn(lm_rhw(reasonable_orders=true," - " lm_cost_type=plusone,cost_type=plusone))", - "--search", - "lazy_greedy([hff1,hlm1],preferred=[hff1,hlm1]," - " cost_type=one,reopen_closed=false)", - "--always"], - } - - -def default_configs_optimal(core=True, ipc=True, extended=False): - configs = {} - if core: - configs.update(configs_optimal_core()) - if ipc: - configs.update(configs_optimal_ipc()) - if extended: - configs.update(configs_optimal_extended()) - return configs - - -def default_configs_satisficing(core=True, ipc=True, extended=False): - configs = {} - if core: - configs.update(configs_satisficing_core()) - if ipc: - configs.update(configs_satisficing_ipc()) - if extended: - configs.update(configs_satisficing_extended()) - return configs diff --git a/experiments/issue77/issue77-opt1.py b/experiments/issue77/issue77-opt1.py deleted file mode 100755 index 7ad5ad80ce..0000000000 --- a/experiments/issue77/issue77-opt1.py +++ /dev/null @@ -1,33 +0,0 @@ -#! /usr/bin/env python -# -*- coding: utf-8 -*- - -import downward.configs -import downward.suites - -# "ipc=False" skips portfolio configurations which we don't need to -# test here. 
-CONFIGS = downward.configs.default_configs_optimal(ipc=False, extended=True) - -# pathmax is gone in this branch, remove it: -for key, value in list(CONFIGS.items()): - for pos, arg in enumerate(value): - if ", pathmax=false" in arg: - value[pos] = arg.replace(", pathmax=false", "") - -# selmax is currently disabled -del CONFIGS["astar_selmax_lmcut_lmcount"] - -SUITE = downward.suites.suite_optimal_with_ipc11() - -import common_setup - -exp = common_setup.IssueExperiment( - search_revisions=["issue77-base", "issue77-v2"], - configs=CONFIGS, - suite=SUITE - ) -exp.add_absolute_report_step() -exp.add_comparison_table_step() -# exp.add_scatter_plot_step() - -exp() diff --git a/experiments/issue77/issue77-sat1.py b/experiments/issue77/issue77-sat1.py deleted file mode 100755 index e241c00267..0000000000 --- a/experiments/issue77/issue77-sat1.py +++ /dev/null @@ -1,42 +0,0 @@ -#! /usr/bin/env python -# -*- coding: utf-8 -*- - -import downward.configs -import downward.suites - -CONFIGS = downward.configs.default_configs_satisficing(extended=True) - -# The following lines remove some configs that we don't currently -# support because the respective configurations are commented out - -DISABLED = [ - "seq_sat_fdss_1", - "seq_sat_fdss_2", - "seq_sat_lama_2011", -] -for key, value in list(CONFIGS.items()): - if key in DISABLED or key.startswith(("lazy", "iterated", "ehc")): - del CONFIGS[key] - else: - for pos, arg in enumerate(value): - if ", pathmax=false" in arg: - # pathmax is gone in this branch - value[pos] = arg.replace(", pathmax=false", "") -print(sorted(CONFIGS.keys())) -print(len(CONFIGS)) - - -SUITE = downward.suites.suite_satisficing_with_ipc11() - -import common_setup - -exp = common_setup.IssueExperiment( - search_revisions=["issue77-base", "issue77-v1"], - configs=CONFIGS, - suite=SUITE - ) -exp.add_absolute_report_step() -exp.add_comparison_table_step() -# exp.add_scatter_plot_step() - -exp() diff --git a/experiments/issue77/issue77-sat2.py 
b/experiments/issue77/issue77-sat2.py deleted file mode 100755 index 4ea6aa544f..0000000000 --- a/experiments/issue77/issue77-sat2.py +++ /dev/null @@ -1,26 +0,0 @@ -#! /usr/bin/env python -# -*- coding: utf-8 -*- - -import common_setup -import downward.suites - -# This experiment only tests the Lama-FF synergy, which sat1 did not -# test because it did not work in the issue77 branch. -CONFIGS = { - "synergy": - ["--heuristic", "hlm,hff=lm_ff_syn(lm_rhw(reasonable_orders=true))", - "--search", "eager_greedy([hff,hlm],preferred=[hff,hlm])"], - } - -SUITE = downward.suites.suite_satisficing_with_ipc11() - -exp = common_setup.IssueExperiment( - search_revisions=["issue77-v3-base", "issue77-v3"], - configs=CONFIGS, - suite=SUITE - ) -exp.add_absolute_report_step() -exp.add_comparison_table_step() -exp.add_scatter_plot_step() - -exp() diff --git a/experiments/issue77/issue77-v4-opt.py b/experiments/issue77/issue77-v4-opt.py deleted file mode 100755 index 1db2ee1b60..0000000000 --- a/experiments/issue77/issue77-v4-opt.py +++ /dev/null @@ -1,26 +0,0 @@ -#! /usr/bin/env python -# -*- coding: utf-8 -*- - -import downward.suites - -import common_setup -import configs - -CONFIGS = configs.default_configs_optimal(ipc=False, extended=False) - -print(sorted(CONFIGS.keys())) -print(len(CONFIGS)) - - -SUITE = downward.suites.suite_optimal_with_ipc11() - -exp = common_setup.IssueExperiment( - search_revisions=["issue77-v3", "issue77-v4"], - configs=CONFIGS, - suite=SUITE - ) -exp.add_absolute_report_step() -exp.add_comparison_table_step() -# exp.add_scatter_plot_step() - -exp() diff --git a/experiments/issue77/issue77-v4-sat-eager.py b/experiments/issue77/issue77-v4-sat-eager.py deleted file mode 100755 index c8c4d81f1c..0000000000 --- a/experiments/issue77/issue77-v4-sat-eager.py +++ /dev/null @@ -1,34 +0,0 @@ -#! 
/usr/bin/env python -# -*- coding: utf-8 -*- - -import downward.suites - -import common_setup -import configs - -CONFIGS = configs.default_configs_satisficing(ipc=False, extended=False) - -# The following lines remove some configs that we don't currently -# support. - -DISABLED = [ -] -for key, value in list(CONFIGS.items()): - if key in DISABLED or key.startswith(("lazy", "iterated", "ehc")): - del CONFIGS[key] -print(sorted(CONFIGS.keys())) -print(len(CONFIGS)) - - -SUITE = downward.suites.suite_satisficing_with_ipc11() - -exp = common_setup.IssueExperiment( - search_revisions=["issue77-v3", "issue77-v4"], - configs=CONFIGS, - suite=SUITE - ) -exp.add_absolute_report_step() -exp.add_comparison_table_step() -# exp.add_scatter_plot_step() - -exp() diff --git a/experiments/issue77/issue77-v4-sat-lazy.py b/experiments/issue77/issue77-v4-sat-lazy.py deleted file mode 100755 index 0ddf3c8286..0000000000 --- a/experiments/issue77/issue77-v4-sat-lazy.py +++ /dev/null @@ -1,31 +0,0 @@ -#! /usr/bin/env python -# -*- coding: utf-8 -*- - -import downward.suites - -import common_setup -import configs - -CONFIGS = configs.default_configs_satisficing(ipc=False, extended=False) - -DISABLED = [ -] -for key, value in list(CONFIGS.items()): - if not key.startswith("lazy"): - del CONFIGS[key] -print(sorted(CONFIGS.keys())) -print(len(CONFIGS)) - - -SUITE = downward.suites.suite_satisficing_with_ipc11() - -exp = common_setup.IssueExperiment( - search_revisions=["issue77-base", "issue77-v4"], - configs=CONFIGS, - suite=SUITE - ) -exp.add_absolute_report_step() -exp.add_comparison_table_step() -# exp.add_scatter_plot_step() - -exp() diff --git a/experiments/issue77/issue77-v5-sat-lazy.py b/experiments/issue77/issue77-v5-sat-lazy.py deleted file mode 100755 index cbb3523a59..0000000000 --- a/experiments/issue77/issue77-v5-sat-lazy.py +++ /dev/null @@ -1,29 +0,0 @@ -#! 
/usr/bin/env python -# -*- coding: utf-8 -*- - -import downward.suites - -import common_setup -import configs - -CONFIGS = {} -INCLUDE = ("lazy", "lama") -EXCLUDE = ("lazy_greedy_add", "lazy_greedy_cea", "lazy_greedy_cg") -for key, value in configs.default_configs_satisficing(ipc=False, extended=True).items(): - if any(x in key for x in INCLUDE) and not any(x in key for x in EXCLUDE): - CONFIGS[key] = value -print(sorted(CONFIGS.keys())) -print(len(CONFIGS)) - -SUITE = downward.suites.suite_satisficing_with_ipc11() - -exp = common_setup.IssueExperiment( - search_revisions=["issue77-v5-base", "issue77-v5"], - configs=CONFIGS, - suite=SUITE - ) -exp.add_absolute_report_step() -exp.add_comparison_table_step() -# exp.add_scatter_plot_step() - -exp() diff --git a/experiments/issue77/issue77-v6-sat-ehc.py b/experiments/issue77/issue77-v6-sat-ehc.py deleted file mode 100755 index f93b85e615..0000000000 --- a/experiments/issue77/issue77-v6-sat-ehc.py +++ /dev/null @@ -1,28 +0,0 @@ -#! /usr/bin/env python -# -*- coding: utf-8 -*- - -import downward.suites - -import common_setup - -CONFIGS = { - "ehc_ff": [ - "--search", "ehc(ff())"], - "ehc_add_pref": [ - "--heuristic", "hadd=add()", "--search", "ehc(hadd, preferred=[hadd])"], - #"ehc_add_ff_pref": [ - # "--search", "ehc(add(), preferred=[ff()],preferred_usage=RANK_PREFERRED_FIRST)"], -} - -SUITE = downward.suites.suite_satisficing_with_ipc11() - -exp = common_setup.IssueExperiment( - search_revisions=["issue77-v6-base", "issue77-v6"], - configs=CONFIGS, - suite=SUITE - ) -exp.add_absolute_report_step() -exp.add_comparison_table_step() -# exp.add_scatter_plot_step() - -exp() diff --git a/experiments/issue77/issue77-v7-opt.py b/experiments/issue77/issue77-v7-opt.py deleted file mode 100755 index d8f89ba0d7..0000000000 --- a/experiments/issue77/issue77-v7-opt.py +++ /dev/null @@ -1,26 +0,0 @@ -#! 
/usr/bin/env python -# -*- coding: utf-8 -*- - -import downward.suites - -import common_setup -import configs - -CONFIGS = configs.default_configs_optimal(ipc=False, extended=False) - -print(sorted(CONFIGS.keys())) -print(len(CONFIGS)) - -SUITE = downward.suites.suite_optimal_with_ipc11() -SCATTER_ATTRIBUTES = ["total_time"] - -exp = common_setup.IssueExperiment( - search_revisions=["issue77-v7-base", "issue77-v7"], - configs=CONFIGS, - suite=SUITE - ) -exp.add_absolute_report_step() -exp.add_comparison_table_step() -exp.add_scatter_plot_step(attributes=SCATTER_ATTRIBUTES, relative=True) - -exp() diff --git a/experiments/issue77/issue77-v7-sat-eager.py b/experiments/issue77/issue77-v7-sat-eager.py deleted file mode 100755 index 1a1f5165b9..0000000000 --- a/experiments/issue77/issue77-v7-sat-eager.py +++ /dev/null @@ -1,31 +0,0 @@ -#! /usr/bin/env python -# -*- coding: utf-8 -*- - -import downward.suites - -import common_setup -import configs - -NICKS = [ - 'eager_greedy_alt_ff_cg', 'eager_greedy_ff', 'eager_greedy_ff_no_pref', - 'eager_pareto_ff', 'eager_wa3_cg' -] -CONFIGS = {} -for nick in NICKS: - CONFIGS[nick] = configs.default_configs_satisficing(ipc=False, extended=True)[nick] - -print(sorted(CONFIGS.keys())) -print(len(CONFIGS)) - -SUITE = downward.suites.suite_satisficing_with_ipc11() - -exp = common_setup.IssueExperiment( - search_revisions=["issue77-v7-base", "issue77-v7"], - configs=CONFIGS, - suite=SUITE - ) -exp.add_absolute_report_step() -exp.add_comparison_table_step() -# exp.add_scatter_plot_step() - -exp() diff --git a/experiments/issue77/relative_scatter.py b/experiments/issue77/relative_scatter.py deleted file mode 100644 index 46577f3891..0000000000 --- a/experiments/issue77/relative_scatter.py +++ /dev/null @@ -1,85 +0,0 @@ -from collections import defaultdict - -from downward.reports.scatter import ScatterPlotReport -from downward.reports.plot import PlotReport - - -EPSILON = 0.01 - - -def get_relative_change(val1, val2): - """ - >>> 
get_relative_change(10, 0) - -999.0 - >>> get_relative_change(10, 1) - -9.0 - >>> get_relative_change(10, 5) - -1.0 - >>> get_relative_change(10, 10) - 0.0 - >>> get_relative_change(10, 15) - 0.5 - >>> get_relative_change(10, 20) - 1.0 - >>> get_relative_change(10, 100) - 9.0 - >>> get_relative_change(0, 10) - 999.0 - >>> get_relative_change(0, 0) - 0.0 - """ - assert val1 >= 0, val1 - assert val2 >= 0, val2 - if val1 == 0: - val1 = EPSILON - if val2 == 0: - val2 = EPSILON - if val1 > val2: - return 1 - val1 / float(val2) - return val2 / float(val1) - 1 - - -class RelativeScatterPlotReport(ScatterPlotReport): - """ - Generate a scatter plot that shows a specific attribute in two - configurations. The attribute value in config 1 is shown on the - x-axis and the relation to the value in config 2 on the y-axis. - If the value for config 1 is v1 and the value for config 2 is v2, - the plot contains the point (v1, 1 - v1/v2) if v1 > v2 and the - point (v1, v2/v1 - 1) otherwise. - """ - - def _fill_categories(self, runs): - # We discard the *runs* parameter. - # Map category names to value tuples. 
- categories = defaultdict(list) - self.ylim_bottom = 0 - self.ylim_top = 0 - for (domain, problem), runs in self.problem_runs.items(): - if len(runs) != 2: - continue - run1, run2 = runs - assert (run1['config'] == self.configs[0] and - run2['config'] == self.configs[1]) - val1 = run1.get(self.attribute) - val2 = run2.get(self.attribute) - if val1 is None or val2 is None: - continue - category = self.get_category(run1, run2) - assert val1 >= 0, (domain, problem, self.configs[0], val1) - assert val2 >= 0, (domain, problem, self.configs[1], val2) - x = val1 - y = get_relative_change(val1, val2) - categories[category].append((x, y)) - self.ylim_bottom = min(self.ylim_bottom, y) - self.ylim_top = max(self.ylim_top, y) - self.ylim_bottom *= 1.1 - self.ylim_top *= 1.1 - return categories - - def _set_scales(self, xscale, yscale): - # ScatterPlots use log-scaling on the x-axis by default. - default_xscale = 'log' - if self.attribute and self.attribute in self.LINEAR: - default_xscale = 'linear' - PlotReport._set_scales(self, xscale or default_xscale, 'linear') diff --git a/experiments/issue773/common_setup.py b/experiments/issue773/common_setup.py deleted file mode 100644 index 687019c482..0000000000 --- a/experiments/issue773/common_setup.py +++ /dev/null @@ -1,382 +0,0 @@ -# -*- coding: utf-8 -*- - -import itertools -import os -import platform -import subprocess -import sys - -from lab.experiment import ARGPARSER -from lab import tools - -from downward.experiment import FastDownwardExperiment -from downward.reports.absolute import AbsoluteReport -from downward.reports.compare import ComparativeReport -from downward.reports.scatter import ScatterPlotReport - -from relativescatter import RelativeScatterPlotReport - - -def parse_args(): - ARGPARSER.add_argument( - "--test", - choices=["yes", "no", "auto"], - default="auto", - dest="test_run", - help="test experiment locally on a small suite if --test=yes or " - "--test=auto and we are not on a cluster") - return 
ARGPARSER.parse_args() - -ARGS = parse_args() - - -DEFAULT_OPTIMAL_SUITE = [ - 'airport', 'barman-opt11-strips', 'barman-opt14-strips', 'blocks', - 'childsnack-opt14-strips', 'depot', 'driverlog', - 'elevators-opt08-strips', 'elevators-opt11-strips', - 'floortile-opt11-strips', 'floortile-opt14-strips', 'freecell', - 'ged-opt14-strips', 'grid', 'gripper', 'hiking-opt14-strips', - 'logistics00', 'logistics98', 'miconic', 'movie', 'mprime', - 'mystery', 'nomystery-opt11-strips', 'openstacks-opt08-strips', - 'openstacks-opt11-strips', 'openstacks-opt14-strips', - 'openstacks-strips', 'parcprinter-08-strips', - 'parcprinter-opt11-strips', 'parking-opt11-strips', - 'parking-opt14-strips', 'pathways-noneg', 'pegsol-08-strips', - 'pegsol-opt11-strips', 'pipesworld-notankage', - 'pipesworld-tankage', 'psr-small', 'rovers', 'satellite', - 'scanalyzer-08-strips', 'scanalyzer-opt11-strips', - 'sokoban-opt08-strips', 'sokoban-opt11-strips', 'storage', - 'tetris-opt14-strips', 'tidybot-opt11-strips', - 'tidybot-opt14-strips', 'tpp', 'transport-opt08-strips', - 'transport-opt11-strips', 'transport-opt14-strips', - 'trucks-strips', 'visitall-opt11-strips', 'visitall-opt14-strips', - 'woodworking-opt08-strips', 'woodworking-opt11-strips', - 'zenotravel'] - -DEFAULT_SATISFICING_SUITE = [ - 'airport', 'assembly', 'barman-sat11-strips', - 'barman-sat14-strips', 'blocks', 'cavediving-14-adl', - 'childsnack-sat14-strips', 'citycar-sat14-adl', 'depot', - 'driverlog', 'elevators-sat08-strips', 'elevators-sat11-strips', - 'floortile-sat11-strips', 'floortile-sat14-strips', 'freecell', - 'ged-sat14-strips', 'grid', 'gripper', 'hiking-sat14-strips', - 'logistics00', 'logistics98', 'maintenance-sat14-adl', 'miconic', - 'miconic-fulladl', 'miconic-simpleadl', 'movie', 'mprime', - 'mystery', 'nomystery-sat11-strips', 'openstacks', - 'openstacks-sat08-adl', 'openstacks-sat08-strips', - 'openstacks-sat11-strips', 'openstacks-sat14-strips', - 'openstacks-strips', 'optical-telegraphs', 
'parcprinter-08-strips', - 'parcprinter-sat11-strips', 'parking-sat11-strips', - 'parking-sat14-strips', 'pathways', 'pathways-noneg', - 'pegsol-08-strips', 'pegsol-sat11-strips', 'philosophers', - 'pipesworld-notankage', 'pipesworld-tankage', 'psr-large', - 'psr-middle', 'psr-small', 'rovers', 'satellite', - 'scanalyzer-08-strips', 'scanalyzer-sat11-strips', 'schedule', - 'sokoban-sat08-strips', 'sokoban-sat11-strips', 'storage', - 'tetris-sat14-strips', 'thoughtful-sat14-strips', - 'tidybot-sat11-strips', 'tpp', 'transport-sat08-strips', - 'transport-sat11-strips', 'transport-sat14-strips', 'trucks', - 'trucks-strips', 'visitall-sat11-strips', 'visitall-sat14-strips', - 'woodworking-sat08-strips', 'woodworking-sat11-strips', - 'zenotravel'] - - -def get_script(): - """Get file name of main script.""" - return tools.get_script_path() - - -def get_script_dir(): - """Get directory of main script. - - Usually a relative directory (depends on how it was called by the user.)""" - return os.path.dirname(get_script()) - - -def get_experiment_name(): - """Get name for experiment. - - Derived from the absolute filename of the main script, e.g. - "/ham/spam/eggs.py" => "spam-eggs".""" - script = os.path.abspath(get_script()) - script_dir = os.path.basename(os.path.dirname(script)) - script_base = os.path.splitext(os.path.basename(script))[0] - return "%s-%s" % (script_dir, script_base) - - -def get_data_dir(): - """Get data dir for the experiment. - - This is the subdirectory "data" of the directory containing - the main script.""" - return os.path.join(get_script_dir(), "data", get_experiment_name()) - - -def get_repo_base(): - """Get base directory of the repository, as an absolute path. - - Search upwards in the directory tree from the main script until a - directory with a subdirectory named ".hg" is found. 
- - Abort if the repo base cannot be found.""" - path = os.path.abspath(get_script_dir()) - while os.path.dirname(path) != path: - if os.path.exists(os.path.join(path, ".hg")): - return path - path = os.path.dirname(path) - sys.exit("repo base could not be found") - - -def is_running_on_cluster(): - node = platform.node() - return node.endswith(".scicore.unibas.ch") or node.endswith(".cluster.bc2.ch") - - -def is_test_run(): - return ARGS.test_run == "yes" or ( - ARGS.test_run == "auto" and not is_running_on_cluster()) - - -def get_algo_nick(revision, config_nick): - return "{revision}-{config_nick}".format(**locals()) - - -class IssueConfig(object): - """Hold information about a planner configuration. - - See FastDownwardExperiment.add_algorithm() for documentation of the - constructor's options. - - """ - def __init__(self, nick, component_options, - build_options=None, driver_options=None): - self.nick = nick - self.component_options = component_options - self.build_options = build_options - self.driver_options = driver_options - - -class IssueExperiment(FastDownwardExperiment): - """Subclass of FastDownwardExperiment with some convenience features.""" - - DEFAULT_TEST_SUITE = ["depot:p01.pddl", "gripper:prob01.pddl"] - - DEFAULT_TABLE_ATTRIBUTES = [ - "cost", - "coverage", - "error", - "evaluations", - "expansions", - "expansions_until_last_jump", - "generated", - "memory", - "quality", - "run_dir", - "score_evaluations", - "score_expansions", - "score_generated", - "score_memory", - "score_search_time", - "score_total_time", - "search_time", - "total_time", - ] - - DEFAULT_SCATTER_PLOT_ATTRIBUTES = [ - "evaluations", - "expansions", - "expansions_until_last_jump", - "initial_h_value", - "memory", - "search_time", - "total_time", - ] - - PORTFOLIO_ATTRIBUTES = [ - "cost", - "coverage", - "error", - "plan_length", - "run_dir", - ] - - def __init__(self, revisions=None, configs=None, path=None, **kwargs): - """ - - You can either specify both *revisions* and 
*configs* or none - of them. If they are omitted, you will need to call - exp.add_algorithm() manually. - - If *revisions* is given, it must be a non-empty list of - revision identifiers, which specify which planner versions to - use in the experiment. The same versions are used for - translator, preprocessor and search. :: - - IssueExperiment(revisions=["issue123", "4b3d581643"], ...) - - If *configs* is given, it must be a non-empty list of - IssueConfig objects. :: - - IssueExperiment(..., configs=[ - IssueConfig("ff", ["--search", "eager_greedy(ff())"]), - IssueConfig( - "lama", [], - driver_options=["--alias", "seq-sat-lama-2011"]), - ]) - - If *path* is specified, it must be the path to where the - experiment should be built (e.g. - /home/john/experiments/issue123/exp01/). If omitted, the - experiment path is derived automatically from the main - script's filename. Example:: - - script = experiments/issue123/exp01.py --> - path = experiments/issue123/data/issue123-exp01/ - - """ - - path = path or get_data_dir() - - FastDownwardExperiment.__init__(self, path=path, **kwargs) - - if (revisions and not configs) or (not revisions and configs): - raise ValueError( - "please provide either both or none of revisions and configs") - - for rev in revisions: - for config in configs: - self.add_algorithm( - get_algo_nick(rev, config.nick), - get_repo_base(), - rev, - config.component_options, - build_options=config.build_options, - driver_options=config.driver_options) - - self._revisions = revisions - self._configs = configs - - @classmethod - def _is_portfolio(cls, config_nick): - return "fdss" in config_nick - - @classmethod - def get_supported_attributes(cls, config_nick, attributes): - if cls._is_portfolio(config_nick): - return [attr for attr in attributes - if attr in cls.PORTFOLIO_ATTRIBUTES] - return attributes - - def add_absolute_report_step(self, **kwargs): - """Add step that makes an absolute report. 
- - Absolute reports are useful for experiments that don't compare - revisions. - - The report is written to the experiment evaluation directory. - - All *kwargs* will be passed to the AbsoluteReport class. If the - keyword argument *attributes* is not specified, a default list - of attributes is used. :: - - exp.add_absolute_report_step(attributes=["coverage"]) - - """ - kwargs.setdefault("attributes", self.DEFAULT_TABLE_ATTRIBUTES) - report = AbsoluteReport(**kwargs) - outfile = os.path.join( - self.eval_dir, - get_experiment_name() + "." + report.output_format) - self.add_report(report, outfile=outfile) - self.add_step( - 'publish-absolute-report', subprocess.call, ['publish', outfile]) - - def add_comparison_table_step(self, **kwargs): - """Add a step that makes pairwise revision comparisons. - - Create comparative reports for all pairs of Fast Downward - revisions. Each report pairs up the runs of the same config and - lists the two absolute attribute values and their difference - for all attributes in kwargs["attributes"]. - - All *kwargs* will be passed to the CompareConfigsReport class. - If the keyword argument *attributes* is not specified, a - default list of attributes is used. 
:: - - exp.add_comparison_table_step(attributes=["coverage"]) - - """ - kwargs.setdefault("attributes", self.DEFAULT_TABLE_ATTRIBUTES) - - def make_comparison_tables(): - for rev1, rev2 in itertools.combinations(self._revisions, 2): - compared_configs = [] - for config in self._configs: - config_nick = config.nick - compared_configs.append( - ("%s-%s" % (rev1, config_nick), - "%s-%s" % (rev2, config_nick), - "Diff (%s)" % config_nick)) - report = ComparativeReport(compared_configs, **kwargs) - outfile = os.path.join( - self.eval_dir, - "%s-%s-%s-compare.%s" % ( - self.name, rev1, rev2, report.output_format)) - report(self.eval_dir, outfile) - - def publish_comparison_tables(): - for rev1, rev2 in itertools.combinations(self._revisions, 2): - outfile = os.path.join( - self.eval_dir, - "%s-%s-%s-compare.html" % (self.name, rev1, rev2)) - subprocess.call(["publish", outfile]) - - self.add_step("make-comparison-tables", make_comparison_tables) - self.add_step( - "publish-comparison-tables", publish_comparison_tables) - - def add_scatter_plot_step(self, relative=False, attributes=None): - """Add step creating (relative) scatter plots for all revision pairs. - - Create a scatter plot for each combination of attribute, - configuration and revisions pair. If *attributes* is not - specified, a list of common scatter plot attributes is used. - For portfolios all attributes except "cost", "coverage" and - "plan_length" will be ignored. 
:: - - exp.add_scatter_plot_step(attributes=["expansions"]) - - """ - if relative: - report_class = RelativeScatterPlotReport - scatter_dir = os.path.join(self.eval_dir, "scatter-relative") - step_name = "make-relative-scatter-plots" - else: - report_class = ScatterPlotReport - scatter_dir = os.path.join(self.eval_dir, "scatter-absolute") - step_name = "make-absolute-scatter-plots" - if attributes is None: - attributes = self.DEFAULT_SCATTER_PLOT_ATTRIBUTES - - def make_scatter_plot(config_nick, rev1, rev2, attribute): - name = "-".join([self.name, rev1, rev2, attribute, config_nick]) - print "Make scatter plot for", name - algo1 = "{}-{}".format(rev1, config_nick) - algo2 = "{}-{}".format(rev2, config_nick) - report = report_class( - filter_config=[algo1, algo2], - attributes=[attribute], - get_category=lambda run1, run2: run1["domain"], - legend_location=(1.3, 0.5)) - report( - self.eval_dir, - os.path.join(scatter_dir, rev1 + "-" + rev2, name)) - - def make_scatter_plots(): - for config in self._configs: - for rev1, rev2 in itertools.combinations(self._revisions, 2): - for attribute in self.get_supported_attributes( - config.nick, attributes): - make_scatter_plot(config.nick, rev1, rev2, attribute) - - self.add_step(step_name, make_scatter_plots) diff --git a/experiments/issue773/relativescatter.py b/experiments/issue773/relativescatter.py deleted file mode 100644 index f74cb6e721..0000000000 --- a/experiments/issue773/relativescatter.py +++ /dev/null @@ -1,105 +0,0 @@ -# -*- coding: utf-8 -*- - -from collections import defaultdict - -from matplotlib import ticker - -from downward.reports.scatter import ScatterPlotReport -from downward.reports.plot import PlotReport, Matplotlib, MatplotlibPlot - - -# TODO: handle outliers - -# TODO: this is mostly copied from ScatterMatplotlib (scatter.py) -class RelativeScatterMatplotlib(Matplotlib): - @classmethod - def _plot(cls, report, axes, categories, styles): - # Display grid - axes.grid(b=True, linestyle='-', 
color='0.75') - - has_points = False - # Generate the scatter plots - for category, coords in sorted(categories.items()): - X, Y = zip(*coords) - axes.scatter(X, Y, s=42, label=category, **styles[category]) - if X and Y: - has_points = True - - if report.xscale == 'linear' or report.yscale == 'linear': - plot_size = report.missing_val * 1.01 - else: - plot_size = report.missing_val * 1.25 - - # make 5 ticks above and below 1 - yticks = [] - tick_step = report.ylim_top**(1/5.0) - for i in xrange(-5, 6): - yticks.append(tick_step**i) - axes.set_yticks(yticks) - axes.get_yaxis().set_major_formatter(ticker.ScalarFormatter()) - - axes.set_xlim(report.xlim_left or -1, report.xlim_right or plot_size) - axes.set_ylim(report.ylim_bottom or -1, report.ylim_top or plot_size) - - for axis in [axes.xaxis, axes.yaxis]: - MatplotlibPlot.change_axis_formatter( - axis, - report.missing_val if report.show_missing else None) - return has_points - - -class RelativeScatterPlotReport(ScatterPlotReport): - """ - Generate a scatter plot that shows a relative comparison of two - algorithms with regard to the given attribute. The attribute value - of algorithm 1 is shown on the x-axis and the relation to the value - of algorithm 2 on the y-axis. - """ - - def __init__(self, show_missing=True, get_category=None, **kwargs): - ScatterPlotReport.__init__(self, show_missing, get_category, **kwargs) - if self.output_format == 'tex': - raise "not supported" - else: - self.writer = RelativeScatterMatplotlib - - def _fill_categories(self, runs): - # We discard the *runs* parameter. 
- # Map category names to value tuples - categories = defaultdict(list) - self.ylim_bottom = 2 - self.ylim_top = 0.5 - self.xlim_left = float("inf") - for (domain, problem), runs in self.problem_runs.items(): - if len(runs) != 2: - continue - run1, run2 = runs - assert (run1['algorithm'] == self.algorithms[0] and - run2['algorithm'] == self.algorithms[1]) - val1 = run1.get(self.attribute) - val2 = run2.get(self.attribute) - if val1 is None or val2 is None: - continue - category = self.get_category(run1, run2) - assert val1 > 0, (domain, problem, self.algorithms[0], val1) - assert val2 > 0, (domain, problem, self.algorithms[1], val2) - x = val1 - y = val2 / float(val1) - - categories[category].append((x, y)) - - self.ylim_top = max(self.ylim_top, y) - self.ylim_bottom = min(self.ylim_bottom, y) - self.xlim_left = min(self.xlim_left, x) - - # center around 1 - if self.ylim_bottom < 1: - self.ylim_top = max(self.ylim_top, 1 / float(self.ylim_bottom)) - if self.ylim_top > 1: - self.ylim_bottom = min(self.ylim_bottom, 1 / float(self.ylim_top)) - return categories - - def _set_scales(self, xscale, yscale): - # ScatterPlot uses log-scaling on the x-axis by default. - PlotReport._set_scales( - self, xscale or self.attribute.scale or 'log', 'log') diff --git a/experiments/issue773/v1-opt.py b/experiments/issue773/v1-opt.py deleted file mode 100755 index cf689a3b4e..0000000000 --- a/experiments/issue773/v1-opt.py +++ /dev/null @@ -1,49 +0,0 @@ -#! 
/usr/bin/env python -# -*- coding: utf-8 -*- - -import os - -from lab.environments import LocalEnvironment, BaselSlurmEnvironment - -import common_setup -from common_setup import IssueConfig, IssueExperiment -from relativescatter import RelativeScatterPlotReport - -DIR = os.path.dirname(os.path.abspath(__file__)) -BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"] -REVISIONS = ["issue773-base", "issue773-v1"] -CONFIGS = [ - IssueConfig('lmcut-sss-min-pruning-ratio', ['--search', 'astar(lmcut(), pruning=stubborn_sets_simple(min_pruning_ratio=0.2))']), - IssueConfig('lmcut-sss', ['--search', 'astar(lmcut(), pruning=stubborn_sets_simple())']), -] -SUITE = common_setup.DEFAULT_OPTIMAL_SUITE -ENVIRONMENT = BaselSlurmEnvironment( - partition="infai_1", - email="jendrik.seipp@unibas.ch", - export=["PATH", "DOWNWARD_BENCHMARKS"]) - -if common_setup.is_test_run(): - SUITE = IssueExperiment.DEFAULT_TEST_SUITE - ENVIRONMENT = LocalEnvironment(processes=1) - -exp = IssueExperiment( - revisions=REVISIONS, - configs=CONFIGS, - environment=ENVIRONMENT, -) -exp.add_suite(BENCHMARKS_DIR, SUITE) -#exp.add_absolute_report_step() -exp.add_comparison_table_step() - -for attribute in ["total_time"]: - for config in CONFIGS: - exp.add_report( - RelativeScatterPlotReport( - attributes=[attribute], - filter_algorithm=["{}-{}".format(rev, config.nick) for rev in REVISIONS], - get_category=lambda run1, run2: run1.get("domain"), - ), - outfile="{}-{}-{}-{}-{}.png".format(exp.name, attribute, config.nick, *REVISIONS) - ) - -exp.run_steps() diff --git a/experiments/issue774/common_setup.py b/experiments/issue774/common_setup.py deleted file mode 100644 index e487c1d037..0000000000 --- a/experiments/issue774/common_setup.py +++ /dev/null @@ -1,387 +0,0 @@ -# -*- coding: utf-8 -*- - -import itertools -import os -import platform -import subprocess -import sys - -from lab.experiment import ARGPARSER -from lab import tools - -from downward.experiment import FastDownwardExperiment -from 
downward.reports.absolute import AbsoluteReport -from downward.reports.compare import ComparativeReport -from downward.reports.scatter import ScatterPlotReport - -from relativescatter import RelativeScatterPlotReport - - -def parse_args(): - ARGPARSER.add_argument( - "--test", - choices=["yes", "no", "auto"], - default="auto", - dest="test_run", - help="test experiment locally on a small suite if --test=yes or " - "--test=auto and we are not on a cluster") - return ARGPARSER.parse_args() - -ARGS = parse_args() - - -DEFAULT_OPTIMAL_SUITE = [ - 'airport', 'barman-opt11-strips', 'barman-opt14-strips', 'blocks', - 'childsnack-opt14-strips', 'depot', 'driverlog', - 'elevators-opt08-strips', 'elevators-opt11-strips', - 'floortile-opt11-strips', 'floortile-opt14-strips', 'freecell', - 'ged-opt14-strips', 'grid', 'gripper', 'hiking-opt14-strips', - 'logistics00', 'logistics98', 'miconic', 'movie', 'mprime', - 'mystery', 'nomystery-opt11-strips', 'openstacks-opt08-strips', - 'openstacks-opt11-strips', 'openstacks-opt14-strips', - 'openstacks-strips', 'parcprinter-08-strips', - 'parcprinter-opt11-strips', 'parking-opt11-strips', - 'parking-opt14-strips', 'pathways-noneg', 'pegsol-08-strips', - 'pegsol-opt11-strips', 'pipesworld-notankage', - 'pipesworld-tankage', 'psr-small', 'rovers', 'satellite', - 'scanalyzer-08-strips', 'scanalyzer-opt11-strips', - 'sokoban-opt08-strips', 'sokoban-opt11-strips', 'storage', - 'tetris-opt14-strips', 'tidybot-opt11-strips', - 'tidybot-opt14-strips', 'tpp', 'transport-opt08-strips', - 'transport-opt11-strips', 'transport-opt14-strips', - 'trucks-strips', 'visitall-opt11-strips', 'visitall-opt14-strips', - 'woodworking-opt08-strips', 'woodworking-opt11-strips', - 'zenotravel'] - -DEFAULT_SATISFICING_SUITE = [ - 'airport', 'assembly', 'barman-sat11-strips', - 'barman-sat14-strips', 'blocks', 'cavediving-14-adl', - 'childsnack-sat14-strips', 'citycar-sat14-adl', 'depot', - 'driverlog', 'elevators-sat08-strips', 'elevators-sat11-strips', - 
'floortile-sat11-strips', 'floortile-sat14-strips', 'freecell', - 'ged-sat14-strips', 'grid', 'gripper', 'hiking-sat14-strips', - 'logistics00', 'logistics98', 'maintenance-sat14-adl', 'miconic', - 'miconic-fulladl', 'miconic-simpleadl', 'movie', 'mprime', - 'mystery', 'nomystery-sat11-strips', 'openstacks', - 'openstacks-sat08-adl', 'openstacks-sat08-strips', - 'openstacks-sat11-strips', 'openstacks-sat14-strips', - 'openstacks-strips', 'optical-telegraphs', 'parcprinter-08-strips', - 'parcprinter-sat11-strips', 'parking-sat11-strips', - 'parking-sat14-strips', 'pathways', 'pathways-noneg', - 'pegsol-08-strips', 'pegsol-sat11-strips', 'philosophers', - 'pipesworld-notankage', 'pipesworld-tankage', 'psr-large', - 'psr-middle', 'psr-small', 'rovers', 'satellite', - 'scanalyzer-08-strips', 'scanalyzer-sat11-strips', 'schedule', - 'sokoban-sat08-strips', 'sokoban-sat11-strips', 'storage', - 'tetris-sat14-strips', 'thoughtful-sat14-strips', - 'tidybot-sat11-strips', 'tpp', 'transport-sat08-strips', - 'transport-sat11-strips', 'transport-sat14-strips', 'trucks', - 'trucks-strips', 'visitall-sat11-strips', 'visitall-sat14-strips', - 'woodworking-sat08-strips', 'woodworking-sat11-strips', - 'zenotravel'] - - -def get_script(): - """Get file name of main script.""" - return tools.get_script_path() - - -def get_script_dir(): - """Get directory of main script. - - Usually a relative directory (depends on how it was called by the user.)""" - return os.path.dirname(get_script()) - - -def get_experiment_name(): - """Get name for experiment. - - Derived from the absolute filename of the main script, e.g. - "/ham/spam/eggs.py" => "spam-eggs".""" - script = os.path.abspath(get_script()) - script_dir = os.path.basename(os.path.dirname(script)) - script_base = os.path.splitext(os.path.basename(script))[0] - return "%s-%s" % (script_dir, script_base) - - -def get_data_dir(): - """Get data dir for the experiment. 
- - This is the subdirectory "data" of the directory containing - the main script.""" - return os.path.join(get_script_dir(), "data", get_experiment_name()) - - -def get_repo_base(): - """Get base directory of the repository, as an absolute path. - - Search upwards in the directory tree from the main script until a - directory with a subdirectory named ".hg" is found. - - Abort if the repo base cannot be found.""" - path = os.path.abspath(get_script_dir()) - while os.path.dirname(path) != path: - if os.path.exists(os.path.join(path, ".hg")): - return path - path = os.path.dirname(path) - sys.exit("repo base could not be found") - - -def is_running_on_cluster(): - node = platform.node() - return ( - node == "login-infai.scicore.unibas.ch" or - node.endswith(".cluster.bc2.ch") or - "cluster" in node or - node.startswith("gkigrid") or - node in ["habakuk", "turtur"]) - - -def is_test_run(): - return ARGS.test_run == "yes" or ( - ARGS.test_run == "auto" and not is_running_on_cluster()) - - -def get_algo_nick(revision, config_nick): - return "{revision}-{config_nick}".format(**locals()) - - -class IssueConfig(object): - """Hold information about a planner configuration. - - See FastDownwardExperiment.add_algorithm() for documentation of the - constructor's options. 
- - """ - def __init__(self, nick, component_options, - build_options=None, driver_options=None): - self.nick = nick - self.component_options = component_options - self.build_options = build_options - self.driver_options = driver_options - - -class IssueExperiment(FastDownwardExperiment): - """Subclass of FastDownwardExperiment with some convenience features.""" - - DEFAULT_TEST_SUITE = ["gripper:prob01.pddl"] - - DEFAULT_TABLE_ATTRIBUTES = [ - "cost", - "coverage", - "error", - "evaluations", - "expansions", - "expansions_until_last_jump", - "generated", - "memory", - "quality", - "run_dir", - "score_evaluations", - "score_expansions", - "score_generated", - "score_memory", - "score_search_time", - "score_total_time", - "search_time", - "total_time", - ] - - DEFAULT_SCATTER_PLOT_ATTRIBUTES = [ - "evaluations", - "expansions", - "expansions_until_last_jump", - "initial_h_value", - "memory", - "search_time", - "total_time", - ] - - PORTFOLIO_ATTRIBUTES = [ - "cost", - "coverage", - "error", - "plan_length", - "run_dir", - ] - - def __init__(self, revisions=None, configs=None, path=None, **kwargs): - """ - - You can either specify both *revisions* and *configs* or none - of them. If they are omitted, you will need to call - exp.add_algorithm() manually. - - If *revisions* is given, it must be a non-empty list of - revision identifiers, which specify which planner versions to - use in the experiment. The same versions are used for - translator, preprocessor and search. :: - - IssueExperiment(revisions=["issue123", "4b3d581643"], ...) - - If *configs* is given, it must be a non-empty list of - IssueConfig objects. :: - - IssueExperiment(..., configs=[ - IssueConfig("ff", ["--search", "eager_greedy(ff())"]), - IssueConfig( - "lama", [], - driver_options=["--alias", "seq-sat-lama-2011"]), - ]) - - If *path* is specified, it must be the path to where the - experiment should be built (e.g. - /home/john/experiments/issue123/exp01/). 
If omitted, the - experiment path is derived automatically from the main - script's filename. Example:: - - script = experiments/issue123/exp01.py --> - path = experiments/issue123/data/issue123-exp01/ - - """ - - path = path or get_data_dir() - - FastDownwardExperiment.__init__(self, path=path, **kwargs) - - if (revisions and not configs) or (not revisions and configs): - raise ValueError( - "please provide either both or none of revisions and configs") - - for rev in revisions: - for config in configs: - self.add_algorithm( - get_algo_nick(rev, config.nick), - get_repo_base(), - rev, - config.component_options, - build_options=config.build_options, - driver_options=config.driver_options) - - self._revisions = revisions - self._configs = configs - - @classmethod - def _is_portfolio(cls, config_nick): - return "fdss" in config_nick - - @classmethod - def get_supported_attributes(cls, config_nick, attributes): - if cls._is_portfolio(config_nick): - return [attr for attr in attributes - if attr in cls.PORTFOLIO_ATTRIBUTES] - return attributes - - def add_absolute_report_step(self, **kwargs): - """Add step that makes an absolute report. - - Absolute reports are useful for experiments that don't compare - revisions. - - The report is written to the experiment evaluation directory. - - All *kwargs* will be passed to the AbsoluteReport class. If the - keyword argument *attributes* is not specified, a default list - of attributes is used. :: - - exp.add_absolute_report_step(attributes=["coverage"]) - - """ - kwargs.setdefault("attributes", self.DEFAULT_TABLE_ATTRIBUTES) - report = AbsoluteReport(**kwargs) - outfile = os.path.join( - self.eval_dir, - get_experiment_name() + "." + report.output_format) - self.add_report(report, outfile=outfile) - self.add_step( - 'publish-absolute-report', subprocess.call, ['publish', outfile]) - - def add_comparison_table_step(self, **kwargs): - """Add a step that makes pairwise revision comparisons. 
- - Create comparative reports for all pairs of Fast Downward - revisions. Each report pairs up the runs of the same config and - lists the two absolute attribute values and their difference - for all attributes in kwargs["attributes"]. - - All *kwargs* will be passed to the CompareConfigsReport class. - If the keyword argument *attributes* is not specified, a - default list of attributes is used. :: - - exp.add_comparison_table_step(attributes=["coverage"]) - - """ - kwargs.setdefault("attributes", self.DEFAULT_TABLE_ATTRIBUTES) - - def make_comparison_tables(): - for rev1, rev2 in itertools.combinations(self._revisions, 2): - compared_configs = [] - for config in self._configs: - config_nick = config.nick - compared_configs.append( - ("%s-%s" % (rev1, config_nick), - "%s-%s" % (rev2, config_nick), - "Diff (%s)" % config_nick)) - report = ComparativeReport(compared_configs, **kwargs) - outfile = os.path.join( - self.eval_dir, - "%s-%s-%s-compare.%s" % ( - self.name, rev1, rev2, report.output_format)) - report(self.eval_dir, outfile) - - def publish_comparison_tables(): - for rev1, rev2 in itertools.combinations(self._revisions, 2): - outfile = os.path.join( - self.eval_dir, - "%s-%s-%s-compare.html" % (self.name, rev1, rev2)) - subprocess.call(["publish", outfile]) - - self.add_step("make-comparison-tables", make_comparison_tables) - self.add_step( - "publish-comparison-tables", publish_comparison_tables) - - def add_scatter_plot_step(self, relative=False, attributes=None): - """Add step creating (relative) scatter plots for all revision pairs. - - Create a scatter plot for each combination of attribute, - configuration and revisions pair. If *attributes* is not - specified, a list of common scatter plot attributes is used. - For portfolios all attributes except "cost", "coverage" and - "plan_length" will be ignored. 
:: - - exp.add_scatter_plot_step(attributes=["expansions"]) - - """ - if relative: - report_class = RelativeScatterPlotReport - scatter_dir = os.path.join(self.eval_dir, "scatter-relative") - step_name = "make-relative-scatter-plots" - else: - report_class = ScatterPlotReport - scatter_dir = os.path.join(self.eval_dir, "scatter-absolute") - step_name = "make-absolute-scatter-plots" - if attributes is None: - attributes = self.DEFAULT_SCATTER_PLOT_ATTRIBUTES - - def make_scatter_plot(config_nick, rev1, rev2, attribute): - name = "-".join([self.name, rev1, rev2, attribute, config_nick]) - print "Make scatter plot for", name - algo1 = "{}-{}".format(rev1, config_nick) - algo2 = "{}-{}".format(rev2, config_nick) - report = report_class( - filter_config=[algo1, algo2], - attributes=[attribute], - get_category=lambda run1, run2: run1["domain"], - legend_location=(1.3, 0.5)) - report( - self.eval_dir, - os.path.join(scatter_dir, rev1 + "-" + rev2, name)) - - def make_scatter_plots(): - for config in self._configs: - for rev1, rev2 in itertools.combinations(self._revisions, 2): - for attribute in self.get_supported_attributes( - config.nick, attributes): - make_scatter_plot(config.nick, rev1, rev2, attribute) - - self.add_step(step_name, make_scatter_plots) diff --git a/experiments/issue774/relativescatter.py b/experiments/issue774/relativescatter.py deleted file mode 100644 index f74cb6e721..0000000000 --- a/experiments/issue774/relativescatter.py +++ /dev/null @@ -1,105 +0,0 @@ -# -*- coding: utf-8 -*- - -from collections import defaultdict - -from matplotlib import ticker - -from downward.reports.scatter import ScatterPlotReport -from downward.reports.plot import PlotReport, Matplotlib, MatplotlibPlot - - -# TODO: handle outliers - -# TODO: this is mostly copied from ScatterMatplotlib (scatter.py) -class RelativeScatterMatplotlib(Matplotlib): - @classmethod - def _plot(cls, report, axes, categories, styles): - # Display grid - axes.grid(b=True, linestyle='-', 
color='0.75') - - has_points = False - # Generate the scatter plots - for category, coords in sorted(categories.items()): - X, Y = zip(*coords) - axes.scatter(X, Y, s=42, label=category, **styles[category]) - if X and Y: - has_points = True - - if report.xscale == 'linear' or report.yscale == 'linear': - plot_size = report.missing_val * 1.01 - else: - plot_size = report.missing_val * 1.25 - - # make 5 ticks above and below 1 - yticks = [] - tick_step = report.ylim_top**(1/5.0) - for i in xrange(-5, 6): - yticks.append(tick_step**i) - axes.set_yticks(yticks) - axes.get_yaxis().set_major_formatter(ticker.ScalarFormatter()) - - axes.set_xlim(report.xlim_left or -1, report.xlim_right or plot_size) - axes.set_ylim(report.ylim_bottom or -1, report.ylim_top or plot_size) - - for axis in [axes.xaxis, axes.yaxis]: - MatplotlibPlot.change_axis_formatter( - axis, - report.missing_val if report.show_missing else None) - return has_points - - -class RelativeScatterPlotReport(ScatterPlotReport): - """ - Generate a scatter plot that shows a relative comparison of two - algorithms with regard to the given attribute. The attribute value - of algorithm 1 is shown on the x-axis and the relation to the value - of algorithm 2 on the y-axis. - """ - - def __init__(self, show_missing=True, get_category=None, **kwargs): - ScatterPlotReport.__init__(self, show_missing, get_category, **kwargs) - if self.output_format == 'tex': - raise "not supported" - else: - self.writer = RelativeScatterMatplotlib - - def _fill_categories(self, runs): - # We discard the *runs* parameter. 
- # Map category names to value tuples - categories = defaultdict(list) - self.ylim_bottom = 2 - self.ylim_top = 0.5 - self.xlim_left = float("inf") - for (domain, problem), runs in self.problem_runs.items(): - if len(runs) != 2: - continue - run1, run2 = runs - assert (run1['algorithm'] == self.algorithms[0] and - run2['algorithm'] == self.algorithms[1]) - val1 = run1.get(self.attribute) - val2 = run2.get(self.attribute) - if val1 is None or val2 is None: - continue - category = self.get_category(run1, run2) - assert val1 > 0, (domain, problem, self.algorithms[0], val1) - assert val2 > 0, (domain, problem, self.algorithms[1], val2) - x = val1 - y = val2 / float(val1) - - categories[category].append((x, y)) - - self.ylim_top = max(self.ylim_top, y) - self.ylim_bottom = min(self.ylim_bottom, y) - self.xlim_left = min(self.xlim_left, x) - - # center around 1 - if self.ylim_bottom < 1: - self.ylim_top = max(self.ylim_top, 1 / float(self.ylim_bottom)) - if self.ylim_top > 1: - self.ylim_bottom = min(self.ylim_bottom, 1 / float(self.ylim_top)) - return categories - - def _set_scales(self, xscale, yscale): - # ScatterPlot uses log-scaling on the x-axis by default. - PlotReport._set_scales( - self, xscale or self.attribute.scale or 'log', 'log') diff --git a/experiments/issue774/v1-sat.py b/experiments/issue774/v1-sat.py deleted file mode 100755 index 99047541aa..0000000000 --- a/experiments/issue774/v1-sat.py +++ /dev/null @@ -1,55 +0,0 @@ -#! 
/usr/bin/env python -# -*- coding: utf-8 -*- - -import os - -from lab.environments import LocalEnvironment, BaselSlurmEnvironment - -import common_setup -from common_setup import IssueConfig, IssueExperiment -from relativescatter import RelativeScatterPlotReport - - -DIR = os.path.dirname(os.path.abspath(__file__)) -BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"] -REVISIONS = ["issue774-base", "issue774-v1"] -CONFIGS = [ - IssueConfig("blind", ["--search", "astar(blind())"]), - IssueConfig("ehc_ff", ["--heuristic", "h=ff()", "--search", "ehc(h, preferred=[h])"]), - IssueConfig( - "lama-first", - [], - driver_options=["--alias", "lama-first"]), -] -SUITE = common_setup.DEFAULT_SATISFICING_SUITE -ENVIRONMENT = BaselSlurmEnvironment( - email="florian.pommerening@unibas.ch", - export=["PATH", "DOWNWARD_BENCHMARKS"]) - -if common_setup.is_test_run(): - SUITE = IssueExperiment.DEFAULT_TEST_SUITE - ENVIRONMENT = LocalEnvironment(processes=1) - -exp = IssueExperiment( - revisions=REVISIONS, - configs=CONFIGS, - environment=ENVIRONMENT, -) -exp.add_suite(BENCHMARKS_DIR, SUITE) - -exp.add_absolute_report_step() -exp.add_comparison_table_step() - -for attr in ["total_time", "search_time", "memory"]: - for rev1, rev2 in [("base", "v1")]: - for config_nick in ["blind", "ehc_ff", "lama-first"]: - exp.add_report(RelativeScatterPlotReport( - attributes=[attr], - filter_algorithm=["issue774-%s-%s" % (rev1, config_nick), - "issue774-%s-%s" % (rev2, config_nick)], - get_category=lambda r1, r2: r1["domain"], - ), - outfile="issue774-%s-%s-%s-%s.png" % (config_nick, attr, rev1, rev2)) - - -exp.run_steps() diff --git a/experiments/issue774/v2-sat.py b/experiments/issue774/v2-sat.py deleted file mode 100755 index ecc47e8f75..0000000000 --- a/experiments/issue774/v2-sat.py +++ /dev/null @@ -1,55 +0,0 @@ -#! 
/usr/bin/env python -# -*- coding: utf-8 -*- - -import os - -from lab.environments import LocalEnvironment, BaselSlurmEnvironment - -import common_setup -from common_setup import IssueConfig, IssueExperiment -from relativescatter import RelativeScatterPlotReport - - -DIR = os.path.dirname(os.path.abspath(__file__)) -BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"] -REVISIONS = ["issue774-v2-base", "issue774-v2"] -CONFIGS = [ - IssueConfig("blind", ["--search", "astar(blind())"]), - IssueConfig("ehc_ff", ["--heuristic", "h=ff()", "--search", "ehc(h, preferred=[h])"]), - IssueConfig( - "lama-first", - [], - driver_options=["--alias", "lama-first"]), -] -SUITE = common_setup.DEFAULT_SATISFICING_SUITE -ENVIRONMENT = BaselSlurmEnvironment( - email="florian.pommerening@unibas.ch", - export=["PATH", "DOWNWARD_BENCHMARKS"]) - -if common_setup.is_test_run(): - SUITE = IssueExperiment.DEFAULT_TEST_SUITE - ENVIRONMENT = LocalEnvironment(processes=1) - -exp = IssueExperiment( - revisions=REVISIONS, - configs=CONFIGS, - environment=ENVIRONMENT, -) -exp.add_suite(BENCHMARKS_DIR, SUITE) - -exp.add_absolute_report_step() -exp.add_comparison_table_step() - -for attr in ["total_time", "search_time", "memory"]: - for rev1, rev2 in [("v2-base", "v2")]: - for config_nick in ["blind", "ehc_ff", "lama-first"]: - exp.add_report(RelativeScatterPlotReport( - attributes=[attr], - filter_algorithm=["issue774-%s-%s" % (rev1, config_nick), - "issue774-%s-%s" % (rev2, config_nick)], - get_category=lambda r1, r2: r1["domain"], - ), - outfile="issue774-%s-%s-%s-%s.png" % (config_nick, attr, rev1, rev2)) - - -exp.run_steps() diff --git a/experiments/issue776/common_setup.py b/experiments/issue776/common_setup.py deleted file mode 100644 index 687019c482..0000000000 --- a/experiments/issue776/common_setup.py +++ /dev/null @@ -1,382 +0,0 @@ -# -*- coding: utf-8 -*- - -import itertools -import os -import platform -import subprocess -import sys - -from lab.experiment import ARGPARSER -from lab import 
tools - -from downward.experiment import FastDownwardExperiment -from downward.reports.absolute import AbsoluteReport -from downward.reports.compare import ComparativeReport -from downward.reports.scatter import ScatterPlotReport - -from relativescatter import RelativeScatterPlotReport - - -def parse_args(): - ARGPARSER.add_argument( - "--test", - choices=["yes", "no", "auto"], - default="auto", - dest="test_run", - help="test experiment locally on a small suite if --test=yes or " - "--test=auto and we are not on a cluster") - return ARGPARSER.parse_args() - -ARGS = parse_args() - - -DEFAULT_OPTIMAL_SUITE = [ - 'airport', 'barman-opt11-strips', 'barman-opt14-strips', 'blocks', - 'childsnack-opt14-strips', 'depot', 'driverlog', - 'elevators-opt08-strips', 'elevators-opt11-strips', - 'floortile-opt11-strips', 'floortile-opt14-strips', 'freecell', - 'ged-opt14-strips', 'grid', 'gripper', 'hiking-opt14-strips', - 'logistics00', 'logistics98', 'miconic', 'movie', 'mprime', - 'mystery', 'nomystery-opt11-strips', 'openstacks-opt08-strips', - 'openstacks-opt11-strips', 'openstacks-opt14-strips', - 'openstacks-strips', 'parcprinter-08-strips', - 'parcprinter-opt11-strips', 'parking-opt11-strips', - 'parking-opt14-strips', 'pathways-noneg', 'pegsol-08-strips', - 'pegsol-opt11-strips', 'pipesworld-notankage', - 'pipesworld-tankage', 'psr-small', 'rovers', 'satellite', - 'scanalyzer-08-strips', 'scanalyzer-opt11-strips', - 'sokoban-opt08-strips', 'sokoban-opt11-strips', 'storage', - 'tetris-opt14-strips', 'tidybot-opt11-strips', - 'tidybot-opt14-strips', 'tpp', 'transport-opt08-strips', - 'transport-opt11-strips', 'transport-opt14-strips', - 'trucks-strips', 'visitall-opt11-strips', 'visitall-opt14-strips', - 'woodworking-opt08-strips', 'woodworking-opt11-strips', - 'zenotravel'] - -DEFAULT_SATISFICING_SUITE = [ - 'airport', 'assembly', 'barman-sat11-strips', - 'barman-sat14-strips', 'blocks', 'cavediving-14-adl', - 'childsnack-sat14-strips', 'citycar-sat14-adl', 'depot', - 
'driverlog', 'elevators-sat08-strips', 'elevators-sat11-strips', - 'floortile-sat11-strips', 'floortile-sat14-strips', 'freecell', - 'ged-sat14-strips', 'grid', 'gripper', 'hiking-sat14-strips', - 'logistics00', 'logistics98', 'maintenance-sat14-adl', 'miconic', - 'miconic-fulladl', 'miconic-simpleadl', 'movie', 'mprime', - 'mystery', 'nomystery-sat11-strips', 'openstacks', - 'openstacks-sat08-adl', 'openstacks-sat08-strips', - 'openstacks-sat11-strips', 'openstacks-sat14-strips', - 'openstacks-strips', 'optical-telegraphs', 'parcprinter-08-strips', - 'parcprinter-sat11-strips', 'parking-sat11-strips', - 'parking-sat14-strips', 'pathways', 'pathways-noneg', - 'pegsol-08-strips', 'pegsol-sat11-strips', 'philosophers', - 'pipesworld-notankage', 'pipesworld-tankage', 'psr-large', - 'psr-middle', 'psr-small', 'rovers', 'satellite', - 'scanalyzer-08-strips', 'scanalyzer-sat11-strips', 'schedule', - 'sokoban-sat08-strips', 'sokoban-sat11-strips', 'storage', - 'tetris-sat14-strips', 'thoughtful-sat14-strips', - 'tidybot-sat11-strips', 'tpp', 'transport-sat08-strips', - 'transport-sat11-strips', 'transport-sat14-strips', 'trucks', - 'trucks-strips', 'visitall-sat11-strips', 'visitall-sat14-strips', - 'woodworking-sat08-strips', 'woodworking-sat11-strips', - 'zenotravel'] - - -def get_script(): - """Get file name of main script.""" - return tools.get_script_path() - - -def get_script_dir(): - """Get directory of main script. - - Usually a relative directory (depends on how it was called by the user.)""" - return os.path.dirname(get_script()) - - -def get_experiment_name(): - """Get name for experiment. - - Derived from the absolute filename of the main script, e.g. - "/ham/spam/eggs.py" => "spam-eggs".""" - script = os.path.abspath(get_script()) - script_dir = os.path.basename(os.path.dirname(script)) - script_base = os.path.splitext(os.path.basename(script))[0] - return "%s-%s" % (script_dir, script_base) - - -def get_data_dir(): - """Get data dir for the experiment. 
- - This is the subdirectory "data" of the directory containing - the main script.""" - return os.path.join(get_script_dir(), "data", get_experiment_name()) - - -def get_repo_base(): - """Get base directory of the repository, as an absolute path. - - Search upwards in the directory tree from the main script until a - directory with a subdirectory named ".hg" is found. - - Abort if the repo base cannot be found.""" - path = os.path.abspath(get_script_dir()) - while os.path.dirname(path) != path: - if os.path.exists(os.path.join(path, ".hg")): - return path - path = os.path.dirname(path) - sys.exit("repo base could not be found") - - -def is_running_on_cluster(): - node = platform.node() - return node.endswith(".scicore.unibas.ch") or node.endswith(".cluster.bc2.ch") - - -def is_test_run(): - return ARGS.test_run == "yes" or ( - ARGS.test_run == "auto" and not is_running_on_cluster()) - - -def get_algo_nick(revision, config_nick): - return "{revision}-{config_nick}".format(**locals()) - - -class IssueConfig(object): - """Hold information about a planner configuration. - - See FastDownwardExperiment.add_algorithm() for documentation of the - constructor's options. 
- - """ - def __init__(self, nick, component_options, - build_options=None, driver_options=None): - self.nick = nick - self.component_options = component_options - self.build_options = build_options - self.driver_options = driver_options - - -class IssueExperiment(FastDownwardExperiment): - """Subclass of FastDownwardExperiment with some convenience features.""" - - DEFAULT_TEST_SUITE = ["depot:p01.pddl", "gripper:prob01.pddl"] - - DEFAULT_TABLE_ATTRIBUTES = [ - "cost", - "coverage", - "error", - "evaluations", - "expansions", - "expansions_until_last_jump", - "generated", - "memory", - "quality", - "run_dir", - "score_evaluations", - "score_expansions", - "score_generated", - "score_memory", - "score_search_time", - "score_total_time", - "search_time", - "total_time", - ] - - DEFAULT_SCATTER_PLOT_ATTRIBUTES = [ - "evaluations", - "expansions", - "expansions_until_last_jump", - "initial_h_value", - "memory", - "search_time", - "total_time", - ] - - PORTFOLIO_ATTRIBUTES = [ - "cost", - "coverage", - "error", - "plan_length", - "run_dir", - ] - - def __init__(self, revisions=None, configs=None, path=None, **kwargs): - """ - - You can either specify both *revisions* and *configs* or none - of them. If they are omitted, you will need to call - exp.add_algorithm() manually. - - If *revisions* is given, it must be a non-empty list of - revision identifiers, which specify which planner versions to - use in the experiment. The same versions are used for - translator, preprocessor and search. :: - - IssueExperiment(revisions=["issue123", "4b3d581643"], ...) - - If *configs* is given, it must be a non-empty list of - IssueConfig objects. :: - - IssueExperiment(..., configs=[ - IssueConfig("ff", ["--search", "eager_greedy(ff())"]), - IssueConfig( - "lama", [], - driver_options=["--alias", "seq-sat-lama-2011"]), - ]) - - If *path* is specified, it must be the path to where the - experiment should be built (e.g. - /home/john/experiments/issue123/exp01/). 
If omitted, the - experiment path is derived automatically from the main - script's filename. Example:: - - script = experiments/issue123/exp01.py --> - path = experiments/issue123/data/issue123-exp01/ - - """ - - path = path or get_data_dir() - - FastDownwardExperiment.__init__(self, path=path, **kwargs) - - if (revisions and not configs) or (not revisions and configs): - raise ValueError( - "please provide either both or none of revisions and configs") - - for rev in revisions: - for config in configs: - self.add_algorithm( - get_algo_nick(rev, config.nick), - get_repo_base(), - rev, - config.component_options, - build_options=config.build_options, - driver_options=config.driver_options) - - self._revisions = revisions - self._configs = configs - - @classmethod - def _is_portfolio(cls, config_nick): - return "fdss" in config_nick - - @classmethod - def get_supported_attributes(cls, config_nick, attributes): - if cls._is_portfolio(config_nick): - return [attr for attr in attributes - if attr in cls.PORTFOLIO_ATTRIBUTES] - return attributes - - def add_absolute_report_step(self, **kwargs): - """Add step that makes an absolute report. - - Absolute reports are useful for experiments that don't compare - revisions. - - The report is written to the experiment evaluation directory. - - All *kwargs* will be passed to the AbsoluteReport class. If the - keyword argument *attributes* is not specified, a default list - of attributes is used. :: - - exp.add_absolute_report_step(attributes=["coverage"]) - - """ - kwargs.setdefault("attributes", self.DEFAULT_TABLE_ATTRIBUTES) - report = AbsoluteReport(**kwargs) - outfile = os.path.join( - self.eval_dir, - get_experiment_name() + "." + report.output_format) - self.add_report(report, outfile=outfile) - self.add_step( - 'publish-absolute-report', subprocess.call, ['publish', outfile]) - - def add_comparison_table_step(self, **kwargs): - """Add a step that makes pairwise revision comparisons. 
- - Create comparative reports for all pairs of Fast Downward - revisions. Each report pairs up the runs of the same config and - lists the two absolute attribute values and their difference - for all attributes in kwargs["attributes"]. - - All *kwargs* will be passed to the CompareConfigsReport class. - If the keyword argument *attributes* is not specified, a - default list of attributes is used. :: - - exp.add_comparison_table_step(attributes=["coverage"]) - - """ - kwargs.setdefault("attributes", self.DEFAULT_TABLE_ATTRIBUTES) - - def make_comparison_tables(): - for rev1, rev2 in itertools.combinations(self._revisions, 2): - compared_configs = [] - for config in self._configs: - config_nick = config.nick - compared_configs.append( - ("%s-%s" % (rev1, config_nick), - "%s-%s" % (rev2, config_nick), - "Diff (%s)" % config_nick)) - report = ComparativeReport(compared_configs, **kwargs) - outfile = os.path.join( - self.eval_dir, - "%s-%s-%s-compare.%s" % ( - self.name, rev1, rev2, report.output_format)) - report(self.eval_dir, outfile) - - def publish_comparison_tables(): - for rev1, rev2 in itertools.combinations(self._revisions, 2): - outfile = os.path.join( - self.eval_dir, - "%s-%s-%s-compare.html" % (self.name, rev1, rev2)) - subprocess.call(["publish", outfile]) - - self.add_step("make-comparison-tables", make_comparison_tables) - self.add_step( - "publish-comparison-tables", publish_comparison_tables) - - def add_scatter_plot_step(self, relative=False, attributes=None): - """Add step creating (relative) scatter plots for all revision pairs. - - Create a scatter plot for each combination of attribute, - configuration and revisions pair. If *attributes* is not - specified, a list of common scatter plot attributes is used. - For portfolios all attributes except "cost", "coverage" and - "plan_length" will be ignored. 
:: - - exp.add_scatter_plot_step(attributes=["expansions"]) - - """ - if relative: - report_class = RelativeScatterPlotReport - scatter_dir = os.path.join(self.eval_dir, "scatter-relative") - step_name = "make-relative-scatter-plots" - else: - report_class = ScatterPlotReport - scatter_dir = os.path.join(self.eval_dir, "scatter-absolute") - step_name = "make-absolute-scatter-plots" - if attributes is None: - attributes = self.DEFAULT_SCATTER_PLOT_ATTRIBUTES - - def make_scatter_plot(config_nick, rev1, rev2, attribute): - name = "-".join([self.name, rev1, rev2, attribute, config_nick]) - print "Make scatter plot for", name - algo1 = "{}-{}".format(rev1, config_nick) - algo2 = "{}-{}".format(rev2, config_nick) - report = report_class( - filter_config=[algo1, algo2], - attributes=[attribute], - get_category=lambda run1, run2: run1["domain"], - legend_location=(1.3, 0.5)) - report( - self.eval_dir, - os.path.join(scatter_dir, rev1 + "-" + rev2, name)) - - def make_scatter_plots(): - for config in self._configs: - for rev1, rev2 in itertools.combinations(self._revisions, 2): - for attribute in self.get_supported_attributes( - config.nick, attributes): - make_scatter_plot(config.nick, rev1, rev2, attribute) - - self.add_step(step_name, make_scatter_plots) diff --git a/experiments/issue776/relativescatter.py b/experiments/issue776/relativescatter.py deleted file mode 100644 index f74cb6e721..0000000000 --- a/experiments/issue776/relativescatter.py +++ /dev/null @@ -1,105 +0,0 @@ -# -*- coding: utf-8 -*- - -from collections import defaultdict - -from matplotlib import ticker - -from downward.reports.scatter import ScatterPlotReport -from downward.reports.plot import PlotReport, Matplotlib, MatplotlibPlot - - -# TODO: handle outliers - -# TODO: this is mostly copied from ScatterMatplotlib (scatter.py) -class RelativeScatterMatplotlib(Matplotlib): - @classmethod - def _plot(cls, report, axes, categories, styles): - # Display grid - axes.grid(b=True, linestyle='-', 
color='0.75') - - has_points = False - # Generate the scatter plots - for category, coords in sorted(categories.items()): - X, Y = zip(*coords) - axes.scatter(X, Y, s=42, label=category, **styles[category]) - if X and Y: - has_points = True - - if report.xscale == 'linear' or report.yscale == 'linear': - plot_size = report.missing_val * 1.01 - else: - plot_size = report.missing_val * 1.25 - - # make 5 ticks above and below 1 - yticks = [] - tick_step = report.ylim_top**(1/5.0) - for i in xrange(-5, 6): - yticks.append(tick_step**i) - axes.set_yticks(yticks) - axes.get_yaxis().set_major_formatter(ticker.ScalarFormatter()) - - axes.set_xlim(report.xlim_left or -1, report.xlim_right or plot_size) - axes.set_ylim(report.ylim_bottom or -1, report.ylim_top or plot_size) - - for axis in [axes.xaxis, axes.yaxis]: - MatplotlibPlot.change_axis_formatter( - axis, - report.missing_val if report.show_missing else None) - return has_points - - -class RelativeScatterPlotReport(ScatterPlotReport): - """ - Generate a scatter plot that shows a relative comparison of two - algorithms with regard to the given attribute. The attribute value - of algorithm 1 is shown on the x-axis and the relation to the value - of algorithm 2 on the y-axis. - """ - - def __init__(self, show_missing=True, get_category=None, **kwargs): - ScatterPlotReport.__init__(self, show_missing, get_category, **kwargs) - if self.output_format == 'tex': - raise "not supported" - else: - self.writer = RelativeScatterMatplotlib - - def _fill_categories(self, runs): - # We discard the *runs* parameter. 
- # Map category names to value tuples - categories = defaultdict(list) - self.ylim_bottom = 2 - self.ylim_top = 0.5 - self.xlim_left = float("inf") - for (domain, problem), runs in self.problem_runs.items(): - if len(runs) != 2: - continue - run1, run2 = runs - assert (run1['algorithm'] == self.algorithms[0] and - run2['algorithm'] == self.algorithms[1]) - val1 = run1.get(self.attribute) - val2 = run2.get(self.attribute) - if val1 is None or val2 is None: - continue - category = self.get_category(run1, run2) - assert val1 > 0, (domain, problem, self.algorithms[0], val1) - assert val2 > 0, (domain, problem, self.algorithms[1], val2) - x = val1 - y = val2 / float(val1) - - categories[category].append((x, y)) - - self.ylim_top = max(self.ylim_top, y) - self.ylim_bottom = min(self.ylim_bottom, y) - self.xlim_left = min(self.xlim_left, x) - - # center around 1 - if self.ylim_bottom < 1: - self.ylim_top = max(self.ylim_top, 1 / float(self.ylim_bottom)) - if self.ylim_top > 1: - self.ylim_bottom = min(self.ylim_bottom, 1 / float(self.ylim_top)) - return categories - - def _set_scales(self, xscale, yscale): - # ScatterPlot uses log-scaling on the x-axis by default. - PlotReport._set_scales( - self, xscale or self.attribute.scale or 'log', 'log') diff --git a/experiments/issue776/v1-lama-second.py b/experiments/issue776/v1-lama-second.py deleted file mode 100755 index aa7996ca02..0000000000 --- a/experiments/issue776/v1-lama-second.py +++ /dev/null @@ -1,62 +0,0 @@ -#! 
/usr/bin/env python -# -*- coding: utf-8 -*- - -import os - -from lab.environments import LocalEnvironment, BaselSlurmEnvironment - -from downward.reports.compare import ComparativeReport - -import common_setup -from common_setup import IssueConfig, IssueExperiment -from relativescatter import RelativeScatterPlotReport - -DIR = os.path.dirname(os.path.abspath(__file__)) -BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"] -REVISIONS = ["issue776-base", "issue776-v1"] -CONFIGS = [ - IssueConfig('lama-second', [ - "--heuristic", - "hlm2=lama_synergy(lm_rhw(reasonable_orders=true,lm_cost_type=plusone),transform=adapt_costs(plusone))", - "--heuristic", - "hff2=ff_synergy(hlm2)", - "--search", - "lazy_greedy([hff2,hlm2],preferred=[hff2,hlm2],reopen_closed=false)" - ]), -] - -SUITE = [ - 'barman-opt11-strips', 'barman-sat11-strips', 'citycar-opt14-adl', - 'citycar-sat14-adl', 'elevators-opt08-strips', 'elevators-opt11-strips', - 'elevators-sat08-strips', 'elevators-sat11-strips', - 'floortile-opt11-strips', 'floortile-opt14-strips', - 'floortile-sat11-strips', 'floortile-sat14-strips', 'openstacks-opt08-adl', - 'openstacks-sat08-adl', 'parcprinter-08-strips', - 'parcprinter-opt11-strips', 'parking-opt11-strips', 'parking-opt14-strips', - 'parking-sat11-strips', 'parking-sat14-strips', 'pegsol-08-strips', - 'pegsol-opt11-strips', 'pegsol-sat11-strips', 'scanalyzer-08-strips', - 'scanalyzer-opt11-strips', 'scanalyzer-sat11-strips', - 'sokoban-opt08-strips', 'sokoban-opt11-strips', 'sokoban-sat08-strips', - 'sokoban-sat11-strips', 'tetris-opt14-strips', 'tetris-sat14-strips', - 'woodworking-opt08-strips', 'woodworking-opt11-strips', - 'woodworking-sat08-strips', 'woodworking-sat11-strips' -] - -ENVIRONMENT = BaselSlurmEnvironment(email="silvan.sievers@unibas.ch",partition='infai_1') - -if common_setup.is_test_run(): - SUITE = IssueExperiment.DEFAULT_TEST_SUITE - ENVIRONMENT = LocalEnvironment(processes=4) - -exp = IssueExperiment( - revisions=REVISIONS, - configs=CONFIGS, - 
environment=ENVIRONMENT, -) -exp.add_suite(BENCHMARKS_DIR, SUITE) - -exp.add_parse_again_step() - -exp.add_comparison_table_step() - -exp.run_steps() diff --git a/experiments/issue776/v1.py b/experiments/issue776/v1.py deleted file mode 100755 index b49a0a9db0..0000000000 --- a/experiments/issue776/v1.py +++ /dev/null @@ -1,38 +0,0 @@ -#! /usr/bin/env python -# -*- coding: utf-8 -*- - -import os - -from lab.environments import LocalEnvironment, BaselSlurmEnvironment - -from downward.reports.compare import ComparativeReport - -import common_setup -from common_setup import IssueConfig, IssueExperiment -from relativescatter import RelativeScatterPlotReport - -DIR = os.path.dirname(os.path.abspath(__file__)) -BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"] -REVISIONS = ["issue776-base", "issue776-v1"] -CONFIGS = [ - IssueConfig('lama', [], driver_options=['--alias', 'seq-sat-lama-2011']), - IssueConfig('lama-first', [], driver_options=['--alias', 'lama-first']), - IssueConfig('bjolp', [], driver_options=['--alias', 'seq-opt-bjolp']), -] -SUITE = common_setup.DEFAULT_OPTIMAL_SUITE -ENVIRONMENT = BaselSlurmEnvironment(email="silvan.sievers@unibas.ch") - -if common_setup.is_test_run(): - SUITE = IssueExperiment.DEFAULT_TEST_SUITE - ENVIRONMENT = LocalEnvironment(processes=4) - -exp = IssueExperiment( - revisions=REVISIONS, - configs=CONFIGS, - environment=ENVIRONMENT, -) -exp.add_suite(BENCHMARKS_DIR, SUITE) - -exp.add_comparison_table_step() - -exp.run_steps() diff --git a/experiments/issue776/v2-lama.py b/experiments/issue776/v2-lama.py deleted file mode 100755 index 586ce336fb..0000000000 --- a/experiments/issue776/v2-lama.py +++ /dev/null @@ -1,43 +0,0 @@ -#! 
/usr/bin/env python -# -*- coding: utf-8 -*- - -import os - -from lab.environments import LocalEnvironment, BaselSlurmEnvironment - -import common_setup -from common_setup import IssueConfig, IssueExperiment -from relativescatter import RelativeScatterPlotReport - -DIR = os.path.dirname(os.path.abspath(__file__)) -BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"] -REVISIONS = ["issue776-v2-base", "issue776-v2"] -CONFIGS = [ - IssueConfig('lama', [], driver_options=['--alias', 'seq-sat-lama-2011', '--overall-time-limit', '5m']), -] -SUITE = common_setup.DEFAULT_SATISFICING_SUITE -ENVIRONMENT = BaselSlurmEnvironment(email="florian.pommerening@unibas.ch") - -if common_setup.is_test_run(): - SUITE = IssueExperiment.DEFAULT_TEST_SUITE - ENVIRONMENT = LocalEnvironment(processes=4) - -exp = IssueExperiment( - revisions=REVISIONS, - configs=CONFIGS, - environment=ENVIRONMENT, -) -exp.add_suite(BENCHMARKS_DIR, SUITE) - -exp.add_parser(exp.EXITCODE_PARSER) -exp.add_parser(exp.TRANSLATOR_PARSER) -exp.add_parser(exp.ANYTIME_SEARCH_PARSER) -exp.add_parser(exp.PLANNER_PARSER) - -exp.add_step('build', exp.build) -exp.add_step('start', exp.start_runs) -exp.add_fetcher(name='fetch') - -exp.add_comparison_table_step() - -exp.run_steps() diff --git a/experiments/issue776/v2.py b/experiments/issue776/v2.py deleted file mode 100755 index 328039a4ee..0000000000 --- a/experiments/issue776/v2.py +++ /dev/null @@ -1,44 +0,0 @@ -#! 
/usr/bin/env python -# -*- coding: utf-8 -*- - -import os - -from lab.environments import LocalEnvironment, BaselSlurmEnvironment - -import common_setup -from common_setup import IssueConfig, IssueExperiment -from relativescatter import RelativeScatterPlotReport - -DIR = os.path.dirname(os.path.abspath(__file__)) -BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"] -REVISIONS = ["issue776-v2-base", "issue776-v2"] -CONFIGS = [ - IssueConfig('lama-first', [], driver_options=['--alias', 'lama-first', '--overall-time-limit', '5m']), - IssueConfig('bjolp', [], driver_options=['--alias', 'seq-opt-bjolp', '--overall-time-limit', '5m']), -] -SUITE = common_setup.DEFAULT_OPTIMAL_SUITE -ENVIRONMENT = BaselSlurmEnvironment(email="florian.pommerening@unibas.ch") - -if common_setup.is_test_run(): - SUITE = IssueExperiment.DEFAULT_TEST_SUITE - ENVIRONMENT = LocalEnvironment(processes=4) - -exp = IssueExperiment( - revisions=REVISIONS, - configs=CONFIGS, - environment=ENVIRONMENT, -) -exp.add_suite(BENCHMARKS_DIR, SUITE) - -exp.add_parser(exp.EXITCODE_PARSER) -exp.add_parser(exp.TRANSLATOR_PARSER) -exp.add_parser(exp.SINGLE_SEARCH_PARSER) -exp.add_parser(exp.PLANNER_PARSER) - -exp.add_step('build', exp.build) -exp.add_step('start', exp.start_runs) -exp.add_fetcher(name='fetch') - -exp.add_comparison_table_step() - -exp.run_steps() diff --git a/experiments/issue778/common_setup.py b/experiments/issue778/common_setup.py deleted file mode 100644 index 687019c482..0000000000 --- a/experiments/issue778/common_setup.py +++ /dev/null @@ -1,382 +0,0 @@ -# -*- coding: utf-8 -*- - -import itertools -import os -import platform -import subprocess -import sys - -from lab.experiment import ARGPARSER -from lab import tools - -from downward.experiment import FastDownwardExperiment -from downward.reports.absolute import AbsoluteReport -from downward.reports.compare import ComparativeReport -from downward.reports.scatter import ScatterPlotReport - -from relativescatter import 
RelativeScatterPlotReport - - -def parse_args(): - ARGPARSER.add_argument( - "--test", - choices=["yes", "no", "auto"], - default="auto", - dest="test_run", - help="test experiment locally on a small suite if --test=yes or " - "--test=auto and we are not on a cluster") - return ARGPARSER.parse_args() - -ARGS = parse_args() - - -DEFAULT_OPTIMAL_SUITE = [ - 'airport', 'barman-opt11-strips', 'barman-opt14-strips', 'blocks', - 'childsnack-opt14-strips', 'depot', 'driverlog', - 'elevators-opt08-strips', 'elevators-opt11-strips', - 'floortile-opt11-strips', 'floortile-opt14-strips', 'freecell', - 'ged-opt14-strips', 'grid', 'gripper', 'hiking-opt14-strips', - 'logistics00', 'logistics98', 'miconic', 'movie', 'mprime', - 'mystery', 'nomystery-opt11-strips', 'openstacks-opt08-strips', - 'openstacks-opt11-strips', 'openstacks-opt14-strips', - 'openstacks-strips', 'parcprinter-08-strips', - 'parcprinter-opt11-strips', 'parking-opt11-strips', - 'parking-opt14-strips', 'pathways-noneg', 'pegsol-08-strips', - 'pegsol-opt11-strips', 'pipesworld-notankage', - 'pipesworld-tankage', 'psr-small', 'rovers', 'satellite', - 'scanalyzer-08-strips', 'scanalyzer-opt11-strips', - 'sokoban-opt08-strips', 'sokoban-opt11-strips', 'storage', - 'tetris-opt14-strips', 'tidybot-opt11-strips', - 'tidybot-opt14-strips', 'tpp', 'transport-opt08-strips', - 'transport-opt11-strips', 'transport-opt14-strips', - 'trucks-strips', 'visitall-opt11-strips', 'visitall-opt14-strips', - 'woodworking-opt08-strips', 'woodworking-opt11-strips', - 'zenotravel'] - -DEFAULT_SATISFICING_SUITE = [ - 'airport', 'assembly', 'barman-sat11-strips', - 'barman-sat14-strips', 'blocks', 'cavediving-14-adl', - 'childsnack-sat14-strips', 'citycar-sat14-adl', 'depot', - 'driverlog', 'elevators-sat08-strips', 'elevators-sat11-strips', - 'floortile-sat11-strips', 'floortile-sat14-strips', 'freecell', - 'ged-sat14-strips', 'grid', 'gripper', 'hiking-sat14-strips', - 'logistics00', 'logistics98', 'maintenance-sat14-adl', 'miconic', 
- 'miconic-fulladl', 'miconic-simpleadl', 'movie', 'mprime', - 'mystery', 'nomystery-sat11-strips', 'openstacks', - 'openstacks-sat08-adl', 'openstacks-sat08-strips', - 'openstacks-sat11-strips', 'openstacks-sat14-strips', - 'openstacks-strips', 'optical-telegraphs', 'parcprinter-08-strips', - 'parcprinter-sat11-strips', 'parking-sat11-strips', - 'parking-sat14-strips', 'pathways', 'pathways-noneg', - 'pegsol-08-strips', 'pegsol-sat11-strips', 'philosophers', - 'pipesworld-notankage', 'pipesworld-tankage', 'psr-large', - 'psr-middle', 'psr-small', 'rovers', 'satellite', - 'scanalyzer-08-strips', 'scanalyzer-sat11-strips', 'schedule', - 'sokoban-sat08-strips', 'sokoban-sat11-strips', 'storage', - 'tetris-sat14-strips', 'thoughtful-sat14-strips', - 'tidybot-sat11-strips', 'tpp', 'transport-sat08-strips', - 'transport-sat11-strips', 'transport-sat14-strips', 'trucks', - 'trucks-strips', 'visitall-sat11-strips', 'visitall-sat14-strips', - 'woodworking-sat08-strips', 'woodworking-sat11-strips', - 'zenotravel'] - - -def get_script(): - """Get file name of main script.""" - return tools.get_script_path() - - -def get_script_dir(): - """Get directory of main script. - - Usually a relative directory (depends on how it was called by the user.)""" - return os.path.dirname(get_script()) - - -def get_experiment_name(): - """Get name for experiment. - - Derived from the absolute filename of the main script, e.g. - "/ham/spam/eggs.py" => "spam-eggs".""" - script = os.path.abspath(get_script()) - script_dir = os.path.basename(os.path.dirname(script)) - script_base = os.path.splitext(os.path.basename(script))[0] - return "%s-%s" % (script_dir, script_base) - - -def get_data_dir(): - """Get data dir for the experiment. - - This is the subdirectory "data" of the directory containing - the main script.""" - return os.path.join(get_script_dir(), "data", get_experiment_name()) - - -def get_repo_base(): - """Get base directory of the repository, as an absolute path. 
- - Search upwards in the directory tree from the main script until a - directory with a subdirectory named ".hg" is found. - - Abort if the repo base cannot be found.""" - path = os.path.abspath(get_script_dir()) - while os.path.dirname(path) != path: - if os.path.exists(os.path.join(path, ".hg")): - return path - path = os.path.dirname(path) - sys.exit("repo base could not be found") - - -def is_running_on_cluster(): - node = platform.node() - return node.endswith(".scicore.unibas.ch") or node.endswith(".cluster.bc2.ch") - - -def is_test_run(): - return ARGS.test_run == "yes" or ( - ARGS.test_run == "auto" and not is_running_on_cluster()) - - -def get_algo_nick(revision, config_nick): - return "{revision}-{config_nick}".format(**locals()) - - -class IssueConfig(object): - """Hold information about a planner configuration. - - See FastDownwardExperiment.add_algorithm() for documentation of the - constructor's options. - - """ - def __init__(self, nick, component_options, - build_options=None, driver_options=None): - self.nick = nick - self.component_options = component_options - self.build_options = build_options - self.driver_options = driver_options - - -class IssueExperiment(FastDownwardExperiment): - """Subclass of FastDownwardExperiment with some convenience features.""" - - DEFAULT_TEST_SUITE = ["depot:p01.pddl", "gripper:prob01.pddl"] - - DEFAULT_TABLE_ATTRIBUTES = [ - "cost", - "coverage", - "error", - "evaluations", - "expansions", - "expansions_until_last_jump", - "generated", - "memory", - "quality", - "run_dir", - "score_evaluations", - "score_expansions", - "score_generated", - "score_memory", - "score_search_time", - "score_total_time", - "search_time", - "total_time", - ] - - DEFAULT_SCATTER_PLOT_ATTRIBUTES = [ - "evaluations", - "expansions", - "expansions_until_last_jump", - "initial_h_value", - "memory", - "search_time", - "total_time", - ] - - PORTFOLIO_ATTRIBUTES = [ - "cost", - "coverage", - "error", - "plan_length", - "run_dir", - ] - - def 
__init__(self, revisions=None, configs=None, path=None, **kwargs): - """ - - You can either specify both *revisions* and *configs* or none - of them. If they are omitted, you will need to call - exp.add_algorithm() manually. - - If *revisions* is given, it must be a non-empty list of - revision identifiers, which specify which planner versions to - use in the experiment. The same versions are used for - translator, preprocessor and search. :: - - IssueExperiment(revisions=["issue123", "4b3d581643"], ...) - - If *configs* is given, it must be a non-empty list of - IssueConfig objects. :: - - IssueExperiment(..., configs=[ - IssueConfig("ff", ["--search", "eager_greedy(ff())"]), - IssueConfig( - "lama", [], - driver_options=["--alias", "seq-sat-lama-2011"]), - ]) - - If *path* is specified, it must be the path to where the - experiment should be built (e.g. - /home/john/experiments/issue123/exp01/). If omitted, the - experiment path is derived automatically from the main - script's filename. 
Example:: - - script = experiments/issue123/exp01.py --> - path = experiments/issue123/data/issue123-exp01/ - - """ - - path = path or get_data_dir() - - FastDownwardExperiment.__init__(self, path=path, **kwargs) - - if (revisions and not configs) or (not revisions and configs): - raise ValueError( - "please provide either both or none of revisions and configs") - - for rev in revisions: - for config in configs: - self.add_algorithm( - get_algo_nick(rev, config.nick), - get_repo_base(), - rev, - config.component_options, - build_options=config.build_options, - driver_options=config.driver_options) - - self._revisions = revisions - self._configs = configs - - @classmethod - def _is_portfolio(cls, config_nick): - return "fdss" in config_nick - - @classmethod - def get_supported_attributes(cls, config_nick, attributes): - if cls._is_portfolio(config_nick): - return [attr for attr in attributes - if attr in cls.PORTFOLIO_ATTRIBUTES] - return attributes - - def add_absolute_report_step(self, **kwargs): - """Add step that makes an absolute report. - - Absolute reports are useful for experiments that don't compare - revisions. - - The report is written to the experiment evaluation directory. - - All *kwargs* will be passed to the AbsoluteReport class. If the - keyword argument *attributes* is not specified, a default list - of attributes is used. :: - - exp.add_absolute_report_step(attributes=["coverage"]) - - """ - kwargs.setdefault("attributes", self.DEFAULT_TABLE_ATTRIBUTES) - report = AbsoluteReport(**kwargs) - outfile = os.path.join( - self.eval_dir, - get_experiment_name() + "." + report.output_format) - self.add_report(report, outfile=outfile) - self.add_step( - 'publish-absolute-report', subprocess.call, ['publish', outfile]) - - def add_comparison_table_step(self, **kwargs): - """Add a step that makes pairwise revision comparisons. - - Create comparative reports for all pairs of Fast Downward - revisions. 
Each report pairs up the runs of the same config and - lists the two absolute attribute values and their difference - for all attributes in kwargs["attributes"]. - - All *kwargs* will be passed to the CompareConfigsReport class. - If the keyword argument *attributes* is not specified, a - default list of attributes is used. :: - - exp.add_comparison_table_step(attributes=["coverage"]) - - """ - kwargs.setdefault("attributes", self.DEFAULT_TABLE_ATTRIBUTES) - - def make_comparison_tables(): - for rev1, rev2 in itertools.combinations(self._revisions, 2): - compared_configs = [] - for config in self._configs: - config_nick = config.nick - compared_configs.append( - ("%s-%s" % (rev1, config_nick), - "%s-%s" % (rev2, config_nick), - "Diff (%s)" % config_nick)) - report = ComparativeReport(compared_configs, **kwargs) - outfile = os.path.join( - self.eval_dir, - "%s-%s-%s-compare.%s" % ( - self.name, rev1, rev2, report.output_format)) - report(self.eval_dir, outfile) - - def publish_comparison_tables(): - for rev1, rev2 in itertools.combinations(self._revisions, 2): - outfile = os.path.join( - self.eval_dir, - "%s-%s-%s-compare.html" % (self.name, rev1, rev2)) - subprocess.call(["publish", outfile]) - - self.add_step("make-comparison-tables", make_comparison_tables) - self.add_step( - "publish-comparison-tables", publish_comparison_tables) - - def add_scatter_plot_step(self, relative=False, attributes=None): - """Add step creating (relative) scatter plots for all revision pairs. - - Create a scatter plot for each combination of attribute, - configuration and revisions pair. If *attributes* is not - specified, a list of common scatter plot attributes is used. - For portfolios all attributes except "cost", "coverage" and - "plan_length" will be ignored. 
:: - - exp.add_scatter_plot_step(attributes=["expansions"]) - - """ - if relative: - report_class = RelativeScatterPlotReport - scatter_dir = os.path.join(self.eval_dir, "scatter-relative") - step_name = "make-relative-scatter-plots" - else: - report_class = ScatterPlotReport - scatter_dir = os.path.join(self.eval_dir, "scatter-absolute") - step_name = "make-absolute-scatter-plots" - if attributes is None: - attributes = self.DEFAULT_SCATTER_PLOT_ATTRIBUTES - - def make_scatter_plot(config_nick, rev1, rev2, attribute): - name = "-".join([self.name, rev1, rev2, attribute, config_nick]) - print "Make scatter plot for", name - algo1 = "{}-{}".format(rev1, config_nick) - algo2 = "{}-{}".format(rev2, config_nick) - report = report_class( - filter_config=[algo1, algo2], - attributes=[attribute], - get_category=lambda run1, run2: run1["domain"], - legend_location=(1.3, 0.5)) - report( - self.eval_dir, - os.path.join(scatter_dir, rev1 + "-" + rev2, name)) - - def make_scatter_plots(): - for config in self._configs: - for rev1, rev2 in itertools.combinations(self._revisions, 2): - for attribute in self.get_supported_attributes( - config.nick, attributes): - make_scatter_plot(config.nick, rev1, rev2, attribute) - - self.add_step(step_name, make_scatter_plots) diff --git a/experiments/issue778/relativescatter.py b/experiments/issue778/relativescatter.py deleted file mode 100644 index f74cb6e721..0000000000 --- a/experiments/issue778/relativescatter.py +++ /dev/null @@ -1,105 +0,0 @@ -# -*- coding: utf-8 -*- - -from collections import defaultdict - -from matplotlib import ticker - -from downward.reports.scatter import ScatterPlotReport -from downward.reports.plot import PlotReport, Matplotlib, MatplotlibPlot - - -# TODO: handle outliers - -# TODO: this is mostly copied from ScatterMatplotlib (scatter.py) -class RelativeScatterMatplotlib(Matplotlib): - @classmethod - def _plot(cls, report, axes, categories, styles): - # Display grid - axes.grid(b=True, linestyle='-', 
color='0.75') - - has_points = False - # Generate the scatter plots - for category, coords in sorted(categories.items()): - X, Y = zip(*coords) - axes.scatter(X, Y, s=42, label=category, **styles[category]) - if X and Y: - has_points = True - - if report.xscale == 'linear' or report.yscale == 'linear': - plot_size = report.missing_val * 1.01 - else: - plot_size = report.missing_val * 1.25 - - # make 5 ticks above and below 1 - yticks = [] - tick_step = report.ylim_top**(1/5.0) - for i in xrange(-5, 6): - yticks.append(tick_step**i) - axes.set_yticks(yticks) - axes.get_yaxis().set_major_formatter(ticker.ScalarFormatter()) - - axes.set_xlim(report.xlim_left or -1, report.xlim_right or plot_size) - axes.set_ylim(report.ylim_bottom or -1, report.ylim_top or plot_size) - - for axis in [axes.xaxis, axes.yaxis]: - MatplotlibPlot.change_axis_formatter( - axis, - report.missing_val if report.show_missing else None) - return has_points - - -class RelativeScatterPlotReport(ScatterPlotReport): - """ - Generate a scatter plot that shows a relative comparison of two - algorithms with regard to the given attribute. The attribute value - of algorithm 1 is shown on the x-axis and the relation to the value - of algorithm 2 on the y-axis. - """ - - def __init__(self, show_missing=True, get_category=None, **kwargs): - ScatterPlotReport.__init__(self, show_missing, get_category, **kwargs) - if self.output_format == 'tex': - raise "not supported" - else: - self.writer = RelativeScatterMatplotlib - - def _fill_categories(self, runs): - # We discard the *runs* parameter. 
- # Map category names to value tuples - categories = defaultdict(list) - self.ylim_bottom = 2 - self.ylim_top = 0.5 - self.xlim_left = float("inf") - for (domain, problem), runs in self.problem_runs.items(): - if len(runs) != 2: - continue - run1, run2 = runs - assert (run1['algorithm'] == self.algorithms[0] and - run2['algorithm'] == self.algorithms[1]) - val1 = run1.get(self.attribute) - val2 = run2.get(self.attribute) - if val1 is None or val2 is None: - continue - category = self.get_category(run1, run2) - assert val1 > 0, (domain, problem, self.algorithms[0], val1) - assert val2 > 0, (domain, problem, self.algorithms[1], val2) - x = val1 - y = val2 / float(val1) - - categories[category].append((x, y)) - - self.ylim_top = max(self.ylim_top, y) - self.ylim_bottom = min(self.ylim_bottom, y) - self.xlim_left = min(self.xlim_left, x) - - # center around 1 - if self.ylim_bottom < 1: - self.ylim_top = max(self.ylim_top, 1 / float(self.ylim_bottom)) - if self.ylim_top > 1: - self.ylim_bottom = min(self.ylim_bottom, 1 / float(self.ylim_top)) - return categories - - def _set_scales(self, xscale, yscale): - # ScatterPlot uses log-scaling on the x-axis by default. - PlotReport._set_scales( - self, xscale or self.attribute.scale or 'log', 'log') diff --git a/experiments/issue778/v1-no-min-ratio.py b/experiments/issue778/v1-no-min-ratio.py deleted file mode 100755 index 2db4e9319d..0000000000 --- a/experiments/issue778/v1-no-min-ratio.py +++ /dev/null @@ -1,58 +0,0 @@ -#! 
/usr/bin/env python -# -*- coding: utf-8 -*- - -import os - -from lab.environments import LocalEnvironment, BaselSlurmEnvironment - -import common_setup -from common_setup import IssueConfig, IssueExperiment -from relativescatter import RelativeScatterPlotReport - -DIR = os.path.dirname(os.path.abspath(__file__)) -BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"] -REVISIONS = ["issue778-base", "issue778-v1"] -CONFIGS = [ - IssueConfig( - heuristic + "-" + pruning, - ["--search", "astar({heuristic}(), pruning=stubborn_sets_{pruning}())".format(**locals())]) - for heuristic in ["blind", "lmcut"] - for pruning in ["ec", "simple"] -] -SUITE = common_setup.DEFAULT_OPTIMAL_SUITE -ENVIRONMENT = BaselSlurmEnvironment( - partition="infai_1", - email="jendrik.seipp@unibas.ch", - export=["PATH", "DOWNWARD_BENCHMARKS"]) - -if common_setup.is_test_run(): - SUITE = IssueExperiment.DEFAULT_TEST_SUITE - ENVIRONMENT = LocalEnvironment(processes=1) - -exp = IssueExperiment( - revisions=REVISIONS, - configs=CONFIGS, - environment=ENVIRONMENT, -) -exp.add_suite(BENCHMARKS_DIR, SUITE) - -exp.add_parser('lab_driver_parser', exp.LAB_DRIVER_PARSER) -exp.add_parser('exitcode_parser', exp.EXITCODE_PARSER) -#exp.add_parser('translator_parser', exp.TRANSLATOR_PARSER) -exp.add_parser('single_search_parser', exp.SINGLE_SEARCH_PARSER) - -#exp.add_absolute_report_step() -exp.add_comparison_table_step() - -for attribute in ["memory", "total_time"]: - for config in CONFIGS: - exp.add_report( - RelativeScatterPlotReport( - attributes=[attribute], - filter_algorithm=["{}-{}".format(rev, config.nick) for rev in REVISIONS], - get_category=lambda run1, run2: run1.get("domain"), - ), - outfile="{}-{}-{}-{}-{}.png".format(exp.name, attribute, config.nick, *REVISIONS) - ) - -exp.run_steps() diff --git a/experiments/issue778/v1-opt.py b/experiments/issue778/v1-opt.py deleted file mode 100755 index ad45d995db..0000000000 --- a/experiments/issue778/v1-opt.py +++ /dev/null @@ -1,55 +0,0 @@ -#! 
/usr/bin/env python -# -*- coding: utf-8 -*- - -import os - -from lab.environments import LocalEnvironment, BaselSlurmEnvironment - -import common_setup -from common_setup import IssueConfig, IssueExperiment -from relativescatter import RelativeScatterPlotReport - -DIR = os.path.dirname(os.path.abspath(__file__)) -BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"] -REVISIONS = ["issue778-base", "issue778-v1"] -CONFIGS = [ - IssueConfig('lmcut-sss-ratio-0.2', ['--search', 'astar(lmcut(), pruning=stubborn_sets_simple(min_required_pruning_ratio=0.2))']), - IssueConfig('lmcut-ssec-ratio-0.2', ['--search', 'astar(lmcut(), pruning=stubborn_sets_simple(min_required_pruning_ratio=0.2))']), -] -SUITE = common_setup.DEFAULT_OPTIMAL_SUITE -ENVIRONMENT = BaselSlurmEnvironment( - partition="infai_1", - email="jendrik.seipp@unibas.ch", - export=["PATH", "DOWNWARD_BENCHMARKS"]) - -if common_setup.is_test_run(): - SUITE = IssueExperiment.DEFAULT_TEST_SUITE - ENVIRONMENT = LocalEnvironment(processes=1) - -exp = IssueExperiment( - revisions=REVISIONS, - configs=CONFIGS, - environment=ENVIRONMENT, -) -exp.add_suite(BENCHMARKS_DIR, SUITE) - -exp.add_parser('lab_driver_parser', exp.LAB_DRIVER_PARSER) -exp.add_parser('exitcode_parser', exp.EXITCODE_PARSER) -#exp.add_parser('translator_parser', exp.TRANSLATOR_PARSER) -exp.add_parser('single_search_parser', exp.SINGLE_SEARCH_PARSER) - -#exp.add_absolute_report_step() -exp.add_comparison_table_step() - -for attribute in ["memory", "total_time"]: - for config in CONFIGS: - exp.add_report( - RelativeScatterPlotReport( - attributes=[attribute], - filter_algorithm=["{}-{}".format(rev, config.nick) for rev in REVISIONS], - get_category=lambda run1, run2: run1.get("domain"), - ), - outfile="{}-{}-{}-{}-{}.png".format(exp.name, attribute, config.nick, *REVISIONS) - ) - -exp.run_steps() diff --git a/experiments/issue781/common_setup.py b/experiments/issue781/common_setup.py deleted file mode 100644 index 4c010703f3..0000000000 --- 
a/experiments/issue781/common_setup.py +++ /dev/null @@ -1,385 +0,0 @@ -# -*- coding: utf-8 -*- - -import itertools -import os -import platform -import subprocess -import sys - -from lab.experiment import ARGPARSER -from lab import tools - -from downward.experiment import FastDownwardExperiment -from downward.reports.absolute import AbsoluteReport -from downward.reports.compare import ComparativeReport -from downward.reports.scatter import ScatterPlotReport - -from relativescatter import RelativeScatterPlotReport - - -def parse_args(): - ARGPARSER.add_argument( - "--test", - choices=["yes", "no", "auto"], - default="auto", - dest="test_run", - help="test experiment locally on a small suite if --test=yes or " - "--test=auto and we are not on a cluster") - return ARGPARSER.parse_args() - -ARGS = parse_args() - - -DEFAULT_OPTIMAL_SUITE = [ - 'airport', 'barman-opt11-strips', 'barman-opt14-strips', 'blocks', - 'childsnack-opt14-strips', 'depot', 'driverlog', - 'elevators-opt08-strips', 'elevators-opt11-strips', - 'floortile-opt11-strips', 'floortile-opt14-strips', 'freecell', - 'ged-opt14-strips', 'grid', 'gripper', 'hiking-opt14-strips', - 'logistics00', 'logistics98', 'miconic', 'movie', 'mprime', - 'mystery', 'nomystery-opt11-strips', 'openstacks-opt08-strips', - 'openstacks-opt11-strips', 'openstacks-opt14-strips', - 'openstacks-strips', 'parcprinter-08-strips', - 'parcprinter-opt11-strips', 'parking-opt11-strips', - 'parking-opt14-strips', 'pathways-noneg', 'pegsol-08-strips', - 'pegsol-opt11-strips', 'pipesworld-notankage', - 'pipesworld-tankage', 'psr-small', 'rovers', 'satellite', - 'scanalyzer-08-strips', 'scanalyzer-opt11-strips', - 'sokoban-opt08-strips', 'sokoban-opt11-strips', 'storage', - 'tetris-opt14-strips', 'tidybot-opt11-strips', - 'tidybot-opt14-strips', 'tpp', 'transport-opt08-strips', - 'transport-opt11-strips', 'transport-opt14-strips', - 'trucks-strips', 'visitall-opt11-strips', 'visitall-opt14-strips', - 'woodworking-opt08-strips', 
'woodworking-opt11-strips', - 'zenotravel'] - -DEFAULT_SATISFICING_SUITE = [ - 'airport', 'assembly', 'barman-sat11-strips', - 'barman-sat14-strips', 'blocks', 'cavediving-14-adl', - 'childsnack-sat14-strips', 'citycar-sat14-adl', 'depot', - 'driverlog', 'elevators-sat08-strips', 'elevators-sat11-strips', - 'floortile-sat11-strips', 'floortile-sat14-strips', 'freecell', - 'ged-sat14-strips', 'grid', 'gripper', 'hiking-sat14-strips', - 'logistics00', 'logistics98', 'maintenance-sat14-adl', 'miconic', - 'miconic-fulladl', 'miconic-simpleadl', 'movie', 'mprime', - 'mystery', 'nomystery-sat11-strips', 'openstacks', - 'openstacks-sat08-adl', 'openstacks-sat08-strips', - 'openstacks-sat11-strips', 'openstacks-sat14-strips', - 'openstacks-strips', 'optical-telegraphs', 'parcprinter-08-strips', - 'parcprinter-sat11-strips', 'parking-sat11-strips', - 'parking-sat14-strips', 'pathways', 'pathways-noneg', - 'pegsol-08-strips', 'pegsol-sat11-strips', 'philosophers', - 'pipesworld-notankage', 'pipesworld-tankage', 'psr-large', - 'psr-middle', 'psr-small', 'rovers', 'satellite', - 'scanalyzer-08-strips', 'scanalyzer-sat11-strips', 'schedule', - 'sokoban-sat08-strips', 'sokoban-sat11-strips', 'storage', - 'tetris-sat14-strips', 'thoughtful-sat14-strips', - 'tidybot-sat11-strips', 'tpp', 'transport-sat08-strips', - 'transport-sat11-strips', 'transport-sat14-strips', 'trucks', - 'trucks-strips', 'visitall-sat11-strips', 'visitall-sat14-strips', - 'woodworking-sat08-strips', 'woodworking-sat11-strips', - 'zenotravel'] - - -def get_script(): - """Get file name of main script.""" - return tools.get_script_path() - - -def get_script_dir(): - """Get directory of main script. - - Usually a relative directory (depends on how it was called by the user.)""" - return os.path.dirname(get_script()) - - -def get_experiment_name(): - """Get name for experiment. - - Derived from the absolute filename of the main script, e.g. 
- "/ham/spam/eggs.py" => "spam-eggs".""" - script = os.path.abspath(get_script()) - script_dir = os.path.basename(os.path.dirname(script)) - script_base = os.path.splitext(os.path.basename(script))[0] - return "%s-%s" % (script_dir, script_base) - - -def get_data_dir(): - """Get data dir for the experiment. - - This is the subdirectory "data" of the directory containing - the main script.""" - return os.path.join(get_script_dir(), "data", get_experiment_name()) - - -def get_repo_base(): - """Get base directory of the repository, as an absolute path. - - Search upwards in the directory tree from the main script until a - directory with a subdirectory named ".hg" is found. - - Abort if the repo base cannot be found.""" - path = os.path.abspath(get_script_dir()) - while os.path.dirname(path) != path: - if os.path.exists(os.path.join(path, ".hg")): - return path - path = os.path.dirname(path) - sys.exit("repo base could not be found") - - -def is_running_on_cluster(): - node = platform.node() - return node.endswith(".scicore.unibas.ch") or node.endswith(".cluster.bc2.ch") - - -def is_test_run(): - return ARGS.test_run == "yes" or ( - ARGS.test_run == "auto" and not is_running_on_cluster()) - - -def get_algo_nick(revision, config_nick): - return "{revision}-{config_nick}".format(**locals()) - - -class IssueConfig(object): - """Hold information about a planner configuration. - - See FastDownwardExperiment.add_algorithm() for documentation of the - constructor's options. 
- - """ - def __init__(self, nick, component_options, - build_options=None, driver_options=None): - self.nick = nick - self.component_options = component_options - self.build_options = build_options - self.driver_options = driver_options - - -class IssueExperiment(FastDownwardExperiment): - """Subclass of FastDownwardExperiment with some convenience features.""" - - DEFAULT_TEST_SUITE = ["depot:p01.pddl", "gripper:prob01.pddl"] - - DEFAULT_TABLE_ATTRIBUTES = [ - "cost", - "coverage", - "error", - "evaluations", - "expansions", - "expansions_until_last_jump", - "generated", - "memory", - "quality", - "run_dir", - "score_evaluations", - "score_expansions", - "score_generated", - "score_memory", - "score_search_time", - "score_total_time", - "search_time", - "total_time", - ] - - DEFAULT_SCATTER_PLOT_ATTRIBUTES = [ - "evaluations", - "expansions", - "expansions_until_last_jump", - "initial_h_value", - "memory", - "search_time", - "total_time", - ] - - PORTFOLIO_ATTRIBUTES = [ - "cost", - "coverage", - "error", - "plan_length", - "run_dir", - ] - - def __init__(self, revisions=None, configs=None, path=None, **kwargs): - """ - - You can either specify both *revisions* and *configs* or none - of them. If they are omitted, you will need to call - exp.add_algorithm() manually. - - If *revisions* is given, it must be a non-empty list of - revision identifiers, which specify which planner versions to - use in the experiment. The same versions are used for - translator, preprocessor and search. :: - - IssueExperiment(revisions=["issue123", "4b3d581643"], ...) - - If *configs* is given, it must be a non-empty list of - IssueConfig objects. :: - - IssueExperiment(..., configs=[ - IssueConfig("ff", ["--search", "eager_greedy(ff())"]), - IssueConfig( - "lama", [], - driver_options=["--alias", "seq-sat-lama-2011"]), - ]) - - If *path* is specified, it must be the path to where the - experiment should be built (e.g. - /home/john/experiments/issue123/exp01/). 
If omitted, the - experiment path is derived automatically from the main - script's filename. Example:: - - script = experiments/issue123/exp01.py --> - path = experiments/issue123/data/issue123-exp01/ - - """ - - path = path or get_data_dir() - - FastDownwardExperiment.__init__(self, path=path, **kwargs) - - if (revisions and not configs) or (not revisions and configs): - raise ValueError( - "please provide either both or none of revisions and configs") - - revisions = revisions or [] - configs = configs or [] - - for rev in revisions: - for config in configs: - self.add_algorithm( - get_algo_nick(rev, config.nick), - get_repo_base(), - rev, - config.component_options, - build_options=config.build_options, - driver_options=config.driver_options) - - self._revisions = revisions - self._configs = configs - - @classmethod - def _is_portfolio(cls, config_nick): - return "fdss" in config_nick - - @classmethod - def get_supported_attributes(cls, config_nick, attributes): - if cls._is_portfolio(config_nick): - return [attr for attr in attributes - if attr in cls.PORTFOLIO_ATTRIBUTES] - return attributes - - def add_absolute_report_step(self, **kwargs): - """Add step that makes an absolute report. - - Absolute reports are useful for experiments that don't compare - revisions. - - The report is written to the experiment evaluation directory. - - All *kwargs* will be passed to the AbsoluteReport class. If the - keyword argument *attributes* is not specified, a default list - of attributes is used. :: - - exp.add_absolute_report_step(attributes=["coverage"]) - - """ - kwargs.setdefault("attributes", self.DEFAULT_TABLE_ATTRIBUTES) - report = AbsoluteReport(**kwargs) - outfile = os.path.join( - self.eval_dir, - get_experiment_name() + "." 
+ report.output_format) - self.add_report(report, outfile=outfile) - self.add_step( - 'publish-absolute-report', subprocess.call, ['publish', outfile]) - - def add_comparison_table_step(self, **kwargs): - """Add a step that makes pairwise revision comparisons. - - Create comparative reports for all pairs of Fast Downward - revisions. Each report pairs up the runs of the same config and - lists the two absolute attribute values and their difference - for all attributes in kwargs["attributes"]. - - All *kwargs* will be passed to the CompareConfigsReport class. - If the keyword argument *attributes* is not specified, a - default list of attributes is used. :: - - exp.add_comparison_table_step(attributes=["coverage"]) - - """ - kwargs.setdefault("attributes", self.DEFAULT_TABLE_ATTRIBUTES) - - def make_comparison_tables(): - for rev1, rev2 in itertools.combinations(self._revisions, 2): - compared_configs = [] - for config in self._configs: - config_nick = config.nick - compared_configs.append( - ("%s-%s" % (rev1, config_nick), - "%s-%s" % (rev2, config_nick), - "Diff (%s)" % config_nick)) - report = ComparativeReport(compared_configs, **kwargs) - outfile = os.path.join( - self.eval_dir, - "%s-%s-%s-compare.%s" % ( - self.name, rev1, rev2, report.output_format)) - report(self.eval_dir, outfile) - - def publish_comparison_tables(): - for rev1, rev2 in itertools.combinations(self._revisions, 2): - outfile = os.path.join( - self.eval_dir, - "%s-%s-%s-compare.html" % (self.name, rev1, rev2)) - subprocess.call(["publish", outfile]) - - self.add_step("make-comparison-tables", make_comparison_tables) - self.add_step( - "publish-comparison-tables", publish_comparison_tables) - - def add_scatter_plot_step(self, relative=False, attributes=None): - """Add step creating (relative) scatter plots for all revision pairs. - - Create a scatter plot for each combination of attribute, - configuration and revisions pair. 
If *attributes* is not - specified, a list of common scatter plot attributes is used. - For portfolios all attributes except "cost", "coverage" and - "plan_length" will be ignored. :: - - exp.add_scatter_plot_step(attributes=["expansions"]) - - """ - if relative: - report_class = RelativeScatterPlotReport - scatter_dir = os.path.join(self.eval_dir, "scatter-relative") - step_name = "make-relative-scatter-plots" - else: - report_class = ScatterPlotReport - scatter_dir = os.path.join(self.eval_dir, "scatter-absolute") - step_name = "make-absolute-scatter-plots" - if attributes is None: - attributes = self.DEFAULT_SCATTER_PLOT_ATTRIBUTES - - def make_scatter_plot(config_nick, rev1, rev2, attribute): - name = "-".join([self.name, rev1, rev2, attribute, config_nick]) - print "Make scatter plot for", name - algo1 = "{}-{}".format(rev1, config_nick) - algo2 = "{}-{}".format(rev2, config_nick) - report = report_class( - filter_config=[algo1, algo2], - attributes=[attribute], - get_category=lambda run1, run2: run1["domain"], - legend_location=(1.3, 0.5)) - report( - self.eval_dir, - os.path.join(scatter_dir, rev1 + "-" + rev2, name)) - - def make_scatter_plots(): - for config in self._configs: - for rev1, rev2 in itertools.combinations(self._revisions, 2): - for attribute in self.get_supported_attributes( - config.nick, attributes): - make_scatter_plot(config.nick, rev1, rev2, attribute) - - self.add_step(step_name, make_scatter_plots) diff --git a/experiments/issue781/parser.py b/experiments/issue781/parser.py deleted file mode 100755 index 2f817c745c..0000000000 --- a/experiments/issue781/parser.py +++ /dev/null @@ -1,13 +0,0 @@ -#! 
/usr/bin/env python - -from lab.parser import Parser - - -def main(): - print 'Running custom parser' - parser = Parser() - parser.add_pattern('time_for_pruning_operators', r'^Time for pruning operators: (.+)s$', type=float, flags="M") - parser.parse() - - -main() diff --git a/experiments/issue781/relativescatter.py b/experiments/issue781/relativescatter.py deleted file mode 100644 index f74cb6e721..0000000000 --- a/experiments/issue781/relativescatter.py +++ /dev/null @@ -1,105 +0,0 @@ -# -*- coding: utf-8 -*- - -from collections import defaultdict - -from matplotlib import ticker - -from downward.reports.scatter import ScatterPlotReport -from downward.reports.plot import PlotReport, Matplotlib, MatplotlibPlot - - -# TODO: handle outliers - -# TODO: this is mostly copied from ScatterMatplotlib (scatter.py) -class RelativeScatterMatplotlib(Matplotlib): - @classmethod - def _plot(cls, report, axes, categories, styles): - # Display grid - axes.grid(b=True, linestyle='-', color='0.75') - - has_points = False - # Generate the scatter plots - for category, coords in sorted(categories.items()): - X, Y = zip(*coords) - axes.scatter(X, Y, s=42, label=category, **styles[category]) - if X and Y: - has_points = True - - if report.xscale == 'linear' or report.yscale == 'linear': - plot_size = report.missing_val * 1.01 - else: - plot_size = report.missing_val * 1.25 - - # make 5 ticks above and below 1 - yticks = [] - tick_step = report.ylim_top**(1/5.0) - for i in xrange(-5, 6): - yticks.append(tick_step**i) - axes.set_yticks(yticks) - axes.get_yaxis().set_major_formatter(ticker.ScalarFormatter()) - - axes.set_xlim(report.xlim_left or -1, report.xlim_right or plot_size) - axes.set_ylim(report.ylim_bottom or -1, report.ylim_top or plot_size) - - for axis in [axes.xaxis, axes.yaxis]: - MatplotlibPlot.change_axis_formatter( - axis, - report.missing_val if report.show_missing else None) - return has_points - - -class RelativeScatterPlotReport(ScatterPlotReport): - """ - Generate a 
scatter plot that shows a relative comparison of two - algorithms with regard to the given attribute. The attribute value - of algorithm 1 is shown on the x-axis and the relation to the value - of algorithm 2 on the y-axis. - """ - - def __init__(self, show_missing=True, get_category=None, **kwargs): - ScatterPlotReport.__init__(self, show_missing, get_category, **kwargs) - if self.output_format == 'tex': - raise "not supported" - else: - self.writer = RelativeScatterMatplotlib - - def _fill_categories(self, runs): - # We discard the *runs* parameter. - # Map category names to value tuples - categories = defaultdict(list) - self.ylim_bottom = 2 - self.ylim_top = 0.5 - self.xlim_left = float("inf") - for (domain, problem), runs in self.problem_runs.items(): - if len(runs) != 2: - continue - run1, run2 = runs - assert (run1['algorithm'] == self.algorithms[0] and - run2['algorithm'] == self.algorithms[1]) - val1 = run1.get(self.attribute) - val2 = run2.get(self.attribute) - if val1 is None or val2 is None: - continue - category = self.get_category(run1, run2) - assert val1 > 0, (domain, problem, self.algorithms[0], val1) - assert val2 > 0, (domain, problem, self.algorithms[1], val2) - x = val1 - y = val2 / float(val1) - - categories[category].append((x, y)) - - self.ylim_top = max(self.ylim_top, y) - self.ylim_bottom = min(self.ylim_bottom, y) - self.xlim_left = min(self.xlim_left, x) - - # center around 1 - if self.ylim_bottom < 1: - self.ylim_top = max(self.ylim_top, 1 / float(self.ylim_bottom)) - if self.ylim_top > 1: - self.ylim_bottom = min(self.ylim_bottom, 1 / float(self.ylim_top)) - return categories - - def _set_scales(self, xscale, yscale): - # ScatterPlot uses log-scaling on the x-axis by default. 
- PlotReport._set_scales( - self, xscale or self.attribute.scale or 'log', 'log') diff --git a/experiments/issue781/v1-blind.py b/experiments/issue781/v1-blind.py deleted file mode 100755 index 1d884ba74c..0000000000 --- a/experiments/issue781/v1-blind.py +++ /dev/null @@ -1,47 +0,0 @@ -#! /usr/bin/env python -# -*- coding: utf-8 -*- - -import os - -from lab.environments import LocalEnvironment, BaselSlurmEnvironment - -import common_setup -from common_setup import IssueConfig, IssueExperiment -from relativescatter import RelativeScatterPlotReport - -DIR = os.path.dirname(os.path.abspath(__file__)) -BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"] -REVISIONS = ["issue781-v1"] -CONFIGS = [ - IssueConfig( - heuristic + "-" + pruning, - ["--search", "astar({heuristic}(), pruning=stubborn_sets_{pruning}())".format(**locals())]) - for heuristic in ["blind"] - for pruning in ["ec", "queue", "simple"] -] -SUITE = common_setup.DEFAULT_OPTIMAL_SUITE -ENVIRONMENT = BaselSlurmEnvironment( - partition="infai_1", - email="jendrik.seipp@unibas.ch", - export=["PATH", "DOWNWARD_BENCHMARKS"]) - -if common_setup.is_test_run(): - SUITE = IssueExperiment.DEFAULT_TEST_SUITE - ENVIRONMENT = LocalEnvironment(processes=1) - -exp = IssueExperiment( - revisions=REVISIONS, - configs=CONFIGS, - environment=ENVIRONMENT, -) -exp.add_suite(BENCHMARKS_DIR, SUITE) - -exp.add_parser('lab_driver_parser', exp.LAB_DRIVER_PARSER) -exp.add_parser('exitcode_parser', exp.EXITCODE_PARSER) -#exp.add_parser('translator_parser', exp.TRANSLATOR_PARSER) -exp.add_parser('single_search_parser', exp.SINGLE_SEARCH_PARSER) - -exp.add_absolute_report_step() -#exp.add_comparison_table_step() - -exp.run_steps() diff --git a/experiments/issue781/v2-v3-combined.py b/experiments/issue781/v2-v3-combined.py deleted file mode 100755 index 61dab0e9b7..0000000000 --- a/experiments/issue781/v2-v3-combined.py +++ /dev/null @@ -1,54 +0,0 @@ -#! 
/usr/bin/env python - -from collections import defaultdict -import os.path -import sys - -import common_setup - -FILE = os.path.abspath(__file__) -DIR = os.path.dirname(FILE) - -FILENAME = os.path.splitext(os.path.basename(__file__))[0] -EXPS = os.path.join(DIR, "data") -EXPPATH = os.path.join(EXPS, FILENAME) - -def remove_file(filename): - try: - os.remove(filename) - except OSError: - pass - -exp = common_setup.IssueExperiment() -exp.steps = [] -exp.add_step( - 'remove-combined-properties', - remove_file, - os.path.join(exp.eval_dir, "properties")) - -exp.add_fetcher(os.path.join(EXPS, "issue781-v2-eval"), merge=True) -exp.add_fetcher(os.path.join(EXPS, "issue781-v3-queue-ratio-eval"), merge=True) - -ATTRIBUTES = [ - "cost", "error", "run_dir", "search_start_time", - "search_start_memory", "coverage", "expansions_until_last_jump", - "total_time", "initial_h_value", "search_time", "abstractions", - "stored_heuristics", "stored_values", "stored_lookup_tables", -] -exp.add_absolute_report_step( - filter_algorithm=[ - "issue781-v2-blind-ec-min-0.0", - "issue781-v2-blind-ec-min-0.2", - "issue781-v2-blind-queue-min-0.0", - "issue781-v3-blind-queue-min-0.2", - "issue781-v2-blind-simple-min-0.0", - "issue781-v2-blind-simple-min-0.2", - "issue781-v2-lmcut-ec-min-0.0", - "issue781-v2-lmcut-ec-min-0.2", - "issue781-v2-lmcut-queue-min-0.0", - "issue781-v3-lmcut-queue-min-0.2", - "issue781-v2-lmcut-simple-min-0.0", - "issue781-v2-lmcut-simple-min-0.2"], - attributes=common_setup.IssueExperiment.DEFAULT_TABLE_ATTRIBUTES + ["time_for_pruning_operators"]) - -exp.run_steps() diff --git a/experiments/issue781/v2.py b/experiments/issue781/v2.py deleted file mode 100755 index cec8931f5c..0000000000 --- a/experiments/issue781/v2.py +++ /dev/null @@ -1,50 +0,0 @@ -#! 
/usr/bin/env python -# -*- coding: utf-8 -*- - -import os - -from lab.environments import LocalEnvironment, BaselSlurmEnvironment - -import common_setup -from common_setup import IssueConfig, IssueExperiment -from relativescatter import RelativeScatterPlotReport - -DIR = os.path.dirname(os.path.abspath(__file__)) -BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"] -REVISIONS = ["issue781-v2"] -CONFIGS = [ - IssueConfig( - "{heuristic}-{pruning}-min-{min_ratio}".format(**locals()), - ["--search", "astar({heuristic}(), pruning=stubborn_sets_{pruning}(min_required_pruning_ratio={min_ratio}))".format(**locals())]) - for heuristic in ["blind", "lmcut"] - for pruning in ["ec", "queue", "simple"] - for min_ratio in [0.0, 0.2] -] -SUITE = common_setup.DEFAULT_OPTIMAL_SUITE -ENVIRONMENT = BaselSlurmEnvironment( - partition="infai_1", - email="jendrik.seipp@unibas.ch", - export=["PATH", "DOWNWARD_BENCHMARKS"]) - -if common_setup.is_test_run(): - SUITE = IssueExperiment.DEFAULT_TEST_SUITE - ENVIRONMENT = LocalEnvironment(processes=1) - -exp = IssueExperiment( - revisions=REVISIONS, - configs=CONFIGS, - environment=ENVIRONMENT, -) -exp.add_suite(BENCHMARKS_DIR, SUITE) - -exp.add_parser('lab_driver_parser', exp.LAB_DRIVER_PARSER) -exp.add_parser('exitcode_parser', exp.EXITCODE_PARSER) -#exp.add_parser('translator_parser', exp.TRANSLATOR_PARSER) -exp.add_parser('single_search_parser', exp.SINGLE_SEARCH_PARSER) -exp.add_parser('pruning_parser', os.path.join(common_setup.get_script_dir(), "parser.py")) - -exp.add_absolute_report_step( - attributes=IssueExperiment.DEFAULT_TABLE_ATTRIBUTES + ["time_for_pruning_operators"]) -#exp.add_comparison_table_step() - -exp.run_steps() diff --git a/experiments/issue781/v3-queue-ratio.py b/experiments/issue781/v3-queue-ratio.py deleted file mode 100755 index 9cb779cfb0..0000000000 --- a/experiments/issue781/v3-queue-ratio.py +++ /dev/null @@ -1,50 +0,0 @@ -#! 
/usr/bin/env python -# -*- coding: utf-8 -*- - -import os - -from lab.environments import LocalEnvironment, BaselSlurmEnvironment - -import common_setup -from common_setup import IssueConfig, IssueExperiment -from relativescatter import RelativeScatterPlotReport - -DIR = os.path.dirname(os.path.abspath(__file__)) -BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"] -REVISIONS = ["issue781-v3"] -CONFIGS = [ - IssueConfig( - "{heuristic}-{pruning}-min-{min_ratio}".format(**locals()), - ["--search", "astar({heuristic}(), pruning=stubborn_sets_{pruning}(min_required_pruning_ratio={min_ratio}))".format(**locals())]) - for heuristic in ["blind", "lmcut"] - for pruning in ["queue"] - for min_ratio in [0.2] -] -SUITE = common_setup.DEFAULT_OPTIMAL_SUITE -ENVIRONMENT = BaselSlurmEnvironment( - partition="infai_1", - email="jendrik.seipp@unibas.ch", - export=["PATH", "DOWNWARD_BENCHMARKS"]) - -if common_setup.is_test_run(): - SUITE = IssueExperiment.DEFAULT_TEST_SUITE - ENVIRONMENT = LocalEnvironment(processes=1) - -exp = IssueExperiment( - revisions=REVISIONS, - configs=CONFIGS, - environment=ENVIRONMENT, -) -exp.add_suite(BENCHMARKS_DIR, SUITE) - -exp.add_parser('lab_driver_parser', exp.LAB_DRIVER_PARSER) -exp.add_parser('exitcode_parser', exp.EXITCODE_PARSER) -#exp.add_parser('translator_parser', exp.TRANSLATOR_PARSER) -exp.add_parser('single_search_parser', exp.SINGLE_SEARCH_PARSER) -exp.add_parser('pruning_parser', os.path.join(common_setup.get_script_dir(), "parser.py")) - -exp.add_absolute_report_step( - attributes=IssueExperiment.DEFAULT_TABLE_ATTRIBUTES + ["time_for_pruning_operators"]) -#exp.add_comparison_table_step() - -exp.run_steps() diff --git a/experiments/issue781/v4-extensions.py b/experiments/issue781/v4-extensions.py deleted file mode 100755 index d6102cd2d0..0000000000 --- a/experiments/issue781/v4-extensions.py +++ /dev/null @@ -1,54 +0,0 @@ -#! 
/usr/bin/env python -# -*- coding: utf-8 -*- - -import os - -from lab.environments import LocalEnvironment, BaselSlurmEnvironment - -import common_setup -from common_setup import IssueConfig, IssueExperiment -from relativescatter import RelativeScatterPlotReport - -DIR = os.path.dirname(os.path.abspath(__file__)) -BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"] -REVISIONS = ["issue781-v3", "issue781-v4-wss", "issue781-v4-varmark", "issue781-v4-opportunistic"] -CONFIGS = [ - IssueConfig( - "{heuristic}-{pruning}".format(**locals()), - ["--search", "astar({heuristic}(), pruning=stubborn_sets_{pruning}())".format(**locals())]) - for heuristic in ["blind", "lmcut"] - for pruning in ["queue"] -] -SUITE = common_setup.DEFAULT_OPTIMAL_SUITE -ENVIRONMENT = BaselSlurmEnvironment( - partition="infai_1", - email="jendrik.seipp@unibas.ch", - export=["PATH", "DOWNWARD_BENCHMARKS"]) - -if common_setup.is_test_run(): - SUITE = IssueExperiment.DEFAULT_TEST_SUITE - ENVIRONMENT = LocalEnvironment(processes=1) - -exp = IssueExperiment( - revisions=REVISIONS, - configs=CONFIGS, - environment=ENVIRONMENT, -) -exp.add_suite(BENCHMARKS_DIR, SUITE) - -exp.add_parser('lab_driver_parser', exp.LAB_DRIVER_PARSER) -exp.add_parser('exitcode_parser', exp.EXITCODE_PARSER) -#exp.add_parser('translator_parser', exp.TRANSLATOR_PARSER) -exp.add_parser('single_search_parser', exp.SINGLE_SEARCH_PARSER) -exp.add_parser('pruning_parser', os.path.join(common_setup.get_script_dir(), "parser.py")) - -exp.add_absolute_report_step( - attributes=IssueExperiment.DEFAULT_TABLE_ATTRIBUTES + ["time_for_pruning_operators"]) -exp.add_report(common_setup.ComparativeReport([ - ("issue781-v3-{heuristic}-queue".format(**locals()), - "issue781-v4-{extension}-{heuristic}-queue".format(**locals())) - for heuristic in ["blind", "lmcut"] - for extension in ["wss", "varmark", "opportunistic"]], - attributes=IssueExperiment.DEFAULT_TABLE_ATTRIBUTES + ["time_for_pruning_operators"])) - -exp.run_steps() diff --git 
a/experiments/issue786/common_setup.py b/experiments/issue786/common_setup.py deleted file mode 100644 index 338314a650..0000000000 --- a/experiments/issue786/common_setup.py +++ /dev/null @@ -1,384 +0,0 @@ -# -*- coding: utf-8 -*- - -import itertools -import os -import platform -import subprocess -import sys - -from lab.experiment import ARGPARSER -from lab import tools - -from downward.experiment import FastDownwardExperiment -from downward.reports.absolute import AbsoluteReport -from downward.reports.compare import ComparativeReport -from downward.reports.scatter import ScatterPlotReport - -from relativescatter import RelativeScatterPlotReport - - -def parse_args(): - ARGPARSER.add_argument( - "--test", - choices=["yes", "no", "auto"], - default="auto", - dest="test_run", - help="test experiment locally on a small suite if --test=yes or " - "--test=auto and we are not on a cluster") - return ARGPARSER.parse_args() - -ARGS = parse_args() - - -DEFAULT_OPTIMAL_SUITE = [ - 'airport', 'barman-opt11-strips', 'barman-opt14-strips', 'blocks', - 'childsnack-opt14-strips', 'depot', 'driverlog', - 'elevators-opt08-strips', 'elevators-opt11-strips', - 'floortile-opt11-strips', 'floortile-opt14-strips', 'freecell', - 'ged-opt14-strips', 'grid', 'gripper', 'hiking-opt14-strips', - 'logistics00', 'logistics98', 'miconic', 'movie', 'mprime', - 'mystery', 'nomystery-opt11-strips', 'openstacks-opt08-strips', - 'openstacks-opt11-strips', 'openstacks-opt14-strips', - 'openstacks-strips', 'parcprinter-08-strips', - 'parcprinter-opt11-strips', 'parking-opt11-strips', - 'parking-opt14-strips', 'pathways-noneg', 'pegsol-08-strips', - 'pegsol-opt11-strips', 'pipesworld-notankage', - 'pipesworld-tankage', 'psr-small', 'rovers', 'satellite', - 'scanalyzer-08-strips', 'scanalyzer-opt11-strips', - 'sokoban-opt08-strips', 'sokoban-opt11-strips', 'storage', - 'tetris-opt14-strips', 'tidybot-opt11-strips', - 'tidybot-opt14-strips', 'tpp', 'transport-opt08-strips', - 'transport-opt11-strips', 
'transport-opt14-strips', - 'trucks-strips', 'visitall-opt11-strips', 'visitall-opt14-strips', - 'woodworking-opt08-strips', 'woodworking-opt11-strips', - 'zenotravel'] - -DEFAULT_SATISFICING_SUITE = [ - 'airport', 'assembly', 'barman-sat11-strips', - 'barman-sat14-strips', 'blocks', 'cavediving-14-adl', - 'childsnack-sat14-strips', 'citycar-sat14-adl', 'depot', - 'driverlog', 'elevators-sat08-strips', 'elevators-sat11-strips', - 'floortile-sat11-strips', 'floortile-sat14-strips', 'freecell', - 'ged-sat14-strips', 'grid', 'gripper', 'hiking-sat14-strips', - 'logistics00', 'logistics98', 'maintenance-sat14-adl', 'miconic', - 'miconic-fulladl', 'miconic-simpleadl', 'movie', 'mprime', - 'mystery', 'nomystery-sat11-strips', 'openstacks', - 'openstacks-sat08-adl', 'openstacks-sat08-strips', - 'openstacks-sat11-strips', 'openstacks-sat14-strips', - 'openstacks-strips', 'optical-telegraphs', 'parcprinter-08-strips', - 'parcprinter-sat11-strips', 'parking-sat11-strips', - 'parking-sat14-strips', 'pathways', 'pathways-noneg', - 'pegsol-08-strips', 'pegsol-sat11-strips', 'philosophers', - 'pipesworld-notankage', 'pipesworld-tankage', 'psr-large', - 'psr-middle', 'psr-small', 'rovers', 'satellite', - 'scanalyzer-08-strips', 'scanalyzer-sat11-strips', 'schedule', - 'sokoban-sat08-strips', 'sokoban-sat11-strips', 'storage', - 'tetris-sat14-strips', 'thoughtful-sat14-strips', - 'tidybot-sat11-strips', 'tpp', 'transport-sat08-strips', - 'transport-sat11-strips', 'transport-sat14-strips', 'trucks', - 'trucks-strips', 'visitall-sat11-strips', 'visitall-sat14-strips', - 'woodworking-sat08-strips', 'woodworking-sat11-strips', - 'zenotravel'] - - -def get_script(): - """Get file name of main script.""" - return tools.get_script_path() - - -def get_script_dir(): - """Get directory of main script. - - Usually a relative directory (depends on how it was called by the user.)""" - return os.path.dirname(get_script()) - - -def get_experiment_name(): - """Get name for experiment. 
- - Derived from the absolute filename of the main script, e.g. - "/ham/spam/eggs.py" => "spam-eggs".""" - script = os.path.abspath(get_script()) - script_dir = os.path.basename(os.path.dirname(script)) - script_base = os.path.splitext(os.path.basename(script))[0] - return "%s-%s" % (script_dir, script_base) - - -def get_data_dir(): - """Get data dir for the experiment. - - This is the subdirectory "data" of the directory containing - the main script.""" - return os.path.join(get_script_dir(), "data", get_experiment_name()) - - -def get_repo_base(): - """Get base directory of the repository, as an absolute path. - - Search upwards in the directory tree from the main script until a - directory with a subdirectory named ".hg" is found. - - Abort if the repo base cannot be found.""" - path = os.path.abspath(get_script_dir()) - while os.path.dirname(path) != path: - if os.path.exists(os.path.join(path, ".hg")): - return path - path = os.path.dirname(path) - sys.exit("repo base could not be found") - - -def is_running_on_cluster(): - node = platform.node() - return node.endswith(".scicore.unibas.ch") or node.endswith(".cluster.bc2.ch") - - -def is_test_run(): - return ARGS.test_run == "yes" or ( - ARGS.test_run == "auto" and not is_running_on_cluster()) - - -def get_algo_nick(revision, config_nick): - return "{revision}-{config_nick}".format(**locals()) - - -class IssueConfig(object): - """Hold information about a planner configuration. - - See FastDownwardExperiment.add_algorithm() for documentation of the - constructor's options. 
- - """ - def __init__(self, nick, component_options, - build_options=None, driver_options=None): - self.nick = nick - self.component_options = component_options - self.build_options = build_options - self.driver_options = driver_options - - -class IssueExperiment(FastDownwardExperiment): - """Subclass of FastDownwardExperiment with some convenience features.""" - - DEFAULT_TEST_SUITE = ["depot:p01.pddl", "gripper:prob01.pddl"] - - DEFAULT_TABLE_ATTRIBUTES = [ - "cost", - "coverage", - "error", - "evaluations", - "expansions", - "expansions_until_last_jump", - "generated", - "memory", - "planner_memory", - "planner_time", - "quality", - "run_dir", - "score_evaluations", - "score_expansions", - "score_generated", - "score_memory", - "score_search_time", - "score_total_time", - "search_time", - "total_time", - ] - - DEFAULT_SCATTER_PLOT_ATTRIBUTES = [ - "evaluations", - "expansions", - "expansions_until_last_jump", - "initial_h_value", - "memory", - "search_time", - "total_time", - ] - - PORTFOLIO_ATTRIBUTES = [ - "cost", - "coverage", - "error", - "plan_length", - "run_dir", - ] - - def __init__(self, revisions=None, configs=None, path=None, **kwargs): - """ - - You can either specify both *revisions* and *configs* or none - of them. If they are omitted, you will need to call - exp.add_algorithm() manually. - - If *revisions* is given, it must be a non-empty list of - revision identifiers, which specify which planner versions to - use in the experiment. The same versions are used for - translator, preprocessor and search. :: - - IssueExperiment(revisions=["issue123", "4b3d581643"], ...) - - If *configs* is given, it must be a non-empty list of - IssueConfig objects. :: - - IssueExperiment(..., configs=[ - IssueConfig("ff", ["--search", "eager_greedy(ff())"]), - IssueConfig( - "lama", [], - driver_options=["--alias", "seq-sat-lama-2011"]), - ]) - - If *path* is specified, it must be the path to where the - experiment should be built (e.g. 
- /home/john/experiments/issue123/exp01/). If omitted, the - experiment path is derived automatically from the main - script's filename. Example:: - - script = experiments/issue123/exp01.py --> - path = experiments/issue123/data/issue123-exp01/ - - """ - - path = path or get_data_dir() - - FastDownwardExperiment.__init__(self, path=path, **kwargs) - - if (revisions and not configs) or (not revisions and configs): - raise ValueError( - "please provide either both or none of revisions and configs") - - for rev in revisions: - for config in configs: - self.add_algorithm( - get_algo_nick(rev, config.nick), - get_repo_base(), - rev, - config.component_options, - build_options=config.build_options, - driver_options=config.driver_options) - - self._revisions = revisions - self._configs = configs - - @classmethod - def _is_portfolio(cls, config_nick): - return "fdss" in config_nick - - @classmethod - def get_supported_attributes(cls, config_nick, attributes): - if cls._is_portfolio(config_nick): - return [attr for attr in attributes - if attr in cls.PORTFOLIO_ATTRIBUTES] - return attributes - - def add_absolute_report_step(self, **kwargs): - """Add step that makes an absolute report. - - Absolute reports are useful for experiments that don't compare - revisions. - - The report is written to the experiment evaluation directory. - - All *kwargs* will be passed to the AbsoluteReport class. If the - keyword argument *attributes* is not specified, a default list - of attributes is used. :: - - exp.add_absolute_report_step(attributes=["coverage"]) - - """ - kwargs.setdefault("attributes", self.DEFAULT_TABLE_ATTRIBUTES) - report = AbsoluteReport(**kwargs) - outfile = os.path.join( - self.eval_dir, - get_experiment_name() + "." 
+ report.output_format) - self.add_report(report, outfile=outfile) - self.add_step( - 'publish-absolute-report', subprocess.call, ['publish', outfile]) - - def add_comparison_table_step(self, **kwargs): - """Add a step that makes pairwise revision comparisons. - - Create comparative reports for all pairs of Fast Downward - revisions. Each report pairs up the runs of the same config and - lists the two absolute attribute values and their difference - for all attributes in kwargs["attributes"]. - - All *kwargs* will be passed to the CompareConfigsReport class. - If the keyword argument *attributes* is not specified, a - default list of attributes is used. :: - - exp.add_comparison_table_step(attributes=["coverage"]) - - """ - kwargs.setdefault("attributes", self.DEFAULT_TABLE_ATTRIBUTES) - - def make_comparison_tables(): - for rev1, rev2 in itertools.combinations(self._revisions, 2): - compared_configs = [] - for config in self._configs: - config_nick = config.nick - compared_configs.append( - ("%s-%s" % (rev1, config_nick), - "%s-%s" % (rev2, config_nick), - "Diff (%s)" % config_nick)) - report = ComparativeReport(compared_configs, **kwargs) - outfile = os.path.join( - self.eval_dir, - "%s-%s-%s-compare.%s" % ( - self.name, rev1, rev2, report.output_format)) - report(self.eval_dir, outfile) - - def publish_comparison_tables(): - for rev1, rev2 in itertools.combinations(self._revisions, 2): - outfile = os.path.join( - self.eval_dir, - "%s-%s-%s-compare.html" % (self.name, rev1, rev2)) - subprocess.call(["publish", outfile]) - - self.add_step("make-comparison-tables", make_comparison_tables) - self.add_step( - "publish-comparison-tables", publish_comparison_tables) - - def add_scatter_plot_step(self, relative=False, attributes=None): - """Add step creating (relative) scatter plots for all revision pairs. - - Create a scatter plot for each combination of attribute, - configuration and revisions pair. 
If *attributes* is not - specified, a list of common scatter plot attributes is used. - For portfolios all attributes except "cost", "coverage" and - "plan_length" will be ignored. :: - - exp.add_scatter_plot_step(attributes=["expansions"]) - - """ - if relative: - report_class = RelativeScatterPlotReport - scatter_dir = os.path.join(self.eval_dir, "scatter-relative") - step_name = "make-relative-scatter-plots" - else: - report_class = ScatterPlotReport - scatter_dir = os.path.join(self.eval_dir, "scatter-absolute") - step_name = "make-absolute-scatter-plots" - if attributes is None: - attributes = self.DEFAULT_SCATTER_PLOT_ATTRIBUTES - - def make_scatter_plot(config_nick, rev1, rev2, attribute): - name = "-".join([self.name, rev1, rev2, attribute, config_nick]) - print "Make scatter plot for", name - algo1 = "{}-{}".format(rev1, config_nick) - algo2 = "{}-{}".format(rev2, config_nick) - report = report_class( - filter_config=[algo1, algo2], - attributes=[attribute], - get_category=lambda run1, run2: run1["domain"], - legend_location=(1.3, 0.5)) - report( - self.eval_dir, - os.path.join(scatter_dir, rev1 + "-" + rev2, name)) - - def make_scatter_plots(): - for config in self._configs: - for rev1, rev2 in itertools.combinations(self._revisions, 2): - for attribute in self.get_supported_attributes( - config.nick, attributes): - make_scatter_plot(config.nick, rev1, rev2, attribute) - - self.add_step(step_name, make_scatter_plots) diff --git a/experiments/issue786/relativescatter.py b/experiments/issue786/relativescatter.py deleted file mode 100644 index f74cb6e721..0000000000 --- a/experiments/issue786/relativescatter.py +++ /dev/null @@ -1,105 +0,0 @@ -# -*- coding: utf-8 -*- - -from collections import defaultdict - -from matplotlib import ticker - -from downward.reports.scatter import ScatterPlotReport -from downward.reports.plot import PlotReport, Matplotlib, MatplotlibPlot - - -# TODO: handle outliers - -# TODO: this is mostly copied from ScatterMatplotlib 
(scatter.py) -class RelativeScatterMatplotlib(Matplotlib): - @classmethod - def _plot(cls, report, axes, categories, styles): - # Display grid - axes.grid(b=True, linestyle='-', color='0.75') - - has_points = False - # Generate the scatter plots - for category, coords in sorted(categories.items()): - X, Y = zip(*coords) - axes.scatter(X, Y, s=42, label=category, **styles[category]) - if X and Y: - has_points = True - - if report.xscale == 'linear' or report.yscale == 'linear': - plot_size = report.missing_val * 1.01 - else: - plot_size = report.missing_val * 1.25 - - # make 5 ticks above and below 1 - yticks = [] - tick_step = report.ylim_top**(1/5.0) - for i in xrange(-5, 6): - yticks.append(tick_step**i) - axes.set_yticks(yticks) - axes.get_yaxis().set_major_formatter(ticker.ScalarFormatter()) - - axes.set_xlim(report.xlim_left or -1, report.xlim_right or plot_size) - axes.set_ylim(report.ylim_bottom or -1, report.ylim_top or plot_size) - - for axis in [axes.xaxis, axes.yaxis]: - MatplotlibPlot.change_axis_formatter( - axis, - report.missing_val if report.show_missing else None) - return has_points - - -class RelativeScatterPlotReport(ScatterPlotReport): - """ - Generate a scatter plot that shows a relative comparison of two - algorithms with regard to the given attribute. The attribute value - of algorithm 1 is shown on the x-axis and the relation to the value - of algorithm 2 on the y-axis. - """ - - def __init__(self, show_missing=True, get_category=None, **kwargs): - ScatterPlotReport.__init__(self, show_missing, get_category, **kwargs) - if self.output_format == 'tex': - raise "not supported" - else: - self.writer = RelativeScatterMatplotlib - - def _fill_categories(self, runs): - # We discard the *runs* parameter. 
- # Map category names to value tuples - categories = defaultdict(list) - self.ylim_bottom = 2 - self.ylim_top = 0.5 - self.xlim_left = float("inf") - for (domain, problem), runs in self.problem_runs.items(): - if len(runs) != 2: - continue - run1, run2 = runs - assert (run1['algorithm'] == self.algorithms[0] and - run2['algorithm'] == self.algorithms[1]) - val1 = run1.get(self.attribute) - val2 = run2.get(self.attribute) - if val1 is None or val2 is None: - continue - category = self.get_category(run1, run2) - assert val1 > 0, (domain, problem, self.algorithms[0], val1) - assert val2 > 0, (domain, problem, self.algorithms[1], val2) - x = val1 - y = val2 / float(val1) - - categories[category].append((x, y)) - - self.ylim_top = max(self.ylim_top, y) - self.ylim_bottom = min(self.ylim_bottom, y) - self.xlim_left = min(self.xlim_left, x) - - # center around 1 - if self.ylim_bottom < 1: - self.ylim_top = max(self.ylim_top, 1 / float(self.ylim_bottom)) - if self.ylim_top > 1: - self.ylim_bottom = min(self.ylim_bottom, 1 / float(self.ylim_top)) - return categories - - def _set_scales(self, xscale, yscale): - # ScatterPlot uses log-scaling on the x-axis by default. - PlotReport._set_scales( - self, xscale or self.attribute.scale or 'log', 'log') diff --git a/experiments/issue786/v1.py b/experiments/issue786/v1.py deleted file mode 100755 index 55784c3f3c..0000000000 --- a/experiments/issue786/v1.py +++ /dev/null @@ -1,64 +0,0 @@ -#! 
/usr/bin/env python -# -*- coding: utf-8 -*- - -import os - -from lab.environments import LocalEnvironment, BaselSlurmEnvironment - -import common_setup -from common_setup import IssueConfig, IssueExperiment -from relativescatter import RelativeScatterPlotReport - -DIR = os.path.dirname(os.path.abspath(__file__)) -BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"] -REVISIONS = ["issue786-base", "issue786-v1"] -DRIVER_OPTIONS = ["--overall-time-limit", "5m"] -CONFIGS = [ - IssueConfig( - "cegar", - ["--search", "astar(cegar())"], - driver_options=DRIVER_OPTIONS), - IssueConfig( - "ipdb", - ["--search", "astar(ipdb())"], - driver_options=DRIVER_OPTIONS) -] -SUITE = common_setup.DEFAULT_OPTIMAL_SUITE -ENVIRONMENT = BaselSlurmEnvironment( - partition="infai_2", - email="jendrik.seipp@unibas.ch", - export=["PATH", "DOWNWARD_BENCHMARKS"]) - -if common_setup.is_test_run(): - SUITE = IssueExperiment.DEFAULT_TEST_SUITE - ENVIRONMENT = LocalEnvironment(processes=1) - -exp = IssueExperiment( - revisions=REVISIONS, - configs=CONFIGS, - environment=ENVIRONMENT, -) -exp.add_suite(BENCHMARKS_DIR, SUITE) - -exp.add_parser(exp.EXITCODE_PARSER) -exp.add_parser(exp.TRANSLATOR_PARSER) -exp.add_parser(exp.SINGLE_SEARCH_PARSER) -exp.add_parser(exp.PLANNER_PARSER) - -exp.add_step('build', exp.build) -exp.add_step('start', exp.start_runs) -exp.add_fetcher(name='fetch') - -#exp.add_absolute_report_step() -exp.add_comparison_table_step() - -for attribute in ["memory", "total_time"]: - for config in CONFIGS: - exp.add_report( - RelativeScatterPlotReport( - attributes=[attribute], - filter_algorithm=["{}-{}".format(rev, config.nick) for rev in REVISIONS], - get_category=lambda run1, run2: run1.get("domain")), - outfile="{}-{}-{}-{}-{}.png".format(exp.name, attribute, config.nick, *REVISIONS)) - -exp.run_steps() diff --git a/experiments/issue786/v2.py b/experiments/issue786/v2.py deleted file mode 100755 index 69ab74b034..0000000000 --- a/experiments/issue786/v2.py +++ /dev/null @@ -1,68 +0,0 
@@ -#! /usr/bin/env python -# -*- coding: utf-8 -*- - -import os - -from lab.environments import LocalEnvironment, BaselSlurmEnvironment - -import common_setup -from common_setup import IssueConfig, IssueExperiment -from relativescatter import RelativeScatterPlotReport - -DIR = os.path.dirname(os.path.abspath(__file__)) -BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"] -REVISIONS = ["issue786-v2-base", "issue786-v2"] -DRIVER_OPTIONS = ["--overall-time-limit", "5m"] -CONFIGS = [ - IssueConfig( - "diverse-potentials", - ["--search", "astar(diverse_potentials())"], - driver_options=DRIVER_OPTIONS), - IssueConfig( - "sample-potentials", - ["--search", "astar(sample_based_potentials())"], - driver_options=DRIVER_OPTIONS), - IssueConfig( - "ipdb", - ["--search", "astar(ipdb())"], - driver_options=DRIVER_OPTIONS) -] -SUITE = common_setup.DEFAULT_OPTIMAL_SUITE -ENVIRONMENT = BaselSlurmEnvironment( - partition="infai_1", - email="jendrik.seipp@unibas.ch", - export=["PATH", "DOWNWARD_BENCHMARKS"]) - -if common_setup.is_test_run(): - SUITE = IssueExperiment.DEFAULT_TEST_SUITE - ENVIRONMENT = LocalEnvironment(processes=1) - -exp = IssueExperiment( - revisions=REVISIONS, - configs=CONFIGS, - environment=ENVIRONMENT, -) -exp.add_suite(BENCHMARKS_DIR, SUITE) - -exp.add_parser(exp.EXITCODE_PARSER) -exp.add_parser(exp.TRANSLATOR_PARSER) -exp.add_parser(exp.SINGLE_SEARCH_PARSER) -exp.add_parser(exp.PLANNER_PARSER) - -exp.add_step('build', exp.build) -exp.add_step('start', exp.start_runs) -exp.add_fetcher(name='fetch') - -#exp.add_absolute_report_step() -exp.add_comparison_table_step() - -for attribute in ["memory", "total_time"]: - for config in CONFIGS: - exp.add_report( - RelativeScatterPlotReport( - attributes=[attribute], - filter_algorithm=["{}-{}".format(rev, config.nick) for rev in REVISIONS], - get_category=lambda run1, run2: run1.get("domain")), - outfile="{}-{}-{}-{}-{}.png".format(exp.name, attribute, config.nick, *REVISIONS)) - -exp.run_steps() diff --git 
a/experiments/issue786/v3.py b/experiments/issue786/v3.py deleted file mode 100755 index 94783c6de6..0000000000 --- a/experiments/issue786/v3.py +++ /dev/null @@ -1,68 +0,0 @@ -#! /usr/bin/env python -# -*- coding: utf-8 -*- - -import os - -from lab.environments import LocalEnvironment, BaselSlurmEnvironment - -import common_setup -from common_setup import IssueConfig, IssueExperiment -from relativescatter import RelativeScatterPlotReport - -DIR = os.path.dirname(os.path.abspath(__file__)) -BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"] -REVISIONS = ["issue786-v3-base", "issue786-v3"] -DRIVER_OPTIONS = ["--overall-time-limit", "5m"] -CONFIGS = [ - IssueConfig( - "diverse-potentials", - ["--search", "astar(diverse_potentials())"], - driver_options=DRIVER_OPTIONS), - IssueConfig( - "cegar", - ["--search", "astar(cegar())"], - driver_options=DRIVER_OPTIONS), - IssueConfig( - "ipdb", - ["--search", "astar(ipdb())"], - driver_options=DRIVER_OPTIONS) -] -SUITE = common_setup.DEFAULT_OPTIMAL_SUITE -ENVIRONMENT = BaselSlurmEnvironment( - partition="infai_1", - email="jendrik.seipp@unibas.ch", - export=["PATH", "DOWNWARD_BENCHMARKS"]) - -if common_setup.is_test_run(): - SUITE = IssueExperiment.DEFAULT_TEST_SUITE - ENVIRONMENT = LocalEnvironment(processes=1) - -exp = IssueExperiment( - revisions=REVISIONS, - configs=CONFIGS, - environment=ENVIRONMENT, -) -exp.add_suite(BENCHMARKS_DIR, SUITE) - -exp.add_parser(exp.EXITCODE_PARSER) -exp.add_parser(exp.TRANSLATOR_PARSER) -exp.add_parser(exp.SINGLE_SEARCH_PARSER) -exp.add_parser(exp.PLANNER_PARSER) - -exp.add_step('build', exp.build) -exp.add_step('start', exp.start_runs) -exp.add_fetcher(name='fetch') - -#exp.add_absolute_report_step() -exp.add_comparison_table_step() - -for attribute in ["memory", "total_time"]: - for config in CONFIGS: - exp.add_report( - RelativeScatterPlotReport( - attributes=[attribute], - filter_algorithm=["{}-{}".format(rev, config.nick) for rev in REVISIONS], - get_category=lambda run1, run2: 
run1.get("domain")), - outfile="{}-{}-{}-{}-{}.png".format(exp.name, attribute, config.nick, *REVISIONS)) - -exp.run_steps() diff --git a/experiments/issue791/common_setup.py b/experiments/issue791/common_setup.py deleted file mode 100644 index 687019c482..0000000000 --- a/experiments/issue791/common_setup.py +++ /dev/null @@ -1,382 +0,0 @@ -# -*- coding: utf-8 -*- - -import itertools -import os -import platform -import subprocess -import sys - -from lab.experiment import ARGPARSER -from lab import tools - -from downward.experiment import FastDownwardExperiment -from downward.reports.absolute import AbsoluteReport -from downward.reports.compare import ComparativeReport -from downward.reports.scatter import ScatterPlotReport - -from relativescatter import RelativeScatterPlotReport - - -def parse_args(): - ARGPARSER.add_argument( - "--test", - choices=["yes", "no", "auto"], - default="auto", - dest="test_run", - help="test experiment locally on a small suite if --test=yes or " - "--test=auto and we are not on a cluster") - return ARGPARSER.parse_args() - -ARGS = parse_args() - - -DEFAULT_OPTIMAL_SUITE = [ - 'airport', 'barman-opt11-strips', 'barman-opt14-strips', 'blocks', - 'childsnack-opt14-strips', 'depot', 'driverlog', - 'elevators-opt08-strips', 'elevators-opt11-strips', - 'floortile-opt11-strips', 'floortile-opt14-strips', 'freecell', - 'ged-opt14-strips', 'grid', 'gripper', 'hiking-opt14-strips', - 'logistics00', 'logistics98', 'miconic', 'movie', 'mprime', - 'mystery', 'nomystery-opt11-strips', 'openstacks-opt08-strips', - 'openstacks-opt11-strips', 'openstacks-opt14-strips', - 'openstacks-strips', 'parcprinter-08-strips', - 'parcprinter-opt11-strips', 'parking-opt11-strips', - 'parking-opt14-strips', 'pathways-noneg', 'pegsol-08-strips', - 'pegsol-opt11-strips', 'pipesworld-notankage', - 'pipesworld-tankage', 'psr-small', 'rovers', 'satellite', - 'scanalyzer-08-strips', 'scanalyzer-opt11-strips', - 'sokoban-opt08-strips', 'sokoban-opt11-strips', 'storage', - 
'tetris-opt14-strips', 'tidybot-opt11-strips', - 'tidybot-opt14-strips', 'tpp', 'transport-opt08-strips', - 'transport-opt11-strips', 'transport-opt14-strips', - 'trucks-strips', 'visitall-opt11-strips', 'visitall-opt14-strips', - 'woodworking-opt08-strips', 'woodworking-opt11-strips', - 'zenotravel'] - -DEFAULT_SATISFICING_SUITE = [ - 'airport', 'assembly', 'barman-sat11-strips', - 'barman-sat14-strips', 'blocks', 'cavediving-14-adl', - 'childsnack-sat14-strips', 'citycar-sat14-adl', 'depot', - 'driverlog', 'elevators-sat08-strips', 'elevators-sat11-strips', - 'floortile-sat11-strips', 'floortile-sat14-strips', 'freecell', - 'ged-sat14-strips', 'grid', 'gripper', 'hiking-sat14-strips', - 'logistics00', 'logistics98', 'maintenance-sat14-adl', 'miconic', - 'miconic-fulladl', 'miconic-simpleadl', 'movie', 'mprime', - 'mystery', 'nomystery-sat11-strips', 'openstacks', - 'openstacks-sat08-adl', 'openstacks-sat08-strips', - 'openstacks-sat11-strips', 'openstacks-sat14-strips', - 'openstacks-strips', 'optical-telegraphs', 'parcprinter-08-strips', - 'parcprinter-sat11-strips', 'parking-sat11-strips', - 'parking-sat14-strips', 'pathways', 'pathways-noneg', - 'pegsol-08-strips', 'pegsol-sat11-strips', 'philosophers', - 'pipesworld-notankage', 'pipesworld-tankage', 'psr-large', - 'psr-middle', 'psr-small', 'rovers', 'satellite', - 'scanalyzer-08-strips', 'scanalyzer-sat11-strips', 'schedule', - 'sokoban-sat08-strips', 'sokoban-sat11-strips', 'storage', - 'tetris-sat14-strips', 'thoughtful-sat14-strips', - 'tidybot-sat11-strips', 'tpp', 'transport-sat08-strips', - 'transport-sat11-strips', 'transport-sat14-strips', 'trucks', - 'trucks-strips', 'visitall-sat11-strips', 'visitall-sat14-strips', - 'woodworking-sat08-strips', 'woodworking-sat11-strips', - 'zenotravel'] - - -def get_script(): - """Get file name of main script.""" - return tools.get_script_path() - - -def get_script_dir(): - """Get directory of main script. 
- - Usually a relative directory (depends on how it was called by the user.)""" - return os.path.dirname(get_script()) - - -def get_experiment_name(): - """Get name for experiment. - - Derived from the absolute filename of the main script, e.g. - "/ham/spam/eggs.py" => "spam-eggs".""" - script = os.path.abspath(get_script()) - script_dir = os.path.basename(os.path.dirname(script)) - script_base = os.path.splitext(os.path.basename(script))[0] - return "%s-%s" % (script_dir, script_base) - - -def get_data_dir(): - """Get data dir for the experiment. - - This is the subdirectory "data" of the directory containing - the main script.""" - return os.path.join(get_script_dir(), "data", get_experiment_name()) - - -def get_repo_base(): - """Get base directory of the repository, as an absolute path. - - Search upwards in the directory tree from the main script until a - directory with a subdirectory named ".hg" is found. - - Abort if the repo base cannot be found.""" - path = os.path.abspath(get_script_dir()) - while os.path.dirname(path) != path: - if os.path.exists(os.path.join(path, ".hg")): - return path - path = os.path.dirname(path) - sys.exit("repo base could not be found") - - -def is_running_on_cluster(): - node = platform.node() - return node.endswith(".scicore.unibas.ch") or node.endswith(".cluster.bc2.ch") - - -def is_test_run(): - return ARGS.test_run == "yes" or ( - ARGS.test_run == "auto" and not is_running_on_cluster()) - - -def get_algo_nick(revision, config_nick): - return "{revision}-{config_nick}".format(**locals()) - - -class IssueConfig(object): - """Hold information about a planner configuration. - - See FastDownwardExperiment.add_algorithm() for documentation of the - constructor's options. 
- - """ - def __init__(self, nick, component_options, - build_options=None, driver_options=None): - self.nick = nick - self.component_options = component_options - self.build_options = build_options - self.driver_options = driver_options - - -class IssueExperiment(FastDownwardExperiment): - """Subclass of FastDownwardExperiment with some convenience features.""" - - DEFAULT_TEST_SUITE = ["depot:p01.pddl", "gripper:prob01.pddl"] - - DEFAULT_TABLE_ATTRIBUTES = [ - "cost", - "coverage", - "error", - "evaluations", - "expansions", - "expansions_until_last_jump", - "generated", - "memory", - "quality", - "run_dir", - "score_evaluations", - "score_expansions", - "score_generated", - "score_memory", - "score_search_time", - "score_total_time", - "search_time", - "total_time", - ] - - DEFAULT_SCATTER_PLOT_ATTRIBUTES = [ - "evaluations", - "expansions", - "expansions_until_last_jump", - "initial_h_value", - "memory", - "search_time", - "total_time", - ] - - PORTFOLIO_ATTRIBUTES = [ - "cost", - "coverage", - "error", - "plan_length", - "run_dir", - ] - - def __init__(self, revisions=None, configs=None, path=None, **kwargs): - """ - - You can either specify both *revisions* and *configs* or none - of them. If they are omitted, you will need to call - exp.add_algorithm() manually. - - If *revisions* is given, it must be a non-empty list of - revision identifiers, which specify which planner versions to - use in the experiment. The same versions are used for - translator, preprocessor and search. :: - - IssueExperiment(revisions=["issue123", "4b3d581643"], ...) - - If *configs* is given, it must be a non-empty list of - IssueConfig objects. :: - - IssueExperiment(..., configs=[ - IssueConfig("ff", ["--search", "eager_greedy(ff())"]), - IssueConfig( - "lama", [], - driver_options=["--alias", "seq-sat-lama-2011"]), - ]) - - If *path* is specified, it must be the path to where the - experiment should be built (e.g. - /home/john/experiments/issue123/exp01/). 
If omitted, the - experiment path is derived automatically from the main - script's filename. Example:: - - script = experiments/issue123/exp01.py --> - path = experiments/issue123/data/issue123-exp01/ - - """ - - path = path or get_data_dir() - - FastDownwardExperiment.__init__(self, path=path, **kwargs) - - if (revisions and not configs) or (not revisions and configs): - raise ValueError( - "please provide either both or none of revisions and configs") - - for rev in revisions: - for config in configs: - self.add_algorithm( - get_algo_nick(rev, config.nick), - get_repo_base(), - rev, - config.component_options, - build_options=config.build_options, - driver_options=config.driver_options) - - self._revisions = revisions - self._configs = configs - - @classmethod - def _is_portfolio(cls, config_nick): - return "fdss" in config_nick - - @classmethod - def get_supported_attributes(cls, config_nick, attributes): - if cls._is_portfolio(config_nick): - return [attr for attr in attributes - if attr in cls.PORTFOLIO_ATTRIBUTES] - return attributes - - def add_absolute_report_step(self, **kwargs): - """Add step that makes an absolute report. - - Absolute reports are useful for experiments that don't compare - revisions. - - The report is written to the experiment evaluation directory. - - All *kwargs* will be passed to the AbsoluteReport class. If the - keyword argument *attributes* is not specified, a default list - of attributes is used. :: - - exp.add_absolute_report_step(attributes=["coverage"]) - - """ - kwargs.setdefault("attributes", self.DEFAULT_TABLE_ATTRIBUTES) - report = AbsoluteReport(**kwargs) - outfile = os.path.join( - self.eval_dir, - get_experiment_name() + "." + report.output_format) - self.add_report(report, outfile=outfile) - self.add_step( - 'publish-absolute-report', subprocess.call, ['publish', outfile]) - - def add_comparison_table_step(self, **kwargs): - """Add a step that makes pairwise revision comparisons. 
- - Create comparative reports for all pairs of Fast Downward - revisions. Each report pairs up the runs of the same config and - lists the two absolute attribute values and their difference - for all attributes in kwargs["attributes"]. - - All *kwargs* will be passed to the CompareConfigsReport class. - If the keyword argument *attributes* is not specified, a - default list of attributes is used. :: - - exp.add_comparison_table_step(attributes=["coverage"]) - - """ - kwargs.setdefault("attributes", self.DEFAULT_TABLE_ATTRIBUTES) - - def make_comparison_tables(): - for rev1, rev2 in itertools.combinations(self._revisions, 2): - compared_configs = [] - for config in self._configs: - config_nick = config.nick - compared_configs.append( - ("%s-%s" % (rev1, config_nick), - "%s-%s" % (rev2, config_nick), - "Diff (%s)" % config_nick)) - report = ComparativeReport(compared_configs, **kwargs) - outfile = os.path.join( - self.eval_dir, - "%s-%s-%s-compare.%s" % ( - self.name, rev1, rev2, report.output_format)) - report(self.eval_dir, outfile) - - def publish_comparison_tables(): - for rev1, rev2 in itertools.combinations(self._revisions, 2): - outfile = os.path.join( - self.eval_dir, - "%s-%s-%s-compare.html" % (self.name, rev1, rev2)) - subprocess.call(["publish", outfile]) - - self.add_step("make-comparison-tables", make_comparison_tables) - self.add_step( - "publish-comparison-tables", publish_comparison_tables) - - def add_scatter_plot_step(self, relative=False, attributes=None): - """Add step creating (relative) scatter plots for all revision pairs. - - Create a scatter plot for each combination of attribute, - configuration and revisions pair. If *attributes* is not - specified, a list of common scatter plot attributes is used. - For portfolios all attributes except "cost", "coverage" and - "plan_length" will be ignored. 
:: - - exp.add_scatter_plot_step(attributes=["expansions"]) - - """ - if relative: - report_class = RelativeScatterPlotReport - scatter_dir = os.path.join(self.eval_dir, "scatter-relative") - step_name = "make-relative-scatter-plots" - else: - report_class = ScatterPlotReport - scatter_dir = os.path.join(self.eval_dir, "scatter-absolute") - step_name = "make-absolute-scatter-plots" - if attributes is None: - attributes = self.DEFAULT_SCATTER_PLOT_ATTRIBUTES - - def make_scatter_plot(config_nick, rev1, rev2, attribute): - name = "-".join([self.name, rev1, rev2, attribute, config_nick]) - print "Make scatter plot for", name - algo1 = "{}-{}".format(rev1, config_nick) - algo2 = "{}-{}".format(rev2, config_nick) - report = report_class( - filter_config=[algo1, algo2], - attributes=[attribute], - get_category=lambda run1, run2: run1["domain"], - legend_location=(1.3, 0.5)) - report( - self.eval_dir, - os.path.join(scatter_dir, rev1 + "-" + rev2, name)) - - def make_scatter_plots(): - for config in self._configs: - for rev1, rev2 in itertools.combinations(self._revisions, 2): - for attribute in self.get_supported_attributes( - config.nick, attributes): - make_scatter_plot(config.nick, rev1, rev2, attribute) - - self.add_step(step_name, make_scatter_plots) diff --git a/experiments/issue791/relativescatter.py b/experiments/issue791/relativescatter.py deleted file mode 100644 index f74cb6e721..0000000000 --- a/experiments/issue791/relativescatter.py +++ /dev/null @@ -1,105 +0,0 @@ -# -*- coding: utf-8 -*- - -from collections import defaultdict - -from matplotlib import ticker - -from downward.reports.scatter import ScatterPlotReport -from downward.reports.plot import PlotReport, Matplotlib, MatplotlibPlot - - -# TODO: handle outliers - -# TODO: this is mostly copied from ScatterMatplotlib (scatter.py) -class RelativeScatterMatplotlib(Matplotlib): - @classmethod - def _plot(cls, report, axes, categories, styles): - # Display grid - axes.grid(b=True, linestyle='-', 
color='0.75') - - has_points = False - # Generate the scatter plots - for category, coords in sorted(categories.items()): - X, Y = zip(*coords) - axes.scatter(X, Y, s=42, label=category, **styles[category]) - if X and Y: - has_points = True - - if report.xscale == 'linear' or report.yscale == 'linear': - plot_size = report.missing_val * 1.01 - else: - plot_size = report.missing_val * 1.25 - - # make 5 ticks above and below 1 - yticks = [] - tick_step = report.ylim_top**(1/5.0) - for i in xrange(-5, 6): - yticks.append(tick_step**i) - axes.set_yticks(yticks) - axes.get_yaxis().set_major_formatter(ticker.ScalarFormatter()) - - axes.set_xlim(report.xlim_left or -1, report.xlim_right or plot_size) - axes.set_ylim(report.ylim_bottom or -1, report.ylim_top or plot_size) - - for axis in [axes.xaxis, axes.yaxis]: - MatplotlibPlot.change_axis_formatter( - axis, - report.missing_val if report.show_missing else None) - return has_points - - -class RelativeScatterPlotReport(ScatterPlotReport): - """ - Generate a scatter plot that shows a relative comparison of two - algorithms with regard to the given attribute. The attribute value - of algorithm 1 is shown on the x-axis and the relation to the value - of algorithm 2 on the y-axis. - """ - - def __init__(self, show_missing=True, get_category=None, **kwargs): - ScatterPlotReport.__init__(self, show_missing, get_category, **kwargs) - if self.output_format == 'tex': - raise "not supported" - else: - self.writer = RelativeScatterMatplotlib - - def _fill_categories(self, runs): - # We discard the *runs* parameter. 
- # Map category names to value tuples - categories = defaultdict(list) - self.ylim_bottom = 2 - self.ylim_top = 0.5 - self.xlim_left = float("inf") - for (domain, problem), runs in self.problem_runs.items(): - if len(runs) != 2: - continue - run1, run2 = runs - assert (run1['algorithm'] == self.algorithms[0] and - run2['algorithm'] == self.algorithms[1]) - val1 = run1.get(self.attribute) - val2 = run2.get(self.attribute) - if val1 is None or val2 is None: - continue - category = self.get_category(run1, run2) - assert val1 > 0, (domain, problem, self.algorithms[0], val1) - assert val2 > 0, (domain, problem, self.algorithms[1], val2) - x = val1 - y = val2 / float(val1) - - categories[category].append((x, y)) - - self.ylim_top = max(self.ylim_top, y) - self.ylim_bottom = min(self.ylim_bottom, y) - self.xlim_left = min(self.xlim_left, x) - - # center around 1 - if self.ylim_bottom < 1: - self.ylim_top = max(self.ylim_top, 1 / float(self.ylim_bottom)) - if self.ylim_top > 1: - self.ylim_bottom = min(self.ylim_bottom, 1 / float(self.ylim_top)) - return categories - - def _set_scales(self, xscale, yscale): - # ScatterPlot uses log-scaling on the x-axis by default. - PlotReport._set_scales( - self, xscale or self.attribute.scale or 'log', 'log') diff --git a/experiments/issue791/v1-opt.py b/experiments/issue791/v1-opt.py deleted file mode 100755 index 5d0a99f61b..0000000000 --- a/experiments/issue791/v1-opt.py +++ /dev/null @@ -1,54 +0,0 @@ -#! 
/usr/bin/env python -# -*- coding: utf-8 -*- - -import os - -from lab.environments import LocalEnvironment, BaselSlurmEnvironment - -import common_setup -from common_setup import IssueConfig, IssueExperiment - -DIR = os.path.dirname(os.path.abspath(__file__)) -BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"] -REVISIONS = ["issue791-base", "issue791-v1"] -CONFIGS = [ - IssueConfig( - 'blind-debug', ['--search', 'astar(blind())'], - build_options=["debug32"], - driver_options=["--build", "debug32", "--overall-time-limit", "5m"] - ), - IssueConfig( - 'blind-release', ['--search', 'astar(blind())'], - build_options=["release32"], - driver_options=["--build", "release32", "--overall-time-limit", "5m"] - ), -] -SUITE = common_setup.DEFAULT_OPTIMAL_SUITE -ENVIRONMENT = BaselSlurmEnvironment( - partition="infai_1", - email="florian.pommerening@unibas.ch", - export=["PATH", "DOWNWARD_BENCHMARKS"]) - -if common_setup.is_test_run(): - SUITE = IssueExperiment.DEFAULT_TEST_SUITE - ENVIRONMENT = LocalEnvironment(processes=1) - -exp = IssueExperiment( - revisions=REVISIONS, - configs=CONFIGS, - environment=ENVIRONMENT, -) - -exp.add_parser(exp.EXITCODE_PARSER) -exp.add_parser(exp.TRANSLATOR_PARSER) -exp.add_parser(exp.SINGLE_SEARCH_PARSER) -exp.add_parser(exp.PLANNER_PARSER) - -exp.add_step('build', exp.build) -exp.add_step('start', exp.start_runs) -exp.add_fetcher(name='fetch') - -exp.add_suite(BENCHMARKS_DIR, SUITE) -exp.add_comparison_table_step() - -exp.run_steps() diff --git a/experiments/issue791/v2-opt.py b/experiments/issue791/v2-opt.py deleted file mode 100755 index c8a33f2de1..0000000000 --- a/experiments/issue791/v2-opt.py +++ /dev/null @@ -1,54 +0,0 @@ -#! 
/usr/bin/env python -# -*- coding: utf-8 -*- - -import os - -from lab.environments import LocalEnvironment, BaselSlurmEnvironment - -import common_setup -from common_setup import IssueConfig, IssueExperiment - -DIR = os.path.dirname(os.path.abspath(__file__)) -BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"] -REVISIONS = ["issue791-base", "issue791-v2"] -CONFIGS = [ - IssueConfig( - 'blind-debug', ['--search', 'astar(blind())'], - build_options=["debug32"], - driver_options=["--build", "debug32", "--overall-time-limit", "5m"] - ), - IssueConfig( - 'blind-release', ['--search', 'astar(blind())'], - build_options=["release32"], - driver_options=["--build", "release32", "--overall-time-limit", "5m"] - ), -] -SUITE = common_setup.DEFAULT_OPTIMAL_SUITE -ENVIRONMENT = BaselSlurmEnvironment( - partition="infai_1", - email="florian.pommerening@unibas.ch", - export=["PATH", "DOWNWARD_BENCHMARKS"]) - -if common_setup.is_test_run(): - SUITE = IssueExperiment.DEFAULT_TEST_SUITE - ENVIRONMENT = LocalEnvironment(processes=1) - -exp = IssueExperiment( - revisions=REVISIONS, - configs=CONFIGS, - environment=ENVIRONMENT, -) - -exp.add_parser(exp.EXITCODE_PARSER) -exp.add_parser(exp.TRANSLATOR_PARSER) -exp.add_parser(exp.SINGLE_SEARCH_PARSER) -exp.add_parser(exp.PLANNER_PARSER) - -exp.add_step('build', exp.build) -exp.add_step('start', exp.start_runs) -exp.add_fetcher(name='fetch') - -exp.add_suite(BENCHMARKS_DIR, SUITE) -exp.add_comparison_table_step() - -exp.run_steps() diff --git a/experiments/issue792/common_setup.py b/experiments/issue792/common_setup.py deleted file mode 100644 index 687019c482..0000000000 --- a/experiments/issue792/common_setup.py +++ /dev/null @@ -1,382 +0,0 @@ -# -*- coding: utf-8 -*- - -import itertools -import os -import platform -import subprocess -import sys - -from lab.experiment import ARGPARSER -from lab import tools - -from downward.experiment import FastDownwardExperiment -from downward.reports.absolute import AbsoluteReport -from 
downward.reports.compare import ComparativeReport -from downward.reports.scatter import ScatterPlotReport - -from relativescatter import RelativeScatterPlotReport - - -def parse_args(): - ARGPARSER.add_argument( - "--test", - choices=["yes", "no", "auto"], - default="auto", - dest="test_run", - help="test experiment locally on a small suite if --test=yes or " - "--test=auto and we are not on a cluster") - return ARGPARSER.parse_args() - -ARGS = parse_args() - - -DEFAULT_OPTIMAL_SUITE = [ - 'airport', 'barman-opt11-strips', 'barman-opt14-strips', 'blocks', - 'childsnack-opt14-strips', 'depot', 'driverlog', - 'elevators-opt08-strips', 'elevators-opt11-strips', - 'floortile-opt11-strips', 'floortile-opt14-strips', 'freecell', - 'ged-opt14-strips', 'grid', 'gripper', 'hiking-opt14-strips', - 'logistics00', 'logistics98', 'miconic', 'movie', 'mprime', - 'mystery', 'nomystery-opt11-strips', 'openstacks-opt08-strips', - 'openstacks-opt11-strips', 'openstacks-opt14-strips', - 'openstacks-strips', 'parcprinter-08-strips', - 'parcprinter-opt11-strips', 'parking-opt11-strips', - 'parking-opt14-strips', 'pathways-noneg', 'pegsol-08-strips', - 'pegsol-opt11-strips', 'pipesworld-notankage', - 'pipesworld-tankage', 'psr-small', 'rovers', 'satellite', - 'scanalyzer-08-strips', 'scanalyzer-opt11-strips', - 'sokoban-opt08-strips', 'sokoban-opt11-strips', 'storage', - 'tetris-opt14-strips', 'tidybot-opt11-strips', - 'tidybot-opt14-strips', 'tpp', 'transport-opt08-strips', - 'transport-opt11-strips', 'transport-opt14-strips', - 'trucks-strips', 'visitall-opt11-strips', 'visitall-opt14-strips', - 'woodworking-opt08-strips', 'woodworking-opt11-strips', - 'zenotravel'] - -DEFAULT_SATISFICING_SUITE = [ - 'airport', 'assembly', 'barman-sat11-strips', - 'barman-sat14-strips', 'blocks', 'cavediving-14-adl', - 'childsnack-sat14-strips', 'citycar-sat14-adl', 'depot', - 'driverlog', 'elevators-sat08-strips', 'elevators-sat11-strips', - 'floortile-sat11-strips', 'floortile-sat14-strips', 
'freecell', - 'ged-sat14-strips', 'grid', 'gripper', 'hiking-sat14-strips', - 'logistics00', 'logistics98', 'maintenance-sat14-adl', 'miconic', - 'miconic-fulladl', 'miconic-simpleadl', 'movie', 'mprime', - 'mystery', 'nomystery-sat11-strips', 'openstacks', - 'openstacks-sat08-adl', 'openstacks-sat08-strips', - 'openstacks-sat11-strips', 'openstacks-sat14-strips', - 'openstacks-strips', 'optical-telegraphs', 'parcprinter-08-strips', - 'parcprinter-sat11-strips', 'parking-sat11-strips', - 'parking-sat14-strips', 'pathways', 'pathways-noneg', - 'pegsol-08-strips', 'pegsol-sat11-strips', 'philosophers', - 'pipesworld-notankage', 'pipesworld-tankage', 'psr-large', - 'psr-middle', 'psr-small', 'rovers', 'satellite', - 'scanalyzer-08-strips', 'scanalyzer-sat11-strips', 'schedule', - 'sokoban-sat08-strips', 'sokoban-sat11-strips', 'storage', - 'tetris-sat14-strips', 'thoughtful-sat14-strips', - 'tidybot-sat11-strips', 'tpp', 'transport-sat08-strips', - 'transport-sat11-strips', 'transport-sat14-strips', 'trucks', - 'trucks-strips', 'visitall-sat11-strips', 'visitall-sat14-strips', - 'woodworking-sat08-strips', 'woodworking-sat11-strips', - 'zenotravel'] - - -def get_script(): - """Get file name of main script.""" - return tools.get_script_path() - - -def get_script_dir(): - """Get directory of main script. - - Usually a relative directory (depends on how it was called by the user.)""" - return os.path.dirname(get_script()) - - -def get_experiment_name(): - """Get name for experiment. - - Derived from the absolute filename of the main script, e.g. - "/ham/spam/eggs.py" => "spam-eggs".""" - script = os.path.abspath(get_script()) - script_dir = os.path.basename(os.path.dirname(script)) - script_base = os.path.splitext(os.path.basename(script))[0] - return "%s-%s" % (script_dir, script_base) - - -def get_data_dir(): - """Get data dir for the experiment. 
- - This is the subdirectory "data" of the directory containing - the main script.""" - return os.path.join(get_script_dir(), "data", get_experiment_name()) - - -def get_repo_base(): - """Get base directory of the repository, as an absolute path. - - Search upwards in the directory tree from the main script until a - directory with a subdirectory named ".hg" is found. - - Abort if the repo base cannot be found.""" - path = os.path.abspath(get_script_dir()) - while os.path.dirname(path) != path: - if os.path.exists(os.path.join(path, ".hg")): - return path - path = os.path.dirname(path) - sys.exit("repo base could not be found") - - -def is_running_on_cluster(): - node = platform.node() - return node.endswith(".scicore.unibas.ch") or node.endswith(".cluster.bc2.ch") - - -def is_test_run(): - return ARGS.test_run == "yes" or ( - ARGS.test_run == "auto" and not is_running_on_cluster()) - - -def get_algo_nick(revision, config_nick): - return "{revision}-{config_nick}".format(**locals()) - - -class IssueConfig(object): - """Hold information about a planner configuration. - - See FastDownwardExperiment.add_algorithm() for documentation of the - constructor's options. 
- - """ - def __init__(self, nick, component_options, - build_options=None, driver_options=None): - self.nick = nick - self.component_options = component_options - self.build_options = build_options - self.driver_options = driver_options - - -class IssueExperiment(FastDownwardExperiment): - """Subclass of FastDownwardExperiment with some convenience features.""" - - DEFAULT_TEST_SUITE = ["depot:p01.pddl", "gripper:prob01.pddl"] - - DEFAULT_TABLE_ATTRIBUTES = [ - "cost", - "coverage", - "error", - "evaluations", - "expansions", - "expansions_until_last_jump", - "generated", - "memory", - "quality", - "run_dir", - "score_evaluations", - "score_expansions", - "score_generated", - "score_memory", - "score_search_time", - "score_total_time", - "search_time", - "total_time", - ] - - DEFAULT_SCATTER_PLOT_ATTRIBUTES = [ - "evaluations", - "expansions", - "expansions_until_last_jump", - "initial_h_value", - "memory", - "search_time", - "total_time", - ] - - PORTFOLIO_ATTRIBUTES = [ - "cost", - "coverage", - "error", - "plan_length", - "run_dir", - ] - - def __init__(self, revisions=None, configs=None, path=None, **kwargs): - """ - - You can either specify both *revisions* and *configs* or none - of them. If they are omitted, you will need to call - exp.add_algorithm() manually. - - If *revisions* is given, it must be a non-empty list of - revision identifiers, which specify which planner versions to - use in the experiment. The same versions are used for - translator, preprocessor and search. :: - - IssueExperiment(revisions=["issue123", "4b3d581643"], ...) - - If *configs* is given, it must be a non-empty list of - IssueConfig objects. :: - - IssueExperiment(..., configs=[ - IssueConfig("ff", ["--search", "eager_greedy(ff())"]), - IssueConfig( - "lama", [], - driver_options=["--alias", "seq-sat-lama-2011"]), - ]) - - If *path* is specified, it must be the path to where the - experiment should be built (e.g. - /home/john/experiments/issue123/exp01/). 
If omitted, the - experiment path is derived automatically from the main - script's filename. Example:: - - script = experiments/issue123/exp01.py --> - path = experiments/issue123/data/issue123-exp01/ - - """ - - path = path or get_data_dir() - - FastDownwardExperiment.__init__(self, path=path, **kwargs) - - if (revisions and not configs) or (not revisions and configs): - raise ValueError( - "please provide either both or none of revisions and configs") - - for rev in revisions: - for config in configs: - self.add_algorithm( - get_algo_nick(rev, config.nick), - get_repo_base(), - rev, - config.component_options, - build_options=config.build_options, - driver_options=config.driver_options) - - self._revisions = revisions - self._configs = configs - - @classmethod - def _is_portfolio(cls, config_nick): - return "fdss" in config_nick - - @classmethod - def get_supported_attributes(cls, config_nick, attributes): - if cls._is_portfolio(config_nick): - return [attr for attr in attributes - if attr in cls.PORTFOLIO_ATTRIBUTES] - return attributes - - def add_absolute_report_step(self, **kwargs): - """Add step that makes an absolute report. - - Absolute reports are useful for experiments that don't compare - revisions. - - The report is written to the experiment evaluation directory. - - All *kwargs* will be passed to the AbsoluteReport class. If the - keyword argument *attributes* is not specified, a default list - of attributes is used. :: - - exp.add_absolute_report_step(attributes=["coverage"]) - - """ - kwargs.setdefault("attributes", self.DEFAULT_TABLE_ATTRIBUTES) - report = AbsoluteReport(**kwargs) - outfile = os.path.join( - self.eval_dir, - get_experiment_name() + "." + report.output_format) - self.add_report(report, outfile=outfile) - self.add_step( - 'publish-absolute-report', subprocess.call, ['publish', outfile]) - - def add_comparison_table_step(self, **kwargs): - """Add a step that makes pairwise revision comparisons. 
- - Create comparative reports for all pairs of Fast Downward - revisions. Each report pairs up the runs of the same config and - lists the two absolute attribute values and their difference - for all attributes in kwargs["attributes"]. - - All *kwargs* will be passed to the CompareConfigsReport class. - If the keyword argument *attributes* is not specified, a - default list of attributes is used. :: - - exp.add_comparison_table_step(attributes=["coverage"]) - - """ - kwargs.setdefault("attributes", self.DEFAULT_TABLE_ATTRIBUTES) - - def make_comparison_tables(): - for rev1, rev2 in itertools.combinations(self._revisions, 2): - compared_configs = [] - for config in self._configs: - config_nick = config.nick - compared_configs.append( - ("%s-%s" % (rev1, config_nick), - "%s-%s" % (rev2, config_nick), - "Diff (%s)" % config_nick)) - report = ComparativeReport(compared_configs, **kwargs) - outfile = os.path.join( - self.eval_dir, - "%s-%s-%s-compare.%s" % ( - self.name, rev1, rev2, report.output_format)) - report(self.eval_dir, outfile) - - def publish_comparison_tables(): - for rev1, rev2 in itertools.combinations(self._revisions, 2): - outfile = os.path.join( - self.eval_dir, - "%s-%s-%s-compare.html" % (self.name, rev1, rev2)) - subprocess.call(["publish", outfile]) - - self.add_step("make-comparison-tables", make_comparison_tables) - self.add_step( - "publish-comparison-tables", publish_comparison_tables) - - def add_scatter_plot_step(self, relative=False, attributes=None): - """Add step creating (relative) scatter plots for all revision pairs. - - Create a scatter plot for each combination of attribute, - configuration and revisions pair. If *attributes* is not - specified, a list of common scatter plot attributes is used. - For portfolios all attributes except "cost", "coverage" and - "plan_length" will be ignored. 
:: - - exp.add_scatter_plot_step(attributes=["expansions"]) - - """ - if relative: - report_class = RelativeScatterPlotReport - scatter_dir = os.path.join(self.eval_dir, "scatter-relative") - step_name = "make-relative-scatter-plots" - else: - report_class = ScatterPlotReport - scatter_dir = os.path.join(self.eval_dir, "scatter-absolute") - step_name = "make-absolute-scatter-plots" - if attributes is None: - attributes = self.DEFAULT_SCATTER_PLOT_ATTRIBUTES - - def make_scatter_plot(config_nick, rev1, rev2, attribute): - name = "-".join([self.name, rev1, rev2, attribute, config_nick]) - print "Make scatter plot for", name - algo1 = "{}-{}".format(rev1, config_nick) - algo2 = "{}-{}".format(rev2, config_nick) - report = report_class( - filter_config=[algo1, algo2], - attributes=[attribute], - get_category=lambda run1, run2: run1["domain"], - legend_location=(1.3, 0.5)) - report( - self.eval_dir, - os.path.join(scatter_dir, rev1 + "-" + rev2, name)) - - def make_scatter_plots(): - for config in self._configs: - for rev1, rev2 in itertools.combinations(self._revisions, 2): - for attribute in self.get_supported_attributes( - config.nick, attributes): - make_scatter_plot(config.nick, rev1, rev2, attribute) - - self.add_step(step_name, make_scatter_plots) diff --git a/experiments/issue792/relativescatter.py b/experiments/issue792/relativescatter.py deleted file mode 100644 index f74cb6e721..0000000000 --- a/experiments/issue792/relativescatter.py +++ /dev/null @@ -1,105 +0,0 @@ -# -*- coding: utf-8 -*- - -from collections import defaultdict - -from matplotlib import ticker - -from downward.reports.scatter import ScatterPlotReport -from downward.reports.plot import PlotReport, Matplotlib, MatplotlibPlot - - -# TODO: handle outliers - -# TODO: this is mostly copied from ScatterMatplotlib (scatter.py) -class RelativeScatterMatplotlib(Matplotlib): - @classmethod - def _plot(cls, report, axes, categories, styles): - # Display grid - axes.grid(b=True, linestyle='-', 
color='0.75') - - has_points = False - # Generate the scatter plots - for category, coords in sorted(categories.items()): - X, Y = zip(*coords) - axes.scatter(X, Y, s=42, label=category, **styles[category]) - if X and Y: - has_points = True - - if report.xscale == 'linear' or report.yscale == 'linear': - plot_size = report.missing_val * 1.01 - else: - plot_size = report.missing_val * 1.25 - - # make 5 ticks above and below 1 - yticks = [] - tick_step = report.ylim_top**(1/5.0) - for i in xrange(-5, 6): - yticks.append(tick_step**i) - axes.set_yticks(yticks) - axes.get_yaxis().set_major_formatter(ticker.ScalarFormatter()) - - axes.set_xlim(report.xlim_left or -1, report.xlim_right or plot_size) - axes.set_ylim(report.ylim_bottom or -1, report.ylim_top or plot_size) - - for axis in [axes.xaxis, axes.yaxis]: - MatplotlibPlot.change_axis_formatter( - axis, - report.missing_val if report.show_missing else None) - return has_points - - -class RelativeScatterPlotReport(ScatterPlotReport): - """ - Generate a scatter plot that shows a relative comparison of two - algorithms with regard to the given attribute. The attribute value - of algorithm 1 is shown on the x-axis and the relation to the value - of algorithm 2 on the y-axis. - """ - - def __init__(self, show_missing=True, get_category=None, **kwargs): - ScatterPlotReport.__init__(self, show_missing, get_category, **kwargs) - if self.output_format == 'tex': - raise "not supported" - else: - self.writer = RelativeScatterMatplotlib - - def _fill_categories(self, runs): - # We discard the *runs* parameter. 
- # Map category names to value tuples - categories = defaultdict(list) - self.ylim_bottom = 2 - self.ylim_top = 0.5 - self.xlim_left = float("inf") - for (domain, problem), runs in self.problem_runs.items(): - if len(runs) != 2: - continue - run1, run2 = runs - assert (run1['algorithm'] == self.algorithms[0] and - run2['algorithm'] == self.algorithms[1]) - val1 = run1.get(self.attribute) - val2 = run2.get(self.attribute) - if val1 is None or val2 is None: - continue - category = self.get_category(run1, run2) - assert val1 > 0, (domain, problem, self.algorithms[0], val1) - assert val2 > 0, (domain, problem, self.algorithms[1], val2) - x = val1 - y = val2 / float(val1) - - categories[category].append((x, y)) - - self.ylim_top = max(self.ylim_top, y) - self.ylim_bottom = min(self.ylim_bottom, y) - self.xlim_left = min(self.xlim_left, x) - - # center around 1 - if self.ylim_bottom < 1: - self.ylim_top = max(self.ylim_top, 1 / float(self.ylim_bottom)) - if self.ylim_top > 1: - self.ylim_bottom = min(self.ylim_bottom, 1 / float(self.ylim_top)) - return categories - - def _set_scales(self, xscale, yscale): - # ScatterPlot uses log-scaling on the x-axis by default. - PlotReport._set_scales( - self, xscale or self.attribute.scale or 'log', 'log') diff --git a/experiments/issue792/v1-opt.py b/experiments/issue792/v1-opt.py deleted file mode 100755 index 5162ec8a46..0000000000 --- a/experiments/issue792/v1-opt.py +++ /dev/null @@ -1,46 +0,0 @@ -#! 
/usr/bin/env python -# -*- coding: utf-8 -*- - -import os - -from lab.environments import LocalEnvironment, BaselSlurmEnvironment - -import common_setup -from common_setup import IssueConfig, IssueExperiment -from relativescatter import RelativeScatterPlotReport - -DIR = os.path.dirname(os.path.abspath(__file__)) -BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"] -REVISIONS = ["issue792-base", "issue792-v1"] -CONFIGS = [ - IssueConfig('blind', ['--search', 'astar(blind())']), -] -SUITE = common_setup.DEFAULT_OPTIMAL_SUITE -ENVIRONMENT = BaselSlurmEnvironment( - partition="infai_1", - email="florian.pommerening@unibas.ch", - export=["PATH", "DOWNWARD_BENCHMARKS"]) - -if common_setup.is_test_run(): - SUITE = IssueExperiment.DEFAULT_TEST_SUITE - ENVIRONMENT = LocalEnvironment(processes=1) - -exp = IssueExperiment( - revisions=REVISIONS, - configs=CONFIGS, - environment=ENVIRONMENT, -) - -exp.add_parser(exp.EXITCODE_PARSER) -exp.add_parser(exp.TRANSLATOR_PARSER) -exp.add_parser(exp.SINGLE_SEARCH_PARSER) -exp.add_parser(exp.PLANNER_PARSER) - -exp.add_step('build', exp.build) -exp.add_step('start', exp.start_runs) -exp.add_fetcher(name='fetch') - -exp.add_suite(BENCHMARKS_DIR, SUITE) -exp.add_comparison_table_step() - -exp.run_steps() diff --git a/experiments/issue794/axiom_time_parser.py b/experiments/issue794/axiom_time_parser.py deleted file mode 100755 index 5621e709d3..0000000000 --- a/experiments/issue794/axiom_time_parser.py +++ /dev/null @@ -1,10 +0,0 @@ -#! 
/usr/bin/env python - -from lab.parser import Parser - -print 'Running axiom evaluation time parser' -parser = Parser() -parser.add_pattern('axiom_time_inner', r'AxiomEvaluator time in inner evaluate: (.+)', type=float) -parser.add_pattern('axiom_time_outer', r'AxiomEvaluator time in outer evaluate: (.+)', type=float) - -parser.parse() diff --git a/experiments/issue794/common_setup.py b/experiments/issue794/common_setup.py deleted file mode 100644 index 687019c482..0000000000 --- a/experiments/issue794/common_setup.py +++ /dev/null @@ -1,382 +0,0 @@ -# -*- coding: utf-8 -*- - -import itertools -import os -import platform -import subprocess -import sys - -from lab.experiment import ARGPARSER -from lab import tools - -from downward.experiment import FastDownwardExperiment -from downward.reports.absolute import AbsoluteReport -from downward.reports.compare import ComparativeReport -from downward.reports.scatter import ScatterPlotReport - -from relativescatter import RelativeScatterPlotReport - - -def parse_args(): - ARGPARSER.add_argument( - "--test", - choices=["yes", "no", "auto"], - default="auto", - dest="test_run", - help="test experiment locally on a small suite if --test=yes or " - "--test=auto and we are not on a cluster") - return ARGPARSER.parse_args() - -ARGS = parse_args() - - -DEFAULT_OPTIMAL_SUITE = [ - 'airport', 'barman-opt11-strips', 'barman-opt14-strips', 'blocks', - 'childsnack-opt14-strips', 'depot', 'driverlog', - 'elevators-opt08-strips', 'elevators-opt11-strips', - 'floortile-opt11-strips', 'floortile-opt14-strips', 'freecell', - 'ged-opt14-strips', 'grid', 'gripper', 'hiking-opt14-strips', - 'logistics00', 'logistics98', 'miconic', 'movie', 'mprime', - 'mystery', 'nomystery-opt11-strips', 'openstacks-opt08-strips', - 'openstacks-opt11-strips', 'openstacks-opt14-strips', - 'openstacks-strips', 'parcprinter-08-strips', - 'parcprinter-opt11-strips', 'parking-opt11-strips', - 'parking-opt14-strips', 'pathways-noneg', 'pegsol-08-strips', - 
'pegsol-opt11-strips', 'pipesworld-notankage', - 'pipesworld-tankage', 'psr-small', 'rovers', 'satellite', - 'scanalyzer-08-strips', 'scanalyzer-opt11-strips', - 'sokoban-opt08-strips', 'sokoban-opt11-strips', 'storage', - 'tetris-opt14-strips', 'tidybot-opt11-strips', - 'tidybot-opt14-strips', 'tpp', 'transport-opt08-strips', - 'transport-opt11-strips', 'transport-opt14-strips', - 'trucks-strips', 'visitall-opt11-strips', 'visitall-opt14-strips', - 'woodworking-opt08-strips', 'woodworking-opt11-strips', - 'zenotravel'] - -DEFAULT_SATISFICING_SUITE = [ - 'airport', 'assembly', 'barman-sat11-strips', - 'barman-sat14-strips', 'blocks', 'cavediving-14-adl', - 'childsnack-sat14-strips', 'citycar-sat14-adl', 'depot', - 'driverlog', 'elevators-sat08-strips', 'elevators-sat11-strips', - 'floortile-sat11-strips', 'floortile-sat14-strips', 'freecell', - 'ged-sat14-strips', 'grid', 'gripper', 'hiking-sat14-strips', - 'logistics00', 'logistics98', 'maintenance-sat14-adl', 'miconic', - 'miconic-fulladl', 'miconic-simpleadl', 'movie', 'mprime', - 'mystery', 'nomystery-sat11-strips', 'openstacks', - 'openstacks-sat08-adl', 'openstacks-sat08-strips', - 'openstacks-sat11-strips', 'openstacks-sat14-strips', - 'openstacks-strips', 'optical-telegraphs', 'parcprinter-08-strips', - 'parcprinter-sat11-strips', 'parking-sat11-strips', - 'parking-sat14-strips', 'pathways', 'pathways-noneg', - 'pegsol-08-strips', 'pegsol-sat11-strips', 'philosophers', - 'pipesworld-notankage', 'pipesworld-tankage', 'psr-large', - 'psr-middle', 'psr-small', 'rovers', 'satellite', - 'scanalyzer-08-strips', 'scanalyzer-sat11-strips', 'schedule', - 'sokoban-sat08-strips', 'sokoban-sat11-strips', 'storage', - 'tetris-sat14-strips', 'thoughtful-sat14-strips', - 'tidybot-sat11-strips', 'tpp', 'transport-sat08-strips', - 'transport-sat11-strips', 'transport-sat14-strips', 'trucks', - 'trucks-strips', 'visitall-sat11-strips', 'visitall-sat14-strips', - 'woodworking-sat08-strips', 'woodworking-sat11-strips', - 
'zenotravel'] - - -def get_script(): - """Get file name of main script.""" - return tools.get_script_path() - - -def get_script_dir(): - """Get directory of main script. - - Usually a relative directory (depends on how it was called by the user.)""" - return os.path.dirname(get_script()) - - -def get_experiment_name(): - """Get name for experiment. - - Derived from the absolute filename of the main script, e.g. - "/ham/spam/eggs.py" => "spam-eggs".""" - script = os.path.abspath(get_script()) - script_dir = os.path.basename(os.path.dirname(script)) - script_base = os.path.splitext(os.path.basename(script))[0] - return "%s-%s" % (script_dir, script_base) - - -def get_data_dir(): - """Get data dir for the experiment. - - This is the subdirectory "data" of the directory containing - the main script.""" - return os.path.join(get_script_dir(), "data", get_experiment_name()) - - -def get_repo_base(): - """Get base directory of the repository, as an absolute path. - - Search upwards in the directory tree from the main script until a - directory with a subdirectory named ".hg" is found. - - Abort if the repo base cannot be found.""" - path = os.path.abspath(get_script_dir()) - while os.path.dirname(path) != path: - if os.path.exists(os.path.join(path, ".hg")): - return path - path = os.path.dirname(path) - sys.exit("repo base could not be found") - - -def is_running_on_cluster(): - node = platform.node() - return node.endswith(".scicore.unibas.ch") or node.endswith(".cluster.bc2.ch") - - -def is_test_run(): - return ARGS.test_run == "yes" or ( - ARGS.test_run == "auto" and not is_running_on_cluster()) - - -def get_algo_nick(revision, config_nick): - return "{revision}-{config_nick}".format(**locals()) - - -class IssueConfig(object): - """Hold information about a planner configuration. - - See FastDownwardExperiment.add_algorithm() for documentation of the - constructor's options. 
- - """ - def __init__(self, nick, component_options, - build_options=None, driver_options=None): - self.nick = nick - self.component_options = component_options - self.build_options = build_options - self.driver_options = driver_options - - -class IssueExperiment(FastDownwardExperiment): - """Subclass of FastDownwardExperiment with some convenience features.""" - - DEFAULT_TEST_SUITE = ["depot:p01.pddl", "gripper:prob01.pddl"] - - DEFAULT_TABLE_ATTRIBUTES = [ - "cost", - "coverage", - "error", - "evaluations", - "expansions", - "expansions_until_last_jump", - "generated", - "memory", - "quality", - "run_dir", - "score_evaluations", - "score_expansions", - "score_generated", - "score_memory", - "score_search_time", - "score_total_time", - "search_time", - "total_time", - ] - - DEFAULT_SCATTER_PLOT_ATTRIBUTES = [ - "evaluations", - "expansions", - "expansions_until_last_jump", - "initial_h_value", - "memory", - "search_time", - "total_time", - ] - - PORTFOLIO_ATTRIBUTES = [ - "cost", - "coverage", - "error", - "plan_length", - "run_dir", - ] - - def __init__(self, revisions=None, configs=None, path=None, **kwargs): - """ - - You can either specify both *revisions* and *configs* or none - of them. If they are omitted, you will need to call - exp.add_algorithm() manually. - - If *revisions* is given, it must be a non-empty list of - revision identifiers, which specify which planner versions to - use in the experiment. The same versions are used for - translator, preprocessor and search. :: - - IssueExperiment(revisions=["issue123", "4b3d581643"], ...) - - If *configs* is given, it must be a non-empty list of - IssueConfig objects. :: - - IssueExperiment(..., configs=[ - IssueConfig("ff", ["--search", "eager_greedy(ff())"]), - IssueConfig( - "lama", [], - driver_options=["--alias", "seq-sat-lama-2011"]), - ]) - - If *path* is specified, it must be the path to where the - experiment should be built (e.g. - /home/john/experiments/issue123/exp01/). 
If omitted, the - experiment path is derived automatically from the main - script's filename. Example:: - - script = experiments/issue123/exp01.py --> - path = experiments/issue123/data/issue123-exp01/ - - """ - - path = path or get_data_dir() - - FastDownwardExperiment.__init__(self, path=path, **kwargs) - - if (revisions and not configs) or (not revisions and configs): - raise ValueError( - "please provide either both or none of revisions and configs") - - for rev in revisions: - for config in configs: - self.add_algorithm( - get_algo_nick(rev, config.nick), - get_repo_base(), - rev, - config.component_options, - build_options=config.build_options, - driver_options=config.driver_options) - - self._revisions = revisions - self._configs = configs - - @classmethod - def _is_portfolio(cls, config_nick): - return "fdss" in config_nick - - @classmethod - def get_supported_attributes(cls, config_nick, attributes): - if cls._is_portfolio(config_nick): - return [attr for attr in attributes - if attr in cls.PORTFOLIO_ATTRIBUTES] - return attributes - - def add_absolute_report_step(self, **kwargs): - """Add step that makes an absolute report. - - Absolute reports are useful for experiments that don't compare - revisions. - - The report is written to the experiment evaluation directory. - - All *kwargs* will be passed to the AbsoluteReport class. If the - keyword argument *attributes* is not specified, a default list - of attributes is used. :: - - exp.add_absolute_report_step(attributes=["coverage"]) - - """ - kwargs.setdefault("attributes", self.DEFAULT_TABLE_ATTRIBUTES) - report = AbsoluteReport(**kwargs) - outfile = os.path.join( - self.eval_dir, - get_experiment_name() + "." + report.output_format) - self.add_report(report, outfile=outfile) - self.add_step( - 'publish-absolute-report', subprocess.call, ['publish', outfile]) - - def add_comparison_table_step(self, **kwargs): - """Add a step that makes pairwise revision comparisons. 
- - Create comparative reports for all pairs of Fast Downward - revisions. Each report pairs up the runs of the same config and - lists the two absolute attribute values and their difference - for all attributes in kwargs["attributes"]. - - All *kwargs* will be passed to the CompareConfigsReport class. - If the keyword argument *attributes* is not specified, a - default list of attributes is used. :: - - exp.add_comparison_table_step(attributes=["coverage"]) - - """ - kwargs.setdefault("attributes", self.DEFAULT_TABLE_ATTRIBUTES) - - def make_comparison_tables(): - for rev1, rev2 in itertools.combinations(self._revisions, 2): - compared_configs = [] - for config in self._configs: - config_nick = config.nick - compared_configs.append( - ("%s-%s" % (rev1, config_nick), - "%s-%s" % (rev2, config_nick), - "Diff (%s)" % config_nick)) - report = ComparativeReport(compared_configs, **kwargs) - outfile = os.path.join( - self.eval_dir, - "%s-%s-%s-compare.%s" % ( - self.name, rev1, rev2, report.output_format)) - report(self.eval_dir, outfile) - - def publish_comparison_tables(): - for rev1, rev2 in itertools.combinations(self._revisions, 2): - outfile = os.path.join( - self.eval_dir, - "%s-%s-%s-compare.html" % (self.name, rev1, rev2)) - subprocess.call(["publish", outfile]) - - self.add_step("make-comparison-tables", make_comparison_tables) - self.add_step( - "publish-comparison-tables", publish_comparison_tables) - - def add_scatter_plot_step(self, relative=False, attributes=None): - """Add step creating (relative) scatter plots for all revision pairs. - - Create a scatter plot for each combination of attribute, - configuration and revisions pair. If *attributes* is not - specified, a list of common scatter plot attributes is used. - For portfolios all attributes except "cost", "coverage" and - "plan_length" will be ignored. 
:: - - exp.add_scatter_plot_step(attributes=["expansions"]) - - """ - if relative: - report_class = RelativeScatterPlotReport - scatter_dir = os.path.join(self.eval_dir, "scatter-relative") - step_name = "make-relative-scatter-plots" - else: - report_class = ScatterPlotReport - scatter_dir = os.path.join(self.eval_dir, "scatter-absolute") - step_name = "make-absolute-scatter-plots" - if attributes is None: - attributes = self.DEFAULT_SCATTER_PLOT_ATTRIBUTES - - def make_scatter_plot(config_nick, rev1, rev2, attribute): - name = "-".join([self.name, rev1, rev2, attribute, config_nick]) - print "Make scatter plot for", name - algo1 = "{}-{}".format(rev1, config_nick) - algo2 = "{}-{}".format(rev2, config_nick) - report = report_class( - filter_config=[algo1, algo2], - attributes=[attribute], - get_category=lambda run1, run2: run1["domain"], - legend_location=(1.3, 0.5)) - report( - self.eval_dir, - os.path.join(scatter_dir, rev1 + "-" + rev2, name)) - - def make_scatter_plots(): - for config in self._configs: - for rev1, rev2 in itertools.combinations(self._revisions, 2): - for attribute in self.get_supported_attributes( - config.nick, attributes): - make_scatter_plot(config.nick, rev1, rev2, attribute) - - self.add_step(step_name, make_scatter_plots) diff --git a/experiments/issue794/relativescatter.py b/experiments/issue794/relativescatter.py deleted file mode 100644 index f74cb6e721..0000000000 --- a/experiments/issue794/relativescatter.py +++ /dev/null @@ -1,105 +0,0 @@ -# -*- coding: utf-8 -*- - -from collections import defaultdict - -from matplotlib import ticker - -from downward.reports.scatter import ScatterPlotReport -from downward.reports.plot import PlotReport, Matplotlib, MatplotlibPlot - - -# TODO: handle outliers - -# TODO: this is mostly copied from ScatterMatplotlib (scatter.py) -class RelativeScatterMatplotlib(Matplotlib): - @classmethod - def _plot(cls, report, axes, categories, styles): - # Display grid - axes.grid(b=True, linestyle='-', 
color='0.75') - - has_points = False - # Generate the scatter plots - for category, coords in sorted(categories.items()): - X, Y = zip(*coords) - axes.scatter(X, Y, s=42, label=category, **styles[category]) - if X and Y: - has_points = True - - if report.xscale == 'linear' or report.yscale == 'linear': - plot_size = report.missing_val * 1.01 - else: - plot_size = report.missing_val * 1.25 - - # make 5 ticks above and below 1 - yticks = [] - tick_step = report.ylim_top**(1/5.0) - for i in xrange(-5, 6): - yticks.append(tick_step**i) - axes.set_yticks(yticks) - axes.get_yaxis().set_major_formatter(ticker.ScalarFormatter()) - - axes.set_xlim(report.xlim_left or -1, report.xlim_right or plot_size) - axes.set_ylim(report.ylim_bottom or -1, report.ylim_top or plot_size) - - for axis in [axes.xaxis, axes.yaxis]: - MatplotlibPlot.change_axis_formatter( - axis, - report.missing_val if report.show_missing else None) - return has_points - - -class RelativeScatterPlotReport(ScatterPlotReport): - """ - Generate a scatter plot that shows a relative comparison of two - algorithms with regard to the given attribute. The attribute value - of algorithm 1 is shown on the x-axis and the relation to the value - of algorithm 2 on the y-axis. - """ - - def __init__(self, show_missing=True, get_category=None, **kwargs): - ScatterPlotReport.__init__(self, show_missing, get_category, **kwargs) - if self.output_format == 'tex': - raise "not supported" - else: - self.writer = RelativeScatterMatplotlib - - def _fill_categories(self, runs): - # We discard the *runs* parameter. 
- # Map category names to value tuples - categories = defaultdict(list) - self.ylim_bottom = 2 - self.ylim_top = 0.5 - self.xlim_left = float("inf") - for (domain, problem), runs in self.problem_runs.items(): - if len(runs) != 2: - continue - run1, run2 = runs - assert (run1['algorithm'] == self.algorithms[0] and - run2['algorithm'] == self.algorithms[1]) - val1 = run1.get(self.attribute) - val2 = run2.get(self.attribute) - if val1 is None or val2 is None: - continue - category = self.get_category(run1, run2) - assert val1 > 0, (domain, problem, self.algorithms[0], val1) - assert val2 > 0, (domain, problem, self.algorithms[1], val2) - x = val1 - y = val2 / float(val1) - - categories[category].append((x, y)) - - self.ylim_top = max(self.ylim_top, y) - self.ylim_bottom = min(self.ylim_bottom, y) - self.xlim_left = min(self.xlim_left, x) - - # center around 1 - if self.ylim_bottom < 1: - self.ylim_top = max(self.ylim_top, 1 / float(self.ylim_bottom)) - if self.ylim_top > 1: - self.ylim_bottom = min(self.ylim_bottom, 1 / float(self.ylim_top)) - return categories - - def _set_scales(self, xscale, yscale): - # ScatterPlot uses log-scaling on the x-axis by default. - PlotReport._set_scales( - self, xscale or self.attribute.scale or 'log', 'log') diff --git a/experiments/issue794/v1-opt.py b/experiments/issue794/v1-opt.py deleted file mode 100755 index a7e2bfe6b8..0000000000 --- a/experiments/issue794/v1-opt.py +++ /dev/null @@ -1,62 +0,0 @@ -#! 
/usr/bin/env python -# -*- coding: utf-8 -*- - -import os - -from lab.environments import LocalEnvironment, BaselSlurmEnvironment - -import common_setup -from common_setup import IssueConfig, IssueExperiment -from relativescatter import RelativeScatterPlotReport - -DIR = os.path.dirname(os.path.abspath(__file__)) -BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"] -REVISIONS = ["issue794-base", "issue794-v1"] -CONFIGS = [ - IssueConfig('blind', ['--search', 'astar(blind())']), -] -SUITE = [ - 'assembly', 'miconic-fulladl', 'openstacks', - 'openstacks-sat08-adl', 'optical-telegraphs', 'philosophers', - 'psr-large', 'psr-middle', 'trucks', -] -ENVIRONMENT = BaselSlurmEnvironment( - partition="infai_1", - email="florian.pommerening@unibas.ch", - export=["PATH", "DOWNWARD_BENCHMARKS"]) - -if common_setup.is_test_run(): - SUITE = IssueExperiment.DEFAULT_TEST_SUITE - ENVIRONMENT = LocalEnvironment(processes=1) - -exp = IssueExperiment( - revisions=REVISIONS, - configs=CONFIGS, - environment=ENVIRONMENT, -) - -exp.add_parser(exp.EXITCODE_PARSER) -exp.add_parser(exp.TRANSLATOR_PARSER) -exp.add_parser(exp.SINGLE_SEARCH_PARSER) -exp.add_parser(exp.PLANNER_PARSER) -exp.add_parser("axiom_time_parser.py") - -exp.add_step('build', exp.build) -exp.add_step('start', exp.start_runs) -exp.add_fetcher(name='fetch') - -exp.add_suite(BENCHMARKS_DIR, SUITE) -exp.add_comparison_table_step(attributes=exp.DEFAULT_TABLE_ATTRIBUTES + ["axiom_time_inner", "axiom_time_outer"]) - -for attribute in ["axiom_time_inner", "axiom_time_outer"]: - for config in CONFIGS: - exp.add_report( - RelativeScatterPlotReport( - attributes=[attribute], - filter_algorithm=["{}-{}".format(rev, config.nick) for rev in REVISIONS], - get_category=lambda run1, run2: run1.get("domain"), - ), - outfile="{}-{}-{}-{}-{}.png".format(exp.name, attribute, config.nick, *REVISIONS) - ) - -exp.run_steps() diff --git a/experiments/issue794/v2-opt.py b/experiments/issue794/v2-opt.py deleted file mode 100755 index 
b30af5df3c..0000000000 --- a/experiments/issue794/v2-opt.py +++ /dev/null @@ -1,62 +0,0 @@ -#! /usr/bin/env python -# -*- coding: utf-8 -*- - -import os - -from lab.environments import LocalEnvironment, BaselSlurmEnvironment - -import common_setup -from common_setup import IssueConfig, IssueExperiment -from relativescatter import RelativeScatterPlotReport - -DIR = os.path.dirname(os.path.abspath(__file__)) -BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"] -REVISIONS = ["issue794-base", "issue794-v2"] -CONFIGS = [ - IssueConfig('blind', ['--search', 'astar(blind())']), -] -SUITE = [ - 'assembly', 'miconic-fulladl', 'openstacks', - 'openstacks-sat08-adl', 'optical-telegraphs', 'philosophers', - 'psr-large', 'psr-middle', 'trucks', -] -ENVIRONMENT = BaselSlurmEnvironment( - partition="infai_1", - email="florian.pommerening@unibas.ch", - export=["PATH", "DOWNWARD_BENCHMARKS"]) - -if common_setup.is_test_run(): - SUITE = IssueExperiment.DEFAULT_TEST_SUITE - ENVIRONMENT = LocalEnvironment(processes=1) - -exp = IssueExperiment( - revisions=REVISIONS, - configs=CONFIGS, - environment=ENVIRONMENT, -) - -exp.add_parser(exp.EXITCODE_PARSER) -exp.add_parser(exp.TRANSLATOR_PARSER) -exp.add_parser(exp.SINGLE_SEARCH_PARSER) -exp.add_parser(exp.PLANNER_PARSER) -exp.add_parser("axiom_time_parser.py") - -exp.add_step('build', exp.build) -exp.add_step('start', exp.start_runs) -exp.add_fetcher(name='fetch') - -exp.add_suite(BENCHMARKS_DIR, SUITE) -exp.add_comparison_table_step(attributes=exp.DEFAULT_TABLE_ATTRIBUTES + ["axiom_time_inner", "axiom_time_outer"]) - -for attribute in ["axiom_time_inner", "axiom_time_outer"]: - for config in CONFIGS: - exp.add_report( - RelativeScatterPlotReport( - attributes=[attribute], - filter_algorithm=["{}-{}".format(rev, config.nick) for rev in REVISIONS], - get_category=lambda run1, run2: run1.get("domain"), - ), - outfile="{}-{}-{}-{}-{}.png".format(exp.name, attribute, config.nick, *REVISIONS) - ) - -exp.run_steps() diff --git 
a/experiments/issue794/v3-opt.py b/experiments/issue794/v3-opt.py deleted file mode 100755 index fc7268185e..0000000000 --- a/experiments/issue794/v3-opt.py +++ /dev/null @@ -1,62 +0,0 @@ -#! /usr/bin/env python -# -*- coding: utf-8 -*- - -import os - -from lab.environments import LocalEnvironment, BaselSlurmEnvironment - -import common_setup -from common_setup import IssueConfig, IssueExperiment -from relativescatter import RelativeScatterPlotReport - -DIR = os.path.dirname(os.path.abspath(__file__)) -BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"] -REVISIONS = ["issue794-base", "issue794-v3"] -CONFIGS = [ - IssueConfig('blind', ['--search', 'astar(blind())']), -] -SUITE = [ - 'assembly', 'miconic-fulladl', 'openstacks', - 'openstacks-sat08-adl', 'optical-telegraphs', 'philosophers', - 'psr-large', 'psr-middle', 'trucks', -] -ENVIRONMENT = BaselSlurmEnvironment( - partition="infai_1", - email="florian.pommerening@unibas.ch", - export=["PATH", "DOWNWARD_BENCHMARKS"]) - -if common_setup.is_test_run(): - SUITE = IssueExperiment.DEFAULT_TEST_SUITE - ENVIRONMENT = LocalEnvironment(processes=1) - -exp = IssueExperiment( - revisions=REVISIONS, - configs=CONFIGS, - environment=ENVIRONMENT, -) - -exp.add_parser(exp.EXITCODE_PARSER) -exp.add_parser(exp.TRANSLATOR_PARSER) -exp.add_parser(exp.SINGLE_SEARCH_PARSER) -exp.add_parser(exp.PLANNER_PARSER) -exp.add_parser("axiom_time_parser.py") - -exp.add_step('build', exp.build) -exp.add_step('start', exp.start_runs) -exp.add_fetcher(name='fetch') - -exp.add_suite(BENCHMARKS_DIR, SUITE) -exp.add_comparison_table_step(attributes=exp.DEFAULT_TABLE_ATTRIBUTES + ["axiom_time_inner", "axiom_time_outer"]) - -for attribute in ["axiom_time_inner", "axiom_time_outer"]: - for config in CONFIGS: - exp.add_report( - RelativeScatterPlotReport( - attributes=[attribute], - filter_algorithm=["{}-{}".format(rev, config.nick) for rev in REVISIONS], - get_category=lambda run1, run2: run1.get("domain"), - ), - 
outfile="{}-{}-{}-{}-{}.png".format(exp.name, attribute, config.nick, *REVISIONS) - ) - -exp.run_steps() diff --git a/experiments/issue794/v4-opt.py b/experiments/issue794/v4-opt.py deleted file mode 100755 index 4efed62c62..0000000000 --- a/experiments/issue794/v4-opt.py +++ /dev/null @@ -1,62 +0,0 @@ -#! /usr/bin/env python -# -*- coding: utf-8 -*- - -import os - -from lab.environments import LocalEnvironment, BaselSlurmEnvironment - -import common_setup -from common_setup import IssueConfig, IssueExperiment -from relativescatter import RelativeScatterPlotReport - -DIR = os.path.dirname(os.path.abspath(__file__)) -BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"] -REVISIONS = ["issue794-v1", "issue794-v4"] -CONFIGS = [ - IssueConfig('blind', ['--search', 'astar(blind())']), -] -SUITE = [ - 'assembly', 'miconic-fulladl', 'openstacks', - 'openstacks-sat08-adl', 'optical-telegraphs', 'philosophers', - 'psr-large', 'psr-middle', 'trucks', -] -ENVIRONMENT = BaselSlurmEnvironment( - partition="infai_1", - email="florian.pommerening@unibas.ch", - export=["PATH", "DOWNWARD_BENCHMARKS"]) - -if common_setup.is_test_run(): - SUITE = IssueExperiment.DEFAULT_TEST_SUITE - ENVIRONMENT = LocalEnvironment(processes=1) - -exp = IssueExperiment( - revisions=REVISIONS, - configs=CONFIGS, - environment=ENVIRONMENT, -) - -exp.add_parser(exp.EXITCODE_PARSER) -exp.add_parser(exp.TRANSLATOR_PARSER) -exp.add_parser(exp.SINGLE_SEARCH_PARSER) -exp.add_parser(exp.PLANNER_PARSER) -exp.add_parser("axiom_time_parser.py") - -exp.add_step('build', exp.build) -exp.add_step('start', exp.start_runs) -exp.add_fetcher(name='fetch') - -exp.add_suite(BENCHMARKS_DIR, SUITE) -exp.add_comparison_table_step(attributes=exp.DEFAULT_TABLE_ATTRIBUTES + ["axiom_time_inner", "axiom_time_outer"]) - -for attribute in ["axiom_time_inner", "axiom_time_outer"]: - for config in CONFIGS: - exp.add_report( - RelativeScatterPlotReport( - attributes=[attribute], - filter_algorithm=["{}-{}".format(rev, config.nick) for rev 
in REVISIONS], - get_category=lambda run1, run2: run1.get("domain"), - ), - outfile="{}-{}-{}-{}-{}.png".format(exp.name, attribute, config.nick, *REVISIONS) - ) - -exp.run_steps() diff --git a/experiments/issue803/common_setup.py b/experiments/issue803/common_setup.py deleted file mode 100644 index 338314a650..0000000000 --- a/experiments/issue803/common_setup.py +++ /dev/null @@ -1,384 +0,0 @@ -# -*- coding: utf-8 -*- - -import itertools -import os -import platform -import subprocess -import sys - -from lab.experiment import ARGPARSER -from lab import tools - -from downward.experiment import FastDownwardExperiment -from downward.reports.absolute import AbsoluteReport -from downward.reports.compare import ComparativeReport -from downward.reports.scatter import ScatterPlotReport - -from relativescatter import RelativeScatterPlotReport - - -def parse_args(): - ARGPARSER.add_argument( - "--test", - choices=["yes", "no", "auto"], - default="auto", - dest="test_run", - help="test experiment locally on a small suite if --test=yes or " - "--test=auto and we are not on a cluster") - return ARGPARSER.parse_args() - -ARGS = parse_args() - - -DEFAULT_OPTIMAL_SUITE = [ - 'airport', 'barman-opt11-strips', 'barman-opt14-strips', 'blocks', - 'childsnack-opt14-strips', 'depot', 'driverlog', - 'elevators-opt08-strips', 'elevators-opt11-strips', - 'floortile-opt11-strips', 'floortile-opt14-strips', 'freecell', - 'ged-opt14-strips', 'grid', 'gripper', 'hiking-opt14-strips', - 'logistics00', 'logistics98', 'miconic', 'movie', 'mprime', - 'mystery', 'nomystery-opt11-strips', 'openstacks-opt08-strips', - 'openstacks-opt11-strips', 'openstacks-opt14-strips', - 'openstacks-strips', 'parcprinter-08-strips', - 'parcprinter-opt11-strips', 'parking-opt11-strips', - 'parking-opt14-strips', 'pathways-noneg', 'pegsol-08-strips', - 'pegsol-opt11-strips', 'pipesworld-notankage', - 'pipesworld-tankage', 'psr-small', 'rovers', 'satellite', - 'scanalyzer-08-strips', 'scanalyzer-opt11-strips', - 
'sokoban-opt08-strips', 'sokoban-opt11-strips', 'storage', - 'tetris-opt14-strips', 'tidybot-opt11-strips', - 'tidybot-opt14-strips', 'tpp', 'transport-opt08-strips', - 'transport-opt11-strips', 'transport-opt14-strips', - 'trucks-strips', 'visitall-opt11-strips', 'visitall-opt14-strips', - 'woodworking-opt08-strips', 'woodworking-opt11-strips', - 'zenotravel'] - -DEFAULT_SATISFICING_SUITE = [ - 'airport', 'assembly', 'barman-sat11-strips', - 'barman-sat14-strips', 'blocks', 'cavediving-14-adl', - 'childsnack-sat14-strips', 'citycar-sat14-adl', 'depot', - 'driverlog', 'elevators-sat08-strips', 'elevators-sat11-strips', - 'floortile-sat11-strips', 'floortile-sat14-strips', 'freecell', - 'ged-sat14-strips', 'grid', 'gripper', 'hiking-sat14-strips', - 'logistics00', 'logistics98', 'maintenance-sat14-adl', 'miconic', - 'miconic-fulladl', 'miconic-simpleadl', 'movie', 'mprime', - 'mystery', 'nomystery-sat11-strips', 'openstacks', - 'openstacks-sat08-adl', 'openstacks-sat08-strips', - 'openstacks-sat11-strips', 'openstacks-sat14-strips', - 'openstacks-strips', 'optical-telegraphs', 'parcprinter-08-strips', - 'parcprinter-sat11-strips', 'parking-sat11-strips', - 'parking-sat14-strips', 'pathways', 'pathways-noneg', - 'pegsol-08-strips', 'pegsol-sat11-strips', 'philosophers', - 'pipesworld-notankage', 'pipesworld-tankage', 'psr-large', - 'psr-middle', 'psr-small', 'rovers', 'satellite', - 'scanalyzer-08-strips', 'scanalyzer-sat11-strips', 'schedule', - 'sokoban-sat08-strips', 'sokoban-sat11-strips', 'storage', - 'tetris-sat14-strips', 'thoughtful-sat14-strips', - 'tidybot-sat11-strips', 'tpp', 'transport-sat08-strips', - 'transport-sat11-strips', 'transport-sat14-strips', 'trucks', - 'trucks-strips', 'visitall-sat11-strips', 'visitall-sat14-strips', - 'woodworking-sat08-strips', 'woodworking-sat11-strips', - 'zenotravel'] - - -def get_script(): - """Get file name of main script.""" - return tools.get_script_path() - - -def get_script_dir(): - """Get directory of main 
script. - - Usually a relative directory (depends on how it was called by the user.)""" - return os.path.dirname(get_script()) - - -def get_experiment_name(): - """Get name for experiment. - - Derived from the absolute filename of the main script, e.g. - "/ham/spam/eggs.py" => "spam-eggs".""" - script = os.path.abspath(get_script()) - script_dir = os.path.basename(os.path.dirname(script)) - script_base = os.path.splitext(os.path.basename(script))[0] - return "%s-%s" % (script_dir, script_base) - - -def get_data_dir(): - """Get data dir for the experiment. - - This is the subdirectory "data" of the directory containing - the main script.""" - return os.path.join(get_script_dir(), "data", get_experiment_name()) - - -def get_repo_base(): - """Get base directory of the repository, as an absolute path. - - Search upwards in the directory tree from the main script until a - directory with a subdirectory named ".hg" is found. - - Abort if the repo base cannot be found.""" - path = os.path.abspath(get_script_dir()) - while os.path.dirname(path) != path: - if os.path.exists(os.path.join(path, ".hg")): - return path - path = os.path.dirname(path) - sys.exit("repo base could not be found") - - -def is_running_on_cluster(): - node = platform.node() - return node.endswith(".scicore.unibas.ch") or node.endswith(".cluster.bc2.ch") - - -def is_test_run(): - return ARGS.test_run == "yes" or ( - ARGS.test_run == "auto" and not is_running_on_cluster()) - - -def get_algo_nick(revision, config_nick): - return "{revision}-{config_nick}".format(**locals()) - - -class IssueConfig(object): - """Hold information about a planner configuration. - - See FastDownwardExperiment.add_algorithm() for documentation of the - constructor's options. 
- - """ - def __init__(self, nick, component_options, - build_options=None, driver_options=None): - self.nick = nick - self.component_options = component_options - self.build_options = build_options - self.driver_options = driver_options - - -class IssueExperiment(FastDownwardExperiment): - """Subclass of FastDownwardExperiment with some convenience features.""" - - DEFAULT_TEST_SUITE = ["depot:p01.pddl", "gripper:prob01.pddl"] - - DEFAULT_TABLE_ATTRIBUTES = [ - "cost", - "coverage", - "error", - "evaluations", - "expansions", - "expansions_until_last_jump", - "generated", - "memory", - "planner_memory", - "planner_time", - "quality", - "run_dir", - "score_evaluations", - "score_expansions", - "score_generated", - "score_memory", - "score_search_time", - "score_total_time", - "search_time", - "total_time", - ] - - DEFAULT_SCATTER_PLOT_ATTRIBUTES = [ - "evaluations", - "expansions", - "expansions_until_last_jump", - "initial_h_value", - "memory", - "search_time", - "total_time", - ] - - PORTFOLIO_ATTRIBUTES = [ - "cost", - "coverage", - "error", - "plan_length", - "run_dir", - ] - - def __init__(self, revisions=None, configs=None, path=None, **kwargs): - """ - - You can either specify both *revisions* and *configs* or none - of them. If they are omitted, you will need to call - exp.add_algorithm() manually. - - If *revisions* is given, it must be a non-empty list of - revision identifiers, which specify which planner versions to - use in the experiment. The same versions are used for - translator, preprocessor and search. :: - - IssueExperiment(revisions=["issue123", "4b3d581643"], ...) - - If *configs* is given, it must be a non-empty list of - IssueConfig objects. :: - - IssueExperiment(..., configs=[ - IssueConfig("ff", ["--search", "eager_greedy(ff())"]), - IssueConfig( - "lama", [], - driver_options=["--alias", "seq-sat-lama-2011"]), - ]) - - If *path* is specified, it must be the path to where the - experiment should be built (e.g. 
- /home/john/experiments/issue123/exp01/). If omitted, the - experiment path is derived automatically from the main - script's filename. Example:: - - script = experiments/issue123/exp01.py --> - path = experiments/issue123/data/issue123-exp01/ - - """ - - path = path or get_data_dir() - - FastDownwardExperiment.__init__(self, path=path, **kwargs) - - if (revisions and not configs) or (not revisions and configs): - raise ValueError( - "please provide either both or none of revisions and configs") - - for rev in revisions: - for config in configs: - self.add_algorithm( - get_algo_nick(rev, config.nick), - get_repo_base(), - rev, - config.component_options, - build_options=config.build_options, - driver_options=config.driver_options) - - self._revisions = revisions - self._configs = configs - - @classmethod - def _is_portfolio(cls, config_nick): - return "fdss" in config_nick - - @classmethod - def get_supported_attributes(cls, config_nick, attributes): - if cls._is_portfolio(config_nick): - return [attr for attr in attributes - if attr in cls.PORTFOLIO_ATTRIBUTES] - return attributes - - def add_absolute_report_step(self, **kwargs): - """Add step that makes an absolute report. - - Absolute reports are useful for experiments that don't compare - revisions. - - The report is written to the experiment evaluation directory. - - All *kwargs* will be passed to the AbsoluteReport class. If the - keyword argument *attributes* is not specified, a default list - of attributes is used. :: - - exp.add_absolute_report_step(attributes=["coverage"]) - - """ - kwargs.setdefault("attributes", self.DEFAULT_TABLE_ATTRIBUTES) - report = AbsoluteReport(**kwargs) - outfile = os.path.join( - self.eval_dir, - get_experiment_name() + "." 
+ report.output_format) - self.add_report(report, outfile=outfile) - self.add_step( - 'publish-absolute-report', subprocess.call, ['publish', outfile]) - - def add_comparison_table_step(self, **kwargs): - """Add a step that makes pairwise revision comparisons. - - Create comparative reports for all pairs of Fast Downward - revisions. Each report pairs up the runs of the same config and - lists the two absolute attribute values and their difference - for all attributes in kwargs["attributes"]. - - All *kwargs* will be passed to the CompareConfigsReport class. - If the keyword argument *attributes* is not specified, a - default list of attributes is used. :: - - exp.add_comparison_table_step(attributes=["coverage"]) - - """ - kwargs.setdefault("attributes", self.DEFAULT_TABLE_ATTRIBUTES) - - def make_comparison_tables(): - for rev1, rev2 in itertools.combinations(self._revisions, 2): - compared_configs = [] - for config in self._configs: - config_nick = config.nick - compared_configs.append( - ("%s-%s" % (rev1, config_nick), - "%s-%s" % (rev2, config_nick), - "Diff (%s)" % config_nick)) - report = ComparativeReport(compared_configs, **kwargs) - outfile = os.path.join( - self.eval_dir, - "%s-%s-%s-compare.%s" % ( - self.name, rev1, rev2, report.output_format)) - report(self.eval_dir, outfile) - - def publish_comparison_tables(): - for rev1, rev2 in itertools.combinations(self._revisions, 2): - outfile = os.path.join( - self.eval_dir, - "%s-%s-%s-compare.html" % (self.name, rev1, rev2)) - subprocess.call(["publish", outfile]) - - self.add_step("make-comparison-tables", make_comparison_tables) - self.add_step( - "publish-comparison-tables", publish_comparison_tables) - - def add_scatter_plot_step(self, relative=False, attributes=None): - """Add step creating (relative) scatter plots for all revision pairs. - - Create a scatter plot for each combination of attribute, - configuration and revisions pair. 
If *attributes* is not - specified, a list of common scatter plot attributes is used. - For portfolios all attributes except "cost", "coverage" and - "plan_length" will be ignored. :: - - exp.add_scatter_plot_step(attributes=["expansions"]) - - """ - if relative: - report_class = RelativeScatterPlotReport - scatter_dir = os.path.join(self.eval_dir, "scatter-relative") - step_name = "make-relative-scatter-plots" - else: - report_class = ScatterPlotReport - scatter_dir = os.path.join(self.eval_dir, "scatter-absolute") - step_name = "make-absolute-scatter-plots" - if attributes is None: - attributes = self.DEFAULT_SCATTER_PLOT_ATTRIBUTES - - def make_scatter_plot(config_nick, rev1, rev2, attribute): - name = "-".join([self.name, rev1, rev2, attribute, config_nick]) - print "Make scatter plot for", name - algo1 = "{}-{}".format(rev1, config_nick) - algo2 = "{}-{}".format(rev2, config_nick) - report = report_class( - filter_config=[algo1, algo2], - attributes=[attribute], - get_category=lambda run1, run2: run1["domain"], - legend_location=(1.3, 0.5)) - report( - self.eval_dir, - os.path.join(scatter_dir, rev1 + "-" + rev2, name)) - - def make_scatter_plots(): - for config in self._configs: - for rev1, rev2 in itertools.combinations(self._revisions, 2): - for attribute in self.get_supported_attributes( - config.nick, attributes): - make_scatter_plot(config.nick, rev1, rev2, attribute) - - self.add_step(step_name, make_scatter_plots) diff --git a/experiments/issue803/relativescatter.py b/experiments/issue803/relativescatter.py deleted file mode 100644 index f74cb6e721..0000000000 --- a/experiments/issue803/relativescatter.py +++ /dev/null @@ -1,105 +0,0 @@ -# -*- coding: utf-8 -*- - -from collections import defaultdict - -from matplotlib import ticker - -from downward.reports.scatter import ScatterPlotReport -from downward.reports.plot import PlotReport, Matplotlib, MatplotlibPlot - - -# TODO: handle outliers - -# TODO: this is mostly copied from ScatterMatplotlib 
(scatter.py) -class RelativeScatterMatplotlib(Matplotlib): - @classmethod - def _plot(cls, report, axes, categories, styles): - # Display grid - axes.grid(b=True, linestyle='-', color='0.75') - - has_points = False - # Generate the scatter plots - for category, coords in sorted(categories.items()): - X, Y = zip(*coords) - axes.scatter(X, Y, s=42, label=category, **styles[category]) - if X and Y: - has_points = True - - if report.xscale == 'linear' or report.yscale == 'linear': - plot_size = report.missing_val * 1.01 - else: - plot_size = report.missing_val * 1.25 - - # make 5 ticks above and below 1 - yticks = [] - tick_step = report.ylim_top**(1/5.0) - for i in xrange(-5, 6): - yticks.append(tick_step**i) - axes.set_yticks(yticks) - axes.get_yaxis().set_major_formatter(ticker.ScalarFormatter()) - - axes.set_xlim(report.xlim_left or -1, report.xlim_right or plot_size) - axes.set_ylim(report.ylim_bottom or -1, report.ylim_top or plot_size) - - for axis in [axes.xaxis, axes.yaxis]: - MatplotlibPlot.change_axis_formatter( - axis, - report.missing_val if report.show_missing else None) - return has_points - - -class RelativeScatterPlotReport(ScatterPlotReport): - """ - Generate a scatter plot that shows a relative comparison of two - algorithms with regard to the given attribute. The attribute value - of algorithm 1 is shown on the x-axis and the relation to the value - of algorithm 2 on the y-axis. - """ - - def __init__(self, show_missing=True, get_category=None, **kwargs): - ScatterPlotReport.__init__(self, show_missing, get_category, **kwargs) - if self.output_format == 'tex': - raise "not supported" - else: - self.writer = RelativeScatterMatplotlib - - def _fill_categories(self, runs): - # We discard the *runs* parameter. 
- # Map category names to value tuples - categories = defaultdict(list) - self.ylim_bottom = 2 - self.ylim_top = 0.5 - self.xlim_left = float("inf") - for (domain, problem), runs in self.problem_runs.items(): - if len(runs) != 2: - continue - run1, run2 = runs - assert (run1['algorithm'] == self.algorithms[0] and - run2['algorithm'] == self.algorithms[1]) - val1 = run1.get(self.attribute) - val2 = run2.get(self.attribute) - if val1 is None or val2 is None: - continue - category = self.get_category(run1, run2) - assert val1 > 0, (domain, problem, self.algorithms[0], val1) - assert val2 > 0, (domain, problem, self.algorithms[1], val2) - x = val1 - y = val2 / float(val1) - - categories[category].append((x, y)) - - self.ylim_top = max(self.ylim_top, y) - self.ylim_bottom = min(self.ylim_bottom, y) - self.xlim_left = min(self.xlim_left, x) - - # center around 1 - if self.ylim_bottom < 1: - self.ylim_top = max(self.ylim_top, 1 / float(self.ylim_bottom)) - if self.ylim_top > 1: - self.ylim_bottom = min(self.ylim_bottom, 1 / float(self.ylim_top)) - return categories - - def _set_scales(self, xscale, yscale): - # ScatterPlot uses log-scaling on the x-axis by default. - PlotReport._set_scales( - self, xscale or self.attribute.scale or 'log', 'log') diff --git a/experiments/issue803/v1.py b/experiments/issue803/v1.py deleted file mode 100755 index ba45a41c3d..0000000000 --- a/experiments/issue803/v1.py +++ /dev/null @@ -1,65 +0,0 @@ -#! 
/usr/bin/env python -# -*- coding: utf-8 -*- - -import os - -from lab.environments import LocalEnvironment, BaselSlurmEnvironment - -import common_setup -from common_setup import IssueConfig, IssueExperiment -from relativescatter import RelativeScatterPlotReport - -DIR = os.path.dirname(os.path.abspath(__file__)) -BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"] -REVISIONS = ["issue803-base", "issue803-v1"] -if common_setup.is_test_run(): - BUILDS = ["release32"] -else: - BUILDS = ["debug32", "release32", "debug64", "release64"] -CONFIGS = [ - IssueConfig( - build + "-blind", - ["--search", "astar(blind())"], - build_options=[build], - driver_options=["--build", build, "--overall-time-limit", "5m"]) - for build in BUILDS -] -SUITE = common_setup.DEFAULT_OPTIMAL_SUITE -ENVIRONMENT = BaselSlurmEnvironment( - partition="infai_1", - email="jendrik.seipp@unibas.ch", - export=["PATH", "DOWNWARD_BENCHMARKS"]) - -if common_setup.is_test_run(): - SUITE = IssueExperiment.DEFAULT_TEST_SUITE - ENVIRONMENT = LocalEnvironment(processes=1) - -exp = IssueExperiment( - revisions=REVISIONS, - configs=CONFIGS, - environment=ENVIRONMENT, -) -exp.add_suite(BENCHMARKS_DIR, SUITE) - -exp.add_parser(exp.EXITCODE_PARSER) -exp.add_parser(exp.TRANSLATOR_PARSER) -exp.add_parser(exp.SINGLE_SEARCH_PARSER) -exp.add_parser(exp.PLANNER_PARSER) - -exp.add_step('build', exp.build) -exp.add_step('start', exp.start_runs) -exp.add_fetcher(name='fetch') - -#exp.add_absolute_report_step() -exp.add_comparison_table_step() - -for attribute in ["memory", "total_time"]: - for config in CONFIGS: - exp.add_report( - RelativeScatterPlotReport( - attributes=[attribute], - filter_algorithm=["{}-{}".format(rev, config.nick) for rev in REVISIONS], - get_category=lambda run1, run2: run1.get("domain")), - outfile="{}-{}-{}-{}-{}.png".format(exp.name, attribute, config.nick, *REVISIONS)) - -exp.run_steps() diff --git a/experiments/issue814/align-hmax.py b/experiments/issue814/align-hmax.py deleted file mode 100755 
index 6281b2cab9..0000000000 --- a/experiments/issue814/align-hmax.py +++ /dev/null @@ -1,70 +0,0 @@ -#! /usr/bin/env python -# -*- coding: utf-8 -*- - -import os - -from lab.environments import LocalEnvironment, BaselSlurmEnvironment - -import common_setup -from common_setup import IssueConfig, IssueExperiment -from relativescatter import RelativeScatterPlotReport - -DIR = os.path.dirname(os.path.abspath(__file__)) -BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"] -REVISIONS = ["issue814-v14", "issue814-v16"] -if common_setup.is_test_run(): - BUILDS = ["release64"] -else: - BUILDS = ["release32", "release64"] -CONFIGS = [ - IssueConfig( - build + "-{heuristic}".format(**locals()), - ["--evaluator", "h={heuristic}()".format(**locals()), - "--search", "astar(h)"], - build_options=[build], - driver_options=["--build", build, "--overall-time-limit", "30m"]) - for build in BUILDS - for heuristic in ["hmax"] -] -SUITE = common_setup.DEFAULT_OPTIMAL_SUITE -ENVIRONMENT = BaselSlurmEnvironment( - partition="infai_1", - email="jendrik.seipp@unibas.ch", - export=["PATH", "DOWNWARD_BENCHMARKS"]) - -if common_setup.is_test_run(): - SUITE = IssueExperiment.DEFAULT_TEST_SUITE - ENVIRONMENT = LocalEnvironment(processes=1) - -exp = IssueExperiment( - revisions=REVISIONS, - configs=CONFIGS, - environment=ENVIRONMENT, -) -exp.add_suite(BENCHMARKS_DIR, SUITE) - -exp.add_parser(exp.EXITCODE_PARSER) -exp.add_parser(exp.TRANSLATOR_PARSER) -exp.add_parser(exp.SINGLE_SEARCH_PARSER) -exp.add_parser(exp.PLANNER_PARSER) -exp.add_parser(os.path.join(DIR, "parser.py")) - -exp.add_step('build', exp.build) -exp.add_step('start', exp.start_runs) -exp.add_fetcher(name='fetch') - -#exp.add_absolute_report_step() -exp.add_comparison_table_step( - attributes=IssueExperiment.DEFAULT_TABLE_ATTRIBUTES + - ["simplify_before", "simplify_after", "simplify_time"]) - -for attribute in ["memory", "total_time"]: - for config in CONFIGS: - exp.add_report( - RelativeScatterPlotReport( - attributes=[attribute], 
- filter_algorithm=["{}-{}".format(rev, config.nick) for rev in REVISIONS], - get_category=lambda run1, run2: run1.get("domain")), - outfile="{}-{}-{}-{}-{}.png".format(exp.name, attribute, config.nick, *REVISIONS)) - -exp.run_steps() diff --git a/experiments/issue814/align-sat.py b/experiments/issue814/align-sat.py deleted file mode 100755 index 0bd96285a4..0000000000 --- a/experiments/issue814/align-sat.py +++ /dev/null @@ -1,70 +0,0 @@ -#! /usr/bin/env python -# -*- coding: utf-8 -*- - -import os - -from lab.environments import LocalEnvironment, BaselSlurmEnvironment - -import common_setup -from common_setup import IssueConfig, IssueExperiment -from relativescatter import RelativeScatterPlotReport - -DIR = os.path.dirname(os.path.abspath(__file__)) -BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"] -REVISIONS = ["issue814-v14", "issue814-v16"] -if common_setup.is_test_run(): - BUILDS = ["release64"] -else: - BUILDS = ["release32", "release64"] -CONFIGS = [ - IssueConfig( - build + "-{heuristic}".format(**locals()), - ["--evaluator", "h={heuristic}()".format(**locals()), - "--search", "lazy_greedy([h])"], - build_options=[build], - driver_options=["--build", build, "--overall-time-limit", "30m"]) - for build in BUILDS - for heuristic in ["add", "ff"] -] -SUITE = common_setup.DEFAULT_SATISFICING_SUITE -ENVIRONMENT = BaselSlurmEnvironment( - partition="infai_2", - email="jendrik.seipp@unibas.ch", - export=["PATH", "DOWNWARD_BENCHMARKS"]) - -if common_setup.is_test_run(): - SUITE = IssueExperiment.DEFAULT_TEST_SUITE - ENVIRONMENT = LocalEnvironment(processes=1) - -exp = IssueExperiment( - revisions=REVISIONS, - configs=CONFIGS, - environment=ENVIRONMENT, -) -exp.add_suite(BENCHMARKS_DIR, SUITE) - -exp.add_parser(exp.EXITCODE_PARSER) -exp.add_parser(exp.TRANSLATOR_PARSER) -exp.add_parser(exp.SINGLE_SEARCH_PARSER) -exp.add_parser(exp.PLANNER_PARSER) -exp.add_parser(os.path.join(DIR, "parser.py")) - -exp.add_step('build', exp.build) -exp.add_step('start', 
exp.start_runs) -exp.add_fetcher(name='fetch') - -#exp.add_absolute_report_step() -exp.add_comparison_table_step( - attributes=IssueExperiment.DEFAULT_TABLE_ATTRIBUTES + - ["simplify_before", "simplify_after", "simplify_time"]) - -for attribute in ["memory", "total_time"]: - for config in CONFIGS: - exp.add_report( - RelativeScatterPlotReport( - attributes=[attribute], - filter_algorithm=["{}-{}".format(rev, config.nick) for rev in REVISIONS], - get_category=lambda run1, run2: run1.get("domain")), - outfile="{}-{}-{}-{}-{}.png".format(exp.name, attribute, config.nick, *REVISIONS)) - -exp.run_steps() diff --git a/experiments/issue814/common_setup.py b/experiments/issue814/common_setup.py deleted file mode 100644 index f64c5c15b3..0000000000 --- a/experiments/issue814/common_setup.py +++ /dev/null @@ -1,394 +0,0 @@ -# -*- coding: utf-8 -*- - -import itertools -import os -import platform -import subprocess -import sys - -from lab.experiment import ARGPARSER -from lab import tools - -from downward.experiment import FastDownwardExperiment -from downward.reports.absolute import AbsoluteReport -from downward.reports.compare import ComparativeReport -from downward.reports.scatter import ScatterPlotReport - -from relativescatter import RelativeScatterPlotReport - - -def parse_args(): - ARGPARSER.add_argument( - "--test", - choices=["yes", "no", "auto"], - default="auto", - dest="test_run", - help="test experiment locally on a small suite if --test=yes or " - "--test=auto and we are not on a cluster") - return ARGPARSER.parse_args() - -ARGS = parse_args() - - -DEFAULT_OPTIMAL_SUITE = [ - 'agricola-opt18-strips', 'airport', 'barman-opt11-strips', - 'barman-opt14-strips', 'blocks', 'childsnack-opt14-strips', - 'data-network-opt18-strips', 'depot', 'driverlog', - 'elevators-opt08-strips', 'elevators-opt11-strips', - 'floortile-opt11-strips', 'floortile-opt14-strips', 'freecell', - 'ged-opt14-strips', 'grid', 'gripper', 'hiking-opt14-strips', - 'logistics00', 'logistics98', 
'miconic', 'movie', 'mprime', - 'mystery', 'nomystery-opt11-strips', 'openstacks-opt08-strips', - 'openstacks-opt11-strips', 'openstacks-opt14-strips', - 'openstacks-strips', 'organic-synthesis-opt18-strips', - 'organic-synthesis-split-opt18-strips', 'parcprinter-08-strips', - 'parcprinter-opt11-strips', 'parking-opt11-strips', - 'parking-opt14-strips', 'pathways-noneg', 'pegsol-08-strips', - 'pegsol-opt11-strips', 'petri-net-alignment-opt18-strips', - 'pipesworld-notankage', 'pipesworld-tankage', 'psr-small', 'rovers', - 'satellite', 'scanalyzer-08-strips', 'scanalyzer-opt11-strips', - 'snake-opt18-strips', 'sokoban-opt08-strips', - 'sokoban-opt11-strips', 'spider-opt18-strips', 'storage', - 'termes-opt18-strips', 'tetris-opt14-strips', - 'tidybot-opt11-strips', 'tidybot-opt14-strips', 'tpp', - 'transport-opt08-strips', 'transport-opt11-strips', - 'transport-opt14-strips', 'trucks-strips', 'visitall-opt11-strips', - 'visitall-opt14-strips', 'woodworking-opt08-strips', - 'woodworking-opt11-strips', 'zenotravel'] - -DEFAULT_SATISFICING_SUITE = [ - 'agricola-sat18-strips', 'airport', 'assembly', - 'barman-sat11-strips', 'barman-sat14-strips', 'blocks', - 'caldera-sat18-adl', 'caldera-split-sat18-adl', 'cavediving-14-adl', - 'childsnack-sat14-strips', 'citycar-sat14-adl', - 'data-network-sat18-strips', 'depot', 'driverlog', - 'elevators-sat08-strips', 'elevators-sat11-strips', - 'flashfill-sat18-adl', 'floortile-sat11-strips', - 'floortile-sat14-strips', 'freecell', 'ged-sat14-strips', 'grid', - 'gripper', 'hiking-sat14-strips', 'logistics00', 'logistics98', - 'maintenance-sat14-adl', 'miconic', 'miconic-fulladl', - 'miconic-simpleadl', 'movie', 'mprime', 'mystery', - 'nomystery-sat11-strips', 'nurikabe-sat18-adl', 'openstacks', - 'openstacks-sat08-adl', 'openstacks-sat08-strips', - 'openstacks-sat11-strips', 'openstacks-sat14-strips', - 'openstacks-strips', 'optical-telegraphs', - 'organic-synthesis-sat18-strips', - 'organic-synthesis-split-sat18-strips', 
'parcprinter-08-strips', - 'parcprinter-sat11-strips', 'parking-sat11-strips', - 'parking-sat14-strips', 'pathways', 'pathways-noneg', - 'pegsol-08-strips', 'pegsol-sat11-strips', 'philosophers', - 'pipesworld-notankage', 'pipesworld-tankage', 'psr-large', - 'psr-middle', 'psr-small', 'rovers', 'satellite', - 'scanalyzer-08-strips', 'scanalyzer-sat11-strips', 'schedule', - 'settlers-sat18-adl', 'snake-sat18-strips', 'sokoban-sat08-strips', - 'sokoban-sat11-strips', 'spider-sat18-strips', 'storage', - 'termes-sat18-strips', 'tetris-sat14-strips', - 'thoughtful-sat14-strips', 'tidybot-sat11-strips', 'tpp', - 'transport-sat08-strips', 'transport-sat11-strips', - 'transport-sat14-strips', 'trucks', 'trucks-strips', - 'visitall-sat11-strips', 'visitall-sat14-strips', - 'woodworking-sat08-strips', 'woodworking-sat11-strips', - 'zenotravel'] - - -def get_script(): - """Get file name of main script.""" - return tools.get_script_path() - - -def get_script_dir(): - """Get directory of main script. - - Usually a relative directory (depends on how it was called by the user.)""" - return os.path.dirname(get_script()) - - -def get_experiment_name(): - """Get name for experiment. - - Derived from the absolute filename of the main script, e.g. - "/ham/spam/eggs.py" => "spam-eggs".""" - script = os.path.abspath(get_script()) - script_dir = os.path.basename(os.path.dirname(script)) - script_base = os.path.splitext(os.path.basename(script))[0] - return "%s-%s" % (script_dir, script_base) - - -def get_data_dir(): - """Get data dir for the experiment. - - This is the subdirectory "data" of the directory containing - the main script.""" - return os.path.join(get_script_dir(), "data", get_experiment_name()) - - -def get_repo_base(): - """Get base directory of the repository, as an absolute path. - - Search upwards in the directory tree from the main script until a - directory with a subdirectory named ".hg" is found. 
- - Abort if the repo base cannot be found.""" - path = os.path.abspath(get_script_dir()) - while os.path.dirname(path) != path: - if os.path.exists(os.path.join(path, ".hg")): - return path - path = os.path.dirname(path) - sys.exit("repo base could not be found") - - -def is_running_on_cluster(): - node = platform.node() - return node.endswith(".scicore.unibas.ch") or node.endswith(".cluster.bc2.ch") - - -def is_test_run(): - return ARGS.test_run == "yes" or ( - ARGS.test_run == "auto" and not is_running_on_cluster()) - - -def get_algo_nick(revision, config_nick): - return "{revision}-{config_nick}".format(**locals()) - - -class IssueConfig(object): - """Hold information about a planner configuration. - - See FastDownwardExperiment.add_algorithm() for documentation of the - constructor's options. - - """ - def __init__(self, nick, component_options, - build_options=None, driver_options=None): - self.nick = nick - self.component_options = component_options - self.build_options = build_options - self.driver_options = driver_options - - -class IssueExperiment(FastDownwardExperiment): - """Subclass of FastDownwardExperiment with some convenience features.""" - - DEFAULT_TEST_SUITE = ["depot:p01.pddl", "gripper:prob01.pddl"] - - DEFAULT_TABLE_ATTRIBUTES = [ - "cost", - "coverage", - "error", - "evaluations", - "expansions", - "expansions_until_last_jump", - "generated", - "memory", - "planner_memory", - "planner_time", - "quality", - "run_dir", - "score_evaluations", - "score_expansions", - "score_generated", - "score_memory", - "score_search_time", - "score_total_time", - "search_time", - "total_time", - ] - - DEFAULT_SCATTER_PLOT_ATTRIBUTES = [ - "evaluations", - "expansions", - "expansions_until_last_jump", - "initial_h_value", - "memory", - "search_time", - "total_time", - ] - - PORTFOLIO_ATTRIBUTES = [ - "cost", - "coverage", - "error", - "plan_length", - "run_dir", - ] - - def __init__(self, revisions=None, configs=None, path=None, **kwargs): - """ - - You can 
either specify both *revisions* and *configs* or none - of them. If they are omitted, you will need to call - exp.add_algorithm() manually. - - If *revisions* is given, it must be a non-empty list of - revision identifiers, which specify which planner versions to - use in the experiment. The same versions are used for - translator, preprocessor and search. :: - - IssueExperiment(revisions=["issue123", "4b3d581643"], ...) - - If *configs* is given, it must be a non-empty list of - IssueConfig objects. :: - - IssueExperiment(..., configs=[ - IssueConfig("ff", ["--search", "eager_greedy(ff())"]), - IssueConfig( - "lama", [], - driver_options=["--alias", "seq-sat-lama-2011"]), - ]) - - If *path* is specified, it must be the path to where the - experiment should be built (e.g. - /home/john/experiments/issue123/exp01/). If omitted, the - experiment path is derived automatically from the main - script's filename. Example:: - - script = experiments/issue123/exp01.py --> - path = experiments/issue123/data/issue123-exp01/ - - """ - - path = path or get_data_dir() - - FastDownwardExperiment.__init__(self, path=path, **kwargs) - - if (revisions and not configs) or (not revisions and configs): - raise ValueError( - "please provide either both or none of revisions and configs") - - for rev in revisions: - for config in configs: - self.add_algorithm( - get_algo_nick(rev, config.nick), - get_repo_base(), - rev, - config.component_options, - build_options=config.build_options, - driver_options=config.driver_options) - - self._revisions = revisions - self._configs = configs - - @classmethod - def _is_portfolio(cls, config_nick): - return "fdss" in config_nick - - @classmethod - def get_supported_attributes(cls, config_nick, attributes): - if cls._is_portfolio(config_nick): - return [attr for attr in attributes - if attr in cls.PORTFOLIO_ATTRIBUTES] - return attributes - - def add_absolute_report_step(self, **kwargs): - """Add step that makes an absolute report. 
- - Absolute reports are useful for experiments that don't compare - revisions. - - The report is written to the experiment evaluation directory. - - All *kwargs* will be passed to the AbsoluteReport class. If the - keyword argument *attributes* is not specified, a default list - of attributes is used. :: - - exp.add_absolute_report_step(attributes=["coverage"]) - - """ - kwargs.setdefault("attributes", self.DEFAULT_TABLE_ATTRIBUTES) - report = AbsoluteReport(**kwargs) - outfile = os.path.join( - self.eval_dir, - get_experiment_name() + "." + report.output_format) - self.add_report(report, outfile=outfile) - self.add_step( - 'publish-absolute-report', subprocess.call, ['publish', outfile]) - - def add_comparison_table_step(self, **kwargs): - """Add a step that makes pairwise revision comparisons. - - Create comparative reports for all pairs of Fast Downward - revisions. Each report pairs up the runs of the same config and - lists the two absolute attribute values and their difference - for all attributes in kwargs["attributes"]. - - All *kwargs* will be passed to the CompareConfigsReport class. - If the keyword argument *attributes* is not specified, a - default list of attributes is used. 
:: - - exp.add_comparison_table_step(attributes=["coverage"]) - - """ - kwargs.setdefault("attributes", self.DEFAULT_TABLE_ATTRIBUTES) - - def make_comparison_tables(): - for rev1, rev2 in itertools.combinations(self._revisions, 2): - compared_configs = [] - for config in self._configs: - config_nick = config.nick - compared_configs.append( - ("%s-%s" % (rev1, config_nick), - "%s-%s" % (rev2, config_nick), - "Diff (%s)" % config_nick)) - report = ComparativeReport(compared_configs, **kwargs) - outfile = os.path.join( - self.eval_dir, - "%s-%s-%s-compare.%s" % ( - self.name, rev1, rev2, report.output_format)) - report(self.eval_dir, outfile) - - def publish_comparison_tables(): - for rev1, rev2 in itertools.combinations(self._revisions, 2): - outfile = os.path.join( - self.eval_dir, - "%s-%s-%s-compare.html" % (self.name, rev1, rev2)) - subprocess.call(["publish", outfile]) - - self.add_step("make-comparison-tables", make_comparison_tables) - self.add_step( - "publish-comparison-tables", publish_comparison_tables) - - def add_scatter_plot_step(self, relative=False, attributes=None): - """Add step creating (relative) scatter plots for all revision pairs. - - Create a scatter plot for each combination of attribute, - configuration and revisions pair. If *attributes* is not - specified, a list of common scatter plot attributes is used. - For portfolios all attributes except "cost", "coverage" and - "plan_length" will be ignored. 
:: - - exp.add_scatter_plot_step(attributes=["expansions"]) - - """ - if relative: - report_class = RelativeScatterPlotReport - scatter_dir = os.path.join(self.eval_dir, "scatter-relative") - step_name = "make-relative-scatter-plots" - else: - report_class = ScatterPlotReport - scatter_dir = os.path.join(self.eval_dir, "scatter-absolute") - step_name = "make-absolute-scatter-plots" - if attributes is None: - attributes = self.DEFAULT_SCATTER_PLOT_ATTRIBUTES - - def make_scatter_plot(config_nick, rev1, rev2, attribute): - name = "-".join([self.name, rev1, rev2, attribute, config_nick]) - print "Make scatter plot for", name - algo1 = "{}-{}".format(rev1, config_nick) - algo2 = "{}-{}".format(rev2, config_nick) - report = report_class( - filter_config=[algo1, algo2], - attributes=[attribute], - get_category=lambda run1, run2: run1["domain"], - legend_location=(1.3, 0.5)) - report( - self.eval_dir, - os.path.join(scatter_dir, rev1 + "-" + rev2, name)) - - def make_scatter_plots(): - for config in self._configs: - for rev1, rev2 in itertools.combinations(self._revisions, 2): - for attribute in self.get_supported_attributes( - config.nick, attributes): - make_scatter_plot(config.nick, rev1, rev2, attribute) - - self.add_step(step_name, make_scatter_plots) diff --git a/experiments/issue814/parser.py b/experiments/issue814/parser.py deleted file mode 100755 index 83a9a0d885..0000000000 --- a/experiments/issue814/parser.py +++ /dev/null @@ -1,23 +0,0 @@ -#! /usr/bin/env python - -""" -======================================================================== -Simplifying 29928 unary operators... done! [17292 unary operators] -time to simplify: 0.022623s -======================================================================== - -=> Here we want to extract 29928 (simplify_before), 17292 (simplify_after) and -0.022623s (simplify_time). 
-""" - -import re - -from lab.parser import Parser - - -print 'Running custom parser' -parser = Parser() -parser.add_pattern('simplify_before', r'^Simplifying (\d+) unary operators\.\.\. done! \[\d+ unary operators\]$', type=int) -parser.add_pattern('simplify_after', r'^Simplifying \d+ unary operators\.\.\. done! \[(\d+) unary operators\]$', type=int) -parser.add_pattern('simplify_time', r'^time to simplify: (.+)s$', type=float) -parser.parse() diff --git a/experiments/issue814/relativescatter.py b/experiments/issue814/relativescatter.py deleted file mode 100644 index f74cb6e721..0000000000 --- a/experiments/issue814/relativescatter.py +++ /dev/null @@ -1,105 +0,0 @@ -# -*- coding: utf-8 -*- - -from collections import defaultdict - -from matplotlib import ticker - -from downward.reports.scatter import ScatterPlotReport -from downward.reports.plot import PlotReport, Matplotlib, MatplotlibPlot - - -# TODO: handle outliers - -# TODO: this is mostly copied from ScatterMatplotlib (scatter.py) -class RelativeScatterMatplotlib(Matplotlib): - @classmethod - def _plot(cls, report, axes, categories, styles): - # Display grid - axes.grid(b=True, linestyle='-', color='0.75') - - has_points = False - # Generate the scatter plots - for category, coords in sorted(categories.items()): - X, Y = zip(*coords) - axes.scatter(X, Y, s=42, label=category, **styles[category]) - if X and Y: - has_points = True - - if report.xscale == 'linear' or report.yscale == 'linear': - plot_size = report.missing_val * 1.01 - else: - plot_size = report.missing_val * 1.25 - - # make 5 ticks above and below 1 - yticks = [] - tick_step = report.ylim_top**(1/5.0) - for i in xrange(-5, 6): - yticks.append(tick_step**i) - axes.set_yticks(yticks) - axes.get_yaxis().set_major_formatter(ticker.ScalarFormatter()) - - axes.set_xlim(report.xlim_left or -1, report.xlim_right or plot_size) - axes.set_ylim(report.ylim_bottom or -1, report.ylim_top or plot_size) - - for axis in [axes.xaxis, axes.yaxis]: - 
MatplotlibPlot.change_axis_formatter( - axis, - report.missing_val if report.show_missing else None) - return has_points - - -class RelativeScatterPlotReport(ScatterPlotReport): - """ - Generate a scatter plot that shows a relative comparison of two - algorithms with regard to the given attribute. The attribute value - of algorithm 1 is shown on the x-axis and the relation to the value - of algorithm 2 on the y-axis. - """ - - def __init__(self, show_missing=True, get_category=None, **kwargs): - ScatterPlotReport.__init__(self, show_missing, get_category, **kwargs) - if self.output_format == 'tex': - raise "not supported" - else: - self.writer = RelativeScatterMatplotlib - - def _fill_categories(self, runs): - # We discard the *runs* parameter. - # Map category names to value tuples - categories = defaultdict(list) - self.ylim_bottom = 2 - self.ylim_top = 0.5 - self.xlim_left = float("inf") - for (domain, problem), runs in self.problem_runs.items(): - if len(runs) != 2: - continue - run1, run2 = runs - assert (run1['algorithm'] == self.algorithms[0] and - run2['algorithm'] == self.algorithms[1]) - val1 = run1.get(self.attribute) - val2 = run2.get(self.attribute) - if val1 is None or val2 is None: - continue - category = self.get_category(run1, run2) - assert val1 > 0, (domain, problem, self.algorithms[0], val1) - assert val2 > 0, (domain, problem, self.algorithms[1], val2) - x = val1 - y = val2 / float(val1) - - categories[category].append((x, y)) - - self.ylim_top = max(self.ylim_top, y) - self.ylim_bottom = min(self.ylim_bottom, y) - self.xlim_left = min(self.xlim_left, x) - - # center around 1 - if self.ylim_bottom < 1: - self.ylim_top = max(self.ylim_top, 1 / float(self.ylim_bottom)) - if self.ylim_top > 1: - self.ylim_bottom = min(self.ylim_bottom, 1 / float(self.ylim_top)) - return categories - - def _set_scales(self, xscale, yscale): - # ScatterPlot uses log-scaling on the x-axis by default. 
- PlotReport._set_scales( - self, xscale or self.attribute.scale or 'log', 'log') diff --git a/experiments/issue814/v1.py b/experiments/issue814/v1.py deleted file mode 100755 index ff77e47e30..0000000000 --- a/experiments/issue814/v1.py +++ /dev/null @@ -1,69 +0,0 @@ -#! /usr/bin/env python -# -*- coding: utf-8 -*- - -import os - -from lab.environments import LocalEnvironment, BaselSlurmEnvironment - -import common_setup -from common_setup import IssueConfig, IssueExperiment -from relativescatter import RelativeScatterPlotReport - -DIR = os.path.dirname(os.path.abspath(__file__)) -BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"] -REVISIONS = ["issue814-base", "issue814-v1"] -if common_setup.is_test_run(): - BUILDS = ["release32"] -else: - BUILDS = ["debug64", "release32", "release64"] -CONFIGS = [ - IssueConfig( - build + "-{heuristic}".format(**locals()), - ["--evaluator", "h={heuristic}()".format(**locals()), "--search", "lazy_greedy([h],preferred=[h])"], - build_options=[build], - driver_options=["--build", build, "--overall-time-limit", "30m"]) - for build in BUILDS - for heuristic in ["add", "ff"] -] -SUITE = common_setup.DEFAULT_SATISFICING_SUITE -ENVIRONMENT = BaselSlurmEnvironment( - partition="infai_2", - email="malte.helmert@unibas.ch", - export=["PATH", "DOWNWARD_BENCHMARKS"]) - -if common_setup.is_test_run(): - SUITE = IssueExperiment.DEFAULT_TEST_SUITE - ENVIRONMENT = LocalEnvironment(processes=1) - -exp = IssueExperiment( - revisions=REVISIONS, - configs=CONFIGS, - environment=ENVIRONMENT, -) -exp.add_suite(BENCHMARKS_DIR, SUITE) - -exp.add_parser(exp.EXITCODE_PARSER) -exp.add_parser(exp.TRANSLATOR_PARSER) -exp.add_parser(exp.SINGLE_SEARCH_PARSER) -exp.add_parser(exp.PLANNER_PARSER) -exp.add_parser(os.path.join(DIR, "parser.py")) - -exp.add_step('build', exp.build) -exp.add_step('start', exp.start_runs) -exp.add_fetcher(name='fetch') - -#exp.add_absolute_report_step() -exp.add_comparison_table_step( - 
attributes=IssueExperiment.DEFAULT_TABLE_ATTRIBUTES + - ["simplify_before", "simplify_after", "simplify_time"]) - -for attribute in ["memory", "total_time"]: - for config in CONFIGS: - exp.add_report( - RelativeScatterPlotReport( - attributes=[attribute], - filter_algorithm=["{}-{}".format(rev, config.nick) for rev in REVISIONS], - get_category=lambda run1, run2: run1.get("domain")), - outfile="{}-{}-{}-{}-{}.png".format(exp.name, attribute, config.nick, *REVISIONS)) - -exp.run_steps() diff --git a/experiments/issue814/v10-base-30min.py b/experiments/issue814/v10-base-30min.py deleted file mode 100755 index 57d224b2e4..0000000000 --- a/experiments/issue814/v10-base-30min.py +++ /dev/null @@ -1,69 +0,0 @@ -#! /usr/bin/env python -# -*- coding: utf-8 -*- - -import os - -from lab.environments import LocalEnvironment, BaselSlurmEnvironment - -import common_setup -from common_setup import IssueConfig, IssueExperiment -from relativescatter import RelativeScatterPlotReport - -DIR = os.path.dirname(os.path.abspath(__file__)) -BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"] -REVISIONS = ["issue814-base", "issue814-v10"] -if common_setup.is_test_run(): - BUILDS = ["release32"] -else: - BUILDS = ["debug64", "release32", "release64"] -CONFIGS = [ - IssueConfig( - build + "-{heuristic}".format(**locals()), - ["--evaluator", "h={heuristic}()".format(**locals()), "--search", "lazy_greedy([h],preferred=[h])"], - build_options=[build], - driver_options=["--build", build, "--overall-time-limit", "30m"]) - for build in BUILDS - for heuristic in ["add", "ff"] -] -SUITE = common_setup.DEFAULT_SATISFICING_SUITE -ENVIRONMENT = BaselSlurmEnvironment( - partition="infai_2", - email="malte.helmert@unibas.ch", - export=["PATH", "DOWNWARD_BENCHMARKS"]) - -if common_setup.is_test_run(): - SUITE = IssueExperiment.DEFAULT_TEST_SUITE - ENVIRONMENT = LocalEnvironment(processes=1) - -exp = IssueExperiment( - revisions=REVISIONS, - configs=CONFIGS, - environment=ENVIRONMENT, -) 
-exp.add_suite(BENCHMARKS_DIR, SUITE) - -exp.add_parser(exp.EXITCODE_PARSER) -exp.add_parser(exp.TRANSLATOR_PARSER) -exp.add_parser(exp.SINGLE_SEARCH_PARSER) -exp.add_parser(exp.PLANNER_PARSER) -exp.add_parser(os.path.join(DIR, "parser.py")) - -exp.add_step('build', exp.build) -exp.add_step('start', exp.start_runs) -exp.add_fetcher(name='fetch') - -#exp.add_absolute_report_step() -exp.add_comparison_table_step( - attributes=IssueExperiment.DEFAULT_TABLE_ATTRIBUTES + - ["simplify_before", "simplify_after", "simplify_time"]) - -for attribute in ["memory", "total_time"]: - for config in CONFIGS: - exp.add_report( - RelativeScatterPlotReport( - attributes=[attribute], - filter_algorithm=["{}-{}".format(rev, config.nick) for rev in REVISIONS], - get_category=lambda run1, run2: run1.get("domain")), - outfile="{}-{}-{}-{}-{}.png".format(exp.name, attribute, config.nick, *REVISIONS)) - -exp.run_steps() diff --git a/experiments/issue814/v10.py b/experiments/issue814/v10.py deleted file mode 100755 index cce1a1b456..0000000000 --- a/experiments/issue814/v10.py +++ /dev/null @@ -1,69 +0,0 @@ -#! 
/usr/bin/env python -# -*- coding: utf-8 -*- - -import os - -from lab.environments import LocalEnvironment, BaselSlurmEnvironment - -import common_setup -from common_setup import IssueConfig, IssueExperiment -from relativescatter import RelativeScatterPlotReport - -DIR = os.path.dirname(os.path.abspath(__file__)) -BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"] -REVISIONS = ["issue814-v9", "issue814-v10"] -if common_setup.is_test_run(): - BUILDS = ["release32"] -else: - BUILDS = ["debug64", "release32", "release64"] -CONFIGS = [ - IssueConfig( - build + "-{heuristic}".format(**locals()), - ["--evaluator", "h={heuristic}()".format(**locals()), "--search", "lazy_greedy([h],preferred=[h])"], - build_options=[build], - driver_options=["--build", build, "--overall-time-limit", "5m"]) - for build in BUILDS - for heuristic in ["add", "ff"] -] -SUITE = common_setup.DEFAULT_SATISFICING_SUITE -ENVIRONMENT = BaselSlurmEnvironment( - partition="infai_2", - email="malte.helmert@unibas.ch", - export=["PATH", "DOWNWARD_BENCHMARKS"]) - -if common_setup.is_test_run(): - SUITE = IssueExperiment.DEFAULT_TEST_SUITE - ENVIRONMENT = LocalEnvironment(processes=1) - -exp = IssueExperiment( - revisions=REVISIONS, - configs=CONFIGS, - environment=ENVIRONMENT, -) -exp.add_suite(BENCHMARKS_DIR, SUITE) - -exp.add_parser(exp.EXITCODE_PARSER) -exp.add_parser(exp.TRANSLATOR_PARSER) -exp.add_parser(exp.SINGLE_SEARCH_PARSER) -exp.add_parser(exp.PLANNER_PARSER) -exp.add_parser(os.path.join(DIR, "parser.py")) - -exp.add_step('build', exp.build) -exp.add_step('start', exp.start_runs) -exp.add_fetcher(name='fetch') - -#exp.add_absolute_report_step() -exp.add_comparison_table_step( - attributes=IssueExperiment.DEFAULT_TABLE_ATTRIBUTES + - ["simplify_before", "simplify_after", "simplify_time"]) - -for attribute in ["memory", "total_time"]: - for config in CONFIGS: - exp.add_report( - RelativeScatterPlotReport( - attributes=[attribute], - filter_algorithm=["{}-{}".format(rev, config.nick) for rev in 
REVISIONS], - get_category=lambda run1, run2: run1.get("domain")), - outfile="{}-{}-{}-{}-{}.png".format(exp.name, attribute, config.nick, *REVISIONS)) - -exp.run_steps() diff --git a/experiments/issue814/v11-base-hmax-30min.py b/experiments/issue814/v11-base-hmax-30min.py deleted file mode 100755 index 0de074b03f..0000000000 --- a/experiments/issue814/v11-base-hmax-30min.py +++ /dev/null @@ -1,70 +0,0 @@ -#! /usr/bin/env python -# -*- coding: utf-8 -*- - -import os - -from lab.environments import LocalEnvironment, BaselSlurmEnvironment - -import common_setup -from common_setup import IssueConfig, IssueExperiment -from relativescatter import RelativeScatterPlotReport - -DIR = os.path.dirname(os.path.abspath(__file__)) -BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"] -REVISIONS = ["issue814-base", "issue814-v11"] -if common_setup.is_test_run(): - BUILDS = ["release32"] -else: - BUILDS = ["debug64", "release32", "release64"] -CONFIGS = [ - IssueConfig( - build + "-{heuristic}".format(**locals()), - ["--evaluator", "h={heuristic}()".format(**locals()), - "--search", "astar(h)"], - build_options=[build], - driver_options=["--build", build, "--overall-time-limit", "30m"]) - for build in BUILDS - for heuristic in ["hmax"] -] -SUITE = common_setup.DEFAULT_OPTIMAL_SUITE -ENVIRONMENT = BaselSlurmEnvironment( - partition="infai_2", - email="malte.helmert@unibas.ch", - export=["PATH", "DOWNWARD_BENCHMARKS"]) - -if common_setup.is_test_run(): - SUITE = IssueExperiment.DEFAULT_TEST_SUITE - ENVIRONMENT = LocalEnvironment(processes=1) - -exp = IssueExperiment( - revisions=REVISIONS, - configs=CONFIGS, - environment=ENVIRONMENT, -) -exp.add_suite(BENCHMARKS_DIR, SUITE) - -exp.add_parser(exp.EXITCODE_PARSER) -exp.add_parser(exp.TRANSLATOR_PARSER) -exp.add_parser(exp.SINGLE_SEARCH_PARSER) -exp.add_parser(exp.PLANNER_PARSER) -exp.add_parser(os.path.join(DIR, "parser.py")) - -exp.add_step('build', exp.build) -exp.add_step('start', exp.start_runs) -exp.add_fetcher(name='fetch') - 
-#exp.add_absolute_report_step() -exp.add_comparison_table_step( - attributes=IssueExperiment.DEFAULT_TABLE_ATTRIBUTES + - ["simplify_before", "simplify_after", "simplify_time"]) - -for attribute in ["memory", "total_time"]: - for config in CONFIGS: - exp.add_report( - RelativeScatterPlotReport( - attributes=[attribute], - filter_algorithm=["{}-{}".format(rev, config.nick) for rev in REVISIONS], - get_category=lambda run1, run2: run1.get("domain")), - outfile="{}-{}-{}-{}-{}.png".format(exp.name, attribute, config.nick, *REVISIONS)) - -exp.run_steps() diff --git a/experiments/issue814/v13.py b/experiments/issue814/v13.py deleted file mode 100755 index 964aed8bc5..0000000000 --- a/experiments/issue814/v13.py +++ /dev/null @@ -1,70 +0,0 @@ -#! /usr/bin/env python -# -*- coding: utf-8 -*- - -import os - -from lab.environments import LocalEnvironment, BaselSlurmEnvironment - -import common_setup -from common_setup import IssueConfig, IssueExperiment -from relativescatter import RelativeScatterPlotReport - -DIR = os.path.dirname(os.path.abspath(__file__)) -BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"] -REVISIONS = ["issue814-v12", "issue814-v13"] -if common_setup.is_test_run(): - BUILDS = ["release64"] -else: - BUILDS = ["release32", "release64"] -CONFIGS = [ - IssueConfig( - build + "-{heuristic}".format(**locals()), - ["--evaluator", "h={heuristic}()".format(**locals()), - "--search", "lazy_greedy([h])"], - build_options=[build], - driver_options=["--build", build, "--overall-time-limit", "30m"]) - for build in BUILDS - for heuristic in ["add", "ff"] -] -SUITE = common_setup.DEFAULT_OPTIMAL_SUITE -ENVIRONMENT = BaselSlurmEnvironment( - partition="infai_2", - email="jendrik.seipp@unibas.ch", - export=["PATH", "DOWNWARD_BENCHMARKS"]) - -if common_setup.is_test_run(): - SUITE = IssueExperiment.DEFAULT_TEST_SUITE - ENVIRONMENT = LocalEnvironment(processes=1) - -exp = IssueExperiment( - revisions=REVISIONS, - configs=CONFIGS, - environment=ENVIRONMENT, -) 
-exp.add_suite(BENCHMARKS_DIR, SUITE) - -exp.add_parser(exp.EXITCODE_PARSER) -exp.add_parser(exp.TRANSLATOR_PARSER) -exp.add_parser(exp.SINGLE_SEARCH_PARSER) -exp.add_parser(exp.PLANNER_PARSER) -exp.add_parser(os.path.join(DIR, "parser.py")) - -exp.add_step('build', exp.build) -exp.add_step('start', exp.start_runs) -exp.add_fetcher(name='fetch') - -#exp.add_absolute_report_step() -exp.add_comparison_table_step( - attributes=IssueExperiment.DEFAULT_TABLE_ATTRIBUTES + - ["simplify_before", "simplify_after", "simplify_time"]) - -for attribute in ["memory", "total_time"]: - for config in CONFIGS: - exp.add_report( - RelativeScatterPlotReport( - attributes=[attribute], - filter_algorithm=["{}-{}".format(rev, config.nick) for rev in REVISIONS], - get_category=lambda run1, run2: run1.get("domain")), - outfile="{}-{}-{}-{}-{}.png".format(exp.name, attribute, config.nick, *REVISIONS)) - -exp.run_steps() diff --git a/experiments/issue814/v15-hadd.py b/experiments/issue814/v15-hadd.py deleted file mode 100755 index 17e3aed7d5..0000000000 --- a/experiments/issue814/v15-hadd.py +++ /dev/null @@ -1,70 +0,0 @@ -#! 
/usr/bin/env python -# -*- coding: utf-8 -*- - -import os - -from lab.environments import LocalEnvironment, BaselSlurmEnvironment - -import common_setup -from common_setup import IssueConfig, IssueExperiment -from relativescatter import RelativeScatterPlotReport - -DIR = os.path.dirname(os.path.abspath(__file__)) -BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"] -REVISIONS = ["issue814-v13", "issue814-v14", "issue814-v15"] -if common_setup.is_test_run(): - BUILDS = ["release64"] -else: - BUILDS = ["release32", "release64"] -CONFIGS = [ - IssueConfig( - build + "-{heuristic}".format(**locals()), - ["--evaluator", "h={heuristic}()".format(**locals()), - "--search", "lazy_greedy([h])"], - build_options=[build], - driver_options=["--build", build, "--overall-time-limit", "30m"]) - for build in BUILDS - for heuristic in ["add"] -] -SUITE = common_setup.DEFAULT_SATISFICING_SUITE -ENVIRONMENT = BaselSlurmEnvironment( - partition="infai_2", - email="jendrik.seipp@unibas.ch", - export=["PATH", "DOWNWARD_BENCHMARKS"]) - -if common_setup.is_test_run(): - SUITE = IssueExperiment.DEFAULT_TEST_SUITE - ENVIRONMENT = LocalEnvironment(processes=1) - -exp = IssueExperiment( - revisions=REVISIONS, - configs=CONFIGS, - environment=ENVIRONMENT, -) -exp.add_suite(BENCHMARKS_DIR, SUITE) - -exp.add_parser(exp.EXITCODE_PARSER) -exp.add_parser(exp.TRANSLATOR_PARSER) -exp.add_parser(exp.SINGLE_SEARCH_PARSER) -exp.add_parser(exp.PLANNER_PARSER) -exp.add_parser(os.path.join(DIR, "parser.py")) - -exp.add_step('build', exp.build) -exp.add_step('start', exp.start_runs) -exp.add_fetcher(name='fetch') - -#exp.add_absolute_report_step() -exp.add_comparison_table_step( - attributes=IssueExperiment.DEFAULT_TABLE_ATTRIBUTES + - ["simplify_before", "simplify_after", "simplify_time"]) - -for attribute in ["memory", "total_time"]: - for config in CONFIGS: - exp.add_report( - RelativeScatterPlotReport( - attributes=[attribute], - filter_algorithm=["{}-{}".format(rev, config.nick) for rev in REVISIONS], - 
get_category=lambda run1, run2: run1.get("domain")), - outfile="{}-{}-{}-{}-{}.png".format(exp.name, attribute, config.nick, *REVISIONS)) - -exp.run_steps() diff --git a/experiments/issue814/v15-hmax.py b/experiments/issue814/v15-hmax.py deleted file mode 100755 index c274a483e0..0000000000 --- a/experiments/issue814/v15-hmax.py +++ /dev/null @@ -1,70 +0,0 @@ -#! /usr/bin/env python -# -*- coding: utf-8 -*- - -import os - -from lab.environments import LocalEnvironment, BaselSlurmEnvironment - -import common_setup -from common_setup import IssueConfig, IssueExperiment -from relativescatter import RelativeScatterPlotReport - -DIR = os.path.dirname(os.path.abspath(__file__)) -BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"] -REVISIONS = ["issue814-v13", "issue814-v14", "issue814-v15"] -if common_setup.is_test_run(): - BUILDS = ["release64"] -else: - BUILDS = ["release32", "release64"] -CONFIGS = [ - IssueConfig( - build + "-{heuristic}".format(**locals()), - ["--evaluator", "h={heuristic}()".format(**locals()), - "--search", "astar(h)"], - build_options=[build], - driver_options=["--build", build, "--overall-time-limit", "30m"]) - for build in BUILDS - for heuristic in ["hmax"] -] -SUITE = common_setup.DEFAULT_OPTIMAL_SUITE -ENVIRONMENT = BaselSlurmEnvironment( - partition="infai_2", - email="jendrik.seipp@unibas.ch", - export=["PATH", "DOWNWARD_BENCHMARKS"]) - -if common_setup.is_test_run(): - SUITE = IssueExperiment.DEFAULT_TEST_SUITE - ENVIRONMENT = LocalEnvironment(processes=1) - -exp = IssueExperiment( - revisions=REVISIONS, - configs=CONFIGS, - environment=ENVIRONMENT, -) -exp.add_suite(BENCHMARKS_DIR, SUITE) - -exp.add_parser(exp.EXITCODE_PARSER) -exp.add_parser(exp.TRANSLATOR_PARSER) -exp.add_parser(exp.SINGLE_SEARCH_PARSER) -exp.add_parser(exp.PLANNER_PARSER) -exp.add_parser(os.path.join(DIR, "parser.py")) - -exp.add_step('build', exp.build) -exp.add_step('start', exp.start_runs) -exp.add_fetcher(name='fetch') - -#exp.add_absolute_report_step() 
-exp.add_comparison_table_step( - attributes=IssueExperiment.DEFAULT_TABLE_ATTRIBUTES + - ["simplify_before", "simplify_after", "simplify_time"]) - -for attribute in ["memory", "total_time"]: - for config in CONFIGS: - exp.add_report( - RelativeScatterPlotReport( - attributes=[attribute], - filter_algorithm=["{}-{}".format(rev, config.nick) for rev in REVISIONS], - get_category=lambda run1, run2: run1.get("domain")), - outfile="{}-{}-{}-{}-{}.png".format(exp.name, attribute, config.nick, *REVISIONS)) - -exp.run_steps() diff --git a/experiments/issue814/v15.py b/experiments/issue814/v15.py deleted file mode 100755 index 5578ec6518..0000000000 --- a/experiments/issue814/v15.py +++ /dev/null @@ -1,70 +0,0 @@ -#! /usr/bin/env python -# -*- coding: utf-8 -*- - -import os - -from lab.environments import LocalEnvironment, BaselSlurmEnvironment - -import common_setup -from common_setup import IssueConfig, IssueExperiment -from relativescatter import RelativeScatterPlotReport - -DIR = os.path.dirname(os.path.abspath(__file__)) -BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"] -REVISIONS = ["issue814-v13", "issue814-v14", "issue814-v15"] -if common_setup.is_test_run(): - BUILDS = ["release64"] -else: - BUILDS = ["release32", "release64"] -CONFIGS = [ - IssueConfig( - build + "-{heuristic}".format(**locals()), - ["--evaluator", "h={heuristic}()".format(**locals()), - "--search", "lazy_greedy([h])"], - build_options=[build], - driver_options=["--build", build, "--overall-time-limit", "30m"]) - for build in BUILDS - for heuristic in ["ff"] -] -SUITE = common_setup.DEFAULT_OPTIMAL_SUITE -ENVIRONMENT = BaselSlurmEnvironment( - partition="infai_2", - email="jendrik.seipp@unibas.ch", - export=["PATH", "DOWNWARD_BENCHMARKS"]) - -if common_setup.is_test_run(): - SUITE = IssueExperiment.DEFAULT_TEST_SUITE - ENVIRONMENT = LocalEnvironment(processes=1) - -exp = IssueExperiment( - revisions=REVISIONS, - configs=CONFIGS, - environment=ENVIRONMENT, -) -exp.add_suite(BENCHMARKS_DIR, 
SUITE) - -exp.add_parser(exp.EXITCODE_PARSER) -exp.add_parser(exp.TRANSLATOR_PARSER) -exp.add_parser(exp.SINGLE_SEARCH_PARSER) -exp.add_parser(exp.PLANNER_PARSER) -exp.add_parser(os.path.join(DIR, "parser.py")) - -exp.add_step('build', exp.build) -exp.add_step('start', exp.start_runs) -exp.add_fetcher(name='fetch') - -#exp.add_absolute_report_step() -exp.add_comparison_table_step( - attributes=IssueExperiment.DEFAULT_TABLE_ATTRIBUTES + - ["simplify_before", "simplify_after", "simplify_time"]) - -for attribute in ["memory", "total_time"]: - for config in CONFIGS: - exp.add_report( - RelativeScatterPlotReport( - attributes=[attribute], - filter_algorithm=["{}-{}".format(rev, config.nick) for rev in REVISIONS], - get_category=lambda run1, run2: run1.get("domain")), - outfile="{}-{}-{}-{}-{}.png".format(exp.name, attribute, config.nick, *REVISIONS)) - -exp.run_steps() diff --git a/experiments/issue814/v16-hmax.py b/experiments/issue814/v16-hmax.py deleted file mode 100755 index 127c216bca..0000000000 --- a/experiments/issue814/v16-hmax.py +++ /dev/null @@ -1,70 +0,0 @@ -#! 
/usr/bin/env python -# -*- coding: utf-8 -*- - -import os - -from lab.environments import LocalEnvironment, BaselSlurmEnvironment - -import common_setup -from common_setup import IssueConfig, IssueExperiment -from relativescatter import RelativeScatterPlotReport - -DIR = os.path.dirname(os.path.abspath(__file__)) -BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"] -REVISIONS = ["issue814-v15", "issue814-v16"] -if common_setup.is_test_run(): - BUILDS = ["release64"] -else: - BUILDS = ["release32", "release64"] -CONFIGS = [ - IssueConfig( - build + "-{heuristic}".format(**locals()), - ["--evaluator", "h={heuristic}()".format(**locals()), - "--search", "astar(h)"], - build_options=[build], - driver_options=["--build", build, "--overall-time-limit", "30m"]) - for build in BUILDS - for heuristic in ["hmax"] -] -SUITE = common_setup.DEFAULT_OPTIMAL_SUITE -ENVIRONMENT = BaselSlurmEnvironment( - partition="infai_1", - email="jendrik.seipp@unibas.ch", - export=["PATH", "DOWNWARD_BENCHMARKS"]) - -if common_setup.is_test_run(): - SUITE = IssueExperiment.DEFAULT_TEST_SUITE - ENVIRONMENT = LocalEnvironment(processes=1) - -exp = IssueExperiment( - revisions=REVISIONS, - configs=CONFIGS, - environment=ENVIRONMENT, -) -exp.add_suite(BENCHMARKS_DIR, SUITE) - -exp.add_parser(exp.EXITCODE_PARSER) -exp.add_parser(exp.TRANSLATOR_PARSER) -exp.add_parser(exp.SINGLE_SEARCH_PARSER) -exp.add_parser(exp.PLANNER_PARSER) -exp.add_parser(os.path.join(DIR, "parser.py")) - -exp.add_step('build', exp.build) -exp.add_step('start', exp.start_runs) -exp.add_fetcher(name='fetch') - -#exp.add_absolute_report_step() -exp.add_comparison_table_step( - attributes=IssueExperiment.DEFAULT_TABLE_ATTRIBUTES + - ["simplify_before", "simplify_after", "simplify_time"]) - -for attribute in ["memory", "total_time"]: - for config in CONFIGS: - exp.add_report( - RelativeScatterPlotReport( - attributes=[attribute], - filter_algorithm=["{}-{}".format(rev, config.nick) for rev in REVISIONS], - get_category=lambda run1, 
run2: run1.get("domain")), - outfile="{}-{}-{}-{}-{}.png".format(exp.name, attribute, config.nick, *REVISIONS)) - -exp.run_steps() diff --git a/experiments/issue814/v16-sat.py b/experiments/issue814/v16-sat.py deleted file mode 100755 index ae5438efdd..0000000000 --- a/experiments/issue814/v16-sat.py +++ /dev/null @@ -1,70 +0,0 @@ -#! /usr/bin/env python -# -*- coding: utf-8 -*- - -import os - -from lab.environments import LocalEnvironment, BaselSlurmEnvironment - -import common_setup -from common_setup import IssueConfig, IssueExperiment -from relativescatter import RelativeScatterPlotReport - -DIR = os.path.dirname(os.path.abspath(__file__)) -BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"] -REVISIONS = ["issue814-v15", "issue814-v16"] -if common_setup.is_test_run(): - BUILDS = ["release64"] -else: - BUILDS = ["release32", "release64"] -CONFIGS = [ - IssueConfig( - build + "-{heuristic}".format(**locals()), - ["--evaluator", "h={heuristic}()".format(**locals()), - "--search", "lazy_greedy([h])"], - build_options=[build], - driver_options=["--build", build, "--overall-time-limit", "30m"]) - for build in BUILDS - for heuristic in ["add", "ff"] -] -SUITE = common_setup.DEFAULT_SATISFICING_SUITE -ENVIRONMENT = BaselSlurmEnvironment( - partition="infai_2", - email="jendrik.seipp@unibas.ch", - export=["PATH", "DOWNWARD_BENCHMARKS"]) - -if common_setup.is_test_run(): - SUITE = IssueExperiment.DEFAULT_TEST_SUITE - ENVIRONMENT = LocalEnvironment(processes=1) - -exp = IssueExperiment( - revisions=REVISIONS, - configs=CONFIGS, - environment=ENVIRONMENT, -) -exp.add_suite(BENCHMARKS_DIR, SUITE) - -exp.add_parser(exp.EXITCODE_PARSER) -exp.add_parser(exp.TRANSLATOR_PARSER) -exp.add_parser(exp.SINGLE_SEARCH_PARSER) -exp.add_parser(exp.PLANNER_PARSER) -exp.add_parser(os.path.join(DIR, "parser.py")) - -exp.add_step('build', exp.build) -exp.add_step('start', exp.start_runs) -exp.add_fetcher(name='fetch') - -#exp.add_absolute_report_step() -exp.add_comparison_table_step( - 
attributes=IssueExperiment.DEFAULT_TABLE_ATTRIBUTES + - ["simplify_before", "simplify_after", "simplify_time"]) - -for attribute in ["memory", "total_time"]: - for config in CONFIGS: - exp.add_report( - RelativeScatterPlotReport( - attributes=[attribute], - filter_algorithm=["{}-{}".format(rev, config.nick) for rev in REVISIONS], - get_category=lambda run1, run2: run1.get("domain")), - outfile="{}-{}-{}-{}-{}.png".format(exp.name, attribute, config.nick, *REVISIONS)) - -exp.run_steps() diff --git a/experiments/issue814/v17-hmax.py b/experiments/issue814/v17-hmax.py deleted file mode 100755 index ab9dd3a748..0000000000 --- a/experiments/issue814/v17-hmax.py +++ /dev/null @@ -1,70 +0,0 @@ -#! /usr/bin/env python -# -*- coding: utf-8 -*- - -import os - -from lab.environments import LocalEnvironment, BaselSlurmEnvironment - -import common_setup -from common_setup import IssueConfig, IssueExperiment -from relativescatter import RelativeScatterPlotReport - -DIR = os.path.dirname(os.path.abspath(__file__)) -BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"] -REVISIONS = ["issue814-v12-base", "issue814-v16", "issue814-v17"] -if common_setup.is_test_run(): - BUILDS = ["release64"] -else: - BUILDS = ["release32", "release64"] -CONFIGS = [ - IssueConfig( - build + "-{heuristic}".format(**locals()), - ["--evaluator", "h={heuristic}()".format(**locals()), - "--search", "astar(h)"], - build_options=[build], - driver_options=["--build", build, "--overall-time-limit", "30m"]) - for build in BUILDS - for heuristic in ["hmax"] -] -SUITE = common_setup.DEFAULT_OPTIMAL_SUITE -ENVIRONMENT = BaselSlurmEnvironment( - partition="infai_1", - email="jendrik.seipp@unibas.ch", - export=["PATH", "DOWNWARD_BENCHMARKS"]) - -if common_setup.is_test_run(): - SUITE = IssueExperiment.DEFAULT_TEST_SUITE - ENVIRONMENT = LocalEnvironment(processes=1) - -exp = IssueExperiment( - revisions=REVISIONS, - configs=CONFIGS, - environment=ENVIRONMENT, -) -exp.add_suite(BENCHMARKS_DIR, SUITE) - 
-exp.add_parser(exp.EXITCODE_PARSER) -exp.add_parser(exp.TRANSLATOR_PARSER) -exp.add_parser(exp.SINGLE_SEARCH_PARSER) -exp.add_parser(exp.PLANNER_PARSER) -exp.add_parser(os.path.join(DIR, "parser.py")) - -exp.add_step('build', exp.build) -exp.add_step('start', exp.start_runs) -exp.add_fetcher(name='fetch') - -#exp.add_absolute_report_step() -exp.add_comparison_table_step( - attributes=IssueExperiment.DEFAULT_TABLE_ATTRIBUTES + - ["simplify_before", "simplify_after", "simplify_time"]) - -for attribute in ["memory", "total_time"]: - for config in CONFIGS: - exp.add_report( - RelativeScatterPlotReport( - attributes=[attribute], - filter_algorithm=["{}-{}".format(rev, config.nick) for rev in REVISIONS], - get_category=lambda run1, run2: run1.get("domain")), - outfile="{}-{}-{}-{}-{}.png".format(exp.name, attribute, config.nick, *REVISIONS)) - -exp.run_steps() diff --git a/experiments/issue814/v17-sat.py b/experiments/issue814/v17-sat.py deleted file mode 100755 index b9989f986f..0000000000 --- a/experiments/issue814/v17-sat.py +++ /dev/null @@ -1,70 +0,0 @@ -#! 
/usr/bin/env python -# -*- coding: utf-8 -*- - -import os - -from lab.environments import LocalEnvironment, BaselSlurmEnvironment - -import common_setup -from common_setup import IssueConfig, IssueExperiment -from relativescatter import RelativeScatterPlotReport - -DIR = os.path.dirname(os.path.abspath(__file__)) -BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"] -REVISIONS = ["issue814-v12-base", "issue814-v16", "issue814-v17"] -if common_setup.is_test_run(): - BUILDS = ["release64"] -else: - BUILDS = ["release32", "release64"] -CONFIGS = [ - IssueConfig( - build + "-{heuristic}".format(**locals()), - ["--evaluator", "h={heuristic}()".format(**locals()), - "--search", "lazy_greedy([h])"], - build_options=[build], - driver_options=["--build", build, "--overall-time-limit", "30m"]) - for build in BUILDS - for heuristic in ["add", "ff"] -] -SUITE = common_setup.DEFAULT_SATISFICING_SUITE -ENVIRONMENT = BaselSlurmEnvironment( - partition="infai_2", - email="jendrik.seipp@unibas.ch", - export=["PATH", "DOWNWARD_BENCHMARKS"]) - -if common_setup.is_test_run(): - SUITE = IssueExperiment.DEFAULT_TEST_SUITE - ENVIRONMENT = LocalEnvironment(processes=1) - -exp = IssueExperiment( - revisions=REVISIONS, - configs=CONFIGS, - environment=ENVIRONMENT, -) -exp.add_suite(BENCHMARKS_DIR, SUITE) - -exp.add_parser(exp.EXITCODE_PARSER) -exp.add_parser(exp.TRANSLATOR_PARSER) -exp.add_parser(exp.SINGLE_SEARCH_PARSER) -exp.add_parser(exp.PLANNER_PARSER) -exp.add_parser(os.path.join(DIR, "parser.py")) - -exp.add_step('build', exp.build) -exp.add_step('start', exp.start_runs) -exp.add_fetcher(name='fetch') - -#exp.add_absolute_report_step() -exp.add_comparison_table_step( - attributes=IssueExperiment.DEFAULT_TABLE_ATTRIBUTES + - ["simplify_before", "simplify_after", "simplify_time"]) - -for attribute in ["memory", "total_time"]: - for config in CONFIGS: - exp.add_report( - RelativeScatterPlotReport( - attributes=[attribute], - filter_algorithm=["{}-{}".format(rev, config.nick) for rev in 
REVISIONS], - get_category=lambda run1, run2: run1.get("domain")), - outfile="{}-{}-{}-{}-{}.png".format(exp.name, attribute, config.nick, *REVISIONS)) - -exp.run_steps() diff --git a/experiments/issue814/v1_7_10.py b/experiments/issue814/v1_7_10.py deleted file mode 100755 index 0dce495382..0000000000 --- a/experiments/issue814/v1_7_10.py +++ /dev/null @@ -1,80 +0,0 @@ -#! /usr/bin/env python -# -*- coding: utf-8 -*- - -"""This experiment is only here for completeness because it was referred -to on the tracker. Don't use it as given here: the different experiments -that are aggregated here used different timeouts, so the results will be -misleading.""" - -import os - -from lab.environments import LocalEnvironment, BaselSlurmEnvironment - -import common_setup -from common_setup import IssueConfig, IssueExperiment -from relativescatter import RelativeScatterPlotReport - -DIR = os.path.dirname(os.path.abspath(__file__)) -BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"] -REVISIONS = ["issue814-base", "issue814-v1", "issue814-v7", "issue814-v10"] -if common_setup.is_test_run(): - BUILDS = ["release32"] -else: - BUILDS = ["debug64", "release32", "release64"] -CONFIGS = [ - IssueConfig( - build + "-{heuristic}".format(**locals()), - ["--evaluator", "h={heuristic}()".format(**locals()), "--search", "lazy_greedy([h],preferred=[h])"], - build_options=[build], - driver_options=["--build", build, "--overall-time-limit", "30m"]) - for build in BUILDS - for heuristic in ["add", "ff"] -] -SUITE = common_setup.DEFAULT_SATISFICING_SUITE -ENVIRONMENT = BaselSlurmEnvironment( - partition="infai_2", - email="malte.helmert@unibas.ch", - export=["PATH", "DOWNWARD_BENCHMARKS"]) - -if common_setup.is_test_run(): - SUITE = IssueExperiment.DEFAULT_TEST_SUITE - ENVIRONMENT = LocalEnvironment(processes=1) - -exp = IssueExperiment( - revisions=REVISIONS, - configs=CONFIGS, - environment=ENVIRONMENT, -) -exp.add_suite(BENCHMARKS_DIR, SUITE) - -exp.add_parser(exp.EXITCODE_PARSER) 
-exp.add_parser(exp.TRANSLATOR_PARSER) -exp.add_parser(exp.SINGLE_SEARCH_PARSER) -exp.add_parser(exp.PLANNER_PARSER) -exp.add_parser(os.path.join(DIR, "parser.py")) - -## The following steps are to actually run the experiment. -# exp.add_step('build', exp.build) -# exp.add_step('start', exp.start_runs) -# exp.add_fetcher(name='fetch') - -## Alternatively, the following steps fetch the results from the other experiments. -exp.add_fetcher('data/issue814-v1-eval') -exp.add_fetcher('data/issue814-v7-eval') -exp.add_fetcher('data/issue814-v10-eval') - -exp.add_absolute_report_step() -exp.add_comparison_table_step( - attributes=IssueExperiment.DEFAULT_TABLE_ATTRIBUTES + - ["simplify_before", "simplify_after", "simplify_time"]) - -for attribute in ["memory", "total_time"]: - for config in CONFIGS: - exp.add_report( - RelativeScatterPlotReport( - attributes=[attribute], - filter_algorithm=["{}-{}".format(rev, config.nick) for rev in REVISIONS], - get_category=lambda run1, run2: run1.get("domain")), - outfile="{}-{}-{}-{}-{}.png".format(exp.name, attribute, config.nick, *REVISIONS)) - -exp.run_steps() diff --git a/experiments/issue814/v2.py b/experiments/issue814/v2.py deleted file mode 100755 index 5563de95a0..0000000000 --- a/experiments/issue814/v2.py +++ /dev/null @@ -1,69 +0,0 @@ -#! 
/usr/bin/env python -# -*- coding: utf-8 -*- - -import os - -from lab.environments import LocalEnvironment, BaselSlurmEnvironment - -import common_setup -from common_setup import IssueConfig, IssueExperiment -from relativescatter import RelativeScatterPlotReport - -DIR = os.path.dirname(os.path.abspath(__file__)) -BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"] -REVISIONS = ["issue814-v1", "issue814-v2"] -if common_setup.is_test_run(): - BUILDS = ["release32"] -else: - BUILDS = ["debug64", "release32", "release64"] -CONFIGS = [ - IssueConfig( - build + "-{heuristic}".format(**locals()), - ["--evaluator", "h={heuristic}()".format(**locals()), "--search", "lazy_greedy([h],preferred=[h])"], - build_options=[build], - driver_options=["--build", build, "--overall-time-limit", "30m"]) - for build in BUILDS - for heuristic in ["add", "ff"] -] -SUITE = common_setup.DEFAULT_SATISFICING_SUITE -ENVIRONMENT = BaselSlurmEnvironment( - partition="infai_2", - email="malte.helmert@unibas.ch", - export=["PATH", "DOWNWARD_BENCHMARKS"]) - -if common_setup.is_test_run(): - SUITE = IssueExperiment.DEFAULT_TEST_SUITE - ENVIRONMENT = LocalEnvironment(processes=1) - -exp = IssueExperiment( - revisions=REVISIONS, - configs=CONFIGS, - environment=ENVIRONMENT, -) -exp.add_suite(BENCHMARKS_DIR, SUITE) - -exp.add_parser(exp.EXITCODE_PARSER) -exp.add_parser(exp.TRANSLATOR_PARSER) -exp.add_parser(exp.SINGLE_SEARCH_PARSER) -exp.add_parser(exp.PLANNER_PARSER) -exp.add_parser(os.path.join(DIR, "parser.py")) - -exp.add_step('build', exp.build) -exp.add_step('start', exp.start_runs) -exp.add_fetcher(name='fetch') - -#exp.add_absolute_report_step() -exp.add_comparison_table_step( - attributes=IssueExperiment.DEFAULT_TABLE_ATTRIBUTES + - ["simplify_before", "simplify_after", "simplify_time"]) - -for attribute in ["memory", "total_time"]: - for config in CONFIGS: - exp.add_report( - RelativeScatterPlotReport( - attributes=[attribute], - filter_algorithm=["{}-{}".format(rev, config.nick) for rev in 
REVISIONS], - get_category=lambda run1, run2: run1.get("domain")), - outfile="{}-{}-{}-{}-{}.png".format(exp.name, attribute, config.nick, *REVISIONS)) - -exp.run_steps() diff --git a/experiments/issue814/v3.py b/experiments/issue814/v3.py deleted file mode 100755 index a86fd2c02c..0000000000 --- a/experiments/issue814/v3.py +++ /dev/null @@ -1,69 +0,0 @@ -#! /usr/bin/env python -# -*- coding: utf-8 -*- - -import os - -from lab.environments import LocalEnvironment, BaselSlurmEnvironment - -import common_setup -from common_setup import IssueConfig, IssueExperiment -from relativescatter import RelativeScatterPlotReport - -DIR = os.path.dirname(os.path.abspath(__file__)) -BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"] -REVISIONS = ["issue814-v2", "issue814-v3"] -if common_setup.is_test_run(): - BUILDS = ["release32"] -else: - BUILDS = ["debug64", "release32", "release64"] -CONFIGS = [ - IssueConfig( - build + "-{heuristic}".format(**locals()), - ["--evaluator", "h={heuristic}()".format(**locals()), "--search", "lazy_greedy([h],preferred=[h])"], - build_options=[build], - driver_options=["--build", build, "--overall-time-limit", "5m"]) - for build in BUILDS - for heuristic in ["add", "ff"] -] -SUITE = common_setup.DEFAULT_SATISFICING_SUITE -ENVIRONMENT = BaselSlurmEnvironment( - partition="infai_2", - email="malte.helmert@unibas.ch", - export=["PATH", "DOWNWARD_BENCHMARKS"]) - -if common_setup.is_test_run(): - SUITE = IssueExperiment.DEFAULT_TEST_SUITE - ENVIRONMENT = LocalEnvironment(processes=1) - -exp = IssueExperiment( - revisions=REVISIONS, - configs=CONFIGS, - environment=ENVIRONMENT, -) -exp.add_suite(BENCHMARKS_DIR, SUITE) - -exp.add_parser(exp.EXITCODE_PARSER) -exp.add_parser(exp.TRANSLATOR_PARSER) -exp.add_parser(exp.SINGLE_SEARCH_PARSER) -exp.add_parser(exp.PLANNER_PARSER) -exp.add_parser(os.path.join(DIR, "parser.py")) - -exp.add_step('build', exp.build) -exp.add_step('start', exp.start_runs) -exp.add_fetcher(name='fetch') - 
-#exp.add_absolute_report_step() -exp.add_comparison_table_step( - attributes=IssueExperiment.DEFAULT_TABLE_ATTRIBUTES + - ["simplify_before", "simplify_after", "simplify_time"]) - -for attribute in ["memory", "total_time"]: - for config in CONFIGS: - exp.add_report( - RelativeScatterPlotReport( - attributes=[attribute], - filter_algorithm=["{}-{}".format(rev, config.nick) for rev in REVISIONS], - get_category=lambda run1, run2: run1.get("domain")), - outfile="{}-{}-{}-{}-{}.png".format(exp.name, attribute, config.nick, *REVISIONS)) - -exp.run_steps() diff --git a/experiments/issue814/v4.py b/experiments/issue814/v4.py deleted file mode 100755 index 4d7233e9b1..0000000000 --- a/experiments/issue814/v4.py +++ /dev/null @@ -1,69 +0,0 @@ -#! /usr/bin/env python -# -*- coding: utf-8 -*- - -import os - -from lab.environments import LocalEnvironment, BaselSlurmEnvironment - -import common_setup -from common_setup import IssueConfig, IssueExperiment -from relativescatter import RelativeScatterPlotReport - -DIR = os.path.dirname(os.path.abspath(__file__)) -BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"] -REVISIONS = ["issue814-base", "issue814-v4"] -if common_setup.is_test_run(): - BUILDS = ["release32"] -else: - BUILDS = ["debug64", "release32", "release64"] -CONFIGS = [ - IssueConfig( - build + "-{heuristic}".format(**locals()), - ["--evaluator", "h={heuristic}()".format(**locals()), "--search", "lazy_greedy([h],preferred=[h])"], - build_options=[build], - driver_options=["--build", build, "--overall-time-limit", "5m"]) - for build in BUILDS - for heuristic in ["add", "ff"] -] -SUITE = common_setup.DEFAULT_SATISFICING_SUITE -ENVIRONMENT = BaselSlurmEnvironment( - partition="infai_2", - email="malte.helmert@unibas.ch", - export=["PATH", "DOWNWARD_BENCHMARKS"]) - -if common_setup.is_test_run(): - SUITE = IssueExperiment.DEFAULT_TEST_SUITE - ENVIRONMENT = LocalEnvironment(processes=1) - -exp = IssueExperiment( - revisions=REVISIONS, - configs=CONFIGS, - 
environment=ENVIRONMENT, -) -exp.add_suite(BENCHMARKS_DIR, SUITE) - -exp.add_parser(exp.EXITCODE_PARSER) -exp.add_parser(exp.TRANSLATOR_PARSER) -exp.add_parser(exp.SINGLE_SEARCH_PARSER) -exp.add_parser(exp.PLANNER_PARSER) -exp.add_parser(os.path.join(DIR, "parser.py")) - -exp.add_step('build', exp.build) -exp.add_step('start', exp.start_runs) -exp.add_fetcher(name='fetch') - -#exp.add_absolute_report_step() -exp.add_comparison_table_step( - attributes=IssueExperiment.DEFAULT_TABLE_ATTRIBUTES + - ["simplify_before", "simplify_after", "simplify_time"]) - -for attribute in ["memory", "total_time"]: - for config in CONFIGS: - exp.add_report( - RelativeScatterPlotReport( - attributes=[attribute], - filter_algorithm=["{}-{}".format(rev, config.nick) for rev in REVISIONS], - get_category=lambda run1, run2: run1.get("domain")), - outfile="{}-{}-{}-{}-{}.png".format(exp.name, attribute, config.nick, *REVISIONS)) - -exp.run_steps() diff --git a/experiments/issue814/v5.py b/experiments/issue814/v5.py deleted file mode 100755 index 5afa13c1fb..0000000000 --- a/experiments/issue814/v5.py +++ /dev/null @@ -1,69 +0,0 @@ -#! 
/usr/bin/env python -# -*- coding: utf-8 -*- - -import os - -from lab.environments import LocalEnvironment, BaselSlurmEnvironment - -import common_setup -from common_setup import IssueConfig, IssueExperiment -from relativescatter import RelativeScatterPlotReport - -DIR = os.path.dirname(os.path.abspath(__file__)) -BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"] -REVISIONS = ["issue814-v4", "issue814-v5"] -if common_setup.is_test_run(): - BUILDS = ["release32"] -else: - BUILDS = ["debug64", "release32", "release64"] -CONFIGS = [ - IssueConfig( - build + "-{heuristic}".format(**locals()), - ["--evaluator", "h={heuristic}()".format(**locals()), "--search", "lazy_greedy([h],preferred=[h])"], - build_options=[build], - driver_options=["--build", build, "--overall-time-limit", "5m"]) - for build in BUILDS - for heuristic in ["add", "ff"] -] -SUITE = common_setup.DEFAULT_SATISFICING_SUITE -ENVIRONMENT = BaselSlurmEnvironment( - partition="infai_2", - email="malte.helmert@unibas.ch", - export=["PATH", "DOWNWARD_BENCHMARKS"]) - -if common_setup.is_test_run(): - SUITE = IssueExperiment.DEFAULT_TEST_SUITE - ENVIRONMENT = LocalEnvironment(processes=1) - -exp = IssueExperiment( - revisions=REVISIONS, - configs=CONFIGS, - environment=ENVIRONMENT, -) -exp.add_suite(BENCHMARKS_DIR, SUITE) - -exp.add_parser(exp.EXITCODE_PARSER) -exp.add_parser(exp.TRANSLATOR_PARSER) -exp.add_parser(exp.SINGLE_SEARCH_PARSER) -exp.add_parser(exp.PLANNER_PARSER) -exp.add_parser(os.path.join(DIR, "parser.py")) - -exp.add_step('build', exp.build) -exp.add_step('start', exp.start_runs) -exp.add_fetcher(name='fetch') - -#exp.add_absolute_report_step() -exp.add_comparison_table_step( - attributes=IssueExperiment.DEFAULT_TABLE_ATTRIBUTES + - ["simplify_before", "simplify_after", "simplify_time"]) - -for attribute in ["memory", "total_time"]: - for config in CONFIGS: - exp.add_report( - RelativeScatterPlotReport( - attributes=[attribute], - filter_algorithm=["{}-{}".format(rev, config.nick) for rev in 
REVISIONS], - get_category=lambda run1, run2: run1.get("domain")), - outfile="{}-{}-{}-{}-{}.png".format(exp.name, attribute, config.nick, *REVISIONS)) - -exp.run_steps() diff --git a/experiments/issue814/v6.py b/experiments/issue814/v6.py deleted file mode 100755 index b190dc16a8..0000000000 --- a/experiments/issue814/v6.py +++ /dev/null @@ -1,69 +0,0 @@ -#! /usr/bin/env python -# -*- coding: utf-8 -*- - -import os - -from lab.environments import LocalEnvironment, BaselSlurmEnvironment - -import common_setup -from common_setup import IssueConfig, IssueExperiment -from relativescatter import RelativeScatterPlotReport - -DIR = os.path.dirname(os.path.abspath(__file__)) -BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"] -REVISIONS = ["issue814-v5", "issue814-v6"] -if common_setup.is_test_run(): - BUILDS = ["release32"] -else: - BUILDS = ["debug64", "release32", "release64"] -CONFIGS = [ - IssueConfig( - build + "-{heuristic}".format(**locals()), - ["--evaluator", "h={heuristic}()".format(**locals()), "--search", "lazy_greedy([h],preferred=[h])"], - build_options=[build], - driver_options=["--build", build, "--overall-time-limit", "5m"]) - for build in BUILDS - for heuristic in ["add", "ff"] -] -SUITE = common_setup.DEFAULT_SATISFICING_SUITE -ENVIRONMENT = BaselSlurmEnvironment( - partition="infai_2", - email="malte.helmert@unibas.ch", - export=["PATH", "DOWNWARD_BENCHMARKS"]) - -if common_setup.is_test_run(): - SUITE = IssueExperiment.DEFAULT_TEST_SUITE - ENVIRONMENT = LocalEnvironment(processes=1) - -exp = IssueExperiment( - revisions=REVISIONS, - configs=CONFIGS, - environment=ENVIRONMENT, -) -exp.add_suite(BENCHMARKS_DIR, SUITE) - -exp.add_parser(exp.EXITCODE_PARSER) -exp.add_parser(exp.TRANSLATOR_PARSER) -exp.add_parser(exp.SINGLE_SEARCH_PARSER) -exp.add_parser(exp.PLANNER_PARSER) -exp.add_parser(os.path.join(DIR, "parser.py")) - -exp.add_step('build', exp.build) -exp.add_step('start', exp.start_runs) -exp.add_fetcher(name='fetch') - 
-#exp.add_absolute_report_step() -exp.add_comparison_table_step( - attributes=IssueExperiment.DEFAULT_TABLE_ATTRIBUTES + - ["simplify_before", "simplify_after", "simplify_time"]) - -for attribute in ["memory", "total_time"]: - for config in CONFIGS: - exp.add_report( - RelativeScatterPlotReport( - attributes=[attribute], - filter_algorithm=["{}-{}".format(rev, config.nick) for rev in REVISIONS], - get_category=lambda run1, run2: run1.get("domain")), - outfile="{}-{}-{}-{}-{}.png".format(exp.name, attribute, config.nick, *REVISIONS)) - -exp.run_steps() diff --git a/experiments/issue814/v7.py b/experiments/issue814/v7.py deleted file mode 100755 index 9b206c475d..0000000000 --- a/experiments/issue814/v7.py +++ /dev/null @@ -1,69 +0,0 @@ -#! /usr/bin/env python -# -*- coding: utf-8 -*- - -import os - -from lab.environments import LocalEnvironment, BaselSlurmEnvironment - -import common_setup -from common_setup import IssueConfig, IssueExperiment -from relativescatter import RelativeScatterPlotReport - -DIR = os.path.dirname(os.path.abspath(__file__)) -BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"] -REVISIONS = ["issue814-v6", "issue814-v7"] -if common_setup.is_test_run(): - BUILDS = ["release32"] -else: - BUILDS = ["debug64", "release32", "release64"] -CONFIGS = [ - IssueConfig( - build + "-{heuristic}".format(**locals()), - ["--evaluator", "h={heuristic}()".format(**locals()), "--search", "lazy_greedy([h],preferred=[h])"], - build_options=[build], - driver_options=["--build", build, "--overall-time-limit", "5m"]) - for build in BUILDS - for heuristic in ["add", "ff"] -] -SUITE = common_setup.DEFAULT_SATISFICING_SUITE -ENVIRONMENT = BaselSlurmEnvironment( - partition="infai_2", - email="malte.helmert@unibas.ch", - export=["PATH", "DOWNWARD_BENCHMARKS"]) - -if common_setup.is_test_run(): - SUITE = IssueExperiment.DEFAULT_TEST_SUITE - ENVIRONMENT = LocalEnvironment(processes=1) - -exp = IssueExperiment( - revisions=REVISIONS, - configs=CONFIGS, - 
environment=ENVIRONMENT, -) -exp.add_suite(BENCHMARKS_DIR, SUITE) - -exp.add_parser(exp.EXITCODE_PARSER) -exp.add_parser(exp.TRANSLATOR_PARSER) -exp.add_parser(exp.SINGLE_SEARCH_PARSER) -exp.add_parser(exp.PLANNER_PARSER) -exp.add_parser(os.path.join(DIR, "parser.py")) - -exp.add_step('build', exp.build) -exp.add_step('start', exp.start_runs) -exp.add_fetcher(name='fetch') - -#exp.add_absolute_report_step() -exp.add_comparison_table_step( - attributes=IssueExperiment.DEFAULT_TABLE_ATTRIBUTES + - ["simplify_before", "simplify_after", "simplify_time"]) - -for attribute in ["memory", "total_time"]: - for config in CONFIGS: - exp.add_report( - RelativeScatterPlotReport( - attributes=[attribute], - filter_algorithm=["{}-{}".format(rev, config.nick) for rev in REVISIONS], - get_category=lambda run1, run2: run1.get("domain")), - outfile="{}-{}-{}-{}-{}.png".format(exp.name, attribute, config.nick, *REVISIONS)) - -exp.run_steps() diff --git a/experiments/issue814/v8.py b/experiments/issue814/v8.py deleted file mode 100755 index 9226dec0be..0000000000 --- a/experiments/issue814/v8.py +++ /dev/null @@ -1,69 +0,0 @@ -#! 
/usr/bin/env python -# -*- coding: utf-8 -*- - -import os - -from lab.environments import LocalEnvironment, BaselSlurmEnvironment - -import common_setup -from common_setup import IssueConfig, IssueExperiment -from relativescatter import RelativeScatterPlotReport - -DIR = os.path.dirname(os.path.abspath(__file__)) -BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"] -REVISIONS = ["issue814-v7", "issue814-v8"] -if common_setup.is_test_run(): - BUILDS = ["release32"] -else: - BUILDS = ["debug64", "release32", "release64"] -CONFIGS = [ - IssueConfig( - build + "-{heuristic}".format(**locals()), - ["--evaluator", "h={heuristic}()".format(**locals()), "--search", "lazy_greedy([h],preferred=[h])"], - build_options=[build], - driver_options=["--build", build, "--overall-time-limit", "5m"]) - for build in BUILDS - for heuristic in ["add", "ff"] -] -SUITE = common_setup.DEFAULT_SATISFICING_SUITE -ENVIRONMENT = BaselSlurmEnvironment( - partition="infai_2", - email="malte.helmert@unibas.ch", - export=["PATH", "DOWNWARD_BENCHMARKS"]) - -if common_setup.is_test_run(): - SUITE = IssueExperiment.DEFAULT_TEST_SUITE - ENVIRONMENT = LocalEnvironment(processes=1) - -exp = IssueExperiment( - revisions=REVISIONS, - configs=CONFIGS, - environment=ENVIRONMENT, -) -exp.add_suite(BENCHMARKS_DIR, SUITE) - -exp.add_parser(exp.EXITCODE_PARSER) -exp.add_parser(exp.TRANSLATOR_PARSER) -exp.add_parser(exp.SINGLE_SEARCH_PARSER) -exp.add_parser(exp.PLANNER_PARSER) -exp.add_parser(os.path.join(DIR, "parser.py")) - -exp.add_step('build', exp.build) -exp.add_step('start', exp.start_runs) -exp.add_fetcher(name='fetch') - -#exp.add_absolute_report_step() -exp.add_comparison_table_step( - attributes=IssueExperiment.DEFAULT_TABLE_ATTRIBUTES + - ["simplify_before", "simplify_after", "simplify_time"]) - -for attribute in ["memory", "total_time"]: - for config in CONFIGS: - exp.add_report( - RelativeScatterPlotReport( - attributes=[attribute], - filter_algorithm=["{}-{}".format(rev, config.nick) for rev in 
REVISIONS], - get_category=lambda run1, run2: run1.get("domain")), - outfile="{}-{}-{}-{}-{}.png".format(exp.name, attribute, config.nick, *REVISIONS)) - -exp.run_steps() diff --git a/experiments/issue814/v9.py b/experiments/issue814/v9.py deleted file mode 100755 index 52417a5b61..0000000000 --- a/experiments/issue814/v9.py +++ /dev/null @@ -1,69 +0,0 @@ -#! /usr/bin/env python -# -*- coding: utf-8 -*- - -import os - -from lab.environments import LocalEnvironment, BaselSlurmEnvironment - -import common_setup -from common_setup import IssueConfig, IssueExperiment -from relativescatter import RelativeScatterPlotReport - -DIR = os.path.dirname(os.path.abspath(__file__)) -BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"] -REVISIONS = ["issue814-v8", "issue814-v9"] -if common_setup.is_test_run(): - BUILDS = ["release32"] -else: - BUILDS = ["debug64", "release32", "release64"] -CONFIGS = [ - IssueConfig( - build + "-{heuristic}".format(**locals()), - ["--evaluator", "h={heuristic}()".format(**locals()), "--search", "lazy_greedy([h],preferred=[h])"], - build_options=[build], - driver_options=["--build", build, "--overall-time-limit", "5m"]) - for build in BUILDS - for heuristic in ["add", "ff"] -] -SUITE = common_setup.DEFAULT_SATISFICING_SUITE -ENVIRONMENT = BaselSlurmEnvironment( - partition="infai_2", - email="malte.helmert@unibas.ch", - export=["PATH", "DOWNWARD_BENCHMARKS"]) - -if common_setup.is_test_run(): - SUITE = IssueExperiment.DEFAULT_TEST_SUITE - ENVIRONMENT = LocalEnvironment(processes=1) - -exp = IssueExperiment( - revisions=REVISIONS, - configs=CONFIGS, - environment=ENVIRONMENT, -) -exp.add_suite(BENCHMARKS_DIR, SUITE) - -exp.add_parser(exp.EXITCODE_PARSER) -exp.add_parser(exp.TRANSLATOR_PARSER) -exp.add_parser(exp.SINGLE_SEARCH_PARSER) -exp.add_parser(exp.PLANNER_PARSER) -exp.add_parser(os.path.join(DIR, "parser.py")) - -exp.add_step('build', exp.build) -exp.add_step('start', exp.start_runs) -exp.add_fetcher(name='fetch') - 
-#exp.add_absolute_report_step() -exp.add_comparison_table_step( - attributes=IssueExperiment.DEFAULT_TABLE_ATTRIBUTES + - ["simplify_before", "simplify_after", "simplify_time"]) - -for attribute in ["memory", "total_time"]: - for config in CONFIGS: - exp.add_report( - RelativeScatterPlotReport( - attributes=[attribute], - filter_algorithm=["{}-{}".format(rev, config.nick) for rev in REVISIONS], - get_category=lambda run1, run2: run1.get("domain")), - outfile="{}-{}-{}-{}-{}.png".format(exp.name, attribute, config.nick, *REVISIONS)) - -exp.run_steps() diff --git a/experiments/issue835/common_setup.py b/experiments/issue835/common_setup.py deleted file mode 100644 index 687019c482..0000000000 --- a/experiments/issue835/common_setup.py +++ /dev/null @@ -1,382 +0,0 @@ -# -*- coding: utf-8 -*- - -import itertools -import os -import platform -import subprocess -import sys - -from lab.experiment import ARGPARSER -from lab import tools - -from downward.experiment import FastDownwardExperiment -from downward.reports.absolute import AbsoluteReport -from downward.reports.compare import ComparativeReport -from downward.reports.scatter import ScatterPlotReport - -from relativescatter import RelativeScatterPlotReport - - -def parse_args(): - ARGPARSER.add_argument( - "--test", - choices=["yes", "no", "auto"], - default="auto", - dest="test_run", - help="test experiment locally on a small suite if --test=yes or " - "--test=auto and we are not on a cluster") - return ARGPARSER.parse_args() - -ARGS = parse_args() - - -DEFAULT_OPTIMAL_SUITE = [ - 'airport', 'barman-opt11-strips', 'barman-opt14-strips', 'blocks', - 'childsnack-opt14-strips', 'depot', 'driverlog', - 'elevators-opt08-strips', 'elevators-opt11-strips', - 'floortile-opt11-strips', 'floortile-opt14-strips', 'freecell', - 'ged-opt14-strips', 'grid', 'gripper', 'hiking-opt14-strips', - 'logistics00', 'logistics98', 'miconic', 'movie', 'mprime', - 'mystery', 'nomystery-opt11-strips', 'openstacks-opt08-strips', - 
'openstacks-opt11-strips', 'openstacks-opt14-strips', - 'openstacks-strips', 'parcprinter-08-strips', - 'parcprinter-opt11-strips', 'parking-opt11-strips', - 'parking-opt14-strips', 'pathways-noneg', 'pegsol-08-strips', - 'pegsol-opt11-strips', 'pipesworld-notankage', - 'pipesworld-tankage', 'psr-small', 'rovers', 'satellite', - 'scanalyzer-08-strips', 'scanalyzer-opt11-strips', - 'sokoban-opt08-strips', 'sokoban-opt11-strips', 'storage', - 'tetris-opt14-strips', 'tidybot-opt11-strips', - 'tidybot-opt14-strips', 'tpp', 'transport-opt08-strips', - 'transport-opt11-strips', 'transport-opt14-strips', - 'trucks-strips', 'visitall-opt11-strips', 'visitall-opt14-strips', - 'woodworking-opt08-strips', 'woodworking-opt11-strips', - 'zenotravel'] - -DEFAULT_SATISFICING_SUITE = [ - 'airport', 'assembly', 'barman-sat11-strips', - 'barman-sat14-strips', 'blocks', 'cavediving-14-adl', - 'childsnack-sat14-strips', 'citycar-sat14-adl', 'depot', - 'driverlog', 'elevators-sat08-strips', 'elevators-sat11-strips', - 'floortile-sat11-strips', 'floortile-sat14-strips', 'freecell', - 'ged-sat14-strips', 'grid', 'gripper', 'hiking-sat14-strips', - 'logistics00', 'logistics98', 'maintenance-sat14-adl', 'miconic', - 'miconic-fulladl', 'miconic-simpleadl', 'movie', 'mprime', - 'mystery', 'nomystery-sat11-strips', 'openstacks', - 'openstacks-sat08-adl', 'openstacks-sat08-strips', - 'openstacks-sat11-strips', 'openstacks-sat14-strips', - 'openstacks-strips', 'optical-telegraphs', 'parcprinter-08-strips', - 'parcprinter-sat11-strips', 'parking-sat11-strips', - 'parking-sat14-strips', 'pathways', 'pathways-noneg', - 'pegsol-08-strips', 'pegsol-sat11-strips', 'philosophers', - 'pipesworld-notankage', 'pipesworld-tankage', 'psr-large', - 'psr-middle', 'psr-small', 'rovers', 'satellite', - 'scanalyzer-08-strips', 'scanalyzer-sat11-strips', 'schedule', - 'sokoban-sat08-strips', 'sokoban-sat11-strips', 'storage', - 'tetris-sat14-strips', 'thoughtful-sat14-strips', - 'tidybot-sat11-strips', 'tpp', 
'transport-sat08-strips', - 'transport-sat11-strips', 'transport-sat14-strips', 'trucks', - 'trucks-strips', 'visitall-sat11-strips', 'visitall-sat14-strips', - 'woodworking-sat08-strips', 'woodworking-sat11-strips', - 'zenotravel'] - - -def get_script(): - """Get file name of main script.""" - return tools.get_script_path() - - -def get_script_dir(): - """Get directory of main script. - - Usually a relative directory (depends on how it was called by the user.)""" - return os.path.dirname(get_script()) - - -def get_experiment_name(): - """Get name for experiment. - - Derived from the absolute filename of the main script, e.g. - "/ham/spam/eggs.py" => "spam-eggs".""" - script = os.path.abspath(get_script()) - script_dir = os.path.basename(os.path.dirname(script)) - script_base = os.path.splitext(os.path.basename(script))[0] - return "%s-%s" % (script_dir, script_base) - - -def get_data_dir(): - """Get data dir for the experiment. - - This is the subdirectory "data" of the directory containing - the main script.""" - return os.path.join(get_script_dir(), "data", get_experiment_name()) - - -def get_repo_base(): - """Get base directory of the repository, as an absolute path. - - Search upwards in the directory tree from the main script until a - directory with a subdirectory named ".hg" is found. 
- - Abort if the repo base cannot be found.""" - path = os.path.abspath(get_script_dir()) - while os.path.dirname(path) != path: - if os.path.exists(os.path.join(path, ".hg")): - return path - path = os.path.dirname(path) - sys.exit("repo base could not be found") - - -def is_running_on_cluster(): - node = platform.node() - return node.endswith(".scicore.unibas.ch") or node.endswith(".cluster.bc2.ch") - - -def is_test_run(): - return ARGS.test_run == "yes" or ( - ARGS.test_run == "auto" and not is_running_on_cluster()) - - -def get_algo_nick(revision, config_nick): - return "{revision}-{config_nick}".format(**locals()) - - -class IssueConfig(object): - """Hold information about a planner configuration. - - See FastDownwardExperiment.add_algorithm() for documentation of the - constructor's options. - - """ - def __init__(self, nick, component_options, - build_options=None, driver_options=None): - self.nick = nick - self.component_options = component_options - self.build_options = build_options - self.driver_options = driver_options - - -class IssueExperiment(FastDownwardExperiment): - """Subclass of FastDownwardExperiment with some convenience features.""" - - DEFAULT_TEST_SUITE = ["depot:p01.pddl", "gripper:prob01.pddl"] - - DEFAULT_TABLE_ATTRIBUTES = [ - "cost", - "coverage", - "error", - "evaluations", - "expansions", - "expansions_until_last_jump", - "generated", - "memory", - "quality", - "run_dir", - "score_evaluations", - "score_expansions", - "score_generated", - "score_memory", - "score_search_time", - "score_total_time", - "search_time", - "total_time", - ] - - DEFAULT_SCATTER_PLOT_ATTRIBUTES = [ - "evaluations", - "expansions", - "expansions_until_last_jump", - "initial_h_value", - "memory", - "search_time", - "total_time", - ] - - PORTFOLIO_ATTRIBUTES = [ - "cost", - "coverage", - "error", - "plan_length", - "run_dir", - ] - - def __init__(self, revisions=None, configs=None, path=None, **kwargs): - """ - - You can either specify both *revisions* and 
*configs* or none - of them. If they are omitted, you will need to call - exp.add_algorithm() manually. - - If *revisions* is given, it must be a non-empty list of - revision identifiers, which specify which planner versions to - use in the experiment. The same versions are used for - translator, preprocessor and search. :: - - IssueExperiment(revisions=["issue123", "4b3d581643"], ...) - - If *configs* is given, it must be a non-empty list of - IssueConfig objects. :: - - IssueExperiment(..., configs=[ - IssueConfig("ff", ["--search", "eager_greedy(ff())"]), - IssueConfig( - "lama", [], - driver_options=["--alias", "seq-sat-lama-2011"]), - ]) - - If *path* is specified, it must be the path to where the - experiment should be built (e.g. - /home/john/experiments/issue123/exp01/). If omitted, the - experiment path is derived automatically from the main - script's filename. Example:: - - script = experiments/issue123/exp01.py --> - path = experiments/issue123/data/issue123-exp01/ - - """ - - path = path or get_data_dir() - - FastDownwardExperiment.__init__(self, path=path, **kwargs) - - if (revisions and not configs) or (not revisions and configs): - raise ValueError( - "please provide either both or none of revisions and configs") - - for rev in revisions: - for config in configs: - self.add_algorithm( - get_algo_nick(rev, config.nick), - get_repo_base(), - rev, - config.component_options, - build_options=config.build_options, - driver_options=config.driver_options) - - self._revisions = revisions - self._configs = configs - - @classmethod - def _is_portfolio(cls, config_nick): - return "fdss" in config_nick - - @classmethod - def get_supported_attributes(cls, config_nick, attributes): - if cls._is_portfolio(config_nick): - return [attr for attr in attributes - if attr in cls.PORTFOLIO_ATTRIBUTES] - return attributes - - def add_absolute_report_step(self, **kwargs): - """Add step that makes an absolute report. 
- - Absolute reports are useful for experiments that don't compare - revisions. - - The report is written to the experiment evaluation directory. - - All *kwargs* will be passed to the AbsoluteReport class. If the - keyword argument *attributes* is not specified, a default list - of attributes is used. :: - - exp.add_absolute_report_step(attributes=["coverage"]) - - """ - kwargs.setdefault("attributes", self.DEFAULT_TABLE_ATTRIBUTES) - report = AbsoluteReport(**kwargs) - outfile = os.path.join( - self.eval_dir, - get_experiment_name() + "." + report.output_format) - self.add_report(report, outfile=outfile) - self.add_step( - 'publish-absolute-report', subprocess.call, ['publish', outfile]) - - def add_comparison_table_step(self, **kwargs): - """Add a step that makes pairwise revision comparisons. - - Create comparative reports for all pairs of Fast Downward - revisions. Each report pairs up the runs of the same config and - lists the two absolute attribute values and their difference - for all attributes in kwargs["attributes"]. - - All *kwargs* will be passed to the CompareConfigsReport class. - If the keyword argument *attributes* is not specified, a - default list of attributes is used. 
:: - - exp.add_comparison_table_step(attributes=["coverage"]) - - """ - kwargs.setdefault("attributes", self.DEFAULT_TABLE_ATTRIBUTES) - - def make_comparison_tables(): - for rev1, rev2 in itertools.combinations(self._revisions, 2): - compared_configs = [] - for config in self._configs: - config_nick = config.nick - compared_configs.append( - ("%s-%s" % (rev1, config_nick), - "%s-%s" % (rev2, config_nick), - "Diff (%s)" % config_nick)) - report = ComparativeReport(compared_configs, **kwargs) - outfile = os.path.join( - self.eval_dir, - "%s-%s-%s-compare.%s" % ( - self.name, rev1, rev2, report.output_format)) - report(self.eval_dir, outfile) - - def publish_comparison_tables(): - for rev1, rev2 in itertools.combinations(self._revisions, 2): - outfile = os.path.join( - self.eval_dir, - "%s-%s-%s-compare.html" % (self.name, rev1, rev2)) - subprocess.call(["publish", outfile]) - - self.add_step("make-comparison-tables", make_comparison_tables) - self.add_step( - "publish-comparison-tables", publish_comparison_tables) - - def add_scatter_plot_step(self, relative=False, attributes=None): - """Add step creating (relative) scatter plots for all revision pairs. - - Create a scatter plot for each combination of attribute, - configuration and revisions pair. If *attributes* is not - specified, a list of common scatter plot attributes is used. - For portfolios all attributes except "cost", "coverage" and - "plan_length" will be ignored. 
:: - - exp.add_scatter_plot_step(attributes=["expansions"]) - - """ - if relative: - report_class = RelativeScatterPlotReport - scatter_dir = os.path.join(self.eval_dir, "scatter-relative") - step_name = "make-relative-scatter-plots" - else: - report_class = ScatterPlotReport - scatter_dir = os.path.join(self.eval_dir, "scatter-absolute") - step_name = "make-absolute-scatter-plots" - if attributes is None: - attributes = self.DEFAULT_SCATTER_PLOT_ATTRIBUTES - - def make_scatter_plot(config_nick, rev1, rev2, attribute): - name = "-".join([self.name, rev1, rev2, attribute, config_nick]) - print "Make scatter plot for", name - algo1 = "{}-{}".format(rev1, config_nick) - algo2 = "{}-{}".format(rev2, config_nick) - report = report_class( - filter_config=[algo1, algo2], - attributes=[attribute], - get_category=lambda run1, run2: run1["domain"], - legend_location=(1.3, 0.5)) - report( - self.eval_dir, - os.path.join(scatter_dir, rev1 + "-" + rev2, name)) - - def make_scatter_plots(): - for config in self._configs: - for rev1, rev2 in itertools.combinations(self._revisions, 2): - for attribute in self.get_supported_attributes( - config.nick, attributes): - make_scatter_plot(config.nick, rev1, rev2, attribute) - - self.add_step(step_name, make_scatter_plots) diff --git a/experiments/issue835/relativescatter.py b/experiments/issue835/relativescatter.py deleted file mode 100644 index f74cb6e721..0000000000 --- a/experiments/issue835/relativescatter.py +++ /dev/null @@ -1,105 +0,0 @@ -# -*- coding: utf-8 -*- - -from collections import defaultdict - -from matplotlib import ticker - -from downward.reports.scatter import ScatterPlotReport -from downward.reports.plot import PlotReport, Matplotlib, MatplotlibPlot - - -# TODO: handle outliers - -# TODO: this is mostly copied from ScatterMatplotlib (scatter.py) -class RelativeScatterMatplotlib(Matplotlib): - @classmethod - def _plot(cls, report, axes, categories, styles): - # Display grid - axes.grid(b=True, linestyle='-', 
color='0.75') - - has_points = False - # Generate the scatter plots - for category, coords in sorted(categories.items()): - X, Y = zip(*coords) - axes.scatter(X, Y, s=42, label=category, **styles[category]) - if X and Y: - has_points = True - - if report.xscale == 'linear' or report.yscale == 'linear': - plot_size = report.missing_val * 1.01 - else: - plot_size = report.missing_val * 1.25 - - # make 5 ticks above and below 1 - yticks = [] - tick_step = report.ylim_top**(1/5.0) - for i in xrange(-5, 6): - yticks.append(tick_step**i) - axes.set_yticks(yticks) - axes.get_yaxis().set_major_formatter(ticker.ScalarFormatter()) - - axes.set_xlim(report.xlim_left or -1, report.xlim_right or plot_size) - axes.set_ylim(report.ylim_bottom or -1, report.ylim_top or plot_size) - - for axis in [axes.xaxis, axes.yaxis]: - MatplotlibPlot.change_axis_formatter( - axis, - report.missing_val if report.show_missing else None) - return has_points - - -class RelativeScatterPlotReport(ScatterPlotReport): - """ - Generate a scatter plot that shows a relative comparison of two - algorithms with regard to the given attribute. The attribute value - of algorithm 1 is shown on the x-axis and the relation to the value - of algorithm 2 on the y-axis. - """ - - def __init__(self, show_missing=True, get_category=None, **kwargs): - ScatterPlotReport.__init__(self, show_missing, get_category, **kwargs) - if self.output_format == 'tex': - raise "not supported" - else: - self.writer = RelativeScatterMatplotlib - - def _fill_categories(self, runs): - # We discard the *runs* parameter. 
- # Map category names to value tuples - categories = defaultdict(list) - self.ylim_bottom = 2 - self.ylim_top = 0.5 - self.xlim_left = float("inf") - for (domain, problem), runs in self.problem_runs.items(): - if len(runs) != 2: - continue - run1, run2 = runs - assert (run1['algorithm'] == self.algorithms[0] and - run2['algorithm'] == self.algorithms[1]) - val1 = run1.get(self.attribute) - val2 = run2.get(self.attribute) - if val1 is None or val2 is None: - continue - category = self.get_category(run1, run2) - assert val1 > 0, (domain, problem, self.algorithms[0], val1) - assert val2 > 0, (domain, problem, self.algorithms[1], val2) - x = val1 - y = val2 / float(val1) - - categories[category].append((x, y)) - - self.ylim_top = max(self.ylim_top, y) - self.ylim_bottom = min(self.ylim_bottom, y) - self.xlim_left = min(self.xlim_left, x) - - # center around 1 - if self.ylim_bottom < 1: - self.ylim_top = max(self.ylim_top, 1 / float(self.ylim_bottom)) - if self.ylim_top > 1: - self.ylim_bottom = min(self.ylim_bottom, 1 / float(self.ylim_top)) - return categories - - def _set_scales(self, xscale, yscale): - # ScatterPlot uses log-scaling on the x-axis by default. - PlotReport._set_scales( - self, xscale or self.attribute.scale or 'log', 'log') diff --git a/experiments/issue835/v1.py b/experiments/issue835/v1.py deleted file mode 100755 index b91065ce2c..0000000000 --- a/experiments/issue835/v1.py +++ /dev/null @@ -1,43 +0,0 @@ -#! 
/usr/bin/env python -# -*- coding: utf-8 -*- - -import os - -from lab.environments import LocalEnvironment, BaselSlurmEnvironment - -import common_setup -from common_setup import IssueConfig, IssueExperiment -from relativescatter import RelativeScatterPlotReport - -DIR = os.path.dirname(os.path.abspath(__file__)) -BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"] -REVISIONS = ["issue835-base", "issue835-v1"] -CONFIGS = [ - IssueConfig('lama-first', [], driver_options=['--alias', 'lama-first', '--overall-time-limit', '5m']), -] -SUITE = common_setup.DEFAULT_SATISFICING_SUITE -ENVIRONMENT = BaselSlurmEnvironment(email="florian.pommerening@unibas.ch") - -if common_setup.is_test_run(): - SUITE = IssueExperiment.DEFAULT_TEST_SUITE - ENVIRONMENT = LocalEnvironment(processes=4) - -exp = IssueExperiment( - revisions=REVISIONS, - configs=CONFIGS, - environment=ENVIRONMENT, -) -exp.add_suite(BENCHMARKS_DIR, SUITE) - -exp.add_parser(exp.EXITCODE_PARSER) -exp.add_parser(exp.TRANSLATOR_PARSER) -exp.add_parser(exp.SINGLE_SEARCH_PARSER) -exp.add_parser(exp.PLANNER_PARSER) - -exp.add_step('build', exp.build) -exp.add_step('start', exp.start_runs) -exp.add_fetcher(name='fetch') - -exp.add_comparison_table_step() - -exp.run_steps() diff --git a/experiments/issue837/common_setup.py b/experiments/issue837/common_setup.py deleted file mode 100644 index 338314a650..0000000000 --- a/experiments/issue837/common_setup.py +++ /dev/null @@ -1,384 +0,0 @@ -# -*- coding: utf-8 -*- - -import itertools -import os -import platform -import subprocess -import sys - -from lab.experiment import ARGPARSER -from lab import tools - -from downward.experiment import FastDownwardExperiment -from downward.reports.absolute import AbsoluteReport -from downward.reports.compare import ComparativeReport -from downward.reports.scatter import ScatterPlotReport - -from relativescatter import RelativeScatterPlotReport - - -def parse_args(): - ARGPARSER.add_argument( - "--test", - choices=["yes", "no", "auto"], - 
default="auto", - dest="test_run", - help="test experiment locally on a small suite if --test=yes or " - "--test=auto and we are not on a cluster") - return ARGPARSER.parse_args() - -ARGS = parse_args() - - -DEFAULT_OPTIMAL_SUITE = [ - 'airport', 'barman-opt11-strips', 'barman-opt14-strips', 'blocks', - 'childsnack-opt14-strips', 'depot', 'driverlog', - 'elevators-opt08-strips', 'elevators-opt11-strips', - 'floortile-opt11-strips', 'floortile-opt14-strips', 'freecell', - 'ged-opt14-strips', 'grid', 'gripper', 'hiking-opt14-strips', - 'logistics00', 'logistics98', 'miconic', 'movie', 'mprime', - 'mystery', 'nomystery-opt11-strips', 'openstacks-opt08-strips', - 'openstacks-opt11-strips', 'openstacks-opt14-strips', - 'openstacks-strips', 'parcprinter-08-strips', - 'parcprinter-opt11-strips', 'parking-opt11-strips', - 'parking-opt14-strips', 'pathways-noneg', 'pegsol-08-strips', - 'pegsol-opt11-strips', 'pipesworld-notankage', - 'pipesworld-tankage', 'psr-small', 'rovers', 'satellite', - 'scanalyzer-08-strips', 'scanalyzer-opt11-strips', - 'sokoban-opt08-strips', 'sokoban-opt11-strips', 'storage', - 'tetris-opt14-strips', 'tidybot-opt11-strips', - 'tidybot-opt14-strips', 'tpp', 'transport-opt08-strips', - 'transport-opt11-strips', 'transport-opt14-strips', - 'trucks-strips', 'visitall-opt11-strips', 'visitall-opt14-strips', - 'woodworking-opt08-strips', 'woodworking-opt11-strips', - 'zenotravel'] - -DEFAULT_SATISFICING_SUITE = [ - 'airport', 'assembly', 'barman-sat11-strips', - 'barman-sat14-strips', 'blocks', 'cavediving-14-adl', - 'childsnack-sat14-strips', 'citycar-sat14-adl', 'depot', - 'driverlog', 'elevators-sat08-strips', 'elevators-sat11-strips', - 'floortile-sat11-strips', 'floortile-sat14-strips', 'freecell', - 'ged-sat14-strips', 'grid', 'gripper', 'hiking-sat14-strips', - 'logistics00', 'logistics98', 'maintenance-sat14-adl', 'miconic', - 'miconic-fulladl', 'miconic-simpleadl', 'movie', 'mprime', - 'mystery', 'nomystery-sat11-strips', 'openstacks', - 
'openstacks-sat08-adl', 'openstacks-sat08-strips', - 'openstacks-sat11-strips', 'openstacks-sat14-strips', - 'openstacks-strips', 'optical-telegraphs', 'parcprinter-08-strips', - 'parcprinter-sat11-strips', 'parking-sat11-strips', - 'parking-sat14-strips', 'pathways', 'pathways-noneg', - 'pegsol-08-strips', 'pegsol-sat11-strips', 'philosophers', - 'pipesworld-notankage', 'pipesworld-tankage', 'psr-large', - 'psr-middle', 'psr-small', 'rovers', 'satellite', - 'scanalyzer-08-strips', 'scanalyzer-sat11-strips', 'schedule', - 'sokoban-sat08-strips', 'sokoban-sat11-strips', 'storage', - 'tetris-sat14-strips', 'thoughtful-sat14-strips', - 'tidybot-sat11-strips', 'tpp', 'transport-sat08-strips', - 'transport-sat11-strips', 'transport-sat14-strips', 'trucks', - 'trucks-strips', 'visitall-sat11-strips', 'visitall-sat14-strips', - 'woodworking-sat08-strips', 'woodworking-sat11-strips', - 'zenotravel'] - - -def get_script(): - """Get file name of main script.""" - return tools.get_script_path() - - -def get_script_dir(): - """Get directory of main script. - - Usually a relative directory (depends on how it was called by the user.)""" - return os.path.dirname(get_script()) - - -def get_experiment_name(): - """Get name for experiment. - - Derived from the absolute filename of the main script, e.g. - "/ham/spam/eggs.py" => "spam-eggs".""" - script = os.path.abspath(get_script()) - script_dir = os.path.basename(os.path.dirname(script)) - script_base = os.path.splitext(os.path.basename(script))[0] - return "%s-%s" % (script_dir, script_base) - - -def get_data_dir(): - """Get data dir for the experiment. - - This is the subdirectory "data" of the directory containing - the main script.""" - return os.path.join(get_script_dir(), "data", get_experiment_name()) - - -def get_repo_base(): - """Get base directory of the repository, as an absolute path. - - Search upwards in the directory tree from the main script until a - directory with a subdirectory named ".hg" is found. 
- - Abort if the repo base cannot be found.""" - path = os.path.abspath(get_script_dir()) - while os.path.dirname(path) != path: - if os.path.exists(os.path.join(path, ".hg")): - return path - path = os.path.dirname(path) - sys.exit("repo base could not be found") - - -def is_running_on_cluster(): - node = platform.node() - return node.endswith(".scicore.unibas.ch") or node.endswith(".cluster.bc2.ch") - - -def is_test_run(): - return ARGS.test_run == "yes" or ( - ARGS.test_run == "auto" and not is_running_on_cluster()) - - -def get_algo_nick(revision, config_nick): - return "{revision}-{config_nick}".format(**locals()) - - -class IssueConfig(object): - """Hold information about a planner configuration. - - See FastDownwardExperiment.add_algorithm() for documentation of the - constructor's options. - - """ - def __init__(self, nick, component_options, - build_options=None, driver_options=None): - self.nick = nick - self.component_options = component_options - self.build_options = build_options - self.driver_options = driver_options - - -class IssueExperiment(FastDownwardExperiment): - """Subclass of FastDownwardExperiment with some convenience features.""" - - DEFAULT_TEST_SUITE = ["depot:p01.pddl", "gripper:prob01.pddl"] - - DEFAULT_TABLE_ATTRIBUTES = [ - "cost", - "coverage", - "error", - "evaluations", - "expansions", - "expansions_until_last_jump", - "generated", - "memory", - "planner_memory", - "planner_time", - "quality", - "run_dir", - "score_evaluations", - "score_expansions", - "score_generated", - "score_memory", - "score_search_time", - "score_total_time", - "search_time", - "total_time", - ] - - DEFAULT_SCATTER_PLOT_ATTRIBUTES = [ - "evaluations", - "expansions", - "expansions_until_last_jump", - "initial_h_value", - "memory", - "search_time", - "total_time", - ] - - PORTFOLIO_ATTRIBUTES = [ - "cost", - "coverage", - "error", - "plan_length", - "run_dir", - ] - - def __init__(self, revisions=None, configs=None, path=None, **kwargs): - """ - - You can 
either specify both *revisions* and *configs* or none - of them. If they are omitted, you will need to call - exp.add_algorithm() manually. - - If *revisions* is given, it must be a non-empty list of - revision identifiers, which specify which planner versions to - use in the experiment. The same versions are used for - translator, preprocessor and search. :: - - IssueExperiment(revisions=["issue123", "4b3d581643"], ...) - - If *configs* is given, it must be a non-empty list of - IssueConfig objects. :: - - IssueExperiment(..., configs=[ - IssueConfig("ff", ["--search", "eager_greedy(ff())"]), - IssueConfig( - "lama", [], - driver_options=["--alias", "seq-sat-lama-2011"]), - ]) - - If *path* is specified, it must be the path to where the - experiment should be built (e.g. - /home/john/experiments/issue123/exp01/). If omitted, the - experiment path is derived automatically from the main - script's filename. Example:: - - script = experiments/issue123/exp01.py --> - path = experiments/issue123/data/issue123-exp01/ - - """ - - path = path or get_data_dir() - - FastDownwardExperiment.__init__(self, path=path, **kwargs) - - if (revisions and not configs) or (not revisions and configs): - raise ValueError( - "please provide either both or none of revisions and configs") - - for rev in revisions: - for config in configs: - self.add_algorithm( - get_algo_nick(rev, config.nick), - get_repo_base(), - rev, - config.component_options, - build_options=config.build_options, - driver_options=config.driver_options) - - self._revisions = revisions - self._configs = configs - - @classmethod - def _is_portfolio(cls, config_nick): - return "fdss" in config_nick - - @classmethod - def get_supported_attributes(cls, config_nick, attributes): - if cls._is_portfolio(config_nick): - return [attr for attr in attributes - if attr in cls.PORTFOLIO_ATTRIBUTES] - return attributes - - def add_absolute_report_step(self, **kwargs): - """Add step that makes an absolute report. 
- - Absolute reports are useful for experiments that don't compare - revisions. - - The report is written to the experiment evaluation directory. - - All *kwargs* will be passed to the AbsoluteReport class. If the - keyword argument *attributes* is not specified, a default list - of attributes is used. :: - - exp.add_absolute_report_step(attributes=["coverage"]) - - """ - kwargs.setdefault("attributes", self.DEFAULT_TABLE_ATTRIBUTES) - report = AbsoluteReport(**kwargs) - outfile = os.path.join( - self.eval_dir, - get_experiment_name() + "." + report.output_format) - self.add_report(report, outfile=outfile) - self.add_step( - 'publish-absolute-report', subprocess.call, ['publish', outfile]) - - def add_comparison_table_step(self, **kwargs): - """Add a step that makes pairwise revision comparisons. - - Create comparative reports for all pairs of Fast Downward - revisions. Each report pairs up the runs of the same config and - lists the two absolute attribute values and their difference - for all attributes in kwargs["attributes"]. - - All *kwargs* will be passed to the CompareConfigsReport class. - If the keyword argument *attributes* is not specified, a - default list of attributes is used. 
:: - - exp.add_comparison_table_step(attributes=["coverage"]) - - """ - kwargs.setdefault("attributes", self.DEFAULT_TABLE_ATTRIBUTES) - - def make_comparison_tables(): - for rev1, rev2 in itertools.combinations(self._revisions, 2): - compared_configs = [] - for config in self._configs: - config_nick = config.nick - compared_configs.append( - ("%s-%s" % (rev1, config_nick), - "%s-%s" % (rev2, config_nick), - "Diff (%s)" % config_nick)) - report = ComparativeReport(compared_configs, **kwargs) - outfile = os.path.join( - self.eval_dir, - "%s-%s-%s-compare.%s" % ( - self.name, rev1, rev2, report.output_format)) - report(self.eval_dir, outfile) - - def publish_comparison_tables(): - for rev1, rev2 in itertools.combinations(self._revisions, 2): - outfile = os.path.join( - self.eval_dir, - "%s-%s-%s-compare.html" % (self.name, rev1, rev2)) - subprocess.call(["publish", outfile]) - - self.add_step("make-comparison-tables", make_comparison_tables) - self.add_step( - "publish-comparison-tables", publish_comparison_tables) - - def add_scatter_plot_step(self, relative=False, attributes=None): - """Add step creating (relative) scatter plots for all revision pairs. - - Create a scatter plot for each combination of attribute, - configuration and revisions pair. If *attributes* is not - specified, a list of common scatter plot attributes is used. - For portfolios all attributes except "cost", "coverage" and - "plan_length" will be ignored. 
:: - - exp.add_scatter_plot_step(attributes=["expansions"]) - - """ - if relative: - report_class = RelativeScatterPlotReport - scatter_dir = os.path.join(self.eval_dir, "scatter-relative") - step_name = "make-relative-scatter-plots" - else: - report_class = ScatterPlotReport - scatter_dir = os.path.join(self.eval_dir, "scatter-absolute") - step_name = "make-absolute-scatter-plots" - if attributes is None: - attributes = self.DEFAULT_SCATTER_PLOT_ATTRIBUTES - - def make_scatter_plot(config_nick, rev1, rev2, attribute): - name = "-".join([self.name, rev1, rev2, attribute, config_nick]) - print "Make scatter plot for", name - algo1 = "{}-{}".format(rev1, config_nick) - algo2 = "{}-{}".format(rev2, config_nick) - report = report_class( - filter_config=[algo1, algo2], - attributes=[attribute], - get_category=lambda run1, run2: run1["domain"], - legend_location=(1.3, 0.5)) - report( - self.eval_dir, - os.path.join(scatter_dir, rev1 + "-" + rev2, name)) - - def make_scatter_plots(): - for config in self._configs: - for rev1, rev2 in itertools.combinations(self._revisions, 2): - for attribute in self.get_supported_attributes( - config.nick, attributes): - make_scatter_plot(config.nick, rev1, rev2, attribute) - - self.add_step(step_name, make_scatter_plots) diff --git a/experiments/issue837/relativescatter.py b/experiments/issue837/relativescatter.py deleted file mode 100644 index f74cb6e721..0000000000 --- a/experiments/issue837/relativescatter.py +++ /dev/null @@ -1,105 +0,0 @@ -# -*- coding: utf-8 -*- - -from collections import defaultdict - -from matplotlib import ticker - -from downward.reports.scatter import ScatterPlotReport -from downward.reports.plot import PlotReport, Matplotlib, MatplotlibPlot - - -# TODO: handle outliers - -# TODO: this is mostly copied from ScatterMatplotlib (scatter.py) -class RelativeScatterMatplotlib(Matplotlib): - @classmethod - def _plot(cls, report, axes, categories, styles): - # Display grid - axes.grid(b=True, linestyle='-', 
color='0.75') - - has_points = False - # Generate the scatter plots - for category, coords in sorted(categories.items()): - X, Y = zip(*coords) - axes.scatter(X, Y, s=42, label=category, **styles[category]) - if X and Y: - has_points = True - - if report.xscale == 'linear' or report.yscale == 'linear': - plot_size = report.missing_val * 1.01 - else: - plot_size = report.missing_val * 1.25 - - # make 5 ticks above and below 1 - yticks = [] - tick_step = report.ylim_top**(1/5.0) - for i in xrange(-5, 6): - yticks.append(tick_step**i) - axes.set_yticks(yticks) - axes.get_yaxis().set_major_formatter(ticker.ScalarFormatter()) - - axes.set_xlim(report.xlim_left or -1, report.xlim_right or plot_size) - axes.set_ylim(report.ylim_bottom or -1, report.ylim_top or plot_size) - - for axis in [axes.xaxis, axes.yaxis]: - MatplotlibPlot.change_axis_formatter( - axis, - report.missing_val if report.show_missing else None) - return has_points - - -class RelativeScatterPlotReport(ScatterPlotReport): - """ - Generate a scatter plot that shows a relative comparison of two - algorithms with regard to the given attribute. The attribute value - of algorithm 1 is shown on the x-axis and the relation to the value - of algorithm 2 on the y-axis. - """ - - def __init__(self, show_missing=True, get_category=None, **kwargs): - ScatterPlotReport.__init__(self, show_missing, get_category, **kwargs) - if self.output_format == 'tex': - raise "not supported" - else: - self.writer = RelativeScatterMatplotlib - - def _fill_categories(self, runs): - # We discard the *runs* parameter. 
- # Map category names to value tuples - categories = defaultdict(list) - self.ylim_bottom = 2 - self.ylim_top = 0.5 - self.xlim_left = float("inf") - for (domain, problem), runs in self.problem_runs.items(): - if len(runs) != 2: - continue - run1, run2 = runs - assert (run1['algorithm'] == self.algorithms[0] and - run2['algorithm'] == self.algorithms[1]) - val1 = run1.get(self.attribute) - val2 = run2.get(self.attribute) - if val1 is None or val2 is None: - continue - category = self.get_category(run1, run2) - assert val1 > 0, (domain, problem, self.algorithms[0], val1) - assert val2 > 0, (domain, problem, self.algorithms[1], val2) - x = val1 - y = val2 / float(val1) - - categories[category].append((x, y)) - - self.ylim_top = max(self.ylim_top, y) - self.ylim_bottom = min(self.ylim_bottom, y) - self.xlim_left = min(self.xlim_left, x) - - # center around 1 - if self.ylim_bottom < 1: - self.ylim_top = max(self.ylim_top, 1 / float(self.ylim_bottom)) - if self.ylim_top > 1: - self.ylim_bottom = min(self.ylim_bottom, 1 / float(self.ylim_top)) - return categories - - def _set_scales(self, xscale, yscale): - # ScatterPlot uses log-scaling on the x-axis by default. - PlotReport._set_scales( - self, xscale or self.attribute.scale or 'log', 'log') diff --git a/experiments/issue837/v1-opt.py b/experiments/issue837/v1-opt.py deleted file mode 100755 index 9bbf8d99aa..0000000000 --- a/experiments/issue837/v1-opt.py +++ /dev/null @@ -1,81 +0,0 @@ -#! 
/usr/bin/env python -# -*- coding: utf-8 -*- - -import itertools -import os - -from lab.environments import LocalEnvironment, BaselSlurmEnvironment - -from downward.reports.compare import ComparativeReport - -import common_setup -from common_setup import IssueConfig, IssueExperiment -from relativescatter import RelativeScatterPlotReport - -DIR = os.path.dirname(os.path.abspath(__file__)) -SCRIPT_NAME = os.path.splitext(os.path.basename(__file__))[0] -BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"] -REVISIONS = ["issue837-base", "issue837-v1"] -BUILDS = ["debug64"] -SEARCHES = [ - ("bjolp", [ - "--evaluator", - "lmc=lmcount(lm_merged([lm_rhw(),lm_hm(m=1)]),admissible=true)", - "--search", - "astar(lmc,lazy_evaluator=lmc)"]), - ("blind", ["--search", "astar(blind())"]), - ("cegar", ["--search", "astar(cegar())"]), - ("divpot", ["--search", "astar(diverse_potentials())"]), - ("ipdb", ["--search", "astar(ipdb())"]), - ("lmcut", ["--search", "astar(lmcut())"]), - ("mas", - ["--search", "astar(merge_and_shrink(shrink_strategy=shrink_bisimulation(greedy=false)," - " merge_strategy=merge_sccs(order_of_sccs=topological," - " merge_selector=score_based_filtering(scoring_functions=[goal_relevance, dfp, total_order]))," - " label_reduction=exact(before_shrinking=true, before_merging=false)," - " max_states=50000, threshold_before_merge=1))"]), - ("occ", ["--search", "astar(operatorcounting([state_equation_constraints(), lmcut_constraints()]))"]), - ("blind-sss-simple", ["--search", "astar(blind(), pruning=stubborn_sets_simple())"]), - ("blind-sss-ec", ["--search", "astar(blind(), pruning=stubborn_sets_ec())"]), - ("h2", ["--search", "astar(hm(m=2))"]), - ("hmax", ["--search", "astar(hmax())"]), -] -CONFIGS = [ - IssueConfig( - "-".join([search_nick, build]), - search, - build_options=[build], - driver_options=["--build", build, "--overall-time-limit", "5m"]) - for build in BUILDS - for search_nick, search in SEARCHES -] -SUITE = common_setup.DEFAULT_OPTIMAL_SUITE 
-ENVIRONMENT = BaselSlurmEnvironment( - partition="infai_2", - email="jendrik.seipp@unibas.ch", - export=["PATH", "DOWNWARD_BENCHMARKS"]) - -if common_setup.is_test_run(): - SUITE = IssueExperiment.DEFAULT_TEST_SUITE - ENVIRONMENT = LocalEnvironment(processes=1) - -exp = IssueExperiment( - revisions=REVISIONS, - configs=CONFIGS, - environment=ENVIRONMENT, -) -exp.add_suite(BENCHMARKS_DIR, SUITE) - -exp.add_parser(exp.EXITCODE_PARSER) -exp.add_parser(exp.TRANSLATOR_PARSER) -exp.add_parser(exp.SINGLE_SEARCH_PARSER) -exp.add_parser(exp.PLANNER_PARSER) - -exp.add_step('build', exp.build) -exp.add_step('start', exp.start_runs) -exp.add_fetcher(name='fetch') - -#exp.add_absolute_report_step() -exp.add_comparison_table_step() - -exp.run_steps() diff --git a/experiments/issue837/v1-sat.py b/experiments/issue837/v1-sat.py deleted file mode 100755 index 3793b5db22..0000000000 --- a/experiments/issue837/v1-sat.py +++ /dev/null @@ -1,92 +0,0 @@ -#! /usr/bin/env python -# -*- coding: utf-8 -*- - -import itertools -import os - -from lab.environments import LocalEnvironment, BaselSlurmEnvironment - -from downward.reports.compare import ComparativeReport - -import common_setup -from common_setup import IssueConfig, IssueExperiment -from relativescatter import RelativeScatterPlotReport - -DIR = os.path.dirname(os.path.abspath(__file__)) -SCRIPT_NAME = os.path.splitext(os.path.basename(__file__))[0] -BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"] -REVISIONS = ["issue837-base", "issue837-v1"] -BUILDS = ["debug64"] -CONFIG_DICT = { - "eager_greedy_ff": [ - "--heuristic", - "h=ff()", - "--search", - "eager_greedy([h], preferred=[h])"], - "eager_greedy_cea": [ - "--heuristic", - "h=cea()", - "--search", - "eager_greedy([h], preferred=[h])"], - "lazy_greedy_add": [ - "--heuristic", - "h=add()", - "--search", - "lazy_greedy([h], preferred=[h])"], - "lazy_greedy_cg": [ - "--heuristic", - "h=cg()", - "--search", - "lazy_greedy([h], preferred=[h])"], - "lama-first": [ - "--evaluator", - 
"hlm=lmcount(lm_factory=lm_rhw(reasonable_orders=true),transform=adapt_costs(one),pref=false)", - "--evaluator", "hff=ff(transform=adapt_costs(one))", - "--search", """lazy_greedy([hff,hlm],preferred=[hff,hlm], - cost_type=one,reopen_closed=false)"""], - "ff-typed": [ - "--heuristic", "hff=ff()", - "--search", - "lazy(alt([single(hff), single(hff, pref_only=true)," - " type_based([hff, g()])], boost=1000)," - " preferred=[hff], cost_type=one)"], -} -CONFIGS = [ - IssueConfig( - "-".join([config_nick, build]), - config, - build_options=[build], - driver_options=["--build", build, "--overall-time-limit", "5m"]) - for build in BUILDS - for config_nick, config in CONFIG_DICT.items() -] -SUITE = common_setup.DEFAULT_SATISFICING_SUITE -ENVIRONMENT = BaselSlurmEnvironment( - partition="infai_2", - email="jendrik.seipp@unibas.ch", - export=["PATH", "DOWNWARD_BENCHMARKS"]) - -if common_setup.is_test_run(): - SUITE = IssueExperiment.DEFAULT_TEST_SUITE - ENVIRONMENT = LocalEnvironment(processes=1) - -exp = IssueExperiment( - revisions=REVISIONS, - configs=CONFIGS, - environment=ENVIRONMENT, -) -exp.add_suite(BENCHMARKS_DIR, SUITE) - -exp.add_parser(exp.EXITCODE_PARSER) -exp.add_parser(exp.TRANSLATOR_PARSER) -exp.add_parser(exp.SINGLE_SEARCH_PARSER) -exp.add_parser(exp.PLANNER_PARSER) - -exp.add_step('build', exp.build) -exp.add_step('start', exp.start_runs) -exp.add_fetcher(name='fetch') - -#exp.add_absolute_report_step() -exp.add_comparison_table_step() - -exp.run_steps() diff --git a/experiments/issue838/common_setup.py b/experiments/issue838/common_setup.py deleted file mode 100644 index f64c5c15b3..0000000000 --- a/experiments/issue838/common_setup.py +++ /dev/null @@ -1,394 +0,0 @@ -# -*- coding: utf-8 -*- - -import itertools -import os -import platform -import subprocess -import sys - -from lab.experiment import ARGPARSER -from lab import tools - -from downward.experiment import FastDownwardExperiment -from downward.reports.absolute import AbsoluteReport -from 
downward.reports.compare import ComparativeReport -from downward.reports.scatter import ScatterPlotReport - -from relativescatter import RelativeScatterPlotReport - - -def parse_args(): - ARGPARSER.add_argument( - "--test", - choices=["yes", "no", "auto"], - default="auto", - dest="test_run", - help="test experiment locally on a small suite if --test=yes or " - "--test=auto and we are not on a cluster") - return ARGPARSER.parse_args() - -ARGS = parse_args() - - -DEFAULT_OPTIMAL_SUITE = [ - 'agricola-opt18-strips', 'airport', 'barman-opt11-strips', - 'barman-opt14-strips', 'blocks', 'childsnack-opt14-strips', - 'data-network-opt18-strips', 'depot', 'driverlog', - 'elevators-opt08-strips', 'elevators-opt11-strips', - 'floortile-opt11-strips', 'floortile-opt14-strips', 'freecell', - 'ged-opt14-strips', 'grid', 'gripper', 'hiking-opt14-strips', - 'logistics00', 'logistics98', 'miconic', 'movie', 'mprime', - 'mystery', 'nomystery-opt11-strips', 'openstacks-opt08-strips', - 'openstacks-opt11-strips', 'openstacks-opt14-strips', - 'openstacks-strips', 'organic-synthesis-opt18-strips', - 'organic-synthesis-split-opt18-strips', 'parcprinter-08-strips', - 'parcprinter-opt11-strips', 'parking-opt11-strips', - 'parking-opt14-strips', 'pathways-noneg', 'pegsol-08-strips', - 'pegsol-opt11-strips', 'petri-net-alignment-opt18-strips', - 'pipesworld-notankage', 'pipesworld-tankage', 'psr-small', 'rovers', - 'satellite', 'scanalyzer-08-strips', 'scanalyzer-opt11-strips', - 'snake-opt18-strips', 'sokoban-opt08-strips', - 'sokoban-opt11-strips', 'spider-opt18-strips', 'storage', - 'termes-opt18-strips', 'tetris-opt14-strips', - 'tidybot-opt11-strips', 'tidybot-opt14-strips', 'tpp', - 'transport-opt08-strips', 'transport-opt11-strips', - 'transport-opt14-strips', 'trucks-strips', 'visitall-opt11-strips', - 'visitall-opt14-strips', 'woodworking-opt08-strips', - 'woodworking-opt11-strips', 'zenotravel'] - -DEFAULT_SATISFICING_SUITE = [ - 'agricola-sat18-strips', 'airport', 'assembly', - 
'barman-sat11-strips', 'barman-sat14-strips', 'blocks', - 'caldera-sat18-adl', 'caldera-split-sat18-adl', 'cavediving-14-adl', - 'childsnack-sat14-strips', 'citycar-sat14-adl', - 'data-network-sat18-strips', 'depot', 'driverlog', - 'elevators-sat08-strips', 'elevators-sat11-strips', - 'flashfill-sat18-adl', 'floortile-sat11-strips', - 'floortile-sat14-strips', 'freecell', 'ged-sat14-strips', 'grid', - 'gripper', 'hiking-sat14-strips', 'logistics00', 'logistics98', - 'maintenance-sat14-adl', 'miconic', 'miconic-fulladl', - 'miconic-simpleadl', 'movie', 'mprime', 'mystery', - 'nomystery-sat11-strips', 'nurikabe-sat18-adl', 'openstacks', - 'openstacks-sat08-adl', 'openstacks-sat08-strips', - 'openstacks-sat11-strips', 'openstacks-sat14-strips', - 'openstacks-strips', 'optical-telegraphs', - 'organic-synthesis-sat18-strips', - 'organic-synthesis-split-sat18-strips', 'parcprinter-08-strips', - 'parcprinter-sat11-strips', 'parking-sat11-strips', - 'parking-sat14-strips', 'pathways', 'pathways-noneg', - 'pegsol-08-strips', 'pegsol-sat11-strips', 'philosophers', - 'pipesworld-notankage', 'pipesworld-tankage', 'psr-large', - 'psr-middle', 'psr-small', 'rovers', 'satellite', - 'scanalyzer-08-strips', 'scanalyzer-sat11-strips', 'schedule', - 'settlers-sat18-adl', 'snake-sat18-strips', 'sokoban-sat08-strips', - 'sokoban-sat11-strips', 'spider-sat18-strips', 'storage', - 'termes-sat18-strips', 'tetris-sat14-strips', - 'thoughtful-sat14-strips', 'tidybot-sat11-strips', 'tpp', - 'transport-sat08-strips', 'transport-sat11-strips', - 'transport-sat14-strips', 'trucks', 'trucks-strips', - 'visitall-sat11-strips', 'visitall-sat14-strips', - 'woodworking-sat08-strips', 'woodworking-sat11-strips', - 'zenotravel'] - - -def get_script(): - """Get file name of main script.""" - return tools.get_script_path() - - -def get_script_dir(): - """Get directory of main script. 
- - Usually a relative directory (depends on how it was called by the user.)""" - return os.path.dirname(get_script()) - - -def get_experiment_name(): - """Get name for experiment. - - Derived from the absolute filename of the main script, e.g. - "/ham/spam/eggs.py" => "spam-eggs".""" - script = os.path.abspath(get_script()) - script_dir = os.path.basename(os.path.dirname(script)) - script_base = os.path.splitext(os.path.basename(script))[0] - return "%s-%s" % (script_dir, script_base) - - -def get_data_dir(): - """Get data dir for the experiment. - - This is the subdirectory "data" of the directory containing - the main script.""" - return os.path.join(get_script_dir(), "data", get_experiment_name()) - - -def get_repo_base(): - """Get base directory of the repository, as an absolute path. - - Search upwards in the directory tree from the main script until a - directory with a subdirectory named ".hg" is found. - - Abort if the repo base cannot be found.""" - path = os.path.abspath(get_script_dir()) - while os.path.dirname(path) != path: - if os.path.exists(os.path.join(path, ".hg")): - return path - path = os.path.dirname(path) - sys.exit("repo base could not be found") - - -def is_running_on_cluster(): - node = platform.node() - return node.endswith(".scicore.unibas.ch") or node.endswith(".cluster.bc2.ch") - - -def is_test_run(): - return ARGS.test_run == "yes" or ( - ARGS.test_run == "auto" and not is_running_on_cluster()) - - -def get_algo_nick(revision, config_nick): - return "{revision}-{config_nick}".format(**locals()) - - -class IssueConfig(object): - """Hold information about a planner configuration. - - See FastDownwardExperiment.add_algorithm() for documentation of the - constructor's options. 
- - """ - def __init__(self, nick, component_options, - build_options=None, driver_options=None): - self.nick = nick - self.component_options = component_options - self.build_options = build_options - self.driver_options = driver_options - - -class IssueExperiment(FastDownwardExperiment): - """Subclass of FastDownwardExperiment with some convenience features.""" - - DEFAULT_TEST_SUITE = ["depot:p01.pddl", "gripper:prob01.pddl"] - - DEFAULT_TABLE_ATTRIBUTES = [ - "cost", - "coverage", - "error", - "evaluations", - "expansions", - "expansions_until_last_jump", - "generated", - "memory", - "planner_memory", - "planner_time", - "quality", - "run_dir", - "score_evaluations", - "score_expansions", - "score_generated", - "score_memory", - "score_search_time", - "score_total_time", - "search_time", - "total_time", - ] - - DEFAULT_SCATTER_PLOT_ATTRIBUTES = [ - "evaluations", - "expansions", - "expansions_until_last_jump", - "initial_h_value", - "memory", - "search_time", - "total_time", - ] - - PORTFOLIO_ATTRIBUTES = [ - "cost", - "coverage", - "error", - "plan_length", - "run_dir", - ] - - def __init__(self, revisions=None, configs=None, path=None, **kwargs): - """ - - You can either specify both *revisions* and *configs* or none - of them. If they are omitted, you will need to call - exp.add_algorithm() manually. - - If *revisions* is given, it must be a non-empty list of - revision identifiers, which specify which planner versions to - use in the experiment. The same versions are used for - translator, preprocessor and search. :: - - IssueExperiment(revisions=["issue123", "4b3d581643"], ...) - - If *configs* is given, it must be a non-empty list of - IssueConfig objects. :: - - IssueExperiment(..., configs=[ - IssueConfig("ff", ["--search", "eager_greedy(ff())"]), - IssueConfig( - "lama", [], - driver_options=["--alias", "seq-sat-lama-2011"]), - ]) - - If *path* is specified, it must be the path to where the - experiment should be built (e.g. 
- /home/john/experiments/issue123/exp01/). If omitted, the - experiment path is derived automatically from the main - script's filename. Example:: - - script = experiments/issue123/exp01.py --> - path = experiments/issue123/data/issue123-exp01/ - - """ - - path = path or get_data_dir() - - FastDownwardExperiment.__init__(self, path=path, **kwargs) - - if (revisions and not configs) or (not revisions and configs): - raise ValueError( - "please provide either both or none of revisions and configs") - - for rev in revisions: - for config in configs: - self.add_algorithm( - get_algo_nick(rev, config.nick), - get_repo_base(), - rev, - config.component_options, - build_options=config.build_options, - driver_options=config.driver_options) - - self._revisions = revisions - self._configs = configs - - @classmethod - def _is_portfolio(cls, config_nick): - return "fdss" in config_nick - - @classmethod - def get_supported_attributes(cls, config_nick, attributes): - if cls._is_portfolio(config_nick): - return [attr for attr in attributes - if attr in cls.PORTFOLIO_ATTRIBUTES] - return attributes - - def add_absolute_report_step(self, **kwargs): - """Add step that makes an absolute report. - - Absolute reports are useful for experiments that don't compare - revisions. - - The report is written to the experiment evaluation directory. - - All *kwargs* will be passed to the AbsoluteReport class. If the - keyword argument *attributes* is not specified, a default list - of attributes is used. :: - - exp.add_absolute_report_step(attributes=["coverage"]) - - """ - kwargs.setdefault("attributes", self.DEFAULT_TABLE_ATTRIBUTES) - report = AbsoluteReport(**kwargs) - outfile = os.path.join( - self.eval_dir, - get_experiment_name() + "." 
+ report.output_format) - self.add_report(report, outfile=outfile) - self.add_step( - 'publish-absolute-report', subprocess.call, ['publish', outfile]) - - def add_comparison_table_step(self, **kwargs): - """Add a step that makes pairwise revision comparisons. - - Create comparative reports for all pairs of Fast Downward - revisions. Each report pairs up the runs of the same config and - lists the two absolute attribute values and their difference - for all attributes in kwargs["attributes"]. - - All *kwargs* will be passed to the CompareConfigsReport class. - If the keyword argument *attributes* is not specified, a - default list of attributes is used. :: - - exp.add_comparison_table_step(attributes=["coverage"]) - - """ - kwargs.setdefault("attributes", self.DEFAULT_TABLE_ATTRIBUTES) - - def make_comparison_tables(): - for rev1, rev2 in itertools.combinations(self._revisions, 2): - compared_configs = [] - for config in self._configs: - config_nick = config.nick - compared_configs.append( - ("%s-%s" % (rev1, config_nick), - "%s-%s" % (rev2, config_nick), - "Diff (%s)" % config_nick)) - report = ComparativeReport(compared_configs, **kwargs) - outfile = os.path.join( - self.eval_dir, - "%s-%s-%s-compare.%s" % ( - self.name, rev1, rev2, report.output_format)) - report(self.eval_dir, outfile) - - def publish_comparison_tables(): - for rev1, rev2 in itertools.combinations(self._revisions, 2): - outfile = os.path.join( - self.eval_dir, - "%s-%s-%s-compare.html" % (self.name, rev1, rev2)) - subprocess.call(["publish", outfile]) - - self.add_step("make-comparison-tables", make_comparison_tables) - self.add_step( - "publish-comparison-tables", publish_comparison_tables) - - def add_scatter_plot_step(self, relative=False, attributes=None): - """Add step creating (relative) scatter plots for all revision pairs. - - Create a scatter plot for each combination of attribute, - configuration and revisions pair. 
If *attributes* is not - specified, a list of common scatter plot attributes is used. - For portfolios all attributes except "cost", "coverage" and - "plan_length" will be ignored. :: - - exp.add_scatter_plot_step(attributes=["expansions"]) - - """ - if relative: - report_class = RelativeScatterPlotReport - scatter_dir = os.path.join(self.eval_dir, "scatter-relative") - step_name = "make-relative-scatter-plots" - else: - report_class = ScatterPlotReport - scatter_dir = os.path.join(self.eval_dir, "scatter-absolute") - step_name = "make-absolute-scatter-plots" - if attributes is None: - attributes = self.DEFAULT_SCATTER_PLOT_ATTRIBUTES - - def make_scatter_plot(config_nick, rev1, rev2, attribute): - name = "-".join([self.name, rev1, rev2, attribute, config_nick]) - print "Make scatter plot for", name - algo1 = "{}-{}".format(rev1, config_nick) - algo2 = "{}-{}".format(rev2, config_nick) - report = report_class( - filter_config=[algo1, algo2], - attributes=[attribute], - get_category=lambda run1, run2: run1["domain"], - legend_location=(1.3, 0.5)) - report( - self.eval_dir, - os.path.join(scatter_dir, rev1 + "-" + rev2, name)) - - def make_scatter_plots(): - for config in self._configs: - for rev1, rev2 in itertools.combinations(self._revisions, 2): - for attribute in self.get_supported_attributes( - config.nick, attributes): - make_scatter_plot(config.nick, rev1, rev2, attribute) - - self.add_step(step_name, make_scatter_plots) diff --git a/experiments/issue838/relativescatter.py b/experiments/issue838/relativescatter.py deleted file mode 100644 index f74cb6e721..0000000000 --- a/experiments/issue838/relativescatter.py +++ /dev/null @@ -1,105 +0,0 @@ -# -*- coding: utf-8 -*- - -from collections import defaultdict - -from matplotlib import ticker - -from downward.reports.scatter import ScatterPlotReport -from downward.reports.plot import PlotReport, Matplotlib, MatplotlibPlot - - -# TODO: handle outliers - -# TODO: this is mostly copied from ScatterMatplotlib 
(scatter.py) -class RelativeScatterMatplotlib(Matplotlib): - @classmethod - def _plot(cls, report, axes, categories, styles): - # Display grid - axes.grid(b=True, linestyle='-', color='0.75') - - has_points = False - # Generate the scatter plots - for category, coords in sorted(categories.items()): - X, Y = zip(*coords) - axes.scatter(X, Y, s=42, label=category, **styles[category]) - if X and Y: - has_points = True - - if report.xscale == 'linear' or report.yscale == 'linear': - plot_size = report.missing_val * 1.01 - else: - plot_size = report.missing_val * 1.25 - - # make 5 ticks above and below 1 - yticks = [] - tick_step = report.ylim_top**(1/5.0) - for i in xrange(-5, 6): - yticks.append(tick_step**i) - axes.set_yticks(yticks) - axes.get_yaxis().set_major_formatter(ticker.ScalarFormatter()) - - axes.set_xlim(report.xlim_left or -1, report.xlim_right or plot_size) - axes.set_ylim(report.ylim_bottom or -1, report.ylim_top or plot_size) - - for axis in [axes.xaxis, axes.yaxis]: - MatplotlibPlot.change_axis_formatter( - axis, - report.missing_val if report.show_missing else None) - return has_points - - -class RelativeScatterPlotReport(ScatterPlotReport): - """ - Generate a scatter plot that shows a relative comparison of two - algorithms with regard to the given attribute. The attribute value - of algorithm 1 is shown on the x-axis and the relation to the value - of algorithm 2 on the y-axis. - """ - - def __init__(self, show_missing=True, get_category=None, **kwargs): - ScatterPlotReport.__init__(self, show_missing, get_category, **kwargs) - if self.output_format == 'tex': - raise "not supported" - else: - self.writer = RelativeScatterMatplotlib - - def _fill_categories(self, runs): - # We discard the *runs* parameter. 
- # Map category names to value tuples - categories = defaultdict(list) - self.ylim_bottom = 2 - self.ylim_top = 0.5 - self.xlim_left = float("inf") - for (domain, problem), runs in self.problem_runs.items(): - if len(runs) != 2: - continue - run1, run2 = runs - assert (run1['algorithm'] == self.algorithms[0] and - run2['algorithm'] == self.algorithms[1]) - val1 = run1.get(self.attribute) - val2 = run2.get(self.attribute) - if val1 is None or val2 is None: - continue - category = self.get_category(run1, run2) - assert val1 > 0, (domain, problem, self.algorithms[0], val1) - assert val2 > 0, (domain, problem, self.algorithms[1], val2) - x = val1 - y = val2 / float(val1) - - categories[category].append((x, y)) - - self.ylim_top = max(self.ylim_top, y) - self.ylim_bottom = min(self.ylim_bottom, y) - self.xlim_left = min(self.xlim_left, x) - - # center around 1 - if self.ylim_bottom < 1: - self.ylim_top = max(self.ylim_top, 1 / float(self.ylim_bottom)) - if self.ylim_top > 1: - self.ylim_bottom = min(self.ylim_bottom, 1 / float(self.ylim_top)) - return categories - - def _set_scales(self, xscale, yscale): - # ScatterPlot uses log-scaling on the x-axis by default. - PlotReport._set_scales( - self, xscale or self.attribute.scale or 'log', 'log') diff --git a/experiments/issue838/v1-use-cache.py b/experiments/issue838/v1-use-cache.py deleted file mode 100755 index 66b553edba..0000000000 --- a/experiments/issue838/v1-use-cache.py +++ /dev/null @@ -1,75 +0,0 @@ -#! 
/usr/bin/env python -# -*- coding: utf-8 -*- - -import itertools -import os - -from lab.environments import LocalEnvironment, BaselSlurmEnvironment - -from downward.reports.compare import ComparativeReport - -import common_setup -from common_setup import IssueConfig, IssueExperiment -from relativescatter import RelativeScatterPlotReport - -DIR = os.path.dirname(os.path.abspath(__file__)) -SCRIPT_NAME = os.path.splitext(os.path.basename(__file__))[0] -BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"] -REVISIONS = ["issue838-v1"] -BUILDS = ["release32", "release64"] -CONFIG_NICKS = [ - ("lazy-greedy-cg-use-cache-{use_cache}".format(**locals()), [ - "--heuristic", "h=cg(use_cache={use_cache})".format(**locals()), - "--search", "lazy_greedy([h],preferred=[h])"]) - for use_cache in [True, False] -] -CONFIGS = [ - IssueConfig( - build + "-" + config_nick, - config, - build_options=[build], - driver_options=["--build", build]) - for rev in REVISIONS - for build in BUILDS - for config_nick, config in CONFIG_NICKS -] - -SUITE = common_setup.DEFAULT_SATISFICING_SUITE -ENVIRONMENT = BaselSlurmEnvironment( - partition="infai_2", - email="jendrik.seipp@unibas.ch", - export=["PATH", "DOWNWARD_BENCHMARKS"]) - -if common_setup.is_test_run(): - SUITE = IssueExperiment.DEFAULT_TEST_SUITE - ENVIRONMENT = LocalEnvironment(processes=1) - -exp = IssueExperiment( - revisions=REVISIONS, - configs=CONFIGS, - environment=ENVIRONMENT, -) -exp.add_suite(BENCHMARKS_DIR, SUITE) - -exp.add_parser(exp.EXITCODE_PARSER) -exp.add_parser(exp.SINGLE_SEARCH_PARSER) -exp.add_parser(exp.PLANNER_PARSER) - -exp.add_step('build', exp.build) -exp.add_step('start', exp.start_runs) -exp.add_fetcher(name='fetch') - -#exp.add_absolute_report_step() -#exp.add_comparison_table_step() - -attributes = IssueExperiment.DEFAULT_TABLE_ATTRIBUTES -algorithm_pairs = [ - ("issue838-v1-{build}-lazy-greedy-cg-use-cache-False".format(**locals()), - "issue838-v1-{build}-lazy-greedy-cg-use-cache-True".format(**locals()), - 
"Diff ({build})".format(**locals())) - for build in BUILDS] -exp.add_report( - ComparativeReport(algorithm_pairs, attributes=attributes), - name="{SCRIPT_NAME}".format(**locals())) - -exp.run_steps() diff --git a/experiments/issue838/v2-cache-size.py b/experiments/issue838/v2-cache-size.py deleted file mode 100755 index 56be4d47a8..0000000000 --- a/experiments/issue838/v2-cache-size.py +++ /dev/null @@ -1,68 +0,0 @@ -#! /usr/bin/env python -# -*- coding: utf-8 -*- - -import itertools -import os - -from lab.environments import LocalEnvironment, BaselSlurmEnvironment - -from downward.reports.compare import ComparativeReport - -import common_setup -from common_setup import IssueConfig, IssueExperiment -from relativescatter import RelativeScatterPlotReport - -DIR = os.path.dirname(os.path.abspath(__file__)) -SCRIPT_NAME = os.path.splitext(os.path.basename(__file__))[0] -BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"] -REVISIONS = ["issue838-v2"] -CONFIG_NICKS = [ - ("lazy-greedy-cg-cache-size-{cache_size}".format(**locals()), [ - "--heuristic", "h=cg(max_cache_size={cache_size})".format(**locals()), - "--search", "lazy_greedy([h],preferred=[h])"]) - for cache_size in ["0", "1K", "1M", "2M", "5M", "10M", "20M", "50M", "100M", "1000M"] -] -CONFIGS = [ - IssueConfig(config_nick, config) - for config_nick, config in CONFIG_NICKS -] - -SUITE = common_setup.DEFAULT_SATISFICING_SUITE -ENVIRONMENT = BaselSlurmEnvironment( - partition="infai_1", - email="jendrik.seipp@unibas.ch", - export=["PATH", "DOWNWARD_BENCHMARKS"]) - -if common_setup.is_test_run(): - SUITE = IssueExperiment.DEFAULT_TEST_SUITE - ENVIRONMENT = LocalEnvironment(processes=1) - -exp = IssueExperiment( - revisions=REVISIONS, - configs=CONFIGS, - environment=ENVIRONMENT, -) -exp.add_suite(BENCHMARKS_DIR, SUITE) - -exp.add_parser(exp.EXITCODE_PARSER) -exp.add_parser(exp.SINGLE_SEARCH_PARSER) -exp.add_parser(exp.PLANNER_PARSER) - -exp.add_step('build', exp.build) -exp.add_step('start', exp.start_runs) 
-exp.add_fetcher(name='fetch') - -exp.add_absolute_report_step() -#exp.add_comparison_table_step() - -#attributes = IssueExperiment.DEFAULT_TABLE_ATTRIBUTES -#algorithm_pairs = [ -# ("issue838-v1-{build}-lazy-greedy-cg-use-cache-False".format(**locals()), -# "issue838-v1-{build}-lazy-greedy-cg-use-cache-True".format(**locals()), -# "Diff ({build})".format(**locals())) -# for build in BUILDS] -#exp.add_report( -# ComparativeReport(algorithm_pairs, attributes=attributes), -# name="{SCRIPT_NAME}".format(**locals())) - -exp.run_steps() diff --git a/experiments/issue839/common_setup.py b/experiments/issue839/common_setup.py deleted file mode 100644 index f64c5c15b3..0000000000 --- a/experiments/issue839/common_setup.py +++ /dev/null @@ -1,394 +0,0 @@ -# -*- coding: utf-8 -*- - -import itertools -import os -import platform -import subprocess -import sys - -from lab.experiment import ARGPARSER -from lab import tools - -from downward.experiment import FastDownwardExperiment -from downward.reports.absolute import AbsoluteReport -from downward.reports.compare import ComparativeReport -from downward.reports.scatter import ScatterPlotReport - -from relativescatter import RelativeScatterPlotReport - - -def parse_args(): - ARGPARSER.add_argument( - "--test", - choices=["yes", "no", "auto"], - default="auto", - dest="test_run", - help="test experiment locally on a small suite if --test=yes or " - "--test=auto and we are not on a cluster") - return ARGPARSER.parse_args() - -ARGS = parse_args() - - -DEFAULT_OPTIMAL_SUITE = [ - 'agricola-opt18-strips', 'airport', 'barman-opt11-strips', - 'barman-opt14-strips', 'blocks', 'childsnack-opt14-strips', - 'data-network-opt18-strips', 'depot', 'driverlog', - 'elevators-opt08-strips', 'elevators-opt11-strips', - 'floortile-opt11-strips', 'floortile-opt14-strips', 'freecell', - 'ged-opt14-strips', 'grid', 'gripper', 'hiking-opt14-strips', - 'logistics00', 'logistics98', 'miconic', 'movie', 'mprime', - 'mystery', 'nomystery-opt11-strips', 
'openstacks-opt08-strips', - 'openstacks-opt11-strips', 'openstacks-opt14-strips', - 'openstacks-strips', 'organic-synthesis-opt18-strips', - 'organic-synthesis-split-opt18-strips', 'parcprinter-08-strips', - 'parcprinter-opt11-strips', 'parking-opt11-strips', - 'parking-opt14-strips', 'pathways-noneg', 'pegsol-08-strips', - 'pegsol-opt11-strips', 'petri-net-alignment-opt18-strips', - 'pipesworld-notankage', 'pipesworld-tankage', 'psr-small', 'rovers', - 'satellite', 'scanalyzer-08-strips', 'scanalyzer-opt11-strips', - 'snake-opt18-strips', 'sokoban-opt08-strips', - 'sokoban-opt11-strips', 'spider-opt18-strips', 'storage', - 'termes-opt18-strips', 'tetris-opt14-strips', - 'tidybot-opt11-strips', 'tidybot-opt14-strips', 'tpp', - 'transport-opt08-strips', 'transport-opt11-strips', - 'transport-opt14-strips', 'trucks-strips', 'visitall-opt11-strips', - 'visitall-opt14-strips', 'woodworking-opt08-strips', - 'woodworking-opt11-strips', 'zenotravel'] - -DEFAULT_SATISFICING_SUITE = [ - 'agricola-sat18-strips', 'airport', 'assembly', - 'barman-sat11-strips', 'barman-sat14-strips', 'blocks', - 'caldera-sat18-adl', 'caldera-split-sat18-adl', 'cavediving-14-adl', - 'childsnack-sat14-strips', 'citycar-sat14-adl', - 'data-network-sat18-strips', 'depot', 'driverlog', - 'elevators-sat08-strips', 'elevators-sat11-strips', - 'flashfill-sat18-adl', 'floortile-sat11-strips', - 'floortile-sat14-strips', 'freecell', 'ged-sat14-strips', 'grid', - 'gripper', 'hiking-sat14-strips', 'logistics00', 'logistics98', - 'maintenance-sat14-adl', 'miconic', 'miconic-fulladl', - 'miconic-simpleadl', 'movie', 'mprime', 'mystery', - 'nomystery-sat11-strips', 'nurikabe-sat18-adl', 'openstacks', - 'openstacks-sat08-adl', 'openstacks-sat08-strips', - 'openstacks-sat11-strips', 'openstacks-sat14-strips', - 'openstacks-strips', 'optical-telegraphs', - 'organic-synthesis-sat18-strips', - 'organic-synthesis-split-sat18-strips', 'parcprinter-08-strips', - 'parcprinter-sat11-strips', 'parking-sat11-strips', - 
'parking-sat14-strips', 'pathways', 'pathways-noneg', - 'pegsol-08-strips', 'pegsol-sat11-strips', 'philosophers', - 'pipesworld-notankage', 'pipesworld-tankage', 'psr-large', - 'psr-middle', 'psr-small', 'rovers', 'satellite', - 'scanalyzer-08-strips', 'scanalyzer-sat11-strips', 'schedule', - 'settlers-sat18-adl', 'snake-sat18-strips', 'sokoban-sat08-strips', - 'sokoban-sat11-strips', 'spider-sat18-strips', 'storage', - 'termes-sat18-strips', 'tetris-sat14-strips', - 'thoughtful-sat14-strips', 'tidybot-sat11-strips', 'tpp', - 'transport-sat08-strips', 'transport-sat11-strips', - 'transport-sat14-strips', 'trucks', 'trucks-strips', - 'visitall-sat11-strips', 'visitall-sat14-strips', - 'woodworking-sat08-strips', 'woodworking-sat11-strips', - 'zenotravel'] - - -def get_script(): - """Get file name of main script.""" - return tools.get_script_path() - - -def get_script_dir(): - """Get directory of main script. - - Usually a relative directory (depends on how it was called by the user.)""" - return os.path.dirname(get_script()) - - -def get_experiment_name(): - """Get name for experiment. - - Derived from the absolute filename of the main script, e.g. - "/ham/spam/eggs.py" => "spam-eggs".""" - script = os.path.abspath(get_script()) - script_dir = os.path.basename(os.path.dirname(script)) - script_base = os.path.splitext(os.path.basename(script))[0] - return "%s-%s" % (script_dir, script_base) - - -def get_data_dir(): - """Get data dir for the experiment. - - This is the subdirectory "data" of the directory containing - the main script.""" - return os.path.join(get_script_dir(), "data", get_experiment_name()) - - -def get_repo_base(): - """Get base directory of the repository, as an absolute path. - - Search upwards in the directory tree from the main script until a - directory with a subdirectory named ".hg" is found. 
- - Abort if the repo base cannot be found.""" - path = os.path.abspath(get_script_dir()) - while os.path.dirname(path) != path: - if os.path.exists(os.path.join(path, ".hg")): - return path - path = os.path.dirname(path) - sys.exit("repo base could not be found") - - -def is_running_on_cluster(): - node = platform.node() - return node.endswith(".scicore.unibas.ch") or node.endswith(".cluster.bc2.ch") - - -def is_test_run(): - return ARGS.test_run == "yes" or ( - ARGS.test_run == "auto" and not is_running_on_cluster()) - - -def get_algo_nick(revision, config_nick): - return "{revision}-{config_nick}".format(**locals()) - - -class IssueConfig(object): - """Hold information about a planner configuration. - - See FastDownwardExperiment.add_algorithm() for documentation of the - constructor's options. - - """ - def __init__(self, nick, component_options, - build_options=None, driver_options=None): - self.nick = nick - self.component_options = component_options - self.build_options = build_options - self.driver_options = driver_options - - -class IssueExperiment(FastDownwardExperiment): - """Subclass of FastDownwardExperiment with some convenience features.""" - - DEFAULT_TEST_SUITE = ["depot:p01.pddl", "gripper:prob01.pddl"] - - DEFAULT_TABLE_ATTRIBUTES = [ - "cost", - "coverage", - "error", - "evaluations", - "expansions", - "expansions_until_last_jump", - "generated", - "memory", - "planner_memory", - "planner_time", - "quality", - "run_dir", - "score_evaluations", - "score_expansions", - "score_generated", - "score_memory", - "score_search_time", - "score_total_time", - "search_time", - "total_time", - ] - - DEFAULT_SCATTER_PLOT_ATTRIBUTES = [ - "evaluations", - "expansions", - "expansions_until_last_jump", - "initial_h_value", - "memory", - "search_time", - "total_time", - ] - - PORTFOLIO_ATTRIBUTES = [ - "cost", - "coverage", - "error", - "plan_length", - "run_dir", - ] - - def __init__(self, revisions=None, configs=None, path=None, **kwargs): - """ - - You can 
either specify both *revisions* and *configs* or none - of them. If they are omitted, you will need to call - exp.add_algorithm() manually. - - If *revisions* is given, it must be a non-empty list of - revision identifiers, which specify which planner versions to - use in the experiment. The same versions are used for - translator, preprocessor and search. :: - - IssueExperiment(revisions=["issue123", "4b3d581643"], ...) - - If *configs* is given, it must be a non-empty list of - IssueConfig objects. :: - - IssueExperiment(..., configs=[ - IssueConfig("ff", ["--search", "eager_greedy(ff())"]), - IssueConfig( - "lama", [], - driver_options=["--alias", "seq-sat-lama-2011"]), - ]) - - If *path* is specified, it must be the path to where the - experiment should be built (e.g. - /home/john/experiments/issue123/exp01/). If omitted, the - experiment path is derived automatically from the main - script's filename. Example:: - - script = experiments/issue123/exp01.py --> - path = experiments/issue123/data/issue123-exp01/ - - """ - - path = path or get_data_dir() - - FastDownwardExperiment.__init__(self, path=path, **kwargs) - - if (revisions and not configs) or (not revisions and configs): - raise ValueError( - "please provide either both or none of revisions and configs") - - for rev in revisions: - for config in configs: - self.add_algorithm( - get_algo_nick(rev, config.nick), - get_repo_base(), - rev, - config.component_options, - build_options=config.build_options, - driver_options=config.driver_options) - - self._revisions = revisions - self._configs = configs - - @classmethod - def _is_portfolio(cls, config_nick): - return "fdss" in config_nick - - @classmethod - def get_supported_attributes(cls, config_nick, attributes): - if cls._is_portfolio(config_nick): - return [attr for attr in attributes - if attr in cls.PORTFOLIO_ATTRIBUTES] - return attributes - - def add_absolute_report_step(self, **kwargs): - """Add step that makes an absolute report. 
- - Absolute reports are useful for experiments that don't compare - revisions. - - The report is written to the experiment evaluation directory. - - All *kwargs* will be passed to the AbsoluteReport class. If the - keyword argument *attributes* is not specified, a default list - of attributes is used. :: - - exp.add_absolute_report_step(attributes=["coverage"]) - - """ - kwargs.setdefault("attributes", self.DEFAULT_TABLE_ATTRIBUTES) - report = AbsoluteReport(**kwargs) - outfile = os.path.join( - self.eval_dir, - get_experiment_name() + "." + report.output_format) - self.add_report(report, outfile=outfile) - self.add_step( - 'publish-absolute-report', subprocess.call, ['publish', outfile]) - - def add_comparison_table_step(self, **kwargs): - """Add a step that makes pairwise revision comparisons. - - Create comparative reports for all pairs of Fast Downward - revisions. Each report pairs up the runs of the same config and - lists the two absolute attribute values and their difference - for all attributes in kwargs["attributes"]. - - All *kwargs* will be passed to the CompareConfigsReport class. - If the keyword argument *attributes* is not specified, a - default list of attributes is used. 
:: - - exp.add_comparison_table_step(attributes=["coverage"]) - - """ - kwargs.setdefault("attributes", self.DEFAULT_TABLE_ATTRIBUTES) - - def make_comparison_tables(): - for rev1, rev2 in itertools.combinations(self._revisions, 2): - compared_configs = [] - for config in self._configs: - config_nick = config.nick - compared_configs.append( - ("%s-%s" % (rev1, config_nick), - "%s-%s" % (rev2, config_nick), - "Diff (%s)" % config_nick)) - report = ComparativeReport(compared_configs, **kwargs) - outfile = os.path.join( - self.eval_dir, - "%s-%s-%s-compare.%s" % ( - self.name, rev1, rev2, report.output_format)) - report(self.eval_dir, outfile) - - def publish_comparison_tables(): - for rev1, rev2 in itertools.combinations(self._revisions, 2): - outfile = os.path.join( - self.eval_dir, - "%s-%s-%s-compare.html" % (self.name, rev1, rev2)) - subprocess.call(["publish", outfile]) - - self.add_step("make-comparison-tables", make_comparison_tables) - self.add_step( - "publish-comparison-tables", publish_comparison_tables) - - def add_scatter_plot_step(self, relative=False, attributes=None): - """Add step creating (relative) scatter plots for all revision pairs. - - Create a scatter plot for each combination of attribute, - configuration and revisions pair. If *attributes* is not - specified, a list of common scatter plot attributes is used. - For portfolios all attributes except "cost", "coverage" and - "plan_length" will be ignored. 
:: - - exp.add_scatter_plot_step(attributes=["expansions"]) - - """ - if relative: - report_class = RelativeScatterPlotReport - scatter_dir = os.path.join(self.eval_dir, "scatter-relative") - step_name = "make-relative-scatter-plots" - else: - report_class = ScatterPlotReport - scatter_dir = os.path.join(self.eval_dir, "scatter-absolute") - step_name = "make-absolute-scatter-plots" - if attributes is None: - attributes = self.DEFAULT_SCATTER_PLOT_ATTRIBUTES - - def make_scatter_plot(config_nick, rev1, rev2, attribute): - name = "-".join([self.name, rev1, rev2, attribute, config_nick]) - print "Make scatter plot for", name - algo1 = "{}-{}".format(rev1, config_nick) - algo2 = "{}-{}".format(rev2, config_nick) - report = report_class( - filter_config=[algo1, algo2], - attributes=[attribute], - get_category=lambda run1, run2: run1["domain"], - legend_location=(1.3, 0.5)) - report( - self.eval_dir, - os.path.join(scatter_dir, rev1 + "-" + rev2, name)) - - def make_scatter_plots(): - for config in self._configs: - for rev1, rev2 in itertools.combinations(self._revisions, 2): - for attribute in self.get_supported_attributes( - config.nick, attributes): - make_scatter_plot(config.nick, rev1, rev2, attribute) - - self.add_step(step_name, make_scatter_plots) diff --git a/experiments/issue839/relativescatter.py b/experiments/issue839/relativescatter.py deleted file mode 100644 index f74cb6e721..0000000000 --- a/experiments/issue839/relativescatter.py +++ /dev/null @@ -1,105 +0,0 @@ -# -*- coding: utf-8 -*- - -from collections import defaultdict - -from matplotlib import ticker - -from downward.reports.scatter import ScatterPlotReport -from downward.reports.plot import PlotReport, Matplotlib, MatplotlibPlot - - -# TODO: handle outliers - -# TODO: this is mostly copied from ScatterMatplotlib (scatter.py) -class RelativeScatterMatplotlib(Matplotlib): - @classmethod - def _plot(cls, report, axes, categories, styles): - # Display grid - axes.grid(b=True, linestyle='-', 
color='0.75') - - has_points = False - # Generate the scatter plots - for category, coords in sorted(categories.items()): - X, Y = zip(*coords) - axes.scatter(X, Y, s=42, label=category, **styles[category]) - if X and Y: - has_points = True - - if report.xscale == 'linear' or report.yscale == 'linear': - plot_size = report.missing_val * 1.01 - else: - plot_size = report.missing_val * 1.25 - - # make 5 ticks above and below 1 - yticks = [] - tick_step = report.ylim_top**(1/5.0) - for i in xrange(-5, 6): - yticks.append(tick_step**i) - axes.set_yticks(yticks) - axes.get_yaxis().set_major_formatter(ticker.ScalarFormatter()) - - axes.set_xlim(report.xlim_left or -1, report.xlim_right or plot_size) - axes.set_ylim(report.ylim_bottom or -1, report.ylim_top or plot_size) - - for axis in [axes.xaxis, axes.yaxis]: - MatplotlibPlot.change_axis_formatter( - axis, - report.missing_val if report.show_missing else None) - return has_points - - -class RelativeScatterPlotReport(ScatterPlotReport): - """ - Generate a scatter plot that shows a relative comparison of two - algorithms with regard to the given attribute. The attribute value - of algorithm 1 is shown on the x-axis and the relation to the value - of algorithm 2 on the y-axis. - """ - - def __init__(self, show_missing=True, get_category=None, **kwargs): - ScatterPlotReport.__init__(self, show_missing, get_category, **kwargs) - if self.output_format == 'tex': - raise "not supported" - else: - self.writer = RelativeScatterMatplotlib - - def _fill_categories(self, runs): - # We discard the *runs* parameter. 
- # Map category names to value tuples - categories = defaultdict(list) - self.ylim_bottom = 2 - self.ylim_top = 0.5 - self.xlim_left = float("inf") - for (domain, problem), runs in self.problem_runs.items(): - if len(runs) != 2: - continue - run1, run2 = runs - assert (run1['algorithm'] == self.algorithms[0] and - run2['algorithm'] == self.algorithms[1]) - val1 = run1.get(self.attribute) - val2 = run2.get(self.attribute) - if val1 is None or val2 is None: - continue - category = self.get_category(run1, run2) - assert val1 > 0, (domain, problem, self.algorithms[0], val1) - assert val2 > 0, (domain, problem, self.algorithms[1], val2) - x = val1 - y = val2 / float(val1) - - categories[category].append((x, y)) - - self.ylim_top = max(self.ylim_top, y) - self.ylim_bottom = min(self.ylim_bottom, y) - self.xlim_left = min(self.xlim_left, x) - - # center around 1 - if self.ylim_bottom < 1: - self.ylim_top = max(self.ylim_top, 1 / float(self.ylim_bottom)) - if self.ylim_top > 1: - self.ylim_bottom = min(self.ylim_bottom, 1 / float(self.ylim_top)) - return categories - - def _set_scales(self, xscale, yscale): - # ScatterPlot uses log-scaling on the x-axis by default. - PlotReport._set_scales( - self, xscale or self.attribute.scale or 'log', 'log') diff --git a/experiments/issue839/v1-lama-first.py b/experiments/issue839/v1-lama-first.py deleted file mode 100755 index 172c338666..0000000000 --- a/experiments/issue839/v1-lama-first.py +++ /dev/null @@ -1,93 +0,0 @@ -#! 
/usr/bin/env python -# -*- coding: utf-8 -*- - -import itertools -import os - -from lab.environments import LocalEnvironment, BaselSlurmEnvironment - -from downward.reports.compare import ComparativeReport - -import common_setup -from common_setup import IssueConfig, IssueExperiment -from relativescatter import RelativeScatterPlotReport - -DIR = os.path.dirname(os.path.abspath(__file__)) -SCRIPT_NAME = os.path.splitext(os.path.basename(__file__))[0] -BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"] -REVISIONS = ["issue839-v1"] -BUILDS = ["release32"] -CONFIG_NICKS = [ - ("lama-first-syn", [ - "--heuristic", - """hlm=lama_synergy(lm_rhw(reasonable_orders=true), - transform=adapt_costs(one))""", - "--heuristic", "hff=ff_synergy(hlm)", - "--search", """lazy_greedy([hff,hlm],preferred=[hff,hlm], - cost_type=one,reopen_closed=false)"""]), - ("lama-first-no-syn-pref-false", [ - "--heuristic", - "hlm=lmcount(lm_factory=lm_rhw(reasonable_orders=true), transform=adapt_costs(one), pref=false)", - "--heuristic", "hff=ff(transform=adapt_costs(one))", - "--search", """lazy_greedy([hff,hlm],preferred=[hff,hlm], - cost_type=one,reopen_closed=false)"""]), - ("lama-first-no-syn-pref-true", [ - "--heuristic", - "hlm=lmcount(lm_factory=lm_rhw(reasonable_orders=true), transform=adapt_costs(one), pref=true)", - "--heuristic", "hff=ff(transform=adapt_costs(one))", - "--search", """lazy_greedy([hff,hlm],preferred=[hff,hlm], - cost_type=one,reopen_closed=false)"""]), -] -CONFIGS = [ - IssueConfig( - config_nick, - config, - build_options=[build], - driver_options=["--build", build]) - for rev in REVISIONS - for build in BUILDS - for config_nick, config in CONFIG_NICKS -] - -SUITE = common_setup.DEFAULT_SATISFICING_SUITE -ENVIRONMENT = BaselSlurmEnvironment( - partition="infai_2", - email="jendrik.seipp@unibas.ch", - export=["PATH", "DOWNWARD_BENCHMARKS"]) - -if common_setup.is_test_run(): - SUITE = IssueExperiment.DEFAULT_TEST_SUITE - ENVIRONMENT = LocalEnvironment(processes=1) - -exp = 
IssueExperiment( - revisions=REVISIONS, - configs=CONFIGS, - environment=ENVIRONMENT, -) -exp.add_suite(BENCHMARKS_DIR, SUITE) - -exp.add_parser(exp.EXITCODE_PARSER) -exp.add_parser(exp.SINGLE_SEARCH_PARSER) -exp.add_parser(exp.PLANNER_PARSER) - -exp.add_step('build', exp.build) -exp.add_step('start', exp.start_runs) -exp.add_fetcher(name='fetch') - -#exp.add_absolute_report_step() -#exp.add_comparison_table_step() - -attributes = IssueExperiment.DEFAULT_TABLE_ATTRIBUTES - - -for build in BUILDS: - algorithm_pairs = [ - ("{rev}-{nick1}".format(**locals()), - "{rev}-{nick2}".format(**locals()), - "Diff ({rev})".format(**locals())) - for (nick1, _), (nick2, _) in itertools.combinations(CONFIG_NICKS, 2)] - exp.add_report( - ComparativeReport(algorithm_pairs, attributes=attributes), - name="issue839-{nick1}-vs-{nick2}".format(**locals())) - -exp.run_steps() diff --git a/experiments/issue839/v1-lama.py b/experiments/issue839/v1-lama.py deleted file mode 100755 index 0c37308377..0000000000 --- a/experiments/issue839/v1-lama.py +++ /dev/null @@ -1,136 +0,0 @@ -#! 
/usr/bin/env python -# -*- coding: utf-8 -*- - -import itertools -import os - -from lab.environments import LocalEnvironment, BaselSlurmEnvironment - -from downward.reports.compare import ComparativeReport - -import common_setup -from common_setup import IssueConfig, IssueExperiment -from relativescatter import RelativeScatterPlotReport - -DIR = os.path.dirname(os.path.abspath(__file__)) -SCRIPT_NAME = os.path.splitext(os.path.basename(__file__))[0] -BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"] -REVISIONS = ["issue839-v1"] -BUILDS = ["release32"] -CONFIG_NICKS = [ - ("lama-syn", [ - "--if-unit-cost", - "--evaluator", - "hlm=lama_synergy(lm_rhw(reasonable_orders=true))", - "--evaluator", "hff=ff_synergy(hlm)", - "--search", """iterated([ - lazy_greedy([hff,hlm],preferred=[hff,hlm]), - lazy_wastar([hff,hlm],preferred=[hff,hlm],w=5), - lazy_wastar([hff,hlm],preferred=[hff,hlm],w=3), - lazy_wastar([hff,hlm],preferred=[hff,hlm],w=2), - lazy_wastar([hff,hlm],preferred=[hff,hlm],w=1) - ],repeat_last=true,continue_on_fail=true)""", - "--if-non-unit-cost", - "--evaluator", - "hlm1=lama_synergy(lm_rhw(reasonable_orders=true),transform=adapt_costs(one))", - "--evaluator", "hff1=ff_synergy(hlm1)", - "--evaluator", - "hlm2=lama_synergy(lm_rhw(reasonable_orders=true),transform=adapt_costs(plusone))", - "--evaluator", "hff2=ff_synergy(hlm2)", - "--search", """iterated([ - lazy_greedy([hff1,hlm1],preferred=[hff1,hlm1], - cost_type=one,reopen_closed=false), - lazy_greedy([hff2,hlm2],preferred=[hff2,hlm2], - reopen_closed=false), - lazy_wastar([hff2,hlm2],preferred=[hff2,hlm2],w=5), - lazy_wastar([hff2,hlm2],preferred=[hff2,hlm2],w=3), - lazy_wastar([hff2,hlm2],preferred=[hff2,hlm2],w=2), - lazy_wastar([hff2,hlm2],preferred=[hff2,hlm2],w=1) - ],repeat_last=true,continue_on_fail=true)""", - "--always"]), -] + [ - ("lama-no-syn-pref-{pref}".format(**locals()), [ - "--if-unit-cost", - "--evaluator", - "hlm=lmcount(lm_rhw(reasonable_orders=true), pref={pref})".format(**locals()), 
- "--evaluator", "hff=ff()", - "--search", """iterated([ - lazy_greedy([hff,hlm],preferred=[hff,hlm]), - lazy_wastar([hff,hlm],preferred=[hff,hlm],w=5), - lazy_wastar([hff,hlm],preferred=[hff,hlm],w=3), - lazy_wastar([hff,hlm],preferred=[hff,hlm],w=2), - lazy_wastar([hff,hlm],preferred=[hff,hlm],w=1) - ],repeat_last=true,continue_on_fail=true)""", - "--if-non-unit-cost", - "--evaluator", - "hlm1=lmcount(lm_rhw(reasonable_orders=true), transform=adapt_costs(one), pref={pref})".format(**locals()), - "--evaluator", "hff1=ff(transform=adapt_costs(one))", - "--evaluator", - "hlm2=lmcount(lm_rhw(reasonable_orders=true), transform=adapt_costs(plusone), pref={pref})".format(**locals()), - "--evaluator", "hff2=ff(transform=adapt_costs(plusone))", - "--search", """iterated([ - lazy_greedy([hff1,hlm1],preferred=[hff1,hlm1], - cost_type=one,reopen_closed=false), - lazy_greedy([hff2,hlm2],preferred=[hff2,hlm2], - reopen_closed=false), - lazy_wastar([hff2,hlm2],preferred=[hff2,hlm2],w=5), - lazy_wastar([hff2,hlm2],preferred=[hff2,hlm2],w=3), - lazy_wastar([hff2,hlm2],preferred=[hff2,hlm2],w=2), - lazy_wastar([hff2,hlm2],preferred=[hff2,hlm2],w=1) - ],repeat_last=true,continue_on_fail=true)""", - "--always"]) - for pref in [True, False] -] -CONFIGS = [ - IssueConfig( - config_nick, - config, - build_options=[build], - driver_options=["--build", build]) - for rev in REVISIONS - for build in BUILDS - for config_nick, config in CONFIG_NICKS -] - -SUITE = common_setup.DEFAULT_SATISFICING_SUITE -ENVIRONMENT = BaselSlurmEnvironment( - partition="infai_2", - email="jendrik.seipp@unibas.ch", - export=["PATH", "DOWNWARD_BENCHMARKS"]) - -if common_setup.is_test_run(): - SUITE = IssueExperiment.DEFAULT_TEST_SUITE - ENVIRONMENT = LocalEnvironment(processes=1) - -exp = IssueExperiment( - revisions=REVISIONS, - configs=CONFIGS, - environment=ENVIRONMENT, -) -exp.add_suite(BENCHMARKS_DIR, SUITE) - -exp.add_parser(exp.EXITCODE_PARSER) -exp.add_parser(exp.ANYTIME_SEARCH_PARSER) 
-exp.add_parser(exp.PLANNER_PARSER) - -exp.add_step('build', exp.build) -exp.add_step('start', exp.start_runs) -exp.add_fetcher(name='fetch') - -#exp.add_absolute_report_step() -#exp.add_comparison_table_step() - -attributes = IssueExperiment.DEFAULT_TABLE_ATTRIBUTES - - -for build in BUILDS: - algorithm_pairs = [ - ("{rev}-{nick1}".format(**locals()), - "{rev}-{nick2}".format(**locals()), - "Diff ({rev})".format(**locals())) - for (nick1, _), (nick2, _) in itertools.combinations(CONFIG_NICKS, 2)] - exp.add_report( - ComparativeReport(algorithm_pairs, attributes=attributes), - name="issue839-{nick1}-vs-{nick2}".format(**locals())) - -exp.run_steps() diff --git a/experiments/issue846/common_setup.py b/experiments/issue846/common_setup.py deleted file mode 100644 index f64c5c15b3..0000000000 --- a/experiments/issue846/common_setup.py +++ /dev/null @@ -1,394 +0,0 @@ -# -*- coding: utf-8 -*- - -import itertools -import os -import platform -import subprocess -import sys - -from lab.experiment import ARGPARSER -from lab import tools - -from downward.experiment import FastDownwardExperiment -from downward.reports.absolute import AbsoluteReport -from downward.reports.compare import ComparativeReport -from downward.reports.scatter import ScatterPlotReport - -from relativescatter import RelativeScatterPlotReport - - -def parse_args(): - ARGPARSER.add_argument( - "--test", - choices=["yes", "no", "auto"], - default="auto", - dest="test_run", - help="test experiment locally on a small suite if --test=yes or " - "--test=auto and we are not on a cluster") - return ARGPARSER.parse_args() - -ARGS = parse_args() - - -DEFAULT_OPTIMAL_SUITE = [ - 'agricola-opt18-strips', 'airport', 'barman-opt11-strips', - 'barman-opt14-strips', 'blocks', 'childsnack-opt14-strips', - 'data-network-opt18-strips', 'depot', 'driverlog', - 'elevators-opt08-strips', 'elevators-opt11-strips', - 'floortile-opt11-strips', 'floortile-opt14-strips', 'freecell', - 'ged-opt14-strips', 'grid', 'gripper', 
'hiking-opt14-strips', - 'logistics00', 'logistics98', 'miconic', 'movie', 'mprime', - 'mystery', 'nomystery-opt11-strips', 'openstacks-opt08-strips', - 'openstacks-opt11-strips', 'openstacks-opt14-strips', - 'openstacks-strips', 'organic-synthesis-opt18-strips', - 'organic-synthesis-split-opt18-strips', 'parcprinter-08-strips', - 'parcprinter-opt11-strips', 'parking-opt11-strips', - 'parking-opt14-strips', 'pathways-noneg', 'pegsol-08-strips', - 'pegsol-opt11-strips', 'petri-net-alignment-opt18-strips', - 'pipesworld-notankage', 'pipesworld-tankage', 'psr-small', 'rovers', - 'satellite', 'scanalyzer-08-strips', 'scanalyzer-opt11-strips', - 'snake-opt18-strips', 'sokoban-opt08-strips', - 'sokoban-opt11-strips', 'spider-opt18-strips', 'storage', - 'termes-opt18-strips', 'tetris-opt14-strips', - 'tidybot-opt11-strips', 'tidybot-opt14-strips', 'tpp', - 'transport-opt08-strips', 'transport-opt11-strips', - 'transport-opt14-strips', 'trucks-strips', 'visitall-opt11-strips', - 'visitall-opt14-strips', 'woodworking-opt08-strips', - 'woodworking-opt11-strips', 'zenotravel'] - -DEFAULT_SATISFICING_SUITE = [ - 'agricola-sat18-strips', 'airport', 'assembly', - 'barman-sat11-strips', 'barman-sat14-strips', 'blocks', - 'caldera-sat18-adl', 'caldera-split-sat18-adl', 'cavediving-14-adl', - 'childsnack-sat14-strips', 'citycar-sat14-adl', - 'data-network-sat18-strips', 'depot', 'driverlog', - 'elevators-sat08-strips', 'elevators-sat11-strips', - 'flashfill-sat18-adl', 'floortile-sat11-strips', - 'floortile-sat14-strips', 'freecell', 'ged-sat14-strips', 'grid', - 'gripper', 'hiking-sat14-strips', 'logistics00', 'logistics98', - 'maintenance-sat14-adl', 'miconic', 'miconic-fulladl', - 'miconic-simpleadl', 'movie', 'mprime', 'mystery', - 'nomystery-sat11-strips', 'nurikabe-sat18-adl', 'openstacks', - 'openstacks-sat08-adl', 'openstacks-sat08-strips', - 'openstacks-sat11-strips', 'openstacks-sat14-strips', - 'openstacks-strips', 'optical-telegraphs', - 
'organic-synthesis-sat18-strips', - 'organic-synthesis-split-sat18-strips', 'parcprinter-08-strips', - 'parcprinter-sat11-strips', 'parking-sat11-strips', - 'parking-sat14-strips', 'pathways', 'pathways-noneg', - 'pegsol-08-strips', 'pegsol-sat11-strips', 'philosophers', - 'pipesworld-notankage', 'pipesworld-tankage', 'psr-large', - 'psr-middle', 'psr-small', 'rovers', 'satellite', - 'scanalyzer-08-strips', 'scanalyzer-sat11-strips', 'schedule', - 'settlers-sat18-adl', 'snake-sat18-strips', 'sokoban-sat08-strips', - 'sokoban-sat11-strips', 'spider-sat18-strips', 'storage', - 'termes-sat18-strips', 'tetris-sat14-strips', - 'thoughtful-sat14-strips', 'tidybot-sat11-strips', 'tpp', - 'transport-sat08-strips', 'transport-sat11-strips', - 'transport-sat14-strips', 'trucks', 'trucks-strips', - 'visitall-sat11-strips', 'visitall-sat14-strips', - 'woodworking-sat08-strips', 'woodworking-sat11-strips', - 'zenotravel'] - - -def get_script(): - """Get file name of main script.""" - return tools.get_script_path() - - -def get_script_dir(): - """Get directory of main script. - - Usually a relative directory (depends on how it was called by the user.)""" - return os.path.dirname(get_script()) - - -def get_experiment_name(): - """Get name for experiment. - - Derived from the absolute filename of the main script, e.g. - "/ham/spam/eggs.py" => "spam-eggs".""" - script = os.path.abspath(get_script()) - script_dir = os.path.basename(os.path.dirname(script)) - script_base = os.path.splitext(os.path.basename(script))[0] - return "%s-%s" % (script_dir, script_base) - - -def get_data_dir(): - """Get data dir for the experiment. - - This is the subdirectory "data" of the directory containing - the main script.""" - return os.path.join(get_script_dir(), "data", get_experiment_name()) - - -def get_repo_base(): - """Get base directory of the repository, as an absolute path. 
- - Search upwards in the directory tree from the main script until a - directory with a subdirectory named ".hg" is found. - - Abort if the repo base cannot be found.""" - path = os.path.abspath(get_script_dir()) - while os.path.dirname(path) != path: - if os.path.exists(os.path.join(path, ".hg")): - return path - path = os.path.dirname(path) - sys.exit("repo base could not be found") - - -def is_running_on_cluster(): - node = platform.node() - return node.endswith(".scicore.unibas.ch") or node.endswith(".cluster.bc2.ch") - - -def is_test_run(): - return ARGS.test_run == "yes" or ( - ARGS.test_run == "auto" and not is_running_on_cluster()) - - -def get_algo_nick(revision, config_nick): - return "{revision}-{config_nick}".format(**locals()) - - -class IssueConfig(object): - """Hold information about a planner configuration. - - See FastDownwardExperiment.add_algorithm() for documentation of the - constructor's options. - - """ - def __init__(self, nick, component_options, - build_options=None, driver_options=None): - self.nick = nick - self.component_options = component_options - self.build_options = build_options - self.driver_options = driver_options - - -class IssueExperiment(FastDownwardExperiment): - """Subclass of FastDownwardExperiment with some convenience features.""" - - DEFAULT_TEST_SUITE = ["depot:p01.pddl", "gripper:prob01.pddl"] - - DEFAULT_TABLE_ATTRIBUTES = [ - "cost", - "coverage", - "error", - "evaluations", - "expansions", - "expansions_until_last_jump", - "generated", - "memory", - "planner_memory", - "planner_time", - "quality", - "run_dir", - "score_evaluations", - "score_expansions", - "score_generated", - "score_memory", - "score_search_time", - "score_total_time", - "search_time", - "total_time", - ] - - DEFAULT_SCATTER_PLOT_ATTRIBUTES = [ - "evaluations", - "expansions", - "expansions_until_last_jump", - "initial_h_value", - "memory", - "search_time", - "total_time", - ] - - PORTFOLIO_ATTRIBUTES = [ - "cost", - "coverage", - "error", - 
"plan_length", - "run_dir", - ] - - def __init__(self, revisions=None, configs=None, path=None, **kwargs): - """ - - You can either specify both *revisions* and *configs* or none - of them. If they are omitted, you will need to call - exp.add_algorithm() manually. - - If *revisions* is given, it must be a non-empty list of - revision identifiers, which specify which planner versions to - use in the experiment. The same versions are used for - translator, preprocessor and search. :: - - IssueExperiment(revisions=["issue123", "4b3d581643"], ...) - - If *configs* is given, it must be a non-empty list of - IssueConfig objects. :: - - IssueExperiment(..., configs=[ - IssueConfig("ff", ["--search", "eager_greedy(ff())"]), - IssueConfig( - "lama", [], - driver_options=["--alias", "seq-sat-lama-2011"]), - ]) - - If *path* is specified, it must be the path to where the - experiment should be built (e.g. - /home/john/experiments/issue123/exp01/). If omitted, the - experiment path is derived automatically from the main - script's filename. 
Example:: - - script = experiments/issue123/exp01.py --> - path = experiments/issue123/data/issue123-exp01/ - - """ - - path = path or get_data_dir() - - FastDownwardExperiment.__init__(self, path=path, **kwargs) - - if (revisions and not configs) or (not revisions and configs): - raise ValueError( - "please provide either both or none of revisions and configs") - - for rev in revisions: - for config in configs: - self.add_algorithm( - get_algo_nick(rev, config.nick), - get_repo_base(), - rev, - config.component_options, - build_options=config.build_options, - driver_options=config.driver_options) - - self._revisions = revisions - self._configs = configs - - @classmethod - def _is_portfolio(cls, config_nick): - return "fdss" in config_nick - - @classmethod - def get_supported_attributes(cls, config_nick, attributes): - if cls._is_portfolio(config_nick): - return [attr for attr in attributes - if attr in cls.PORTFOLIO_ATTRIBUTES] - return attributes - - def add_absolute_report_step(self, **kwargs): - """Add step that makes an absolute report. - - Absolute reports are useful for experiments that don't compare - revisions. - - The report is written to the experiment evaluation directory. - - All *kwargs* will be passed to the AbsoluteReport class. If the - keyword argument *attributes* is not specified, a default list - of attributes is used. :: - - exp.add_absolute_report_step(attributes=["coverage"]) - - """ - kwargs.setdefault("attributes", self.DEFAULT_TABLE_ATTRIBUTES) - report = AbsoluteReport(**kwargs) - outfile = os.path.join( - self.eval_dir, - get_experiment_name() + "." + report.output_format) - self.add_report(report, outfile=outfile) - self.add_step( - 'publish-absolute-report', subprocess.call, ['publish', outfile]) - - def add_comparison_table_step(self, **kwargs): - """Add a step that makes pairwise revision comparisons. - - Create comparative reports for all pairs of Fast Downward - revisions. 
Each report pairs up the runs of the same config and - lists the two absolute attribute values and their difference - for all attributes in kwargs["attributes"]. - - All *kwargs* will be passed to the CompareConfigsReport class. - If the keyword argument *attributes* is not specified, a - default list of attributes is used. :: - - exp.add_comparison_table_step(attributes=["coverage"]) - - """ - kwargs.setdefault("attributes", self.DEFAULT_TABLE_ATTRIBUTES) - - def make_comparison_tables(): - for rev1, rev2 in itertools.combinations(self._revisions, 2): - compared_configs = [] - for config in self._configs: - config_nick = config.nick - compared_configs.append( - ("%s-%s" % (rev1, config_nick), - "%s-%s" % (rev2, config_nick), - "Diff (%s)" % config_nick)) - report = ComparativeReport(compared_configs, **kwargs) - outfile = os.path.join( - self.eval_dir, - "%s-%s-%s-compare.%s" % ( - self.name, rev1, rev2, report.output_format)) - report(self.eval_dir, outfile) - - def publish_comparison_tables(): - for rev1, rev2 in itertools.combinations(self._revisions, 2): - outfile = os.path.join( - self.eval_dir, - "%s-%s-%s-compare.html" % (self.name, rev1, rev2)) - subprocess.call(["publish", outfile]) - - self.add_step("make-comparison-tables", make_comparison_tables) - self.add_step( - "publish-comparison-tables", publish_comparison_tables) - - def add_scatter_plot_step(self, relative=False, attributes=None): - """Add step creating (relative) scatter plots for all revision pairs. - - Create a scatter plot for each combination of attribute, - configuration and revisions pair. If *attributes* is not - specified, a list of common scatter plot attributes is used. - For portfolios all attributes except "cost", "coverage" and - "plan_length" will be ignored. 
:: - - exp.add_scatter_plot_step(attributes=["expansions"]) - - """ - if relative: - report_class = RelativeScatterPlotReport - scatter_dir = os.path.join(self.eval_dir, "scatter-relative") - step_name = "make-relative-scatter-plots" - else: - report_class = ScatterPlotReport - scatter_dir = os.path.join(self.eval_dir, "scatter-absolute") - step_name = "make-absolute-scatter-plots" - if attributes is None: - attributes = self.DEFAULT_SCATTER_PLOT_ATTRIBUTES - - def make_scatter_plot(config_nick, rev1, rev2, attribute): - name = "-".join([self.name, rev1, rev2, attribute, config_nick]) - print "Make scatter plot for", name - algo1 = "{}-{}".format(rev1, config_nick) - algo2 = "{}-{}".format(rev2, config_nick) - report = report_class( - filter_config=[algo1, algo2], - attributes=[attribute], - get_category=lambda run1, run2: run1["domain"], - legend_location=(1.3, 0.5)) - report( - self.eval_dir, - os.path.join(scatter_dir, rev1 + "-" + rev2, name)) - - def make_scatter_plots(): - for config in self._configs: - for rev1, rev2 in itertools.combinations(self._revisions, 2): - for attribute in self.get_supported_attributes( - config.nick, attributes): - make_scatter_plot(config.nick, rev1, rev2, attribute) - - self.add_step(step_name, make_scatter_plots) diff --git a/experiments/issue846/relativescatter.py b/experiments/issue846/relativescatter.py deleted file mode 100644 index f74cb6e721..0000000000 --- a/experiments/issue846/relativescatter.py +++ /dev/null @@ -1,105 +0,0 @@ -# -*- coding: utf-8 -*- - -from collections import defaultdict - -from matplotlib import ticker - -from downward.reports.scatter import ScatterPlotReport -from downward.reports.plot import PlotReport, Matplotlib, MatplotlibPlot - - -# TODO: handle outliers - -# TODO: this is mostly copied from ScatterMatplotlib (scatter.py) -class RelativeScatterMatplotlib(Matplotlib): - @classmethod - def _plot(cls, report, axes, categories, styles): - # Display grid - axes.grid(b=True, linestyle='-', 
color='0.75') - - has_points = False - # Generate the scatter plots - for category, coords in sorted(categories.items()): - X, Y = zip(*coords) - axes.scatter(X, Y, s=42, label=category, **styles[category]) - if X and Y: - has_points = True - - if report.xscale == 'linear' or report.yscale == 'linear': - plot_size = report.missing_val * 1.01 - else: - plot_size = report.missing_val * 1.25 - - # make 5 ticks above and below 1 - yticks = [] - tick_step = report.ylim_top**(1/5.0) - for i in xrange(-5, 6): - yticks.append(tick_step**i) - axes.set_yticks(yticks) - axes.get_yaxis().set_major_formatter(ticker.ScalarFormatter()) - - axes.set_xlim(report.xlim_left or -1, report.xlim_right or plot_size) - axes.set_ylim(report.ylim_bottom or -1, report.ylim_top or plot_size) - - for axis in [axes.xaxis, axes.yaxis]: - MatplotlibPlot.change_axis_formatter( - axis, - report.missing_val if report.show_missing else None) - return has_points - - -class RelativeScatterPlotReport(ScatterPlotReport): - """ - Generate a scatter plot that shows a relative comparison of two - algorithms with regard to the given attribute. The attribute value - of algorithm 1 is shown on the x-axis and the relation to the value - of algorithm 2 on the y-axis. - """ - - def __init__(self, show_missing=True, get_category=None, **kwargs): - ScatterPlotReport.__init__(self, show_missing, get_category, **kwargs) - if self.output_format == 'tex': - raise "not supported" - else: - self.writer = RelativeScatterMatplotlib - - def _fill_categories(self, runs): - # We discard the *runs* parameter. 
- # Map category names to value tuples - categories = defaultdict(list) - self.ylim_bottom = 2 - self.ylim_top = 0.5 - self.xlim_left = float("inf") - for (domain, problem), runs in self.problem_runs.items(): - if len(runs) != 2: - continue - run1, run2 = runs - assert (run1['algorithm'] == self.algorithms[0] and - run2['algorithm'] == self.algorithms[1]) - val1 = run1.get(self.attribute) - val2 = run2.get(self.attribute) - if val1 is None or val2 is None: - continue - category = self.get_category(run1, run2) - assert val1 > 0, (domain, problem, self.algorithms[0], val1) - assert val2 > 0, (domain, problem, self.algorithms[1], val2) - x = val1 - y = val2 / float(val1) - - categories[category].append((x, y)) - - self.ylim_top = max(self.ylim_top, y) - self.ylim_bottom = min(self.ylim_bottom, y) - self.xlim_left = min(self.xlim_left, x) - - # center around 1 - if self.ylim_bottom < 1: - self.ylim_top = max(self.ylim_top, 1 / float(self.ylim_bottom)) - if self.ylim_top > 1: - self.ylim_bottom = min(self.ylim_bottom, 1 / float(self.ylim_top)) - return categories - - def _set_scales(self, xscale, yscale): - # ScatterPlot uses log-scaling on the x-axis by default. - PlotReport._set_scales( - self, xscale or self.attribute.scale or 'log', 'log') diff --git a/experiments/issue846/v1-bjolp.py b/experiments/issue846/v1-bjolp.py deleted file mode 100755 index 76c681f8c2..0000000000 --- a/experiments/issue846/v1-bjolp.py +++ /dev/null @@ -1,67 +0,0 @@ -#! 
/usr/bin/env python -# -*- coding: utf-8 -*- - -import itertools -import os - -from lab.environments import LocalEnvironment, BaselSlurmEnvironment - -from downward.reports.compare import ComparativeReport - -import common_setup -from common_setup import IssueConfig, IssueExperiment -from relativescatter import RelativeScatterPlotReport - -DIR = os.path.dirname(os.path.abspath(__file__)) -SCRIPT_NAME = os.path.splitext(os.path.basename(__file__))[0] -BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"] -REVISIONS = ["issue846-v1"] -BUILDS = ["release32"] -CONFIG_NICKS = [ - ("bjolp-pref-{pref}".format(**locals()), [ - "--evaluator", - "lmc=lmcount(lm_merged([lm_rhw(),lm_hm(m=1)]),admissible=true, preferred_operators={pref})".format(**locals()), - "--search", - "astar(lmc,lazy_evaluator=lmc)"]) - for pref in ["none", "simple"] -] -CONFIGS = [ - IssueConfig( - config_nick, - config, - build_options=[build], - driver_options=["--build", build]) - for rev in REVISIONS - for build in BUILDS - for config_nick, config in CONFIG_NICKS -] - -SUITE = common_setup.DEFAULT_OPTIMAL_SUITE -ENVIRONMENT = BaselSlurmEnvironment( - partition="infai_2", - email="jendrik.seipp@unibas.ch", - export=["PATH", "DOWNWARD_BENCHMARKS"]) - -if common_setup.is_test_run(): - SUITE = IssueExperiment.DEFAULT_TEST_SUITE - ENVIRONMENT = LocalEnvironment(processes=1) - -exp = IssueExperiment( - revisions=REVISIONS, - configs=CONFIGS, - environment=ENVIRONMENT, -) -exp.add_suite(BENCHMARKS_DIR, SUITE) - -exp.add_parser(exp.EXITCODE_PARSER) -exp.add_parser(exp.SINGLE_SEARCH_PARSER) -exp.add_parser(exp.PLANNER_PARSER) - -exp.add_step('build', exp.build) -exp.add_step('start', exp.start_runs) -exp.add_fetcher(name='fetch') - -exp.add_absolute_report_step() -#exp.add_comparison_table_step() - -exp.run_steps() diff --git a/experiments/issue846/v1-lama-first-ignore-pref.py b/experiments/issue846/v1-lama-first-ignore-pref.py deleted file mode 100755 index e22e93df79..0000000000 --- 
a/experiments/issue846/v1-lama-first-ignore-pref.py +++ /dev/null @@ -1,76 +0,0 @@ -#! /usr/bin/env python -# -*- coding: utf-8 -*- - -import itertools -import os - -from lab.environments import LocalEnvironment, BaselSlurmEnvironment - -from downward.reports.compare import ComparativeReport - -import common_setup -from common_setup import IssueConfig, IssueExperiment -from relativescatter import RelativeScatterPlotReport - -DIR = os.path.dirname(os.path.abspath(__file__)) -SCRIPT_NAME = os.path.splitext(os.path.basename(__file__))[0] -BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"] -REVISIONS = ["issue846-v1"] -BUILDS = ["release32"] -CONFIG_NICKS = [ - ("lama-first-pref-{pref}".format(**locals()), [ - "--evaluator", - "hlm=lmcount(lm_factory=lm_rhw(reasonable_orders=true), transform=adapt_costs(one), preferred_operators={pref})".format(**locals()), - "--evaluator", "hff=ff(transform=adapt_costs(one))", - "--search", "lazy_greedy([hff,hlm],preferred=[hff,hlm]," - "cost_type=one,reopen_closed=false)"]) - for pref in ["none", "simple"] -] + [ - ("lama-first-pref-{pref}-ignore".format(**locals()), [ - "--evaluator", - "hlm=lmcount(lm_factory=lm_rhw(reasonable_orders=true), transform=adapt_costs(one), preferred_operators={pref})".format(**locals()), - "--evaluator", "hff=ff(transform=adapt_costs(one))", - "--search", "lazy_greedy([hff,hlm],preferred=[hff]," - "cost_type=one,reopen_closed=false)"]) - for pref in ["simple"] -] -CONFIGS = [ - IssueConfig( - config_nick, - config, - build_options=[build], - driver_options=["--build", build]) - for rev in REVISIONS - for build in BUILDS - for config_nick, config in CONFIG_NICKS -] - -SUITE = common_setup.DEFAULT_SATISFICING_SUITE -ENVIRONMENT = BaselSlurmEnvironment( - partition="infai_2", - email="jendrik.seipp@unibas.ch", - export=["PATH", "DOWNWARD_BENCHMARKS"]) - -if common_setup.is_test_run(): - SUITE = IssueExperiment.DEFAULT_TEST_SUITE - ENVIRONMENT = LocalEnvironment(processes=1) - -exp = IssueExperiment( - 
revisions=REVISIONS, - configs=CONFIGS, - environment=ENVIRONMENT, -) -exp.add_suite(BENCHMARKS_DIR, SUITE) - -exp.add_parser(exp.EXITCODE_PARSER) -exp.add_parser(exp.SINGLE_SEARCH_PARSER) -exp.add_parser(exp.PLANNER_PARSER) - -exp.add_step('build', exp.build) -exp.add_step('start', exp.start_runs) -exp.add_fetcher(name='fetch') - -exp.add_absolute_report_step() -#exp.add_comparison_table_step() - -exp.run_steps() diff --git a/experiments/issue846/v1-lama-first-no-ff.py b/experiments/issue846/v1-lama-first-no-ff.py deleted file mode 100755 index 639a7fc978..0000000000 --- a/experiments/issue846/v1-lama-first-no-ff.py +++ /dev/null @@ -1,67 +0,0 @@ -#! /usr/bin/env python -# -*- coding: utf-8 -*- - -import itertools -import os - -from lab.environments import LocalEnvironment, BaselSlurmEnvironment - -from downward.reports.compare import ComparativeReport - -import common_setup -from common_setup import IssueConfig, IssueExperiment -from relativescatter import RelativeScatterPlotReport - -DIR = os.path.dirname(os.path.abspath(__file__)) -SCRIPT_NAME = os.path.splitext(os.path.basename(__file__))[0] -BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"] -REVISIONS = ["issue846-v1"] -BUILDS = ["release32"] -CONFIG_NICKS = [ - ("{index}-lama-first-no-ff-pref-{pref}".format(**locals()), [ - "--evaluator", - "hlm=lmcount(lm_factory=lm_rhw(reasonable_orders=true), transform=adapt_costs(one), preferred_operators={pref})".format(**locals()), - "--search", "lazy_greedy([hlm],preferred=[hlm]," - "cost_type=one,reopen_closed=false)"]) - for index, pref in enumerate(["none", "simple", "all"]) -] -CONFIGS = [ - IssueConfig( - config_nick, - config, - build_options=[build], - driver_options=["--build", build]) - for rev in REVISIONS - for build in BUILDS - for config_nick, config in CONFIG_NICKS -] - -SUITE = common_setup.DEFAULT_SATISFICING_SUITE -ENVIRONMENT = BaselSlurmEnvironment( - partition="infai_2", - email="jendrik.seipp@unibas.ch", - export=["PATH", "DOWNWARD_BENCHMARKS"]) 
- -if common_setup.is_test_run(): - SUITE = IssueExperiment.DEFAULT_TEST_SUITE - ENVIRONMENT = LocalEnvironment(processes=1) - -exp = IssueExperiment( - revisions=REVISIONS, - configs=CONFIGS, - environment=ENVIRONMENT, -) -exp.add_suite(BENCHMARKS_DIR, SUITE) - -exp.add_parser(exp.EXITCODE_PARSER) -exp.add_parser(exp.SINGLE_SEARCH_PARSER) -exp.add_parser(exp.PLANNER_PARSER) - -exp.add_step('build', exp.build) -exp.add_step('start', exp.start_runs) -exp.add_fetcher(name='fetch') - -exp.add_absolute_report_step() -#exp.add_comparison_table_step() - -exp.run_steps() diff --git a/experiments/issue846/v1-lama-first.py b/experiments/issue846/v1-lama-first.py deleted file mode 100755 index d281a14a31..0000000000 --- a/experiments/issue846/v1-lama-first.py +++ /dev/null @@ -1,68 +0,0 @@ -#! /usr/bin/env python -# -*- coding: utf-8 -*- - -import itertools -import os - -from lab.environments import LocalEnvironment, BaselSlurmEnvironment - -from downward.reports.compare import ComparativeReport - -import common_setup -from common_setup import IssueConfig, IssueExperiment -from relativescatter import RelativeScatterPlotReport - -DIR = os.path.dirname(os.path.abspath(__file__)) -SCRIPT_NAME = os.path.splitext(os.path.basename(__file__))[0] -BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"] -REVISIONS = ["issue846-v1"] -BUILDS = ["release32"] -CONFIG_NICKS = [ - ("lama-first-pref-{pref}".format(**locals()), [ - "--evaluator", - "hlm=lmcount(lm_factory=lm_rhw(reasonable_orders=true), transform=adapt_costs(one), preferred_operators={pref})".format(**locals()), - "--evaluator", "hff=ff(transform=adapt_costs(one))", - "--search", "lazy_greedy([hff,hlm],preferred=[hff,hlm]," - "cost_type=one,reopen_closed=false)"]) - for pref in ["none", "simple", "all"] -] -CONFIGS = [ - IssueConfig( - config_nick, - config, - build_options=[build], - driver_options=["--build", build]) - for rev in REVISIONS - for build in BUILDS - for config_nick, config in CONFIG_NICKS -] - -SUITE = 
common_setup.DEFAULT_SATISFICING_SUITE -ENVIRONMENT = BaselSlurmEnvironment( - partition="infai_2", - email="jendrik.seipp@unibas.ch", - export=["PATH", "DOWNWARD_BENCHMARKS"]) - -if common_setup.is_test_run(): - SUITE = IssueExperiment.DEFAULT_TEST_SUITE - ENVIRONMENT = LocalEnvironment(processes=1) - -exp = IssueExperiment( - revisions=REVISIONS, - configs=CONFIGS, - environment=ENVIRONMENT, -) -exp.add_suite(BENCHMARKS_DIR, SUITE) - -exp.add_parser(exp.EXITCODE_PARSER) -exp.add_parser(exp.SINGLE_SEARCH_PARSER) -exp.add_parser(exp.PLANNER_PARSER) - -exp.add_step('build', exp.build) -exp.add_step('start', exp.start_runs) -exp.add_fetcher(name='fetch') - -exp.add_absolute_report_step(filter_algorithm=["issue846-v1-lama-first-pref-none", "issue846-v1-lama-first-pref-simple", "issue846-v1-lama-first-pref-all"]) -#exp.add_comparison_table_step() - -exp.run_steps() diff --git a/experiments/issue846/v1-lama.py b/experiments/issue846/v1-lama.py deleted file mode 100755 index 701aed9a32..0000000000 --- a/experiments/issue846/v1-lama.py +++ /dev/null @@ -1,92 +0,0 @@ -#! 
/usr/bin/env python -# -*- coding: utf-8 -*- - -import itertools -import os - -from lab.environments import LocalEnvironment, BaselSlurmEnvironment - -from downward.reports.compare import ComparativeReport - -import common_setup -from common_setup import IssueConfig, IssueExperiment -from relativescatter import RelativeScatterPlotReport - -DIR = os.path.dirname(os.path.abspath(__file__)) -SCRIPT_NAME = os.path.splitext(os.path.basename(__file__))[0] -BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"] -REVISIONS = ["issue846-v1"] -BUILDS = ["release32"] -CONFIG_NICKS = [ - ("lama-no-syn-pref-{pref}".format(**locals()), [ - "--if-unit-cost", - "--evaluator", - "hlm=lmcount(lm_rhw(reasonable_orders=true), preferred_operators={pref})".format(**locals()), - "--evaluator", "hff=ff()", - "--search", """iterated([ - lazy_greedy([hff,hlm],preferred=[hff,hlm]), - lazy_wastar([hff,hlm],preferred=[hff,hlm],w=5), - lazy_wastar([hff,hlm],preferred=[hff,hlm],w=3), - lazy_wastar([hff,hlm],preferred=[hff,hlm],w=2), - lazy_wastar([hff,hlm],preferred=[hff,hlm],w=1) - ],repeat_last=true,continue_on_fail=true)""", - "--if-non-unit-cost", - "--evaluator", - "hlm1=lmcount(lm_rhw(reasonable_orders=true), transform=adapt_costs(one), preferred_operators={pref})".format(**locals()), - "--evaluator", "hff1=ff(transform=adapt_costs(one))", - "--evaluator", - "hlm2=lmcount(lm_rhw(reasonable_orders=true), transform=adapt_costs(plusone), preferred_operators={pref})".format(**locals()), - "--evaluator", "hff2=ff(transform=adapt_costs(plusone))", - "--search", """iterated([ - lazy_greedy([hff1,hlm1],preferred=[hff1,hlm1], - cost_type=one,reopen_closed=false), - lazy_greedy([hff2,hlm2],preferred=[hff2,hlm2], - reopen_closed=false), - lazy_wastar([hff2,hlm2],preferred=[hff2,hlm2],w=5), - lazy_wastar([hff2,hlm2],preferred=[hff2,hlm2],w=3), - lazy_wastar([hff2,hlm2],preferred=[hff2,hlm2],w=2), - lazy_wastar([hff2,hlm2],preferred=[hff2,hlm2],w=1) - ],repeat_last=true,continue_on_fail=true)""", - 
"--always"]) - for pref in ["none", "simple", "all"] -] -CONFIGS = [ - IssueConfig( - config_nick, - config, - build_options=[build], - driver_options=["--build", build]) - for rev in REVISIONS - for build in BUILDS - for config_nick, config in CONFIG_NICKS -] - -SUITE = common_setup.DEFAULT_SATISFICING_SUITE -ENVIRONMENT = BaselSlurmEnvironment( - partition="infai_2", - email="jendrik.seipp@unibas.ch", - export=["PATH", "DOWNWARD_BENCHMARKS"]) - -if common_setup.is_test_run(): - SUITE = IssueExperiment.DEFAULT_TEST_SUITE - ENVIRONMENT = LocalEnvironment(processes=1) - -exp = IssueExperiment( - revisions=REVISIONS, - configs=CONFIGS, - environment=ENVIRONMENT, -) -exp.add_suite(BENCHMARKS_DIR, SUITE) - -exp.add_parser(exp.EXITCODE_PARSER) -exp.add_parser(exp.ANYTIME_SEARCH_PARSER) -exp.add_parser(exp.PLANNER_PARSER) - -exp.add_step('build', exp.build) -exp.add_step('start', exp.start_runs) -exp.add_fetcher(name='fetch') - -exp.add_absolute_report_step(filter_algorithm=["issue846-v1-lama-no-syn-pref-none", "issue846-v1-lama-no-syn-pref-simple", "issue846-v1-lama-no-syn-pref-all"]) -#exp.add_comparison_table_step() - -exp.run_steps() diff --git a/experiments/issue846/v2-bjolp.py b/experiments/issue846/v2-bjolp.py deleted file mode 100755 index 8bf2d485d1..0000000000 --- a/experiments/issue846/v2-bjolp.py +++ /dev/null @@ -1,66 +0,0 @@ -#! 
/usr/bin/env python -# -*- coding: utf-8 -*- - -import itertools -import os - -from lab.environments import LocalEnvironment, BaselSlurmEnvironment - -from downward.reports.compare import ComparativeReport - -import common_setup -from common_setup import IssueConfig, IssueExperiment -from relativescatter import RelativeScatterPlotReport - -DIR = os.path.dirname(os.path.abspath(__file__)) -SCRIPT_NAME = os.path.splitext(os.path.basename(__file__))[0] -BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"] -REVISIONS = ["issue846-base", "issue846-v2"] -BUILDS = ["release32"] -CONFIG_NICKS = [ - ("bjolp-pref-{pref}".format(**locals()), [ - "--evaluator", - "lmc=lmcount(lm_merged([lm_rhw(),lm_hm(m=1)]),admissible=true, pref={pref})".format(**locals()), - "--search", - "astar(lmc,lazy_evaluator=lmc)"]) - for pref in ["false"] -] -CONFIGS = [ - IssueConfig( - config_nick, - config, - build_options=[build], - driver_options=["--build", build]) - for build in BUILDS - for config_nick, config in CONFIG_NICKS -] - -SUITE = common_setup.DEFAULT_OPTIMAL_SUITE -ENVIRONMENT = BaselSlurmEnvironment( - partition="infai_1", - email="jendrik.seipp@unibas.ch", - export=["PATH", "DOWNWARD_BENCHMARKS"]) - -if common_setup.is_test_run(): - SUITE = IssueExperiment.DEFAULT_TEST_SUITE - ENVIRONMENT = LocalEnvironment(processes=1) - -exp = IssueExperiment( - revisions=REVISIONS, - configs=CONFIGS, - environment=ENVIRONMENT, -) -exp.add_suite(BENCHMARKS_DIR, SUITE) - -exp.add_parser(exp.EXITCODE_PARSER) -exp.add_parser(exp.SINGLE_SEARCH_PARSER) -exp.add_parser(exp.PLANNER_PARSER) - -exp.add_step('build', exp.build) -exp.add_step('start', exp.start_runs) -exp.add_fetcher(name='fetch') - -#exp.add_absolute_report_step() -exp.add_comparison_table_step() - -exp.run_steps() diff --git a/experiments/issue846/v2-lama-first.py b/experiments/issue846/v2-lama-first.py deleted file mode 100755 index 0321123bb9..0000000000 --- a/experiments/issue846/v2-lama-first.py +++ /dev/null @@ -1,66 +0,0 @@ -#! 
/usr/bin/env python -# -*- coding: utf-8 -*- - -import itertools -import os - -from lab.environments import LocalEnvironment, BaselSlurmEnvironment - -from downward.reports.compare import ComparativeReport - -import common_setup -from common_setup import IssueConfig, IssueExperiment -from relativescatter import RelativeScatterPlotReport - -DIR = os.path.dirname(os.path.abspath(__file__)) -SCRIPT_NAME = os.path.splitext(os.path.basename(__file__))[0] -BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"] -REVISIONS = ["issue846-base", "issue846-v2"] -BUILDS = ["release32"] -CONFIG_NICKS = [ - ("lama-first-pref-{pref}".format(**locals()), [ - "--evaluator", - "hlm=lmcount(lm_factory=lm_rhw(reasonable_orders=true), transform=adapt_costs(one), pref={pref})".format(**locals()), - "--evaluator", "hff=ff(transform=adapt_costs(one))", - "--search", "lazy_greedy([hff,hlm],preferred=[hff,hlm]," - "cost_type=one,reopen_closed=false)"]) - for pref in ["true", "false"] -] -CONFIGS = [ - IssueConfig( - config_nick, - config, - build_options=[build], - driver_options=["--build", build]) - for build in BUILDS - for config_nick, config in CONFIG_NICKS -] - -SUITE = common_setup.DEFAULT_SATISFICING_SUITE -ENVIRONMENT = BaselSlurmEnvironment( - partition="infai_2", - email="jendrik.seipp@unibas.ch", - export=["PATH", "DOWNWARD_BENCHMARKS"]) - -if common_setup.is_test_run(): - SUITE = IssueExperiment.DEFAULT_TEST_SUITE - ENVIRONMENT = LocalEnvironment(processes=1) - -exp = IssueExperiment( - revisions=REVISIONS, - configs=CONFIGS, - environment=ENVIRONMENT, -) -exp.add_suite(BENCHMARKS_DIR, SUITE) - -exp.add_parser(exp.EXITCODE_PARSER) -exp.add_parser(exp.SINGLE_SEARCH_PARSER) -exp.add_parser(exp.PLANNER_PARSER) - -exp.add_step('build', exp.build) -exp.add_step('start', exp.start_runs) -exp.add_fetcher(name='fetch') - -exp.add_comparison_table_step() - -exp.run_steps() diff --git a/experiments/issue849/common_setup.py b/experiments/issue849/common_setup.py deleted file mode 100644 index 
f64c5c15b3..0000000000 --- a/experiments/issue849/common_setup.py +++ /dev/null @@ -1,394 +0,0 @@ -# -*- coding: utf-8 -*- - -import itertools -import os -import platform -import subprocess -import sys - -from lab.experiment import ARGPARSER -from lab import tools - -from downward.experiment import FastDownwardExperiment -from downward.reports.absolute import AbsoluteReport -from downward.reports.compare import ComparativeReport -from downward.reports.scatter import ScatterPlotReport - -from relativescatter import RelativeScatterPlotReport - - -def parse_args(): - ARGPARSER.add_argument( - "--test", - choices=["yes", "no", "auto"], - default="auto", - dest="test_run", - help="test experiment locally on a small suite if --test=yes or " - "--test=auto and we are not on a cluster") - return ARGPARSER.parse_args() - -ARGS = parse_args() - - -DEFAULT_OPTIMAL_SUITE = [ - 'agricola-opt18-strips', 'airport', 'barman-opt11-strips', - 'barman-opt14-strips', 'blocks', 'childsnack-opt14-strips', - 'data-network-opt18-strips', 'depot', 'driverlog', - 'elevators-opt08-strips', 'elevators-opt11-strips', - 'floortile-opt11-strips', 'floortile-opt14-strips', 'freecell', - 'ged-opt14-strips', 'grid', 'gripper', 'hiking-opt14-strips', - 'logistics00', 'logistics98', 'miconic', 'movie', 'mprime', - 'mystery', 'nomystery-opt11-strips', 'openstacks-opt08-strips', - 'openstacks-opt11-strips', 'openstacks-opt14-strips', - 'openstacks-strips', 'organic-synthesis-opt18-strips', - 'organic-synthesis-split-opt18-strips', 'parcprinter-08-strips', - 'parcprinter-opt11-strips', 'parking-opt11-strips', - 'parking-opt14-strips', 'pathways-noneg', 'pegsol-08-strips', - 'pegsol-opt11-strips', 'petri-net-alignment-opt18-strips', - 'pipesworld-notankage', 'pipesworld-tankage', 'psr-small', 'rovers', - 'satellite', 'scanalyzer-08-strips', 'scanalyzer-opt11-strips', - 'snake-opt18-strips', 'sokoban-opt08-strips', - 'sokoban-opt11-strips', 'spider-opt18-strips', 'storage', - 'termes-opt18-strips', 
'tetris-opt14-strips', - 'tidybot-opt11-strips', 'tidybot-opt14-strips', 'tpp', - 'transport-opt08-strips', 'transport-opt11-strips', - 'transport-opt14-strips', 'trucks-strips', 'visitall-opt11-strips', - 'visitall-opt14-strips', 'woodworking-opt08-strips', - 'woodworking-opt11-strips', 'zenotravel'] - -DEFAULT_SATISFICING_SUITE = [ - 'agricola-sat18-strips', 'airport', 'assembly', - 'barman-sat11-strips', 'barman-sat14-strips', 'blocks', - 'caldera-sat18-adl', 'caldera-split-sat18-adl', 'cavediving-14-adl', - 'childsnack-sat14-strips', 'citycar-sat14-adl', - 'data-network-sat18-strips', 'depot', 'driverlog', - 'elevators-sat08-strips', 'elevators-sat11-strips', - 'flashfill-sat18-adl', 'floortile-sat11-strips', - 'floortile-sat14-strips', 'freecell', 'ged-sat14-strips', 'grid', - 'gripper', 'hiking-sat14-strips', 'logistics00', 'logistics98', - 'maintenance-sat14-adl', 'miconic', 'miconic-fulladl', - 'miconic-simpleadl', 'movie', 'mprime', 'mystery', - 'nomystery-sat11-strips', 'nurikabe-sat18-adl', 'openstacks', - 'openstacks-sat08-adl', 'openstacks-sat08-strips', - 'openstacks-sat11-strips', 'openstacks-sat14-strips', - 'openstacks-strips', 'optical-telegraphs', - 'organic-synthesis-sat18-strips', - 'organic-synthesis-split-sat18-strips', 'parcprinter-08-strips', - 'parcprinter-sat11-strips', 'parking-sat11-strips', - 'parking-sat14-strips', 'pathways', 'pathways-noneg', - 'pegsol-08-strips', 'pegsol-sat11-strips', 'philosophers', - 'pipesworld-notankage', 'pipesworld-tankage', 'psr-large', - 'psr-middle', 'psr-small', 'rovers', 'satellite', - 'scanalyzer-08-strips', 'scanalyzer-sat11-strips', 'schedule', - 'settlers-sat18-adl', 'snake-sat18-strips', 'sokoban-sat08-strips', - 'sokoban-sat11-strips', 'spider-sat18-strips', 'storage', - 'termes-sat18-strips', 'tetris-sat14-strips', - 'thoughtful-sat14-strips', 'tidybot-sat11-strips', 'tpp', - 'transport-sat08-strips', 'transport-sat11-strips', - 'transport-sat14-strips', 'trucks', 'trucks-strips', - 
'visitall-sat11-strips', 'visitall-sat14-strips', - 'woodworking-sat08-strips', 'woodworking-sat11-strips', - 'zenotravel'] - - -def get_script(): - """Get file name of main script.""" - return tools.get_script_path() - - -def get_script_dir(): - """Get directory of main script. - - Usually a relative directory (depends on how it was called by the user.)""" - return os.path.dirname(get_script()) - - -def get_experiment_name(): - """Get name for experiment. - - Derived from the absolute filename of the main script, e.g. - "/ham/spam/eggs.py" => "spam-eggs".""" - script = os.path.abspath(get_script()) - script_dir = os.path.basename(os.path.dirname(script)) - script_base = os.path.splitext(os.path.basename(script))[0] - return "%s-%s" % (script_dir, script_base) - - -def get_data_dir(): - """Get data dir for the experiment. - - This is the subdirectory "data" of the directory containing - the main script.""" - return os.path.join(get_script_dir(), "data", get_experiment_name()) - - -def get_repo_base(): - """Get base directory of the repository, as an absolute path. - - Search upwards in the directory tree from the main script until a - directory with a subdirectory named ".hg" is found. - - Abort if the repo base cannot be found.""" - path = os.path.abspath(get_script_dir()) - while os.path.dirname(path) != path: - if os.path.exists(os.path.join(path, ".hg")): - return path - path = os.path.dirname(path) - sys.exit("repo base could not be found") - - -def is_running_on_cluster(): - node = platform.node() - return node.endswith(".scicore.unibas.ch") or node.endswith(".cluster.bc2.ch") - - -def is_test_run(): - return ARGS.test_run == "yes" or ( - ARGS.test_run == "auto" and not is_running_on_cluster()) - - -def get_algo_nick(revision, config_nick): - return "{revision}-{config_nick}".format(**locals()) - - -class IssueConfig(object): - """Hold information about a planner configuration. 
- - See FastDownwardExperiment.add_algorithm() for documentation of the - constructor's options. - - """ - def __init__(self, nick, component_options, - build_options=None, driver_options=None): - self.nick = nick - self.component_options = component_options - self.build_options = build_options - self.driver_options = driver_options - - -class IssueExperiment(FastDownwardExperiment): - """Subclass of FastDownwardExperiment with some convenience features.""" - - DEFAULT_TEST_SUITE = ["depot:p01.pddl", "gripper:prob01.pddl"] - - DEFAULT_TABLE_ATTRIBUTES = [ - "cost", - "coverage", - "error", - "evaluations", - "expansions", - "expansions_until_last_jump", - "generated", - "memory", - "planner_memory", - "planner_time", - "quality", - "run_dir", - "score_evaluations", - "score_expansions", - "score_generated", - "score_memory", - "score_search_time", - "score_total_time", - "search_time", - "total_time", - ] - - DEFAULT_SCATTER_PLOT_ATTRIBUTES = [ - "evaluations", - "expansions", - "expansions_until_last_jump", - "initial_h_value", - "memory", - "search_time", - "total_time", - ] - - PORTFOLIO_ATTRIBUTES = [ - "cost", - "coverage", - "error", - "plan_length", - "run_dir", - ] - - def __init__(self, revisions=None, configs=None, path=None, **kwargs): - """ - - You can either specify both *revisions* and *configs* or none - of them. If they are omitted, you will need to call - exp.add_algorithm() manually. - - If *revisions* is given, it must be a non-empty list of - revision identifiers, which specify which planner versions to - use in the experiment. The same versions are used for - translator, preprocessor and search. :: - - IssueExperiment(revisions=["issue123", "4b3d581643"], ...) - - If *configs* is given, it must be a non-empty list of - IssueConfig objects. 
:: - - IssueExperiment(..., configs=[ - IssueConfig("ff", ["--search", "eager_greedy(ff())"]), - IssueConfig( - "lama", [], - driver_options=["--alias", "seq-sat-lama-2011"]), - ]) - - If *path* is specified, it must be the path to where the - experiment should be built (e.g. - /home/john/experiments/issue123/exp01/). If omitted, the - experiment path is derived automatically from the main - script's filename. Example:: - - script = experiments/issue123/exp01.py --> - path = experiments/issue123/data/issue123-exp01/ - - """ - - path = path or get_data_dir() - - FastDownwardExperiment.__init__(self, path=path, **kwargs) - - if (revisions and not configs) or (not revisions and configs): - raise ValueError( - "please provide either both or none of revisions and configs") - - for rev in revisions: - for config in configs: - self.add_algorithm( - get_algo_nick(rev, config.nick), - get_repo_base(), - rev, - config.component_options, - build_options=config.build_options, - driver_options=config.driver_options) - - self._revisions = revisions - self._configs = configs - - @classmethod - def _is_portfolio(cls, config_nick): - return "fdss" in config_nick - - @classmethod - def get_supported_attributes(cls, config_nick, attributes): - if cls._is_portfolio(config_nick): - return [attr for attr in attributes - if attr in cls.PORTFOLIO_ATTRIBUTES] - return attributes - - def add_absolute_report_step(self, **kwargs): - """Add step that makes an absolute report. - - Absolute reports are useful for experiments that don't compare - revisions. - - The report is written to the experiment evaluation directory. - - All *kwargs* will be passed to the AbsoluteReport class. If the - keyword argument *attributes* is not specified, a default list - of attributes is used. 
:: - - exp.add_absolute_report_step(attributes=["coverage"]) - - """ - kwargs.setdefault("attributes", self.DEFAULT_TABLE_ATTRIBUTES) - report = AbsoluteReport(**kwargs) - outfile = os.path.join( - self.eval_dir, - get_experiment_name() + "." + report.output_format) - self.add_report(report, outfile=outfile) - self.add_step( - 'publish-absolute-report', subprocess.call, ['publish', outfile]) - - def add_comparison_table_step(self, **kwargs): - """Add a step that makes pairwise revision comparisons. - - Create comparative reports for all pairs of Fast Downward - revisions. Each report pairs up the runs of the same config and - lists the two absolute attribute values and their difference - for all attributes in kwargs["attributes"]. - - All *kwargs* will be passed to the CompareConfigsReport class. - If the keyword argument *attributes* is not specified, a - default list of attributes is used. :: - - exp.add_comparison_table_step(attributes=["coverage"]) - - """ - kwargs.setdefault("attributes", self.DEFAULT_TABLE_ATTRIBUTES) - - def make_comparison_tables(): - for rev1, rev2 in itertools.combinations(self._revisions, 2): - compared_configs = [] - for config in self._configs: - config_nick = config.nick - compared_configs.append( - ("%s-%s" % (rev1, config_nick), - "%s-%s" % (rev2, config_nick), - "Diff (%s)" % config_nick)) - report = ComparativeReport(compared_configs, **kwargs) - outfile = os.path.join( - self.eval_dir, - "%s-%s-%s-compare.%s" % ( - self.name, rev1, rev2, report.output_format)) - report(self.eval_dir, outfile) - - def publish_comparison_tables(): - for rev1, rev2 in itertools.combinations(self._revisions, 2): - outfile = os.path.join( - self.eval_dir, - "%s-%s-%s-compare.html" % (self.name, rev1, rev2)) - subprocess.call(["publish", outfile]) - - self.add_step("make-comparison-tables", make_comparison_tables) - self.add_step( - "publish-comparison-tables", publish_comparison_tables) - - def add_scatter_plot_step(self, relative=False, 
attributes=None): - """Add step creating (relative) scatter plots for all revision pairs. - - Create a scatter plot for each combination of attribute, - configuration and revisions pair. If *attributes* is not - specified, a list of common scatter plot attributes is used. - For portfolios all attributes except "cost", "coverage" and - "plan_length" will be ignored. :: - - exp.add_scatter_plot_step(attributes=["expansions"]) - - """ - if relative: - report_class = RelativeScatterPlotReport - scatter_dir = os.path.join(self.eval_dir, "scatter-relative") - step_name = "make-relative-scatter-plots" - else: - report_class = ScatterPlotReport - scatter_dir = os.path.join(self.eval_dir, "scatter-absolute") - step_name = "make-absolute-scatter-plots" - if attributes is None: - attributes = self.DEFAULT_SCATTER_PLOT_ATTRIBUTES - - def make_scatter_plot(config_nick, rev1, rev2, attribute): - name = "-".join([self.name, rev1, rev2, attribute, config_nick]) - print "Make scatter plot for", name - algo1 = "{}-{}".format(rev1, config_nick) - algo2 = "{}-{}".format(rev2, config_nick) - report = report_class( - filter_config=[algo1, algo2], - attributes=[attribute], - get_category=lambda run1, run2: run1["domain"], - legend_location=(1.3, 0.5)) - report( - self.eval_dir, - os.path.join(scatter_dir, rev1 + "-" + rev2, name)) - - def make_scatter_plots(): - for config in self._configs: - for rev1, rev2 in itertools.combinations(self._revisions, 2): - for attribute in self.get_supported_attributes( - config.nick, attributes): - make_scatter_plot(config.nick, rev1, rev2, attribute) - - self.add_step(step_name, make_scatter_plots) diff --git a/experiments/issue849/ms-parser.py b/experiments/issue849/ms-parser.py deleted file mode 100755 index c690fb5193..0000000000 --- a/experiments/issue849/ms-parser.py +++ /dev/null @@ -1,72 +0,0 @@ -#! 
/usr/bin/env python - -from lab.parser import Parser - -parser = Parser() -parser.add_pattern('ms_final_size', 'Final transition system size: (\d+)', required=False, type=int) -parser.add_pattern('ms_construction_time', 'Done initializing merge-and-shrink heuristic \[(.+)s\]', required=False, type=float) -# TODO: replace above by below in future experiments -parser.add_pattern('ms_construction_time', 'Merge-and-shrink algorithm runtime: (.+)s', required=False, type=float) -parser.add_pattern('ms_atomic_construction_time', 't=(.+)s \(after computation of atomic transition systems\)', required=False, type=float) -parser.add_pattern('ms_memory_delta', 'Final peak memory increase of merge-and-shrink computation: (\d+) KB', required=False, type=int) - -def check_ms_constructed(content, props): - ms_construction_time = props.get('ms_construction_time') - abstraction_constructed = False - if ms_construction_time is not None: - abstraction_constructed = True - props['ms_abstraction_constructed'] = abstraction_constructed - -parser.add_function(check_ms_constructed) - -def check_atomic_fts_constructed(content, props): - ms_atomic_construction_time = props.get('ms_atomic_construction_time') - ms_atomic_fts_constructed = False - if ms_atomic_construction_time is not None: - ms_atomic_fts_constructed = True - props['ms_atomic_fts_constructed'] = ms_atomic_fts_constructed - -parser.add_function(check_atomic_fts_constructed) - -def check_planner_exit_reason(content, props): - ms_abstraction_constructed = props.get('ms_abstraction_constructed') - error = props.get('error') - if error != 'success' and error != 'timeout' and error != 'out-of-memory': - print 'error: %s' % error - return - - # Check whether merge-and-shrink computation or search ran out of - # time or memory. 
- ms_out_of_time = False - ms_out_of_memory = False - search_out_of_time = False - search_out_of_memory = False - if ms_abstraction_constructed == False: - if error == 'timeout': - ms_out_of_time = True - elif error == 'out-of-memory': - ms_out_of_memory = True - elif ms_abstraction_constructed == True: - if error == 'timeout': - search_out_of_time = True - elif error == 'out-of-memory': - search_out_of_memory = True - props['ms_out_of_time'] = ms_out_of_time - props['ms_out_of_memory'] = ms_out_of_memory - props['search_out_of_time'] = search_out_of_time - props['search_out_of_memory'] = search_out_of_memory - -parser.add_function(check_planner_exit_reason) - -def check_perfect_heuristic(content, props): - plan_length = props.get('plan_length') - expansions = props.get('expansions') - if plan_length != None: - perfect_heuristic = False - if plan_length + 1 == expansions: - perfect_heuristic = True - props['perfect_heuristic'] = perfect_heuristic - -parser.add_function(check_perfect_heuristic) - -parser.parse() diff --git a/experiments/issue849/relativescatter.py b/experiments/issue849/relativescatter.py deleted file mode 100644 index f74cb6e721..0000000000 --- a/experiments/issue849/relativescatter.py +++ /dev/null @@ -1,105 +0,0 @@ -# -*- coding: utf-8 -*- - -from collections import defaultdict - -from matplotlib import ticker - -from downward.reports.scatter import ScatterPlotReport -from downward.reports.plot import PlotReport, Matplotlib, MatplotlibPlot - - -# TODO: handle outliers - -# TODO: this is mostly copied from ScatterMatplotlib (scatter.py) -class RelativeScatterMatplotlib(Matplotlib): - @classmethod - def _plot(cls, report, axes, categories, styles): - # Display grid - axes.grid(b=True, linestyle='-', color='0.75') - - has_points = False - # Generate the scatter plots - for category, coords in sorted(categories.items()): - X, Y = zip(*coords) - axes.scatter(X, Y, s=42, label=category, **styles[category]) - if X and Y: - has_points = True - - if 
report.xscale == 'linear' or report.yscale == 'linear': - plot_size = report.missing_val * 1.01 - else: - plot_size = report.missing_val * 1.25 - - # make 5 ticks above and below 1 - yticks = [] - tick_step = report.ylim_top**(1/5.0) - for i in xrange(-5, 6): - yticks.append(tick_step**i) - axes.set_yticks(yticks) - axes.get_yaxis().set_major_formatter(ticker.ScalarFormatter()) - - axes.set_xlim(report.xlim_left or -1, report.xlim_right or plot_size) - axes.set_ylim(report.ylim_bottom or -1, report.ylim_top or plot_size) - - for axis in [axes.xaxis, axes.yaxis]: - MatplotlibPlot.change_axis_formatter( - axis, - report.missing_val if report.show_missing else None) - return has_points - - -class RelativeScatterPlotReport(ScatterPlotReport): - """ - Generate a scatter plot that shows a relative comparison of two - algorithms with regard to the given attribute. The attribute value - of algorithm 1 is shown on the x-axis and the relation to the value - of algorithm 2 on the y-axis. - """ - - def __init__(self, show_missing=True, get_category=None, **kwargs): - ScatterPlotReport.__init__(self, show_missing, get_category, **kwargs) - if self.output_format == 'tex': - raise "not supported" - else: - self.writer = RelativeScatterMatplotlib - - def _fill_categories(self, runs): - # We discard the *runs* parameter. 
- # Map category names to value tuples - categories = defaultdict(list) - self.ylim_bottom = 2 - self.ylim_top = 0.5 - self.xlim_left = float("inf") - for (domain, problem), runs in self.problem_runs.items(): - if len(runs) != 2: - continue - run1, run2 = runs - assert (run1['algorithm'] == self.algorithms[0] and - run2['algorithm'] == self.algorithms[1]) - val1 = run1.get(self.attribute) - val2 = run2.get(self.attribute) - if val1 is None or val2 is None: - continue - category = self.get_category(run1, run2) - assert val1 > 0, (domain, problem, self.algorithms[0], val1) - assert val2 > 0, (domain, problem, self.algorithms[1], val2) - x = val1 - y = val2 / float(val1) - - categories[category].append((x, y)) - - self.ylim_top = max(self.ylim_top, y) - self.ylim_bottom = min(self.ylim_bottom, y) - self.xlim_left = min(self.xlim_left, x) - - # center around 1 - if self.ylim_bottom < 1: - self.ylim_top = max(self.ylim_top, 1 / float(self.ylim_bottom)) - if self.ylim_top > 1: - self.ylim_bottom = min(self.ylim_bottom, 1 / float(self.ylim_top)) - return categories - - def _set_scales(self, xscale, yscale): - # ScatterPlot uses log-scaling on the x-axis by default. - PlotReport._set_scales( - self, xscale or self.attribute.scale or 'log', 'log') diff --git a/experiments/issue849/v1.py b/experiments/issue849/v1.py deleted file mode 100755 index ee141decb1..0000000000 --- a/experiments/issue849/v1.py +++ /dev/null @@ -1,95 +0,0 @@ -#! 
/usr/bin/env python -# -*- coding: utf-8 -*- - -import itertools -import os - -from lab.environments import LocalEnvironment, BaselSlurmEnvironment -from lab.reports import Attribute, geometric_mean - -from downward.reports.compare import ComparativeReport - -import common_setup -from common_setup import IssueConfig, IssueExperiment -from relativescatter import RelativeScatterPlotReport - -DIR = os.path.dirname(os.path.abspath(__file__)) -SCRIPT_NAME = os.path.splitext(os.path.basename(__file__))[0] -BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"] -REVISIONS = ["issue849-base", "issue849-v1"] -BUILDS = ["release32"] -CONFIG_NICKS = [ - ('dfp-b50k', ['--search', 'astar(merge_and_shrink(merge_strategy=merge_stateless(merge_selector=score_based_filtering(scoring_functions=[goal_relevance,dfp,total_order])),shrink_strategy=shrink_bisimulation(greedy=false),label_reduction=exact(before_shrinking=true,before_merging=false),max_states=50000,threshold_before_merge=1))']), - ('rl-b50k', ['--search', 'astar(merge_and_shrink(merge_strategy=merge_precomputed(merge_tree=linear(variable_order=reverse_level)),shrink_strategy=shrink_bisimulation(greedy=false),label_reduction=exact(before_shrinking=true,before_merging=false),max_states=50000,threshold_before_merge=1))']), - ('sccs-dfp-b50k', ['--search', 'astar(merge_and_shrink(shrink_strategy=shrink_bisimulation(greedy=false),merge_strategy=merge_sccs(order_of_sccs=topological,merge_selector=score_based_filtering(scoring_functions=[goal_relevance,dfp,total_order(atomic_ts_order=reverse_level,product_ts_order=new_to_old,atomic_before_product=false)])),label_reduction=exact(before_shrinking=true,before_merging=false),max_states=50000,threshold_before_merge=1))']), -] -CONFIGS = [ - IssueConfig( - config_nick, - config, - build_options=[build], - driver_options=["--build", build]) - for build in BUILDS - for config_nick, config in CONFIG_NICKS -] - -SUITE = common_setup.DEFAULT_OPTIMAL_SUITE -ENVIRONMENT = BaselSlurmEnvironment( - 
partition="infai_2", - email="silvan.sievers@unibas.ch", - export=["PATH", "DOWNWARD_BENCHMARKS"]) - -if common_setup.is_test_run(): - SUITE = IssueExperiment.DEFAULT_TEST_SUITE - ENVIRONMENT = LocalEnvironment(processes=4) - -exp = IssueExperiment( - revisions=REVISIONS, - configs=CONFIGS, - environment=ENVIRONMENT, -) -exp.add_suite(BENCHMARKS_DIR, SUITE) - -exp.add_parser(exp.EXITCODE_PARSER) -exp.add_parser(exp.TRANSLATOR_PARSER) -exp.add_parser(exp.SINGLE_SEARCH_PARSER) -exp.add_parser(exp.PLANNER_PARSER) -exp.add_parser('ms-parser.py') - -exp.add_step('build', exp.build) -exp.add_step('start', exp.start_runs) -exp.add_fetcher(name='fetch') - -# planner outcome attributes -perfect_heuristic = Attribute('perfect_heuristic', absolute=True, min_wins=False) - -# m&s attributes -ms_construction_time = Attribute('ms_construction_time', absolute=False, min_wins=True, functions=[geometric_mean]) -ms_atomic_construction_time = Attribute('ms_atomic_construction_time', absolute=False, min_wins=True, functions=[geometric_mean]) -ms_abstraction_constructed = Attribute('ms_abstraction_constructed', absolute=True, min_wins=False) -ms_atomic_fts_constructed = Attribute('ms_atomic_fts_constructed', absolute=True, min_wins=False) -ms_final_size = Attribute('ms_final_size', absolute=False, min_wins=True) -ms_out_of_memory = Attribute('ms_out_of_memory', absolute=True, min_wins=True) -ms_out_of_time = Attribute('ms_out_of_time', absolute=True, min_wins=True) -search_out_of_memory = Attribute('search_out_of_memory', absolute=True, min_wins=True) -search_out_of_time = Attribute('search_out_of_time', absolute=True, min_wins=True) - -extra_attributes = [ - perfect_heuristic, - - ms_construction_time, - ms_atomic_construction_time, - ms_abstraction_constructed, - ms_atomic_fts_constructed, - ms_final_size, - ms_out_of_memory, - ms_out_of_time, - search_out_of_memory, - search_out_of_time, -] -attributes = exp.DEFAULT_TABLE_ATTRIBUTES -attributes.extend(extra_attributes) - 
-exp.add_comparison_table_step(attributes=attributes) - -exp.run_steps() diff --git a/experiments/issue851/common_setup.py b/experiments/issue851/common_setup.py deleted file mode 100644 index ae342d5741..0000000000 --- a/experiments/issue851/common_setup.py +++ /dev/null @@ -1,395 +0,0 @@ -# -*- coding: utf-8 -*- - -import itertools -import os -import platform -import subprocess -import sys - -from lab.experiment import ARGPARSER -from lab import tools - -from downward.experiment import FastDownwardExperiment -from downward.reports.absolute import AbsoluteReport -from downward.reports.compare import ComparativeReport -from downward.reports.scatter import ScatterPlotReport - -from relativescatter import RelativeScatterPlotReport - - -def parse_args(): - ARGPARSER.add_argument( - "--test", - choices=["yes", "no", "auto"], - default="auto", - dest="test_run", - help="test experiment locally on a small suite if --test=yes or " - "--test=auto and we are not on a cluster") - return ARGPARSER.parse_args() - -ARGS = parse_args() - - -DEFAULT_OPTIMAL_SUITE = [ - 'agricola-opt18-strips', 'airport', 'barman-opt11-strips', - 'barman-opt14-strips', 'blocks', 'childsnack-opt14-strips', - 'data-network-opt18-strips', 'depot', 'driverlog', - 'elevators-opt08-strips', 'elevators-opt11-strips', - 'floortile-opt11-strips', 'floortile-opt14-strips', 'freecell', - 'ged-opt14-strips', 'grid', 'gripper', 'hiking-opt14-strips', - 'logistics00', 'logistics98', 'miconic', 'movie', 'mprime', - 'mystery', 'nomystery-opt11-strips', 'openstacks-opt08-strips', - 'openstacks-opt11-strips', 'openstacks-opt14-strips', - 'openstacks-strips', 'organic-synthesis-opt18-strips', - 'organic-synthesis-split-opt18-strips', 'parcprinter-08-strips', - 'parcprinter-opt11-strips', 'parking-opt11-strips', - 'parking-opt14-strips', 'pathways-noneg', 'pegsol-08-strips', - 'pegsol-opt11-strips', 'petri-net-alignment-opt18-strips', - 'pipesworld-notankage', 'pipesworld-tankage', 'psr-small', 'rovers', - 
'satellite', 'scanalyzer-08-strips', 'scanalyzer-opt11-strips', - 'snake-opt18-strips', 'sokoban-opt08-strips', - 'sokoban-opt11-strips', 'spider-opt18-strips', 'storage', - 'termes-opt18-strips', 'tetris-opt14-strips', - 'tidybot-opt11-strips', 'tidybot-opt14-strips', 'tpp', - 'transport-opt08-strips', 'transport-opt11-strips', - 'transport-opt14-strips', 'trucks-strips', 'visitall-opt11-strips', - 'visitall-opt14-strips', 'woodworking-opt08-strips', - 'woodworking-opt11-strips', 'zenotravel'] - -DEFAULT_SATISFICING_SUITE = [ - 'agricola-sat18-strips', 'airport', 'assembly', - 'barman-sat11-strips', 'barman-sat14-strips', 'blocks', - 'caldera-sat18-adl', 'caldera-split-sat18-adl', 'cavediving-14-adl', - 'childsnack-sat14-strips', 'citycar-sat14-adl', - 'data-network-sat18-strips', 'depot', 'driverlog', - 'elevators-sat08-strips', 'elevators-sat11-strips', - 'flashfill-sat18-adl', 'floortile-sat11-strips', - 'floortile-sat14-strips', 'freecell', 'ged-sat14-strips', 'grid', - 'gripper', 'hiking-sat14-strips', 'logistics00', 'logistics98', - 'maintenance-sat14-adl', 'miconic', 'miconic-fulladl', - 'miconic-simpleadl', 'movie', 'mprime', 'mystery', - 'nomystery-sat11-strips', 'nurikabe-sat18-adl', 'openstacks', - 'openstacks-sat08-adl', 'openstacks-sat08-strips', - 'openstacks-sat11-strips', 'openstacks-sat14-strips', - 'openstacks-strips', 'optical-telegraphs', - 'organic-synthesis-sat18-strips', - 'organic-synthesis-split-sat18-strips', 'parcprinter-08-strips', - 'parcprinter-sat11-strips', 'parking-sat11-strips', - 'parking-sat14-strips', 'pathways', 'pathways-noneg', - 'pegsol-08-strips', 'pegsol-sat11-strips', 'philosophers', - 'pipesworld-notankage', 'pipesworld-tankage', 'psr-large', - 'psr-middle', 'psr-small', 'rovers', 'satellite', - 'scanalyzer-08-strips', 'scanalyzer-sat11-strips', 'schedule', - 'settlers-sat18-adl', 'snake-sat18-strips', 'sokoban-sat08-strips', - 'sokoban-sat11-strips', 'spider-sat18-strips', 'storage', - 'termes-sat18-strips', 
'tetris-sat14-strips', - 'thoughtful-sat14-strips', 'tidybot-sat11-strips', 'tpp', - 'transport-sat08-strips', 'transport-sat11-strips', - 'transport-sat14-strips', 'trucks', 'trucks-strips', - 'visitall-sat11-strips', 'visitall-sat14-strips', - 'woodworking-sat08-strips', 'woodworking-sat11-strips', - 'zenotravel'] - - -def get_script(): - """Get file name of main script.""" - return tools.get_script_path() - - -def get_script_dir(): - """Get directory of main script. - - Usually a relative directory (depends on how it was called by the user.)""" - return os.path.dirname(get_script()) - - -def get_experiment_name(): - """Get name for experiment. - - Derived from the absolute filename of the main script, e.g. - "/ham/spam/eggs.py" => "spam-eggs".""" - script = os.path.abspath(get_script()) - script_dir = os.path.basename(os.path.dirname(script)) - script_base = os.path.splitext(os.path.basename(script))[0] - return "%s-%s" % (script_dir, script_base) - - -def get_data_dir(): - """Get data dir for the experiment. - - This is the subdirectory "data" of the directory containing - the main script.""" - return os.path.join(get_script_dir(), "data", get_experiment_name()) - - -def get_repo_base(): - """Get base directory of the repository, as an absolute path. - - Search upwards in the directory tree from the main script until a - directory with a subdirectory named ".hg" is found. 
- - Abort if the repo base cannot be found.""" - path = os.path.abspath(get_script_dir()) - while os.path.dirname(path) != path: - if os.path.exists(os.path.join(path, ".hg")): - return path - path = os.path.dirname(path) - sys.exit("repo base could not be found") - - -def is_running_on_cluster(): - node = platform.node() - return node.endswith(".scicore.unibas.ch") or node.endswith(".cluster.bc2.ch") - - -def is_test_run(): - return ARGS.test_run == "yes" or ( - ARGS.test_run == "auto" and not is_running_on_cluster()) - - -def get_algo_nick(revision, config_nick): - return "{revision}-{config_nick}".format(**locals()) - - -class IssueConfig(object): - """Hold information about a planner configuration. - - See FastDownwardExperiment.add_algorithm() for documentation of the - constructor's options. - - """ - def __init__(self, nick, component_options, - build_options=None, driver_options=None): - self.nick = nick - self.component_options = component_options - self.build_options = build_options - self.driver_options = driver_options - - -class IssueExperiment(FastDownwardExperiment): - """Subclass of FastDownwardExperiment with some convenience features.""" - - DEFAULT_TEST_SUITE = ["depot:p01.pddl", "gripper:prob01.pddl"] - - DEFAULT_TABLE_ATTRIBUTES = [ - "cost", - "coverage", - "error", - "evaluations", - "expansions", - "expansions_until_last_jump", - "generated", - "memory", - "planner_memory", - "planner_time", - "quality", - "run_dir", - "score_evaluations", - "score_expansions", - "score_generated", - "score_memory", - "score_search_time", - "score_total_time", - "search_time", - "total_time", - ] - - DEFAULT_SCATTER_PLOT_ATTRIBUTES = [ - "evaluations", - "expansions", - "expansions_until_last_jump", - "initial_h_value", - "memory", - "search_time", - "total_time", - ] - - PORTFOLIO_ATTRIBUTES = [ - "cost", - "coverage", - "error", - "plan_length", - "run_dir", - ] - - def __init__(self, revisions=None, configs=None, path=None, **kwargs): - """ - - You can 
either specify both *revisions* and *configs* or none - of them. If they are omitted, you will need to call - exp.add_algorithm() manually. - - If *revisions* is given, it must be a non-empty list of - revision identifiers, which specify which planner versions to - use in the experiment. The same versions are used for - translator, preprocessor and search. :: - - IssueExperiment(revisions=["issue123", "4b3d581643"], ...) - - If *configs* is given, it must be a non-empty list of - IssueConfig objects. :: - - IssueExperiment(..., configs=[ - IssueConfig("ff", ["--search", "eager_greedy(ff())"]), - IssueConfig( - "lama", [], - driver_options=["--alias", "seq-sat-lama-2011"]), - ]) - - If *path* is specified, it must be the path to where the - experiment should be built (e.g. - /home/john/experiments/issue123/exp01/). If omitted, the - experiment path is derived automatically from the main - script's filename. Example:: - - script = experiments/issue123/exp01.py --> - path = experiments/issue123/data/issue123-exp01/ - - """ - - path = path or get_data_dir() - - FastDownwardExperiment.__init__(self, path=path, **kwargs) - - if (revisions and not configs) or (not revisions and configs): - raise ValueError( - "please provide either both or none of revisions and configs") - - for rev in revisions: - for config in configs: - self.add_algorithm( - get_algo_nick(rev, config.nick), - get_repo_base(), - rev, - config.component_options, - build_options=config.build_options, - driver_options=config.driver_options) - - self._revisions = revisions - self._configs = configs - - @classmethod - def _is_portfolio(cls, config_nick): - return "fdss" in config_nick - - @classmethod - def get_supported_attributes(cls, config_nick, attributes): - if cls._is_portfolio(config_nick): - return [attr for attr in attributes - if attr in cls.PORTFOLIO_ATTRIBUTES] - return attributes - - def add_absolute_report_step(self, **kwargs): - """Add step that makes an absolute report. 
- - Absolute reports are useful for experiments that don't compare - revisions. - - The report is written to the experiment evaluation directory. - - All *kwargs* will be passed to the AbsoluteReport class. If the - keyword argument *attributes* is not specified, a default list - of attributes is used. :: - - exp.add_absolute_report_step(attributes=["coverage"]) - - """ - kwargs.setdefault("attributes", self.DEFAULT_TABLE_ATTRIBUTES) - report = AbsoluteReport(**kwargs) - outfile = os.path.join( - self.eval_dir, - get_experiment_name() + "." + report.output_format) - self.add_report(report, outfile=outfile) - self.add_step( - 'publish-absolute-report', subprocess.call, ['publish', outfile]) - - def add_comparison_table_step(self, **kwargs): - """Add a step that makes pairwise revision comparisons. - - Create comparative reports for all pairs of Fast Downward - revisions. Each report pairs up the runs of the same config and - lists the two absolute attribute values and their difference - for all attributes in kwargs["attributes"]. - - All *kwargs* will be passed to the CompareConfigsReport class. - If the keyword argument *attributes* is not specified, a - default list of attributes is used. 
:: - - exp.add_comparison_table_step(attributes=["coverage"]) - - """ - kwargs.setdefault("attributes", self.DEFAULT_TABLE_ATTRIBUTES) - - def make_comparison_tables(): - for rev1, rev2 in itertools.combinations(self._revisions, 2): - compared_configs = [] - for config in self._configs: - config_nick = config.nick - compared_configs.append( - ("%s-%s" % (rev1, config_nick), - "%s-%s" % (rev2, config_nick), - "Diff (%s)" % config_nick)) - report = ComparativeReport(compared_configs, **kwargs) - outfile = os.path.join( - self.eval_dir, - "%s-%s-%s-compare.%s" % ( - self.name, rev1, rev2, report.output_format)) - report(self.eval_dir, outfile) - - def publish_comparison_tables(): - for rev1, rev2 in itertools.combinations(self._revisions, 2): - outfile = os.path.join( - self.eval_dir, - "%s-%s-%s-compare.html" % (self.name, rev1, rev2)) - subprocess.call(["publish", outfile]) - - self.add_step("make-comparison-tables", make_comparison_tables) - self.add_step( - "publish-comparison-tables", publish_comparison_tables) - - def add_scatter_plot_step(self, relative=False, attributes=None): - """Add step creating (relative) scatter plots for all revision pairs. - - Create a scatter plot for each combination of attribute, - configuration and revisions pair. If *attributes* is not - specified, a list of common scatter plot attributes is used. - For portfolios all attributes except "cost", "coverage" and - "plan_length" will be ignored. 
:: - - exp.add_scatter_plot_step(attributes=["expansions"]) - - """ - if relative: - report_class = RelativeScatterPlotReport - scatter_dir = os.path.join(self.eval_dir, "scatter-relative") - step_name = "make-relative-scatter-plots" - else: - report_class = ScatterPlotReport - scatter_dir = os.path.join(self.eval_dir, "scatter-absolute") - step_name = "make-absolute-scatter-plots" - if attributes is None: - attributes = self.DEFAULT_SCATTER_PLOT_ATTRIBUTES - - def make_scatter_plot(config_nick, rev1, rev2, attribute): - name = "-".join([self.name, rev1, rev2, attribute, config_nick]) - print "Make scatter plot for", name - algo1 = "{}-{}".format(rev1, config_nick) - algo2 = "{}-{}".format(rev2, config_nick) - report = report_class( - filter_algorithm=[algo1, algo2], - attributes=[attribute], - get_category=lambda run1, run2: run1["domain"], - # legend_location=(1.3, 0.5), - ) - report( - self.eval_dir, - os.path.join(scatter_dir, rev1 + "-" + rev2, name)) - - def make_scatter_plots(): - for config in self._configs: - for rev1, rev2 in itertools.combinations(self._revisions, 2): - for attribute in self.get_supported_attributes( - config.nick, attributes): - make_scatter_plot(config.nick, rev1, rev2, attribute) - - self.add_step(step_name, make_scatter_plots) diff --git a/experiments/issue851/generalscatter.py b/experiments/issue851/generalscatter.py deleted file mode 100755 index c7ce0e75a8..0000000000 --- a/experiments/issue851/generalscatter.py +++ /dev/null @@ -1,305 +0,0 @@ -#! 
/usr/bin/env python -# -*- coding: utf-8 -*- - -from collections import defaultdict -import logging -import math -import os - -from lab import tools - -from downward.reports.plot import MatplotlibPlot, Matplotlib, PgfPlots, \ - PlotReport, MIN_AXIS - - -class ScatterMatplotlib(Matplotlib): - @classmethod - def _plot(cls, report, axes, categories, styles): - # Display grid - axes.grid(b=True, linestyle='-', color='0.75') - - has_points = False - # Generate the scatter plots - for category, coords in sorted(categories.items()): - X, Y = zip(*coords) - axes.scatter(X, Y, s=42, label=category, **styles[category]) - if X and Y: - has_points = True - - if report.xscale == 'linear' or report.yscale == 'linear': - # TODO: assert that both are linear or log - plot_size = max(report.x_missing_val * 1.01, report.y_missing_val * 1.01) - else: - plot_size = max(report.x_missing_val * 1.5, report.y_missing_val * 1.5) - - # Plot a diagonal black line. Starting at (0,0) often raises errors. - axes.plot([0.001, plot_size], [0.001, plot_size], 'k') - - axes.set_xlim(report.xlim_left or -1, report.xlim_right or plot_size) - axes.set_ylim(report.ylim_bottom or -1, report.ylim_top or plot_size) - # axes.set_xlim(report.xlim_left, report.xlim_right) - # axes.set_ylim(report.ylim_bottom, report.ylim_top) - - for axis in [axes.xaxis, axes.yaxis]: - # MatplotlibPlot.change_axis_formatter( - # axis, report.missing_val if report.show_missing else None) - MatplotlibPlot.change_axis_formatter(axes.xaxis, - report.x_missing_val if report.show_missing else None) - MatplotlibPlot.change_axis_formatter(axes.yaxis, - report.y_missing_val if report.show_missing else None) - return has_points - - -class ScatterPgfPlots(PgfPlots): - @classmethod - def _format_coord(cls, coord): - def format_value(v): - return str(v) if isinstance(v, int) else '%f' % v - return '(%s, %s)' % (format_value(coord[0]), format_value(coord[1])) - - @classmethod - def _get_plot(cls, report): - lines = [] - options = 
cls._get_axis_options(report) - lines.append('\\begin{axis}[%s]' % cls._format_options(options)) - for category, coords in sorted(report.categories.items()): - plot = {'only marks': True} - lines.append( - '\\addplot+[%s] coordinates {\n%s\n};' % ( - cls._format_options(plot), - ' '.join(cls._format_coord(c) for c in coords))) - if category: - lines.append('\\addlegendentry{%s}' % category) - elif report.has_multiple_categories: - # None is treated as the default category if using multiple - # categories. Add a corresponding entry to the legend. - lines.append('\\addlegendentry{default}') - # Add black line. - start = min(report.min_x, report.min_y) - if report.xlim_left is not None: - start = min(start, report.xlim_left) - if report.ylim_bottom is not None: - start = min(start, report.ylim_bottom) - end = max(report.max_x, report.max_y) - if report.xlim_right: - end = max(end, report.xlim_right) - if report.ylim_top: - end = max(end, report.ylim_top) - if report.show_missing: - end = max(end, report.missing_val) - lines.append( - '\\addplot[color=black] coordinates {(%f, %f) (%d, %d)};' % - (start, start, end, end)) - lines.append('\\end{axis}') - return lines - - @classmethod - def _get_axis_options(cls, report): - opts = PgfPlots._get_axis_options(report) - # Add line for missing values. - for axis in ['x', 'y']: - opts['extra %s ticks' % axis] = report.missing_val - opts['extra %s tick style' % axis] = 'grid=major' - return opts - -class GeneralScatterPlotReport(PlotReport): - """ - Generate a scatter plot for a specific attribute. - """ - def __init__(self, x_algo, y_algo, x_attribute, y_attribute, show_missing=True, get_category=None, **kwargs): - """ - See :class:`.PlotReport` for inherited arguments. - - The keyword argument *attributes* must contain exactly one - attribute. - - Use the *filter_algorithm* keyword argument to select exactly - two algorithms. 
- - If only one of the two algorithms has a value for a run, only - add a coordinate if *show_missing* is True. - - *get_category* can be a function that takes **two** runs - (dictionaries of properties) and returns a category name. This - name is used to group the points in the plot. If there is more - than one group, a legend is automatically added. Runs for which - this function returns None are shown in a default category and - are not contained in the legend. For example, to group by - domain: - - >>> def domain_as_category(run1, run2): - ... # run2['domain'] has the same value, because we always - ... # compare two runs of the same problem. - ... return run1['domain'] - - Example grouping by difficulty: - - >>> def improvement(run1, run2): - ... time1 = run1.get('search_time', 1800) - ... time2 = run2.get('search_time', 1800) - ... if time1 > time2: - ... return 'better' - ... if time1 == time2: - ... return 'equal' - ... return 'worse' - - >>> from downward.experiment import FastDownwardExperiment - >>> exp = FastDownwardExperiment() - >>> exp.add_report(ScatterPlotReport( - ... attributes=['search_time'], - ... get_category=improvement)) - - Example comparing the number of expanded states for two - algorithms: - - >>> exp.add_report(ScatterPlotReport( - ... attributes=["expansions_until_last_jump"], - ... filter_algorithm=["algorithm-1", "algorithm-2"], - ... get_category=domain_as_category, - ... format="png", # Use "tex" for pgfplots output. - ... ), - ... name="scatterplot-expansions") - - """ - # If the size has not been set explicitly, make it a square. - matplotlib_options = kwargs.get('matplotlib_options', {}) - matplotlib_options.setdefault('figure.figsize', [8, 8]) - kwargs['matplotlib_options'] = matplotlib_options - PlotReport.__init__(self, **kwargs) - if not self.attribute: - logging.critical('ScatterPlotReport needs exactly one attribute') - # By default all values are in the same category. 
- self.get_category = get_category or (lambda run1, run2: None) - self.show_missing = show_missing - self.xlim_left = self.xlim_left or MIN_AXIS - self.ylim_bottom = self.ylim_bottom or MIN_AXIS - if self.output_format == 'tex': - self.writer = ScatterPgfPlots - else: - self.writer = ScatterMatplotlib - self.x_algo = x_algo - self.y_algo = y_algo - self.x_attribute = x_attribute - self.y_attribute = y_attribute - - def _set_scales(self, xscale, yscale): - PlotReport._set_scales(self, xscale or self.attribute.scale or 'log', yscale) - if self.xscale != self.yscale: - logging.critical('Scatterplots must use the same scale on both axes.') - - def _get_missing_val(self, max_value, scale): - """ - Separate the missing values by plotting them at (max_value * 10) - rounded to the next power of 10. - """ - assert max_value is not None - # HACK! - max_value = 1800 - if scale == 'linear': - return max_value * 1.1 - return int(10 ** math.ceil(math.log10(max_value))) - - def _handle_none_values(self, X, Y, replacement_x, replacement_y): - assert len(X) == len(Y), (X, Y) - if self.show_missing: - return ([x if x is not None else replacement_x for x in X], - [y if y is not None else replacement_y for y in Y]) - return zip(*[(x, y) for x, y in zip(X, Y) if x is not None and y is not None]) - - def _fill_categories(self, runs): - # We discard the *runs* parameter. 
- # Map category names to value tuples - categories = defaultdict(list) - x_count = 0 - y_count = 0 - x_none_count = 0 - y_none_count = 0 - for (domain, problem), runs in self.problem_runs.items(): - run1 = next((run for run in runs if run['algorithm'] == self.x_algo), None) - run2 = next((run for run in runs if run['algorithm'] == self.y_algo), None) - if run1 is None or run2 is None: - continue - assert (run1['algorithm'] == self.x_algo and - run2['algorithm'] == self.y_algo) - val1 = run1.get(self.x_attribute) - val2 = run2.get(self.y_attribute) - x_count += 1 - y_count += 1 - if val1 is None: - x_none_count += 1 - if val2 is None: - y_none_count += 1 - # print val1, val2 - if val1 is None and val2 is None: - continue - category = self.get_category(run1, run2) - categories[category].append((val1, val2)) - # print x_count, y_count - # print x_none_count, y_none_count - # print len(categories[None]) - # print categories[None] - return categories - - def _get_limit(self, varlist, limit_type): - assert limit_type == 'max' or limit_type == 'min' - varlist = [x for x in varlist if x is not None] - if(limit_type == 'max'): - return max(varlist) - else: - return min(varlist) - - def _get_plot_size(self, missing_val, scale): - if scale == 'linear': - return missing_val * 1.01 - else: - return missing_val * 1.25 - - def _prepare_categories(self, categories): - categories = PlotReport._prepare_categories(self, categories) - - # Find max-value to fit plot and to draw missing values. 
- # self.missing_val = self._get_missing_val(max(self.max_x, self.max_y)) - self.x_missing_val = self._get_missing_val(self.max_x, self.xscale) - self.y_missing_val = self._get_missing_val(self.max_y, self.yscale) - # print self.x_missing_val, self.y_missing_val - - # set minima - self.xlim_left = self._get_limit([self.xlim_left, self.min_x],'min') - self.ylim_bottom = self._get_limit([self.ylim_bottom, self.min_y],'min') - - # set maxima - x_plot_size = y_plot_size = None - if self.show_missing: - x_plot_size = self._get_plot_size(self.x_missing_val, self.xscale) - y_plot_size = self._get_plot_size(self.y_missing_val, self.yscale) - self.xlim_right = self._get_limit([self.xlim_right, self.max_x, x_plot_size], 'max') - self.ylim_top = self._get_limit([self.ylim_top, self.max_y, y_plot_size], 'max') - - # self.diagonal_start = self.diagonal_end = None - # if self.show_diagonal: - # self.diagonal_start = max(self.xlim_left, self.ylim_bottom) - # self.diagonal_end = min(self.xlim_right, self.ylim_top) - - new_categories = {} - for category, coords in categories.items(): - X, Y = zip(*coords) - # X, Y = self._handle_none_values(X, Y, self.missing_val) - X, Y = self._handle_none_values(X, Y, self.x_missing_val, self.y_missing_val) - coords = zip(X, Y) - new_categories[category] = coords - # print len(new_categories[None]) - # print new_categories[None] - return new_categories - - def write(self): - if not (len(self.algorithms) == 1 and self.x_algo == self.algorithms[0] and self.y_algo == self.algorithms[0]): - logging.critical( - 'Scatter plots need exactly 1 algorithm that must match x_algo and y_algo: %s, %s, %s' % (self.algorithms, self.x_algo, self.y_algo)) - self.xlabel = self.xlabel or self.x_algo + ": " + self.x_attribute - self.ylabel = self.ylabel or self.y_algo + ": " + self.y_attribute - - suffix = '.' 
+ self.output_format - if not self.outfile.endswith(suffix): - self.outfile += suffix - tools.makedirs(os.path.dirname(self.outfile)) - self._write_plot(self.runs.values(), self.outfile) diff --git a/experiments/issue851/ms-parser.py b/experiments/issue851/ms-parser.py deleted file mode 100755 index 3e868d6cfd..0000000000 --- a/experiments/issue851/ms-parser.py +++ /dev/null @@ -1,70 +0,0 @@ -#! /usr/bin/env python - -from lab.parser import Parser - -parser = Parser() -parser.add_pattern('ms_final_size', 'Final transition system size: (\d+)', required=False, type=int) -parser.add_pattern('ms_construction_time', 'Merge-and-shrink algorithm runtime: (.+)s', required=False, type=float) -parser.add_pattern('ms_atomic_construction_time', 't=(.+)s \(after computation of atomic transition systems\)', required=False, type=float) -parser.add_pattern('ms_memory_delta', 'Final peak memory increase of merge-and-shrink computation: (\d+) KB', required=False, type=int) - -def check_ms_constructed(content, props): - ms_construction_time = props.get('ms_construction_time') - abstraction_constructed = False - if ms_construction_time is not None: - abstraction_constructed = True - props['ms_abstraction_constructed'] = abstraction_constructed - -parser.add_function(check_ms_constructed) - -def check_atomic_fts_constructed(content, props): - ms_atomic_construction_time = props.get('ms_atomic_construction_time') - ms_atomic_fts_constructed = False - if ms_atomic_construction_time is not None: - ms_atomic_fts_constructed = True - props['ms_atomic_fts_constructed'] = ms_atomic_fts_constructed - -parser.add_function(check_atomic_fts_constructed) - -def check_planner_exit_reason(content, props): - ms_abstraction_constructed = props.get('ms_abstraction_constructed') - error = props.get('error') - if error != 'success' and error != 'timeout' and error != 'out-of-memory': - print 'error: %s' % error - return - - # Check whether merge-and-shrink computation or search ran out of - # time or 
memory. - ms_out_of_time = False - ms_out_of_memory = False - search_out_of_time = False - search_out_of_memory = False - if ms_abstraction_constructed == False: - if error == 'timeout': - ms_out_of_time = True - elif error == 'out-of-memory': - ms_out_of_memory = True - elif ms_abstraction_constructed == True: - if error == 'timeout': - search_out_of_time = True - elif error == 'out-of-memory': - search_out_of_memory = True - props['ms_out_of_time'] = ms_out_of_time - props['ms_out_of_memory'] = ms_out_of_memory - props['search_out_of_time'] = search_out_of_time - props['search_out_of_memory'] = search_out_of_memory - -parser.add_function(check_planner_exit_reason) - -def check_perfect_heuristic(content, props): - plan_length = props.get('plan_length') - expansions = props.get('expansions') - if plan_length != None: - perfect_heuristic = False - if plan_length + 1 == expansions: - perfect_heuristic = True - props['perfect_heuristic'] = perfect_heuristic - -parser.add_function(check_perfect_heuristic) - -parser.parse() diff --git a/experiments/issue851/relativescatter.py b/experiments/issue851/relativescatter.py deleted file mode 100644 index f74cb6e721..0000000000 --- a/experiments/issue851/relativescatter.py +++ /dev/null @@ -1,105 +0,0 @@ -# -*- coding: utf-8 -*- - -from collections import defaultdict - -from matplotlib import ticker - -from downward.reports.scatter import ScatterPlotReport -from downward.reports.plot import PlotReport, Matplotlib, MatplotlibPlot - - -# TODO: handle outliers - -# TODO: this is mostly copied from ScatterMatplotlib (scatter.py) -class RelativeScatterMatplotlib(Matplotlib): - @classmethod - def _plot(cls, report, axes, categories, styles): - # Display grid - axes.grid(b=True, linestyle='-', color='0.75') - - has_points = False - # Generate the scatter plots - for category, coords in sorted(categories.items()): - X, Y = zip(*coords) - axes.scatter(X, Y, s=42, label=category, **styles[category]) - if X and Y: - has_points = True - - 
if report.xscale == 'linear' or report.yscale == 'linear': - plot_size = report.missing_val * 1.01 - else: - plot_size = report.missing_val * 1.25 - - # make 5 ticks above and below 1 - yticks = [] - tick_step = report.ylim_top**(1/5.0) - for i in xrange(-5, 6): - yticks.append(tick_step**i) - axes.set_yticks(yticks) - axes.get_yaxis().set_major_formatter(ticker.ScalarFormatter()) - - axes.set_xlim(report.xlim_left or -1, report.xlim_right or plot_size) - axes.set_ylim(report.ylim_bottom or -1, report.ylim_top or plot_size) - - for axis in [axes.xaxis, axes.yaxis]: - MatplotlibPlot.change_axis_formatter( - axis, - report.missing_val if report.show_missing else None) - return has_points - - -class RelativeScatterPlotReport(ScatterPlotReport): - """ - Generate a scatter plot that shows a relative comparison of two - algorithms with regard to the given attribute. The attribute value - of algorithm 1 is shown on the x-axis and the relation to the value - of algorithm 2 on the y-axis. - """ - - def __init__(self, show_missing=True, get_category=None, **kwargs): - ScatterPlotReport.__init__(self, show_missing, get_category, **kwargs) - if self.output_format == 'tex': - raise "not supported" - else: - self.writer = RelativeScatterMatplotlib - - def _fill_categories(self, runs): - # We discard the *runs* parameter. 
- # Map category names to value tuples - categories = defaultdict(list) - self.ylim_bottom = 2 - self.ylim_top = 0.5 - self.xlim_left = float("inf") - for (domain, problem), runs in self.problem_runs.items(): - if len(runs) != 2: - continue - run1, run2 = runs - assert (run1['algorithm'] == self.algorithms[0] and - run2['algorithm'] == self.algorithms[1]) - val1 = run1.get(self.attribute) - val2 = run2.get(self.attribute) - if val1 is None or val2 is None: - continue - category = self.get_category(run1, run2) - assert val1 > 0, (domain, problem, self.algorithms[0], val1) - assert val2 > 0, (domain, problem, self.algorithms[1], val2) - x = val1 - y = val2 / float(val1) - - categories[category].append((x, y)) - - self.ylim_top = max(self.ylim_top, y) - self.ylim_bottom = min(self.ylim_bottom, y) - self.xlim_left = min(self.xlim_left, x) - - # center around 1 - if self.ylim_bottom < 1: - self.ylim_top = max(self.ylim_top, 1 / float(self.ylim_bottom)) - if self.ylim_top > 1: - self.ylim_bottom = min(self.ylim_bottom, 1 / float(self.ylim_top)) - return categories - - def _set_scales(self, xscale, yscale): - # ScatterPlot uses log-scaling on the x-axis by default. - PlotReport._set_scales( - self, xscale or self.attribute.scale or 'log', 'log') diff --git a/experiments/issue851/v1.py b/experiments/issue851/v1.py deleted file mode 100755 index 44f2e9d83a..0000000000 --- a/experiments/issue851/v1.py +++ /dev/null @@ -1,122 +0,0 @@ -#! 
/usr/bin/env python -# -*- coding: utf-8 -*- - -import itertools -import os - -from lab.environments import LocalEnvironment, BaselSlurmEnvironment -from lab.reports import Attribute, geometric_mean - -from downward.reports.compare import ComparativeReport - -import common_setup -from common_setup import IssueConfig, IssueExperiment -from generalscatter import GeneralScatterPlotReport -from relativescatter import RelativeScatterPlotReport - -DIR = os.path.dirname(os.path.abspath(__file__)) -SCRIPT_NAME = os.path.splitext(os.path.basename(__file__))[0] -BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"] -REVISIONS = ["issue851-base", "issue851-v1"] -BUILDS = ["release32"] -CONFIG_NICKS = [ - ('dfp-b50k', ['--search', 'astar(merge_and_shrink(merge_strategy=merge_stateless(merge_selector=score_based_filtering(scoring_functions=[goal_relevance,dfp,total_order])),shrink_strategy=shrink_bisimulation(greedy=false),label_reduction=exact(before_shrinking=true,before_merging=false),max_states=50000,threshold_before_merge=1))']), - ('rl-b50k', ['--search', 'astar(merge_and_shrink(merge_strategy=merge_precomputed(merge_tree=linear(variable_order=reverse_level)),shrink_strategy=shrink_bisimulation(greedy=false),label_reduction=exact(before_shrinking=true,before_merging=false),max_states=50000,threshold_before_merge=1))']), - ('sccs-dfp-b50k', ['--search', 'astar(merge_and_shrink(shrink_strategy=shrink_bisimulation(greedy=false),merge_strategy=merge_sccs(order_of_sccs=topological,merge_selector=score_based_filtering(scoring_functions=[goal_relevance,dfp,total_order(atomic_ts_order=reverse_level,product_ts_order=new_to_old,atomic_before_product=false)])),label_reduction=exact(before_shrinking=true,before_merging=false),max_states=50000,threshold_before_merge=1))']), -] -CONFIGS = [ - IssueConfig( - config_nick, - config, - build_options=[build], - driver_options=["--build", build]) - for build in BUILDS - for config_nick, config in CONFIG_NICKS -] - -SUITE = 
common_setup.DEFAULT_OPTIMAL_SUITE -ENVIRONMENT = BaselSlurmEnvironment( - partition="infai_2", - email="silvan.sievers@unibas.ch", - export=["PATH", "DOWNWARD_BENCHMARKS"]) - -if common_setup.is_test_run(): - SUITE = IssueExperiment.DEFAULT_TEST_SUITE - ENVIRONMENT = LocalEnvironment(processes=4) - -exp = IssueExperiment( - revisions=REVISIONS, - configs=CONFIGS, - environment=ENVIRONMENT, -) -exp.add_suite(BENCHMARKS_DIR, SUITE) - -exp.add_parser(exp.EXITCODE_PARSER) -exp.add_parser(exp.TRANSLATOR_PARSER) -exp.add_parser(exp.SINGLE_SEARCH_PARSER) -exp.add_parser(exp.PLANNER_PARSER) -exp.add_parser('ms-parser.py') - -exp.add_step('build', exp.build) -exp.add_step('start', exp.start_runs) -exp.add_fetcher(name='fetch') - -# planner outcome attributes -perfect_heuristic = Attribute('perfect_heuristic', absolute=True, min_wins=False) - -# m&s attributes -ms_construction_time = Attribute('ms_construction_time', absolute=False, min_wins=True, functions=[geometric_mean]) -ms_atomic_construction_time = Attribute('ms_atomic_construction_time', absolute=False, min_wins=True, functions=[geometric_mean]) -ms_abstraction_constructed = Attribute('ms_abstraction_constructed', absolute=True, min_wins=False) -ms_atomic_fts_constructed = Attribute('ms_atomic_fts_constructed', absolute=True, min_wins=False) -ms_final_size = Attribute('ms_final_size', absolute=False, min_wins=True) -ms_out_of_memory = Attribute('ms_out_of_memory', absolute=True, min_wins=True) -ms_out_of_time = Attribute('ms_out_of_time', absolute=True, min_wins=True) -search_out_of_memory = Attribute('search_out_of_memory', absolute=True, min_wins=True) -search_out_of_time = Attribute('search_out_of_time', absolute=True, min_wins=True) - -extra_attributes = [ - perfect_heuristic, - - ms_construction_time, - ms_atomic_construction_time, - ms_abstraction_constructed, - ms_atomic_fts_constructed, - ms_final_size, - ms_out_of_memory, - ms_out_of_time, - search_out_of_memory, - search_out_of_time, -] -attributes = 
exp.DEFAULT_TABLE_ATTRIBUTES -attributes.extend(extra_attributes) - -# TODO: remove this filter when re-running experiments -def check_atomic_fts_constructed(run): - ms_atomic_construction_time = run.get('ms_atomic_construction_time') - ms_atomic_fts_constructed = False - if ms_atomic_construction_time is not None: - ms_atomic_fts_constructed = True - run['ms_atomic_fts_constructed'] = ms_atomic_fts_constructed - return run - -exp.add_comparison_table_step(attributes=attributes,filter=[check_atomic_fts_constructed]) - -exp.add_scatter_plot_step(attributes=[ms_atomic_construction_time]) - -for algo_nick in ['dfp-b50k', 'rl-b50k', 'sccs-dfp-b50k']: - algo = "issue851-v1-{}".format(algo_nick) - exp.add_report( - GeneralScatterPlotReport( - x_algo = algo, - y_algo = algo, - x_attribute='ms_atomic_construction_time', - y_attribute='total_time', - filter_algorithm=[algo], - attributes=['total_time'], - get_category=lambda run1, run2: run1["domain"], - ), - outfile='{}-total_time_vs_ms_atomic_construction_time.png'.format(algo), - ) - -exp.run_steps() diff --git a/experiments/issue851/v2.py b/experiments/issue851/v2.py deleted file mode 100755 index c359a65bda..0000000000 --- a/experiments/issue851/v2.py +++ /dev/null @@ -1,114 +0,0 @@ -#! 
/usr/bin/env python -# -*- coding: utf-8 -*- - -import itertools -import os - -from lab.environments import LocalEnvironment, BaselSlurmEnvironment -from lab.reports import Attribute, geometric_mean - -from downward.reports.compare import ComparativeReport - -import common_setup -from common_setup import IssueConfig, IssueExperiment -from generalscatter import GeneralScatterPlotReport -from relativescatter import RelativeScatterPlotReport - -DIR = os.path.dirname(os.path.abspath(__file__)) -SCRIPT_NAME = os.path.splitext(os.path.basename(__file__))[0] -BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"] -REVISIONS = ["issue851-base-v2", "issue851-v2"] -BUILDS = ["release32"] -CONFIG_NICKS = [ - ('dfp-b50k', ['--search', 'astar(merge_and_shrink(merge_strategy=merge_stateless(merge_selector=score_based_filtering(scoring_functions=[goal_relevance,dfp,total_order])),shrink_strategy=shrink_bisimulation(greedy=false),label_reduction=exact(before_shrinking=true,before_merging=false),max_states=50000,threshold_before_merge=1))']), - # ('rl-b50k', ['--search', 'astar(merge_and_shrink(merge_strategy=merge_precomputed(merge_tree=linear(variable_order=reverse_level)),shrink_strategy=shrink_bisimulation(greedy=false),label_reduction=exact(before_shrinking=true,before_merging=false),max_states=50000,threshold_before_merge=1))']), - # ('sbmiasm-b50k', ['--search', 'astar(merge_and_shrink(shrink_strategy=shrink_bisimulation(greedy=false),merge_strategy=merge_stateless(merge_selector=score_based_filtering(scoring_functions=[sf_miasm(shrink_strategy=shrink_bisimulation(greedy=false),max_states=50000,threshold_before_merge=1),total_order(atomic_ts_order=reverse_level,product_ts_order=new_to_old,atomic_before_product=false)])),label_reduction=exact(before_shrinking=true,before_merging=false),max_states=50000,threshold_before_merge=1))']), - # ('sccs-dfp-b50k', ['--search', 
'astar(merge_and_shrink(shrink_strategy=shrink_bisimulation(greedy=false),merge_strategy=merge_sccs(order_of_sccs=topological,merge_selector=score_based_filtering(scoring_functions=[goal_relevance,dfp,total_order(atomic_ts_order=reverse_level,product_ts_order=new_to_old,atomic_before_product=false)])),label_reduction=exact(before_shrinking=true,before_merging=false),max_states=50000,threshold_before_merge=1))']), -] -CONFIGS = [ - IssueConfig( - config_nick, - config, - build_options=[build], - driver_options=["--build", build]) - for build in BUILDS - for config_nick, config in CONFIG_NICKS -] - -SUITE = common_setup.DEFAULT_OPTIMAL_SUITE -ENVIRONMENT = BaselSlurmEnvironment( - partition="infai_2", - email="silvan.sievers@unibas.ch", - export=["PATH", "DOWNWARD_BENCHMARKS"]) - -if common_setup.is_test_run(): - SUITE = IssueExperiment.DEFAULT_TEST_SUITE - ENVIRONMENT = LocalEnvironment(processes=4) - -exp = IssueExperiment( - revisions=REVISIONS, - configs=CONFIGS, - environment=ENVIRONMENT, -) -exp.add_suite(BENCHMARKS_DIR, SUITE) - -exp.add_parser(exp.EXITCODE_PARSER) -exp.add_parser(exp.TRANSLATOR_PARSER) -exp.add_parser(exp.SINGLE_SEARCH_PARSER) -exp.add_parser(exp.PLANNER_PARSER) -exp.add_parser('ms-parser.py') - -exp.add_step('build', exp.build) -exp.add_step('start', exp.start_runs) -exp.add_fetcher(name='fetch') - -# planner outcome attributes -perfect_heuristic = Attribute('perfect_heuristic', absolute=True, min_wins=False) - -# m&s attributes -ms_construction_time = Attribute('ms_construction_time', absolute=False, min_wins=True, functions=[geometric_mean]) -ms_atomic_construction_time = Attribute('ms_atomic_construction_time', absolute=False, min_wins=True, functions=[geometric_mean]) -ms_abstraction_constructed = Attribute('ms_abstraction_constructed', absolute=True, min_wins=False) -ms_atomic_fts_constructed = Attribute('ms_atomic_fts_constructed', absolute=True, min_wins=False) -ms_final_size = Attribute('ms_final_size', absolute=False, min_wins=True) 
-ms_out_of_memory = Attribute('ms_out_of_memory', absolute=True, min_wins=True) -ms_out_of_time = Attribute('ms_out_of_time', absolute=True, min_wins=True) -search_out_of_memory = Attribute('search_out_of_memory', absolute=True, min_wins=True) -search_out_of_time = Attribute('search_out_of_time', absolute=True, min_wins=True) - -extra_attributes = [ - perfect_heuristic, - - ms_construction_time, - ms_atomic_construction_time, - ms_abstraction_constructed, - ms_atomic_fts_constructed, - ms_final_size, - ms_out_of_memory, - ms_out_of_time, - search_out_of_memory, - search_out_of_time, -] -attributes = exp.DEFAULT_TABLE_ATTRIBUTES -attributes.extend(extra_attributes) - -exp.add_comparison_table_step(attributes=attributes) - -exp.add_scatter_plot_step(attributes=[ms_atomic_construction_time]) - -for algo_nick in ['dfp-b50k']: # 'rl-b50k', 'sbmiasm-b50k', 'sccs-dfp-b50k']: - algo = "issue851-v2-{}".format(algo_nick) - exp.add_report( - GeneralScatterPlotReport( - x_algo = algo, - y_algo = algo, - x_attribute='ms_atomic_construction_time', - y_attribute='total_time', - filter_algorithm=[algo], - attributes=['total_time'], - get_category=lambda run1, run2: run1["domain"], - ), - outfile='{}-total_time_vs_ms_atomic_construction_time.png'.format(algo), - ) - -exp.run_steps() diff --git a/experiments/issue851/v3-debug.py b/experiments/issue851/v3-debug.py deleted file mode 100755 index 3f5e81448f..0000000000 --- a/experiments/issue851/v3-debug.py +++ /dev/null @@ -1,97 +0,0 @@ -#! 
/usr/bin/env python -# -*- coding: utf-8 -*- - -import itertools -import os - -from lab.environments import LocalEnvironment, BaselSlurmEnvironment -from lab.reports import Attribute, geometric_mean - -from downward.reports.compare import ComparativeReport - -import common_setup -from common_setup import IssueConfig, IssueExperiment -from generalscatter import GeneralScatterPlotReport -from relativescatter import RelativeScatterPlotReport - -DIR = os.path.dirname(os.path.abspath(__file__)) -SCRIPT_NAME = os.path.splitext(os.path.basename(__file__))[0] -BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"] -REVISIONS = ["issue851-v3"] -BUILDS = ["debug32"] -CONFIG_NICKS = [ - ('dfp-b50k', ['--search', 'astar(merge_and_shrink(merge_strategy=merge_stateless(merge_selector=score_based_filtering(scoring_functions=[goal_relevance,dfp,total_order])),shrink_strategy=shrink_bisimulation(greedy=false),label_reduction=exact(before_shrinking=true,before_merging=false),max_states=50000,threshold_before_merge=1))']), - # ('rl-b50k', ['--search', 'astar(merge_and_shrink(merge_strategy=merge_precomputed(merge_tree=linear(variable_order=reverse_level)),shrink_strategy=shrink_bisimulation(greedy=false),label_reduction=exact(before_shrinking=true,before_merging=false),max_states=50000,threshold_before_merge=1))']), - # ('sbmiasm-b50k', ['--search', 'astar(merge_and_shrink(shrink_strategy=shrink_bisimulation(greedy=false),merge_strategy=merge_stateless(merge_selector=score_based_filtering(scoring_functions=[sf_miasm(shrink_strategy=shrink_bisimulation(greedy=false),max_states=50000,threshold_before_merge=1),total_order(atomic_ts_order=reverse_level,product_ts_order=new_to_old,atomic_before_product=false)])),label_reduction=exact(before_shrinking=true,before_merging=false),max_states=50000,threshold_before_merge=1))']), - # ('sccs-dfp-b50k', ['--search', 
'astar(merge_and_shrink(shrink_strategy=shrink_bisimulation(greedy=false),merge_strategy=merge_sccs(order_of_sccs=topological,merge_selector=score_based_filtering(scoring_functions=[goal_relevance,dfp,total_order(atomic_ts_order=reverse_level,product_ts_order=new_to_old,atomic_before_product=false)])),label_reduction=exact(before_shrinking=true,before_merging=false),max_states=50000,threshold_before_merge=1))']), -] -CONFIGS = [ - IssueConfig( - config_nick, - config, - build_options=[build], - driver_options=["--build", build]) - for build in BUILDS - for config_nick, config in CONFIG_NICKS -] - -SUITE = common_setup.DEFAULT_OPTIMAL_SUITE -ENVIRONMENT = BaselSlurmEnvironment( - partition="infai_2", - email="silvan.sievers@unibas.ch", - export=["PATH", "DOWNWARD_BENCHMARKS"]) - -if common_setup.is_test_run(): - SUITE = IssueExperiment.DEFAULT_TEST_SUITE - ENVIRONMENT = LocalEnvironment(processes=4) - -exp = IssueExperiment( - revisions=REVISIONS, - configs=CONFIGS, - environment=ENVIRONMENT, -) -exp.add_suite(BENCHMARKS_DIR, SUITE) - -exp.add_parser(exp.EXITCODE_PARSER) -exp.add_parser(exp.TRANSLATOR_PARSER) -exp.add_parser(exp.SINGLE_SEARCH_PARSER) -exp.add_parser(exp.PLANNER_PARSER) -exp.add_parser('ms-parser.py') - -exp.add_step('build', exp.build) -exp.add_step('start', exp.start_runs) -exp.add_fetcher(name='fetch') - -# planner outcome attributes -perfect_heuristic = Attribute('perfect_heuristic', absolute=True, min_wins=False) - -# m&s attributes -ms_construction_time = Attribute('ms_construction_time', absolute=False, min_wins=True, functions=[geometric_mean]) -ms_atomic_construction_time = Attribute('ms_atomic_construction_time', absolute=False, min_wins=True, functions=[geometric_mean]) -ms_abstraction_constructed = Attribute('ms_abstraction_constructed', absolute=True, min_wins=False) -ms_atomic_fts_constructed = Attribute('ms_atomic_fts_constructed', absolute=True, min_wins=False) -ms_final_size = Attribute('ms_final_size', absolute=False, min_wins=True) 
-ms_out_of_memory = Attribute('ms_out_of_memory', absolute=True, min_wins=True) -ms_out_of_time = Attribute('ms_out_of_time', absolute=True, min_wins=True) -search_out_of_memory = Attribute('search_out_of_memory', absolute=True, min_wins=True) -search_out_of_time = Attribute('search_out_of_time', absolute=True, min_wins=True) - -extra_attributes = [ - perfect_heuristic, - - ms_construction_time, - ms_atomic_construction_time, - ms_abstraction_constructed, - ms_atomic_fts_constructed, - ms_final_size, - ms_out_of_memory, - ms_out_of_time, - search_out_of_memory, - search_out_of_time, -] -attributes = exp.DEFAULT_TABLE_ATTRIBUTES -attributes.extend(extra_attributes) - -exp.add_absolute_report_step(attributes=attributes) - -exp.run_steps() diff --git a/experiments/issue851/v3.py b/experiments/issue851/v3.py deleted file mode 100755 index eb8639b6c8..0000000000 --- a/experiments/issue851/v3.py +++ /dev/null @@ -1,114 +0,0 @@ -#! /usr/bin/env python -# -*- coding: utf-8 -*- - -import itertools -import os - -from lab.environments import LocalEnvironment, BaselSlurmEnvironment -from lab.reports import Attribute, geometric_mean - -from downward.reports.compare import ComparativeReport - -import common_setup -from common_setup import IssueConfig, IssueExperiment -from generalscatter import GeneralScatterPlotReport -from relativescatter import RelativeScatterPlotReport - -DIR = os.path.dirname(os.path.abspath(__file__)) -SCRIPT_NAME = os.path.splitext(os.path.basename(__file__))[0] -BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"] -REVISIONS = ["issue851-base-v2", "issue851-v2", "issue851-v3"] -BUILDS = ["release32"] -CONFIG_NICKS = [ - ('dfp-b50k', ['--search', 'astar(merge_and_shrink(merge_strategy=merge_stateless(merge_selector=score_based_filtering(scoring_functions=[goal_relevance,dfp,total_order])),shrink_strategy=shrink_bisimulation(greedy=false),label_reduction=exact(before_shrinking=true,before_merging=false),max_states=50000,threshold_before_merge=1))']), - # 
('rl-b50k', ['--search', 'astar(merge_and_shrink(merge_strategy=merge_precomputed(merge_tree=linear(variable_order=reverse_level)),shrink_strategy=shrink_bisimulation(greedy=false),label_reduction=exact(before_shrinking=true,before_merging=false),max_states=50000,threshold_before_merge=1))']), - # ('sbmiasm-b50k', ['--search', 'astar(merge_and_shrink(shrink_strategy=shrink_bisimulation(greedy=false),merge_strategy=merge_stateless(merge_selector=score_based_filtering(scoring_functions=[sf_miasm(shrink_strategy=shrink_bisimulation(greedy=false),max_states=50000,threshold_before_merge=1),total_order(atomic_ts_order=reverse_level,product_ts_order=new_to_old,atomic_before_product=false)])),label_reduction=exact(before_shrinking=true,before_merging=false),max_states=50000,threshold_before_merge=1))']), - # ('sccs-dfp-b50k', ['--search', 'astar(merge_and_shrink(shrink_strategy=shrink_bisimulation(greedy=false),merge_strategy=merge_sccs(order_of_sccs=topological,merge_selector=score_based_filtering(scoring_functions=[goal_relevance,dfp,total_order(atomic_ts_order=reverse_level,product_ts_order=new_to_old,atomic_before_product=false)])),label_reduction=exact(before_shrinking=true,before_merging=false),max_states=50000,threshold_before_merge=1))']), -] -CONFIGS = [ - IssueConfig( - config_nick, - config, - build_options=[build], - driver_options=["--build", build]) - for build in BUILDS - for config_nick, config in CONFIG_NICKS -] - -SUITE = common_setup.DEFAULT_OPTIMAL_SUITE -ENVIRONMENT = BaselSlurmEnvironment( - partition="infai_2", - email="silvan.sievers@unibas.ch", - export=["PATH", "DOWNWARD_BENCHMARKS"]) - -if common_setup.is_test_run(): - SUITE = IssueExperiment.DEFAULT_TEST_SUITE - ENVIRONMENT = LocalEnvironment(processes=4) - -exp = IssueExperiment( - revisions=REVISIONS, - configs=CONFIGS, - environment=ENVIRONMENT, -) -exp.add_suite(BENCHMARKS_DIR, SUITE) - -exp.add_parser(exp.EXITCODE_PARSER) -exp.add_parser(exp.TRANSLATOR_PARSER) 
-exp.add_parser(exp.SINGLE_SEARCH_PARSER) -exp.add_parser(exp.PLANNER_PARSER) -exp.add_parser('ms-parser.py') - -exp.add_step('build', exp.build) -exp.add_step('start', exp.start_runs) -exp.add_fetcher(name='fetch') - -# planner outcome attributes -perfect_heuristic = Attribute('perfect_heuristic', absolute=True, min_wins=False) - -# m&s attributes -ms_construction_time = Attribute('ms_construction_time', absolute=False, min_wins=True, functions=[geometric_mean]) -ms_atomic_construction_time = Attribute('ms_atomic_construction_time', absolute=False, min_wins=True, functions=[geometric_mean]) -ms_abstraction_constructed = Attribute('ms_abstraction_constructed', absolute=True, min_wins=False) -ms_atomic_fts_constructed = Attribute('ms_atomic_fts_constructed', absolute=True, min_wins=False) -ms_final_size = Attribute('ms_final_size', absolute=False, min_wins=True) -ms_out_of_memory = Attribute('ms_out_of_memory', absolute=True, min_wins=True) -ms_out_of_time = Attribute('ms_out_of_time', absolute=True, min_wins=True) -search_out_of_memory = Attribute('search_out_of_memory', absolute=True, min_wins=True) -search_out_of_time = Attribute('search_out_of_time', absolute=True, min_wins=True) - -extra_attributes = [ - perfect_heuristic, - - ms_construction_time, - ms_atomic_construction_time, - ms_abstraction_constructed, - ms_atomic_fts_constructed, - ms_final_size, - ms_out_of_memory, - ms_out_of_time, - search_out_of_memory, - search_out_of_time, -] -attributes = exp.DEFAULT_TABLE_ATTRIBUTES -attributes.extend(extra_attributes) - -exp.add_comparison_table_step(attributes=attributes) - -exp.add_scatter_plot_step(attributes=[ms_atomic_construction_time]) - -for algo_nick in ['dfp-b50k']: # 'rl-b50k', 'sbmiasm-b50k', 'sccs-dfp-b50k']: - algo = "issue851-v2-{}".format(algo_nick) - exp.add_report( - GeneralScatterPlotReport( - x_algo = algo, - y_algo = algo, - x_attribute='ms_atomic_construction_time', - y_attribute='total_time', - filter_algorithm=[algo], - 
attributes=['total_time'], - get_category=lambda run1, run2: run1["domain"], - ), - outfile='{}-total_time_vs_ms_atomic_construction_time.png'.format(algo), - ) - -exp.run_steps() diff --git a/experiments/issue851/v4.py b/experiments/issue851/v4.py deleted file mode 100755 index ddee6aad4a..0000000000 --- a/experiments/issue851/v4.py +++ /dev/null @@ -1,114 +0,0 @@ -#! /usr/bin/env python -# -*- coding: utf-8 -*- - -import itertools -import os - -from lab.environments import LocalEnvironment, BaselSlurmEnvironment -from lab.reports import Attribute, geometric_mean - -from downward.reports.compare import ComparativeReport - -import common_setup -from common_setup import IssueConfig, IssueExperiment -from generalscatter import GeneralScatterPlotReport -from relativescatter import RelativeScatterPlotReport - -DIR = os.path.dirname(os.path.abspath(__file__)) -SCRIPT_NAME = os.path.splitext(os.path.basename(__file__))[0] -BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"] -REVISIONS = ["issue851-base-v2", "issue851-v3", "issue851-v4"] -BUILDS = ["release32"] -CONFIG_NICKS = [ - ('dfp-b50k', ['--search', 'astar(merge_and_shrink(merge_strategy=merge_stateless(merge_selector=score_based_filtering(scoring_functions=[goal_relevance,dfp,total_order])),shrink_strategy=shrink_bisimulation(greedy=false),label_reduction=exact(before_shrinking=true,before_merging=false),max_states=50000,threshold_before_merge=1))']), - # ('rl-b50k', ['--search', 'astar(merge_and_shrink(merge_strategy=merge_precomputed(merge_tree=linear(variable_order=reverse_level)),shrink_strategy=shrink_bisimulation(greedy=false),label_reduction=exact(before_shrinking=true,before_merging=false),max_states=50000,threshold_before_merge=1))']), - # ('sbmiasm-b50k', ['--search', 
'astar(merge_and_shrink(shrink_strategy=shrink_bisimulation(greedy=false),merge_strategy=merge_stateless(merge_selector=score_based_filtering(scoring_functions=[sf_miasm(shrink_strategy=shrink_bisimulation(greedy=false),max_states=50000,threshold_before_merge=1),total_order(atomic_ts_order=reverse_level,product_ts_order=new_to_old,atomic_before_product=false)])),label_reduction=exact(before_shrinking=true,before_merging=false),max_states=50000,threshold_before_merge=1))']), - # ('sccs-dfp-b50k', ['--search', 'astar(merge_and_shrink(shrink_strategy=shrink_bisimulation(greedy=false),merge_strategy=merge_sccs(order_of_sccs=topological,merge_selector=score_based_filtering(scoring_functions=[goal_relevance,dfp,total_order(atomic_ts_order=reverse_level,product_ts_order=new_to_old,atomic_before_product=false)])),label_reduction=exact(before_shrinking=true,before_merging=false),max_states=50000,threshold_before_merge=1))']), -] -CONFIGS = [ - IssueConfig( - config_nick, - config, - build_options=[build], - driver_options=["--build", build]) - for build in BUILDS - for config_nick, config in CONFIG_NICKS -] - -SUITE = common_setup.DEFAULT_OPTIMAL_SUITE -ENVIRONMENT = BaselSlurmEnvironment( - partition="infai_2", - email="silvan.sievers@unibas.ch", - export=["PATH", "DOWNWARD_BENCHMARKS"]) - -if common_setup.is_test_run(): - SUITE = IssueExperiment.DEFAULT_TEST_SUITE - ENVIRONMENT = LocalEnvironment(processes=4) - -exp = IssueExperiment( - revisions=REVISIONS, - configs=CONFIGS, - environment=ENVIRONMENT, -) -exp.add_suite(BENCHMARKS_DIR, SUITE) - -exp.add_parser(exp.EXITCODE_PARSER) -exp.add_parser(exp.TRANSLATOR_PARSER) -exp.add_parser(exp.SINGLE_SEARCH_PARSER) -exp.add_parser(exp.PLANNER_PARSER) -exp.add_parser('ms-parser.py') - -exp.add_step('build', exp.build) -exp.add_step('start', exp.start_runs) -exp.add_fetcher(name='fetch') - -# planner outcome attributes -perfect_heuristic = Attribute('perfect_heuristic', absolute=True, min_wins=False) - -# m&s attributes 
-ms_construction_time = Attribute('ms_construction_time', absolute=False, min_wins=True, functions=[geometric_mean]) -ms_atomic_construction_time = Attribute('ms_atomic_construction_time', absolute=False, min_wins=True, functions=[geometric_mean]) -ms_abstraction_constructed = Attribute('ms_abstraction_constructed', absolute=True, min_wins=False) -ms_atomic_fts_constructed = Attribute('ms_atomic_fts_constructed', absolute=True, min_wins=False) -ms_final_size = Attribute('ms_final_size', absolute=False, min_wins=True) -ms_out_of_memory = Attribute('ms_out_of_memory', absolute=True, min_wins=True) -ms_out_of_time = Attribute('ms_out_of_time', absolute=True, min_wins=True) -search_out_of_memory = Attribute('search_out_of_memory', absolute=True, min_wins=True) -search_out_of_time = Attribute('search_out_of_time', absolute=True, min_wins=True) - -extra_attributes = [ - perfect_heuristic, - - ms_construction_time, - ms_atomic_construction_time, - ms_abstraction_constructed, - ms_atomic_fts_constructed, - ms_final_size, - ms_out_of_memory, - ms_out_of_time, - search_out_of_memory, - search_out_of_time, -] -attributes = exp.DEFAULT_TABLE_ATTRIBUTES -attributes.extend(extra_attributes) - -exp.add_comparison_table_step(attributes=attributes) - -exp.add_scatter_plot_step(attributes=[ms_atomic_construction_time]) - -for algo_nick in ['dfp-b50k']: # 'rl-b50k', 'sbmiasm-b50k', 'sccs-dfp-b50k']: - algo = "issue851-v2-{}".format(algo_nick) - exp.add_report( - GeneralScatterPlotReport( - x_algo = algo, - y_algo = algo, - x_attribute='ms_atomic_construction_time', - y_attribute='total_time', - filter_algorithm=[algo], - attributes=['total_time'], - get_category=lambda run1, run2: run1["domain"], - ), - outfile='{}-total_time_vs_ms_atomic_construction_time.png'.format(algo), - ) - -exp.run_steps() diff --git a/experiments/issue860/common_setup.py b/experiments/issue860/common_setup.py deleted file mode 100644 index f64c5c15b3..0000000000 --- a/experiments/issue860/common_setup.py +++ 
/dev/null @@ -1,394 +0,0 @@ -# -*- coding: utf-8 -*- - -import itertools -import os -import platform -import subprocess -import sys - -from lab.experiment import ARGPARSER -from lab import tools - -from downward.experiment import FastDownwardExperiment -from downward.reports.absolute import AbsoluteReport -from downward.reports.compare import ComparativeReport -from downward.reports.scatter import ScatterPlotReport - -from relativescatter import RelativeScatterPlotReport - - -def parse_args(): - ARGPARSER.add_argument( - "--test", - choices=["yes", "no", "auto"], - default="auto", - dest="test_run", - help="test experiment locally on a small suite if --test=yes or " - "--test=auto and we are not on a cluster") - return ARGPARSER.parse_args() - -ARGS = parse_args() - - -DEFAULT_OPTIMAL_SUITE = [ - 'agricola-opt18-strips', 'airport', 'barman-opt11-strips', - 'barman-opt14-strips', 'blocks', 'childsnack-opt14-strips', - 'data-network-opt18-strips', 'depot', 'driverlog', - 'elevators-opt08-strips', 'elevators-opt11-strips', - 'floortile-opt11-strips', 'floortile-opt14-strips', 'freecell', - 'ged-opt14-strips', 'grid', 'gripper', 'hiking-opt14-strips', - 'logistics00', 'logistics98', 'miconic', 'movie', 'mprime', - 'mystery', 'nomystery-opt11-strips', 'openstacks-opt08-strips', - 'openstacks-opt11-strips', 'openstacks-opt14-strips', - 'openstacks-strips', 'organic-synthesis-opt18-strips', - 'organic-synthesis-split-opt18-strips', 'parcprinter-08-strips', - 'parcprinter-opt11-strips', 'parking-opt11-strips', - 'parking-opt14-strips', 'pathways-noneg', 'pegsol-08-strips', - 'pegsol-opt11-strips', 'petri-net-alignment-opt18-strips', - 'pipesworld-notankage', 'pipesworld-tankage', 'psr-small', 'rovers', - 'satellite', 'scanalyzer-08-strips', 'scanalyzer-opt11-strips', - 'snake-opt18-strips', 'sokoban-opt08-strips', - 'sokoban-opt11-strips', 'spider-opt18-strips', 'storage', - 'termes-opt18-strips', 'tetris-opt14-strips', - 'tidybot-opt11-strips', 'tidybot-opt14-strips', 
'tpp', - 'transport-opt08-strips', 'transport-opt11-strips', - 'transport-opt14-strips', 'trucks-strips', 'visitall-opt11-strips', - 'visitall-opt14-strips', 'woodworking-opt08-strips', - 'woodworking-opt11-strips', 'zenotravel'] - -DEFAULT_SATISFICING_SUITE = [ - 'agricola-sat18-strips', 'airport', 'assembly', - 'barman-sat11-strips', 'barman-sat14-strips', 'blocks', - 'caldera-sat18-adl', 'caldera-split-sat18-adl', 'cavediving-14-adl', - 'childsnack-sat14-strips', 'citycar-sat14-adl', - 'data-network-sat18-strips', 'depot', 'driverlog', - 'elevators-sat08-strips', 'elevators-sat11-strips', - 'flashfill-sat18-adl', 'floortile-sat11-strips', - 'floortile-sat14-strips', 'freecell', 'ged-sat14-strips', 'grid', - 'gripper', 'hiking-sat14-strips', 'logistics00', 'logistics98', - 'maintenance-sat14-adl', 'miconic', 'miconic-fulladl', - 'miconic-simpleadl', 'movie', 'mprime', 'mystery', - 'nomystery-sat11-strips', 'nurikabe-sat18-adl', 'openstacks', - 'openstacks-sat08-adl', 'openstacks-sat08-strips', - 'openstacks-sat11-strips', 'openstacks-sat14-strips', - 'openstacks-strips', 'optical-telegraphs', - 'organic-synthesis-sat18-strips', - 'organic-synthesis-split-sat18-strips', 'parcprinter-08-strips', - 'parcprinter-sat11-strips', 'parking-sat11-strips', - 'parking-sat14-strips', 'pathways', 'pathways-noneg', - 'pegsol-08-strips', 'pegsol-sat11-strips', 'philosophers', - 'pipesworld-notankage', 'pipesworld-tankage', 'psr-large', - 'psr-middle', 'psr-small', 'rovers', 'satellite', - 'scanalyzer-08-strips', 'scanalyzer-sat11-strips', 'schedule', - 'settlers-sat18-adl', 'snake-sat18-strips', 'sokoban-sat08-strips', - 'sokoban-sat11-strips', 'spider-sat18-strips', 'storage', - 'termes-sat18-strips', 'tetris-sat14-strips', - 'thoughtful-sat14-strips', 'tidybot-sat11-strips', 'tpp', - 'transport-sat08-strips', 'transport-sat11-strips', - 'transport-sat14-strips', 'trucks', 'trucks-strips', - 'visitall-sat11-strips', 'visitall-sat14-strips', - 'woodworking-sat08-strips', 
'woodworking-sat11-strips', - 'zenotravel'] - - -def get_script(): - """Get file name of main script.""" - return tools.get_script_path() - - -def get_script_dir(): - """Get directory of main script. - - Usually a relative directory (depends on how it was called by the user.)""" - return os.path.dirname(get_script()) - - -def get_experiment_name(): - """Get name for experiment. - - Derived from the absolute filename of the main script, e.g. - "/ham/spam/eggs.py" => "spam-eggs".""" - script = os.path.abspath(get_script()) - script_dir = os.path.basename(os.path.dirname(script)) - script_base = os.path.splitext(os.path.basename(script))[0] - return "%s-%s" % (script_dir, script_base) - - -def get_data_dir(): - """Get data dir for the experiment. - - This is the subdirectory "data" of the directory containing - the main script.""" - return os.path.join(get_script_dir(), "data", get_experiment_name()) - - -def get_repo_base(): - """Get base directory of the repository, as an absolute path. - - Search upwards in the directory tree from the main script until a - directory with a subdirectory named ".hg" is found. - - Abort if the repo base cannot be found.""" - path = os.path.abspath(get_script_dir()) - while os.path.dirname(path) != path: - if os.path.exists(os.path.join(path, ".hg")): - return path - path = os.path.dirname(path) - sys.exit("repo base could not be found") - - -def is_running_on_cluster(): - node = platform.node() - return node.endswith(".scicore.unibas.ch") or node.endswith(".cluster.bc2.ch") - - -def is_test_run(): - return ARGS.test_run == "yes" or ( - ARGS.test_run == "auto" and not is_running_on_cluster()) - - -def get_algo_nick(revision, config_nick): - return "{revision}-{config_nick}".format(**locals()) - - -class IssueConfig(object): - """Hold information about a planner configuration. - - See FastDownwardExperiment.add_algorithm() for documentation of the - constructor's options. 
- - """ - def __init__(self, nick, component_options, - build_options=None, driver_options=None): - self.nick = nick - self.component_options = component_options - self.build_options = build_options - self.driver_options = driver_options - - -class IssueExperiment(FastDownwardExperiment): - """Subclass of FastDownwardExperiment with some convenience features.""" - - DEFAULT_TEST_SUITE = ["depot:p01.pddl", "gripper:prob01.pddl"] - - DEFAULT_TABLE_ATTRIBUTES = [ - "cost", - "coverage", - "error", - "evaluations", - "expansions", - "expansions_until_last_jump", - "generated", - "memory", - "planner_memory", - "planner_time", - "quality", - "run_dir", - "score_evaluations", - "score_expansions", - "score_generated", - "score_memory", - "score_search_time", - "score_total_time", - "search_time", - "total_time", - ] - - DEFAULT_SCATTER_PLOT_ATTRIBUTES = [ - "evaluations", - "expansions", - "expansions_until_last_jump", - "initial_h_value", - "memory", - "search_time", - "total_time", - ] - - PORTFOLIO_ATTRIBUTES = [ - "cost", - "coverage", - "error", - "plan_length", - "run_dir", - ] - - def __init__(self, revisions=None, configs=None, path=None, **kwargs): - """ - - You can either specify both *revisions* and *configs* or none - of them. If they are omitted, you will need to call - exp.add_algorithm() manually. - - If *revisions* is given, it must be a non-empty list of - revision identifiers, which specify which planner versions to - use in the experiment. The same versions are used for - translator, preprocessor and search. :: - - IssueExperiment(revisions=["issue123", "4b3d581643"], ...) - - If *configs* is given, it must be a non-empty list of - IssueConfig objects. :: - - IssueExperiment(..., configs=[ - IssueConfig("ff", ["--search", "eager_greedy(ff())"]), - IssueConfig( - "lama", [], - driver_options=["--alias", "seq-sat-lama-2011"]), - ]) - - If *path* is specified, it must be the path to where the - experiment should be built (e.g. 
- /home/john/experiments/issue123/exp01/). If omitted, the - experiment path is derived automatically from the main - script's filename. Example:: - - script = experiments/issue123/exp01.py --> - path = experiments/issue123/data/issue123-exp01/ - - """ - - path = path or get_data_dir() - - FastDownwardExperiment.__init__(self, path=path, **kwargs) - - if (revisions and not configs) or (not revisions and configs): - raise ValueError( - "please provide either both or none of revisions and configs") - - for rev in revisions: - for config in configs: - self.add_algorithm( - get_algo_nick(rev, config.nick), - get_repo_base(), - rev, - config.component_options, - build_options=config.build_options, - driver_options=config.driver_options) - - self._revisions = revisions - self._configs = configs - - @classmethod - def _is_portfolio(cls, config_nick): - return "fdss" in config_nick - - @classmethod - def get_supported_attributes(cls, config_nick, attributes): - if cls._is_portfolio(config_nick): - return [attr for attr in attributes - if attr in cls.PORTFOLIO_ATTRIBUTES] - return attributes - - def add_absolute_report_step(self, **kwargs): - """Add step that makes an absolute report. - - Absolute reports are useful for experiments that don't compare - revisions. - - The report is written to the experiment evaluation directory. - - All *kwargs* will be passed to the AbsoluteReport class. If the - keyword argument *attributes* is not specified, a default list - of attributes is used. :: - - exp.add_absolute_report_step(attributes=["coverage"]) - - """ - kwargs.setdefault("attributes", self.DEFAULT_TABLE_ATTRIBUTES) - report = AbsoluteReport(**kwargs) - outfile = os.path.join( - self.eval_dir, - get_experiment_name() + "." 
+ report.output_format) - self.add_report(report, outfile=outfile) - self.add_step( - 'publish-absolute-report', subprocess.call, ['publish', outfile]) - - def add_comparison_table_step(self, **kwargs): - """Add a step that makes pairwise revision comparisons. - - Create comparative reports for all pairs of Fast Downward - revisions. Each report pairs up the runs of the same config and - lists the two absolute attribute values and their difference - for all attributes in kwargs["attributes"]. - - All *kwargs* will be passed to the CompareConfigsReport class. - If the keyword argument *attributes* is not specified, a - default list of attributes is used. :: - - exp.add_comparison_table_step(attributes=["coverage"]) - - """ - kwargs.setdefault("attributes", self.DEFAULT_TABLE_ATTRIBUTES) - - def make_comparison_tables(): - for rev1, rev2 in itertools.combinations(self._revisions, 2): - compared_configs = [] - for config in self._configs: - config_nick = config.nick - compared_configs.append( - ("%s-%s" % (rev1, config_nick), - "%s-%s" % (rev2, config_nick), - "Diff (%s)" % config_nick)) - report = ComparativeReport(compared_configs, **kwargs) - outfile = os.path.join( - self.eval_dir, - "%s-%s-%s-compare.%s" % ( - self.name, rev1, rev2, report.output_format)) - report(self.eval_dir, outfile) - - def publish_comparison_tables(): - for rev1, rev2 in itertools.combinations(self._revisions, 2): - outfile = os.path.join( - self.eval_dir, - "%s-%s-%s-compare.html" % (self.name, rev1, rev2)) - subprocess.call(["publish", outfile]) - - self.add_step("make-comparison-tables", make_comparison_tables) - self.add_step( - "publish-comparison-tables", publish_comparison_tables) - - def add_scatter_plot_step(self, relative=False, attributes=None): - """Add step creating (relative) scatter plots for all revision pairs. - - Create a scatter plot for each combination of attribute, - configuration and revisions pair. 
If *attributes* is not - specified, a list of common scatter plot attributes is used. - For portfolios all attributes except "cost", "coverage" and - "plan_length" will be ignored. :: - - exp.add_scatter_plot_step(attributes=["expansions"]) - - """ - if relative: - report_class = RelativeScatterPlotReport - scatter_dir = os.path.join(self.eval_dir, "scatter-relative") - step_name = "make-relative-scatter-plots" - else: - report_class = ScatterPlotReport - scatter_dir = os.path.join(self.eval_dir, "scatter-absolute") - step_name = "make-absolute-scatter-plots" - if attributes is None: - attributes = self.DEFAULT_SCATTER_PLOT_ATTRIBUTES - - def make_scatter_plot(config_nick, rev1, rev2, attribute): - name = "-".join([self.name, rev1, rev2, attribute, config_nick]) - print "Make scatter plot for", name - algo1 = "{}-{}".format(rev1, config_nick) - algo2 = "{}-{}".format(rev2, config_nick) - report = report_class( - filter_config=[algo1, algo2], - attributes=[attribute], - get_category=lambda run1, run2: run1["domain"], - legend_location=(1.3, 0.5)) - report( - self.eval_dir, - os.path.join(scatter_dir, rev1 + "-" + rev2, name)) - - def make_scatter_plots(): - for config in self._configs: - for rev1, rev2 in itertools.combinations(self._revisions, 2): - for attribute in self.get_supported_attributes( - config.nick, attributes): - make_scatter_plot(config.nick, rev1, rev2, attribute) - - self.add_step(step_name, make_scatter_plots) diff --git a/experiments/issue860/relativescatter.py b/experiments/issue860/relativescatter.py deleted file mode 100644 index f74cb6e721..0000000000 --- a/experiments/issue860/relativescatter.py +++ /dev/null @@ -1,105 +0,0 @@ -# -*- coding: utf-8 -*- - -from collections import defaultdict - -from matplotlib import ticker - -from downward.reports.scatter import ScatterPlotReport -from downward.reports.plot import PlotReport, Matplotlib, MatplotlibPlot - - -# TODO: handle outliers - -# TODO: this is mostly copied from ScatterMatplotlib 
(scatter.py) -class RelativeScatterMatplotlib(Matplotlib): - @classmethod - def _plot(cls, report, axes, categories, styles): - # Display grid - axes.grid(b=True, linestyle='-', color='0.75') - - has_points = False - # Generate the scatter plots - for category, coords in sorted(categories.items()): - X, Y = zip(*coords) - axes.scatter(X, Y, s=42, label=category, **styles[category]) - if X and Y: - has_points = True - - if report.xscale == 'linear' or report.yscale == 'linear': - plot_size = report.missing_val * 1.01 - else: - plot_size = report.missing_val * 1.25 - - # make 5 ticks above and below 1 - yticks = [] - tick_step = report.ylim_top**(1/5.0) - for i in xrange(-5, 6): - yticks.append(tick_step**i) - axes.set_yticks(yticks) - axes.get_yaxis().set_major_formatter(ticker.ScalarFormatter()) - - axes.set_xlim(report.xlim_left or -1, report.xlim_right or plot_size) - axes.set_ylim(report.ylim_bottom or -1, report.ylim_top or plot_size) - - for axis in [axes.xaxis, axes.yaxis]: - MatplotlibPlot.change_axis_formatter( - axis, - report.missing_val if report.show_missing else None) - return has_points - - -class RelativeScatterPlotReport(ScatterPlotReport): - """ - Generate a scatter plot that shows a relative comparison of two - algorithms with regard to the given attribute. The attribute value - of algorithm 1 is shown on the x-axis and the relation to the value - of algorithm 2 on the y-axis. - """ - - def __init__(self, show_missing=True, get_category=None, **kwargs): - ScatterPlotReport.__init__(self, show_missing, get_category, **kwargs) - if self.output_format == 'tex': - raise "not supported" - else: - self.writer = RelativeScatterMatplotlib - - def _fill_categories(self, runs): - # We discard the *runs* parameter. 
- # Map category names to value tuples - categories = defaultdict(list) - self.ylim_bottom = 2 - self.ylim_top = 0.5 - self.xlim_left = float("inf") - for (domain, problem), runs in self.problem_runs.items(): - if len(runs) != 2: - continue - run1, run2 = runs - assert (run1['algorithm'] == self.algorithms[0] and - run2['algorithm'] == self.algorithms[1]) - val1 = run1.get(self.attribute) - val2 = run2.get(self.attribute) - if val1 is None or val2 is None: - continue - category = self.get_category(run1, run2) - assert val1 > 0, (domain, problem, self.algorithms[0], val1) - assert val2 > 0, (domain, problem, self.algorithms[1], val2) - x = val1 - y = val2 / float(val1) - - categories[category].append((x, y)) - - self.ylim_top = max(self.ylim_top, y) - self.ylim_bottom = min(self.ylim_bottom, y) - self.xlim_left = min(self.xlim_left, x) - - # center around 1 - if self.ylim_bottom < 1: - self.ylim_top = max(self.ylim_top, 1 / float(self.ylim_bottom)) - if self.ylim_top > 1: - self.ylim_bottom = min(self.ylim_bottom, 1 / float(self.ylim_top)) - return categories - - def _set_scales(self, xscale, yscale): - # ScatterPlot uses log-scaling on the x-axis by default. - PlotReport._set_scales( - self, xscale or self.attribute.scale or 'log', 'log') diff --git a/experiments/issue860/v1.py b/experiments/issue860/v1.py deleted file mode 100755 index ad68c80690..0000000000 --- a/experiments/issue860/v1.py +++ /dev/null @@ -1,62 +0,0 @@ -#! 
/usr/bin/env python -# -*- coding: utf-8 -*- - -import itertools -import os - -from lab.environments import LocalEnvironment, BaselSlurmEnvironment - -from downward.reports.compare import ComparativeReport - -import common_setup -from common_setup import IssueConfig, IssueExperiment -from relativescatter import RelativeScatterPlotReport - -DIR = os.path.dirname(os.path.abspath(__file__)) -SCRIPT_NAME = os.path.splitext(os.path.basename(__file__))[0] -BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"] -REVISIONS = ["issue860-base", "issue860-v1"] -BUILDS = ["release"] -CONFIG_NICKS = [ - ("astar-blind", ["--search", "astar(blind)"]), -] -CONFIGS = [ - IssueConfig( - config_nick, - config, - build_options=[build], - driver_options=["--build", build]) - for build in BUILDS - for config_nick, config in CONFIG_NICKS -] - -SUITE = common_setup.DEFAULT_SATISFICING_SUITE -ENVIRONMENT = BaselSlurmEnvironment( - partition="infai_2", - email="silvan.sievers@unibas.ch", - export=["PATH", "DOWNWARD_BENCHMARKS"]) - -if common_setup.is_test_run(): - SUITE = IssueExperiment.DEFAULT_TEST_SUITE - ENVIRONMENT = LocalEnvironment(processes=1) - -exp = IssueExperiment( - revisions=REVISIONS, - configs=CONFIGS, - environment=ENVIRONMENT, -) -exp.add_suite(BENCHMARKS_DIR, SUITE) - -exp.add_parser(exp.EXITCODE_PARSER) -exp.add_parser(exp.TRANSLATOR_PARSER) -exp.add_parser(exp.SINGLE_SEARCH_PARSER) -exp.add_parser(exp.PLANNER_PARSER) - -exp.add_step('build', exp.build) -exp.add_step('start', exp.start_runs) -exp.add_fetcher(name='fetch') - -exp.add_absolute_report_step() -exp.add_comparison_table_step() - -exp.run_steps() diff --git a/experiments/issue860/v2.py b/experiments/issue860/v2.py deleted file mode 100755 index eb51dc3c9c..0000000000 --- a/experiments/issue860/v2.py +++ /dev/null @@ -1,66 +0,0 @@ -#! 
/usr/bin/env python -# -*- coding: utf-8 -*- - -import itertools -import os - -from lab.environments import LocalEnvironment, BaselSlurmEnvironment - -from downward.reports.compare import ComparativeReport - -import common_setup -from common_setup import IssueConfig, IssueExperiment -from relativescatter import RelativeScatterPlotReport - -DIR = os.path.dirname(os.path.abspath(__file__)) -SCRIPT_NAME = os.path.splitext(os.path.basename(__file__))[0] -BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"] -REVISIONS = ["issue860-base", "issue860-v2"] -BUILDS = ["release"] -CONFIG_NICKS = [ - ("astar-blind", ["--search", "astar(blind())"]), - ("astar-lmcut", ["--search", "astar(lmcut())"]), # inconsistent heuristic to test re-opening - ("bjolp", ["--evaluator", - "lmc=lmcount(lm_merged([lm_rhw(),lm_hm(m=1)]),admissible=true)", - "--search", - "astar(lmc,lazy_evaluator=lmc)"]), # test lazy evaluator -] -CONFIGS = [ - IssueConfig( - config_nick, - config, - build_options=[build], - driver_options=["--build", build]) - for build in BUILDS - for config_nick, config in CONFIG_NICKS -] - -SUITE = common_setup.DEFAULT_SATISFICING_SUITE -ENVIRONMENT = BaselSlurmEnvironment( - partition="infai_2", - email="silvan.sievers@unibas.ch", - export=["PATH", "DOWNWARD_BENCHMARKS"]) - -if common_setup.is_test_run(): - SUITE = IssueExperiment.DEFAULT_TEST_SUITE - ENVIRONMENT = LocalEnvironment(processes=1) - -exp = IssueExperiment( - revisions=REVISIONS, - configs=CONFIGS, - environment=ENVIRONMENT, -) -exp.add_suite(BENCHMARKS_DIR, SUITE) - -exp.add_parser(exp.EXITCODE_PARSER) -exp.add_parser(exp.TRANSLATOR_PARSER) -exp.add_parser(exp.SINGLE_SEARCH_PARSER) -exp.add_parser(exp.PLANNER_PARSER) - -exp.add_step('build', exp.build) -exp.add_step('start', exp.start_runs) -exp.add_fetcher(name='fetch') - -exp.add_comparison_table_step() - -exp.run_steps() diff --git a/experiments/issue862/common_setup.py b/experiments/issue862/common_setup.py deleted file mode 100644 index 5f92dd054c..0000000000 
--- a/experiments/issue862/common_setup.py +++ /dev/null @@ -1,394 +0,0 @@ -# -*- coding: utf-8 -*- - -import itertools -import os -import platform -import subprocess -import sys - -from lab.experiment import ARGPARSER -from lab import tools - -from downward.experiment import FastDownwardExperiment -from downward.reports.absolute import AbsoluteReport -from downward.reports.compare import ComparativeReport -from downward.reports.scatter import ScatterPlotReport - -from relativescatter import RelativeScatterPlotReport - - -def parse_args(): - ARGPARSER.add_argument( - "--test", - choices=["yes", "no", "auto"], - default="auto", - dest="test_run", - help="test experiment locally on a small suite if --test=yes or " - "--test=auto and we are not on a cluster") - return ARGPARSER.parse_args() - -ARGS = parse_args() - - -DEFAULT_OPTIMAL_SUITE = [ - 'agricola-opt18-strips', 'airport', 'barman-opt11-strips', - 'barman-opt14-strips', 'blocks', 'childsnack-opt14-strips', - 'data-network-opt18-strips', 'depot', 'driverlog', - 'elevators-opt08-strips', 'elevators-opt11-strips', - 'floortile-opt11-strips', 'floortile-opt14-strips', 'freecell', - 'ged-opt14-strips', 'grid', 'gripper', 'hiking-opt14-strips', - 'logistics00', 'logistics98', 'miconic', 'movie', 'mprime', - 'mystery', 'nomystery-opt11-strips', 'openstacks-opt08-strips', - 'openstacks-opt11-strips', 'openstacks-opt14-strips', - 'openstacks-strips', 'organic-synthesis-opt18-strips', - 'organic-synthesis-split-opt18-strips', 'parcprinter-08-strips', - 'parcprinter-opt11-strips', 'parking-opt11-strips', - 'parking-opt14-strips', 'pathways-noneg', 'pegsol-08-strips', - 'pegsol-opt11-strips', 'petri-net-alignment-opt18-strips', - 'pipesworld-notankage', 'pipesworld-tankage', 'psr-small', 'rovers', - 'satellite', 'scanalyzer-08-strips', 'scanalyzer-opt11-strips', - 'snake-opt18-strips', 'sokoban-opt08-strips', - 'sokoban-opt11-strips', 'spider-opt18-strips', 'storage', - 'termes-opt18-strips', 'tetris-opt14-strips', - 
'tidybot-opt11-strips', 'tidybot-opt14-strips', 'tpp', - 'transport-opt08-strips', 'transport-opt11-strips', - 'transport-opt14-strips', 'trucks-strips', 'visitall-opt11-strips', - 'visitall-opt14-strips', 'woodworking-opt08-strips', - 'woodworking-opt11-strips', 'zenotravel'] - -DEFAULT_SATISFICING_SUITE = [ - 'agricola-sat18-strips', 'airport', 'assembly', - 'barman-sat11-strips', 'barman-sat14-strips', 'blocks', - 'caldera-sat18-adl', 'caldera-split-sat18-adl', 'cavediving-14-adl', - 'childsnack-sat14-strips', 'citycar-sat14-adl', - 'data-network-sat18-strips', 'depot', 'driverlog', - 'elevators-sat08-strips', 'elevators-sat11-strips', - 'flashfill-sat18-adl', 'floortile-sat11-strips', - 'floortile-sat14-strips', 'freecell', 'ged-sat14-strips', 'grid', - 'gripper', 'hiking-sat14-strips', 'logistics00', 'logistics98', - 'maintenance-sat14-adl', 'miconic', 'miconic-fulladl', - 'miconic-simpleadl', 'movie', 'mprime', 'mystery', - 'nomystery-sat11-strips', 'nurikabe-sat18-adl', 'openstacks', - 'openstacks-sat08-adl', 'openstacks-sat08-strips', - 'openstacks-sat11-strips', 'openstacks-sat14-strips', - 'openstacks-strips', 'optical-telegraphs', - 'organic-synthesis-sat18-strips', - 'organic-synthesis-split-sat18-strips', 'parcprinter-08-strips', - 'parcprinter-sat11-strips', 'parking-sat11-strips', - 'parking-sat14-strips', 'pathways', 'pathways-noneg', - 'pegsol-08-strips', 'pegsol-sat11-strips', 'philosophers', - 'pipesworld-notankage', 'pipesworld-tankage', 'psr-large', - 'psr-middle', 'psr-small', 'rovers', 'satellite', - 'scanalyzer-08-strips', 'scanalyzer-sat11-strips', 'schedule', - 'settlers-sat18-adl', 'snake-sat18-strips', 'sokoban-sat08-strips', - 'sokoban-sat11-strips', 'spider-sat18-strips', 'storage', - 'termes-sat18-strips', 'tetris-sat14-strips', - 'thoughtful-sat14-strips', 'tidybot-sat11-strips', 'tpp', - 'transport-sat08-strips', 'transport-sat11-strips', - 'transport-sat14-strips', 'trucks', 'trucks-strips', - 'visitall-sat11-strips', 
'visitall-sat14-strips', - 'woodworking-sat08-strips', 'woodworking-sat11-strips', - 'zenotravel'] - - -def get_script(): - """Get file name of main script.""" - return tools.get_script_path() - - -def get_script_dir(): - """Get directory of main script. - - Usually a relative directory (depends on how it was called by the user.)""" - return os.path.dirname(get_script()) - - -def get_experiment_name(): - """Get name for experiment. - - Derived from the absolute filename of the main script, e.g. - "/ham/spam/eggs.py" => "spam-eggs".""" - script = os.path.abspath(get_script()) - script_dir = os.path.basename(os.path.dirname(script)) - script_base = os.path.splitext(os.path.basename(script))[0] - return "%s-%s" % (script_dir, script_base) - - -def get_data_dir(): - """Get data dir for the experiment. - - This is the subdirectory "data" of the directory containing - the main script.""" - return os.path.join(get_script_dir(), "data", get_experiment_name()) - - -def get_repo_base(): - """Get base directory of the repository, as an absolute path. - - Search upwards in the directory tree from the main script until a - directory with a subdirectory named ".hg" is found. - - Abort if the repo base cannot be found.""" - path = os.path.abspath(get_script_dir()) - while os.path.dirname(path) != path: - if os.path.exists(os.path.join(path, ".hg")): - return path - path = os.path.dirname(path) - sys.exit("repo base could not be found") - - -def is_running_on_cluster(): - node = platform.node() - return node.endswith(".scicore.unibas.ch") or node.endswith(".cluster.bc2.ch") - - -def is_test_run(): - return ARGS.test_run == "yes" or ( - ARGS.test_run == "auto" and not is_running_on_cluster()) - - -def get_algo_nick(revision, config_nick): - return "{revision}-{config_nick}".format(**locals()) - - -class IssueConfig(object): - """Hold information about a planner configuration. - - See FastDownwardExperiment.add_algorithm() for documentation of the - constructor's options. 
- - """ - def __init__(self, nick, component_options, - build_options=None, driver_options=None): - self.nick = nick - self.component_options = component_options - self.build_options = build_options - self.driver_options = driver_options - - -class IssueExperiment(FastDownwardExperiment): - """Subclass of FastDownwardExperiment with some convenience features.""" - - DEFAULT_TEST_SUITE = ["depot:p01.pddl", "gripper:prob01.pddl"] - - DEFAULT_TABLE_ATTRIBUTES = [ - "cost", - "coverage", - "error", - "evaluations", - "expansions", - "expansions_until_last_jump", - "generated", - "memory", - "planner_memory", - "planner_time", - "quality", - "run_dir", - "score_evaluations", - "score_expansions", - "score_generated", - "score_memory", - "score_search_time", - "score_total_time", - "search_time", - "total_time", - ] - - DEFAULT_SCATTER_PLOT_ATTRIBUTES = [ - "evaluations", - "expansions", - "expansions_until_last_jump", - "initial_h_value", - "memory", - "search_time", - "total_time", - ] - - PORTFOLIO_ATTRIBUTES = [ - "cost", - "coverage", - "error", - "plan_length", - "run_dir", - ] - - def __init__(self, revisions=None, configs=None, path=None, **kwargs): - """ - - You can either specify both *revisions* and *configs* or none - of them. If they are omitted, you will need to call - exp.add_algorithm() manually. - - If *revisions* is given, it must be a non-empty list of - revision identifiers, which specify which planner versions to - use in the experiment. The same versions are used for - translator, preprocessor and search. :: - - IssueExperiment(revisions=["issue123", "4b3d581643"], ...) - - If *configs* is given, it must be a non-empty list of - IssueConfig objects. :: - - IssueExperiment(..., configs=[ - IssueConfig("ff", ["--search", "eager_greedy(ff())"]), - IssueConfig( - "lama", [], - driver_options=["--alias", "seq-sat-lama-2011"]), - ]) - - If *path* is specified, it must be the path to where the - experiment should be built (e.g. 
- /home/john/experiments/issue123/exp01/). If omitted, the - experiment path is derived automatically from the main - script's filename. Example:: - - script = experiments/issue123/exp01.py --> - path = experiments/issue123/data/issue123-exp01/ - - """ - - path = path or get_data_dir() - - FastDownwardExperiment.__init__(self, path=path, **kwargs) - - if (revisions and not configs) or (not revisions and configs): - raise ValueError( - "please provide either both or none of revisions and configs") - - for rev in revisions: - for config in configs: - self.add_algorithm( - get_algo_nick(rev, config.nick), - get_repo_base(), - rev, - config.component_options, - build_options=config.build_options, - driver_options=config.driver_options) - - self._revisions = revisions - self._configs = configs - - @classmethod - def _is_portfolio(cls, config_nick): - return "fdss" in config_nick - - @classmethod - def get_supported_attributes(cls, config_nick, attributes): - if cls._is_portfolio(config_nick): - return [attr for attr in attributes - if attr in cls.PORTFOLIO_ATTRIBUTES] - return attributes - - def add_absolute_report_step(self, outfile=None, **kwargs): - """Add step that makes an absolute report. - - Absolute reports are useful for experiments that don't compare - revisions. - - The report is written to the experiment evaluation directory. - - All *kwargs* will be passed to the AbsoluteReport class. If the - keyword argument *attributes* is not specified, a default list - of attributes is used. :: - - exp.add_absolute_report_step(attributes=["coverage"]) - - """ - kwargs.setdefault("attributes", self.DEFAULT_TABLE_ATTRIBUTES) - report = AbsoluteReport(**kwargs) - outfile = outfile or os.path.join( - self.eval_dir, - get_experiment_name() + "." 
+ report.output_format) - self.add_report(report, outfile=outfile) - self.add_step( - 'publish-' + os.path.basename(outfile), subprocess.call, ['publish', outfile]) - - def add_comparison_table_step(self, **kwargs): - """Add a step that makes pairwise revision comparisons. - - Create comparative reports for all pairs of Fast Downward - revisions. Each report pairs up the runs of the same config and - lists the two absolute attribute values and their difference - for all attributes in kwargs["attributes"]. - - All *kwargs* will be passed to the CompareConfigsReport class. - If the keyword argument *attributes* is not specified, a - default list of attributes is used. :: - - exp.add_comparison_table_step(attributes=["coverage"]) - - """ - kwargs.setdefault("attributes", self.DEFAULT_TABLE_ATTRIBUTES) - - def make_comparison_tables(): - for rev1, rev2 in itertools.combinations(self._revisions, 2): - compared_configs = [] - for config in self._configs: - config_nick = config.nick - compared_configs.append( - ("%s-%s" % (rev1, config_nick), - "%s-%s" % (rev2, config_nick), - "Diff (%s)" % config_nick)) - report = ComparativeReport(compared_configs, **kwargs) - outfile = os.path.join( - self.eval_dir, - "%s-%s-%s-compare.%s" % ( - self.name, rev1, rev2, report.output_format)) - report(self.eval_dir, outfile) - - def publish_comparison_tables(): - for rev1, rev2 in itertools.combinations(self._revisions, 2): - outfile = os.path.join( - self.eval_dir, - "%s-%s-%s-compare.html" % (self.name, rev1, rev2)) - subprocess.call(["publish", outfile]) - - self.add_step("make-comparison-tables", make_comparison_tables) - self.add_step( - "publish-comparison-tables", publish_comparison_tables) - - def add_scatter_plot_step(self, relative=False, attributes=None): - """Add step creating (relative) scatter plots for all revision pairs. - - Create a scatter plot for each combination of attribute, - configuration and revisions pair. 
If *attributes* is not - specified, a list of common scatter plot attributes is used. - For portfolios all attributes except "cost", "coverage" and - "plan_length" will be ignored. :: - - exp.add_scatter_plot_step(attributes=["expansions"]) - - """ - if relative: - report_class = RelativeScatterPlotReport - scatter_dir = os.path.join(self.eval_dir, "scatter-relative") - step_name = "make-relative-scatter-plots" - else: - report_class = ScatterPlotReport - scatter_dir = os.path.join(self.eval_dir, "scatter-absolute") - step_name = "make-absolute-scatter-plots" - if attributes is None: - attributes = self.DEFAULT_SCATTER_PLOT_ATTRIBUTES - - def make_scatter_plot(config_nick, rev1, rev2, attribute): - name = "-".join([self.name, rev1, rev2, attribute, config_nick]) - print "Make scatter plot for", name - algo1 = "{}-{}".format(rev1, config_nick) - algo2 = "{}-{}".format(rev2, config_nick) - report = report_class( - filter_config=[algo1, algo2], - attributes=[attribute], - get_category=lambda run1, run2: run1["domain"], - legend_location=(1.3, 0.5)) - report( - self.eval_dir, - os.path.join(scatter_dir, rev1 + "-" + rev2, name)) - - def make_scatter_plots(): - for config in self._configs: - for rev1, rev2 in itertools.combinations(self._revisions, 2): - for attribute in self.get_supported_attributes( - config.nick, attributes): - make_scatter_plot(config.nick, rev1, rev2, attribute) - - self.add_step(step_name, make_scatter_plots) diff --git a/experiments/issue862/relativescatter.py b/experiments/issue862/relativescatter.py deleted file mode 100644 index f74cb6e721..0000000000 --- a/experiments/issue862/relativescatter.py +++ /dev/null @@ -1,105 +0,0 @@ -# -*- coding: utf-8 -*- - -from collections import defaultdict - -from matplotlib import ticker - -from downward.reports.scatter import ScatterPlotReport -from downward.reports.plot import PlotReport, Matplotlib, MatplotlibPlot - - -# TODO: handle outliers - -# TODO: this is mostly copied from ScatterMatplotlib 
(scatter.py) -class RelativeScatterMatplotlib(Matplotlib): - @classmethod - def _plot(cls, report, axes, categories, styles): - # Display grid - axes.grid(b=True, linestyle='-', color='0.75') - - has_points = False - # Generate the scatter plots - for category, coords in sorted(categories.items()): - X, Y = zip(*coords) - axes.scatter(X, Y, s=42, label=category, **styles[category]) - if X and Y: - has_points = True - - if report.xscale == 'linear' or report.yscale == 'linear': - plot_size = report.missing_val * 1.01 - else: - plot_size = report.missing_val * 1.25 - - # make 5 ticks above and below 1 - yticks = [] - tick_step = report.ylim_top**(1/5.0) - for i in xrange(-5, 6): - yticks.append(tick_step**i) - axes.set_yticks(yticks) - axes.get_yaxis().set_major_formatter(ticker.ScalarFormatter()) - - axes.set_xlim(report.xlim_left or -1, report.xlim_right or plot_size) - axes.set_ylim(report.ylim_bottom or -1, report.ylim_top or plot_size) - - for axis in [axes.xaxis, axes.yaxis]: - MatplotlibPlot.change_axis_formatter( - axis, - report.missing_val if report.show_missing else None) - return has_points - - -class RelativeScatterPlotReport(ScatterPlotReport): - """ - Generate a scatter plot that shows a relative comparison of two - algorithms with regard to the given attribute. The attribute value - of algorithm 1 is shown on the x-axis and the relation to the value - of algorithm 2 on the y-axis. - """ - - def __init__(self, show_missing=True, get_category=None, **kwargs): - ScatterPlotReport.__init__(self, show_missing, get_category, **kwargs) - if self.output_format == 'tex': - raise "not supported" - else: - self.writer = RelativeScatterMatplotlib - - def _fill_categories(self, runs): - # We discard the *runs* parameter. 
- # Map category names to value tuples - categories = defaultdict(list) - self.ylim_bottom = 2 - self.ylim_top = 0.5 - self.xlim_left = float("inf") - for (domain, problem), runs in self.problem_runs.items(): - if len(runs) != 2: - continue - run1, run2 = runs - assert (run1['algorithm'] == self.algorithms[0] and - run2['algorithm'] == self.algorithms[1]) - val1 = run1.get(self.attribute) - val2 = run2.get(self.attribute) - if val1 is None or val2 is None: - continue - category = self.get_category(run1, run2) - assert val1 > 0, (domain, problem, self.algorithms[0], val1) - assert val2 > 0, (domain, problem, self.algorithms[1], val2) - x = val1 - y = val2 / float(val1) - - categories[category].append((x, y)) - - self.ylim_top = max(self.ylim_top, y) - self.ylim_bottom = min(self.ylim_bottom, y) - self.xlim_left = min(self.xlim_left, x) - - # center around 1 - if self.ylim_bottom < 1: - self.ylim_top = max(self.ylim_top, 1 / float(self.ylim_bottom)) - if self.ylim_top > 1: - self.ylim_bottom = min(self.ylim_bottom, 1 / float(self.ylim_top)) - return categories - - def _set_scales(self, xscale, yscale): - # ScatterPlot uses log-scaling on the x-axis by default. - PlotReport._set_scales( - self, xscale or self.attribute.scale or 'log', 'log') diff --git a/experiments/issue862/translator_additional_parser.py b/experiments/issue862/translator_additional_parser.py deleted file mode 100755 index 13a3802297..0000000000 --- a/experiments/issue862/translator_additional_parser.py +++ /dev/null @@ -1,12 +0,0 @@ -#!/usr/bin/env python - -import hashlib - -from lab.parser import Parser - -def add_hash_value(content, props): - props['translator_output_sas_hash'] = hashlib.sha512(content).hexdigest() - -parser = Parser() -parser.add_function(add_hash_value, file="output.sas") -parser.parse() diff --git a/experiments/issue862/v1.py b/experiments/issue862/v1.py deleted file mode 100755 index 534c21876b..0000000000 --- a/experiments/issue862/v1.py +++ /dev/null @@ -1,211 +0,0 @@ -#! 
/usr/bin/env python2 -# -*- coding: utf-8 -*- - -import os - -from lab.environments import LocalEnvironment, BaselSlurmEnvironment -from lab import tools - -from downward.reports.compare import ComparativeReport -from downward.reports import PlanningReport - -import common_setup -from common_setup import IssueConfig, IssueExperiment - - -DIR = os.path.dirname(os.path.abspath(__file__)) -BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"] -REVISIONS = ["issue862-base", "issue862-v1", "issue862-v2", "issue862-v3"] -CONFIGS = [ - IssueConfig( - "translate-only", - [], - driver_options=["--translate"]) -] -ENVIRONMENT = BaselSlurmEnvironment( - partition="infai_2", - email="malte.helmert@unibas.ch", - export=["PATH", "DOWNWARD_BENCHMARKS"]) - -# This was generated by running "./suites.py all" in the benchmarks -# repository. I don't know if this is there is a better way of doing -# this. -SUITE = [ - 'agricola-opt18-strips', - 'agricola-sat18-strips', - 'airport', - 'airport-adl', - 'assembly', - 'barman-mco14-strips', - 'barman-opt11-strips', - 'barman-opt14-strips', - 'barman-sat11-strips', - 'barman-sat14-strips', - 'blocks', - 'caldera-opt18-adl', - 'caldera-sat18-adl', - 'caldera-split-opt18-adl', - 'caldera-split-sat18-adl', - 'cavediving-14-adl', - 'childsnack-opt14-strips', - 'childsnack-sat14-strips', - 'citycar-opt14-adl', - 'citycar-sat14-adl', - 'data-network-opt18-strips', - 'data-network-sat18-strips', - 'depot', - 'driverlog', - 'elevators-opt08-strips', - 'elevators-opt11-strips', - 'elevators-sat08-strips', - 'elevators-sat11-strips', - 'flashfill-sat18-adl', - 'floortile-opt11-strips', - 'floortile-opt14-strips', - 'floortile-sat11-strips', - 'floortile-sat14-strips', - 'freecell', - 'ged-opt14-strips', - 'ged-sat14-strips', - 'grid', - 'gripper', - 'hiking-agl14-strips', - 'hiking-opt14-strips', - 'hiking-sat14-strips', - 'logistics00', - 'logistics98', - 'maintenance-opt14-adl', - 'maintenance-sat14-adl', - 'miconic', - 'miconic-fulladl', - 
'miconic-simpleadl', - 'movie', - 'mprime', - 'mystery', - 'no-mprime', - 'no-mystery', - 'nomystery-opt11-strips', - 'nomystery-sat11-strips', - 'nurikabe-opt18-adl', - 'nurikabe-sat18-adl', - 'openstacks', - 'openstacks-agl14-strips', - 'openstacks-opt08-adl', - 'openstacks-opt08-strips', - 'openstacks-opt11-strips', - 'openstacks-opt14-strips', - 'openstacks-sat08-adl', - 'openstacks-sat08-strips', - 'openstacks-sat11-strips', - 'openstacks-sat14-strips', - 'openstacks-strips', - 'optical-telegraphs', - 'organic-synthesis-opt18-strips', - 'organic-synthesis-sat18-strips', - 'organic-synthesis-split-opt18-strips', - 'organic-synthesis-split-sat18-strips', - 'parcprinter-08-strips', - 'parcprinter-opt11-strips', - 'parcprinter-sat11-strips', - 'parking-opt11-strips', - 'parking-opt14-strips', - 'parking-sat11-strips', - 'parking-sat14-strips', - 'pathways', - 'pathways-noneg', - 'pegsol-08-strips', - 'pegsol-opt11-strips', - 'pegsol-sat11-strips', - 'petri-net-alignment-opt18-strips', - 'philosophers', - 'pipesworld-notankage', - 'pipesworld-tankage', - 'psr-large', - 'psr-middle', - 'psr-small', - 'rovers', - 'satellite', - 'scanalyzer-08-strips', - 'scanalyzer-opt11-strips', - 'scanalyzer-sat11-strips', - 'schedule', - 'settlers-opt18-adl', - 'settlers-sat18-adl', - 'snake-opt18-strips', - 'snake-sat18-strips', - 'sokoban-opt08-strips', - 'sokoban-opt11-strips', - 'sokoban-sat08-strips', - 'sokoban-sat11-strips', - 'spider-opt18-strips', - 'spider-sat18-strips', - 'storage', - 'termes-opt18-strips', - 'termes-sat18-strips', - 'tetris-opt14-strips', - 'tetris-sat14-strips', - 'thoughtful-mco14-strips', - 'thoughtful-sat14-strips', - 'tidybot-opt11-strips', - 'tidybot-opt14-strips', - 'tidybot-sat11-strips', - 'tpp', - 'transport-opt08-strips', - 'transport-opt11-strips', - 'transport-opt14-strips', - 'transport-sat08-strips', - 'transport-sat11-strips', - 'transport-sat14-strips', - 'trucks', - 'trucks-strips', - 'visitall-opt11-strips', - 
'visitall-opt14-strips', - 'visitall-sat11-strips', - 'visitall-sat14-strips', - 'woodworking-opt08-strips', - 'woodworking-opt11-strips', - 'woodworking-sat08-strips', - 'woodworking-sat11-strips', - 'zenotravel', -] - -if common_setup.is_test_run(): - SUITE = IssueExperiment.DEFAULT_TEST_SUITE - ENVIRONMENT = LocalEnvironment(processes=1) - -exp = IssueExperiment( - revisions=REVISIONS, - configs=CONFIGS, - environment=ENVIRONMENT, -) -exp.add_suite(BENCHMARKS_DIR, SUITE) - -exp.add_resource("translator_additional_parser", - "translator_additional_parser.py", - dest="translator_additional_parser.py") -del exp.commands['remove-output-sas'] -exp.add_command("translator_additional_parser", - ["{translator_additional_parser}"]) - -class TranslatorDiffReport(PlanningReport): - def get_cell(self, run): - return ";".join(run.get(attr) for attr in self.attributes) - - def get_text(self): - lines = [] - for runs in self.problem_runs.values(): - hashes = set([r.get("translator_output_sas_hash") for r in runs]) - if len(hashes) > 1 or None in hashes: - lines.append(";".join([self.get_cell(r) for r in runs])) - return "\n".join(lines) - -exp.add_step('build', exp.build) -exp.add_step('start', exp.start_runs) -exp.add_fetcher(name='fetch') - -exp.add_report(TranslatorDiffReport( - attributes=["domain", "problem", "algorithm", "run_dir"] - ), outfile="different_output_sas.csv" -) - -exp.run_steps() diff --git a/experiments/issue862/v5-planner.py b/experiments/issue862/v5-planner.py deleted file mode 100755 index 1190133b73..0000000000 --- a/experiments/issue862/v5-planner.py +++ /dev/null @@ -1,78 +0,0 @@ -#! 
/usr/bin/env python -# -*- coding: utf-8 -*- - -import itertools -import os - -from lab.environments import LocalEnvironment, BaselSlurmEnvironment - -from downward.reports.compare import ComparativeReport - -import common_setup -from common_setup import IssueConfig, IssueExperiment -from relativescatter import RelativeScatterPlotReport - -DIR = os.path.dirname(os.path.abspath(__file__)) -BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"] -REVISIONS = ["issue862-base", "issue862-v5"] -BUILDS = ["release32"] -CONFIG_DICT = { - "lazy-greedy-{h}".format(**locals()): [ - "--evaluator", - "h={h}()".format(**locals()), - "--search", - "lazy_greedy([h], preferred=[h])"] - for h in ["hmax", "add", "ff", "cg", "cea"] -} -CONFIG_DICT["lama-first"] = [ - "--evaluator", - "hlm=lmcount(lm_factory=lm_rhw(reasonable_orders=true),transform=adapt_costs(one),pref=false)", - "--evaluator", "hff=ff(transform=adapt_costs(one))", - "--search", """lazy_greedy([hff,hlm],preferred=[hff,hlm], - cost_type=one,reopen_closed=false)"""] -CONFIG_DICT["blind"] = ["--search", "astar(blind())"] -CONFIGS = [ - IssueConfig( - "-".join([config_nick, build]), - config, - build_options=[build], - driver_options=["--build", build, "--overall-time-limit", "30m"]) - for build in BUILDS - for config_nick, config in CONFIG_DICT.items() -] -SUITE = [ - "airport-adl", - "assembly", - "miconic-fulladl", - "psr-large", - "psr-middle", -] -ENVIRONMENT = BaselSlurmEnvironment( - partition="infai_2", - email="jendrik.seipp@unibas.ch", - export=["PATH", "DOWNWARD_BENCHMARKS"]) - -if common_setup.is_test_run(): - SUITE = IssueExperiment.DEFAULT_TEST_SUITE - ENVIRONMENT = LocalEnvironment(processes=1) - -exp = IssueExperiment( - revisions=REVISIONS, - configs=CONFIGS, - environment=ENVIRONMENT, -) -exp.add_suite(BENCHMARKS_DIR, SUITE) - -exp.add_parser(exp.EXITCODE_PARSER) -exp.add_parser(exp.TRANSLATOR_PARSER) -exp.add_parser(exp.SINGLE_SEARCH_PARSER) -exp.add_parser(exp.PLANNER_PARSER) - -exp.add_step("build", 
exp.build) -exp.add_step("start", exp.start_runs) -exp.add_fetcher(name="fetch") - -#exp.add_absolute_report_step() -exp.add_comparison_table_step() - -exp.run_steps() diff --git a/experiments/issue862/v5.py b/experiments/issue862/v5.py deleted file mode 100755 index 0087cf41ee..0000000000 --- a/experiments/issue862/v5.py +++ /dev/null @@ -1,241 +0,0 @@ -#! /usr/bin/env python2 -# -*- coding: utf-8 -*- - -from collections import defaultdict -import os - -from lab.environments import LocalEnvironment, BaselSlurmEnvironment -from lab import tools - -from downward.reports.compare import ComparativeReport -from downward.reports import PlanningReport - -import common_setup -from common_setup import IssueConfig, IssueExperiment - - -EXPNAME = common_setup.get_experiment_name() -DIR = os.path.dirname(os.path.abspath(__file__)) -BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"] -REVISIONS = ["issue862-base", "issue862-v1", "issue862-v4", "issue862-v5"] -CONFIGS = [ - IssueConfig( - "translate-only", - [], - driver_options=["--translate"]) -] -ENVIRONMENT = BaselSlurmEnvironment( - partition="infai_2", - email="malte.helmert@unibas.ch", - export=["PATH", "DOWNWARD_BENCHMARKS"]) - -# This was generated by running "./suites.py all" in the benchmarks -# repository. 
-SUITE = [ - 'agricola-opt18-strips', - 'agricola-sat18-strips', - 'airport', - 'airport-adl', - 'assembly', - 'barman-mco14-strips', - 'barman-opt11-strips', - 'barman-opt14-strips', - 'barman-sat11-strips', - 'barman-sat14-strips', - 'blocks', - 'caldera-opt18-adl', - 'caldera-sat18-adl', - 'caldera-split-opt18-adl', - 'caldera-split-sat18-adl', - 'cavediving-14-adl', - 'childsnack-opt14-strips', - 'childsnack-sat14-strips', - 'citycar-opt14-adl', - 'citycar-sat14-adl', - 'data-network-opt18-strips', - 'data-network-sat18-strips', - 'depot', - 'driverlog', - 'elevators-opt08-strips', - 'elevators-opt11-strips', - 'elevators-sat08-strips', - 'elevators-sat11-strips', - 'flashfill-sat18-adl', - 'floortile-opt11-strips', - 'floortile-opt14-strips', - 'floortile-sat11-strips', - 'floortile-sat14-strips', - 'freecell', - 'ged-opt14-strips', - 'ged-sat14-strips', - 'grid', - 'gripper', - 'hiking-agl14-strips', - 'hiking-opt14-strips', - 'hiking-sat14-strips', - 'logistics00', - 'logistics98', - 'maintenance-opt14-adl', - 'maintenance-sat14-adl', - 'miconic', - 'miconic-fulladl', - 'miconic-simpleadl', - 'movie', - 'mprime', - 'mystery', - 'no-mprime', - 'no-mystery', - 'nomystery-opt11-strips', - 'nomystery-sat11-strips', - 'nurikabe-opt18-adl', - 'nurikabe-sat18-adl', - 'openstacks', - 'openstacks-agl14-strips', - 'openstacks-opt08-adl', - 'openstacks-opt08-strips', - 'openstacks-opt11-strips', - 'openstacks-opt14-strips', - 'openstacks-sat08-adl', - 'openstacks-sat08-strips', - 'openstacks-sat11-strips', - 'openstacks-sat14-strips', - 'openstacks-strips', - 'optical-telegraphs', - 'organic-synthesis-opt18-strips', - 'organic-synthesis-sat18-strips', - 'organic-synthesis-split-opt18-strips', - 'organic-synthesis-split-sat18-strips', - 'parcprinter-08-strips', - 'parcprinter-opt11-strips', - 'parcprinter-sat11-strips', - 'parking-opt11-strips', - 'parking-opt14-strips', - 'parking-sat11-strips', - 'parking-sat14-strips', - 'pathways', - 'pathways-noneg', - 
'pegsol-08-strips', - 'pegsol-opt11-strips', - 'pegsol-sat11-strips', - 'petri-net-alignment-opt18-strips', - 'philosophers', - 'pipesworld-notankage', - 'pipesworld-tankage', - 'psr-large', - 'psr-middle', - 'psr-small', - 'rovers', - 'satellite', - 'scanalyzer-08-strips', - 'scanalyzer-opt11-strips', - 'scanalyzer-sat11-strips', - 'schedule', - 'settlers-opt18-adl', - 'settlers-sat18-adl', - 'snake-opt18-strips', - 'snake-sat18-strips', - 'sokoban-opt08-strips', - 'sokoban-opt11-strips', - 'sokoban-sat08-strips', - 'sokoban-sat11-strips', - 'spider-opt18-strips', - 'spider-sat18-strips', - 'storage', - 'termes-opt18-strips', - 'termes-sat18-strips', - 'tetris-opt14-strips', - 'tetris-sat14-strips', - 'thoughtful-mco14-strips', - 'thoughtful-sat14-strips', - 'tidybot-opt11-strips', - 'tidybot-opt14-strips', - 'tidybot-sat11-strips', - 'tpp', - 'transport-opt08-strips', - 'transport-opt11-strips', - 'transport-opt14-strips', - 'transport-sat08-strips', - 'transport-sat11-strips', - 'transport-sat14-strips', - 'trucks', - 'trucks-strips', - 'visitall-opt11-strips', - 'visitall-opt14-strips', - 'visitall-sat11-strips', - 'visitall-sat14-strips', - 'woodworking-opt08-strips', - 'woodworking-opt11-strips', - 'woodworking-sat08-strips', - 'woodworking-sat11-strips', - 'zenotravel', -] - -if common_setup.is_test_run(): - SUITE = IssueExperiment.DEFAULT_TEST_SUITE - ENVIRONMENT = LocalEnvironment(processes=1) - -exp = IssueExperiment( - revisions=REVISIONS, - configs=CONFIGS, - environment=ENVIRONMENT, -) -exp.add_suite(BENCHMARKS_DIR, SUITE) - -exp.add_parser(exp.EXITCODE_PARSER) -exp.add_parser(exp.TRANSLATOR_PARSER) -exp.add_parser(exp.PLANNER_PARSER) -exp.add_parser("translator_additional_parser.py") - -del exp.commands['remove-output-sas'] - -class TranslatorDiffReport(PlanningReport): - def get_cell(self, run): - return ";".join(run.get(attr) for attr in self.attributes) - - def get_text(self): - lines = [] - for runs in self.problem_runs.values(): - hashes = 
set([r.get("translator_output_sas_hash") for r in runs]) - if len(hashes) > 1 or None in hashes: - lines.append(";".join([self.get_cell(r) for r in runs])) - return "\n".join(lines) - -class SameValueFilters(object): - """Ignore runs for a task where all algorithms have the same value.""" - def __init__(self, attribute): - self._attribute = attribute - self._tasks_to_values = defaultdict(list) - - def _get_task(self, run): - return (run['domain'], run['problem']) - - def store_values(self, run): - value = run.get(self._attribute) - self._tasks_to_values[self._get_task(run)].append(value) - # Don't filter this run, yet. - return True - - def filter_tasks_with_equal_values(self, run): - values = self._tasks_to_values[self._get_task(run)] - return len(set(values)) != 1 - -exp.add_step('build', exp.build) -exp.add_step('start', exp.start_runs) -exp.add_parse_again_step() -exp.add_fetcher(name='fetch') - -ATTRIBUTES = ["error", "run_dir", "translator_*", "translator_output_sas_hash"] -exp.add_absolute_report_step( - outfile=os.path.join(exp.eval_dir, "{EXPNAME}.html".format(**locals())), - attributes=ATTRIBUTES) -same_value_flters = SameValueFilters("translator_output_sas_hash") -exp.add_absolute_report_step( - outfile=os.path.join(exp.eval_dir, "{EXPNAME}-filtered.html".format(**locals())), - attributes=ATTRIBUTES, - filter=[same_value_flters.store_values, same_value_flters.filter_tasks_with_equal_values]) -exp.add_report(TranslatorDiffReport( - attributes=["domain", "problem", "algorithm", "run_dir"] - ), outfile="different_output_sas.csv" -) - -exp.run_steps() diff --git a/experiments/issue869/base-translate-all.py b/experiments/issue869/base-translate-all.py deleted file mode 100755 index 9e0c8716aa..0000000000 --- a/experiments/issue869/base-translate-all.py +++ /dev/null @@ -1,118 +0,0 @@ -#! 
/usr/bin/env python -# -*- coding: utf-8 -*- - -import itertools -import os - -from lab.environments import LocalEnvironment, BaselSlurmEnvironment - -from downward.reports.compare import ComparativeReport - -import common_setup -from common_setup import IssueConfig, IssueExperiment -from relativescatter import RelativeScatterPlotReport - -DIR = os.path.dirname(os.path.abspath(__file__)) -SCRIPT_NAME = os.path.splitext(os.path.basename(__file__))[0] -BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"] -REVISIONS = ["issue869-base"] -BUILDS = ["release32"] -CONFIG_NICKS = [ - ("translate", []), -] -CONFIGS = [ - IssueConfig( - config_nick, - config, - build_options=[build], - driver_options=["--build", build, "--translate"]) - for rev in REVISIONS - for build in BUILDS - for config_nick, config in CONFIG_NICKS -] - -SUITE = [ - 'agricola-opt18-strips', 'agricola-sat18-strips', 'airport', - 'airport-adl', 'assembly', 'barman-mco14-strips', - 'barman-opt11-strips', 'barman-opt14-strips', 'barman-sat11-strips', - 'barman-sat14-strips', 'blocks', 'caldera-opt18-adl', - 'caldera-sat18-adl', 'caldera-split-opt18-adl', - 'caldera-split-sat18-adl', 'cavediving-14-adl', - 'childsnack-opt14-strips', 'childsnack-sat14-strips', - 'citycar-opt14-adl', 'citycar-sat14-adl', - 'data-network-opt18-strips', 'data-network-sat18-strips', 'depot', - 'driverlog', 'elevators-opt08-strips', 'elevators-opt11-strips', - 'elevators-sat08-strips', 'elevators-sat11-strips', - 'flashfill-sat18-adl', 'floortile-opt11-strips', - 'floortile-opt14-strips', 'floortile-sat11-strips', - 'floortile-sat14-strips', 'freecell', 'ged-opt14-strips', - 'ged-sat14-strips', 'grid', 'gripper', 'hiking-agl14-strips', - 'hiking-opt14-strips', 'hiking-sat14-strips', 'logistics00', - 'logistics98', 'maintenance-opt14-adl', 'maintenance-sat14-adl', - 'miconic', 'miconic-fulladl', 'miconic-simpleadl', 'movie', - 'mprime', 'mystery', 'no-mprime', 'no-mystery', - 'nomystery-opt11-strips', 'nomystery-sat11-strips', - 
'nurikabe-opt18-adl', 'nurikabe-sat18-adl', 'openstacks', - 'openstacks-agl14-strips', 'openstacks-opt08-adl', - 'openstacks-opt08-strips', 'openstacks-opt11-strips', - 'openstacks-opt14-strips', 'openstacks-sat08-adl', - 'openstacks-sat08-strips', 'openstacks-sat11-strips', - 'openstacks-sat14-strips', 'openstacks-strips', - 'optical-telegraphs', 'organic-synthesis-opt18-strips', - 'organic-synthesis-sat18-strips', - 'organic-synthesis-split-opt18-strips', - 'organic-synthesis-split-sat18-strips', 'parcprinter-08-strips', - 'parcprinter-opt11-strips', 'parcprinter-sat11-strips', - 'parking-opt11-strips', 'parking-opt14-strips', - 'parking-sat11-strips', 'parking-sat14-strips', 'pathways', - 'pathways-noneg', 'pegsol-08-strips', 'pegsol-opt11-strips', - 'pegsol-sat11-strips', 'petri-net-alignment-opt18-strips', - 'philosophers', 'pipesworld-notankage', 'pipesworld-tankage', - 'psr-large', 'psr-middle', 'psr-small', 'rovers', 'satellite', - 'scanalyzer-08-strips', 'scanalyzer-opt11-strips', - 'scanalyzer-sat11-strips', 'schedule', 'settlers-opt18-adl', - 'settlers-sat18-adl', 'snake-opt18-strips', 'snake-sat18-strips', - 'sokoban-opt08-strips', 'sokoban-opt11-strips', - 'sokoban-sat08-strips', 'sokoban-sat11-strips', - 'spider-opt18-strips', 'spider-sat18-strips', 'storage', - 'termes-opt18-strips', 'termes-sat18-strips', 'tetris-opt14-strips', - 'tetris-sat14-strips', 'thoughtful-mco14-strips', - 'thoughtful-sat14-strips', 'tidybot-opt11-strips', - 'tidybot-opt14-strips', 'tidybot-sat11-strips', 'tpp', - 'transport-opt08-strips', 'transport-opt11-strips', - 'transport-opt14-strips', 'transport-sat08-strips', - 'transport-sat11-strips', 'transport-sat14-strips', 'trucks', - 'trucks-strips', 'visitall-opt11-strips', 'visitall-opt14-strips', - 'visitall-sat11-strips', 'visitall-sat14-strips', - 'woodworking-opt08-strips', 'woodworking-opt11-strips', - 'woodworking-sat08-strips', 'woodworking-sat11-strips', - 'zenotravel'] -ENVIRONMENT = BaselSlurmEnvironment( - 
partition="infai_2", - email="jendrik.seipp@unibas.ch", - export=["PATH", "DOWNWARD_BENCHMARKS"]) - -if common_setup.is_test_run(): - SUITE = IssueExperiment.DEFAULT_TEST_SUITE - ENVIRONMENT = LocalEnvironment(processes=1) - -exp = IssueExperiment( - revisions=REVISIONS, - configs=CONFIGS, - environment=ENVIRONMENT, -) -exp.add_suite(BENCHMARKS_DIR, SUITE) - -exp.add_parser(exp.EXITCODE_PARSER) -exp.add_parser(exp.TRANSLATOR_PARSER) -#exp.add_parser(exp.SINGLE_SEARCH_PARSER) -#exp.add_parser(exp.PLANNER_PARSER) - -exp.add_step('build', exp.build) -exp.add_step('start', exp.start_runs) -exp.add_fetcher(name='fetch') - -exp.add_absolute_report_step(attributes=["translator_time_done", "translator_peak_memory"]) -#exp.add_comparison_table_step() - -exp.run_steps() diff --git a/experiments/issue869/common_setup.py b/experiments/issue869/common_setup.py deleted file mode 100644 index f64c5c15b3..0000000000 --- a/experiments/issue869/common_setup.py +++ /dev/null @@ -1,394 +0,0 @@ -# -*- coding: utf-8 -*- - -import itertools -import os -import platform -import subprocess -import sys - -from lab.experiment import ARGPARSER -from lab import tools - -from downward.experiment import FastDownwardExperiment -from downward.reports.absolute import AbsoluteReport -from downward.reports.compare import ComparativeReport -from downward.reports.scatter import ScatterPlotReport - -from relativescatter import RelativeScatterPlotReport - - -def parse_args(): - ARGPARSER.add_argument( - "--test", - choices=["yes", "no", "auto"], - default="auto", - dest="test_run", - help="test experiment locally on a small suite if --test=yes or " - "--test=auto and we are not on a cluster") - return ARGPARSER.parse_args() - -ARGS = parse_args() - - -DEFAULT_OPTIMAL_SUITE = [ - 'agricola-opt18-strips', 'airport', 'barman-opt11-strips', - 'barman-opt14-strips', 'blocks', 'childsnack-opt14-strips', - 'data-network-opt18-strips', 'depot', 'driverlog', - 'elevators-opt08-strips', 'elevators-opt11-strips', - 
'floortile-opt11-strips', 'floortile-opt14-strips', 'freecell', - 'ged-opt14-strips', 'grid', 'gripper', 'hiking-opt14-strips', - 'logistics00', 'logistics98', 'miconic', 'movie', 'mprime', - 'mystery', 'nomystery-opt11-strips', 'openstacks-opt08-strips', - 'openstacks-opt11-strips', 'openstacks-opt14-strips', - 'openstacks-strips', 'organic-synthesis-opt18-strips', - 'organic-synthesis-split-opt18-strips', 'parcprinter-08-strips', - 'parcprinter-opt11-strips', 'parking-opt11-strips', - 'parking-opt14-strips', 'pathways-noneg', 'pegsol-08-strips', - 'pegsol-opt11-strips', 'petri-net-alignment-opt18-strips', - 'pipesworld-notankage', 'pipesworld-tankage', 'psr-small', 'rovers', - 'satellite', 'scanalyzer-08-strips', 'scanalyzer-opt11-strips', - 'snake-opt18-strips', 'sokoban-opt08-strips', - 'sokoban-opt11-strips', 'spider-opt18-strips', 'storage', - 'termes-opt18-strips', 'tetris-opt14-strips', - 'tidybot-opt11-strips', 'tidybot-opt14-strips', 'tpp', - 'transport-opt08-strips', 'transport-opt11-strips', - 'transport-opt14-strips', 'trucks-strips', 'visitall-opt11-strips', - 'visitall-opt14-strips', 'woodworking-opt08-strips', - 'woodworking-opt11-strips', 'zenotravel'] - -DEFAULT_SATISFICING_SUITE = [ - 'agricola-sat18-strips', 'airport', 'assembly', - 'barman-sat11-strips', 'barman-sat14-strips', 'blocks', - 'caldera-sat18-adl', 'caldera-split-sat18-adl', 'cavediving-14-adl', - 'childsnack-sat14-strips', 'citycar-sat14-adl', - 'data-network-sat18-strips', 'depot', 'driverlog', - 'elevators-sat08-strips', 'elevators-sat11-strips', - 'flashfill-sat18-adl', 'floortile-sat11-strips', - 'floortile-sat14-strips', 'freecell', 'ged-sat14-strips', 'grid', - 'gripper', 'hiking-sat14-strips', 'logistics00', 'logistics98', - 'maintenance-sat14-adl', 'miconic', 'miconic-fulladl', - 'miconic-simpleadl', 'movie', 'mprime', 'mystery', - 'nomystery-sat11-strips', 'nurikabe-sat18-adl', 'openstacks', - 'openstacks-sat08-adl', 'openstacks-sat08-strips', - 'openstacks-sat11-strips', 
'openstacks-sat14-strips', - 'openstacks-strips', 'optical-telegraphs', - 'organic-synthesis-sat18-strips', - 'organic-synthesis-split-sat18-strips', 'parcprinter-08-strips', - 'parcprinter-sat11-strips', 'parking-sat11-strips', - 'parking-sat14-strips', 'pathways', 'pathways-noneg', - 'pegsol-08-strips', 'pegsol-sat11-strips', 'philosophers', - 'pipesworld-notankage', 'pipesworld-tankage', 'psr-large', - 'psr-middle', 'psr-small', 'rovers', 'satellite', - 'scanalyzer-08-strips', 'scanalyzer-sat11-strips', 'schedule', - 'settlers-sat18-adl', 'snake-sat18-strips', 'sokoban-sat08-strips', - 'sokoban-sat11-strips', 'spider-sat18-strips', 'storage', - 'termes-sat18-strips', 'tetris-sat14-strips', - 'thoughtful-sat14-strips', 'tidybot-sat11-strips', 'tpp', - 'transport-sat08-strips', 'transport-sat11-strips', - 'transport-sat14-strips', 'trucks', 'trucks-strips', - 'visitall-sat11-strips', 'visitall-sat14-strips', - 'woodworking-sat08-strips', 'woodworking-sat11-strips', - 'zenotravel'] - - -def get_script(): - """Get file name of main script.""" - return tools.get_script_path() - - -def get_script_dir(): - """Get directory of main script. - - Usually a relative directory (depends on how it was called by the user.)""" - return os.path.dirname(get_script()) - - -def get_experiment_name(): - """Get name for experiment. - - Derived from the absolute filename of the main script, e.g. - "/ham/spam/eggs.py" => "spam-eggs".""" - script = os.path.abspath(get_script()) - script_dir = os.path.basename(os.path.dirname(script)) - script_base = os.path.splitext(os.path.basename(script))[0] - return "%s-%s" % (script_dir, script_base) - - -def get_data_dir(): - """Get data dir for the experiment. - - This is the subdirectory "data" of the directory containing - the main script.""" - return os.path.join(get_script_dir(), "data", get_experiment_name()) - - -def get_repo_base(): - """Get base directory of the repository, as an absolute path. 
- - Search upwards in the directory tree from the main script until a - directory with a subdirectory named ".hg" is found. - - Abort if the repo base cannot be found.""" - path = os.path.abspath(get_script_dir()) - while os.path.dirname(path) != path: - if os.path.exists(os.path.join(path, ".hg")): - return path - path = os.path.dirname(path) - sys.exit("repo base could not be found") - - -def is_running_on_cluster(): - node = platform.node() - return node.endswith(".scicore.unibas.ch") or node.endswith(".cluster.bc2.ch") - - -def is_test_run(): - return ARGS.test_run == "yes" or ( - ARGS.test_run == "auto" and not is_running_on_cluster()) - - -def get_algo_nick(revision, config_nick): - return "{revision}-{config_nick}".format(**locals()) - - -class IssueConfig(object): - """Hold information about a planner configuration. - - See FastDownwardExperiment.add_algorithm() for documentation of the - constructor's options. - - """ - def __init__(self, nick, component_options, - build_options=None, driver_options=None): - self.nick = nick - self.component_options = component_options - self.build_options = build_options - self.driver_options = driver_options - - -class IssueExperiment(FastDownwardExperiment): - """Subclass of FastDownwardExperiment with some convenience features.""" - - DEFAULT_TEST_SUITE = ["depot:p01.pddl", "gripper:prob01.pddl"] - - DEFAULT_TABLE_ATTRIBUTES = [ - "cost", - "coverage", - "error", - "evaluations", - "expansions", - "expansions_until_last_jump", - "generated", - "memory", - "planner_memory", - "planner_time", - "quality", - "run_dir", - "score_evaluations", - "score_expansions", - "score_generated", - "score_memory", - "score_search_time", - "score_total_time", - "search_time", - "total_time", - ] - - DEFAULT_SCATTER_PLOT_ATTRIBUTES = [ - "evaluations", - "expansions", - "expansions_until_last_jump", - "initial_h_value", - "memory", - "search_time", - "total_time", - ] - - PORTFOLIO_ATTRIBUTES = [ - "cost", - "coverage", - "error", - 
"plan_length", - "run_dir", - ] - - def __init__(self, revisions=None, configs=None, path=None, **kwargs): - """ - - You can either specify both *revisions* and *configs* or none - of them. If they are omitted, you will need to call - exp.add_algorithm() manually. - - If *revisions* is given, it must be a non-empty list of - revision identifiers, which specify which planner versions to - use in the experiment. The same versions are used for - translator, preprocessor and search. :: - - IssueExperiment(revisions=["issue123", "4b3d581643"], ...) - - If *configs* is given, it must be a non-empty list of - IssueConfig objects. :: - - IssueExperiment(..., configs=[ - IssueConfig("ff", ["--search", "eager_greedy(ff())"]), - IssueConfig( - "lama", [], - driver_options=["--alias", "seq-sat-lama-2011"]), - ]) - - If *path* is specified, it must be the path to where the - experiment should be built (e.g. - /home/john/experiments/issue123/exp01/). If omitted, the - experiment path is derived automatically from the main - script's filename. 
Example:: - - script = experiments/issue123/exp01.py --> - path = experiments/issue123/data/issue123-exp01/ - - """ - - path = path or get_data_dir() - - FastDownwardExperiment.__init__(self, path=path, **kwargs) - - if (revisions and not configs) or (not revisions and configs): - raise ValueError( - "please provide either both or none of revisions and configs") - - for rev in revisions: - for config in configs: - self.add_algorithm( - get_algo_nick(rev, config.nick), - get_repo_base(), - rev, - config.component_options, - build_options=config.build_options, - driver_options=config.driver_options) - - self._revisions = revisions - self._configs = configs - - @classmethod - def _is_portfolio(cls, config_nick): - return "fdss" in config_nick - - @classmethod - def get_supported_attributes(cls, config_nick, attributes): - if cls._is_portfolio(config_nick): - return [attr for attr in attributes - if attr in cls.PORTFOLIO_ATTRIBUTES] - return attributes - - def add_absolute_report_step(self, **kwargs): - """Add step that makes an absolute report. - - Absolute reports are useful for experiments that don't compare - revisions. - - The report is written to the experiment evaluation directory. - - All *kwargs* will be passed to the AbsoluteReport class. If the - keyword argument *attributes* is not specified, a default list - of attributes is used. :: - - exp.add_absolute_report_step(attributes=["coverage"]) - - """ - kwargs.setdefault("attributes", self.DEFAULT_TABLE_ATTRIBUTES) - report = AbsoluteReport(**kwargs) - outfile = os.path.join( - self.eval_dir, - get_experiment_name() + "." + report.output_format) - self.add_report(report, outfile=outfile) - self.add_step( - 'publish-absolute-report', subprocess.call, ['publish', outfile]) - - def add_comparison_table_step(self, **kwargs): - """Add a step that makes pairwise revision comparisons. - - Create comparative reports for all pairs of Fast Downward - revisions. 
Each report pairs up the runs of the same config and - lists the two absolute attribute values and their difference - for all attributes in kwargs["attributes"]. - - All *kwargs* will be passed to the CompareConfigsReport class. - If the keyword argument *attributes* is not specified, a - default list of attributes is used. :: - - exp.add_comparison_table_step(attributes=["coverage"]) - - """ - kwargs.setdefault("attributes", self.DEFAULT_TABLE_ATTRIBUTES) - - def make_comparison_tables(): - for rev1, rev2 in itertools.combinations(self._revisions, 2): - compared_configs = [] - for config in self._configs: - config_nick = config.nick - compared_configs.append( - ("%s-%s" % (rev1, config_nick), - "%s-%s" % (rev2, config_nick), - "Diff (%s)" % config_nick)) - report = ComparativeReport(compared_configs, **kwargs) - outfile = os.path.join( - self.eval_dir, - "%s-%s-%s-compare.%s" % ( - self.name, rev1, rev2, report.output_format)) - report(self.eval_dir, outfile) - - def publish_comparison_tables(): - for rev1, rev2 in itertools.combinations(self._revisions, 2): - outfile = os.path.join( - self.eval_dir, - "%s-%s-%s-compare.html" % (self.name, rev1, rev2)) - subprocess.call(["publish", outfile]) - - self.add_step("make-comparison-tables", make_comparison_tables) - self.add_step( - "publish-comparison-tables", publish_comparison_tables) - - def add_scatter_plot_step(self, relative=False, attributes=None): - """Add step creating (relative) scatter plots for all revision pairs. - - Create a scatter plot for each combination of attribute, - configuration and revisions pair. If *attributes* is not - specified, a list of common scatter plot attributes is used. - For portfolios all attributes except "cost", "coverage" and - "plan_length" will be ignored. 
:: - - exp.add_scatter_plot_step(attributes=["expansions"]) - - """ - if relative: - report_class = RelativeScatterPlotReport - scatter_dir = os.path.join(self.eval_dir, "scatter-relative") - step_name = "make-relative-scatter-plots" - else: - report_class = ScatterPlotReport - scatter_dir = os.path.join(self.eval_dir, "scatter-absolute") - step_name = "make-absolute-scatter-plots" - if attributes is None: - attributes = self.DEFAULT_SCATTER_PLOT_ATTRIBUTES - - def make_scatter_plot(config_nick, rev1, rev2, attribute): - name = "-".join([self.name, rev1, rev2, attribute, config_nick]) - print "Make scatter plot for", name - algo1 = "{}-{}".format(rev1, config_nick) - algo2 = "{}-{}".format(rev2, config_nick) - report = report_class( - filter_config=[algo1, algo2], - attributes=[attribute], - get_category=lambda run1, run2: run1["domain"], - legend_location=(1.3, 0.5)) - report( - self.eval_dir, - os.path.join(scatter_dir, rev1 + "-" + rev2, name)) - - def make_scatter_plots(): - for config in self._configs: - for rev1, rev2 in itertools.combinations(self._revisions, 2): - for attribute in self.get_supported_attributes( - config.nick, attributes): - make_scatter_plot(config.nick, rev1, rev2, attribute) - - self.add_step(step_name, make_scatter_plots) diff --git a/experiments/issue869/relativescatter.py b/experiments/issue869/relativescatter.py deleted file mode 100644 index f74cb6e721..0000000000 --- a/experiments/issue869/relativescatter.py +++ /dev/null @@ -1,105 +0,0 @@ -# -*- coding: utf-8 -*- - -from collections import defaultdict - -from matplotlib import ticker - -from downward.reports.scatter import ScatterPlotReport -from downward.reports.plot import PlotReport, Matplotlib, MatplotlibPlot - - -# TODO: handle outliers - -# TODO: this is mostly copied from ScatterMatplotlib (scatter.py) -class RelativeScatterMatplotlib(Matplotlib): - @classmethod - def _plot(cls, report, axes, categories, styles): - # Display grid - axes.grid(b=True, linestyle='-', 
color='0.75') - - has_points = False - # Generate the scatter plots - for category, coords in sorted(categories.items()): - X, Y = zip(*coords) - axes.scatter(X, Y, s=42, label=category, **styles[category]) - if X and Y: - has_points = True - - if report.xscale == 'linear' or report.yscale == 'linear': - plot_size = report.missing_val * 1.01 - else: - plot_size = report.missing_val * 1.25 - - # make 5 ticks above and below 1 - yticks = [] - tick_step = report.ylim_top**(1/5.0) - for i in xrange(-5, 6): - yticks.append(tick_step**i) - axes.set_yticks(yticks) - axes.get_yaxis().set_major_formatter(ticker.ScalarFormatter()) - - axes.set_xlim(report.xlim_left or -1, report.xlim_right or plot_size) - axes.set_ylim(report.ylim_bottom or -1, report.ylim_top or plot_size) - - for axis in [axes.xaxis, axes.yaxis]: - MatplotlibPlot.change_axis_formatter( - axis, - report.missing_val if report.show_missing else None) - return has_points - - -class RelativeScatterPlotReport(ScatterPlotReport): - """ - Generate a scatter plot that shows a relative comparison of two - algorithms with regard to the given attribute. The attribute value - of algorithm 1 is shown on the x-axis and the relation to the value - of algorithm 2 on the y-axis. - """ - - def __init__(self, show_missing=True, get_category=None, **kwargs): - ScatterPlotReport.__init__(self, show_missing, get_category, **kwargs) - if self.output_format == 'tex': - raise "not supported" - else: - self.writer = RelativeScatterMatplotlib - - def _fill_categories(self, runs): - # We discard the *runs* parameter. 
- # Map category names to value tuples - categories = defaultdict(list) - self.ylim_bottom = 2 - self.ylim_top = 0.5 - self.xlim_left = float("inf") - for (domain, problem), runs in self.problem_runs.items(): - if len(runs) != 2: - continue - run1, run2 = runs - assert (run1['algorithm'] == self.algorithms[0] and - run2['algorithm'] == self.algorithms[1]) - val1 = run1.get(self.attribute) - val2 = run2.get(self.attribute) - if val1 is None or val2 is None: - continue - category = self.get_category(run1, run2) - assert val1 > 0, (domain, problem, self.algorithms[0], val1) - assert val2 > 0, (domain, problem, self.algorithms[1], val2) - x = val1 - y = val2 / float(val1) - - categories[category].append((x, y)) - - self.ylim_top = max(self.ylim_top, y) - self.ylim_bottom = min(self.ylim_bottom, y) - self.xlim_left = min(self.xlim_left, x) - - # center around 1 - if self.ylim_bottom < 1: - self.ylim_top = max(self.ylim_top, 1 / float(self.ylim_bottom)) - if self.ylim_top > 1: - self.ylim_bottom = min(self.ylim_bottom, 1 / float(self.ylim_top)) - return categories - - def _set_scales(self, xscale, yscale): - # ScatterPlot uses log-scaling on the x-axis by default. - PlotReport._set_scales( - self, xscale or self.attribute.scale or 'log', 'log') diff --git a/experiments/issue870/base-opt.py b/experiments/issue870/base-opt.py deleted file mode 100755 index 7c2135ad7a..0000000000 --- a/experiments/issue870/base-opt.py +++ /dev/null @@ -1,80 +0,0 @@ -#! 
/usr/bin/env python -# -*- coding: utf-8 -*- - -import itertools -import os - -from lab.environments import LocalEnvironment, BaselSlurmEnvironment - -from downward.reports.compare import ComparativeReport - -import common_setup -from common_setup import IssueConfig, IssueExperiment -from relativescatter import RelativeScatterPlotReport - -DIR = os.path.dirname(os.path.abspath(__file__)) -SCRIPT_NAME = os.path.splitext(os.path.basename(__file__))[0] -BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"] -REVISIONS = ["issue870-base"] -BUILDS = ["release64", "release64dynamic"] -CONFIG_NICKS = [ - ("blind", ["--search", "astar(blind())"]), - ("lmcut", ["--search", "astar(lmcut())"]), - #("seq", ["--search", "astar(operatorcounting([state_equation_constraints()]))"]), -] -CONFIGS = [ - IssueConfig( - config_nick + ":" + build, - config, - build_options=[build], - driver_options=["--build", build]) - for rev in REVISIONS - for build in BUILDS - for config_nick, config in CONFIG_NICKS -] - -SUITE = common_setup.DEFAULT_OPTIMAL_SUITE -ENVIRONMENT = BaselSlurmEnvironment( - partition="infai_2", - email="jendrik.seipp@unibas.ch", - export=["PATH", "DOWNWARD_BENCHMARKS"]) - -if common_setup.is_test_run(): - SUITE = IssueExperiment.DEFAULT_TEST_SUITE - ENVIRONMENT = LocalEnvironment(processes=1) - -exp = IssueExperiment( - revisions=REVISIONS, - configs=CONFIGS, - environment=ENVIRONMENT, -) -exp.add_suite(BENCHMARKS_DIR, SUITE) - -exp.add_parser(exp.EXITCODE_PARSER) -exp.add_parser(exp.TRANSLATOR_PARSER) -exp.add_parser(exp.SINGLE_SEARCH_PARSER) -exp.add_parser(exp.PLANNER_PARSER) - -exp.add_step('build', exp.build) -exp.add_step('start', exp.start_runs) -exp.add_fetcher(name='fetch') - -exp.add_parse_again_step() - -#exp.add_absolute_report_step() -#exp.add_comparison_table_step() - -attributes = IssueExperiment.DEFAULT_TABLE_ATTRIBUTES - -for rev in REVISIONS: - algorithm_pairs = [ - ("{rev}-{nick}:{build1}".format(**locals()), - "{rev}-{nick}:{build2}".format(**locals()), 
- "Diff ({rev}-{nick})".format(**locals())) - for build1, build2 in itertools.combinations(BUILDS, 2) - for nick, config in CONFIG_NICKS] - exp.add_report( - ComparativeReport(algorithm_pairs, attributes=attributes), - name="issue839-opt-static-vs-dynamic") - -exp.run_steps() diff --git a/experiments/issue870/common_setup.py b/experiments/issue870/common_setup.py deleted file mode 100644 index f64c5c15b3..0000000000 --- a/experiments/issue870/common_setup.py +++ /dev/null @@ -1,394 +0,0 @@ -# -*- coding: utf-8 -*- - -import itertools -import os -import platform -import subprocess -import sys - -from lab.experiment import ARGPARSER -from lab import tools - -from downward.experiment import FastDownwardExperiment -from downward.reports.absolute import AbsoluteReport -from downward.reports.compare import ComparativeReport -from downward.reports.scatter import ScatterPlotReport - -from relativescatter import RelativeScatterPlotReport - - -def parse_args(): - ARGPARSER.add_argument( - "--test", - choices=["yes", "no", "auto"], - default="auto", - dest="test_run", - help="test experiment locally on a small suite if --test=yes or " - "--test=auto and we are not on a cluster") - return ARGPARSER.parse_args() - -ARGS = parse_args() - - -DEFAULT_OPTIMAL_SUITE = [ - 'agricola-opt18-strips', 'airport', 'barman-opt11-strips', - 'barman-opt14-strips', 'blocks', 'childsnack-opt14-strips', - 'data-network-opt18-strips', 'depot', 'driverlog', - 'elevators-opt08-strips', 'elevators-opt11-strips', - 'floortile-opt11-strips', 'floortile-opt14-strips', 'freecell', - 'ged-opt14-strips', 'grid', 'gripper', 'hiking-opt14-strips', - 'logistics00', 'logistics98', 'miconic', 'movie', 'mprime', - 'mystery', 'nomystery-opt11-strips', 'openstacks-opt08-strips', - 'openstacks-opt11-strips', 'openstacks-opt14-strips', - 'openstacks-strips', 'organic-synthesis-opt18-strips', - 'organic-synthesis-split-opt18-strips', 'parcprinter-08-strips', - 'parcprinter-opt11-strips', 'parking-opt11-strips', - 
'parking-opt14-strips', 'pathways-noneg', 'pegsol-08-strips', - 'pegsol-opt11-strips', 'petri-net-alignment-opt18-strips', - 'pipesworld-notankage', 'pipesworld-tankage', 'psr-small', 'rovers', - 'satellite', 'scanalyzer-08-strips', 'scanalyzer-opt11-strips', - 'snake-opt18-strips', 'sokoban-opt08-strips', - 'sokoban-opt11-strips', 'spider-opt18-strips', 'storage', - 'termes-opt18-strips', 'tetris-opt14-strips', - 'tidybot-opt11-strips', 'tidybot-opt14-strips', 'tpp', - 'transport-opt08-strips', 'transport-opt11-strips', - 'transport-opt14-strips', 'trucks-strips', 'visitall-opt11-strips', - 'visitall-opt14-strips', 'woodworking-opt08-strips', - 'woodworking-opt11-strips', 'zenotravel'] - -DEFAULT_SATISFICING_SUITE = [ - 'agricola-sat18-strips', 'airport', 'assembly', - 'barman-sat11-strips', 'barman-sat14-strips', 'blocks', - 'caldera-sat18-adl', 'caldera-split-sat18-adl', 'cavediving-14-adl', - 'childsnack-sat14-strips', 'citycar-sat14-adl', - 'data-network-sat18-strips', 'depot', 'driverlog', - 'elevators-sat08-strips', 'elevators-sat11-strips', - 'flashfill-sat18-adl', 'floortile-sat11-strips', - 'floortile-sat14-strips', 'freecell', 'ged-sat14-strips', 'grid', - 'gripper', 'hiking-sat14-strips', 'logistics00', 'logistics98', - 'maintenance-sat14-adl', 'miconic', 'miconic-fulladl', - 'miconic-simpleadl', 'movie', 'mprime', 'mystery', - 'nomystery-sat11-strips', 'nurikabe-sat18-adl', 'openstacks', - 'openstacks-sat08-adl', 'openstacks-sat08-strips', - 'openstacks-sat11-strips', 'openstacks-sat14-strips', - 'openstacks-strips', 'optical-telegraphs', - 'organic-synthesis-sat18-strips', - 'organic-synthesis-split-sat18-strips', 'parcprinter-08-strips', - 'parcprinter-sat11-strips', 'parking-sat11-strips', - 'parking-sat14-strips', 'pathways', 'pathways-noneg', - 'pegsol-08-strips', 'pegsol-sat11-strips', 'philosophers', - 'pipesworld-notankage', 'pipesworld-tankage', 'psr-large', - 'psr-middle', 'psr-small', 'rovers', 'satellite', - 'scanalyzer-08-strips', 
'scanalyzer-sat11-strips', 'schedule', - 'settlers-sat18-adl', 'snake-sat18-strips', 'sokoban-sat08-strips', - 'sokoban-sat11-strips', 'spider-sat18-strips', 'storage', - 'termes-sat18-strips', 'tetris-sat14-strips', - 'thoughtful-sat14-strips', 'tidybot-sat11-strips', 'tpp', - 'transport-sat08-strips', 'transport-sat11-strips', - 'transport-sat14-strips', 'trucks', 'trucks-strips', - 'visitall-sat11-strips', 'visitall-sat14-strips', - 'woodworking-sat08-strips', 'woodworking-sat11-strips', - 'zenotravel'] - - -def get_script(): - """Get file name of main script.""" - return tools.get_script_path() - - -def get_script_dir(): - """Get directory of main script. - - Usually a relative directory (depends on how it was called by the user.)""" - return os.path.dirname(get_script()) - - -def get_experiment_name(): - """Get name for experiment. - - Derived from the absolute filename of the main script, e.g. - "/ham/spam/eggs.py" => "spam-eggs".""" - script = os.path.abspath(get_script()) - script_dir = os.path.basename(os.path.dirname(script)) - script_base = os.path.splitext(os.path.basename(script))[0] - return "%s-%s" % (script_dir, script_base) - - -def get_data_dir(): - """Get data dir for the experiment. - - This is the subdirectory "data" of the directory containing - the main script.""" - return os.path.join(get_script_dir(), "data", get_experiment_name()) - - -def get_repo_base(): - """Get base directory of the repository, as an absolute path. - - Search upwards in the directory tree from the main script until a - directory with a subdirectory named ".hg" is found. 
- - Abort if the repo base cannot be found.""" - path = os.path.abspath(get_script_dir()) - while os.path.dirname(path) != path: - if os.path.exists(os.path.join(path, ".hg")): - return path - path = os.path.dirname(path) - sys.exit("repo base could not be found") - - -def is_running_on_cluster(): - node = platform.node() - return node.endswith(".scicore.unibas.ch") or node.endswith(".cluster.bc2.ch") - - -def is_test_run(): - return ARGS.test_run == "yes" or ( - ARGS.test_run == "auto" and not is_running_on_cluster()) - - -def get_algo_nick(revision, config_nick): - return "{revision}-{config_nick}".format(**locals()) - - -class IssueConfig(object): - """Hold information about a planner configuration. - - See FastDownwardExperiment.add_algorithm() for documentation of the - constructor's options. - - """ - def __init__(self, nick, component_options, - build_options=None, driver_options=None): - self.nick = nick - self.component_options = component_options - self.build_options = build_options - self.driver_options = driver_options - - -class IssueExperiment(FastDownwardExperiment): - """Subclass of FastDownwardExperiment with some convenience features.""" - - DEFAULT_TEST_SUITE = ["depot:p01.pddl", "gripper:prob01.pddl"] - - DEFAULT_TABLE_ATTRIBUTES = [ - "cost", - "coverage", - "error", - "evaluations", - "expansions", - "expansions_until_last_jump", - "generated", - "memory", - "planner_memory", - "planner_time", - "quality", - "run_dir", - "score_evaluations", - "score_expansions", - "score_generated", - "score_memory", - "score_search_time", - "score_total_time", - "search_time", - "total_time", - ] - - DEFAULT_SCATTER_PLOT_ATTRIBUTES = [ - "evaluations", - "expansions", - "expansions_until_last_jump", - "initial_h_value", - "memory", - "search_time", - "total_time", - ] - - PORTFOLIO_ATTRIBUTES = [ - "cost", - "coverage", - "error", - "plan_length", - "run_dir", - ] - - def __init__(self, revisions=None, configs=None, path=None, **kwargs): - """ - - You can 
either specify both *revisions* and *configs* or none - of them. If they are omitted, you will need to call - exp.add_algorithm() manually. - - If *revisions* is given, it must be a non-empty list of - revision identifiers, which specify which planner versions to - use in the experiment. The same versions are used for - translator, preprocessor and search. :: - - IssueExperiment(revisions=["issue123", "4b3d581643"], ...) - - If *configs* is given, it must be a non-empty list of - IssueConfig objects. :: - - IssueExperiment(..., configs=[ - IssueConfig("ff", ["--search", "eager_greedy(ff())"]), - IssueConfig( - "lama", [], - driver_options=["--alias", "seq-sat-lama-2011"]), - ]) - - If *path* is specified, it must be the path to where the - experiment should be built (e.g. - /home/john/experiments/issue123/exp01/). If omitted, the - experiment path is derived automatically from the main - script's filename. Example:: - - script = experiments/issue123/exp01.py --> - path = experiments/issue123/data/issue123-exp01/ - - """ - - path = path or get_data_dir() - - FastDownwardExperiment.__init__(self, path=path, **kwargs) - - if (revisions and not configs) or (not revisions and configs): - raise ValueError( - "please provide either both or none of revisions and configs") - - for rev in revisions: - for config in configs: - self.add_algorithm( - get_algo_nick(rev, config.nick), - get_repo_base(), - rev, - config.component_options, - build_options=config.build_options, - driver_options=config.driver_options) - - self._revisions = revisions - self._configs = configs - - @classmethod - def _is_portfolio(cls, config_nick): - return "fdss" in config_nick - - @classmethod - def get_supported_attributes(cls, config_nick, attributes): - if cls._is_portfolio(config_nick): - return [attr for attr in attributes - if attr in cls.PORTFOLIO_ATTRIBUTES] - return attributes - - def add_absolute_report_step(self, **kwargs): - """Add step that makes an absolute report. 
- - Absolute reports are useful for experiments that don't compare - revisions. - - The report is written to the experiment evaluation directory. - - All *kwargs* will be passed to the AbsoluteReport class. If the - keyword argument *attributes* is not specified, a default list - of attributes is used. :: - - exp.add_absolute_report_step(attributes=["coverage"]) - - """ - kwargs.setdefault("attributes", self.DEFAULT_TABLE_ATTRIBUTES) - report = AbsoluteReport(**kwargs) - outfile = os.path.join( - self.eval_dir, - get_experiment_name() + "." + report.output_format) - self.add_report(report, outfile=outfile) - self.add_step( - 'publish-absolute-report', subprocess.call, ['publish', outfile]) - - def add_comparison_table_step(self, **kwargs): - """Add a step that makes pairwise revision comparisons. - - Create comparative reports for all pairs of Fast Downward - revisions. Each report pairs up the runs of the same config and - lists the two absolute attribute values and their difference - for all attributes in kwargs["attributes"]. - - All *kwargs* will be passed to the CompareConfigsReport class. - If the keyword argument *attributes* is not specified, a - default list of attributes is used. 
:: - - exp.add_comparison_table_step(attributes=["coverage"]) - - """ - kwargs.setdefault("attributes", self.DEFAULT_TABLE_ATTRIBUTES) - - def make_comparison_tables(): - for rev1, rev2 in itertools.combinations(self._revisions, 2): - compared_configs = [] - for config in self._configs: - config_nick = config.nick - compared_configs.append( - ("%s-%s" % (rev1, config_nick), - "%s-%s" % (rev2, config_nick), - "Diff (%s)" % config_nick)) - report = ComparativeReport(compared_configs, **kwargs) - outfile = os.path.join( - self.eval_dir, - "%s-%s-%s-compare.%s" % ( - self.name, rev1, rev2, report.output_format)) - report(self.eval_dir, outfile) - - def publish_comparison_tables(): - for rev1, rev2 in itertools.combinations(self._revisions, 2): - outfile = os.path.join( - self.eval_dir, - "%s-%s-%s-compare.html" % (self.name, rev1, rev2)) - subprocess.call(["publish", outfile]) - - self.add_step("make-comparison-tables", make_comparison_tables) - self.add_step( - "publish-comparison-tables", publish_comparison_tables) - - def add_scatter_plot_step(self, relative=False, attributes=None): - """Add step creating (relative) scatter plots for all revision pairs. - - Create a scatter plot for each combination of attribute, - configuration and revisions pair. If *attributes* is not - specified, a list of common scatter plot attributes is used. - For portfolios all attributes except "cost", "coverage" and - "plan_length" will be ignored. 
:: - - exp.add_scatter_plot_step(attributes=["expansions"]) - - """ - if relative: - report_class = RelativeScatterPlotReport - scatter_dir = os.path.join(self.eval_dir, "scatter-relative") - step_name = "make-relative-scatter-plots" - else: - report_class = ScatterPlotReport - scatter_dir = os.path.join(self.eval_dir, "scatter-absolute") - step_name = "make-absolute-scatter-plots" - if attributes is None: - attributes = self.DEFAULT_SCATTER_PLOT_ATTRIBUTES - - def make_scatter_plot(config_nick, rev1, rev2, attribute): - name = "-".join([self.name, rev1, rev2, attribute, config_nick]) - print "Make scatter plot for", name - algo1 = "{}-{}".format(rev1, config_nick) - algo2 = "{}-{}".format(rev2, config_nick) - report = report_class( - filter_config=[algo1, algo2], - attributes=[attribute], - get_category=lambda run1, run2: run1["domain"], - legend_location=(1.3, 0.5)) - report( - self.eval_dir, - os.path.join(scatter_dir, rev1 + "-" + rev2, name)) - - def make_scatter_plots(): - for config in self._configs: - for rev1, rev2 in itertools.combinations(self._revisions, 2): - for attribute in self.get_supported_attributes( - config.nick, attributes): - make_scatter_plot(config.nick, rev1, rev2, attribute) - - self.add_step(step_name, make_scatter_plots) diff --git a/experiments/issue870/relativescatter.py b/experiments/issue870/relativescatter.py deleted file mode 100644 index f74cb6e721..0000000000 --- a/experiments/issue870/relativescatter.py +++ /dev/null @@ -1,105 +0,0 @@ -# -*- coding: utf-8 -*- - -from collections import defaultdict - -from matplotlib import ticker - -from downward.reports.scatter import ScatterPlotReport -from downward.reports.plot import PlotReport, Matplotlib, MatplotlibPlot - - -# TODO: handle outliers - -# TODO: this is mostly copied from ScatterMatplotlib (scatter.py) -class RelativeScatterMatplotlib(Matplotlib): - @classmethod - def _plot(cls, report, axes, categories, styles): - # Display grid - axes.grid(b=True, linestyle='-', 
color='0.75') - - has_points = False - # Generate the scatter plots - for category, coords in sorted(categories.items()): - X, Y = zip(*coords) - axes.scatter(X, Y, s=42, label=category, **styles[category]) - if X and Y: - has_points = True - - if report.xscale == 'linear' or report.yscale == 'linear': - plot_size = report.missing_val * 1.01 - else: - plot_size = report.missing_val * 1.25 - - # make 5 ticks above and below 1 - yticks = [] - tick_step = report.ylim_top**(1/5.0) - for i in xrange(-5, 6): - yticks.append(tick_step**i) - axes.set_yticks(yticks) - axes.get_yaxis().set_major_formatter(ticker.ScalarFormatter()) - - axes.set_xlim(report.xlim_left or -1, report.xlim_right or plot_size) - axes.set_ylim(report.ylim_bottom or -1, report.ylim_top or plot_size) - - for axis in [axes.xaxis, axes.yaxis]: - MatplotlibPlot.change_axis_formatter( - axis, - report.missing_val if report.show_missing else None) - return has_points - - -class RelativeScatterPlotReport(ScatterPlotReport): - """ - Generate a scatter plot that shows a relative comparison of two - algorithms with regard to the given attribute. The attribute value - of algorithm 1 is shown on the x-axis and the relation to the value - of algorithm 2 on the y-axis. - """ - - def __init__(self, show_missing=True, get_category=None, **kwargs): - ScatterPlotReport.__init__(self, show_missing, get_category, **kwargs) - if self.output_format == 'tex': - raise "not supported" - else: - self.writer = RelativeScatterMatplotlib - - def _fill_categories(self, runs): - # We discard the *runs* parameter. 
- # Map category names to value tuples - categories = defaultdict(list) - self.ylim_bottom = 2 - self.ylim_top = 0.5 - self.xlim_left = float("inf") - for (domain, problem), runs in self.problem_runs.items(): - if len(runs) != 2: - continue - run1, run2 = runs - assert (run1['algorithm'] == self.algorithms[0] and - run2['algorithm'] == self.algorithms[1]) - val1 = run1.get(self.attribute) - val2 = run2.get(self.attribute) - if val1 is None or val2 is None: - continue - category = self.get_category(run1, run2) - assert val1 > 0, (domain, problem, self.algorithms[0], val1) - assert val2 > 0, (domain, problem, self.algorithms[1], val2) - x = val1 - y = val2 / float(val1) - - categories[category].append((x, y)) - - self.ylim_top = max(self.ylim_top, y) - self.ylim_bottom = min(self.ylim_bottom, y) - self.xlim_left = min(self.xlim_left, x) - - # center around 1 - if self.ylim_bottom < 1: - self.ylim_top = max(self.ylim_top, 1 / float(self.ylim_bottom)) - if self.ylim_top > 1: - self.ylim_bottom = min(self.ylim_bottom, 1 / float(self.ylim_top)) - return categories - - def _set_scales(self, xscale, yscale): - # ScatterPlot uses log-scaling on the x-axis by default. - PlotReport._set_scales( - self, xscale or self.attribute.scale or 'log', 'log') diff --git a/experiments/issue870/v1-seq.py b/experiments/issue870/v1-seq.py deleted file mode 100755 index 1ae60ce4ec..0000000000 --- a/experiments/issue870/v1-seq.py +++ /dev/null @@ -1,71 +0,0 @@ -#! 
/usr/bin/env python -# -*- coding: utf-8 -*- - -import itertools -import os - -from lab.environments import LocalEnvironment, BaselSlurmEnvironment - -from downward.reports.compare import ComparativeReport - -import common_setup -from common_setup import IssueConfig, IssueExperiment -from relativescatter import RelativeScatterPlotReport - -DIR = os.path.dirname(os.path.abspath(__file__)) -SCRIPT_NAME = os.path.splitext(os.path.basename(__file__))[0] -BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"] -BUILDS_AND_REVISIONS = [("release64", "issue870-base"), ("release64dynamic", "issue870-v1")] -CONFIG_NICKS = [ - ("seq", ["--search", "astar(operatorcounting([state_equation_constraints()]))"]), -] -SUITE = common_setup.DEFAULT_OPTIMAL_SUITE -ENVIRONMENT = BaselSlurmEnvironment( - partition="infai_2", - email="jendrik.seipp@unibas.ch", - export=["PATH", "DOWNWARD_BENCHMARKS"]) - -if common_setup.is_test_run(): - SUITE = IssueExperiment.DEFAULT_TEST_SUITE - ENVIRONMENT = LocalEnvironment(processes=1) - -exp = IssueExperiment( - revisions=[], - configs=[], - environment=ENVIRONMENT, -) -exp.add_suite(BENCHMARKS_DIR, SUITE) -for build, rev in BUILDS_AND_REVISIONS: - for config_nick, config in CONFIG_NICKS: - exp.add_algorithm( - ":".join([config_nick, build, rev]), - common_setup.get_repo_base(), - rev, - config, - build_options=[build], - driver_options=["--build", build]) - -exp.add_parser(exp.EXITCODE_PARSER) -exp.add_parser(exp.TRANSLATOR_PARSER) -exp.add_parser(exp.SINGLE_SEARCH_PARSER) -exp.add_parser(exp.PLANNER_PARSER) - -exp.add_step('build', exp.build) -exp.add_step('start', exp.start_runs) -exp.add_fetcher(name='fetch') - -exp.add_absolute_report_step() -#exp.add_comparison_table_step() - -attributes = IssueExperiment.DEFAULT_TABLE_ATTRIBUTES - -algorithm_pairs = [ - ("seq:release64:issue870-base", - "seq:release64dynamic:issue870-v1", - "Diff (seq)") - ] -exp.add_report( - ComparativeReport(algorithm_pairs, attributes=attributes), - 
name="issue870-seq-static-vs-dynamic") - -exp.run_steps() diff --git a/experiments/issue874/common_setup.py b/experiments/issue874/common_setup.py deleted file mode 100644 index 737e1870b8..0000000000 --- a/experiments/issue874/common_setup.py +++ /dev/null @@ -1,395 +0,0 @@ -# -*- coding: utf-8 -*- - -import itertools -import os -import platform -import subprocess -import sys - -from lab.experiment import ARGPARSER -from lab import tools - -from downward.experiment import FastDownwardExperiment -from downward.reports.absolute import AbsoluteReport -from downward.reports.compare import ComparativeReport -from downward.reports.scatter import ScatterPlotReport - -from relativescatter import RelativeScatterPlotReport - - -def parse_args(): - ARGPARSER.add_argument( - "--test", - choices=["yes", "no", "auto"], - default="auto", - dest="test_run", - help="test experiment locally on a small suite if --test=yes or " - "--test=auto and we are not on a cluster") - return ARGPARSER.parse_args() - -ARGS = parse_args() - - -DEFAULT_OPTIMAL_SUITE = [ - 'agricola-opt18-strips', 'airport', 'barman-opt11-strips', - 'barman-opt14-strips', 'blocks', 'childsnack-opt14-strips', - 'data-network-opt18-strips', 'depot', 'driverlog', - 'elevators-opt08-strips', 'elevators-opt11-strips', - 'floortile-opt11-strips', 'floortile-opt14-strips', 'freecell', - 'ged-opt14-strips', 'grid', 'gripper', 'hiking-opt14-strips', - 'logistics00', 'logistics98', 'miconic', 'movie', 'mprime', - 'mystery', 'nomystery-opt11-strips', 'openstacks-opt08-strips', - 'openstacks-opt11-strips', 'openstacks-opt14-strips', - 'openstacks-strips', 'organic-synthesis-opt18-strips', - 'organic-synthesis-split-opt18-strips', 'parcprinter-08-strips', - 'parcprinter-opt11-strips', 'parking-opt11-strips', - 'parking-opt14-strips', 'pathways-noneg', 'pegsol-08-strips', - 'pegsol-opt11-strips', 'petri-net-alignment-opt18-strips', - 'pipesworld-notankage', 'pipesworld-tankage', 'psr-small', 'rovers', - 'satellite', 
'scanalyzer-08-strips', 'scanalyzer-opt11-strips', - 'snake-opt18-strips', 'sokoban-opt08-strips', - 'sokoban-opt11-strips', 'spider-opt18-strips', 'storage', - 'termes-opt18-strips', 'tetris-opt14-strips', - 'tidybot-opt11-strips', 'tidybot-opt14-strips', 'tpp', - 'transport-opt08-strips', 'transport-opt11-strips', - 'transport-opt14-strips', 'trucks-strips', 'visitall-opt11-strips', - 'visitall-opt14-strips', 'woodworking-opt08-strips', - 'woodworking-opt11-strips', 'zenotravel'] - -DEFAULT_SATISFICING_SUITE = [ - 'agricola-sat18-strips', 'airport', 'assembly', - 'barman-sat11-strips', 'barman-sat14-strips', 'blocks', - 'caldera-sat18-adl', 'caldera-split-sat18-adl', 'cavediving-14-adl', - 'childsnack-sat14-strips', 'citycar-sat14-adl', - 'data-network-sat18-strips', 'depot', 'driverlog', - 'elevators-sat08-strips', 'elevators-sat11-strips', - 'flashfill-sat18-adl', 'floortile-sat11-strips', - 'floortile-sat14-strips', 'freecell', 'ged-sat14-strips', 'grid', - 'gripper', 'hiking-sat14-strips', 'logistics00', 'logistics98', - 'maintenance-sat14-adl', 'miconic', 'miconic-fulladl', - 'miconic-simpleadl', 'movie', 'mprime', 'mystery', - 'nomystery-sat11-strips', 'nurikabe-sat18-adl', 'openstacks', - 'openstacks-sat08-adl', 'openstacks-sat08-strips', - 'openstacks-sat11-strips', 'openstacks-sat14-strips', - 'openstacks-strips', 'optical-telegraphs', - 'organic-synthesis-sat18-strips', - 'organic-synthesis-split-sat18-strips', 'parcprinter-08-strips', - 'parcprinter-sat11-strips', 'parking-sat11-strips', - 'parking-sat14-strips', 'pathways', 'pathways-noneg', - 'pegsol-08-strips', 'pegsol-sat11-strips', 'philosophers', - 'pipesworld-notankage', 'pipesworld-tankage', 'psr-large', - 'psr-middle', 'psr-small', 'rovers', 'satellite', - 'scanalyzer-08-strips', 'scanalyzer-sat11-strips', 'schedule', - 'settlers-sat18-adl', 'snake-sat18-strips', 'sokoban-sat08-strips', - 'sokoban-sat11-strips', 'spider-sat18-strips', 'storage', - 'termes-sat18-strips', 'tetris-sat14-strips', 
- 'thoughtful-sat14-strips', 'tidybot-sat11-strips', 'tpp', - 'transport-sat08-strips', 'transport-sat11-strips', - 'transport-sat14-strips', 'trucks', 'trucks-strips', - 'visitall-sat11-strips', 'visitall-sat14-strips', - 'woodworking-sat08-strips', 'woodworking-sat11-strips', - 'zenotravel'] - - -def get_script(): - """Get file name of main script.""" - return tools.get_script_path() - - -def get_script_dir(): - """Get directory of main script. - - Usually a relative directory (depends on how it was called by the user.)""" - return os.path.dirname(get_script()) - - -def get_experiment_name(): - """Get name for experiment. - - Derived from the absolute filename of the main script, e.g. - "/ham/spam/eggs.py" => "spam-eggs".""" - script = os.path.abspath(get_script()) - script_dir = os.path.basename(os.path.dirname(script)) - script_base = os.path.splitext(os.path.basename(script))[0] - return "%s-%s" % (script_dir, script_base) - - -def get_data_dir(): - """Get data dir for the experiment. - - This is the subdirectory "data" of the directory containing - the main script.""" - return os.path.join(get_script_dir(), "data", get_experiment_name()) - - -def get_repo_base(): - """Get base directory of the repository, as an absolute path. - - Search upwards in the directory tree from the main script until a - directory with a subdirectory named ".hg" is found. 
- - Abort if the repo base cannot be found.""" - path = os.path.abspath(get_script_dir()) - while os.path.dirname(path) != path: - if os.path.exists(os.path.join(path, ".hg")): - return path - path = os.path.dirname(path) - sys.exit("repo base could not be found") - - -def is_running_on_cluster(): - node = platform.node() - return node.endswith(".scicore.unibas.ch") or node.endswith(".cluster.bc2.ch") - - -def is_test_run(): - return ARGS.test_run == "yes" or ( - ARGS.test_run == "auto" and not is_running_on_cluster()) - - -def get_algo_nick(revision, config_nick): - return "{revision}-{config_nick}".format(**locals()) - - -class IssueConfig(object): - """Hold information about a planner configuration. - - See FastDownwardExperiment.add_algorithm() for documentation of the - constructor's options. - - """ - def __init__(self, nick, component_options, - build_options=None, driver_options=None): - self.nick = nick - self.component_options = component_options - self.build_options = build_options - self.driver_options = driver_options - - -class IssueExperiment(FastDownwardExperiment): - """Subclass of FastDownwardExperiment with some convenience features.""" - - DEFAULT_TEST_SUITE = ["depot:p01.pddl", "gripper:prob01.pddl"] - - DEFAULT_TABLE_ATTRIBUTES = [ - "cost", - "coverage", - "error", - "evaluations", - "expansions", - "expansions_until_last_jump", - "generated", - "memory", - "planner_memory", - "planner_time", - "quality", - "run_dir", - "score_evaluations", - "score_expansions", - "score_generated", - "score_memory", - "score_search_time", - "score_total_time", - "search_time", - "total_time", - ] - - DEFAULT_SCATTER_PLOT_ATTRIBUTES = [ - "evaluations", - "expansions", - "expansions_until_last_jump", - "initial_h_value", - "memory", - "search_time", - "total_time", - ] - - PORTFOLIO_ATTRIBUTES = [ - "cost", - "coverage", - "error", - "plan_length", - "run_dir", - ] - - def __init__(self, revisions=None, configs=None, path=None, **kwargs): - """ - - You can 
either specify both *revisions* and *configs* or none - of them. If they are omitted, you will need to call - exp.add_algorithm() manually. - - If *revisions* is given, it must be a non-empty list of - revision identifiers, which specify which planner versions to - use in the experiment. The same versions are used for - translator, preprocessor and search. :: - - IssueExperiment(revisions=["issue123", "4b3d581643"], ...) - - If *configs* is given, it must be a non-empty list of - IssueConfig objects. :: - - IssueExperiment(..., configs=[ - IssueConfig("ff", ["--search", "eager_greedy(ff())"]), - IssueConfig( - "lama", [], - driver_options=["--alias", "seq-sat-lama-2011"]), - ]) - - If *path* is specified, it must be the path to where the - experiment should be built (e.g. - /home/john/experiments/issue123/exp01/). If omitted, the - experiment path is derived automatically from the main - script's filename. Example:: - - script = experiments/issue123/exp01.py --> - path = experiments/issue123/data/issue123-exp01/ - - """ - - path = path or get_data_dir() - - FastDownwardExperiment.__init__(self, path=path, **kwargs) - - if (revisions and not configs) or (not revisions and configs): - raise ValueError( - "please provide either both or none of revisions and configs") - - for rev in revisions: - for config in configs: - self.add_algorithm( - get_algo_nick(rev, config.nick), - get_repo_base(), - rev, - config.component_options, - build_options=config.build_options, - driver_options=config.driver_options) - - self._revisions = revisions - self._configs = configs - - @classmethod - def _is_portfolio(cls, config_nick): - return "fdss" in config_nick - - @classmethod - def get_supported_attributes(cls, config_nick, attributes): - if cls._is_portfolio(config_nick): - return [attr for attr in attributes - if attr in cls.PORTFOLIO_ATTRIBUTES] - return attributes - - def add_absolute_report_step(self, **kwargs): - """Add step that makes an absolute report. 
- - Absolute reports are useful for experiments that don't compare - revisions. - - The report is written to the experiment evaluation directory. - - All *kwargs* will be passed to the AbsoluteReport class. If the - keyword argument *attributes* is not specified, a default list - of attributes is used. :: - - exp.add_absolute_report_step(attributes=["coverage"]) - - """ - kwargs.setdefault("attributes", self.DEFAULT_TABLE_ATTRIBUTES) - report = AbsoluteReport(**kwargs) - outfile = os.path.join( - self.eval_dir, - get_experiment_name() + "." + report.output_format) - self.add_report(report, outfile=outfile) - self.add_step( - 'publish-absolute-report', subprocess.call, ['publish', outfile]) - - def add_comparison_table_step(self, **kwargs): - """Add a step that makes pairwise revision comparisons. - - Create comparative reports for all pairs of Fast Downward - revisions. Each report pairs up the runs of the same config and - lists the two absolute attribute values and their difference - for all attributes in kwargs["attributes"]. - - All *kwargs* will be passed to the CompareConfigsReport class. - If the keyword argument *attributes* is not specified, a - default list of attributes is used. 
:: - - exp.add_comparison_table_step(attributes=["coverage"]) - - """ - kwargs.setdefault("attributes", self.DEFAULT_TABLE_ATTRIBUTES) - - def make_comparison_tables(): - for rev1, rev2 in itertools.combinations(self._revisions, 2): - compared_configs = [] - for config in self._configs: - config_nick = config.nick - compared_configs.append( - ("%s-%s" % (rev1, config_nick), - "%s-%s" % (rev2, config_nick), - "Diff (%s)" % config_nick)) - report = ComparativeReport(compared_configs, **kwargs) - outfile = os.path.join( - self.eval_dir, - "%s-%s-%s-compare.%s" % ( - self.name, rev1, rev2, report.output_format)) - report(self.eval_dir, outfile) - - def publish_comparison_tables(): - for rev1, rev2 in itertools.combinations(self._revisions, 2): - outfile = os.path.join( - self.eval_dir, - "%s-%s-%s-compare.html" % (self.name, rev1, rev2)) - subprocess.call(["publish", outfile]) - - self.add_step("make-comparison-tables", make_comparison_tables) - self.add_step( - "publish-comparison-tables", publish_comparison_tables) - - def add_scatter_plot_step(self, relative=False, attributes=None): - """Add step creating (relative) scatter plots for all revision pairs. - - Create a scatter plot for each combination of attribute, - configuration and revisions pair. If *attributes* is not - specified, a list of common scatter plot attributes is used. - For portfolios all attributes except "cost", "coverage" and - "plan_length" will be ignored. 
:: - - exp.add_scatter_plot_step(attributes=["expansions"]) - - """ - if relative: - report_class = RelativeScatterPlotReport - scatter_dir = os.path.join(self.eval_dir, "scatter-relative") - step_name = "make-relative-scatter-plots" - else: - report_class = ScatterPlotReport - scatter_dir = os.path.join(self.eval_dir, "scatter-absolute") - step_name = "make-absolute-scatter-plots" - if attributes is None: - attributes = self.DEFAULT_SCATTER_PLOT_ATTRIBUTES - - def make_scatter_plot(config_nick, rev1, rev2, attribute): - name = "-".join([self.name, rev1, rev2, attribute, config_nick]) - print "Make scatter plot for", name - algo1 = "{}-{}".format(rev1, config_nick) - algo2 = "{}-{}".format(rev2, config_nick) - report = report_class( - filter_config=[algo1, algo2], - attributes=[attribute], - get_category=lambda run1, run2: run1["domain"], - legend_location=(1.3, 0.5)) - report( - self.eval_dir, - os.path.join(scatter_dir, rev1 + "-" + rev2, name)) - - def make_scatter_plots(): - for config in self._configs: - for rev1, rev2 in itertools.combinations(self._revisions, 2): - for attribute in self.get_supported_attributes( - config.nick, attributes): - make_scatter_plot(config.nick, rev1, rev2, attribute) - - self.add_step(step_name, make_scatter_plots) - diff --git a/experiments/issue874/relativescatter.py b/experiments/issue874/relativescatter.py deleted file mode 100644 index f74cb6e721..0000000000 --- a/experiments/issue874/relativescatter.py +++ /dev/null @@ -1,105 +0,0 @@ -# -*- coding: utf-8 -*- - -from collections import defaultdict - -from matplotlib import ticker - -from downward.reports.scatter import ScatterPlotReport -from downward.reports.plot import PlotReport, Matplotlib, MatplotlibPlot - - -# TODO: handle outliers - -# TODO: this is mostly copied from ScatterMatplotlib (scatter.py) -class RelativeScatterMatplotlib(Matplotlib): - @classmethod - def _plot(cls, report, axes, categories, styles): - # Display grid - axes.grid(b=True, linestyle='-', 
color='0.75') - - has_points = False - # Generate the scatter plots - for category, coords in sorted(categories.items()): - X, Y = zip(*coords) - axes.scatter(X, Y, s=42, label=category, **styles[category]) - if X and Y: - has_points = True - - if report.xscale == 'linear' or report.yscale == 'linear': - plot_size = report.missing_val * 1.01 - else: - plot_size = report.missing_val * 1.25 - - # make 5 ticks above and below 1 - yticks = [] - tick_step = report.ylim_top**(1/5.0) - for i in xrange(-5, 6): - yticks.append(tick_step**i) - axes.set_yticks(yticks) - axes.get_yaxis().set_major_formatter(ticker.ScalarFormatter()) - - axes.set_xlim(report.xlim_left or -1, report.xlim_right or plot_size) - axes.set_ylim(report.ylim_bottom or -1, report.ylim_top or plot_size) - - for axis in [axes.xaxis, axes.yaxis]: - MatplotlibPlot.change_axis_formatter( - axis, - report.missing_val if report.show_missing else None) - return has_points - - -class RelativeScatterPlotReport(ScatterPlotReport): - """ - Generate a scatter plot that shows a relative comparison of two - algorithms with regard to the given attribute. The attribute value - of algorithm 1 is shown on the x-axis and the relation to the value - of algorithm 2 on the y-axis. - """ - - def __init__(self, show_missing=True, get_category=None, **kwargs): - ScatterPlotReport.__init__(self, show_missing, get_category, **kwargs) - if self.output_format == 'tex': - raise "not supported" - else: - self.writer = RelativeScatterMatplotlib - - def _fill_categories(self, runs): - # We discard the *runs* parameter. 
- # Map category names to value tuples - categories = defaultdict(list) - self.ylim_bottom = 2 - self.ylim_top = 0.5 - self.xlim_left = float("inf") - for (domain, problem), runs in self.problem_runs.items(): - if len(runs) != 2: - continue - run1, run2 = runs - assert (run1['algorithm'] == self.algorithms[0] and - run2['algorithm'] == self.algorithms[1]) - val1 = run1.get(self.attribute) - val2 = run2.get(self.attribute) - if val1 is None or val2 is None: - continue - category = self.get_category(run1, run2) - assert val1 > 0, (domain, problem, self.algorithms[0], val1) - assert val2 > 0, (domain, problem, self.algorithms[1], val2) - x = val1 - y = val2 / float(val1) - - categories[category].append((x, y)) - - self.ylim_top = max(self.ylim_top, y) - self.ylim_bottom = min(self.ylim_bottom, y) - self.xlim_left = min(self.xlim_left, x) - - # center around 1 - if self.ylim_bottom < 1: - self.ylim_top = max(self.ylim_top, 1 / float(self.ylim_bottom)) - if self.ylim_top > 1: - self.ylim_bottom = min(self.ylim_bottom, 1 / float(self.ylim_top)) - return categories - - def _set_scales(self, xscale, yscale): - # ScatterPlot uses log-scaling on the x-axis by default. - PlotReport._set_scales( - self, xscale or self.attribute.scale or 'log', 'log') diff --git a/experiments/issue874/translator_additional_parser.py b/experiments/issue874/translator_additional_parser.py deleted file mode 100755 index 13a3802297..0000000000 --- a/experiments/issue874/translator_additional_parser.py +++ /dev/null @@ -1,12 +0,0 @@ -#!/usr/bin/env python - -import hashlib - -from lab.parser import Parser - -def add_hash_value(content, props): - props['translator_output_sas_hash'] = hashlib.sha512(content).hexdigest() - -parser = Parser() -parser.add_function(add_hash_value, file="output.sas") -parser.parse() diff --git a/experiments/issue874/v1.py b/experiments/issue874/v1.py deleted file mode 100755 index 2e2040a13c..0000000000 --- a/experiments/issue874/v1.py +++ /dev/null @@ -1,217 +0,0 @@ -#! 
/usr/bin/env python2 -# -*- coding: utf-8 -*- - -import os - -from lab.environments import LocalEnvironment, BaselSlurmEnvironment -from lab import tools - -from downward.reports.compare import ComparativeReport -from downward.reports import PlanningReport -from downward.experiment import FastDownwardExperiment - -import common_setup -from common_setup import IssueConfig, IssueExperiment - - -DIR = os.path.dirname(os.path.abspath(__file__)) -BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"] -REVISIONS = ["issue874-base", "issue874-v1"] -CONFIGS = [ - IssueConfig( - "translate-only", - [], - driver_options=["--translate"]) -] -ENVIRONMENT = BaselSlurmEnvironment( - partition="infai_2", - email="gabriele.roeger@unibas.ch", - export=["PATH", "DOWNWARD_BENCHMARKS"]) - -# This was generated by running "./suites.py all" in the benchmarks -# repository. I don't know if this is there is a better way of doing -# this. -SUITE = [ - 'agricola-opt18-strips', - 'agricola-sat18-strips', - 'airport', - 'airport-adl', - 'assembly', - 'barman-mco14-strips', - 'barman-opt11-strips', - 'barman-opt14-strips', - 'barman-sat11-strips', - 'barman-sat14-strips', - 'blocks', - 'caldera-opt18-adl', - 'caldera-sat18-adl', - 'caldera-split-opt18-adl', - 'caldera-split-sat18-adl', - 'cavediving-14-adl', - 'childsnack-opt14-strips', - 'childsnack-sat14-strips', - 'citycar-opt14-adl', - 'citycar-sat14-adl', - 'data-network-opt18-strips', - 'data-network-sat18-strips', - 'depot', - 'driverlog', - 'elevators-opt08-strips', - 'elevators-opt11-strips', - 'elevators-sat08-strips', - 'elevators-sat11-strips', - 'flashfill-sat18-adl', - 'floortile-opt11-strips', - 'floortile-opt14-strips', - 'floortile-sat11-strips', - 'floortile-sat14-strips', - 'freecell', - 'ged-opt14-strips', - 'ged-sat14-strips', - 'grid', - 'gripper', - 'hiking-agl14-strips', - 'hiking-opt14-strips', - 'hiking-sat14-strips', - 'logistics00', - 'logistics98', - 'maintenance-opt14-adl', - 'maintenance-sat14-adl', - 'miconic', - 
'miconic-fulladl', - 'miconic-simpleadl', - 'movie', - 'mprime', - 'mystery', - 'no-mprime', - 'no-mystery', - 'nomystery-opt11-strips', - 'nomystery-sat11-strips', - 'nurikabe-opt18-adl', - 'nurikabe-sat18-adl', - 'openstacks', - 'openstacks-agl14-strips', - 'openstacks-opt08-adl', - 'openstacks-opt08-strips', - 'openstacks-opt11-strips', - 'openstacks-opt14-strips', - 'openstacks-sat08-adl', - 'openstacks-sat08-strips', - 'openstacks-sat11-strips', - 'openstacks-sat14-strips', - 'openstacks-strips', - 'optical-telegraphs', - 'organic-synthesis-opt18-strips', - 'organic-synthesis-sat18-strips', - 'organic-synthesis-split-opt18-strips', - 'organic-synthesis-split-sat18-strips', - 'parcprinter-08-strips', - 'parcprinter-opt11-strips', - 'parcprinter-sat11-strips', - 'parking-opt11-strips', - 'parking-opt14-strips', - 'parking-sat11-strips', - 'parking-sat14-strips', - 'pathways', - 'pathways-noneg', - 'pegsol-08-strips', - 'pegsol-opt11-strips', - 'pegsol-sat11-strips', - 'petri-net-alignment-opt18-strips', - 'philosophers', - 'pipesworld-notankage', - 'pipesworld-tankage', - 'psr-large', - 'psr-middle', - 'psr-small', - 'rovers', - 'satellite', - 'scanalyzer-08-strips', - 'scanalyzer-opt11-strips', - 'scanalyzer-sat11-strips', - 'schedule', - 'settlers-opt18-adl', - 'settlers-sat18-adl', - 'snake-opt18-strips', - 'snake-sat18-strips', - 'sokoban-opt08-strips', - 'sokoban-opt11-strips', - 'sokoban-sat08-strips', - 'sokoban-sat11-strips', - 'spider-opt18-strips', - 'spider-sat18-strips', - 'storage', - 'termes-opt18-strips', - 'termes-sat18-strips', - 'tetris-opt14-strips', - 'tetris-sat14-strips', - 'thoughtful-mco14-strips', - 'thoughtful-sat14-strips', - 'tidybot-opt11-strips', - 'tidybot-opt14-strips', - 'tidybot-sat11-strips', - 'tpp', - 'transport-opt08-strips', - 'transport-opt11-strips', - 'transport-opt14-strips', - 'transport-sat08-strips', - 'transport-sat11-strips', - 'transport-sat14-strips', - 'trucks', - 'trucks-strips', - 'visitall-opt11-strips', - 
'visitall-opt14-strips', - 'visitall-sat11-strips', - 'visitall-sat14-strips', - 'woodworking-opt08-strips', - 'woodworking-opt11-strips', - 'woodworking-sat08-strips', - 'woodworking-sat11-strips', - 'zenotravel', -] - -ATTRIBUTES = [ - 'translator_time_done', - 'translator_mutex_groups', - 'translator_variables' -] - -if common_setup.is_test_run(): - SUITE = IssueExperiment.DEFAULT_TEST_SUITE - ENVIRONMENT = LocalEnvironment(processes=1) - -exp = IssueExperiment( - revisions=REVISIONS, - configs=CONFIGS, - environment=ENVIRONMENT, -) -exp.add_suite(BENCHMARKS_DIR, SUITE) - -exp.add_parser('translator_additional_parser.py') -exp.add_parser(FastDownwardExperiment.TRANSLATOR_PARSER) -del exp.commands['remove-output-sas'] - -class TranslatorDiffReport(PlanningReport): - def get_cell(self, run): - return ";".join(run.get(attr) for attr in self.attributes) - - def get_text(self): - lines = [] - for runs in self.problem_runs.values(): - hashes = set([r.get("translator_output_sas_hash") for r in runs]) - if len(hashes) > 1 or None in hashes: - lines.append(";".join([self.get_cell(r) for r in runs])) - return "\n".join(lines) - -exp.add_step('build', exp.build) -exp.add_step('start', exp.start_runs) -exp.add_fetcher(name='fetch') -exp.add_comparison_table_step(attributes=ATTRIBUTES) - -exp.add_report(TranslatorDiffReport( - attributes=["domain", "problem", "algorithm", "run_dir"] - ), outfile="different_output_sas.csv" -) - -exp.run_steps() - diff --git a/experiments/issue880/common_setup.py b/experiments/issue880/common_setup.py deleted file mode 100644 index f64c5c15b3..0000000000 --- a/experiments/issue880/common_setup.py +++ /dev/null @@ -1,394 +0,0 @@ -# -*- coding: utf-8 -*- - -import itertools -import os -import platform -import subprocess -import sys - -from lab.experiment import ARGPARSER -from lab import tools - -from downward.experiment import FastDownwardExperiment -from downward.reports.absolute import AbsoluteReport -from downward.reports.compare import 
ComparativeReport -from downward.reports.scatter import ScatterPlotReport - -from relativescatter import RelativeScatterPlotReport - - -def parse_args(): - ARGPARSER.add_argument( - "--test", - choices=["yes", "no", "auto"], - default="auto", - dest="test_run", - help="test experiment locally on a small suite if --test=yes or " - "--test=auto and we are not on a cluster") - return ARGPARSER.parse_args() - -ARGS = parse_args() - - -DEFAULT_OPTIMAL_SUITE = [ - 'agricola-opt18-strips', 'airport', 'barman-opt11-strips', - 'barman-opt14-strips', 'blocks', 'childsnack-opt14-strips', - 'data-network-opt18-strips', 'depot', 'driverlog', - 'elevators-opt08-strips', 'elevators-opt11-strips', - 'floortile-opt11-strips', 'floortile-opt14-strips', 'freecell', - 'ged-opt14-strips', 'grid', 'gripper', 'hiking-opt14-strips', - 'logistics00', 'logistics98', 'miconic', 'movie', 'mprime', - 'mystery', 'nomystery-opt11-strips', 'openstacks-opt08-strips', - 'openstacks-opt11-strips', 'openstacks-opt14-strips', - 'openstacks-strips', 'organic-synthesis-opt18-strips', - 'organic-synthesis-split-opt18-strips', 'parcprinter-08-strips', - 'parcprinter-opt11-strips', 'parking-opt11-strips', - 'parking-opt14-strips', 'pathways-noneg', 'pegsol-08-strips', - 'pegsol-opt11-strips', 'petri-net-alignment-opt18-strips', - 'pipesworld-notankage', 'pipesworld-tankage', 'psr-small', 'rovers', - 'satellite', 'scanalyzer-08-strips', 'scanalyzer-opt11-strips', - 'snake-opt18-strips', 'sokoban-opt08-strips', - 'sokoban-opt11-strips', 'spider-opt18-strips', 'storage', - 'termes-opt18-strips', 'tetris-opt14-strips', - 'tidybot-opt11-strips', 'tidybot-opt14-strips', 'tpp', - 'transport-opt08-strips', 'transport-opt11-strips', - 'transport-opt14-strips', 'trucks-strips', 'visitall-opt11-strips', - 'visitall-opt14-strips', 'woodworking-opt08-strips', - 'woodworking-opt11-strips', 'zenotravel'] - -DEFAULT_SATISFICING_SUITE = [ - 'agricola-sat18-strips', 'airport', 'assembly', - 'barman-sat11-strips', 
'barman-sat14-strips', 'blocks', - 'caldera-sat18-adl', 'caldera-split-sat18-adl', 'cavediving-14-adl', - 'childsnack-sat14-strips', 'citycar-sat14-adl', - 'data-network-sat18-strips', 'depot', 'driverlog', - 'elevators-sat08-strips', 'elevators-sat11-strips', - 'flashfill-sat18-adl', 'floortile-sat11-strips', - 'floortile-sat14-strips', 'freecell', 'ged-sat14-strips', 'grid', - 'gripper', 'hiking-sat14-strips', 'logistics00', 'logistics98', - 'maintenance-sat14-adl', 'miconic', 'miconic-fulladl', - 'miconic-simpleadl', 'movie', 'mprime', 'mystery', - 'nomystery-sat11-strips', 'nurikabe-sat18-adl', 'openstacks', - 'openstacks-sat08-adl', 'openstacks-sat08-strips', - 'openstacks-sat11-strips', 'openstacks-sat14-strips', - 'openstacks-strips', 'optical-telegraphs', - 'organic-synthesis-sat18-strips', - 'organic-synthesis-split-sat18-strips', 'parcprinter-08-strips', - 'parcprinter-sat11-strips', 'parking-sat11-strips', - 'parking-sat14-strips', 'pathways', 'pathways-noneg', - 'pegsol-08-strips', 'pegsol-sat11-strips', 'philosophers', - 'pipesworld-notankage', 'pipesworld-tankage', 'psr-large', - 'psr-middle', 'psr-small', 'rovers', 'satellite', - 'scanalyzer-08-strips', 'scanalyzer-sat11-strips', 'schedule', - 'settlers-sat18-adl', 'snake-sat18-strips', 'sokoban-sat08-strips', - 'sokoban-sat11-strips', 'spider-sat18-strips', 'storage', - 'termes-sat18-strips', 'tetris-sat14-strips', - 'thoughtful-sat14-strips', 'tidybot-sat11-strips', 'tpp', - 'transport-sat08-strips', 'transport-sat11-strips', - 'transport-sat14-strips', 'trucks', 'trucks-strips', - 'visitall-sat11-strips', 'visitall-sat14-strips', - 'woodworking-sat08-strips', 'woodworking-sat11-strips', - 'zenotravel'] - - -def get_script(): - """Get file name of main script.""" - return tools.get_script_path() - - -def get_script_dir(): - """Get directory of main script. 
- - Usually a relative directory (depends on how it was called by the user.)""" - return os.path.dirname(get_script()) - - -def get_experiment_name(): - """Get name for experiment. - - Derived from the absolute filename of the main script, e.g. - "/ham/spam/eggs.py" => "spam-eggs".""" - script = os.path.abspath(get_script()) - script_dir = os.path.basename(os.path.dirname(script)) - script_base = os.path.splitext(os.path.basename(script))[0] - return "%s-%s" % (script_dir, script_base) - - -def get_data_dir(): - """Get data dir for the experiment. - - This is the subdirectory "data" of the directory containing - the main script.""" - return os.path.join(get_script_dir(), "data", get_experiment_name()) - - -def get_repo_base(): - """Get base directory of the repository, as an absolute path. - - Search upwards in the directory tree from the main script until a - directory with a subdirectory named ".hg" is found. - - Abort if the repo base cannot be found.""" - path = os.path.abspath(get_script_dir()) - while os.path.dirname(path) != path: - if os.path.exists(os.path.join(path, ".hg")): - return path - path = os.path.dirname(path) - sys.exit("repo base could not be found") - - -def is_running_on_cluster(): - node = platform.node() - return node.endswith(".scicore.unibas.ch") or node.endswith(".cluster.bc2.ch") - - -def is_test_run(): - return ARGS.test_run == "yes" or ( - ARGS.test_run == "auto" and not is_running_on_cluster()) - - -def get_algo_nick(revision, config_nick): - return "{revision}-{config_nick}".format(**locals()) - - -class IssueConfig(object): - """Hold information about a planner configuration. - - See FastDownwardExperiment.add_algorithm() for documentation of the - constructor's options. 
- - """ - def __init__(self, nick, component_options, - build_options=None, driver_options=None): - self.nick = nick - self.component_options = component_options - self.build_options = build_options - self.driver_options = driver_options - - -class IssueExperiment(FastDownwardExperiment): - """Subclass of FastDownwardExperiment with some convenience features.""" - - DEFAULT_TEST_SUITE = ["depot:p01.pddl", "gripper:prob01.pddl"] - - DEFAULT_TABLE_ATTRIBUTES = [ - "cost", - "coverage", - "error", - "evaluations", - "expansions", - "expansions_until_last_jump", - "generated", - "memory", - "planner_memory", - "planner_time", - "quality", - "run_dir", - "score_evaluations", - "score_expansions", - "score_generated", - "score_memory", - "score_search_time", - "score_total_time", - "search_time", - "total_time", - ] - - DEFAULT_SCATTER_PLOT_ATTRIBUTES = [ - "evaluations", - "expansions", - "expansions_until_last_jump", - "initial_h_value", - "memory", - "search_time", - "total_time", - ] - - PORTFOLIO_ATTRIBUTES = [ - "cost", - "coverage", - "error", - "plan_length", - "run_dir", - ] - - def __init__(self, revisions=None, configs=None, path=None, **kwargs): - """ - - You can either specify both *revisions* and *configs* or none - of them. If they are omitted, you will need to call - exp.add_algorithm() manually. - - If *revisions* is given, it must be a non-empty list of - revision identifiers, which specify which planner versions to - use in the experiment. The same versions are used for - translator, preprocessor and search. :: - - IssueExperiment(revisions=["issue123", "4b3d581643"], ...) - - If *configs* is given, it must be a non-empty list of - IssueConfig objects. :: - - IssueExperiment(..., configs=[ - IssueConfig("ff", ["--search", "eager_greedy(ff())"]), - IssueConfig( - "lama", [], - driver_options=["--alias", "seq-sat-lama-2011"]), - ]) - - If *path* is specified, it must be the path to where the - experiment should be built (e.g. 
- /home/john/experiments/issue123/exp01/). If omitted, the - experiment path is derived automatically from the main - script's filename. Example:: - - script = experiments/issue123/exp01.py --> - path = experiments/issue123/data/issue123-exp01/ - - """ - - path = path or get_data_dir() - - FastDownwardExperiment.__init__(self, path=path, **kwargs) - - if (revisions and not configs) or (not revisions and configs): - raise ValueError( - "please provide either both or none of revisions and configs") - - for rev in revisions: - for config in configs: - self.add_algorithm( - get_algo_nick(rev, config.nick), - get_repo_base(), - rev, - config.component_options, - build_options=config.build_options, - driver_options=config.driver_options) - - self._revisions = revisions - self._configs = configs - - @classmethod - def _is_portfolio(cls, config_nick): - return "fdss" in config_nick - - @classmethod - def get_supported_attributes(cls, config_nick, attributes): - if cls._is_portfolio(config_nick): - return [attr for attr in attributes - if attr in cls.PORTFOLIO_ATTRIBUTES] - return attributes - - def add_absolute_report_step(self, **kwargs): - """Add step that makes an absolute report. - - Absolute reports are useful for experiments that don't compare - revisions. - - The report is written to the experiment evaluation directory. - - All *kwargs* will be passed to the AbsoluteReport class. If the - keyword argument *attributes* is not specified, a default list - of attributes is used. :: - - exp.add_absolute_report_step(attributes=["coverage"]) - - """ - kwargs.setdefault("attributes", self.DEFAULT_TABLE_ATTRIBUTES) - report = AbsoluteReport(**kwargs) - outfile = os.path.join( - self.eval_dir, - get_experiment_name() + "." 
+ report.output_format) - self.add_report(report, outfile=outfile) - self.add_step( - 'publish-absolute-report', subprocess.call, ['publish', outfile]) - - def add_comparison_table_step(self, **kwargs): - """Add a step that makes pairwise revision comparisons. - - Create comparative reports for all pairs of Fast Downward - revisions. Each report pairs up the runs of the same config and - lists the two absolute attribute values and their difference - for all attributes in kwargs["attributes"]. - - All *kwargs* will be passed to the CompareConfigsReport class. - If the keyword argument *attributes* is not specified, a - default list of attributes is used. :: - - exp.add_comparison_table_step(attributes=["coverage"]) - - """ - kwargs.setdefault("attributes", self.DEFAULT_TABLE_ATTRIBUTES) - - def make_comparison_tables(): - for rev1, rev2 in itertools.combinations(self._revisions, 2): - compared_configs = [] - for config in self._configs: - config_nick = config.nick - compared_configs.append( - ("%s-%s" % (rev1, config_nick), - "%s-%s" % (rev2, config_nick), - "Diff (%s)" % config_nick)) - report = ComparativeReport(compared_configs, **kwargs) - outfile = os.path.join( - self.eval_dir, - "%s-%s-%s-compare.%s" % ( - self.name, rev1, rev2, report.output_format)) - report(self.eval_dir, outfile) - - def publish_comparison_tables(): - for rev1, rev2 in itertools.combinations(self._revisions, 2): - outfile = os.path.join( - self.eval_dir, - "%s-%s-%s-compare.html" % (self.name, rev1, rev2)) - subprocess.call(["publish", outfile]) - - self.add_step("make-comparison-tables", make_comparison_tables) - self.add_step( - "publish-comparison-tables", publish_comparison_tables) - - def add_scatter_plot_step(self, relative=False, attributes=None): - """Add step creating (relative) scatter plots for all revision pairs. - - Create a scatter plot for each combination of attribute, - configuration and revisions pair. 
If *attributes* is not - specified, a list of common scatter plot attributes is used. - For portfolios all attributes except "cost", "coverage" and - "plan_length" will be ignored. :: - - exp.add_scatter_plot_step(attributes=["expansions"]) - - """ - if relative: - report_class = RelativeScatterPlotReport - scatter_dir = os.path.join(self.eval_dir, "scatter-relative") - step_name = "make-relative-scatter-plots" - else: - report_class = ScatterPlotReport - scatter_dir = os.path.join(self.eval_dir, "scatter-absolute") - step_name = "make-absolute-scatter-plots" - if attributes is None: - attributes = self.DEFAULT_SCATTER_PLOT_ATTRIBUTES - - def make_scatter_plot(config_nick, rev1, rev2, attribute): - name = "-".join([self.name, rev1, rev2, attribute, config_nick]) - print "Make scatter plot for", name - algo1 = "{}-{}".format(rev1, config_nick) - algo2 = "{}-{}".format(rev2, config_nick) - report = report_class( - filter_config=[algo1, algo2], - attributes=[attribute], - get_category=lambda run1, run2: run1["domain"], - legend_location=(1.3, 0.5)) - report( - self.eval_dir, - os.path.join(scatter_dir, rev1 + "-" + rev2, name)) - - def make_scatter_plots(): - for config in self._configs: - for rev1, rev2 in itertools.combinations(self._revisions, 2): - for attribute in self.get_supported_attributes( - config.nick, attributes): - make_scatter_plot(config.nick, rev1, rev2, attribute) - - self.add_step(step_name, make_scatter_plots) diff --git a/experiments/issue880/parser.py b/experiments/issue880/parser.py deleted file mode 100755 index d5cd0df10f..0000000000 --- a/experiments/issue880/parser.py +++ /dev/null @@ -1,104 +0,0 @@ -#! 
/usr/bin/env python - -import logging -import re - -from lab.parser import Parser - - -class CommonParser(Parser): - def add_difference(self, diff, val1, val2): - def diff_func(content, props): - if props.get(val1) is None or props.get(val2) is None: - diff_val = None - else: - diff_val = props.get(val1) - props.get(val2) - props[diff] = diff_val - self.add_function(diff_func) - - def _get_flags(self, flags_string): - flags = 0 - for char in flags_string: - flags |= getattr(re, char) - return flags - - def add_repeated_pattern( - self, name, regex, file="run.log", required=False, type=int, - flags=""): - def find_all_occurences(content, props): - matches = re.findall(regex, content, flags=self._get_flags(flags)) - if required and not matches: - logging.error("Pattern {0} not found in file {1}".format(regex, file)) - props[name] = [type(m) for m in matches] - - self.add_function(find_all_occurences, file=file) - - def add_pattern(self, name, regex, file="run.log", required=False, type=int, flags=""): - Parser.add_pattern(self, name, regex, file=file, required=required, type=type, flags=flags) - - def add_bottom_up_pattern(self, name, regex, file="run.log", required=True, type=int, flags=""): - - def search_from_bottom(content, props): - reversed_content = "\n".join(reversed(content.splitlines())) - match = re.search(regex, reversed_content, flags=self._get_flags(flags)) - if required and not match: - logging.error("Pattern {0} not found in file {1}".format(regex, file)) - if match: - props[name] = type(match.group(1)) - - self.add_function(search_from_bottom, file=file) - - -def no_search(content, props): - if "search_start_time" not in props: - error = props.get("error") - if error is not None and error != "incomplete-search-found-no-plan": - props["error"] = "no-search-due-to-" + error - - -REFINEMENT_ATTRIBUTES = [ - ("time_for_finding_traces", r"Time for finding abstract traces: (.+)s"), - ("time_for_finding_flaws", r"Time for finding flaws: (.+)s"), - 
("time_for_splitting_states", r"Time for splitting states: (.+)s"), -] - - -def compute_total_times(content, props): - for attribute, pattern in REFINEMENT_ATTRIBUTES: - props["total_" + attribute] = sum(props[attribute]) - - -def add_time_analysis(content, props): - init_time = props.get("init_time") - if not init_time: - return - parts = [] - parts.append("{init_time:.2f}:".format(**props)) - for attribute, pattern in REFINEMENT_ATTRIBUTES: - time = props["total_" + attribute] - relative_time = time / init_time - print time, type(time) - parts.append("{:.2f} ({:.2f})".format(time, relative_time)) - - props["time_analysis"] = " ".join(parts) - - -def main(): - parser = CommonParser() - parser.add_pattern("search_start_time", r"\[g=0, 1 evaluated, 0 expanded, t=(.+)s, \d+ KB\]", type=float) - parser.add_pattern("search_start_memory", r"\[g=0, 1 evaluated, 0 expanded, t=.+s, (\d+) KB\]", type=int) - parser.add_pattern("init_time", r"Time for initializing additive Cartesian heuristic: (.+)s", type=float) - parser.add_pattern("cartesian_states", r"^Cartesian states: (\d+)\n", type=int) - - for attribute, pattern in REFINEMENT_ATTRIBUTES: - parser.add_repeated_pattern(attribute, pattern, type=float, required=False) - - parser.add_function(no_search) - parser.add_function(compute_total_times) - parser.add_function(add_time_analysis) - - parser.parse() - - -if __name__ == "__main__": - main() diff --git a/experiments/issue880/relativescatter.py b/experiments/issue880/relativescatter.py deleted file mode 100644 index d8033a3324..0000000000 --- a/experiments/issue880/relativescatter.py +++ /dev/null @@ -1,105 +0,0 @@ -# -*- coding: utf-8 -*- - -from collections import defaultdict - -from matplotlib import ticker - -from downward.reports.scatter import ScatterPlotReport -from downward.reports.plot import PlotReport, Matplotlib, MatplotlibPlot - - -# TODO: handle outliers - -# TODO: this is mostly copied from ScatterMatplotlib (scatter.py) -class 
RelativeScatterMatplotlib(Matplotlib): - @classmethod - def _plot(cls, report, axes, categories, styles): - # Display grid - axes.grid(b=True, linestyle='-', color='0.75') - - has_points = False - # Generate the scatter plots - for category, coords in sorted(categories.items()): - X, Y = zip(*coords) - axes.scatter(X, Y, s=42, label=category, **styles[category]) - if X and Y: - has_points = True - - if report.xscale == 'linear' or report.yscale == 'linear': - plot_size = report.missing_val * 1.01 - else: - plot_size = report.missing_val * 1.25 - - # make 5 ticks above and below 1 - yticks = [] - tick_step = report.ylim_top**(1/5.0) - for i in xrange(-5, 6): - yticks.append(tick_step**i) - axes.set_yticks(yticks) - axes.get_yaxis().set_major_formatter(ticker.ScalarFormatter()) - - axes.set_xlim(report.xlim_left or -1, report.xlim_right or plot_size) - axes.set_ylim(report.ylim_bottom or -1, report.ylim_top or plot_size) - - for axis in [axes.xaxis, axes.yaxis]: - MatplotlibPlot.change_axis_formatter( - axis, - report.missing_val if report.show_missing else None) - return has_points - - -class RelativeScatterPlotReport(ScatterPlotReport): - """ - Generate a scatter plot that shows a relative comparison of two - algorithms with regard to the given attribute. The attribute value - of algorithm 1 is shown on the x-axis and the relation to the value - of algorithm 2 on the y-axis. - """ - - def __init__(self, show_missing=True, get_category=None, **kwargs): - ScatterPlotReport.__init__(self, show_missing, get_category, **kwargs) - if self.output_format == 'tex': - raise "not supported" - else: - self.writer = RelativeScatterMatplotlib - - def _fill_categories(self, runs): - # We discard the *runs* parameter. 
- # Map category names to value tuples - categories = defaultdict(list) - self.ylim_bottom = 2 - self.ylim_top = 0.5 - self.xlim_left = float("inf") - for (domain, problem), runs in self.problem_runs.items(): - if len(runs) != 2: - continue - run1, run2 = runs - assert (run1['algorithm'] == self.algorithms[0] and - run2['algorithm'] == self.algorithms[1]) - val1 = run1.get(self.attribute) - val2 = run2.get(self.attribute) - if not val1 or not val2: - continue - category = self.get_category(run1, run2) - assert val1 > 0, (domain, problem, self.algorithms[0], val1) - assert val2 > 0, (domain, problem, self.algorithms[1], val2) - x = val1 - y = val2 / float(val1) - - categories[category].append((x, y)) - - self.ylim_top = max(self.ylim_top, y) - self.ylim_bottom = min(self.ylim_bottom, y) - self.xlim_left = min(self.xlim_left, x) - - # center around 1 - if self.ylim_bottom < 1: - self.ylim_top = max(self.ylim_top, 1 / float(self.ylim_bottom)) - if self.ylim_top > 1: - self.ylim_bottom = min(self.ylim_bottom, 1 / float(self.ylim_top)) - return categories - - def _set_scales(self, xscale, yscale): - # ScatterPlot uses log-scaling on the x-axis by default. - PlotReport._set_scales( - self, xscale or self.attribute.scale or 'log', 'log') diff --git a/experiments/issue880/v1.py b/experiments/issue880/v1.py deleted file mode 100755 index f0e898a9fa..0000000000 --- a/experiments/issue880/v1.py +++ /dev/null @@ -1,81 +0,0 @@ -#! 
/usr/bin/env python -# -*- coding: utf-8 -*- - -import os - -from lab.environments import LocalEnvironment, BaselSlurmEnvironment - -import common_setup -from common_setup import IssueConfig, IssueExperiment -from relativescatter import RelativeScatterPlotReport - -DIR = os.path.dirname(os.path.abspath(__file__)) -BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"] -BUILD = "release64" -REVISIONS = ["issue880-base", "issue880-v1"] -DRIVER_OPTIONS = ["--build", BUILD] -CONFIGS = [ - IssueConfig( - nick + "-" + max_transitions_nick, - config, - build_options=[BUILD], - driver_options=DRIVER_OPTIONS) - for max_transitions_nick, max_transitions in [("1M", 1000000), ("10M", 10000000)] - for nick, config in [ - ("cegar-original", ["--search", "astar(cegar(subtasks=[original()], max_transitions={max_transitions}))".format(**locals())]), - ("cegar-landmarks-goals", ["--search", "astar(cegar(max_transitions={max_transitions}))".format(**locals())]), - ] -] -SUITE = common_setup.DEFAULT_OPTIMAL_SUITE -ENVIRONMENT = BaselSlurmEnvironment( - partition="infai_2", - email="jendrik.seipp@unibas.ch", - export=["PATH", "DOWNWARD_BENCHMARKS"]) - -if common_setup.is_test_run(): - SUITE = [ - #"depot:p02.pddl", - "gripper:prob01.pddl"] - ENVIRONMENT = LocalEnvironment(processes=1) - -exp = IssueExperiment( - revisions=REVISIONS, - configs=CONFIGS, - environment=ENVIRONMENT, -) -exp.add_suite(BENCHMARKS_DIR, SUITE) - -exp.add_parser(exp.EXITCODE_PARSER) -#exp.add_parser(exp.TRANSLATOR_PARSER) -exp.add_parser(exp.SINGLE_SEARCH_PARSER) -exp.add_parser(exp.PLANNER_PARSER) -exp.add_parser(os.path.join(DIR, "parser.py")) - -exp.add_step('build', exp.build) -exp.add_step('start', exp.start_runs) -exp.add_fetcher(name='fetch') - -REFINEMENT_ATTRIBUTES = [ - "time_for_finding_traces", - "time_for_finding_flaws", - "time_for_splitting_states", -] -attributes = ( - IssueExperiment.DEFAULT_TABLE_ATTRIBUTES + - ["search_start_memory", "init_time", "time_analysis"] + - REFINEMENT_ATTRIBUTES + - 
["total_" + attr for attr in REFINEMENT_ATTRIBUTES]) -#exp.add_absolute_report_step(attributes=attributes) -exp.add_comparison_table_step(attributes=attributes) - -if len(REVISIONS) == 2: - for attribute in ["init_time", "expansions_until_last_jump", "total_time_for_splitting_states", "total_time_for_finding_traces"]: - for config in CONFIGS: - exp.add_report( - RelativeScatterPlotReport( - attributes=[attribute], - filter_algorithm=["{}-{}".format(rev, config.nick) for rev in REVISIONS], - get_category=lambda run1, run2: run1.get("domain")), - outfile="{}-{}-{}-{}-{}.png".format(exp.name, attribute, config.nick, *REVISIONS)) - -exp.run_steps() diff --git a/experiments/issue880/v2-900s.py b/experiments/issue880/v2-900s.py deleted file mode 100755 index 5cc7bd0cec..0000000000 --- a/experiments/issue880/v2-900s.py +++ /dev/null @@ -1,79 +0,0 @@ -#! /usr/bin/env python -# -*- coding: utf-8 -*- - -import os - -from lab.environments import LocalEnvironment, BaselSlurmEnvironment - -import common_setup -from common_setup import IssueConfig, IssueExperiment -from relativescatter import RelativeScatterPlotReport - -DIR = os.path.dirname(os.path.abspath(__file__)) -BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"] -BUILD = "release64" -REVISIONS = ["issue880-base", "issue880-v2"] -DRIVER_OPTIONS = ["--build", BUILD] -CONFIGS = [ - IssueConfig( - nick, - config, - build_options=[BUILD], - driver_options=DRIVER_OPTIONS) - for nick, config in [ - ("cegar-original-900s", ["--search", "astar(cegar(subtasks=[original()], max_transitions=infinity, max_time=900))".format(**locals())]), - ] -] -SUITE = common_setup.DEFAULT_OPTIMAL_SUITE -ENVIRONMENT = BaselSlurmEnvironment( - partition="infai_1", - email="jendrik.seipp@unibas.ch", - export=["PATH", "DOWNWARD_BENCHMARKS"]) - -if common_setup.is_test_run(): - SUITE = [ - "depot:p01.pddl", - "gripper:prob01.pddl"] - ENVIRONMENT = LocalEnvironment(processes=1) - -exp = IssueExperiment( - revisions=REVISIONS, - configs=CONFIGS, - 
environment=ENVIRONMENT, -) -exp.add_suite(BENCHMARKS_DIR, SUITE) - -exp.add_parser(exp.EXITCODE_PARSER) -#exp.add_parser(exp.TRANSLATOR_PARSER) -exp.add_parser(exp.SINGLE_SEARCH_PARSER) -exp.add_parser(exp.PLANNER_PARSER) -exp.add_parser(os.path.join(DIR, "parser.py")) - -exp.add_step('build', exp.build) -exp.add_step('start', exp.start_runs) -exp.add_fetcher(name='fetch') - -REFINEMENT_ATTRIBUTES = [ - "time_for_finding_traces", - "time_for_finding_flaws", - "time_for_splitting_states", -] -attributes = ( - IssueExperiment.DEFAULT_TABLE_ATTRIBUTES + - ["search_start_memory", "init_time", "time_analysis"] + - REFINEMENT_ATTRIBUTES + - ["total_" + attr for attr in REFINEMENT_ATTRIBUTES]) -#exp.add_absolute_report_step(attributes=attributes) -exp.add_comparison_table_step(attributes=attributes) - -if len(REVISIONS) == 2: - for attribute in ["init_time", "expansions_until_last_jump"]: - for config in CONFIGS: - exp.add_report( - RelativeScatterPlotReport( - attributes=[attribute], - filter_algorithm=["{}-{}".format(rev, config.nick) for rev in REVISIONS], - get_category=lambda run1, run2: run1.get("domain")), - outfile="{}-{}-{}-{}-{}.png".format(exp.name, attribute, config.nick, *REVISIONS)) - -exp.run_steps() diff --git a/experiments/issue880/v2-max-transitions.py b/experiments/issue880/v2-max-transitions.py deleted file mode 100755 index 144804a170..0000000000 --- a/experiments/issue880/v2-max-transitions.py +++ /dev/null @@ -1,80 +0,0 @@ -#! 
/usr/bin/env python -# -*- coding: utf-8 -*- - -import os - -from lab.environments import LocalEnvironment, BaselSlurmEnvironment - -import common_setup -from common_setup import IssueConfig, IssueExperiment -from relativescatter import RelativeScatterPlotReport - -DIR = os.path.dirname(os.path.abspath(__file__)) -BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"] -BUILD = "release64" -REVISIONS = ["issue880-base", "issue880-v2"] -DRIVER_OPTIONS = ["--build", BUILD] -CONFIGS = [ - IssueConfig( - "{nick}-{million_transitions}M".format(**locals()), - config, - build_options=[BUILD], - driver_options=DRIVER_OPTIONS) - for million_transitions in [1, 2, 5, 10] - for nick, config in [ - ("cegar-original", ["--search", "astar(cegar(subtasks=[original()], max_transitions={max_transitions}))".format(max_transitions=million_transitions * 10**6)]), - ("cegar-landmarks-goals", ["--search", "astar(cegar(max_transitions={max_transitions}))".format(max_transitions=million_transitions * 10**6)]), - ] -] -SUITE = common_setup.DEFAULT_OPTIMAL_SUITE -ENVIRONMENT = BaselSlurmEnvironment( - partition="infai_2", - email="jendrik.seipp@unibas.ch", - export=["PATH", "DOWNWARD_BENCHMARKS"]) - -if common_setup.is_test_run(): - SUITE = [ - "depot:p01.pddl", - "gripper:prob01.pddl"] - ENVIRONMENT = LocalEnvironment(processes=1) - -exp = IssueExperiment( - revisions=REVISIONS, - configs=CONFIGS, - environment=ENVIRONMENT, -) -exp.add_suite(BENCHMARKS_DIR, SUITE) - -exp.add_parser(exp.EXITCODE_PARSER) -#exp.add_parser(exp.TRANSLATOR_PARSER) -exp.add_parser(exp.SINGLE_SEARCH_PARSER) -exp.add_parser(exp.PLANNER_PARSER) -exp.add_parser(os.path.join(DIR, "parser.py")) - -exp.add_step('build', exp.build) -exp.add_step('start', exp.start_runs) -exp.add_fetcher(name='fetch') - -REFINEMENT_ATTRIBUTES = [ - "time_for_finding_traces", - "time_for_finding_flaws", - "time_for_splitting_states", -] -attributes = ( - IssueExperiment.DEFAULT_TABLE_ATTRIBUTES + - ["search_start_memory", "init_time", 
"time_analysis"] + - ["total_" + attr for attr in REFINEMENT_ATTRIBUTES]) -#exp.add_absolute_report_step(attributes=attributes) -exp.add_comparison_table_step(attributes=attributes) - -if len(REVISIONS) == 2: - for attribute in ["init_time", "expansions_until_last_jump"]: - for config in CONFIGS: - exp.add_report( - RelativeScatterPlotReport( - attributes=[attribute], - filter_algorithm=["{}-{}".format(rev, config.nick) for rev in REVISIONS], - get_category=lambda run1, run2: run1.get("domain")), - outfile="{}-{}-{}-{}-{}.png".format(exp.name, attribute, config.nick, *REVISIONS)) - -exp.run_steps() diff --git a/experiments/issue880/v2.py b/experiments/issue880/v2.py deleted file mode 100755 index 2f492c6d4e..0000000000 --- a/experiments/issue880/v2.py +++ /dev/null @@ -1,81 +0,0 @@ -#! /usr/bin/env python -# -*- coding: utf-8 -*- - -import os - -from lab.environments import LocalEnvironment, BaselSlurmEnvironment - -import common_setup -from common_setup import IssueConfig, IssueExperiment -from relativescatter import RelativeScatterPlotReport - -DIR = os.path.dirname(os.path.abspath(__file__)) -BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"] -BUILD = "release64" -REVISIONS = ["issue880-v1", "issue880-v2"] -DRIVER_OPTIONS = ["--build", BUILD] -CONFIGS = [ - IssueConfig( - nick + "-" + max_transitions_nick, - config, - build_options=[BUILD], - driver_options=DRIVER_OPTIONS) - for max_transitions_nick, max_transitions in [("1M", 1000000)] - for nick, config in [ - ("cegar-original", ["--search", "astar(cegar(subtasks=[original()], max_transitions={max_transitions}))".format(**locals())]), - #("cegar-landmarks-goals", ["--search", "astar(cegar(max_transitions={max_transitions}))".format(**locals())]), - ] -] -SUITE = common_setup.DEFAULT_OPTIMAL_SUITE -ENVIRONMENT = BaselSlurmEnvironment( - partition="infai_2", - email="jendrik.seipp@unibas.ch", - export=["PATH", "DOWNWARD_BENCHMARKS"]) - -if common_setup.is_test_run(): - SUITE = [ - "depot:p01.pddl", - 
"gripper:prob01.pddl"] - ENVIRONMENT = LocalEnvironment(processes=1) - -exp = IssueExperiment( - revisions=REVISIONS, - configs=CONFIGS, - environment=ENVIRONMENT, -) -exp.add_suite(BENCHMARKS_DIR, SUITE) - -exp.add_parser(exp.EXITCODE_PARSER) -#exp.add_parser(exp.TRANSLATOR_PARSER) -exp.add_parser(exp.SINGLE_SEARCH_PARSER) -exp.add_parser(exp.PLANNER_PARSER) -exp.add_parser(os.path.join(DIR, "parser.py")) - -exp.add_step('build', exp.build) -exp.add_step('start', exp.start_runs) -exp.add_fetcher(name='fetch') - -REFINEMENT_ATTRIBUTES = [ - "time_for_finding_traces", - "time_for_finding_flaws", - "time_for_splitting_states", -] -attributes = ( - IssueExperiment.DEFAULT_TABLE_ATTRIBUTES + - ["search_start_memory", "init_time", "time_analysis"] + - REFINEMENT_ATTRIBUTES + - ["total_" + attr for attr in REFINEMENT_ATTRIBUTES]) -#exp.add_absolute_report_step(attributes=attributes) -exp.add_comparison_table_step(attributes=attributes) - -if len(REVISIONS) == 2: - for attribute in ["init_time", "expansions_until_last_jump"]: - for config in CONFIGS: - exp.add_report( - RelativeScatterPlotReport( - attributes=[attribute], - filter_algorithm=["{}-{}".format(rev, config.nick) for rev in REVISIONS], - get_category=lambda run1, run2: run1.get("domain")), - outfile="{}-{}-{}-{}-{}.png".format(exp.name, attribute, config.nick, *REVISIONS)) - -exp.run_steps() diff --git a/experiments/issue883/common_setup.py b/experiments/issue883/common_setup.py deleted file mode 100644 index f64c5c15b3..0000000000 --- a/experiments/issue883/common_setup.py +++ /dev/null @@ -1,394 +0,0 @@ -# -*- coding: utf-8 -*- - -import itertools -import os -import platform -import subprocess -import sys - -from lab.experiment import ARGPARSER -from lab import tools - -from downward.experiment import FastDownwardExperiment -from downward.reports.absolute import AbsoluteReport -from downward.reports.compare import ComparativeReport -from downward.reports.scatter import ScatterPlotReport - -from 
relativescatter import RelativeScatterPlotReport - - -def parse_args(): - ARGPARSER.add_argument( - "--test", - choices=["yes", "no", "auto"], - default="auto", - dest="test_run", - help="test experiment locally on a small suite if --test=yes or " - "--test=auto and we are not on a cluster") - return ARGPARSER.parse_args() - -ARGS = parse_args() - - -DEFAULT_OPTIMAL_SUITE = [ - 'agricola-opt18-strips', 'airport', 'barman-opt11-strips', - 'barman-opt14-strips', 'blocks', 'childsnack-opt14-strips', - 'data-network-opt18-strips', 'depot', 'driverlog', - 'elevators-opt08-strips', 'elevators-opt11-strips', - 'floortile-opt11-strips', 'floortile-opt14-strips', 'freecell', - 'ged-opt14-strips', 'grid', 'gripper', 'hiking-opt14-strips', - 'logistics00', 'logistics98', 'miconic', 'movie', 'mprime', - 'mystery', 'nomystery-opt11-strips', 'openstacks-opt08-strips', - 'openstacks-opt11-strips', 'openstacks-opt14-strips', - 'openstacks-strips', 'organic-synthesis-opt18-strips', - 'organic-synthesis-split-opt18-strips', 'parcprinter-08-strips', - 'parcprinter-opt11-strips', 'parking-opt11-strips', - 'parking-opt14-strips', 'pathways-noneg', 'pegsol-08-strips', - 'pegsol-opt11-strips', 'petri-net-alignment-opt18-strips', - 'pipesworld-notankage', 'pipesworld-tankage', 'psr-small', 'rovers', - 'satellite', 'scanalyzer-08-strips', 'scanalyzer-opt11-strips', - 'snake-opt18-strips', 'sokoban-opt08-strips', - 'sokoban-opt11-strips', 'spider-opt18-strips', 'storage', - 'termes-opt18-strips', 'tetris-opt14-strips', - 'tidybot-opt11-strips', 'tidybot-opt14-strips', 'tpp', - 'transport-opt08-strips', 'transport-opt11-strips', - 'transport-opt14-strips', 'trucks-strips', 'visitall-opt11-strips', - 'visitall-opt14-strips', 'woodworking-opt08-strips', - 'woodworking-opt11-strips', 'zenotravel'] - -DEFAULT_SATISFICING_SUITE = [ - 'agricola-sat18-strips', 'airport', 'assembly', - 'barman-sat11-strips', 'barman-sat14-strips', 'blocks', - 'caldera-sat18-adl', 'caldera-split-sat18-adl', 
'cavediving-14-adl', - 'childsnack-sat14-strips', 'citycar-sat14-adl', - 'data-network-sat18-strips', 'depot', 'driverlog', - 'elevators-sat08-strips', 'elevators-sat11-strips', - 'flashfill-sat18-adl', 'floortile-sat11-strips', - 'floortile-sat14-strips', 'freecell', 'ged-sat14-strips', 'grid', - 'gripper', 'hiking-sat14-strips', 'logistics00', 'logistics98', - 'maintenance-sat14-adl', 'miconic', 'miconic-fulladl', - 'miconic-simpleadl', 'movie', 'mprime', 'mystery', - 'nomystery-sat11-strips', 'nurikabe-sat18-adl', 'openstacks', - 'openstacks-sat08-adl', 'openstacks-sat08-strips', - 'openstacks-sat11-strips', 'openstacks-sat14-strips', - 'openstacks-strips', 'optical-telegraphs', - 'organic-synthesis-sat18-strips', - 'organic-synthesis-split-sat18-strips', 'parcprinter-08-strips', - 'parcprinter-sat11-strips', 'parking-sat11-strips', - 'parking-sat14-strips', 'pathways', 'pathways-noneg', - 'pegsol-08-strips', 'pegsol-sat11-strips', 'philosophers', - 'pipesworld-notankage', 'pipesworld-tankage', 'psr-large', - 'psr-middle', 'psr-small', 'rovers', 'satellite', - 'scanalyzer-08-strips', 'scanalyzer-sat11-strips', 'schedule', - 'settlers-sat18-adl', 'snake-sat18-strips', 'sokoban-sat08-strips', - 'sokoban-sat11-strips', 'spider-sat18-strips', 'storage', - 'termes-sat18-strips', 'tetris-sat14-strips', - 'thoughtful-sat14-strips', 'tidybot-sat11-strips', 'tpp', - 'transport-sat08-strips', 'transport-sat11-strips', - 'transport-sat14-strips', 'trucks', 'trucks-strips', - 'visitall-sat11-strips', 'visitall-sat14-strips', - 'woodworking-sat08-strips', 'woodworking-sat11-strips', - 'zenotravel'] - - -def get_script(): - """Get file name of main script.""" - return tools.get_script_path() - - -def get_script_dir(): - """Get directory of main script. - - Usually a relative directory (depends on how it was called by the user.)""" - return os.path.dirname(get_script()) - - -def get_experiment_name(): - """Get name for experiment. 
- - Derived from the absolute filename of the main script, e.g. - "/ham/spam/eggs.py" => "spam-eggs".""" - script = os.path.abspath(get_script()) - script_dir = os.path.basename(os.path.dirname(script)) - script_base = os.path.splitext(os.path.basename(script))[0] - return "%s-%s" % (script_dir, script_base) - - -def get_data_dir(): - """Get data dir for the experiment. - - This is the subdirectory "data" of the directory containing - the main script.""" - return os.path.join(get_script_dir(), "data", get_experiment_name()) - - -def get_repo_base(): - """Get base directory of the repository, as an absolute path. - - Search upwards in the directory tree from the main script until a - directory with a subdirectory named ".hg" is found. - - Abort if the repo base cannot be found.""" - path = os.path.abspath(get_script_dir()) - while os.path.dirname(path) != path: - if os.path.exists(os.path.join(path, ".hg")): - return path - path = os.path.dirname(path) - sys.exit("repo base could not be found") - - -def is_running_on_cluster(): - node = platform.node() - return node.endswith(".scicore.unibas.ch") or node.endswith(".cluster.bc2.ch") - - -def is_test_run(): - return ARGS.test_run == "yes" or ( - ARGS.test_run == "auto" and not is_running_on_cluster()) - - -def get_algo_nick(revision, config_nick): - return "{revision}-{config_nick}".format(**locals()) - - -class IssueConfig(object): - """Hold information about a planner configuration. - - See FastDownwardExperiment.add_algorithm() for documentation of the - constructor's options. 
- - """ - def __init__(self, nick, component_options, - build_options=None, driver_options=None): - self.nick = nick - self.component_options = component_options - self.build_options = build_options - self.driver_options = driver_options - - -class IssueExperiment(FastDownwardExperiment): - """Subclass of FastDownwardExperiment with some convenience features.""" - - DEFAULT_TEST_SUITE = ["depot:p01.pddl", "gripper:prob01.pddl"] - - DEFAULT_TABLE_ATTRIBUTES = [ - "cost", - "coverage", - "error", - "evaluations", - "expansions", - "expansions_until_last_jump", - "generated", - "memory", - "planner_memory", - "planner_time", - "quality", - "run_dir", - "score_evaluations", - "score_expansions", - "score_generated", - "score_memory", - "score_search_time", - "score_total_time", - "search_time", - "total_time", - ] - - DEFAULT_SCATTER_PLOT_ATTRIBUTES = [ - "evaluations", - "expansions", - "expansions_until_last_jump", - "initial_h_value", - "memory", - "search_time", - "total_time", - ] - - PORTFOLIO_ATTRIBUTES = [ - "cost", - "coverage", - "error", - "plan_length", - "run_dir", - ] - - def __init__(self, revisions=None, configs=None, path=None, **kwargs): - """ - - You can either specify both *revisions* and *configs* or none - of them. If they are omitted, you will need to call - exp.add_algorithm() manually. - - If *revisions* is given, it must be a non-empty list of - revision identifiers, which specify which planner versions to - use in the experiment. The same versions are used for - translator, preprocessor and search. :: - - IssueExperiment(revisions=["issue123", "4b3d581643"], ...) - - If *configs* is given, it must be a non-empty list of - IssueConfig objects. :: - - IssueExperiment(..., configs=[ - IssueConfig("ff", ["--search", "eager_greedy(ff())"]), - IssueConfig( - "lama", [], - driver_options=["--alias", "seq-sat-lama-2011"]), - ]) - - If *path* is specified, it must be the path to where the - experiment should be built (e.g. 
- /home/john/experiments/issue123/exp01/). If omitted, the - experiment path is derived automatically from the main - script's filename. Example:: - - script = experiments/issue123/exp01.py --> - path = experiments/issue123/data/issue123-exp01/ - - """ - - path = path or get_data_dir() - - FastDownwardExperiment.__init__(self, path=path, **kwargs) - - if (revisions and not configs) or (not revisions and configs): - raise ValueError( - "please provide either both or none of revisions and configs") - - for rev in revisions: - for config in configs: - self.add_algorithm( - get_algo_nick(rev, config.nick), - get_repo_base(), - rev, - config.component_options, - build_options=config.build_options, - driver_options=config.driver_options) - - self._revisions = revisions - self._configs = configs - - @classmethod - def _is_portfolio(cls, config_nick): - return "fdss" in config_nick - - @classmethod - def get_supported_attributes(cls, config_nick, attributes): - if cls._is_portfolio(config_nick): - return [attr for attr in attributes - if attr in cls.PORTFOLIO_ATTRIBUTES] - return attributes - - def add_absolute_report_step(self, **kwargs): - """Add step that makes an absolute report. - - Absolute reports are useful for experiments that don't compare - revisions. - - The report is written to the experiment evaluation directory. - - All *kwargs* will be passed to the AbsoluteReport class. If the - keyword argument *attributes* is not specified, a default list - of attributes is used. :: - - exp.add_absolute_report_step(attributes=["coverage"]) - - """ - kwargs.setdefault("attributes", self.DEFAULT_TABLE_ATTRIBUTES) - report = AbsoluteReport(**kwargs) - outfile = os.path.join( - self.eval_dir, - get_experiment_name() + "." 
+ report.output_format) - self.add_report(report, outfile=outfile) - self.add_step( - 'publish-absolute-report', subprocess.call, ['publish', outfile]) - - def add_comparison_table_step(self, **kwargs): - """Add a step that makes pairwise revision comparisons. - - Create comparative reports for all pairs of Fast Downward - revisions. Each report pairs up the runs of the same config and - lists the two absolute attribute values and their difference - for all attributes in kwargs["attributes"]. - - All *kwargs* will be passed to the CompareConfigsReport class. - If the keyword argument *attributes* is not specified, a - default list of attributes is used. :: - - exp.add_comparison_table_step(attributes=["coverage"]) - - """ - kwargs.setdefault("attributes", self.DEFAULT_TABLE_ATTRIBUTES) - - def make_comparison_tables(): - for rev1, rev2 in itertools.combinations(self._revisions, 2): - compared_configs = [] - for config in self._configs: - config_nick = config.nick - compared_configs.append( - ("%s-%s" % (rev1, config_nick), - "%s-%s" % (rev2, config_nick), - "Diff (%s)" % config_nick)) - report = ComparativeReport(compared_configs, **kwargs) - outfile = os.path.join( - self.eval_dir, - "%s-%s-%s-compare.%s" % ( - self.name, rev1, rev2, report.output_format)) - report(self.eval_dir, outfile) - - def publish_comparison_tables(): - for rev1, rev2 in itertools.combinations(self._revisions, 2): - outfile = os.path.join( - self.eval_dir, - "%s-%s-%s-compare.html" % (self.name, rev1, rev2)) - subprocess.call(["publish", outfile]) - - self.add_step("make-comparison-tables", make_comparison_tables) - self.add_step( - "publish-comparison-tables", publish_comparison_tables) - - def add_scatter_plot_step(self, relative=False, attributes=None): - """Add step creating (relative) scatter plots for all revision pairs. - - Create a scatter plot for each combination of attribute, - configuration and revisions pair. 
If *attributes* is not - specified, a list of common scatter plot attributes is used. - For portfolios all attributes except "cost", "coverage" and - "plan_length" will be ignored. :: - - exp.add_scatter_plot_step(attributes=["expansions"]) - - """ - if relative: - report_class = RelativeScatterPlotReport - scatter_dir = os.path.join(self.eval_dir, "scatter-relative") - step_name = "make-relative-scatter-plots" - else: - report_class = ScatterPlotReport - scatter_dir = os.path.join(self.eval_dir, "scatter-absolute") - step_name = "make-absolute-scatter-plots" - if attributes is None: - attributes = self.DEFAULT_SCATTER_PLOT_ATTRIBUTES - - def make_scatter_plot(config_nick, rev1, rev2, attribute): - name = "-".join([self.name, rev1, rev2, attribute, config_nick]) - print "Make scatter plot for", name - algo1 = "{}-{}".format(rev1, config_nick) - algo2 = "{}-{}".format(rev2, config_nick) - report = report_class( - filter_config=[algo1, algo2], - attributes=[attribute], - get_category=lambda run1, run2: run1["domain"], - legend_location=(1.3, 0.5)) - report( - self.eval_dir, - os.path.join(scatter_dir, rev1 + "-" + rev2, name)) - - def make_scatter_plots(): - for config in self._configs: - for rev1, rev2 in itertools.combinations(self._revisions, 2): - for attribute in self.get_supported_attributes( - config.nick, attributes): - make_scatter_plot(config.nick, rev1, rev2, attribute) - - self.add_step(step_name, make_scatter_plots) diff --git a/experiments/issue883/parser.py b/experiments/issue883/parser.py deleted file mode 100755 index d5cd0df10f..0000000000 --- a/experiments/issue883/parser.py +++ /dev/null @@ -1,104 +0,0 @@ -#! 
/usr/bin/env python - -import logging -import re - -from lab.parser import Parser - - -class CommonParser(Parser): - def add_difference(self, diff, val1, val2): - def diff_func(content, props): - if props.get(val1) is None or props.get(val2) is None: - diff_val = None - else: - diff_val = props.get(val1) - props.get(val2) - props[diff] = diff_val - self.add_function(diff_func) - - def _get_flags(self, flags_string): - flags = 0 - for char in flags_string: - flags |= getattr(re, char) - return flags - - def add_repeated_pattern( - self, name, regex, file="run.log", required=False, type=int, - flags=""): - def find_all_occurences(content, props): - matches = re.findall(regex, content, flags=self._get_flags(flags)) - if required and not matches: - logging.error("Pattern {0} not found in file {1}".format(regex, file)) - props[name] = [type(m) for m in matches] - - self.add_function(find_all_occurences, file=file) - - def add_pattern(self, name, regex, file="run.log", required=False, type=int, flags=""): - Parser.add_pattern(self, name, regex, file=file, required=required, type=type, flags=flags) - - def add_bottom_up_pattern(self, name, regex, file="run.log", required=True, type=int, flags=""): - - def search_from_bottom(content, props): - reversed_content = "\n".join(reversed(content.splitlines())) - match = re.search(regex, reversed_content, flags=self._get_flags(flags)) - if required and not match: - logging.error("Pattern {0} not found in file {1}".format(regex, file)) - if match: - props[name] = type(match.group(1)) - - self.add_function(search_from_bottom, file=file) - - -def no_search(content, props): - if "search_start_time" not in props: - error = props.get("error") - if error is not None and error != "incomplete-search-found-no-plan": - props["error"] = "no-search-due-to-" + error - - -REFINEMENT_ATTRIBUTES = [ - ("time_for_finding_traces", r"Time for finding abstract traces: (.+)s"), - ("time_for_finding_flaws", r"Time for finding flaws: (.+)s"), - 
("time_for_splitting_states", r"Time for splitting states: (.+)s"), -] - - -def compute_total_times(content, props): - for attribute, pattern in REFINEMENT_ATTRIBUTES: - props["total_" + attribute] = sum(props[attribute]) - - -def add_time_analysis(content, props): - init_time = props.get("init_time") - if not init_time: - return - parts = [] - parts.append("{init_time:.2f}:".format(**props)) - for attribute, pattern in REFINEMENT_ATTRIBUTES: - time = props["total_" + attribute] - relative_time = time / init_time - print time, type(time) - parts.append("{:.2f} ({:.2f})".format(time, relative_time)) - - props["time_analysis"] = " ".join(parts) - - -def main(): - parser = CommonParser() - parser.add_pattern("search_start_time", r"\[g=0, 1 evaluated, 0 expanded, t=(.+)s, \d+ KB\]", type=float) - parser.add_pattern("search_start_memory", r"\[g=0, 1 evaluated, 0 expanded, t=.+s, (\d+) KB\]", type=int) - parser.add_pattern("init_time", r"Time for initializing additive Cartesian heuristic: (.+)s", type=float) - parser.add_pattern("cartesian_states", r"^Cartesian states: (\d+)\n", type=int) - - for attribute, pattern in REFINEMENT_ATTRIBUTES: - parser.add_repeated_pattern(attribute, pattern, type=float, required=False) - - parser.add_function(no_search) - parser.add_function(compute_total_times) - parser.add_function(add_time_analysis) - - parser.parse() - - -if __name__ == "__main__": - main() diff --git a/experiments/issue883/relativescatter.py b/experiments/issue883/relativescatter.py deleted file mode 100644 index d8033a3324..0000000000 --- a/experiments/issue883/relativescatter.py +++ /dev/null @@ -1,105 +0,0 @@ -# -*- coding: utf-8 -*- - -from collections import defaultdict - -from matplotlib import ticker - -from downward.reports.scatter import ScatterPlotReport -from downward.reports.plot import PlotReport, Matplotlib, MatplotlibPlot - - -# TODO: handle outliers - -# TODO: this is mostly copied from ScatterMatplotlib (scatter.py) -class 
RelativeScatterMatplotlib(Matplotlib): - @classmethod - def _plot(cls, report, axes, categories, styles): - # Display grid - axes.grid(b=True, linestyle='-', color='0.75') - - has_points = False - # Generate the scatter plots - for category, coords in sorted(categories.items()): - X, Y = zip(*coords) - axes.scatter(X, Y, s=42, label=category, **styles[category]) - if X and Y: - has_points = True - - if report.xscale == 'linear' or report.yscale == 'linear': - plot_size = report.missing_val * 1.01 - else: - plot_size = report.missing_val * 1.25 - - # make 5 ticks above and below 1 - yticks = [] - tick_step = report.ylim_top**(1/5.0) - for i in xrange(-5, 6): - yticks.append(tick_step**i) - axes.set_yticks(yticks) - axes.get_yaxis().set_major_formatter(ticker.ScalarFormatter()) - - axes.set_xlim(report.xlim_left or -1, report.xlim_right or plot_size) - axes.set_ylim(report.ylim_bottom or -1, report.ylim_top or plot_size) - - for axis in [axes.xaxis, axes.yaxis]: - MatplotlibPlot.change_axis_formatter( - axis, - report.missing_val if report.show_missing else None) - return has_points - - -class RelativeScatterPlotReport(ScatterPlotReport): - """ - Generate a scatter plot that shows a relative comparison of two - algorithms with regard to the given attribute. The attribute value - of algorithm 1 is shown on the x-axis and the relation to the value - of algorithm 2 on the y-axis. - """ - - def __init__(self, show_missing=True, get_category=None, **kwargs): - ScatterPlotReport.__init__(self, show_missing, get_category, **kwargs) - if self.output_format == 'tex': - raise "not supported" - else: - self.writer = RelativeScatterMatplotlib - - def _fill_categories(self, runs): - # We discard the *runs* parameter. 
- # Map category names to value tuples - categories = defaultdict(list) - self.ylim_bottom = 2 - self.ylim_top = 0.5 - self.xlim_left = float("inf") - for (domain, problem), runs in self.problem_runs.items(): - if len(runs) != 2: - continue - run1, run2 = runs - assert (run1['algorithm'] == self.algorithms[0] and - run2['algorithm'] == self.algorithms[1]) - val1 = run1.get(self.attribute) - val2 = run2.get(self.attribute) - if not val1 or not val2: - continue - category = self.get_category(run1, run2) - assert val1 > 0, (domain, problem, self.algorithms[0], val1) - assert val2 > 0, (domain, problem, self.algorithms[1], val2) - x = val1 - y = val2 / float(val1) - - categories[category].append((x, y)) - - self.ylim_top = max(self.ylim_top, y) - self.ylim_bottom = min(self.ylim_bottom, y) - self.xlim_left = min(self.xlim_left, x) - - # center around 1 - if self.ylim_bottom < 1: - self.ylim_top = max(self.ylim_top, 1 / float(self.ylim_bottom)) - if self.ylim_top > 1: - self.ylim_bottom = min(self.ylim_bottom, 1 / float(self.ylim_top)) - return categories - - def _set_scales(self, xscale, yscale): - # ScatterPlot uses log-scaling on the x-axis by default. - PlotReport._set_scales( - self, xscale or self.attribute.scale or 'log', 'log') diff --git a/experiments/issue883/v1.py b/experiments/issue883/v1.py deleted file mode 100755 index dcadb8ae3c..0000000000 --- a/experiments/issue883/v1.py +++ /dev/null @@ -1,81 +0,0 @@ -#! 
/usr/bin/env python -# -*- coding: utf-8 -*- - -import os - -from lab.environments import LocalEnvironment, BaselSlurmEnvironment - -import common_setup -from common_setup import IssueConfig, IssueExperiment -from relativescatter import RelativeScatterPlotReport - -DIR = os.path.dirname(os.path.abspath(__file__)) -BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"] -BUILD = "release64" -REVISIONS = ["issue883-base", "issue883-v1"] -DRIVER_OPTIONS = ["--build", BUILD] -CONFIGS = [ - IssueConfig( - nick + "-" + max_transitions_nick, - config, - build_options=[BUILD], - driver_options=DRIVER_OPTIONS) - for max_transitions_nick, max_transitions in [("1M", 1000000), ("2M", 2000000)] - for nick, config in [ - ("cegar-original", ["--search", "astar(cegar(subtasks=[original()], max_transitions={max_transitions}))".format(**locals())]), - ("cegar-landmarks-goals", ["--search", "astar(cegar(max_transitions={max_transitions}))".format(**locals())]), - ] -] -SUITE = common_setup.DEFAULT_OPTIMAL_SUITE -ENVIRONMENT = BaselSlurmEnvironment( - partition="infai_2", - email="jendrik.seipp@unibas.ch", - export=["PATH", "DOWNWARD_BENCHMARKS"]) - -if common_setup.is_test_run(): - SUITE = [ - #"depot:p02.pddl", - "gripper:prob01.pddl"] - ENVIRONMENT = LocalEnvironment(processes=1) - -exp = IssueExperiment( - revisions=REVISIONS, - configs=CONFIGS, - environment=ENVIRONMENT, -) -exp.add_suite(BENCHMARKS_DIR, SUITE) - -exp.add_parser(exp.EXITCODE_PARSER) -#exp.add_parser(exp.TRANSLATOR_PARSER) -exp.add_parser(exp.SINGLE_SEARCH_PARSER) -exp.add_parser(exp.PLANNER_PARSER) -exp.add_parser(os.path.join(DIR, "parser.py")) - -exp.add_step('build', exp.build) -exp.add_step('start', exp.start_runs) -exp.add_fetcher(name='fetch') - -REFINEMENT_ATTRIBUTES = [ - "time_for_finding_traces", - "time_for_finding_flaws", - "time_for_splitting_states", -] -attributes = ( - IssueExperiment.DEFAULT_TABLE_ATTRIBUTES + - ["search_start_memory", "init_time", "time_analysis"] + - REFINEMENT_ATTRIBUTES + - 
["total_" + attr for attr in REFINEMENT_ATTRIBUTES]) -#exp.add_absolute_report_step(attributes=attributes) -exp.add_comparison_table_step(attributes=attributes) - -if len(REVISIONS) == 2: - for attribute in ["init_time", "expansions_until_last_jump", "total_time_for_splitting_states", "total_time_for_finding_traces"]: - for config in CONFIGS: - exp.add_report( - RelativeScatterPlotReport( - attributes=[attribute], - filter_algorithm=["{}-{}".format(rev, config.nick) for rev in REVISIONS], - get_category=lambda run1, run2: run1.get("domain")), - outfile="{}-{}-{}-{}-{}.png".format(exp.name, attribute, config.nick, *REVISIONS)) - -exp.run_steps() diff --git a/experiments/issue884/common_setup.py b/experiments/issue884/common_setup.py deleted file mode 100644 index f64c5c15b3..0000000000 --- a/experiments/issue884/common_setup.py +++ /dev/null @@ -1,394 +0,0 @@ -# -*- coding: utf-8 -*- - -import itertools -import os -import platform -import subprocess -import sys - -from lab.experiment import ARGPARSER -from lab import tools - -from downward.experiment import FastDownwardExperiment -from downward.reports.absolute import AbsoluteReport -from downward.reports.compare import ComparativeReport -from downward.reports.scatter import ScatterPlotReport - -from relativescatter import RelativeScatterPlotReport - - -def parse_args(): - ARGPARSER.add_argument( - "--test", - choices=["yes", "no", "auto"], - default="auto", - dest="test_run", - help="test experiment locally on a small suite if --test=yes or " - "--test=auto and we are not on a cluster") - return ARGPARSER.parse_args() - -ARGS = parse_args() - - -DEFAULT_OPTIMAL_SUITE = [ - 'agricola-opt18-strips', 'airport', 'barman-opt11-strips', - 'barman-opt14-strips', 'blocks', 'childsnack-opt14-strips', - 'data-network-opt18-strips', 'depot', 'driverlog', - 'elevators-opt08-strips', 'elevators-opt11-strips', - 'floortile-opt11-strips', 'floortile-opt14-strips', 'freecell', - 'ged-opt14-strips', 'grid', 'gripper', 
'hiking-opt14-strips', - 'logistics00', 'logistics98', 'miconic', 'movie', 'mprime', - 'mystery', 'nomystery-opt11-strips', 'openstacks-opt08-strips', - 'openstacks-opt11-strips', 'openstacks-opt14-strips', - 'openstacks-strips', 'organic-synthesis-opt18-strips', - 'organic-synthesis-split-opt18-strips', 'parcprinter-08-strips', - 'parcprinter-opt11-strips', 'parking-opt11-strips', - 'parking-opt14-strips', 'pathways-noneg', 'pegsol-08-strips', - 'pegsol-opt11-strips', 'petri-net-alignment-opt18-strips', - 'pipesworld-notankage', 'pipesworld-tankage', 'psr-small', 'rovers', - 'satellite', 'scanalyzer-08-strips', 'scanalyzer-opt11-strips', - 'snake-opt18-strips', 'sokoban-opt08-strips', - 'sokoban-opt11-strips', 'spider-opt18-strips', 'storage', - 'termes-opt18-strips', 'tetris-opt14-strips', - 'tidybot-opt11-strips', 'tidybot-opt14-strips', 'tpp', - 'transport-opt08-strips', 'transport-opt11-strips', - 'transport-opt14-strips', 'trucks-strips', 'visitall-opt11-strips', - 'visitall-opt14-strips', 'woodworking-opt08-strips', - 'woodworking-opt11-strips', 'zenotravel'] - -DEFAULT_SATISFICING_SUITE = [ - 'agricola-sat18-strips', 'airport', 'assembly', - 'barman-sat11-strips', 'barman-sat14-strips', 'blocks', - 'caldera-sat18-adl', 'caldera-split-sat18-adl', 'cavediving-14-adl', - 'childsnack-sat14-strips', 'citycar-sat14-adl', - 'data-network-sat18-strips', 'depot', 'driverlog', - 'elevators-sat08-strips', 'elevators-sat11-strips', - 'flashfill-sat18-adl', 'floortile-sat11-strips', - 'floortile-sat14-strips', 'freecell', 'ged-sat14-strips', 'grid', - 'gripper', 'hiking-sat14-strips', 'logistics00', 'logistics98', - 'maintenance-sat14-adl', 'miconic', 'miconic-fulladl', - 'miconic-simpleadl', 'movie', 'mprime', 'mystery', - 'nomystery-sat11-strips', 'nurikabe-sat18-adl', 'openstacks', - 'openstacks-sat08-adl', 'openstacks-sat08-strips', - 'openstacks-sat11-strips', 'openstacks-sat14-strips', - 'openstacks-strips', 'optical-telegraphs', - 
'organic-synthesis-sat18-strips', - 'organic-synthesis-split-sat18-strips', 'parcprinter-08-strips', - 'parcprinter-sat11-strips', 'parking-sat11-strips', - 'parking-sat14-strips', 'pathways', 'pathways-noneg', - 'pegsol-08-strips', 'pegsol-sat11-strips', 'philosophers', - 'pipesworld-notankage', 'pipesworld-tankage', 'psr-large', - 'psr-middle', 'psr-small', 'rovers', 'satellite', - 'scanalyzer-08-strips', 'scanalyzer-sat11-strips', 'schedule', - 'settlers-sat18-adl', 'snake-sat18-strips', 'sokoban-sat08-strips', - 'sokoban-sat11-strips', 'spider-sat18-strips', 'storage', - 'termes-sat18-strips', 'tetris-sat14-strips', - 'thoughtful-sat14-strips', 'tidybot-sat11-strips', 'tpp', - 'transport-sat08-strips', 'transport-sat11-strips', - 'transport-sat14-strips', 'trucks', 'trucks-strips', - 'visitall-sat11-strips', 'visitall-sat14-strips', - 'woodworking-sat08-strips', 'woodworking-sat11-strips', - 'zenotravel'] - - -def get_script(): - """Get file name of main script.""" - return tools.get_script_path() - - -def get_script_dir(): - """Get directory of main script. - - Usually a relative directory (depends on how it was called by the user.)""" - return os.path.dirname(get_script()) - - -def get_experiment_name(): - """Get name for experiment. - - Derived from the absolute filename of the main script, e.g. - "/ham/spam/eggs.py" => "spam-eggs".""" - script = os.path.abspath(get_script()) - script_dir = os.path.basename(os.path.dirname(script)) - script_base = os.path.splitext(os.path.basename(script))[0] - return "%s-%s" % (script_dir, script_base) - - -def get_data_dir(): - """Get data dir for the experiment. - - This is the subdirectory "data" of the directory containing - the main script.""" - return os.path.join(get_script_dir(), "data", get_experiment_name()) - - -def get_repo_base(): - """Get base directory of the repository, as an absolute path. 
- - Search upwards in the directory tree from the main script until a - directory with a subdirectory named ".hg" is found. - - Abort if the repo base cannot be found.""" - path = os.path.abspath(get_script_dir()) - while os.path.dirname(path) != path: - if os.path.exists(os.path.join(path, ".hg")): - return path - path = os.path.dirname(path) - sys.exit("repo base could not be found") - - -def is_running_on_cluster(): - node = platform.node() - return node.endswith(".scicore.unibas.ch") or node.endswith(".cluster.bc2.ch") - - -def is_test_run(): - return ARGS.test_run == "yes" or ( - ARGS.test_run == "auto" and not is_running_on_cluster()) - - -def get_algo_nick(revision, config_nick): - return "{revision}-{config_nick}".format(**locals()) - - -class IssueConfig(object): - """Hold information about a planner configuration. - - See FastDownwardExperiment.add_algorithm() for documentation of the - constructor's options. - - """ - def __init__(self, nick, component_options, - build_options=None, driver_options=None): - self.nick = nick - self.component_options = component_options - self.build_options = build_options - self.driver_options = driver_options - - -class IssueExperiment(FastDownwardExperiment): - """Subclass of FastDownwardExperiment with some convenience features.""" - - DEFAULT_TEST_SUITE = ["depot:p01.pddl", "gripper:prob01.pddl"] - - DEFAULT_TABLE_ATTRIBUTES = [ - "cost", - "coverage", - "error", - "evaluations", - "expansions", - "expansions_until_last_jump", - "generated", - "memory", - "planner_memory", - "planner_time", - "quality", - "run_dir", - "score_evaluations", - "score_expansions", - "score_generated", - "score_memory", - "score_search_time", - "score_total_time", - "search_time", - "total_time", - ] - - DEFAULT_SCATTER_PLOT_ATTRIBUTES = [ - "evaluations", - "expansions", - "expansions_until_last_jump", - "initial_h_value", - "memory", - "search_time", - "total_time", - ] - - PORTFOLIO_ATTRIBUTES = [ - "cost", - "coverage", - "error", - 
"plan_length", - "run_dir", - ] - - def __init__(self, revisions=None, configs=None, path=None, **kwargs): - """ - - You can either specify both *revisions* and *configs* or none - of them. If they are omitted, you will need to call - exp.add_algorithm() manually. - - If *revisions* is given, it must be a non-empty list of - revision identifiers, which specify which planner versions to - use in the experiment. The same versions are used for - translator, preprocessor and search. :: - - IssueExperiment(revisions=["issue123", "4b3d581643"], ...) - - If *configs* is given, it must be a non-empty list of - IssueConfig objects. :: - - IssueExperiment(..., configs=[ - IssueConfig("ff", ["--search", "eager_greedy(ff())"]), - IssueConfig( - "lama", [], - driver_options=["--alias", "seq-sat-lama-2011"]), - ]) - - If *path* is specified, it must be the path to where the - experiment should be built (e.g. - /home/john/experiments/issue123/exp01/). If omitted, the - experiment path is derived automatically from the main - script's filename. 
Example:: - - script = experiments/issue123/exp01.py --> - path = experiments/issue123/data/issue123-exp01/ - - """ - - path = path or get_data_dir() - - FastDownwardExperiment.__init__(self, path=path, **kwargs) - - if (revisions and not configs) or (not revisions and configs): - raise ValueError( - "please provide either both or none of revisions and configs") - - for rev in revisions: - for config in configs: - self.add_algorithm( - get_algo_nick(rev, config.nick), - get_repo_base(), - rev, - config.component_options, - build_options=config.build_options, - driver_options=config.driver_options) - - self._revisions = revisions - self._configs = configs - - @classmethod - def _is_portfolio(cls, config_nick): - return "fdss" in config_nick - - @classmethod - def get_supported_attributes(cls, config_nick, attributes): - if cls._is_portfolio(config_nick): - return [attr for attr in attributes - if attr in cls.PORTFOLIO_ATTRIBUTES] - return attributes - - def add_absolute_report_step(self, **kwargs): - """Add step that makes an absolute report. - - Absolute reports are useful for experiments that don't compare - revisions. - - The report is written to the experiment evaluation directory. - - All *kwargs* will be passed to the AbsoluteReport class. If the - keyword argument *attributes* is not specified, a default list - of attributes is used. :: - - exp.add_absolute_report_step(attributes=["coverage"]) - - """ - kwargs.setdefault("attributes", self.DEFAULT_TABLE_ATTRIBUTES) - report = AbsoluteReport(**kwargs) - outfile = os.path.join( - self.eval_dir, - get_experiment_name() + "." + report.output_format) - self.add_report(report, outfile=outfile) - self.add_step( - 'publish-absolute-report', subprocess.call, ['publish', outfile]) - - def add_comparison_table_step(self, **kwargs): - """Add a step that makes pairwise revision comparisons. - - Create comparative reports for all pairs of Fast Downward - revisions. 
Each report pairs up the runs of the same config and - lists the two absolute attribute values and their difference - for all attributes in kwargs["attributes"]. - - All *kwargs* will be passed to the CompareConfigsReport class. - If the keyword argument *attributes* is not specified, a - default list of attributes is used. :: - - exp.add_comparison_table_step(attributes=["coverage"]) - - """ - kwargs.setdefault("attributes", self.DEFAULT_TABLE_ATTRIBUTES) - - def make_comparison_tables(): - for rev1, rev2 in itertools.combinations(self._revisions, 2): - compared_configs = [] - for config in self._configs: - config_nick = config.nick - compared_configs.append( - ("%s-%s" % (rev1, config_nick), - "%s-%s" % (rev2, config_nick), - "Diff (%s)" % config_nick)) - report = ComparativeReport(compared_configs, **kwargs) - outfile = os.path.join( - self.eval_dir, - "%s-%s-%s-compare.%s" % ( - self.name, rev1, rev2, report.output_format)) - report(self.eval_dir, outfile) - - def publish_comparison_tables(): - for rev1, rev2 in itertools.combinations(self._revisions, 2): - outfile = os.path.join( - self.eval_dir, - "%s-%s-%s-compare.html" % (self.name, rev1, rev2)) - subprocess.call(["publish", outfile]) - - self.add_step("make-comparison-tables", make_comparison_tables) - self.add_step( - "publish-comparison-tables", publish_comparison_tables) - - def add_scatter_plot_step(self, relative=False, attributes=None): - """Add step creating (relative) scatter plots for all revision pairs. - - Create a scatter plot for each combination of attribute, - configuration and revisions pair. If *attributes* is not - specified, a list of common scatter plot attributes is used. - For portfolios all attributes except "cost", "coverage" and - "plan_length" will be ignored. 
:: - - exp.add_scatter_plot_step(attributes=["expansions"]) - - """ - if relative: - report_class = RelativeScatterPlotReport - scatter_dir = os.path.join(self.eval_dir, "scatter-relative") - step_name = "make-relative-scatter-plots" - else: - report_class = ScatterPlotReport - scatter_dir = os.path.join(self.eval_dir, "scatter-absolute") - step_name = "make-absolute-scatter-plots" - if attributes is None: - attributes = self.DEFAULT_SCATTER_PLOT_ATTRIBUTES - - def make_scatter_plot(config_nick, rev1, rev2, attribute): - name = "-".join([self.name, rev1, rev2, attribute, config_nick]) - print "Make scatter plot for", name - algo1 = "{}-{}".format(rev1, config_nick) - algo2 = "{}-{}".format(rev2, config_nick) - report = report_class( - filter_config=[algo1, algo2], - attributes=[attribute], - get_category=lambda run1, run2: run1["domain"], - legend_location=(1.3, 0.5)) - report( - self.eval_dir, - os.path.join(scatter_dir, rev1 + "-" + rev2, name)) - - def make_scatter_plots(): - for config in self._configs: - for rev1, rev2 in itertools.combinations(self._revisions, 2): - for attribute in self.get_supported_attributes( - config.nick, attributes): - make_scatter_plot(config.nick, rev1, rev2, attribute) - - self.add_step(step_name, make_scatter_plots) diff --git a/experiments/issue884/parser.py b/experiments/issue884/parser.py deleted file mode 100755 index c1645239bd..0000000000 --- a/experiments/issue884/parser.py +++ /dev/null @@ -1,113 +0,0 @@ -#! 
/usr/bin/env python - -import logging -import re - -from lab.parser import Parser - - -class CommonParser(Parser): - def add_difference(self, diff, val1, val2): - def diff_func(content, props): - if props.get(val1) is None or props.get(val2) is None: - diff_val = None - else: - diff_val = props.get(val1) - props.get(val2) - props[diff] = diff_val - self.add_function(diff_func) - - def _get_flags(self, flags_string): - flags = 0 - for char in flags_string: - flags |= getattr(re, char) - return flags - - def add_repeated_pattern( - self, name, regex, file="run.log", required=False, type=int, - flags=""): - def find_all_occurences(content, props): - matches = re.findall(regex, content, flags=self._get_flags(flags)) - if required and not matches: - logging.error("Pattern {} not found in file {}".format(regex, file)) - props[name] = [type(m) for m in matches] - - self.add_function(find_all_occurences, file=file) - - def add_pattern(self, name, regex, file="run.log", required=False, type=int, flags=""): - Parser.add_pattern(self, name, regex, file=file, required=required, type=type, flags=flags) - - def add_bottom_up_pattern(self, name, regex, file="run.log", required=True, type=int, flags=""): - - def search_from_bottom(content, props): - reversed_content = "\n".join(reversed(content.splitlines())) - match = re.search(regex, reversed_content, flags=self._get_flags(flags)) - if required and not match: - logging.error("Pattern {} not found in file {}".format(regex, file)) - if match: - props[name] = type(match.group(1)) - - self.add_function(search_from_bottom, file=file) - - -def no_search(content, props): - if "search_start_time" not in props: - error = props.get("error") - if error is not None and error != "incomplete-search-found-no-plan": - props["error"] = "no-search-due-to-" + error - - -REFINEMENT_TIMES = [ - ("time_for_finding_traces", r"Time for finding abstract traces: (.+)s"), - ("time_for_finding_flaws", r"Time for finding flaws: (.+)s"), - 
("time_for_splitting_states", r"Time for splitting states: (.+)s"), -] - -REFINEMENT_VALUES = [ - ("loops", r"Looping transitions: (\d+)\n"), - ("transitions", r"Non-looping transitions: (\d+)\n"), -] - - -def compute_totals(content, props): - for attribute, pattern in REFINEMENT_TIMES + REFINEMENT_VALUES: - props["total_" + attribute] = sum(props[attribute]) - - -def add_time_analysis(content, props): - init_time = props.get("init_time") - if not init_time: - return - parts = [] - parts.append("{init_time:.2f}:".format(**props)) - for attribute, pattern in REFINEMENT_TIMES: - time = props["total_" + attribute] - relative_time = time / init_time - print time, type(time) - parts.append("{:.2f} ({:.2f})".format(time, relative_time)) - - props["time_analysis"] = " ".join(parts) - - -def main(): - parser = CommonParser() - parser.add_pattern("search_start_time", r"\[g=0, 1 evaluated, 0 expanded, t=(.+)s, \d+ KB\]", type=float) - parser.add_pattern("search_start_memory", r"\[g=0, 1 evaluated, 0 expanded, t=.+s, (\d+) KB\]", type=int) - parser.add_pattern("init_time", r"Time for initializing additive Cartesian heuristic: (.+)s", type=float) - parser.add_pattern("cartesian_states", r"Cartesian states: (\d+)\n", type=int) - parser.add_pattern("loops", r"Looping transitions: (\d+)\n", type=int) - parser.add_pattern("state_changing_transitions", r"Non-looping transitions: (\d+)\n", type=int) - - for attribute, pattern in REFINEMENT_TIMES: - parser.add_repeated_pattern(attribute, pattern, type=float, required=False) - for attribute, pattern in REFINEMENT_VALUES: - parser.add_repeated_pattern(attribute, pattern, type=int, required=False) - - parser.add_function(no_search) - parser.add_function(compute_totals) - parser.add_function(add_time_analysis) - - parser.parse() - - -if __name__ == "__main__": - main() diff --git a/experiments/issue884/relativescatter.py b/experiments/issue884/relativescatter.py deleted file mode 100644 index d8033a3324..0000000000 --- 
a/experiments/issue884/relativescatter.py +++ /dev/null @@ -1,105 +0,0 @@ -# -*- coding: utf-8 -*- - -from collections import defaultdict - -from matplotlib import ticker - -from downward.reports.scatter import ScatterPlotReport -from downward.reports.plot import PlotReport, Matplotlib, MatplotlibPlot - - -# TODO: handle outliers - -# TODO: this is mostly copied from ScatterMatplotlib (scatter.py) -class RelativeScatterMatplotlib(Matplotlib): - @classmethod - def _plot(cls, report, axes, categories, styles): - # Display grid - axes.grid(b=True, linestyle='-', color='0.75') - - has_points = False - # Generate the scatter plots - for category, coords in sorted(categories.items()): - X, Y = zip(*coords) - axes.scatter(X, Y, s=42, label=category, **styles[category]) - if X and Y: - has_points = True - - if report.xscale == 'linear' or report.yscale == 'linear': - plot_size = report.missing_val * 1.01 - else: - plot_size = report.missing_val * 1.25 - - # make 5 ticks above and below 1 - yticks = [] - tick_step = report.ylim_top**(1/5.0) - for i in xrange(-5, 6): - yticks.append(tick_step**i) - axes.set_yticks(yticks) - axes.get_yaxis().set_major_formatter(ticker.ScalarFormatter()) - - axes.set_xlim(report.xlim_left or -1, report.xlim_right or plot_size) - axes.set_ylim(report.ylim_bottom or -1, report.ylim_top or plot_size) - - for axis in [axes.xaxis, axes.yaxis]: - MatplotlibPlot.change_axis_formatter( - axis, - report.missing_val if report.show_missing else None) - return has_points - - -class RelativeScatterPlotReport(ScatterPlotReport): - """ - Generate a scatter plot that shows a relative comparison of two - algorithms with regard to the given attribute. The attribute value - of algorithm 1 is shown on the x-axis and the relation to the value - of algorithm 2 on the y-axis. 
- """ - - def __init__(self, show_missing=True, get_category=None, **kwargs): - ScatterPlotReport.__init__(self, show_missing, get_category, **kwargs) - if self.output_format == 'tex': - raise "not supported" - else: - self.writer = RelativeScatterMatplotlib - - def _fill_categories(self, runs): - # We discard the *runs* parameter. - # Map category names to value tuples - categories = defaultdict(list) - self.ylim_bottom = 2 - self.ylim_top = 0.5 - self.xlim_left = float("inf") - for (domain, problem), runs in self.problem_runs.items(): - if len(runs) != 2: - continue - run1, run2 = runs - assert (run1['algorithm'] == self.algorithms[0] and - run2['algorithm'] == self.algorithms[1]) - val1 = run1.get(self.attribute) - val2 = run2.get(self.attribute) - if not val1 or not val2: - continue - category = self.get_category(run1, run2) - assert val1 > 0, (domain, problem, self.algorithms[0], val1) - assert val2 > 0, (domain, problem, self.algorithms[1], val2) - x = val1 - y = val2 / float(val1) - - categories[category].append((x, y)) - - self.ylim_top = max(self.ylim_top, y) - self.ylim_bottom = min(self.ylim_bottom, y) - self.xlim_left = min(self.xlim_left, x) - - # center around 1 - if self.ylim_bottom < 1: - self.ylim_top = max(self.ylim_top, 1 / float(self.ylim_bottom)) - if self.ylim_top > 1: - self.ylim_bottom = min(self.ylim_bottom, 1 / float(self.ylim_top)) - return categories - - def _set_scales(self, xscale, yscale): - # ScatterPlot uses log-scaling on the x-axis by default. - PlotReport._set_scales( - self, xscale or self.attribute.scale or 'log', 'log') diff --git a/experiments/issue884/v3.py b/experiments/issue884/v3.py deleted file mode 100755 index 451cb94328..0000000000 --- a/experiments/issue884/v3.py +++ /dev/null @@ -1,81 +0,0 @@ -#! 
/usr/bin/env python -# -*- coding: utf-8 -*- - -import os - -from lab.environments import LocalEnvironment, BaselSlurmEnvironment - -import common_setup -from common_setup import IssueConfig, IssueExperiment -from relativescatter import RelativeScatterPlotReport - -DIR = os.path.dirname(os.path.abspath(__file__)) -BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"] -BUILD = "release64" -REVISIONS = ["issue884-base", "issue884-v1", "issue884-v2", "issue884-v3"] -DRIVER_OPTIONS = ["--build", BUILD] -CONFIGS = [ - IssueConfig( - nick + "-" + max_transitions_nick, - config, - build_options=[BUILD], - driver_options=DRIVER_OPTIONS) - for max_transitions_nick, max_transitions in [("2M", 2000000)] - for nick, config in [ - ("cegar-original", ["--search", "astar(cegar(subtasks=[original()], max_transitions={max_transitions}))".format(**locals())]), - ("cegar-landmarks-goals", ["--search", "astar(cegar(max_transitions={max_transitions}))".format(**locals())]), - ] -] -SUITE = common_setup.DEFAULT_OPTIMAL_SUITE -ENVIRONMENT = BaselSlurmEnvironment( - partition="infai_2", - email="jendrik.seipp@unibas.ch", - export=["PATH", "DOWNWARD_BENCHMARKS"]) - -if common_setup.is_test_run(): - SUITE = [ - #"depot:p02.pddl", - "gripper:prob01.pddl"] - ENVIRONMENT = LocalEnvironment(processes=1) - -exp = IssueExperiment( - revisions=REVISIONS, - configs=CONFIGS, - environment=ENVIRONMENT, -) -exp.add_suite(BENCHMARKS_DIR, SUITE) - -exp.add_parser(exp.EXITCODE_PARSER) -#exp.add_parser(exp.TRANSLATOR_PARSER) -exp.add_parser(exp.SINGLE_SEARCH_PARSER) -exp.add_parser(exp.PLANNER_PARSER) -exp.add_parser(os.path.join(DIR, "parser.py")) - -exp.add_step('build', exp.build) -exp.add_step('start', exp.start_runs) -exp.add_fetcher(name='fetch') - -REFINEMENT_ATTRIBUTES = [ - "time_for_finding_traces", - "time_for_finding_flaws", - "time_for_splitting_states", -] -attributes = ( - IssueExperiment.DEFAULT_TABLE_ATTRIBUTES + [ - "search_start_memory", "init_time", "time_analysis", "total_loops", - 
"total_transitions"] + - ["total_" + attr for attr in REFINEMENT_ATTRIBUTES]) -exp.add_absolute_report_step(attributes=attributes) -exp.add_comparison_table_step(attributes=attributes) - -if len(REVISIONS) == 2: - for attribute in ["init_time", "total_time_for_finding_traces"]: - for config in CONFIGS: - exp.add_report( - RelativeScatterPlotReport( - attributes=[attribute], - filter_algorithm=["{}-{}".format(rev, config.nick) for rev in REVISIONS], - get_category=lambda run1, run2: run1.get("domain")), - outfile="{}-{}-{}-{}-{}.png".format(exp.name, attribute, config.nick, *REVISIONS)) - -exp.run_steps() diff --git a/experiments/issue884/v4.py b/experiments/issue884/v4.py deleted file mode 100755 index 246502cc1f..0000000000 --- a/experiments/issue884/v4.py +++ /dev/null @@ -1,81 +0,0 @@ -#! /usr/bin/env python -# -*- coding: utf-8 -*- - -import itertools -import os - -from lab.environments import LocalEnvironment, BaselSlurmEnvironment - -import common_setup -from common_setup import IssueConfig, IssueExperiment -from relativescatter import RelativeScatterPlotReport - -DIR = os.path.dirname(os.path.abspath(__file__)) -BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"] -BUILD = "release64" -REVISIONS = ["issue884-v2", "issue884-v2-heap-queue", "issue884-v4"] -DRIVER_OPTIONS = ["--build", BUILD] -CONFIGS = [ - IssueConfig( - nick + "-" + max_transitions_nick, - config, - build_options=[BUILD], - driver_options=DRIVER_OPTIONS) - for max_transitions_nick, max_transitions in [("2M", 2000000)] - for nick, config in [ - ("cegar-original", ["--search", "astar(cegar(subtasks=[original()], max_transitions={max_transitions}))".format(**locals())]), - ] -] -SUITE = common_setup.DEFAULT_OPTIMAL_SUITE -ENVIRONMENT = BaselSlurmEnvironment( - partition="infai_2", - email="jendrik.seipp@unibas.ch", - export=["PATH", "DOWNWARD_BENCHMARKS"]) - -if common_setup.is_test_run(): - SUITE = [ - #"depot:p02.pddl", - "gripper:prob01.pddl"] - ENVIRONMENT = LocalEnvironment(processes=1) - 
-exp = IssueExperiment( - revisions=REVISIONS, - configs=CONFIGS, - environment=ENVIRONMENT, -) -exp.add_suite(BENCHMARKS_DIR, SUITE) - -exp.add_parser(exp.EXITCODE_PARSER) -#exp.add_parser(exp.TRANSLATOR_PARSER) -exp.add_parser(exp.SINGLE_SEARCH_PARSER) -exp.add_parser(exp.PLANNER_PARSER) -exp.add_parser(os.path.join(DIR, "parser.py")) - -exp.add_step('build', exp.build) -exp.add_step('start', exp.start_runs) -exp.add_fetcher(name='fetch') - -REFINEMENT_ATTRIBUTES = [ - "time_for_finding_traces", - "time_for_finding_flaws", - "time_for_splitting_states", -] -attributes = ( - IssueExperiment.DEFAULT_TABLE_ATTRIBUTES + [ - "search_start_memory", "init_time", "time_analysis", "total_loops", - "total_transitions"] + - ["total_" + attr for attr in REFINEMENT_ATTRIBUTES]) -exp.add_absolute_report_step(attributes=attributes) -exp.add_comparison_table_step(attributes=attributes) - -for revisions in itertools.combinations(REVISIONS, 2): - for attribute in ["total_time_for_finding_traces"]: - for config in CONFIGS: - exp.add_report( - RelativeScatterPlotReport( - attributes=[attribute], - filter_algorithm=["{}-{}".format(rev, config.nick) for rev in revisions], - get_category=lambda run1, run2: run1.get("domain")), - outfile="{}-{}-{}-{}-{}.png".format(exp.name, attribute, config.nick, *revisions)) - -exp.run_steps() diff --git a/experiments/issue887/common_setup.py b/experiments/issue887/common_setup.py deleted file mode 100644 index d3ef801cba..0000000000 --- a/experiments/issue887/common_setup.py +++ /dev/null @@ -1,401 +0,0 @@ -# -*- coding: utf-8 -*- - -import itertools -import os -import platform -import subprocess -import sys - -from lab.experiment import ARGPARSER -from lab import tools - -from downward.experiment import FastDownwardExperiment -from downward.reports.absolute import AbsoluteReport -from downward.reports.compare import ComparativeReport -from downward.reports.scatter import ScatterPlotReport - -from relativescatter import RelativeScatterPlotReport - - 
-def parse_args(): - ARGPARSER.add_argument( - "--test", - choices=["yes", "no", "auto"], - default="auto", - dest="test_run", - help="test experiment locally on a small suite if --test=yes or " - "--test=auto and we are not on a cluster") - return ARGPARSER.parse_args() - -ARGS = parse_args() - - -DEFAULT_OPTIMAL_SUITE = [ - 'agricola-opt18-strips', 'airport', 'barman-opt11-strips', - 'barman-opt14-strips', 'blocks', 'childsnack-opt14-strips', - 'data-network-opt18-strips', 'depot', 'driverlog', - 'elevators-opt08-strips', 'elevators-opt11-strips', - 'floortile-opt11-strips', 'floortile-opt14-strips', 'freecell', - 'ged-opt14-strips', 'grid', 'gripper', 'hiking-opt14-strips', - 'logistics00', 'logistics98', 'miconic', 'movie', 'mprime', - 'mystery', 'nomystery-opt11-strips', 'openstacks-opt08-strips', - 'openstacks-opt11-strips', 'openstacks-opt14-strips', - 'openstacks-strips', 'organic-synthesis-opt18-strips', - 'organic-synthesis-split-opt18-strips', 'parcprinter-08-strips', - 'parcprinter-opt11-strips', 'parking-opt11-strips', - 'parking-opt14-strips', 'pathways-noneg', 'pegsol-08-strips', - 'pegsol-opt11-strips', 'petri-net-alignment-opt18-strips', - 'pipesworld-notankage', 'pipesworld-tankage', 'psr-small', 'rovers', - 'satellite', 'scanalyzer-08-strips', 'scanalyzer-opt11-strips', - 'snake-opt18-strips', 'sokoban-opt08-strips', - 'sokoban-opt11-strips', 'spider-opt18-strips', 'storage', - 'termes-opt18-strips', 'tetris-opt14-strips', - 'tidybot-opt11-strips', 'tidybot-opt14-strips', 'tpp', - 'transport-opt08-strips', 'transport-opt11-strips', - 'transport-opt14-strips', 'trucks-strips', 'visitall-opt11-strips', - 'visitall-opt14-strips', 'woodworking-opt08-strips', - 'woodworking-opt11-strips', 'zenotravel'] - -DEFAULT_SATISFICING_SUITE = [ - 'agricola-sat18-strips', 'airport', 'assembly', - 'barman-sat11-strips', 'barman-sat14-strips', 'blocks', - 'caldera-sat18-adl', 'caldera-split-sat18-adl', 'cavediving-14-adl', - 'childsnack-sat14-strips', 
'citycar-sat14-adl', - 'data-network-sat18-strips', 'depot', 'driverlog', - 'elevators-sat08-strips', 'elevators-sat11-strips', - 'flashfill-sat18-adl', 'floortile-sat11-strips', - 'floortile-sat14-strips', 'freecell', 'ged-sat14-strips', 'grid', - 'gripper', 'hiking-sat14-strips', 'logistics00', 'logistics98', - 'maintenance-sat14-adl', 'miconic', 'miconic-fulladl', - 'miconic-simpleadl', 'movie', 'mprime', 'mystery', - 'nomystery-sat11-strips', 'nurikabe-sat18-adl', 'openstacks', - 'openstacks-sat08-adl', 'openstacks-sat08-strips', - 'openstacks-sat11-strips', 'openstacks-sat14-strips', - 'openstacks-strips', 'optical-telegraphs', - 'organic-synthesis-sat18-strips', - 'organic-synthesis-split-sat18-strips', 'parcprinter-08-strips', - 'parcprinter-sat11-strips', 'parking-sat11-strips', - 'parking-sat14-strips', 'pathways', 'pathways-noneg', - 'pegsol-08-strips', 'pegsol-sat11-strips', 'philosophers', - 'pipesworld-notankage', 'pipesworld-tankage', 'psr-large', - 'psr-middle', 'psr-small', 'rovers', 'satellite', - 'scanalyzer-08-strips', 'scanalyzer-sat11-strips', 'schedule', - 'settlers-sat18-adl', 'snake-sat18-strips', 'sokoban-sat08-strips', - 'sokoban-sat11-strips', 'spider-sat18-strips', 'storage', - 'termes-sat18-strips', 'tetris-sat14-strips', - 'thoughtful-sat14-strips', 'tidybot-sat11-strips', 'tpp', - 'transport-sat08-strips', 'transport-sat11-strips', - 'transport-sat14-strips', 'trucks', 'trucks-strips', - 'visitall-sat11-strips', 'visitall-sat14-strips', - 'woodworking-sat08-strips', 'woodworking-sat11-strips', - 'zenotravel'] - - -def get_script(): - """Get file name of main script.""" - return tools.get_script_path() - - -def get_script_dir(): - """Get directory of main script. - - Usually a relative directory (depends on how it was called by the user.)""" - return os.path.dirname(get_script()) - - -def get_experiment_name(): - """Get name for experiment. - - Derived from the absolute filename of the main script, e.g. 
- "/ham/spam/eggs.py" => "spam-eggs".""" - script = os.path.abspath(get_script()) - script_dir = os.path.basename(os.path.dirname(script)) - script_base = os.path.splitext(os.path.basename(script))[0] - return "%s-%s" % (script_dir, script_base) - - -def get_data_dir(): - """Get data dir for the experiment. - - This is the subdirectory "data" of the directory containing - the main script.""" - return os.path.join(get_script_dir(), "data", get_experiment_name()) - - -def get_repo_base(): - """Get base directory of the repository, as an absolute path. - - Search upwards in the directory tree from the main script until a - directory with a subdirectory named ".hg" is found. - - Abort if the repo base cannot be found.""" - path = os.path.abspath(get_script_dir()) - while os.path.dirname(path) != path: - if os.path.exists(os.path.join(path, ".hg")): - return path - path = os.path.dirname(path) - sys.exit("repo base could not be found") - - -def is_running_on_cluster(): - node = platform.node() - return node.endswith(".scicore.unibas.ch") or node.endswith(".cluster.bc2.ch") - - -def is_test_run(): - return ARGS.test_run == "yes" or ( - ARGS.test_run == "auto" and not is_running_on_cluster()) - - -def get_algo_nick(revision, config_nick): - return "{revision}-{config_nick}".format(**locals()) - - -class IssueConfig(object): - """Hold information about a planner configuration. - - See FastDownwardExperiment.add_algorithm() for documentation of the - constructor's options. 
- - """ - def __init__(self, nick, component_options, - build_options=None, driver_options=None): - self.nick = nick - self.component_options = component_options - self.build_options = build_options - self.driver_options = driver_options - - -class IssueExperiment(FastDownwardExperiment): - """Subclass of FastDownwardExperiment with some convenience features.""" - - DEFAULT_TEST_SUITE = ["depot:p01.pddl", "gripper:prob01.pddl"] - - DEFAULT_TABLE_ATTRIBUTES = [ - "cost", - "coverage", - "error", - "evaluations", - "expansions", - "expansions_until_last_jump", - "generated", - "memory", - "planner_memory", - "planner_time", - "quality", - "run_dir", - "score_evaluations", - "score_expansions", - "score_generated", - "score_memory", - "score_search_time", - "score_total_time", - "search_time", - "total_time", - ] - - DEFAULT_SCATTER_PLOT_ATTRIBUTES = [ - "evaluations", - "expansions", - "expansions_until_last_jump", - "initial_h_value", - "memory", - "search_time", - "total_time", - ] - - PORTFOLIO_ATTRIBUTES = [ - "cost", - "coverage", - "error", - "plan_length", - "run_dir", - ] - - def __init__(self, revisions=None, configs=None, path=None, **kwargs): - """ - - You can either specify both *revisions* and *configs* or none - of them. If they are omitted, you will need to call - exp.add_algorithm() manually. - - If *revisions* is given, it must be a non-empty list of - revision identifiers, which specify which planner versions to - use in the experiment. The same versions are used for - translator, preprocessor and search. :: - - IssueExperiment(revisions=["issue123", "4b3d581643"], ...) - - If *configs* is given, it must be a non-empty list of - IssueConfig objects. :: - - IssueExperiment(..., configs=[ - IssueConfig("ff", ["--search", "eager_greedy(ff())"]), - IssueConfig( - "lama", [], - driver_options=["--alias", "seq-sat-lama-2011"]), - ]) - - If *path* is specified, it must be the path to where the - experiment should be built (e.g. 
- /home/john/experiments/issue123/exp01/). If omitted, the - experiment path is derived automatically from the main - script's filename. Example:: - - script = experiments/issue123/exp01.py --> - path = experiments/issue123/data/issue123-exp01/ - - """ - - path = path or get_data_dir() - - FastDownwardExperiment.__init__(self, path=path, **kwargs) - - if (revisions and not configs) or (not revisions and configs): - raise ValueError( - "please provide either both or none of revisions and configs") - - for rev in revisions: - for config in configs: - self.add_algorithm( - get_algo_nick(rev, config.nick), - get_repo_base(), - rev, - config.component_options, - build_options=config.build_options, - driver_options=config.driver_options) - - self._revisions = revisions - self._configs = configs - - @classmethod - def _is_portfolio(cls, config_nick): - return "fdss" in config_nick - - @classmethod - def get_supported_attributes(cls, config_nick, attributes): - if cls._is_portfolio(config_nick): - return [attr for attr in attributes - if attr in cls.PORTFOLIO_ATTRIBUTES] - return attributes - - def add_absolute_report_step(self, outfile=None, **kwargs): - """Add step that makes an absolute report. - - Absolute reports are useful for experiments that don't compare - revisions. - - The report is written to the experiment evaluation directory. - - All *kwargs* will be passed to the AbsoluteReport class. If the - keyword argument *attributes* is not specified, a default list - of attributes is used. :: - - exp.add_absolute_report_step(attributes=["coverage"]) - - """ - kwargs.setdefault("attributes", self.DEFAULT_TABLE_ATTRIBUTES) - report = AbsoluteReport(**kwargs) - outfile = outfile or os.path.join( - self.eval_dir, - get_experiment_name() + "." 
+ report.output_format) - self.add_report(report, outfile=outfile) - self.add_step( - 'publish-' + os.path.basename(outfile), subprocess.call, ['publish', outfile]) - - def add_comparison_table_step(self, **kwargs): - """Add a step that makes pairwise revision comparisons. - - Create comparative reports for all pairs of Fast Downward - revisions. Each report pairs up the runs of the same config and - lists the two absolute attribute values and their difference - for all attributes in kwargs["attributes"]. - - Use *suffix* to denote a step name and filename suffix if you - want to add multiple different comparison table steps. - - All *kwargs* except *suffix* will be passed to the - CompareConfigsReport class. If the keyword argument - *attributes* is not specified, a default list of attributes is - used. :: - - exp.add_comparison_table_step(attributes=["coverage"]) - - """ - kwargs.setdefault("attributes", self.DEFAULT_TABLE_ATTRIBUTES) - suffix = kwargs.pop("suffix", "") - if suffix: - suffix = "-" + suffix - - def make_comparison_tables(): - for rev1, rev2 in itertools.combinations(self._revisions, 2): - compared_configs = [] - for config in self._configs: - config_nick = config.nick - compared_configs.append( - ("%s-%s" % (rev1, config_nick), - "%s-%s" % (rev2, config_nick), - "Diff (%s)" % config_nick)) - report = ComparativeReport(compared_configs, **kwargs) - outfile = os.path.join( - self.eval_dir, - "%s-%s-%s-compare%s.%s" % ( - self.name, rev1, rev2, suffix, report.output_format)) - report(self.eval_dir, outfile) - - def publish_comparison_tables(): - for rev1, rev2 in itertools.combinations(self._revisions, 2): - outfile = os.path.join( - self.eval_dir, - "%s-%s-%s-compare%s.html" % (self.name, rev1, rev2, suffix)) - subprocess.call(["publish", outfile]) - - self.add_step("make-comparison-tables%s" % suffix, make_comparison_tables) - self.add_step( - "publish-comparison-tables%s" % suffix, publish_comparison_tables) - - def add_scatter_plot_step(self, 
relative=False, attributes=None): - """Add step creating (relative) scatter plots for all revision pairs. - - Create a scatter plot for each combination of attribute, - configuration and revisions pair. If *attributes* is not - specified, a list of common scatter plot attributes is used. - For portfolios all attributes except "cost", "coverage" and - "plan_length" will be ignored. :: - - exp.add_scatter_plot_step(attributes=["expansions"]) - - """ - if relative: - report_class = RelativeScatterPlotReport - scatter_dir = os.path.join(self.eval_dir, "scatter-relative") - step_name = "make-relative-scatter-plots" - else: - report_class = ScatterPlotReport - scatter_dir = os.path.join(self.eval_dir, "scatter-absolute") - step_name = "make-absolute-scatter-plots" - if attributes is None: - attributes = self.DEFAULT_SCATTER_PLOT_ATTRIBUTES - - def make_scatter_plot(config_nick, rev1, rev2, attribute): - name = "-".join([self.name, rev1, rev2, attribute, config_nick]) - print "Make scatter plot for", name - algo1 = "{}-{}".format(rev1, config_nick) - algo2 = "{}-{}".format(rev2, config_nick) - report = report_class( - filter_config=[algo1, algo2], - attributes=[attribute], - get_category=lambda run1, run2: run1["domain"], - legend_location=(1.3, 0.5)) - report( - self.eval_dir, - os.path.join(scatter_dir, rev1 + "-" + rev2, name)) - - def make_scatter_plots(): - for config in self._configs: - for rev1, rev2 in itertools.combinations(self._revisions, 2): - for attribute in self.get_supported_attributes( - config.nick, attributes): - make_scatter_plot(config.nick, rev1, rev2, attribute) - - self.add_step(step_name, make_scatter_plots) diff --git a/experiments/issue887/relativescatter.py b/experiments/issue887/relativescatter.py deleted file mode 100644 index f74cb6e721..0000000000 --- a/experiments/issue887/relativescatter.py +++ /dev/null @@ -1,105 +0,0 @@ -# -*- coding: utf-8 -*- - -from collections import defaultdict - -from matplotlib import ticker - -from 
downward.reports.scatter import ScatterPlotReport -from downward.reports.plot import PlotReport, Matplotlib, MatplotlibPlot - - -# TODO: handle outliers - -# TODO: this is mostly copied from ScatterMatplotlib (scatter.py) -class RelativeScatterMatplotlib(Matplotlib): - @classmethod - def _plot(cls, report, axes, categories, styles): - # Display grid - axes.grid(b=True, linestyle='-', color='0.75') - - has_points = False - # Generate the scatter plots - for category, coords in sorted(categories.items()): - X, Y = zip(*coords) - axes.scatter(X, Y, s=42, label=category, **styles[category]) - if X and Y: - has_points = True - - if report.xscale == 'linear' or report.yscale == 'linear': - plot_size = report.missing_val * 1.01 - else: - plot_size = report.missing_val * 1.25 - - # make 5 ticks above and below 1 - yticks = [] - tick_step = report.ylim_top**(1/5.0) - for i in xrange(-5, 6): - yticks.append(tick_step**i) - axes.set_yticks(yticks) - axes.get_yaxis().set_major_formatter(ticker.ScalarFormatter()) - - axes.set_xlim(report.xlim_left or -1, report.xlim_right or plot_size) - axes.set_ylim(report.ylim_bottom or -1, report.ylim_top or plot_size) - - for axis in [axes.xaxis, axes.yaxis]: - MatplotlibPlot.change_axis_formatter( - axis, - report.missing_val if report.show_missing else None) - return has_points - - -class RelativeScatterPlotReport(ScatterPlotReport): - """ - Generate a scatter plot that shows a relative comparison of two - algorithms with regard to the given attribute. The attribute value - of algorithm 1 is shown on the x-axis and the relation to the value - of algorithm 2 on the y-axis. - """ - - def __init__(self, show_missing=True, get_category=None, **kwargs): - ScatterPlotReport.__init__(self, show_missing, get_category, **kwargs) - if self.output_format == 'tex': - raise "not supported" - else: - self.writer = RelativeScatterMatplotlib - - def _fill_categories(self, runs): - # We discard the *runs* parameter. 
- # Map category names to value tuples - categories = defaultdict(list) - self.ylim_bottom = 2 - self.ylim_top = 0.5 - self.xlim_left = float("inf") - for (domain, problem), runs in self.problem_runs.items(): - if len(runs) != 2: - continue - run1, run2 = runs - assert (run1['algorithm'] == self.algorithms[0] and - run2['algorithm'] == self.algorithms[1]) - val1 = run1.get(self.attribute) - val2 = run2.get(self.attribute) - if val1 is None or val2 is None: - continue - category = self.get_category(run1, run2) - assert val1 > 0, (domain, problem, self.algorithms[0], val1) - assert val2 > 0, (domain, problem, self.algorithms[1], val2) - x = val1 - y = val2 / float(val1) - - categories[category].append((x, y)) - - self.ylim_top = max(self.ylim_top, y) - self.ylim_bottom = min(self.ylim_bottom, y) - self.xlim_left = min(self.xlim_left, x) - - # center around 1 - if self.ylim_bottom < 1: - self.ylim_top = max(self.ylim_top, 1 / float(self.ylim_bottom)) - if self.ylim_top > 1: - self.ylim_bottom = min(self.ylim_bottom, 1 / float(self.ylim_top)) - return categories - - def _set_scales(self, xscale, yscale): - # ScatterPlot uses log-scaling on the x-axis by default. - PlotReport._set_scales( - self, xscale or self.attribute.scale or 'log', 'log') diff --git a/experiments/issue887/translator_additional_parser.py b/experiments/issue887/translator_additional_parser.py deleted file mode 100755 index 13a3802297..0000000000 --- a/experiments/issue887/translator_additional_parser.py +++ /dev/null @@ -1,12 +0,0 @@ -#!/usr/bin/env python - -import hashlib - -from lab.parser import Parser - -def add_hash_value(content, props): - props['translator_output_sas_hash'] = hashlib.sha512(content).hexdigest() - -parser = Parser() -parser.add_function(add_hash_value, file="output.sas") -parser.parse() diff --git a/experiments/issue887/v1.py b/experiments/issue887/v1.py deleted file mode 100755 index 0cd1badebe..0000000000 --- a/experiments/issue887/v1.py +++ /dev/null @@ -1,249 +0,0 @@ -#! 
/usr/bin/env python2 -# -*- coding: utf-8 -*- - -from collections import defaultdict -import os - -from lab.environments import LocalEnvironment, BaselSlurmEnvironment -from lab import tools - -from downward.reports.compare import ComparativeReport -from downward.reports import PlanningReport - -import common_setup -from common_setup import IssueConfig, IssueExperiment - - -EXPNAME = common_setup.get_experiment_name() -DIR = os.path.dirname(os.path.abspath(__file__)) -BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"] -REVISIONS = ["issue887-base", "issue887-v1"] -CONFIGS = [ - IssueConfig( - "translate-only", - [], - driver_options=["--translate"]) -] -ENVIRONMENT = BaselSlurmEnvironment( - partition="infai_1", - email="malte.helmert@unibas.ch", - export=["PATH", "DOWNWARD_BENCHMARKS"]) - -# This was generated by running "./suites.py all" in the benchmarks -# repository. -SUITE = [ - 'agricola-opt18-strips', - 'agricola-sat18-strips', - 'airport', - 'airport-adl', - 'assembly', - 'barman-mco14-strips', - 'barman-opt11-strips', - 'barman-opt14-strips', - 'barman-sat11-strips', - 'barman-sat14-strips', - 'blocks', - 'caldera-opt18-adl', - 'caldera-sat18-adl', - 'caldera-split-opt18-adl', - 'caldera-split-sat18-adl', - 'cavediving-14-adl', - 'childsnack-opt14-strips', - 'childsnack-sat14-strips', - 'citycar-opt14-adl', - 'citycar-sat14-adl', - 'data-network-opt18-strips', - 'data-network-sat18-strips', - 'depot', - 'driverlog', - 'elevators-opt08-strips', - 'elevators-opt11-strips', - 'elevators-sat08-strips', - 'elevators-sat11-strips', - 'flashfill-sat18-adl', - 'floortile-opt11-strips', - 'floortile-opt14-strips', - 'floortile-sat11-strips', - 'floortile-sat14-strips', - 'freecell', - 'ged-opt14-strips', - 'ged-sat14-strips', - 'grid', - 'gripper', - 'hiking-agl14-strips', - 'hiking-opt14-strips', - 'hiking-sat14-strips', - 'logistics00', - 'logistics98', - 'maintenance-opt14-adl', - 'maintenance-sat14-adl', - 'miconic', - 'miconic-fulladl', - 'miconic-simpleadl', 
- 'movie', - 'mprime', - 'mystery', - 'no-mprime', - 'no-mystery', - 'nomystery-opt11-strips', - 'nomystery-sat11-strips', - 'nurikabe-opt18-adl', - 'nurikabe-sat18-adl', - 'openstacks', - 'openstacks-agl14-strips', - 'openstacks-opt08-adl', - 'openstacks-opt08-strips', - 'openstacks-opt11-strips', - 'openstacks-opt14-strips', - 'openstacks-sat08-adl', - 'openstacks-sat08-strips', - 'openstacks-sat11-strips', - 'openstacks-sat14-strips', - 'openstacks-strips', - 'optical-telegraphs', - 'organic-synthesis-opt18-strips', - 'organic-synthesis-sat18-strips', - 'organic-synthesis-split-opt18-strips', - 'organic-synthesis-split-sat18-strips', - 'parcprinter-08-strips', - 'parcprinter-opt11-strips', - 'parcprinter-sat11-strips', - 'parking-opt11-strips', - 'parking-opt14-strips', - 'parking-sat11-strips', - 'parking-sat14-strips', - 'pathways', - 'pathways-noneg', - 'pegsol-08-strips', - 'pegsol-opt11-strips', - 'pegsol-sat11-strips', - 'petri-net-alignment-opt18-strips', - 'philosophers', - 'pipesworld-notankage', - 'pipesworld-tankage', - 'psr-large', - 'psr-middle', - 'psr-small', - 'rovers', - 'satellite', - 'scanalyzer-08-strips', - 'scanalyzer-opt11-strips', - 'scanalyzer-sat11-strips', - 'schedule', - 'settlers-opt18-adl', - 'settlers-sat18-adl', - 'snake-opt18-strips', - 'snake-sat18-strips', - 'sokoban-opt08-strips', - 'sokoban-opt11-strips', - 'sokoban-sat08-strips', - 'sokoban-sat11-strips', - 'spider-opt18-strips', - 'spider-sat18-strips', - 'storage', - 'termes-opt18-strips', - 'termes-sat18-strips', - 'tetris-opt14-strips', - 'tetris-sat14-strips', - 'thoughtful-mco14-strips', - 'thoughtful-sat14-strips', - 'tidybot-opt11-strips', - 'tidybot-opt14-strips', - 'tidybot-sat11-strips', - 'tpp', - 'transport-opt08-strips', - 'transport-opt11-strips', - 'transport-opt14-strips', - 'transport-sat08-strips', - 'transport-sat11-strips', - 'transport-sat14-strips', - 'trucks', - 'trucks-strips', - 'visitall-opt11-strips', - 'visitall-opt14-strips', - 
'visitall-sat11-strips', - 'visitall-sat14-strips', - 'woodworking-opt08-strips', - 'woodworking-opt11-strips', - 'woodworking-sat08-strips', - 'woodworking-sat11-strips', - 'zenotravel', -] - -if common_setup.is_test_run(): - SUITE = IssueExperiment.DEFAULT_TEST_SUITE - ENVIRONMENT = LocalEnvironment(processes=1) - -exp = IssueExperiment( - revisions=REVISIONS, - configs=CONFIGS, - environment=ENVIRONMENT, -) -exp.add_suite(BENCHMARKS_DIR, SUITE) - -exp.add_parser(exp.EXITCODE_PARSER) -exp.add_parser(exp.TRANSLATOR_PARSER) -exp.add_parser(exp.PLANNER_PARSER) -exp.add_parser("translator_additional_parser.py") - -del exp.commands['remove-output-sas'] - -class TranslatorDiffReport(PlanningReport): - def get_cell(self, run): - return ";".join(run.get(attr) for attr in self.attributes) - - def get_text(self): - lines = [] - for runs in self.problem_runs.values(): - hashes = set([r.get("translator_output_sas_hash") for r in runs]) - if len(hashes) > 1 or None in hashes: - lines.append(";".join([self.get_cell(r) for r in runs])) - return "\n".join(lines) - -class SameValueFilters(object): - """Ignore runs for a task where all algorithms have the same value.""" - def __init__(self, attribute): - self._attribute = attribute - self._tasks_to_values = defaultdict(list) - - def _get_task(self, run): - return (run['domain'], run['problem']) - - def store_values(self, run): - value = run.get(self._attribute) - self._tasks_to_values[self._get_task(run)].append(value) - # Don't filter this run, yet. 
- return True - - def filter_tasks_with_equal_values(self, run): - values = self._tasks_to_values[self._get_task(run)] - return len(set(values)) != 1 - -exp.add_step('build', exp.build) -exp.add_step('start', exp.start_runs) -exp.add_parse_again_step() -exp.add_fetcher(name='fetch') - -ATTRIBUTES = ["error", "run_dir", "translator_*", "translator_output_sas_hash"] -# exp.add_absolute_report_step( -# outfile=os.path.join(exp.eval_dir, "{EXPNAME}.html".format(**locals())), -# attributes=ATTRIBUTES) -exp.add_comparison_table_step( - attributes=ATTRIBUTES) - -same_value_filters = SameValueFilters("translator_output_sas_hash") -# exp.add_absolute_report_step( -# outfile=os.path.join(exp.eval_dir, "{EXPNAME}-filtered.html".format(**locals())), -# attributes=ATTRIBUTES, -# filter=[same_value_filters.store_values, same_value_filters.filter_tasks_with_equal_values]) -exp.add_comparison_table_step( - suffix="filtered", - attributes=ATTRIBUTES, - filter=[same_value_filters.store_values, same_value_filters.filter_tasks_with_equal_values]) - -exp.add_report(TranslatorDiffReport( - attributes=["domain", "problem", "algorithm", "run_dir"] - ), outfile="different_output_sas.csv" -) - -exp.run_steps() diff --git a/experiments/issue891/common_setup.py b/experiments/issue891/common_setup.py deleted file mode 100644 index 9899f21500..0000000000 --- a/experiments/issue891/common_setup.py +++ /dev/null @@ -1,394 +0,0 @@ -# -*- coding: utf-8 -*- - -import itertools -import os -import platform -import subprocess -import sys - -from lab.experiment import ARGPARSER -from lab import tools - -from downward.experiment import FastDownwardExperiment -from downward.reports.absolute import AbsoluteReport -from downward.reports.compare import ComparativeReport -from downward.reports.scatter import ScatterPlotReport - - -def parse_args(): - ARGPARSER.add_argument( - "--test", - choices=["yes", "no", "auto"], - default="auto", - dest="test_run", - help="test experiment locally on a small suite if 
--test=yes or " - "--test=auto and we are not on a cluster") - return ARGPARSER.parse_args() - -ARGS = parse_args() - - -DEFAULT_OPTIMAL_SUITE = [ - 'agricola-opt18-strips', 'airport', 'barman-opt11-strips', - 'barman-opt14-strips', 'blocks', 'childsnack-opt14-strips', - 'data-network-opt18-strips', 'depot', 'driverlog', - 'elevators-opt08-strips', 'elevators-opt11-strips', - 'floortile-opt11-strips', 'floortile-opt14-strips', 'freecell', - 'ged-opt14-strips', 'grid', 'gripper', 'hiking-opt14-strips', - 'logistics00', 'logistics98', 'miconic', 'movie', 'mprime', - 'mystery', 'nomystery-opt11-strips', 'openstacks-opt08-strips', - 'openstacks-opt11-strips', 'openstacks-opt14-strips', - 'openstacks-strips', 'organic-synthesis-opt18-strips', - 'organic-synthesis-split-opt18-strips', 'parcprinter-08-strips', - 'parcprinter-opt11-strips', 'parking-opt11-strips', - 'parking-opt14-strips', 'pathways-noneg', 'pegsol-08-strips', - 'pegsol-opt11-strips', 'petri-net-alignment-opt18-strips', - 'pipesworld-notankage', 'pipesworld-tankage', 'psr-small', 'rovers', - 'satellite', 'scanalyzer-08-strips', 'scanalyzer-opt11-strips', - 'snake-opt18-strips', 'sokoban-opt08-strips', - 'sokoban-opt11-strips', 'spider-opt18-strips', 'storage', - 'termes-opt18-strips', 'tetris-opt14-strips', - 'tidybot-opt11-strips', 'tidybot-opt14-strips', 'tpp', - 'transport-opt08-strips', 'transport-opt11-strips', - 'transport-opt14-strips', 'trucks-strips', 'visitall-opt11-strips', - 'visitall-opt14-strips', 'woodworking-opt08-strips', - 'woodworking-opt11-strips', 'zenotravel'] - -DEFAULT_SATISFICING_SUITE = [ - 'agricola-sat18-strips', 'airport', 'assembly', - 'barman-sat11-strips', 'barman-sat14-strips', 'blocks', - 'caldera-sat18-adl', 'caldera-split-sat18-adl', 'cavediving-14-adl', - 'childsnack-sat14-strips', 'citycar-sat14-adl', - 'data-network-sat18-strips', 'depot', 'driverlog', - 'elevators-sat08-strips', 'elevators-sat11-strips', - 'flashfill-sat18-adl', 'floortile-sat11-strips', - 
'floortile-sat14-strips', 'freecell', 'ged-sat14-strips', 'grid', - 'gripper', 'hiking-sat14-strips', 'logistics00', 'logistics98', - 'maintenance-sat14-adl', 'miconic', 'miconic-fulladl', - 'miconic-simpleadl', 'movie', 'mprime', 'mystery', - 'nomystery-sat11-strips', 'nurikabe-sat18-adl', 'openstacks', - 'openstacks-sat08-adl', 'openstacks-sat08-strips', - 'openstacks-sat11-strips', 'openstacks-sat14-strips', - 'openstacks-strips', 'optical-telegraphs', - 'organic-synthesis-sat18-strips', - 'organic-synthesis-split-sat18-strips', 'parcprinter-08-strips', - 'parcprinter-sat11-strips', 'parking-sat11-strips', - 'parking-sat14-strips', 'pathways', 'pathways-noneg', - 'pegsol-08-strips', 'pegsol-sat11-strips', 'philosophers', - 'pipesworld-notankage', 'pipesworld-tankage', 'psr-large', - 'psr-middle', 'psr-small', 'rovers', 'satellite', - 'scanalyzer-08-strips', 'scanalyzer-sat11-strips', 'schedule', - 'settlers-sat18-adl', 'snake-sat18-strips', 'sokoban-sat08-strips', - 'sokoban-sat11-strips', 'spider-sat18-strips', 'storage', - 'termes-sat18-strips', 'tetris-sat14-strips', - 'thoughtful-sat14-strips', 'tidybot-sat11-strips', 'tpp', - 'transport-sat08-strips', 'transport-sat11-strips', - 'transport-sat14-strips', 'trucks', 'trucks-strips', - 'visitall-sat11-strips', 'visitall-sat14-strips', - 'woodworking-sat08-strips', 'woodworking-sat11-strips', - 'zenotravel'] - - -def get_script(): - """Get file name of main script.""" - return tools.get_script_path() - - -def get_script_dir(): - """Get directory of main script. - - Usually a relative directory (depends on how it was called by the user.)""" - return os.path.dirname(get_script()) - - -def get_experiment_name(): - """Get name for experiment. - - Derived from the absolute filename of the main script, e.g. 
- "/ham/spam/eggs.py" => "spam-eggs".""" - script = os.path.abspath(get_script()) - script_dir = os.path.basename(os.path.dirname(script)) - script_base = os.path.splitext(os.path.basename(script))[0] - return "%s-%s" % (script_dir, script_base) - - -def get_data_dir(): - """Get data dir for the experiment. - - This is the subdirectory "data" of the directory containing - the main script.""" - return os.path.join(get_script_dir(), "data", get_experiment_name()) - - -def get_repo_base(): - """Get base directory of the repository, as an absolute path. - - Search upwards in the directory tree from the main script until a - directory with a subdirectory named ".git" is found. - - Abort if the repo base cannot be found.""" - path = os.path.abspath(get_script_dir()) - while os.path.dirname(path) != path: - if os.path.exists(os.path.join(path, ".git")): - return path - path = os.path.dirname(path) - sys.exit("repo base could not be found") - - -def is_running_on_cluster(): - node = platform.node() - return node.endswith(".scicore.unibas.ch") or node.endswith(".cluster.bc2.ch") - - -def is_test_run(): - return ARGS.test_run == "yes" or ( - ARGS.test_run == "auto" and not is_running_on_cluster()) - - -def get_algo_nick(revision, config_nick): - return "{revision}-{config_nick}".format(**locals()) - - -class IssueConfig(object): - """Hold information about a planner configuration. - - See FastDownwardExperiment.add_algorithm() for documentation of the - constructor's options. 
- - """ - def __init__(self, nick, component_options, - build_options=None, driver_options=None): - self.nick = nick - self.component_options = component_options - self.build_options = build_options - self.driver_options = driver_options - - -class IssueExperiment(FastDownwardExperiment): - """Subclass of FastDownwardExperiment with some convenience features.""" - - DEFAULT_TEST_SUITE = ["depot:p01.pddl", "gripper:prob01.pddl"] - - DEFAULT_TABLE_ATTRIBUTES = [ - "cost", - "coverage", - "error", - "evaluations", - "expansions", - "expansions_until_last_jump", - "generated", - "memory", - "planner_memory", - "planner_time", - "quality", - "run_dir", - "score_evaluations", - "score_expansions", - "score_generated", - "score_memory", - "score_search_time", - "score_total_time", - "search_time", - "total_time", - ] - - DEFAULT_SCATTER_PLOT_ATTRIBUTES = [ - "evaluations", - "expansions", - "expansions_until_last_jump", - "initial_h_value", - "memory", - "search_time", - "total_time", - ] - - PORTFOLIO_ATTRIBUTES = [ - "cost", - "coverage", - "error", - "plan_length", - "run_dir", - ] - - def __init__(self, revisions=None, configs=None, path=None, **kwargs): - """ - - You can either specify both *revisions* and *configs* or none - of them. If they are omitted, you will need to call - exp.add_algorithm() manually. - - If *revisions* is given, it must be a non-empty list of - revision identifiers, which specify which planner versions to - use in the experiment. The same versions are used for - translator, preprocessor and search. :: - - IssueExperiment(revisions=["issue123", "4b3d581643"], ...) - - If *configs* is given, it must be a non-empty list of - IssueConfig objects. :: - - IssueExperiment(..., configs=[ - IssueConfig("ff", ["--search", "eager_greedy(ff())"]), - IssueConfig( - "lama", [], - driver_options=["--alias", "seq-sat-lama-2011"]), - ]) - - If *path* is specified, it must be the path to where the - experiment should be built (e.g. 
- /home/john/experiments/issue123/exp01/). If omitted, the - experiment path is derived automatically from the main - script's filename. Example:: - - script = experiments/issue123/exp01.py --> - path = experiments/issue123/data/issue123-exp01/ - - """ - - path = path or get_data_dir() - - FastDownwardExperiment.__init__(self, path=path, **kwargs) - - if (revisions and not configs) or (not revisions and configs): - raise ValueError( - "please provide either both or none of revisions and configs") - - for rev in revisions: - for config in configs: - self.add_algorithm( - get_algo_nick(rev, config.nick), - get_repo_base(), - rev, - config.component_options, - build_options=config.build_options, - driver_options=config.driver_options) - - self._revisions = revisions - self._configs = configs - - @classmethod - def _is_portfolio(cls, config_nick): - return "fdss" in config_nick - - @classmethod - def get_supported_attributes(cls, config_nick, attributes): - if cls._is_portfolio(config_nick): - return [attr for attr in attributes - if attr in cls.PORTFOLIO_ATTRIBUTES] - return attributes - - def add_absolute_report_step(self, **kwargs): - """Add step that makes an absolute report. - - Absolute reports are useful for experiments that don't compare - revisions. - - The report is written to the experiment evaluation directory. - - All *kwargs* will be passed to the AbsoluteReport class. If the - keyword argument *attributes* is not specified, a default list - of attributes is used. :: - - exp.add_absolute_report_step(attributes=["coverage"]) - - """ - kwargs.setdefault("attributes", self.DEFAULT_TABLE_ATTRIBUTES) - report = AbsoluteReport(**kwargs) - outfile = os.path.join( - self.eval_dir, - get_experiment_name() + "." 
+ report.output_format) - self.add_report(report, outfile=outfile) - self.add_step( - 'publish-absolute-report', subprocess.call, ['publish', outfile]) - - def add_comparison_table_step(self, **kwargs): - """Add a step that makes pairwise revision comparisons. - - Create comparative reports for all pairs of Fast Downward - revisions. Each report pairs up the runs of the same config and - lists the two absolute attribute values and their difference - for all attributes in kwargs["attributes"]. - - All *kwargs* will be passed to the CompareConfigsReport class. - If the keyword argument *attributes* is not specified, a - default list of attributes is used. :: - - exp.add_comparison_table_step(attributes=["coverage"]) - - """ - kwargs.setdefault("attributes", self.DEFAULT_TABLE_ATTRIBUTES) - - def make_comparison_tables(): - for rev1, rev2 in itertools.combinations(self._revisions, 2): - compared_configs = [] - for config in self._configs: - config_nick = config.nick - compared_configs.append( - ("%s-%s" % (rev1, config_nick), - "%s-%s" % (rev2, config_nick), - "Diff (%s)" % config_nick)) - report = ComparativeReport(compared_configs, **kwargs) - outfile = os.path.join( - self.eval_dir, - "%s-%s-%s-compare.%s" % ( - self.name, rev1, rev2, report.output_format)) - report(self.eval_dir, outfile) - - def publish_comparison_tables(): - for rev1, rev2 in itertools.combinations(self._revisions, 2): - outfile = os.path.join( - self.eval_dir, - "%s-%s-%s-compare.html" % (self.name, rev1, rev2)) - subprocess.call(["publish", outfile]) - - self.add_step("make-comparison-tables", make_comparison_tables) - self.add_step( - "publish-comparison-tables", publish_comparison_tables) - - def add_scatter_plot_step(self, relative=False, attributes=None, additional=[]): - """Add step creating (relative) scatter plots for all revision pairs. - - Create a scatter plot for each combination of attribute, - configuration and revisions pair. 
If *attributes* is not - specified, a list of common scatter plot attributes is used. - For portfolios all attributes except "cost", "coverage" and - "plan_length" will be ignored. :: - - exp.add_scatter_plot_step(attributes=["expansions"]) - - """ - if relative: - scatter_dir = os.path.join(self.eval_dir, "scatter-relative") - step_name = "make-relative-scatter-plots" - else: - scatter_dir = os.path.join(self.eval_dir, "scatter-absolute") - step_name = "make-absolute-scatter-plots" - if attributes is None: - attributes = self.DEFAULT_SCATTER_PLOT_ATTRIBUTES - - def make_scatter_plot(config_nick, rev1, rev2, attribute, config_nick2=None): - name = "-".join([self.name, rev1, rev2, attribute, config_nick]) - if config_nick2 is not None: - name += "-" + config_nick2 - print("Make scatter plot for", name) - algo1 = get_algo_nick(rev1, config_nick) - algo2 = get_algo_nick(rev2, config_nick if config_nick2 is None else config_nick2) - report = ScatterPlotReport( - filter_algorithm=[algo1, algo2], - attributes=[attribute], - relative=relative, - get_category=lambda run1, run2: run1["domain"]) - report( - self.eval_dir, - os.path.join(scatter_dir, rev1 + "-" + rev2, name)) - - def make_scatter_plots(): - for config in self._configs: - for rev1, rev2 in itertools.combinations(self._revisions, 2): - for attribute in self.get_supported_attributes( - config.nick, attributes): - make_scatter_plot(config.nick, rev1, rev2, attribute) - for nick1, nick2, rev1, rev2, attribute in additional: - make_scatter_plot(nick1, rev1, rev2, attribute, config_nick2=nick2) - - self.add_step(step_name, make_scatter_plots) diff --git a/experiments/issue891/requirements.txt b/experiments/issue891/requirements.txt deleted file mode 100644 index b61be8f422..0000000000 --- a/experiments/issue891/requirements.txt +++ /dev/null @@ -1,12 +0,0 @@ -certifi==2020.6.20 -cycler==0.10.0 -kiwisolver==1.2.0 -lab==6.2 -matplotlib==3.3.2 -numpy==1.22.2 -Pillow==9.0.1 -pyparsing==2.4.7 -python-dateutil==2.8.1 
-simplejson==3.17.2 -six==1.15.0 -txt2tags==3.7 diff --git a/experiments/issue891/v1-mips.py b/experiments/issue891/v1-mips.py deleted file mode 100755 index 990484847b..0000000000 --- a/experiments/issue891/v1-mips.py +++ /dev/null @@ -1,69 +0,0 @@ -#! /usr/bin/env python3 - -import itertools -import os - -from lab.environments import LocalEnvironment, BaselSlurmEnvironment - -from downward.reports.compare import ComparativeReport -from downward.reports.scatter import ScatterPlotReport - -import common_setup -from common_setup import IssueConfig, IssueExperiment - -DIR = os.path.dirname(os.path.abspath(__file__)) -SCRIPT_NAME = os.path.splitext(os.path.basename(__file__))[0] -BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"] -REVISIONS = ["issue891-v1"] -CONFIGS = [ - IssueConfig("opcount-lp", ["--search", "astar(operatorcounting([state_equation_constraints(), lmcut_constraints()], lpsolver=cplex, use_integer_operator_counts=false))"]), - IssueConfig("opcount-mip", ["--search", "astar(operatorcounting([state_equation_constraints(), lmcut_constraints()], lpsolver=cplex, use_integer_operator_counts=true))"]), -] - -SUITE = common_setup.DEFAULT_OPTIMAL_SUITE -ENVIRONMENT = BaselSlurmEnvironment( - partition="infai_2", - email="florian.pommerening@unibas.ch", - export=["PATH", "DOWNWARD_BENCHMARKS"]) - -if common_setup.is_test_run(): - SUITE = IssueExperiment.DEFAULT_TEST_SUITE - ENVIRONMENT = LocalEnvironment(processes=3) - -exp = IssueExperiment( - revisions=REVISIONS, - configs=CONFIGS, - environment=ENVIRONMENT, -) -exp.add_suite(BENCHMARKS_DIR, SUITE) - -exp.add_parser(exp.EXITCODE_PARSER) -exp.add_parser(exp.SINGLE_SEARCH_PARSER) -exp.add_parser(exp.PLANNER_PARSER) - -exp.add_step('build', exp.build) -exp.add_step('start', exp.start_runs) -exp.add_fetcher(name='fetch') - -exp.add_report(ComparativeReport( - [("issue891-v1-opcount-lp", "issue891-v1-opcount-mip", "Diff (LP/MIP)")], - attributes=exp.DEFAULT_TABLE_ATTRIBUTES + ["initial_h_value"])) - 
-exp.add_scatter_plot_step(relative=False, attributes=["total_time", "memory"], - additional=[ - ("opcount-lp", "opcount-mip", "issue891-v1", "issue891-v1", "total_time"), - ("opcount-lp", "opcount-mip", "issue891-v1", "issue891-v1", "memory"), - ]) - -def interesting_h_value(run): - if "initial_h_value" in run and run["initial_h_value"] > 50: - run["initial_h_value"] = 51 - return run - -exp.add_report(ScatterPlotReport( - attributes=["initial_h_value"], - filter=interesting_h_value, - get_category=lambda run1, run2: run1["domain"], -)) - -exp.run_steps() diff --git a/experiments/issue891/v1.py b/experiments/issue891/v1.py deleted file mode 100755 index 37ea7938e1..0000000000 --- a/experiments/issue891/v1.py +++ /dev/null @@ -1,55 +0,0 @@ -#! /usr/bin/env python3 - -import itertools -import os - -from lab.environments import LocalEnvironment, BaselSlurmEnvironment - -from downward.reports.compare import ComparativeReport - -import common_setup -from common_setup import IssueConfig, IssueExperiment - -DIR = os.path.dirname(os.path.abspath(__file__)) -SCRIPT_NAME = os.path.splitext(os.path.basename(__file__))[0] -BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"] -REVISIONS = ["issue891-base", "issue891-v1"] -CONFIGS = [ - IssueConfig("opcount-seq-lmcut-cplex", ["--search", "astar(operatorcounting([state_equation_constraints(), lmcut_constraints()], lpsolver=cplex))"]), - IssueConfig("diverse-potentials-cplex", ["--search", "astar(diverse_potentials(lpsolver=cplex,random_seed=1729))"]), - IssueConfig("optimal-lmcount-cplex", ["--search", "astar(lmcount(lm_merged([lm_rhw(),lm_hm(m=1)]), admissible=true, optimal=true, lpsolver=cplex))"]), - IssueConfig("opcount-seq-lmcut-soplex", ["--search", "astar(operatorcounting([state_equation_constraints(), lmcut_constraints()], lpsolver=soplex))"]), - IssueConfig("diverse-potentials-soplex", ["--search", "astar(diverse_potentials(lpsolver=soplex,random_seed=1729))"]), - IssueConfig("optimal-lmcount-soplex", ["--search", 
"astar(lmcount(lm_merged([lm_rhw(),lm_hm(m=1)]), admissible=true, optimal=true, lpsolver=soplex))"]), -] - -SUITE = common_setup.DEFAULT_OPTIMAL_SUITE -ENVIRONMENT = BaselSlurmEnvironment( - partition="infai_2", - email="florian.pommerening@unibas.ch", - export=["PATH", "DOWNWARD_BENCHMARKS"]) - -if common_setup.is_test_run(): - SUITE = IssueExperiment.DEFAULT_TEST_SUITE - ENVIRONMENT = LocalEnvironment(processes=3) - -exp = IssueExperiment( - revisions=REVISIONS, - configs=CONFIGS, - environment=ENVIRONMENT, -) -exp.add_suite(BENCHMARKS_DIR, SUITE) - -exp.add_parser(exp.EXITCODE_PARSER) -exp.add_parser(exp.SINGLE_SEARCH_PARSER) -exp.add_parser(exp.PLANNER_PARSER) - -exp.add_step('build', exp.build) -exp.add_step('start', exp.start_runs) -exp.add_fetcher(name='fetch') - -#exp.add_absolute_report_step() -exp.add_comparison_table_step() -exp.add_scatter_plot_step(relative=True, attributes=["total_time", "memory"]) - -exp.run_steps() diff --git a/experiments/issue899/common_setup.py b/experiments/issue899/common_setup.py deleted file mode 100644 index 47a24652e0..0000000000 --- a/experiments/issue899/common_setup.py +++ /dev/null @@ -1,393 +0,0 @@ -# -*- coding: utf-8 -*- - -import itertools -import os -import platform -import subprocess -import sys - -from lab.experiment import ARGPARSER -from lab import tools - -from downward.experiment import FastDownwardExperiment -from downward.reports.absolute import AbsoluteReport -from downward.reports.compare import ComparativeReport -from downward.reports.scatter import ScatterPlotReport - -from relativescatter import RelativeScatterPlotReport - - -def parse_args(): - ARGPARSER.add_argument( - "--test", - choices=["yes", "no", "auto"], - default="auto", - dest="test_run", - help="test experiment locally on a small suite if --test=yes or " - "--test=auto and we are not on a cluster") - return ARGPARSER.parse_args() - -ARGS = parse_args() - - -DEFAULT_OPTIMAL_SUITE = [ - 'agricola-opt18-strips', 'airport', 
'barman-opt11-strips', - 'barman-opt14-strips', 'blocks', 'childsnack-opt14-strips', - 'data-network-opt18-strips', 'depot', 'driverlog', - 'elevators-opt08-strips', 'elevators-opt11-strips', - 'floortile-opt11-strips', 'floortile-opt14-strips', 'freecell', - 'ged-opt14-strips', 'grid', 'gripper', 'hiking-opt14-strips', - 'logistics00', 'logistics98', 'miconic', 'movie', 'mprime', - 'mystery', 'nomystery-opt11-strips', 'openstacks-opt08-strips', - 'openstacks-opt11-strips', 'openstacks-opt14-strips', - 'openstacks-strips', 'organic-synthesis-opt18-strips', - 'organic-synthesis-split-opt18-strips', 'parcprinter-08-strips', - 'parcprinter-opt11-strips', 'parking-opt11-strips', - 'parking-opt14-strips', 'pathways-noneg', 'pegsol-08-strips', - 'pegsol-opt11-strips', 'petri-net-alignment-opt18-strips', - 'pipesworld-notankage', 'pipesworld-tankage', 'psr-small', 'rovers', - 'satellite', 'scanalyzer-08-strips', 'scanalyzer-opt11-strips', - 'snake-opt18-strips', 'sokoban-opt08-strips', - 'sokoban-opt11-strips', 'spider-opt18-strips', 'storage', - 'termes-opt18-strips', 'tetris-opt14-strips', - 'tidybot-opt11-strips', 'tidybot-opt14-strips', 'tpp', - 'transport-opt08-strips', 'transport-opt11-strips', - 'transport-opt14-strips', 'trucks-strips', 'visitall-opt11-strips', - 'visitall-opt14-strips', 'woodworking-opt08-strips', - 'woodworking-opt11-strips', 'zenotravel'] - -DEFAULT_SATISFICING_SUITE = [ - 'agricola-sat18-strips', 'airport', 'assembly', - 'barman-sat11-strips', 'barman-sat14-strips', 'blocks', - 'caldera-sat18-adl', 'caldera-split-sat18-adl', 'cavediving-14-adl', - 'childsnack-sat14-strips', 'citycar-sat14-adl', - 'data-network-sat18-strips', 'depot', 'driverlog', - 'elevators-sat08-strips', 'elevators-sat11-strips', - 'flashfill-sat18-adl', 'floortile-sat11-strips', - 'floortile-sat14-strips', 'freecell', 'ged-sat14-strips', 'grid', - 'gripper', 'hiking-sat14-strips', 'logistics00', 'logistics98', - 'maintenance-sat14-adl', 'miconic', 'miconic-fulladl', - 
'miconic-simpleadl', 'movie', 'mprime', 'mystery', - 'nomystery-sat11-strips', 'nurikabe-sat18-adl', 'openstacks', - 'openstacks-sat08-adl', 'openstacks-sat08-strips', - 'openstacks-sat11-strips', 'openstacks-sat14-strips', - 'openstacks-strips', 'optical-telegraphs', - 'organic-synthesis-sat18-strips', - 'organic-synthesis-split-sat18-strips', 'parcprinter-08-strips', - 'parcprinter-sat11-strips', 'parking-sat11-strips', - 'parking-sat14-strips', 'pathways', 'pathways-noneg', - 'pegsol-08-strips', 'pegsol-sat11-strips', 'philosophers', - 'pipesworld-notankage', 'pipesworld-tankage', 'psr-large', - 'psr-middle', 'psr-small', 'rovers', 'satellite', - 'scanalyzer-08-strips', 'scanalyzer-sat11-strips', 'schedule', - 'settlers-sat18-adl', 'snake-sat18-strips', 'sokoban-sat08-strips', - 'sokoban-sat11-strips', 'spider-sat18-strips', 'storage', - 'termes-sat18-strips', 'tetris-sat14-strips', - 'thoughtful-sat14-strips', 'tidybot-sat11-strips', 'tpp', - 'transport-sat08-strips', 'transport-sat11-strips', - 'transport-sat14-strips', 'trucks', 'trucks-strips', - 'visitall-sat11-strips', 'visitall-sat14-strips', - 'woodworking-sat08-strips', 'woodworking-sat11-strips', - 'zenotravel'] - - -def get_script(): - """Get file name of main script.""" - return tools.get_script_path() - - -def get_script_dir(): - """Get directory of main script. - - Usually a relative directory (depends on how it was called by the user.)""" - return os.path.dirname(get_script()) - - -def get_experiment_name(): - """Get name for experiment. - - Derived from the absolute filename of the main script, e.g. - "/ham/spam/eggs.py" => "spam-eggs".""" - script = os.path.abspath(get_script()) - script_dir = os.path.basename(os.path.dirname(script)) - script_base = os.path.splitext(os.path.basename(script))[0] - return "%s-%s" % (script_dir, script_base) - - -def get_data_dir(): - """Get data dir for the experiment. 
- - This is the subdirectory "data" of the directory containing - the main script.""" - return os.path.join(get_script_dir(), "data", get_experiment_name()) - - -def get_repo_base(): - """Get base directory of the repository, as an absolute path. - - Search upwards in the directory tree from the main script until a - directory with a subdirectory named ".hg" is found. - - Abort if the repo base cannot be found.""" - path = os.path.abspath(get_script_dir()) - while os.path.dirname(path) != path: - if os.path.exists(os.path.join(path, ".hg")): - return path - path = os.path.dirname(path) - sys.exit("repo base could not be found") - - -def is_running_on_cluster(): - node = platform.node() - return node.endswith(".scicore.unibas.ch") or node.endswith(".cluster.bc2.ch") - - -def is_test_run(): - return ARGS.test_run == "yes" or ( - ARGS.test_run == "auto" and not is_running_on_cluster()) - - -def get_algo_nick(revision, config_nick): - return "{revision}-{config_nick}".format(**locals()) - - -class IssueConfig(object): - """Hold information about a planner configuration. - - See FastDownwardExperiment.add_algorithm() for documentation of the - constructor's options. 
- - """ - def __init__(self, nick, component_options, - build_options=None, driver_options=None): - self.nick = nick - self.component_options = component_options - self.build_options = build_options - self.driver_options = driver_options - - -class IssueExperiment(FastDownwardExperiment): - """Subclass of FastDownwardExperiment with some convenience features.""" - - DEFAULT_TEST_SUITE = ["depot:p01.pddl", "gripper:prob01.pddl"] - - DEFAULT_TABLE_ATTRIBUTES = [ - "cost", - "coverage", - "error", - "evaluations", - "expansions", - "expansions_until_last_jump", - "generated", - "memory", - "planner_memory", - "planner_time", - "quality", - "run_dir", - "score_evaluations", - "score_expansions", - "score_generated", - "score_memory", - "score_search_time", - "score_total_time", - "search_time", - "total_time", - ] - - DEFAULT_SCATTER_PLOT_ATTRIBUTES = [ - "evaluations", - "expansions", - "expansions_until_last_jump", - "initial_h_value", - "memory", - "search_time", - "total_time", - ] - - PORTFOLIO_ATTRIBUTES = [ - "cost", - "coverage", - "error", - "plan_length", - "run_dir", - ] - - def __init__(self, revisions=None, configs=None, path=None, **kwargs): - """ - - You can either specify both *revisions* and *configs* or none - of them. If they are omitted, you will need to call - exp.add_algorithm() manually. - - If *revisions* is given, it must be a non-empty list of - revision identifiers, which specify which planner versions to - use in the experiment. The same versions are used for - translator, preprocessor and search. :: - - IssueExperiment(revisions=["issue123", "4b3d581643"], ...) - - If *configs* is given, it must be a non-empty list of - IssueConfig objects. :: - - IssueExperiment(..., configs=[ - IssueConfig("ff", ["--search", "eager_greedy(ff())"]), - IssueConfig( - "lama", [], - driver_options=["--alias", "seq-sat-lama-2011"]), - ]) - - If *path* is specified, it must be the path to where the - experiment should be built (e.g. 
- /home/john/experiments/issue123/exp01/). If omitted, the - experiment path is derived automatically from the main - script's filename. Example:: - - script = experiments/issue123/exp01.py --> - path = experiments/issue123/data/issue123-exp01/ - - """ - - path = path or get_data_dir() - - FastDownwardExperiment.__init__(self, path=path, **kwargs) - - if (revisions and not configs) or (not revisions and configs): - raise ValueError( - "please provide either both or none of revisions and configs") - - for rev in revisions: - for config in configs: - self.add_algorithm( - get_algo_nick(rev, config.nick), - get_repo_base(), - rev, - config.component_options, - build_options=config.build_options, - driver_options=config.driver_options) - - self._revisions = revisions - self._configs = configs - - @classmethod - def _is_portfolio(cls, config_nick): - return "fdss" in config_nick - - @classmethod - def get_supported_attributes(cls, config_nick, attributes): - if cls._is_portfolio(config_nick): - return [attr for attr in attributes - if attr in cls.PORTFOLIO_ATTRIBUTES] - return attributes - - def add_absolute_report_step(self, **kwargs): - """Add step that makes an absolute report. - - Absolute reports are useful for experiments that don't compare - revisions. - - The report is written to the experiment evaluation directory. - - All *kwargs* will be passed to the AbsoluteReport class. If the - keyword argument *attributes* is not specified, a default list - of attributes is used. :: - - exp.add_absolute_report_step(attributes=["coverage"]) - - """ - kwargs.setdefault("attributes", self.DEFAULT_TABLE_ATTRIBUTES) - report = AbsoluteReport(**kwargs) - outfile = os.path.join( - self.eval_dir, - get_experiment_name() + "." 
+ report.output_format) - self.add_report(report, outfile=outfile) - self.add_step( - 'publish-absolute-report', subprocess.call, ['publish', outfile]) - - def add_comparison_table_step(self, **kwargs): - """Add a step that makes pairwise revision comparisons. - - Create comparative reports for all pairs of Fast Downward - revisions. Each report pairs up the runs of the same config and - lists the two absolute attribute values and their difference - for all attributes in kwargs["attributes"]. - - All *kwargs* will be passed to the CompareConfigsReport class. - If the keyword argument *attributes* is not specified, a - default list of attributes is used. :: - - exp.add_comparison_table_step(attributes=["coverage"]) - - """ - kwargs.setdefault("attributes", self.DEFAULT_TABLE_ATTRIBUTES) - - def make_comparison_tables(): - for rev1, rev2 in itertools.combinations(self._revisions, 2): - compared_configs = [] - for config in self._configs: - config_nick = config.nick - compared_configs.append( - ("%s-%s" % (rev1, config_nick), - "%s-%s" % (rev2, config_nick), - "Diff (%s)" % config_nick)) - report = ComparativeReport(compared_configs, **kwargs) - outfile = os.path.join( - self.eval_dir, - "%s-%s-%s-compare.%s" % ( - self.name, rev1, rev2, report.output_format)) - report(self.eval_dir, outfile) - - def publish_comparison_tables(): - for rev1, rev2 in itertools.combinations(self._revisions, 2): - outfile = os.path.join( - self.eval_dir, - "%s-%s-%s-compare.html" % (self.name, rev1, rev2)) - subprocess.call(["publish", outfile]) - - self.add_step("make-comparison-tables", make_comparison_tables) - self.add_step( - "publish-comparison-tables", publish_comparison_tables) - - def add_scatter_plot_step(self, relative=False, attributes=None): - """Add step creating (relative) scatter plots for all revision pairs. - - Create a scatter plot for each combination of attribute, - configuration and revisions pair. 
If *attributes* is not - specified, a list of common scatter plot attributes is used. - For portfolios all attributes except "cost", "coverage" and - "plan_length" will be ignored. :: - - exp.add_scatter_plot_step(attributes=["expansions"]) - - """ - if relative: - report_class = RelativeScatterPlotReport - scatter_dir = os.path.join(self.eval_dir, "scatter-relative") - step_name = "make-relative-scatter-plots" - else: - report_class = ScatterPlotReport - scatter_dir = os.path.join(self.eval_dir, "scatter-absolute") - step_name = "make-absolute-scatter-plots" - if attributes is None: - attributes = self.DEFAULT_SCATTER_PLOT_ATTRIBUTES - - def make_scatter_plot(config_nick, rev1, rev2, attribute): - name = "-".join([self.name, rev1, rev2, attribute, config_nick]) - print("Make scatter plot for", name) - algo1 = get_algo_nick(rev1, config_nick) - algo2 = get_algo_nick(rev2, config_nick) - report = report_class( - filter_algorithm=[algo1, algo2], - attributes=[attribute], - get_category=lambda run1, run2: run1["domain"]) - report( - self.eval_dir, - os.path.join(scatter_dir, rev1 + "-" + rev2, name)) - - def make_scatter_plots(): - for config in self._configs: - for rev1, rev2 in itertools.combinations(self._revisions, 2): - for attribute in self.get_supported_attributes( - config.nick, attributes): - make_scatter_plot(config.nick, rev1, rev2, attribute) - - self.add_step(step_name, make_scatter_plots) diff --git a/experiments/issue899/relativescatter.py b/experiments/issue899/relativescatter.py deleted file mode 100644 index f74cb6e721..0000000000 --- a/experiments/issue899/relativescatter.py +++ /dev/null @@ -1,105 +0,0 @@ -# -*- coding: utf-8 -*- - -from collections import defaultdict - -from matplotlib import ticker - -from downward.reports.scatter import ScatterPlotReport -from downward.reports.plot import PlotReport, Matplotlib, MatplotlibPlot - - -# TODO: handle outliers - -# TODO: this is mostly copied from ScatterMatplotlib (scatter.py) -class 
RelativeScatterMatplotlib(Matplotlib): - @classmethod - def _plot(cls, report, axes, categories, styles): - # Display grid - axes.grid(b=True, linestyle='-', color='0.75') - - has_points = False - # Generate the scatter plots - for category, coords in sorted(categories.items()): - X, Y = zip(*coords) - axes.scatter(X, Y, s=42, label=category, **styles[category]) - if X and Y: - has_points = True - - if report.xscale == 'linear' or report.yscale == 'linear': - plot_size = report.missing_val * 1.01 - else: - plot_size = report.missing_val * 1.25 - - # make 5 ticks above and below 1 - yticks = [] - tick_step = report.ylim_top**(1/5.0) - for i in xrange(-5, 6): - yticks.append(tick_step**i) - axes.set_yticks(yticks) - axes.get_yaxis().set_major_formatter(ticker.ScalarFormatter()) - - axes.set_xlim(report.xlim_left or -1, report.xlim_right or plot_size) - axes.set_ylim(report.ylim_bottom or -1, report.ylim_top or plot_size) - - for axis in [axes.xaxis, axes.yaxis]: - MatplotlibPlot.change_axis_formatter( - axis, - report.missing_val if report.show_missing else None) - return has_points - - -class RelativeScatterPlotReport(ScatterPlotReport): - """ - Generate a scatter plot that shows a relative comparison of two - algorithms with regard to the given attribute. The attribute value - of algorithm 1 is shown on the x-axis and the relation to the value - of algorithm 2 on the y-axis. - """ - - def __init__(self, show_missing=True, get_category=None, **kwargs): - ScatterPlotReport.__init__(self, show_missing, get_category, **kwargs) - if self.output_format == 'tex': - raise "not supported" - else: - self.writer = RelativeScatterMatplotlib - - def _fill_categories(self, runs): - # We discard the *runs* parameter. 
- # Map category names to value tuples - categories = defaultdict(list) - self.ylim_bottom = 2 - self.ylim_top = 0.5 - self.xlim_left = float("inf") - for (domain, problem), runs in self.problem_runs.items(): - if len(runs) != 2: - continue - run1, run2 = runs - assert (run1['algorithm'] == self.algorithms[0] and - run2['algorithm'] == self.algorithms[1]) - val1 = run1.get(self.attribute) - val2 = run2.get(self.attribute) - if val1 is None or val2 is None: - continue - category = self.get_category(run1, run2) - assert val1 > 0, (domain, problem, self.algorithms[0], val1) - assert val2 > 0, (domain, problem, self.algorithms[1], val2) - x = val1 - y = val2 / float(val1) - - categories[category].append((x, y)) - - self.ylim_top = max(self.ylim_top, y) - self.ylim_bottom = min(self.ylim_bottom, y) - self.xlim_left = min(self.xlim_left, x) - - # center around 1 - if self.ylim_bottom < 1: - self.ylim_top = max(self.ylim_top, 1 / float(self.ylim_bottom)) - if self.ylim_top > 1: - self.ylim_bottom = min(self.ylim_bottom, 1 / float(self.ylim_top)) - return categories - - def _set_scales(self, xscale, yscale): - # ScatterPlot uses log-scaling on the x-axis by default. - PlotReport._set_scales( - self, xscale or self.attribute.scale or 'log', 'log') diff --git a/experiments/issue899/requirements.txt b/experiments/issue899/requirements.txt deleted file mode 100644 index c826f88f41..0000000000 --- a/experiments/issue899/requirements.txt +++ /dev/null @@ -1 +0,0 @@ -lab==4.2 diff --git a/experiments/issue899/v1-opt.py b/experiments/issue899/v1-opt.py deleted file mode 100755 index c160c9b20f..0000000000 --- a/experiments/issue899/v1-opt.py +++ /dev/null @@ -1,51 +0,0 @@ -#! 
/usr/bin/env python3 - -import itertools -import os - -from lab.environments import LocalEnvironment, BaselSlurmEnvironment - -from downward.reports.compare import ComparativeReport - -import common_setup -from common_setup import IssueConfig, IssueExperiment -from relativescatter import RelativeScatterPlotReport - -DIR = os.path.dirname(os.path.abspath(__file__)) -SCRIPT_NAME = os.path.splitext(os.path.basename(__file__))[0] -BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"] -REVISIONS = ["issue899-base", "issue899-v1"] -CONFIGS = [ - IssueConfig("seq-opt-bjolp", [], driver_options=["--alias", "seq-opt-bjolp"]), -] - -SUITE = common_setup.DEFAULT_OPTIMAL_SUITE -ENVIRONMENT = BaselSlurmEnvironment( - partition="infai_2", - email="jendrik.seipp@unibas.ch", - export=["PATH", "DOWNWARD_BENCHMARKS"]) - -if common_setup.is_test_run(): - SUITE = IssueExperiment.DEFAULT_TEST_SUITE - ENVIRONMENT = LocalEnvironment(processes=1) - -exp = IssueExperiment( - revisions=REVISIONS, - configs=CONFIGS, - environment=ENVIRONMENT, -) -exp.add_suite(BENCHMARKS_DIR, SUITE) - -exp.add_parser(exp.EXITCODE_PARSER) -exp.add_parser(exp.SINGLE_SEARCH_PARSER) -exp.add_parser(exp.PLANNER_PARSER) - -exp.add_step('build', exp.build) -exp.add_step('start', exp.start_runs) -exp.add_fetcher(name='fetch') - -#exp.add_absolute_report_step() -exp.add_comparison_table_step() -exp.add_scatter_plot_step(relative=True, attributes=["search_time", "total_time"]) - -exp.run_steps() diff --git a/experiments/issue899/v1-sat.py b/experiments/issue899/v1-sat.py deleted file mode 100755 index 98a44d140b..0000000000 --- a/experiments/issue899/v1-sat.py +++ /dev/null @@ -1,66 +0,0 @@ -#! 
/usr/bin/env python3 - -import itertools -import os - -from lab.environments import LocalEnvironment, BaselSlurmEnvironment - -from downward.reports.compare import ComparativeReport - -import common_setup -from common_setup import IssueConfig, IssueExperiment -from relativescatter import RelativeScatterPlotReport - -DIR = os.path.dirname(os.path.abspath(__file__)) -SCRIPT_NAME = os.path.splitext(os.path.basename(__file__))[0] -BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"] -REVISIONS = ["issue899-base", "issue899-v1"] -CONFIGS = [ - IssueConfig("lama-first", [], driver_options=["--alias", "lama-first"]), - IssueConfig("lm_hm", [ - "--landmarks", "lm=lm_hm(2)", - "--heuristic", "hlm=lmcount(lm)", - "--search", "lazy_greedy([hlm])"]), - IssueConfig("lm_exhaust", [ - "--landmarks", "lm=lm_exhaust()", - "--heuristic", "hlm=lmcount(lm)", - "--search", "lazy_greedy([hlm])"]), - IssueConfig("lm_rhw", [ - "--landmarks", "lm=lm_rhw()", - "--heuristic", "hlm=lmcount(lm)", - "--search", "lazy_greedy([hlm])"]), - IssueConfig("lm_zg", [ - "--landmarks", "lm=lm_zg()", - "--heuristic", "hlm=lmcount(lm)", - "--search", "lazy_greedy([hlm])"]), -] - -SUITE = common_setup.DEFAULT_SATISFICING_SUITE -ENVIRONMENT = BaselSlurmEnvironment( - partition="infai_1", - email="jendrik.seipp@unibas.ch", - export=["PATH", "DOWNWARD_BENCHMARKS"]) - -if common_setup.is_test_run(): - SUITE = IssueExperiment.DEFAULT_TEST_SUITE - ENVIRONMENT = LocalEnvironment(processes=1) - -exp = IssueExperiment( - revisions=REVISIONS, - configs=CONFIGS, - environment=ENVIRONMENT, -) -exp.add_suite(BENCHMARKS_DIR, SUITE) - -exp.add_parser(exp.EXITCODE_PARSER) -exp.add_parser(exp.SINGLE_SEARCH_PARSER) -exp.add_parser(exp.PLANNER_PARSER) - -exp.add_step('build', exp.build) -exp.add_step('start', exp.start_runs) -exp.add_fetcher(name='fetch') - -#exp.add_absolute_report_step() -exp.add_comparison_table_step() - -exp.run_steps() diff --git a/experiments/issue901/common_setup.py b/experiments/issue901/common_setup.py 
deleted file mode 100644 index 5231e82f99..0000000000 --- a/experiments/issue901/common_setup.py +++ /dev/null @@ -1,393 +0,0 @@ -# -*- coding: utf-8 -*- - -import itertools -import os -import platform -import subprocess -import sys - -from lab.experiment import ARGPARSER -from lab import tools - -from downward.experiment import FastDownwardExperiment -from downward.reports.absolute import AbsoluteReport -from downward.reports.compare import ComparativeReport -from downward.reports.scatter import ScatterPlotReport - -from relativescatter import RelativeScatterPlotReport - - -def parse_args(): - ARGPARSER.add_argument( - "--test", - choices=["yes", "no", "auto"], - default="auto", - dest="test_run", - help="test experiment locally on a small suite if --test=yes or " - "--test=auto and we are not on a cluster") - return ARGPARSER.parse_args() - -ARGS = parse_args() - - -DEFAULT_OPTIMAL_SUITE = [ - 'agricola-opt18-strips', 'airport', 'barman-opt11-strips', - 'barman-opt14-strips', 'blocks', 'childsnack-opt14-strips', - 'data-network-opt18-strips', 'depot', 'driverlog', - 'elevators-opt08-strips', 'elevators-opt11-strips', - 'floortile-opt11-strips', 'floortile-opt14-strips', 'freecell', - 'ged-opt14-strips', 'grid', 'gripper', 'hiking-opt14-strips', - 'logistics00', 'logistics98', 'miconic', 'movie', 'mprime', - 'mystery', 'nomystery-opt11-strips', 'openstacks-opt08-strips', - 'openstacks-opt11-strips', 'openstacks-opt14-strips', - 'openstacks-strips', 'organic-synthesis-opt18-strips', - 'organic-synthesis-split-opt18-strips', 'parcprinter-08-strips', - 'parcprinter-opt11-strips', 'parking-opt11-strips', - 'parking-opt14-strips', 'pathways-noneg', 'pegsol-08-strips', - 'pegsol-opt11-strips', 'petri-net-alignment-opt18-strips', - 'pipesworld-notankage', 'pipesworld-tankage', 'psr-small', 'rovers', - 'satellite', 'scanalyzer-08-strips', 'scanalyzer-opt11-strips', - 'snake-opt18-strips', 'sokoban-opt08-strips', - 'sokoban-opt11-strips', 'spider-opt18-strips', 'storage', 
- 'termes-opt18-strips', 'tetris-opt14-strips', - 'tidybot-opt11-strips', 'tidybot-opt14-strips', 'tpp', - 'transport-opt08-strips', 'transport-opt11-strips', - 'transport-opt14-strips', 'trucks-strips', 'visitall-opt11-strips', - 'visitall-opt14-strips', 'woodworking-opt08-strips', - 'woodworking-opt11-strips', 'zenotravel'] - -DEFAULT_SATISFICING_SUITE = [ - 'agricola-sat18-strips', 'airport', 'assembly', - 'barman-sat11-strips', 'barman-sat14-strips', 'blocks', - 'caldera-sat18-adl', 'caldera-split-sat18-adl', 'cavediving-14-adl', - 'childsnack-sat14-strips', 'citycar-sat14-adl', - 'data-network-sat18-strips', 'depot', 'driverlog', - 'elevators-sat08-strips', 'elevators-sat11-strips', - 'flashfill-sat18-adl', 'floortile-sat11-strips', - 'floortile-sat14-strips', 'freecell', 'ged-sat14-strips', 'grid', - 'gripper', 'hiking-sat14-strips', 'logistics00', 'logistics98', - 'maintenance-sat14-adl', 'miconic', 'miconic-fulladl', - 'miconic-simpleadl', 'movie', 'mprime', 'mystery', - 'nomystery-sat11-strips', 'nurikabe-sat18-adl', 'openstacks', - 'openstacks-sat08-adl', 'openstacks-sat08-strips', - 'openstacks-sat11-strips', 'openstacks-sat14-strips', - 'openstacks-strips', 'optical-telegraphs', - 'organic-synthesis-sat18-strips', - 'organic-synthesis-split-sat18-strips', 'parcprinter-08-strips', - 'parcprinter-sat11-strips', 'parking-sat11-strips', - 'parking-sat14-strips', 'pathways', 'pathways-noneg', - 'pegsol-08-strips', 'pegsol-sat11-strips', 'philosophers', - 'pipesworld-notankage', 'pipesworld-tankage', 'psr-large', - 'psr-middle', 'psr-small', 'rovers', 'satellite', - 'scanalyzer-08-strips', 'scanalyzer-sat11-strips', 'schedule', - 'settlers-sat18-adl', 'snake-sat18-strips', 'sokoban-sat08-strips', - 'sokoban-sat11-strips', 'spider-sat18-strips', 'storage', - 'termes-sat18-strips', 'tetris-sat14-strips', - 'thoughtful-sat14-strips', 'tidybot-sat11-strips', 'tpp', - 'transport-sat08-strips', 'transport-sat11-strips', - 'transport-sat14-strips', 'trucks', 
'trucks-strips', - 'visitall-sat11-strips', 'visitall-sat14-strips', - 'woodworking-sat08-strips', 'woodworking-sat11-strips', - 'zenotravel'] - - -def get_script(): - """Get file name of main script.""" - return tools.get_script_path() - - -def get_script_dir(): - """Get directory of main script. - - Usually a relative directory (depends on how it was called by the user.)""" - return os.path.dirname(get_script()) - - -def get_experiment_name(): - """Get name for experiment. - - Derived from the absolute filename of the main script, e.g. - "/ham/spam/eggs.py" => "spam-eggs".""" - script = os.path.abspath(get_script()) - script_dir = os.path.basename(os.path.dirname(script)) - script_base = os.path.splitext(os.path.basename(script))[0] - return "%s-%s" % (script_dir, script_base) - - -def get_data_dir(): - """Get data dir for the experiment. - - This is the subdirectory "data" of the directory containing - the main script.""" - return os.path.join(get_script_dir(), "data", get_experiment_name()) - - -def get_repo_base(): - """Get base directory of the repository, as an absolute path. - - Search upwards in the directory tree from the main script until a - directory with a subdirectory named ".hg" is found. - - Abort if the repo base cannot be found.""" - path = os.path.abspath(get_script_dir()) - while os.path.dirname(path) != path: - if os.path.exists(os.path.join(path, ".hg")): - return path - path = os.path.dirname(path) - sys.exit("repo base could not be found") - - -def is_running_on_cluster(): - node = platform.node() - return node.endswith(".scicore.unibas.ch") or node.endswith(".cluster.bc2.ch") - - -def is_test_run(): - return ARGS.test_run == "yes" or ( - ARGS.test_run == "auto" and not is_running_on_cluster()) - - -def get_algo_nick(revision, config_nick): - return "{revision}-{config_nick}".format(**locals()) - - -class IssueConfig(object): - """Hold information about a planner configuration. 
- - See FastDownwardExperiment.add_algorithm() for documentation of the - constructor's options. - - """ - def __init__(self, nick, component_options, - build_options=None, driver_options=None): - self.nick = nick - self.component_options = component_options - self.build_options = build_options - self.driver_options = driver_options - - -class IssueExperiment(FastDownwardExperiment): - """Subclass of FastDownwardExperiment with some convenience features.""" - - DEFAULT_TEST_SUITE = ["depot:p01.pddl", "gripper:prob01.pddl"] - - DEFAULT_TABLE_ATTRIBUTES = [ - "cost", - "coverage", - "error", - "evaluations", - "expansions", - "expansions_until_last_jump", - "generated", - "memory", - "planner_memory", - "planner_time", - "quality", - "run_dir", - "score_evaluations", - "score_expansions", - "score_generated", - "score_memory", - "score_search_time", - "score_total_time", - "search_time", - "total_time", - ] - - DEFAULT_SCATTER_PLOT_ATTRIBUTES = [ - "evaluations", - "expansions", - "expansions_until_last_jump", - "initial_h_value", - "memory", - "search_time", - "total_time", - ] - - PORTFOLIO_ATTRIBUTES = [ - "cost", - "coverage", - "error", - "plan_length", - "run_dir", - ] - - def __init__(self, revisions=None, configs=None, path=None, **kwargs): - """ - - You can either specify both *revisions* and *configs* or none - of them. If they are omitted, you will need to call - exp.add_algorithm() manually. - - If *revisions* is given, it must be a non-empty list of - revision identifiers, which specify which planner versions to - use in the experiment. The same versions are used for - translator, preprocessor and search. :: - - IssueExperiment(revisions=["issue123", "4b3d581643"], ...) - - If *configs* is given, it must be a non-empty list of - IssueConfig objects. 
:: - - IssueExperiment(..., configs=[ - IssueConfig("ff", ["--search", "eager_greedy(ff())"]), - IssueConfig( - "lama", [], - driver_options=["--alias", "seq-sat-lama-2011"]), - ]) - - If *path* is specified, it must be the path to where the - experiment should be built (e.g. - /home/john/experiments/issue123/exp01/). If omitted, the - experiment path is derived automatically from the main - script's filename. Example:: - - script = experiments/issue123/exp01.py --> - path = experiments/issue123/data/issue123-exp01/ - - """ - - path = path or get_data_dir() - - FastDownwardExperiment.__init__(self, path=path, **kwargs) - - if (revisions and not configs) or (not revisions and configs): - raise ValueError( - "please provide either both or none of revisions and configs") - - for rev in revisions: - for config in configs: - self.add_algorithm( - get_algo_nick(rev, config.nick), - get_repo_base(), - rev, - config.component_options, - build_options=config.build_options, - driver_options=config.driver_options) - - self._revisions = revisions - self._configs = configs - - @classmethod - def _is_portfolio(cls, config_nick): - return "fdss" in config_nick - - @classmethod - def get_supported_attributes(cls, config_nick, attributes): - if cls._is_portfolio(config_nick): - return [attr for attr in attributes - if attr in cls.PORTFOLIO_ATTRIBUTES] - return attributes - - def add_absolute_report_step(self, **kwargs): - """Add step that makes an absolute report. - - Absolute reports are useful for experiments that don't compare - revisions. - - The report is written to the experiment evaluation directory. - - All *kwargs* will be passed to the AbsoluteReport class. If the - keyword argument *attributes* is not specified, a default list - of attributes is used. 
:: - - exp.add_absolute_report_step(attributes=["coverage"]) - - """ - kwargs.setdefault("attributes", self.DEFAULT_TABLE_ATTRIBUTES) - report = AbsoluteReport(**kwargs) - outfile = os.path.join( - self.eval_dir, - get_experiment_name() + "." + report.output_format) - self.add_report(report, outfile=outfile) - self.add_step( - 'publish-absolute-report', subprocess.call, ['publish', outfile]) - - def add_comparison_table_step(self, **kwargs): - """Add a step that makes pairwise revision comparisons. - - Create comparative reports for all pairs of Fast Downward - revisions. Each report pairs up the runs of the same config and - lists the two absolute attribute values and their difference - for all attributes in kwargs["attributes"]. - - All *kwargs* will be passed to the CompareConfigsReport class. - If the keyword argument *attributes* is not specified, a - default list of attributes is used. :: - - exp.add_comparison_table_step(attributes=["coverage"]) - - """ - kwargs.setdefault("attributes", self.DEFAULT_TABLE_ATTRIBUTES) - - def make_comparison_tables(): - for rev1, rev2 in itertools.combinations(self._revisions, 2): - compared_configs = [] - for config in self._configs: - config_nick = config.nick - compared_configs.append( - ("%s-%s" % (rev1, config_nick), - "%s-%s" % (rev2, config_nick), - "Diff (%s)" % config_nick)) - report = ComparativeReport(compared_configs, **kwargs) - outfile = os.path.join( - self.eval_dir, - "%s-%s-%s-compare.%s" % ( - self.name, rev1, rev2, report.output_format)) - report(self.eval_dir, outfile) - - def publish_comparison_tables(): - for rev1, rev2 in itertools.combinations(self._revisions, 2): - outfile = os.path.join( - self.eval_dir, - "%s-%s-%s-compare.html" % (self.name, rev1, rev2)) - subprocess.call(["publish", outfile]) - - self.add_step("make-comparison-tables", make_comparison_tables) - self.add_step( - "publish-comparison-tables", publish_comparison_tables) - - def add_scatter_plot_step(self, relative=False, 
attributes=None): - """Add step creating (relative) scatter plots for all revision pairs. - - Create a scatter plot for each combination of attribute, - configuration and revisions pair. If *attributes* is not - specified, a list of common scatter plot attributes is used. - For portfolios all attributes except "cost", "coverage" and - "plan_length" will be ignored. :: - - exp.add_scatter_plot_step(attributes=["expansions"]) - - """ - if relative: - report_class = RelativeScatterPlotReport - scatter_dir = os.path.join(self.eval_dir, "scatter-relative") - step_name = "make-relative-scatter-plots" - else: - report_class = ScatterPlotReport - scatter_dir = os.path.join(self.eval_dir, "scatter-absolute") - step_name = "make-absolute-scatter-plots" - if attributes is None: - attributes = self.DEFAULT_SCATTER_PLOT_ATTRIBUTES - - def make_scatter_plot(config_nick, rev1, rev2, attribute): - name = "-".join([self.name, rev1, rev2, attribute, config_nick]) - print "Make scatter plot for", name - algo1 = get_algo_nick(rev1, config_nick) - algo2 = get_algo_nick(rev2, config_nick) - report = report_class( - filter_algorithm=[algo1, algo2], - attributes=[attribute], - get_category=lambda run1, run2: run1["domain"]) - report( - self.eval_dir, - os.path.join(scatter_dir, rev1 + "-" + rev2, name)) - - def make_scatter_plots(): - for config in self._configs: - for rev1, rev2 in itertools.combinations(self._revisions, 2): - for attribute in self.get_supported_attributes( - config.nick, attributes): - make_scatter_plot(config.nick, rev1, rev2, attribute) - - self.add_step(step_name, make_scatter_plots) diff --git a/experiments/issue901/parser.py b/experiments/issue901/parser.py deleted file mode 100755 index d5cd0df10f..0000000000 --- a/experiments/issue901/parser.py +++ /dev/null @@ -1,104 +0,0 @@ -#! 
/usr/bin/env python - -import logging -import re - -from lab.parser import Parser - - -class CommonParser(Parser): - def add_difference(self, diff, val1, val2): - def diff_func(content, props): - if props.get(val1) is None or props.get(val2) is None: - diff_val = None - else: - diff_val = props.get(val1) - props.get(val2) - props[diff] = diff_val - self.add_function(diff_func) - - def _get_flags(self, flags_string): - flags = 0 - for char in flags_string: - flags |= getattr(re, char) - return flags - - def add_repeated_pattern( - self, name, regex, file="run.log", required=False, type=int, - flags=""): - def find_all_occurences(content, props): - matches = re.findall(regex, content, flags=self._get_flags(flags)) - if required and not matches: - logging.error("Pattern {0} not found in file {1}".format(regex, file)) - props[name] = [type(m) for m in matches] - - self.add_function(find_all_occurences, file=file) - - def add_pattern(self, name, regex, file="run.log", required=False, type=int, flags=""): - Parser.add_pattern(self, name, regex, file=file, required=required, type=type, flags=flags) - - def add_bottom_up_pattern(self, name, regex, file="run.log", required=True, type=int, flags=""): - - def search_from_bottom(content, props): - reversed_content = "\n".join(reversed(content.splitlines())) - match = re.search(regex, reversed_content, flags=self._get_flags(flags)) - if required and not match: - logging.error("Pattern {0} not found in file {1}".format(regex, file)) - if match: - props[name] = type(match.group(1)) - - self.add_function(search_from_bottom, file=file) - - -def no_search(content, props): - if "search_start_time" not in props: - error = props.get("error") - if error is not None and error != "incomplete-search-found-no-plan": - props["error"] = "no-search-due-to-" + error - - -REFINEMENT_ATTRIBUTES = [ - ("time_for_finding_traces", r"Time for finding abstract traces: (.+)s"), - ("time_for_finding_flaws", r"Time for finding flaws: (.+)s"), - 
("time_for_splitting_states", r"Time for splitting states: (.+)s"), -] - - -def compute_total_times(content, props): - for attribute, pattern in REFINEMENT_ATTRIBUTES: - props["total_" + attribute] = sum(props[attribute]) - - -def add_time_analysis(content, props): - init_time = props.get("init_time") - if not init_time: - return - parts = [] - parts.append("{init_time:.2f}:".format(**props)) - for attribute, pattern in REFINEMENT_ATTRIBUTES: - time = props["total_" + attribute] - relative_time = time / init_time - print time, type(time) - parts.append("{:.2f} ({:.2f})".format(time, relative_time)) - - props["time_analysis"] = " ".join(parts) - - -def main(): - parser = CommonParser() - parser.add_pattern("search_start_time", r"\[g=0, 1 evaluated, 0 expanded, t=(.+)s, \d+ KB\]", type=float) - parser.add_pattern("search_start_memory", r"\[g=0, 1 evaluated, 0 expanded, t=.+s, (\d+) KB\]", type=int) - parser.add_pattern("init_time", r"Time for initializing additive Cartesian heuristic: (.+)s", type=float) - parser.add_pattern("cartesian_states", r"^Cartesian states: (\d+)\n", type=int) - - for attribute, pattern in REFINEMENT_ATTRIBUTES: - parser.add_repeated_pattern(attribute, pattern, type=float, required=False) - - parser.add_function(no_search) - parser.add_function(compute_total_times) - parser.add_function(add_time_analysis) - - parser.parse() - - -if __name__ == "__main__": - main() diff --git a/experiments/issue901/relativescatter.py b/experiments/issue901/relativescatter.py deleted file mode 100644 index d8033a3324..0000000000 --- a/experiments/issue901/relativescatter.py +++ /dev/null @@ -1,105 +0,0 @@ -# -*- coding: utf-8 -*- - -from collections import defaultdict - -from matplotlib import ticker - -from downward.reports.scatter import ScatterPlotReport -from downward.reports.plot import PlotReport, Matplotlib, MatplotlibPlot - - -# TODO: handle outliers - -# TODO: this is mostly copied from ScatterMatplotlib (scatter.py) -class 
RelativeScatterMatplotlib(Matplotlib): - @classmethod - def _plot(cls, report, axes, categories, styles): - # Display grid - axes.grid(b=True, linestyle='-', color='0.75') - - has_points = False - # Generate the scatter plots - for category, coords in sorted(categories.items()): - X, Y = zip(*coords) - axes.scatter(X, Y, s=42, label=category, **styles[category]) - if X and Y: - has_points = True - - if report.xscale == 'linear' or report.yscale == 'linear': - plot_size = report.missing_val * 1.01 - else: - plot_size = report.missing_val * 1.25 - - # make 5 ticks above and below 1 - yticks = [] - tick_step = report.ylim_top**(1/5.0) - for i in xrange(-5, 6): - yticks.append(tick_step**i) - axes.set_yticks(yticks) - axes.get_yaxis().set_major_formatter(ticker.ScalarFormatter()) - - axes.set_xlim(report.xlim_left or -1, report.xlim_right or plot_size) - axes.set_ylim(report.ylim_bottom or -1, report.ylim_top or plot_size) - - for axis in [axes.xaxis, axes.yaxis]: - MatplotlibPlot.change_axis_formatter( - axis, - report.missing_val if report.show_missing else None) - return has_points - - -class RelativeScatterPlotReport(ScatterPlotReport): - """ - Generate a scatter plot that shows a relative comparison of two - algorithms with regard to the given attribute. The attribute value - of algorithm 1 is shown on the x-axis and the relation to the value - of algorithm 2 on the y-axis. - """ - - def __init__(self, show_missing=True, get_category=None, **kwargs): - ScatterPlotReport.__init__(self, show_missing, get_category, **kwargs) - if self.output_format == 'tex': - raise "not supported" - else: - self.writer = RelativeScatterMatplotlib - - def _fill_categories(self, runs): - # We discard the *runs* parameter. 
- # Map category names to value tuples - categories = defaultdict(list) - self.ylim_bottom = 2 - self.ylim_top = 0.5 - self.xlim_left = float("inf") - for (domain, problem), runs in self.problem_runs.items(): - if len(runs) != 2: - continue - run1, run2 = runs - assert (run1['algorithm'] == self.algorithms[0] and - run2['algorithm'] == self.algorithms[1]) - val1 = run1.get(self.attribute) - val2 = run2.get(self.attribute) - if not val1 or not val2: - continue - category = self.get_category(run1, run2) - assert val1 > 0, (domain, problem, self.algorithms[0], val1) - assert val2 > 0, (domain, problem, self.algorithms[1], val2) - x = val1 - y = val2 / float(val1) - - categories[category].append((x, y)) - - self.ylim_top = max(self.ylim_top, y) - self.ylim_bottom = min(self.ylim_bottom, y) - self.xlim_left = min(self.xlim_left, x) - - # center around 1 - if self.ylim_bottom < 1: - self.ylim_top = max(self.ylim_top, 1 / float(self.ylim_bottom)) - if self.ylim_top > 1: - self.ylim_bottom = min(self.ylim_bottom, 1 / float(self.ylim_top)) - return categories - - def _set_scales(self, xscale, yscale): - # ScatterPlot uses log-scaling on the x-axis by default. - PlotReport._set_scales( - self, xscale or self.attribute.scale or 'log', 'log') diff --git a/experiments/issue901/v1.py b/experiments/issue901/v1.py deleted file mode 100755 index d1bdedbefb..0000000000 --- a/experiments/issue901/v1.py +++ /dev/null @@ -1,63 +0,0 @@ -#! 
/usr/bin/env python -# -*- coding: utf-8 -*- - -import itertools -import os - -from lab.environments import LocalEnvironment, BaselSlurmEnvironment - -from downward.reports.compare import ComparativeReport - -import common_setup -from common_setup import IssueConfig, IssueExperiment -from relativescatter import RelativeScatterPlotReport - -DIR = os.path.dirname(os.path.abspath(__file__)) -SCRIPT_NAME = os.path.splitext(os.path.basename(__file__))[0] -BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"] -REVISIONS = ["issue901-base", "issue901-v1"] -CONFIGS = [ - IssueConfig("cegar-original", ["--search", "astar(cegar(subtasks=[original()], max_transitions=1M, max_time=infinity))"]), - IssueConfig("cegar-lm-goals", ["--search", "astar(cegar(subtasks=[landmarks(), goals()], max_transitions=1M, max_time=infinity))"]), -] - -SUITE = common_setup.DEFAULT_OPTIMAL_SUITE -ENVIRONMENT = BaselSlurmEnvironment( - partition="infai_2", - email="jendrik.seipp@unibas.ch", - export=["PATH", "DOWNWARD_BENCHMARKS"]) - -if common_setup.is_test_run(): - SUITE = IssueExperiment.DEFAULT_TEST_SUITE - ENVIRONMENT = LocalEnvironment(processes=1) - -exp = IssueExperiment( - revisions=REVISIONS, - configs=CONFIGS, - environment=ENVIRONMENT, -) -exp.add_suite(BENCHMARKS_DIR, SUITE) - -exp.add_parser(exp.EXITCODE_PARSER) -exp.add_parser(exp.SINGLE_SEARCH_PARSER) -exp.add_parser(exp.PLANNER_PARSER) -exp.add_parser(os.path.join(DIR, "parser.py")) - -exp.add_step('build', exp.build) -exp.add_step('start', exp.start_runs) -exp.add_fetcher(name='fetch') - -REFINEMENT_ATTRIBUTES = [ - "time_for_finding_traces", - "time_for_finding_flaws", - "time_for_splitting_states", -] -attributes = ( - IssueExperiment.DEFAULT_TABLE_ATTRIBUTES + - ["search_start_memory", "init_time", "time_analysis"] + - ["total_" + attr for attr in REFINEMENT_ATTRIBUTES]) -#exp.add_absolute_report_step(attributes=attributes) -exp.add_comparison_table_step(attributes=attributes) -exp.add_scatter_plot_step(relative=True, 
attributes=["search_time", "total_time"]) - -exp.run_steps() diff --git a/experiments/issue901/v2.py b/experiments/issue901/v2.py deleted file mode 100755 index bc3d2cd3bb..0000000000 --- a/experiments/issue901/v2.py +++ /dev/null @@ -1,65 +0,0 @@ -#! /usr/bin/env python -# -*- coding: utf-8 -*- - -import itertools -import os - -from lab.environments import LocalEnvironment, BaselSlurmEnvironment - -from downward.reports.compare import ComparativeReport - -import common_setup -from common_setup import IssueConfig, IssueExperiment -from relativescatter import RelativeScatterPlotReport - -DIR = os.path.dirname(os.path.abspath(__file__)) -SCRIPT_NAME = os.path.splitext(os.path.basename(__file__))[0] -BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"] -REVISIONS = ["issue901-base", "issue901-v2"] -CONFIGS = [ - IssueConfig("cegar-original-1M", ["--search", "astar(cegar(subtasks=[original()], max_transitions=1M, max_time=infinity))"]), - IssueConfig("cegar-lm-goals-1M", ["--search", "astar(cegar(subtasks=[landmarks(), goals()], max_transitions=1M, max_time=infinity))"]), - IssueConfig("cegar-original-900s", ["--search", "astar(cegar(subtasks=[original()], max_transitions=infinity, max_time=900))"]), - IssueConfig("cegar-lm-goals-900s", ["--search", "astar(cegar(subtasks=[landmarks(), goals()], max_transitions=infinity, max_time=900))"]), -] - -SUITE = common_setup.DEFAULT_OPTIMAL_SUITE -ENVIRONMENT = BaselSlurmEnvironment( - partition="infai_1", - email="jendrik.seipp@unibas.ch", - export=["PATH", "DOWNWARD_BENCHMARKS"]) - -if common_setup.is_test_run(): - SUITE = IssueExperiment.DEFAULT_TEST_SUITE - ENVIRONMENT = LocalEnvironment(processes=1) - -exp = IssueExperiment( - revisions=REVISIONS, - configs=CONFIGS, - environment=ENVIRONMENT, -) -exp.add_suite(BENCHMARKS_DIR, SUITE) - -exp.add_parser(exp.EXITCODE_PARSER) -exp.add_parser(exp.SINGLE_SEARCH_PARSER) -exp.add_parser(exp.PLANNER_PARSER) -exp.add_parser(os.path.join(DIR, "parser.py")) - -exp.add_step('build', 
exp.build) -exp.add_step('start', exp.start_runs) -exp.add_fetcher(name='fetch') - -REFINEMENT_ATTRIBUTES = [ - "time_for_finding_traces", - "time_for_finding_flaws", - "time_for_splitting_states", -] -attributes = ( - IssueExperiment.DEFAULT_TABLE_ATTRIBUTES + - ["search_start_memory", "init_time", "time_analysis"] + - ["total_" + attr for attr in REFINEMENT_ATTRIBUTES]) -#exp.add_absolute_report_step(attributes=attributes) -exp.add_comparison_table_step(attributes=attributes) -exp.add_scatter_plot_step(relative=True, attributes=["search_time", "total_time"]) - -exp.run_steps() diff --git a/experiments/issue903/common_setup.py b/experiments/issue903/common_setup.py deleted file mode 100644 index 5231e82f99..0000000000 --- a/experiments/issue903/common_setup.py +++ /dev/null @@ -1,393 +0,0 @@ -# -*- coding: utf-8 -*- - -import itertools -import os -import platform -import subprocess -import sys - -from lab.experiment import ARGPARSER -from lab import tools - -from downward.experiment import FastDownwardExperiment -from downward.reports.absolute import AbsoluteReport -from downward.reports.compare import ComparativeReport -from downward.reports.scatter import ScatterPlotReport - -from relativescatter import RelativeScatterPlotReport - - -def parse_args(): - ARGPARSER.add_argument( - "--test", - choices=["yes", "no", "auto"], - default="auto", - dest="test_run", - help="test experiment locally on a small suite if --test=yes or " - "--test=auto and we are not on a cluster") - return ARGPARSER.parse_args() - -ARGS = parse_args() - - -DEFAULT_OPTIMAL_SUITE = [ - 'agricola-opt18-strips', 'airport', 'barman-opt11-strips', - 'barman-opt14-strips', 'blocks', 'childsnack-opt14-strips', - 'data-network-opt18-strips', 'depot', 'driverlog', - 'elevators-opt08-strips', 'elevators-opt11-strips', - 'floortile-opt11-strips', 'floortile-opt14-strips', 'freecell', - 'ged-opt14-strips', 'grid', 'gripper', 'hiking-opt14-strips', - 'logistics00', 'logistics98', 'miconic', 'movie', 
'mprime', - 'mystery', 'nomystery-opt11-strips', 'openstacks-opt08-strips', - 'openstacks-opt11-strips', 'openstacks-opt14-strips', - 'openstacks-strips', 'organic-synthesis-opt18-strips', - 'organic-synthesis-split-opt18-strips', 'parcprinter-08-strips', - 'parcprinter-opt11-strips', 'parking-opt11-strips', - 'parking-opt14-strips', 'pathways-noneg', 'pegsol-08-strips', - 'pegsol-opt11-strips', 'petri-net-alignment-opt18-strips', - 'pipesworld-notankage', 'pipesworld-tankage', 'psr-small', 'rovers', - 'satellite', 'scanalyzer-08-strips', 'scanalyzer-opt11-strips', - 'snake-opt18-strips', 'sokoban-opt08-strips', - 'sokoban-opt11-strips', 'spider-opt18-strips', 'storage', - 'termes-opt18-strips', 'tetris-opt14-strips', - 'tidybot-opt11-strips', 'tidybot-opt14-strips', 'tpp', - 'transport-opt08-strips', 'transport-opt11-strips', - 'transport-opt14-strips', 'trucks-strips', 'visitall-opt11-strips', - 'visitall-opt14-strips', 'woodworking-opt08-strips', - 'woodworking-opt11-strips', 'zenotravel'] - -DEFAULT_SATISFICING_SUITE = [ - 'agricola-sat18-strips', 'airport', 'assembly', - 'barman-sat11-strips', 'barman-sat14-strips', 'blocks', - 'caldera-sat18-adl', 'caldera-split-sat18-adl', 'cavediving-14-adl', - 'childsnack-sat14-strips', 'citycar-sat14-adl', - 'data-network-sat18-strips', 'depot', 'driverlog', - 'elevators-sat08-strips', 'elevators-sat11-strips', - 'flashfill-sat18-adl', 'floortile-sat11-strips', - 'floortile-sat14-strips', 'freecell', 'ged-sat14-strips', 'grid', - 'gripper', 'hiking-sat14-strips', 'logistics00', 'logistics98', - 'maintenance-sat14-adl', 'miconic', 'miconic-fulladl', - 'miconic-simpleadl', 'movie', 'mprime', 'mystery', - 'nomystery-sat11-strips', 'nurikabe-sat18-adl', 'openstacks', - 'openstacks-sat08-adl', 'openstacks-sat08-strips', - 'openstacks-sat11-strips', 'openstacks-sat14-strips', - 'openstacks-strips', 'optical-telegraphs', - 'organic-synthesis-sat18-strips', - 'organic-synthesis-split-sat18-strips', 'parcprinter-08-strips', - 
'parcprinter-sat11-strips', 'parking-sat11-strips', - 'parking-sat14-strips', 'pathways', 'pathways-noneg', - 'pegsol-08-strips', 'pegsol-sat11-strips', 'philosophers', - 'pipesworld-notankage', 'pipesworld-tankage', 'psr-large', - 'psr-middle', 'psr-small', 'rovers', 'satellite', - 'scanalyzer-08-strips', 'scanalyzer-sat11-strips', 'schedule', - 'settlers-sat18-adl', 'snake-sat18-strips', 'sokoban-sat08-strips', - 'sokoban-sat11-strips', 'spider-sat18-strips', 'storage', - 'termes-sat18-strips', 'tetris-sat14-strips', - 'thoughtful-sat14-strips', 'tidybot-sat11-strips', 'tpp', - 'transport-sat08-strips', 'transport-sat11-strips', - 'transport-sat14-strips', 'trucks', 'trucks-strips', - 'visitall-sat11-strips', 'visitall-sat14-strips', - 'woodworking-sat08-strips', 'woodworking-sat11-strips', - 'zenotravel'] - - -def get_script(): - """Get file name of main script.""" - return tools.get_script_path() - - -def get_script_dir(): - """Get directory of main script. - - Usually a relative directory (depends on how it was called by the user.)""" - return os.path.dirname(get_script()) - - -def get_experiment_name(): - """Get name for experiment. - - Derived from the absolute filename of the main script, e.g. - "/ham/spam/eggs.py" => "spam-eggs".""" - script = os.path.abspath(get_script()) - script_dir = os.path.basename(os.path.dirname(script)) - script_base = os.path.splitext(os.path.basename(script))[0] - return "%s-%s" % (script_dir, script_base) - - -def get_data_dir(): - """Get data dir for the experiment. - - This is the subdirectory "data" of the directory containing - the main script.""" - return os.path.join(get_script_dir(), "data", get_experiment_name()) - - -def get_repo_base(): - """Get base directory of the repository, as an absolute path. - - Search upwards in the directory tree from the main script until a - directory with a subdirectory named ".hg" is found. 
- - Abort if the repo base cannot be found.""" - path = os.path.abspath(get_script_dir()) - while os.path.dirname(path) != path: - if os.path.exists(os.path.join(path, ".hg")): - return path - path = os.path.dirname(path) - sys.exit("repo base could not be found") - - -def is_running_on_cluster(): - node = platform.node() - return node.endswith(".scicore.unibas.ch") or node.endswith(".cluster.bc2.ch") - - -def is_test_run(): - return ARGS.test_run == "yes" or ( - ARGS.test_run == "auto" and not is_running_on_cluster()) - - -def get_algo_nick(revision, config_nick): - return "{revision}-{config_nick}".format(**locals()) - - -class IssueConfig(object): - """Hold information about a planner configuration. - - See FastDownwardExperiment.add_algorithm() for documentation of the - constructor's options. - - """ - def __init__(self, nick, component_options, - build_options=None, driver_options=None): - self.nick = nick - self.component_options = component_options - self.build_options = build_options - self.driver_options = driver_options - - -class IssueExperiment(FastDownwardExperiment): - """Subclass of FastDownwardExperiment with some convenience features.""" - - DEFAULT_TEST_SUITE = ["depot:p01.pddl", "gripper:prob01.pddl"] - - DEFAULT_TABLE_ATTRIBUTES = [ - "cost", - "coverage", - "error", - "evaluations", - "expansions", - "expansions_until_last_jump", - "generated", - "memory", - "planner_memory", - "planner_time", - "quality", - "run_dir", - "score_evaluations", - "score_expansions", - "score_generated", - "score_memory", - "score_search_time", - "score_total_time", - "search_time", - "total_time", - ] - - DEFAULT_SCATTER_PLOT_ATTRIBUTES = [ - "evaluations", - "expansions", - "expansions_until_last_jump", - "initial_h_value", - "memory", - "search_time", - "total_time", - ] - - PORTFOLIO_ATTRIBUTES = [ - "cost", - "coverage", - "error", - "plan_length", - "run_dir", - ] - - def __init__(self, revisions=None, configs=None, path=None, **kwargs): - """ - - You can 
either specify both *revisions* and *configs* or none - of them. If they are omitted, you will need to call - exp.add_algorithm() manually. - - If *revisions* is given, it must be a non-empty list of - revision identifiers, which specify which planner versions to - use in the experiment. The same versions are used for - translator, preprocessor and search. :: - - IssueExperiment(revisions=["issue123", "4b3d581643"], ...) - - If *configs* is given, it must be a non-empty list of - IssueConfig objects. :: - - IssueExperiment(..., configs=[ - IssueConfig("ff", ["--search", "eager_greedy(ff())"]), - IssueConfig( - "lama", [], - driver_options=["--alias", "seq-sat-lama-2011"]), - ]) - - If *path* is specified, it must be the path to where the - experiment should be built (e.g. - /home/john/experiments/issue123/exp01/). If omitted, the - experiment path is derived automatically from the main - script's filename. Example:: - - script = experiments/issue123/exp01.py --> - path = experiments/issue123/data/issue123-exp01/ - - """ - - path = path or get_data_dir() - - FastDownwardExperiment.__init__(self, path=path, **kwargs) - - if (revisions and not configs) or (not revisions and configs): - raise ValueError( - "please provide either both or none of revisions and configs") - - for rev in revisions: - for config in configs: - self.add_algorithm( - get_algo_nick(rev, config.nick), - get_repo_base(), - rev, - config.component_options, - build_options=config.build_options, - driver_options=config.driver_options) - - self._revisions = revisions - self._configs = configs - - @classmethod - def _is_portfolio(cls, config_nick): - return "fdss" in config_nick - - @classmethod - def get_supported_attributes(cls, config_nick, attributes): - if cls._is_portfolio(config_nick): - return [attr for attr in attributes - if attr in cls.PORTFOLIO_ATTRIBUTES] - return attributes - - def add_absolute_report_step(self, **kwargs): - """Add step that makes an absolute report. 
- - Absolute reports are useful for experiments that don't compare - revisions. - - The report is written to the experiment evaluation directory. - - All *kwargs* will be passed to the AbsoluteReport class. If the - keyword argument *attributes* is not specified, a default list - of attributes is used. :: - - exp.add_absolute_report_step(attributes=["coverage"]) - - """ - kwargs.setdefault("attributes", self.DEFAULT_TABLE_ATTRIBUTES) - report = AbsoluteReport(**kwargs) - outfile = os.path.join( - self.eval_dir, - get_experiment_name() + "." + report.output_format) - self.add_report(report, outfile=outfile) - self.add_step( - 'publish-absolute-report', subprocess.call, ['publish', outfile]) - - def add_comparison_table_step(self, **kwargs): - """Add a step that makes pairwise revision comparisons. - - Create comparative reports for all pairs of Fast Downward - revisions. Each report pairs up the runs of the same config and - lists the two absolute attribute values and their difference - for all attributes in kwargs["attributes"]. - - All *kwargs* will be passed to the CompareConfigsReport class. - If the keyword argument *attributes* is not specified, a - default list of attributes is used. 
:: - - exp.add_comparison_table_step(attributes=["coverage"]) - - """ - kwargs.setdefault("attributes", self.DEFAULT_TABLE_ATTRIBUTES) - - def make_comparison_tables(): - for rev1, rev2 in itertools.combinations(self._revisions, 2): - compared_configs = [] - for config in self._configs: - config_nick = config.nick - compared_configs.append( - ("%s-%s" % (rev1, config_nick), - "%s-%s" % (rev2, config_nick), - "Diff (%s)" % config_nick)) - report = ComparativeReport(compared_configs, **kwargs) - outfile = os.path.join( - self.eval_dir, - "%s-%s-%s-compare.%s" % ( - self.name, rev1, rev2, report.output_format)) - report(self.eval_dir, outfile) - - def publish_comparison_tables(): - for rev1, rev2 in itertools.combinations(self._revisions, 2): - outfile = os.path.join( - self.eval_dir, - "%s-%s-%s-compare.html" % (self.name, rev1, rev2)) - subprocess.call(["publish", outfile]) - - self.add_step("make-comparison-tables", make_comparison_tables) - self.add_step( - "publish-comparison-tables", publish_comparison_tables) - - def add_scatter_plot_step(self, relative=False, attributes=None): - """Add step creating (relative) scatter plots for all revision pairs. - - Create a scatter plot for each combination of attribute, - configuration and revisions pair. If *attributes* is not - specified, a list of common scatter plot attributes is used. - For portfolios all attributes except "cost", "coverage" and - "plan_length" will be ignored. 
:: - - exp.add_scatter_plot_step(attributes=["expansions"]) - - """ - if relative: - report_class = RelativeScatterPlotReport - scatter_dir = os.path.join(self.eval_dir, "scatter-relative") - step_name = "make-relative-scatter-plots" - else: - report_class = ScatterPlotReport - scatter_dir = os.path.join(self.eval_dir, "scatter-absolute") - step_name = "make-absolute-scatter-plots" - if attributes is None: - attributes = self.DEFAULT_SCATTER_PLOT_ATTRIBUTES - - def make_scatter_plot(config_nick, rev1, rev2, attribute): - name = "-".join([self.name, rev1, rev2, attribute, config_nick]) - print "Make scatter plot for", name - algo1 = get_algo_nick(rev1, config_nick) - algo2 = get_algo_nick(rev2, config_nick) - report = report_class( - filter_algorithm=[algo1, algo2], - attributes=[attribute], - get_category=lambda run1, run2: run1["domain"]) - report( - self.eval_dir, - os.path.join(scatter_dir, rev1 + "-" + rev2, name)) - - def make_scatter_plots(): - for config in self._configs: - for rev1, rev2 in itertools.combinations(self._revisions, 2): - for attribute in self.get_supported_attributes( - config.nick, attributes): - make_scatter_plot(config.nick, rev1, rev2, attribute) - - self.add_step(step_name, make_scatter_plots) diff --git a/experiments/issue903/parser.py b/experiments/issue903/parser.py deleted file mode 100755 index d5cd0df10f..0000000000 --- a/experiments/issue903/parser.py +++ /dev/null @@ -1,104 +0,0 @@ -#! 
/usr/bin/env python - -import logging -import re - -from lab.parser import Parser - - -class CommonParser(Parser): - def add_difference(self, diff, val1, val2): - def diff_func(content, props): - if props.get(val1) is None or props.get(val2) is None: - diff_val = None - else: - diff_val = props.get(val1) - props.get(val2) - props[diff] = diff_val - self.add_function(diff_func) - - def _get_flags(self, flags_string): - flags = 0 - for char in flags_string: - flags |= getattr(re, char) - return flags - - def add_repeated_pattern( - self, name, regex, file="run.log", required=False, type=int, - flags=""): - def find_all_occurences(content, props): - matches = re.findall(regex, content, flags=self._get_flags(flags)) - if required and not matches: - logging.error("Pattern {0} not found in file {1}".format(regex, file)) - props[name] = [type(m) for m in matches] - - self.add_function(find_all_occurences, file=file) - - def add_pattern(self, name, regex, file="run.log", required=False, type=int, flags=""): - Parser.add_pattern(self, name, regex, file=file, required=required, type=type, flags=flags) - - def add_bottom_up_pattern(self, name, regex, file="run.log", required=True, type=int, flags=""): - - def search_from_bottom(content, props): - reversed_content = "\n".join(reversed(content.splitlines())) - match = re.search(regex, reversed_content, flags=self._get_flags(flags)) - if required and not match: - logging.error("Pattern {0} not found in file {1}".format(regex, file)) - if match: - props[name] = type(match.group(1)) - - self.add_function(search_from_bottom, file=file) - - -def no_search(content, props): - if "search_start_time" not in props: - error = props.get("error") - if error is not None and error != "incomplete-search-found-no-plan": - props["error"] = "no-search-due-to-" + error - - -REFINEMENT_ATTRIBUTES = [ - ("time_for_finding_traces", r"Time for finding abstract traces: (.+)s"), - ("time_for_finding_flaws", r"Time for finding flaws: (.+)s"), - 
("time_for_splitting_states", r"Time for splitting states: (.+)s"), -] - - -def compute_total_times(content, props): - for attribute, pattern in REFINEMENT_ATTRIBUTES: - props["total_" + attribute] = sum(props[attribute]) - - -def add_time_analysis(content, props): - init_time = props.get("init_time") - if not init_time: - return - parts = [] - parts.append("{init_time:.2f}:".format(**props)) - for attribute, pattern in REFINEMENT_ATTRIBUTES: - time = props["total_" + attribute] - relative_time = time / init_time - print time, type(time) - parts.append("{:.2f} ({:.2f})".format(time, relative_time)) - - props["time_analysis"] = " ".join(parts) - - -def main(): - parser = CommonParser() - parser.add_pattern("search_start_time", r"\[g=0, 1 evaluated, 0 expanded, t=(.+)s, \d+ KB\]", type=float) - parser.add_pattern("search_start_memory", r"\[g=0, 1 evaluated, 0 expanded, t=.+s, (\d+) KB\]", type=int) - parser.add_pattern("init_time", r"Time for initializing additive Cartesian heuristic: (.+)s", type=float) - parser.add_pattern("cartesian_states", r"^Cartesian states: (\d+)\n", type=int) - - for attribute, pattern in REFINEMENT_ATTRIBUTES: - parser.add_repeated_pattern(attribute, pattern, type=float, required=False) - - parser.add_function(no_search) - parser.add_function(compute_total_times) - parser.add_function(add_time_analysis) - - parser.parse() - - -if __name__ == "__main__": - main() diff --git a/experiments/issue903/relativescatter.py b/experiments/issue903/relativescatter.py deleted file mode 100644 index d8033a3324..0000000000 --- a/experiments/issue903/relativescatter.py +++ /dev/null @@ -1,105 +0,0 @@ -# -*- coding: utf-8 -*- - -from collections import defaultdict - -from matplotlib import ticker - -from downward.reports.scatter import ScatterPlotReport -from downward.reports.plot import PlotReport, Matplotlib, MatplotlibPlot - - -# TODO: handle outliers - -# TODO: this is mostly copied from ScatterMatplotlib (scatter.py) -class 
RelativeScatterMatplotlib(Matplotlib): - @classmethod - def _plot(cls, report, axes, categories, styles): - # Display grid - axes.grid(b=True, linestyle='-', color='0.75') - - has_points = False - # Generate the scatter plots - for category, coords in sorted(categories.items()): - X, Y = zip(*coords) - axes.scatter(X, Y, s=42, label=category, **styles[category]) - if X and Y: - has_points = True - - if report.xscale == 'linear' or report.yscale == 'linear': - plot_size = report.missing_val * 1.01 - else: - plot_size = report.missing_val * 1.25 - - # make 5 ticks above and below 1 - yticks = [] - tick_step = report.ylim_top**(1/5.0) - for i in xrange(-5, 6): - yticks.append(tick_step**i) - axes.set_yticks(yticks) - axes.get_yaxis().set_major_formatter(ticker.ScalarFormatter()) - - axes.set_xlim(report.xlim_left or -1, report.xlim_right or plot_size) - axes.set_ylim(report.ylim_bottom or -1, report.ylim_top or plot_size) - - for axis in [axes.xaxis, axes.yaxis]: - MatplotlibPlot.change_axis_formatter( - axis, - report.missing_val if report.show_missing else None) - return has_points - - -class RelativeScatterPlotReport(ScatterPlotReport): - """ - Generate a scatter plot that shows a relative comparison of two - algorithms with regard to the given attribute. The attribute value - of algorithm 1 is shown on the x-axis and the relation to the value - of algorithm 2 on the y-axis. - """ - - def __init__(self, show_missing=True, get_category=None, **kwargs): - ScatterPlotReport.__init__(self, show_missing, get_category, **kwargs) - if self.output_format == 'tex': - raise "not supported" - else: - self.writer = RelativeScatterMatplotlib - - def _fill_categories(self, runs): - # We discard the *runs* parameter. 
- # Map category names to value tuples - categories = defaultdict(list) - self.ylim_bottom = 2 - self.ylim_top = 0.5 - self.xlim_left = float("inf") - for (domain, problem), runs in self.problem_runs.items(): - if len(runs) != 2: - continue - run1, run2 = runs - assert (run1['algorithm'] == self.algorithms[0] and - run2['algorithm'] == self.algorithms[1]) - val1 = run1.get(self.attribute) - val2 = run2.get(self.attribute) - if not val1 or not val2: - continue - category = self.get_category(run1, run2) - assert val1 > 0, (domain, problem, self.algorithms[0], val1) - assert val2 > 0, (domain, problem, self.algorithms[1], val2) - x = val1 - y = val2 / float(val1) - - categories[category].append((x, y)) - - self.ylim_top = max(self.ylim_top, y) - self.ylim_bottom = min(self.ylim_bottom, y) - self.xlim_left = min(self.xlim_left, x) - - # center around 1 - if self.ylim_bottom < 1: - self.ylim_top = max(self.ylim_top, 1 / float(self.ylim_bottom)) - if self.ylim_top > 1: - self.ylim_bottom = min(self.ylim_bottom, 1 / float(self.ylim_top)) - return categories - - def _set_scales(self, xscale, yscale): - # ScatterPlot uses log-scaling on the x-axis by default. - PlotReport._set_scales( - self, xscale or self.attribute.scale or 'log', 'log') diff --git a/experiments/issue903/v1.py b/experiments/issue903/v1.py deleted file mode 100755 index dbac97f6a6..0000000000 --- a/experiments/issue903/v1.py +++ /dev/null @@ -1,71 +0,0 @@ -#! 
/usr/bin/env python -# -*- coding: utf-8 -*- - -import itertools -import os - -from lab.environments import LocalEnvironment, BaselSlurmEnvironment - -from downward.reports.compare import ComparativeReport - -import common_setup -from common_setup import IssueConfig, IssueExperiment -from relativescatter import RelativeScatterPlotReport - -DIR = os.path.dirname(os.path.abspath(__file__)) -SCRIPT_NAME = os.path.splitext(os.path.basename(__file__))[0] -BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"] -REVISIONS = ["issue903-base", "issue903-v1"] -DRIVER_OPTIONS = ["--overall-time-limit", "5m"] -CONFIGS = [ - IssueConfig( - "cegar-original-1M", - ["--search", "astar(cegar(subtasks=[original()], max_transitions=1M, max_time=infinity))"], - driver_options=DRIVER_OPTIONS), - IssueConfig( - "cegar-lm-goals-1M", - ["--search", "astar(cegar(subtasks=[landmarks(), goals()], max_transitions=1M, max_time=infinity))"], - driver_options=DRIVER_OPTIONS), -] - -SUITE = common_setup.DEFAULT_OPTIMAL_SUITE -ENVIRONMENT = BaselSlurmEnvironment( - partition="infai_1", - email="jendrik.seipp@unibas.ch", - export=["PATH", "DOWNWARD_BENCHMARKS"]) - -if common_setup.is_test_run(): - SUITE = IssueExperiment.DEFAULT_TEST_SUITE - ENVIRONMENT = LocalEnvironment(processes=1) - -exp = IssueExperiment( - revisions=REVISIONS, - configs=CONFIGS, - environment=ENVIRONMENT, -) -exp.add_suite(BENCHMARKS_DIR, SUITE) - -exp.add_parser(exp.EXITCODE_PARSER) -exp.add_parser(exp.SINGLE_SEARCH_PARSER) -exp.add_parser(exp.PLANNER_PARSER) -exp.add_parser(os.path.join(DIR, "parser.py")) - -exp.add_step('build', exp.build) -exp.add_step('start', exp.start_runs) -exp.add_fetcher(name='fetch') - -REFINEMENT_ATTRIBUTES = [ - "time_for_finding_traces", - "time_for_finding_flaws", - "time_for_splitting_states", -] -attributes = ( - IssueExperiment.DEFAULT_TABLE_ATTRIBUTES + - ["search_start_memory", "init_time", "time_analysis"] + - ["total_" + attr for attr in REFINEMENT_ATTRIBUTES]) 
-#exp.add_absolute_report_step(attributes=attributes) -exp.add_comparison_table_step(attributes=attributes) -exp.add_scatter_plot_step( - relative=True, attributes=["init_time", "search_time", "total_time"]) - -exp.run_steps() diff --git a/experiments/issue905/common_setup.py b/experiments/issue905/common_setup.py deleted file mode 100644 index 5231e82f99..0000000000 --- a/experiments/issue905/common_setup.py +++ /dev/null @@ -1,393 +0,0 @@ -# -*- coding: utf-8 -*- - -import itertools -import os -import platform -import subprocess -import sys - -from lab.experiment import ARGPARSER -from lab import tools - -from downward.experiment import FastDownwardExperiment -from downward.reports.absolute import AbsoluteReport -from downward.reports.compare import ComparativeReport -from downward.reports.scatter import ScatterPlotReport - -from relativescatter import RelativeScatterPlotReport - - -def parse_args(): - ARGPARSER.add_argument( - "--test", - choices=["yes", "no", "auto"], - default="auto", - dest="test_run", - help="test experiment locally on a small suite if --test=yes or " - "--test=auto and we are not on a cluster") - return ARGPARSER.parse_args() - -ARGS = parse_args() - - -DEFAULT_OPTIMAL_SUITE = [ - 'agricola-opt18-strips', 'airport', 'barman-opt11-strips', - 'barman-opt14-strips', 'blocks', 'childsnack-opt14-strips', - 'data-network-opt18-strips', 'depot', 'driverlog', - 'elevators-opt08-strips', 'elevators-opt11-strips', - 'floortile-opt11-strips', 'floortile-opt14-strips', 'freecell', - 'ged-opt14-strips', 'grid', 'gripper', 'hiking-opt14-strips', - 'logistics00', 'logistics98', 'miconic', 'movie', 'mprime', - 'mystery', 'nomystery-opt11-strips', 'openstacks-opt08-strips', - 'openstacks-opt11-strips', 'openstacks-opt14-strips', - 'openstacks-strips', 'organic-synthesis-opt18-strips', - 'organic-synthesis-split-opt18-strips', 'parcprinter-08-strips', - 'parcprinter-opt11-strips', 'parking-opt11-strips', - 'parking-opt14-strips', 'pathways-noneg', 
'pegsol-08-strips', - 'pegsol-opt11-strips', 'petri-net-alignment-opt18-strips', - 'pipesworld-notankage', 'pipesworld-tankage', 'psr-small', 'rovers', - 'satellite', 'scanalyzer-08-strips', 'scanalyzer-opt11-strips', - 'snake-opt18-strips', 'sokoban-opt08-strips', - 'sokoban-opt11-strips', 'spider-opt18-strips', 'storage', - 'termes-opt18-strips', 'tetris-opt14-strips', - 'tidybot-opt11-strips', 'tidybot-opt14-strips', 'tpp', - 'transport-opt08-strips', 'transport-opt11-strips', - 'transport-opt14-strips', 'trucks-strips', 'visitall-opt11-strips', - 'visitall-opt14-strips', 'woodworking-opt08-strips', - 'woodworking-opt11-strips', 'zenotravel'] - -DEFAULT_SATISFICING_SUITE = [ - 'agricola-sat18-strips', 'airport', 'assembly', - 'barman-sat11-strips', 'barman-sat14-strips', 'blocks', - 'caldera-sat18-adl', 'caldera-split-sat18-adl', 'cavediving-14-adl', - 'childsnack-sat14-strips', 'citycar-sat14-adl', - 'data-network-sat18-strips', 'depot', 'driverlog', - 'elevators-sat08-strips', 'elevators-sat11-strips', - 'flashfill-sat18-adl', 'floortile-sat11-strips', - 'floortile-sat14-strips', 'freecell', 'ged-sat14-strips', 'grid', - 'gripper', 'hiking-sat14-strips', 'logistics00', 'logistics98', - 'maintenance-sat14-adl', 'miconic', 'miconic-fulladl', - 'miconic-simpleadl', 'movie', 'mprime', 'mystery', - 'nomystery-sat11-strips', 'nurikabe-sat18-adl', 'openstacks', - 'openstacks-sat08-adl', 'openstacks-sat08-strips', - 'openstacks-sat11-strips', 'openstacks-sat14-strips', - 'openstacks-strips', 'optical-telegraphs', - 'organic-synthesis-sat18-strips', - 'organic-synthesis-split-sat18-strips', 'parcprinter-08-strips', - 'parcprinter-sat11-strips', 'parking-sat11-strips', - 'parking-sat14-strips', 'pathways', 'pathways-noneg', - 'pegsol-08-strips', 'pegsol-sat11-strips', 'philosophers', - 'pipesworld-notankage', 'pipesworld-tankage', 'psr-large', - 'psr-middle', 'psr-small', 'rovers', 'satellite', - 'scanalyzer-08-strips', 'scanalyzer-sat11-strips', 'schedule', - 
'settlers-sat18-adl', 'snake-sat18-strips', 'sokoban-sat08-strips', - 'sokoban-sat11-strips', 'spider-sat18-strips', 'storage', - 'termes-sat18-strips', 'tetris-sat14-strips', - 'thoughtful-sat14-strips', 'tidybot-sat11-strips', 'tpp', - 'transport-sat08-strips', 'transport-sat11-strips', - 'transport-sat14-strips', 'trucks', 'trucks-strips', - 'visitall-sat11-strips', 'visitall-sat14-strips', - 'woodworking-sat08-strips', 'woodworking-sat11-strips', - 'zenotravel'] - - -def get_script(): - """Get file name of main script.""" - return tools.get_script_path() - - -def get_script_dir(): - """Get directory of main script. - - Usually a relative directory (depends on how it was called by the user.)""" - return os.path.dirname(get_script()) - - -def get_experiment_name(): - """Get name for experiment. - - Derived from the absolute filename of the main script, e.g. - "/ham/spam/eggs.py" => "spam-eggs".""" - script = os.path.abspath(get_script()) - script_dir = os.path.basename(os.path.dirname(script)) - script_base = os.path.splitext(os.path.basename(script))[0] - return "%s-%s" % (script_dir, script_base) - - -def get_data_dir(): - """Get data dir for the experiment. - - This is the subdirectory "data" of the directory containing - the main script.""" - return os.path.join(get_script_dir(), "data", get_experiment_name()) - - -def get_repo_base(): - """Get base directory of the repository, as an absolute path. - - Search upwards in the directory tree from the main script until a - directory with a subdirectory named ".hg" is found. 
- - Abort if the repo base cannot be found.""" - path = os.path.abspath(get_script_dir()) - while os.path.dirname(path) != path: - if os.path.exists(os.path.join(path, ".hg")): - return path - path = os.path.dirname(path) - sys.exit("repo base could not be found") - - -def is_running_on_cluster(): - node = platform.node() - return node.endswith(".scicore.unibas.ch") or node.endswith(".cluster.bc2.ch") - - -def is_test_run(): - return ARGS.test_run == "yes" or ( - ARGS.test_run == "auto" and not is_running_on_cluster()) - - -def get_algo_nick(revision, config_nick): - return "{revision}-{config_nick}".format(**locals()) - - -class IssueConfig(object): - """Hold information about a planner configuration. - - See FastDownwardExperiment.add_algorithm() for documentation of the - constructor's options. - - """ - def __init__(self, nick, component_options, - build_options=None, driver_options=None): - self.nick = nick - self.component_options = component_options - self.build_options = build_options - self.driver_options = driver_options - - -class IssueExperiment(FastDownwardExperiment): - """Subclass of FastDownwardExperiment with some convenience features.""" - - DEFAULT_TEST_SUITE = ["depot:p01.pddl", "gripper:prob01.pddl"] - - DEFAULT_TABLE_ATTRIBUTES = [ - "cost", - "coverage", - "error", - "evaluations", - "expansions", - "expansions_until_last_jump", - "generated", - "memory", - "planner_memory", - "planner_time", - "quality", - "run_dir", - "score_evaluations", - "score_expansions", - "score_generated", - "score_memory", - "score_search_time", - "score_total_time", - "search_time", - "total_time", - ] - - DEFAULT_SCATTER_PLOT_ATTRIBUTES = [ - "evaluations", - "expansions", - "expansions_until_last_jump", - "initial_h_value", - "memory", - "search_time", - "total_time", - ] - - PORTFOLIO_ATTRIBUTES = [ - "cost", - "coverage", - "error", - "plan_length", - "run_dir", - ] - - def __init__(self, revisions=None, configs=None, path=None, **kwargs): - """ - - You can 
either specify both *revisions* and *configs* or none - of them. If they are omitted, you will need to call - exp.add_algorithm() manually. - - If *revisions* is given, it must be a non-empty list of - revision identifiers, which specify which planner versions to - use in the experiment. The same versions are used for - translator, preprocessor and search. :: - - IssueExperiment(revisions=["issue123", "4b3d581643"], ...) - - If *configs* is given, it must be a non-empty list of - IssueConfig objects. :: - - IssueExperiment(..., configs=[ - IssueConfig("ff", ["--search", "eager_greedy(ff())"]), - IssueConfig( - "lama", [], - driver_options=["--alias", "seq-sat-lama-2011"]), - ]) - - If *path* is specified, it must be the path to where the - experiment should be built (e.g. - /home/john/experiments/issue123/exp01/). If omitted, the - experiment path is derived automatically from the main - script's filename. Example:: - - script = experiments/issue123/exp01.py --> - path = experiments/issue123/data/issue123-exp01/ - - """ - - path = path or get_data_dir() - - FastDownwardExperiment.__init__(self, path=path, **kwargs) - - if (revisions and not configs) or (not revisions and configs): - raise ValueError( - "please provide either both or none of revisions and configs") - - for rev in revisions: - for config in configs: - self.add_algorithm( - get_algo_nick(rev, config.nick), - get_repo_base(), - rev, - config.component_options, - build_options=config.build_options, - driver_options=config.driver_options) - - self._revisions = revisions - self._configs = configs - - @classmethod - def _is_portfolio(cls, config_nick): - return "fdss" in config_nick - - @classmethod - def get_supported_attributes(cls, config_nick, attributes): - if cls._is_portfolio(config_nick): - return [attr for attr in attributes - if attr in cls.PORTFOLIO_ATTRIBUTES] - return attributes - - def add_absolute_report_step(self, **kwargs): - """Add step that makes an absolute report. 
- - Absolute reports are useful for experiments that don't compare - revisions. - - The report is written to the experiment evaluation directory. - - All *kwargs* will be passed to the AbsoluteReport class. If the - keyword argument *attributes* is not specified, a default list - of attributes is used. :: - - exp.add_absolute_report_step(attributes=["coverage"]) - - """ - kwargs.setdefault("attributes", self.DEFAULT_TABLE_ATTRIBUTES) - report = AbsoluteReport(**kwargs) - outfile = os.path.join( - self.eval_dir, - get_experiment_name() + "." + report.output_format) - self.add_report(report, outfile=outfile) - self.add_step( - 'publish-absolute-report', subprocess.call, ['publish', outfile]) - - def add_comparison_table_step(self, **kwargs): - """Add a step that makes pairwise revision comparisons. - - Create comparative reports for all pairs of Fast Downward - revisions. Each report pairs up the runs of the same config and - lists the two absolute attribute values and their difference - for all attributes in kwargs["attributes"]. - - All *kwargs* will be passed to the CompareConfigsReport class. - If the keyword argument *attributes* is not specified, a - default list of attributes is used. 
:: - - exp.add_comparison_table_step(attributes=["coverage"]) - - """ - kwargs.setdefault("attributes", self.DEFAULT_TABLE_ATTRIBUTES) - - def make_comparison_tables(): - for rev1, rev2 in itertools.combinations(self._revisions, 2): - compared_configs = [] - for config in self._configs: - config_nick = config.nick - compared_configs.append( - ("%s-%s" % (rev1, config_nick), - "%s-%s" % (rev2, config_nick), - "Diff (%s)" % config_nick)) - report = ComparativeReport(compared_configs, **kwargs) - outfile = os.path.join( - self.eval_dir, - "%s-%s-%s-compare.%s" % ( - self.name, rev1, rev2, report.output_format)) - report(self.eval_dir, outfile) - - def publish_comparison_tables(): - for rev1, rev2 in itertools.combinations(self._revisions, 2): - outfile = os.path.join( - self.eval_dir, - "%s-%s-%s-compare.html" % (self.name, rev1, rev2)) - subprocess.call(["publish", outfile]) - - self.add_step("make-comparison-tables", make_comparison_tables) - self.add_step( - "publish-comparison-tables", publish_comparison_tables) - - def add_scatter_plot_step(self, relative=False, attributes=None): - """Add step creating (relative) scatter plots for all revision pairs. - - Create a scatter plot for each combination of attribute, - configuration and revisions pair. If *attributes* is not - specified, a list of common scatter plot attributes is used. - For portfolios all attributes except "cost", "coverage" and - "plan_length" will be ignored. 
:: - - exp.add_scatter_plot_step(attributes=["expansions"]) - - """ - if relative: - report_class = RelativeScatterPlotReport - scatter_dir = os.path.join(self.eval_dir, "scatter-relative") - step_name = "make-relative-scatter-plots" - else: - report_class = ScatterPlotReport - scatter_dir = os.path.join(self.eval_dir, "scatter-absolute") - step_name = "make-absolute-scatter-plots" - if attributes is None: - attributes = self.DEFAULT_SCATTER_PLOT_ATTRIBUTES - - def make_scatter_plot(config_nick, rev1, rev2, attribute): - name = "-".join([self.name, rev1, rev2, attribute, config_nick]) - print "Make scatter plot for", name - algo1 = get_algo_nick(rev1, config_nick) - algo2 = get_algo_nick(rev2, config_nick) - report = report_class( - filter_algorithm=[algo1, algo2], - attributes=[attribute], - get_category=lambda run1, run2: run1["domain"]) - report( - self.eval_dir, - os.path.join(scatter_dir, rev1 + "-" + rev2, name)) - - def make_scatter_plots(): - for config in self._configs: - for rev1, rev2 in itertools.combinations(self._revisions, 2): - for attribute in self.get_supported_attributes( - config.nick, attributes): - make_scatter_plot(config.nick, rev1, rev2, attribute) - - self.add_step(step_name, make_scatter_plots) diff --git a/experiments/issue905/parser.py b/experiments/issue905/parser.py deleted file mode 100755 index d5cd0df10f..0000000000 --- a/experiments/issue905/parser.py +++ /dev/null @@ -1,104 +0,0 @@ -#! 
/usr/bin/env python - -import logging -import re - -from lab.parser import Parser - - -class CommonParser(Parser): - def add_difference(self, diff, val1, val2): - def diff_func(content, props): - if props.get(val1) is None or props.get(val2) is None: - diff_val = None - else: - diff_val = props.get(val1) - props.get(val2) - props[diff] = diff_val - self.add_function(diff_func) - - def _get_flags(self, flags_string): - flags = 0 - for char in flags_string: - flags |= getattr(re, char) - return flags - - def add_repeated_pattern( - self, name, regex, file="run.log", required=False, type=int, - flags=""): - def find_all_occurences(content, props): - matches = re.findall(regex, content, flags=self._get_flags(flags)) - if required and not matches: - logging.error("Pattern {0} not found in file {1}".format(regex, file)) - props[name] = [type(m) for m in matches] - - self.add_function(find_all_occurences, file=file) - - def add_pattern(self, name, regex, file="run.log", required=False, type=int, flags=""): - Parser.add_pattern(self, name, regex, file=file, required=required, type=type, flags=flags) - - def add_bottom_up_pattern(self, name, regex, file="run.log", required=True, type=int, flags=""): - - def search_from_bottom(content, props): - reversed_content = "\n".join(reversed(content.splitlines())) - match = re.search(regex, reversed_content, flags=self._get_flags(flags)) - if required and not match: - logging.error("Pattern {0} not found in file {1}".format(regex, file)) - if match: - props[name] = type(match.group(1)) - - self.add_function(search_from_bottom, file=file) - - -def no_search(content, props): - if "search_start_time" not in props: - error = props.get("error") - if error is not None and error != "incomplete-search-found-no-plan": - props["error"] = "no-search-due-to-" + error - - -REFINEMENT_ATTRIBUTES = [ - ("time_for_finding_traces", r"Time for finding abstract traces: (.+)s"), - ("time_for_finding_flaws", r"Time for finding flaws: (.+)s"), - 
("time_for_splitting_states", r"Time for splitting states: (.+)s"), -] - - -def compute_total_times(content, props): - for attribute, pattern in REFINEMENT_ATTRIBUTES: - props["total_" + attribute] = sum(props[attribute]) - - -def add_time_analysis(content, props): - init_time = props.get("init_time") - if not init_time: - return - parts = [] - parts.append("{init_time:.2f}:".format(**props)) - for attribute, pattern in REFINEMENT_ATTRIBUTES: - time = props["total_" + attribute] - relative_time = time / init_time - print time, type(time) - parts.append("{:.2f} ({:.2f})".format(time, relative_time)) - - props["time_analysis"] = " ".join(parts) - - -def main(): - parser = CommonParser() - parser.add_pattern("search_start_time", r"\[g=0, 1 evaluated, 0 expanded, t=(.+)s, \d+ KB\]", type=float) - parser.add_pattern("search_start_memory", r"\[g=0, 1 evaluated, 0 expanded, t=.+s, (\d+) KB\]", type=int) - parser.add_pattern("init_time", r"Time for initializing additive Cartesian heuristic: (.+)s", type=float) - parser.add_pattern("cartesian_states", r"^Cartesian states: (\d+)\n", type=int) - - for attribute, pattern in REFINEMENT_ATTRIBUTES: - parser.add_repeated_pattern(attribute, pattern, type=float, required=False) - - parser.add_function(no_search) - parser.add_function(compute_total_times) - parser.add_function(add_time_analysis) - - parser.parse() - - -if __name__ == "__main__": - main() diff --git a/experiments/issue905/relativescatter.py b/experiments/issue905/relativescatter.py deleted file mode 100644 index d8033a3324..0000000000 --- a/experiments/issue905/relativescatter.py +++ /dev/null @@ -1,105 +0,0 @@ -# -*- coding: utf-8 -*- - -from collections import defaultdict - -from matplotlib import ticker - -from downward.reports.scatter import ScatterPlotReport -from downward.reports.plot import PlotReport, Matplotlib, MatplotlibPlot - - -# TODO: handle outliers - -# TODO: this is mostly copied from ScatterMatplotlib (scatter.py) -class 
RelativeScatterMatplotlib(Matplotlib): - @classmethod - def _plot(cls, report, axes, categories, styles): - # Display grid - axes.grid(b=True, linestyle='-', color='0.75') - - has_points = False - # Generate the scatter plots - for category, coords in sorted(categories.items()): - X, Y = zip(*coords) - axes.scatter(X, Y, s=42, label=category, **styles[category]) - if X and Y: - has_points = True - - if report.xscale == 'linear' or report.yscale == 'linear': - plot_size = report.missing_val * 1.01 - else: - plot_size = report.missing_val * 1.25 - - # make 5 ticks above and below 1 - yticks = [] - tick_step = report.ylim_top**(1/5.0) - for i in xrange(-5, 6): - yticks.append(tick_step**i) - axes.set_yticks(yticks) - axes.get_yaxis().set_major_formatter(ticker.ScalarFormatter()) - - axes.set_xlim(report.xlim_left or -1, report.xlim_right or plot_size) - axes.set_ylim(report.ylim_bottom or -1, report.ylim_top or plot_size) - - for axis in [axes.xaxis, axes.yaxis]: - MatplotlibPlot.change_axis_formatter( - axis, - report.missing_val if report.show_missing else None) - return has_points - - -class RelativeScatterPlotReport(ScatterPlotReport): - """ - Generate a scatter plot that shows a relative comparison of two - algorithms with regard to the given attribute. The attribute value - of algorithm 1 is shown on the x-axis and the relation to the value - of algorithm 2 on the y-axis. - """ - - def __init__(self, show_missing=True, get_category=None, **kwargs): - ScatterPlotReport.__init__(self, show_missing, get_category, **kwargs) - if self.output_format == 'tex': - raise "not supported" - else: - self.writer = RelativeScatterMatplotlib - - def _fill_categories(self, runs): - # We discard the *runs* parameter. 
- # Map category names to value tuples - categories = defaultdict(list) - self.ylim_bottom = 2 - self.ylim_top = 0.5 - self.xlim_left = float("inf") - for (domain, problem), runs in self.problem_runs.items(): - if len(runs) != 2: - continue - run1, run2 = runs - assert (run1['algorithm'] == self.algorithms[0] and - run2['algorithm'] == self.algorithms[1]) - val1 = run1.get(self.attribute) - val2 = run2.get(self.attribute) - if not val1 or not val2: - continue - category = self.get_category(run1, run2) - assert val1 > 0, (domain, problem, self.algorithms[0], val1) - assert val2 > 0, (domain, problem, self.algorithms[1], val2) - x = val1 - y = val2 / float(val1) - - categories[category].append((x, y)) - - self.ylim_top = max(self.ylim_top, y) - self.ylim_bottom = min(self.ylim_bottom, y) - self.xlim_left = min(self.xlim_left, x) - - # center around 1 - if self.ylim_bottom < 1: - self.ylim_top = max(self.ylim_top, 1 / float(self.ylim_bottom)) - if self.ylim_top > 1: - self.ylim_bottom = min(self.ylim_bottom, 1 / float(self.ylim_top)) - return categories - - def _set_scales(self, xscale, yscale): - # ScatterPlot uses log-scaling on the x-axis by default. - PlotReport._set_scales( - self, xscale or self.attribute.scale or 'log', 'log') diff --git a/experiments/issue905/v1.py b/experiments/issue905/v1.py deleted file mode 100755 index a3be2576ca..0000000000 --- a/experiments/issue905/v1.py +++ /dev/null @@ -1,65 +0,0 @@ -#! 
/usr/bin/env python -# -*- coding: utf-8 -*- - -import itertools -import os - -from lab.environments import LocalEnvironment, BaselSlurmEnvironment - -from downward.reports.compare import ComparativeReport - -import common_setup -from common_setup import IssueConfig, IssueExperiment -from relativescatter import RelativeScatterPlotReport - -DIR = os.path.dirname(os.path.abspath(__file__)) -SCRIPT_NAME = os.path.splitext(os.path.basename(__file__))[0] -BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"] -REVISIONS = ["issue905-base", "issue905-v1"] -CONFIGS = [ - IssueConfig("cegar-original-1M", ["--search", "astar(cegar(subtasks=[original()], max_transitions=1M, max_time=infinity))"]), - IssueConfig("cegar-lm-goals-1M", ["--search", "astar(cegar(subtasks=[landmarks(), goals()], max_transitions=1M, max_time=infinity))"]), - IssueConfig("cegar-original-900s", ["--search", "astar(cegar(subtasks=[original()], max_transitions=infinity, max_time=900))"]), - IssueConfig("cegar-lm-goals-900s", ["--search", "astar(cegar(subtasks=[landmarks(), goals()], max_transitions=infinity, max_time=900))"]), -] - -SUITE = common_setup.DEFAULT_OPTIMAL_SUITE -ENVIRONMENT = BaselSlurmEnvironment( - partition="infai_1", - email="jendrik.seipp@unibas.ch", - export=["PATH", "DOWNWARD_BENCHMARKS"]) - -if common_setup.is_test_run(): - SUITE = IssueExperiment.DEFAULT_TEST_SUITE - ENVIRONMENT = LocalEnvironment(processes=1) - -exp = IssueExperiment( - revisions=REVISIONS, - configs=CONFIGS, - environment=ENVIRONMENT, -) -exp.add_suite(BENCHMARKS_DIR, SUITE) - -exp.add_parser(exp.EXITCODE_PARSER) -exp.add_parser(exp.SINGLE_SEARCH_PARSER) -exp.add_parser(exp.PLANNER_PARSER) -exp.add_parser(os.path.join(DIR, "parser.py")) - -exp.add_step('build', exp.build) -exp.add_step('start', exp.start_runs) -exp.add_fetcher(name='fetch') - -REFINEMENT_ATTRIBUTES = [ - "time_for_finding_traces", - "time_for_finding_flaws", - "time_for_splitting_states", -] -attributes = ( - 
IssueExperiment.DEFAULT_TABLE_ATTRIBUTES + - ["search_start_memory", "init_time", "time_analysis"] + - ["total_" + attr for attr in REFINEMENT_ATTRIBUTES]) -#exp.add_absolute_report_step(attributes=attributes) -exp.add_comparison_table_step(attributes=attributes) -exp.add_scatter_plot_step(relative=True, attributes=["init_time", "search_time", "total_time"]) - -exp.run_steps() diff --git a/experiments/issue908/common_setup.py b/experiments/issue908/common_setup.py deleted file mode 100644 index 5231e82f99..0000000000 --- a/experiments/issue908/common_setup.py +++ /dev/null @@ -1,393 +0,0 @@ -# -*- coding: utf-8 -*- - -import itertools -import os -import platform -import subprocess -import sys - -from lab.experiment import ARGPARSER -from lab import tools - -from downward.experiment import FastDownwardExperiment -from downward.reports.absolute import AbsoluteReport -from downward.reports.compare import ComparativeReport -from downward.reports.scatter import ScatterPlotReport - -from relativescatter import RelativeScatterPlotReport - - -def parse_args(): - ARGPARSER.add_argument( - "--test", - choices=["yes", "no", "auto"], - default="auto", - dest="test_run", - help="test experiment locally on a small suite if --test=yes or " - "--test=auto and we are not on a cluster") - return ARGPARSER.parse_args() - -ARGS = parse_args() - - -DEFAULT_OPTIMAL_SUITE = [ - 'agricola-opt18-strips', 'airport', 'barman-opt11-strips', - 'barman-opt14-strips', 'blocks', 'childsnack-opt14-strips', - 'data-network-opt18-strips', 'depot', 'driverlog', - 'elevators-opt08-strips', 'elevators-opt11-strips', - 'floortile-opt11-strips', 'floortile-opt14-strips', 'freecell', - 'ged-opt14-strips', 'grid', 'gripper', 'hiking-opt14-strips', - 'logistics00', 'logistics98', 'miconic', 'movie', 'mprime', - 'mystery', 'nomystery-opt11-strips', 'openstacks-opt08-strips', - 'openstacks-opt11-strips', 'openstacks-opt14-strips', - 'openstacks-strips', 'organic-synthesis-opt18-strips', - 
'organic-synthesis-split-opt18-strips', 'parcprinter-08-strips', - 'parcprinter-opt11-strips', 'parking-opt11-strips', - 'parking-opt14-strips', 'pathways-noneg', 'pegsol-08-strips', - 'pegsol-opt11-strips', 'petri-net-alignment-opt18-strips', - 'pipesworld-notankage', 'pipesworld-tankage', 'psr-small', 'rovers', - 'satellite', 'scanalyzer-08-strips', 'scanalyzer-opt11-strips', - 'snake-opt18-strips', 'sokoban-opt08-strips', - 'sokoban-opt11-strips', 'spider-opt18-strips', 'storage', - 'termes-opt18-strips', 'tetris-opt14-strips', - 'tidybot-opt11-strips', 'tidybot-opt14-strips', 'tpp', - 'transport-opt08-strips', 'transport-opt11-strips', - 'transport-opt14-strips', 'trucks-strips', 'visitall-opt11-strips', - 'visitall-opt14-strips', 'woodworking-opt08-strips', - 'woodworking-opt11-strips', 'zenotravel'] - -DEFAULT_SATISFICING_SUITE = [ - 'agricola-sat18-strips', 'airport', 'assembly', - 'barman-sat11-strips', 'barman-sat14-strips', 'blocks', - 'caldera-sat18-adl', 'caldera-split-sat18-adl', 'cavediving-14-adl', - 'childsnack-sat14-strips', 'citycar-sat14-adl', - 'data-network-sat18-strips', 'depot', 'driverlog', - 'elevators-sat08-strips', 'elevators-sat11-strips', - 'flashfill-sat18-adl', 'floortile-sat11-strips', - 'floortile-sat14-strips', 'freecell', 'ged-sat14-strips', 'grid', - 'gripper', 'hiking-sat14-strips', 'logistics00', 'logistics98', - 'maintenance-sat14-adl', 'miconic', 'miconic-fulladl', - 'miconic-simpleadl', 'movie', 'mprime', 'mystery', - 'nomystery-sat11-strips', 'nurikabe-sat18-adl', 'openstacks', - 'openstacks-sat08-adl', 'openstacks-sat08-strips', - 'openstacks-sat11-strips', 'openstacks-sat14-strips', - 'openstacks-strips', 'optical-telegraphs', - 'organic-synthesis-sat18-strips', - 'organic-synthesis-split-sat18-strips', 'parcprinter-08-strips', - 'parcprinter-sat11-strips', 'parking-sat11-strips', - 'parking-sat14-strips', 'pathways', 'pathways-noneg', - 'pegsol-08-strips', 'pegsol-sat11-strips', 'philosophers', - 'pipesworld-notankage', 
'pipesworld-tankage', 'psr-large', - 'psr-middle', 'psr-small', 'rovers', 'satellite', - 'scanalyzer-08-strips', 'scanalyzer-sat11-strips', 'schedule', - 'settlers-sat18-adl', 'snake-sat18-strips', 'sokoban-sat08-strips', - 'sokoban-sat11-strips', 'spider-sat18-strips', 'storage', - 'termes-sat18-strips', 'tetris-sat14-strips', - 'thoughtful-sat14-strips', 'tidybot-sat11-strips', 'tpp', - 'transport-sat08-strips', 'transport-sat11-strips', - 'transport-sat14-strips', 'trucks', 'trucks-strips', - 'visitall-sat11-strips', 'visitall-sat14-strips', - 'woodworking-sat08-strips', 'woodworking-sat11-strips', - 'zenotravel'] - - -def get_script(): - """Get file name of main script.""" - return tools.get_script_path() - - -def get_script_dir(): - """Get directory of main script. - - Usually a relative directory (depends on how it was called by the user.)""" - return os.path.dirname(get_script()) - - -def get_experiment_name(): - """Get name for experiment. - - Derived from the absolute filename of the main script, e.g. - "/ham/spam/eggs.py" => "spam-eggs".""" - script = os.path.abspath(get_script()) - script_dir = os.path.basename(os.path.dirname(script)) - script_base = os.path.splitext(os.path.basename(script))[0] - return "%s-%s" % (script_dir, script_base) - - -def get_data_dir(): - """Get data dir for the experiment. - - This is the subdirectory "data" of the directory containing - the main script.""" - return os.path.join(get_script_dir(), "data", get_experiment_name()) - - -def get_repo_base(): - """Get base directory of the repository, as an absolute path. - - Search upwards in the directory tree from the main script until a - directory with a subdirectory named ".hg" is found. 
- - Abort if the repo base cannot be found.""" - path = os.path.abspath(get_script_dir()) - while os.path.dirname(path) != path: - if os.path.exists(os.path.join(path, ".hg")): - return path - path = os.path.dirname(path) - sys.exit("repo base could not be found") - - -def is_running_on_cluster(): - node = platform.node() - return node.endswith(".scicore.unibas.ch") or node.endswith(".cluster.bc2.ch") - - -def is_test_run(): - return ARGS.test_run == "yes" or ( - ARGS.test_run == "auto" and not is_running_on_cluster()) - - -def get_algo_nick(revision, config_nick): - return "{revision}-{config_nick}".format(**locals()) - - -class IssueConfig(object): - """Hold information about a planner configuration. - - See FastDownwardExperiment.add_algorithm() for documentation of the - constructor's options. - - """ - def __init__(self, nick, component_options, - build_options=None, driver_options=None): - self.nick = nick - self.component_options = component_options - self.build_options = build_options - self.driver_options = driver_options - - -class IssueExperiment(FastDownwardExperiment): - """Subclass of FastDownwardExperiment with some convenience features.""" - - DEFAULT_TEST_SUITE = ["depot:p01.pddl", "gripper:prob01.pddl"] - - DEFAULT_TABLE_ATTRIBUTES = [ - "cost", - "coverage", - "error", - "evaluations", - "expansions", - "expansions_until_last_jump", - "generated", - "memory", - "planner_memory", - "planner_time", - "quality", - "run_dir", - "score_evaluations", - "score_expansions", - "score_generated", - "score_memory", - "score_search_time", - "score_total_time", - "search_time", - "total_time", - ] - - DEFAULT_SCATTER_PLOT_ATTRIBUTES = [ - "evaluations", - "expansions", - "expansions_until_last_jump", - "initial_h_value", - "memory", - "search_time", - "total_time", - ] - - PORTFOLIO_ATTRIBUTES = [ - "cost", - "coverage", - "error", - "plan_length", - "run_dir", - ] - - def __init__(self, revisions=None, configs=None, path=None, **kwargs): - """ - - You can 
either specify both *revisions* and *configs* or none - of them. If they are omitted, you will need to call - exp.add_algorithm() manually. - - If *revisions* is given, it must be a non-empty list of - revision identifiers, which specify which planner versions to - use in the experiment. The same versions are used for - translator, preprocessor and search. :: - - IssueExperiment(revisions=["issue123", "4b3d581643"], ...) - - If *configs* is given, it must be a non-empty list of - IssueConfig objects. :: - - IssueExperiment(..., configs=[ - IssueConfig("ff", ["--search", "eager_greedy(ff())"]), - IssueConfig( - "lama", [], - driver_options=["--alias", "seq-sat-lama-2011"]), - ]) - - If *path* is specified, it must be the path to where the - experiment should be built (e.g. - /home/john/experiments/issue123/exp01/). If omitted, the - experiment path is derived automatically from the main - script's filename. Example:: - - script = experiments/issue123/exp01.py --> - path = experiments/issue123/data/issue123-exp01/ - - """ - - path = path or get_data_dir() - - FastDownwardExperiment.__init__(self, path=path, **kwargs) - - if (revisions and not configs) or (not revisions and configs): - raise ValueError( - "please provide either both or none of revisions and configs") - - for rev in revisions: - for config in configs: - self.add_algorithm( - get_algo_nick(rev, config.nick), - get_repo_base(), - rev, - config.component_options, - build_options=config.build_options, - driver_options=config.driver_options) - - self._revisions = revisions - self._configs = configs - - @classmethod - def _is_portfolio(cls, config_nick): - return "fdss" in config_nick - - @classmethod - def get_supported_attributes(cls, config_nick, attributes): - if cls._is_portfolio(config_nick): - return [attr for attr in attributes - if attr in cls.PORTFOLIO_ATTRIBUTES] - return attributes - - def add_absolute_report_step(self, **kwargs): - """Add step that makes an absolute report. 
- - Absolute reports are useful for experiments that don't compare - revisions. - - The report is written to the experiment evaluation directory. - - All *kwargs* will be passed to the AbsoluteReport class. If the - keyword argument *attributes* is not specified, a default list - of attributes is used. :: - - exp.add_absolute_report_step(attributes=["coverage"]) - - """ - kwargs.setdefault("attributes", self.DEFAULT_TABLE_ATTRIBUTES) - report = AbsoluteReport(**kwargs) - outfile = os.path.join( - self.eval_dir, - get_experiment_name() + "." + report.output_format) - self.add_report(report, outfile=outfile) - self.add_step( - 'publish-absolute-report', subprocess.call, ['publish', outfile]) - - def add_comparison_table_step(self, **kwargs): - """Add a step that makes pairwise revision comparisons. - - Create comparative reports for all pairs of Fast Downward - revisions. Each report pairs up the runs of the same config and - lists the two absolute attribute values and their difference - for all attributes in kwargs["attributes"]. - - All *kwargs* will be passed to the CompareConfigsReport class. - If the keyword argument *attributes* is not specified, a - default list of attributes is used. 
:: - - exp.add_comparison_table_step(attributes=["coverage"]) - - """ - kwargs.setdefault("attributes", self.DEFAULT_TABLE_ATTRIBUTES) - - def make_comparison_tables(): - for rev1, rev2 in itertools.combinations(self._revisions, 2): - compared_configs = [] - for config in self._configs: - config_nick = config.nick - compared_configs.append( - ("%s-%s" % (rev1, config_nick), - "%s-%s" % (rev2, config_nick), - "Diff (%s)" % config_nick)) - report = ComparativeReport(compared_configs, **kwargs) - outfile = os.path.join( - self.eval_dir, - "%s-%s-%s-compare.%s" % ( - self.name, rev1, rev2, report.output_format)) - report(self.eval_dir, outfile) - - def publish_comparison_tables(): - for rev1, rev2 in itertools.combinations(self._revisions, 2): - outfile = os.path.join( - self.eval_dir, - "%s-%s-%s-compare.html" % (self.name, rev1, rev2)) - subprocess.call(["publish", outfile]) - - self.add_step("make-comparison-tables", make_comparison_tables) - self.add_step( - "publish-comparison-tables", publish_comparison_tables) - - def add_scatter_plot_step(self, relative=False, attributes=None): - """Add step creating (relative) scatter plots for all revision pairs. - - Create a scatter plot for each combination of attribute, - configuration and revisions pair. If *attributes* is not - specified, a list of common scatter plot attributes is used. - For portfolios all attributes except "cost", "coverage" and - "plan_length" will be ignored. 
:: - - exp.add_scatter_plot_step(attributes=["expansions"]) - - """ - if relative: - report_class = RelativeScatterPlotReport - scatter_dir = os.path.join(self.eval_dir, "scatter-relative") - step_name = "make-relative-scatter-plots" - else: - report_class = ScatterPlotReport - scatter_dir = os.path.join(self.eval_dir, "scatter-absolute") - step_name = "make-absolute-scatter-plots" - if attributes is None: - attributes = self.DEFAULT_SCATTER_PLOT_ATTRIBUTES - - def make_scatter_plot(config_nick, rev1, rev2, attribute): - name = "-".join([self.name, rev1, rev2, attribute, config_nick]) - print "Make scatter plot for", name - algo1 = get_algo_nick(rev1, config_nick) - algo2 = get_algo_nick(rev2, config_nick) - report = report_class( - filter_algorithm=[algo1, algo2], - attributes=[attribute], - get_category=lambda run1, run2: run1["domain"]) - report( - self.eval_dir, - os.path.join(scatter_dir, rev1 + "-" + rev2, name)) - - def make_scatter_plots(): - for config in self._configs: - for rev1, rev2 in itertools.combinations(self._revisions, 2): - for attribute in self.get_supported_attributes( - config.nick, attributes): - make_scatter_plot(config.nick, rev1, rev2, attribute) - - self.add_step(step_name, make_scatter_plots) diff --git a/experiments/issue908/parser.py b/experiments/issue908/parser.py deleted file mode 100755 index 0236c80fad..0000000000 --- a/experiments/issue908/parser.py +++ /dev/null @@ -1,10 +0,0 @@ -#! 
/usr/bin/env python - -from lab.parser import Parser - -parser = Parser() -parser.add_pattern('generator_computation_time', 'generator computation time: (.+)s', required=False, type=float) -parser.add_pattern('cpdbs_computation_time', 'Canonical PDB heuristic computation time: (.+)s', required=False, type=float) -parser.add_pattern('dominance_pruning_time', 'Dominance pruning took (.+)s', required=False, type=float) - -parser.parse() diff --git a/experiments/issue908/relativescatter.py b/experiments/issue908/relativescatter.py deleted file mode 100644 index f74cb6e721..0000000000 --- a/experiments/issue908/relativescatter.py +++ /dev/null @@ -1,105 +0,0 @@ -# -*- coding: utf-8 -*- - -from collections import defaultdict - -from matplotlib import ticker - -from downward.reports.scatter import ScatterPlotReport -from downward.reports.plot import PlotReport, Matplotlib, MatplotlibPlot - - -# TODO: handle outliers - -# TODO: this is mostly copied from ScatterMatplotlib (scatter.py) -class RelativeScatterMatplotlib(Matplotlib): - @classmethod - def _plot(cls, report, axes, categories, styles): - # Display grid - axes.grid(b=True, linestyle='-', color='0.75') - - has_points = False - # Generate the scatter plots - for category, coords in sorted(categories.items()): - X, Y = zip(*coords) - axes.scatter(X, Y, s=42, label=category, **styles[category]) - if X and Y: - has_points = True - - if report.xscale == 'linear' or report.yscale == 'linear': - plot_size = report.missing_val * 1.01 - else: - plot_size = report.missing_val * 1.25 - - # make 5 ticks above and below 1 - yticks = [] - tick_step = report.ylim_top**(1/5.0) - for i in xrange(-5, 6): - yticks.append(tick_step**i) - axes.set_yticks(yticks) - axes.get_yaxis().set_major_formatter(ticker.ScalarFormatter()) - - axes.set_xlim(report.xlim_left or -1, report.xlim_right or plot_size) - axes.set_ylim(report.ylim_bottom or -1, report.ylim_top or plot_size) - - for axis in [axes.xaxis, axes.yaxis]: - 
MatplotlibPlot.change_axis_formatter( - axis, - report.missing_val if report.show_missing else None) - return has_points - - -class RelativeScatterPlotReport(ScatterPlotReport): - """ - Generate a scatter plot that shows a relative comparison of two - algorithms with regard to the given attribute. The attribute value - of algorithm 1 is shown on the x-axis and the relation to the value - of algorithm 2 on the y-axis. - """ - - def __init__(self, show_missing=True, get_category=None, **kwargs): - ScatterPlotReport.__init__(self, show_missing, get_category, **kwargs) - if self.output_format == 'tex': - raise "not supported" - else: - self.writer = RelativeScatterMatplotlib - - def _fill_categories(self, runs): - # We discard the *runs* parameter. - # Map category names to value tuples - categories = defaultdict(list) - self.ylim_bottom = 2 - self.ylim_top = 0.5 - self.xlim_left = float("inf") - for (domain, problem), runs in self.problem_runs.items(): - if len(runs) != 2: - continue - run1, run2 = runs - assert (run1['algorithm'] == self.algorithms[0] and - run2['algorithm'] == self.algorithms[1]) - val1 = run1.get(self.attribute) - val2 = run2.get(self.attribute) - if val1 is None or val2 is None: - continue - category = self.get_category(run1, run2) - assert val1 > 0, (domain, problem, self.algorithms[0], val1) - assert val2 > 0, (domain, problem, self.algorithms[1], val2) - x = val1 - y = val2 / float(val1) - - categories[category].append((x, y)) - - self.ylim_top = max(self.ylim_top, y) - self.ylim_bottom = min(self.ylim_bottom, y) - self.xlim_left = min(self.xlim_left, x) - - # center around 1 - if self.ylim_bottom < 1: - self.ylim_top = max(self.ylim_top, 1 / float(self.ylim_bottom)) - if self.ylim_top > 1: - self.ylim_bottom = min(self.ylim_bottom, 1 / float(self.ylim_top)) - return categories - - def _set_scales(self, xscale, yscale): - # ScatterPlot uses log-scaling on the x-axis by default. 
- PlotReport._set_scales( - self, xscale or self.attribute.scale or 'log', 'log') diff --git a/experiments/issue908/v1.py b/experiments/issue908/v1.py deleted file mode 100755 index 1ca3625dc1..0000000000 --- a/experiments/issue908/v1.py +++ /dev/null @@ -1,62 +0,0 @@ -#! /usr/bin/env python -# -*- coding: utf-8 -*- - -import itertools -import os - -from lab.environments import LocalEnvironment, BaselSlurmEnvironment -from lab.reports import Attribute, geometric_mean - -from downward.reports.compare import ComparativeReport - -import common_setup -from common_setup import IssueConfig, IssueExperiment -from relativescatter import RelativeScatterPlotReport - -DIR = os.path.dirname(os.path.abspath(__file__)) -SCRIPT_NAME = os.path.splitext(os.path.basename(__file__))[0] -BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"] -REVISIONS = ["issue908-base", "issue908-v1"] -CONFIGS = [ - IssueConfig("cpdbs-hc", ['--search', 'astar(cpdbs(hillclimbing))']), - IssueConfig("cpdbs-sys2", ['--search', 'astar(cpdbs(systematic(2)))']), -] - -SUITE = common_setup.DEFAULT_OPTIMAL_SUITE -ENVIRONMENT = BaselSlurmEnvironment( - partition="infai_2", - email="silvan.sievers@unibas.ch", - export=["PATH", "DOWNWARD_BENCHMARKS"]) - -if common_setup.is_test_run(): - SUITE = IssueExperiment.DEFAULT_TEST_SUITE - ENVIRONMENT = LocalEnvironment(processes=1) - -exp = IssueExperiment( - revisions=REVISIONS, - configs=CONFIGS, - environment=ENVIRONMENT, -) -exp.add_suite(BENCHMARKS_DIR, SUITE) - -exp.add_parser(exp.EXITCODE_PARSER) -exp.add_parser(exp.TRANSLATOR_PARSER) -exp.add_parser(exp.SINGLE_SEARCH_PARSER) -exp.add_parser(exp.PLANNER_PARSER) -exp.add_parser('parser.py') - -exp.add_step('build', exp.build) -exp.add_step('start', exp.start_runs) -exp.add_fetcher(name='fetch') -exp.add_parse_again_step() - -attributes=exp.DEFAULT_TABLE_ATTRIBUTES -attributes.append( - Attribute('computation_time', absolute=False, min_wins=True, functions=[geometric_mean]), -) - -#exp.add_absolute_report_step() 
-exp.add_comparison_table_step(attributes=attributes) -exp.add_scatter_plot_step(relative=True, attributes=["search_time", "total_time"]) - -exp.run_steps() diff --git a/experiments/issue908/v2.py b/experiments/issue908/v2.py deleted file mode 100755 index 2ae5164a66..0000000000 --- a/experiments/issue908/v2.py +++ /dev/null @@ -1,63 +0,0 @@ -#! /usr/bin/env python -# -*- coding: utf-8 -*- - -import itertools -import os - -from lab.environments import LocalEnvironment, BaselSlurmEnvironment -from lab.reports import Attribute, geometric_mean - -from downward.reports.compare import ComparativeReport - -import common_setup -from common_setup import IssueConfig, IssueExperiment -from relativescatter import RelativeScatterPlotReport - -DIR = os.path.dirname(os.path.abspath(__file__)) -SCRIPT_NAME = os.path.splitext(os.path.basename(__file__))[0] -BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"] -REVISIONS = ["issue908-base", "issue908-v2"] -CONFIGS = [ - IssueConfig("cpdbs-hc", ['--search', 'astar(cpdbs(hillclimbing))']), - IssueConfig("cpdbs-sys2", ['--search', 'astar(cpdbs(systematic(2)))']), -] - -SUITE = common_setup.DEFAULT_OPTIMAL_SUITE -ENVIRONMENT = BaselSlurmEnvironment( - partition="infai_1", - email="silvan.sievers@unibas.ch", - export=["PATH", "DOWNWARD_BENCHMARKS"]) - -if common_setup.is_test_run(): - SUITE = IssueExperiment.DEFAULT_TEST_SUITE - ENVIRONMENT = LocalEnvironment(processes=1) - -exp = IssueExperiment( - revisions=REVISIONS, - configs=CONFIGS, - environment=ENVIRONMENT, -) -exp.add_suite(BENCHMARKS_DIR, SUITE) - -exp.add_parser(exp.EXITCODE_PARSER) -exp.add_parser(exp.TRANSLATOR_PARSER) -exp.add_parser(exp.SINGLE_SEARCH_PARSER) -exp.add_parser(exp.PLANNER_PARSER) -exp.add_parser('parser.py') - -exp.add_step('build', exp.build) -exp.add_step('start', exp.start_runs) -exp.add_fetcher(name='fetch') - -attributes=exp.DEFAULT_TABLE_ATTRIBUTES -attributes.extend([ - Attribute('generator_computation_time', absolute=False, min_wins=True, 
functions=[geometric_mean]), - Attribute('cpdbs_computation_time', absolute=False, min_wins=True, functions=[geometric_mean]), - Attribute('dominance_pruning_time', absolute=False, min_wins=True, functions=[geometric_mean]), -]) - -#exp.add_absolute_report_step() -exp.add_comparison_table_step(attributes=attributes) -exp.add_scatter_plot_step(relative=True, attributes=['generator_computation_time', 'cpdbs_computation_time', 'dominance_pruning_time', "search_time", "total_time"]) - -exp.run_steps() diff --git a/experiments/issue908/v3.py b/experiments/issue908/v3.py deleted file mode 100755 index 31bf0b5fa5..0000000000 --- a/experiments/issue908/v3.py +++ /dev/null @@ -1,63 +0,0 @@ -#! /usr/bin/env python -# -*- coding: utf-8 -*- - -import itertools -import os - -from lab.environments import LocalEnvironment, BaselSlurmEnvironment -from lab.reports import Attribute, geometric_mean - -from downward.reports.compare import ComparativeReport - -import common_setup -from common_setup import IssueConfig, IssueExperiment -from relativescatter import RelativeScatterPlotReport - -DIR = os.path.dirname(os.path.abspath(__file__)) -SCRIPT_NAME = os.path.splitext(os.path.basename(__file__))[0] -BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"] -REVISIONS = ["issue908-base", "issue908-v3"] -CONFIGS = [ - IssueConfig("cpdbs-hc", ['--search', 'astar(cpdbs(hillclimbing))']), - IssueConfig("cpdbs-sys2", ['--search', 'astar(cpdbs(systematic(2)))']), -] - -SUITE = common_setup.DEFAULT_OPTIMAL_SUITE -ENVIRONMENT = BaselSlurmEnvironment( - partition="infai_1", - email="silvan.sievers@unibas.ch", - export=["PATH", "DOWNWARD_BENCHMARKS"]) - -if common_setup.is_test_run(): - SUITE = IssueExperiment.DEFAULT_TEST_SUITE - ENVIRONMENT = LocalEnvironment(processes=1) - -exp = IssueExperiment( - revisions=REVISIONS, - configs=CONFIGS, - environment=ENVIRONMENT, -) -exp.add_suite(BENCHMARKS_DIR, SUITE) - -exp.add_parser(exp.EXITCODE_PARSER) -exp.add_parser(exp.TRANSLATOR_PARSER) 
-exp.add_parser(exp.SINGLE_SEARCH_PARSER) -exp.add_parser(exp.PLANNER_PARSER) -exp.add_parser('parser.py') - -exp.add_step('build', exp.build) -exp.add_step('start', exp.start_runs) -exp.add_fetcher(name='fetch') - -attributes=exp.DEFAULT_TABLE_ATTRIBUTES -attributes.extend([ - Attribute('generator_computation_time', absolute=False, min_wins=True, functions=[geometric_mean]), - Attribute('cpdbs_computation_time', absolute=False, min_wins=True, functions=[geometric_mean]), - Attribute('dominance_pruning_time', absolute=False, min_wins=True, functions=[geometric_mean]), -]) - -#exp.add_absolute_report_step() -exp.add_comparison_table_step(attributes=attributes) -exp.add_scatter_plot_step(relative=True, attributes=['generator_computation_time', 'cpdbs_computation_time', 'dominance_pruning_time', "search_time", "total_time"]) - -exp.run_steps() diff --git a/experiments/issue908/v4.py b/experiments/issue908/v4.py deleted file mode 100755 index b1b20ce4c4..0000000000 --- a/experiments/issue908/v4.py +++ /dev/null @@ -1,63 +0,0 @@ -#! 
/usr/bin/env python -# -*- coding: utf-8 -*- - -import itertools -import os - -from lab.environments import LocalEnvironment, BaselSlurmEnvironment -from lab.reports import Attribute, geometric_mean - -from downward.reports.compare import ComparativeReport - -import common_setup -from common_setup import IssueConfig, IssueExperiment -from relativescatter import RelativeScatterPlotReport - -DIR = os.path.dirname(os.path.abspath(__file__)) -SCRIPT_NAME = os.path.splitext(os.path.basename(__file__))[0] -BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"] -REVISIONS = ["issue908-base", "issue908-v4"] -CONFIGS = [ - IssueConfig("cpdbs-hc", ['--search', 'astar(cpdbs(hillclimbing))']), - IssueConfig("cpdbs-sys2", ['--search', 'astar(cpdbs(systematic(2)))']), -] - -SUITE = common_setup.DEFAULT_OPTIMAL_SUITE -ENVIRONMENT = BaselSlurmEnvironment( - partition="infai_1", - email="silvan.sievers@unibas.ch", - export=["PATH", "DOWNWARD_BENCHMARKS"]) - -if common_setup.is_test_run(): - SUITE = IssueExperiment.DEFAULT_TEST_SUITE - ENVIRONMENT = LocalEnvironment(processes=1) - -exp = IssueExperiment( - revisions=REVISIONS, - configs=CONFIGS, - environment=ENVIRONMENT, -) -exp.add_suite(BENCHMARKS_DIR, SUITE) - -exp.add_parser(exp.EXITCODE_PARSER) -exp.add_parser(exp.TRANSLATOR_PARSER) -exp.add_parser(exp.SINGLE_SEARCH_PARSER) -exp.add_parser(exp.PLANNER_PARSER) -exp.add_parser('parser.py') - -exp.add_step('build', exp.build) -exp.add_step('start', exp.start_runs) -exp.add_fetcher(name='fetch') - -attributes=exp.DEFAULT_TABLE_ATTRIBUTES -attributes.extend([ - Attribute('generator_computation_time', absolute=False, min_wins=True, functions=[geometric_mean]), - Attribute('cpdbs_computation_time', absolute=False, min_wins=True, functions=[geometric_mean]), - Attribute('dominance_pruning_time', absolute=False, min_wins=True, functions=[geometric_mean]), -]) - -#exp.add_absolute_report_step() -exp.add_comparison_table_step(attributes=attributes) -exp.add_scatter_plot_step(relative=True, 
attributes=['generator_computation_time', 'cpdbs_computation_time', 'dominance_pruning_time', "search_time", "total_time"]) - -exp.run_steps() diff --git a/experiments/issue908/v5.py b/experiments/issue908/v5.py deleted file mode 100755 index 26e290d418..0000000000 --- a/experiments/issue908/v5.py +++ /dev/null @@ -1,63 +0,0 @@ -#! /usr/bin/env python -# -*- coding: utf-8 -*- - -import itertools -import os - -from lab.environments import LocalEnvironment, BaselSlurmEnvironment -from lab.reports import Attribute, geometric_mean - -from downward.reports.compare import ComparativeReport - -import common_setup -from common_setup import IssueConfig, IssueExperiment -from relativescatter import RelativeScatterPlotReport - -DIR = os.path.dirname(os.path.abspath(__file__)) -SCRIPT_NAME = os.path.splitext(os.path.basename(__file__))[0] -BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"] -REVISIONS = ["issue908-v4", "issue908-v5"] -CONFIGS = [ - IssueConfig("cpdbs-hc", ['--search', 'astar(cpdbs(hillclimbing))']), - IssueConfig("cpdbs-sys2", ['--search', 'astar(cpdbs(systematic(2)))']), -] - -SUITE = common_setup.DEFAULT_OPTIMAL_SUITE -ENVIRONMENT = BaselSlurmEnvironment( - partition="infai_1", - email="silvan.sievers@unibas.ch", - export=["PATH", "DOWNWARD_BENCHMARKS"]) - -if common_setup.is_test_run(): - SUITE = IssueExperiment.DEFAULT_TEST_SUITE - ENVIRONMENT = LocalEnvironment(processes=1) - -exp = IssueExperiment( - revisions=REVISIONS, - configs=CONFIGS, - environment=ENVIRONMENT, -) -exp.add_suite(BENCHMARKS_DIR, SUITE) - -exp.add_parser(exp.EXITCODE_PARSER) -exp.add_parser(exp.TRANSLATOR_PARSER) -exp.add_parser(exp.SINGLE_SEARCH_PARSER) -exp.add_parser(exp.PLANNER_PARSER) -exp.add_parser('parser.py') - -exp.add_step('build', exp.build) -exp.add_step('start', exp.start_runs) -exp.add_fetcher(name='fetch') - -attributes=exp.DEFAULT_TABLE_ATTRIBUTES -attributes.extend([ - Attribute('generator_computation_time', absolute=False, min_wins=True, functions=[geometric_mean]), 
- Attribute('cpdbs_computation_time', absolute=False, min_wins=True, functions=[geometric_mean]), - Attribute('dominance_pruning_time', absolute=False, min_wins=True, functions=[geometric_mean]), -]) - -#exp.add_absolute_report_step() -exp.add_comparison_table_step(attributes=attributes) -exp.add_scatter_plot_step(relative=True, attributes=['generator_computation_time', 'cpdbs_computation_time', 'dominance_pruning_time', "search_time", "total_time"]) - -exp.run_steps() diff --git a/experiments/issue908/v6.py b/experiments/issue908/v6.py deleted file mode 100755 index 692f01cee5..0000000000 --- a/experiments/issue908/v6.py +++ /dev/null @@ -1,64 +0,0 @@ -#! /usr/bin/env python -# -*- coding: utf-8 -*- - -import itertools -import os - -from lab.environments import LocalEnvironment, BaselSlurmEnvironment -from lab.reports import Attribute, geometric_mean - -from downward.reports.compare import ComparativeReport - -import common_setup -from common_setup import IssueConfig, IssueExperiment -from relativescatter import RelativeScatterPlotReport - -DIR = os.path.dirname(os.path.abspath(__file__)) -SCRIPT_NAME = os.path.splitext(os.path.basename(__file__))[0] -BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"] -REVISIONS = ["issue908-base", "issue908-v6"] -CONFIGS = [ - IssueConfig("cpdbs-hc", ['--search', 'astar(cpdbs(hillclimbing))']), - IssueConfig("cpdbs-sys2", ['--search', 'astar(cpdbs(systematic(2)))']), - IssueConfig("cpdbs-sys3", ['--search', 'astar(cpdbs(systematic(3)))']), -] - -SUITE = common_setup.DEFAULT_OPTIMAL_SUITE -ENVIRONMENT = BaselSlurmEnvironment( - partition="infai_2", - email="silvan.sievers@unibas.ch", - export=["PATH", "DOWNWARD_BENCHMARKS"]) - -if common_setup.is_test_run(): - SUITE = IssueExperiment.DEFAULT_TEST_SUITE - ENVIRONMENT = LocalEnvironment(processes=1) - -exp = IssueExperiment( - revisions=REVISIONS, - configs=CONFIGS, - environment=ENVIRONMENT, -) -exp.add_suite(BENCHMARKS_DIR, SUITE) - -exp.add_parser(exp.EXITCODE_PARSER) 
-exp.add_parser(exp.TRANSLATOR_PARSER) -exp.add_parser(exp.SINGLE_SEARCH_PARSER) -exp.add_parser(exp.PLANNER_PARSER) -exp.add_parser('parser.py') - -exp.add_step('build', exp.build) -exp.add_step('start', exp.start_runs) -exp.add_fetcher(name='fetch') - -attributes=exp.DEFAULT_TABLE_ATTRIBUTES -attributes.extend([ - Attribute('generator_computation_time', absolute=False, min_wins=True, functions=[geometric_mean]), - Attribute('cpdbs_computation_time', absolute=False, min_wins=True, functions=[geometric_mean]), - Attribute('dominance_pruning_time', absolute=False, min_wins=True, functions=[geometric_mean]), -]) - -#exp.add_absolute_report_step() -exp.add_comparison_table_step(attributes=attributes) -exp.add_scatter_plot_step(relative=True, attributes=['generator_computation_time', 'cpdbs_computation_time', 'dominance_pruning_time', "search_time", "total_time"]) - -exp.run_steps() diff --git a/experiments/issue914/common_setup.py b/experiments/issue914/common_setup.py deleted file mode 100644 index 47a24652e0..0000000000 --- a/experiments/issue914/common_setup.py +++ /dev/null @@ -1,393 +0,0 @@ -# -*- coding: utf-8 -*- - -import itertools -import os -import platform -import subprocess -import sys - -from lab.experiment import ARGPARSER -from lab import tools - -from downward.experiment import FastDownwardExperiment -from downward.reports.absolute import AbsoluteReport -from downward.reports.compare import ComparativeReport -from downward.reports.scatter import ScatterPlotReport - -from relativescatter import RelativeScatterPlotReport - - -def parse_args(): - ARGPARSER.add_argument( - "--test", - choices=["yes", "no", "auto"], - default="auto", - dest="test_run", - help="test experiment locally on a small suite if --test=yes or " - "--test=auto and we are not on a cluster") - return ARGPARSER.parse_args() - -ARGS = parse_args() - - -DEFAULT_OPTIMAL_SUITE = [ - 'agricola-opt18-strips', 'airport', 'barman-opt11-strips', - 'barman-opt14-strips', 'blocks', 
'childsnack-opt14-strips', - 'data-network-opt18-strips', 'depot', 'driverlog', - 'elevators-opt08-strips', 'elevators-opt11-strips', - 'floortile-opt11-strips', 'floortile-opt14-strips', 'freecell', - 'ged-opt14-strips', 'grid', 'gripper', 'hiking-opt14-strips', - 'logistics00', 'logistics98', 'miconic', 'movie', 'mprime', - 'mystery', 'nomystery-opt11-strips', 'openstacks-opt08-strips', - 'openstacks-opt11-strips', 'openstacks-opt14-strips', - 'openstacks-strips', 'organic-synthesis-opt18-strips', - 'organic-synthesis-split-opt18-strips', 'parcprinter-08-strips', - 'parcprinter-opt11-strips', 'parking-opt11-strips', - 'parking-opt14-strips', 'pathways-noneg', 'pegsol-08-strips', - 'pegsol-opt11-strips', 'petri-net-alignment-opt18-strips', - 'pipesworld-notankage', 'pipesworld-tankage', 'psr-small', 'rovers', - 'satellite', 'scanalyzer-08-strips', 'scanalyzer-opt11-strips', - 'snake-opt18-strips', 'sokoban-opt08-strips', - 'sokoban-opt11-strips', 'spider-opt18-strips', 'storage', - 'termes-opt18-strips', 'tetris-opt14-strips', - 'tidybot-opt11-strips', 'tidybot-opt14-strips', 'tpp', - 'transport-opt08-strips', 'transport-opt11-strips', - 'transport-opt14-strips', 'trucks-strips', 'visitall-opt11-strips', - 'visitall-opt14-strips', 'woodworking-opt08-strips', - 'woodworking-opt11-strips', 'zenotravel'] - -DEFAULT_SATISFICING_SUITE = [ - 'agricola-sat18-strips', 'airport', 'assembly', - 'barman-sat11-strips', 'barman-sat14-strips', 'blocks', - 'caldera-sat18-adl', 'caldera-split-sat18-adl', 'cavediving-14-adl', - 'childsnack-sat14-strips', 'citycar-sat14-adl', - 'data-network-sat18-strips', 'depot', 'driverlog', - 'elevators-sat08-strips', 'elevators-sat11-strips', - 'flashfill-sat18-adl', 'floortile-sat11-strips', - 'floortile-sat14-strips', 'freecell', 'ged-sat14-strips', 'grid', - 'gripper', 'hiking-sat14-strips', 'logistics00', 'logistics98', - 'maintenance-sat14-adl', 'miconic', 'miconic-fulladl', - 'miconic-simpleadl', 'movie', 'mprime', 'mystery', - 
'nomystery-sat11-strips', 'nurikabe-sat18-adl', 'openstacks', - 'openstacks-sat08-adl', 'openstacks-sat08-strips', - 'openstacks-sat11-strips', 'openstacks-sat14-strips', - 'openstacks-strips', 'optical-telegraphs', - 'organic-synthesis-sat18-strips', - 'organic-synthesis-split-sat18-strips', 'parcprinter-08-strips', - 'parcprinter-sat11-strips', 'parking-sat11-strips', - 'parking-sat14-strips', 'pathways', 'pathways-noneg', - 'pegsol-08-strips', 'pegsol-sat11-strips', 'philosophers', - 'pipesworld-notankage', 'pipesworld-tankage', 'psr-large', - 'psr-middle', 'psr-small', 'rovers', 'satellite', - 'scanalyzer-08-strips', 'scanalyzer-sat11-strips', 'schedule', - 'settlers-sat18-adl', 'snake-sat18-strips', 'sokoban-sat08-strips', - 'sokoban-sat11-strips', 'spider-sat18-strips', 'storage', - 'termes-sat18-strips', 'tetris-sat14-strips', - 'thoughtful-sat14-strips', 'tidybot-sat11-strips', 'tpp', - 'transport-sat08-strips', 'transport-sat11-strips', - 'transport-sat14-strips', 'trucks', 'trucks-strips', - 'visitall-sat11-strips', 'visitall-sat14-strips', - 'woodworking-sat08-strips', 'woodworking-sat11-strips', - 'zenotravel'] - - -def get_script(): - """Get file name of main script.""" - return tools.get_script_path() - - -def get_script_dir(): - """Get directory of main script. - - Usually a relative directory (depends on how it was called by the user.)""" - return os.path.dirname(get_script()) - - -def get_experiment_name(): - """Get name for experiment. - - Derived from the absolute filename of the main script, e.g. - "/ham/spam/eggs.py" => "spam-eggs".""" - script = os.path.abspath(get_script()) - script_dir = os.path.basename(os.path.dirname(script)) - script_base = os.path.splitext(os.path.basename(script))[0] - return "%s-%s" % (script_dir, script_base) - - -def get_data_dir(): - """Get data dir for the experiment. 
- - This is the subdirectory "data" of the directory containing - the main script.""" - return os.path.join(get_script_dir(), "data", get_experiment_name()) - - -def get_repo_base(): - """Get base directory of the repository, as an absolute path. - - Search upwards in the directory tree from the main script until a - directory with a subdirectory named ".hg" is found. - - Abort if the repo base cannot be found.""" - path = os.path.abspath(get_script_dir()) - while os.path.dirname(path) != path: - if os.path.exists(os.path.join(path, ".hg")): - return path - path = os.path.dirname(path) - sys.exit("repo base could not be found") - - -def is_running_on_cluster(): - node = platform.node() - return node.endswith(".scicore.unibas.ch") or node.endswith(".cluster.bc2.ch") - - -def is_test_run(): - return ARGS.test_run == "yes" or ( - ARGS.test_run == "auto" and not is_running_on_cluster()) - - -def get_algo_nick(revision, config_nick): - return "{revision}-{config_nick}".format(**locals()) - - -class IssueConfig(object): - """Hold information about a planner configuration. - - See FastDownwardExperiment.add_algorithm() for documentation of the - constructor's options. 
- - """ - def __init__(self, nick, component_options, - build_options=None, driver_options=None): - self.nick = nick - self.component_options = component_options - self.build_options = build_options - self.driver_options = driver_options - - -class IssueExperiment(FastDownwardExperiment): - """Subclass of FastDownwardExperiment with some convenience features.""" - - DEFAULT_TEST_SUITE = ["depot:p01.pddl", "gripper:prob01.pddl"] - - DEFAULT_TABLE_ATTRIBUTES = [ - "cost", - "coverage", - "error", - "evaluations", - "expansions", - "expansions_until_last_jump", - "generated", - "memory", - "planner_memory", - "planner_time", - "quality", - "run_dir", - "score_evaluations", - "score_expansions", - "score_generated", - "score_memory", - "score_search_time", - "score_total_time", - "search_time", - "total_time", - ] - - DEFAULT_SCATTER_PLOT_ATTRIBUTES = [ - "evaluations", - "expansions", - "expansions_until_last_jump", - "initial_h_value", - "memory", - "search_time", - "total_time", - ] - - PORTFOLIO_ATTRIBUTES = [ - "cost", - "coverage", - "error", - "plan_length", - "run_dir", - ] - - def __init__(self, revisions=None, configs=None, path=None, **kwargs): - """ - - You can either specify both *revisions* and *configs* or none - of them. If they are omitted, you will need to call - exp.add_algorithm() manually. - - If *revisions* is given, it must be a non-empty list of - revision identifiers, which specify which planner versions to - use in the experiment. The same versions are used for - translator, preprocessor and search. :: - - IssueExperiment(revisions=["issue123", "4b3d581643"], ...) - - If *configs* is given, it must be a non-empty list of - IssueConfig objects. :: - - IssueExperiment(..., configs=[ - IssueConfig("ff", ["--search", "eager_greedy(ff())"]), - IssueConfig( - "lama", [], - driver_options=["--alias", "seq-sat-lama-2011"]), - ]) - - If *path* is specified, it must be the path to where the - experiment should be built (e.g. 
- /home/john/experiments/issue123/exp01/). If omitted, the - experiment path is derived automatically from the main - script's filename. Example:: - - script = experiments/issue123/exp01.py --> - path = experiments/issue123/data/issue123-exp01/ - - """ - - path = path or get_data_dir() - - FastDownwardExperiment.__init__(self, path=path, **kwargs) - - if (revisions and not configs) or (not revisions and configs): - raise ValueError( - "please provide either both or none of revisions and configs") - - for rev in revisions: - for config in configs: - self.add_algorithm( - get_algo_nick(rev, config.nick), - get_repo_base(), - rev, - config.component_options, - build_options=config.build_options, - driver_options=config.driver_options) - - self._revisions = revisions - self._configs = configs - - @classmethod - def _is_portfolio(cls, config_nick): - return "fdss" in config_nick - - @classmethod - def get_supported_attributes(cls, config_nick, attributes): - if cls._is_portfolio(config_nick): - return [attr for attr in attributes - if attr in cls.PORTFOLIO_ATTRIBUTES] - return attributes - - def add_absolute_report_step(self, **kwargs): - """Add step that makes an absolute report. - - Absolute reports are useful for experiments that don't compare - revisions. - - The report is written to the experiment evaluation directory. - - All *kwargs* will be passed to the AbsoluteReport class. If the - keyword argument *attributes* is not specified, a default list - of attributes is used. :: - - exp.add_absolute_report_step(attributes=["coverage"]) - - """ - kwargs.setdefault("attributes", self.DEFAULT_TABLE_ATTRIBUTES) - report = AbsoluteReport(**kwargs) - outfile = os.path.join( - self.eval_dir, - get_experiment_name() + "." 
+ report.output_format) - self.add_report(report, outfile=outfile) - self.add_step( - 'publish-absolute-report', subprocess.call, ['publish', outfile]) - - def add_comparison_table_step(self, **kwargs): - """Add a step that makes pairwise revision comparisons. - - Create comparative reports for all pairs of Fast Downward - revisions. Each report pairs up the runs of the same config and - lists the two absolute attribute values and their difference - for all attributes in kwargs["attributes"]. - - All *kwargs* will be passed to the CompareConfigsReport class. - If the keyword argument *attributes* is not specified, a - default list of attributes is used. :: - - exp.add_comparison_table_step(attributes=["coverage"]) - - """ - kwargs.setdefault("attributes", self.DEFAULT_TABLE_ATTRIBUTES) - - def make_comparison_tables(): - for rev1, rev2 in itertools.combinations(self._revisions, 2): - compared_configs = [] - for config in self._configs: - config_nick = config.nick - compared_configs.append( - ("%s-%s" % (rev1, config_nick), - "%s-%s" % (rev2, config_nick), - "Diff (%s)" % config_nick)) - report = ComparativeReport(compared_configs, **kwargs) - outfile = os.path.join( - self.eval_dir, - "%s-%s-%s-compare.%s" % ( - self.name, rev1, rev2, report.output_format)) - report(self.eval_dir, outfile) - - def publish_comparison_tables(): - for rev1, rev2 in itertools.combinations(self._revisions, 2): - outfile = os.path.join( - self.eval_dir, - "%s-%s-%s-compare.html" % (self.name, rev1, rev2)) - subprocess.call(["publish", outfile]) - - self.add_step("make-comparison-tables", make_comparison_tables) - self.add_step( - "publish-comparison-tables", publish_comparison_tables) - - def add_scatter_plot_step(self, relative=False, attributes=None): - """Add step creating (relative) scatter plots for all revision pairs. - - Create a scatter plot for each combination of attribute, - configuration and revisions pair. 
If *attributes* is not - specified, a list of common scatter plot attributes is used. - For portfolios all attributes except "cost", "coverage" and - "plan_length" will be ignored. :: - - exp.add_scatter_plot_step(attributes=["expansions"]) - - """ - if relative: - report_class = RelativeScatterPlotReport - scatter_dir = os.path.join(self.eval_dir, "scatter-relative") - step_name = "make-relative-scatter-plots" - else: - report_class = ScatterPlotReport - scatter_dir = os.path.join(self.eval_dir, "scatter-absolute") - step_name = "make-absolute-scatter-plots" - if attributes is None: - attributes = self.DEFAULT_SCATTER_PLOT_ATTRIBUTES - - def make_scatter_plot(config_nick, rev1, rev2, attribute): - name = "-".join([self.name, rev1, rev2, attribute, config_nick]) - print("Make scatter plot for", name) - algo1 = get_algo_nick(rev1, config_nick) - algo2 = get_algo_nick(rev2, config_nick) - report = report_class( - filter_algorithm=[algo1, algo2], - attributes=[attribute], - get_category=lambda run1, run2: run1["domain"]) - report( - self.eval_dir, - os.path.join(scatter_dir, rev1 + "-" + rev2, name)) - - def make_scatter_plots(): - for config in self._configs: - for rev1, rev2 in itertools.combinations(self._revisions, 2): - for attribute in self.get_supported_attributes( - config.nick, attributes): - make_scatter_plot(config.nick, rev1, rev2, attribute) - - self.add_step(step_name, make_scatter_plots) diff --git a/experiments/issue914/ms-parser.py b/experiments/issue914/ms-parser.py deleted file mode 100755 index a7a8c4b897..0000000000 --- a/experiments/issue914/ms-parser.py +++ /dev/null @@ -1,71 +0,0 @@ -#! 
/usr/bin/env python - -from lab.parser import Parser - -parser = Parser() -parser.add_pattern('ms_construction_time', 'Merge-and-shrink algorithm runtime: (.+)s', required=False, type=float) -parser.add_pattern('ms_atomic_construction_time', 'M&S algorithm timer: (.+)s \(after computation of atomic factors\)', required=False, type=float) -parser.add_pattern('ms_memory_delta', 'Final peak memory increase of merge-and-shrink algorithm: (\d+) KB', required=False, type=int) -parser.add_pattern('ms_num_remaining_factors', 'Number of remaining factors: (\d+)', required=False, type=int) -parser.add_pattern('ms_num_factors_kept', 'Number of factors kept: (\d+)', required=False, type=int) - -def check_ms_constructed(content, props): - ms_construction_time = props.get('ms_construction_time') - abstraction_constructed = False - if ms_construction_time is not None: - abstraction_constructed = True - props['ms_abstraction_constructed'] = abstraction_constructed - -parser.add_function(check_ms_constructed) - -def check_atomic_fts_constructed(content, props): - ms_atomic_construction_time = props.get('ms_atomic_construction_time') - ms_atomic_fts_constructed = False - if ms_atomic_construction_time is not None: - ms_atomic_fts_constructed = True - props['ms_atomic_fts_constructed'] = ms_atomic_fts_constructed - -parser.add_function(check_atomic_fts_constructed) - -def check_planner_exit_reason(content, props): - ms_abstraction_constructed = props.get('ms_abstraction_constructed') - error = props.get('error') - if error != 'success' and error != 'timeout' and error != 'out-of-memory': - print 'error: %s' % error - return - - # Check whether merge-and-shrink computation or search ran out of - # time or memory. 
- ms_out_of_time = False - ms_out_of_memory = False - search_out_of_time = False - search_out_of_memory = False - if ms_abstraction_constructed == False: - if error == 'timeout': - ms_out_of_time = True - elif error == 'out-of-memory': - ms_out_of_memory = True - elif ms_abstraction_constructed == True: - if error == 'timeout': - search_out_of_time = True - elif error == 'out-of-memory': - search_out_of_memory = True - props['ms_out_of_time'] = ms_out_of_time - props['ms_out_of_memory'] = ms_out_of_memory - props['search_out_of_time'] = search_out_of_time - props['search_out_of_memory'] = search_out_of_memory - -parser.add_function(check_planner_exit_reason) - -def check_perfect_heuristic(content, props): - plan_length = props.get('plan_length') - expansions = props.get('expansions') - if plan_length != None: - perfect_heuristic = False - if plan_length + 1 == expansions: - perfect_heuristic = True - props['perfect_heuristic'] = perfect_heuristic - -parser.add_function(check_perfect_heuristic) - -parser.parse() diff --git a/experiments/issue914/relativescatter.py b/experiments/issue914/relativescatter.py deleted file mode 100644 index f74cb6e721..0000000000 --- a/experiments/issue914/relativescatter.py +++ /dev/null @@ -1,105 +0,0 @@ -# -*- coding: utf-8 -*- - -from collections import defaultdict - -from matplotlib import ticker - -from downward.reports.scatter import ScatterPlotReport -from downward.reports.plot import PlotReport, Matplotlib, MatplotlibPlot - - -# TODO: handle outliers - -# TODO: this is mostly copied from ScatterMatplotlib (scatter.py) -class RelativeScatterMatplotlib(Matplotlib): - @classmethod - def _plot(cls, report, axes, categories, styles): - # Display grid - axes.grid(b=True, linestyle='-', color='0.75') - - has_points = False - # Generate the scatter plots - for category, coords in sorted(categories.items()): - X, Y = zip(*coords) - axes.scatter(X, Y, s=42, label=category, **styles[category]) - if X and Y: - has_points = True - - if 
report.xscale == 'linear' or report.yscale == 'linear': - plot_size = report.missing_val * 1.01 - else: - plot_size = report.missing_val * 1.25 - - # make 5 ticks above and below 1 - yticks = [] - tick_step = report.ylim_top**(1/5.0) - for i in xrange(-5, 6): - yticks.append(tick_step**i) - axes.set_yticks(yticks) - axes.get_yaxis().set_major_formatter(ticker.ScalarFormatter()) - - axes.set_xlim(report.xlim_left or -1, report.xlim_right or plot_size) - axes.set_ylim(report.ylim_bottom or -1, report.ylim_top or plot_size) - - for axis in [axes.xaxis, axes.yaxis]: - MatplotlibPlot.change_axis_formatter( - axis, - report.missing_val if report.show_missing else None) - return has_points - - -class RelativeScatterPlotReport(ScatterPlotReport): - """ - Generate a scatter plot that shows a relative comparison of two - algorithms with regard to the given attribute. The attribute value - of algorithm 1 is shown on the x-axis and the relation to the value - of algorithm 2 on the y-axis. - """ - - def __init__(self, show_missing=True, get_category=None, **kwargs): - ScatterPlotReport.__init__(self, show_missing, get_category, **kwargs) - if self.output_format == 'tex': - raise "not supported" - else: - self.writer = RelativeScatterMatplotlib - - def _fill_categories(self, runs): - # We discard the *runs* parameter. 
- # Map category names to value tuples - categories = defaultdict(list) - self.ylim_bottom = 2 - self.ylim_top = 0.5 - self.xlim_left = float("inf") - for (domain, problem), runs in self.problem_runs.items(): - if len(runs) != 2: - continue - run1, run2 = runs - assert (run1['algorithm'] == self.algorithms[0] and - run2['algorithm'] == self.algorithms[1]) - val1 = run1.get(self.attribute) - val2 = run2.get(self.attribute) - if val1 is None or val2 is None: - continue - category = self.get_category(run1, run2) - assert val1 > 0, (domain, problem, self.algorithms[0], val1) - assert val2 > 0, (domain, problem, self.algorithms[1], val2) - x = val1 - y = val2 / float(val1) - - categories[category].append((x, y)) - - self.ylim_top = max(self.ylim_top, y) - self.ylim_bottom = min(self.ylim_bottom, y) - self.xlim_left = min(self.xlim_left, x) - - # center around 1 - if self.ylim_bottom < 1: - self.ylim_top = max(self.ylim_top, 1 / float(self.ylim_bottom)) - if self.ylim_top > 1: - self.ylim_bottom = min(self.ylim_bottom, 1 / float(self.ylim_top)) - return categories - - def _set_scales(self, xscale, yscale): - # ScatterPlot uses log-scaling on the x-axis by default. - PlotReport._set_scales( - self, xscale or self.attribute.scale or 'log', 'log') diff --git a/experiments/issue914/v1.py b/experiments/issue914/v1.py deleted file mode 100755 index ea83498678..0000000000 --- a/experiments/issue914/v1.py +++ /dev/null @@ -1,93 +0,0 @@ -#! 
/usr/bin/env python -# -*- coding: utf-8 -*- - -import itertools -import os - -from lab.environments import LocalEnvironment, BaselSlurmEnvironment -from lab.reports import Attribute, geometric_mean - -from downward.reports.compare import ComparativeReport - -import common_setup -from common_setup import IssueConfig, IssueExperiment -from relativescatter import RelativeScatterPlotReport - -DIR = os.path.dirname(os.path.abspath(__file__)) -SCRIPT_NAME = os.path.splitext(os.path.basename(__file__))[0] -BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"] -REVISIONS = ["issue914-base", "issue914-v1"] -BUILDS = ["release"] -CONFIG_NICKS = [ - ('dfp-b50k-t900', ['--search', 'astar(merge_and_shrink(merge_strategy=merge_stateless(merge_selector=score_based_filtering(scoring_functions=[goal_relevance,dfp,total_order])),shrink_strategy=shrink_bisimulation(greedy=false),label_reduction=exact(before_shrinking=true,before_merging=false),max_states=50000,threshold_before_merge=1,main_loop_max_time=900))']), - ('rl-b50k-t900', ['--search', 'astar(merge_and_shrink(merge_strategy=merge_precomputed(merge_tree=linear(variable_order=reverse_level)),shrink_strategy=shrink_bisimulation(greedy=false),label_reduction=exact(before_shrinking=true,before_merging=false),max_states=50000,threshold_before_merge=1,main_loop_max_time=900))']), - ('sccs-dfp-b50k-t900', ['--search', 'astar(merge_and_shrink(shrink_strategy=shrink_bisimulation(greedy=false),merge_strategy=merge_sccs(order_of_sccs=topological,merge_selector=score_based_filtering(scoring_functions=[goal_relevance,dfp,total_order(atomic_ts_order=reverse_level,product_ts_order=new_to_old,atomic_before_product=false)])),label_reduction=exact(before_shrinking=true,before_merging=false),max_states=50000,threshold_before_merge=1,main_loop_max_time=900))']), -] -CONFIGS = [ - IssueConfig( - config_nick, - config, - build_options=[build], - driver_options=["--build", build]) - for build in BUILDS - for config_nick, config in CONFIG_NICKS -] - 
-SUITE = common_setup.DEFAULT_OPTIMAL_SUITE -ENVIRONMENT = BaselSlurmEnvironment( - partition="infai_2", - email="silvan.sievers@unibas.ch", - export=["PATH", "DOWNWARD_BENCHMARKS"]) - -if common_setup.is_test_run(): - SUITE = IssueExperiment.DEFAULT_TEST_SUITE - ENVIRONMENT = LocalEnvironment(processes=4) - -exp = IssueExperiment( - revisions=REVISIONS, - configs=CONFIGS, - environment=ENVIRONMENT, -) -exp.add_suite(BENCHMARKS_DIR, SUITE) - -exp.add_parser(exp.EXITCODE_PARSER) -exp.add_parser(exp.TRANSLATOR_PARSER) -exp.add_parser(exp.SINGLE_SEARCH_PARSER) -exp.add_parser(exp.PLANNER_PARSER) -exp.add_parser('ms-parser.py') - -exp.add_step('build', exp.build) -exp.add_step('start', exp.start_runs) -exp.add_fetcher(name='fetch') - -# planner outcome attributes -perfect_heuristic = Attribute('perfect_heuristic', absolute=True, min_wins=False) - -# m&s attributes -ms_construction_time = Attribute('ms_construction_time', absolute=False, min_wins=True, functions=[geometric_mean]) -ms_atomic_construction_time = Attribute('ms_atomic_construction_time', absolute=False, min_wins=True, functions=[geometric_mean]) -ms_abstraction_constructed = Attribute('ms_abstraction_constructed', absolute=True, min_wins=False) -ms_atomic_fts_constructed = Attribute('ms_atomic_fts_constructed', absolute=True, min_wins=False) -ms_out_of_memory = Attribute('ms_out_of_memory', absolute=True, min_wins=True) -ms_out_of_time = Attribute('ms_out_of_time', absolute=True, min_wins=True) -search_out_of_memory = Attribute('search_out_of_memory', absolute=True, min_wins=True) -search_out_of_time = Attribute('search_out_of_time', absolute=True, min_wins=True) - -extra_attributes = [ - perfect_heuristic, - - ms_construction_time, - ms_atomic_construction_time, - ms_abstraction_constructed, - ms_atomic_fts_constructed, - ms_out_of_memory, - ms_out_of_time, - search_out_of_memory, - search_out_of_time, -] -attributes = exp.DEFAULT_TABLE_ATTRIBUTES -attributes.extend(extra_attributes) - 
-exp.add_comparison_table_step(attributes=attributes) - -exp.run_steps() diff --git a/experiments/issue914/v2.py b/experiments/issue914/v2.py deleted file mode 100755 index 985d29356f..0000000000 --- a/experiments/issue914/v2.py +++ /dev/null @@ -1,93 +0,0 @@ -#! /usr/bin/env python -# -*- coding: utf-8 -*- - -import itertools -import os - -from lab.environments import LocalEnvironment, BaselSlurmEnvironment -from lab.reports import Attribute, geometric_mean - -from downward.reports.compare import ComparativeReport - -import common_setup -from common_setup import IssueConfig, IssueExperiment -from relativescatter import RelativeScatterPlotReport - -DIR = os.path.dirname(os.path.abspath(__file__)) -SCRIPT_NAME = os.path.splitext(os.path.basename(__file__))[0] -BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"] -REVISIONS = ["issue914-base", "issue914-v2"] -BUILDS = ["release"] -CONFIG_NICKS = [ - ('dfp-b50k-t900', ['--search', 'astar(merge_and_shrink(merge_strategy=merge_stateless(merge_selector=score_based_filtering(scoring_functions=[goal_relevance,dfp,total_order])),shrink_strategy=shrink_bisimulation(greedy=false),label_reduction=exact(before_shrinking=true,before_merging=false),max_states=50000,threshold_before_merge=1,main_loop_max_time=900))']), - ('rl-b50k-t900', ['--search', 'astar(merge_and_shrink(merge_strategy=merge_precomputed(merge_tree=linear(variable_order=reverse_level)),shrink_strategy=shrink_bisimulation(greedy=false),label_reduction=exact(before_shrinking=true,before_merging=false),max_states=50000,threshold_before_merge=1,main_loop_max_time=900))']), - ('sccs-dfp-b50k-t900', ['--search', 
'astar(merge_and_shrink(shrink_strategy=shrink_bisimulation(greedy=false),merge_strategy=merge_sccs(order_of_sccs=topological,merge_selector=score_based_filtering(scoring_functions=[goal_relevance,dfp,total_order(atomic_ts_order=reverse_level,product_ts_order=new_to_old,atomic_before_product=false)])),label_reduction=exact(before_shrinking=true,before_merging=false),max_states=50000,threshold_before_merge=1,main_loop_max_time=900))']), -] -CONFIGS = [ - IssueConfig( - config_nick, - config, - build_options=[build], - driver_options=["--build", build]) - for build in BUILDS - for config_nick, config in CONFIG_NICKS -] - -SUITE = common_setup.DEFAULT_OPTIMAL_SUITE -ENVIRONMENT = BaselSlurmEnvironment( - partition="infai_2", - email="silvan.sievers@unibas.ch", - export=["PATH", "DOWNWARD_BENCHMARKS"]) - -if common_setup.is_test_run(): - SUITE = IssueExperiment.DEFAULT_TEST_SUITE - ENVIRONMENT = LocalEnvironment(processes=4) - -exp = IssueExperiment( - revisions=REVISIONS, - configs=CONFIGS, - environment=ENVIRONMENT, -) -exp.add_suite(BENCHMARKS_DIR, SUITE) - -exp.add_parser(exp.EXITCODE_PARSER) -exp.add_parser(exp.TRANSLATOR_PARSER) -exp.add_parser(exp.SINGLE_SEARCH_PARSER) -exp.add_parser(exp.PLANNER_PARSER) -exp.add_parser('ms-parser.py') - -exp.add_step('build', exp.build) -exp.add_step('start', exp.start_runs) -exp.add_fetcher(name='fetch') - -# planner outcome attributes -perfect_heuristic = Attribute('perfect_heuristic', absolute=True, min_wins=False) - -# m&s attributes -ms_construction_time = Attribute('ms_construction_time', absolute=False, min_wins=True, functions=[geometric_mean]) -ms_atomic_construction_time = Attribute('ms_atomic_construction_time', absolute=False, min_wins=True, functions=[geometric_mean]) -ms_abstraction_constructed = Attribute('ms_abstraction_constructed', absolute=True, min_wins=False) -ms_atomic_fts_constructed = Attribute('ms_atomic_fts_constructed', absolute=True, min_wins=False) -ms_out_of_memory = Attribute('ms_out_of_memory', 
absolute=True, min_wins=True) -ms_out_of_time = Attribute('ms_out_of_time', absolute=True, min_wins=True) -search_out_of_memory = Attribute('search_out_of_memory', absolute=True, min_wins=True) -search_out_of_time = Attribute('search_out_of_time', absolute=True, min_wins=True) - -extra_attributes = [ - perfect_heuristic, - - ms_construction_time, - ms_atomic_construction_time, - ms_abstraction_constructed, - ms_atomic_fts_constructed, - ms_out_of_memory, - ms_out_of_time, - search_out_of_memory, - search_out_of_time, -] -attributes = exp.DEFAULT_TABLE_ATTRIBUTES -attributes.extend(extra_attributes) - -exp.add_comparison_table_step(attributes=attributes) - -exp.run_steps() diff --git a/experiments/issue914/v3.py b/experiments/issue914/v3.py deleted file mode 100755 index c46654e89e..0000000000 --- a/experiments/issue914/v3.py +++ /dev/null @@ -1,93 +0,0 @@ -#! /usr/bin/env python -# -*- coding: utf-8 -*- - -import itertools -import os - -from lab.environments import LocalEnvironment, BaselSlurmEnvironment -from lab.reports import Attribute, geometric_mean - -from downward.reports.compare import ComparativeReport - -import common_setup -from common_setup import IssueConfig, IssueExperiment -from relativescatter import RelativeScatterPlotReport - -DIR = os.path.dirname(os.path.abspath(__file__)) -SCRIPT_NAME = os.path.splitext(os.path.basename(__file__))[0] -BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"] -REVISIONS = ["issue914-base", "issue914-v3"] -BUILDS = ["release"] -CONFIG_NICKS = [ - ('dfp-b50k-t900', ['--search', 'astar(merge_and_shrink(merge_strategy=merge_stateless(merge_selector=score_based_filtering(scoring_functions=[goal_relevance,dfp,total_order])),shrink_strategy=shrink_bisimulation(greedy=false),label_reduction=exact(before_shrinking=true,before_merging=false),max_states=50000,threshold_before_merge=1,main_loop_max_time=900))']), - ('rl-b50k-t900', ['--search', 
'astar(merge_and_shrink(merge_strategy=merge_precomputed(merge_tree=linear(variable_order=reverse_level)),shrink_strategy=shrink_bisimulation(greedy=false),label_reduction=exact(before_shrinking=true,before_merging=false),max_states=50000,threshold_before_merge=1,main_loop_max_time=900))']), - ('sccs-dfp-b50k-t900', ['--search', 'astar(merge_and_shrink(shrink_strategy=shrink_bisimulation(greedy=false),merge_strategy=merge_sccs(order_of_sccs=topological,merge_selector=score_based_filtering(scoring_functions=[goal_relevance,dfp,total_order(atomic_ts_order=reverse_level,product_ts_order=new_to_old,atomic_before_product=false)])),label_reduction=exact(before_shrinking=true,before_merging=false),max_states=50000,threshold_before_merge=1,main_loop_max_time=900))']), -] -CONFIGS = [ - IssueConfig( - config_nick, - config, - build_options=[build], - driver_options=["--build", build]) - for build in BUILDS - for config_nick, config in CONFIG_NICKS -] - -SUITE = common_setup.DEFAULT_OPTIMAL_SUITE -ENVIRONMENT = BaselSlurmEnvironment( - partition="infai_1", - email="silvan.sievers@unibas.ch", - export=["PATH", "DOWNWARD_BENCHMARKS"]) - -if common_setup.is_test_run(): - SUITE = IssueExperiment.DEFAULT_TEST_SUITE - ENVIRONMENT = LocalEnvironment(processes=4) - -exp = IssueExperiment( - revisions=REVISIONS, - configs=CONFIGS, - environment=ENVIRONMENT, -) -exp.add_suite(BENCHMARKS_DIR, SUITE) - -exp.add_parser(exp.EXITCODE_PARSER) -exp.add_parser(exp.TRANSLATOR_PARSER) -exp.add_parser(exp.SINGLE_SEARCH_PARSER) -exp.add_parser(exp.PLANNER_PARSER) -exp.add_parser('ms-parser.py') - -exp.add_step('build', exp.build) -exp.add_step('start', exp.start_runs) -exp.add_fetcher(name='fetch') - -# planner outcome attributes -perfect_heuristic = Attribute('perfect_heuristic', absolute=True, min_wins=False) - -# m&s attributes -ms_construction_time = Attribute('ms_construction_time', absolute=False, min_wins=True, functions=[geometric_mean]) -ms_atomic_construction_time = 
Attribute('ms_atomic_construction_time', absolute=False, min_wins=True, functions=[geometric_mean]) -ms_abstraction_constructed = Attribute('ms_abstraction_constructed', absolute=True, min_wins=False) -ms_atomic_fts_constructed = Attribute('ms_atomic_fts_constructed', absolute=True, min_wins=False) -ms_out_of_memory = Attribute('ms_out_of_memory', absolute=True, min_wins=True) -ms_out_of_time = Attribute('ms_out_of_time', absolute=True, min_wins=True) -search_out_of_memory = Attribute('search_out_of_memory', absolute=True, min_wins=True) -search_out_of_time = Attribute('search_out_of_time', absolute=True, min_wins=True) - -extra_attributes = [ - perfect_heuristic, - - ms_construction_time, - ms_atomic_construction_time, - ms_abstraction_constructed, - ms_atomic_fts_constructed, - ms_out_of_memory, - ms_out_of_time, - search_out_of_memory, - search_out_of_time, -] -attributes = exp.DEFAULT_TABLE_ATTRIBUTES -attributes.extend(extra_attributes) - -exp.add_comparison_table_step(attributes=attributes) - -exp.run_steps() diff --git a/experiments/issue914/v4.py b/experiments/issue914/v4.py deleted file mode 100755 index 36f0a1e7bb..0000000000 --- a/experiments/issue914/v4.py +++ /dev/null @@ -1,99 +0,0 @@ -#! 
/usr/bin/env python -# -*- coding: utf-8 -*- - -import itertools -import os - -from lab.environments import LocalEnvironment, BaselSlurmEnvironment -from lab.reports import Attribute, geometric_mean - -from downward.reports.compare import ComparativeReport - -import common_setup -from common_setup import IssueConfig, IssueExperiment -from relativescatter import RelativeScatterPlotReport - -DIR = os.path.dirname(os.path.abspath(__file__)) -SCRIPT_NAME = os.path.splitext(os.path.basename(__file__))[0] -BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"] -REVISIONS = ["issue914-base", "issue914-v4"] -BUILDS = ["release"] -CONFIG_NICKS = [ - ('dfp-b50k-t900', ['--search', 'astar(merge_and_shrink(merge_strategy=merge_stateless(merge_selector=score_based_filtering(scoring_functions=[goal_relevance,dfp,total_order])),shrink_strategy=shrink_bisimulation(greedy=false),label_reduction=exact(before_shrinking=true,before_merging=false),max_states=50000,threshold_before_merge=1,main_loop_max_time=900))']), - ('rl-b50k-t900', ['--search', 'astar(merge_and_shrink(merge_strategy=merge_precomputed(merge_tree=linear(variable_order=reverse_level)),shrink_strategy=shrink_bisimulation(greedy=false),label_reduction=exact(before_shrinking=true,before_merging=false),max_states=50000,threshold_before_merge=1,main_loop_max_time=900))']), - ('sccs-dfp-b50k-t900', ['--search', 'astar(merge_and_shrink(shrink_strategy=shrink_bisimulation(greedy=false),merge_strategy=merge_sccs(order_of_sccs=topological,merge_selector=score_based_filtering(scoring_functions=[goal_relevance,dfp,total_order(atomic_ts_order=reverse_level,product_ts_order=new_to_old,atomic_before_product=false)])),label_reduction=exact(before_shrinking=true,before_merging=false),max_states=50000,threshold_before_merge=1,main_loop_max_time=900))']), -] -CONFIGS = [ - IssueConfig( - config_nick, - config, - build_options=[build], - driver_options=["--build", build]) - for build in BUILDS - for config_nick, config in CONFIG_NICKS -] - 
-SUITE = common_setup.DEFAULT_OPTIMAL_SUITE -ENVIRONMENT = BaselSlurmEnvironment( - partition="infai_1", - email="silvan.sievers@unibas.ch", - export=["PATH", "DOWNWARD_BENCHMARKS"]) - -if common_setup.is_test_run(): - SUITE = IssueExperiment.DEFAULT_TEST_SUITE - ENVIRONMENT = LocalEnvironment(processes=4) - -exp = IssueExperiment( - revisions=REVISIONS, - configs=CONFIGS, - environment=ENVIRONMENT, -) -exp.add_suite(BENCHMARKS_DIR, SUITE) - -exp.add_parser(exp.EXITCODE_PARSER) -exp.add_parser(exp.TRANSLATOR_PARSER) -exp.add_parser(exp.SINGLE_SEARCH_PARSER) -exp.add_parser(exp.PLANNER_PARSER) -exp.add_parser('ms-parser.py') - -exp.add_step('build', exp.build) -exp.add_step('start', exp.start_runs) -exp.add_fetcher(name='fetch') - -# planner outcome attributes -perfect_heuristic = Attribute('perfect_heuristic', absolute=True, min_wins=False) - -# m&s attributes -ms_construction_time = Attribute('ms_construction_time', absolute=False, min_wins=True, functions=[geometric_mean]) -ms_atomic_construction_time = Attribute('ms_atomic_construction_time', absolute=False, min_wins=True, functions=[geometric_mean]) -ms_abstraction_constructed = Attribute('ms_abstraction_constructed', absolute=True, min_wins=False) -ms_atomic_fts_constructed = Attribute('ms_atomic_fts_constructed', absolute=True, min_wins=False) -ms_out_of_memory = Attribute('ms_out_of_memory', absolute=True, min_wins=True) -ms_out_of_time = Attribute('ms_out_of_time', absolute=True, min_wins=True) -ms_memory_delta = Attribute('ms_memory_delta', absolute=False, min_wins=True) -ms_num_remaining_factors = Attribute('ms_num_remaining_factors', absolute=False, min_wins=False) -ms_num_factors_kept = Attribute('ms_num_factors_kept', absolute=False, min_wins=False) -search_out_of_memory = Attribute('search_out_of_memory', absolute=True, min_wins=True) -search_out_of_time = Attribute('search_out_of_time', absolute=True, min_wins=True) - -extra_attributes = [ - perfect_heuristic, - - ms_construction_time, - 
ms_atomic_construction_time, - ms_abstraction_constructed, - ms_atomic_fts_constructed, - ms_out_of_memory, - ms_out_of_time, - ms_memory_delta, - ms_num_remaining_factors, - ms_num_factors_kept, - search_out_of_memory, - search_out_of_time, -] -attributes = exp.DEFAULT_TABLE_ATTRIBUTES -attributes.extend(extra_attributes) - -exp.add_comparison_table_step(attributes=attributes) - -exp.run_steps() diff --git a/experiments/issue915/common_setup.py b/experiments/issue915/common_setup.py deleted file mode 100644 index 47a24652e0..0000000000 --- a/experiments/issue915/common_setup.py +++ /dev/null @@ -1,393 +0,0 @@ -# -*- coding: utf-8 -*- - -import itertools -import os -import platform -import subprocess -import sys - -from lab.experiment import ARGPARSER -from lab import tools - -from downward.experiment import FastDownwardExperiment -from downward.reports.absolute import AbsoluteReport -from downward.reports.compare import ComparativeReport -from downward.reports.scatter import ScatterPlotReport - -from relativescatter import RelativeScatterPlotReport - - -def parse_args(): - ARGPARSER.add_argument( - "--test", - choices=["yes", "no", "auto"], - default="auto", - dest="test_run", - help="test experiment locally on a small suite if --test=yes or " - "--test=auto and we are not on a cluster") - return ARGPARSER.parse_args() - -ARGS = parse_args() - - -DEFAULT_OPTIMAL_SUITE = [ - 'agricola-opt18-strips', 'airport', 'barman-opt11-strips', - 'barman-opt14-strips', 'blocks', 'childsnack-opt14-strips', - 'data-network-opt18-strips', 'depot', 'driverlog', - 'elevators-opt08-strips', 'elevators-opt11-strips', - 'floortile-opt11-strips', 'floortile-opt14-strips', 'freecell', - 'ged-opt14-strips', 'grid', 'gripper', 'hiking-opt14-strips', - 'logistics00', 'logistics98', 'miconic', 'movie', 'mprime', - 'mystery', 'nomystery-opt11-strips', 'openstacks-opt08-strips', - 'openstacks-opt11-strips', 'openstacks-opt14-strips', - 'openstacks-strips', 'organic-synthesis-opt18-strips', - 
'organic-synthesis-split-opt18-strips', 'parcprinter-08-strips', - 'parcprinter-opt11-strips', 'parking-opt11-strips', - 'parking-opt14-strips', 'pathways-noneg', 'pegsol-08-strips', - 'pegsol-opt11-strips', 'petri-net-alignment-opt18-strips', - 'pipesworld-notankage', 'pipesworld-tankage', 'psr-small', 'rovers', - 'satellite', 'scanalyzer-08-strips', 'scanalyzer-opt11-strips', - 'snake-opt18-strips', 'sokoban-opt08-strips', - 'sokoban-opt11-strips', 'spider-opt18-strips', 'storage', - 'termes-opt18-strips', 'tetris-opt14-strips', - 'tidybot-opt11-strips', 'tidybot-opt14-strips', 'tpp', - 'transport-opt08-strips', 'transport-opt11-strips', - 'transport-opt14-strips', 'trucks-strips', 'visitall-opt11-strips', - 'visitall-opt14-strips', 'woodworking-opt08-strips', - 'woodworking-opt11-strips', 'zenotravel'] - -DEFAULT_SATISFICING_SUITE = [ - 'agricola-sat18-strips', 'airport', 'assembly', - 'barman-sat11-strips', 'barman-sat14-strips', 'blocks', - 'caldera-sat18-adl', 'caldera-split-sat18-adl', 'cavediving-14-adl', - 'childsnack-sat14-strips', 'citycar-sat14-adl', - 'data-network-sat18-strips', 'depot', 'driverlog', - 'elevators-sat08-strips', 'elevators-sat11-strips', - 'flashfill-sat18-adl', 'floortile-sat11-strips', - 'floortile-sat14-strips', 'freecell', 'ged-sat14-strips', 'grid', - 'gripper', 'hiking-sat14-strips', 'logistics00', 'logistics98', - 'maintenance-sat14-adl', 'miconic', 'miconic-fulladl', - 'miconic-simpleadl', 'movie', 'mprime', 'mystery', - 'nomystery-sat11-strips', 'nurikabe-sat18-adl', 'openstacks', - 'openstacks-sat08-adl', 'openstacks-sat08-strips', - 'openstacks-sat11-strips', 'openstacks-sat14-strips', - 'openstacks-strips', 'optical-telegraphs', - 'organic-synthesis-sat18-strips', - 'organic-synthesis-split-sat18-strips', 'parcprinter-08-strips', - 'parcprinter-sat11-strips', 'parking-sat11-strips', - 'parking-sat14-strips', 'pathways', 'pathways-noneg', - 'pegsol-08-strips', 'pegsol-sat11-strips', 'philosophers', - 'pipesworld-notankage', 
'pipesworld-tankage', 'psr-large', - 'psr-middle', 'psr-small', 'rovers', 'satellite', - 'scanalyzer-08-strips', 'scanalyzer-sat11-strips', 'schedule', - 'settlers-sat18-adl', 'snake-sat18-strips', 'sokoban-sat08-strips', - 'sokoban-sat11-strips', 'spider-sat18-strips', 'storage', - 'termes-sat18-strips', 'tetris-sat14-strips', - 'thoughtful-sat14-strips', 'tidybot-sat11-strips', 'tpp', - 'transport-sat08-strips', 'transport-sat11-strips', - 'transport-sat14-strips', 'trucks', 'trucks-strips', - 'visitall-sat11-strips', 'visitall-sat14-strips', - 'woodworking-sat08-strips', 'woodworking-sat11-strips', - 'zenotravel'] - - -def get_script(): - """Get file name of main script.""" - return tools.get_script_path() - - -def get_script_dir(): - """Get directory of main script. - - Usually a relative directory (depends on how it was called by the user.)""" - return os.path.dirname(get_script()) - - -def get_experiment_name(): - """Get name for experiment. - - Derived from the absolute filename of the main script, e.g. - "/ham/spam/eggs.py" => "spam-eggs".""" - script = os.path.abspath(get_script()) - script_dir = os.path.basename(os.path.dirname(script)) - script_base = os.path.splitext(os.path.basename(script))[0] - return "%s-%s" % (script_dir, script_base) - - -def get_data_dir(): - """Get data dir for the experiment. - - This is the subdirectory "data" of the directory containing - the main script.""" - return os.path.join(get_script_dir(), "data", get_experiment_name()) - - -def get_repo_base(): - """Get base directory of the repository, as an absolute path. - - Search upwards in the directory tree from the main script until a - directory with a subdirectory named ".hg" is found. 
- - Abort if the repo base cannot be found.""" - path = os.path.abspath(get_script_dir()) - while os.path.dirname(path) != path: - if os.path.exists(os.path.join(path, ".hg")): - return path - path = os.path.dirname(path) - sys.exit("repo base could not be found") - - -def is_running_on_cluster(): - node = platform.node() - return node.endswith(".scicore.unibas.ch") or node.endswith(".cluster.bc2.ch") - - -def is_test_run(): - return ARGS.test_run == "yes" or ( - ARGS.test_run == "auto" and not is_running_on_cluster()) - - -def get_algo_nick(revision, config_nick): - return "{revision}-{config_nick}".format(**locals()) - - -class IssueConfig(object): - """Hold information about a planner configuration. - - See FastDownwardExperiment.add_algorithm() for documentation of the - constructor's options. - - """ - def __init__(self, nick, component_options, - build_options=None, driver_options=None): - self.nick = nick - self.component_options = component_options - self.build_options = build_options - self.driver_options = driver_options - - -class IssueExperiment(FastDownwardExperiment): - """Subclass of FastDownwardExperiment with some convenience features.""" - - DEFAULT_TEST_SUITE = ["depot:p01.pddl", "gripper:prob01.pddl"] - - DEFAULT_TABLE_ATTRIBUTES = [ - "cost", - "coverage", - "error", - "evaluations", - "expansions", - "expansions_until_last_jump", - "generated", - "memory", - "planner_memory", - "planner_time", - "quality", - "run_dir", - "score_evaluations", - "score_expansions", - "score_generated", - "score_memory", - "score_search_time", - "score_total_time", - "search_time", - "total_time", - ] - - DEFAULT_SCATTER_PLOT_ATTRIBUTES = [ - "evaluations", - "expansions", - "expansions_until_last_jump", - "initial_h_value", - "memory", - "search_time", - "total_time", - ] - - PORTFOLIO_ATTRIBUTES = [ - "cost", - "coverage", - "error", - "plan_length", - "run_dir", - ] - - def __init__(self, revisions=None, configs=None, path=None, **kwargs): - """ - - You can 
either specify both *revisions* and *configs* or none - of them. If they are omitted, you will need to call - exp.add_algorithm() manually. - - If *revisions* is given, it must be a non-empty list of - revision identifiers, which specify which planner versions to - use in the experiment. The same versions are used for - translator, preprocessor and search. :: - - IssueExperiment(revisions=["issue123", "4b3d581643"], ...) - - If *configs* is given, it must be a non-empty list of - IssueConfig objects. :: - - IssueExperiment(..., configs=[ - IssueConfig("ff", ["--search", "eager_greedy(ff())"]), - IssueConfig( - "lama", [], - driver_options=["--alias", "seq-sat-lama-2011"]), - ]) - - If *path* is specified, it must be the path to where the - experiment should be built (e.g. - /home/john/experiments/issue123/exp01/). If omitted, the - experiment path is derived automatically from the main - script's filename. Example:: - - script = experiments/issue123/exp01.py --> - path = experiments/issue123/data/issue123-exp01/ - - """ - - path = path or get_data_dir() - - FastDownwardExperiment.__init__(self, path=path, **kwargs) - - if (revisions and not configs) or (not revisions and configs): - raise ValueError( - "please provide either both or none of revisions and configs") - - for rev in revisions: - for config in configs: - self.add_algorithm( - get_algo_nick(rev, config.nick), - get_repo_base(), - rev, - config.component_options, - build_options=config.build_options, - driver_options=config.driver_options) - - self._revisions = revisions - self._configs = configs - - @classmethod - def _is_portfolio(cls, config_nick): - return "fdss" in config_nick - - @classmethod - def get_supported_attributes(cls, config_nick, attributes): - if cls._is_portfolio(config_nick): - return [attr for attr in attributes - if attr in cls.PORTFOLIO_ATTRIBUTES] - return attributes - - def add_absolute_report_step(self, **kwargs): - """Add step that makes an absolute report. 
- - Absolute reports are useful for experiments that don't compare - revisions. - - The report is written to the experiment evaluation directory. - - All *kwargs* will be passed to the AbsoluteReport class. If the - keyword argument *attributes* is not specified, a default list - of attributes is used. :: - - exp.add_absolute_report_step(attributes=["coverage"]) - - """ - kwargs.setdefault("attributes", self.DEFAULT_TABLE_ATTRIBUTES) - report = AbsoluteReport(**kwargs) - outfile = os.path.join( - self.eval_dir, - get_experiment_name() + "." + report.output_format) - self.add_report(report, outfile=outfile) - self.add_step( - 'publish-absolute-report', subprocess.call, ['publish', outfile]) - - def add_comparison_table_step(self, **kwargs): - """Add a step that makes pairwise revision comparisons. - - Create comparative reports for all pairs of Fast Downward - revisions. Each report pairs up the runs of the same config and - lists the two absolute attribute values and their difference - for all attributes in kwargs["attributes"]. - - All *kwargs* will be passed to the CompareConfigsReport class. - If the keyword argument *attributes* is not specified, a - default list of attributes is used. 
:: - - exp.add_comparison_table_step(attributes=["coverage"]) - - """ - kwargs.setdefault("attributes", self.DEFAULT_TABLE_ATTRIBUTES) - - def make_comparison_tables(): - for rev1, rev2 in itertools.combinations(self._revisions, 2): - compared_configs = [] - for config in self._configs: - config_nick = config.nick - compared_configs.append( - ("%s-%s" % (rev1, config_nick), - "%s-%s" % (rev2, config_nick), - "Diff (%s)" % config_nick)) - report = ComparativeReport(compared_configs, **kwargs) - outfile = os.path.join( - self.eval_dir, - "%s-%s-%s-compare.%s" % ( - self.name, rev1, rev2, report.output_format)) - report(self.eval_dir, outfile) - - def publish_comparison_tables(): - for rev1, rev2 in itertools.combinations(self._revisions, 2): - outfile = os.path.join( - self.eval_dir, - "%s-%s-%s-compare.html" % (self.name, rev1, rev2)) - subprocess.call(["publish", outfile]) - - self.add_step("make-comparison-tables", make_comparison_tables) - self.add_step( - "publish-comparison-tables", publish_comparison_tables) - - def add_scatter_plot_step(self, relative=False, attributes=None): - """Add step creating (relative) scatter plots for all revision pairs. - - Create a scatter plot for each combination of attribute, - configuration and revisions pair. If *attributes* is not - specified, a list of common scatter plot attributes is used. - For portfolios all attributes except "cost", "coverage" and - "plan_length" will be ignored. 
:: - - exp.add_scatter_plot_step(attributes=["expansions"]) - - """ - if relative: - report_class = RelativeScatterPlotReport - scatter_dir = os.path.join(self.eval_dir, "scatter-relative") - step_name = "make-relative-scatter-plots" - else: - report_class = ScatterPlotReport - scatter_dir = os.path.join(self.eval_dir, "scatter-absolute") - step_name = "make-absolute-scatter-plots" - if attributes is None: - attributes = self.DEFAULT_SCATTER_PLOT_ATTRIBUTES - - def make_scatter_plot(config_nick, rev1, rev2, attribute): - name = "-".join([self.name, rev1, rev2, attribute, config_nick]) - print("Make scatter plot for", name) - algo1 = get_algo_nick(rev1, config_nick) - algo2 = get_algo_nick(rev2, config_nick) - report = report_class( - filter_algorithm=[algo1, algo2], - attributes=[attribute], - get_category=lambda run1, run2: run1["domain"]) - report( - self.eval_dir, - os.path.join(scatter_dir, rev1 + "-" + rev2, name)) - - def make_scatter_plots(): - for config in self._configs: - for rev1, rev2 in itertools.combinations(self._revisions, 2): - for attribute in self.get_supported_attributes( - config.nick, attributes): - make_scatter_plot(config.nick, rev1, rev2, attribute) - - self.add_step(step_name, make_scatter_plots) diff --git a/experiments/issue915/ms-parser.py b/experiments/issue915/ms-parser.py deleted file mode 100755 index b1c77c1645..0000000000 --- a/experiments/issue915/ms-parser.py +++ /dev/null @@ -1,70 +0,0 @@ -#! 
/usr/bin/env python - -from lab.parser import Parser - -parser = Parser() -parser.add_pattern('ms_construction_time', 'Merge-and-shrink algorithm runtime: (.+)s', required=False, type=float) -parser.add_pattern('ms_atomic_construction_time', 'M&S algorithm timer: (.+)s \(after computation of atomic factors\)', required=False, type=float) -parser.add_pattern('ms_memory_delta', 'Final peak memory increase of merge-and-shrink algorithm: (\d+) KB', required=False, type=int) - -def check_ms_constructed(content, props): - ms_construction_time = props.get('ms_construction_time') - abstraction_constructed = False - if ms_construction_time is not None: - abstraction_constructed = True - props['ms_abstraction_constructed'] = abstraction_constructed - -parser.add_function(check_ms_constructed) - -def check_atomic_fts_constructed(content, props): - ms_atomic_construction_time = props.get('ms_atomic_construction_time') - ms_atomic_fts_constructed = False - if ms_atomic_construction_time is not None: - ms_atomic_fts_constructed = True - props['ms_atomic_fts_constructed'] = ms_atomic_fts_constructed - -parser.add_function(check_atomic_fts_constructed) - -def check_planner_exit_reason(content, props): - ms_abstraction_constructed = props.get('ms_abstraction_constructed') - error = props.get('error') - if error != 'success' and error != 'search-out-of-time' and error != 'search-out-of-memory': - print 'error: %s' % error - return - - # Check whether merge-and-shrink computation or search ran out of - # time or memory. 
- ms_out_of_time = False - ms_out_of_memory = False - search_out_of_time = False - search_out_of_memory = False - if ms_abstraction_constructed == False: - if error == 'search-out-of-time': - ms_out_of_time = True - elif error == 'search-out-of-memory': - ms_out_of_memory = True - elif ms_abstraction_constructed == True: - if error == 'search-out-of-time': - search_out_of_time = True - elif error == 'search-out-of-memory': - search_out_of_memory = True - search_out_of_memory = True - props['ms_out_of_time'] = ms_out_of_time - props['ms_out_of_memory'] = ms_out_of_memory - props['search_out_of_time'] = search_out_of_time - props['search_out_of_memory'] = search_out_of_memory - -parser.add_function(check_planner_exit_reason) - -def check_perfect_heuristic(content, props): - plan_length = props.get('plan_length') - expansions = props.get('expansions') - if plan_length != None: - perfect_heuristic = False - if plan_length + 1 == expansions: - perfect_heuristic = True - props['perfect_heuristic'] = perfect_heuristic - -parser.add_function(check_perfect_heuristic) - -parser.parse() diff --git a/experiments/issue915/relativescatter.py b/experiments/issue915/relativescatter.py deleted file mode 100644 index f74cb6e721..0000000000 --- a/experiments/issue915/relativescatter.py +++ /dev/null @@ -1,105 +0,0 @@ -# -*- coding: utf-8 -*- - -from collections import defaultdict - -from matplotlib import ticker - -from downward.reports.scatter import ScatterPlotReport -from downward.reports.plot import PlotReport, Matplotlib, MatplotlibPlot - - -# TODO: handle outliers - -# TODO: this is mostly copied from ScatterMatplotlib (scatter.py) -class RelativeScatterMatplotlib(Matplotlib): - @classmethod - def _plot(cls, report, axes, categories, styles): - # Display grid - axes.grid(b=True, linestyle='-', color='0.75') - - has_points = False - # Generate the scatter plots - for category, coords in sorted(categories.items()): - X, Y = zip(*coords) - axes.scatter(X, Y, s=42, label=category, 
**styles[category]) - if X and Y: - has_points = True - - if report.xscale == 'linear' or report.yscale == 'linear': - plot_size = report.missing_val * 1.01 - else: - plot_size = report.missing_val * 1.25 - - # make 5 ticks above and below 1 - yticks = [] - tick_step = report.ylim_top**(1/5.0) - for i in xrange(-5, 6): - yticks.append(tick_step**i) - axes.set_yticks(yticks) - axes.get_yaxis().set_major_formatter(ticker.ScalarFormatter()) - - axes.set_xlim(report.xlim_left or -1, report.xlim_right or plot_size) - axes.set_ylim(report.ylim_bottom or -1, report.ylim_top or plot_size) - - for axis in [axes.xaxis, axes.yaxis]: - MatplotlibPlot.change_axis_formatter( - axis, - report.missing_val if report.show_missing else None) - return has_points - - -class RelativeScatterPlotReport(ScatterPlotReport): - """ - Generate a scatter plot that shows a relative comparison of two - algorithms with regard to the given attribute. The attribute value - of algorithm 1 is shown on the x-axis and the relation to the value - of algorithm 2 on the y-axis. - """ - - def __init__(self, show_missing=True, get_category=None, **kwargs): - ScatterPlotReport.__init__(self, show_missing, get_category, **kwargs) - if self.output_format == 'tex': - raise "not supported" - else: - self.writer = RelativeScatterMatplotlib - - def _fill_categories(self, runs): - # We discard the *runs* parameter. 
- # Map category names to value tuples - categories = defaultdict(list) - self.ylim_bottom = 2 - self.ylim_top = 0.5 - self.xlim_left = float("inf") - for (domain, problem), runs in self.problem_runs.items(): - if len(runs) != 2: - continue - run1, run2 = runs - assert (run1['algorithm'] == self.algorithms[0] and - run2['algorithm'] == self.algorithms[1]) - val1 = run1.get(self.attribute) - val2 = run2.get(self.attribute) - if val1 is None or val2 is None: - continue - category = self.get_category(run1, run2) - assert val1 > 0, (domain, problem, self.algorithms[0], val1) - assert val2 > 0, (domain, problem, self.algorithms[1], val2) - x = val1 - y = val2 / float(val1) - - categories[category].append((x, y)) - - self.ylim_top = max(self.ylim_top, y) - self.ylim_bottom = min(self.ylim_bottom, y) - self.xlim_left = min(self.xlim_left, x) - - # center around 1 - if self.ylim_bottom < 1: - self.ylim_top = max(self.ylim_top, 1 / float(self.ylim_bottom)) - if self.ylim_top > 1: - self.ylim_bottom = min(self.ylim_bottom, 1 / float(self.ylim_top)) - return categories - - def _set_scales(self, xscale, yscale): - # ScatterPlot uses log-scaling on the x-axis by default. - PlotReport._set_scales( - self, xscale or self.attribute.scale or 'log', 'log') diff --git a/experiments/issue915/v1.py b/experiments/issue915/v1.py deleted file mode 100755 index 801812baf1..0000000000 --- a/experiments/issue915/v1.py +++ /dev/null @@ -1,93 +0,0 @@ -#! 
/usr/bin/env python -# -*- coding: utf-8 -*- - -import itertools -import os - -from lab.environments import LocalEnvironment, BaselSlurmEnvironment -from lab.reports import Attribute, geometric_mean - -from downward.reports.compare import ComparativeReport - -import common_setup -from common_setup import IssueConfig, IssueExperiment -from relativescatter import RelativeScatterPlotReport - -DIR = os.path.dirname(os.path.abspath(__file__)) -SCRIPT_NAME = os.path.splitext(os.path.basename(__file__))[0] -BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"] -REVISIONS = ["issue915-v1"] -BUILDS = ["debug"] -CONFIG_NICKS = [ - ('b50k-dfp-t900', ['--search', 'astar(merge_and_shrink(merge_strategy=merge_stateless(merge_selector=score_based_filtering(scoring_functions=[goal_relevance,dfp,total_order])),shrink_strategy=shrink_bisimulation(greedy=false),label_reduction=exact(before_shrinking=true,before_merging=false),max_states=50000,threshold_before_merge=1,main_loop_max_time=900))']), - ('b50k-rl-t900', ['--search', 'astar(merge_and_shrink(merge_strategy=merge_precomputed(merge_tree=linear(variable_order=reverse_level)),shrink_strategy=shrink_bisimulation(greedy=false),label_reduction=exact(before_shrinking=true,before_merging=false),max_states=50000,threshold_before_merge=1,main_loop_max_time=900))']), - ('b50k-sccs-dfp-t900', ['--search', 'astar(merge_and_shrink(shrink_strategy=shrink_bisimulation(greedy=false),merge_strategy=merge_sccs(order_of_sccs=topological,merge_selector=score_based_filtering(scoring_functions=[goal_relevance,dfp,total_order(atomic_ts_order=reverse_level,product_ts_order=new_to_old,atomic_before_product=false)])),label_reduction=exact(before_shrinking=true,before_merging=false),max_states=50000,threshold_before_merge=1,main_loop_max_time=900))']), -] -CONFIGS = [ - IssueConfig( - config_nick, - config, - build_options=[build], - driver_options=["--build", build]) - for build in BUILDS - for config_nick, config in CONFIG_NICKS -] - -SUITE = 
common_setup.DEFAULT_OPTIMAL_SUITE -ENVIRONMENT = BaselSlurmEnvironment( - partition="infai_2", - email="silvan.sievers@unibas.ch", - export=["PATH", "DOWNWARD_BENCHMARKS"]) - -if common_setup.is_test_run(): - SUITE = IssueExperiment.DEFAULT_TEST_SUITE - ENVIRONMENT = LocalEnvironment(processes=4) - -exp = IssueExperiment( - revisions=REVISIONS, - configs=CONFIGS, - environment=ENVIRONMENT, -) -exp.add_suite(BENCHMARKS_DIR, SUITE) - -exp.add_parser(exp.EXITCODE_PARSER) -exp.add_parser(exp.TRANSLATOR_PARSER) -exp.add_parser(exp.SINGLE_SEARCH_PARSER) -exp.add_parser(exp.PLANNER_PARSER) -exp.add_parser('ms-parser.py') - -exp.add_step('build', exp.build) -exp.add_step('start', exp.start_runs) -exp.add_fetcher(name='fetch') - -# planner outcome attributes -perfect_heuristic = Attribute('perfect_heuristic', absolute=True, min_wins=False) - -# m&s attributes -ms_construction_time = Attribute('ms_construction_time', absolute=False, min_wins=True, functions=[geometric_mean]) -ms_atomic_construction_time = Attribute('ms_atomic_construction_time', absolute=False, min_wins=True, functions=[geometric_mean]) -ms_abstraction_constructed = Attribute('ms_abstraction_constructed', absolute=True, min_wins=False) -ms_atomic_fts_constructed = Attribute('ms_atomic_fts_constructed', absolute=True, min_wins=False) -ms_out_of_memory = Attribute('ms_out_of_memory', absolute=True, min_wins=True) -ms_out_of_time = Attribute('ms_out_of_time', absolute=True, min_wins=True) -search_out_of_memory = Attribute('search_out_of_memory', absolute=True, min_wins=True) -search_out_of_time = Attribute('search_out_of_time', absolute=True, min_wins=True) - -extra_attributes = [ - perfect_heuristic, - - ms_construction_time, - ms_atomic_construction_time, - ms_abstraction_constructed, - ms_atomic_fts_constructed, - ms_out_of_memory, - ms_out_of_time, - search_out_of_memory, - search_out_of_time, -] -attributes = exp.DEFAULT_TABLE_ATTRIBUTES -attributes.extend(extra_attributes) - 
-exp.add_absolute_report_step(attributes=attributes) - -exp.run_steps() diff --git a/experiments/issue919/common_setup.py b/experiments/issue919/common_setup.py deleted file mode 100644 index d3ef801cba..0000000000 --- a/experiments/issue919/common_setup.py +++ /dev/null @@ -1,401 +0,0 @@ -# -*- coding: utf-8 -*- - -import itertools -import os -import platform -import subprocess -import sys - -from lab.experiment import ARGPARSER -from lab import tools - -from downward.experiment import FastDownwardExperiment -from downward.reports.absolute import AbsoluteReport -from downward.reports.compare import ComparativeReport -from downward.reports.scatter import ScatterPlotReport - -from relativescatter import RelativeScatterPlotReport - - -def parse_args(): - ARGPARSER.add_argument( - "--test", - choices=["yes", "no", "auto"], - default="auto", - dest="test_run", - help="test experiment locally on a small suite if --test=yes or " - "--test=auto and we are not on a cluster") - return ARGPARSER.parse_args() - -ARGS = parse_args() - - -DEFAULT_OPTIMAL_SUITE = [ - 'agricola-opt18-strips', 'airport', 'barman-opt11-strips', - 'barman-opt14-strips', 'blocks', 'childsnack-opt14-strips', - 'data-network-opt18-strips', 'depot', 'driverlog', - 'elevators-opt08-strips', 'elevators-opt11-strips', - 'floortile-opt11-strips', 'floortile-opt14-strips', 'freecell', - 'ged-opt14-strips', 'grid', 'gripper', 'hiking-opt14-strips', - 'logistics00', 'logistics98', 'miconic', 'movie', 'mprime', - 'mystery', 'nomystery-opt11-strips', 'openstacks-opt08-strips', - 'openstacks-opt11-strips', 'openstacks-opt14-strips', - 'openstacks-strips', 'organic-synthesis-opt18-strips', - 'organic-synthesis-split-opt18-strips', 'parcprinter-08-strips', - 'parcprinter-opt11-strips', 'parking-opt11-strips', - 'parking-opt14-strips', 'pathways-noneg', 'pegsol-08-strips', - 'pegsol-opt11-strips', 'petri-net-alignment-opt18-strips', - 'pipesworld-notankage', 'pipesworld-tankage', 'psr-small', 'rovers', - 
'satellite', 'scanalyzer-08-strips', 'scanalyzer-opt11-strips', - 'snake-opt18-strips', 'sokoban-opt08-strips', - 'sokoban-opt11-strips', 'spider-opt18-strips', 'storage', - 'termes-opt18-strips', 'tetris-opt14-strips', - 'tidybot-opt11-strips', 'tidybot-opt14-strips', 'tpp', - 'transport-opt08-strips', 'transport-opt11-strips', - 'transport-opt14-strips', 'trucks-strips', 'visitall-opt11-strips', - 'visitall-opt14-strips', 'woodworking-opt08-strips', - 'woodworking-opt11-strips', 'zenotravel'] - -DEFAULT_SATISFICING_SUITE = [ - 'agricola-sat18-strips', 'airport', 'assembly', - 'barman-sat11-strips', 'barman-sat14-strips', 'blocks', - 'caldera-sat18-adl', 'caldera-split-sat18-adl', 'cavediving-14-adl', - 'childsnack-sat14-strips', 'citycar-sat14-adl', - 'data-network-sat18-strips', 'depot', 'driverlog', - 'elevators-sat08-strips', 'elevators-sat11-strips', - 'flashfill-sat18-adl', 'floortile-sat11-strips', - 'floortile-sat14-strips', 'freecell', 'ged-sat14-strips', 'grid', - 'gripper', 'hiking-sat14-strips', 'logistics00', 'logistics98', - 'maintenance-sat14-adl', 'miconic', 'miconic-fulladl', - 'miconic-simpleadl', 'movie', 'mprime', 'mystery', - 'nomystery-sat11-strips', 'nurikabe-sat18-adl', 'openstacks', - 'openstacks-sat08-adl', 'openstacks-sat08-strips', - 'openstacks-sat11-strips', 'openstacks-sat14-strips', - 'openstacks-strips', 'optical-telegraphs', - 'organic-synthesis-sat18-strips', - 'organic-synthesis-split-sat18-strips', 'parcprinter-08-strips', - 'parcprinter-sat11-strips', 'parking-sat11-strips', - 'parking-sat14-strips', 'pathways', 'pathways-noneg', - 'pegsol-08-strips', 'pegsol-sat11-strips', 'philosophers', - 'pipesworld-notankage', 'pipesworld-tankage', 'psr-large', - 'psr-middle', 'psr-small', 'rovers', 'satellite', - 'scanalyzer-08-strips', 'scanalyzer-sat11-strips', 'schedule', - 'settlers-sat18-adl', 'snake-sat18-strips', 'sokoban-sat08-strips', - 'sokoban-sat11-strips', 'spider-sat18-strips', 'storage', - 'termes-sat18-strips', 
'tetris-sat14-strips', - 'thoughtful-sat14-strips', 'tidybot-sat11-strips', 'tpp', - 'transport-sat08-strips', 'transport-sat11-strips', - 'transport-sat14-strips', 'trucks', 'trucks-strips', - 'visitall-sat11-strips', 'visitall-sat14-strips', - 'woodworking-sat08-strips', 'woodworking-sat11-strips', - 'zenotravel'] - - -def get_script(): - """Get file name of main script.""" - return tools.get_script_path() - - -def get_script_dir(): - """Get directory of main script. - - Usually a relative directory (depends on how it was called by the user.)""" - return os.path.dirname(get_script()) - - -def get_experiment_name(): - """Get name for experiment. - - Derived from the absolute filename of the main script, e.g. - "/ham/spam/eggs.py" => "spam-eggs".""" - script = os.path.abspath(get_script()) - script_dir = os.path.basename(os.path.dirname(script)) - script_base = os.path.splitext(os.path.basename(script))[0] - return "%s-%s" % (script_dir, script_base) - - -def get_data_dir(): - """Get data dir for the experiment. - - This is the subdirectory "data" of the directory containing - the main script.""" - return os.path.join(get_script_dir(), "data", get_experiment_name()) - - -def get_repo_base(): - """Get base directory of the repository, as an absolute path. - - Search upwards in the directory tree from the main script until a - directory with a subdirectory named ".hg" is found. 
- - Abort if the repo base cannot be found.""" - path = os.path.abspath(get_script_dir()) - while os.path.dirname(path) != path: - if os.path.exists(os.path.join(path, ".hg")): - return path - path = os.path.dirname(path) - sys.exit("repo base could not be found") - - -def is_running_on_cluster(): - node = platform.node() - return node.endswith(".scicore.unibas.ch") or node.endswith(".cluster.bc2.ch") - - -def is_test_run(): - return ARGS.test_run == "yes" or ( - ARGS.test_run == "auto" and not is_running_on_cluster()) - - -def get_algo_nick(revision, config_nick): - return "{revision}-{config_nick}".format(**locals()) - - -class IssueConfig(object): - """Hold information about a planner configuration. - - See FastDownwardExperiment.add_algorithm() for documentation of the - constructor's options. - - """ - def __init__(self, nick, component_options, - build_options=None, driver_options=None): - self.nick = nick - self.component_options = component_options - self.build_options = build_options - self.driver_options = driver_options - - -class IssueExperiment(FastDownwardExperiment): - """Subclass of FastDownwardExperiment with some convenience features.""" - - DEFAULT_TEST_SUITE = ["depot:p01.pddl", "gripper:prob01.pddl"] - - DEFAULT_TABLE_ATTRIBUTES = [ - "cost", - "coverage", - "error", - "evaluations", - "expansions", - "expansions_until_last_jump", - "generated", - "memory", - "planner_memory", - "planner_time", - "quality", - "run_dir", - "score_evaluations", - "score_expansions", - "score_generated", - "score_memory", - "score_search_time", - "score_total_time", - "search_time", - "total_time", - ] - - DEFAULT_SCATTER_PLOT_ATTRIBUTES = [ - "evaluations", - "expansions", - "expansions_until_last_jump", - "initial_h_value", - "memory", - "search_time", - "total_time", - ] - - PORTFOLIO_ATTRIBUTES = [ - "cost", - "coverage", - "error", - "plan_length", - "run_dir", - ] - - def __init__(self, revisions=None, configs=None, path=None, **kwargs): - """ - - You can 
either specify both *revisions* and *configs* or none - of them. If they are omitted, you will need to call - exp.add_algorithm() manually. - - If *revisions* is given, it must be a non-empty list of - revision identifiers, which specify which planner versions to - use in the experiment. The same versions are used for - translator, preprocessor and search. :: - - IssueExperiment(revisions=["issue123", "4b3d581643"], ...) - - If *configs* is given, it must be a non-empty list of - IssueConfig objects. :: - - IssueExperiment(..., configs=[ - IssueConfig("ff", ["--search", "eager_greedy(ff())"]), - IssueConfig( - "lama", [], - driver_options=["--alias", "seq-sat-lama-2011"]), - ]) - - If *path* is specified, it must be the path to where the - experiment should be built (e.g. - /home/john/experiments/issue123/exp01/). If omitted, the - experiment path is derived automatically from the main - script's filename. Example:: - - script = experiments/issue123/exp01.py --> - path = experiments/issue123/data/issue123-exp01/ - - """ - - path = path or get_data_dir() - - FastDownwardExperiment.__init__(self, path=path, **kwargs) - - if (revisions and not configs) or (not revisions and configs): - raise ValueError( - "please provide either both or none of revisions and configs") - - for rev in revisions: - for config in configs: - self.add_algorithm( - get_algo_nick(rev, config.nick), - get_repo_base(), - rev, - config.component_options, - build_options=config.build_options, - driver_options=config.driver_options) - - self._revisions = revisions - self._configs = configs - - @classmethod - def _is_portfolio(cls, config_nick): - return "fdss" in config_nick - - @classmethod - def get_supported_attributes(cls, config_nick, attributes): - if cls._is_portfolio(config_nick): - return [attr for attr in attributes - if attr in cls.PORTFOLIO_ATTRIBUTES] - return attributes - - def add_absolute_report_step(self, outfile=None, **kwargs): - """Add step that makes an absolute report. 
- - Absolute reports are useful for experiments that don't compare - revisions. - - The report is written to the experiment evaluation directory. - - All *kwargs* will be passed to the AbsoluteReport class. If the - keyword argument *attributes* is not specified, a default list - of attributes is used. :: - - exp.add_absolute_report_step(attributes=["coverage"]) - - """ - kwargs.setdefault("attributes", self.DEFAULT_TABLE_ATTRIBUTES) - report = AbsoluteReport(**kwargs) - outfile = outfile or os.path.join( - self.eval_dir, - get_experiment_name() + "." + report.output_format) - self.add_report(report, outfile=outfile) - self.add_step( - 'publish-' + os.path.basename(outfile), subprocess.call, ['publish', outfile]) - - def add_comparison_table_step(self, **kwargs): - """Add a step that makes pairwise revision comparisons. - - Create comparative reports for all pairs of Fast Downward - revisions. Each report pairs up the runs of the same config and - lists the two absolute attribute values and their difference - for all attributes in kwargs["attributes"]. - - Use *suffix* to denote a step name and filename suffix if you - want to add multiple different comparison table steps. - - All *kwargs* except *suffix* will be passed to the - CompareConfigsReport class. If the keyword argument - *attributes* is not specified, a default list of attributes is - used. 
:: - - exp.add_comparison_table_step(attributes=["coverage"]) - - """ - kwargs.setdefault("attributes", self.DEFAULT_TABLE_ATTRIBUTES) - suffix = kwargs.pop("suffix", "") - if suffix: - suffix = "-" + suffix - - def make_comparison_tables(): - for rev1, rev2 in itertools.combinations(self._revisions, 2): - compared_configs = [] - for config in self._configs: - config_nick = config.nick - compared_configs.append( - ("%s-%s" % (rev1, config_nick), - "%s-%s" % (rev2, config_nick), - "Diff (%s)" % config_nick)) - report = ComparativeReport(compared_configs, **kwargs) - outfile = os.path.join( - self.eval_dir, - "%s-%s-%s-compare%s.%s" % ( - self.name, rev1, rev2, suffix, report.output_format)) - report(self.eval_dir, outfile) - - def publish_comparison_tables(): - for rev1, rev2 in itertools.combinations(self._revisions, 2): - outfile = os.path.join( - self.eval_dir, - "%s-%s-%s-compare%s.html" % (self.name, rev1, rev2, suffix)) - subprocess.call(["publish", outfile]) - - self.add_step("make-comparison-tables%s" % suffix, make_comparison_tables) - self.add_step( - "publish-comparison-tables%s" % suffix, publish_comparison_tables) - - def add_scatter_plot_step(self, relative=False, attributes=None): - """Add step creating (relative) scatter plots for all revision pairs. - - Create a scatter plot for each combination of attribute, - configuration and revisions pair. If *attributes* is not - specified, a list of common scatter plot attributes is used. - For portfolios all attributes except "cost", "coverage" and - "plan_length" will be ignored. 
:: - - exp.add_scatter_plot_step(attributes=["expansions"]) - - """ - if relative: - report_class = RelativeScatterPlotReport - scatter_dir = os.path.join(self.eval_dir, "scatter-relative") - step_name = "make-relative-scatter-plots" - else: - report_class = ScatterPlotReport - scatter_dir = os.path.join(self.eval_dir, "scatter-absolute") - step_name = "make-absolute-scatter-plots" - if attributes is None: - attributes = self.DEFAULT_SCATTER_PLOT_ATTRIBUTES - - def make_scatter_plot(config_nick, rev1, rev2, attribute): - name = "-".join([self.name, rev1, rev2, attribute, config_nick]) - print "Make scatter plot for", name - algo1 = "{}-{}".format(rev1, config_nick) - algo2 = "{}-{}".format(rev2, config_nick) - report = report_class( - filter_config=[algo1, algo2], - attributes=[attribute], - get_category=lambda run1, run2: run1["domain"], - legend_location=(1.3, 0.5)) - report( - self.eval_dir, - os.path.join(scatter_dir, rev1 + "-" + rev2, name)) - - def make_scatter_plots(): - for config in self._configs: - for rev1, rev2 in itertools.combinations(self._revisions, 2): - for attribute in self.get_supported_attributes( - config.nick, attributes): - make_scatter_plot(config.nick, rev1, rev2, attribute) - - self.add_step(step_name, make_scatter_plots) diff --git a/experiments/issue919/relativescatter.py b/experiments/issue919/relativescatter.py deleted file mode 100644 index f74cb6e721..0000000000 --- a/experiments/issue919/relativescatter.py +++ /dev/null @@ -1,105 +0,0 @@ -# -*- coding: utf-8 -*- - -from collections import defaultdict - -from matplotlib import ticker - -from downward.reports.scatter import ScatterPlotReport -from downward.reports.plot import PlotReport, Matplotlib, MatplotlibPlot - - -# TODO: handle outliers - -# TODO: this is mostly copied from ScatterMatplotlib (scatter.py) -class RelativeScatterMatplotlib(Matplotlib): - @classmethod - def _plot(cls, report, axes, categories, styles): - # Display grid - axes.grid(b=True, linestyle='-', 
color='0.75') - - has_points = False - # Generate the scatter plots - for category, coords in sorted(categories.items()): - X, Y = zip(*coords) - axes.scatter(X, Y, s=42, label=category, **styles[category]) - if X and Y: - has_points = True - - if report.xscale == 'linear' or report.yscale == 'linear': - plot_size = report.missing_val * 1.01 - else: - plot_size = report.missing_val * 1.25 - - # make 5 ticks above and below 1 - yticks = [] - tick_step = report.ylim_top**(1/5.0) - for i in xrange(-5, 6): - yticks.append(tick_step**i) - axes.set_yticks(yticks) - axes.get_yaxis().set_major_formatter(ticker.ScalarFormatter()) - - axes.set_xlim(report.xlim_left or -1, report.xlim_right or plot_size) - axes.set_ylim(report.ylim_bottom or -1, report.ylim_top or plot_size) - - for axis in [axes.xaxis, axes.yaxis]: - MatplotlibPlot.change_axis_formatter( - axis, - report.missing_val if report.show_missing else None) - return has_points - - -class RelativeScatterPlotReport(ScatterPlotReport): - """ - Generate a scatter plot that shows a relative comparison of two - algorithms with regard to the given attribute. The attribute value - of algorithm 1 is shown on the x-axis and the relation to the value - of algorithm 2 on the y-axis. - """ - - def __init__(self, show_missing=True, get_category=None, **kwargs): - ScatterPlotReport.__init__(self, show_missing, get_category, **kwargs) - if self.output_format == 'tex': - raise "not supported" - else: - self.writer = RelativeScatterMatplotlib - - def _fill_categories(self, runs): - # We discard the *runs* parameter. 
- # Map category names to value tuples - categories = defaultdict(list) - self.ylim_bottom = 2 - self.ylim_top = 0.5 - self.xlim_left = float("inf") - for (domain, problem), runs in self.problem_runs.items(): - if len(runs) != 2: - continue - run1, run2 = runs - assert (run1['algorithm'] == self.algorithms[0] and - run2['algorithm'] == self.algorithms[1]) - val1 = run1.get(self.attribute) - val2 = run2.get(self.attribute) - if val1 is None or val2 is None: - continue - category = self.get_category(run1, run2) - assert val1 > 0, (domain, problem, self.algorithms[0], val1) - assert val2 > 0, (domain, problem, self.algorithms[1], val2) - x = val1 - y = val2 / float(val1) - - categories[category].append((x, y)) - - self.ylim_top = max(self.ylim_top, y) - self.ylim_bottom = min(self.ylim_bottom, y) - self.xlim_left = min(self.xlim_left, x) - - # center around 1 - if self.ylim_bottom < 1: - self.ylim_top = max(self.ylim_top, 1 / float(self.ylim_bottom)) - if self.ylim_top > 1: - self.ylim_bottom = min(self.ylim_bottom, 1 / float(self.ylim_top)) - return categories - - def _set_scales(self, xscale, yscale): - # ScatterPlot uses log-scaling on the x-axis by default. - PlotReport._set_scales( - self, xscale or self.attribute.scale or 'log', 'log') diff --git a/experiments/issue919/translator_additional_parser.py b/experiments/issue919/translator_additional_parser.py deleted file mode 100755 index 13a3802297..0000000000 --- a/experiments/issue919/translator_additional_parser.py +++ /dev/null @@ -1,12 +0,0 @@ -#!/usr/bin/env python - -import hashlib - -from lab.parser import Parser - -def add_hash_value(content, props): - props['translator_output_sas_hash'] = hashlib.sha512(content).hexdigest() - -parser = Parser() -parser.add_function(add_hash_value, file="output.sas") -parser.parse() diff --git a/experiments/issue919/v1.py b/experiments/issue919/v1.py deleted file mode 100755 index d3e194b688..0000000000 --- a/experiments/issue919/v1.py +++ /dev/null @@ -1,248 +0,0 @@ -#! 
/usr/bin/env python -# -*- coding: utf-8 -*- - -from collections import defaultdict -import os - -from lab.environments import LocalEnvironment, BaselSlurmEnvironment -from lab import tools - -from downward.reports.compare import ComparativeReport -from downward.reports import PlanningReport - -import common_setup -from common_setup import IssueConfig, IssueExperiment - - -EXPNAME = common_setup.get_experiment_name() -DIR = os.path.dirname(os.path.abspath(__file__)) -BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"] -REVISIONS = ["issue919-base", "issue919-v1"] -CONFIGS = [ - IssueConfig( - "translate-only", - [], - driver_options=["--translate"]) -] -ENVIRONMENT = BaselSlurmEnvironment( - partition="infai_2", - email="jendrik.seipp@unibas.ch", - export=["PATH", "DOWNWARD_BENCHMARKS"]) - -# This was generated by running "./suites.py all" in the benchmarks -# repository. -SUITE = [ - 'agricola-opt18-strips', - 'agricola-sat18-strips', - 'airport', - 'airport-adl', - 'assembly', - 'barman-mco14-strips', - 'barman-opt11-strips', - 'barman-opt14-strips', - 'barman-sat11-strips', - 'barman-sat14-strips', - 'blocks', - 'caldera-opt18-adl', - 'caldera-sat18-adl', - 'caldera-split-opt18-adl', - 'caldera-split-sat18-adl', - 'cavediving-14-adl', - 'childsnack-opt14-strips', - 'childsnack-sat14-strips', - 'citycar-opt14-adl', - 'citycar-sat14-adl', - 'data-network-opt18-strips', - 'data-network-sat18-strips', - 'depot', - 'driverlog', - 'elevators-opt08-strips', - 'elevators-opt11-strips', - 'elevators-sat08-strips', - 'elevators-sat11-strips', - 'flashfill-sat18-adl', - 'floortile-opt11-strips', - 'floortile-opt14-strips', - 'floortile-sat11-strips', - 'floortile-sat14-strips', - 'freecell', - 'ged-opt14-strips', - 'ged-sat14-strips', - 'grid', - 'gripper', - 'hiking-agl14-strips', - 'hiking-opt14-strips', - 'hiking-sat14-strips', - 'logistics00', - 'logistics98', - 'maintenance-opt14-adl', - 'maintenance-sat14-adl', - 'miconic', - 'miconic-fulladl', - 'miconic-simpleadl', 
- 'movie', - 'mprime', - 'mystery', - 'no-mprime', - 'no-mystery', - 'nomystery-opt11-strips', - 'nomystery-sat11-strips', - 'nurikabe-opt18-adl', - 'nurikabe-sat18-adl', - 'openstacks', - 'openstacks-agl14-strips', - 'openstacks-opt08-adl', - 'openstacks-opt08-strips', - 'openstacks-opt11-strips', - 'openstacks-opt14-strips', - 'openstacks-sat08-adl', - 'openstacks-sat08-strips', - 'openstacks-sat11-strips', - 'openstacks-sat14-strips', - 'openstacks-strips', - 'optical-telegraphs', - 'organic-synthesis-opt18-strips', - 'organic-synthesis-sat18-strips', - 'organic-synthesis-split-opt18-strips', - 'organic-synthesis-split-sat18-strips', - 'parcprinter-08-strips', - 'parcprinter-opt11-strips', - 'parcprinter-sat11-strips', - 'parking-opt11-strips', - 'parking-opt14-strips', - 'parking-sat11-strips', - 'parking-sat14-strips', - 'pathways', - 'pathways-noneg', - 'pegsol-08-strips', - 'pegsol-opt11-strips', - 'pegsol-sat11-strips', - 'petri-net-alignment-opt18-strips', - 'philosophers', - 'pipesworld-notankage', - 'pipesworld-tankage', - 'psr-large', - 'psr-middle', - 'psr-small', - 'rovers', - 'satellite', - 'scanalyzer-08-strips', - 'scanalyzer-opt11-strips', - 'scanalyzer-sat11-strips', - 'schedule', - 'settlers-opt18-adl', - 'settlers-sat18-adl', - 'snake-opt18-strips', - 'snake-sat18-strips', - 'sokoban-opt08-strips', - 'sokoban-opt11-strips', - 'sokoban-sat08-strips', - 'sokoban-sat11-strips', - 'spider-opt18-strips', - 'spider-sat18-strips', - 'storage', - 'termes-opt18-strips', - 'termes-sat18-strips', - 'tetris-opt14-strips', - 'tetris-sat14-strips', - 'thoughtful-mco14-strips', - 'thoughtful-sat14-strips', - 'tidybot-opt11-strips', - 'tidybot-opt14-strips', - 'tidybot-sat11-strips', - 'tpp', - 'transport-opt08-strips', - 'transport-opt11-strips', - 'transport-opt14-strips', - 'transport-sat08-strips', - 'transport-sat11-strips', - 'transport-sat14-strips', - 'trucks', - 'trucks-strips', - 'visitall-opt11-strips', - 'visitall-opt14-strips', - 
'visitall-sat11-strips', - 'visitall-sat14-strips', - 'woodworking-opt08-strips', - 'woodworking-opt11-strips', - 'woodworking-sat08-strips', - 'woodworking-sat11-strips', - 'zenotravel', -] - -if common_setup.is_test_run(): - SUITE = IssueExperiment.DEFAULT_TEST_SUITE - ENVIRONMENT = LocalEnvironment(processes=1) - -exp = IssueExperiment( - revisions=REVISIONS, - configs=CONFIGS, - environment=ENVIRONMENT, -) -exp.add_suite(BENCHMARKS_DIR, SUITE) - -exp.add_parser(exp.EXITCODE_PARSER) -exp.add_parser(exp.TRANSLATOR_PARSER) -exp.add_parser(exp.PLANNER_PARSER) -exp.add_parser("translator_additional_parser.py") - -del exp.commands['remove-output-sas'] - -class TranslatorDiffReport(PlanningReport): - def get_cell(self, run): - return ";".join(run.get(attr) for attr in self.attributes) - - def get_text(self): - lines = [] - for runs in self.problem_runs.values(): - hashes = set([r.get("translator_output_sas_hash") for r in runs]) - if len(hashes) > 1 or None in hashes: - lines.append(";".join([self.get_cell(r) for r in runs])) - return "\n".join(lines) - -class SameValueFilters(object): - """Ignore runs for a task where all algorithms have the same value.""" - def __init__(self, attribute): - self._attribute = attribute - self._tasks_to_values = defaultdict(list) - - def _get_task(self, run): - return (run['domain'], run['problem']) - - def store_values(self, run): - value = run.get(self._attribute) - self._tasks_to_values[self._get_task(run)].append(value) - # Don't filter this run, yet. 
- return True - - def filter_tasks_with_equal_values(self, run): - values = self._tasks_to_values[self._get_task(run)] - return len(set(values)) != 1 - -exp.add_step('build', exp.build) -exp.add_step('start', exp.start_runs) -exp.add_fetcher(name='fetch') - -ATTRIBUTES = ["error", "run_dir", "translator_*", "translator_output_sas_hash"] -# exp.add_absolute_report_step( -# outfile=os.path.join(exp.eval_dir, "{EXPNAME}.html".format(**locals())), -# attributes=ATTRIBUTES) -exp.add_comparison_table_step( - attributes=ATTRIBUTES) - -same_value_filters = SameValueFilters("translator_output_sas_hash") -# exp.add_absolute_report_step( -# outfile=os.path.join(exp.eval_dir, "{EXPNAME}-filtered.html".format(**locals())), -# attributes=ATTRIBUTES, -# filter=[same_value_filters.store_values, same_value_filters.filter_tasks_with_equal_values]) -exp.add_comparison_table_step( - suffix="filtered", - attributes=ATTRIBUTES, - filter=[same_value_filters.store_values, same_value_filters.filter_tasks_with_equal_values]) - -exp.add_report(TranslatorDiffReport( - attributes=["domain", "problem", "algorithm", "run_dir"] - ), outfile="different_output_sas.csv" -) - -exp.run_steps() diff --git a/experiments/issue925/common_setup.py b/experiments/issue925/common_setup.py deleted file mode 100644 index 687019c482..0000000000 --- a/experiments/issue925/common_setup.py +++ /dev/null @@ -1,382 +0,0 @@ -# -*- coding: utf-8 -*- - -import itertools -import os -import platform -import subprocess -import sys - -from lab.experiment import ARGPARSER -from lab import tools - -from downward.experiment import FastDownwardExperiment -from downward.reports.absolute import AbsoluteReport -from downward.reports.compare import ComparativeReport -from downward.reports.scatter import ScatterPlotReport - -from relativescatter import RelativeScatterPlotReport - - -def parse_args(): - ARGPARSER.add_argument( - "--test", - choices=["yes", "no", "auto"], - default="auto", - dest="test_run", - help="test experiment 
locally on a small suite if --test=yes or " - "--test=auto and we are not on a cluster") - return ARGPARSER.parse_args() - -ARGS = parse_args() - - -DEFAULT_OPTIMAL_SUITE = [ - 'airport', 'barman-opt11-strips', 'barman-opt14-strips', 'blocks', - 'childsnack-opt14-strips', 'depot', 'driverlog', - 'elevators-opt08-strips', 'elevators-opt11-strips', - 'floortile-opt11-strips', 'floortile-opt14-strips', 'freecell', - 'ged-opt14-strips', 'grid', 'gripper', 'hiking-opt14-strips', - 'logistics00', 'logistics98', 'miconic', 'movie', 'mprime', - 'mystery', 'nomystery-opt11-strips', 'openstacks-opt08-strips', - 'openstacks-opt11-strips', 'openstacks-opt14-strips', - 'openstacks-strips', 'parcprinter-08-strips', - 'parcprinter-opt11-strips', 'parking-opt11-strips', - 'parking-opt14-strips', 'pathways-noneg', 'pegsol-08-strips', - 'pegsol-opt11-strips', 'pipesworld-notankage', - 'pipesworld-tankage', 'psr-small', 'rovers', 'satellite', - 'scanalyzer-08-strips', 'scanalyzer-opt11-strips', - 'sokoban-opt08-strips', 'sokoban-opt11-strips', 'storage', - 'tetris-opt14-strips', 'tidybot-opt11-strips', - 'tidybot-opt14-strips', 'tpp', 'transport-opt08-strips', - 'transport-opt11-strips', 'transport-opt14-strips', - 'trucks-strips', 'visitall-opt11-strips', 'visitall-opt14-strips', - 'woodworking-opt08-strips', 'woodworking-opt11-strips', - 'zenotravel'] - -DEFAULT_SATISFICING_SUITE = [ - 'airport', 'assembly', 'barman-sat11-strips', - 'barman-sat14-strips', 'blocks', 'cavediving-14-adl', - 'childsnack-sat14-strips', 'citycar-sat14-adl', 'depot', - 'driverlog', 'elevators-sat08-strips', 'elevators-sat11-strips', - 'floortile-sat11-strips', 'floortile-sat14-strips', 'freecell', - 'ged-sat14-strips', 'grid', 'gripper', 'hiking-sat14-strips', - 'logistics00', 'logistics98', 'maintenance-sat14-adl', 'miconic', - 'miconic-fulladl', 'miconic-simpleadl', 'movie', 'mprime', - 'mystery', 'nomystery-sat11-strips', 'openstacks', - 'openstacks-sat08-adl', 'openstacks-sat08-strips', - 
'openstacks-sat11-strips', 'openstacks-sat14-strips', - 'openstacks-strips', 'optical-telegraphs', 'parcprinter-08-strips', - 'parcprinter-sat11-strips', 'parking-sat11-strips', - 'parking-sat14-strips', 'pathways', 'pathways-noneg', - 'pegsol-08-strips', 'pegsol-sat11-strips', 'philosophers', - 'pipesworld-notankage', 'pipesworld-tankage', 'psr-large', - 'psr-middle', 'psr-small', 'rovers', 'satellite', - 'scanalyzer-08-strips', 'scanalyzer-sat11-strips', 'schedule', - 'sokoban-sat08-strips', 'sokoban-sat11-strips', 'storage', - 'tetris-sat14-strips', 'thoughtful-sat14-strips', - 'tidybot-sat11-strips', 'tpp', 'transport-sat08-strips', - 'transport-sat11-strips', 'transport-sat14-strips', 'trucks', - 'trucks-strips', 'visitall-sat11-strips', 'visitall-sat14-strips', - 'woodworking-sat08-strips', 'woodworking-sat11-strips', - 'zenotravel'] - - -def get_script(): - """Get file name of main script.""" - return tools.get_script_path() - - -def get_script_dir(): - """Get directory of main script. - - Usually a relative directory (depends on how it was called by the user.)""" - return os.path.dirname(get_script()) - - -def get_experiment_name(): - """Get name for experiment. - - Derived from the absolute filename of the main script, e.g. - "/ham/spam/eggs.py" => "spam-eggs".""" - script = os.path.abspath(get_script()) - script_dir = os.path.basename(os.path.dirname(script)) - script_base = os.path.splitext(os.path.basename(script))[0] - return "%s-%s" % (script_dir, script_base) - - -def get_data_dir(): - """Get data dir for the experiment. - - This is the subdirectory "data" of the directory containing - the main script.""" - return os.path.join(get_script_dir(), "data", get_experiment_name()) - - -def get_repo_base(): - """Get base directory of the repository, as an absolute path. - - Search upwards in the directory tree from the main script until a - directory with a subdirectory named ".hg" is found. 
- - Abort if the repo base cannot be found.""" - path = os.path.abspath(get_script_dir()) - while os.path.dirname(path) != path: - if os.path.exists(os.path.join(path, ".hg")): - return path - path = os.path.dirname(path) - sys.exit("repo base could not be found") - - -def is_running_on_cluster(): - node = platform.node() - return node.endswith(".scicore.unibas.ch") or node.endswith(".cluster.bc2.ch") - - -def is_test_run(): - return ARGS.test_run == "yes" or ( - ARGS.test_run == "auto" and not is_running_on_cluster()) - - -def get_algo_nick(revision, config_nick): - return "{revision}-{config_nick}".format(**locals()) - - -class IssueConfig(object): - """Hold information about a planner configuration. - - See FastDownwardExperiment.add_algorithm() for documentation of the - constructor's options. - - """ - def __init__(self, nick, component_options, - build_options=None, driver_options=None): - self.nick = nick - self.component_options = component_options - self.build_options = build_options - self.driver_options = driver_options - - -class IssueExperiment(FastDownwardExperiment): - """Subclass of FastDownwardExperiment with some convenience features.""" - - DEFAULT_TEST_SUITE = ["depot:p01.pddl", "gripper:prob01.pddl"] - - DEFAULT_TABLE_ATTRIBUTES = [ - "cost", - "coverage", - "error", - "evaluations", - "expansions", - "expansions_until_last_jump", - "generated", - "memory", - "quality", - "run_dir", - "score_evaluations", - "score_expansions", - "score_generated", - "score_memory", - "score_search_time", - "score_total_time", - "search_time", - "total_time", - ] - - DEFAULT_SCATTER_PLOT_ATTRIBUTES = [ - "evaluations", - "expansions", - "expansions_until_last_jump", - "initial_h_value", - "memory", - "search_time", - "total_time", - ] - - PORTFOLIO_ATTRIBUTES = [ - "cost", - "coverage", - "error", - "plan_length", - "run_dir", - ] - - def __init__(self, revisions=None, configs=None, path=None, **kwargs): - """ - - You can either specify both *revisions* and 
*configs* or none - of them. If they are omitted, you will need to call - exp.add_algorithm() manually. - - If *revisions* is given, it must be a non-empty list of - revision identifiers, which specify which planner versions to - use in the experiment. The same versions are used for - translator, preprocessor and search. :: - - IssueExperiment(revisions=["issue123", "4b3d581643"], ...) - - If *configs* is given, it must be a non-empty list of - IssueConfig objects. :: - - IssueExperiment(..., configs=[ - IssueConfig("ff", ["--search", "eager_greedy(ff())"]), - IssueConfig( - "lama", [], - driver_options=["--alias", "seq-sat-lama-2011"]), - ]) - - If *path* is specified, it must be the path to where the - experiment should be built (e.g. - /home/john/experiments/issue123/exp01/). If omitted, the - experiment path is derived automatically from the main - script's filename. Example:: - - script = experiments/issue123/exp01.py --> - path = experiments/issue123/data/issue123-exp01/ - - """ - - path = path or get_data_dir() - - FastDownwardExperiment.__init__(self, path=path, **kwargs) - - if (revisions and not configs) or (not revisions and configs): - raise ValueError( - "please provide either both or none of revisions and configs") - - for rev in revisions: - for config in configs: - self.add_algorithm( - get_algo_nick(rev, config.nick), - get_repo_base(), - rev, - config.component_options, - build_options=config.build_options, - driver_options=config.driver_options) - - self._revisions = revisions - self._configs = configs - - @classmethod - def _is_portfolio(cls, config_nick): - return "fdss" in config_nick - - @classmethod - def get_supported_attributes(cls, config_nick, attributes): - if cls._is_portfolio(config_nick): - return [attr for attr in attributes - if attr in cls.PORTFOLIO_ATTRIBUTES] - return attributes - - def add_absolute_report_step(self, **kwargs): - """Add step that makes an absolute report. 
- - Absolute reports are useful for experiments that don't compare - revisions. - - The report is written to the experiment evaluation directory. - - All *kwargs* will be passed to the AbsoluteReport class. If the - keyword argument *attributes* is not specified, a default list - of attributes is used. :: - - exp.add_absolute_report_step(attributes=["coverage"]) - - """ - kwargs.setdefault("attributes", self.DEFAULT_TABLE_ATTRIBUTES) - report = AbsoluteReport(**kwargs) - outfile = os.path.join( - self.eval_dir, - get_experiment_name() + "." + report.output_format) - self.add_report(report, outfile=outfile) - self.add_step( - 'publish-absolute-report', subprocess.call, ['publish', outfile]) - - def add_comparison_table_step(self, **kwargs): - """Add a step that makes pairwise revision comparisons. - - Create comparative reports for all pairs of Fast Downward - revisions. Each report pairs up the runs of the same config and - lists the two absolute attribute values and their difference - for all attributes in kwargs["attributes"]. - - All *kwargs* will be passed to the CompareConfigsReport class. - If the keyword argument *attributes* is not specified, a - default list of attributes is used. 
:: - - exp.add_comparison_table_step(attributes=["coverage"]) - - """ - kwargs.setdefault("attributes", self.DEFAULT_TABLE_ATTRIBUTES) - - def make_comparison_tables(): - for rev1, rev2 in itertools.combinations(self._revisions, 2): - compared_configs = [] - for config in self._configs: - config_nick = config.nick - compared_configs.append( - ("%s-%s" % (rev1, config_nick), - "%s-%s" % (rev2, config_nick), - "Diff (%s)" % config_nick)) - report = ComparativeReport(compared_configs, **kwargs) - outfile = os.path.join( - self.eval_dir, - "%s-%s-%s-compare.%s" % ( - self.name, rev1, rev2, report.output_format)) - report(self.eval_dir, outfile) - - def publish_comparison_tables(): - for rev1, rev2 in itertools.combinations(self._revisions, 2): - outfile = os.path.join( - self.eval_dir, - "%s-%s-%s-compare.html" % (self.name, rev1, rev2)) - subprocess.call(["publish", outfile]) - - self.add_step("make-comparison-tables", make_comparison_tables) - self.add_step( - "publish-comparison-tables", publish_comparison_tables) - - def add_scatter_plot_step(self, relative=False, attributes=None): - """Add step creating (relative) scatter plots for all revision pairs. - - Create a scatter plot for each combination of attribute, - configuration and revisions pair. If *attributes* is not - specified, a list of common scatter plot attributes is used. - For portfolios all attributes except "cost", "coverage" and - "plan_length" will be ignored. 
:: - - exp.add_scatter_plot_step(attributes=["expansions"]) - - """ - if relative: - report_class = RelativeScatterPlotReport - scatter_dir = os.path.join(self.eval_dir, "scatter-relative") - step_name = "make-relative-scatter-plots" - else: - report_class = ScatterPlotReport - scatter_dir = os.path.join(self.eval_dir, "scatter-absolute") - step_name = "make-absolute-scatter-plots" - if attributes is None: - attributes = self.DEFAULT_SCATTER_PLOT_ATTRIBUTES - - def make_scatter_plot(config_nick, rev1, rev2, attribute): - name = "-".join([self.name, rev1, rev2, attribute, config_nick]) - print "Make scatter plot for", name - algo1 = "{}-{}".format(rev1, config_nick) - algo2 = "{}-{}".format(rev2, config_nick) - report = report_class( - filter_config=[algo1, algo2], - attributes=[attribute], - get_category=lambda run1, run2: run1["domain"], - legend_location=(1.3, 0.5)) - report( - self.eval_dir, - os.path.join(scatter_dir, rev1 + "-" + rev2, name)) - - def make_scatter_plots(): - for config in self._configs: - for rev1, rev2 in itertools.combinations(self._revisions, 2): - for attribute in self.get_supported_attributes( - config.nick, attributes): - make_scatter_plot(config.nick, rev1, rev2, attribute) - - self.add_step(step_name, make_scatter_plots) diff --git a/experiments/issue925/relativescatter.py b/experiments/issue925/relativescatter.py deleted file mode 100644 index f74cb6e721..0000000000 --- a/experiments/issue925/relativescatter.py +++ /dev/null @@ -1,105 +0,0 @@ -# -*- coding: utf-8 -*- - -from collections import defaultdict - -from matplotlib import ticker - -from downward.reports.scatter import ScatterPlotReport -from downward.reports.plot import PlotReport, Matplotlib, MatplotlibPlot - - -# TODO: handle outliers - -# TODO: this is mostly copied from ScatterMatplotlib (scatter.py) -class RelativeScatterMatplotlib(Matplotlib): - @classmethod - def _plot(cls, report, axes, categories, styles): - # Display grid - axes.grid(b=True, linestyle='-', 
color='0.75') - - has_points = False - # Generate the scatter plots - for category, coords in sorted(categories.items()): - X, Y = zip(*coords) - axes.scatter(X, Y, s=42, label=category, **styles[category]) - if X and Y: - has_points = True - - if report.xscale == 'linear' or report.yscale == 'linear': - plot_size = report.missing_val * 1.01 - else: - plot_size = report.missing_val * 1.25 - - # make 5 ticks above and below 1 - yticks = [] - tick_step = report.ylim_top**(1/5.0) - for i in xrange(-5, 6): - yticks.append(tick_step**i) - axes.set_yticks(yticks) - axes.get_yaxis().set_major_formatter(ticker.ScalarFormatter()) - - axes.set_xlim(report.xlim_left or -1, report.xlim_right or plot_size) - axes.set_ylim(report.ylim_bottom or -1, report.ylim_top or plot_size) - - for axis in [axes.xaxis, axes.yaxis]: - MatplotlibPlot.change_axis_formatter( - axis, - report.missing_val if report.show_missing else None) - return has_points - - -class RelativeScatterPlotReport(ScatterPlotReport): - """ - Generate a scatter plot that shows a relative comparison of two - algorithms with regard to the given attribute. The attribute value - of algorithm 1 is shown on the x-axis and the relation to the value - of algorithm 2 on the y-axis. - """ - - def __init__(self, show_missing=True, get_category=None, **kwargs): - ScatterPlotReport.__init__(self, show_missing, get_category, **kwargs) - if self.output_format == 'tex': - raise "not supported" - else: - self.writer = RelativeScatterMatplotlib - - def _fill_categories(self, runs): - # We discard the *runs* parameter. 
- # Map category names to value tuples - categories = defaultdict(list) - self.ylim_bottom = 2 - self.ylim_top = 0.5 - self.xlim_left = float("inf") - for (domain, problem), runs in self.problem_runs.items(): - if len(runs) != 2: - continue - run1, run2 = runs - assert (run1['algorithm'] == self.algorithms[0] and - run2['algorithm'] == self.algorithms[1]) - val1 = run1.get(self.attribute) - val2 = run2.get(self.attribute) - if val1 is None or val2 is None: - continue - category = self.get_category(run1, run2) - assert val1 > 0, (domain, problem, self.algorithms[0], val1) - assert val2 > 0, (domain, problem, self.algorithms[1], val2) - x = val1 - y = val2 / float(val1) - - categories[category].append((x, y)) - - self.ylim_top = max(self.ylim_top, y) - self.ylim_bottom = min(self.ylim_bottom, y) - self.xlim_left = min(self.xlim_left, x) - - # center around 1 - if self.ylim_bottom < 1: - self.ylim_top = max(self.ylim_top, 1 / float(self.ylim_bottom)) - if self.ylim_top > 1: - self.ylim_bottom = min(self.ylim_bottom, 1 / float(self.ylim_top)) - return categories - - def _set_scales(self, xscale, yscale): - # ScatterPlot uses log-scaling on the x-axis by default. - PlotReport._set_scales( - self, xscale or self.attribute.scale or 'log', 'log') diff --git a/experiments/issue925/v1.py b/experiments/issue925/v1.py deleted file mode 100755 index b4df5033c1..0000000000 --- a/experiments/issue925/v1.py +++ /dev/null @@ -1,57 +0,0 @@ -#! /usr/bin/env python -# -*- coding: utf-8 -*- - -import os - -from lab.environments import LocalEnvironment, BaselSlurmEnvironment - -import common_setup -from common_setup import IssueConfig, IssueExperiment -from relativescatter import RelativeScatterPlotReport -from itertools import combinations - -DIR = os.path.dirname(os.path.abspath(__file__)) -BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"] -# These revisions are all tag experimental branches off the same revision. 
-# we only need different tags so lab creates separate build directories in the build cache. -# We then manually recompile the code in the build cache with the correct settings. -REVISIONS = ["issue925-cplex12.8-static", "issue925-cplex12.8-dynamic", "issue925-cplex12.9-static", "issue925-cplex12.9-dynamic"] -CONFIGS = [ - IssueConfig("opcount-seq-lmcut", ["--search", "astar(operatorcounting([state_equation_constraints(), lmcut_constraints()]))"]), - IssueConfig("diverse-potentials", ["--search", "astar(diverse_potentials())"]), - IssueConfig("optimal-lmcount", ["--search", "astar(lmcount(lm_merged([lm_rhw(),lm_hm(m=1)]), admissible=true, optimal=true))"]), -] -SUITE = common_setup.DEFAULT_OPTIMAL_SUITE -ENVIRONMENT = BaselSlurmEnvironment(email="florian.pommerening@unibas.ch") - -if common_setup.is_test_run(): - SUITE = IssueExperiment.DEFAULT_TEST_SUITE - ENVIRONMENT = LocalEnvironment(processes=4) - -exp = IssueExperiment( - revisions=REVISIONS, - configs=CONFIGS, - environment=ENVIRONMENT, -) -exp.add_suite(BENCHMARKS_DIR, SUITE) - -exp.add_parser(exp.EXITCODE_PARSER) -exp.add_parser(exp.TRANSLATOR_PARSER) -exp.add_parser(exp.SINGLE_SEARCH_PARSER) -exp.add_parser(exp.PLANNER_PARSER) - -exp.add_step('build', exp.build) -exp.add_step('start', exp.start_runs) -exp.add_fetcher(name='fetch') - -exp.add_comparison_table_step() - -for r1, r2 in combinations(REVISIONS, 2): - for nick in ["opcount-seq-lmcut", "diverse-potentials", "optimal-lmcount"]: - exp.add_report(RelativeScatterPlotReport( - attributes=["total_time"], - filter_algorithm=["%s-%s" % (r, nick) for r in [r1, r2]], - get_category=lambda run1, run2: run1["domain"]), - outfile="issue925-v1-total-time-%s-%s-%s.png" % (r1, r2, nick)) - -exp.run_steps() diff --git a/experiments/issue937/common_setup.py b/experiments/issue937/common_setup.py deleted file mode 100644 index eeca3aadb5..0000000000 --- a/experiments/issue937/common_setup.py +++ /dev/null @@ -1,395 +0,0 @@ -# -*- coding: utf-8 -*- - -import 
itertools -import os -import platform -import subprocess -import sys - -from lab.experiment import ARGPARSER -from lab import tools - -from downward.experiment import FastDownwardExperiment -from downward.reports.absolute import AbsoluteReport -from downward.reports.compare import ComparativeReport -from downward.reports.scatter import ScatterPlotReport - - -def parse_args(): - ARGPARSER.add_argument( - "--test", - choices=["yes", "no", "auto"], - default="auto", - dest="test_run", - help="test experiment locally on a small suite if --test=yes or " - "--test=auto and we are not on a cluster") - return ARGPARSER.parse_args() - -ARGS = parse_args() - - -DEFAULT_OPTIMAL_SUITE = [ - 'agricola-opt18-strips', 'airport', 'barman-opt11-strips', - 'barman-opt14-strips', 'blocks', 'childsnack-opt14-strips', - 'data-network-opt18-strips', 'depot', 'driverlog', - 'elevators-opt08-strips', 'elevators-opt11-strips', - 'floortile-opt11-strips', 'floortile-opt14-strips', 'freecell', - 'ged-opt14-strips', 'grid', 'gripper', 'hiking-opt14-strips', - 'logistics00', 'logistics98', 'miconic', 'movie', 'mprime', - 'mystery', 'nomystery-opt11-strips', 'openstacks-opt08-strips', - 'openstacks-opt11-strips', 'openstacks-opt14-strips', - 'openstacks-strips', 'organic-synthesis-opt18-strips', - 'organic-synthesis-split-opt18-strips', 'parcprinter-08-strips', - 'parcprinter-opt11-strips', 'parking-opt11-strips', - 'parking-opt14-strips', 'pathways-noneg', 'pegsol-08-strips', - 'pegsol-opt11-strips', 'petri-net-alignment-opt18-strips', - 'pipesworld-notankage', 'pipesworld-tankage', 'psr-small', 'rovers', - 'satellite', 'scanalyzer-08-strips', 'scanalyzer-opt11-strips', - 'snake-opt18-strips', 'sokoban-opt08-strips', - 'sokoban-opt11-strips', 'spider-opt18-strips', 'storage', - 'termes-opt18-strips', 'tetris-opt14-strips', - 'tidybot-opt11-strips', 'tidybot-opt14-strips', 'tpp', - 'transport-opt08-strips', 'transport-opt11-strips', - 'transport-opt14-strips', 'trucks-strips', 
'visitall-opt11-strips', - 'visitall-opt14-strips', 'woodworking-opt08-strips', - 'woodworking-opt11-strips', 'zenotravel'] - -DEFAULT_SATISFICING_SUITE = [ - 'agricola-sat18-strips', 'airport', 'assembly', - 'barman-sat11-strips', 'barman-sat14-strips', 'blocks', - 'caldera-sat18-adl', 'caldera-split-sat18-adl', 'cavediving-14-adl', - 'childsnack-sat14-strips', 'citycar-sat14-adl', - 'data-network-sat18-strips', 'depot', 'driverlog', - 'elevators-sat08-strips', 'elevators-sat11-strips', - 'flashfill-sat18-adl', 'floortile-sat11-strips', - 'floortile-sat14-strips', 'freecell', 'ged-sat14-strips', 'grid', - 'gripper', 'hiking-sat14-strips', 'logistics00', 'logistics98', - 'maintenance-sat14-adl', 'miconic', 'miconic-fulladl', - 'miconic-simpleadl', 'movie', 'mprime', 'mystery', - 'nomystery-sat11-strips', 'nurikabe-sat18-adl', 'openstacks', - 'openstacks-sat08-adl', 'openstacks-sat08-strips', - 'openstacks-sat11-strips', 'openstacks-sat14-strips', - 'openstacks-strips', 'optical-telegraphs', - 'organic-synthesis-sat18-strips', - 'organic-synthesis-split-sat18-strips', 'parcprinter-08-strips', - 'parcprinter-sat11-strips', 'parking-sat11-strips', - 'parking-sat14-strips', 'pathways', 'pathways-noneg', - 'pegsol-08-strips', 'pegsol-sat11-strips', 'philosophers', - 'pipesworld-notankage', 'pipesworld-tankage', 'psr-large', - 'psr-middle', 'psr-small', 'rovers', 'satellite', - 'scanalyzer-08-strips', 'scanalyzer-sat11-strips', 'schedule', - 'settlers-sat18-adl', 'snake-sat18-strips', 'sokoban-sat08-strips', - 'sokoban-sat11-strips', 'spider-sat18-strips', 'storage', - 'termes-sat18-strips', 'tetris-sat14-strips', - 'thoughtful-sat14-strips', 'tidybot-sat11-strips', 'tpp', - 'transport-sat08-strips', 'transport-sat11-strips', - 'transport-sat14-strips', 'trucks', 'trucks-strips', - 'visitall-sat11-strips', 'visitall-sat14-strips', - 'woodworking-sat08-strips', 'woodworking-sat11-strips', - 'zenotravel'] - - -def get_script(): - """Get file name of main script.""" - 
return tools.get_script_path() - - -def get_script_dir(): - """Get directory of main script. - - Usually a relative directory (depends on how it was called by the user.)""" - return os.path.dirname(get_script()) - - -def get_experiment_name(): - """Get name for experiment. - - Derived from the absolute filename of the main script, e.g. - "/ham/spam/eggs.py" => "spam-eggs".""" - script = os.path.abspath(get_script()) - script_dir = os.path.basename(os.path.dirname(script)) - script_base = os.path.splitext(os.path.basename(script))[0] - return "%s-%s" % (script_dir, script_base) - - -def get_data_dir(): - """Get data dir for the experiment. - - This is the subdirectory "data" of the directory containing - the main script.""" - return os.path.join(get_script_dir(), "data", get_experiment_name()) - - -def get_repo_base(): - """Get base directory of the repository, as an absolute path. - - Search upwards in the directory tree from the main script until a - directory with a subdirectory named ".git" is found. - - Abort if the repo base cannot be found.""" - path = os.path.abspath(get_script_dir()) - while os.path.dirname(path) != path: - if os.path.exists(os.path.join(path, ".git")): - return path - path = os.path.dirname(path) - sys.exit("repo base could not be found") - - -def is_running_on_cluster(): - node = platform.node() - return node.endswith(".scicore.unibas.ch") or node.endswith(".cluster.bc2.ch") - - -def is_test_run(): - return ARGS.test_run == "yes" or ( - ARGS.test_run == "auto" and not is_running_on_cluster()) - - -def get_algo_nick(revision, config_nick): - return "{revision}-{config_nick}".format(**locals()) - - -class IssueConfig(object): - """Hold information about a planner configuration. - - See FastDownwardExperiment.add_algorithm() for documentation of the - constructor's options. 
- - """ - def __init__(self, nick, component_options, - build_options=None, driver_options=None): - self.nick = nick - self.component_options = component_options - self.build_options = build_options - self.driver_options = driver_options - - -class IssueExperiment(FastDownwardExperiment): - """Subclass of FastDownwardExperiment with some convenience features.""" - - DEFAULT_TEST_SUITE = ["depot:p01.pddl", "gripper:prob01.pddl"] - - DEFAULT_TABLE_ATTRIBUTES = [ - "cost", - "coverage", - "error", - "evaluations", - "expansions", - "expansions_until_last_jump", - "initial_h_value", - "generated", - "memory", - "planner_memory", - "planner_time", - "quality", - "run_dir", - "score_evaluations", - "score_expansions", - "score_generated", - "score_memory", - "score_search_time", - "score_total_time", - "search_time", - "total_time", - ] - - DEFAULT_SCATTER_PLOT_ATTRIBUTES = [ - "evaluations", - "expansions", - "expansions_until_last_jump", - "initial_h_value", - "memory", - "search_time", - "total_time", - ] - - PORTFOLIO_ATTRIBUTES = [ - "cost", - "coverage", - "error", - "plan_length", - "run_dir", - ] - - def __init__(self, revisions=None, configs=None, path=None, **kwargs): - """ - - You can either specify both *revisions* and *configs* or none - of them. If they are omitted, you will need to call - exp.add_algorithm() manually. - - If *revisions* is given, it must be a non-empty list of - revision identifiers, which specify which planner versions to - use in the experiment. The same versions are used for - translator, preprocessor and search. :: - - IssueExperiment(revisions=["issue123", "4b3d581643"], ...) - - If *configs* is given, it must be a non-empty list of - IssueConfig objects. :: - - IssueExperiment(..., configs=[ - IssueConfig("ff", ["--search", "eager_greedy(ff())"]), - IssueConfig( - "lama", [], - driver_options=["--alias", "seq-sat-lama-2011"]), - ]) - - If *path* is specified, it must be the path to where the - experiment should be built (e.g. 
- /home/john/experiments/issue123/exp01/). If omitted, the - experiment path is derived automatically from the main - script's filename. Example:: - - script = experiments/issue123/exp01.py --> - path = experiments/issue123/data/issue123-exp01/ - - """ - - path = path or get_data_dir() - - FastDownwardExperiment.__init__(self, path=path, **kwargs) - - if (revisions and not configs) or (not revisions and configs): - raise ValueError( - "please provide either both or none of revisions and configs") - - for rev in revisions: - for config in configs: - self.add_algorithm( - get_algo_nick(rev, config.nick), - get_repo_base(), - rev, - config.component_options, - build_options=config.build_options, - driver_options=config.driver_options) - - self._revisions = revisions - self._configs = configs - - @classmethod - def _is_portfolio(cls, config_nick): - return "fdss" in config_nick - - @classmethod - def get_supported_attributes(cls, config_nick, attributes): - if cls._is_portfolio(config_nick): - return [attr for attr in attributes - if attr in cls.PORTFOLIO_ATTRIBUTES] - return attributes - - def add_absolute_report_step(self, **kwargs): - """Add step that makes an absolute report. - - Absolute reports are useful for experiments that don't compare - revisions. - - The report is written to the experiment evaluation directory. - - All *kwargs* will be passed to the AbsoluteReport class. If the - keyword argument *attributes* is not specified, a default list - of attributes is used. :: - - exp.add_absolute_report_step(attributes=["coverage"]) - - """ - kwargs.setdefault("attributes", self.DEFAULT_TABLE_ATTRIBUTES) - report = AbsoluteReport(**kwargs) - outfile = os.path.join( - self.eval_dir, - get_experiment_name() + "." 
+ report.output_format) - self.add_report(report, outfile=outfile) - self.add_step( - 'publish-absolute-report', subprocess.call, ['publish', outfile]) - - def add_comparison_table_step(self, **kwargs): - """Add a step that makes pairwise revision comparisons. - - Create comparative reports for all pairs of Fast Downward - revisions. Each report pairs up the runs of the same config and - lists the two absolute attribute values and their difference - for all attributes in kwargs["attributes"]. - - All *kwargs* will be passed to the CompareConfigsReport class. - If the keyword argument *attributes* is not specified, a - default list of attributes is used. :: - - exp.add_comparison_table_step(attributes=["coverage"]) - - """ - kwargs.setdefault("attributes", self.DEFAULT_TABLE_ATTRIBUTES) - - def make_comparison_tables(): - for rev1, rev2 in itertools.combinations(self._revisions, 2): - compared_configs = [] - for config in self._configs: - config_nick = config.nick - compared_configs.append( - ("%s-%s" % (rev1, config_nick), - "%s-%s" % (rev2, config_nick), - "Diff (%s)" % config_nick)) - report = ComparativeReport(compared_configs, **kwargs) - outfile = os.path.join( - self.eval_dir, - "%s-%s-%s-compare.%s" % ( - self.name, rev1, rev2, report.output_format)) - report(self.eval_dir, outfile) - - def publish_comparison_tables(): - for rev1, rev2 in itertools.combinations(self._revisions, 2): - outfile = os.path.join( - self.eval_dir, - "%s-%s-%s-compare.html" % (self.name, rev1, rev2)) - subprocess.call(["publish", outfile]) - - self.add_step("make-comparison-tables", make_comparison_tables) - self.add_step( - "publish-comparison-tables", publish_comparison_tables) - - def add_scatter_plot_step(self, relative=False, attributes=None, additional=[]): - """Add step creating (relative) scatter plots for all revision pairs. - - Create a scatter plot for each combination of attribute, - configuration and revisions pair. 
If *attributes* is not - specified, a list of common scatter plot attributes is used. - For portfolios all attributes except "cost", "coverage" and - "plan_length" will be ignored. :: - - exp.add_scatter_plot_step(attributes=["expansions"]) - - """ - if relative: - scatter_dir = os.path.join(self.eval_dir, "scatter-relative") - step_name = "make-relative-scatter-plots" - else: - scatter_dir = os.path.join(self.eval_dir, "scatter-absolute") - step_name = "make-absolute-scatter-plots" - if attributes is None: - attributes = self.DEFAULT_SCATTER_PLOT_ATTRIBUTES - - def make_scatter_plot(config_nick, rev1, rev2, attribute, config_nick2=None): - name = "-".join([self.name, rev1, rev2, attribute, config_nick]) - if config_nick2 is not None: - name += "-" + config_nick2 - print("Make scatter plot for", name) - algo1 = get_algo_nick(rev1, config_nick) - algo2 = get_algo_nick(rev2, config_nick if config_nick2 is None else config_nick2) - report = ScatterPlotReport( - filter_algorithm=[algo1, algo2], - attributes=[attribute], - relative=relative, - get_category=lambda run1, run2: run1["domain"]) - report( - self.eval_dir, - os.path.join(scatter_dir, rev1 + "-" + rev2, name)) - - def make_scatter_plots(): - for config in self._configs: - for rev1, rev2 in itertools.combinations(self._revisions, 2): - for attribute in self.get_supported_attributes( - config.nick, attributes): - make_scatter_plot(config.nick, rev1, rev2, attribute) - for nick1, nick2, rev1, rev2, attribute in additional: - make_scatter_plot(nick1, rev1, rev2, attribute, config_nick2=nick2) - - self.add_step(step_name, lambda: make_scatter_plots) diff --git a/experiments/issue937/landmark_parser.py b/experiments/issue937/landmark_parser.py deleted file mode 100755 index 943492471b..0000000000 --- a/experiments/issue937/landmark_parser.py +++ /dev/null @@ -1,29 +0,0 @@ -#! 
/usr/bin/env python - -import re - -from lab.parser import Parser - -parser = Parser() -parser.add_pattern( - "lmgraph_generation_time", - r"Landmark graph generation time: (.+)s", - type=float) -parser.add_pattern( - "landmarks", - r"Landmark graph contains (\d+) landmarks, of which \d+ are disjunctive and \d+ are conjunctive.", - type=int) -parser.add_pattern( - "landmarks_disjunctive", - r"Landmark graph contains \d+ landmarks, of which (\d+) are disjunctive and \d+ are conjunctive.", - type=int) -parser.add_pattern( - "landmarks_conjunctive", - r"Landmark graph contains \d+ landmarks, of which \d+ are disjunctive and (\d+) are conjunctive.", - type=int) -parser.add_pattern( - "orderings", - r"Landmark graph contains (\d+) orderings.", - type=int) - -parser.parse() diff --git a/experiments/issue937/v1-optimal.py b/experiments/issue937/v1-optimal.py deleted file mode 100755 index 31f32b5415..0000000000 --- a/experiments/issue937/v1-optimal.py +++ /dev/null @@ -1,93 +0,0 @@ -#! /usr/bin/env python -# -*- coding: utf-8 -*- - -import common_setup -from common_setup import IssueConfig, IssueExperiment - -import os - -from lab.reports import Attribute - -from lab.environments import LocalEnvironment, BaselSlurmEnvironment - -def make_comparison_table(): - report = common_setup.ComparativeReport( - algorithm_pairs=[ - ("issue937-base-seq-opt-bjolp", "issue937-v1-seq-opt-bjolp"), - ("issue937-base-seq-opt-bjolp-opt", "issue937-v1-seq-opt-bjolp-opt"), - ("issue937-base-lm-exhaust", "issue937-v1-lm-exhaust"), - ("issue937-base-lm-hm2", "issue937-v1-lm-hm2"), - ], attributes=ATTRIBUTES, - ) - outfile = os.path.join( - exp.eval_dir, "%s-compare.%s" % (exp.name, report.output_format) - ) - report(exp.eval_dir, outfile) - - exp.add_report(report) - - -REVISIONS = [ - "issue937-base", - "issue937-v1", -] - -CONFIGS = [ - common_setup.IssueConfig("seq-opt-bjolp", [], - driver_options=["--alias", "seq-opt-bjolp"]), - common_setup.IssueConfig( - "lm-exhaust", ["--evaluator", - 
"lmc=lmcount(lm_exhaust(),admissible=true)", - "--search", "astar(lmc,lazy_evaluator=lmc)"]), - common_setup.IssueConfig( - "lm-hm2", ["--evaluator", - "lmc=lmcount(lm_hm(m=2),admissible=true)", - "--search", "astar(lmc,lazy_evaluator=lmc)"]), - common_setup.IssueConfig( - "seq-opt-bjolp-opt", ["--evaluator", - "lmc=lmcount(lm_merged([lm_rhw(),lm_hm(m=1)]),admissible=true, optimal=true)", - "--search", "astar(lmc,lazy_evaluator=lmc)"]), -] - -BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"] -REPO = os.environ["DOWNWARD_REPO"] - -if common_setup.is_running_on_cluster(): - SUITE = common_setup.DEFAULT_OPTIMAL_SUITE - ENVIRONMENT = BaselSlurmEnvironment( - partition="infai_2", - email="clemens.buechner@unibas.ch", - export=["PATH", "DOWNWARD_BENCHMARKS"], - ) -else: - SUITE = common_setup.IssueExperiment.DEFAULT_TEST_SUITE - ENVIRONMENT = LocalEnvironment(processes=2) - -exp = common_setup.IssueExperiment( - revisions=REVISIONS, - configs=CONFIGS, - environment=ENVIRONMENT, -) - -exp.add_suite(BENCHMARKS_DIR, SUITE) - -exp.add_parser(exp.EXITCODE_PARSER) -exp.add_parser(exp.PLANNER_PARSER) -exp.add_parser(exp.SINGLE_SEARCH_PARSER) -exp.add_parser("landmark_parser.py") - -ATTRIBUTES = IssueExperiment.DEFAULT_TABLE_ATTRIBUTES + [ - Attribute("landmarks", min_wins=False), - Attribute("disjunctive_landmarks", min_wins=False), - Attribute("conjunctive_landmarks", min_wins=False), - Attribute("orderings", min_wins=False), -] - -exp.add_step("build", exp.build) -exp.add_step("start", exp.start_runs) -exp.add_fetcher(name="fetch") -exp.add_step("comparison table", make_comparison_table) -exp.add_parse_again_step() - -exp.run_steps() - diff --git a/experiments/issue937/v1-satisficing.py b/experiments/issue937/v1-satisficing.py deleted file mode 100755 index a789e27fd6..0000000000 --- a/experiments/issue937/v1-satisficing.py +++ /dev/null @@ -1,89 +0,0 @@ -#! 
/usr/bin/env python -# -*- coding: utf-8 -*- - -import common_setup - -from common_setup import IssueConfig, IssueExperiment - -import os - -from lab.reports import Attribute - -from lab.environments import LocalEnvironment, BaselSlurmEnvironment - -def make_comparison_table(): - report = common_setup.ComparativeReport( - algorithm_pairs=[ - ("issue937-base-lama-first", "issue937-v1-lama-first"), - ("issue937-base-lama-first-pref", "issue937-v1-lama-first-pref"), - ("issue937-base-lm-zg", "issue937-v1-lm-zg"), - ], attributes=ATTRIBUTES, - ) - outfile = os.path.join( - exp.eval_dir, "%s-compare.%s" % (exp.name, report.output_format) - ) - report(exp.eval_dir, outfile) - exp.add_report(report) - -REVISIONS = [ - "issue937-base", - "issue937-v1", -] - -CONFIGS = [ - common_setup.IssueConfig("lama-first", [], - driver_options=["--alias", "lama-first"]), - common_setup.IssueConfig( - "lama-first-pref", ["--evaluator", - "hlm=lmcount(lm_factory=lm_reasonable_orders_hps(" - "lm_rhw()),transform=adapt_costs(one),pref=true)", - "--evaluator", "hff=ff(transform=adapt_costs(one))", - "--search", - """lazy_greedy([hff,hlm],preferred=[hff,hlm], cost_type=one,reopen_closed=false)"""]), - common_setup.IssueConfig("lm-zg", [ - "--search", "eager_greedy([lmcount(lm_zg())])"]), -] - -BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"] -REPO = os.environ["DOWNWARD_REPO"] - -if common_setup.is_running_on_cluster(): - SUITE = common_setup.DEFAULT_SATISFICING_SUITE - ENVIRONMENT = BaselSlurmEnvironment( - partition="infai_2", - email="clemens.buechner@unibas.ch", - export=["PATH", "DOWNWARD_BENCHMARKS"], - ) -else: - SUITE = common_setup.IssueExperiment.DEFAULT_TEST_SUITE - ENVIRONMENT = LocalEnvironment(processes=2) - -exp = common_setup.IssueExperiment( - revisions=REVISIONS, - configs=CONFIGS, - environment=ENVIRONMENT, -) - -exp.add_suite(BENCHMARKS_DIR, SUITE) - -exp.add_parser(exp.ANYTIME_SEARCH_PARSER) -exp.add_parser(exp.EXITCODE_PARSER) -exp.add_parser(exp.PLANNER_PARSER) 
-exp.add_parser(exp.SINGLE_SEARCH_PARSER) -exp.add_parser("landmark_parser.py") - -ATTRIBUTES = IssueExperiment.DEFAULT_TABLE_ATTRIBUTES + [ - Attribute("landmarks", min_wins=False), - Attribute("disjunctive_landmarks", min_wins=False), - Attribute("conjunctive_landmarks", min_wins=False), - Attribute("orderings", min_wins=False), -] - -exp.add_step("build", exp.build) -exp.add_step("start", exp.start_runs) -exp.add_fetcher(name="fetch") -exp.add_step("comparison table", make_comparison_table) -exp.add_parse_again_step() - -exp.run_steps() - diff --git a/experiments/issue939/base.py b/experiments/issue939/base.py deleted file mode 100755 index 38863c2329..0000000000 --- a/experiments/issue939/base.py +++ /dev/null @@ -1,190 +0,0 @@ -#! /usr/bin/env python2 -# -*- coding: utf-8 -*- - -import os - -from lab.environments import LocalEnvironment, BaselSlurmEnvironment - -import common_setup -from common_setup import IssueConfig, IssueExperiment - - -EXPNAME = common_setup.get_experiment_name() -DIR = os.path.dirname(os.path.abspath(__file__)) -BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"] -REVISIONS = ["issue939-base"] -CONFIGS = [ - IssueConfig( - "translate-only", - [], - driver_options=["--translate"]) -] -ENVIRONMENT = BaselSlurmEnvironment( - partition="infai_2", - email="cedric.geissmann@unibas.ch") - -# This was generated by running "./suites.py all" in the benchmarks -# repository. 
-SUITE = [ - 'agricola-opt18-strips', - 'agricola-sat18-strips', - 'airport', - 'airport-adl', - 'assembly', - 'barman-mco14-strips', - 'barman-opt11-strips', - 'barman-opt14-strips', - 'barman-sat11-strips', - 'barman-sat14-strips', - 'blocks', - 'caldera-opt18-adl', - 'caldera-sat18-adl', - 'caldera-split-opt18-adl', - 'caldera-split-sat18-adl', - 'cavediving-14-adl', - 'childsnack-opt14-strips', - 'childsnack-sat14-strips', - 'citycar-opt14-adl', - 'citycar-sat14-adl', - 'data-network-opt18-strips', - 'data-network-sat18-strips', - 'depot', - 'driverlog', - 'elevators-opt08-strips', - 'elevators-opt11-strips', - 'elevators-sat08-strips', - 'elevators-sat11-strips', - 'flashfill-sat18-adl', - 'floortile-opt11-strips', - 'floortile-opt14-strips', - 'floortile-sat11-strips', - 'floortile-sat14-strips', - 'freecell', - 'ged-opt14-strips', - 'ged-sat14-strips', - 'grid', - 'gripper', - 'hiking-agl14-strips', - 'hiking-opt14-strips', - 'hiking-sat14-strips', - 'logistics00', - 'logistics98', - 'maintenance-opt14-adl', - 'maintenance-sat14-adl', - 'miconic', - 'miconic-fulladl', - 'miconic-simpleadl', - 'movie', - 'mprime', - 'mystery', - 'no-mprime', - 'no-mystery', - 'nomystery-opt11-strips', - 'nomystery-sat11-strips', - 'nurikabe-opt18-adl', - 'nurikabe-sat18-adl', - 'openstacks', - 'openstacks-agl14-strips', - 'openstacks-opt08-adl', - 'openstacks-opt08-strips', - 'openstacks-opt11-strips', - 'openstacks-opt14-strips', - 'openstacks-sat08-adl', - 'openstacks-sat08-strips', - 'openstacks-sat11-strips', - 'openstacks-sat14-strips', - 'openstacks-strips', - 'optical-telegraphs', - 'organic-synthesis-opt18-strips', - 'organic-synthesis-sat18-strips', - 'organic-synthesis-split-opt18-strips', - 'organic-synthesis-split-sat18-strips', - 'parcprinter-08-strips', - 'parcprinter-opt11-strips', - 'parcprinter-sat11-strips', - 'parking-opt11-strips', - 'parking-opt14-strips', - 'parking-sat11-strips', - 'parking-sat14-strips', - 'pathways', - 'pathways-noneg', - 
'pegsol-08-strips', - 'pegsol-opt11-strips', - 'pegsol-sat11-strips', - 'petri-net-alignment-opt18-strips', - 'philosophers', - 'pipesworld-notankage', - 'pipesworld-tankage', - 'psr-large', - 'psr-middle', - 'psr-small', - 'rovers', - 'satellite', - 'scanalyzer-08-strips', - 'scanalyzer-opt11-strips', - 'scanalyzer-sat11-strips', - 'schedule', - 'settlers-opt18-adl', - 'settlers-sat18-adl', - 'snake-opt18-strips', - 'snake-sat18-strips', - 'sokoban-opt08-strips', - 'sokoban-opt11-strips', - 'sokoban-sat08-strips', - 'sokoban-sat11-strips', - 'spider-opt18-strips', - 'spider-sat18-strips', - 'storage', - 'termes-opt18-strips', - 'termes-sat18-strips', - 'tetris-opt14-strips', - 'tetris-sat14-strips', - 'thoughtful-mco14-strips', - 'thoughtful-sat14-strips', - 'tidybot-opt11-strips', - 'tidybot-opt14-strips', - 'tidybot-sat11-strips', - 'tpp', - 'transport-opt08-strips', - 'transport-opt11-strips', - 'transport-opt14-strips', - 'transport-sat08-strips', - 'transport-sat11-strips', - 'transport-sat14-strips', - 'trucks', - 'trucks-strips', - 'visitall-opt11-strips', - 'visitall-opt14-strips', - 'visitall-sat11-strips', - 'visitall-sat14-strips', - 'woodworking-opt08-strips', - 'woodworking-opt11-strips', - 'woodworking-sat08-strips', - 'woodworking-sat11-strips', - 'zenotravel', -] - -if common_setup.is_test_run(): - SUITE = IssueExperiment.DEFAULT_TEST_SUITE - ENVIRONMENT = LocalEnvironment(processes=4) - -exp = IssueExperiment( - revisions=REVISIONS, - configs=CONFIGS, - environment=ENVIRONMENT, -) -exp.add_suite(BENCHMARKS_DIR, SUITE) - -exp.add_parser(exp.EXITCODE_PARSER) -exp.add_parser(exp.TRANSLATOR_PARSER) -exp.add_parser(exp.PLANNER_PARSER) -exp.add_parser("translator_additional_parser.py") - -del exp.commands['remove-output-sas'] - -exp.add_step('build', exp.build) -exp.add_step('start', exp.start_runs) -exp.add_parse_again_step() -exp.add_fetcher(name='fetch') - -exp.run_steps() diff --git a/experiments/issue939/common_setup.py 
b/experiments/issue939/common_setup.py deleted file mode 100644 index 47a24652e0..0000000000 --- a/experiments/issue939/common_setup.py +++ /dev/null @@ -1,393 +0,0 @@ -# -*- coding: utf-8 -*- - -import itertools -import os -import platform -import subprocess -import sys - -from lab.experiment import ARGPARSER -from lab import tools - -from downward.experiment import FastDownwardExperiment -from downward.reports.absolute import AbsoluteReport -from downward.reports.compare import ComparativeReport -from downward.reports.scatter import ScatterPlotReport - -from relativescatter import RelativeScatterPlotReport - - -def parse_args(): - ARGPARSER.add_argument( - "--test", - choices=["yes", "no", "auto"], - default="auto", - dest="test_run", - help="test experiment locally on a small suite if --test=yes or " - "--test=auto and we are not on a cluster") - return ARGPARSER.parse_args() - -ARGS = parse_args() - - -DEFAULT_OPTIMAL_SUITE = [ - 'agricola-opt18-strips', 'airport', 'barman-opt11-strips', - 'barman-opt14-strips', 'blocks', 'childsnack-opt14-strips', - 'data-network-opt18-strips', 'depot', 'driverlog', - 'elevators-opt08-strips', 'elevators-opt11-strips', - 'floortile-opt11-strips', 'floortile-opt14-strips', 'freecell', - 'ged-opt14-strips', 'grid', 'gripper', 'hiking-opt14-strips', - 'logistics00', 'logistics98', 'miconic', 'movie', 'mprime', - 'mystery', 'nomystery-opt11-strips', 'openstacks-opt08-strips', - 'openstacks-opt11-strips', 'openstacks-opt14-strips', - 'openstacks-strips', 'organic-synthesis-opt18-strips', - 'organic-synthesis-split-opt18-strips', 'parcprinter-08-strips', - 'parcprinter-opt11-strips', 'parking-opt11-strips', - 'parking-opt14-strips', 'pathways-noneg', 'pegsol-08-strips', - 'pegsol-opt11-strips', 'petri-net-alignment-opt18-strips', - 'pipesworld-notankage', 'pipesworld-tankage', 'psr-small', 'rovers', - 'satellite', 'scanalyzer-08-strips', 'scanalyzer-opt11-strips', - 'snake-opt18-strips', 'sokoban-opt08-strips', - 
'sokoban-opt11-strips', 'spider-opt18-strips', 'storage', - 'termes-opt18-strips', 'tetris-opt14-strips', - 'tidybot-opt11-strips', 'tidybot-opt14-strips', 'tpp', - 'transport-opt08-strips', 'transport-opt11-strips', - 'transport-opt14-strips', 'trucks-strips', 'visitall-opt11-strips', - 'visitall-opt14-strips', 'woodworking-opt08-strips', - 'woodworking-opt11-strips', 'zenotravel'] - -DEFAULT_SATISFICING_SUITE = [ - 'agricola-sat18-strips', 'airport', 'assembly', - 'barman-sat11-strips', 'barman-sat14-strips', 'blocks', - 'caldera-sat18-adl', 'caldera-split-sat18-adl', 'cavediving-14-adl', - 'childsnack-sat14-strips', 'citycar-sat14-adl', - 'data-network-sat18-strips', 'depot', 'driverlog', - 'elevators-sat08-strips', 'elevators-sat11-strips', - 'flashfill-sat18-adl', 'floortile-sat11-strips', - 'floortile-sat14-strips', 'freecell', 'ged-sat14-strips', 'grid', - 'gripper', 'hiking-sat14-strips', 'logistics00', 'logistics98', - 'maintenance-sat14-adl', 'miconic', 'miconic-fulladl', - 'miconic-simpleadl', 'movie', 'mprime', 'mystery', - 'nomystery-sat11-strips', 'nurikabe-sat18-adl', 'openstacks', - 'openstacks-sat08-adl', 'openstacks-sat08-strips', - 'openstacks-sat11-strips', 'openstacks-sat14-strips', - 'openstacks-strips', 'optical-telegraphs', - 'organic-synthesis-sat18-strips', - 'organic-synthesis-split-sat18-strips', 'parcprinter-08-strips', - 'parcprinter-sat11-strips', 'parking-sat11-strips', - 'parking-sat14-strips', 'pathways', 'pathways-noneg', - 'pegsol-08-strips', 'pegsol-sat11-strips', 'philosophers', - 'pipesworld-notankage', 'pipesworld-tankage', 'psr-large', - 'psr-middle', 'psr-small', 'rovers', 'satellite', - 'scanalyzer-08-strips', 'scanalyzer-sat11-strips', 'schedule', - 'settlers-sat18-adl', 'snake-sat18-strips', 'sokoban-sat08-strips', - 'sokoban-sat11-strips', 'spider-sat18-strips', 'storage', - 'termes-sat18-strips', 'tetris-sat14-strips', - 'thoughtful-sat14-strips', 'tidybot-sat11-strips', 'tpp', - 'transport-sat08-strips', 
'transport-sat11-strips', - 'transport-sat14-strips', 'trucks', 'trucks-strips', - 'visitall-sat11-strips', 'visitall-sat14-strips', - 'woodworking-sat08-strips', 'woodworking-sat11-strips', - 'zenotravel'] - - -def get_script(): - """Get file name of main script.""" - return tools.get_script_path() - - -def get_script_dir(): - """Get directory of main script. - - Usually a relative directory (depends on how it was called by the user.)""" - return os.path.dirname(get_script()) - - -def get_experiment_name(): - """Get name for experiment. - - Derived from the absolute filename of the main script, e.g. - "/ham/spam/eggs.py" => "spam-eggs".""" - script = os.path.abspath(get_script()) - script_dir = os.path.basename(os.path.dirname(script)) - script_base = os.path.splitext(os.path.basename(script))[0] - return "%s-%s" % (script_dir, script_base) - - -def get_data_dir(): - """Get data dir for the experiment. - - This is the subdirectory "data" of the directory containing - the main script.""" - return os.path.join(get_script_dir(), "data", get_experiment_name()) - - -def get_repo_base(): - """Get base directory of the repository, as an absolute path. - - Search upwards in the directory tree from the main script until a - directory with a subdirectory named ".hg" is found. - - Abort if the repo base cannot be found.""" - path = os.path.abspath(get_script_dir()) - while os.path.dirname(path) != path: - if os.path.exists(os.path.join(path, ".hg")): - return path - path = os.path.dirname(path) - sys.exit("repo base could not be found") - - -def is_running_on_cluster(): - node = platform.node() - return node.endswith(".scicore.unibas.ch") or node.endswith(".cluster.bc2.ch") - - -def is_test_run(): - return ARGS.test_run == "yes" or ( - ARGS.test_run == "auto" and not is_running_on_cluster()) - - -def get_algo_nick(revision, config_nick): - return "{revision}-{config_nick}".format(**locals()) - - -class IssueConfig(object): - """Hold information about a planner configuration. 
- - See FastDownwardExperiment.add_algorithm() for documentation of the - constructor's options. - - """ - def __init__(self, nick, component_options, - build_options=None, driver_options=None): - self.nick = nick - self.component_options = component_options - self.build_options = build_options - self.driver_options = driver_options - - -class IssueExperiment(FastDownwardExperiment): - """Subclass of FastDownwardExperiment with some convenience features.""" - - DEFAULT_TEST_SUITE = ["depot:p01.pddl", "gripper:prob01.pddl"] - - DEFAULT_TABLE_ATTRIBUTES = [ - "cost", - "coverage", - "error", - "evaluations", - "expansions", - "expansions_until_last_jump", - "generated", - "memory", - "planner_memory", - "planner_time", - "quality", - "run_dir", - "score_evaluations", - "score_expansions", - "score_generated", - "score_memory", - "score_search_time", - "score_total_time", - "search_time", - "total_time", - ] - - DEFAULT_SCATTER_PLOT_ATTRIBUTES = [ - "evaluations", - "expansions", - "expansions_until_last_jump", - "initial_h_value", - "memory", - "search_time", - "total_time", - ] - - PORTFOLIO_ATTRIBUTES = [ - "cost", - "coverage", - "error", - "plan_length", - "run_dir", - ] - - def __init__(self, revisions=None, configs=None, path=None, **kwargs): - """ - - You can either specify both *revisions* and *configs* or none - of them. If they are omitted, you will need to call - exp.add_algorithm() manually. - - If *revisions* is given, it must be a non-empty list of - revision identifiers, which specify which planner versions to - use in the experiment. The same versions are used for - translator, preprocessor and search. :: - - IssueExperiment(revisions=["issue123", "4b3d581643"], ...) - - If *configs* is given, it must be a non-empty list of - IssueConfig objects. 
:: - - IssueExperiment(..., configs=[ - IssueConfig("ff", ["--search", "eager_greedy(ff())"]), - IssueConfig( - "lama", [], - driver_options=["--alias", "seq-sat-lama-2011"]), - ]) - - If *path* is specified, it must be the path to where the - experiment should be built (e.g. - /home/john/experiments/issue123/exp01/). If omitted, the - experiment path is derived automatically from the main - script's filename. Example:: - - script = experiments/issue123/exp01.py --> - path = experiments/issue123/data/issue123-exp01/ - - """ - - path = path or get_data_dir() - - FastDownwardExperiment.__init__(self, path=path, **kwargs) - - if (revisions and not configs) or (not revisions and configs): - raise ValueError( - "please provide either both or none of revisions and configs") - - for rev in revisions: - for config in configs: - self.add_algorithm( - get_algo_nick(rev, config.nick), - get_repo_base(), - rev, - config.component_options, - build_options=config.build_options, - driver_options=config.driver_options) - - self._revisions = revisions - self._configs = configs - - @classmethod - def _is_portfolio(cls, config_nick): - return "fdss" in config_nick - - @classmethod - def get_supported_attributes(cls, config_nick, attributes): - if cls._is_portfolio(config_nick): - return [attr for attr in attributes - if attr in cls.PORTFOLIO_ATTRIBUTES] - return attributes - - def add_absolute_report_step(self, **kwargs): - """Add step that makes an absolute report. - - Absolute reports are useful for experiments that don't compare - revisions. - - The report is written to the experiment evaluation directory. - - All *kwargs* will be passed to the AbsoluteReport class. If the - keyword argument *attributes* is not specified, a default list - of attributes is used. 
:: - - exp.add_absolute_report_step(attributes=["coverage"]) - - """ - kwargs.setdefault("attributes", self.DEFAULT_TABLE_ATTRIBUTES) - report = AbsoluteReport(**kwargs) - outfile = os.path.join( - self.eval_dir, - get_experiment_name() + "." + report.output_format) - self.add_report(report, outfile=outfile) - self.add_step( - 'publish-absolute-report', subprocess.call, ['publish', outfile]) - - def add_comparison_table_step(self, **kwargs): - """Add a step that makes pairwise revision comparisons. - - Create comparative reports for all pairs of Fast Downward - revisions. Each report pairs up the runs of the same config and - lists the two absolute attribute values and their difference - for all attributes in kwargs["attributes"]. - - All *kwargs* will be passed to the CompareConfigsReport class. - If the keyword argument *attributes* is not specified, a - default list of attributes is used. :: - - exp.add_comparison_table_step(attributes=["coverage"]) - - """ - kwargs.setdefault("attributes", self.DEFAULT_TABLE_ATTRIBUTES) - - def make_comparison_tables(): - for rev1, rev2 in itertools.combinations(self._revisions, 2): - compared_configs = [] - for config in self._configs: - config_nick = config.nick - compared_configs.append( - ("%s-%s" % (rev1, config_nick), - "%s-%s" % (rev2, config_nick), - "Diff (%s)" % config_nick)) - report = ComparativeReport(compared_configs, **kwargs) - outfile = os.path.join( - self.eval_dir, - "%s-%s-%s-compare.%s" % ( - self.name, rev1, rev2, report.output_format)) - report(self.eval_dir, outfile) - - def publish_comparison_tables(): - for rev1, rev2 in itertools.combinations(self._revisions, 2): - outfile = os.path.join( - self.eval_dir, - "%s-%s-%s-compare.html" % (self.name, rev1, rev2)) - subprocess.call(["publish", outfile]) - - self.add_step("make-comparison-tables", make_comparison_tables) - self.add_step( - "publish-comparison-tables", publish_comparison_tables) - - def add_scatter_plot_step(self, relative=False, 
attributes=None): - """Add step creating (relative) scatter plots for all revision pairs. - - Create a scatter plot for each combination of attribute, - configuration and revisions pair. If *attributes* is not - specified, a list of common scatter plot attributes is used. - For portfolios all attributes except "cost", "coverage" and - "plan_length" will be ignored. :: - - exp.add_scatter_plot_step(attributes=["expansions"]) - - """ - if relative: - report_class = RelativeScatterPlotReport - scatter_dir = os.path.join(self.eval_dir, "scatter-relative") - step_name = "make-relative-scatter-plots" - else: - report_class = ScatterPlotReport - scatter_dir = os.path.join(self.eval_dir, "scatter-absolute") - step_name = "make-absolute-scatter-plots" - if attributes is None: - attributes = self.DEFAULT_SCATTER_PLOT_ATTRIBUTES - - def make_scatter_plot(config_nick, rev1, rev2, attribute): - name = "-".join([self.name, rev1, rev2, attribute, config_nick]) - print("Make scatter plot for", name) - algo1 = get_algo_nick(rev1, config_nick) - algo2 = get_algo_nick(rev2, config_nick) - report = report_class( - filter_algorithm=[algo1, algo2], - attributes=[attribute], - get_category=lambda run1, run2: run1["domain"]) - report( - self.eval_dir, - os.path.join(scatter_dir, rev1 + "-" + rev2, name)) - - def make_scatter_plots(): - for config in self._configs: - for rev1, rev2 in itertools.combinations(self._revisions, 2): - for attribute in self.get_supported_attributes( - config.nick, attributes): - make_scatter_plot(config.nick, rev1, rev2, attribute) - - self.add_step(step_name, make_scatter_plots) diff --git a/experiments/issue939/fetch.py b/experiments/issue939/fetch.py deleted file mode 100755 index 762945aa7e..0000000000 --- a/experiments/issue939/fetch.py +++ /dev/null @@ -1,84 +0,0 @@ -#! 
/usr/bin/env python -# -*- coding: utf-8 -*- - -from collections import defaultdict -import os - -from lab.environments import LocalEnvironment, BaselSlurmEnvironment -from lab.experiment import Experiment - -from downward.reports import PlanningReport -from downward.reports.absolute import AbsoluteReport -from downward.reports.compare import ComparativeReport - -import common_setup - - -DIR = os.path.dirname(os.path.abspath(__file__)) -ENVIRONMENT = BaselSlurmEnvironment( - partition="infai_2", - email="cedric.geissmann@unibas.ch") - - -if common_setup.is_test_run(): - ENVIRONMENT = LocalEnvironment(processes=4) - -exp = Experiment() - - -class TranslatorDiffReport(PlanningReport): - def get_cell(self, run): - return ";".join(run.get(attr) for attr in self.attributes) - - def get_text(self): - lines = [] - for runs in self.problem_runs.values(): - hashes = set([r.get("translator_output_sas_hash") for r in runs]) - if len(hashes) > 1 or None in hashes: - lines.append(";".join([self.get_cell(r) for r in runs])) - return "\n".join(lines) - - -class SameValueFilters(object): - """Ignore runs for a task where all algorithms have the same value.""" - def __init__(self, attribute): - self._attribute = attribute - self._tasks_to_values = defaultdict(list) - - def _get_task(self, run): - return (run['domain'], run['problem']) - - def store_values(self, run): - value = run.get(self._attribute) - self._tasks_to_values[self._get_task(run)].append(value) - # Don't filter this run, yet. 
- return True - - def filter_tasks_with_equal_values(self, run): - values = self._tasks_to_values[self._get_task(run)] - return len(set(values)) != 1 - - -exp.add_fetcher(src='data/issue939-base-eval') -exp.add_fetcher(src='data/issue939-v1-eval', merge=True) - -ATTRIBUTES = ["error", "run_dir", "translator_*", "translator_output_sas_hash"] -#exp.add_comparison_table_step(attributes=ATTRIBUTES) - -same_value_filters = SameValueFilters("translator_output_sas_hash") -# exp.add_comparison_table_step( -# name="filtered", -# attributes=ATTRIBUTES, -# filter=[same_value_filters.store_values, same_value_filters.filter_tasks_with_equal_values]) - -exp.add_report(TranslatorDiffReport( - attributes=["domain", "problem", "algorithm", "run_dir"] - ), outfile="different_output_sas.csv" -) - -exp.add_report(AbsoluteReport(attributes=ATTRIBUTES)) -exp.add_report(ComparativeReport([ - ('issue939-base-translate-only', 'issue939-v1-translate-only') - ], attributes=ATTRIBUTES)) - -exp.run_steps() diff --git a/experiments/issue939/relativescatter.py b/experiments/issue939/relativescatter.py deleted file mode 100644 index f74cb6e721..0000000000 --- a/experiments/issue939/relativescatter.py +++ /dev/null @@ -1,105 +0,0 @@ -# -*- coding: utf-8 -*- - -from collections import defaultdict - -from matplotlib import ticker - -from downward.reports.scatter import ScatterPlotReport -from downward.reports.plot import PlotReport, Matplotlib, MatplotlibPlot - - -# TODO: handle outliers - -# TODO: this is mostly copied from ScatterMatplotlib (scatter.py) -class RelativeScatterMatplotlib(Matplotlib): - @classmethod - def _plot(cls, report, axes, categories, styles): - # Display grid - axes.grid(b=True, linestyle='-', color='0.75') - - has_points = False - # Generate the scatter plots - for category, coords in sorted(categories.items()): - X, Y = zip(*coords) - axes.scatter(X, Y, s=42, label=category, **styles[category]) - if X and Y: - has_points = True - - if report.xscale == 'linear' or 
report.yscale == 'linear': - plot_size = report.missing_val * 1.01 - else: - plot_size = report.missing_val * 1.25 - - # make 5 ticks above and below 1 - yticks = [] - tick_step = report.ylim_top**(1/5.0) - for i in xrange(-5, 6): - yticks.append(tick_step**i) - axes.set_yticks(yticks) - axes.get_yaxis().set_major_formatter(ticker.ScalarFormatter()) - - axes.set_xlim(report.xlim_left or -1, report.xlim_right or plot_size) - axes.set_ylim(report.ylim_bottom or -1, report.ylim_top or plot_size) - - for axis in [axes.xaxis, axes.yaxis]: - MatplotlibPlot.change_axis_formatter( - axis, - report.missing_val if report.show_missing else None) - return has_points - - -class RelativeScatterPlotReport(ScatterPlotReport): - """ - Generate a scatter plot that shows a relative comparison of two - algorithms with regard to the given attribute. The attribute value - of algorithm 1 is shown on the x-axis and the relation to the value - of algorithm 2 on the y-axis. - """ - - def __init__(self, show_missing=True, get_category=None, **kwargs): - ScatterPlotReport.__init__(self, show_missing, get_category, **kwargs) - if self.output_format == 'tex': - raise "not supported" - else: - self.writer = RelativeScatterMatplotlib - - def _fill_categories(self, runs): - # We discard the *runs* parameter. 
- # Map category names to value tuples - categories = defaultdict(list) - self.ylim_bottom = 2 - self.ylim_top = 0.5 - self.xlim_left = float("inf") - for (domain, problem), runs in self.problem_runs.items(): - if len(runs) != 2: - continue - run1, run2 = runs - assert (run1['algorithm'] == self.algorithms[0] and - run2['algorithm'] == self.algorithms[1]) - val1 = run1.get(self.attribute) - val2 = run2.get(self.attribute) - if val1 is None or val2 is None: - continue - category = self.get_category(run1, run2) - assert val1 > 0, (domain, problem, self.algorithms[0], val1) - assert val2 > 0, (domain, problem, self.algorithms[1], val2) - x = val1 - y = val2 / float(val1) - - categories[category].append((x, y)) - - self.ylim_top = max(self.ylim_top, y) - self.ylim_bottom = min(self.ylim_bottom, y) - self.xlim_left = min(self.xlim_left, x) - - # center around 1 - if self.ylim_bottom < 1: - self.ylim_top = max(self.ylim_top, 1 / float(self.ylim_bottom)) - if self.ylim_top > 1: - self.ylim_bottom = min(self.ylim_bottom, 1 / float(self.ylim_top)) - return categories - - def _set_scales(self, xscale, yscale): - # ScatterPlot uses log-scaling on the x-axis by default. 
- PlotReport._set_scales( - self, xscale or self.attribute.scale or 'log', 'log') diff --git a/experiments/issue939/requirements.txt b/experiments/issue939/requirements.txt deleted file mode 100644 index c826f88f41..0000000000 --- a/experiments/issue939/requirements.txt +++ /dev/null @@ -1 +0,0 @@ -lab==4.2 diff --git a/experiments/issue939/translator_additional_parser.py b/experiments/issue939/translator_additional_parser.py deleted file mode 100755 index b18b109039..0000000000 --- a/experiments/issue939/translator_additional_parser.py +++ /dev/null @@ -1,12 +0,0 @@ -#!/usr/bin/env python - -import hashlib - -from lab.parser import Parser - -def add_hash_value(content, props): - props['translator_output_sas_hash'] = hashlib.sha512(str(content).encode('utf-8')).hexdigest() - -parser = Parser() -parser.add_function(add_hash_value, file="output.sas") -parser.parse() diff --git a/experiments/issue939/v1.py b/experiments/issue939/v1.py deleted file mode 100755 index 59ba02bc1d..0000000000 --- a/experiments/issue939/v1.py +++ /dev/null @@ -1,190 +0,0 @@ -#! /usr/bin/env python3 -# -*- coding: utf-8 -*- - -import os - -from lab.environments import LocalEnvironment, BaselSlurmEnvironment - -import common_setup -from common_setup import IssueConfig, IssueExperiment - - -EXPNAME = common_setup.get_experiment_name() -DIR = os.path.dirname(os.path.abspath(__file__)) -BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"] -REVISIONS = ["issue939-v1"] -CONFIGS = [ - IssueConfig( - "translate-only", - [], - driver_options=["--translate"]) -] -ENVIRONMENT = BaselSlurmEnvironment( - partition="infai_2", - email="cedric.geissmann@unibas.ch") - -# This was generated by running "./suites.py all" in the benchmarks -# repository. 
-SUITE = [ - 'agricola-opt18-strips', - 'agricola-sat18-strips', - 'airport', - 'airport-adl', - 'assembly', - 'barman-mco14-strips', - 'barman-opt11-strips', - 'barman-opt14-strips', - 'barman-sat11-strips', - 'barman-sat14-strips', - 'blocks', - 'caldera-opt18-adl', - 'caldera-sat18-adl', - 'caldera-split-opt18-adl', - 'caldera-split-sat18-adl', - 'cavediving-14-adl', - 'childsnack-opt14-strips', - 'childsnack-sat14-strips', - 'citycar-opt14-adl', - 'citycar-sat14-adl', - 'data-network-opt18-strips', - 'data-network-sat18-strips', - 'depot', - 'driverlog', - 'elevators-opt08-strips', - 'elevators-opt11-strips', - 'elevators-sat08-strips', - 'elevators-sat11-strips', - 'flashfill-sat18-adl', - 'floortile-opt11-strips', - 'floortile-opt14-strips', - 'floortile-sat11-strips', - 'floortile-sat14-strips', - 'freecell', - 'ged-opt14-strips', - 'ged-sat14-strips', - 'grid', - 'gripper', - 'hiking-agl14-strips', - 'hiking-opt14-strips', - 'hiking-sat14-strips', - 'logistics00', - 'logistics98', - 'maintenance-opt14-adl', - 'maintenance-sat14-adl', - 'miconic', - 'miconic-fulladl', - 'miconic-simpleadl', - 'movie', - 'mprime', - 'mystery', - 'no-mprime', - 'no-mystery', - 'nomystery-opt11-strips', - 'nomystery-sat11-strips', - 'nurikabe-opt18-adl', - 'nurikabe-sat18-adl', - 'openstacks', - 'openstacks-agl14-strips', - 'openstacks-opt08-adl', - 'openstacks-opt08-strips', - 'openstacks-opt11-strips', - 'openstacks-opt14-strips', - 'openstacks-sat08-adl', - 'openstacks-sat08-strips', - 'openstacks-sat11-strips', - 'openstacks-sat14-strips', - 'openstacks-strips', - 'optical-telegraphs', - 'organic-synthesis-opt18-strips', - 'organic-synthesis-sat18-strips', - 'organic-synthesis-split-opt18-strips', - 'organic-synthesis-split-sat18-strips', - 'parcprinter-08-strips', - 'parcprinter-opt11-strips', - 'parcprinter-sat11-strips', - 'parking-opt11-strips', - 'parking-opt14-strips', - 'parking-sat11-strips', - 'parking-sat14-strips', - 'pathways', - 'pathways-noneg', - 
'pegsol-08-strips', - 'pegsol-opt11-strips', - 'pegsol-sat11-strips', - 'petri-net-alignment-opt18-strips', - 'philosophers', - 'pipesworld-notankage', - 'pipesworld-tankage', - 'psr-large', - 'psr-middle', - 'psr-small', - 'rovers', - 'satellite', - 'scanalyzer-08-strips', - 'scanalyzer-opt11-strips', - 'scanalyzer-sat11-strips', - 'schedule', - 'settlers-opt18-adl', - 'settlers-sat18-adl', - 'snake-opt18-strips', - 'snake-sat18-strips', - 'sokoban-opt08-strips', - 'sokoban-opt11-strips', - 'sokoban-sat08-strips', - 'sokoban-sat11-strips', - 'spider-opt18-strips', - 'spider-sat18-strips', - 'storage', - 'termes-opt18-strips', - 'termes-sat18-strips', - 'tetris-opt14-strips', - 'tetris-sat14-strips', - 'thoughtful-mco14-strips', - 'thoughtful-sat14-strips', - 'tidybot-opt11-strips', - 'tidybot-opt14-strips', - 'tidybot-sat11-strips', - 'tpp', - 'transport-opt08-strips', - 'transport-opt11-strips', - 'transport-opt14-strips', - 'transport-sat08-strips', - 'transport-sat11-strips', - 'transport-sat14-strips', - 'trucks', - 'trucks-strips', - 'visitall-opt11-strips', - 'visitall-opt14-strips', - 'visitall-sat11-strips', - 'visitall-sat14-strips', - 'woodworking-opt08-strips', - 'woodworking-opt11-strips', - 'woodworking-sat08-strips', - 'woodworking-sat11-strips', - 'zenotravel', -] - -if common_setup.is_test_run(): - SUITE = IssueExperiment.DEFAULT_TEST_SUITE - ENVIRONMENT = LocalEnvironment(processes=4) - -exp = IssueExperiment( - revisions=REVISIONS, - configs=CONFIGS, - environment=ENVIRONMENT, -) -exp.add_suite(BENCHMARKS_DIR, SUITE) - -exp.add_parser(exp.EXITCODE_PARSER) -exp.add_parser(exp.TRANSLATOR_PARSER) -exp.add_parser(exp.PLANNER_PARSER) -exp.add_parser("translator_additional_parser.py") - -del exp.commands['remove-output-sas'] - -exp.add_step('build', exp.build) -exp.add_step('start', exp.start_runs) -exp.add_parse_again_step() -exp.add_fetcher(name='fetch') - -exp.run_steps() diff --git a/experiments/issue960/common_setup.py 
b/experiments/issue960/common_setup.py deleted file mode 100644 index b9cd76070f..0000000000 --- a/experiments/issue960/common_setup.py +++ /dev/null @@ -1,394 +0,0 @@ -# -*- coding: utf-8 -*- - -import itertools -import os -import platform -import subprocess -import sys - -from lab.experiment import ARGPARSER -from lab import tools - -from downward.experiment import FastDownwardExperiment -from downward.reports.absolute import AbsoluteReport -from downward.reports.compare import ComparativeReport -from downward.reports.scatter import ScatterPlotReport - - -def parse_args(): - ARGPARSER.add_argument( - "--test", - choices=["yes", "no", "auto"], - default="auto", - dest="test_run", - help="test experiment locally on a small suite if --test=yes or " - "--test=auto and we are not on a cluster") - return ARGPARSER.parse_args() - -ARGS = parse_args() - - -DEFAULT_OPTIMAL_SUITE = [ - 'agricola-opt18-strips', 'airport', 'barman-opt11-strips', - 'barman-opt14-strips', 'blocks', 'childsnack-opt14-strips', - 'data-network-opt18-strips', 'depot', 'driverlog', - 'elevators-opt08-strips', 'elevators-opt11-strips', - 'floortile-opt11-strips', 'floortile-opt14-strips', 'freecell', - 'ged-opt14-strips', 'grid', 'gripper', 'hiking-opt14-strips', - 'logistics00', 'logistics98', 'miconic', 'movie', 'mprime', - 'mystery', 'nomystery-opt11-strips', 'openstacks-opt08-strips', - 'openstacks-opt11-strips', 'openstacks-opt14-strips', - 'openstacks-strips', 'organic-synthesis-opt18-strips', - 'organic-synthesis-split-opt18-strips', 'parcprinter-08-strips', - 'parcprinter-opt11-strips', 'parking-opt11-strips', - 'parking-opt14-strips', 'pathways-noneg', 'pegsol-08-strips', - 'pegsol-opt11-strips', 'petri-net-alignment-opt18-strips', - 'pipesworld-notankage', 'pipesworld-tankage', 'psr-small', 'rovers', - 'satellite', 'scanalyzer-08-strips', 'scanalyzer-opt11-strips', - 'snake-opt18-strips', 'sokoban-opt08-strips', - 'sokoban-opt11-strips', 'spider-opt18-strips', 'storage', - 
'termes-opt18-strips', 'tetris-opt14-strips', - 'tidybot-opt11-strips', 'tidybot-opt14-strips', 'tpp', - 'transport-opt08-strips', 'transport-opt11-strips', - 'transport-opt14-strips', 'trucks-strips', 'visitall-opt11-strips', - 'visitall-opt14-strips', 'woodworking-opt08-strips', - 'woodworking-opt11-strips', 'zenotravel'] - -DEFAULT_SATISFICING_SUITE = [ - 'agricola-sat18-strips', 'airport', 'assembly', - 'barman-sat11-strips', 'barman-sat14-strips', 'blocks', - 'caldera-sat18-adl', 'caldera-split-sat18-adl', 'cavediving-14-adl', - 'childsnack-sat14-strips', 'citycar-sat14-adl', - 'data-network-sat18-strips', 'depot', 'driverlog', - 'elevators-sat08-strips', 'elevators-sat11-strips', - 'flashfill-sat18-adl', 'floortile-sat11-strips', - 'floortile-sat14-strips', 'freecell', 'ged-sat14-strips', 'grid', - 'gripper', 'hiking-sat14-strips', 'logistics00', 'logistics98', - 'maintenance-sat14-adl', 'miconic', 'miconic-fulladl', - 'miconic-simpleadl', 'movie', 'mprime', 'mystery', - 'nomystery-sat11-strips', 'nurikabe-sat18-adl', 'openstacks', - 'openstacks-sat08-adl', 'openstacks-sat08-strips', - 'openstacks-sat11-strips', 'openstacks-sat14-strips', - 'openstacks-strips', 'optical-telegraphs', - 'organic-synthesis-sat18-strips', - 'organic-synthesis-split-sat18-strips', 'parcprinter-08-strips', - 'parcprinter-sat11-strips', 'parking-sat11-strips', - 'parking-sat14-strips', 'pathways', 'pathways-noneg', - 'pegsol-08-strips', 'pegsol-sat11-strips', 'philosophers', - 'pipesworld-notankage', 'pipesworld-tankage', 'psr-large', - 'psr-middle', 'psr-small', 'rovers', 'satellite', - 'scanalyzer-08-strips', 'scanalyzer-sat11-strips', 'schedule', - 'settlers-sat18-adl', 'snake-sat18-strips', 'sokoban-sat08-strips', - 'sokoban-sat11-strips', 'spider-sat18-strips', 'storage', - 'termes-sat18-strips', 'tetris-sat14-strips', - 'thoughtful-sat14-strips', 'tidybot-sat11-strips', 'tpp', - 'transport-sat08-strips', 'transport-sat11-strips', - 'transport-sat14-strips', 'trucks', 
'trucks-strips', - 'visitall-sat11-strips', 'visitall-sat14-strips', - 'woodworking-sat08-strips', 'woodworking-sat11-strips', - 'zenotravel'] - - -def get_script(): - """Get file name of main script.""" - return tools.get_script_path() - - -def get_script_dir(): - """Get directory of main script. - - Usually a relative directory (depends on how it was called by the user.)""" - return os.path.dirname(get_script()) - - -def get_experiment_name(): - """Get name for experiment. - - Derived from the absolute filename of the main script, e.g. - "/ham/spam/eggs.py" => "spam-eggs".""" - script = os.path.abspath(get_script()) - script_dir = os.path.basename(os.path.dirname(script)) - script_base = os.path.splitext(os.path.basename(script))[0] - return "%s-%s" % (script_dir, script_base) - - -def get_data_dir(): - """Get data dir for the experiment. - - This is the subdirectory "data" of the directory containing - the main script.""" - return os.path.join(get_script_dir(), "data", get_experiment_name()) - - -def get_repo_base(): - """Get base directory of the repository, as an absolute path. - - Search upwards in the directory tree from the main script until a - directory with a subdirectory named ".git" is found. - - Abort if the repo base cannot be found.""" - path = os.path.abspath(get_script_dir()) - while os.path.dirname(path) != path: - if os.path.exists(os.path.join(path, ".git")): - return path - path = os.path.dirname(path) - sys.exit("repo base could not be found") - - -def is_running_on_cluster(): - node = platform.node() - return node.endswith(".scicore.unibas.ch") or node.endswith(".cluster.bc2.ch") - - -def is_test_run(): - return ARGS.test_run == "yes" or ( - ARGS.test_run == "auto" and not is_running_on_cluster()) - - -def get_algo_nick(revision, config_nick): - return "{revision}-{config_nick}".format(**locals()) - - -class IssueConfig(object): - """Hold information about a planner configuration. 
- - See FastDownwardExperiment.add_algorithm() for documentation of the - constructor's options. - - """ - def __init__(self, nick, component_options, - build_options=None, driver_options=None): - self.nick = nick - self.component_options = component_options - self.build_options = build_options - self.driver_options = driver_options - - -class IssueExperiment(FastDownwardExperiment): - """Subclass of FastDownwardExperiment with some convenience features.""" - - DEFAULT_TEST_SUITE = ["depot:p01.pddl", "gripper:prob01.pddl"] - - DEFAULT_TABLE_ATTRIBUTES = [ - "cost", - "coverage", - "error", - "evaluations", - "expansions", - "expansions_until_last_jump", - "generated", - "memory", - "planner_memory", - "planner_time", - "quality", - "run_dir", - "score_evaluations", - "score_expansions", - "score_generated", - "score_memory", - "score_search_time", - "score_total_time", - "search_time", - "total_time", - ] - - DEFAULT_SCATTER_PLOT_ATTRIBUTES = [ - "evaluations", - "expansions", - "expansions_until_last_jump", - "initial_h_value", - "memory", - "search_time", - "total_time", - ] - - PORTFOLIO_ATTRIBUTES = [ - "cost", - "coverage", - "error", - "plan_length", - "run_dir", - ] - - def __init__(self, revisions=None, configs=None, path=None, **kwargs): - """ - - You can either specify both *revisions* and *configs* or none - of them. If they are omitted, you will need to call - exp.add_algorithm() manually. - - If *revisions* is given, it must be a non-empty list of - revision identifiers, which specify which planner versions to - use in the experiment. The same versions are used for - translator, preprocessor and search. :: - - IssueExperiment(revisions=["issue123", "4b3d581643"], ...) - - If *configs* is given, it must be a non-empty list of - IssueConfig objects. 
:: - - IssueExperiment(..., configs=[ - IssueConfig("ff", ["--search", "eager_greedy(ff())"]), - IssueConfig( - "lama", [], - driver_options=["--alias", "seq-sat-lama-2011"]), - ]) - - If *path* is specified, it must be the path to where the - experiment should be built (e.g. - /home/john/experiments/issue123/exp01/). If omitted, the - experiment path is derived automatically from the main - script's filename. Example:: - - script = experiments/issue123/exp01.py --> - path = experiments/issue123/data/issue123-exp01/ - - """ - - path = path or get_data_dir() - - FastDownwardExperiment.__init__(self, path=path, **kwargs) - - if (revisions and not configs) or (not revisions and configs): - raise ValueError( - "please provide either both or none of revisions and configs") - - for rev in revisions: - for config in configs: - self.add_algorithm( - get_algo_nick(rev, config.nick), - get_repo_base(), - rev, - config.component_options, - build_options=config.build_options, - driver_options=config.driver_options) - - self._revisions = revisions - self._configs = configs - - @classmethod - def _is_portfolio(cls, config_nick): - return "fdss" in config_nick - - @classmethod - def get_supported_attributes(cls, config_nick, attributes): - if cls._is_portfolio(config_nick): - return [attr for attr in attributes - if attr in cls.PORTFOLIO_ATTRIBUTES] - return attributes - - def add_absolute_report_step(self, **kwargs): - """Add step that makes an absolute report. - - Absolute reports are useful for experiments that don't compare - revisions. - - The report is written to the experiment evaluation directory. - - All *kwargs* will be passed to the AbsoluteReport class. If the - keyword argument *attributes* is not specified, a default list - of attributes is used. 
:: - - exp.add_absolute_report_step(attributes=["coverage"]) - - """ - kwargs.setdefault("attributes", self.DEFAULT_TABLE_ATTRIBUTES) - report = AbsoluteReport(**kwargs) - outfile = os.path.join( - self.eval_dir, - get_experiment_name() + "." + report.output_format) - self.add_report(report, outfile=outfile) - self.add_step( - 'publish-absolute-report', subprocess.call, ['publish', outfile]) - - def add_comparison_table_step(self, **kwargs): - """Add a step that makes pairwise revision comparisons. - - Create comparative reports for all pairs of Fast Downward - revisions. Each report pairs up the runs of the same config and - lists the two absolute attribute values and their difference - for all attributes in kwargs["attributes"]. - - All *kwargs* will be passed to the CompareConfigsReport class. - If the keyword argument *attributes* is not specified, a - default list of attributes is used. :: - - exp.add_comparison_table_step(attributes=["coverage"]) - - """ - kwargs.setdefault("attributes", self.DEFAULT_TABLE_ATTRIBUTES) - - def make_comparison_tables(): - for rev1, rev2 in itertools.combinations(self._revisions, 2): - compared_configs = [] - for config in self._configs: - config_nick = config.nick - compared_configs.append( - ("%s-%s" % (rev1, config_nick), - "%s-%s" % (rev2, config_nick), - "Diff (%s)" % config_nick)) - report = ComparativeReport(compared_configs, **kwargs) - outfile = os.path.join( - self.eval_dir, - "%s-%s-%s-compare.%s" % ( - self.name, rev1, rev2, report.output_format)) - report(self.eval_dir, outfile) - - def publish_comparison_tables(): - for rev1, rev2 in itertools.combinations(self._revisions, 2): - outfile = os.path.join( - self.eval_dir, - "%s-%s-%s-compare.html" % (self.name, rev1, rev2)) - subprocess.call(["publish", outfile]) - - self.add_step("make-comparison-tables", make_comparison_tables) - self.add_step( - "publish-comparison-tables", publish_comparison_tables) - - def add_scatter_plot_step(self, relative=False, 
attributes=None, additional=[]): - """Add step creating (relative) scatter plots for all revision pairs. - - Create a scatter plot for each combination of attribute, - configuration and revisions pair. If *attributes* is not - specified, a list of common scatter plot attributes is used. - For portfolios all attributes except "cost", "coverage" and - "plan_length" will be ignored. :: - - exp.add_scatter_plot_step(attributes=["expansions"]) - - """ - if relative: - scatter_dir = os.path.join(self.eval_dir, "scatter-relative") - step_name = "make-relative-scatter-plots" - else: - scatter_dir = os.path.join(self.eval_dir, "scatter-absolute") - step_name = "make-absolute-scatter-plots" - if attributes is None: - attributes = self.DEFAULT_SCATTER_PLOT_ATTRIBUTES - - def make_scatter_plot(config_nick, rev1, rev2, attribute, config_nick2=None): - name = "-".join([self.name, rev1, rev2, attribute, config_nick]) - if config_nick2 is not None: - name += "-" + config_nick2 - print("Make scatter plot for", name) - algo1 = get_algo_nick(rev1, config_nick) - algo2 = get_algo_nick(rev2, config_nick if config_nick2 is None else config_nick2) - report = ScatterPlotReport( - filter_algorithm=[algo1, algo2], - attributes=[attribute], - relative=relative, - get_category=lambda run1, run2: run1["domain"]) - report( - self.eval_dir, - os.path.join(scatter_dir, rev1 + "-" + rev2, name)) - - def make_scatter_plots(): - for config in self._configs: - for rev1, rev2 in itertools.combinations(self._revisions, 2): - for attribute in self.get_supported_attributes( - config.nick, attributes): - make_scatter_plot(config.nick, rev1, rev2, attribute) - for nick1, nick2, rev1, rev2, attribute in additional: - make_scatter_plot(nick1, rev1, rev2, attribute, config_nick2=nick2) - - self.add_step(step_name, lambda: make_scatter_plots) diff --git a/experiments/issue960/requirements.txt b/experiments/issue960/requirements.txt deleted file mode 100644 index c826f88f41..0000000000 --- 
a/experiments/issue960/requirements.txt +++ /dev/null @@ -1 +0,0 @@ -lab==4.2 diff --git a/experiments/issue960/v1.py b/experiments/issue960/v1.py deleted file mode 100755 index 983bba18d7..0000000000 --- a/experiments/issue960/v1.py +++ /dev/null @@ -1,55 +0,0 @@ -#! /usr/bin/env python3 - -import itertools -import os - -from lab.environments import LocalEnvironment, BaselSlurmEnvironment - -from downward.reports.compare import ComparativeReport - -import common_setup -from common_setup import IssueConfig, IssueExperiment - -DIR = os.path.dirname(os.path.abspath(__file__)) -SCRIPT_NAME = os.path.splitext(os.path.basename(__file__))[0] -BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"] -REVISIONS = ["issue960-base", "issue960-v1"] -CONFIGS = [ - IssueConfig("opcount-seq-lmcut-cplex", ["--search", "astar(operatorcounting([state_equation_constraints(), lmcut_constraints()], lpsolver=cplex))"]), - IssueConfig("diverse-potentials-cplex", ["--search", "astar(diverse_potentials(lpsolver=cplex,random_seed=1729))"]), - IssueConfig("optimal-lmcount-cplex", ["--search", "astar(lmcount(lm_merged([lm_rhw(),lm_hm(m=1)]), admissible=true, optimal=true, lpsolver=cplex))"]), - IssueConfig("opcount-seq-lmcut-soplex", ["--search", "astar(operatorcounting([state_equation_constraints(), lmcut_constraints()], lpsolver=soplex))"]), - IssueConfig("diverse-potentials-soplex", ["--search", "astar(diverse_potentials(lpsolver=soplex,random_seed=1729))"]), - IssueConfig("optimal-lmcount-soplex", ["--search", "astar(lmcount(lm_merged([lm_rhw(),lm_hm(m=1)]), admissible=true, optimal=true, lpsolver=soplex))"]), -] - -SUITE = common_setup.DEFAULT_OPTIMAL_SUITE -ENVIRONMENT = BaselSlurmEnvironment( - partition="infai_2", - email="rik.degraaff@unibas.ch", - export=["PATH", "DOWNWARD_BENCHMARKS"]) - -if common_setup.is_test_run(): - SUITE = IssueExperiment.DEFAULT_TEST_SUITE - ENVIRONMENT = LocalEnvironment(processes=3) - -exp = IssueExperiment( - revisions=REVISIONS, - configs=CONFIGS, - 
environment=ENVIRONMENT, -) -exp.add_suite(BENCHMARKS_DIR, SUITE) - -exp.add_parser(exp.EXITCODE_PARSER) -exp.add_parser(exp.SINGLE_SEARCH_PARSER) -exp.add_parser(exp.PLANNER_PARSER) - -exp.add_step('build', exp.build) -exp.add_step('start', exp.start_runs) -exp.add_fetcher(name='fetch') - -#exp.add_absolute_report_step() -exp.add_comparison_table_step() -exp.add_scatter_plot_step(relative=True, attributes=["total_time", "memory"]) - -exp.run_steps() diff --git a/experiments/issue960/v2.py b/experiments/issue960/v2.py deleted file mode 100755 index 5b2a16f46f..0000000000 --- a/experiments/issue960/v2.py +++ /dev/null @@ -1,55 +0,0 @@ -#! /usr/bin/env python3 - -import itertools -import os - -from lab.environments import LocalEnvironment, BaselSlurmEnvironment - -from downward.reports.compare import ComparativeReport - -import common_setup -from common_setup import IssueConfig, IssueExperiment - -DIR = os.path.dirname(os.path.abspath(__file__)) -SCRIPT_NAME = os.path.splitext(os.path.basename(__file__))[0] -BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"] -REVISIONS = ["issue960-base", "issue960-v2"] -CONFIGS = [ - IssueConfig("opcount-seq-lmcut-cplex", ["--search", "astar(operatorcounting([state_equation_constraints(), lmcut_constraints()], lpsolver=cplex))"]), - IssueConfig("diverse-potentials-cplex", ["--search", "astar(diverse_potentials(lpsolver=cplex,random_seed=1729))"]), - IssueConfig("optimal-lmcount-cplex", ["--search", "astar(lmcount(lm_merged([lm_rhw(),lm_hm(m=1)]), admissible=true, optimal=true, lpsolver=cplex))"]), - IssueConfig("opcount-seq-lmcut-soplex", ["--search", "astar(operatorcounting([state_equation_constraints(), lmcut_constraints()], lpsolver=soplex))"]), - IssueConfig("diverse-potentials-soplex", ["--search", "astar(diverse_potentials(lpsolver=soplex,random_seed=1729))"]), - IssueConfig("optimal-lmcount-soplex", ["--search", "astar(lmcount(lm_merged([lm_rhw(),lm_hm(m=1)]), admissible=true, optimal=true, lpsolver=soplex))"]), -] - -SUITE = 
common_setup.DEFAULT_OPTIMAL_SUITE -ENVIRONMENT = BaselSlurmEnvironment( - partition="infai_2", - email="rik.degraaff@unibas.ch", - export=["PATH", "DOWNWARD_BENCHMARKS"]) - -if common_setup.is_test_run(): - SUITE = IssueExperiment.DEFAULT_TEST_SUITE - ENVIRONMENT = LocalEnvironment(processes=3) - -exp = IssueExperiment( - revisions=REVISIONS, - configs=CONFIGS, - environment=ENVIRONMENT, -) -exp.add_suite(BENCHMARKS_DIR, SUITE) - -exp.add_parser(exp.EXITCODE_PARSER) -exp.add_parser(exp.SINGLE_SEARCH_PARSER) -exp.add_parser(exp.PLANNER_PARSER) - -exp.add_step('build', exp.build) -exp.add_step('start', exp.start_runs) -exp.add_fetcher(name='fetch') - -#exp.add_absolute_report_step() -exp.add_comparison_table_step() -exp.add_scatter_plot_step(relative=True, attributes=["total_time", "memory"]) - -exp.run_steps() diff --git a/experiments/issue960/v3.py b/experiments/issue960/v3.py deleted file mode 100755 index f92e50bf8c..0000000000 --- a/experiments/issue960/v3.py +++ /dev/null @@ -1,59 +0,0 @@ -#! 
/usr/bin/env python3 - -import itertools -import os - -from lab.environments import LocalEnvironment, BaselSlurmEnvironment - -from downward.reports.compare import ComparativeReport - -import common_setup -from common_setup import IssueConfig, IssueExperiment - -DIR = os.path.dirname(os.path.abspath(__file__)) -SCRIPT_NAME = os.path.splitext(os.path.basename(__file__))[0] -BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"] -REVISIONS = ["issue960-base", "issue960-v3"] -CONFIGS = [ - IssueConfig("opcount-seq-lmcut-cplex", ["--search", "astar(operatorcounting([state_equation_constraints(), lmcut_constraints()], lpsolver=cplex))"]), - IssueConfig("diverse-potentials-cplex", ["--search", "astar(diverse_potentials(lpsolver=cplex,random_seed=1729))"]), - IssueConfig("optimal-lmcount-cplex", ["--search", "astar(lmcount(lm_merged([lm_rhw(),lm_hm(m=1)]), admissible=true, optimal=true, lpsolver=cplex))"]), - IssueConfig("opcount-seq-lmcut-soplex", ["--search", "astar(operatorcounting([state_equation_constraints(), lmcut_constraints()], lpsolver=soplex))"]), - IssueConfig("diverse-potentials-soplex", ["--search", "astar(diverse_potentials(lpsolver=soplex,random_seed=1729))"]), - IssueConfig("diverse-potentials-soplex-copy", ["--search", "astar(diverse_potentials(lpsolver=soplex,random_seed=1729))"]), - IssueConfig("optimal-lmcount-soplex", ["--search", "astar(lmcount(lm_merged([lm_rhw(),lm_hm(m=1)]), admissible=true, optimal=true, lpsolver=soplex))"]), -] - -SUITE = common_setup.DEFAULT_OPTIMAL_SUITE -ENVIRONMENT = BaselSlurmEnvironment( - partition="infai_2", - email="rik.degraaff@unibas.ch", - export=["PATH", "DOWNWARD_BENCHMARKS"]) - -if common_setup.is_test_run(): - SUITE = IssueExperiment.DEFAULT_TEST_SUITE - ENVIRONMENT = LocalEnvironment(processes=3) - -exp = IssueExperiment( - revisions=REVISIONS, - configs=CONFIGS, - environment=ENVIRONMENT, -) -exp.add_suite(BENCHMARKS_DIR, SUITE) - -exp.add_parser(exp.EXITCODE_PARSER) -exp.add_parser(exp.SINGLE_SEARCH_PARSER) 
-exp.add_parser(exp.PLANNER_PARSER) - -exp.add_step('build', exp.build) -exp.add_step('start', exp.start_runs) -exp.add_fetcher(name='fetch') - -#exp.add_absolute_report_step() -exp.add_comparison_table_step() -exp.add_scatter_plot_step(relative=True, attributes=["total_time", "memory"], - additional=[ - ("diverse-potentials-soplex", "diverse-potentials-soplex-copy", "issue960-base", "issue960-base", "memory"), - ("diverse-potentials-soplex", "diverse-potentials-soplex-copy", "issue960-base", "issue960-base", "total_time")]) - -exp.run_steps() diff --git a/experiments/issue983/common_setup.py b/experiments/issue983/common_setup.py deleted file mode 100644 index 9899f21500..0000000000 --- a/experiments/issue983/common_setup.py +++ /dev/null @@ -1,394 +0,0 @@ -# -*- coding: utf-8 -*- - -import itertools -import os -import platform -import subprocess -import sys - -from lab.experiment import ARGPARSER -from lab import tools - -from downward.experiment import FastDownwardExperiment -from downward.reports.absolute import AbsoluteReport -from downward.reports.compare import ComparativeReport -from downward.reports.scatter import ScatterPlotReport - - -def parse_args(): - ARGPARSER.add_argument( - "--test", - choices=["yes", "no", "auto"], - default="auto", - dest="test_run", - help="test experiment locally on a small suite if --test=yes or " - "--test=auto and we are not on a cluster") - return ARGPARSER.parse_args() - -ARGS = parse_args() - - -DEFAULT_OPTIMAL_SUITE = [ - 'agricola-opt18-strips', 'airport', 'barman-opt11-strips', - 'barman-opt14-strips', 'blocks', 'childsnack-opt14-strips', - 'data-network-opt18-strips', 'depot', 'driverlog', - 'elevators-opt08-strips', 'elevators-opt11-strips', - 'floortile-opt11-strips', 'floortile-opt14-strips', 'freecell', - 'ged-opt14-strips', 'grid', 'gripper', 'hiking-opt14-strips', - 'logistics00', 'logistics98', 'miconic', 'movie', 'mprime', - 'mystery', 'nomystery-opt11-strips', 'openstacks-opt08-strips', - 
'openstacks-opt11-strips', 'openstacks-opt14-strips', - 'openstacks-strips', 'organic-synthesis-opt18-strips', - 'organic-synthesis-split-opt18-strips', 'parcprinter-08-strips', - 'parcprinter-opt11-strips', 'parking-opt11-strips', - 'parking-opt14-strips', 'pathways-noneg', 'pegsol-08-strips', - 'pegsol-opt11-strips', 'petri-net-alignment-opt18-strips', - 'pipesworld-notankage', 'pipesworld-tankage', 'psr-small', 'rovers', - 'satellite', 'scanalyzer-08-strips', 'scanalyzer-opt11-strips', - 'snake-opt18-strips', 'sokoban-opt08-strips', - 'sokoban-opt11-strips', 'spider-opt18-strips', 'storage', - 'termes-opt18-strips', 'tetris-opt14-strips', - 'tidybot-opt11-strips', 'tidybot-opt14-strips', 'tpp', - 'transport-opt08-strips', 'transport-opt11-strips', - 'transport-opt14-strips', 'trucks-strips', 'visitall-opt11-strips', - 'visitall-opt14-strips', 'woodworking-opt08-strips', - 'woodworking-opt11-strips', 'zenotravel'] - -DEFAULT_SATISFICING_SUITE = [ - 'agricola-sat18-strips', 'airport', 'assembly', - 'barman-sat11-strips', 'barman-sat14-strips', 'blocks', - 'caldera-sat18-adl', 'caldera-split-sat18-adl', 'cavediving-14-adl', - 'childsnack-sat14-strips', 'citycar-sat14-adl', - 'data-network-sat18-strips', 'depot', 'driverlog', - 'elevators-sat08-strips', 'elevators-sat11-strips', - 'flashfill-sat18-adl', 'floortile-sat11-strips', - 'floortile-sat14-strips', 'freecell', 'ged-sat14-strips', 'grid', - 'gripper', 'hiking-sat14-strips', 'logistics00', 'logistics98', - 'maintenance-sat14-adl', 'miconic', 'miconic-fulladl', - 'miconic-simpleadl', 'movie', 'mprime', 'mystery', - 'nomystery-sat11-strips', 'nurikabe-sat18-adl', 'openstacks', - 'openstacks-sat08-adl', 'openstacks-sat08-strips', - 'openstacks-sat11-strips', 'openstacks-sat14-strips', - 'openstacks-strips', 'optical-telegraphs', - 'organic-synthesis-sat18-strips', - 'organic-synthesis-split-sat18-strips', 'parcprinter-08-strips', - 'parcprinter-sat11-strips', 'parking-sat11-strips', - 'parking-sat14-strips', 
'pathways', 'pathways-noneg', - 'pegsol-08-strips', 'pegsol-sat11-strips', 'philosophers', - 'pipesworld-notankage', 'pipesworld-tankage', 'psr-large', - 'psr-middle', 'psr-small', 'rovers', 'satellite', - 'scanalyzer-08-strips', 'scanalyzer-sat11-strips', 'schedule', - 'settlers-sat18-adl', 'snake-sat18-strips', 'sokoban-sat08-strips', - 'sokoban-sat11-strips', 'spider-sat18-strips', 'storage', - 'termes-sat18-strips', 'tetris-sat14-strips', - 'thoughtful-sat14-strips', 'tidybot-sat11-strips', 'tpp', - 'transport-sat08-strips', 'transport-sat11-strips', - 'transport-sat14-strips', 'trucks', 'trucks-strips', - 'visitall-sat11-strips', 'visitall-sat14-strips', - 'woodworking-sat08-strips', 'woodworking-sat11-strips', - 'zenotravel'] - - -def get_script(): - """Get file name of main script.""" - return tools.get_script_path() - - -def get_script_dir(): - """Get directory of main script. - - Usually a relative directory (depends on how it was called by the user.)""" - return os.path.dirname(get_script()) - - -def get_experiment_name(): - """Get name for experiment. - - Derived from the absolute filename of the main script, e.g. - "/ham/spam/eggs.py" => "spam-eggs".""" - script = os.path.abspath(get_script()) - script_dir = os.path.basename(os.path.dirname(script)) - script_base = os.path.splitext(os.path.basename(script))[0] - return "%s-%s" % (script_dir, script_base) - - -def get_data_dir(): - """Get data dir for the experiment. - - This is the subdirectory "data" of the directory containing - the main script.""" - return os.path.join(get_script_dir(), "data", get_experiment_name()) - - -def get_repo_base(): - """Get base directory of the repository, as an absolute path. - - Search upwards in the directory tree from the main script until a - directory with a subdirectory named ".git" is found. 
- - Abort if the repo base cannot be found.""" - path = os.path.abspath(get_script_dir()) - while os.path.dirname(path) != path: - if os.path.exists(os.path.join(path, ".git")): - return path - path = os.path.dirname(path) - sys.exit("repo base could not be found") - - -def is_running_on_cluster(): - node = platform.node() - return node.endswith(".scicore.unibas.ch") or node.endswith(".cluster.bc2.ch") - - -def is_test_run(): - return ARGS.test_run == "yes" or ( - ARGS.test_run == "auto" and not is_running_on_cluster()) - - -def get_algo_nick(revision, config_nick): - return "{revision}-{config_nick}".format(**locals()) - - -class IssueConfig(object): - """Hold information about a planner configuration. - - See FastDownwardExperiment.add_algorithm() for documentation of the - constructor's options. - - """ - def __init__(self, nick, component_options, - build_options=None, driver_options=None): - self.nick = nick - self.component_options = component_options - self.build_options = build_options - self.driver_options = driver_options - - -class IssueExperiment(FastDownwardExperiment): - """Subclass of FastDownwardExperiment with some convenience features.""" - - DEFAULT_TEST_SUITE = ["depot:p01.pddl", "gripper:prob01.pddl"] - - DEFAULT_TABLE_ATTRIBUTES = [ - "cost", - "coverage", - "error", - "evaluations", - "expansions", - "expansions_until_last_jump", - "generated", - "memory", - "planner_memory", - "planner_time", - "quality", - "run_dir", - "score_evaluations", - "score_expansions", - "score_generated", - "score_memory", - "score_search_time", - "score_total_time", - "search_time", - "total_time", - ] - - DEFAULT_SCATTER_PLOT_ATTRIBUTES = [ - "evaluations", - "expansions", - "expansions_until_last_jump", - "initial_h_value", - "memory", - "search_time", - "total_time", - ] - - PORTFOLIO_ATTRIBUTES = [ - "cost", - "coverage", - "error", - "plan_length", - "run_dir", - ] - - def __init__(self, revisions=None, configs=None, path=None, **kwargs): - """ - - You can 
either specify both *revisions* and *configs* or none - of them. If they are omitted, you will need to call - exp.add_algorithm() manually. - - If *revisions* is given, it must be a non-empty list of - revision identifiers, which specify which planner versions to - use in the experiment. The same versions are used for - translator, preprocessor and search. :: - - IssueExperiment(revisions=["issue123", "4b3d581643"], ...) - - If *configs* is given, it must be a non-empty list of - IssueConfig objects. :: - - IssueExperiment(..., configs=[ - IssueConfig("ff", ["--search", "eager_greedy(ff())"]), - IssueConfig( - "lama", [], - driver_options=["--alias", "seq-sat-lama-2011"]), - ]) - - If *path* is specified, it must be the path to where the - experiment should be built (e.g. - /home/john/experiments/issue123/exp01/). If omitted, the - experiment path is derived automatically from the main - script's filename. Example:: - - script = experiments/issue123/exp01.py --> - path = experiments/issue123/data/issue123-exp01/ - - """ - - path = path or get_data_dir() - - FastDownwardExperiment.__init__(self, path=path, **kwargs) - - if (revisions and not configs) or (not revisions and configs): - raise ValueError( - "please provide either both or none of revisions and configs") - - for rev in revisions: - for config in configs: - self.add_algorithm( - get_algo_nick(rev, config.nick), - get_repo_base(), - rev, - config.component_options, - build_options=config.build_options, - driver_options=config.driver_options) - - self._revisions = revisions - self._configs = configs - - @classmethod - def _is_portfolio(cls, config_nick): - return "fdss" in config_nick - - @classmethod - def get_supported_attributes(cls, config_nick, attributes): - if cls._is_portfolio(config_nick): - return [attr for attr in attributes - if attr in cls.PORTFOLIO_ATTRIBUTES] - return attributes - - def add_absolute_report_step(self, **kwargs): - """Add step that makes an absolute report. 
- - Absolute reports are useful for experiments that don't compare - revisions. - - The report is written to the experiment evaluation directory. - - All *kwargs* will be passed to the AbsoluteReport class. If the - keyword argument *attributes* is not specified, a default list - of attributes is used. :: - - exp.add_absolute_report_step(attributes=["coverage"]) - - """ - kwargs.setdefault("attributes", self.DEFAULT_TABLE_ATTRIBUTES) - report = AbsoluteReport(**kwargs) - outfile = os.path.join( - self.eval_dir, - get_experiment_name() + "." + report.output_format) - self.add_report(report, outfile=outfile) - self.add_step( - 'publish-absolute-report', subprocess.call, ['publish', outfile]) - - def add_comparison_table_step(self, **kwargs): - """Add a step that makes pairwise revision comparisons. - - Create comparative reports for all pairs of Fast Downward - revisions. Each report pairs up the runs of the same config and - lists the two absolute attribute values and their difference - for all attributes in kwargs["attributes"]. - - All *kwargs* will be passed to the CompareConfigsReport class. - If the keyword argument *attributes* is not specified, a - default list of attributes is used. 
:: - - exp.add_comparison_table_step(attributes=["coverage"]) - - """ - kwargs.setdefault("attributes", self.DEFAULT_TABLE_ATTRIBUTES) - - def make_comparison_tables(): - for rev1, rev2 in itertools.combinations(self._revisions, 2): - compared_configs = [] - for config in self._configs: - config_nick = config.nick - compared_configs.append( - ("%s-%s" % (rev1, config_nick), - "%s-%s" % (rev2, config_nick), - "Diff (%s)" % config_nick)) - report = ComparativeReport(compared_configs, **kwargs) - outfile = os.path.join( - self.eval_dir, - "%s-%s-%s-compare.%s" % ( - self.name, rev1, rev2, report.output_format)) - report(self.eval_dir, outfile) - - def publish_comparison_tables(): - for rev1, rev2 in itertools.combinations(self._revisions, 2): - outfile = os.path.join( - self.eval_dir, - "%s-%s-%s-compare.html" % (self.name, rev1, rev2)) - subprocess.call(["publish", outfile]) - - self.add_step("make-comparison-tables", make_comparison_tables) - self.add_step( - "publish-comparison-tables", publish_comparison_tables) - - def add_scatter_plot_step(self, relative=False, attributes=None, additional=[]): - """Add step creating (relative) scatter plots for all revision pairs. - - Create a scatter plot for each combination of attribute, - configuration and revisions pair. If *attributes* is not - specified, a list of common scatter plot attributes is used. - For portfolios all attributes except "cost", "coverage" and - "plan_length" will be ignored. 
:: - - exp.add_scatter_plot_step(attributes=["expansions"]) - - """ - if relative: - scatter_dir = os.path.join(self.eval_dir, "scatter-relative") - step_name = "make-relative-scatter-plots" - else: - scatter_dir = os.path.join(self.eval_dir, "scatter-absolute") - step_name = "make-absolute-scatter-plots" - if attributes is None: - attributes = self.DEFAULT_SCATTER_PLOT_ATTRIBUTES - - def make_scatter_plot(config_nick, rev1, rev2, attribute, config_nick2=None): - name = "-".join([self.name, rev1, rev2, attribute, config_nick]) - if config_nick2 is not None: - name += "-" + config_nick2 - print("Make scatter plot for", name) - algo1 = get_algo_nick(rev1, config_nick) - algo2 = get_algo_nick(rev2, config_nick if config_nick2 is None else config_nick2) - report = ScatterPlotReport( - filter_algorithm=[algo1, algo2], - attributes=[attribute], - relative=relative, - get_category=lambda run1, run2: run1["domain"]) - report( - self.eval_dir, - os.path.join(scatter_dir, rev1 + "-" + rev2, name)) - - def make_scatter_plots(): - for config in self._configs: - for rev1, rev2 in itertools.combinations(self._revisions, 2): - for attribute in self.get_supported_attributes( - config.nick, attributes): - make_scatter_plot(config.nick, rev1, rev2, attribute) - for nick1, nick2, rev1, rev2, attribute in additional: - make_scatter_plot(nick1, rev1, rev2, attribute, config_nick2=nick2) - - self.add_step(step_name, make_scatter_plots) diff --git a/experiments/issue983/requirements.txt b/experiments/issue983/requirements.txt deleted file mode 100644 index b61be8f422..0000000000 --- a/experiments/issue983/requirements.txt +++ /dev/null @@ -1,12 +0,0 @@ -certifi==2020.6.20 -cycler==0.10.0 -kiwisolver==1.2.0 -lab==6.2 -matplotlib==3.3.2 -numpy==1.22.2 -Pillow==9.0.1 -pyparsing==2.4.7 -python-dateutil==2.8.1 -simplejson==3.17.2 -six==1.15.0 -txt2tags==3.7 diff --git a/experiments/issue983/v1.py b/experiments/issue983/v1.py deleted file mode 100755 index 386771421c..0000000000 --- 
a/experiments/issue983/v1.py +++ /dev/null @@ -1,81 +0,0 @@ -#! /usr/bin/env python3 - -import itertools -import os - -from lab.environments import LocalEnvironment, BaselSlurmEnvironment - -from downward.reports.compare import ComparativeReport - -import common_setup -from common_setup import IssueConfig, IssueExperiment - -DIR = os.path.dirname(os.path.abspath(__file__)) -SCRIPT_NAME = os.path.splitext(os.path.basename(__file__))[0] -BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"] -REVISIONS = ["issue983-v1"] -CONFIGS = [ - IssueConfig("opcount-hplus", ["--search", - "astar(operatorcounting([" - "delete_relaxation_constraints(use_time_vars=true, use_integer_vars=true)" - "], use_integer_operator_counts=True), bound=0)"]), - IssueConfig("opcount-hplus-lmcut", ["--search", - "astar(operatorcounting([" - "delete_relaxation_constraints(use_time_vars=true, use_integer_vars=true)," - "lmcut_constraints()" - "], use_integer_operator_counts=True), bound=0)"]), - IssueConfig("relaxed-lmcut", ["--search", "astar(lmcut())", "--translate-options", "--relaxed"]), -] - -SUITE = common_setup.DEFAULT_OPTIMAL_SUITE -ENVIRONMENT = BaselSlurmEnvironment( - partition="infai_1", - email="florian.pommerening@unibas.ch", - export=["PATH", "DOWNWARD_BENCHMARKS"]) - -if common_setup.is_test_run(): - SUITE = IssueExperiment.DEFAULT_TEST_SUITE - ENVIRONMENT = LocalEnvironment(processes=3) - -exp = IssueExperiment( - revisions=REVISIONS, - configs=CONFIGS, - environment=ENVIRONMENT, -) -exp.add_suite(BENCHMARKS_DIR, SUITE) - -exp.add_parser(exp.EXITCODE_PARSER) -exp.add_parser(exp.SINGLE_SEARCH_PARSER) -exp.add_parser(exp.PLANNER_PARSER) - -exp.add_step('build', exp.build) -exp.add_step('start', exp.start_runs) -exp.add_fetcher(name='fetch') - - -def add_hplus(run): - if "hplus" in run["algorithm"]: - run["hplus"] = run.get("initial_h_value") - else: - run["hplus"] = run.get("cost") - return run - -exp.add_report(ComparativeReport([ - ("issue983-v1-opcount-hplus", 
"issue983-v1-opcount-hplus-lmcut", "Diff (adding landmarks)"), - ("issue983-v1-opcount-hplus", "issue983-v1-relaxed-lmcut", "Diff (MIP/search)"), - ("issue983-v1-opcount-hplus-lmcut", "issue983-v1-relaxed-lmcut", "Diff (MIP+LM/search)"), - ], - filter=add_hplus, - attributes=exp.DEFAULT_TABLE_ATTRIBUTES + ["initial_h_value", "hplus"])) - -exp.add_scatter_plot_step(relative=False, attributes=["total_time", "memory"], - additional=[ - ("opcount-hplus", "opcount-hplus-lmcut", "issue983-v1", "issue983-v1", "total_time"), - ("opcount-hplus", "opcount-hplus-lmcut", "issue983-v1", "issue983-v1", "memory"), - ("opcount-hplus", "relaxed-lmcut", "issue983-v1", "issue983-v1", "total_time"), - ("opcount-hplus", "relaxed-lmcut", "issue983-v1", "issue983-v1", "memory"), - ("opcount-hplus-lmcut", "relaxed-lmcut", "issue983-v1", "issue983-v1", "total_time"), - ("opcount-hplus-lmcut", "relaxed-lmcut", "issue983-v1", "issue983-v1", "memory"), - ]) - -exp.run_steps() diff --git a/experiments/issue983/v2-cplex.py b/experiments/issue983/v2-cplex.py deleted file mode 100755 index bcf7574eb3..0000000000 --- a/experiments/issue983/v2-cplex.py +++ /dev/null @@ -1,111 +0,0 @@ -#! 
/usr/bin/env python3 - -import os - -from lab.environments import LocalEnvironment, BaselSlurmEnvironment -from downward.reports.compare import ComparativeReport - -import common_setup -from common_setup import IssueConfig, IssueExperiment - -DIR = os.path.dirname(os.path.abspath(__file__)) -SCRIPT_NAME = os.path.splitext(os.path.basename(__file__))[0] -BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"] -REVISIONS = ["issue983-v1", "issue983-v2"] -CONFIGS = [] - -driver_options = ["--overall-time-limit", "5m"] -for integer in [True, False]: - for time in [True, False]: - CONFIGS += [ - IssueConfig(f"opcount-hplus{'-int' if integer else ''}" - f"{'-time' if time else ''}", - ["--search", - "astar(operatorcounting([delete_relaxation_constraints" - f"(use_time_vars={time}, use_integer_vars={integer})]," - f"use_integer_operator_counts={integer}))"], - driver_options=driver_options), - IssueConfig(f"opcount-hplus-lmcut{'-int' if integer else ''}" - f"{'-time' if time else ''}", - ["--search", - "astar(operatorcounting([" - f"delete_relaxation_constraints(use_time_vars={time}, " - f"use_integer_vars={integer}), lmcut_constraints], " - f"use_integer_operator_counts={integer}))"], - driver_options=driver_options) - ] - CONFIGS += [ - IssueConfig(f"state-eq{'-int' if integer else ''}", - ["--search", - "astar(operatorcounting([state_equation_constraints]," - f"use_integer_operator_counts={integer}))"], - driver_options=driver_options), - IssueConfig(f"state-eq-lmcut{'-int' if integer else ''}", - ["--search", - "astar(operatorcounting([state_equation_constraints," - "lmcut_constraints]," - f"use_integer_operator_counts={integer}))"], - driver_options=driver_options), - IssueConfig(f"post-hoc-opt{'-int' if integer else ''}", - ["--search", - "astar(operatorcounting([pho_constraints(systematic(2))]," - f"use_integer_operator_counts={integer}))"], - driver_options=driver_options), - ] - -SUITE = common_setup.DEFAULT_OPTIMAL_SUITE -ENVIRONMENT = BaselSlurmEnvironment( - 
partition="infai_2", - email="clemens.buechner@unibas.ch", - export=["PATH", "DOWNWARD_BENCHMARKS"], -) - -if common_setup.is_test_run(): - SUITE = IssueExperiment.DEFAULT_TEST_SUITE - ENVIRONMENT = LocalEnvironment(processes=3) - -exp = IssueExperiment( - revisions=REVISIONS, - configs=CONFIGS, - environment=ENVIRONMENT, -) -exp.add_algorithm( - "relaxed-lmcut", common_setup.get_repo_base(), REVISIONS[0], - ["--search", "astar(lmcut())", "--translate-options", "--relaxed"]) - -exp.add_suite(BENCHMARKS_DIR, SUITE) - -exp.add_parser(exp.EXITCODE_PARSER) -exp.add_parser(exp.SINGLE_SEARCH_PARSER) -exp.add_parser(exp.PLANNER_PARSER) - -exp.add_step('build', exp.build) -exp.add_step('start', exp.start_runs) -exp.add_fetcher(name='fetch') - - -def get_task(id): - return f"{id[1]}:{id[2]}" - -def add_hplus(run): - if "hplus" in run["algorithm"]: - run["hplus"] = run.get("initial_h_value") - elif "relaxed-lmcut" in run["algorithm"]: - run["hplus"] = run.get("cost") - return run - - -exp.add_report(ComparativeReport( - filter=[add_hplus], - attributes=["hplus"], - algorithm_pairs=[("issue983-v1-opcount-hplus-int-time", "relaxed-lmcut"), ("issue983-v2-opcount-hplus-int-time", "relaxed-lmcut")]), - outfile="issue983-v2-compare-hplus.html") - - -exp.add_comparison_table_step( - attributes=exp.DEFAULT_TABLE_ATTRIBUTES + [ - "initial_h_value" - ] -) - -exp.run_steps() diff --git a/experiments/issue988/common_setup.py b/experiments/issue988/common_setup.py deleted file mode 100644 index f2bbda8569..0000000000 --- a/experiments/issue988/common_setup.py +++ /dev/null @@ -1,398 +0,0 @@ -# -*- coding: utf-8 -*- - -import itertools -import os -import platform -import subprocess -import sys - -from lab.experiment import ARGPARSER -from lab import tools - -from downward.experiment import FastDownwardExperiment -from downward.reports.absolute import AbsoluteReport -from downward.reports.compare import ComparativeReport -from downward.reports.scatter import ScatterPlotReport - - -def 
parse_args(): - ARGPARSER.add_argument( - "--test", - choices=["yes", "no", "auto"], - default="auto", - dest="test_run", - help="test experiment locally on a small suite if --test=yes or " - "--test=auto and we are not on a cluster") - return ARGPARSER.parse_args() - -ARGS = parse_args() - - -DEFAULT_OPTIMAL_SUITE = [ - 'agricola-opt18-strips', 'airport', 'barman-opt11-strips', - 'barman-opt14-strips', 'blocks', 'childsnack-opt14-strips', - 'data-network-opt18-strips', 'depot', 'driverlog', - 'elevators-opt08-strips', 'elevators-opt11-strips', - 'floortile-opt11-strips', 'floortile-opt14-strips', 'freecell', - 'ged-opt14-strips', 'grid', 'gripper', 'hiking-opt14-strips', - 'logistics00', 'logistics98', 'miconic', 'movie', 'mprime', - 'mystery', 'nomystery-opt11-strips', 'openstacks-opt08-strips', - 'openstacks-opt11-strips', 'openstacks-opt14-strips', - 'openstacks-strips', 'organic-synthesis-opt18-strips', - 'organic-synthesis-split-opt18-strips', 'parcprinter-08-strips', - 'parcprinter-opt11-strips', 'parking-opt11-strips', - 'parking-opt14-strips', 'pathways-noneg', 'pegsol-08-strips', - 'pegsol-opt11-strips', 'petri-net-alignment-opt18-strips', - 'pipesworld-notankage', 'pipesworld-tankage', 'psr-small', 'rovers', - 'satellite', 'scanalyzer-08-strips', 'scanalyzer-opt11-strips', - 'snake-opt18-strips', 'sokoban-opt08-strips', - 'sokoban-opt11-strips', 'spider-opt18-strips', 'storage', - 'termes-opt18-strips', 'tetris-opt14-strips', - 'tidybot-opt11-strips', 'tidybot-opt14-strips', 'tpp', - 'transport-opt08-strips', 'transport-opt11-strips', - 'transport-opt14-strips', 'trucks-strips', 'visitall-opt11-strips', - 'visitall-opt14-strips', 'woodworking-opt08-strips', - 'woodworking-opt11-strips', 'zenotravel'] - -DEFAULT_SATISFICING_SUITE = [ - 'agricola-sat18-strips', 'airport', 'assembly', - 'barman-sat11-strips', 'barman-sat14-strips', 'blocks', - 'caldera-sat18-adl', 'caldera-split-sat18-adl', 'cavediving-14-adl', - 'childsnack-sat14-strips', 
'citycar-sat14-adl', - 'data-network-sat18-strips', 'depot', 'driverlog', - 'elevators-sat08-strips', 'elevators-sat11-strips', - 'flashfill-sat18-adl', 'floortile-sat11-strips', - 'floortile-sat14-strips', 'freecell', 'ged-sat14-strips', 'grid', - 'gripper', 'hiking-sat14-strips', 'logistics00', 'logistics98', - 'maintenance-sat14-adl', 'miconic', 'miconic-fulladl', - 'miconic-simpleadl', 'movie', 'mprime', 'mystery', - 'nomystery-sat11-strips', 'nurikabe-sat18-adl', 'openstacks', - 'openstacks-sat08-adl', 'openstacks-sat08-strips', - 'openstacks-sat11-strips', 'openstacks-sat14-strips', - 'openstacks-strips', 'optical-telegraphs', - 'organic-synthesis-sat18-strips', - 'organic-synthesis-split-sat18-strips', 'parcprinter-08-strips', - 'parcprinter-sat11-strips', 'parking-sat11-strips', - 'parking-sat14-strips', 'pathways', 'pathways-noneg', - 'pegsol-08-strips', 'pegsol-sat11-strips', 'philosophers', - 'pipesworld-notankage', 'pipesworld-tankage', 'psr-large', - 'psr-middle', 'psr-small', 'rovers', 'satellite', - 'scanalyzer-08-strips', 'scanalyzer-sat11-strips', 'schedule', - 'settlers-sat18-adl', 'snake-sat18-strips', 'sokoban-sat08-strips', - 'sokoban-sat11-strips', 'spider-sat18-strips', 'storage', - 'termes-sat18-strips', 'tetris-sat14-strips', - 'thoughtful-sat14-strips', 'tidybot-sat11-strips', 'tpp', - 'transport-sat08-strips', 'transport-sat11-strips', - 'transport-sat14-strips', 'trucks', 'trucks-strips', - 'visitall-sat11-strips', 'visitall-sat14-strips', - 'woodworking-sat08-strips', 'woodworking-sat11-strips', - 'zenotravel'] - - -def get_script(): - """Get file name of main script.""" - return tools.get_script_path() - - -def get_script_dir(): - """Get directory of main script. - - Usually a relative directory (depends on how it was called by the user.)""" - return os.path.dirname(get_script()) - - -def get_experiment_name(): - """Get name for experiment. - - Derived from the absolute filename of the main script, e.g. 
- "/ham/spam/eggs.py" => "spam-eggs".""" - script = os.path.abspath(get_script()) - script_dir = os.path.basename(os.path.dirname(script)) - script_base = os.path.splitext(os.path.basename(script))[0] - return "%s-%s" % (script_dir, script_base) - - -def get_data_dir(): - """Get data dir for the experiment. - - This is the subdirectory "data" of the directory containing - the main script.""" - return os.path.join(get_script_dir(), "data", get_experiment_name()) - - -def get_repo_base(): - """Get base directory of the repository, as an absolute path. - - Search upwards in the directory tree from the main script until a - directory with a subdirectory named ".git" is found. - - Abort if the repo base cannot be found.""" - path = os.path.abspath(get_script_dir()) - while os.path.dirname(path) != path: - if os.path.exists(os.path.join(path, ".git")): - return path - path = os.path.dirname(path) - sys.exit("repo base could not be found") - - -def is_running_on_cluster(): - node = platform.node() - return node.endswith(".scicore.unibas.ch") or node.endswith(".cluster.bc2.ch") - - -def is_test_run(): - return ARGS.test_run == "yes" or ( - ARGS.test_run == "auto" and not is_running_on_cluster()) - - -def get_algo_nick(revision, config_nick): - return "{revision}-{config_nick}".format(**locals()) - - -class IssueConfig(object): - """Hold information about a planner configuration. - - See FastDownwardExperiment.add_algorithm() for documentation of the - constructor's options. 
- - """ - def __init__(self, nick, component_options, - build_options=None, driver_options=None): - self.nick = nick - self.component_options = component_options - self.build_options = build_options - self.driver_options = driver_options - - -class IssueExperiment(FastDownwardExperiment): - """Subclass of FastDownwardExperiment with some convenience features.""" - - DEFAULT_TEST_SUITE = ["depot:p01.pddl", "gripper:prob01.pddl"] - - DEFAULT_TABLE_ATTRIBUTES = [ - "cost", - "coverage", - "error", - "evaluations", - "expansions", - "expansions_until_last_jump", - "initial_h_value", - "generated", - "memory", - "planner_memory", - "planner_time", - "quality", - "run_dir", - "score_evaluations", - "score_expansions", - "score_generated", - "score_memory", - "score_search_time", - "score_total_time", - "search_time", - "total_time", - ] - - DEFAULT_SCATTER_PLOT_ATTRIBUTES = [ - "evaluations", - "expansions", - "expansions_until_last_jump", - "initial_h_value", - "memory", - "search_time", - "total_time", - ] - - PORTFOLIO_ATTRIBUTES = [ - "cost", - "coverage", - "error", - "plan_length", - "run_dir", - ] - - def __init__(self, revisions=None, configs=None, path=None, **kwargs): - """ - - You can either specify both *revisions* and *configs* or none - of them. If they are omitted, you will need to call - exp.add_algorithm() manually. - - If *revisions* is given, it must be a non-empty list of - revision identifiers, which specify which planner versions to - use in the experiment. The same versions are used for - translator, preprocessor and search. :: - - IssueExperiment(revisions=["issue123", "4b3d581643"], ...) - - If *configs* is given, it must be a non-empty list of - IssueConfig objects. :: - - IssueExperiment(..., configs=[ - IssueConfig("ff", ["--search", "eager_greedy(ff())"]), - IssueConfig( - "lama", [], - driver_options=["--alias", "seq-sat-lama-2011"]), - ]) - - If *path* is specified, it must be the path to where the - experiment should be built (e.g. 
- /home/john/experiments/issue123/exp01/). If omitted, the - experiment path is derived automatically from the main - script's filename. Example:: - - script = experiments/issue123/exp01.py --> - path = experiments/issue123/data/issue123-exp01/ - - """ - - path = path or get_data_dir() - - FastDownwardExperiment.__init__(self, path=path, **kwargs) - - if (revisions and not configs) or (not revisions and configs): - raise ValueError( - "please provide either both or none of revisions and configs") - - for rev in revisions: - for config in configs: - self.add_algorithm( - get_algo_nick(rev, config.nick), - get_repo_base(), - rev, - config.component_options, - build_options=config.build_options, - driver_options=config.driver_options) - - self._revisions = revisions - self._configs = configs - - @classmethod - def _is_portfolio(cls, config_nick): - return "fdss" in config_nick - - @classmethod - def get_supported_attributes(cls, config_nick, attributes): - if cls._is_portfolio(config_nick): - return [attr for attr in attributes - if attr in cls.PORTFOLIO_ATTRIBUTES] - return attributes - - def add_absolute_report_step(self, **kwargs): - """Add step that makes an absolute report. - - Absolute reports are useful for experiments that don't compare - revisions. - - The report is written to the experiment evaluation directory. - - All *kwargs* will be passed to the AbsoluteReport class. If the - keyword argument *attributes* is not specified, a default list - of attributes is used. :: - - exp.add_absolute_report_step(attributes=["coverage"]) - - """ - kwargs.setdefault("attributes", self.DEFAULT_TABLE_ATTRIBUTES) - report = AbsoluteReport(**kwargs) - outfile = os.path.join( - self.eval_dir, - get_experiment_name() + "." 
+ report.output_format) - self.add_report(report, outfile=outfile) - self.add_step( - 'publish-absolute-report', subprocess.call, ['publish', outfile]) - - def add_comparison_table_step(self, **kwargs): - """Add a step that makes pairwise revision comparisons. - - Create comparative reports for all pairs of Fast Downward - revisions. Each report pairs up the runs of the same config and - lists the two absolute attribute values and their difference - for all attributes in kwargs["attributes"]. - - All *kwargs* will be passed to the CompareConfigsReport class. - If the keyword argument *attributes* is not specified, a - default list of attributes is used. :: - - exp.add_comparison_table_step(attributes=["coverage"]) - - """ - kwargs.setdefault("attributes", self.DEFAULT_TABLE_ATTRIBUTES) - - def make_comparison_tables(): - for rev1, rev2 in itertools.combinations(self._revisions, 2): - compared_configs = [] - for config in self._configs: - config_nick = config.nick - compared_configs.append( - ("%s-%s" % (rev1, config_nick), - "%s-%s" % (rev2, config_nick), - "Diff (%s)" % config_nick)) - report = ComparativeReport(compared_configs, **kwargs) - outfile = os.path.join( - self.eval_dir, - "%s-%s-%s-compare.%s" % ( - self.name, rev1, rev2, report.output_format)) - report(self.eval_dir, outfile) - - def publish_comparison_tables(): - for rev1, rev2 in itertools.combinations(self._revisions, 2): - outfile = os.path.join( - self.eval_dir, - "%s-%s-%s-compare.html" % (self.name, rev1, rev2)) - subprocess.call(["publish", outfile]) - - self.add_step("make-comparison-tables", make_comparison_tables) - self.add_step( - "publish-comparison-tables", publish_comparison_tables) - - def add_scatter_plot_step(self, relative=False, attributes=None, additional=[]): - """Add step creating (relative) scatter plots for all revision pairs. - - Create a scatter plot for each combination of attribute, - configuration and revisions pair. 
If *attributes* is not - specified, a list of common scatter plot attributes is used. - For portfolios all attributes except "cost", "coverage" and - "plan_length" will be ignored. :: - - exp.add_scatter_plot_step(attributes=["expansions"]) - - """ - if relative: - scatter_dir = os.path.join(self.eval_dir, "scatter-relative") - step_name = "make-relative-scatter-plots" - else: - scatter_dir = os.path.join(self.eval_dir, "scatter-absolute") - step_name = "make-absolute-scatter-plots" - if attributes is None: - attributes = self.DEFAULT_SCATTER_PLOT_ATTRIBUTES - - def make_scatter_plot(config_nick, rev1, rev2, attribute, config_nick2=None): - name = "-".join([self.name, rev1, rev2, attribute, config_nick]) - if config_nick2 is not None: - name += "-" + config_nick2 - print("Make scatter plot for", name) - algo1 = get_algo_nick(rev1, config_nick) - algo2 = get_algo_nick(rev2, config_nick if config_nick2 is None else config_nick2) - report = ScatterPlotReport( - filter_algorithm=[algo1, algo2], - attributes=[attribute], - relative=relative, - get_category=lambda run1, run2: run1["domain"]) - report( - self.eval_dir, - os.path.join(scatter_dir, rev1 + "-" + rev2, name)) - - def make_scatter_plots(): - for config in self._configs: - print(config) - for rev1, rev2 in itertools.combinations(self._revisions, 2): - print(rev1, rev2) - for attribute in self.get_supported_attributes( - config.nick, attributes): - print(attribute) - make_scatter_plot(config.nick, rev1, rev2, attribute) - for nick1, nick2, rev1, rev2, attribute in additional: - make_scatter_plot(nick1, rev1, rev2, attribute, config_nick2=nick2) - - self.add_step(step_name, make_scatter_plots) diff --git a/experiments/issue988/requirements.txt b/experiments/issue988/requirements.txt deleted file mode 100644 index d4330da5d4..0000000000 --- a/experiments/issue988/requirements.txt +++ /dev/null @@ -1,11 +0,0 @@ -cycler==0.10.0 -kiwisolver==1.3.1 -lab==6.2 -matplotlib==3.3.3 -numpy==1.22.2 -Pillow==9.0.1 
-pyparsing==2.4.7 -python-dateutil==2.8.1 -simplejson==3.17.2 -six==1.15.0 -txt2tags==3.7 diff --git a/experiments/issue988/v1-optimal.py b/experiments/issue988/v1-optimal.py deleted file mode 100755 index 4aab51e62c..0000000000 --- a/experiments/issue988/v1-optimal.py +++ /dev/null @@ -1,51 +0,0 @@ -#! /usr/bin/env python -# -*- coding: utf-8 -*- - - -import os - -from lab.environments import LocalEnvironment, BaselSlurmEnvironment - -import common_setup -from common_setup import IssueConfig, IssueExperiment - -DIR = os.path.dirname(os.path.abspath(__file__)) -SCRIPT_NAME = os.path.splitext(os.path.basename(__file__))[0] -BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"] -REVISIONS = ["issue988-base", "issue988-v1"] - -CONFIGS = [ - IssueConfig("seq-opt-bjolp", [], - driver_options=["--alias", "seq-opt-bjolp"]), -] - -SUITE = common_setup.DEFAULT_OPTIMAL_SUITE -ENVIRONMENT = BaselSlurmEnvironment( - partition="infai_2", - email="clemens.buechner@unibas.ch", - export=["PATH", "DOWNWARD_BENCHMARKS"], -) - -if common_setup.is_test_run(): - SUITE = IssueExperiment.DEFAULT_TEST_SUITE - ENVIRONMENT = LocalEnvironment(processes=2) - -exp = common_setup.IssueExperiment( - revisions=REVISIONS, - configs=CONFIGS, - environment=ENVIRONMENT, -) - -exp.add_suite(BENCHMARKS_DIR, SUITE) - -exp.add_parser(exp.EXITCODE_PARSER) -exp.add_parser(exp.PLANNER_PARSER) -exp.add_parser(exp.SINGLE_SEARCH_PARSER) - -exp.add_step("build", exp.build) -exp.add_step("start", exp.start_runs) -exp.add_fetcher(name="fetch") -exp.add_comparison_table_step() - -exp.run_steps() - diff --git a/experiments/issue988/v1-satisficing.py b/experiments/issue988/v1-satisficing.py deleted file mode 100755 index 922008b461..0000000000 --- a/experiments/issue988/v1-satisficing.py +++ /dev/null @@ -1,53 +0,0 @@ -#! 
/usr/bin/env python -# -*- coding: utf-8 -*- - - -import os - -from lab.environments import LocalEnvironment, BaselSlurmEnvironment - -import common_setup -from common_setup import IssueConfig, IssueExperiment - -DIR = os.path.dirname(os.path.abspath(__file__)) -SCRIPT_NAME = os.path.splitext(os.path.basename(__file__))[0] -BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"] -REVISIONS = ["issue988-base", "issue988-v1"] - -CONFIGS = [ - IssueConfig("lama-first", [], - driver_options=["--alias", "lama-first"]), -] - -SUITE = common_setup.DEFAULT_OPTIMAL_SUITE -ENVIRONMENT = BaselSlurmEnvironment( - partition="infai_2", - email="clemens.buechner@unibas.ch", - export=["PATH", "DOWNWARD_BENCHMARKS"], -) - -if common_setup.is_test_run(): - SUITE = IssueExperiment.DEFAULT_TEST_SUITE - ENVIRONMENT = LocalEnvironment(processes=2) - -exp = common_setup.IssueExperiment( - revisions=REVISIONS, - configs=CONFIGS, - environment=ENVIRONMENT, -) - -exp.add_suite(BENCHMARKS_DIR, SUITE) - -exp.add_parser(exp.ANYTIME_SEARCH_PARSER) -exp.add_parser(exp.EXITCODE_PARSER) -exp.add_parser(exp.PLANNER_PARSER) -exp.add_parser(exp.SINGLE_SEARCH_PARSER) - -exp.add_step("build", exp.build) -exp.add_step("start", exp.start_runs) -exp.add_fetcher(name="fetch") -exp.add_comparison_table_step() -exp.add_scatter_plot_step(relative=True, attributes=["total_time"]) - -exp.run_steps() - diff --git a/experiments/issue990/common_setup.py b/experiments/issue990/common_setup.py deleted file mode 100644 index eeca3aadb5..0000000000 --- a/experiments/issue990/common_setup.py +++ /dev/null @@ -1,395 +0,0 @@ -# -*- coding: utf-8 -*- - -import itertools -import os -import platform -import subprocess -import sys - -from lab.experiment import ARGPARSER -from lab import tools - -from downward.experiment import FastDownwardExperiment -from downward.reports.absolute import AbsoluteReport -from downward.reports.compare import ComparativeReport -from downward.reports.scatter import ScatterPlotReport - - -def 
parse_args(): - ARGPARSER.add_argument( - "--test", - choices=["yes", "no", "auto"], - default="auto", - dest="test_run", - help="test experiment locally on a small suite if --test=yes or " - "--test=auto and we are not on a cluster") - return ARGPARSER.parse_args() - -ARGS = parse_args() - - -DEFAULT_OPTIMAL_SUITE = [ - 'agricola-opt18-strips', 'airport', 'barman-opt11-strips', - 'barman-opt14-strips', 'blocks', 'childsnack-opt14-strips', - 'data-network-opt18-strips', 'depot', 'driverlog', - 'elevators-opt08-strips', 'elevators-opt11-strips', - 'floortile-opt11-strips', 'floortile-opt14-strips', 'freecell', - 'ged-opt14-strips', 'grid', 'gripper', 'hiking-opt14-strips', - 'logistics00', 'logistics98', 'miconic', 'movie', 'mprime', - 'mystery', 'nomystery-opt11-strips', 'openstacks-opt08-strips', - 'openstacks-opt11-strips', 'openstacks-opt14-strips', - 'openstacks-strips', 'organic-synthesis-opt18-strips', - 'organic-synthesis-split-opt18-strips', 'parcprinter-08-strips', - 'parcprinter-opt11-strips', 'parking-opt11-strips', - 'parking-opt14-strips', 'pathways-noneg', 'pegsol-08-strips', - 'pegsol-opt11-strips', 'petri-net-alignment-opt18-strips', - 'pipesworld-notankage', 'pipesworld-tankage', 'psr-small', 'rovers', - 'satellite', 'scanalyzer-08-strips', 'scanalyzer-opt11-strips', - 'snake-opt18-strips', 'sokoban-opt08-strips', - 'sokoban-opt11-strips', 'spider-opt18-strips', 'storage', - 'termes-opt18-strips', 'tetris-opt14-strips', - 'tidybot-opt11-strips', 'tidybot-opt14-strips', 'tpp', - 'transport-opt08-strips', 'transport-opt11-strips', - 'transport-opt14-strips', 'trucks-strips', 'visitall-opt11-strips', - 'visitall-opt14-strips', 'woodworking-opt08-strips', - 'woodworking-opt11-strips', 'zenotravel'] - -DEFAULT_SATISFICING_SUITE = [ - 'agricola-sat18-strips', 'airport', 'assembly', - 'barman-sat11-strips', 'barman-sat14-strips', 'blocks', - 'caldera-sat18-adl', 'caldera-split-sat18-adl', 'cavediving-14-adl', - 'childsnack-sat14-strips', 
'citycar-sat14-adl', - 'data-network-sat18-strips', 'depot', 'driverlog', - 'elevators-sat08-strips', 'elevators-sat11-strips', - 'flashfill-sat18-adl', 'floortile-sat11-strips', - 'floortile-sat14-strips', 'freecell', 'ged-sat14-strips', 'grid', - 'gripper', 'hiking-sat14-strips', 'logistics00', 'logistics98', - 'maintenance-sat14-adl', 'miconic', 'miconic-fulladl', - 'miconic-simpleadl', 'movie', 'mprime', 'mystery', - 'nomystery-sat11-strips', 'nurikabe-sat18-adl', 'openstacks', - 'openstacks-sat08-adl', 'openstacks-sat08-strips', - 'openstacks-sat11-strips', 'openstacks-sat14-strips', - 'openstacks-strips', 'optical-telegraphs', - 'organic-synthesis-sat18-strips', - 'organic-synthesis-split-sat18-strips', 'parcprinter-08-strips', - 'parcprinter-sat11-strips', 'parking-sat11-strips', - 'parking-sat14-strips', 'pathways', 'pathways-noneg', - 'pegsol-08-strips', 'pegsol-sat11-strips', 'philosophers', - 'pipesworld-notankage', 'pipesworld-tankage', 'psr-large', - 'psr-middle', 'psr-small', 'rovers', 'satellite', - 'scanalyzer-08-strips', 'scanalyzer-sat11-strips', 'schedule', - 'settlers-sat18-adl', 'snake-sat18-strips', 'sokoban-sat08-strips', - 'sokoban-sat11-strips', 'spider-sat18-strips', 'storage', - 'termes-sat18-strips', 'tetris-sat14-strips', - 'thoughtful-sat14-strips', 'tidybot-sat11-strips', 'tpp', - 'transport-sat08-strips', 'transport-sat11-strips', - 'transport-sat14-strips', 'trucks', 'trucks-strips', - 'visitall-sat11-strips', 'visitall-sat14-strips', - 'woodworking-sat08-strips', 'woodworking-sat11-strips', - 'zenotravel'] - - -def get_script(): - """Get file name of main script.""" - return tools.get_script_path() - - -def get_script_dir(): - """Get directory of main script. - - Usually a relative directory (depends on how it was called by the user.)""" - return os.path.dirname(get_script()) - - -def get_experiment_name(): - """Get name for experiment. - - Derived from the absolute filename of the main script, e.g. 
- "/ham/spam/eggs.py" => "spam-eggs".""" - script = os.path.abspath(get_script()) - script_dir = os.path.basename(os.path.dirname(script)) - script_base = os.path.splitext(os.path.basename(script))[0] - return "%s-%s" % (script_dir, script_base) - - -def get_data_dir(): - """Get data dir for the experiment. - - This is the subdirectory "data" of the directory containing - the main script.""" - return os.path.join(get_script_dir(), "data", get_experiment_name()) - - -def get_repo_base(): - """Get base directory of the repository, as an absolute path. - - Search upwards in the directory tree from the main script until a - directory with a subdirectory named ".git" is found. - - Abort if the repo base cannot be found.""" - path = os.path.abspath(get_script_dir()) - while os.path.dirname(path) != path: - if os.path.exists(os.path.join(path, ".git")): - return path - path = os.path.dirname(path) - sys.exit("repo base could not be found") - - -def is_running_on_cluster(): - node = platform.node() - return node.endswith(".scicore.unibas.ch") or node.endswith(".cluster.bc2.ch") - - -def is_test_run(): - return ARGS.test_run == "yes" or ( - ARGS.test_run == "auto" and not is_running_on_cluster()) - - -def get_algo_nick(revision, config_nick): - return "{revision}-{config_nick}".format(**locals()) - - -class IssueConfig(object): - """Hold information about a planner configuration. - - See FastDownwardExperiment.add_algorithm() for documentation of the - constructor's options. 
- - """ - def __init__(self, nick, component_options, - build_options=None, driver_options=None): - self.nick = nick - self.component_options = component_options - self.build_options = build_options - self.driver_options = driver_options - - -class IssueExperiment(FastDownwardExperiment): - """Subclass of FastDownwardExperiment with some convenience features.""" - - DEFAULT_TEST_SUITE = ["depot:p01.pddl", "gripper:prob01.pddl"] - - DEFAULT_TABLE_ATTRIBUTES = [ - "cost", - "coverage", - "error", - "evaluations", - "expansions", - "expansions_until_last_jump", - "initial_h_value", - "generated", - "memory", - "planner_memory", - "planner_time", - "quality", - "run_dir", - "score_evaluations", - "score_expansions", - "score_generated", - "score_memory", - "score_search_time", - "score_total_time", - "search_time", - "total_time", - ] - - DEFAULT_SCATTER_PLOT_ATTRIBUTES = [ - "evaluations", - "expansions", - "expansions_until_last_jump", - "initial_h_value", - "memory", - "search_time", - "total_time", - ] - - PORTFOLIO_ATTRIBUTES = [ - "cost", - "coverage", - "error", - "plan_length", - "run_dir", - ] - - def __init__(self, revisions=None, configs=None, path=None, **kwargs): - """ - - You can either specify both *revisions* and *configs* or none - of them. If they are omitted, you will need to call - exp.add_algorithm() manually. - - If *revisions* is given, it must be a non-empty list of - revision identifiers, which specify which planner versions to - use in the experiment. The same versions are used for - translator, preprocessor and search. :: - - IssueExperiment(revisions=["issue123", "4b3d581643"], ...) - - If *configs* is given, it must be a non-empty list of - IssueConfig objects. :: - - IssueExperiment(..., configs=[ - IssueConfig("ff", ["--search", "eager_greedy(ff())"]), - IssueConfig( - "lama", [], - driver_options=["--alias", "seq-sat-lama-2011"]), - ]) - - If *path* is specified, it must be the path to where the - experiment should be built (e.g. 
- /home/john/experiments/issue123/exp01/). If omitted, the - experiment path is derived automatically from the main - script's filename. Example:: - - script = experiments/issue123/exp01.py --> - path = experiments/issue123/data/issue123-exp01/ - - """ - - path = path or get_data_dir() - - FastDownwardExperiment.__init__(self, path=path, **kwargs) - - if (revisions and not configs) or (not revisions and configs): - raise ValueError( - "please provide either both or none of revisions and configs") - - for rev in revisions: - for config in configs: - self.add_algorithm( - get_algo_nick(rev, config.nick), - get_repo_base(), - rev, - config.component_options, - build_options=config.build_options, - driver_options=config.driver_options) - - self._revisions = revisions - self._configs = configs - - @classmethod - def _is_portfolio(cls, config_nick): - return "fdss" in config_nick - - @classmethod - def get_supported_attributes(cls, config_nick, attributes): - if cls._is_portfolio(config_nick): - return [attr for attr in attributes - if attr in cls.PORTFOLIO_ATTRIBUTES] - return attributes - - def add_absolute_report_step(self, **kwargs): - """Add step that makes an absolute report. - - Absolute reports are useful for experiments that don't compare - revisions. - - The report is written to the experiment evaluation directory. - - All *kwargs* will be passed to the AbsoluteReport class. If the - keyword argument *attributes* is not specified, a default list - of attributes is used. :: - - exp.add_absolute_report_step(attributes=["coverage"]) - - """ - kwargs.setdefault("attributes", self.DEFAULT_TABLE_ATTRIBUTES) - report = AbsoluteReport(**kwargs) - outfile = os.path.join( - self.eval_dir, - get_experiment_name() + "." 
+ report.output_format) - self.add_report(report, outfile=outfile) - self.add_step( - 'publish-absolute-report', subprocess.call, ['publish', outfile]) - - def add_comparison_table_step(self, **kwargs): - """Add a step that makes pairwise revision comparisons. - - Create comparative reports for all pairs of Fast Downward - revisions. Each report pairs up the runs of the same config and - lists the two absolute attribute values and their difference - for all attributes in kwargs["attributes"]. - - All *kwargs* will be passed to the CompareConfigsReport class. - If the keyword argument *attributes* is not specified, a - default list of attributes is used. :: - - exp.add_comparison_table_step(attributes=["coverage"]) - - """ - kwargs.setdefault("attributes", self.DEFAULT_TABLE_ATTRIBUTES) - - def make_comparison_tables(): - for rev1, rev2 in itertools.combinations(self._revisions, 2): - compared_configs = [] - for config in self._configs: - config_nick = config.nick - compared_configs.append( - ("%s-%s" % (rev1, config_nick), - "%s-%s" % (rev2, config_nick), - "Diff (%s)" % config_nick)) - report = ComparativeReport(compared_configs, **kwargs) - outfile = os.path.join( - self.eval_dir, - "%s-%s-%s-compare.%s" % ( - self.name, rev1, rev2, report.output_format)) - report(self.eval_dir, outfile) - - def publish_comparison_tables(): - for rev1, rev2 in itertools.combinations(self._revisions, 2): - outfile = os.path.join( - self.eval_dir, - "%s-%s-%s-compare.html" % (self.name, rev1, rev2)) - subprocess.call(["publish", outfile]) - - self.add_step("make-comparison-tables", make_comparison_tables) - self.add_step( - "publish-comparison-tables", publish_comparison_tables) - - def add_scatter_plot_step(self, relative=False, attributes=None, additional=[]): - """Add step creating (relative) scatter plots for all revision pairs. - - Create a scatter plot for each combination of attribute, - configuration and revisions pair. 
If *attributes* is not - specified, a list of common scatter plot attributes is used. - For portfolios all attributes except "cost", "coverage" and - "plan_length" will be ignored. :: - - exp.add_scatter_plot_step(attributes=["expansions"]) - - """ - if relative: - scatter_dir = os.path.join(self.eval_dir, "scatter-relative") - step_name = "make-relative-scatter-plots" - else: - scatter_dir = os.path.join(self.eval_dir, "scatter-absolute") - step_name = "make-absolute-scatter-plots" - if attributes is None: - attributes = self.DEFAULT_SCATTER_PLOT_ATTRIBUTES - - def make_scatter_plot(config_nick, rev1, rev2, attribute, config_nick2=None): - name = "-".join([self.name, rev1, rev2, attribute, config_nick]) - if config_nick2 is not None: - name += "-" + config_nick2 - print("Make scatter plot for", name) - algo1 = get_algo_nick(rev1, config_nick) - algo2 = get_algo_nick(rev2, config_nick if config_nick2 is None else config_nick2) - report = ScatterPlotReport( - filter_algorithm=[algo1, algo2], - attributes=[attribute], - relative=relative, - get_category=lambda run1, run2: run1["domain"]) - report( - self.eval_dir, - os.path.join(scatter_dir, rev1 + "-" + rev2, name)) - - def make_scatter_plots(): - for config in self._configs: - for rev1, rev2 in itertools.combinations(self._revisions, 2): - for attribute in self.get_supported_attributes( - config.nick, attributes): - make_scatter_plot(config.nick, rev1, rev2, attribute) - for nick1, nick2, rev1, rev2, attribute in additional: - make_scatter_plot(nick1, rev1, rev2, attribute, config_nick2=nick2) - - self.add_step(step_name, lambda: make_scatter_plots) diff --git a/experiments/issue990/v1-optimal-2.py b/experiments/issue990/v1-optimal-2.py deleted file mode 100755 index 12a6378381..0000000000 --- a/experiments/issue990/v1-optimal-2.py +++ /dev/null @@ -1,57 +0,0 @@ -#! 
/usr/bin/env python -# -*- coding: utf-8 -*- - -import common_setup - -import os - -from lab.environments import LocalEnvironment, BaselSlurmEnvironment - -REVISIONS = [ - "issue990-base", - "issue990-v1", -] - -CONFIGS = [ - common_setup.IssueConfig("seq-opt-bjolp", [], - driver_options=["--alias", "seq-opt-bjolp"]), - common_setup.IssueConfig("lm-hm2", ["--evaluator", "lmc=lmcount(lm_hm(m=2),admissible=true)", "--search", "astar(lmc,lazy_evaluator=lmc)"]), - common_setup.IssueConfig("seq-opt-bjolp-opt", ["--evaluator", "lmc=lmcount(lm_merged([lm_rhw(),lm_hm(m=1)]),admissible=true, optimal=true)", "--search", "astar(lmc,lazy_evaluator=lmc)"]), - common_setup.IssueConfig("lm-exhaust", ["--evaluator", "lmc=lmcount(lm_exhaust(),admissible=true)", "--search", "astar(lmc,lazy_evaluator=lmc)"]), -] - -BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"] -REPO = os.environ["DOWNWARD_REPO"] - -if common_setup.is_running_on_cluster(): - SUITE = common_setup.DEFAULT_OPTIMAL_SUITE - ENVIRONMENT = BaselSlurmEnvironment( - partition="infai_2", - email="tho.keller@unibas.ch", - export=["PATH", "DOWNWARD_BENCHMARKS"], - ) -else: - SUITE = common_setup.IssueExperiment.DEFAULT_TEST_SUITE - ENVIRONMENT = LocalEnvironment(processes=2) - -exp = common_setup.IssueExperiment( - revisions=REVISIONS, - configs=CONFIGS, - environment=ENVIRONMENT, -) - -exp.add_suite(BENCHMARKS_DIR, SUITE) - -exp.add_parser(exp.EXITCODE_PARSER) -exp.add_parser(exp.PLANNER_PARSER) -exp.add_parser(exp.SINGLE_SEARCH_PARSER) - -exp.add_step("build", exp.build) -exp.add_step("start", exp.start_runs) -exp.add_fetcher(name="fetch") -exp.add_absolute_report_step() -exp.add_comparison_table_step() -exp.add_parse_again_step() - -exp.run_steps() - diff --git a/experiments/issue990/v1-optimal.py b/experiments/issue990/v1-optimal.py deleted file mode 100755 index 879320b113..0000000000 --- a/experiments/issue990/v1-optimal.py +++ /dev/null @@ -1,54 +0,0 @@ -#! 
/usr/bin/env python -# -*- coding: utf-8 -*- - -import common_setup - -import os - -from lab.environments import LocalEnvironment, BaselSlurmEnvironment - -REVISIONS = [ - "issue990-base", - "issue990-v1", -] - -CONFIGS = [ - common_setup.IssueConfig("seq-opt-bjolp", [], - driver_options=["--alias", "seq-opt-bjolp"]), - common_setup.IssueConfig("lm-exhaust", ["--search", "astar(lmcount(lm_exhaust(), admissible=true))"]), -] - -BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"] -REPO = os.environ["DOWNWARD_REPO"] - -if common_setup.is_running_on_cluster(): - SUITE = common_setup.DEFAULT_OPTIMAL_SUITE - ENVIRONMENT = BaselSlurmEnvironment( - partition="infai_2", - email="tho.keller@unibas.ch", - export=["PATH", "DOWNWARD_BENCHMARKS"], - ) -else: - SUITE = common_setup.IssueExperiment.DEFAULT_TEST_SUITE - ENVIRONMENT = LocalEnvironment(processes=2) - -exp = common_setup.IssueExperiment( - revisions=REVISIONS, - configs=CONFIGS, - environment=ENVIRONMENT, -) - -exp.add_suite(BENCHMARKS_DIR, SUITE) - -exp.add_parser(exp.EXITCODE_PARSER) -exp.add_parser(exp.PLANNER_PARSER) -exp.add_parser(exp.SINGLE_SEARCH_PARSER) - -exp.add_step("build", exp.build) -exp.add_step("start", exp.start_runs) -exp.add_fetcher(name="fetch") -exp.add_absolute_report_step() -exp.add_parse_again_step() - -exp.run_steps() - diff --git a/experiments/issue990/v1-satisficing-2.py b/experiments/issue990/v1-satisficing-2.py deleted file mode 100755 index 57479f34ec..0000000000 --- a/experiments/issue990/v1-satisficing-2.py +++ /dev/null @@ -1,57 +0,0 @@ -#! 
/usr/bin/env python -# -*- coding: utf-8 -*- - -import common_setup - -import os - -from lab.environments import LocalEnvironment, BaselSlurmEnvironment - -REVISIONS = [ - "issue990-base", - "issue990-v1", -] - -CONFIGS = [ - common_setup.IssueConfig("lama-first", [], - driver_options=["--alias", "lama-first"]), - common_setup.IssueConfig("lama-first-pref", ["--evaluator", "hlm=lmcount(lm_factory=lm_rhw(reasonable_orders=true),transform=adapt_costs(one),pref=true)", "--evaluator", "hff=ff(transform=adapt_costs(one))", "--search", """lazy_greedy([hff,hlm],preferred=[hff,hlm], cost_type=one,reopen_closed=false)"""]), - common_setup.IssueConfig("lm-zg", ["--search", "eager_greedy([lmcount(lm_zg(reasonable_orders=false))])"]), -] - -BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"] -REPO = os.environ["DOWNWARD_REPO"] - -if common_setup.is_running_on_cluster(): - SUITE = common_setup.DEFAULT_SATISFICING_SUITE - ENVIRONMENT = BaselSlurmEnvironment( - partition="infai_2", - email="tho.keller@unibas.ch", - export=["PATH", "DOWNWARD_BENCHMARKS"], - ) -else: - SUITE = common_setup.IssueExperiment.DEFAULT_TEST_SUITE - ENVIRONMENT = LocalEnvironment(processes=2) - -exp = common_setup.IssueExperiment( - revisions=REVISIONS, - configs=CONFIGS, - environment=ENVIRONMENT, -) - -exp.add_suite(BENCHMARKS_DIR, SUITE) - -exp.add_parser(exp.ANYTIME_SEARCH_PARSER) -exp.add_parser(exp.EXITCODE_PARSER) -exp.add_parser(exp.PLANNER_PARSER) -exp.add_parser(exp.SINGLE_SEARCH_PARSER) - -exp.add_step("build", exp.build) -exp.add_step("start", exp.start_runs) -exp.add_fetcher(name="fetch") -exp.add_absolute_report_step() -exp.add_comparison_table_step() -exp.add_parse_again_step() - -exp.run_steps() - diff --git a/experiments/issue990/v1-satisficing.py b/experiments/issue990/v1-satisficing.py deleted file mode 100755 index cf337724d6..0000000000 --- a/experiments/issue990/v1-satisficing.py +++ /dev/null @@ -1,55 +0,0 @@ -#! 
/usr/bin/env python -# -*- coding: utf-8 -*- - -import common_setup - -import os - -from lab.environments import LocalEnvironment, BaselSlurmEnvironment - -REVISIONS = [ - "issue990-base", - "issue990-v1", -] - -CONFIGS = [ - common_setup.IssueConfig("lama-first", [], - driver_options=["--alias", "lama-first"]), - common_setup.IssueConfig("lm-zg", ["--search", "eager_greedy([lmcount(lm_zg(reasonable_orders=false))])"]), -] - -BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"] -REPO = os.environ["DOWNWARD_REPO"] - -if common_setup.is_running_on_cluster(): - SUITE = common_setup.DEFAULT_SATISFICING_SUITE - ENVIRONMENT = BaselSlurmEnvironment( - partition="infai_2", - email="tho.keller@unibas.ch", - export=["PATH", "DOWNWARD_BENCHMARKS"], - ) -else: - SUITE = common_setup.IssueExperiment.DEFAULT_TEST_SUITE - ENVIRONMENT = LocalEnvironment(processes=2) - -exp = common_setup.IssueExperiment( - revisions=REVISIONS, - configs=CONFIGS, - environment=ENVIRONMENT, -) - -exp.add_suite(BENCHMARKS_DIR, SUITE) - -exp.add_parser(exp.ANYTIME_SEARCH_PARSER) -exp.add_parser(exp.EXITCODE_PARSER) -exp.add_parser(exp.PLANNER_PARSER) -exp.add_parser(exp.SINGLE_SEARCH_PARSER) - -exp.add_step("build", exp.build) -exp.add_step("start", exp.start_runs) -exp.add_fetcher(name="fetch") -exp.add_absolute_report_step() -exp.add_parse_again_step() - -exp.run_steps() - diff --git a/experiments/issue995/common_setup.py b/experiments/issue995/common_setup.py deleted file mode 100644 index f2bbda8569..0000000000 --- a/experiments/issue995/common_setup.py +++ /dev/null @@ -1,398 +0,0 @@ -# -*- coding: utf-8 -*- - -import itertools -import os -import platform -import subprocess -import sys - -from lab.experiment import ARGPARSER -from lab import tools - -from downward.experiment import FastDownwardExperiment -from downward.reports.absolute import AbsoluteReport -from downward.reports.compare import ComparativeReport -from downward.reports.scatter import ScatterPlotReport - - -def parse_args(): - 
ARGPARSER.add_argument( - "--test", - choices=["yes", "no", "auto"], - default="auto", - dest="test_run", - help="test experiment locally on a small suite if --test=yes or " - "--test=auto and we are not on a cluster") - return ARGPARSER.parse_args() - -ARGS = parse_args() - - -DEFAULT_OPTIMAL_SUITE = [ - 'agricola-opt18-strips', 'airport', 'barman-opt11-strips', - 'barman-opt14-strips', 'blocks', 'childsnack-opt14-strips', - 'data-network-opt18-strips', 'depot', 'driverlog', - 'elevators-opt08-strips', 'elevators-opt11-strips', - 'floortile-opt11-strips', 'floortile-opt14-strips', 'freecell', - 'ged-opt14-strips', 'grid', 'gripper', 'hiking-opt14-strips', - 'logistics00', 'logistics98', 'miconic', 'movie', 'mprime', - 'mystery', 'nomystery-opt11-strips', 'openstacks-opt08-strips', - 'openstacks-opt11-strips', 'openstacks-opt14-strips', - 'openstacks-strips', 'organic-synthesis-opt18-strips', - 'organic-synthesis-split-opt18-strips', 'parcprinter-08-strips', - 'parcprinter-opt11-strips', 'parking-opt11-strips', - 'parking-opt14-strips', 'pathways-noneg', 'pegsol-08-strips', - 'pegsol-opt11-strips', 'petri-net-alignment-opt18-strips', - 'pipesworld-notankage', 'pipesworld-tankage', 'psr-small', 'rovers', - 'satellite', 'scanalyzer-08-strips', 'scanalyzer-opt11-strips', - 'snake-opt18-strips', 'sokoban-opt08-strips', - 'sokoban-opt11-strips', 'spider-opt18-strips', 'storage', - 'termes-opt18-strips', 'tetris-opt14-strips', - 'tidybot-opt11-strips', 'tidybot-opt14-strips', 'tpp', - 'transport-opt08-strips', 'transport-opt11-strips', - 'transport-opt14-strips', 'trucks-strips', 'visitall-opt11-strips', - 'visitall-opt14-strips', 'woodworking-opt08-strips', - 'woodworking-opt11-strips', 'zenotravel'] - -DEFAULT_SATISFICING_SUITE = [ - 'agricola-sat18-strips', 'airport', 'assembly', - 'barman-sat11-strips', 'barman-sat14-strips', 'blocks', - 'caldera-sat18-adl', 'caldera-split-sat18-adl', 'cavediving-14-adl', - 'childsnack-sat14-strips', 'citycar-sat14-adl', - 
'data-network-sat18-strips', 'depot', 'driverlog', - 'elevators-sat08-strips', 'elevators-sat11-strips', - 'flashfill-sat18-adl', 'floortile-sat11-strips', - 'floortile-sat14-strips', 'freecell', 'ged-sat14-strips', 'grid', - 'gripper', 'hiking-sat14-strips', 'logistics00', 'logistics98', - 'maintenance-sat14-adl', 'miconic', 'miconic-fulladl', - 'miconic-simpleadl', 'movie', 'mprime', 'mystery', - 'nomystery-sat11-strips', 'nurikabe-sat18-adl', 'openstacks', - 'openstacks-sat08-adl', 'openstacks-sat08-strips', - 'openstacks-sat11-strips', 'openstacks-sat14-strips', - 'openstacks-strips', 'optical-telegraphs', - 'organic-synthesis-sat18-strips', - 'organic-synthesis-split-sat18-strips', 'parcprinter-08-strips', - 'parcprinter-sat11-strips', 'parking-sat11-strips', - 'parking-sat14-strips', 'pathways', 'pathways-noneg', - 'pegsol-08-strips', 'pegsol-sat11-strips', 'philosophers', - 'pipesworld-notankage', 'pipesworld-tankage', 'psr-large', - 'psr-middle', 'psr-small', 'rovers', 'satellite', - 'scanalyzer-08-strips', 'scanalyzer-sat11-strips', 'schedule', - 'settlers-sat18-adl', 'snake-sat18-strips', 'sokoban-sat08-strips', - 'sokoban-sat11-strips', 'spider-sat18-strips', 'storage', - 'termes-sat18-strips', 'tetris-sat14-strips', - 'thoughtful-sat14-strips', 'tidybot-sat11-strips', 'tpp', - 'transport-sat08-strips', 'transport-sat11-strips', - 'transport-sat14-strips', 'trucks', 'trucks-strips', - 'visitall-sat11-strips', 'visitall-sat14-strips', - 'woodworking-sat08-strips', 'woodworking-sat11-strips', - 'zenotravel'] - - -def get_script(): - """Get file name of main script.""" - return tools.get_script_path() - - -def get_script_dir(): - """Get directory of main script. - - Usually a relative directory (depends on how it was called by the user.)""" - return os.path.dirname(get_script()) - - -def get_experiment_name(): - """Get name for experiment. - - Derived from the absolute filename of the main script, e.g. 
- "/ham/spam/eggs.py" => "spam-eggs".""" - script = os.path.abspath(get_script()) - script_dir = os.path.basename(os.path.dirname(script)) - script_base = os.path.splitext(os.path.basename(script))[0] - return "%s-%s" % (script_dir, script_base) - - -def get_data_dir(): - """Get data dir for the experiment. - - This is the subdirectory "data" of the directory containing - the main script.""" - return os.path.join(get_script_dir(), "data", get_experiment_name()) - - -def get_repo_base(): - """Get base directory of the repository, as an absolute path. - - Search upwards in the directory tree from the main script until a - directory with a subdirectory named ".git" is found. - - Abort if the repo base cannot be found.""" - path = os.path.abspath(get_script_dir()) - while os.path.dirname(path) != path: - if os.path.exists(os.path.join(path, ".git")): - return path - path = os.path.dirname(path) - sys.exit("repo base could not be found") - - -def is_running_on_cluster(): - node = platform.node() - return node.endswith(".scicore.unibas.ch") or node.endswith(".cluster.bc2.ch") - - -def is_test_run(): - return ARGS.test_run == "yes" or ( - ARGS.test_run == "auto" and not is_running_on_cluster()) - - -def get_algo_nick(revision, config_nick): - return "{revision}-{config_nick}".format(**locals()) - - -class IssueConfig(object): - """Hold information about a planner configuration. - - See FastDownwardExperiment.add_algorithm() for documentation of the - constructor's options. 
- - """ - def __init__(self, nick, component_options, - build_options=None, driver_options=None): - self.nick = nick - self.component_options = component_options - self.build_options = build_options - self.driver_options = driver_options - - -class IssueExperiment(FastDownwardExperiment): - """Subclass of FastDownwardExperiment with some convenience features.""" - - DEFAULT_TEST_SUITE = ["depot:p01.pddl", "gripper:prob01.pddl"] - - DEFAULT_TABLE_ATTRIBUTES = [ - "cost", - "coverage", - "error", - "evaluations", - "expansions", - "expansions_until_last_jump", - "initial_h_value", - "generated", - "memory", - "planner_memory", - "planner_time", - "quality", - "run_dir", - "score_evaluations", - "score_expansions", - "score_generated", - "score_memory", - "score_search_time", - "score_total_time", - "search_time", - "total_time", - ] - - DEFAULT_SCATTER_PLOT_ATTRIBUTES = [ - "evaluations", - "expansions", - "expansions_until_last_jump", - "initial_h_value", - "memory", - "search_time", - "total_time", - ] - - PORTFOLIO_ATTRIBUTES = [ - "cost", - "coverage", - "error", - "plan_length", - "run_dir", - ] - - def __init__(self, revisions=None, configs=None, path=None, **kwargs): - """ - - You can either specify both *revisions* and *configs* or none - of them. If they are omitted, you will need to call - exp.add_algorithm() manually. - - If *revisions* is given, it must be a non-empty list of - revision identifiers, which specify which planner versions to - use in the experiment. The same versions are used for - translator, preprocessor and search. :: - - IssueExperiment(revisions=["issue123", "4b3d581643"], ...) - - If *configs* is given, it must be a non-empty list of - IssueConfig objects. :: - - IssueExperiment(..., configs=[ - IssueConfig("ff", ["--search", "eager_greedy(ff())"]), - IssueConfig( - "lama", [], - driver_options=["--alias", "seq-sat-lama-2011"]), - ]) - - If *path* is specified, it must be the path to where the - experiment should be built (e.g. 
- /home/john/experiments/issue123/exp01/). If omitted, the - experiment path is derived automatically from the main - script's filename. Example:: - - script = experiments/issue123/exp01.py --> - path = experiments/issue123/data/issue123-exp01/ - - """ - - path = path or get_data_dir() - - FastDownwardExperiment.__init__(self, path=path, **kwargs) - - if (revisions and not configs) or (not revisions and configs): - raise ValueError( - "please provide either both or none of revisions and configs") - - for rev in revisions: - for config in configs: - self.add_algorithm( - get_algo_nick(rev, config.nick), - get_repo_base(), - rev, - config.component_options, - build_options=config.build_options, - driver_options=config.driver_options) - - self._revisions = revisions - self._configs = configs - - @classmethod - def _is_portfolio(cls, config_nick): - return "fdss" in config_nick - - @classmethod - def get_supported_attributes(cls, config_nick, attributes): - if cls._is_portfolio(config_nick): - return [attr for attr in attributes - if attr in cls.PORTFOLIO_ATTRIBUTES] - return attributes - - def add_absolute_report_step(self, **kwargs): - """Add step that makes an absolute report. - - Absolute reports are useful for experiments that don't compare - revisions. - - The report is written to the experiment evaluation directory. - - All *kwargs* will be passed to the AbsoluteReport class. If the - keyword argument *attributes* is not specified, a default list - of attributes is used. :: - - exp.add_absolute_report_step(attributes=["coverage"]) - - """ - kwargs.setdefault("attributes", self.DEFAULT_TABLE_ATTRIBUTES) - report = AbsoluteReport(**kwargs) - outfile = os.path.join( - self.eval_dir, - get_experiment_name() + "." 
+ report.output_format) - self.add_report(report, outfile=outfile) - self.add_step( - 'publish-absolute-report', subprocess.call, ['publish', outfile]) - - def add_comparison_table_step(self, **kwargs): - """Add a step that makes pairwise revision comparisons. - - Create comparative reports for all pairs of Fast Downward - revisions. Each report pairs up the runs of the same config and - lists the two absolute attribute values and their difference - for all attributes in kwargs["attributes"]. - - All *kwargs* will be passed to the CompareConfigsReport class. - If the keyword argument *attributes* is not specified, a - default list of attributes is used. :: - - exp.add_comparison_table_step(attributes=["coverage"]) - - """ - kwargs.setdefault("attributes", self.DEFAULT_TABLE_ATTRIBUTES) - - def make_comparison_tables(): - for rev1, rev2 in itertools.combinations(self._revisions, 2): - compared_configs = [] - for config in self._configs: - config_nick = config.nick - compared_configs.append( - ("%s-%s" % (rev1, config_nick), - "%s-%s" % (rev2, config_nick), - "Diff (%s)" % config_nick)) - report = ComparativeReport(compared_configs, **kwargs) - outfile = os.path.join( - self.eval_dir, - "%s-%s-%s-compare.%s" % ( - self.name, rev1, rev2, report.output_format)) - report(self.eval_dir, outfile) - - def publish_comparison_tables(): - for rev1, rev2 in itertools.combinations(self._revisions, 2): - outfile = os.path.join( - self.eval_dir, - "%s-%s-%s-compare.html" % (self.name, rev1, rev2)) - subprocess.call(["publish", outfile]) - - self.add_step("make-comparison-tables", make_comparison_tables) - self.add_step( - "publish-comparison-tables", publish_comparison_tables) - - def add_scatter_plot_step(self, relative=False, attributes=None, additional=[]): - """Add step creating (relative) scatter plots for all revision pairs. - - Create a scatter plot for each combination of attribute, - configuration and revisions pair. 
If *attributes* is not - specified, a list of common scatter plot attributes is used. - For portfolios all attributes except "cost", "coverage" and - "plan_length" will be ignored. :: - - exp.add_scatter_plot_step(attributes=["expansions"]) - - """ - if relative: - scatter_dir = os.path.join(self.eval_dir, "scatter-relative") - step_name = "make-relative-scatter-plots" - else: - scatter_dir = os.path.join(self.eval_dir, "scatter-absolute") - step_name = "make-absolute-scatter-plots" - if attributes is None: - attributes = self.DEFAULT_SCATTER_PLOT_ATTRIBUTES - - def make_scatter_plot(config_nick, rev1, rev2, attribute, config_nick2=None): - name = "-".join([self.name, rev1, rev2, attribute, config_nick]) - if config_nick2 is not None: - name += "-" + config_nick2 - print("Make scatter plot for", name) - algo1 = get_algo_nick(rev1, config_nick) - algo2 = get_algo_nick(rev2, config_nick if config_nick2 is None else config_nick2) - report = ScatterPlotReport( - filter_algorithm=[algo1, algo2], - attributes=[attribute], - relative=relative, - get_category=lambda run1, run2: run1["domain"]) - report( - self.eval_dir, - os.path.join(scatter_dir, rev1 + "-" + rev2, name)) - - def make_scatter_plots(): - for config in self._configs: - print(config) - for rev1, rev2 in itertools.combinations(self._revisions, 2): - print(rev1, rev2) - for attribute in self.get_supported_attributes( - config.nick, attributes): - print(attribute) - make_scatter_plot(config.nick, rev1, rev2, attribute) - for nick1, nick2, rev1, rev2, attribute in additional: - make_scatter_plot(nick1, rev1, rev2, attribute, config_nick2=nick2) - - self.add_step(step_name, make_scatter_plots) diff --git a/experiments/issue995/landmark_parser.py b/experiments/issue995/landmark_parser.py deleted file mode 100755 index dd77b69c4f..0000000000 --- a/experiments/issue995/landmark_parser.py +++ /dev/null @@ -1,29 +0,0 @@ -import re - -from lab.parser import Parser - -class BottomUpParser(Parser): - - def 
__init__(self): - super().__init__() - - def add_bottom_up_pattern(self, name, regex, file="run.log", required=False, type=int, flags=""): - - def search_from_bottom(content, props): - reversed_content = "\n".join(reversed(content.splitlines())) - match = re.search(regex, reversed_content) - if required and not match: - logging.error("Pattern {0} not found in file {1}".format(regex, file)) - if match: - props[name] = type(match.group(1)) - - self.add_function(search_from_bottom, file=file) - -parser = BottomUpParser() -parser.add_bottom_up_pattern("landmarks", r"Discovered (\d+) landmarks") -parser.add_bottom_up_pattern("conj_landmarks", r"(\d+) are conjunctive") -parser.add_bottom_up_pattern("disj_landmarks", r"(\d+) are disjunctive") -parser.add_bottom_up_pattern("edges", r"(\d+) edges") -parser.add_bottom_up_pattern("landmark_generation_time", r"Landmarks generation time: (.+)s",type=float) -parser.parse() - diff --git a/experiments/issue995/requirements.txt b/experiments/issue995/requirements.txt deleted file mode 100644 index d4330da5d4..0000000000 --- a/experiments/issue995/requirements.txt +++ /dev/null @@ -1,11 +0,0 @@ -cycler==0.10.0 -kiwisolver==1.3.1 -lab==6.2 -matplotlib==3.3.3 -numpy==1.22.2 -Pillow==9.0.1 -pyparsing==2.4.7 -python-dateutil==2.8.1 -simplejson==3.17.2 -six==1.15.0 -txt2tags==3.7 diff --git a/experiments/issue995/v1-optimal.py b/experiments/issue995/v1-optimal.py deleted file mode 100755 index c323d98219..0000000000 --- a/experiments/issue995/v1-optimal.py +++ /dev/null @@ -1,87 +0,0 @@ -#! 
/usr/bin/env python -# -*- coding: utf-8 -*- - - -import os - -from lab.environments import LocalEnvironment, BaselSlurmEnvironment - -import common_setup -from common_setup import IssueConfig, IssueExperiment - -DIR = os.path.dirname(os.path.abspath(__file__)) -SCRIPT_NAME = os.path.splitext(os.path.basename(__file__))[0] -BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"] -REVISIONS = ["issue995-base", "issue995-v1"] - -CONFIGS = [ - IssueConfig("seq-opt-bjolp", [], - driver_options=["--alias", "seq-opt-bjolp"]), - IssueConfig("lm_exhaust", ["--evaluator", "lmc=lmcount(lm_exhaust(),admissible=true)", - "--search", "astar(lmc,lazy_evaluator=lmc)"]), - IssueConfig("lm_hm", ["--evaluator", "lmc=lmcount(lm_hm(m=2),admissible=true)", - "--search", "astar(lmc,lazy_evaluator=lmc)"]), - IssueConfig("seq-opt-bjolp-optimal", ["--evaluator", - "lmc=lmcount(lm_merged([lm_rhw(),lm_hm(m=1)]),admissible=true,optimal=true)", - "--search", "astar(lmc,lazy_evaluator=lmc)"]), -] - -SUITE = common_setup.DEFAULT_OPTIMAL_SUITE -ENVIRONMENT = BaselSlurmEnvironment( - partition="infai_2", - email="salome.eriksson@unibas.ch", - export=["PATH", "DOWNWARD_BENCHMARKS"], -) - -if common_setup.is_test_run(): - SUITE = IssueExperiment.DEFAULT_TEST_SUITE - ENVIRONMENT = LocalEnvironment(processes=2) - -exp = common_setup.IssueExperiment( - revisions=REVISIONS, - configs=CONFIGS, - environment=ENVIRONMENT, -) - -exp.add_suite(BENCHMARKS_DIR, SUITE) - -exp.add_parser(exp.EXITCODE_PARSER) -exp.add_parser(exp.PLANNER_PARSER) -exp.add_parser(exp.SINGLE_SEARCH_PARSER) -exp.add_parser(os.path.join(DIR, "landmark_parser.py")) - - -exp.add_step("build", exp.build) -exp.add_step("start", exp.start_runs) -exp.add_fetcher(name="fetch") - -ATTRIBUTES = [ - "cost", - "coverage", - "error", - "evaluations", - "expansions", - "expansions_until_last_jump", - "initial_h_value", - "generated", - "memory", - "planner_memory", - "planner_time", - "quality", - "run_dir", - "score_evaluations", - "score_expansions", 
- "score_generated", - "score_memory", - "score_search_time", - "score_total_time", - "search_time", - "total_time", - "landmarks", - "edges" - ] -exp.add_comparison_table_step(attributes=ATTRIBUTES) -exp.add_scatter_plot_step(attributes=['search_time']) -exp.add_scatter_plot_step(attributes=['search_time'], relative=True) -exp.run_steps() - diff --git a/experiments/issue995/v1-satisficing.py b/experiments/issue995/v1-satisficing.py deleted file mode 100755 index b9dfd57f8d..0000000000 --- a/experiments/issue995/v1-satisficing.py +++ /dev/null @@ -1,81 +0,0 @@ -#! /usr/bin/env python -# -*- coding: utf-8 -*- - - -import os - -from lab.environments import LocalEnvironment, BaselSlurmEnvironment - -import common_setup -from common_setup import IssueConfig, IssueExperiment - -DIR = os.path.dirname(os.path.abspath(__file__)) -SCRIPT_NAME = os.path.splitext(os.path.basename(__file__))[0] -BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"] -REVISIONS = ["issue995-base", "issue995-v1"] - -CONFIGS = [ - IssueConfig("lama-first", [], - driver_options=["--alias", "lama-first"]), - IssueConfig("lm_zg", ["--search", "eager_greedy([lmcount(lm_zg())])"]), -] - -SUITE = common_setup.DEFAULT_OPTIMAL_SUITE -ENVIRONMENT = BaselSlurmEnvironment( - partition="infai_2", - email="salome.eriksson@unibas.ch", - export=['export PATH=/scicore/soft/apps/binutils/2.32-GCCcore-8.3.0/bin:/scicore/soft/apps/GCCcore/8.3.0/bin:/infai/sieverss/repos/bin:/infai/sieverss/local:/export/soft/lua_lmod/centos7/lmod/lmod/libexec:$PATH\nexport LD_LIBRARY_PATH=/scicore/soft/apps/binutils/2.32-GCCcore-8.3.0/lib:/scicore/soft/apps/zlib/1.2.11-GCCcore-8.3.0/lib:/scicore/soft/apps/GCCcore/8.3.0/lib64:/scicore/soft/apps/GCCcore/8.3.0/lib', "DOWNWARD_BENCHMARKS"], -) - -if common_setup.is_test_run(): - SUITE = IssueExperiment.DEFAULT_TEST_SUITE - ENVIRONMENT = LocalEnvironment(processes=2) - -exp = common_setup.IssueExperiment( - revisions=REVISIONS, - configs=CONFIGS, - environment=ENVIRONMENT, -) - 
-exp.add_suite(BENCHMARKS_DIR, SUITE) - -exp.add_parser(exp.ANYTIME_SEARCH_PARSER) -exp.add_parser(exp.EXITCODE_PARSER) -exp.add_parser(exp.PLANNER_PARSER) -exp.add_parser(exp.SINGLE_SEARCH_PARSER) -exp.add_parser(os.path.join(DIR, "landmark_parser.py")) - - -exp.add_step("build", exp.build) -exp.add_step("start", exp.start_runs) -exp.add_fetcher(name="fetch") -ATTRIBUTES = [ - "cost", - "coverage", - "error", - "evaluations", - "expansions", - "expansions_until_last_jump", - "initial_h_value", - "generated", - "memory", - "planner_memory", - "planner_time", - "quality", - "run_dir", - "score_evaluations", - "score_expansions", - "score_generated", - "score_memory", - "score_search_time", - "score_total_time", - "search_time", - "total_time", - "landmarks", - "edges" - ] - -exp.add_comparison_table_step(attributes=ATTRIBUTES) -exp.add_parse_again_step() -exp.run_steps() - diff --git a/experiments/issue995/v2-optimal.py b/experiments/issue995/v2-optimal.py deleted file mode 100755 index a7810e1132..0000000000 --- a/experiments/issue995/v2-optimal.py +++ /dev/null @@ -1,89 +0,0 @@ -#! 
/usr/bin/env python -# -*- coding: utf-8 -*- - - -import os - -from lab.environments import LocalEnvironment, BaselSlurmEnvironment - -import common_setup -from common_setup import IssueConfig, IssueExperiment - -DIR = os.path.dirname(os.path.abspath(__file__)) -SCRIPT_NAME = os.path.splitext(os.path.basename(__file__))[0] -BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"] -REVISIONS = ["issue995-base", "issue995-v2"] - -CONFIGS = [ - IssueConfig("seq-opt-bjolp", [], - driver_options=["--alias", "seq-opt-bjolp"]), - IssueConfig("lm_exhaust", ["--evaluator", "lmc=lmcount(lm_exhaust(),admissible=true)", - "--search", "astar(lmc,lazy_evaluator=lmc)"]), - IssueConfig("lm_hm", ["--evaluator", "lmc=lmcount(lm_hm(m=2),admissible=true)", - "--search", "astar(lmc,lazy_evaluator=lmc)"]), - IssueConfig("seq-opt-bjolp-optimal", ["--evaluator", - "lmc=lmcount(lm_merged([lm_rhw(),lm_hm(m=1)]),admissible=true,optimal=true)", - "--search", "astar(lmc,lazy_evaluator=lmc)"]), -] - -SUITE = common_setup.DEFAULT_OPTIMAL_SUITE -ENVIRONMENT = BaselSlurmEnvironment( - partition="infai_2", - email="salome.eriksson@unibas.ch", - export=["PATH", "DOWNWARD_BENCHMARKS"], -) - -if common_setup.is_test_run(): - SUITE = IssueExperiment.DEFAULT_TEST_SUITE - ENVIRONMENT = LocalEnvironment(processes=2) - -exp = common_setup.IssueExperiment( - revisions=REVISIONS, - configs=CONFIGS, - environment=ENVIRONMENT, -) - -exp.add_suite(BENCHMARKS_DIR, SUITE) - -exp.add_parser(exp.EXITCODE_PARSER) -exp.add_parser(exp.PLANNER_PARSER) -exp.add_parser(exp.SINGLE_SEARCH_PARSER) -exp.add_parser(os.path.join(DIR, "landmark_parser.py")) - - -exp.add_step("build", exp.build) -exp.add_step("start", exp.start_runs) -exp.add_fetcher(name="fetch") -exp.add_parse_again_step() - -ATTRIBUTES = [ - "cost", - "coverage", - "error", - "evaluations", - "expansions", - "expansions_until_last_jump", - "initial_h_value", - "generated", - "memory", - "planner_memory", - "planner_time", - "quality", - "run_dir", - 
"score_evaluations", - "score_expansions", - "score_generated", - "score_memory", - "score_search_time", - "score_total_time", - "search_time", - "total_time", - "landmarks", - "edges", - "landmark_generation_time" - ] -exp.add_comparison_table_step(attributes=ATTRIBUTES) -exp.add_scatter_plot_step(attributes=['search_time']) -exp.add_scatter_plot_step(attributes=['search_time'], relative=True) -exp.run_steps() - diff --git a/experiments/issue995/v2-satisficing.py b/experiments/issue995/v2-satisficing.py deleted file mode 100755 index 13a121e69f..0000000000 --- a/experiments/issue995/v2-satisficing.py +++ /dev/null @@ -1,82 +0,0 @@ -#! /usr/bin/env python -# -*- coding: utf-8 -*- - - -import os - -from lab.environments import LocalEnvironment, BaselSlurmEnvironment - -import common_setup -from common_setup import IssueConfig, IssueExperiment - -DIR = os.path.dirname(os.path.abspath(__file__)) -SCRIPT_NAME = os.path.splitext(os.path.basename(__file__))[0] -BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"] -REVISIONS = ["issue995-base", "issue995-v2"] - -CONFIGS = [ - IssueConfig("lama-first", [], - driver_options=["--alias", "lama-first"]), - IssueConfig("lm_zg", ["--search", "eager_greedy([lmcount(lm_zg())])"]), -] - -SUITE = common_setup.DEFAULT_OPTIMAL_SUITE -ENVIRONMENT = BaselSlurmEnvironment( - partition="infai_2", - email="salome.eriksson@unibas.ch", - export=['export PATH=/scicore/soft/apps/binutils/2.32-GCCcore-8.3.0/bin:/scicore/soft/apps/GCCcore/8.3.0/bin:/infai/sieverss/repos/bin:/infai/sieverss/local:/export/soft/lua_lmod/centos7/lmod/lmod/libexec:$PATH\nexport LD_LIBRARY_PATH=/scicore/soft/apps/binutils/2.32-GCCcore-8.3.0/lib:/scicore/soft/apps/zlib/1.2.11-GCCcore-8.3.0/lib:/scicore/soft/apps/GCCcore/8.3.0/lib64:/scicore/soft/apps/GCCcore/8.3.0/lib', "DOWNWARD_BENCHMARKS"], -) - -if common_setup.is_test_run(): - SUITE = IssueExperiment.DEFAULT_TEST_SUITE - ENVIRONMENT = LocalEnvironment(processes=2) - -exp = common_setup.IssueExperiment( - 
revisions=REVISIONS, - configs=CONFIGS, - environment=ENVIRONMENT, -) - -exp.add_suite(BENCHMARKS_DIR, SUITE) - -exp.add_parser(exp.ANYTIME_SEARCH_PARSER) -exp.add_parser(exp.EXITCODE_PARSER) -exp.add_parser(exp.PLANNER_PARSER) -exp.add_parser(exp.SINGLE_SEARCH_PARSER) -exp.add_parser(os.path.join(DIR, "landmark_parser.py")) - - -exp.add_step("build", exp.build) -exp.add_step("start", exp.start_runs) -exp.add_fetcher(name="fetch") -exp.add_parse_again_step() -ATTRIBUTES = [ - "cost", - "coverage", - "error", - "evaluations", - "expansions", - "expansions_until_last_jump", - "initial_h_value", - "generated", - "memory", - "planner_memory", - "planner_time", - "quality", - "run_dir", - "score_evaluations", - "score_expansions", - "score_generated", - "score_memory", - "score_search_time", - "score_total_time", - "search_time", - "total_time", - "landmarks", - "edges", - "landmark_generation_time", - ] - -exp.add_comparison_table_step(attributes=ATTRIBUTES) -exp.run_steps() - diff --git a/experiments/issue995/v3-optimal.py b/experiments/issue995/v3-optimal.py deleted file mode 100755 index 6a59d0fcef..0000000000 --- a/experiments/issue995/v3-optimal.py +++ /dev/null @@ -1,88 +0,0 @@ -#! 
/usr/bin/env python -# -*- coding: utf-8 -*- - - -import os - -from lab.environments import LocalEnvironment, BaselSlurmEnvironment - -import common_setup -from common_setup import IssueConfig, IssueExperiment - -DIR = os.path.dirname(os.path.abspath(__file__)) -SCRIPT_NAME = os.path.splitext(os.path.basename(__file__))[0] -BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"] -REVISIONS = ["issue995-v3-base", "issue995-v3"] - -CONFIGS = [ - IssueConfig("seq-opt-bjolp", [], - driver_options=["--alias", "seq-opt-bjolp"]), - IssueConfig("lm_exhaust", ["--evaluator", "lmc=lmcount(lm_exhaust(),admissible=true)", - "--search", "astar(lmc,lazy_evaluator=lmc)"]), - IssueConfig("lm_hm", ["--evaluator", "lmc=lmcount(lm_hm(m=2),admissible=true)", - "--search", "astar(lmc,lazy_evaluator=lmc)"]), - IssueConfig("seq-opt-bjolp-optimal", ["--evaluator", - "lmc=lmcount(lm_merged([lm_rhw(),lm_hm(m=1)]),admissible=true,optimal=true)", - "--search", "astar(lmc,lazy_evaluator=lmc)"]), -] - -SUITE = common_setup.DEFAULT_OPTIMAL_SUITE -ENVIRONMENT = BaselSlurmEnvironment( - partition="infai_2", - email="salome.eriksson@unibas.ch", - export=["PATH", "DOWNWARD_BENCHMARKS"], -) - -if common_setup.is_test_run(): - SUITE = IssueExperiment.DEFAULT_TEST_SUITE - ENVIRONMENT = LocalEnvironment(processes=2) - -exp = common_setup.IssueExperiment( - revisions=REVISIONS, - configs=CONFIGS, - environment=ENVIRONMENT, -) - -exp.add_suite(BENCHMARKS_DIR, SUITE) - -exp.add_parser(exp.EXITCODE_PARSER) -exp.add_parser(exp.PLANNER_PARSER) -exp.add_parser(exp.SINGLE_SEARCH_PARSER) -exp.add_parser(os.path.join(DIR, "landmark_parser.py")) - - -exp.add_step("build", exp.build) -exp.add_step("start", exp.start_runs) -exp.add_fetcher(name="fetch") - -ATTRIBUTES = [ - "cost", - "coverage", - "error", - "evaluations", - "expansions", - "expansions_until_last_jump", - "initial_h_value", - "generated", - "memory", - "planner_memory", - "planner_time", - "quality", - "run_dir", - "score_evaluations", - 
"score_expansions", - "score_generated", - "score_memory", - "score_search_time", - "score_total_time", - "search_time", - "total_time", - "landmarks", - "edges", - "landmark_generation_time" - ] -exp.add_comparison_table_step(attributes=ATTRIBUTES) -exp.add_scatter_plot_step(attributes=['search_time']) -exp.add_scatter_plot_step(attributes=['search_time'], relative=True) -exp.run_steps() - diff --git a/experiments/issue995/v3-satisficing.py b/experiments/issue995/v3-satisficing.py deleted file mode 100755 index ff7836bbbc..0000000000 --- a/experiments/issue995/v3-satisficing.py +++ /dev/null @@ -1,81 +0,0 @@ -#! /usr/bin/env python -# -*- coding: utf-8 -*- - - -import os - -from lab.environments import LocalEnvironment, BaselSlurmEnvironment - -import common_setup -from common_setup import IssueConfig, IssueExperiment - -DIR = os.path.dirname(os.path.abspath(__file__)) -SCRIPT_NAME = os.path.splitext(os.path.basename(__file__))[0] -BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"] -REVISIONS = ["issue995-v3-base", "issue995-v3"] - -CONFIGS = [ - IssueConfig("lama-first", [], - driver_options=["--alias", "lama-first"]), - IssueConfig("lm_zg", ["--search", "eager_greedy([lmcount(lm_zg())])"]), -] - -SUITE = common_setup.DEFAULT_OPTIMAL_SUITE -ENVIRONMENT = BaselSlurmEnvironment( - partition="infai_2", - email="salome.eriksson@unibas.ch", - export=['export PATH=/scicore/soft/apps/binutils/2.32-GCCcore-8.3.0/bin:/scicore/soft/apps/GCCcore/8.3.0/bin:/infai/sieverss/repos/bin:/infai/sieverss/local:/export/soft/lua_lmod/centos7/lmod/lmod/libexec:$PATH\nexport LD_LIBRARY_PATH=/scicore/soft/apps/binutils/2.32-GCCcore-8.3.0/lib:/scicore/soft/apps/zlib/1.2.11-GCCcore-8.3.0/lib:/scicore/soft/apps/GCCcore/8.3.0/lib64:/scicore/soft/apps/GCCcore/8.3.0/lib', "DOWNWARD_BENCHMARKS"], -) - -if common_setup.is_test_run(): - SUITE = IssueExperiment.DEFAULT_TEST_SUITE - ENVIRONMENT = LocalEnvironment(processes=2) - -exp = common_setup.IssueExperiment( - revisions=REVISIONS, - 
configs=CONFIGS, - environment=ENVIRONMENT, -) - -exp.add_suite(BENCHMARKS_DIR, SUITE) - -exp.add_parser(exp.ANYTIME_SEARCH_PARSER) -exp.add_parser(exp.EXITCODE_PARSER) -exp.add_parser(exp.PLANNER_PARSER) -exp.add_parser(exp.SINGLE_SEARCH_PARSER) -exp.add_parser(os.path.join(DIR, "landmark_parser.py")) - - -exp.add_step("build", exp.build) -exp.add_step("start", exp.start_runs) -exp.add_fetcher(name="fetch") -ATTRIBUTES = [ - "cost", - "coverage", - "error", - "evaluations", - "expansions", - "expansions_until_last_jump", - "initial_h_value", - "generated", - "memory", - "planner_memory", - "planner_time", - "quality", - "run_dir", - "score_evaluations", - "score_expansions", - "score_generated", - "score_memory", - "score_search_time", - "score_total_time", - "search_time", - "total_time", - "landmarks", - "edges", - "landmark_generation_time", - ] - -exp.add_comparison_table_step(attributes=ATTRIBUTES) -exp.run_steps() - diff --git a/experiments/issue998/common_setup.py b/experiments/issue998/common_setup.py deleted file mode 100644 index eeca3aadb5..0000000000 --- a/experiments/issue998/common_setup.py +++ /dev/null @@ -1,395 +0,0 @@ -# -*- coding: utf-8 -*- - -import itertools -import os -import platform -import subprocess -import sys - -from lab.experiment import ARGPARSER -from lab import tools - -from downward.experiment import FastDownwardExperiment -from downward.reports.absolute import AbsoluteReport -from downward.reports.compare import ComparativeReport -from downward.reports.scatter import ScatterPlotReport - - -def parse_args(): - ARGPARSER.add_argument( - "--test", - choices=["yes", "no", "auto"], - default="auto", - dest="test_run", - help="test experiment locally on a small suite if --test=yes or " - "--test=auto and we are not on a cluster") - return ARGPARSER.parse_args() - -ARGS = parse_args() - - -DEFAULT_OPTIMAL_SUITE = [ - 'agricola-opt18-strips', 'airport', 'barman-opt11-strips', - 'barman-opt14-strips', 'blocks', 'childsnack-opt14-strips', 
- 'data-network-opt18-strips', 'depot', 'driverlog', - 'elevators-opt08-strips', 'elevators-opt11-strips', - 'floortile-opt11-strips', 'floortile-opt14-strips', 'freecell', - 'ged-opt14-strips', 'grid', 'gripper', 'hiking-opt14-strips', - 'logistics00', 'logistics98', 'miconic', 'movie', 'mprime', - 'mystery', 'nomystery-opt11-strips', 'openstacks-opt08-strips', - 'openstacks-opt11-strips', 'openstacks-opt14-strips', - 'openstacks-strips', 'organic-synthesis-opt18-strips', - 'organic-synthesis-split-opt18-strips', 'parcprinter-08-strips', - 'parcprinter-opt11-strips', 'parking-opt11-strips', - 'parking-opt14-strips', 'pathways-noneg', 'pegsol-08-strips', - 'pegsol-opt11-strips', 'petri-net-alignment-opt18-strips', - 'pipesworld-notankage', 'pipesworld-tankage', 'psr-small', 'rovers', - 'satellite', 'scanalyzer-08-strips', 'scanalyzer-opt11-strips', - 'snake-opt18-strips', 'sokoban-opt08-strips', - 'sokoban-opt11-strips', 'spider-opt18-strips', 'storage', - 'termes-opt18-strips', 'tetris-opt14-strips', - 'tidybot-opt11-strips', 'tidybot-opt14-strips', 'tpp', - 'transport-opt08-strips', 'transport-opt11-strips', - 'transport-opt14-strips', 'trucks-strips', 'visitall-opt11-strips', - 'visitall-opt14-strips', 'woodworking-opt08-strips', - 'woodworking-opt11-strips', 'zenotravel'] - -DEFAULT_SATISFICING_SUITE = [ - 'agricola-sat18-strips', 'airport', 'assembly', - 'barman-sat11-strips', 'barman-sat14-strips', 'blocks', - 'caldera-sat18-adl', 'caldera-split-sat18-adl', 'cavediving-14-adl', - 'childsnack-sat14-strips', 'citycar-sat14-adl', - 'data-network-sat18-strips', 'depot', 'driverlog', - 'elevators-sat08-strips', 'elevators-sat11-strips', - 'flashfill-sat18-adl', 'floortile-sat11-strips', - 'floortile-sat14-strips', 'freecell', 'ged-sat14-strips', 'grid', - 'gripper', 'hiking-sat14-strips', 'logistics00', 'logistics98', - 'maintenance-sat14-adl', 'miconic', 'miconic-fulladl', - 'miconic-simpleadl', 'movie', 'mprime', 'mystery', - 'nomystery-sat11-strips', 
'nurikabe-sat18-adl', 'openstacks', - 'openstacks-sat08-adl', 'openstacks-sat08-strips', - 'openstacks-sat11-strips', 'openstacks-sat14-strips', - 'openstacks-strips', 'optical-telegraphs', - 'organic-synthesis-sat18-strips', - 'organic-synthesis-split-sat18-strips', 'parcprinter-08-strips', - 'parcprinter-sat11-strips', 'parking-sat11-strips', - 'parking-sat14-strips', 'pathways', 'pathways-noneg', - 'pegsol-08-strips', 'pegsol-sat11-strips', 'philosophers', - 'pipesworld-notankage', 'pipesworld-tankage', 'psr-large', - 'psr-middle', 'psr-small', 'rovers', 'satellite', - 'scanalyzer-08-strips', 'scanalyzer-sat11-strips', 'schedule', - 'settlers-sat18-adl', 'snake-sat18-strips', 'sokoban-sat08-strips', - 'sokoban-sat11-strips', 'spider-sat18-strips', 'storage', - 'termes-sat18-strips', 'tetris-sat14-strips', - 'thoughtful-sat14-strips', 'tidybot-sat11-strips', 'tpp', - 'transport-sat08-strips', 'transport-sat11-strips', - 'transport-sat14-strips', 'trucks', 'trucks-strips', - 'visitall-sat11-strips', 'visitall-sat14-strips', - 'woodworking-sat08-strips', 'woodworking-sat11-strips', - 'zenotravel'] - - -def get_script(): - """Get file name of main script.""" - return tools.get_script_path() - - -def get_script_dir(): - """Get directory of main script. - - Usually a relative directory (depends on how it was called by the user.)""" - return os.path.dirname(get_script()) - - -def get_experiment_name(): - """Get name for experiment. - - Derived from the absolute filename of the main script, e.g. - "/ham/spam/eggs.py" => "spam-eggs".""" - script = os.path.abspath(get_script()) - script_dir = os.path.basename(os.path.dirname(script)) - script_base = os.path.splitext(os.path.basename(script))[0] - return "%s-%s" % (script_dir, script_base) - - -def get_data_dir(): - """Get data dir for the experiment. 
- - This is the subdirectory "data" of the directory containing - the main script.""" - return os.path.join(get_script_dir(), "data", get_experiment_name()) - - -def get_repo_base(): - """Get base directory of the repository, as an absolute path. - - Search upwards in the directory tree from the main script until a - directory with a subdirectory named ".git" is found. - - Abort if the repo base cannot be found.""" - path = os.path.abspath(get_script_dir()) - while os.path.dirname(path) != path: - if os.path.exists(os.path.join(path, ".git")): - return path - path = os.path.dirname(path) - sys.exit("repo base could not be found") - - -def is_running_on_cluster(): - node = platform.node() - return node.endswith(".scicore.unibas.ch") or node.endswith(".cluster.bc2.ch") - - -def is_test_run(): - return ARGS.test_run == "yes" or ( - ARGS.test_run == "auto" and not is_running_on_cluster()) - - -def get_algo_nick(revision, config_nick): - return "{revision}-{config_nick}".format(**locals()) - - -class IssueConfig(object): - """Hold information about a planner configuration. - - See FastDownwardExperiment.add_algorithm() for documentation of the - constructor's options. 
- - """ - def __init__(self, nick, component_options, - build_options=None, driver_options=None): - self.nick = nick - self.component_options = component_options - self.build_options = build_options - self.driver_options = driver_options - - -class IssueExperiment(FastDownwardExperiment): - """Subclass of FastDownwardExperiment with some convenience features.""" - - DEFAULT_TEST_SUITE = ["depot:p01.pddl", "gripper:prob01.pddl"] - - DEFAULT_TABLE_ATTRIBUTES = [ - "cost", - "coverage", - "error", - "evaluations", - "expansions", - "expansions_until_last_jump", - "initial_h_value", - "generated", - "memory", - "planner_memory", - "planner_time", - "quality", - "run_dir", - "score_evaluations", - "score_expansions", - "score_generated", - "score_memory", - "score_search_time", - "score_total_time", - "search_time", - "total_time", - ] - - DEFAULT_SCATTER_PLOT_ATTRIBUTES = [ - "evaluations", - "expansions", - "expansions_until_last_jump", - "initial_h_value", - "memory", - "search_time", - "total_time", - ] - - PORTFOLIO_ATTRIBUTES = [ - "cost", - "coverage", - "error", - "plan_length", - "run_dir", - ] - - def __init__(self, revisions=None, configs=None, path=None, **kwargs): - """ - - You can either specify both *revisions* and *configs* or none - of them. If they are omitted, you will need to call - exp.add_algorithm() manually. - - If *revisions* is given, it must be a non-empty list of - revision identifiers, which specify which planner versions to - use in the experiment. The same versions are used for - translator, preprocessor and search. :: - - IssueExperiment(revisions=["issue123", "4b3d581643"], ...) - - If *configs* is given, it must be a non-empty list of - IssueConfig objects. :: - - IssueExperiment(..., configs=[ - IssueConfig("ff", ["--search", "eager_greedy(ff())"]), - IssueConfig( - "lama", [], - driver_options=["--alias", "seq-sat-lama-2011"]), - ]) - - If *path* is specified, it must be the path to where the - experiment should be built (e.g. 
- /home/john/experiments/issue123/exp01/). If omitted, the - experiment path is derived automatically from the main - script's filename. Example:: - - script = experiments/issue123/exp01.py --> - path = experiments/issue123/data/issue123-exp01/ - - """ - - path = path or get_data_dir() - - FastDownwardExperiment.__init__(self, path=path, **kwargs) - - if (revisions and not configs) or (not revisions and configs): - raise ValueError( - "please provide either both or none of revisions and configs") - - for rev in revisions: - for config in configs: - self.add_algorithm( - get_algo_nick(rev, config.nick), - get_repo_base(), - rev, - config.component_options, - build_options=config.build_options, - driver_options=config.driver_options) - - self._revisions = revisions - self._configs = configs - - @classmethod - def _is_portfolio(cls, config_nick): - return "fdss" in config_nick - - @classmethod - def get_supported_attributes(cls, config_nick, attributes): - if cls._is_portfolio(config_nick): - return [attr for attr in attributes - if attr in cls.PORTFOLIO_ATTRIBUTES] - return attributes - - def add_absolute_report_step(self, **kwargs): - """Add step that makes an absolute report. - - Absolute reports are useful for experiments that don't compare - revisions. - - The report is written to the experiment evaluation directory. - - All *kwargs* will be passed to the AbsoluteReport class. If the - keyword argument *attributes* is not specified, a default list - of attributes is used. :: - - exp.add_absolute_report_step(attributes=["coverage"]) - - """ - kwargs.setdefault("attributes", self.DEFAULT_TABLE_ATTRIBUTES) - report = AbsoluteReport(**kwargs) - outfile = os.path.join( - self.eval_dir, - get_experiment_name() + "." 
+ report.output_format) - self.add_report(report, outfile=outfile) - self.add_step( - 'publish-absolute-report', subprocess.call, ['publish', outfile]) - - def add_comparison_table_step(self, **kwargs): - """Add a step that makes pairwise revision comparisons. - - Create comparative reports for all pairs of Fast Downward - revisions. Each report pairs up the runs of the same config and - lists the two absolute attribute values and their difference - for all attributes in kwargs["attributes"]. - - All *kwargs* will be passed to the CompareConfigsReport class. - If the keyword argument *attributes* is not specified, a - default list of attributes is used. :: - - exp.add_comparison_table_step(attributes=["coverage"]) - - """ - kwargs.setdefault("attributes", self.DEFAULT_TABLE_ATTRIBUTES) - - def make_comparison_tables(): - for rev1, rev2 in itertools.combinations(self._revisions, 2): - compared_configs = [] - for config in self._configs: - config_nick = config.nick - compared_configs.append( - ("%s-%s" % (rev1, config_nick), - "%s-%s" % (rev2, config_nick), - "Diff (%s)" % config_nick)) - report = ComparativeReport(compared_configs, **kwargs) - outfile = os.path.join( - self.eval_dir, - "%s-%s-%s-compare.%s" % ( - self.name, rev1, rev2, report.output_format)) - report(self.eval_dir, outfile) - - def publish_comparison_tables(): - for rev1, rev2 in itertools.combinations(self._revisions, 2): - outfile = os.path.join( - self.eval_dir, - "%s-%s-%s-compare.html" % (self.name, rev1, rev2)) - subprocess.call(["publish", outfile]) - - self.add_step("make-comparison-tables", make_comparison_tables) - self.add_step( - "publish-comparison-tables", publish_comparison_tables) - - def add_scatter_plot_step(self, relative=False, attributes=None, additional=[]): - """Add step creating (relative) scatter plots for all revision pairs. - - Create a scatter plot for each combination of attribute, - configuration and revisions pair. 
If *attributes* is not - specified, a list of common scatter plot attributes is used. - For portfolios all attributes except "cost", "coverage" and - "plan_length" will be ignored. :: - - exp.add_scatter_plot_step(attributes=["expansions"]) - - """ - if relative: - scatter_dir = os.path.join(self.eval_dir, "scatter-relative") - step_name = "make-relative-scatter-plots" - else: - scatter_dir = os.path.join(self.eval_dir, "scatter-absolute") - step_name = "make-absolute-scatter-plots" - if attributes is None: - attributes = self.DEFAULT_SCATTER_PLOT_ATTRIBUTES - - def make_scatter_plot(config_nick, rev1, rev2, attribute, config_nick2=None): - name = "-".join([self.name, rev1, rev2, attribute, config_nick]) - if config_nick2 is not None: - name += "-" + config_nick2 - print("Make scatter plot for", name) - algo1 = get_algo_nick(rev1, config_nick) - algo2 = get_algo_nick(rev2, config_nick if config_nick2 is None else config_nick2) - report = ScatterPlotReport( - filter_algorithm=[algo1, algo2], - attributes=[attribute], - relative=relative, - get_category=lambda run1, run2: run1["domain"]) - report( - self.eval_dir, - os.path.join(scatter_dir, rev1 + "-" + rev2, name)) - - def make_scatter_plots(): - for config in self._configs: - for rev1, rev2 in itertools.combinations(self._revisions, 2): - for attribute in self.get_supported_attributes( - config.nick, attributes): - make_scatter_plot(config.nick, rev1, rev2, attribute) - for nick1, nick2, rev1, rev2, attribute in additional: - make_scatter_plot(nick1, rev1, rev2, attribute, config_nick2=nick2) - - self.add_step(step_name, lambda: make_scatter_plots) diff --git a/experiments/issue998/landmark_parser.py b/experiments/issue998/landmark_parser.py deleted file mode 100755 index 943492471b..0000000000 --- a/experiments/issue998/landmark_parser.py +++ /dev/null @@ -1,29 +0,0 @@ -#! 
/usr/bin/env python - -import re - -from lab.parser import Parser - -parser = Parser() -parser.add_pattern( - "lmgraph_generation_time", - r"Landmark graph generation time: (.+)s", - type=float) -parser.add_pattern( - "landmarks", - r"Landmark graph contains (\d+) landmarks, of which \d+ are disjunctive and \d+ are conjunctive.", - type=int) -parser.add_pattern( - "landmarks_disjunctive", - r"Landmark graph contains \d+ landmarks, of which (\d+) are disjunctive and \d+ are conjunctive.", - type=int) -parser.add_pattern( - "landmarks_conjunctive", - r"Landmark graph contains \d+ landmarks, of which \d+ are disjunctive and (\d+) are conjunctive.", - type=int) -parser.add_pattern( - "orderings", - r"Landmark graph contains (\d+) orderings.", - type=int) - -parser.parse() diff --git a/experiments/issue998/requirements.txt b/experiments/issue998/requirements.txt deleted file mode 100644 index cd2f06836a..0000000000 --- a/experiments/issue998/requirements.txt +++ /dev/null @@ -1,22 +0,0 @@ -certifi==2019.9.11 -chardet==3.0.4 -cplex==12.10.0.0 -cycler==0.11.0 -docloud==1.0.375 -docplex==2.11.176 -enum34==1.1.6 -fonttools==4.29.1 -idna==2.8 -kiwisolver==1.3.2 -lab==7.0 -matplotlib==3.5.1 -numpy==1.22.2 -packaging==21.3 -Pillow==9.0.1 -pyparsing==3.0.7 -python-dateutil==2.8.2 -requests==2.22.0 -simplejson==3.17.6 -six==1.12.0 -txt2tags==3.7 -urllib3==1.26.5 diff --git a/experiments/issue998/v1-satisficing.py b/experiments/issue998/v1-satisficing.py deleted file mode 100755 index c1e6634c47..0000000000 --- a/experiments/issue998/v1-satisficing.py +++ /dev/null @@ -1,66 +0,0 @@ -#! 
/usr/bin/env python -# -*- coding: utf-8 -*- - -import common_setup - -from common_setup import IssueConfig, IssueExperiment - -import os - -from lab.reports import Attribute - -from lab.environments import LocalEnvironment, BaselSlurmEnvironment - -REVISIONS = [ - "issue998-base", - "issue998-v1", -] - -CONFIGS = [ - IssueConfig("lm_zg", ["--search", "eager_greedy([lmcount(lm_zg())])"]) -] - -BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"] -REPO = os.environ["DOWNWARD_REPO"] - -if common_setup.is_running_on_cluster(): - SUITE = common_setup.DEFAULT_SATISFICING_SUITE - ENVIRONMENT = BaselSlurmEnvironment( - partition="infai_2", - email="salome.eriksson@unibas.ch", - setup='export PATH=/scicore/soft/apps/binutils/2.32-GCCcore-8.3.0/bin:/scicore/soft/apps/CMake/3.15.3-GCCcore-8.3.0/bin:/scicore/soft/apps/cURL/7.66.0-GCCcore-8.3.0/bin:/scicore/soft/apps/bzip2/1.0.8-GCCcore-8.3.0/bin:/scicore/soft/apps/ncurses/6.1-GCCcore-8.3.0/bin:/scicore/soft/apps/GCCcore/8.3.0/bin:/infai/sieverss/repos/bin:/infai/sieverss/local:/export/soft/lua_lmod/centos7/lmod/lmod/libexec:/usr/local/bin:/usr/bin:/usr/local/sbin:/usr/sbin:$PATH\nexport LD_LIBRARY_PATH=/scicore/soft/apps/binutils/2.32-GCCcore-8.3.0/lib:/scicore/soft/apps/cURL/7.66.0-GCCcore-8.3.0/lib:/scicore/soft/apps/bzip2/1.0.8-GCCcore-8.3.0/lib:/scicore/soft/apps/zlib/1.2.11-GCCcore-8.3.0/lib:/scicore/soft/apps/ncurses/6.1-GCCcore-8.3.0/lib:/scicore/soft/apps/GCCcore/8.3.0/lib64:/scicore/soft/apps/GCCcore/8.3.0/lib') - -else: - SUITE = common_setup.IssueExperiment.DEFAULT_TEST_SUITE - ENVIRONMENT = LocalEnvironment(processes=2) - -exp = common_setup.IssueExperiment( - revisions=REVISIONS, - configs=CONFIGS, - environment=ENVIRONMENT, -) - -exp.add_suite(BENCHMARKS_DIR, SUITE) - -exp.add_parser(exp.ANYTIME_SEARCH_PARSER) -exp.add_parser(exp.EXITCODE_PARSER) -exp.add_parser(exp.PLANNER_PARSER) -exp.add_parser(exp.SINGLE_SEARCH_PARSER) -exp.add_parser("landmark_parser.py") - -ATTRIBUTES = 
IssueExperiment.DEFAULT_TABLE_ATTRIBUTES + [ - Attribute("landmarks", min_wins=False), - Attribute("landmarks_disjunctive", min_wins=False), - Attribute("landmarks_conjunctiv", min_wins=False), - Attribute("orderings", min_wins=False), - Attribute("lmgraph_generation_time"), -] - -exp.add_step("build", exp.build) -exp.add_step("start", exp.start_runs) -exp.add_fetcher(name="fetch") -exp.add_comparison_table_step(attributes=ATTRIBUTES) -exp.add_parse_again_step() - -exp.run_steps() - diff --git a/experiments/issue999/common_setup.py b/experiments/issue999/common_setup.py deleted file mode 100644 index f2bbda8569..0000000000 --- a/experiments/issue999/common_setup.py +++ /dev/null @@ -1,398 +0,0 @@ -# -*- coding: utf-8 -*- - -import itertools -import os -import platform -import subprocess -import sys - -from lab.experiment import ARGPARSER -from lab import tools - -from downward.experiment import FastDownwardExperiment -from downward.reports.absolute import AbsoluteReport -from downward.reports.compare import ComparativeReport -from downward.reports.scatter import ScatterPlotReport - - -def parse_args(): - ARGPARSER.add_argument( - "--test", - choices=["yes", "no", "auto"], - default="auto", - dest="test_run", - help="test experiment locally on a small suite if --test=yes or " - "--test=auto and we are not on a cluster") - return ARGPARSER.parse_args() - -ARGS = parse_args() - - -DEFAULT_OPTIMAL_SUITE = [ - 'agricola-opt18-strips', 'airport', 'barman-opt11-strips', - 'barman-opt14-strips', 'blocks', 'childsnack-opt14-strips', - 'data-network-opt18-strips', 'depot', 'driverlog', - 'elevators-opt08-strips', 'elevators-opt11-strips', - 'floortile-opt11-strips', 'floortile-opt14-strips', 'freecell', - 'ged-opt14-strips', 'grid', 'gripper', 'hiking-opt14-strips', - 'logistics00', 'logistics98', 'miconic', 'movie', 'mprime', - 'mystery', 'nomystery-opt11-strips', 'openstacks-opt08-strips', - 'openstacks-opt11-strips', 'openstacks-opt14-strips', - 'openstacks-strips', 
'organic-synthesis-opt18-strips', - 'organic-synthesis-split-opt18-strips', 'parcprinter-08-strips', - 'parcprinter-opt11-strips', 'parking-opt11-strips', - 'parking-opt14-strips', 'pathways-noneg', 'pegsol-08-strips', - 'pegsol-opt11-strips', 'petri-net-alignment-opt18-strips', - 'pipesworld-notankage', 'pipesworld-tankage', 'psr-small', 'rovers', - 'satellite', 'scanalyzer-08-strips', 'scanalyzer-opt11-strips', - 'snake-opt18-strips', 'sokoban-opt08-strips', - 'sokoban-opt11-strips', 'spider-opt18-strips', 'storage', - 'termes-opt18-strips', 'tetris-opt14-strips', - 'tidybot-opt11-strips', 'tidybot-opt14-strips', 'tpp', - 'transport-opt08-strips', 'transport-opt11-strips', - 'transport-opt14-strips', 'trucks-strips', 'visitall-opt11-strips', - 'visitall-opt14-strips', 'woodworking-opt08-strips', - 'woodworking-opt11-strips', 'zenotravel'] - -DEFAULT_SATISFICING_SUITE = [ - 'agricola-sat18-strips', 'airport', 'assembly', - 'barman-sat11-strips', 'barman-sat14-strips', 'blocks', - 'caldera-sat18-adl', 'caldera-split-sat18-adl', 'cavediving-14-adl', - 'childsnack-sat14-strips', 'citycar-sat14-adl', - 'data-network-sat18-strips', 'depot', 'driverlog', - 'elevators-sat08-strips', 'elevators-sat11-strips', - 'flashfill-sat18-adl', 'floortile-sat11-strips', - 'floortile-sat14-strips', 'freecell', 'ged-sat14-strips', 'grid', - 'gripper', 'hiking-sat14-strips', 'logistics00', 'logistics98', - 'maintenance-sat14-adl', 'miconic', 'miconic-fulladl', - 'miconic-simpleadl', 'movie', 'mprime', 'mystery', - 'nomystery-sat11-strips', 'nurikabe-sat18-adl', 'openstacks', - 'openstacks-sat08-adl', 'openstacks-sat08-strips', - 'openstacks-sat11-strips', 'openstacks-sat14-strips', - 'openstacks-strips', 'optical-telegraphs', - 'organic-synthesis-sat18-strips', - 'organic-synthesis-split-sat18-strips', 'parcprinter-08-strips', - 'parcprinter-sat11-strips', 'parking-sat11-strips', - 'parking-sat14-strips', 'pathways', 'pathways-noneg', - 'pegsol-08-strips', 'pegsol-sat11-strips', 
'philosophers', - 'pipesworld-notankage', 'pipesworld-tankage', 'psr-large', - 'psr-middle', 'psr-small', 'rovers', 'satellite', - 'scanalyzer-08-strips', 'scanalyzer-sat11-strips', 'schedule', - 'settlers-sat18-adl', 'snake-sat18-strips', 'sokoban-sat08-strips', - 'sokoban-sat11-strips', 'spider-sat18-strips', 'storage', - 'termes-sat18-strips', 'tetris-sat14-strips', - 'thoughtful-sat14-strips', 'tidybot-sat11-strips', 'tpp', - 'transport-sat08-strips', 'transport-sat11-strips', - 'transport-sat14-strips', 'trucks', 'trucks-strips', - 'visitall-sat11-strips', 'visitall-sat14-strips', - 'woodworking-sat08-strips', 'woodworking-sat11-strips', - 'zenotravel'] - - -def get_script(): - """Get file name of main script.""" - return tools.get_script_path() - - -def get_script_dir(): - """Get directory of main script. - - Usually a relative directory (depends on how it was called by the user.)""" - return os.path.dirname(get_script()) - - -def get_experiment_name(): - """Get name for experiment. - - Derived from the absolute filename of the main script, e.g. - "/ham/spam/eggs.py" => "spam-eggs".""" - script = os.path.abspath(get_script()) - script_dir = os.path.basename(os.path.dirname(script)) - script_base = os.path.splitext(os.path.basename(script))[0] - return "%s-%s" % (script_dir, script_base) - - -def get_data_dir(): - """Get data dir for the experiment. - - This is the subdirectory "data" of the directory containing - the main script.""" - return os.path.join(get_script_dir(), "data", get_experiment_name()) - - -def get_repo_base(): - """Get base directory of the repository, as an absolute path. - - Search upwards in the directory tree from the main script until a - directory with a subdirectory named ".git" is found. 
- - Abort if the repo base cannot be found.""" - path = os.path.abspath(get_script_dir()) - while os.path.dirname(path) != path: - if os.path.exists(os.path.join(path, ".git")): - return path - path = os.path.dirname(path) - sys.exit("repo base could not be found") - - -def is_running_on_cluster(): - node = platform.node() - return node.endswith(".scicore.unibas.ch") or node.endswith(".cluster.bc2.ch") - - -def is_test_run(): - return ARGS.test_run == "yes" or ( - ARGS.test_run == "auto" and not is_running_on_cluster()) - - -def get_algo_nick(revision, config_nick): - return "{revision}-{config_nick}".format(**locals()) - - -class IssueConfig(object): - """Hold information about a planner configuration. - - See FastDownwardExperiment.add_algorithm() for documentation of the - constructor's options. - - """ - def __init__(self, nick, component_options, - build_options=None, driver_options=None): - self.nick = nick - self.component_options = component_options - self.build_options = build_options - self.driver_options = driver_options - - -class IssueExperiment(FastDownwardExperiment): - """Subclass of FastDownwardExperiment with some convenience features.""" - - DEFAULT_TEST_SUITE = ["depot:p01.pddl", "gripper:prob01.pddl"] - - DEFAULT_TABLE_ATTRIBUTES = [ - "cost", - "coverage", - "error", - "evaluations", - "expansions", - "expansions_until_last_jump", - "initial_h_value", - "generated", - "memory", - "planner_memory", - "planner_time", - "quality", - "run_dir", - "score_evaluations", - "score_expansions", - "score_generated", - "score_memory", - "score_search_time", - "score_total_time", - "search_time", - "total_time", - ] - - DEFAULT_SCATTER_PLOT_ATTRIBUTES = [ - "evaluations", - "expansions", - "expansions_until_last_jump", - "initial_h_value", - "memory", - "search_time", - "total_time", - ] - - PORTFOLIO_ATTRIBUTES = [ - "cost", - "coverage", - "error", - "plan_length", - "run_dir", - ] - - def __init__(self, revisions=None, configs=None, path=None, 
**kwargs): - """ - - You can either specify both *revisions* and *configs* or none - of them. If they are omitted, you will need to call - exp.add_algorithm() manually. - - If *revisions* is given, it must be a non-empty list of - revision identifiers, which specify which planner versions to - use in the experiment. The same versions are used for - translator, preprocessor and search. :: - - IssueExperiment(revisions=["issue123", "4b3d581643"], ...) - - If *configs* is given, it must be a non-empty list of - IssueConfig objects. :: - - IssueExperiment(..., configs=[ - IssueConfig("ff", ["--search", "eager_greedy(ff())"]), - IssueConfig( - "lama", [], - driver_options=["--alias", "seq-sat-lama-2011"]), - ]) - - If *path* is specified, it must be the path to where the - experiment should be built (e.g. - /home/john/experiments/issue123/exp01/). If omitted, the - experiment path is derived automatically from the main - script's filename. Example:: - - script = experiments/issue123/exp01.py --> - path = experiments/issue123/data/issue123-exp01/ - - """ - - path = path or get_data_dir() - - FastDownwardExperiment.__init__(self, path=path, **kwargs) - - if (revisions and not configs) or (not revisions and configs): - raise ValueError( - "please provide either both or none of revisions and configs") - - for rev in revisions: - for config in configs: - self.add_algorithm( - get_algo_nick(rev, config.nick), - get_repo_base(), - rev, - config.component_options, - build_options=config.build_options, - driver_options=config.driver_options) - - self._revisions = revisions - self._configs = configs - - @classmethod - def _is_portfolio(cls, config_nick): - return "fdss" in config_nick - - @classmethod - def get_supported_attributes(cls, config_nick, attributes): - if cls._is_portfolio(config_nick): - return [attr for attr in attributes - if attr in cls.PORTFOLIO_ATTRIBUTES] - return attributes - - def add_absolute_report_step(self, **kwargs): - """Add step that makes an absolute 
report. - - Absolute reports are useful for experiments that don't compare - revisions. - - The report is written to the experiment evaluation directory. - - All *kwargs* will be passed to the AbsoluteReport class. If the - keyword argument *attributes* is not specified, a default list - of attributes is used. :: - - exp.add_absolute_report_step(attributes=["coverage"]) - - """ - kwargs.setdefault("attributes", self.DEFAULT_TABLE_ATTRIBUTES) - report = AbsoluteReport(**kwargs) - outfile = os.path.join( - self.eval_dir, - get_experiment_name() + "." + report.output_format) - self.add_report(report, outfile=outfile) - self.add_step( - 'publish-absolute-report', subprocess.call, ['publish', outfile]) - - def add_comparison_table_step(self, **kwargs): - """Add a step that makes pairwise revision comparisons. - - Create comparative reports for all pairs of Fast Downward - revisions. Each report pairs up the runs of the same config and - lists the two absolute attribute values and their difference - for all attributes in kwargs["attributes"]. - - All *kwargs* will be passed to the CompareConfigsReport class. - If the keyword argument *attributes* is not specified, a - default list of attributes is used. 
:: - - exp.add_comparison_table_step(attributes=["coverage"]) - - """ - kwargs.setdefault("attributes", self.DEFAULT_TABLE_ATTRIBUTES) - - def make_comparison_tables(): - for rev1, rev2 in itertools.combinations(self._revisions, 2): - compared_configs = [] - for config in self._configs: - config_nick = config.nick - compared_configs.append( - ("%s-%s" % (rev1, config_nick), - "%s-%s" % (rev2, config_nick), - "Diff (%s)" % config_nick)) - report = ComparativeReport(compared_configs, **kwargs) - outfile = os.path.join( - self.eval_dir, - "%s-%s-%s-compare.%s" % ( - self.name, rev1, rev2, report.output_format)) - report(self.eval_dir, outfile) - - def publish_comparison_tables(): - for rev1, rev2 in itertools.combinations(self._revisions, 2): - outfile = os.path.join( - self.eval_dir, - "%s-%s-%s-compare.html" % (self.name, rev1, rev2)) - subprocess.call(["publish", outfile]) - - self.add_step("make-comparison-tables", make_comparison_tables) - self.add_step( - "publish-comparison-tables", publish_comparison_tables) - - def add_scatter_plot_step(self, relative=False, attributes=None, additional=[]): - """Add step creating (relative) scatter plots for all revision pairs. - - Create a scatter plot for each combination of attribute, - configuration and revisions pair. If *attributes* is not - specified, a list of common scatter plot attributes is used. - For portfolios all attributes except "cost", "coverage" and - "plan_length" will be ignored. 
:: - - exp.add_scatter_plot_step(attributes=["expansions"]) - - """ - if relative: - scatter_dir = os.path.join(self.eval_dir, "scatter-relative") - step_name = "make-relative-scatter-plots" - else: - scatter_dir = os.path.join(self.eval_dir, "scatter-absolute") - step_name = "make-absolute-scatter-plots" - if attributes is None: - attributes = self.DEFAULT_SCATTER_PLOT_ATTRIBUTES - - def make_scatter_plot(config_nick, rev1, rev2, attribute, config_nick2=None): - name = "-".join([self.name, rev1, rev2, attribute, config_nick]) - if config_nick2 is not None: - name += "-" + config_nick2 - print("Make scatter plot for", name) - algo1 = get_algo_nick(rev1, config_nick) - algo2 = get_algo_nick(rev2, config_nick if config_nick2 is None else config_nick2) - report = ScatterPlotReport( - filter_algorithm=[algo1, algo2], - attributes=[attribute], - relative=relative, - get_category=lambda run1, run2: run1["domain"]) - report( - self.eval_dir, - os.path.join(scatter_dir, rev1 + "-" + rev2, name)) - - def make_scatter_plots(): - for config in self._configs: - print(config) - for rev1, rev2 in itertools.combinations(self._revisions, 2): - print(rev1, rev2) - for attribute in self.get_supported_attributes( - config.nick, attributes): - print(attribute) - make_scatter_plot(config.nick, rev1, rev2, attribute) - for nick1, nick2, rev1, rev2, attribute in additional: - make_scatter_plot(nick1, rev1, rev2, attribute, config_nick2=nick2) - - self.add_step(step_name, make_scatter_plots) diff --git a/experiments/issue999/landmark_parser.py b/experiments/issue999/landmark_parser.py deleted file mode 100755 index 943492471b..0000000000 --- a/experiments/issue999/landmark_parser.py +++ /dev/null @@ -1,29 +0,0 @@ -#! 
/usr/bin/env python - -import re - -from lab.parser import Parser - -parser = Parser() -parser.add_pattern( - "lmgraph_generation_time", - r"Landmark graph generation time: (.+)s", - type=float) -parser.add_pattern( - "landmarks", - r"Landmark graph contains (\d+) landmarks, of which \d+ are disjunctive and \d+ are conjunctive.", - type=int) -parser.add_pattern( - "landmarks_disjunctive", - r"Landmark graph contains \d+ landmarks, of which (\d+) are disjunctive and \d+ are conjunctive.", - type=int) -parser.add_pattern( - "landmarks_conjunctive", - r"Landmark graph contains \d+ landmarks, of which \d+ are disjunctive and (\d+) are conjunctive.", - type=int) -parser.add_pattern( - "orderings", - r"Landmark graph contains (\d+) orderings.", - type=int) - -parser.parse() diff --git a/experiments/issue999/optimal-v5.py b/experiments/issue999/optimal-v5.py deleted file mode 100755 index 51f79af9d4..0000000000 --- a/experiments/issue999/optimal-v5.py +++ /dev/null @@ -1,86 +0,0 @@ -#! /usr/bin/env python -# -*- coding: utf-8 -*- - - -import os - -from lab.environments import LocalEnvironment, BaselSlurmEnvironment -from lab.reports import Attribute - -import common_setup -from common_setup import IssueConfig, IssueExperiment - -ATTRIBUTES = IssueExperiment.DEFAULT_TABLE_ATTRIBUTES + [ - Attribute("landmarks", min_wins=False), - Attribute("landmarks_disjunctive", min_wins=False), - Attribute("landmarks_conjunctive", min_wins=False), - Attribute("orderings", min_wins=False), - Attribute("lmgraph_generation_time"), -] - -def make_comparison_table(): - report = common_setup.ComparativeReport( - algorithm_pairs=[ - ("issue999-base-seq-opt-bjolp", "issue999-v5-seq-opt-bjolp"), - ("issue999-base-lm_exhaust", "issue999-v5-lm_exhaust"), - ("issue999-base-lm_hm", "issue999-v5-lm_hm"), - ("issue999-base-seq-opt-bjolp-optimal", "issue999-v5-seq-opt-bjolp-optimal"), - ], attributes=ATTRIBUTES, - ) - outfile = os.path.join( - exp.eval_dir, "%s-compare.%s" % (exp.name, 
report.output_format) - ) - report(exp.eval_dir, outfile) - - exp.add_report(report) - - -DIR = os.path.dirname(os.path.abspath(__file__)) -SCRIPT_NAME = os.path.splitext(os.path.basename(__file__))[0] -BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"] -REVISIONS = ["issue999-base", "issue999-v5"] - -CONFIGS = [ - IssueConfig("seq-opt-bjolp", [], - driver_options=["--alias", "seq-opt-bjolp"]), - IssueConfig("lm_exhaust", ["--evaluator", "lmc=lmcount(lm_exhaust(),admissible=true)", - "--search", "astar(lmc,lazy_evaluator=lmc)"]), - IssueConfig("lm_hm", ["--evaluator", "lmc=lmcount(lm_hm(m=2),admissible=true)", - "--search", "astar(lmc,lazy_evaluator=lmc)"]), - IssueConfig("seq-opt-bjolp-optimal", ["--evaluator", - "lmc=lmcount(lm_merged([lm_rhw(),lm_hm(m=1)]),admissible=true,optimal=true)", - "--search", "astar(lmc,lazy_evaluator=lmc)"]), -] - -SUITE = common_setup.DEFAULT_OPTIMAL_SUITE -ENVIRONMENT = BaselSlurmEnvironment( - partition="infai_2", - email="clemens.buechner@unibas.ch", - export=["PATH", "DOWNWARD_BENCHMARKS"], -) - -if common_setup.is_test_run(): - SUITE = IssueExperiment.DEFAULT_TEST_SUITE - ENVIRONMENT = LocalEnvironment(processes=2) - -exp = common_setup.IssueExperiment( - revisions=REVISIONS, - configs=CONFIGS, - environment=ENVIRONMENT, -) - -exp.add_suite(BENCHMARKS_DIR, SUITE) - -exp.add_parser(exp.EXITCODE_PARSER) -exp.add_parser(exp.PLANNER_PARSER) -exp.add_parser(exp.SINGLE_SEARCH_PARSER) -exp.add_parser("landmark_parser.py") - -exp.add_step("build", exp.build) -exp.add_step("start", exp.start_runs) -exp.add_fetcher(name="fetch") -exp.add_step("comparison table", make_comparison_table) -#exp.add_parse_again_step() - -exp.run_steps() - diff --git a/experiments/issue999/optimal-v6.py b/experiments/issue999/optimal-v6.py deleted file mode 100755 index 2b07ddba3a..0000000000 --- a/experiments/issue999/optimal-v6.py +++ /dev/null @@ -1,86 +0,0 @@ -#! 
/usr/bin/env python -# -*- coding: utf-8 -*- - - -import os - -from lab.environments import LocalEnvironment, BaselSlurmEnvironment -from lab.reports import Attribute - -import common_setup -from common_setup import IssueConfig, IssueExperiment - -ATTRIBUTES = IssueExperiment.DEFAULT_TABLE_ATTRIBUTES + [ - Attribute("landmarks", min_wins=False), - Attribute("landmarks_disjunctive", min_wins=False), - Attribute("landmarks_conjunctive", min_wins=False), - Attribute("orderings", min_wins=False), - Attribute("lmgraph_generation_time"), -] - -def make_comparison_table(): - report = common_setup.ComparativeReport( - algorithm_pairs=[ - ("issue999-base-seq-opt-bjolp", "issue999-v6-seq-opt-bjolp"), - ("issue999-base-lm_exhaust", "issue999-v6-lm_exhaust"), - ("issue999-base-lm_hm", "issue999-v6-lm_hm"), - ("issue999-base-seq-opt-bjolp-optimal", "issue999-v6-seq-opt-bjolp-optimal"), - ], attributes=ATTRIBUTES, - ) - outfile = os.path.join( - exp.eval_dir, "%s-compare.%s" % (exp.name, report.output_format) - ) - report(exp.eval_dir, outfile) - - exp.add_report(report) - - -DIR = os.path.dirname(os.path.abspath(__file__)) -SCRIPT_NAME = os.path.splitext(os.path.basename(__file__))[0] -BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"] -REVISIONS = ["issue999-base", "issue999-v6"] - -CONFIGS = [ - IssueConfig("seq-opt-bjolp", [], - driver_options=["--alias", "seq-opt-bjolp"]), - IssueConfig("lm_exhaust", ["--evaluator", "lmc=lmcount(lm_exhaust(),admissible=true)", - "--search", "astar(lmc,lazy_evaluator=lmc)"]), - IssueConfig("lm_hm", ["--evaluator", "lmc=lmcount(lm_hm(m=2),admissible=true)", - "--search", "astar(lmc,lazy_evaluator=lmc)"]), - IssueConfig("seq-opt-bjolp-optimal", ["--evaluator", - "lmc=lmcount(lm_merged([lm_rhw(),lm_hm(m=1)]),admissible=true,optimal=true)", - "--search", "astar(lmc,lazy_evaluator=lmc)"]), -] - -SUITE = common_setup.DEFAULT_OPTIMAL_SUITE -ENVIRONMENT = BaselSlurmEnvironment( - partition="infai_2", - email="tho.keller@unibas.ch", - export=["PATH", 
"DOWNWARD_BENCHMARKS"], -) - -if common_setup.is_test_run(): - SUITE = IssueExperiment.DEFAULT_TEST_SUITE - ENVIRONMENT = LocalEnvironment(processes=2) - -exp = common_setup.IssueExperiment( - revisions=REVISIONS, - configs=CONFIGS, - environment=ENVIRONMENT, -) - -exp.add_suite(BENCHMARKS_DIR, SUITE) - -exp.add_parser(exp.EXITCODE_PARSER) -exp.add_parser(exp.PLANNER_PARSER) -exp.add_parser(exp.SINGLE_SEARCH_PARSER) -exp.add_parser("landmark_parser.py") - -exp.add_step("build", exp.build) -exp.add_step("start", exp.start_runs) -exp.add_fetcher(name="fetch") -exp.add_step("comparison table", make_comparison_table) -#exp.add_parse_again_step() - -exp.run_steps() - diff --git a/experiments/issue999/optimal.py b/experiments/issue999/optimal.py deleted file mode 100755 index 1b83370933..0000000000 --- a/experiments/issue999/optimal.py +++ /dev/null @@ -1,77 +0,0 @@ -#! /usr/bin/env python -# -*- coding: utf-8 -*- - - -import os - -from lab.environments import LocalEnvironment, BaselSlurmEnvironment -from lab.reports import Attribute - -import common_setup -from common_setup import IssueConfig, IssueExperiment - -ATTRIBUTES = IssueExperiment.DEFAULT_TABLE_ATTRIBUTES + [ - Attribute("landmarks", min_wins=False), - Attribute("landmarks_disjunctive", min_wins=False), - Attribute("landmarks_conjunctive", min_wins=False), - Attribute("orderings", min_wins=False), - Attribute("lmgraph_generation_time"), -] - -def make_comparison_table(): - report = common_setup.ComparativeReport( - algorithm_pairs=[ - ("issue999-base-seq-opt-bjolp", "issue999-v1-seq-opt-bjolp"), - ("issue999-base-seq-opt-bjolp", "issue999-v4-seq-opt-bjolp"), - ], attributes=ATTRIBUTES, - ) - outfile = os.path.join( - exp.eval_dir, "%s-compare.%s" % (exp.name, report.output_format) - ) - report(exp.eval_dir, outfile) - - exp.add_report(report) - - -DIR = os.path.dirname(os.path.abspath(__file__)) -SCRIPT_NAME = os.path.splitext(os.path.basename(__file__))[0] -BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"] 
-REVISIONS = ["issue999-base", "issue999-v1", "issue999-v4"] - -CONFIGS = [ - IssueConfig("seq-opt-bjolp", [], - driver_options=["--alias", "seq-opt-bjolp"]), -] - -SUITE = common_setup.DEFAULT_OPTIMAL_SUITE -ENVIRONMENT = BaselSlurmEnvironment( - partition="infai_2", - email="tho.keller@unibas.ch", - export=["PATH", "DOWNWARD_BENCHMARKS"], -) - -if common_setup.is_test_run(): - SUITE = IssueExperiment.DEFAULT_TEST_SUITE - ENVIRONMENT = LocalEnvironment(processes=2) - -exp = common_setup.IssueExperiment( - revisions=REVISIONS, - configs=CONFIGS, - environment=ENVIRONMENT, -) - -exp.add_suite(BENCHMARKS_DIR, SUITE) - -exp.add_parser(exp.EXITCODE_PARSER) -exp.add_parser(exp.PLANNER_PARSER) -exp.add_parser(exp.SINGLE_SEARCH_PARSER) -exp.add_parser("landmark_parser.py") - -exp.add_step("build", exp.build) -exp.add_step("start", exp.start_runs) -exp.add_fetcher(name="fetch") -exp.add_step("comparison table", make_comparison_table) -#exp.add_parse_again_step() - -exp.run_steps() - diff --git a/experiments/issue999/satisficing-v5.py b/experiments/issue999/satisficing-v5.py deleted file mode 100755 index 6e84c73e89..0000000000 --- a/experiments/issue999/satisficing-v5.py +++ /dev/null @@ -1,79 +0,0 @@ -#! 
/usr/bin/env python -# -*- coding: utf-8 -*- - - -import os - -from lab.environments import LocalEnvironment, BaselSlurmEnvironment -from lab.reports import Attribute - -import common_setup -from common_setup import IssueConfig, IssueExperiment - -ATTRIBUTES = IssueExperiment.DEFAULT_TABLE_ATTRIBUTES + [ - Attribute("landmarks", min_wins=False), - Attribute("landmarks_disjunctive", min_wins=False), - Attribute("landmarks_conjunctive", min_wins=False), - Attribute("orderings", min_wins=False), - Attribute("lmgraph_generation_time"), -] - -def make_comparison_table(): - report = common_setup.ComparativeReport( - algorithm_pairs=[ - ("issue999-base-lama-first", "issue999-v5-lama-first"), - ("issue999-base-lm_zg", "issue999-v5-lm_zg"), - ], attributes=ATTRIBUTES, - ) - outfile = os.path.join( - exp.eval_dir, "%s-compare.%s" % (exp.name, report.output_format) - ) - report(exp.eval_dir, outfile) - - exp.add_report(report) - - -DIR = os.path.dirname(os.path.abspath(__file__)) -SCRIPT_NAME = os.path.splitext(os.path.basename(__file__))[0] -BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"] -REVISIONS = ["issue999-base", "issue999-v5"] - -CONFIGS = [ - IssueConfig("lama-first", [], - driver_options=["--alias", "lama-first"]), - IssueConfig("lm_zg", ["--search", "eager_greedy([lmcount(lm_zg())])"]), -] - -SUITE = common_setup.DEFAULT_SATISFICING_SUITE -ENVIRONMENT = BaselSlurmEnvironment( - partition="infai_2", - email="tho.keller@unibas.ch", - export=["PATH", "DOWNWARD_BENCHMARKS"], -) - -if common_setup.is_test_run(): - SUITE = IssueExperiment.DEFAULT_TEST_SUITE - ENVIRONMENT = LocalEnvironment(processes=2) - -exp = common_setup.IssueExperiment( - revisions=REVISIONS, - configs=CONFIGS, - environment=ENVIRONMENT, -) - -exp.add_suite(BENCHMARKS_DIR, SUITE) - -exp.add_parser(exp.ANYTIME_SEARCH_PARSER) -exp.add_parser(exp.EXITCODE_PARSER) -exp.add_parser(exp.PLANNER_PARSER) -exp.add_parser(exp.SINGLE_SEARCH_PARSER) -exp.add_parser("landmark_parser.py") - 
-exp.add_step("build", exp.build) -exp.add_step("start", exp.start_runs) -exp.add_fetcher(name="fetch") -exp.add_step("comparison table", make_comparison_table) -#exp.add_parse_again_step() - -exp.run_steps() - diff --git a/experiments/issue999/satisficing-v6.py b/experiments/issue999/satisficing-v6.py deleted file mode 100755 index 24aa0d035d..0000000000 --- a/experiments/issue999/satisficing-v6.py +++ /dev/null @@ -1,79 +0,0 @@ -#! /usr/bin/env python -# -*- coding: utf-8 -*- - - -import os - -from lab.environments import LocalEnvironment, BaselSlurmEnvironment -from lab.reports import Attribute - -import common_setup -from common_setup import IssueConfig, IssueExperiment - -ATTRIBUTES = IssueExperiment.DEFAULT_TABLE_ATTRIBUTES + [ - Attribute("landmarks", min_wins=False), - Attribute("landmarks_disjunctive", min_wins=False), - Attribute("landmarks_conjunctive", min_wins=False), - Attribute("orderings", min_wins=False), - Attribute("lmgraph_generation_time"), -] - -def make_comparison_table(): - report = common_setup.ComparativeReport( - algorithm_pairs=[ - ("issue999-base-lama-first", "issue999-v6-lama-first"), - ("issue999-base-lm_zg", "issue999-v6-lm_zg"), - ], attributes=ATTRIBUTES, - ) - outfile = os.path.join( - exp.eval_dir, "%s-compare.%s" % (exp.name, report.output_format) - ) - report(exp.eval_dir, outfile) - - exp.add_report(report) - - -DIR = os.path.dirname(os.path.abspath(__file__)) -SCRIPT_NAME = os.path.splitext(os.path.basename(__file__))[0] -BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"] -REVISIONS = ["issue999-base", "issue999-v6"] - -CONFIGS = [ - IssueConfig("lama-first", [], - driver_options=["--alias", "lama-first"]), - IssueConfig("lm_zg", ["--search", "eager_greedy([lmcount(lm_zg())])"]), -] - -SUITE = common_setup.DEFAULT_SATISFICING_SUITE -ENVIRONMENT = BaselSlurmEnvironment( - partition="infai_2", - email="tho.keller@unibas.ch", - export=["PATH", "DOWNWARD_BENCHMARKS"], -) - -if common_setup.is_test_run(): - SUITE = 
IssueExperiment.DEFAULT_TEST_SUITE - ENVIRONMENT = LocalEnvironment(processes=2) - -exp = common_setup.IssueExperiment( - revisions=REVISIONS, - configs=CONFIGS, - environment=ENVIRONMENT, -) - -exp.add_suite(BENCHMARKS_DIR, SUITE) - -exp.add_parser(exp.ANYTIME_SEARCH_PARSER) -exp.add_parser(exp.EXITCODE_PARSER) -exp.add_parser(exp.PLANNER_PARSER) -exp.add_parser(exp.SINGLE_SEARCH_PARSER) -exp.add_parser("landmark_parser.py") - -exp.add_step("build", exp.build) -exp.add_step("start", exp.start_runs) -exp.add_fetcher(name="fetch") -exp.add_step("comparison table", make_comparison_table) -#exp.add_parse_again_step() - -exp.run_steps() - diff --git a/experiments/issue999/satisficing.py b/experiments/issue999/satisficing.py deleted file mode 100755 index 755ee02e5c..0000000000 --- a/experiments/issue999/satisficing.py +++ /dev/null @@ -1,78 +0,0 @@ -#! /usr/bin/env python -# -*- coding: utf-8 -*- - - -import os - -from lab.environments import LocalEnvironment, BaselSlurmEnvironment -from lab.reports import Attribute - -import common_setup -from common_setup import IssueConfig, IssueExperiment - -ATTRIBUTES = IssueExperiment.DEFAULT_TABLE_ATTRIBUTES + [ - Attribute("landmarks", min_wins=False), - Attribute("landmarks_disjunctive", min_wins=False), - Attribute("landmarks_conjunctive", min_wins=False), - Attribute("orderings", min_wins=False), - Attribute("lmgraph_generation_time"), -] - -def make_comparison_table(): - report = common_setup.ComparativeReport( - algorithm_pairs=[ - ("issue999-base-lama-first", "issue999-v1-lama-first"), - ("issue999-base-lama-first", "issue999-v4-lama-first"), - ], attributes=ATTRIBUTES, - ) - outfile = os.path.join( - exp.eval_dir, "%s-compare.%s" % (exp.name, report.output_format) - ) - report(exp.eval_dir, outfile) - - exp.add_report(report) - - -DIR = os.path.dirname(os.path.abspath(__file__)) -SCRIPT_NAME = os.path.splitext(os.path.basename(__file__))[0] -BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"] -REVISIONS = 
["issue999-base", "issue999-v1", "issue999-v4"] - -CONFIGS = [ - IssueConfig("lama-first", [], - driver_options=["--alias", "lama-first"]), -] - -SUITE = common_setup.DEFAULT_SATISFICING_SUITE -ENVIRONMENT = BaselSlurmEnvironment( - partition="infai_2", - email="tho.keller@unibas.ch", - export=["PATH", "DOWNWARD_BENCHMARKS"], -) - -if common_setup.is_test_run(): - SUITE = IssueExperiment.DEFAULT_TEST_SUITE - ENVIRONMENT = LocalEnvironment(processes=2) - -exp = common_setup.IssueExperiment( - revisions=REVISIONS, - configs=CONFIGS, - environment=ENVIRONMENT, -) - -exp.add_suite(BENCHMARKS_DIR, SUITE) - -exp.add_parser(exp.ANYTIME_SEARCH_PARSER) -exp.add_parser(exp.EXITCODE_PARSER) -exp.add_parser(exp.PLANNER_PARSER) -exp.add_parser(exp.SINGLE_SEARCH_PARSER) -exp.add_parser("landmark_parser.py") - -exp.add_step("build", exp.build) -exp.add_step("start", exp.start_runs) -exp.add_fetcher(name="fetch") -exp.add_step("comparison table", make_comparison_table) -#exp.add_parse_again_step() - -exp.run_steps() -