From f75018fd06a3ad476ffff794fa92edabd578e8ab Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Salom=C3=A9=20Eriksson?=
Date: Fri, 19 Jul 2024 12:26:48 +0200
Subject: [PATCH] [issue454] Move computation of negated axioms to search component

Negated axioms are now only computed for heuristics that need them. Specifically, these are:
- additive heuristic
- context-enhanced additive heuristic
- causal graph heuristic
- FF heuristic
- max heuristic
- landmark heuristics (landmark sum and landmark cost partitioning)

Each heuristic computes the negated axioms separately. This can negatively affect performance if a search with several such heuristics is performed, as is for example done in LAMA (ff and landmark sum). Heuristics that support axioms but do not need negated axioms might now be faster on tasks with axioms, since they avoid the unnecessary computation.

A new enum option "axioms" has been added to all heuristics that need negated axioms to determine how the negated axioms are computed.
- approximate_negative_cycles (default): Computes exact negation except for derived variables that have cyclic dependencies, which are trivially overapproximated (see issue453). This is the old behavior that can lead to an exponential explosion.
- approximate_negative: Trivially overapproximates all negations. This avoids the exponential explosion but makes the heuristic weaker.
---
 src/search/CMakeLists.txt | 20 +-
 src/search/axioms.cc | 33 +-
 .../cartesian_abstractions/split_selector.cc | 3 +-
 .../subtask_generators.cc | 3 +-
 src/search/heuristics/additive_heuristic.cc | 8 +-
 src/search/heuristics/additive_heuristic.h | 1 +
 src/search/heuristics/cea_heuristic.cc | 7 +-
 src/search/heuristics/cea_heuristic.h | 2 +
 src/search/heuristics/cg_heuristic.cc | 9 +-
 src/search/heuristics/cg_heuristic.h | 2 +
 src/search/heuristics/ff_heuristic.cc | 8 +-
 src/search/heuristics/ff_heuristic.h | 1 +
 src/search/heuristics/max_heuristic.cc | 9 +-
 src/search/heuristics/max_heuristic.h | 1 +
 src/search/heuristics/relaxation_heuristic.cc | 20 +-
 src/search/heuristics/relaxation_heuristic.h | 9 +-
 .../landmark_cost_partitioning_heuristic.cc | 3 +-
 .../landmark_cost_partitioning_heuristic.h | 1 +
 src/search/landmarks/landmark_heuristic.cc | 27 +-
 src/search/landmarks/landmark_heuristic.h | 8 +-
 .../landmarks/landmark_sum_heuristic.cc | 4 +-
 src/search/landmarks/landmark_sum_heuristic.h | 1 +
 src/search/tasks/default_value_axioms_task.cc | 429 ++++++++++++++++++
 src/search/tasks/default_value_axioms_task.h | 94 ++++
 src/translate/axiom_rules.py | 171 ++-----
 src/translate/translate.py | 20 +-
 26 files changed, 698 insertions(+), 196 deletions(-)
 create mode 100644 src/search/tasks/default_value_axioms_task.cc
 create mode 100644 src/search/tasks/default_value_axioms_task.h

diff --git a/src/search/CMakeLists.txt b/src/search/CMakeLists.txt
index 6adeaa7675..393c2b2ed9 100644
--- a/src/search/CMakeLists.txt
+++ b/src/search/CMakeLists.txt
@@ -577,6 +577,8 @@ create_fast_downward_library( SOURCES heuristics/array_pool heuristics/relaxation_heuristic + DEPENDS + default_value_axioms_task DEPENDENCY_ONLY )
@@ -606,6 +608,7 @@ create_fast_downward_library( SOURCES heuristics/cea_heuristic DEPENDS + default_value_axioms_task domain_transition_graph priority_queues task_properties
@@ -617,6 +620,7 @@ create_fast_downward_library( SOURCES heuristics/cg_heuristic heuristics/cg_cache DEPENDS + default_value_axioms_task domain_transition_graph priority_queues task_properties
@@ -700,6 +704,18 @@ create_fast_downward_library( DEPENDENCY_ONLY )
+create_fast_downward_library( + NAME default_value_axioms_task + HELP "Task transformation adding axioms describing under which circumstances a derived variable is set to its default value." + SOURCES + tasks/default_value_axioms_task + DEPENDS + core_tasks + sccs + task_properties + DEPENDENCY_ONLY +) + create_fast_downward_library( NAME causal_graph HELP "Causal Graph" @@ -846,6 +862,7 @@ create_fast_downward_library( landmarks/landmark_sum_heuristic landmarks/util DEPENDS + default_value_axioms_task lp_solver priority_queues successor_generator @@ -937,8 +954,7 @@ create_fast_downward_library( create_fast_downward_library( NAME sccs - HELP "Algorithm to compute the strongly connected components (SCCs) of a " - "directed graph." + HELP "Algorithm to compute the strongly connected components (SCCs) of a directed graph." SOURCES algorithms/sccs DEPENDENCY_ONLY diff --git a/src/search/axioms.cc b/src/search/axioms.cc index 57f97a2eb6..235234e759 100644 --- a/src/search/axioms.cc +++ b/src/search/axioms.cc @@ -22,35 +22,28 @@ AxiomEvaluator::AxiomEvaluator(const TaskProxy &task_proxy) { axiom_literals.emplace_back(var.get_domain_size()); // Initialize rules - // Since we are skipping some axioms, we cannot access them through - // their id position directly. - vector axiom_id_to_position(axioms.size(), -1); for (OperatorProxy axiom : axioms) { assert(axiom.get_effects().size() == 1); EffectProxy cond_effect = axiom.get_effects()[0]; FactPair effect = cond_effect.get_fact().get_pair(); int num_conditions = cond_effect.get_conditions().size(); - // Ignore axioms which set the variable to its default value. - if (effect.value != variables[effect.var].get_default_axiom_value()) { - AxiomLiteral *eff_literal = &axiom_literals[effect.var][effect.value]; - axiom_id_to_position[axiom.get_id()] = rules.size(); - rules.emplace_back( - num_conditions, effect.var, effect.value, eff_literal); - } + + // We don't allow axioms that set the variable to its default value. + assert(effect.value != variables[effect.var].get_default_axiom_value()); + AxiomLiteral *eff_literal = &axiom_literals[effect.var][effect.value]; + rules.emplace_back( + num_conditions, effect.var, effect.value, eff_literal); } // Cross-reference rules and literals for (OperatorProxy axiom : axioms) { - // Ignore axioms which set the variable to its default value. 
- int position = axiom_id_to_position[axiom.get_id()]; - if (position != -1) { - EffectProxy effect = axiom.get_effects()[0]; - for (FactProxy condition : effect.get_conditions()) { - int var_id = condition.get_variable().get_id(); - int val = condition.get_value(); - AxiomRule *rule = &rules[position]; - axiom_literals[var_id][val].condition_of.push_back(rule); - } + int id = axiom.get_id(); + EffectProxy effect = axiom.get_effects()[0]; + for (FactProxy condition : effect.get_conditions()) { + int var_id = condition.get_variable().get_id(); + int val = condition.get_value(); + AxiomRule *rule = &rules[id]; + axiom_literals[var_id][val].condition_of.push_back(rule); } } diff --git a/src/search/cartesian_abstractions/split_selector.cc b/src/search/cartesian_abstractions/split_selector.cc index ec6a7baa1c..d9a3b1c4fb 100644 --- a/src/search/cartesian_abstractions/split_selector.cc +++ b/src/search/cartesian_abstractions/split_selector.cc @@ -24,7 +24,8 @@ SplitSelector::SplitSelector( if (pick == PickSplit::MIN_HADD || pick == PickSplit::MAX_HADD) { additive_heuristic = utils::make_unique_ptr( - task, false, "h^add within CEGAR abstractions", + tasks::AxiomHandlingType::APPROXIMATE_NEGATIVE, task, + false, "h^add within CEGAR abstractions", utils::Verbosity::SILENT); additive_heuristic->compute_heuristic_for_cegar( task_proxy.get_initial_state()); diff --git a/src/search/cartesian_abstractions/subtask_generators.cc b/src/search/cartesian_abstractions/subtask_generators.cc index 8de4f82fce..ce38b099c6 100644 --- a/src/search/cartesian_abstractions/subtask_generators.cc +++ b/src/search/cartesian_abstractions/subtask_generators.cc @@ -35,7 +35,8 @@ class SortFactsByIncreasingHaddValues { explicit SortFactsByIncreasingHaddValues( const shared_ptr &task) : hadd(utils::make_unique_ptr( - task, false, "h^add within CEGAR abstractions", + tasks::AxiomHandlingType::APPROXIMATE_NEGATIVE, task, + false, "h^add within CEGAR abstractions", utils::Verbosity::SILENT)) { TaskProxy task_proxy(*task); hadd->compute_heuristic_for_cegar(task_proxy.get_initial_state()); diff --git a/src/search/heuristics/additive_heuristic.cc b/src/search/heuristics/additive_heuristic.cc index c86abc432b..aff35d07be 100644 --- a/src/search/heuristics/additive_heuristic.cc +++ b/src/search/heuristics/additive_heuristic.cc @@ -14,10 +14,12 @@ namespace additive_heuristic { const int AdditiveHeuristic::MAX_COST_VALUE; AdditiveHeuristic::AdditiveHeuristic( + tasks::AxiomHandlingType axioms, const shared_ptr &transform, bool cache_estimates, const string &description, utils::Verbosity verbosity) : RelaxationHeuristic( - transform, cache_estimates, description, verbosity), + axioms, transform, cache_estimates, description, + verbosity), did_write_overflow_warning(false) { if (log.is_at_least_normal()) { log << "Initializing additive heuristic..." 
<< endl; @@ -153,7 +155,7 @@ class AdditiveHeuristicFeature AdditiveHeuristicFeature() : TypedFeature("add") { document_title("Additive heuristic"); - add_heuristic_options_to_feature(*this, "add"); + relaxation_heuristic::add_relaxation_heuristic_options_to_feature(*this, "add"); document_language_support("action costs", "supported"); document_language_support("conditional effects", "supported"); @@ -173,7 +175,7 @@ class AdditiveHeuristicFeature const plugins::Options &opts, const utils::Context &) const override { return plugins::make_shared_from_arg_tuples( - get_heuristic_arguments_from_options(opts) + relaxation_heuristic::get_relaxation_heuristic_arguments_from_options(opts) ); } }; diff --git a/src/search/heuristics/additive_heuristic.h b/src/search/heuristics/additive_heuristic.h index ae30961251..ad76e40417 100644 --- a/src/search/heuristics/additive_heuristic.h +++ b/src/search/heuristics/additive_heuristic.h @@ -66,6 +66,7 @@ class AdditiveHeuristic : public relaxation_heuristic::RelaxationHeuristic { int compute_add_and_ff(const State &state); public: AdditiveHeuristic( + tasks::AxiomHandlingType axioms, const std::shared_ptr &transform, bool cache_estimates, const std::string &description, utils::Verbosity verbosity); diff --git a/src/search/heuristics/cea_heuristic.cc b/src/search/heuristics/cea_heuristic.cc index 4233cd4c48..952f905220 100644 --- a/src/search/heuristics/cea_heuristic.cc +++ b/src/search/heuristics/cea_heuristic.cc @@ -409,9 +409,12 @@ int ContextEnhancedAdditiveHeuristic::compute_heuristic( } ContextEnhancedAdditiveHeuristic::ContextEnhancedAdditiveHeuristic( + tasks::AxiomHandlingType axioms, const shared_ptr &transform, bool cache_estimates, const string &description, utils::Verbosity verbosity) - : Heuristic(transform, cache_estimates, description, verbosity), + : Heuristic(tasks::get_default_value_axioms_task_if_needed( + transform, axioms), + cache_estimates, description, verbosity), min_action_cost(task_properties::get_min_operator_cost(task_proxy)) { if (log.is_at_least_normal()) { log << "Initializing context-enhanced additive heuristic..." 
<< endl;
@@ -450,6 +453,7 @@ class ContextEnhancedAdditiveHeuristicFeature ContextEnhancedAdditiveHeuristicFeature() : TypedFeature("cea") { document_title("Context-enhanced additive heuristic"); + tasks::add_axioms_option_to_feature(*this); add_heuristic_options_to_feature(*this, "cea"); document_language_support("action costs", "supported");
@@ -470,6 +474,7 @@ class ContextEnhancedAdditiveHeuristicFeature create_component(const plugins::Options &opts, const utils::Context &) const override { return plugins::make_shared_from_arg_tuples( + tasks::get_axioms_arguments_from_options(opts), get_heuristic_arguments_from_options(opts) ); }
diff --git a/src/search/heuristics/cea_heuristic.h b/src/search/heuristics/cea_heuristic.h
index 5b1547c71f..1774a3f8f8 100644
--- a/src/search/heuristics/cea_heuristic.h
+++ b/src/search/heuristics/cea_heuristic.h
@@ -6,6 +6,7 @@ #include "../heuristic.h" #include "../algorithms/priority_queues.h" +#include "../tasks/default_value_axioms_task.h" #include
@@ -51,6 +52,7 @@ class ContextEnhancedAdditiveHeuristic : public Heuristic { virtual int compute_heuristic(const State &ancestor_state) override; public: ContextEnhancedAdditiveHeuristic( + tasks::AxiomHandlingType axioms, const std::shared_ptr &transform, bool cache_estimates, const std::string &description, utils::Verbosity verbosity);
diff --git a/src/search/heuristics/cg_heuristic.cc b/src/search/heuristics/cg_heuristic.cc
index 5da1623e9b..a8dea593db 100644
--- a/src/search/heuristics/cg_heuristic.cc
+++ b/src/search/heuristics/cg_heuristic.cc
@@ -17,10 +17,13 @@ using namespace domain_transition_graph; namespace cg_heuristic { CGHeuristic::CGHeuristic( - int max_cache_size, const shared_ptr &transform, + int max_cache_size, tasks::AxiomHandlingType axioms, + const shared_ptr &transform, bool cache_estimates, const string &description, utils::Verbosity verbosity) - : Heuristic(transform, cache_estimates, description, verbosity), + : Heuristic(tasks::get_default_value_axioms_task_if_needed( + transform, axioms), + cache_estimates, description, verbosity), cache_hits(0), cache_misses(0), helpful_transition_extraction_counter(0),
@@ -295,6 +298,7 @@ class CGHeuristicFeature "maximum number of cached entries per variable (set to 0 to disable cache)", "1000000", plugins::Bounds("0", "infinity")); + tasks::add_axioms_option_to_feature(*this); add_heuristic_options_to_feature(*this, "cg"); document_language_support("action costs", "supported");
@@ -316,6 +320,7 @@ class CGHeuristicFeature const utils::Context &) const override { return plugins::make_shared_from_arg_tuples( opts.get("max_cache_size"), + tasks::get_axioms_arguments_from_options(opts), get_heuristic_arguments_from_options(opts) ); }
diff --git a/src/search/heuristics/cg_heuristic.h b/src/search/heuristics/cg_heuristic.h
index c7d738f634..7252e1607c 100644
--- a/src/search/heuristics/cg_heuristic.h
+++ b/src/search/heuristics/cg_heuristic.h
@@ -4,6 +4,7 @@ #include "../heuristic.h" #include "../algorithms/priority_queues.h" +#include "../tasks/default_value_axioms_task.h" #include #include
@@ -45,6 +46,7 @@ class CGHeuristic : public Heuristic { public: explicit CGHeuristic( int max_cache_size, + tasks::AxiomHandlingType axioms, const std::shared_ptr &transform, bool cache_estimates, const std::string &description, utils::Verbosity verbosity);
diff --git a/src/search/heuristics/ff_heuristic.cc b/src/search/heuristics/ff_heuristic.cc
index 683d118022..39f727258c 100644
--- a/src/search/heuristics/ff_heuristic.cc
+++
b/src/search/heuristics/ff_heuristic.cc @@ -12,10 +12,12 @@ using namespace std; namespace ff_heuristic { // construction and destruction FFHeuristic::FFHeuristic( + tasks::AxiomHandlingType axioms, const shared_ptr &transform, bool cache_estimates, const string &description, utils::Verbosity verbosity) : AdditiveHeuristic( - transform, cache_estimates, description, verbosity), + axioms, transform, cache_estimates, description, + verbosity), relaxed_plan(task_proxy.get_operators().size(), false) { if (log.is_at_least_normal()) { log << "Initializing FF heuristic..." << endl; @@ -78,7 +80,7 @@ class FFHeuristicFeature FFHeuristicFeature() : TypedFeature("ff") { document_title("FF heuristic"); - add_heuristic_options_to_feature(*this, "ff"); + relaxation_heuristic::add_relaxation_heuristic_options_to_feature(*this, "ff"); document_language_support("action costs", "supported"); document_language_support("conditional effects", "supported"); @@ -98,7 +100,7 @@ class FFHeuristicFeature const plugins::Options &opts, const utils::Context &) const override { return plugins::make_shared_from_arg_tuples( - get_heuristic_arguments_from_options(opts) + relaxation_heuristic::get_relaxation_heuristic_arguments_from_options(opts) ); } }; diff --git a/src/search/heuristics/ff_heuristic.h b/src/search/heuristics/ff_heuristic.h index 86fa6a3893..d0302ed30c 100644 --- a/src/search/heuristics/ff_heuristic.h +++ b/src/search/heuristics/ff_heuristic.h @@ -33,6 +33,7 @@ class FFHeuristic : public additive_heuristic::AdditiveHeuristic { virtual int compute_heuristic(const State &ancestor_state) override; public: FFHeuristic( + tasks::AxiomHandlingType axioms, const std::shared_ptr &transform, bool cache_estimates, const std::string &description, utils::Verbosity verbosity); diff --git a/src/search/heuristics/max_heuristic.cc b/src/search/heuristics/max_heuristic.cc index 40051ec2fd..a0d78e2f8a 100644 --- a/src/search/heuristics/max_heuristic.cc +++ b/src/search/heuristics/max_heuristic.cc @@ -23,10 +23,12 @@ namespace max_heuristic { // construction and destruction HSPMaxHeuristic::HSPMaxHeuristic( + tasks::AxiomHandlingType axioms, const shared_ptr &transform, bool cache_estimates, const string &description, utils::Verbosity verbosity) : RelaxationHeuristic( - transform, cache_estimates, description, verbosity) { + axioms, transform, cache_estimates, description, + verbosity) { if (log.is_at_least_normal()) { log << "Initializing HSP max heuristic..." 
<< endl; } @@ -107,7 +109,7 @@ class HSPMaxHeuristicFeature HSPMaxHeuristicFeature() : TypedFeature("hmax") { document_title("Max heuristic"); - add_heuristic_options_to_feature(*this, "hmax"); + relaxation_heuristic::add_relaxation_heuristic_options_to_feature(*this, "hmax"); document_language_support("action costs", "supported"); document_language_support("conditional effects", "supported"); @@ -127,7 +129,8 @@ class HSPMaxHeuristicFeature const plugins::Options &opts, const utils::Context &) const override { return plugins::make_shared_from_arg_tuples( - get_heuristic_arguments_from_options(opts)); + relaxation_heuristic::get_relaxation_heuristic_arguments_from_options(opts) + ); } }; diff --git a/src/search/heuristics/max_heuristic.h b/src/search/heuristics/max_heuristic.h index 9ab122d308..06364721d2 100644 --- a/src/search/heuristics/max_heuristic.h +++ b/src/search/heuristics/max_heuristic.h @@ -34,6 +34,7 @@ class HSPMaxHeuristic : public relaxation_heuristic::RelaxationHeuristic { virtual int compute_heuristic(const State &ancestor_state) override; public: HSPMaxHeuristic( + tasks::AxiomHandlingType axioms, const std::shared_ptr &transform, bool cache_estimates, const std::string &description, utils::Verbosity verbosity); diff --git a/src/search/heuristics/relaxation_heuristic.cc b/src/search/heuristics/relaxation_heuristic.cc index 7504a08f53..f98b3c6fbc 100644 --- a/src/search/heuristics/relaxation_heuristic.cc +++ b/src/search/heuristics/relaxation_heuristic.cc @@ -1,5 +1,6 @@ #include "relaxation_heuristic.h" +#include "../plugins/plugin.h" #include "../task_utils/task_properties.h" #include "../utils/collections.h" #include "../utils/logging.h" @@ -33,12 +34,29 @@ UnaryOperator::UnaryOperator( operator_no(operator_no) { } +void add_relaxation_heuristic_options_to_feature( + plugins::Feature &feature, const string &description) { + tasks::add_axioms_option_to_feature(feature); + add_heuristic_options_to_feature(feature, description); +} + +tuple, bool, string, + utils::Verbosity> +get_relaxation_heuristic_arguments_from_options(const plugins::Options &opts) { + return tuple_cat( + tasks::get_axioms_arguments_from_options(opts), + get_heuristic_arguments_from_options(opts)); +} + // construction and destruction RelaxationHeuristic::RelaxationHeuristic( + tasks::AxiomHandlingType axioms, const shared_ptr &transform, bool cache_estimates, const string &description, utils::Verbosity verbosity) - : Heuristic(transform, cache_estimates, description, verbosity) { + : Heuristic(tasks::get_default_value_axioms_task_if_needed( + transform, axioms), + cache_estimates, description, verbosity) { // Build propositions. 
propositions.resize(task_properties::get_num_facts(task_proxy)); diff --git a/src/search/heuristics/relaxation_heuristic.h b/src/search/heuristics/relaxation_heuristic.h index be9ea54fe4..218f461090 100644 --- a/src/search/heuristics/relaxation_heuristic.h +++ b/src/search/heuristics/relaxation_heuristic.h @@ -5,6 +5,7 @@ #include "../heuristic.h" +#include "../tasks/default_value_axioms_task.h" #include "../utils/collections.h" #include @@ -111,12 +112,18 @@ class RelaxationHeuristic : public Heuristic { Proposition *get_proposition(const FactProxy &fact); public: RelaxationHeuristic( + tasks::AxiomHandlingType axioms, const std::shared_ptr &transform, bool cache_estimates, const std::string &description, utils::Verbosity verbosity); virtual bool dead_ends_are_reliable() const override; }; -} +extern void add_relaxation_heuristic_options_to_feature( + plugins::Feature &feature, const std::string &description); +extern std::tuple, + bool, std::string, utils::Verbosity> +get_relaxation_heuristic_arguments_from_options(const plugins::Options &opts); +} #endif diff --git a/src/search/landmarks/landmark_cost_partitioning_heuristic.cc b/src/search/landmarks/landmark_cost_partitioning_heuristic.cc index a66524ce8f..b1adc45d0a 100644 --- a/src/search/landmarks/landmark_cost_partitioning_heuristic.cc +++ b/src/search/landmarks/landmark_cost_partitioning_heuristic.cc @@ -18,12 +18,13 @@ namespace landmarks { LandmarkCostPartitioningHeuristic::LandmarkCostPartitioningHeuristic( const shared_ptr &lm_factory, bool pref, bool prog_goal, bool prog_gn, bool prog_r, + tasks::AxiomHandlingType axioms, const shared_ptr &transform, bool cache_estimates, const string &description, utils::Verbosity verbosity, CostPartitioningMethod cost_partitioning, bool alm, lp::LPSolverType lpsolver) : LandmarkHeuristic( - pref, transform, cache_estimates, description, verbosity) { + axioms, pref, transform, cache_estimates, description, verbosity) { if (log.is_at_least_normal()) { log << "Initializing landmark cost partitioning heuristic..." 
<< endl; } diff --git a/src/search/landmarks/landmark_cost_partitioning_heuristic.h b/src/search/landmarks/landmark_cost_partitioning_heuristic.h index be64e867cd..fb57717c04 100644 --- a/src/search/landmarks/landmark_cost_partitioning_heuristic.h +++ b/src/search/landmarks/landmark_cost_partitioning_heuristic.h @@ -26,6 +26,7 @@ class LandmarkCostPartitioningHeuristic : public LandmarkHeuristic { LandmarkCostPartitioningHeuristic( const std::shared_ptr &lm_factory, bool pref, bool prog_goal, bool prog_gn, bool prog_r, + tasks::AxiomHandlingType axioms, const std::shared_ptr &transform, bool cache_estimates, const std::string &description, utils::Verbosity verbosity, diff --git a/src/search/landmarks/landmark_heuristic.cc b/src/search/landmarks/landmark_heuristic.cc index b03c119a13..cd0d75e1bf 100644 --- a/src/search/landmarks/landmark_heuristic.cc +++ b/src/search/landmarks/landmark_heuristic.cc @@ -14,10 +14,12 @@ using namespace std; namespace landmarks { LandmarkHeuristic::LandmarkHeuristic( - bool use_preferred_operators, + tasks::AxiomHandlingType axioms, bool use_preferred_operators, const shared_ptr &transform, bool cache_estimates, const string &description, utils::Verbosity verbosity) - : Heuristic(transform, cache_estimates, description, verbosity), + : Heuristic(tasks::get_default_value_axioms_task_if_needed( + transform, axioms), + cache_estimates, description, verbosity), use_preferred_operators(use_preferred_operators), successor_generator(nullptr) { } @@ -27,14 +29,17 @@ void LandmarkHeuristic::initialize( bool prog_gn, bool prog_r) { /* Actually, we should test if this is the root task or a - CostAdaptedTask *of the root task*, but there is currently no good - way to do this, so we use this incomplete, slightly less safe test. + task that *only* transforms costs and/or adds negated axioms. + However, there is currently no good way to do this, so we use + this incomplete, slightly less safe test. */ if (task != tasks::g_root_task - && dynamic_cast(task.get()) == nullptr) { + && dynamic_cast(task.get()) == nullptr + && dynamic_cast(task.get()) == nullptr) { cerr << "The landmark heuristics currently only support " - << "task transformations that modify the operator costs. " - << "See issues 845 and 686 for details." << endl; + << "task transformations that modify the operator costs " + << "or add negated axioms. See issues 845, 686 and 454 " + << "for details." 
<< endl; utils::exit_with(utils::ExitCode::SEARCH_UNSUPPORTED); } @@ -223,6 +228,7 @@ void add_landmark_heuristic_options_to_feature( "prog_gn", "Use greedy-necessary ordering progression.", "true"); feature.add_option( "prog_r", "Use reasonable ordering progression.", "true"); + tasks::add_axioms_option_to_feature(feature); add_heuristic_options_to_feature(feature, description); feature.document_property("preferred operators", @@ -230,7 +236,8 @@ void add_landmark_heuristic_options_to_feature( } tuple, bool, bool, bool, bool, - shared_ptr, bool, string, utils::Verbosity> + tasks::AxiomHandlingType, shared_ptr, bool, string, + utils::Verbosity> get_landmark_heuristic_arguments_from_options( const plugins::Options &opts) { return tuple_cat( @@ -239,8 +246,8 @@ get_landmark_heuristic_arguments_from_options( opts.get("pref"), opts.get("prog_goal"), opts.get("prog_gn"), - opts.get("prog_r") - ), + opts.get("prog_r")), + tasks::get_axioms_arguments_from_options(opts), get_heuristic_arguments_from_options(opts)); } } diff --git a/src/search/landmarks/landmark_heuristic.h b/src/search/landmarks/landmark_heuristic.h index 1cdab0128d..55f0deab00 100644 --- a/src/search/landmarks/landmark_heuristic.h +++ b/src/search/landmarks/landmark_heuristic.h @@ -3,6 +3,8 @@ # include "../heuristic.h" +#include "../tasks/default_value_axioms_task.h" + class ConstBitsetView; namespace successor_generator { @@ -44,6 +46,7 @@ class LandmarkHeuristic : public Heuristic { virtual int compute_heuristic(const State &ancestor_state) override; public: LandmarkHeuristic( + tasks::AxiomHandlingType axioms, bool use_preferred_operators, const std::shared_ptr &transform, bool cache_estimates, const std::string &description, @@ -63,8 +66,9 @@ class LandmarkHeuristic : public Heuristic { extern void add_landmark_heuristic_options_to_feature( plugins::Feature &feature, const std::string &description); extern std::tuple, bool, bool, bool, - bool, std::shared_ptr, bool, - std::string, utils::Verbosity> + bool, tasks::AxiomHandlingType, + std::shared_ptr, bool, std::string, + utils::Verbosity> get_landmark_heuristic_arguments_from_options( const plugins::Options &opts); } diff --git a/src/search/landmarks/landmark_sum_heuristic.cc b/src/search/landmarks/landmark_sum_heuristic.cc index bcfd4ae587..51f447a54f 100644 --- a/src/search/landmarks/landmark_sum_heuristic.cc +++ b/src/search/landmarks/landmark_sum_heuristic.cc @@ -33,10 +33,12 @@ static bool are_dead_ends_reliable( LandmarkSumHeuristic::LandmarkSumHeuristic( const shared_ptr &lm_factory, bool pref, bool prog_goal, bool prog_gn, bool prog_r, + tasks::AxiomHandlingType axioms, const shared_ptr &transform, bool cache_estimates, const string &description, utils::Verbosity verbosity) : LandmarkHeuristic( - pref, transform, cache_estimates, description, verbosity), + axioms, pref, transform, cache_estimates, + description, verbosity), dead_ends_reliable( are_dead_ends_reliable(lm_factory, task_proxy)) { if (log.is_at_least_normal()) { diff --git a/src/search/landmarks/landmark_sum_heuristic.h b/src/search/landmarks/landmark_sum_heuristic.h index 268d552afd..70775e6587 100644 --- a/src/search/landmarks/landmark_sum_heuristic.h +++ b/src/search/landmarks/landmark_sum_heuristic.h @@ -19,6 +19,7 @@ class LandmarkSumHeuristic : public LandmarkHeuristic { LandmarkSumHeuristic( const std::shared_ptr &lm_factory, bool pref, bool prog_goal, bool prog_gn, bool prog_r, + tasks::AxiomHandlingType axioms, const std::shared_ptr &transform, bool cache_estimates, const std::string 
&description, utils::Verbosity verbosity); diff --git a/src/search/tasks/default_value_axioms_task.cc b/src/search/tasks/default_value_axioms_task.cc new file mode 100644 index 0000000000..61241f7b59 --- /dev/null +++ b/src/search/tasks/default_value_axioms_task.cc @@ -0,0 +1,429 @@ +#include "default_value_axioms_task.h" + +#include "../task_proxy.h" + +#include "../algorithms/sccs.h" +#include "../task_utils/task_properties.h" + +#include +#include +#include +#include + +using namespace std; +using utils::ExitCode; + +namespace tasks { +DefaultValueAxiomsTask::DefaultValueAxiomsTask( + const shared_ptr &parent, + AxiomHandlingType axioms) + : DelegatingTask(parent), + axioms(axioms), + default_value_axioms_start_index(parent->get_num_axioms()) { + TaskProxy task_proxy(*parent); + + /* + (non)default_dependencies store for each variable v all derived + variables that appear with their (non)default value in the body of + an axiom that sets v. + axiom_ids_for_var stores for each derived variable v which + axioms set v to their nondefault value. + Note that the vectors go over *all* variables (also non-derived ones), + but only the indices that correspond to a variable ID of a derived + variable actually have content. + */ + vector> nondefault_dependencies(task_proxy.get_variables().size()); + vector> default_dependencies(task_proxy.get_variables().size()); + vector> axiom_ids_for_var(task_proxy.get_variables().size()); + for (OperatorProxy axiom: task_proxy.get_axioms()) { + EffectProxy effect = axiom.get_effects()[0]; + int head_var = effect.get_fact().get_variable().get_id(); + axiom_ids_for_var[head_var].push_back(axiom.get_id()); + for (FactProxy cond: effect.get_conditions()) { + VariableProxy var_proxy = cond.get_variable(); + if (var_proxy.is_derived()) { + int var = cond.get_variable().get_id(); + if (cond.get_value() == var_proxy.get_default_axiom_value()) { + default_dependencies[head_var].push_back(var); + } else { + nondefault_dependencies[head_var].push_back(var); + } + } + } + } + + /* + Get the sccs induced by nondefault dependencies. + We do not include default dependencies because they cannot + introduce additional cycles (this would imply that the axioms + are not stratifiable, which is already checked in the translator). + */ + vector> sccs; + vector *> var_to_scc; + // We don't need the sccs if we set axioms "v=default <- {}" everywhere. + if (axioms == AxiomHandlingType::APPROXIMATE_NEGATIVE_CYCLES) { + sccs = sccs::compute_maximal_sccs(nondefault_dependencies); + var_to_scc = vector *>( + task_proxy.get_variables().size(), nullptr); + for (int i = 0; i < (int)sccs.size(); ++i) { + for (int var: sccs[i]) { + var_to_scc[var] = &sccs[i]; + } + } + } + + unordered_set default_value_needed = + get_vars_with_relevant_default_value(nondefault_dependencies, + default_dependencies, + var_to_scc); + + for (int var: default_value_needed) { + vector &axiom_ids = axiom_ids_for_var[var]; + int default_value = + task_proxy.get_variables()[var].get_default_axiom_value(); + + if (axioms == AxiomHandlingType::APPROXIMATE_NEGATIVE + || var_to_scc[var]->size() > 1) { + /* + If there is a cyclic dependency between several derived + variables, the "obvious" way of negating the formula + defining the derived variable is semantically wrong + (see issue453). + + In this case we perform a naive overapproximation + instead, which assumes that derived variables occurring + in the cycle can be false unconditionally. 
This is good + enough for correctness of the code that uses these + default value axioms, but loses accuracy. Negating the + axioms in an exact (non-overapproximating) way is possible + but more expensive (again, see issue453). + */ + default_value_axioms.emplace_back( + FactPair(var, default_value), vector()); + } else { + add_default_value_axioms_for_var( + FactPair(var, default_value), axiom_ids); + } + } +} + +/* + Collect for which derived variables it is relevant to know how they + can obtain their default value. This is done by tracking for all + derived variables which of their values are needed. + + Initially, we know that var=val is needed if it appears in a goal or + operator condition. Then we iteratively do the following: + a) If var=val is needed and var'=nondefault is in the body of an + axiom setting var=nondefault, then var'=val is needed. + b) If var=val is needed and var'=default is in the body of an axiom + setting var=nondefault, then var'=!val is needed, where + - !val=nondefault if val=default + - !val=default if val=nondefault + (var and var' are always derived variables.) + + If var=default is needed but we already know that the axioms we will + introduce for var=default are going to have an empty body, then we don't + apply a)/b) (because the axiom for var=default will not depend on anything). +*/ +unordered_set DefaultValueAxiomsTask::get_vars_with_relevant_default_value( + const vector> &nondefault_dependencies, + const vector> &default_dependencies, + const vector *> &var_to_scc) { + // Store which derived vars are needed default (true) / nondefault(false). + utils::HashSet> needed; + + TaskProxy task_proxy(*parent); + + // Collect derived variables that occur as their default value. + for (const FactProxy &goal: task_proxy.get_goals()) { + VariableProxy var_proxy = goal.get_variable(); + if (var_proxy.is_derived()) { + bool default_value = + goal.get_value() == var_proxy.get_default_axiom_value(); + needed.emplace(goal.get_pair().var, default_value); + } + } + for (OperatorProxy op: task_proxy.get_operators()) { + for (FactProxy condition: op.get_preconditions()) { + VariableProxy var_proxy = condition.get_variable(); + if (var_proxy.is_derived()) { + bool default_value = + condition.get_value() == var_proxy.get_default_axiom_value(); + needed.emplace(condition.get_pair().var, default_value); + } + } + for (EffectProxy effect: op.get_effects()) { + for (FactProxy condition: effect.get_conditions()) { + VariableProxy var_proxy = condition.get_variable(); + if (var_proxy.is_derived()) { + bool default_value = + condition.get_value() == var_proxy.get_default_axiom_value(); + needed.emplace(condition.get_pair().var, default_value); + } + } + } + } + + deque> to_process(needed.begin(), needed.end()); + while (!to_process.empty()) { + int var = to_process.front().first; + bool default_value = to_process.front().second; + to_process.pop_front(); + + /* + If we process a default value and already know that the axiom we + will introduce has an empty body (either because we trivially + overapproximate everything or because the variable has cyclic + dependencies), then the axiom (and thus the current variable/value + pair) doesn't depend on anything. 
+ */ + if ((default_value) && + (axioms == AxiomHandlingType::APPROXIMATE_NEGATIVE + || var_to_scc[var]->size() > 1)) { + continue; + } + + for (int nondefault_dep : nondefault_dependencies[var]) { + auto insert_retval = needed.emplace(nondefault_dep, default_value); + if (insert_retval.second) { + to_process.emplace_back(nondefault_dep, default_value); + } + } + for (int default_dep : default_dependencies[var]) { + auto insert_retval = needed.emplace(default_dep, !default_value); + if (insert_retval.second) { + to_process.emplace_back(default_dep, !default_value); + } + } + } + + unordered_set default_needed; + for (pair entry : needed) { + if (entry.second) { + default_needed.insert(entry.first); + } + } + return default_needed; +} + + +void DefaultValueAxiomsTask::add_default_value_axioms_for_var( + FactPair head, vector &axiom_ids) { + TaskProxy task_proxy(*parent); + + /* + If no axioms change the variable to its non-default value, + then the default is always true. + */ + if (axiom_ids.empty()) { + default_value_axioms.emplace_back(head, vector()); + return; + } + + vector> conditions_as_cnf; + conditions_as_cnf.reserve(axiom_ids.size()); + for (int axiom_id : axiom_ids) { + OperatorProxy axiom = task_proxy.get_axioms()[axiom_id]; + conditions_as_cnf.emplace_back(); + for (FactProxy fact : axiom.get_effects()[0].get_conditions()) { + int var_id = fact.get_variable().get_id(); + int num_vals = task_proxy.get_variables()[var_id].get_domain_size(); + for (int value = 0; value < num_vals; ++value) { + if (value != fact.get_value()) { + conditions_as_cnf.back().insert({var_id, value}); + } + } + } + } + + // We can see multiplying out the cnf as collecting all hitting sets. + set current; + unordered_set current_vars; + set> hitting_sets; + collect_non_dominated_hitting_sets_recursively( + conditions_as_cnf, 0, current, current_vars, hitting_sets); + + for (const set &c : hitting_sets) { + default_value_axioms.emplace_back( + head, vector(c.begin(), c.end())); + } +} + + +void DefaultValueAxiomsTask::collect_non_dominated_hitting_sets_recursively( + const vector> &set_of_sets, size_t index, + set &hitting_set, unordered_set &hitting_set_vars, + set> &results) { + if (index == set_of_sets.size()) { + /* + Check whether the hitting set is dominated. + If we find a fact f in hitting_set such that no set in the + set of sets is covered by *only* f, then hitting_set \ {f} + is still a hitting set that dominates hitting_set. + */ + set not_uniquely_used(hitting_set); + for (const set &set : set_of_sets) { + vector intersection; + set_intersection(set.begin(), set.end(), + hitting_set.begin(), hitting_set.end(), + back_inserter(intersection)); + if (intersection.size() == 1) { + not_uniquely_used.erase(intersection[0]); + } + } + if (not_uniquely_used.empty()) { + results.insert(hitting_set); + } + return; + } + + const set &set = set_of_sets[index]; + for (const FactPair &elem : set) { + /* + If the current set is covered with the current hitting set + elements, we continue with the next set. + */ + if (hitting_set.find(elem) != hitting_set.end()) { + collect_non_dominated_hitting_sets_recursively( + set_of_sets, index + 1, hitting_set, hitting_set_vars, + results); + return; + } + } + + for (const FactPair &elem : set) { + // We don't allow choosing different facts from the same variable. 
+ if (hitting_set_vars.find(elem.var) != hitting_set_vars.end()) { + continue; + } + + hitting_set.insert(elem); + hitting_set_vars.insert(elem.var); + collect_non_dominated_hitting_sets_recursively( + set_of_sets, index + 1, hitting_set, hitting_set_vars, + results); + hitting_set.erase(elem); + hitting_set_vars.erase(elem.var); + } +} + + +int DefaultValueAxiomsTask::get_operator_cost(int index, bool is_axiom) const { + if (!is_axiom || index < default_value_axioms_start_index) { + return parent->get_operator_cost(index, is_axiom); + } + + return 0; +} + +string DefaultValueAxiomsTask::get_operator_name(int index, bool is_axiom) const { + if (!is_axiom || index < default_value_axioms_start_index) { + return parent->get_operator_name(index, is_axiom); + } + + return ""; +} + +int DefaultValueAxiomsTask::get_num_operator_preconditions(int index, bool is_axiom) const { + if (!is_axiom || index < default_value_axioms_start_index) { + return parent->get_num_operator_preconditions(index, is_axiom); + } + + return 1; +} + +FactPair DefaultValueAxiomsTask::get_operator_precondition( + int op_index, int fact_index, bool is_axiom) const { + if (!is_axiom || (op_index < default_value_axioms_start_index)) { + return parent->get_operator_precondition(op_index, fact_index, is_axiom); + } + + assert(fact_index == 0); + FactPair head = default_value_axioms[op_index - default_value_axioms_start_index].head; + return FactPair(head.var, 1 - head.value); +} + +int DefaultValueAxiomsTask::get_num_operator_effects(int op_index, bool is_axiom) const { + if (!is_axiom || op_index < default_value_axioms_start_index) { + return parent->get_num_operator_effects(op_index, is_axiom); + } + + return 1; +} + +int DefaultValueAxiomsTask::get_num_operator_effect_conditions( + int op_index, int eff_index, bool is_axiom) const { + if (!is_axiom || op_index < default_value_axioms_start_index) { + return parent->get_num_operator_effect_conditions( + op_index, eff_index, is_axiom); + } + + assert(eff_index == 0); + return default_value_axioms[op_index - default_value_axioms_start_index].condition.size(); +} + +FactPair DefaultValueAxiomsTask::get_operator_effect_condition( + int op_index, int eff_index, int cond_index, bool is_axiom) const { + if (!is_axiom || op_index < default_value_axioms_start_index) { + return parent->get_operator_effect_condition( + op_index, eff_index, cond_index, is_axiom); + } + + assert(eff_index == 0); + return default_value_axioms[op_index - default_value_axioms_start_index].condition[cond_index]; +} + +FactPair DefaultValueAxiomsTask::get_operator_effect( + int op_index, int eff_index, bool is_axiom) const { + if (!is_axiom || op_index < default_value_axioms_start_index) { + return parent->get_operator_effect(op_index, eff_index, is_axiom); + } + + assert(eff_index == 0); + return default_value_axioms[op_index - default_value_axioms_start_index].head; +} + +int DefaultValueAxiomsTask::get_num_axioms() const { + return parent->get_num_axioms() + default_value_axioms.size(); +} + +shared_ptr get_default_value_axioms_task_if_needed( + const shared_ptr &task, + AxiomHandlingType axioms) { + TaskProxy proxy(*task); + if (task_properties::has_axioms(proxy)) { + return make_shared( + DefaultValueAxiomsTask(task, axioms)); + } + return task; +} + +void add_axioms_option_to_feature(plugins::Feature &feature) { + feature.add_option( + "axioms", + "How to compute axioms that describe how the negated " + "(=default) value of a derived variable can be achieved.", + "approximate_negative_cycles"); +} + +tuple 
get_axioms_arguments_from_options( const plugins::Options &opts) { return make_tuple( opts.get("axioms")); }
static plugins::TypedEnumPlugin _enum_plugin({ {"approximate_negative", "Overapproximate negated axioms for all derived variables by " "setting an empty condition, indicating the default value can " "always be achieved for free."}, {"approximate_negative_cycles", "Overapproximate negated axioms for all derived variables which " "have cyclic dependencies by setting an empty condition, " "indicating the default value can always be achieved for free. " "For all other derived variables, the negated axioms are computed " "exactly. Note that this can potentially lead to a combinatorial " "explosion."} }); }
diff --git a/src/search/tasks/default_value_axioms_task.h b/src/search/tasks/default_value_axioms_task.h
new file mode 100644
index 0000000000..1511a0d631
--- /dev/null
+++ b/src/search/tasks/default_value_axioms_task.h
@@ -0,0 +1,94 @@ +#ifndef TASKS_DEFAULT_VALUE_AXIOMS_TASK_H +#define TASKS_DEFAULT_VALUE_AXIOMS_TASK_H + +#include "delegating_task.h" + +#include "../plugins/plugin.h" + +#include + +/* + This task transformation adds explicit axioms for how the default value + of derived variables can be achieved. In general this is done as follows: + Given derived variable v with n axioms v <- c_1, ..., v <- c_n, add axioms + that together represent ¬v <- ¬c_1 ^ ... ^ ¬c_n. + + Notes: + - Technically, this transformation is illegal since it adds axioms which set + derived variables to their default value. Do not use it for search; the + axiom evaluator expects that all axioms set variables to their non-default + values (checked with an assertion). + - We assume that derived variables are binary. + - THE TRANSFORMATION CAN BE SLOW! The rule ¬v <- ¬c_1 ^ ... ^ ¬c_n must + be split up into axioms whose conditions are simple conjunctions. Since + all c_i are also simple conjunctions, this amounts to converting a CNF + to a DNF. + - The transformation is not exact. For derived variables v that have cyclic + dependencies, the general approach is incorrect. We instead trivially + overapproximate such cases with "¬v <- T" (see issue453). + - If you wish to avoid the combinatorial explosion from converting a + CNF to a DNF, you can set the option "axioms" to "approximate_negative"; + this will use the trivial overapproximation "¬v <- T" for *all* derived + variables. Note that this can negatively impact the heuristic values.
+ */ + +namespace tasks { +enum class AxiomHandlingType { + APPROXIMATE_NEGATIVE, APPROXIMATE_NEGATIVE_CYCLES +}; + +struct DefaultValueAxiom { + FactPair head; + std::vector condition; + + DefaultValueAxiom(FactPair head, std::vector &&condition) + : head(head), condition(condition) {} +}; + +class DefaultValueAxiomsTask : public DelegatingTask { + AxiomHandlingType axioms; + std::vector default_value_axioms; + int default_value_axioms_start_index; + + std::unordered_set get_vars_with_relevant_default_value( + const std::vector> &nondefault_dependencies, + const std::vector> &default_dependencies, + const std::vector *> &var_to_scc); + void add_default_value_axioms_for_var( + FactPair head, std::vector &axiom_ids); + void collect_non_dominated_hitting_sets_recursively( + const std::vector> &set_of_sets, size_t index, + std::set &hitting_set, + std::unordered_set &hitting_set_vars, + std::set> &results); +public: + explicit DefaultValueAxiomsTask( + const std::shared_ptr &parent, + AxiomHandlingType axioms); + virtual ~DefaultValueAxiomsTask() override = default; + + virtual int get_operator_cost(int index, bool is_axiom) const override; + virtual std::string get_operator_name(int index, bool is_axiom) const override; + virtual int get_num_operator_preconditions(int index, bool is_axiom) const override; + virtual FactPair get_operator_precondition( + int op_index, int fact_index, bool is_axiom) const override; + virtual int get_num_operator_effects(int op_index, bool is_axiom) const override; + virtual int get_num_operator_effect_conditions( + int op_index, int eff_index, bool is_axiom) const override; + virtual FactPair get_operator_effect_condition( + int op_index, int eff_index, int cond_index, bool is_axiom) const override; + virtual FactPair get_operator_effect( + int op_index, int eff_index, bool is_axiom) const override; + + virtual int get_num_axioms() const override; +}; + +extern std::shared_ptr get_default_value_axioms_task_if_needed( + const std::shared_ptr &task, + AxiomHandlingType axioms); +extern void add_axioms_option_to_feature(plugins::Feature &feature); +extern std::tuple get_axioms_arguments_from_options( + const plugins::Options &opts); +} + +#endif diff --git a/src/translate/axiom_rules.py b/src/translate/axiom_rules.py index 036ee93596..4a2b4511ba 100644 --- a/src/translate/axiom_rules.py +++ b/src/translate/axiom_rules.py @@ -26,16 +26,16 @@ def __init__(self, axioms): else: self.positive_dependencies[head].add(body_atom) - # Remove all information for variables whose literals are not necessary. + # Remove all information for variables that are not necessary. # We do not need to remove single entries from the dicts because if the key # (= head of an axiom) is relevant, then all its values (= body of axiom) # must be relevant by definition. - def remove_unnecessary_variables(self, necessary_literals): - for var in self.derived_variables.copy(): - if var not in necessary_literals and var.negate() not in necessary_literals: - self.derived_variables.remove(var) + def remove_unnecessary_variables(self, necessary_atoms): + for var in self.derived_variables: + if var not in necessary_atoms: self.positive_dependencies.pop(var, None) self.negative_dependencies.pop(var, None) + self.derived_variables &= necessary_atoms class AxiomCluster(object): @@ -48,63 +48,46 @@ def __init__(self, derived_variables): # in the body. 
self.positive_children = set() self.negative_children = set() - self.needed_negatively = False self.layer = 0 def handle_axioms(operators, axioms, goals, layer_strategy): clusters = compute_clusters(axioms, goals, operators) axiom_layers = compute_axiom_layers(clusters, layer_strategy) - - # TODO: It would be cleaner if these negated rules were an implementation - # detail of the heuristics in the search component that make use of them - # rather than part of the translation process. They should be removed in - # the future. Similarly, it would be a good idea to remove the notion of - # axiom layers and derived variable default values from the output. - # (All derived variables should be binary and default to false.) - with timers.timing("Computing negative axioms"): - compute_negative_axioms(clusters) - axioms = get_axioms(clusters) if DEBUG: verify_layering_condition(axioms, axiom_layers) return axioms, axiom_layers -def compute_necessary_literals(dependencies, goals, operators): - necessary_literals = set() +def compute_necessary_atoms(dependencies, goals, operators): + necessary_atoms = set() for g in goals: - if g.positive() in dependencies.derived_variables: - necessary_literals.add(g) + g = g.positive() + if g in dependencies.derived_variables: + necessary_atoms.add(g) for op in operators: - derived_preconditions = (l for l in op.precondition if l.positive() + derived_preconditions = (l.positive() for l in op.precondition if l.positive() in dependencies.derived_variables) - necessary_literals.update(derived_preconditions) + necessary_atoms.update(derived_preconditions) for condition, effect in chain(op.add_effects, op.del_effects): for c in condition: - if c.positive() in dependencies.derived_variables: - necessary_literals.add(c) - necessary_literals.add(c.negate()) - - literals_to_process = list(necessary_literals) - while literals_to_process: - l = literals_to_process.pop() - atom = l.positive() - for body_atom in dependencies.positive_dependencies[atom]: - l2 = body_atom.negate() if l.negated else body_atom - if l2 not in necessary_literals: - literals_to_process.append(l2) - necessary_literals.add(l2) - for body_atom in dependencies.negative_dependencies[atom]: - l2 = body_atom if l.negated else body_atom.negate() - if l2 not in necessary_literals: - literals_to_process.append(l2) - necessary_literals.add(l2) - - return necessary_literals + c_atom = c.positive() + if c_atom in dependencies.derived_variables: + necessary_atoms.add(c_atom) + + atoms_to_process = list(necessary_atoms) + while atoms_to_process: + atom = atoms_to_process.pop() + for body_atom in chain(dependencies.positive_dependencies[atom], + dependencies.negative_dependencies[atom]): + if body_atom not in necessary_atoms: + atoms_to_process.append(body_atom) + necessary_atoms.add(body_atom) + return necessary_atoms # Compute strongly connected components of the dependency graph. @@ -167,19 +150,17 @@ def compute_clusters(axioms, goals, operators): dependencies = AxiomDependencies(axioms) # Compute necessary literals and prune unnecessary vars from dependencies. 
- necessary_literals = compute_necessary_literals(dependencies, goals, operators) - dependencies.remove_unnecessary_variables(necessary_literals) + necessary_atoms = compute_necessary_atoms(dependencies, goals, operators) + dependencies.remove_unnecessary_variables(necessary_atoms) groups = get_strongly_connected_components(dependencies) clusters = [AxiomCluster(group) for group in groups] - # Compute mapping from variables to their clusters and set needed_negatively. + # Compute mapping from variables to their clusters. variable_to_cluster = {} for cluster in clusters: for variable in cluster.variables: variable_to_cluster[variable] = cluster - if variable.negate() in necessary_literals: - cluster.needed_negatively = True # Assign axioms to their clusters. for axiom in axioms: @@ -247,60 +228,6 @@ def compute_axiom_layers(clusters, strategy): return layers -def compute_negative_axioms(clusters): - for cluster in clusters: - if cluster.needed_negatively: - if len(cluster.variables) > 1: - # If the cluster contains multiple variables, they have a cyclic - # positive dependency. In this case, the "obvious" way of - # negating the formula defining the derived variable is - # semantically wrong. For details, see issue453. - # - # Therefore, in this case we perform a naive overapproximation - # instead, which assumes that derived variables occurring in - # such clusters can be false unconditionally. This is good - # enough for correctness of the code that uses these negated - # axioms (within heuristics of the search component), but loses - # accuracy. Negating the rules in an exact - # (non-overapproximating) way is possible but more expensive. - # Again, see issue453 for details. - for variable in cluster.variables: - name = cluster.axioms[variable][0].name - negated_axiom = pddl.PropositionalAxiom(name, [], variable.negate()) - cluster.axioms[variable].append(negated_axiom) - else: - variable = next(iter(cluster.variables)) - negated_axioms = negate(cluster.axioms[variable]) - cluster.axioms[variable] += negated_axioms - - -def negate(axioms): - assert axioms - result = [pddl.PropositionalAxiom(axioms[0].name, [], axioms[0].effect.negate())] - for axiom in axioms: - condition = axiom.condition - if len(condition) == 0: - # The derived fact we want to negate is triggered with an - # empty condition, so it is always true and its negation - # is always false. - return [] - elif len(condition) == 1: # Handle easy special case quickly. - new_literal = condition[0].negate() - for result_axiom in result: - result_axiom.condition.append(new_literal) - else: - new_result = [] - for literal in condition: - literal = literal.negate() - for result_axiom in result: - new_axiom = result_axiom.clone() - new_axiom.condition.append(literal) - new_result.append(new_axiom) - result = new_result - result = compute_simplified_axioms(result) - return result - - def get_axioms(clusters): axioms = [] for cluster in clusters: @@ -312,13 +239,12 @@ def get_axioms(clusters): def verify_layering_condition(axioms, axiom_layers): # This function is only used for debugging. variables_in_heads = set() - literals_in_heads = set() variables_with_layers = set() for axiom in axioms: head = axiom.effect - variables_in_heads.add(head.positive()) - literals_in_heads.add(head) + assert not head.negated + variables_in_heads.add(head) variables_with_layers = set(axiom_layers.keys()) # 1. A variable has a defined layer iff it appears in a head. 
@@ -335,39 +261,20 @@ def verify_layering_condition(axioms, axiom_layers): assert isinstance(layer, int) assert layer >= 0 - # 2. For every rule head <- ... cond ... where cond is a literal - # of a derived variable where the layer of head is equal to - # the layer of cond, cond occurs with the same polarity in heads. - # - # Note regarding issue454 and issue453: Because of the negated axioms - # mentioned in these issues, a derived variable may appear with *both* - # polarities in heads. This makes this test less strong than it would - # be otherwise. When these issues are addressed and axioms only occur - # with one polarity in heads, this test will remain correct in its - # current form, but it will be able to detect more violations of the - # layering property. + # 2. For every rule head <- ... cond ... where cond is a literal of + # a derived variable, the layer of cond is strictly smaller than the + # layer of head, or it is equal and cond is an atom (not a negative + # literal). print("Verifying 2...") for axiom in axioms: head = axiom.effect - head_positive = head.positive() - body = axiom.condition - for cond in body: - cond_positive = cond.positive() - if (cond_positive in variables_in_heads and - axiom_layers[cond_positive] == axiom_layers[head_positive]): - assert cond in literals_in_heads - - # 3. For every rule head <- ... cond ... where cond is a literal - # of a derived variable, the layer of head is greater or equal - # to the layer of cond. - print("Verifying 3...") - for axiom in axioms: - head = axiom.effect - head_positive = head.positive() + head_layer = axiom_layers[head] body = axiom.condition for cond in body: cond_positive = cond.positive() - if cond_positive in variables_in_heads: + if cond_positive in variables_in_heads: # cond is a derived variable + cond_layer = axiom_layers[cond_positive] + assert (cond_layer <= head_layer), (cond_layer, head_layer) # We need the assertion to be on a single line for # our error handler to be able to print the line. - assert (axiom_layers[cond_positive] <= axiom_layers[head_positive]), (axiom_layers[cond_positive], axiom_layers[head_positive]) + assert (not cond.negated or cond_layer < head_layer), (cond_layer, head_layer)
diff --git a/src/translate/translate.py b/src/translate/translate.py
index e060e8a591..d2df8cafb8 100755
--- a/src/translate/translate.py
+++ b/src/translate/translate.py
@@ -379,18 +379,14 @@ def translate_strips_axiom(axiom, dictionary, ranges, mutex_dict, mutex_ranges): ranges, mutex_dict, mutex_ranges) if conditions is None: return [] - if axiom.effect.negated: - [(var, _)] = dictionary[axiom.effect.positive()] - effect = (var, ranges[var] - 1) - else: - [effect] = dictionary[axiom.effect] - # Here we exploit that due to the invariant analysis algorithm derived - # variables cannot have more than one representation in the dictionary, - # even with the full encoding. They can never be part of a non-trivial - # mutex group. - axioms = [] - for condition in conditions: - axioms.append(sas_tasks.SASAxiom(condition.items(), effect)) + assert not axiom.effect.negated + [effect] = dictionary[axiom.effect] + # Here we exploit that due to the invariant analysis algorithm derived + # variables cannot have more than one representation in the dictionary, + # even with the full encoding. They can never be part of a non-trivial + # mutex group. + axioms = [sas_tasks.SASAxiom(condition.items(), effect) + for condition in conditions] return axioms
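
Usage sketch (illustrative, not part of the patch): with this change applied, the new "axioms" option introduced above can be passed to any of the affected heuristics on the Fast Downward command line; domain.pddl and problem.pddl below are placeholder inputs.

    ./fast-downward.py domain.pddl problem.pddl \
        --search "eager_greedy([ff(axioms=approximate_negative)])"

Leaving the option at its default (axioms=approximate_negative_cycles) keeps the previous behavior of exact negation for derived variables without cyclic dependencies, while approximate_negative avoids the potential exponential blow-up at the cost of weaker heuristic estimates.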