diff --git a/misc/tests/configs.py b/misc/tests/configs.py
index ed99114d06..fc116e41e2 100644
--- a/misc/tests/configs.py
+++ b/misc/tests/configs.py
@@ -12,7 +12,8 @@ def configs_optimal_core():
             "astar(ipdb())"],
         "bjolp": [
             "--evaluator",
-            "lmc=landmark_cost_partitioning(lm_merged([lm_rhw(),lm_hm(m=1)]))",
+            """lmc=landmark_cost_partitioning(lm_merged(
+                [lm_rhw(),lm_hm(m=1)]))""",
             "--search",
             "astar(lmc,lazy_evaluator=lmc)"],
         "astar_lmcut": [
diff --git a/src/search/parser/lexical_analyzer.cc b/src/search/parser/lexical_analyzer.cc
index a127aed9b7..9a37360809 100644
--- a/src/search/parser/lexical_analyzer.cc
+++ b/src/search/parser/lexical_analyzer.cc
@@ -58,7 +58,7 @@ TokenStream split_tokens(const string &text) {
         for (const auto &type_and_expression : token_type_expressions) {
             TokenType token_type = type_and_expression.first;
             const regex &expression = type_and_expression.second;
-            if (regex_search(start, end, match, expression)) {
+            if (regex_search(start, end, match, expression, regex_constants::match_continuous)) {
                 tokens.push_back({utils::tolower(match[1]), token_type});
                 start += match[0].length();
                 has_match = true;
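
Note (not part of the patch): the lexical_analyzer.cc hunk changes how the lexer
consumes input. By default, std::regex_search scans forward from the start
iterator and accepts a match anywhere in the remaining text; with
std::regex_constants::match_continuous, the match must begin exactly at the
start iterator, which is what a tokenizer that advances `start` by
match[0].length() relies on. The following standalone sketch demonstrates that
difference with a toy regex and input string of my own choosing; it is not
code from the patched file.

// Minimal sketch of regex_constants::match_continuous semantics.
#include <cassert>
#include <regex>
#include <string>

int main() {
    const std::string text = "   token";
    const std::regex expression("token");
    std::smatch match;

    // Default behavior: regex_search scans forward and finds "token"
    // even though it does not start at the beginning of the input.
    assert(std::regex_search(text, match, expression));

    // With match_continuous, the match is anchored at the start
    // position, so the leading whitespace makes the search fail.
    assert(!std::regex_search(text, match, expression,
                              std::regex_constants::match_continuous));
    return 0;
}

Anchoring each attempt at the current position also means a failed iteration
(has_match == false) genuinely indicates an unrecognized character at `start`,
rather than the absence of any token in the rest of the input.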