diff --git a/.dockerignore b/.dockerignore index 34d9c074f7df..6afcdc010f73 100644 --- a/.dockerignore +++ b/.dockerignore @@ -8,3 +8,4 @@ rasa/tests rasa/scripts data/ examples/ +docker-data/* diff --git a/.github/scripts/download_pretrained.py b/.github/scripts/download_pretrained.py index 1ba712fe7237..c97da8ee6dd2 100644 --- a/.github/scripts/download_pretrained.py +++ b/.github/scripts/download_pretrained.py @@ -52,7 +52,7 @@ def get_model_name_and_weights_from_config( if model_name not in model_class_dict: raise KeyError( f"'{model_name}' not a valid model name. Choose from " - f"{str(list(model_class_dict.keys()))} or create" + f"{list(model_class_dict.keys())!s} or create" f"a new class inheriting from this class to support your model." ) diff --git a/.github/tests/test_download_pretrained.py b/.github/tests/test_download_pretrained.py index dd583ddaf03c..0b47dfdd2ca2 100644 --- a/.github/tests/test_download_pretrained.py +++ b/.github/tests/test_download_pretrained.py @@ -23,7 +23,9 @@ def test_download_pretrained_lmf_exists_with_model_name(): config = yaml.load(CONFIG_FPATH) steps = config.get("pipeline", []) - step = list(filter(lambda x: x["name"] == download_pretrained.COMP_NAME, steps))[0] + step = list( # noqa: RUF015 + filter(lambda x: x["name"] == download_pretrained.COMP_NAME, steps) + )[0] step["model_name"] = "roberta" step["cache_dir"] = "/this/dir" @@ -41,7 +43,9 @@ def test_download_pretrained_unknown_model_name(): config = yaml.load(CONFIG_FPATH) steps = config.get("pipeline", []) - step = list(filter(lambda x: x["name"] == download_pretrained.COMP_NAME, steps))[0] + step = list( # noqa: RUF015 + filter(lambda x: x["name"] == download_pretrained.COMP_NAME, steps) + )[0] step["model_name"] = "unknown" with tempfile.NamedTemporaryFile("w+") as fp: @@ -56,7 +60,9 @@ def test_download_pretrained_multiple_model_names(): config = yaml.load(CONFIG_FPATH) steps = config.get("pipeline", []) - step = list(filter(lambda x: x["name"] == download_pretrained.COMP_NAME, steps))[0] + step = list( # noqa: RUF015 + filter(lambda x: x["name"] == download_pretrained.COMP_NAME, steps) + )[0] step_new = deepcopy(step) step_new["model_name"] = "roberta" steps.append(step_new) @@ -74,7 +80,9 @@ def test_download_pretrained_with_model_name_and_nondefault_weight(): config = yaml.load(CONFIG_FPATH) steps = config.get("pipeline", []) - step = list(filter(lambda x: x["name"] == download_pretrained.COMP_NAME, steps))[0] + step = list( # noqa: RUF015 + filter(lambda x: x["name"] == download_pretrained.COMP_NAME, steps) + )[0] step["model_name"] = "bert" step["model_weights"] = "bert-base-uncased" @@ -91,7 +99,9 @@ def test_download_pretrained_lmf_doesnt_exists(): config = yaml.load(CONFIG_FPATH) steps = config.get("pipeline", []) - step = list(filter(lambda x: x["name"] == download_pretrained.COMP_NAME, steps))[0] + step = list( # noqa: RUF015 + filter(lambda x: x["name"] == download_pretrained.COMP_NAME, steps) + )[0] steps.remove(step) with tempfile.NamedTemporaryFile("w+") as fp: diff --git a/.github/workflows/continous-integration.yml b/.github/workflows/continous-integration.yml index daec3ee4ce83..8a8758a25e2c 100644 --- a/.github/workflows/continous-integration.yml +++ b/.github/workflows/continous-integration.yml @@ -352,9 +352,15 @@ jobs: (Get-ItemProperty "HKLM:System\CurrentControlSet\Control\FileSystem").LongPathsEnabled Set-ItemProperty 'HKLM:\System\CurrentControlSet\Control\FileSystem' -Name 'LongPathsEnabled' -value 0 - - name: Install ddtrace - if: needs.changes.outputs.backend 
== 'true' - run: poetry run pip install -U ddtrace + - name: Install ddtrace on Linux + if: needs.changes.outputs.backend == 'true' && matrix.os == 'ubuntu-22.04' + run: poetry run pip install -U 'ddtrace<2.0.0' + + - name: Install ddtrace on Windows + if: needs.changes.outputs.backend == 'true' && matrix.os == 'windows-2019' + run: | + .\.venv\Scripts\activate + py -m pip install -U 'ddtrace<2.0.0' - name: Test Code 🔍 (multi-process) if: needs.changes.outputs.backend == 'true' @@ -492,9 +498,15 @@ jobs: (Get-ItemProperty "HKLM:System\CurrentControlSet\Control\FileSystem").LongPathsEnabled Set-ItemProperty 'HKLM:\System\CurrentControlSet\Control\FileSystem' -Name 'LongPathsEnabled' -value 0 - - name: Install ddtrace - if: needs.changes.outputs.backend == 'true' - run: poetry run pip install -U ddtrace + - name: Install ddtrace on Linux + if: needs.changes.outputs.backend == 'true' && matrix.os == 'ubuntu-22.04' + run: poetry run pip install -U 'ddtrace<2.0.0' + + - name: Install ddtrace on Windows + if: needs.changes.outputs.backend == 'true' && matrix.os == 'windows-2019' + run: | + .\.venv\Scripts\activate + py -m pip install -U 'ddtrace<2.0.0' - name: Test Code 🔍 (multi-process) if: needs.changes.outputs.backend == 'true' @@ -519,8 +531,8 @@ jobs: path: | ${{ github.workspace }}/${{ matrix.test }}-coverage - prepare_coverage_reports: - name: Prepare coverage reports + prepare_coverage_reports_analyse_with_sonarcloud: + name: Prepare coverage reports and Analyse coverage with Sonarcloud if: github.ref_type != 'tag' runs-on: ubuntu-22.04 # Always upload results even if tests failed @@ -561,21 +573,6 @@ jobs: coverage combine "${final_dir}/"* coverage xml - sonarcloud: - name: SonarCloud Scan - if: github.ref_type != 'tag' - runs-on: ubuntu-22.04 - needs: - - prepare_coverage_reports - - steps: - - name: Checkout git repository 🕝 - if: needs.changes.outputs.backend == 'true' - uses: actions/checkout@ac593985615ec2ede58e132d2e21d2b1cbd6127c - with: - # Disabling shallow clone is recommended for improving relevancy of coverage reporting - fetch-depth: 0 - - name: Analyse code with SonarCloud uses: sonarsource/sonarcloud-github-action@5875562561d22a34be0c657405578705a169af6c env: @@ -586,7 +583,7 @@ jobs: -Dsonar.organization=rasahq -Dsonar.projectKey=RasaHQ_rasa -Dsonar.sources=. - -Dsonar.python.coverage.reportPaths=tests_coverage/coverage.xml + -Dsonar.python.coverage.reportPaths=${{ github.workspace }}/tests_coverage -Dsonar.host.url=https://sonarcloud.io -Dsonar.verbose=true @@ -1055,8 +1052,11 @@ jobs: run: | sudo swapoff -a sudo rm -f /swapfile + sudo rm -rf "$AGENT_TOOLSDIRECTORY" sudo apt clean - docker image prune -a + docker image prune -a -f + docker volume prune -f + docker container prune -f df -h - name: Read Poetry Version 🔢 @@ -1084,6 +1084,9 @@ jobs: run: | docker buildx bake --set *.platform=linux/amd64,linux/arm64 -f docker/docker-bake.hcl ${{ matrix.image }} + - name: Check how much space is left after Docker build + run: df -h + - name: Push image with main tag 📦 if: needs.changes.outputs.docker == 'true' && github.event_name == 'push' && github.ref == 'refs/heads/main' && github.repository == 'RasaHQ/rasa' run: | diff --git a/.typo-ci.yml b/.typo-ci.yml index bcb9c4c75be6..9a6e5205558d 100644 --- a/.typo-ci.yml +++ b/.typo-ci.yml @@ -62,27 +62,43 @@ excluded_files: # # Any typos we should ignore? 
excluded_words: + - CDD + - Comerica + - ConveRTFeaturizer + - ConveRTTokenizer + - HookimplMarker + - Juste + - NLG + - README + - Tanja + - Vova - analytics + - anonymization + - anonymized - asyncio + - backends - bot - bot's - cdd - - CDD - cmdline + - conftest - conveRT - - ConveRTFeaturizer - - ConveRTTokenizer + - crf + - crfentityextractor - crfsuite + - crypto - custom-nlg-service + - customizable - daksh + - dataset - db's - - deque - - docusaurus - - non-latin - deduplicate - deduplication + - deque + - docusaurus - donath - - matplotlib + - dslim + - entitysynonymmapper - extractor - fbmessenger - featurization @@ -95,13 +111,17 @@ excluded_words: - forni - gzip - gzipped + - hallo - hftransformersnlp + - hookimpl - initializer - instaclient - - jwt - - jwt's + - ish + - jieba - jupyter - jupyterhub + - jwt + - jwt's - karpathy - keras - knowledgebase @@ -110,101 +130,82 @@ excluded_words: - llm - luis - matmul + - matplotlib - mattermost - memoization + - memoizationpolicy - miniconda - mitie - - mitiefeaturizer - mitie's + - mitiefeaturizer - mitienlp - - dataset - mongod - mrkdown - mrkdwn - myio - mymodelname - myuser - - numpy - networkx + - ngram + - nlg - nlu - nlu's + - non-latin + - numpy - perceptron + - pii-management - pika - pika's - - jieba + - pluggy + - pre - pretrained - prototyper + - prototyper - pycodestyle - pykwalify - pymessenger - pyobject - python-engineio - - pre - - customizable - quickstart - rasa - rasa's - readthedocs + - regexes + - regexfeaturizer - regularizer - repo - rst + - ruamel + - rustc + - rustup + - rustup-init - sanic - sanitization - scipy - sklearn - socketio + - spaCy + - spaCy's - spacy - spacyfeaturizer - spacynlp - - ish - - spaCy - - spaCy's - - README - - crf - - backends - - whitespaced - - ngram - subsampled - testagent + - thisismysecret + - tokenization - tokenize - tokenized - - tokenization - tokenizer - tokenizers - tokenizing - typoci - unfeaturized - unschedule - - wsgi - - ruamel - - prototyper - - hallo - - crypto - - regexes + - venv - walkthroughs - webexteams - - venv - - regexfeaturizer - - crfentityextractor - - Comerica - - entitysynonymmapper - - memoizationpolicy - - NLG - - nlg - - Juste - - Tanja - - Vova - - rustup - - rustup-init - - rustc - - conftest + - whitespaced - winpty - - pii-management - - anonymization - - anonymized - - dslim - - pluggy - - HookimplMarker - - hookimpl + - wsgi spellcheck_filenames: false diff --git a/3.7.0b1/main_plain/.config/rasa/global.yml b/3.7.0b1/main_plain/.config/rasa/global.yml deleted file mode 100644 index c5dcee2da8c9..000000000000 --- a/3.7.0b1/main_plain/.config/rasa/global.yml +++ /dev/null @@ -1,4 +0,0 @@ -metrics: - enabled: true - rasa_user_id: 003ff8fbd6e04031b5597b37356022d4 - date: 2023-09-12 13:20:59.423434 diff --git a/CHANGELOG.mdx b/CHANGELOG.mdx index 5c8847ad6904..2a34aba3c3c8 100644 --- a/CHANGELOG.mdx +++ b/CHANGELOG.mdx @@ -16,6 +16,16 @@ https://github.com/RasaHQ/rasa/tree/main/changelog/ . --> +## [3.6.9] - 2023-09-15 + +Rasa 3.6.9 (2023-09-15) +### Improvements +- [#12778](https://github.com/rasahq/rasa/issues/12778): Added additional method `fingerprint_addon` to the `GraphComponent` interface to allow inclusion of external data into the fingerprint calculation of a component + +### Bugfixes +- [#12790](https://github.com/rasahq/rasa/issues/12790): Fixed `KeyError` which resulted when `domain_responses` doesn't exist as a keyword argument while using a custom action dispatcher with nlg server. 
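
The `fingerprint_addon` hook from the 3.6.9 improvement above lets a `GraphComponent` mix external data, such as a prompt template on disk, into Rasa's cache fingerprint, so that editing that file invalidates cached training runs even though the component's config is unchanged. Below is a minimal sketch of how a component might use it; the exact hook signature is an assumption to verify against your Rasa version, and `PROMPT_FILE_KEY` is a hypothetical config key:

```python
import hashlib
from pathlib import Path
from typing import Any, Dict, Optional

from rasa.engine.graph import GraphComponent

PROMPT_FILE_KEY = "prompt"  # hypothetical config key pointing at an external file


class PromptedComponent(GraphComponent):
    """Component whose behaviour depends on an external prompt file."""

    # create()/process() and the other GraphComponent methods are omitted here.

    @staticmethod
    def fingerprint_addon(config: Dict[str, Any]) -> Optional[str]:
        # Hash the external template so that editing it busts the cache,
        # even though the config dict itself is unchanged.
        prompt_file = config.get(PROMPT_FILE_KEY)
        if prompt_file and Path(prompt_file).exists():
            return hashlib.sha256(Path(prompt_file).read_bytes()).hexdigest()
        # Returning None means no extra data goes into the fingerprint.
        return None
```
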
+ + ## [3.6.8] - 2023-08-30 Rasa 3.6.8 (2023-08-30) diff --git a/README.md b/README.md index 832b6527b00f..ac0d9d60ce5e 100644 --- a/README.md +++ b/README.md @@ -338,7 +338,7 @@ While this table represents our target release frequency, we reserve the right t Our End of Life policy defines how long a given release is considered supported, as well as how long a release is considered to be still in active development or maintenance. -The maintentance duration and end of life for every release are shown on our website as part of the [Product Release and Maintenance Policy](https://rasa.com/rasa-product-release-and-maintenance-policy/). +The maintenance duration and end of life for every release are shown on our website as part of the [Product Release and Maintenance Policy](https://rasa.com/rasa-product-release-and-maintenance-policy/). ### Cutting a Major / Minor release #### A week before release day diff --git a/changelog/12827.improvement.md b/changelog/12827.improvement.md new file mode 100644 index 000000000000..f1b7573c8d32 --- /dev/null +++ b/changelog/12827.improvement.md @@ -0,0 +1 @@ +Improved handling of last batch during DIET and TED training. The last batch is discarded if it contains less than half a batch size of data. \ No newline at end of file diff --git a/changelog/12852.improvement.md b/changelog/12852.improvement.md new file mode 100644 index 000000000000..0fea8ac19d52 --- /dev/null +++ b/changelog/12852.improvement.md @@ -0,0 +1 @@ +Added `username` to the connection parameters for `RedisLockStore` and `RedisTrackerStore` \ No newline at end of file diff --git a/changelog/12868.doc.md b/changelog/12868.doc.md new file mode 100644 index 000000000000..47971402fda7 --- /dev/null +++ b/changelog/12868.doc.md @@ -0,0 +1 @@ +Remove the Playground from docs. diff --git a/changelog/12901.improvement.md b/changelog/12901.improvement.md new file mode 100644 index 000000000000..663ab7ff7af4 --- /dev/null +++ b/changelog/12901.improvement.md @@ -0,0 +1 @@ +Added Schema file and schema validation for flows. \ No newline at end of file diff --git a/changelog/1557.improvement.md b/changelog/1557.improvement.md new file mode 100644 index 000000000000..87f4b1e62a88 --- /dev/null +++ b/changelog/1557.improvement.md @@ -0,0 +1,8 @@ +Added environment variables to configure JWT and auth token. 
+For JWT the following environment variables are available:
+- JWT_SECRET
+- JWT_METHOD
+- JWT_PRIVATE_KEY
+
+For the auth token the following environment variable is available:
+- AUTH_TOKEN
diff --git a/data/test_endpoints/example_endpoints.yml b/data/test_endpoints/example_endpoints.yml
index fccc4f9c4594..2e07554cee98 100644
--- a/data/test_endpoints/example_endpoints.yml
+++ b/data/test_endpoints/example_endpoints.yml
@@ -13,6 +13,7 @@ tracker_store:
   url: localhost
   port: 6379
   db: 0
+  username: username
   password: password
   key_prefix: conversation
   record_exp: 30000
@@ -20,6 +21,19 @@ tracker_store:
   ssl_keyfile: "keyfile.key"
   ssl_certfile: "certfile.crt"
   ssl_ca_certs: "my-bundle.ca-bundle"
+# example of redis external lock store config
+lock_store:
+  type: redis
+  url: localhost
+  port: 6379
+  db: 0
+  username: username
+  password: password
+  key_prefix: lock
+  use_ssl: True
+  ssl_keyfile: "keyfile.key"
+  ssl_certfile: "certfile.crt"
+  ssl_ca_certs: "my-bundle.ca-bundle"
 # example of mongoDB external tracker store config
 #tracker_store:
 #type: mongod
diff --git a/data/test_prompt_templates/test_prompt.jinja2 b/data/test_prompt_templates/test_prompt.jinja2
new file mode 100644
index 000000000000..34aaf4e27336
--- /dev/null
+++ b/data/test_prompt_templates/test_prompt.jinja2
@@ -0,0 +1 @@
+This is a test prompt.
diff --git a/data/test_trackers/tracker_moodbot.json b/data/test_trackers/tracker_moodbot.json
index acdcac89a5cf..7fc6db7830d7 100644
--- a/data/test_trackers/tracker_moodbot.json
+++ b/data/test_trackers/tracker_moodbot.json
@@ -34,6 +34,7 @@
   "followup_action": null,
   "slots": {
     "dialogue_stack": null,
+    "flow_hashes": null,
     "name": null,
     "requested_slot": null,
     "return_value": null,
diff --git a/docker/.dockerignore b/docker/.dockerignore
new file mode 100644
index 000000000000..cfd46602c2a5
--- /dev/null
+++ b/docker/.dockerignore
@@ -0,0 +1 @@
+docker-data/*
\ No newline at end of file
diff --git a/docs/docs/http-api.mdx b/docs/docs/http-api.mdx
index 794e66bd2d30..d96e7476018d 100644
--- a/docs/docs/http-api.mdx
+++ b/docs/docs/http-api.mdx
@@ -66,6 +66,18 @@ rasa run \
   --auth-token thisismysecret
 ```
 
+You can also use the environment variable `AUTH_TOKEN` to set the auth token:
+```
+AUTH_TOKEN=thisismysecret
+```
+
+:::tip Security best practice
+
+We recommend that you use environment variables to store
+and share sensitive information such as tokens and secrets
+when deploying Rasa as a Docker container, as they will not be stored in your shell history.
+:::
+
 Any clients sending requests to the server
 must pass the token as a query parameter, or the request will be rejected. For
 example, to fetch a tracker from the server:
@@ -85,6 +97,18 @@ rasa run \
   --jwt-secret thisismysecret
 ```
 
+You can also use the environment variable `JWT_SECRET` to set the JWT secret:
+```
+JWT_SECRET=thisismysecret
+```
+
+:::tip Security best practice
+
+We recommend that you use environment variables to store
+and share sensitive information such as tokens and secrets
+when deploying Rasa as a Docker container, as they will not be stored in your shell history.
+:::
+
 If you want to sign a JWT token with asymmetric algorithms, you can specify the
 JWT private key to the `--jwt-private-key` CLI argument.
 You must pass the public key to the `--jwt-secret` argument, and
 also specify the algorithm to the `--jwt-method` argument:
@@ -97,6 +121,20 @@ rasa run \
   --jwt-method RS512
 ```
 
+You can also use environment variables to configure JWT:
+```
+JWT_SECRET=<public_key>
+JWT_PRIVATE_KEY=<private_key>
+JWT_METHOD=RS512
+```
+
+:::tip Security best practice
+
+We recommend that you use environment variables to store
+and share sensitive information such as tokens and secrets
+when deploying Rasa as a Docker container, as they will not be stored in your shell history.
+:::
+
 Client requests to the server will need to contain a valid JWT token in
 the `Authorization` header that is signed using this secret
 and the `HS256` algorithm e.g.
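
As a rough illustration of the client side described in the hunks above, the following sketch signs a JWT with the shared `HS256` secret and fetches a conversation tracker. It assumes the third-party `PyJWT` and `requests` packages and the `user`/`username`/`role` payload layout Rasa expects; treat the payload shape as an assumption to verify against your server's version.

```python
import jwt  # PyJWT
import requests

JWT_SECRET = "thisismysecret"  # same value as --jwt-secret / JWT_SECRET

# Assumed payload layout: Rasa reads the username and role from a "user" claim.
token = jwt.encode(
    {"user": {"username": "some_user", "role": "admin"}},
    JWT_SECRET,
    algorithm="HS256",
)

# Authenticate via the Authorization header when fetching the tracker.
response = requests.get(
    "http://localhost:5005/conversations/some_user/tracker",
    headers={"Authorization": f"Bearer {token}"},
)
print(response.status_code, response.json())
```
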
diff --git a/docs/docs/llms/large-language-models.mdx b/docs/docs/llms/large-language-models.mdx
index 363a761b5854..6d599854ae35 100644
--- a/docs/docs/llms/large-language-models.mdx
+++ b/docs/docs/llms/large-language-models.mdx
@@ -6,9 +6,12 @@ className: hide
 abstract:
 ---
 
+import RasaProLabel from "@theme/RasaProLabel";
 import RasaLabsLabel from "@theme/RasaLabsLabel";
 import RasaLabsBanner from "@theme/RasaLabsBanner";
 
+<RasaProLabel />
+
diff --git a/docs/docs/llms/llm-custom.mdx b/docs/docs/llms/llm-custom.mdx
index c3a7716f69f9..f2f93ba74ed3 100644
--- a/docs/docs/llms/llm-custom.mdx
+++ b/docs/docs/llms/llm-custom.mdx
@@ -5,9 +5,12 @@ title: Customizing LLM based Components
 abstract:
 ---
 
+import RasaProLabel from "@theme/RasaProLabel";
 import RasaLabsLabel from "@theme/RasaLabsLabel";
 import RasaLabsBanner from "@theme/RasaLabsBanner";
 
+<RasaProLabel />
+
diff --git a/docs/docs/llms/llm-intent.mdx b/docs/docs/llms/llm-intent.mdx
index 5ac51fc7edd8..73d8564bba27 100644
--- a/docs/docs/llms/llm-intent.mdx
+++ b/docs/docs/llms/llm-intent.mdx
@@ -7,10 +7,13 @@ abstract: |
   a method called retrieval augmented generation (RAG).
 ---
 
+import RasaProLabel from "@theme/RasaProLabel";
 import RasaLabsLabel from "@theme/RasaLabsLabel";
 import RasaLabsBanner from "@theme/RasaLabsBanner";
 import LLMIntentClassifierImg from "./llm-IntentClassifier-docs.jpg";
 
+<RasaProLabel />
+
diff --git a/docs/docs/llms/llm-intentless.mdx b/docs/docs/llms/llm-intentless.mdx
index d9c7f3a41897..90af26e16f90 100644
--- a/docs/docs/llms/llm-intentless.mdx
+++ b/docs/docs/llms/llm-intentless.mdx
@@ -4,14 +4,17 @@ sidebar_label: Intentless Dialogues with LLMs
 title: Intentless Policy - LLMs for intentless dialogues
 abstract: |
   The intentless policy uses large language models to drive a conversation
-  forward without relying on intent predictions.
+  forward without relying on intent predictions.
 ---
 
+import RasaProLabel from "@theme/RasaProLabel";
 import RasaLabsLabel from "@theme/RasaLabsLabel";
 import RasaLabsBanner from "@theme/RasaLabsBanner";
 import intentlessPolicyInteraction from "./intentless-policy-interaction.png";
 import intentlessMeaningCompounds from "./intentless-meaning-compounds.png";
 
+<RasaProLabel />
+
diff --git a/docs/docs/llms/llm-nlg.mdx b/docs/docs/llms/llm-nlg.mdx
index 40eb32113fa5..9e093562108e 100644
--- a/docs/docs/llms/llm-nlg.mdx
+++ b/docs/docs/llms/llm-nlg.mdx
@@ -8,9 +8,12 @@ abstract: |
   of the conversation into account.
--- +import RasaProLabel from "@theme/RasaProLabel"; import RasaLabsLabel from "@theme/RasaLabsLabel"; import RasaLabsBanner from "@theme/RasaLabsBanner"; + + diff --git a/docs/docs/llms/llm-setup.mdx b/docs/docs/llms/llm-setup.mdx index 36b8a004c911..4e9cd3022acf 100644 --- a/docs/docs/llms/llm-setup.mdx +++ b/docs/docs/llms/llm-setup.mdx @@ -3,15 +3,18 @@ id: llm-setup sidebar_label: Setting up LLMs title: Setting up LLMs abstract: | - Instructions on how to setup and configure Large Language Models from - OpenAI, Cohere, and other providers. + Instructions on how to setup and configure Large Language Models from + OpenAI, Cohere, and other providers. Here you'll learn what you need to configure and how you can customize LLMs to work efficiently with your specific use case. --- +import RasaProLabel from "@theme/RasaProLabel"; import RasaLabsLabel from "@theme/RasaLabsLabel"; import RasaLabsBanner from "@theme/RasaLabsBanner"; + + diff --git a/docs/docs/lock-stores.mdx b/docs/docs/lock-stores.mdx index 5889d953eac8..8f0a1b66a285 100644 --- a/docs/docs/lock-stores.mdx +++ b/docs/docs/lock-stores.mdx @@ -156,6 +156,8 @@ The `ConcurrentRedisLockStore` recreates the `TicketLock` from the persisted `Ti - `key_prefix` (default: `None`): The prefix to prepend to lock store keys. Must be alphanumeric + - `username` (default: `None`): Username used for authentication + - `password` (default: `None`): Password used for authentication (`None` equals no authentication) diff --git a/docs/docs/tracker-stores.mdx b/docs/docs/tracker-stores.mdx index f3ac71f4a2d1..f0485583c451 100644 --- a/docs/docs/tracker-stores.mdx +++ b/docs/docs/tracker-stores.mdx @@ -219,6 +219,8 @@ To set up Rasa with Redis the following steps are required: * `key_prefix` (default: `None`): The prefix to prepend to tracker store keys. Must be alphanumeric +* `username` (default: `None`): Username used for authentication + * `password` (default: `None`): Password used for authentication (`None` equals no authentication) diff --git a/docs/themes/theme-custom/theme/RasaLabsBanner/index.jsx b/docs/themes/theme-custom/theme/RasaLabsBanner/index.jsx index 361506d6a34c..247be3c5d7b3 100644 --- a/docs/themes/theme-custom/theme/RasaLabsBanner/index.jsx +++ b/docs/themes/theme-custom/theme/RasaLabsBanner/index.jsx @@ -1,17 +1,18 @@ import * as React from 'react'; import clsx from 'clsx'; +import CodeBlock from '@theme/CodeBlock'; import styles from './styles.module.css'; function RasaLabsBanner({isLoading, ...props}) { return ( <> -
-
+
+
- + - + Rasa Labs access {props.version && <> @@ -20,19 +21,26 @@ function RasaLabsBanner({isLoading, ...props}) { }
-
+

Rasa Labs features are experimental. We introduce experimental - features to co-create with our customers. If you are interested in using this feature, + features to co-create with our customers. To find out more about how to participate + in our Labs program visit our {' '} - - please contact us + + Rasa Labs page . +
+
+ We are continuously improving Rasa Labs features based on customer feedback. To benefit from the latest + bug fixes and feature improvements, please install the latest pre-release using: - These features might be changed or removed in the future. + + pip install 'rasa-plus>3.6' --pre --upgrade +

-
+
) } diff --git a/docs/themes/theme-custom/theme/RasaLabsLabel/styles.module.css b/docs/themes/theme-custom/theme/RasaLabsLabel/styles.module.css index 4a6e8636517a..f2f0a4517ae4 100644 --- a/docs/themes/theme-custom/theme/RasaLabsLabel/styles.module.css +++ b/docs/themes/theme-custom/theme/RasaLabsLabel/styles.module.css @@ -5,6 +5,8 @@ padding: 2px 12px; font-size: 15px !important; font-weight: 600; + margin-left: 8px; + margin-top: 0px !important; display: inline-block; } diff --git a/poetry.lock b/poetry.lock index 34286a5d445a..e1007fb635cf 100644 --- a/poetry.lock +++ b/poetry.lock @@ -1016,6 +1016,7 @@ files = [ {file = "contourpy-1.1.0-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:18a64814ae7bce73925131381603fff0116e2df25230dfc80d6d690aa6e20b37"}, {file = "contourpy-1.1.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:90c81f22b4f572f8a2110b0b741bb64e5a6427e0a198b2cdc1fbaf85f352a3aa"}, {file = "contourpy-1.1.0-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:53cc3a40635abedbec7f1bde60f8c189c49e84ac180c665f2cd7c162cc454baa"}, + {file = "contourpy-1.1.0-cp310-cp310-win32.whl", hash = "sha256:9b2dd2ca3ac561aceef4c7c13ba654aaa404cf885b187427760d7f7d4c57cff8"}, {file = "contourpy-1.1.0-cp310-cp310-win_amd64.whl", hash = "sha256:1f795597073b09d631782e7245016a4323cf1cf0b4e06eef7ea6627e06a37ff2"}, {file = "contourpy-1.1.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:0b7b04ed0961647691cfe5d82115dd072af7ce8846d31a5fac6c142dcce8b882"}, {file = "contourpy-1.1.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:27bc79200c742f9746d7dd51a734ee326a292d77e7d94c8af6e08d1e6c15d545"}, @@ -1024,6 +1025,7 @@ files = [ {file = "contourpy-1.1.0-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:e5cec36c5090e75a9ac9dbd0ff4a8cf7cecd60f1b6dc23a374c7d980a1cd710e"}, {file = "contourpy-1.1.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1f0cbd657e9bde94cd0e33aa7df94fb73c1ab7799378d3b3f902eb8eb2e04a3a"}, {file = "contourpy-1.1.0-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:181cbace49874f4358e2929aaf7ba84006acb76694102e88dd15af861996c16e"}, + {file = "contourpy-1.1.0-cp311-cp311-win32.whl", hash = "sha256:edb989d31065b1acef3828a3688f88b2abb799a7db891c9e282df5ec7e46221b"}, {file = "contourpy-1.1.0-cp311-cp311-win_amd64.whl", hash = "sha256:fb3b7d9e6243bfa1efb93ccfe64ec610d85cfe5aec2c25f97fbbd2e58b531256"}, {file = "contourpy-1.1.0-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:bcb41692aa09aeb19c7c213411854402f29f6613845ad2453d30bf421fe68fed"}, {file = "contourpy-1.1.0-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:5d123a5bc63cd34c27ff9c7ac1cd978909e9c71da12e05be0231c608048bb2ae"}, @@ -1032,6 +1034,7 @@ files = [ {file = "contourpy-1.1.0-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:317267d915490d1e84577924bd61ba71bf8681a30e0d6c545f577363157e5e94"}, {file = "contourpy-1.1.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d551f3a442655f3dcc1285723f9acd646ca5858834efeab4598d706206b09c9f"}, {file = "contourpy-1.1.0-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:e7a117ce7df5a938fe035cad481b0189049e8d92433b4b33aa7fc609344aafa1"}, + {file = "contourpy-1.1.0-cp38-cp38-win32.whl", hash = "sha256:108dfb5b3e731046a96c60bdc46a1a0ebee0760418951abecbe0fc07b5b93b27"}, {file = "contourpy-1.1.0-cp38-cp38-win_amd64.whl", hash = "sha256:d4f26b25b4f86087e7d75e63212756c38546e70f2a92d2be44f80114826e1cd4"}, {file = 
"contourpy-1.1.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:bc00bb4225d57bff7ebb634646c0ee2a1298402ec10a5fe7af79df9a51c1bfd9"}, {file = "contourpy-1.1.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:189ceb1525eb0655ab8487a9a9c41f42a73ba52d6789754788d1883fb06b2d8a"}, @@ -1040,6 +1043,7 @@ files = [ {file = "contourpy-1.1.0-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:143dde50520a9f90e4a2703f367cf8ec96a73042b72e68fcd184e1279962eb6f"}, {file = "contourpy-1.1.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e94bef2580e25b5fdb183bf98a2faa2adc5b638736b2c0a4da98691da641316a"}, {file = "contourpy-1.1.0-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:ed614aea8462735e7d70141374bd7650afd1c3f3cb0c2dbbcbe44e14331bf002"}, + {file = "contourpy-1.1.0-cp39-cp39-win32.whl", hash = "sha256:71551f9520f008b2950bef5f16b0e3587506ef4f23c734b71ffb7b89f8721999"}, {file = "contourpy-1.1.0-cp39-cp39-win_amd64.whl", hash = "sha256:438ba416d02f82b692e371858143970ed2eb6337d9cdbbede0d8ad9f3d7dd17d"}, {file = "contourpy-1.1.0-pp38-pypy38_pp73-macosx_10_9_x86_64.whl", hash = "sha256:a698c6a7a432789e587168573a864a7ea374c6be8d4f31f9d87c001d5a843493"}, {file = "contourpy-1.1.0-pp38-pypy38_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:397b0ac8a12880412da3551a8cb5a187d3298a72802b45a3bd1805e204ad8439"}, @@ -2122,6 +2126,7 @@ files = [ {file = "greenlet-2.0.2-cp27-cp27m-win32.whl", hash = "sha256:6c3acb79b0bfd4fe733dff8bc62695283b57949ebcca05ae5c129eb606ff2d74"}, {file = "greenlet-2.0.2-cp27-cp27m-win_amd64.whl", hash = "sha256:283737e0da3f08bd637b5ad058507e578dd462db259f7f6e4c5c365ba4ee9343"}, {file = "greenlet-2.0.2-cp27-cp27mu-manylinux2010_x86_64.whl", hash = "sha256:d27ec7509b9c18b6d73f2f5ede2622441de812e7b1a80bbd446cb0633bd3d5ae"}, + {file = "greenlet-2.0.2-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:d967650d3f56af314b72df7089d96cda1083a7fc2da05b375d2bc48c82ab3f3c"}, {file = "greenlet-2.0.2-cp310-cp310-macosx_11_0_x86_64.whl", hash = "sha256:30bcf80dda7f15ac77ba5af2b961bdd9dbc77fd4ac6105cee85b0d0a5fcf74df"}, {file = "greenlet-2.0.2-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:26fbfce90728d82bc9e6c38ea4d038cba20b7faf8a0ca53a9c07b67318d46088"}, {file = "greenlet-2.0.2-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:9190f09060ea4debddd24665d6804b995a9c122ef5917ab26e1566dcc712ceeb"}, @@ -2130,6 +2135,7 @@ files = [ {file = "greenlet-2.0.2-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:76ae285c8104046b3a7f06b42f29c7b73f77683df18c49ab5af7983994c2dd91"}, {file = "greenlet-2.0.2-cp310-cp310-win_amd64.whl", hash = "sha256:2d4686f195e32d36b4d7cf2d166857dbd0ee9f3d20ae349b6bf8afc8485b3645"}, {file = "greenlet-2.0.2-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:c4302695ad8027363e96311df24ee28978162cdcdd2006476c43970b384a244c"}, + {file = "greenlet-2.0.2-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:d4606a527e30548153be1a9f155f4e283d109ffba663a15856089fb55f933e47"}, {file = "greenlet-2.0.2-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c48f54ef8e05f04d6eff74b8233f6063cb1ed960243eacc474ee73a2ea8573ca"}, {file = "greenlet-2.0.2-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:a1846f1b999e78e13837c93c778dcfc3365902cfb8d1bdb7dd73ead37059f0d0"}, {file = "greenlet-2.0.2-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = 
"sha256:3a06ad5312349fec0ab944664b01d26f8d1f05009566339ac6f63f56589bc1a2"}, @@ -2159,6 +2165,7 @@ files = [ {file = "greenlet-2.0.2-cp37-cp37m-win32.whl", hash = "sha256:3f6ea9bd35eb450837a3d80e77b517ea5bc56b4647f5502cd28de13675ee12f7"}, {file = "greenlet-2.0.2-cp37-cp37m-win_amd64.whl", hash = "sha256:7492e2b7bd7c9b9916388d9df23fa49d9b88ac0640db0a5b4ecc2b653bf451e3"}, {file = "greenlet-2.0.2-cp38-cp38-macosx_10_15_x86_64.whl", hash = "sha256:b864ba53912b6c3ab6bcb2beb19f19edd01a6bfcbdfe1f37ddd1778abfe75a30"}, + {file = "greenlet-2.0.2-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:1087300cf9700bbf455b1b97e24db18f2f77b55302a68272c56209d5587c12d1"}, {file = "greenlet-2.0.2-cp38-cp38-manylinux2010_x86_64.whl", hash = "sha256:ba2956617f1c42598a308a84c6cf021a90ff3862eddafd20c3333d50f0edb45b"}, {file = "greenlet-2.0.2-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:fc3a569657468b6f3fb60587e48356fe512c1754ca05a564f11366ac9e306526"}, {file = "greenlet-2.0.2-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:8eab883b3b2a38cc1e050819ef06a7e6344d4a990d24d45bc6f2cf959045a45b"}, @@ -2167,6 +2174,7 @@ files = [ {file = "greenlet-2.0.2-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:b0ef99cdbe2b682b9ccbb964743a6aca37905fda5e0452e5ee239b1654d37f2a"}, {file = "greenlet-2.0.2-cp38-cp38-win32.whl", hash = "sha256:b80f600eddddce72320dbbc8e3784d16bd3fb7b517e82476d8da921f27d4b249"}, {file = "greenlet-2.0.2-cp38-cp38-win_amd64.whl", hash = "sha256:4d2e11331fc0c02b6e84b0d28ece3a36e0548ee1a1ce9ddde03752d9b79bba40"}, + {file = "greenlet-2.0.2-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:8512a0c38cfd4e66a858ddd1b17705587900dd760c6003998e9472b77b56d417"}, {file = "greenlet-2.0.2-cp39-cp39-macosx_11_0_x86_64.whl", hash = "sha256:88d9ab96491d38a5ab7c56dd7a3cc37d83336ecc564e4e8816dbed12e5aaefc8"}, {file = "greenlet-2.0.2-cp39-cp39-manylinux2010_x86_64.whl", hash = "sha256:561091a7be172ab497a3527602d467e2b3fbe75f9e783d8b8ce403fa414f71a6"}, {file = "greenlet-2.0.2-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:971ce5e14dc5e73715755d0ca2975ac88cfdaefcaab078a284fea6cfabf866df"}, @@ -2926,6 +2934,16 @@ files = [ {file = "MarkupSafe-2.1.3-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:5bbe06f8eeafd38e5d0a4894ffec89378b6c6a625ff57e3028921f8ff59318ac"}, {file = "MarkupSafe-2.1.3-cp311-cp311-win32.whl", hash = "sha256:dd15ff04ffd7e05ffcb7fe79f1b98041b8ea30ae9234aed2a9168b5797c3effb"}, {file = "MarkupSafe-2.1.3-cp311-cp311-win_amd64.whl", hash = "sha256:134da1eca9ec0ae528110ccc9e48041e0828d79f24121a1a146161103c76e686"}, + {file = "MarkupSafe-2.1.3-cp312-cp312-macosx_10_9_universal2.whl", hash = "sha256:f698de3fd0c4e6972b92290a45bd9b1536bffe8c6759c62471efaa8acb4c37bc"}, + {file = "MarkupSafe-2.1.3-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:aa57bd9cf8ae831a362185ee444e15a93ecb2e344c8e52e4d721ea3ab6ef1823"}, + {file = "MarkupSafe-2.1.3-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ffcc3f7c66b5f5b7931a5aa68fc9cecc51e685ef90282f4a82f0f5e9b704ad11"}, + {file = "MarkupSafe-2.1.3-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:47d4f1c5f80fc62fdd7777d0d40a2e9dda0a05883ab11374334f6c4de38adffd"}, + {file = "MarkupSafe-2.1.3-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:1f67c7038d560d92149c060157d623c542173016c4babc0c1913cca0564b9939"}, + {file = "MarkupSafe-2.1.3-cp312-cp312-musllinux_1_1_aarch64.whl", hash = 
"sha256:9aad3c1755095ce347e26488214ef77e0485a3c34a50c5a5e2471dff60b9dd9c"}, + {file = "MarkupSafe-2.1.3-cp312-cp312-musllinux_1_1_i686.whl", hash = "sha256:14ff806850827afd6b07a5f32bd917fb7f45b046ba40c57abdb636674a8b559c"}, + {file = "MarkupSafe-2.1.3-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:8f9293864fe09b8149f0cc42ce56e3f0e54de883a9de90cd427f191c346eb2e1"}, + {file = "MarkupSafe-2.1.3-cp312-cp312-win32.whl", hash = "sha256:715d3562f79d540f251b99ebd6d8baa547118974341db04f5ad06d5ea3eb8007"}, + {file = "MarkupSafe-2.1.3-cp312-cp312-win_amd64.whl", hash = "sha256:1b8dd8c3fd14349433c79fa8abeb573a55fc0fdd769133baac1f5e07abf54aeb"}, {file = "MarkupSafe-2.1.3-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:8e254ae696c88d98da6555f5ace2279cf7cd5b3f52be2b5cf97feafe883b58d2"}, {file = "MarkupSafe-2.1.3-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:cb0932dc158471523c9637e807d9bfb93e06a95cbf010f1a38b98623b929ef2b"}, {file = "MarkupSafe-2.1.3-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9402b03f1a1b4dc4c19845e5c749e3ab82d5078d16a2a4c2cd2df62d57bb0707"}, @@ -4733,6 +4751,7 @@ files = [ {file = "PyYAML-6.0.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:69b023b2b4daa7548bcfbd4aa3da05b3a74b772db9e23b982788168117739938"}, {file = "PyYAML-6.0.1-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:81e0b275a9ecc9c0c0c07b4b90ba548307583c125f54d5b6946cfee6360c733d"}, {file = "PyYAML-6.0.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ba336e390cd8e4d1739f42dfe9bb83a3cc2e80f567d8805e11b46f4a943f5515"}, + {file = "PyYAML-6.0.1-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:326c013efe8048858a6d312ddd31d56e468118ad4cdeda36c719bf5bb6192290"}, {file = "PyYAML-6.0.1-cp310-cp310-win32.whl", hash = "sha256:bd4af7373a854424dabd882decdc5579653d7868b8fb26dc7d0e99f823aa5924"}, {file = "PyYAML-6.0.1-cp310-cp310-win_amd64.whl", hash = "sha256:fd1592b3fdf65fff2ad0004b5e363300ef59ced41c2e6b3a99d4089fa8c5435d"}, {file = "PyYAML-6.0.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:6965a7bc3cf88e5a1c3bd2e0b5c22f8d677dc88a455344035f03399034eb3007"}, @@ -4740,8 +4759,15 @@ files = [ {file = "PyYAML-6.0.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:42f8152b8dbc4fe7d96729ec2b99c7097d656dc1213a3229ca5383f973a5ed6d"}, {file = "PyYAML-6.0.1-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:062582fca9fabdd2c8b54a3ef1c978d786e0f6b3a1510e0ac93ef59e0ddae2bc"}, {file = "PyYAML-6.0.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d2b04aac4d386b172d5b9692e2d2da8de7bfb6c387fa4f801fbf6fb2e6ba4673"}, + {file = "PyYAML-6.0.1-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:e7d73685e87afe9f3b36c799222440d6cf362062f78be1013661b00c5c6f678b"}, {file = "PyYAML-6.0.1-cp311-cp311-win32.whl", hash = "sha256:1635fd110e8d85d55237ab316b5b011de701ea0f29d07611174a1b42f1444741"}, {file = "PyYAML-6.0.1-cp311-cp311-win_amd64.whl", hash = "sha256:bf07ee2fef7014951eeb99f56f39c9bb4af143d8aa3c21b1677805985307da34"}, + {file = "PyYAML-6.0.1-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:855fb52b0dc35af121542a76b9a84f8d1cd886ea97c84703eaa6d88e37a2ad28"}, + {file = "PyYAML-6.0.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:40df9b996c2b73138957fe23a16a4f0ba614f4c0efce1e9406a184b6d07fa3a9"}, + {file = "PyYAML-6.0.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = 
"sha256:6c22bec3fbe2524cde73d7ada88f6566758a8f7227bfbf93a408a9d86bcc12a0"}, + {file = "PyYAML-6.0.1-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:8d4e9c88387b0f5c7d5f281e55304de64cf7f9c0021a3525bd3b1c542da3b0e4"}, + {file = "PyYAML-6.0.1-cp312-cp312-win32.whl", hash = "sha256:d483d2cdf104e7c9fa60c544d92981f12ad66a457afae824d146093b8c294c54"}, + {file = "PyYAML-6.0.1-cp312-cp312-win_amd64.whl", hash = "sha256:0d3304d8c0adc42be59c5f8a4d9e3d7379e6955ad754aa9d6ab7a398b59dd1df"}, {file = "PyYAML-6.0.1-cp36-cp36m-macosx_10_9_x86_64.whl", hash = "sha256:50550eb667afee136e9a77d6dc71ae76a44df8b3e51e41b77f6de2932bfe0f47"}, {file = "PyYAML-6.0.1-cp36-cp36m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1fe35611261b29bd1de0070f0b2f47cb6ff71fa6595c077e42bd0c419fa27b98"}, {file = "PyYAML-6.0.1-cp36-cp36m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:704219a11b772aea0d8ecd7058d0082713c3562b4e271b849ad7dc4a5c90c13c"}, @@ -4758,6 +4784,7 @@ files = [ {file = "PyYAML-6.0.1-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a0cd17c15d3bb3fa06978b4e8958dcdc6e0174ccea823003a106c7d4d7899ac5"}, {file = "PyYAML-6.0.1-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:28c119d996beec18c05208a8bd78cbe4007878c6dd15091efb73a30e90539696"}, {file = "PyYAML-6.0.1-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7e07cbde391ba96ab58e532ff4803f79c4129397514e1413a7dc761ccd755735"}, + {file = "PyYAML-6.0.1-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:49a183be227561de579b4a36efbb21b3eab9651dd81b1858589f796549873dd6"}, {file = "PyYAML-6.0.1-cp38-cp38-win32.whl", hash = "sha256:184c5108a2aca3c5b3d3bf9395d50893a7ab82a38004c8f61c258d4428e80206"}, {file = "PyYAML-6.0.1-cp38-cp38-win_amd64.whl", hash = "sha256:1e2722cc9fbb45d9b87631ac70924c11d3a401b2d7f410cc0e3bbf249f2dca62"}, {file = "PyYAML-6.0.1-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:9eb6caa9a297fc2c2fb8862bc5370d0303ddba53ba97e71f08023b6cd73d16a8"}, @@ -4765,6 +4792,7 @@ files = [ {file = "PyYAML-6.0.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5773183b6446b2c99bb77e77595dd486303b4faab2b086e7b17bc6bef28865f6"}, {file = "PyYAML-6.0.1-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:b786eecbdf8499b9ca1d697215862083bd6d2a99965554781d0d8d1ad31e13a0"}, {file = "PyYAML-6.0.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:bc1bf2925a1ecd43da378f4db9e4f799775d6367bdb94671027b73b393a7c42c"}, + {file = "PyYAML-6.0.1-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:04ac92ad1925b2cff1db0cfebffb6ffc43457495c9b3c39d3fcae417d7125dc5"}, {file = "PyYAML-6.0.1-cp39-cp39-win32.whl", hash = "sha256:faca3bdcf85b2fc05d06ff3fbc1f83e1391b3e724afa3feba7d13eeab355484c"}, {file = "PyYAML-6.0.1-cp39-cp39-win_amd64.whl", hash = "sha256:510c9deebc5c0225e8c96813043e62b680ba2f9c50a08d3724c7f28a747d1486"}, {file = "PyYAML-6.0.1.tar.gz", hash = "sha256:bfdf460b1736c775f2ba9f6a92bca30bc2095067b8a9d77876d1fad6cc3b4a43"}, @@ -4790,13 +4818,13 @@ docs = ["Sphinx (>=3.3,<4.0)", "sphinx-autobuild (>=2020.9.1,<2021.0.0)", "sphin [[package]] name = "randomname" -version = "0.1.5" +version = "0.2.1" description = "Generate random adj-noun names like docker and github." 
category = "main" optional = false python-versions = "*" files = [ - {file = "randomname-0.1.5.tar.gz", hash = "sha256:e10d14ea10895ee5bc417bdcc6d955e0b586f3bc67094ab87afcf8dcac23ab92"}, + {file = "randomname-0.2.1.tar.gz", hash = "sha256:b79b98302ba4479164b0a4f87995b7bebbd1d91012aeda483341e3e58ace520e"}, ] [package.dependencies] @@ -5155,29 +5183,29 @@ files = [ [[package]] name = "ruff" -version = "0.0.255" +version = "0.0.291" description = "An extremely fast Python linter, written in Rust." category = "dev" optional = false python-versions = ">=3.7" files = [ - {file = "ruff-0.0.255-py3-none-macosx_10_7_x86_64.whl", hash = "sha256:b2d71fb6a7e50501a2473864acffc85dee6b750c25db198f7e71fe1dbbff1aad"}, - {file = "ruff-0.0.255-py3-none-macosx_10_9_x86_64.macosx_11_0_arm64.macosx_10_9_universal2.whl", hash = "sha256:6c97d746861a6010f941179e84bba9feb8a871815667471d9ed6beb98d45c252"}, - {file = "ruff-0.0.255-py3-none-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:9a7fa60085079b91a298b963361be9b1b1c724582af6c84be954cbabdbd9309a"}, - {file = "ruff-0.0.255-py3-none-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:c089f7141496334ab5a127b54ce55e41f0d6714e68a4453a1e09d2204cdea8c3"}, - {file = "ruff-0.0.255-py3-none-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:0423908caa7d437a416b853214565b9c33bbd1106c4f88147982216dddcbbd96"}, - {file = "ruff-0.0.255-py3-none-manylinux_2_17_ppc64.manylinux2014_ppc64.whl", hash = "sha256:981493e92547cacbb8e0874904ec049fe744507ee890dc8736caf89a8864f9a7"}, - {file = "ruff-0.0.255-py3-none-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:59d5193d2aedb35db180824462b374dbcfc306b2e76076245088afa6e5837df2"}, - {file = "ruff-0.0.255-py3-none-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:dd5e00733c9d160c8a34a22e62b390da9d1e9f326676402421cb8c1236beefc3"}, - {file = "ruff-0.0.255-py3-none-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:694418cf41838bd19c6229e4e1b2d04505b1e6b86fe3ab81165484fc96d36f01"}, - {file = "ruff-0.0.255-py3-none-musllinux_1_2_aarch64.whl", hash = "sha256:5d0408985c9777369daebb5d3340a99e9f7294bdd7120642239261508185cf89"}, - {file = "ruff-0.0.255-py3-none-musllinux_1_2_armv7l.whl", hash = "sha256:abd6376ef9d12f370d95a8c7c98682fbb9bfedfba59f40e84a816fef8ddcb8de"}, - {file = "ruff-0.0.255-py3-none-musllinux_1_2_i686.whl", hash = "sha256:f9b1a5df0bc09193cbef58a6f78e4a9a0b058a4f9733c0442866d078006d1bb9"}, - {file = "ruff-0.0.255-py3-none-musllinux_1_2_x86_64.whl", hash = "sha256:6a25c5f4ff087445b2e1bbcb9963f2ae7c868d65e4a8d5f84c36c12f71571179"}, - {file = "ruff-0.0.255-py3-none-win32.whl", hash = "sha256:1ff87a8310354f9f1a099625e54a27fdd6756d9cd2a40b45922f2e943daf982d"}, - {file = "ruff-0.0.255-py3-none-win_amd64.whl", hash = "sha256:f3d8416be618f023f93ec4fd6ee3048585ef85dba9563b2a7e38fc7e5131d5b1"}, - {file = "ruff-0.0.255-py3-none-win_arm64.whl", hash = "sha256:8ba124819624145d7b6b53add40c367c44318893215ffc1bfe3d72e0225a1c9c"}, - {file = "ruff-0.0.255.tar.gz", hash = "sha256:f9eb1d3b2eecbeedae419fa494c4e2a5e4484baf93a1ce0f81eddb005e1919c5"}, + {file = "ruff-0.0.291-py3-none-macosx_10_7_x86_64.whl", hash = "sha256:b97d0d7c136a85badbc7fd8397fdbb336e9409b01c07027622f28dcd7db366f2"}, + {file = "ruff-0.0.291-py3-none-macosx_10_9_x86_64.macosx_11_0_arm64.macosx_10_9_universal2.whl", hash = "sha256:6ab44ea607967171e18aa5c80335237be12f3a1523375fa0cede83c5cf77feb4"}, + {file = "ruff-0.0.291-py3-none-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = 
"sha256:a04b384f2d36f00d5fb55313d52a7d66236531195ef08157a09c4728090f2ef0"}, + {file = "ruff-0.0.291-py3-none-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:b727c219b43f903875b7503a76c86237a00d1a39579bb3e21ce027eec9534051"}, + {file = "ruff-0.0.291-py3-none-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:87671e33175ae949702774071b35ed4937da06f11851af75cd087e1b5a488ac4"}, + {file = "ruff-0.0.291-py3-none-manylinux_2_17_ppc64.manylinux2014_ppc64.whl", hash = "sha256:b75f5801547f79b7541d72a211949754c21dc0705c70eddf7f21c88a64de8b97"}, + {file = "ruff-0.0.291-py3-none-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:b09b94efdcd162fe32b472b2dd5bf1c969fcc15b8ff52f478b048f41d4590e09"}, + {file = "ruff-0.0.291-py3-none-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:8d5b56bc3a2f83a7a1d7f4447c54d8d3db52021f726fdd55d549ca87bca5d747"}, + {file = "ruff-0.0.291-py3-none-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:13f0d88e5f367b2dc8c7d90a8afdcfff9dd7d174e324fd3ed8e0b5cb5dc9b7f6"}, + {file = "ruff-0.0.291-py3-none-musllinux_1_2_aarch64.whl", hash = "sha256:b3eeee1b1a45a247758ecdc3ab26c307336d157aafc61edb98b825cadb153df3"}, + {file = "ruff-0.0.291-py3-none-musllinux_1_2_armv7l.whl", hash = "sha256:6c06006350c3bb689765d71f810128c9cdf4a1121fd01afc655c87bab4fb4f83"}, + {file = "ruff-0.0.291-py3-none-musllinux_1_2_i686.whl", hash = "sha256:fd17220611047de247b635596e3174f3d7f2becf63bd56301fc758778df9b629"}, + {file = "ruff-0.0.291-py3-none-musllinux_1_2_x86_64.whl", hash = "sha256:5383ba67ad360caf6060d09012f1fb2ab8bd605ab766d10ca4427a28ab106e0b"}, + {file = "ruff-0.0.291-py3-none-win32.whl", hash = "sha256:1d5f0616ae4cdc7a938b493b6a1a71c8a47d0300c0d65f6e41c281c2f7490ad3"}, + {file = "ruff-0.0.291-py3-none-win_amd64.whl", hash = "sha256:8a69bfbde72db8ca1c43ee3570f59daad155196c3fbe357047cd9b77de65f15b"}, + {file = "ruff-0.0.291-py3-none-win_arm64.whl", hash = "sha256:d867384a4615b7f30b223a849b52104214442b5ba79b473d7edd18da3cde22d6"}, + {file = "ruff-0.0.291.tar.gz", hash = "sha256:c61109661dde9db73469d14a82b42a88c7164f731e6a3b0042e71394c1c7ceed"}, ] [[package]] @@ -7505,4 +7533,4 @@ transformers = ["sentencepiece", "transformers"] [metadata] lock-version = "2.0" python-versions = ">=3.8.1,<3.11" -content-hash = "8b561c94151793f7e8b2a6b5b61a4e942ebbfa048e4b1338756223d166286bc4" +content-hash = "2f9012af12d212e7f25f198271e17bc0091869d3b868ab8bb63a13bc2aa0357e" diff --git a/pyproject.toml b/pyproject.toml index a0a5d160f10b..fe2feb58044c 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -4,12 +4,12 @@ build-backend = "poetry.core.masonry.api" [tool.black] line-length = 88 -target-version = [ "py37", "py38", "py39", "py310",] +target-version = [ "py38", "py39", "py310",] exclude = "((.eggs | .git | .pytest_cache | build | dist))" [tool.poetry] name = "rasa" -version = "3.8.0a7" +version = "3.8.0a11" description = "Open source machine learning framework to automate text- and voice-based conversations: NLU, dialogue management, connect to Slack, Facebook, and more - Create chatbots and voice assistants" authors = [ "Rasa Technologies GmbH ",] maintainers = [ "Tom Bocklisch ",] @@ -18,7 +18,7 @@ repository = "https://github.com/rasahq/rasa" documentation = "https://rasa.com/docs" classifiers = [ "Development Status :: 5 - Production/Stable", "Intended Audience :: Developers", "Topic :: Software Development :: Libraries",] keywords = [ "nlp", "machine-learning", "machine-learning-library", "bot", "bots", "botkit", "rasa 
conversational-agents", "conversational-ai", "chatbot", "chatbot-framework", "bot-framework",] -include = [ "LICENSE.txt", "README.md", "rasa/shared/core/training_data/visualization.html", "rasa/cli/default_config.yml", "rasa/shared/importers/*", "rasa/utils/schemas/*", "rasa/keys", "rasa/core/channels/chat.html", "rasa/dialogue_understanding/classifiers/command_prompt_template.jinja2",] +include = [ "LICENSE.txt", "README.md", "rasa/shared/core/training_data/visualization.html", "rasa/cli/default_config.yml", "rasa/shared/importers/*", "rasa/utils/schemas/*", "rasa/keys", "rasa/dialogue_understanding/classifiers/command_prompt_template.jinja2",] readme = "README.md" [[tool.poetry.source]] name = "internal repository mirroring psycopg binary for macos" @@ -142,7 +142,7 @@ typing-utils = "^0.1.0" tarsafe = ">=0.0.3,<0.0.6" google-auth = "<3" CacheControl = "^0.12.9" -randomname = ">=0.1.5,<0.3.0" +randomname = ">=0.2.1,<0.3.0" pluggy = "^1.0.0" slack-sdk = "^3.19.2" confluent-kafka = ">=1.9.2,<3.0.0" @@ -311,7 +311,7 @@ git = "https://github.com/RasaHQ/pytest-sanic" branch = "fix_signal_issue" [tool.poetry.group.dev.dependencies] -ruff = ">=0.0.255,<0.0.256" +ruff = ">=0.0.255,<0.0.292" docker = "^6.0.1" pytest-cov = "^4.0.0" pytest-asyncio = "^0.20.0" diff --git a/rasa/__main__.py b/rasa/__main__.py index 0c9a0c86bd41..2132e9027c99 100644 --- a/rasa/__main__.py +++ b/rasa/__main__.py @@ -13,7 +13,6 @@ import rasa.utils.tensorflow.environment as tf_env from rasa import version from rasa.cli import ( - chat, data, export, interactive, @@ -64,7 +63,6 @@ def create_argument_parser() -> argparse.ArgumentParser: scaffold.add_subparser(subparsers, parents=parent_parsers) run.add_subparser(subparsers, parents=parent_parsers) shell.add_subparser(subparsers, parents=parent_parsers) - chat.add_subparser(subparsers, parents=parent_parsers) train.add_subparser(subparsers, parents=parent_parsers) interactive.add_subparser(subparsers, parents=parent_parsers) telemetry.add_subparser(subparsers, parents=parent_parsers) diff --git a/rasa/api.py b/rasa/api.py index d946f63c6146..29b8aa31128b 100644 --- a/rasa/api.py +++ b/rasa/api.py @@ -17,8 +17,8 @@ def run( model: "Text", endpoints: "Text", - connector: "Text" = None, - credentials: "Text" = None, + connector: "Optional[Text]" = None, + credentials: "Optional[Text]" = None, **kwargs: "Dict[Text, Any]", ) -> None: """Runs a Rasa model. diff --git a/rasa/cli/arguments/run.py b/rasa/cli/arguments/run.py index f982672700d1..14ecba19ad8e 100644 --- a/rasa/cli/arguments/run.py +++ b/rasa/cli/arguments/run.py @@ -1,8 +1,17 @@ +import os + import argparse from typing import Union from rasa.cli.arguments.default_arguments import add_model_param, add_endpoint_param from rasa.core import constants +from rasa.env import ( + DEFAULT_JWT_METHOD, + JWT_METHOD_ENV, + JWT_SECRET_ENV, + JWT_PRIVATE_KEY_ENV, + AUTH_TOKEN_ENV, +) def set_run_arguments(parser: argparse.ArgumentParser) -> None: @@ -82,16 +91,25 @@ def add_server_arguments(parser: argparse.ArgumentParser) -> None: "yml file.", ) + add_server_settings_arguments(parser) + + +def add_server_settings_arguments(parser: argparse.ArgumentParser) -> None: + """Add arguments for the API server. + + Args: + parser: Argument parser. + """ server_arguments = parser.add_argument_group("Server Settings") add_interface_argument(server_arguments) - add_port_argument(server_arguments) server_arguments.add_argument( "-t", "--auth-token", type=str, + default=os.getenv(AUTH_TOKEN_ENV), help="Enable token based authentication. 
Requests need to provide " "the token to be accepted.", ) @@ -150,10 +168,20 @@ def add_server_arguments(parser: argparse.ArgumentParser) -> None: "--connector", type=str, help="Service to connect to." ) + add_jwt_arguments(parser) + + +def add_jwt_arguments(parser: argparse.ArgumentParser) -> None: + """Adds arguments related to JWT authentication. + + Args: + parser: Argument parser. + """ jwt_auth = parser.add_argument_group("JWT Authentication") jwt_auth.add_argument( "--jwt-secret", type=str, + default=os.getenv(JWT_SECRET_ENV), help="Public key for asymmetric JWT methods or shared secret" "for symmetric methods. Please also make sure to use " "--jwt-method to select the method of the signature, " @@ -163,12 +191,13 @@ def add_server_arguments(parser: argparse.ArgumentParser) -> None: jwt_auth.add_argument( "--jwt-method", type=str, - default="HS256", + default=os.getenv(JWT_METHOD_ENV, DEFAULT_JWT_METHOD), help="Method used for the signature of the JWT authentication payload.", ) jwt_auth.add_argument( "--jwt-private-key", type=str, + default=os.getenv(JWT_PRIVATE_KEY_ENV), help="A private key used for generating web tokens, dependent upon " "which hashing algorithm is used. It must be used together with " "--jwt-secret for providing the public key.", diff --git a/rasa/cli/chat.py b/rasa/cli/chat.py deleted file mode 100644 index 83b2bf0cf9af..000000000000 --- a/rasa/cli/chat.py +++ /dev/null @@ -1,69 +0,0 @@ -import argparse -from asyncio import AbstractEventLoop -import logging -from typing import List, Text -import uuid -import webbrowser - -from sanic import Sanic - -from rasa.cli import SubParsersAction -from rasa.cli.arguments import shell as arguments -from rasa.core import constants - -logger = logging.getLogger(__name__) - - -def add_subparser( - subparsers: SubParsersAction, parents: List[argparse.ArgumentParser] -) -> None: - """Add all chat parsers. - - Args: - subparsers: subparser we are going to attach to - parents: Parent parsers, needed to ensure tree structure in argparse - """ - chat_parser = subparsers.add_parser( - "chat", - parents=parents, - conflict_handler="resolve", - formatter_class=argparse.ArgumentDefaultsHelpFormatter, - help=( - "Loads your trained model and lets you talk to your " - "assistant in the browser." 
- ), - ) - chat_parser.set_defaults(func=chat) - - chat_parser.add_argument( - "--conversation-id", - default=uuid.uuid4().hex, - required=False, - help="Set the conversation ID.", - ) - - arguments.set_shell_arguments(chat_parser) - chat_parser.set_defaults(enable_api=True) - - -async def open_chat_in_browser(server_url: Text) -> None: - """Opens the rasa chat in the default browser.""" - webbrowser.open(f"{server_url}/webhooks/socketio/chat.html") - - -def chat(args: argparse.Namespace) -> None: - """Chat to the bot using the most recent model.""" - import rasa.cli.run - - args.connector = "socketio" - - logging.getLogger("rasa.core.tracker_store").setLevel(logging.INFO) - - async def after_start_hook_open_chat(_: Sanic, __: AbstractEventLoop) -> None: - """Hook to open the browser on server start.""" - server_url = constants.DEFAULT_SERVER_FORMAT.format("http", args.port) - await open_chat_in_browser(server_url) - - args.server_listeners = [(after_start_hook_open_chat, "after_server_start")] - - rasa.cli.run.run(args) diff --git a/rasa/cli/data.py b/rasa/cli/data.py index 06f912168c1d..4673a28e3783 100644 --- a/rasa/cli/data.py +++ b/rasa/cli/data.py @@ -144,6 +144,22 @@ def _add_data_validate_parsers( ) arguments.set_validator_arguments(story_structure_parser) + flows_structure_parser = validate_subparsers.add_parser( + "flows", + formatter_class=argparse.ArgumentDefaultsHelpFormatter, + parents=parents, + help="Checks for inconsistencies in the flows files.", + ) + flows_structure_parser.set_defaults( + func=lambda args: rasa.cli.utils.validate_files( + args.fail_on_warnings, + args.max_history, + _build_training_data_importer(args), + flows_only=True, + ) + ) + arguments.set_validator_arguments(flows_structure_parser) + def _build_training_data_importer(args: argparse.Namespace) -> "TrainingDataImporter": config = rasa.cli.utils.get_validated_path( diff --git a/rasa/cli/utils.py b/rasa/cli/utils.py index b205c2072d17..1778416fd1ac 100644 --- a/rasa/cli/utils.py +++ b/rasa/cli/utils.py @@ -127,7 +127,7 @@ def validate_assistant_id_in_config(config_file: Union["Path", Text]) -> None: if assistant_id is None or assistant_id == ASSISTANT_ID_DEFAULT_VALUE: rasa.shared.utils.io.raise_warning( - f"The config file '{str(config_file)}' is missing a unique value for the " + f"The config file '{config_file!s}' is missing a unique value for the " f"'{ASSISTANT_ID_KEY}' mandatory key. Proceeding with generating a random " f"value and overwriting the '{ASSISTANT_ID_KEY}' in the config file." ) @@ -217,6 +217,7 @@ def validate_files( max_history: Optional[int], importer: TrainingDataImporter, stories_only: bool = False, + flows_only: bool = False, ) -> None: """Validates either the story structure or the entire project. @@ -225,6 +226,7 @@ def validate_files( max_history: The max history to use when validating the story structure. importer: The `TrainingDataImporter` to use to load the training data. stories_only: If `True`, only the story structure is validated. + flows_only: If `True`, only the flows are validated. 
""" from rasa.validator import Validator @@ -232,6 +234,8 @@ def validate_files( if stories_only: all_good = _validate_story_structure(validator, max_history, fail_on_warnings) + elif flows_only: + all_good = validator.verify_flows() else: if importer.get_domain().is_empty(): rasa.shared.utils.cli.print_error_and_exit( @@ -243,8 +247,9 @@ def validate_files( valid_stories = _validate_story_structure( validator, max_history, fail_on_warnings ) + valid_flows = validator.verify_flows() - all_good = valid_domain and valid_nlu and valid_stories + all_good = valid_domain and valid_nlu and valid_stories and valid_flows validator.warn_if_config_mandatory_keys_are_not_set() diff --git a/rasa/core/actions/action.py b/rasa/core/actions/action.py index 28258cfab877..6a88d8e62927 100644 --- a/rasa/core/actions/action.py +++ b/rasa/core/actions/action.py @@ -57,6 +57,7 @@ ACTION_VALIDATE_SLOT_MAPPINGS, MAPPING_TYPE, SlotMappingType, + KNOWLEDGE_BASE_SLOT_NAMES, ) from rasa.shared.core.domain import Domain from rasa.shared.core.events import ( @@ -100,9 +101,10 @@ def default_actions(action_endpoint: Optional[EndpointConfig] = None) -> List["A from rasa.dialogue_understanding.patterns.correction import ActionCorrectFlowSlot from rasa.dialogue_understanding.patterns.cancel import ActionCancelFlow from rasa.dialogue_understanding.patterns.clarify import ActionClarifyFlows - from rasa.core.actions.action_run_slot_rejections import ( - ActionRunSlotRejections, - ) + from rasa.core.actions.action_run_slot_rejections import ActionRunSlotRejections + from rasa.core.actions.action_trigger_chitchat import ActionTriggerChitchat + from rasa.core.actions.action_trigger_search import ActionTriggerSearch + from rasa.core.actions.action_clean_stack import ActionCleanStack return [ ActionListen(), @@ -122,6 +124,9 @@ def default_actions(action_endpoint: Optional[EndpointConfig] = None) -> List["A ActionCorrectFlowSlot(), ActionClarifyFlows(), ActionRunSlotRejections(), + ActionCleanStack(), + ActionTriggerSearch(), + ActionTriggerChitchat(), ] @@ -1175,7 +1180,7 @@ async def _run_custom_action( except (RasaException, ClientResponseError) as e: logger.warning( f"Failed to execute custom action '{custom_action}' " - f"as a result of error '{str(e)}'. The default action " + f"as a result of error '{e!s}'. The default action " f"'{self.name()}' failed to fill slots with custom " f"mappings." 
) @@ -1292,7 +1297,9 @@ async def run( executed_custom_actions: Set[Text] = set() user_slots = [ - slot for slot in domain.slots if slot.name not in DEFAULT_SLOT_NAMES + slot + for slot in domain.slots + if slot.name not in DEFAULT_SLOT_NAMES | KNOWLEDGE_BASE_SLOT_NAMES ] for slot in user_slots:
diff --git a/rasa/core/actions/action_clean_stack.py b/rasa/core/actions/action_clean_stack.py new file mode 100644 index 000000000000..a885abbdbf8e --- /dev/null +++ b/rasa/core/actions/action_clean_stack.py @@ -0,0 +1,50 @@ +from __future__ import annotations + +from typing import Optional, Dict, Any, List + +from rasa.core.actions.action import Action +from rasa.core.channels import OutputChannel +from rasa.core.nlg import NaturalLanguageGenerator +from rasa.dialogue_understanding.stack.dialogue_stack import DialogueStack +from rasa.dialogue_understanding.stack.frames import ( + BaseFlowStackFrame, + UserFlowStackFrame, +) +from rasa.dialogue_understanding.stack.frames.flow_stack_frame import FlowStackFrameType +from rasa.shared.core.constants import ACTION_CLEAN_STACK, DIALOGUE_STACK_SLOT +from rasa.shared.core.domain import Domain +from rasa.shared.core.events import Event, SlotSet +from rasa.shared.core.flows.flow import ContinueFlowStep, END_STEP +from rasa.shared.core.trackers import DialogueStateTracker + + +class ActionCleanStack(Action): + """Action which ends all flows currently on the dialogue stack.""" + + def name(self) -> str: + """Return the name of the action.""" + return ACTION_CLEAN_STACK + + async def run( + self, + output_channel: OutputChannel, + nlg: NaturalLanguageGenerator, + tracker: DialogueStateTracker, + domain: Domain, + metadata: Optional[Dict[str, Any]] = None, + ) -> List[Event]: + """Clean the stack.""" + stack = DialogueStack.from_tracker(tracker) + + new_frames = [] + # Set all frames to their end step, filter out any non-BaseFlowStackFrames + for frame in stack.frames: + if isinstance(frame, BaseFlowStackFrame): + frame.step_id = ContinueFlowStep.continue_step_for_id(END_STEP) + if isinstance(frame, UserFlowStackFrame): + # Make sure no "continue interrupted" pattern gets triggered + frame.frame_type = FlowStackFrameType.REGULAR + new_frames.append(frame) + new_stack = DialogueStack.from_dict([frame.as_dict() for frame in new_frames]) + + return [SlotSet(DIALOGUE_STACK_SLOT, new_stack.as_dict())]
diff --git a/rasa/core/actions/action_trigger_chitchat.py b/rasa/core/actions/action_trigger_chitchat.py new file mode 100644 index 000000000000..d57d27fe9e54 --- /dev/null +++ b/rasa/core/actions/action_trigger_chitchat.py @@ -0,0 +1,32 @@ +from typing import Optional, Dict, Any, List + +from rasa.core.actions.action import Action +from rasa.core.channels import OutputChannel +from rasa.core.nlg import NaturalLanguageGenerator +from rasa.dialogue_understanding.stack.dialogue_stack import DialogueStack +from rasa.dialogue_understanding.stack.frames import ChitChatStackFrame +from rasa.shared.core.constants import ACTION_TRIGGER_CHITCHAT +from rasa.shared.core.domain import Domain +from rasa.shared.core.events import Event +from rasa.shared.core.trackers import DialogueStateTracker + + +class ActionTriggerChitchat(Action): + """Action which triggers a chitchat answer.""" + + def name(self) -> str: + """Return the name of the action.""" + return ACTION_TRIGGER_CHITCHAT + + async def run( + self, + output_channel: OutputChannel, + nlg: NaturalLanguageGenerator, + tracker: DialogueStateTracker, + domain: Domain, + metadata: Optional[Dict[str, Any]] = None, + ) -> List[Event]: + """Push a chitchat stack frame onto the dialogue stack.""" + dialogue_stack = DialogueStack.from_tracker(tracker) + dialogue_stack.push(ChitChatStackFrame()) + return [dialogue_stack.persist_as_event()]
diff --git a/rasa/core/actions/action_trigger_search.py b/rasa/core/actions/action_trigger_search.py new file mode 100644 index 000000000000..8e5f16b5226b --- /dev/null +++ b/rasa/core/actions/action_trigger_search.py @@ -0,0 +1,32 @@ +from typing import Optional, Dict, Any, List + +from rasa.core.actions.action import Action +from rasa.core.channels import OutputChannel +from rasa.core.nlg import NaturalLanguageGenerator +from rasa.dialogue_understanding.stack.dialogue_stack import DialogueStack +from rasa.dialogue_understanding.stack.frames import SearchStackFrame +from rasa.shared.core.constants import ACTION_TRIGGER_SEARCH +from rasa.shared.core.domain import Domain +from rasa.shared.core.events import Event +from rasa.shared.core.trackers import DialogueStateTracker + + +class ActionTriggerSearch(Action): + """Action which triggers a search.""" + + def name(self) -> str: + """Return the name of the action.""" + return ACTION_TRIGGER_SEARCH + + async def run( + self, + output_channel: OutputChannel, + nlg: NaturalLanguageGenerator, + tracker: DialogueStateTracker, + domain: Domain, + metadata: Optional[Dict[str, Any]] = None, + ) -> List[Event]: + """Push a search stack frame onto the dialogue stack.""" + dialogue_stack = DialogueStack.from_tracker(tracker) + dialogue_stack.push(SearchStackFrame()) + return [dialogue_stack.persist_as_event()]
diff --git a/rasa/core/actions/flow_trigger_action.py b/rasa/core/actions/flow_trigger_action.py index 82ee017679bf..7b4271e27c50 100644 --- a/rasa/core/actions/flow_trigger_action.py +++ b/rasa/core/actions/flow_trigger_action.py @@ -10,9 +10,6 @@ from rasa.core.channels import OutputChannel from rasa.shared.constants import FLOW_PREFIX -from rasa.shared.core.constants import ( - DIALOGUE_STACK_SLOT, -) from rasa.shared.core.domain import Domain from rasa.shared.core.events import ( ActiveLoop, @@ -70,7 +67,7 @@ async def run( ] events: List[Event] = [ - SlotSet(DIALOGUE_STACK_SLOT, stack.as_dict()) + stack.persist_as_event(), ] + slot_set_events if tracker.active_loop_name: events.append(ActiveLoop(None))
diff --git a/rasa/core/brokers/file.py b/rasa/core/brokers/file.py index aa6495c3b7fc..66a727bedbf5 100644 --- a/rasa/core/brokers/file.py +++ b/rasa/core/brokers/file.py @@ -15,7 +15,8 @@ class FileEventBroker(EventBroker): """Log events to a file in json format. - There will be one event per line and each event is stored as json.""" + There will be one event per line and each event is stored as json. + """ DEFAULT_LOG_FILE_NAME = "rasa_event.log" @@ -38,7 +39,6 @@ async def from_endpoint_config( def _event_logger(self) -> logging.Logger: """Instantiate the file logger.""" - logger_file = self.path # noinspection PyTypeChecker query_logger = logging.getLogger("event-logger") @@ -54,6 +54,5 @@ def _event_logger(self) -> logging.Logger: def publish(self, event: Dict) -> None: """Write event to file.""" - self.event_logger.info(json.dumps(event)) self.event_logger.handlers[0].flush()
diff --git a/rasa/core/channels/callback.py b/rasa/core/channels/callback.py index de33772d8d34..28a220b6eeda 100644 --- a/rasa/core/channels/callback.py +++ b/rasa/core/channels/callback.py @@ -45,7 +45,8 @@ class CallbackInput(RestInput): """A custom REST http input channel that responds using a callback server. Incoming messages are received through a REST interface.
Responses - are sent asynchronously by calling a configured external REST endpoint.""" + are sent asynchronously by calling a configured external REST endpoint. + """ @classmethod def name(cls) -> Text: diff --git a/rasa/core/channels/channel.py b/rasa/core/channels/channel.py index b4f168237668..63d2d0feb165 100644 --- a/rasa/core/channels/channel.py +++ b/rasa/core/channels/channel.py @@ -40,7 +40,7 @@ def __init__( text: Optional[Text] = None, output_channel: Optional["OutputChannel"] = None, sender_id: Optional[Text] = None, - parse_data: Dict[Text, Any] = None, + parse_data: Optional[Dict[Text, Any]] = None, input_channel: Optional[Text] = None, message_id: Optional[Text] = None, metadata: Optional[Dict] = None, @@ -340,11 +340,11 @@ def name(cls) -> Text: @staticmethod def _message( recipient_id: Text, - text: Text = None, - image: Text = None, - buttons: List[Dict[Text, Any]] = None, - attachment: Text = None, - custom: Dict[Text, Any] = None, + text: Optional[Text] = None, + image: Optional[Text] = None, + buttons: Optional[List[Dict[Text, Any]]] = None, + attachment: Optional[Text] = None, + custom: Optional[Dict[Text, Any]] = None, ) -> Dict: """Create a message object that will be stored.""" obj = { diff --git a/rasa/core/channels/chat.html b/rasa/core/channels/chat.html deleted file mode 100644 index c7f299da3405..000000000000 --- a/rasa/core/channels/chat.html +++ /dev/null @@ -1,429 +0,0 @@ - - - - - Rasa Bot Inspector - - - - - - - - - -
[429 deleted lines of chat.html omitted: the inline "Rasa Bot Inspector" page previously served at /webhooks/socketio/chat.html, containing the chat-widget embed snippet and debug panels for the dialogue stack, slots, end-to-end test, tracker state, and current flow]
diff --git a/rasa/core/channels/rocketchat.py b/rasa/core/channels/rocketchat.py index 3540e2d2e9d7..88934254df87 100644 --- a/rasa/core/channels/rocketchat.py +++ b/rasa/core/channels/rocketchat.py @@ -34,8 +34,7 @@ def _convert_to_rocket_buttons(buttons: List[Dict]) -> List[Dict]: async def send_text_message( self, recipient_id: Text, text: Text, **kwargs: Any ) -> None: - """Send message to output channel""" - + """Send message to output channel.""" for message_part in text.strip().split("\n\n"): self.rocket.chat_post_message(message_part, room_id=recipient_id)
diff --git a/rasa/core/channels/socketio.py b/rasa/core/channels/socketio.py index 70ae1003e225..aa1eee17a647 100644 --- a/rasa/core/channels/socketio.py +++ b/rasa/core/channels/socketio.py @@ -13,8 +13,6 @@ logger = logging.getLogger(__name__) -CHAT_TEMPLATE_PATH = "/chat.html" - class SocketBlueprint(Blueprint): """Blueprint for socketio connections.""" @@ -53,22 +51,19 @@ def __init__(self, sio: AsyncServer, bot_message_evt: Text) -> None: async def _send_message(self, socket_id: Text, response: Any) -> None: """Sends a message to the recipient using the bot event.""" - await self.sio.emit(self.bot_message_evt, response, room=socket_id) async def send_text_message( self, recipient_id: Text, text: Text, **kwargs: Any ) -> None: """Send a message through this channel.""" - for message_part in text.strip().split("\n\n"): await self._send_message(recipient_id, {"text": message_part}) async def send_image_url( self, recipient_id: Text, image: Text, **kwargs: Any ) -> None: - """Sends an image to the output""" - + """Sends an image to the output.""" message = {"attachment": {"type": "image", "payload": {"src": image}}} await self._send_message(recipient_id, message) @@ -80,7 +75,6 @@ async def send_text_with_buttons( **kwargs: Any, ) -> None: """Sends buttons to the output.""" - # split text and create a message for each text fragment # the `or` makes sure there is at least one message we can attach the quick # replies to @@ -106,7 +100,6 @@ async def send_elements( self, recipient_id: Text, elements: Iterable[Dict[Text, Any]], **kwargs: Any ) -> None: """Sends elements to the output.""" - for element in elements: message = { "attachment": { @@ -120,8 +113,7 @@ async def send_custom_json( self, recipient_id: Text, json_message: Dict[Text, Any], **kwargs: Any ) -> None: - """Sends custom json to the output""" - + """Sends custom json to the output.""" json_message.setdefault("room", recipient_id) await self.sio.emit(self.bot_message_evt, **json_message) @@ -147,7 +139,7 @@ def from_credentials(cls, credentials: Optional[Dict[Text, Any]]) -> InputChannel credentials.get("user_message_evt", "user_uttered"), credentials.get("bot_message_evt", "bot_uttered"), credentials.get("namespace"), - credentials.get("session_persistence", True), + credentials.get("session_persistence", False), credentials.get("socketio_path", "/socket.io"), credentials.get("jwt_key"), credentials.get("jwt_method", "HS256"), @@ -159,7 +151,7 @@ def __init__( self, user_message_evt: Text = "user_uttered", bot_message_evt: Text = "bot_uttered", namespace: Optional[Text] = None, - session_persistence: bool = True, + session_persistence: bool = False, socketio_path: Optional[Text] = "/socket.io", jwt_key: Optional[Text] = None, jwt_method: Optional[Text] = "HS256", @@ -190,12 +182,6 @@ def get_output_channel(self) -> Optional["OutputChannel"]: return None return SocketIOOutput(self.sio, self.bot_message_evt) - def
chat_html_path(self) -> Text: - """Returns the path to the chat.html file.""" - import pkg_resources - - return pkg_resources.resource_filename(__name__, CHAT_TEMPLATE_PATH) - def blueprint( self, on_new_message: Callable[[UserMessage], Awaitable[Any]] ) -> Blueprint: @@ -214,10 +200,6 @@ def blueprint( async def health(_: Request) -> HTTPResponse: return response.json({"status": "ok"}) - @socketio_webhook.route("/chat.html", methods=["GET"]) - async def chat(_: Request) -> HTTPResponse: - return await response.file(self.chat_html_path()) - @sio.on("connect", namespace=self.namespace) async def connect(sid: Text, environ: Dict, auth: Optional[Dict]) -> bool: if self.jwt_key: diff --git a/rasa/core/channels/telegram.py b/rasa/core/channels/telegram.py index 2800edfc8c52..3c2f37ce9c33 100644 --- a/rasa/core/channels/telegram.py +++ b/rasa/core/channels/telegram.py @@ -147,7 +147,7 @@ async def send_custom_json( class TelegramInput(InputChannel): - """Telegram input channel""" + """Telegram input channel.""" @classmethod def name(cls) -> Text: diff --git a/rasa/core/channels/twilio_voice.py b/rasa/core/channels/twilio_voice.py index ca22ddd767b6..af91aedb7a04 100644 --- a/rasa/core/channels/twilio_voice.py +++ b/rasa/core/channels/twilio_voice.py @@ -18,7 +18,7 @@ class TwilioVoiceInput(InputChannel): """Input channel for Twilio Voice.""" - SUPPORTED_VOICES = [ + SUPPORTED_VOICES = [ # noqa: RUF012 "man", "woman", "alice", @@ -88,7 +88,11 @@ class TwilioVoiceInput(InputChannel): "Polly.Aditi", ] - SUPPORTED_SPEECH_MODELS = ["default", "numbers_and_commands", "phone_call"] + SUPPORTED_SPEECH_MODELS = [ # noqa: RUF012 + "default", + "numbers_and_commands", + "phone_call", + ] @classmethod def name(cls) -> Text: diff --git a/rasa/core/channels/webexteams.py b/rasa/core/channels/webexteams.py index 522d7e2bc685..42f747c3528e 100644 --- a/rasa/core/channels/webexteams.py +++ b/rasa/core/channels/webexteams.py @@ -105,7 +105,6 @@ async def health(_: Request) -> HTTPResponse: @webexteams_webhook.route("/webhook", methods=["POST"]) async def webhook(request: Request) -> HTTPResponse: """Respond to inbound webhook HTTP POST from Webex Teams.""" - logger.debug("Received webex webhook call") # Get the POST data sent from Webex Teams json_data = request.json diff --git a/rasa/core/evaluation/marker_base.py b/rasa/core/evaluation/marker_base.py index f942ff3ad2e6..2e09585c1a40 100644 --- a/rasa/core/evaluation/marker_base.py +++ b/rasa/core/evaluation/marker_base.py @@ -47,12 +47,14 @@ class MarkerRegistry: """Keeps track of tags that can be used to configure markers.""" - all_tags: Set[Text] = set() - condition_tag_to_marker_class: Dict[Text, Type[ConditionMarker]] = {} - operator_tag_to_marker_class: Dict[Text, Type[OperatorMarker]] = {} - marker_class_to_tag: Dict[Type[Marker], Text] = {} - negated_tag_to_tag: Dict[Text, Text] = {} - tag_to_negated_tag: Dict[Text, Text] = {} + all_tags: Set[Text] = set() # noqa: RUF012 + condition_tag_to_marker_class: Dict[ + Text, Type[ConditionMarker] + ] = {} # noqa: RUF012 + operator_tag_to_marker_class: Dict[Text, Type[OperatorMarker]] = {} # noqa: RUF012 + marker_class_to_tag: Dict[Type[Marker], Text] = {} # noqa: RUF012 + negated_tag_to_tag: Dict[Text, Text] = {} # noqa: RUF012 + tag_to_negated_tag: Dict[Text, Text] = {} # noqa: RUF012 @classmethod def register_builtin_markers(cls) -> None: @@ -176,6 +178,7 @@ def __init__( applies if and only if the non-negated marker does not apply) description: an optional description of the marker. 
It is not used internally but can be used to document the marker. + Raises: `InvalidMarkerConfig` if the chosen *name* of the marker is the tag of a predefined marker. @@ -450,7 +453,7 @@ def from_path(cls, path: Union[Path, Text]) -> "OrMarker": # printed when we run rasa evaluate with --debug flag raise InvalidMarkerConfig( f"Could not load marker {marker_name} from {yaml_file}. " - f"Reason: {str(e)}. " + f"Reason: {e!s}. " ) loaded_markers.append(marker) @@ -716,6 +719,7 @@ def __init__( conversion of this marker description: an optional description of the marker. It is not used internally but can be used to document the marker. + Raises: `InvalidMarkerConfig` if the given number of sub-markers does not match the expected number of sub-markers @@ -829,7 +833,7 @@ def from_tag_and_sub_config( # printed when we run rasa evaluate with --debug flag raise InvalidMarkerConfig( f"Could not create sub-marker for operator '{tag}' from " - f"{sub_marker_config}. Reason: {str(e)}" + f"{sub_marker_config}. Reason: {e!s}" ) collected_sub_markers.append(sub_marker) try: @@ -839,7 +843,7 @@ def from_tag_and_sub_config( # printed when we run rasa evaluate with --debug flag raise InvalidMarkerConfig( f"Could not create operator '{tag}' with sub-markers " - f"{collected_sub_markers}. Reason: {str(e)}" + f"{collected_sub_markers}. Reason: {e!s}" ) marker.name = name marker.description = description diff --git a/rasa/core/evaluation/marker_tracker_loader.py b/rasa/core/evaluation/marker_tracker_loader.py index 5ef83d86fb4c..b766dbf14a9a 100644 --- a/rasa/core/evaluation/marker_tracker_loader.py +++ b/rasa/core/evaluation/marker_tracker_loader.py @@ -28,7 +28,7 @@ def strategy_sample_n(keys: List[Text], count: int) -> Iterable[Text]: class MarkerTrackerLoader: """Represents a wrapper over a `TrackerStore` with a configurable access pattern.""" - _STRATEGY_MAP = { + _STRATEGY_MAP = { # noqa: RUF012 "all": strategy_all, "first_n": strategy_first_n, "sample_n": strategy_sample_n, @@ -38,7 +38,7 @@ def __init__( self, tracker_store: TrackerStore, strategy: str, - count: int = None, + count: Optional[int] = None, seed: Any = None, ) -> None: """Creates a MarkerTrackerLoader. diff --git a/rasa/core/exporter.py b/rasa/core/exporter.py index 58c567dbdbe2..ef98526b8058 100644 --- a/rasa/core/exporter.py +++ b/rasa/core/exporter.py @@ -160,7 +160,7 @@ def _validate_all_requested_ids_exist( self, conversation_ids_in_tracker_store: Set[Text] ) -> None: """Warn user if `self.requested_conversation_ids` contains IDs not found in - `conversation_ids_in_tracker_store` + `conversation_ids_in_tracker_store`. Args: conversation_ids_in_tracker_store: Set of conversation IDs contained in diff --git a/rasa/core/featurizers/precomputation.py b/rasa/core/featurizers/precomputation.py index febbb8abe506..3491a9f7d9f8 100644 --- a/rasa/core/featurizers/precomputation.py +++ b/rasa/core/featurizers/precomputation.py @@ -57,7 +57,7 @@ class MessageContainerForCoreFeaturization: See: `rasa.core.featurizers.precomputation.CoreFeaturizationCollector`. 
""" - KEY_ATTRIBUTES = [ACTION_NAME, ACTION_TEXT, TEXT, INTENT] + KEY_ATTRIBUTES = [ACTION_NAME, ACTION_TEXT, TEXT, INTENT] # noqa: RUF012 def __init__(self) -> None: """Creates an empty container for precomputations.""" @@ -85,7 +85,7 @@ def __len__(self) -> int: len(key_attribute_table) for key_attribute_table in self._table.values() ) - def messages(self, key_attribute: Text = None) -> ValuesView: + def messages(self, key_attribute: Optional[Text] = None) -> ValuesView: """Returns a view of all messages.""" if key_attribute not in self._table: raise ValueError( @@ -137,7 +137,7 @@ def add(self, message_with_one_key_attribute: Message) -> None: f"{self.KEY_ATTRIBUTES} but received {len(attributes)} attributes " f"({attributes})." ) - key_attribute = list(key_attributes)[0] + key_attribute = list(key_attributes)[0] # noqa: RUF015 key_value = str(message_with_one_key_attribute.data[key_attribute]) # extract the message existing_message = self._table[key_attribute].get(key_value) diff --git a/rasa/core/jobs.py b/rasa/core/jobs.py index 0bd843aa9a9e..2641cfc7a49b 100644 --- a/rasa/core/jobs.py +++ b/rasa/core/jobs.py @@ -13,8 +13,8 @@ async def scheduler() -> AsyncIOScheduler: """Thread global scheduler to handle all recurring tasks. - If no scheduler exists yet, this will instantiate one.""" - + If no scheduler exists yet, this will instantiate one. + """ global __scheduler if not __scheduler: @@ -54,8 +54,8 @@ async def scheduler() -> AsyncIOScheduler: def kill_scheduler() -> None: """Terminate the scheduler if started. - Another call to `scheduler` will create a new scheduler.""" - + Another call to `scheduler` will create a new scheduler. + """ global __scheduler if __scheduler: diff --git a/rasa/core/lock_store.py b/rasa/core/lock_store.py index b7d495b3f1f5..e8baff134a2e 100644 --- a/rasa/core/lock_store.py +++ b/rasa/core/lock_store.py @@ -200,6 +200,7 @@ def __init__( host: Text = "localhost", port: int = 6379, db: int = 1, + username: Optional[Text] = None, password: Optional[Text] = None, use_ssl: bool = False, ssl_certfile: Optional[Text] = None, @@ -215,6 +216,8 @@ def __init__( port: The port of the redis server. db: The name of the database within Redis which should be used by Rasa Open Source. + username: The username which should be used for authentication with the + Redis database. password: The password which should be used for authentication with the Redis database. use_ssl: `True` if SSL should be used for the connection to Redis. @@ -232,6 +235,7 @@ def __init__( host=host, port=int(port), db=int(db), + username=username, password=password, ssl=use_ssl, ssl_certfile=ssl_certfile, diff --git a/rasa/core/migrate.py b/rasa/core/migrate.py index 37433cdd2d9c..d9dbe4e4c2cb 100644 --- a/rasa/core/migrate.py +++ b/rasa/core/migrate.py @@ -387,9 +387,9 @@ def migrate_domain_format( _write_final_domain(domain_path, new_forms, new_slots, out_path) rasa.shared.utils.cli.print_success( - f"Your domain file '{str(domain_path)}' was successfully migrated! " - f"The migrated version is now '{str(out_path)}'. " - f"The original domain file is backed-up at '{str(backup_location)}'." + f"Your domain file '{domain_path!s}' was successfully migrated! " + f"The migrated version is now '{out_path!s}'. " + f"The original domain file is backed-up at '{backup_location!s}'." 
) except Exception as e: diff --git a/rasa/core/nlg/response.py b/rasa/core/nlg/response.py index 756b243602d8..89072567d209 100644 --- a/rasa/core/nlg/response.py +++ b/rasa/core/nlg/response.py @@ -152,12 +152,12 @@ def _format_response_conditions(response_conditions: List[Dict[Text, Any]]) -> T formatted_response_conditions = [""] for index, condition in enumerate(response_conditions): constraints = [] - constraints.append(f"type: {str(condition['type'])}") - constraints.append(f"name: {str(condition['name'])}") - constraints.append(f"value: {str(condition['value'])}") + constraints.append(f"type: {condition['type']!s}") + constraints.append(f"name: {condition['name']!s}") + constraints.append(f"value: {condition['value']!s}") condition_message = " | ".join(constraints) - formatted_condition = f"[condition {str(index + 1)}] {condition_message}" + formatted_condition = f"[condition {index + 1!s}] {condition_message}" formatted_response_conditions.append(formatted_condition) return "\n".join(formatted_response_conditions) diff --git a/rasa/core/policies/flow_policy.py b/rasa/core/policies/flow_policy.py index 99d2937d3159..74e74d4b074a 100644 --- a/rasa/core/policies/flow_policy.py +++ b/rasa/core/policies/flow_policy.py @@ -7,6 +7,9 @@ from structlog.contextvars import ( bound_contextvars, ) +from rasa.dialogue_understanding.patterns.internal_error import ( + InternalErrorPatternFlowStackFrame, +) from rasa.dialogue_understanding.stack.dialogue_stack import DialogueStack from rasa.dialogue_understanding.stack.frames import ( BaseFlowStackFrame, @@ -23,7 +26,10 @@ ContinueInterruptedPatternFlowStackFrame, ) from rasa.dialogue_understanding.stack.frames.flow_stack_frame import FlowStackFrameType -from rasa.dialogue_understanding.stack.utils import top_user_flow_frame +from rasa.dialogue_understanding.stack.utils import ( + end_top_user_flow, + top_user_flow_frame, +) from rasa.core.constants import ( DEFAULT_POLICY_PRIORITY, @@ -36,7 +42,6 @@ from rasa.shared.core.constants import ( ACTION_LISTEN_NAME, ACTION_SEND_TEXT_NAME, - DIALOGUE_STACK_SLOT, ) from rasa.shared.core.events import Event, SlotSet from rasa.shared.core.flows.flow import ( @@ -61,7 +66,7 @@ StaticFlowLink, ) from rasa.core.featurizers.tracker_featurizers import TrackerFeaturizer -from rasa.core.policies.policy import Policy, PolicyPrediction, SupportedData +from rasa.core.policies.policy import Policy, PolicyPrediction from rasa.engine.graph import ExecutionContext from rasa.engine.recipes.default_recipe import DefaultV1Recipe from rasa.engine.storage.resource import Resource @@ -73,15 +78,39 @@ ) import structlog +from rasa.shared.exceptions import RasaException + structlogger = structlog.get_logger() +MAX_NUMBER_OF_STEPS = 250 -class FlowException(Exception): + +class FlowException(RasaException): """Exception that is raised when there is a problem with a flow.""" pass +class FlowCircuitBreakerTrippedException(FlowException): + """Exception that is raised when there is a problem with a flow.""" + + def __init__( + self, dialogue_stack: DialogueStack, number_of_steps_taken: int + ) -> None: + """Creates a `FlowCircuitBreakerTrippedException`. + + Args: + dialogue_stack: The dialogue stack. + number_of_steps_taken: The number of steps that were taken. + """ + super().__init__( + f"Flow circuit breaker tripped after {number_of_steps_taken} steps. " + "There appears to be an infinite loop in the flows." 
+ ) + self.dialogue_stack = dialogue_stack + self.number_of_steps_taken = number_of_steps_taken + + @DefaultV1Recipe.register( DefaultV1Recipe.ComponentType.POLICY_WITHOUT_END_TO_END_SUPPORT, is_trainable=False ) @@ -94,7 +123,15 @@ class FlowPolicy(Policy): @staticmethod def does_support_stack_frame(frame: DialogueStackFrame) -> bool: - """Checks if the policy supports the given stack frame.""" + """Checks if the policy supports the topmost frame on the dialogue stack. + + If `False` is returned, the policy will abstain from making a prediction. + + Args: + frame: The frame to check. + + Returns: + `True` if the policy supports the frame, `False` otherwise.""" return isinstance(frame, BaseFlowStackFrame) @staticmethod @@ -106,18 +143,6 @@ def get_default_config() -> Dict[Text, Any]: POLICY_MAX_HISTORY: None, } - @staticmethod - def supported_data() -> SupportedData: - """The type of data supported by this policy. - - By default, this is only ML-based training data. If policies support rule data, - or both ML-based data and rule data, they need to override this method. - - Returns: - The data type supported by this policy (ML-based training data). - """ - return SupportedData.ML_DATA - def __init__( self, config: Dict[Text, Any], @@ -150,9 +175,6 @@ def train( A policy must return its resource locator so that potential children nodes can load the policy from the resource. """ - # currently, nothing to do here. we have access to the flows during - # prediction. we might want to store the flows in the future - # or do some preprocessing here. return self.resource def predict_action_probabilities( @@ -178,20 +200,44 @@ def predict_action_probabilities( The prediction. """ if not self.supports_current_stack_frame(tracker): + # if the policy doesn't support the current stack frame, we'll abstain return self._prediction(self._default_predictions(domain)) flows = flows or FlowsList([]) executor = FlowExecutor.from_tracker(tracker, flows, domain) # create executor and predict next action - prediction = executor.advance_flows(tracker) - return self._create_prediction_result( - prediction.action_name, - domain, - prediction.score, - prediction.events, - prediction.metadata, - ) + try: + prediction = executor.advance_flows(tracker) + return self._create_prediction_result( + prediction.action_name, + domain, + prediction.score, + prediction.events, + prediction.metadata, + ) + except FlowCircuitBreakerTrippedException as e: + structlogger.error( + "flow.circuit_breaker", + dialogue_stack=e.dialogue_stack, + number_of_steps_taken=e.number_of_steps_taken, + event_info=( + "The flow circuit breaker tripped. " + "There appears to be an infinite loop in the flows." 
+ ), + ) + # end the current flow and start the internal error flow + end_top_user_flow(executor.dialogue_stack) + executor.dialogue_stack.push(InternalErrorPatternFlowStackFrame()) + # we retry, with the internal error frame on the stack + prediction = executor.advance_flows(tracker) + return self._create_prediction_result( + prediction.action_name, + domain, + prediction.score, + prediction.events, + prediction.metadata, + ) def _create_prediction_result( self, @@ -298,7 +344,7 @@ def is_condition_satisfied( """Evaluate a predicate condition.""" # attach context to the predicate evaluation to allow conditions using it - context = {"context": DialogueStack.from_tracker(tracker).current_context()} + context = {"context": self.dialogue_stack.current_context()} document: Dict[str, Any] = context.copy() for slot in self.domain.slots: document[slot.name] = tracker.get_slot(slot.name) @@ -425,20 +471,15 @@ def advance_flows(self, tracker: DialogueStateTracker) -> ActionPrediction: return ActionPrediction(None, 0.0) else: previous_stack = DialogueStack.get_persisted_stack(tracker) - prediction = self._select_next_action(tracker) + prediction = self.select_next_action(tracker) if previous_stack != self.dialogue_stack.as_dict(): # we need to update dialogue stack to persist the state of the executor if not prediction.events: prediction.events = [] - prediction.events.append( - SlotSet( - DIALOGUE_STACK_SLOT, - self.dialogue_stack.as_dict(), - ) - ) + prediction.events.append(self.dialogue_stack.persist_as_event()) return prediction - def _select_next_action( + def select_next_action( self, tracker: DialogueStateTracker, ) -> ActionPrediction: @@ -462,7 +503,16 @@ def _select_next_action( number_of_initial_events = len(tracker.events) + number_of_steps_taken = 0 + while isinstance(step_result, ContinueFlowWithNextStep): + + number_of_steps_taken += 1 + if number_of_steps_taken > MAX_NUMBER_OF_STEPS: + raise FlowCircuitBreakerTrippedException( + self.dialogue_stack, number_of_steps_taken + ) + active_frame = self.dialogue_stack.top() if not isinstance(active_frame, BaseFlowStackFrame): # If there is no current flow, we assume that all flows are done @@ -485,7 +535,7 @@ def _select_next_action( self._advance_top_flow_on_stack(current_step.id) with bound_contextvars(step_id=current_step.id): - step_result = self._run_step( + step_result = self.run_step( current_flow, current_step, tracker ) tracker.update_with_events(step_result.events, self.domain) @@ -521,7 +571,7 @@ def _reset_scoped_slots( events.append(SlotSet(step.collect, initial_value)) return events - def _run_step( + def run_step( self, flow: Flow, step: FlowStep, diff --git a/rasa/core/policies/policy.py b/rasa/core/policies/policy.py index acf17ea8661c..d74769d8f9b6 100644 --- a/rasa/core/policies/policy.py +++ b/rasa/core/policies/policy.py @@ -474,34 +474,32 @@ def format_tracker_states(states: List[Dict]) -> Text: if state: if USER in state: if TEXT in state[USER]: - state_messages.append( - f"user text: {str(state[USER][TEXT])}" - ) + state_messages.append(f"user text: {state[USER][TEXT]!s}") if INTENT in state[USER]: state_messages.append( - f"user intent: {str(state[USER][INTENT])}" + f"user intent: {state[USER][INTENT]!s}" ) if ENTITIES in state[USER]: state_messages.append( - f"user entities: {str(state[USER][ENTITIES])}" + f"user entities: {state[USER][ENTITIES]!s}" ) if PREVIOUS_ACTION in state: if ACTION_NAME in state[PREVIOUS_ACTION]: state_messages.append( f"previous action name: " - 
f"{str(state[PREVIOUS_ACTION][ACTION_NAME])}" + f"{state[PREVIOUS_ACTION][ACTION_NAME]!s}" ) if ACTION_TEXT in state[PREVIOUS_ACTION]: state_messages.append( f"previous action text: " - f"{str(state[PREVIOUS_ACTION][ACTION_TEXT])}" + f"{state[PREVIOUS_ACTION][ACTION_TEXT]!s}" ) if ACTIVE_LOOP in state: - state_messages.append(f"active loop: {str(state[ACTIVE_LOOP])}") + state_messages.append(f"active loop: {state[ACTIVE_LOOP]!s}") if SLOTS in state: - state_messages.append(f"slots: {str(state[SLOTS])}") + state_messages.append(f"slots: {state[SLOTS]!s}") state_message_formatted = " | ".join(state_messages) - state_formatted = f"[state {str(index)}] {state_message_formatted}" + state_formatted = f"[state {index!s}] {state_message_formatted}" formatted_states.append(state_formatted) return "\n".join(formatted_states) diff --git a/rasa/core/test.py b/rasa/core/test.py index 81215a986239..1bd54082a335 100644 --- a/rasa/core/test.py +++ b/rasa/core/test.py @@ -247,11 +247,10 @@ def _compare_entities( i_pred: int, i_target: int, ) -> int: - """ - Compare the current predicted and target entities and decide which one + """Compare the current predicted and target entities and decide which one comes first. If the predicted entity comes first it returns -1, while it returns 1 if the target entity comes first. - If target and predicted are aligned it returns 0 + If target and predicted are aligned it returns 0. """ pred = None target = None @@ -363,7 +362,8 @@ class WronglyClassifiedUserUtterance(UserUttered): """The NLU model predicted the wrong user utterance. Mostly used to mark wrong predictions and be able to - dump them as stories.""" + dump them as stories. + """ type_name = "wrong_utterance" diff --git a/rasa/core/tracker_store.py b/rasa/core/tracker_store.py index 72559d8a3712..21ae6e7d7e9d 100644 --- a/rasa/core/tracker_store.py +++ b/rasa/core/tracker_store.py @@ -450,6 +450,7 @@ def __init__( host: Text = "localhost", port: int = 6379, db: int = 0, + username: Optional[Text] = None, password: Optional[Text] = None, event_broker: Optional[EventBroker] = None, record_exp: Optional[float] = None, @@ -467,6 +468,7 @@ def __init__( host=host, port=port, db=db, + username=username, password=password, ssl=use_ssl, ssl_keyfile=ssl_keyfile, @@ -1063,8 +1065,8 @@ def __init__( host: Optional[Text] = None, port: Optional[int] = None, db: Text = "rasa.db", - username: Text = None, - password: Text = None, + username: Optional[Text] = None, + password: Optional[Text] = None, event_broker: Optional[EventBroker] = None, login_db: Optional[Text] = None, query: Optional[Dict] = None, @@ -1080,9 +1082,7 @@ def __init__( self.engine = sa.create_engine(engine_url, **create_engine_kwargs(engine_url)) - logger.debug( - f"Attempting to connect to database via '{repr(self.engine.url)}'." 
- ) + logger.debug(f"Attempting to connect to database via '{self.engine.url!r}'.") # Database might take a while to come up while True: @@ -1123,8 +1123,8 @@ def get_db_url( host: Optional[Text] = None, port: Optional[int] = None, db: Text = "rasa.db", - username: Text = None, - password: Text = None, + username: Optional[Text] = None, + password: Optional[Text] = None, login_db: Optional[Text] = None, query: Optional[Dict] = None, ) -> Union[Text, "URL"]: diff --git a/rasa/core/training/__init__.py b/rasa/core/training/__init__.py index 414f945fdb44..82776b7a5629 100644 --- a/rasa/core/training/__init__.py +++ b/rasa/core/training/__init__.py @@ -41,8 +41,7 @@ def load_data( debug_plots: bool = False, exclusion_percentage: Optional[int] = None, ) -> List["TrackerWithCachedStates"]: - """ - Load training data from a resource. + """Load training data from a resource. Args: resource_name: resource to load the data from. either a path or an importer diff --git a/rasa/core/training/converters/responses_prefix_converter.py b/rasa/core/training/converters/responses_prefix_converter.py index 8be36e933831..39b5ac0df8b1 100644 --- a/rasa/core/training/converters/responses_prefix_converter.py +++ b/rasa/core/training/converters/responses_prefix_converter.py @@ -33,8 +33,7 @@ def normalize_utter_action(action_name: Text) -> Text: class StoryResponsePrefixConverter(TrainingDataConverter): - """ - Converter responsible for ensuring that retrieval intent actions in stories + """Converter responsible for ensuring that retrieval intent actions in stories start with `utter_` instead of `respond_`. """ @@ -76,8 +75,7 @@ async def convert_and_write(cls, source_path: Path, output_path: Path) -> None: class DomainResponsePrefixConverter(TrainingDataConverter): - """ - Converter responsible for ensuring that retrieval intent actions in domain + """Converter responsible for ensuring that retrieval intent actions in domain start with `utter_` instead of `respond_`. """ diff --git a/rasa/core/training/interactive.py b/rasa/core/training/interactive.py index 860dae007145..de10cd6a69ef 100644 --- a/rasa/core/training/interactive.py +++ b/rasa/core/training/interactive.py @@ -133,7 +133,8 @@ class ForkTracker(Exception): """Exception used to break out the flow and fork at a previous step. The tracker will be reset to the selected point in the past and the - conversation will continue from there.""" + conversation will continue from there. + """ pass @@ -142,7 +143,8 @@ class UndoLastStep(Exception): """Exception used to break out the flow and undo the last step. The last step is either the most recent user message or the most - recent action run by the bot.""" + recent action run by the bot. + """ pass @@ -407,8 +409,8 @@ async def _request_fork_from_user( """Take in a conversation and ask at which point to fork the conversation. Returns the list of events that should be kept. Forking means, the - conversation will be reset and continued from this previous point.""" - + conversation will be reset and continued from this previous point. + """ tracker = await retrieve_tracker( endpoint, conversation_id, EventVerbosity.AFTER_RESTART ) @@ -436,8 +438,8 @@ async def _request_intent_from_user( ) -> Dict[Text, Any]: """Take in latest message and ask which intent it should have been. - Returns the intent dict that has been selected by the user.""" - + Returns the intent dict that has been selected by the user. 
+ """ predictions = latest_message.get("parse_data", {}).get("intent_ranking", []) predicted_intents = {p[INTENT_NAME_KEY] for p in predictions} @@ -495,7 +497,8 @@ def _chat_history_table(events: List[Dict[Text, Any]]) -> Text: """Create a table containing bot and user messages. Also includes additional information, like any events and - prediction probabilities.""" + prediction probabilities. + """ def wrap(txt: Text, max_width: int) -> Text: true_wrapping_width = calc_true_wrapping_width(txt, max_width) @@ -668,7 +671,6 @@ async def _request_action_from_user( predictions: List[Dict[Text, Any]], conversation_id: Text, endpoint: EndpointConfig ) -> Tuple[Text, bool]: """Ask the user to correct an action prediction.""" - await _print_history(conversation_id, endpoint) choices = [ @@ -764,7 +766,8 @@ def _split_conversation_at_restarts( ) -> List[List[Dict[Text, Any]]]: """Split a conversation at restart events. - Returns an array of event lists, without the restart events.""" + Returns an array of event lists, without the restart events. + """ deserialized_events = [Event.from_parameters(event) for event in events] split_events = rasa.shared.core.events.split_events( deserialized_events, Restarted, include_splitting_event=False @@ -775,8 +778,8 @@ def _split_conversation_at_restarts( def _collect_messages(events: List[Dict[Text, Any]]) -> List[Message]: """Collect the message text and parsed data from the UserMessage events - into a list""" - + into a list. + """ import rasa.shared.nlu.training_data.util as rasa_nlu_training_data_utils messages = [] @@ -797,7 +800,6 @@ def _collect_messages(events: List[Dict[Text, Any]]) -> List[Message]: def _collect_actions(events: List[Dict[Text, Any]]) -> List[Dict[Text, Any]]: """Collect all the `ActionExecuted` events into a list.""" - return [evt for evt in events if evt.get("event") == ActionExecuted.type_name] @@ -849,8 +851,7 @@ def _write_stories_to_file( def _filter_messages(msgs: List[Message]) -> List[Message]: - """Filter messages removing those that start with INTENT_MESSAGE_PREFIX""" - + """Filter messages removing those that start with INTENT_MESSAGE_PREFIX.""" filtered_messages = [] for msg in msgs: if not msg.get(TEXT).startswith(INTENT_MESSAGE_PREFIX): @@ -869,9 +870,7 @@ def _write_nlu_to_file(export_nlu_path: Text, events: List[Dict[Text, Any]]) -> try: previous_examples = loading.load_data(export_nlu_path) except Exception as e: - logger.debug( - f"An exception occurred while trying to load the NLU data. {str(e)}" - ) + logger.debug(f"An exception occurred while trying to load the NLU data. {e!s}") # No previous file exists, use empty training data as replacement. 
previous_examples = TrainingData() @@ -907,7 +906,6 @@ def _entities_from_messages(messages: List[Message]) -> List[Text]: def _intents_from_messages(messages: List[Message]) -> Set[Text]: """Return all intents that occur in at least one of the messages.""" - # set of distinct intents distinct_intents = {m.data["intent"] for m in messages if "intent" in m.data} @@ -918,7 +916,6 @@ def _write_domain_to_file( domain_path: Text, events: List[Dict[Text, Any]], old_domain: Domain ) -> None: """Write an updated domain file to the file path.""" - io_utils.create_path(domain_path) messages = _collect_messages(events) @@ -954,7 +951,6 @@ async def _predict_till_next_listen( plot_file: Optional[Text], ) -> None: """Predict and validate actions until we need to wait for a user message.""" - listen = False while not listen: result = await request_prediction(endpoint, conversation_id) @@ -1593,7 +1589,6 @@ def _serve_application( async def run_interactive_io(running_app: Sanic) -> None: """Small wrapper to shut down the server once cmd io is done.""" - await record_messages( endpoint=endpoint, file_importer=file_importer, @@ -1646,7 +1641,7 @@ def run_interactive_learning( file_importer: TrainingDataImporter, skip_visualization: bool = False, conversation_id: Text = uuid.uuid4().hex, - server_args: Dict[Text, Any] = None, + server_args: Optional[Dict[Text, Any]] = None, ) -> None: """Start the interactive learning with the model of the agent.""" global SAVE_IN_E2E diff --git a/rasa/core/training/story_conflict.py b/rasa/core/training/story_conflict.py index a6e7ec320a2e..a00cca2ca3a5 100644 --- a/rasa/core/training/story_conflict.py +++ b/rasa/core/training/story_conflict.py @@ -29,13 +29,11 @@ class StoryConflict: """ def __init__(self, sliced_states: List[State]) -> None: - """ - Creates a `StoryConflict` from a given state. + """Creates a `StoryConflict` from a given state. Args: sliced_states: The (sliced) dialogue state at which the conflict occurs. """ - self._sliced_states = sliced_states # A list of actions that all follow from the same state. self._conflicting_actions: DefaultDict[Text, List[Text]] = defaultdict( @@ -346,7 +344,6 @@ def _get_previous_event( Returns: Tuple of (type, name) strings of the prior event. 
""" - previous_event_type = None previous_event_name = None diff --git a/rasa/core/utils.py b/rasa/core/utils.py index 0f3150eb5241..22e753b5c241 100644 --- a/rasa/core/utils.py +++ b/rasa/core/utils.py @@ -117,7 +117,7 @@ def find_route(suffix: Text, path: Text) -> Optional[Text]: for arg in route._params: options[arg] = f"[{arg}]" - handlers = [(list(route.methods)[0], route.name.replace("rasa_server.", ""))] + handlers = [(next(iter(route.methods)), route.name.replace("rasa_server.", ""))] for method, name in handlers: full_endpoint = "/" + "/".join(endpoint) diff --git a/rasa/dialogue_understanding/commands/cancel_flow_command.py b/rasa/dialogue_understanding/commands/cancel_flow_command.py index 9a880111e7fe..904fdc775b82 100644 --- a/rasa/dialogue_understanding/commands/cancel_flow_command.py +++ b/rasa/dialogue_understanding/commands/cancel_flow_command.py @@ -9,8 +9,7 @@ from rasa.dialogue_understanding.patterns.cancel import CancelPatternFlowStackFrame from rasa.dialogue_understanding.stack.dialogue_stack import DialogueStack from rasa.dialogue_understanding.stack.frames import UserFlowStackFrame -from rasa.shared.core.constants import DIALOGUE_STACK_SLOT -from rasa.shared.core.events import Event, SlotSet +from rasa.shared.core.events import Event from rasa.shared.core.flows.flow import FlowsList from rasa.shared.core.trackers import DialogueStateTracker from rasa.dialogue_understanding.stack.utils import top_user_flow_frame @@ -48,9 +47,9 @@ def select_canceled_frames(stack: DialogueStack) -> List[str]: The frames that were canceled.""" canceled_frames = [] # we need to go through the original stack dump in reverse order - # to find the frames that were canceled. we cancel everthing from + # to find the frames that were canceled. we cancel everything from # the top of the stack until we hit the user flow that was canceled. - # this will also cancel any patterns put ontop of that user flow, + # this will also cancel any patterns put on top of that user flow, # e.g. corrections. for frame in reversed(stack.frames): canceled_frames.append(frame.frame_id) @@ -103,4 +102,4 @@ def run_command_on_tracker( canceled_frames=canceled_frames, ) ) - return [SlotSet(DIALOGUE_STACK_SLOT, stack.as_dict())] + return [stack.persist_as_event()] diff --git a/rasa/dialogue_understanding/commands/chit_chat_answer_command.py b/rasa/dialogue_understanding/commands/chit_chat_answer_command.py index 38be12d210b8..e8559e46c820 100644 --- a/rasa/dialogue_understanding/commands/chit_chat_answer_command.py +++ b/rasa/dialogue_understanding/commands/chit_chat_answer_command.py @@ -3,10 +3,9 @@ from dataclasses import dataclass from typing import Any, Dict, List from rasa.dialogue_understanding.commands import FreeFormAnswerCommand +from rasa.dialogue_understanding.patterns.chitchat import ChitchatPatternFlowStackFrame from rasa.dialogue_understanding.stack.dialogue_stack import DialogueStack -from rasa.dialogue_understanding.stack.frames.chit_chat_frame import ChitChatStackFrame -from rasa.shared.core.constants import DIALOGUE_STACK_SLOT -from rasa.shared.core.events import Event, SlotSet +from rasa.shared.core.events import Event from rasa.shared.core.flows.flow import FlowsList from rasa.shared.core.trackers import DialogueStateTracker @@ -46,5 +45,5 @@ def run_command_on_tracker( The events to apply to the tracker. 
""" stack = DialogueStack.from_tracker(tracker) - stack.push(ChitChatStackFrame()) - return [SlotSet(DIALOGUE_STACK_SLOT, stack.as_dict())] + stack.push(ChitchatPatternFlowStackFrame()) + return [stack.persist_as_event()] diff --git a/rasa/dialogue_understanding/commands/clarify_command.py b/rasa/dialogue_understanding/commands/clarify_command.py index 69a413730e0f..21bbd9ec6f51 100644 --- a/rasa/dialogue_understanding/commands/clarify_command.py +++ b/rasa/dialogue_understanding/commands/clarify_command.py @@ -7,8 +7,7 @@ from rasa.dialogue_understanding.commands import Command from rasa.dialogue_understanding.patterns.clarify import ClarifyPatternFlowStackFrame from rasa.dialogue_understanding.stack.dialogue_stack import DialogueStack -from rasa.shared.core.constants import DIALOGUE_STACK_SLOT -from rasa.shared.core.events import Event, SlotSet +from rasa.shared.core.events import Event from rasa.shared.core.flows.flow import FlowsList from rasa.shared.core.trackers import DialogueStateTracker @@ -76,4 +75,4 @@ def run_command_on_tracker( relevant_flows = [all_flows.flow_by_id(opt) for opt in clean_options] names = [flow.readable_name() for flow in relevant_flows if flow is not None] stack.push(ClarifyPatternFlowStackFrame(names=names)) - return [SlotSet(DIALOGUE_STACK_SLOT, stack.as_dict())] + return [stack.persist_as_event()] diff --git a/rasa/dialogue_understanding/commands/correct_slots_command.py b/rasa/dialogue_understanding/commands/correct_slots_command.py index 2c80ed35e531..bc29b90b6a9f 100644 --- a/rasa/dialogue_understanding/commands/correct_slots_command.py +++ b/rasa/dialogue_understanding/commands/correct_slots_command.py @@ -15,8 +15,7 @@ BaseFlowStackFrame, UserFlowStackFrame, ) -from rasa.shared.core.constants import DIALOGUE_STACK_SLOT -from rasa.shared.core.events import Event, SlotSet +from rasa.shared.core.events import Event from rasa.shared.core.flows.flow import END_STEP, ContinueFlowStep, FlowStep, FlowsList from rasa.shared.core.trackers import DialogueStateTracker import rasa.dialogue_understanding.stack.utils as utils @@ -284,4 +283,4 @@ def run_command_on_tracker( self.end_previous_correction(top_flow_frame, stack) stack.push(correction_frame, index=insertion_index) - return [SlotSet(DIALOGUE_STACK_SLOT, stack.as_dict())] + return [stack.persist_as_event()] diff --git a/rasa/dialogue_understanding/commands/error_command.py b/rasa/dialogue_understanding/commands/error_command.py index b5e62cb90d1c..da5b3fbaf393 100644 --- a/rasa/dialogue_understanding/commands/error_command.py +++ b/rasa/dialogue_understanding/commands/error_command.py @@ -9,8 +9,7 @@ InternalErrorPatternFlowStackFrame, ) from rasa.dialogue_understanding.stack.dialogue_stack import DialogueStack -from rasa.shared.core.constants import DIALOGUE_STACK_SLOT -from rasa.shared.core.events import Event, SlotSet +from rasa.shared.core.events import Event from rasa.shared.core.flows.flow import FlowsList from rasa.shared.core.trackers import DialogueStateTracker @@ -54,4 +53,4 @@ def run_command_on_tracker( dialogue_stack = DialogueStack.from_tracker(tracker) structlogger.debug("command_executor.error", command=self) dialogue_stack.push(InternalErrorPatternFlowStackFrame()) - return [SlotSet(DIALOGUE_STACK_SLOT, dialogue_stack.as_dict())] + return [dialogue_stack.persist_as_event()] diff --git a/rasa/dialogue_understanding/commands/handle_code_change_command.py b/rasa/dialogue_understanding/commands/handle_code_change_command.py new file mode 100644 index 000000000000..c54bce685f17 --- /dev/null +++ 
b/rasa/dialogue_understanding/commands/handle_code_change_command.py @@ -0,0 +1,67 @@ +from __future__ import annotations + +from dataclasses import dataclass +from typing import Any, Dict, List + +import structlog + +from rasa.dialogue_understanding.commands import Command +from rasa.dialogue_understanding.patterns.code_change import CodeChangeFlowStackFrame +from rasa.dialogue_understanding.stack.dialogue_stack import DialogueStack +from rasa.shared.core.constants import DIALOGUE_STACK_SLOT +from rasa.shared.core.events import Event, SlotSet +from rasa.shared.core.flows.flow import FlowsList +from rasa.shared.core.trackers import DialogueStateTracker +from rasa.dialogue_understanding.stack.utils import top_user_flow_frame + +structlogger = structlog.get_logger() + + +@dataclass +class HandleCodeChangeCommand(Command): + """A command that is executed when the flows have changed.""" + + @classmethod + def command(cls) -> str: + """Returns the command type.""" + return "handle code change" + + @classmethod + def from_dict(cls, data: Dict[str, Any]) -> HandleCodeChangeCommand: + """Converts the dictionary to a command. + + Returns: + The converted command. + """ + return HandleCodeChangeCommand() + + def run_command_on_tracker( + self, + tracker: DialogueStateTracker, + all_flows: FlowsList, + original_tracker: DialogueStateTracker, + ) -> List[Event]: + """Runs the command on the tracker. + + Args: + tracker: The tracker to run the command on. + all_flows: All flows in the assistant. + original_tracker: The tracker before any command was executed. + + Returns: + The events to apply to the tracker. + """ + + stack = DialogueStack.from_tracker(tracker) + original_stack = DialogueStack.from_tracker(original_tracker) + user_frame = top_user_flow_frame(original_stack) + current_flow = user_frame.flow(all_flows) if user_frame else None + + if not current_flow: + structlogger.debug( + "handle_code_change_command.skip.no_active_flow", command=self + ) + return [] + + stack.push(CodeChangeFlowStackFrame()) + return [SlotSet(DIALOGUE_STACK_SLOT, stack.as_dict())]
diff --git a/rasa/dialogue_understanding/commands/knowledge_answer_command.py b/rasa/dialogue_understanding/commands/knowledge_answer_command.py index 3077dd44a739..bcac001b2c57 100644 --- a/rasa/dialogue_understanding/commands/knowledge_answer_command.py +++ b/rasa/dialogue_understanding/commands/knowledge_answer_command.py @@ -3,10 +3,9 @@ from dataclasses import dataclass from typing import Any, Dict, List from rasa.dialogue_understanding.commands import FreeFormAnswerCommand +from rasa.dialogue_understanding.patterns.search import SearchPatternFlowStackFrame from rasa.dialogue_understanding.stack.dialogue_stack import DialogueStack -from rasa.dialogue_understanding.stack.frames.search_frame import SearchStackFrame -from rasa.shared.core.constants import DIALOGUE_STACK_SLOT -from rasa.shared.core.events import Event, SlotSet +from rasa.shared.core.events import Event from rasa.shared.core.flows.flow import FlowsList from rasa.shared.core.trackers import DialogueStateTracker @@ -46,5 +45,5 @@ def run_command_on_tracker( The events to apply to the tracker.
""" dialogue_stack = DialogueStack.from_tracker(tracker) - dialogue_stack.push(SearchStackFrame()) - return [SlotSet(DIALOGUE_STACK_SLOT, dialogue_stack.as_dict())] + dialogue_stack.push(SearchPatternFlowStackFrame()) + return [dialogue_stack.persist_as_event()] diff --git a/rasa/dialogue_understanding/commands/start_flow_command.py b/rasa/dialogue_understanding/commands/start_flow_command.py index cb3cd5d5166a..11ab06bb162b 100644 --- a/rasa/dialogue_understanding/commands/start_flow_command.py +++ b/rasa/dialogue_understanding/commands/start_flow_command.py @@ -14,8 +14,7 @@ top_user_flow_frame, user_flows_on_the_stack, ) -from rasa.shared.core.constants import DIALOGUE_STACK_SLOT -from rasa.shared.core.events import Event, SlotSet +from rasa.shared.core.events import Event from rasa.shared.core.flows.flow import FlowsList from rasa.shared.core.trackers import DialogueStateTracker @@ -88,4 +87,4 @@ def run_command_on_tracker( ) structlogger.debug("command_executor.start_flow", command=self) stack.push(UserFlowStackFrame(flow_id=self.flow, frame_type=frame_type)) - return [SlotSet(DIALOGUE_STACK_SLOT, stack.as_dict())] + return [stack.persist_as_event()] diff --git a/rasa/dialogue_understanding/generator/llm_command_generator.py b/rasa/dialogue_understanding/generator/llm_command_generator.py index d5ee1ac8bcc3..27c2068f3de5 100644 --- a/rasa/dialogue_understanding/generator/llm_command_generator.py +++ b/rasa/dialogue_understanding/generator/llm_command_generator.py @@ -38,6 +38,7 @@ from rasa.shared.nlu.training_data.training_data import TrainingData from rasa.shared.utils.llm import ( DEFAULT_OPENAI_CHAT_MODEL_NAME_ADVANCED, + get_prompt_template, llm_factory, tracker_as_readable_transcript, sanitize_message_for_prompt, @@ -70,10 +71,7 @@ class LLMCommandGenerator(GraphComponent, CommandGenerator): @staticmethod def get_default_config() -> Dict[str, Any]: """The component's default config (see parent class for full docstring).""" - return { - "prompt": DEFAULT_COMMAND_PROMPT_TEMPLATE, - LLM_CONFIG_KEY: None, - } + return {"prompt": None, LLM_CONFIG_KEY: None} def __init__( self, @@ -82,7 +80,10 @@ def __init__( resource: Resource, ) -> None: self.config = {**self.get_default_config(), **config} - self.prompt_template = self.config["prompt"] + self.prompt_template = get_prompt_template( + config.get("prompt"), + DEFAULT_COMMAND_PROMPT_TEMPLATE, + ) self._model_storage = model_storage self._resource = resource diff --git a/rasa/dialogue_understanding/patterns/cancel.py b/rasa/dialogue_understanding/patterns/cancel.py index fa1eda6316c4..b60df2e004cc 100644 --- a/rasa/dialogue_understanding/patterns/cancel.py +++ b/rasa/dialogue_understanding/patterns/cancel.py @@ -15,9 +15,9 @@ from rasa.core.channels.channel import OutputChannel from rasa.core.nlg.generator import NaturalLanguageGenerator from rasa.shared.constants import RASA_DEFAULT_FLOW_PATTERN_PREFIX -from rasa.shared.core.constants import ACTION_CANCEL_FLOW, DIALOGUE_STACK_SLOT +from rasa.shared.core.constants import ACTION_CANCEL_FLOW from rasa.shared.core.domain import Domain -from rasa.shared.core.events import Event, SlotSet +from rasa.shared.core.events import Event from rasa.shared.core.flows.flow import END_STEP, ContinueFlowStep from rasa.shared.core.trackers import DialogueStateTracker @@ -59,7 +59,7 @@ def from_dict(data: Dict[str, Any]) -> CancelPatternFlowStackFrame: The created `DialogueStackFrame`. 
""" return CancelPatternFlowStackFrame( - data["frame_id"], + frame_id=data["frame_id"], step_id=data["step_id"], canceled_name=data["canceled_name"], canceled_frames=data["canceled_frames"], @@ -111,4 +111,4 @@ async def run( frame_id=canceled_frame_id, ) - return [SlotSet(DIALOGUE_STACK_SLOT, stack.as_dict())] + return [stack.persist_as_event()] diff --git a/rasa/dialogue_understanding/patterns/chitchat.py b/rasa/dialogue_understanding/patterns/chitchat.py new file mode 100644 index 000000000000..d4f78a6270f7 --- /dev/null +++ b/rasa/dialogue_understanding/patterns/chitchat.py @@ -0,0 +1,37 @@ +from __future__ import annotations + +from dataclasses import dataclass +from typing import Any, Dict +from rasa.shared.constants import RASA_DEFAULT_FLOW_PATTERN_PREFIX +from rasa.dialogue_understanding.stack.frames import PatternFlowStackFrame + + +FLOW_PATTERN_CHITCHAT = RASA_DEFAULT_FLOW_PATTERN_PREFIX + "chitchat" + + +@dataclass +class ChitchatPatternFlowStackFrame(PatternFlowStackFrame): + """A flow stack frame that gets added to respond to Chitchat.""" + + flow_id: str = FLOW_PATTERN_CHITCHAT + """The ID of the flow.""" + + @classmethod + def type(cls) -> str: + """Returns the type of the frame.""" + return FLOW_PATTERN_CHITCHAT + + @staticmethod + def from_dict(data: Dict[str, Any]) -> ChitchatPatternFlowStackFrame: + """Creates a `DialogueStackFrame` from a dictionary. + + Args: + data: The dictionary to create the `DialogueStackFrame` from. + + Returns: + The created `DialogueStackFrame`. + """ + return ChitchatPatternFlowStackFrame( + frame_id=data["frame_id"], + step_id=data["step_id"], + ) diff --git a/rasa/dialogue_understanding/patterns/clarify.py b/rasa/dialogue_understanding/patterns/clarify.py index 4a7b1df074f0..db877bf5ef66 100644 --- a/rasa/dialogue_understanding/patterns/clarify.py +++ b/rasa/dialogue_understanding/patterns/clarify.py @@ -12,9 +12,9 @@ from rasa.core.channels.channel import OutputChannel from rasa.core.nlg.generator import NaturalLanguageGenerator from rasa.shared.constants import RASA_DEFAULT_FLOW_PATTERN_PREFIX -from rasa.shared.core.constants import ACTION_CLARIFY_FLOWS, DIALOGUE_STACK_SLOT +from rasa.shared.core.constants import ACTION_CLARIFY_FLOWS from rasa.shared.core.domain import Domain -from rasa.shared.core.events import Event, SlotSet +from rasa.shared.core.events import Event from rasa.shared.core.trackers import DialogueStateTracker @@ -50,7 +50,7 @@ def from_dict(data: Dict[str, Any]) -> ClarifyPatternFlowStackFrame: The created `DialogueStackFrame`. 
""" return ClarifyPatternFlowStackFrame( - data["frame_id"], + frame_id=data["frame_id"], step_id=data["step_id"], names=data["names"], clarification_options=data["clarification_options"], @@ -98,4 +98,4 @@ async def run( options_string = self.assemble_options_string(top.names) top.clarification_options = options_string # since we modified the stack frame, we need to update the stack - return [SlotSet(DIALOGUE_STACK_SLOT, stack.as_dict())] + return [stack.persist_as_event()] diff --git a/rasa/dialogue_understanding/patterns/code_change.py b/rasa/dialogue_understanding/patterns/code_change.py new file mode 100644 index 000000000000..4a0ebf12ebeb --- /dev/null +++ b/rasa/dialogue_understanding/patterns/code_change.py @@ -0,0 +1,42 @@ +from __future__ import annotations + +from dataclasses import dataclass +from typing import Any, Dict + +import structlog +from rasa.dialogue_understanding.stack.frames import ( + PatternFlowStackFrame, +) +from rasa.shared.constants import RASA_DEFAULT_FLOW_PATTERN_PREFIX + +structlogger = structlog.get_logger() + +FLOW_PATTERN_CODE_CHANGE_ID = RASA_DEFAULT_FLOW_PATTERN_PREFIX + "code_change" + + +@dataclass +class CodeChangeFlowStackFrame(PatternFlowStackFrame): + """A pattern flow stack frame which cleans the stack after a bot update.""" + + flow_id: str = FLOW_PATTERN_CODE_CHANGE_ID + """The ID of the flow.""" + + @classmethod + def type(cls) -> str: + """Returns the type of the frame.""" + return FLOW_PATTERN_CODE_CHANGE_ID + + @staticmethod + def from_dict(data: Dict[str, Any]) -> CodeChangeFlowStackFrame: + """Creates a `DialogueStackFrame` from a dictionary. + + Args: + data: The dictionary to create the `DialogueStackFrame` from. + + Returns: + The created `DialogueStackFrame`. + """ + return CodeChangeFlowStackFrame( + frame_id=data["frame_id"], + step_id=data["step_id"], + ) diff --git a/rasa/dialogue_understanding/patterns/collect_information.py b/rasa/dialogue_understanding/patterns/collect_information.py index 2bda7588a622..9aa35824e888 100644 --- a/rasa/dialogue_understanding/patterns/collect_information.py +++ b/rasa/dialogue_understanding/patterns/collect_information.py @@ -52,7 +52,7 @@ def from_dict(data: Dict[str, Any]) -> CollectInformationPatternFlowStackFrame: ] return CollectInformationPatternFlowStackFrame( - data["frame_id"], + frame_id=data["frame_id"], step_id=data["step_id"], collect=data["collect"], utter=data["utter"], diff --git a/rasa/dialogue_understanding/patterns/completed.py b/rasa/dialogue_understanding/patterns/completed.py index 5852be45c1d8..1392a403804a 100644 --- a/rasa/dialogue_understanding/patterns/completed.py +++ b/rasa/dialogue_understanding/patterns/completed.py @@ -34,7 +34,7 @@ def from_dict(data: Dict[str, Any]) -> CompletedPatternFlowStackFrame: The created `DialogueStackFrame`. """ return CompletedPatternFlowStackFrame( - data["frame_id"], + frame_id=data["frame_id"], step_id=data["step_id"], previous_flow_name=data["previous_flow_name"], ) diff --git a/rasa/dialogue_understanding/patterns/continue_interrupted.py b/rasa/dialogue_understanding/patterns/continue_interrupted.py index 1137408c8383..7a45f9e677b2 100644 --- a/rasa/dialogue_understanding/patterns/continue_interrupted.py +++ b/rasa/dialogue_understanding/patterns/continue_interrupted.py @@ -36,7 +36,7 @@ def from_dict(data: Dict[str, Any]) -> ContinueInterruptedPatternFlowStackFrame: The created `DialogueStackFrame`. 
""" return ContinueInterruptedPatternFlowStackFrame( - data["frame_id"], + frame_id=data["frame_id"], step_id=data["step_id"], previous_flow_name=data["previous_flow_name"], ) diff --git a/rasa/dialogue_understanding/patterns/correction.py b/rasa/dialogue_understanding/patterns/correction.py index e8626fa6ff20..1409bba8fba1 100644 --- a/rasa/dialogue_understanding/patterns/correction.py +++ b/rasa/dialogue_understanding/patterns/correction.py @@ -7,9 +7,6 @@ ) from rasa.dialogue_understanding.stack.dialogue_stack import DialogueStack from rasa.shared.constants import RASA_DEFAULT_FLOW_PATTERN_PREFIX -from rasa.shared.core.constants import ( - DIALOGUE_STACK_SLOT, -) from rasa.shared.core.flows.flow import ( START_STEP, ) @@ -75,7 +72,7 @@ def from_dict(data: Dict[Text, Any]) -> CorrectionPatternFlowStackFrame: The created `DialogueStackFrame`. """ return CorrectionPatternFlowStackFrame( - data["frame_id"], + frame_id=data["frame_id"], step_id=data["step_id"], is_reset_only=data["is_reset_only"], corrected_slots=data["corrected_slots"], @@ -139,7 +136,7 @@ async def run( ContinueFlowStep.continue_step_for_id(END_STEP) ) - events: List[Event] = [SlotSet(DIALOGUE_STACK_SLOT, stack.as_dict())] + events: List[Event] = [stack.persist_as_event()] events.extend([SlotSet(k, v) for k, v in top.corrected_slots.items()]) diff --git a/rasa/dialogue_understanding/patterns/default_flows_for_patterns.yml b/rasa/dialogue_understanding/patterns/default_flows_for_patterns.yml index cce270ce4339..a8747ed916bd 100644 --- a/rasa/dialogue_understanding/patterns/default_flows_for_patterns.yml +++ b/rasa/dialogue_understanding/patterns/default_flows_for_patterns.yml @@ -1,37 +1,47 @@ version: "3.1" responses: utter_flow_continue_interrupted: - - text: Let's continue with the topic {{ context.previous_flow_name }}. + - text: "Let's continue with {{ context.previous_flow_name }}." metadata: rephrase: True template: jinja utter_corrected_previous_input: - - text: "Ok, I corrected the {{ context.corrected_slots.keys()|join(', ') }}." + - text: "Ok, I am updating {{ context.corrected_slots.keys()|join(', ') }} to {{ context.corrected_slots.values()|join(', ') }} respectively." metadata: rephrase: True template: jinja utter_flow_cancelled_rasa: - - text: Okay, stopping the flow {{ context.canceled_name }}. + - text: "Okay, stopping {{ context.canceled_name }}." metadata: rephrase: True template: jinja utter_can_do_something_else: - - text: "Is there anything else I can help you with?" + - text: "What else I can help you with?" metadata: rephrase: True utter_internal_error_rasa: - - text: Sorry, I'm having trouble understanding you right now. Please try again later. + - text: Sorry, I am having trouble with that. Please try again in a few minutes. utter_clarification_options_rasa: - - text: I'm not sure what you'd like to achieve. Do you want to {{context.clarification_options}}? + - text: "I can help, but I need more information. Which of these would you like to do: {{context.clarification_options}}?" metadata: rephrase: True template: jinja + utter_inform_code_change: + - text: There has been an update to my code. I need to wrap up our running dialogue and start from scratch. + metadata: + rephrase: True + + utter_no_knowledge_base: + - text: I am afraid, I don't know the answer. At this point, I don't have access to a knowledge base. + metadata: + rephrase: True + utter_default_slot_rejection: - text: Sorry, you requested an option that is not valid. Please select one of the available options. 
metadata: @@ -45,31 +55,27 @@ slots: flows: pattern_continue_interrupted: - description: A flow that should will be started to continue an interrupted flow. + description: A flow that will be started to continue an interrupted flow. + name: pattern continue interrupted steps: - - id: "0" - action: utter_flow_continue_interrupted + - action: utter_flow_continue_interrupted pattern_correction: description: Handle a correction of a slot value. - + name: pattern correction steps: - - id: "check_if_reset_only" + - action: action_correct_flow_slot next: - - if: context.is_reset_only - then: "jump_back_without_message" - - else: "correct_slots" - - id: "correct_slots" - action: action_correct_flow_slot - next: "inform_user" - - id: "inform_user" - action: utter_corrected_previous_input - - id: "jump_back_without_message" - action: action_correct_flow_slot + - if: not context.is_reset_only + then: + - action: utter_corrected_previous_input + next: "END" + - else: "END" pattern_cancel_flow: description: A meta flow that's started when a flow is cancelled. + name: pattern_cancel_flow steps: - id: "cancel_flow" @@ -80,47 +86,51 @@ flows: pattern_internal_error: description: internal error + name: pattern internal error steps: - - id: "0" - action: utter_internal_error_rasa + - action: utter_internal_error_rasa pattern_completed: description: a flow has been completed and there is nothing else to be done + name: pattern completed steps: - - id: "0" - action: utter_can_do_something_else + - action: utter_can_do_something_else pattern_chitchat: description: handle interactions with the user that are not task-oriented + name: pattern chitchat steps: - - id: "0" - generation_prompt: | + - generation_prompt: | You are an incredibly friendly assistant. Generate a short response to the user's comment in simple english.
User: {{latest_user_message}} Response: + pattern_search: + description: handle a knowledge-based question or request + name: pattern search + steps: + - action: utter_no_knowledge_base + # - action: action_trigger_search to use doc search policy if present + pattern_clarification: description: handle clarifications with the user + name: pattern clarification steps: - - id: "0" - action: action_clarify_flows - next: "1" - - id: "1" - action: utter_clarification_options_rasa + - action: action_clarify_flows + - action: utter_clarification_options_rasa pattern_collect_information: description: flow used to fill a slot + name: pattern collect information steps: - id: "start" action: action_run_slot_rejections - next: "validate" - - id: "validate" - action: validate_{{context.collect}} + - action: validate_{{context.collect}} next: - if: "{{context.collect}} is not null" - then: "done" + then: "END" - else: "ask_collect" - id: "ask_collect" action: "{{context.utter}}" @@ -128,4 +138,11 @@ flows: - id: "listen" action: action_listen next: "start" - - id: "done" + + + pattern_code_change: + description: flow used to clean the stack after a bot update + name: pattern code change + steps: + - action: utter_inform_code_change + - action: action_clean_stack diff --git a/rasa/dialogue_understanding/patterns/search.py b/rasa/dialogue_understanding/patterns/search.py new file mode 100644 index 000000000000..d0adb09b8883 --- /dev/null +++ b/rasa/dialogue_understanding/patterns/search.py @@ -0,0 +1,37 @@ +from __future__ import annotations + +from dataclasses import dataclass +from typing import Any, Dict +from rasa.shared.constants import RASA_DEFAULT_FLOW_PATTERN_PREFIX +from rasa.dialogue_understanding.stack.frames import PatternFlowStackFrame + + +FLOW_PATTERN_SEARCH = RASA_DEFAULT_FLOW_PATTERN_PREFIX + "search" + + +@dataclass +class SearchPatternFlowStackFrame(PatternFlowStackFrame): + """A stack frame that gets added to respond to knowledge-oriented questions.""" + + flow_id: str = FLOW_PATTERN_SEARCH + """The ID of the flow.""" + + @classmethod + def type(cls) -> str: + """Returns the type of the frame.""" + return FLOW_PATTERN_SEARCH + + @staticmethod + def from_dict(data: Dict[str, Any]) -> SearchPatternFlowStackFrame: + """Creates a `DialogueStackFrame` from a dictionary. + + Args: + data: The dictionary to create the `DialogueStackFrame` from. + + Returns: + The created `DialogueStackFrame`. 
+ """ + return SearchPatternFlowStackFrame( + frame_id=data["frame_id"], + step_id=data["step_id"], + ) diff --git a/rasa/dialogue_understanding/processor/command_processor.py b/rasa/dialogue_understanding/processor/command_processor.py index 4b7ecf7cc46f..e80a9c0998b2 100644 --- a/rasa/dialogue_understanding/processor/command_processor.py +++ b/rasa/dialogue_understanding/processor/command_processor.py @@ -1,4 +1,4 @@ -from typing import List, Optional, Type +from typing import List, Optional, Type, Set, Dict import structlog from rasa.dialogue_understanding.commands import ( @@ -9,6 +9,9 @@ SetSlotCommand, FreeFormAnswerCommand, ) +from rasa.dialogue_understanding.commands.handle_code_change_command import ( + HandleCodeChangeCommand, +) from rasa.dialogue_understanding.patterns.collect_information import ( CollectInformationPatternFlowStackFrame, ) @@ -23,6 +26,7 @@ filled_slots_for_active_flow, top_flow_frame, ) +from rasa.shared.core.constants import FLOW_HASHES_SLOT from rasa.shared.core.events import Event, SlotSet from rasa.shared.core.flows.flow import ( FlowsList, @@ -95,6 +99,39 @@ def validate_state_of_commands(commands: List[Command]) -> None: assert sum(isinstance(c, CorrectSlotsCommand) for c in commands) <= 1 +def find_updated_flows(tracker: DialogueStateTracker, all_flows: FlowsList) -> Set[str]: + """Find the set of updated flows. + + Run through the current dialogue stack and compare the flow hashes of the + flows on the stack with those stored in the tracker. + + Args: + tracker: The tracker. + all_flows: All flows. + + Returns: + A set of flow ids of those flows that have changed + """ + stored_fingerprints: Dict[str, str] = tracker.get_slot(FLOW_HASHES_SLOT) or {} + dialogue_stack = DialogueStack.from_tracker(tracker) + + changed_flows = set() + for frame in dialogue_stack.frames: + if isinstance(frame, BaseFlowStackFrame): + flow = all_flows.flow_by_id(frame.flow_id) + if flow is None or ( + flow.id in stored_fingerprints + and flow.fingerprint != stored_fingerprints[flow.id] + ): + changed_flows.add(frame.flow_id) + return changed_flows + + +def calculate_flow_fingerprints(all_flows: FlowsList) -> Dict[str, str]: + """Calculate fingerprints for all flows.""" + return {flow.id: flow.fingerprint for flow in all_flows.underlying_flows} + + def execute_commands( tracker: DialogueStateTracker, all_flows: FlowsList ) -> List[Event]: @@ -113,7 +150,23 @@ def execute_commands( commands = clean_up_commands(commands, tracker, all_flows) - events: List[Event] = [] + updated_flows = find_updated_flows(tracker, all_flows) + if updated_flows: + # Override commands + structlogger.debug( + "command_executor.running_flows_were_updated", + updated_flow_ids=updated_flows, + ) + commands = [HandleCodeChangeCommand()] + + # store current flow hashes if they changed + new_hashes = calculate_flow_fingerprints(all_flows) + flow_hash_events: List[Event] = [] + if new_hashes != (tracker.get_slot(FLOW_HASHES_SLOT) or {}): + flow_hash_events.append(SlotSet(FLOW_HASHES_SLOT, new_hashes)) + tracker.update_with_events(flow_hash_events, None) + + events: List[Event] = flow_hash_events # commands need to be reversed to make sure they end up in the right order # on the stack. e.g. 
if there multiple start flow commands, the first one diff --git a/rasa/dialogue_understanding/stack/dialogue_stack.py b/rasa/dialogue_understanding/stack/dialogue_stack.py index 5059b919523d..45911b0207c6 100644 --- a/rasa/dialogue_understanding/stack/dialogue_stack.py +++ b/rasa/dialogue_understanding/stack/dialogue_stack.py @@ -6,6 +6,7 @@ from rasa.shared.core.constants import ( DIALOGUE_STACK_SLOT, ) +from rasa.shared.core.events import Event, SlotSet from rasa.shared.core.trackers import ( DialogueStateTracker, ) @@ -129,6 +130,10 @@ def get_persisted_stack(tracker: DialogueStateTracker) -> List[Dict[str, Any]]: The persisted stack as a dictionary.""" return tracker.get_slot(DIALOGUE_STACK_SLOT) or [] + def persist_as_event(self) -> Event: + """Returns the stack as a slot set event.""" + return SlotSet(DIALOGUE_STACK_SLOT, self.as_dict()) + @staticmethod def from_tracker(tracker: DialogueStateTracker) -> DialogueStack: """Creates a `DialogueStack` from a tracker. diff --git a/rasa/dialogue_understanding/stack/frames/flow_stack_frame.py b/rasa/dialogue_understanding/stack/frames/flow_stack_frame.py index ceeb4d5bfe39..20b7cfc6b4be 100644 --- a/rasa/dialogue_understanding/stack/frames/flow_stack_frame.py +++ b/rasa/dialogue_understanding/stack/frames/flow_stack_frame.py @@ -143,8 +143,8 @@ def from_dict(data: Dict[str, Any]) -> UserFlowStackFrame: The created `DialogueStackFrame`. """ return UserFlowStackFrame( - data["frame_id"], - data["flow_id"], - data["step_id"], - FlowStackFrameType.from_str(data.get("frame_type")), + frame_id=data["frame_id"], + flow_id=data["flow_id"], + step_id=data["step_id"], + frame_type=FlowStackFrameType.from_str(data.get("frame_type")), ) diff --git a/rasa/dialogue_understanding/stack/utils.py b/rasa/dialogue_understanding/stack/utils.py index 44c1675b82ef..71e59b90d4ba 100644 --- a/rasa/dialogue_understanding/stack/utils.py +++ b/rasa/dialogue_understanding/stack/utils.py @@ -5,7 +5,7 @@ from rasa.dialogue_understanding.stack.frames import BaseFlowStackFrame from rasa.dialogue_understanding.stack.dialogue_stack import DialogueStack from rasa.dialogue_understanding.stack.frames import UserFlowStackFrame -from rasa.shared.core.flows.flow import FlowsList +from rasa.shared.core.flows.flow import END_STEP, ContinueFlowStep, FlowsList def top_flow_frame( @@ -106,3 +106,21 @@ def user_flows_on_the_stack(dialogue_stack: DialogueStack) -> Set[str]: return { f.flow_id for f in dialogue_stack.frames if isinstance(f, UserFlowStackFrame) } + + +def end_top_user_flow(stack: DialogueStack) -> None: + """Ends all frames on top of the stack including the topmost user frame. + + Ends all flows until the next user flow is reached. This is useful + if you want to end all flows that are currently on the stack and + the user flow that triggered them. + + Args: + stack: The dialogue stack. 
+ """ + + for frame in reversed(stack.frames): + if isinstance(frame, BaseFlowStackFrame): + frame.step_id = ContinueFlowStep.continue_step_for_id(END_STEP) + if isinstance(frame, UserFlowStackFrame): + break diff --git a/rasa/engine/recipes/default_recipe.py b/rasa/engine/recipes/default_recipe.py index ca47bf658fbd..49de80c4ee80 100644 --- a/rasa/engine/recipes/default_recipe.py +++ b/rasa/engine/recipes/default_recipe.py @@ -104,7 +104,7 @@ class ComponentType(Enum): COMMAND_GENERATOR = 7 name = "default.v1" - _registered_components: Dict[Text, RegisteredComponent] = {} + _registered_components: Dict[Text, RegisteredComponent] = {} # noqa: RUF012 def __init__(self) -> None: """Creates recipe.""" diff --git a/rasa/env.py b/rasa/env.py new file mode 100644 index 000000000000..3415487c3807 --- /dev/null +++ b/rasa/env.py @@ -0,0 +1,5 @@ +AUTH_TOKEN_ENV = "AUTH_TOKEN" +JWT_SECRET_ENV = "JWT_SECRET" +JWT_METHOD_ENV = "JWT_METHOD" +DEFAULT_JWT_METHOD = "HS256" +JWT_PRIVATE_KEY_ENV = "JWT_PRIVATE_KEY" diff --git a/rasa/model_testing.py b/rasa/model_testing.py index b07b3f75c4a6..7648b4ab6987 100644 --- a/rasa/model_testing.py +++ b/rasa/model_testing.py @@ -104,7 +104,6 @@ def _get_sanitized_model_directory(model_directory: Text) -> Text: Returns: The adjusted model_directory that should be used in `test_core_models_in_directory`. """ - p = Path(model_directory) if p.is_file(): if model_directory != rasa.model.get_latest_model(): @@ -246,7 +245,6 @@ async def compare_nlu_models( exclusion_percentages: List[int], ) -> None: """Trains multiple models, compares them and saves the results.""" - from rasa.nlu.test import drop_intents_below_freq from rasa.nlu.utils import write_json_to_file from rasa.utils.io import create_path diff --git a/rasa/nlu/classifiers/sklearn_intent_classifier.py b/rasa/nlu/classifiers/sklearn_intent_classifier.py index 5c941d3d8806..2cf30517fc41 100644 --- a/rasa/nlu/classifiers/sklearn_intent_classifier.py +++ b/rasa/nlu/classifiers/sklearn_intent_classifier.py @@ -106,8 +106,8 @@ def transform_labels_str2num(self, labels: List[Text]) -> np.ndarray: def transform_labels_num2str(self, y: np.ndarray) -> np.ndarray: """Transforms a list of strings into numeric label representation. - :param y: List of labels to convert to numeric representation""" - + :param y: List of labels to convert to numeric representation + """ return self.le.inverse_transform(y) def train(self, training_data: TrainingData) -> Resource: diff --git a/rasa/nlu/emulators/emulator.py b/rasa/nlu/emulators/emulator.py index e6cbfd8d0517..9870116e5abe 100644 --- a/rasa/nlu/emulators/emulator.py +++ b/rasa/nlu/emulators/emulator.py @@ -19,11 +19,13 @@ def normalise_request_json(self, data: Dict[Text, Any]) -> Dict[Text, Any]: The transformed input data. 
""" _data = { - "text": data["text"][0] if type(data["text"]) == list else data["text"] + "text": data["text"][0] + if type(data["text"]) == list # noqa: E721 + else data["text"] } if data.get("model"): - if type(data["model"]) == list: + if type(data["model"]) == list: # noqa: E721 _data["model"] = data["model"][0] else: _data["model"] = data["model"] diff --git a/rasa/nlu/extractors/crf_entity_extractor.py b/rasa/nlu/extractors/crf_entity_extractor.py index 1332c250d55a..a5e1e015ee9a 100644 --- a/rasa/nlu/extractors/crf_entity_extractor.py +++ b/rasa/nlu/extractors/crf_entity_extractor.py @@ -90,7 +90,7 @@ class CRFEntityExtractor(GraphComponent, EntityExtractorMixin): CONFIG_FEATURES = "features" - function_dict: Dict[Text, Callable[[CRFToken], Any]] = { + function_dict: Dict[Text, Callable[[CRFToken], Any]] = { # noqa: RUF012 CRFEntityExtractorOptions.LOW: lambda crf_token: crf_token.text.lower(), CRFEntityExtractorOptions.TITLE: lambda crf_token: crf_token.text.istitle(), CRFEntityExtractorOptions.PREFIX5: lambda crf_token: crf_token.text[:5], diff --git a/rasa/nlu/extractors/entity_synonyms.py b/rasa/nlu/extractors/entity_synonyms.py index 7c3765e752ba..a7ae5a034e1a 100644 --- a/rasa/nlu/extractors/entity_synonyms.py +++ b/rasa/nlu/extractors/entity_synonyms.py @@ -158,9 +158,9 @@ def _add_entities_if_synonyms(self, entity: Text, synonym: Optional[Text]) -> No ): rasa.shared.utils.io.raise_warning( f"Found conflicting synonym definitions " - f"for {repr(entity_lowercase)}. Overwriting target " - f"{repr(self.synonyms[entity_lowercase])} with " - f"{repr(synonym)}. " + f"for {entity_lowercase!r}. Overwriting target " + f"{self.synonyms[entity_lowercase]!r} with " + f"{synonym!r}. " f"Check your training data and remove " f"conflicting synonym definitions to " f"prevent this from happening.", diff --git a/rasa/nlu/extractors/extractor.py b/rasa/nlu/extractors/extractor.py index 8f38eb3bb45e..dec3895dd6e0 100644 --- a/rasa/nlu/extractors/extractor.py +++ b/rasa/nlu/extractors/extractor.py @@ -130,7 +130,6 @@ def filter_trainable_entities( `extractor` set to something other than self.name (e.g. 'CRFEntityExtractor') are removed. """ - filtered = [] for message in entity_examples: entities = [] @@ -157,7 +156,7 @@ def convert_predictions_into_entities( text: Text, tokens: List[Token], tags: Dict[Text, List[Text]], - split_entities_config: Dict[Text, bool] = None, + split_entities_config: Optional[Dict[Text, bool]] = None, confidences: Optional[Dict[Text, List[float]]] = None, ) -> List[Dict[Text, Any]]: """Convert predictions into entities. diff --git a/rasa/nlu/extractors/mitie_entity_extractor.py b/rasa/nlu/extractors/mitie_entity_extractor.py index 2a2705f665f4..15f8b9de2428 100644 --- a/rasa/nlu/extractors/mitie_entity_extractor.py +++ b/rasa/nlu/extractors/mitie_entity_extractor.py @@ -182,7 +182,7 @@ def _prepare_mitie_sample(training_example: Message) -> Any: except Exception as e: rasa.shared.utils.io.raise_warning( f"Failed to add entity example " - f"'{str(e)}' of sentence '{str(text)}'. " + f"'{e!s}' of sentence '{text!s}'. " f"Example will be ignored. 
Reason: " f"{e}" ) diff --git a/rasa/nlu/featurizers/dense_featurizer/lm_featurizer.py b/rasa/nlu/featurizers/dense_featurizer/lm_featurizer.py index f6c1536cd1d9..7c305e9738e8 100644 --- a/rasa/nlu/featurizers/dense_featurizer/lm_featurizer.py +++ b/rasa/nlu/featurizers/dense_featurizer/lm_featurizer.py @@ -120,7 +120,7 @@ def _load_model_metadata(self) -> None: if self.model_name not in model_class_dict: raise KeyError( f"'{self.model_name}' not a valid model name. Choose from " - f"{str(list(model_class_dict.keys()))} or create" + f"{list(model_class_dict.keys())!s} or create" f"a new class inheriting from this class to support your model." ) @@ -528,6 +528,7 @@ def _add_extra_padding( This is only done if the input was truncated during the batch preparation of input for the model. + Args: sequence_embeddings: Embeddings returned from the model actual_sequence_lengths: original sequence length of all inputs diff --git a/rasa/nlu/featurizers/sparse_featurizer/lexical_syntactic_featurizer.py b/rasa/nlu/featurizers/sparse_featurizer/lexical_syntactic_featurizer.py index 92312197755a..dd930204ad24 100644 --- a/rasa/nlu/featurizers/sparse_featurizer/lexical_syntactic_featurizer.py +++ b/rasa/nlu/featurizers/sparse_featurizer/lexical_syntactic_featurizer.py @@ -76,7 +76,9 @@ class LexicalSyntacticFeaturizer(SparseFeaturizer, GraphComponent): # NOTE: "suffix5" of the token "is" will be "is". Hence, when combining multiple # prefixes, short words will be represented/encoded repeatedly. - _FUNCTION_DICT: Dict[Text, Callable[[Token], Union[Text, bool, None]]] = { + _FUNCTION_DICT: Dict[ + Text, Callable[[Token], Union[Text, bool, None]] + ] = { # noqa: RUF012 "low": lambda token: token.text.islower(), "title": lambda token: token.text.istitle(), "prefix5": lambda token: token.text[:5], diff --git a/rasa/nlu/utils/__init__.py b/rasa/nlu/utils/__init__.py index a0a00fe4c124..2df0f21e1e18 100644 --- a/rasa/nlu/utils/__init__.py +++ b/rasa/nlu/utils/__init__.py @@ -10,7 +10,6 @@ def write_json_to_file(filename: Text, obj: Any, **kwargs: Any) -> None: def write_to_file(filename: Text, text: Any) -> None: """Write a text to a file.""" - rasa.shared.utils.io.write_text_file(str(text), filename) diff --git a/rasa/nlu/utils/bilou_utils.py b/rasa/nlu/utils/bilou_utils.py index 9f739a8c501e..fe6f23978542 100644 --- a/rasa/nlu/utils/bilou_utils.py +++ b/rasa/nlu/utils/bilou_utils.py @@ -252,8 +252,7 @@ def _add_bilou_tags_to_entities( def ensure_consistent_bilou_tagging( predicted_tags: List[Text], predicted_confidences: List[float] ) -> Tuple[List[Text], List[float]]: - """ - Ensure predicted tags follow the BILOU tagging schema. + """Ensure predicted tags follow the BILOU tagging schema. We assume that starting B- tags are correct. Followed tags that belong to start tag but have a different entity type are updated considering also the confidence @@ -269,7 +268,6 @@ def ensure_consistent_bilou_tagging( List of tags. List of confidences. 
""" - for idx, predicted_tag in enumerate(predicted_tags): prefix = bilou_prefix_from_tag(predicted_tag) tag = tag_without_prefix(predicted_tag) diff --git a/rasa/nlu/utils/hugging_face/transformers_pre_post_processors.py b/rasa/nlu/utils/hugging_face/transformers_pre_post_processors.py index 185ba43985ca..973cf9c8e54b 100644 --- a/rasa/nlu/utils/hugging_face/transformers_pre_post_processors.py +++ b/rasa/nlu/utils/hugging_face/transformers_pre_post_processors.py @@ -57,7 +57,6 @@ def gpt_tokens_pre_processor(token_ids: List[int]) -> List[int]: Returns: List of token ids augmented with special tokens. """ - return token_ids @@ -212,7 +211,6 @@ def roberta_embeddings_post_processor( Returns: sentence level embedding and post-processed sequence level embedding """ - post_processed_embedding = sequence_embeddings[1:-1] sentence_embedding = np.mean(post_processed_embedding, axis=0) @@ -222,7 +220,7 @@ def roberta_embeddings_post_processor( def xlm_embeddings_post_processor( sequence_embeddings: np.ndarray, ) -> Tuple[np.ndarray, np.ndarray]: - """Post process embeddings from XLM models + """Post process embeddings from XLM models. by taking a mean over sequence embeddings and returning that as sentence representation. Remove first and last time steps diff --git a/rasa/shared/core/constants.py b/rasa/shared/core/constants.py index ed2d31b4cee5..368d1dff1176 100644 --- a/rasa/shared/core/constants.py +++ b/rasa/shared/core/constants.py @@ -41,6 +41,9 @@ ACTION_CLARIFY_FLOWS = "action_clarify_flows" ACTION_CORRECT_FLOW_SLOT = "action_correct_flow_slot" ACTION_RUN_SLOT_REJECTIONS_NAME = "action_run_slot_rejections" +ACTION_CLEAN_STACK = "action_clean_stack" +ACTION_TRIGGER_SEARCH = "action_trigger_search" +ACTION_TRIGGER_CHITCHAT = "action_trigger_chitchat" DEFAULT_ACTION_NAMES = [ @@ -62,6 +65,9 @@ ACTION_CORRECT_FLOW_SLOT, ACTION_CLARIFY_FLOWS, ACTION_RUN_SLOT_REJECTIONS_NAME, + ACTION_CLEAN_STACK, + ACTION_TRIGGER_SEARCH, + ACTION_TRIGGER_CHITCHAT, ] ACTION_SHOULD_SEND_DOMAIN = "send_domain" @@ -89,11 +95,9 @@ REQUESTED_SLOT = "requested_slot" DIALOGUE_STACK_SLOT = "dialogue_stack" RETURN_VALUE_SLOT = "return_value" +FLOW_HASHES_SLOT = "flow_hashes" -FLOW_SLOT_NAMES = [ - DIALOGUE_STACK_SLOT, - RETURN_VALUE_SLOT, -] +FLOW_SLOT_NAMES = [DIALOGUE_STACK_SLOT, RETURN_VALUE_SLOT, FLOW_HASHES_SLOT] # slots for knowledge base SLOT_LISTED_ITEMS = "knowledge_base_listed_objects" @@ -101,13 +105,18 @@ SLOT_LAST_OBJECT_TYPE = "knowledge_base_last_object_type" DEFAULT_KNOWLEDGE_BASE_ACTION = "action_query_knowledge_base" +KNOWLEDGE_BASE_SLOT_NAMES = { + SLOT_LISTED_ITEMS, + SLOT_LAST_OBJECT, + SLOT_LAST_OBJECT_TYPE, +} + DEFAULT_SLOT_NAMES = { REQUESTED_SLOT, DIALOGUE_STACK_SLOT, SESSION_START_METADATA_SLOT, - SLOT_LISTED_ITEMS, - SLOT_LAST_OBJECT, - SLOT_LAST_OBJECT_TYPE, + RETURN_VALUE_SLOT, + FLOW_HASHES_SLOT, } diff --git a/rasa/shared/core/conversation.py b/rasa/shared/core/conversation.py index 6b73b04dcfe6..55011c11f8cd 100644 --- a/rasa/shared/core/conversation.py +++ b/rasa/shared/core/conversation.py @@ -8,11 +8,12 @@ class Dialogue: - """A dialogue comprises a list of Turn objects""" + """A dialogue comprises a list of Turn objects.""" def __init__(self, name: Text, events: List["Event"]) -> None: """This function initialises the dialogue with the dialogue name and the event - list.""" + list. 
+ """ self.name = name self.events = events @@ -24,7 +25,8 @@ def __str__(self) -> Text: def as_dict(self) -> Dict: """This function returns the dialogue as a dictionary to assist in - serialization.""" + serialization. + """ return {"events": [event.as_dict() for event in self.events], "name": self.name} @classmethod diff --git a/rasa/shared/core/domain.py b/rasa/shared/core/domain.py index c105926a09b2..62132cd2d5bf 100644 --- a/rasa/shared/core/domain.py +++ b/rasa/shared/core/domain.py @@ -37,12 +37,13 @@ IGNORED_INTENTS, RESPONSE_CONDITION, ) -import rasa.shared.core.constants from rasa.shared.core.constants import ( ACTION_SHOULD_SEND_DOMAIN, + SLOT_MAPPINGS, SlotMappingType, MAPPING_TYPE, MAPPING_CONDITIONS, + KNOWLEDGE_BASE_SLOT_NAMES, ) from rasa.shared.exceptions import ( RasaException, @@ -490,6 +491,13 @@ def collect_slots(slot_dict: Dict[Text, Any]) -> List[Slot]: slot_type = slot_dict[slot_name].pop("type", None) slot_class = Slot.resolve_by_type(slot_type) + if SLOT_MAPPINGS not in slot_dict[slot_name]: + logger.warning( + f"Slot '{slot_name}' has no mappings defined. " + f"We will continue with an empty list of mappings." + ) + slot_dict[slot_name][SLOT_MAPPINGS] = [] + slot = slot_class(slot_name, **slot_dict[slot_name]) slots.append(slot) return slots @@ -518,7 +526,7 @@ def _transform_intent_properties_for_internal_use( `used_entities` since this is the expected format of the intent when used internally. """ - name, properties = list(intent.items())[0] + name, properties = next(iter(intent.items())) if properties: properties.setdefault(USE_ENTITIES_KEY, True) @@ -709,7 +717,7 @@ def _intent_properties( } } else: - intent_name = list(intent.keys())[0] + intent_name = next(iter(intent.keys())) return ( intent_name, @@ -852,7 +860,7 @@ def _collect_overridden_default_intents( User-defined intents that are default intents. 
""" intent_names: Set[Text] = { - list(intent.keys())[0] if isinstance(intent, dict) else intent + next(iter(intent.keys())) if isinstance(intent, dict) else intent for intent in intents } return sorted( @@ -909,7 +917,7 @@ def _sort_intent_names_alphabetical_order( ) -> List[Union[Text, Dict]]: def sort(elem: Union[Text, Dict]) -> Union[Text, Dict]: if isinstance(elem, dict): - return list(elem.keys())[0] + return next(iter(elem.keys())) elif isinstance(elem, str): return elem @@ -1030,12 +1038,7 @@ def _add_knowledge_base_slots(self) -> None: ) ) slot_names = [slot.name for slot in self.slots] - knowledge_base_slots = [ - rasa.shared.core.constants.SLOT_LISTED_ITEMS, - rasa.shared.core.constants.SLOT_LAST_OBJECT, - rasa.shared.core.constants.SLOT_LAST_OBJECT_TYPE, - ] - for slot in knowledge_base_slots: + for slot in KNOWLEDGE_BASE_SLOT_NAMES: if slot not in slot_names: self.slots.append( TextSlot(slot, mappings=[], influence_conversation=False) @@ -1722,7 +1725,7 @@ def check_mappings( def get_exception_message( duplicates: Optional[List[Tuple[List[Text], Text]]] = None, - mappings: List[Tuple[Text, Text]] = None, + mappings: Optional[List[Tuple[Text, Text]]] = None, ) -> Text: """Return a message given a list of error locations.""" message = "" diff --git a/rasa/shared/core/flows/flow.py b/rasa/shared/core/flows/flow.py index 5eb5355ba4b0..365e52537118 100644 --- a/rasa/shared/core/flows/flow.py +++ b/rasa/shared/core/flows/flow.py @@ -1,6 +1,7 @@ from __future__ import annotations from dataclasses import dataclass +from functools import cached_property from typing import ( Any, Dict, @@ -16,7 +17,7 @@ import structlog from rasa.shared.core.trackers import DialogueStateTracker -from rasa.shared.constants import RASA_DEFAULT_FLOW_PATTERN_PREFIX +from rasa.shared.constants import RASA_DEFAULT_FLOW_PATTERN_PREFIX, UTTER_PREFIX from rasa.shared.exceptions import RasaException from rasa.shared.nlu.constants import ENTITY_ATTRIBUTE_TYPE, INTENT_NAME_KEY @@ -119,7 +120,7 @@ def __str__(self) -> Text: class UnresolvedFlowStepIdException(RasaException): - """Raised when a flow step is referenced but it's id can not be resolved.""" + """Raised when a flow step is referenced, but its id can not be resolved.""" def __init__( self, step_id: Text, flow: Flow, referenced_from: Optional[FlowStep] @@ -263,6 +264,11 @@ def non_pattern_flows(self) -> List[str]: All flows that can be started.""" return [f.id for f in self.underlying_flows if not f.is_handling_pattern()] + @property + def utterances(self) -> Set[str]: + """Retrieve all utterances of all flows""" + return set().union(*[flow.utterances for flow in self.underlying_flows]) + @dataclass class Flow: @@ -271,7 +277,7 @@ class Flow: id: Text """The id of the flow.""" name: Text - """The name of the flow.""" + """The human-readable name of the flow.""" description: Optional[Text] """The description of the flow.""" step_sequence: StepSequence @@ -291,11 +297,16 @@ def from_json(flow_id: Text, flow_config: Dict[Text, Any]) -> Flow: return Flow( id=flow_id, - name=flow_config.get("name", ""), + name=flow_config.get("name", Flow.create_default_name(flow_id)), description=flow_config.get("description"), step_sequence=Flow.resolve_default_ids(step_sequence), ) + @staticmethod + def create_default_name(flow_id: str) -> str: + """Create a default flow name for when it is missing.""" + return flow_id.replace("_", " ").replace("-", " ") + @staticmethod def resolve_default_ids(step_sequence: StepSequence) -> StepSequence: """Resolves the default ids of all 
steps in the sequence. @@ -535,6 +546,16 @@ def steps(self) -> List[FlowStep]: """Returns the steps of the flow.""" return self.step_sequence.steps + @cached_property + def fingerprint(self) -> str: + """Create a fingerprint identifying this step sequence.""" + return rasa.shared.utils.io.deep_container_fingerprint(self.as_json()) + + @property + def utterances(self) -> Set[str]: + """Retrieve all utterances of this flow""" + return set().union(*[step.utterances for step in self.step_sequence.steps]) + @dataclass class StepSequence: @@ -677,6 +698,11 @@ def default_id_postfix(self) -> str: """Returns the default id postfix of the flow step.""" raise NotImplementedError() + @property + def utterances(self) -> Set[str]: + """Return all the utterances used in this step""" + return set() + class InternalFlowStep(FlowStep): """Represents the configuration of a built-in flow step. @@ -810,6 +836,11 @@ def as_json(self) -> Dict[Text, Any]: def default_id_postfix(self) -> str: return self.action + @property + def utterances(self) -> Set[str]: + """Return all the utterances used in this step""" + return {self.action} if self.action.startswith(UTTER_PREFIX) else set() + @dataclass class BranchFlowStep(FlowStep): @@ -1297,6 +1328,11 @@ def default_id_postfix(self) -> str: """Returns the default id postfix of the flow step.""" return f"collect_{self.collect}" + @property + def utterances(self) -> Set[str]: + """Return all the utterances used in this step""" + return {self.utter} | {r.utter for r in self.rejections} + @dataclass class SetSlotsFlowStep(FlowStep): diff --git a/rasa/shared/core/flows/flows_yaml_schema.json b/rasa/shared/core/flows/flows_yaml_schema.json new file mode 100644 index 000000000000..dd5b08b2a89f --- /dev/null +++ b/rasa/shared/core/flows/flows_yaml_schema.json @@ -0,0 +1,286 @@ +{ + "type": "object", + "required": [ + "flows" + ], + "properties": { + "version": { + "type": "string" + }, + "flows": { + "type": "object", + "patternProperties": { + "^[A-Za-z_][A-Za-z0-9_]*$": { + "$ref": "#$defs/flow" + } + } + } + }, + "$defs": { + "steps": { + "type": "array", + "minContains": 1, + "items": { + "type": "object", + "oneOf": [ + { + "required": [ + "action" + ], + "additionalProperties": false, + "properties": { + "id": { + "type": "string" + }, + "action": { + "type": "string" + }, + "next": { + "$ref": "#$defs/next" + } + } + }, + { + "required": [ + "collect" + ], + "additionalProperties": false, + "properties": { + "id": { + "type": "string" + }, + "description":{ + "type": "string" + }, + "collect": { + "type": "string" + }, + "ask_before_filling": { + "type": "boolean" + }, + "reset_after_flow_ends": { + "type": "boolean" + }, + "utter": { + "type": "string" + }, + "rejections": { + "type": "array", + "items": { + "type": "object", + "required": [ + "if", + "utter" + ], + "properties": { + "if": { + "type": "string" + }, + "utter": { + "type": "string" + } + } + } + }, + "next": { + "$ref": "#$defs/next" + } + } + }, + { + "required": [ + "link" + ], + "additionalProperties": false, + "properties": { + "id": { + "type": "string" + }, + "link": { + "type": "string" + }, + "next": { + "type": "null" + } + } + }, + { + "required": [ + "set_slots" + ], + "additionalProperties": false, + "properties": { + "id": { + "type": "string" + }, + "set_slots": { + "$ref": "#$defs/set_slots" + }, + "next": { + "$ref": "#$defs/next" + } + } + }, + { + "required": [ + "next" + ], + "additionalProperties": false, + "properties": { + "next": { + "$ref": "#$defs/next" + }, + "id": { + "type": 
"string" + } + } + }, + { + "required": [ + "generation_prompt" + ], + "additionalProperties": false, + "properties": { + "generation_prompt": { + "type": "string" + }, + "id": { + "type": "string" + }, + "next": { + "$ref": "#$defs/next" + } + } + }, + { + "required": [ + "entry_prompt" + ], + "additionalProperties": false, + "properties": { + "id": { + "type": "string" + }, + "entry_prompt": { + "type": "string" + }, + "next": { + "$ref": "#$defs/next" + } + } + } + ] + } + }, + "flow": { + "required": [ + "steps" + ], + "type": "object", + "additionalProperties": false, + "properties": { + "description": { + "type": "string" + }, + "if": { + "type": "string" + }, + "name": { + "type": "string" + }, + "nlu_trigger": { + "type": "array", + "items": { + "required": [ + "intent" + ], + "type": "object", + "additionalProperties": false, + "properties": { + "intent": { + "type": "object", + "properties": { + "confidence_threshold": { + "type": "number" + }, + "name": { + "type": "string" + } + }, + "required": [ + "name" + ] + } + } + } + }, + "steps": { + "$ref": "#$defs/steps" + } + } + }, + "next": { + "anyOf": [ + { + "type": "array", + "minContains": 1, + "items": { + "type": "object", + "oneOf": [ + { + "required": [ + "if", + "then" + ] + }, + { + "required": [ + "else" + ] + } + ], + "properties": { + "else": { + "oneOf": [ + { + "type": "string" + }, + { + "$ref": "#$defs/steps" + } + ] + }, + "if": { + "type": "string" + }, + "then": { + "oneOf": [ + { + "$ref": "#$defs/steps" + }, + { + "type": "string" + } + ] + } + } + } + }, + { + "type": "string" + } + ] + }, + "set_slots": { + "type": "array", + "items": { + "type": "object", + "patternProperties": { + "^[A-Za-z_][A-Za-z0-9_]*$": { + "type": ["string", "null", "boolean", "number"] + } + } + } + } + } +} diff --git a/rasa/shared/core/flows/flows_yaml_schema.yml b/rasa/shared/core/flows/flows_yaml_schema.yml deleted file mode 100644 index af6f24a468c3..000000000000 --- a/rasa/shared/core/flows/flows_yaml_schema.yml +++ /dev/null @@ -1,8 +0,0 @@ -allowempty: True -mapping: - version: - type: "str" - required: False - allowempty: False - flows: - type: "any" diff --git a/rasa/shared/core/flows/utils.py b/rasa/shared/core/flows/utils.py index 86ede4f9aa4a..250efb93720c 100644 --- a/rasa/shared/core/flows/utils.py +++ b/rasa/shared/core/flows/utils.py @@ -3,7 +3,6 @@ import rasa.shared.data import rasa.shared.utils.io - KEY_FLOWS = "flows" diff --git a/rasa/shared/core/flows/yaml_flows_io.py b/rasa/shared/core/flows/yaml_flows_io.py index 2ae963ae1a71..14cf7da82557 100644 --- a/rasa/shared/core/flows/yaml_flows_io.py +++ b/rasa/shared/core/flows/yaml_flows_io.py @@ -1,5 +1,7 @@ +import textwrap from pathlib import Path from typing import List, Text, Union + from rasa.shared.core.flows.utils import KEY_FLOWS import rasa.shared.utils.io @@ -8,7 +10,7 @@ from rasa.shared.core.flows.flow import Flow, FlowsList -FLOWS_SCHEMA_FILE = "/shared/core/flows/flows_yaml_schema.yml" +FLOWS_SCHEMA_FILE = "shared/core/flows/flows_yaml_schema.json" class YAMLFlowsReader: @@ -52,7 +54,9 @@ def read_from_string(cls, string: Text, skip_validation: bool = False) -> FlowsL `Flow`s read from `string`. 
""" if not skip_validation: - rasa.shared.utils.validation.validate_yaml_schema(string, FLOWS_SCHEMA_FILE) + rasa.shared.utils.validation.validate_yaml_with_jsonschema( + string, FLOWS_SCHEMA_FILE + ) yaml_content = rasa.shared.utils.io.read_yaml(string) @@ -91,3 +95,8 @@ def dump(flows: List[Flow], filename: Union[Text, Path]) -> None: filename: The path to the file to write to. """ rasa.shared.utils.io.write_text_file(YamlFlowsWriter.dumps(flows), filename) + + +def flows_from_str(yaml_str: str) -> FlowsList: + """Reads flows from a YAML string.""" + return YAMLFlowsReader.read_from_string(textwrap.dedent(yaml_str)) diff --git a/rasa/shared/core/generator.py b/rasa/shared/core/generator.py index e1b8ffce1abf..169335d80851 100644 --- a/rasa/shared/core/generator.py +++ b/rasa/shared/core/generator.py @@ -573,7 +573,6 @@ def _subsample_trackers( max_number_of_trackers: int, ) -> List[TrackerWithCachedStates]: """Subsample the list of trackers to retrieve a random subset.""" - # if flows get very long and have a lot of forks we # get into trouble by collecting too many trackers # hence the sub sampling @@ -585,7 +584,7 @@ def _subsample_trackers( return incoming_trackers def _find_start_checkpoint_name(self, end_name: Text) -> Text: - """Find start checkpoint name given end checkpoint name of a cycle""" + """Find start checkpoint name given end checkpoint name of a cycle.""" return self.story_graph.story_end_checkpoints.get(end_name, end_name) @staticmethod @@ -595,9 +594,8 @@ def _add_unused_end_checkpoints( used_checkpoints: Set[Text], ) -> Set[Text]: """Add unused end checkpoints - if they were never encountered as start checkpoints + if they were never encountered as start checkpoints. """ - return unused_checkpoints.union( { start_name @@ -611,7 +609,8 @@ def _filter_active_trackers( active_trackers: TrackerLookupDict, unused_checkpoints: Set[Text] ) -> TrackerLookupDict: """Filter active trackers that ended with unused checkpoint - or are parts of loops.""" + or are parts of loops. + """ next_active_trackers = defaultdict(list) for start_name in unused_checkpoints: @@ -667,8 +666,8 @@ def _process_step( The trackers that reached the steps starting checkpoint will be used to process the events. Collects and returns training - data while processing the story step.""" - + data while processing the story step. + """ events = step.explicit_events(self.domain) trackers = [] @@ -739,8 +738,8 @@ def _remove_duplicate_trackers( we only need to keep one. Because as we continue processing events and story steps, all trackers that created the same featurization once will do so in the future (as we - feed the same events to all trackers).""" - + feed the same events to all trackers). + """ step_hashed_featurizations = set() # collected trackers that created different featurizations @@ -780,8 +779,8 @@ def _remove_duplicate_story_end_trackers( self, trackers: List[TrackerWithCachedStates] ) -> List[TrackerWithCachedStates]: """Removes trackers that reached story end and - created equal featurizations.""" - + created equal featurizations. + """ # collected trackers that created different featurizations unique_trackers = [] # for all steps @@ -811,8 +810,8 @@ def _mark_first_action_in_story_steps_as_unpredictable(self) -> None: contain action listen events (they are added when a story gets converted to a dialogue) we need to apply a small trick to avoid marking actions occurring after - an action listen as unpredictable.""" - + an action listen as unpredictable. 
+ """ for step in self.story_graph.story_steps: # TODO: this does not work if a step is the conversational start # as well as an intermediary part of a conversation. @@ -840,8 +839,8 @@ def _issue_unused_checkpoint_notification( """Warns about unused story blocks. Unused steps are ones having a start or end checkpoint - that no one provided.""" - + that no one provided. + """ if STORY_START in unused_checkpoints: rasa.shared.utils.io.raise_warning( "There is no starting story block " diff --git a/rasa/shared/core/slot_mappings.py b/rasa/shared/core/slot_mappings.py index 44d19a4cd5c7..57130cccb61d 100644 --- a/rasa/shared/core/slot_mappings.py +++ b/rasa/shared/core/slot_mappings.py @@ -229,7 +229,7 @@ def validate_slot_mappings(domain_slots: Dict[Text, Any]) -> None: ) for slot_name, properties in domain_slots.items(): - mappings = properties.get(SLOT_MAPPINGS) + mappings = properties.get(SLOT_MAPPINGS, []) for slot_mapping in mappings: SlotMapping.validate(slot_mapping, slot_name) diff --git a/rasa/shared/core/slots.py b/rasa/shared/core/slots.py index a28c8538679d..b6f6eb44415b 100644 --- a/rasa/shared/core/slots.py +++ b/rasa/shared/core/slots.py @@ -78,7 +78,8 @@ def has_features(self) -> bool: def value_reset_delay(self) -> Optional[int]: """After how many turns the slot should be reset to the initial_value. - If the delay is set to `None`, the slot will keep its value forever.""" + If the delay is set to `None`, the slot will keep its value forever. + """ # TODO: FUTURE this needs to be implemented - slots are not reset yet return self._value_reset_delay diff --git a/rasa/shared/core/trackers.py b/rasa/shared/core/trackers.py index 6e3361638ed3..415dbe897448 100644 --- a/rasa/shared/core/trackers.py +++ b/rasa/shared/core/trackers.py @@ -754,7 +754,7 @@ def export_stories_to_file(self, export_path: Text = "debug_stories.yml") -> Non def get_last_event_for( self, event_type: Union[Type["EventTypeAlias"], Tuple[Type["EventTypeAlias"], ...]], - action_names_to_exclude: List[Text] = None, + action_names_to_exclude: Optional[List[Text]] = None, skip: int = 0, event_verbosity: EventVerbosity = EventVerbosity.APPLIED, ) -> Optional["EventTypeAlias"]: diff --git a/rasa/shared/core/training_data/story_writer/story_writer.py b/rasa/shared/core/training_data/story_writer/story_writer.py index d82787d120b5..5c404a3ce7e3 100644 --- a/rasa/shared/core/training_data/story_writer/story_writer.py +++ b/rasa/shared/core/training_data/story_writer/story_writer.py @@ -27,6 +27,7 @@ def dumps( the existing story file. is_test_story: Identifies if the stories should be exported in test stories format. + Returns: String with story steps in the desired format. 
""" diff --git a/rasa/shared/core/training_data/structures.py b/rasa/shared/core/training_data/structures.py index 78d27057de5d..79898ea09550 100644 --- a/rasa/shared/core/training_data/structures.py +++ b/rasa/shared/core/training_data/structures.py @@ -378,7 +378,9 @@ def add_event_as_condition(self, event: Event) -> None: class Story: def __init__( - self, story_steps: List[StoryStep] = None, story_name: Optional[Text] = None + self, + story_steps: Optional[List[StoryStep]] = None, + story_name: Optional[Text] = None, ) -> None: self.story_steps = story_steps if story_steps else [] self.story_name = story_name diff --git a/rasa/shared/exceptions.py b/rasa/shared/exceptions.py index 3150a0b6aabf..57cef0423c8b 100644 --- a/rasa/shared/exceptions.py +++ b/rasa/shared/exceptions.py @@ -32,7 +32,8 @@ def __init__(self, filename: Optional[Text] = None) -> None: """Create exception. Args: - filename: optional file the error occurred in""" + filename: optional file the error occurred in + """ self.filename = filename diff --git a/rasa/shared/importers/importer.py b/rasa/shared/importers/importer.py index a03c92298940..9cd5904d43ee 100644 --- a/rasa/shared/importers/importer.py +++ b/rasa/shared/importers/importer.py @@ -405,15 +405,19 @@ def load_default_pattern_flows_domain() -> Domain: return Domain.from_path(default_flows_file) - @rasa.shared.utils.common.cached_method - def get_flows(self) -> FlowsList: - flows = self._importer.get_flows() + @classmethod + def merge_with_default_flows(cls, flows: FlowsList) -> FlowsList: + """Merges the passed flows with the default flows. - if flows.is_empty(): - # if there are no flows, we don't need to add the default flows either - return flows + If a user defined flow contains a flow with an id of a default flow, + it will overwrite the default flow. + + Args: + flows: user defined flows. - default_flows = self.load_default_pattern_flows() + Returns: + Merged flows.""" + default_flows = cls.load_default_pattern_flows() user_flow_ids = [flow.id for flow in flows.underlying_flows] missing_default_flows = [ @@ -424,6 +428,16 @@ def get_flows(self) -> FlowsList: return flows.merge(FlowsList(missing_default_flows)) + @rasa.shared.utils.common.cached_method + def get_flows(self) -> FlowsList: + flows = self._importer.get_flows() + + if flows.is_empty(): + # if there are no flows, we don't need to add the default flows either + return flows + + return self.merge_with_default_flows(flows) + @rasa.shared.utils.common.cached_method def get_domain(self) -> Domain: """Merge existing domain with properties of flows.""" diff --git a/rasa/shared/importers/multi_project.py b/rasa/shared/importers/multi_project.py index 7b5f443dfb86..faceee970241 100644 --- a/rasa/shared/importers/multi_project.py +++ b/rasa/shared/importers/multi_project.py @@ -122,7 +122,6 @@ def no_skills_selected(self) -> bool: def training_paths(self) -> Set[Text]: """Returns the paths which should be searched for training data.""" - # only include extra paths if they are not part of the current project directory training_paths = { i @@ -136,8 +135,8 @@ def training_paths(self) -> Set[Text]: return training_paths def is_imported(self, path: Text) -> bool: - """ - Checks whether a path is imported by a skill. + """Checks whether a path is imported by a skill. + Args: path: File or directory path which should be checked. 
diff --git a/rasa/shared/nlu/training_data/formats/rasa_yaml.py b/rasa/shared/nlu/training_data/formats/rasa_yaml.py index b31253bd493f..7aa3190f0177 100644 --- a/rasa/shared/nlu/training_data/formats/rasa_yaml.py +++ b/rasa/shared/nlu/training_data/formats/rasa_yaml.py @@ -61,7 +61,8 @@ def __init__(self) -> None: def validate(self, string: Text) -> None: """Check if the string adheres to the NLU yaml data schema. - If the string is not in the right format, an exception will be raised.""" + If the string is not in the right format, an exception will be raised. + """ try: validation.validate_yaml_schema(string, NLU_SCHEMA_FILE) except YamlException as e: diff --git a/rasa/shared/nlu/training_data/loading.py b/rasa/shared/nlu/training_data/loading.py index 4b05e616626f..194e9b25ce17 100644 --- a/rasa/shared/nlu/training_data/loading.py +++ b/rasa/shared/nlu/training_data/loading.py @@ -44,7 +44,8 @@ def load_data(resource_name: Text, language: Optional[Text] = "en") -> "TrainingData": """Load training data from disk. - Merges them if loaded from disk and multiple files are found.""" + Merges them if loaded from disk and multiple files are found. + """ if not os.path.exists(resource_name): raise ValueError(f"File '{resource_name}' does not exist.") @@ -91,7 +92,6 @@ def _reader_factory(fformat: Text) -> Optional["TrainingDataReader"]: def _load(filename: Text, language: Optional[Text] = "en") -> Optional["TrainingData"]: """Loads a single training data file from disk.""" - fformat = guess_format(filename) if fformat == UNK: raise ValueError(f"Unknown data format for file '{filename}'.") diff --git a/rasa/shared/nlu/training_data/message.py b/rasa/shared/nlu/training_data/message.py index ed7bde2cbb67..2f06a97c7832 100644 --- a/rasa/shared/nlu/training_data/message.py +++ b/rasa/shared/nlu/training_data/message.py @@ -103,8 +103,7 @@ def get(self, prop: Text, default: Optional[Any] = None) -> Any: return self.data.get(prop, default) def as_dict_nlu(self) -> dict: - """Get dict representation of message as it would appear in training data""" - + """Get dict representation of message as it would appear in training data.""" d = self.as_dict() if d.get(INTENT, None): d[INTENT] = self.get_full_intent() @@ -196,8 +195,7 @@ def build( return cls(data, **kwargs) def get_full_intent(self) -> Text: - """Get intent as it appears in training data""" - + """Get intent as it appears in training data.""" return ( self.get(INTENT_RESPONSE_KEY) if self.get(INTENT_RESPONSE_KEY) diff --git a/rasa/shared/nlu/training_data/training_data.py b/rasa/shared/nlu/training_data/training_data.py index 7e7df9014035..595b53056915 100644 --- a/rasa/shared/nlu/training_data/training_data.py +++ b/rasa/shared/nlu/training_data/training_data.py @@ -171,7 +171,6 @@ def filter_training_examples( Returns: TrainingData: A TrainingData with filtered training examples. """ - return TrainingData( list(filter(condition, self.training_examples)), self.entity_synonyms, @@ -195,7 +194,6 @@ def sanitize_examples(examples: List[Message]) -> List[Message]: Remove trailing whitespaces from intent and response annotations and drop duplicate examples. 
""" - for ex in examples: if ex.get(INTENT): ex.set(INTENT, ex.get(INTENT).strip()) @@ -305,7 +303,6 @@ def entity_roles_groups_used(self) -> bool: @lazy_property def number_of_examples_per_entity(self) -> Dict[Text, int]: """Calculates the number of examples per entity.""" - entities = [] def _append_entity(entity: Dict[Text, Any], attribute: Text) -> None: @@ -322,7 +319,7 @@ def _append_entity(entity: Dict[Text, Any], attribute: Text) -> None: return dict(Counter(entities)) def sort_regex_features(self) -> None: - """Sorts regex features lexicographically by name+pattern""" + """Sorts regex features lexicographically by name+pattern.""" self.regex_features = sorted( self.regex_features, key=lambda e: "{}+{}".format(e["name"], e["pattern"]) ) @@ -432,8 +429,8 @@ def persist( self, dir_name: Text, filename: Text = DEFAULT_TRAINING_DATA_OUTPUT_PATH ) -> Dict[Text, Any]: """Persists this training data to disk and returns necessary - information to load it again.""" - + information to load it again. + """ if not os.path.exists(dir_name): os.makedirs(dir_name) @@ -445,7 +442,6 @@ def persist( def sorted_entities(self) -> List[Any]: """Extract all entities from examples and sorts them by entity type.""" - entity_examples = [ entity for ex in self.entity_examples for entity in ex.get("entities") ] @@ -505,8 +501,8 @@ def train_test_split( self, train_frac: float = 0.8, random_seed: Optional[int] = None ) -> Tuple["TrainingData", "TrainingData"]: """Split into a training and test dataset, - preserving the fraction of examples per intent.""" - + preserving the fraction of examples per intent. + """ # collect all nlu data test, train = self.split_nlu_examples(train_frac, random_seed) @@ -543,7 +539,6 @@ def _needed_responses_for_examples( Returns: All responses that appear at least once in the list of examples. """ - responses = {} for ex in examples: if ex.get(INTENT_RESPONSE_KEY) and ex.get(RESPONSE): @@ -563,7 +558,6 @@ def split_nlu_examples( Returns: Test and training examples. """ - self.validate() # Stratified split: both test and train should have (approximately) the diff --git a/rasa/shared/nlu/training_data/util.py b/rasa/shared/nlu/training_data/util.py index 56ff34b065ba..eb8b6ff68313 100644 --- a/rasa/shared/nlu/training_data/util.py +++ b/rasa/shared/nlu/training_data/util.py @@ -33,7 +33,7 @@ def transform_entity_synonyms( synonyms: List[Dict[Text, Any]], known_synonyms: Optional[Dict[Text, Any]] = None ) -> Dict[Text, Any]: - """Transforms the entity synonyms into a text->value dictionary""" + """Transforms the entity synonyms into a text->value dictionary.""" entity_synonyms = known_synonyms if known_synonyms else {} for s in synonyms: if "value" in s and "synonyms" in s: @@ -54,8 +54,7 @@ def check_duplicate_synonym( def get_file_format_extension(resource_name: Text) -> Text: - """ - Get the file extension based on training data format. It supports both a folder and + """Get the file extension based on training data format. It supports both a folder and a file, and tries to guess the format as follows: - if the resource is a file and has a known format, return this format's extension @@ -65,9 +64,10 @@ def get_file_format_extension(resource_name: Text) -> Text: Args: resource_name: The name of the resource, can be a file or a folder. + Returns: The resource file format. 
- """ + """ # noqa: E501 from rasa.shared.nlu.training_data import loading if resource_name is None or not os.path.exists(resource_name): @@ -97,7 +97,6 @@ def remove_untrainable_entities_from(example: Dict[Text, Any]) -> None: Args: example: Serialised training example to inspect. """ - example_entities = example.get(ENTITIES) if not example_entities: @@ -193,7 +192,6 @@ def build_entity( Returns: an entity dictionary """ - entity = { ENTITY_ATTRIBUTE_START: start, ENTITY_ATTRIBUTE_END: end, diff --git a/rasa/shared/utils/common.py b/rasa/shared/utils/common.py index 9f069aad7060..1d33139ec5d5 100644 --- a/rasa/shared/utils/common.py +++ b/rasa/shared/utils/common.py @@ -67,7 +67,7 @@ def module_path_from_instance(inst: Any) -> Text: def sort_list_of_dicts_by_first_key(dicts: List[Dict]) -> List[Dict]: """Sorts a list of dictionaries by their first key.""" - return sorted(dicts, key=lambda d: list(d.keys())[0]) + return sorted(dicts, key=lambda d: next(iter(d.keys()))) def lazy_property(function: Callable) -> Any: @@ -172,7 +172,6 @@ def minimal_kwargs( Subset of kwargs which are accepted by `func`. """ - excluded_keys = excluded_keys or [] possible_arguments = arguments_of(func) @@ -186,7 +185,6 @@ def minimal_kwargs( def mark_as_experimental_feature(feature_name: Text) -> None: """Warns users that they are using an experimental feature.""" - logger.warning( f"The {feature_name} is currently experimental and might change or be " "removed in the future 🔬 Please share your feedback on it in the " diff --git a/rasa/shared/utils/llm.py b/rasa/shared/utils/llm.py index 5ee795da6650..5cf22b85883f 100644 --- a/rasa/shared/utils/llm.py +++ b/rasa/shared/utils/llm.py @@ -10,6 +10,7 @@ from rasa.shared.core.trackers import DialogueStateTracker from rasa.shared.core.events import BotUttered, UserUttered from rasa.shared.engine.caching import get_local_cache_location +import rasa.shared.utils.io structlogger = structlog.get_logger() @@ -208,3 +209,22 @@ def embedder_factory( return embeddings_cls(**parameters) else: raise ValueError(f"Unsupported embeddings type '{typ}'") + + +def get_prompt_template( + jinja_file_path: Optional[Text], default_prompt_template: Text +) -> Text: + """Returns the prompt template. + + Args: + jinja_file_path: the path to the jinja file + default_prompt_template: the default prompt template + + Returns: + The prompt template. 
+ """ + return ( + rasa.shared.utils.io.read_file(jinja_file_path) + if jinja_file_path is not None + else default_prompt_template + ) diff --git a/rasa/shared/utils/pykwalify_extensions.py b/rasa/shared/utils/pykwalify_extensions.py index 5d998208b059..4ac68b2a045e 100644 --- a/rasa/shared/utils/pykwalify_extensions.py +++ b/rasa/shared/utils/pykwalify_extensions.py @@ -1,5 +1,4 @@ -""" -This module regroups custom validation functions, and it is +"""This module regroups custom validation functions, and it is loaded as an extension of the pykwalify library: https://pykwalify.readthedocs.io/en/latest/extensions.html#extensions diff --git a/rasa/shared/utils/schemas/domain.yml b/rasa/shared/utils/schemas/domain.yml index bd615c9b9161..134512b25596 100644 --- a/rasa/shared/utils/schemas/domain.yml +++ b/rasa/shared/utils/schemas/domain.yml @@ -77,7 +77,7 @@ mapping: required: False mappings: type: "seq" - required: True + required: False allowempty: False sequence: - type: "map" diff --git a/rasa/shared/utils/validation.py b/rasa/shared/utils/validation.py index 04a0d5b43da6..a91ff4c7a37a 100644 --- a/rasa/shared/utils/validation.py +++ b/rasa/shared/utils/validation.py @@ -289,3 +289,44 @@ def validate_training_data_format_version( docs=DOCS_URL_TRAINING_DATA, ) return False + + +def validate_yaml_with_jsonschema( + yaml_file_content: Text, schema_path: Text, package_name: Text = PACKAGE_NAME +) -> None: + """Validate data format. + + Args: + yaml_file_content: the content of the yaml file to be validated + schema_path: the schema of the yaml file + package_name: the name of the package the schema is located in. defaults + to `rasa`. + + Raises: + YamlSyntaxException: if the yaml file is not valid. + SchemaValidationError: if validation fails. + """ + from jsonschema import validate, ValidationError + from ruamel.yaml import YAMLError + import pkg_resources + + schema_file = pkg_resources.resource_filename(package_name, schema_path) + schema_content = rasa.shared.utils.io.read_json_file(schema_file) + + try: + # we need "rt" since + # it will add meta information to the parsed output. this meta information + # will include e.g. at which line an object was parsed. this is very + # helpful when we validate files later on and want to point the user to the + # right line + source_data = rasa.shared.utils.io.read_yaml( + yaml_file_content, reader_type=["safe", "rt"] + ) + except (YAMLError, DuplicateKeyError) as e: + raise YamlSyntaxException(underlying_yaml_exception=e) + + try: + validate(source_data, schema_content) + except ValidationError as error: + error.message += ". Failed to validate data, make sure your data is valid." 
+ raise SchemaValidationError.create_from(error) from error diff --git a/rasa/utils/common.py b/rasa/utils/common.py index 216d77356461..59d3e6bd3905 100644 --- a/rasa/utils/common.py +++ b/rasa/utils/common.py @@ -363,7 +363,7 @@ def update_rabbitmq_log_level(library_log_level: Text) -> None: def sort_list_of_dicts_by_first_key(dicts: List[Dict]) -> List[Dict]: """Sorts a list of dictionaries by their first key.""" - return sorted(dicts, key=lambda d: list(d.keys())[0]) + return sorted(dicts, key=lambda d: next(iter(d.keys()))) def write_global_config_value(name: Text, value: Any) -> bool: diff --git a/rasa/utils/converter.py b/rasa/utils/converter.py index 023682f56359..c52f8e8922d3 100644 --- a/rasa/utils/converter.py +++ b/rasa/utils/converter.py @@ -48,5 +48,6 @@ def generate_path_for_converted_training_data_file( @classmethod def converted_file_suffix(cls) -> Text: """Returns suffix that should be appended to the converted - training data file.""" + training data file. + """ return "_converted.yml" diff --git a/rasa/utils/endpoints.py b/rasa/utils/endpoints.py index cffc7523a2e3..d74ca995a5e8 100644 --- a/rasa/utils/endpoints.py +++ b/rasa/utils/endpoints.py @@ -19,9 +19,9 @@ def read_endpoint_config( filename: Text, endpoint_type: Text ) -> Optional["EndpointConfig"]: - """Read an endpoint configuration file from disk and extract one + """Read an endpoint configuration file from disk and extract one config. - config.""" + """ if not filename: return None diff --git a/rasa/utils/io.py b/rasa/utils/io.py index 3388ef98b049..8df382da050c 100644 --- a/rasa/utils/io.py +++ b/rasa/utils/io.py @@ -124,7 +124,6 @@ def create_temporary_directory() -> Text: def create_path(file_path: Text) -> None: """Makes sure all directories in the 'file_path' exists.""" - parent_dir = os.path.dirname(os.path.abspath(file_path)) if not os.path.exists(parent_dir): os.makedirs(parent_dir) @@ -160,8 +159,8 @@ def create_validator( function: Callable[[Text], bool], error_message: Text ) -> Type["Validator"]: """Helper method to create `Validator` classes from callable functions. Should be - removed when questionary supports `Validator` objects.""" - + removed when questionary supports `Validator` objects. + """ from prompt_toolkit.validation import Validator, ValidationError from prompt_toolkit.document import Document diff --git a/rasa/utils/plotting.py b/rasa/utils/plotting.py index bc4fca82ea47..54ca39ac023f 100644 --- a/rasa/utils/plotting.py +++ b/rasa/utils/plotting.py @@ -41,7 +41,7 @@ def _fix_matplotlib_backend() -> None: elif backend is None: # pragma: no cover try: # If the `tkinter` package is available, we can use the `TkAgg` backend - import tkinter + import tkinter  # noqa: F401 logger.debug("Setting matplotlib backend to 'TkAgg'") matplotlib.use("TkAgg") diff --git a/rasa/utils/tensorflow/crf.py b/rasa/utils/tensorflow/crf.py index 1318eedd9c3b..018f62924e5f 100644 --- a/rasa/utils/tensorflow/crf.py +++ b/rasa/utils/tensorflow/crf.py @@ -226,6 +226,7 @@ def crf_unary_score( tag_indices: A [batch_size, max_seq_len] matrix of tag indices. sequence_lengths: A [batch_size] vector of true sequence lengths. inputs: A [batch_size, max_seq_len, num_tags] tensor of unary potentials. + Returns: unary_scores: A [batch_size] vector of unary scores. """ @@ -266,6 +267,7 @@ def crf_binary_score( tag_indices: A [batch_size, max_seq_len] matrix of tag indices. sequence_lengths: A [batch_size] vector of true sequence lengths. transition_params: A [num_tags, num_tags] matrix of binary potentials.
+ Returns: binary_scores: A [batch_size] vector of binary scores. """ @@ -310,6 +312,7 @@ def crf_sequence_score( we compute the unnormalized score. sequence_lengths: A [batch_size] vector of true sequence lengths. transition_params: A [num_tags, num_tags] transition matrix. + Returns: sequence_scores: A [batch_size] vector of unnormalized sequence scores. """ @@ -403,6 +406,7 @@ def crf_log_norm( to use as input to the CRF layer. sequence_lengths: A [batch_size] vector of true sequence lengths. transition_params: A [num_tags, num_tags] transition matrix. + Returns: log_norm: A [batch_size] vector of normalizers for a CRF. """ @@ -457,6 +461,7 @@ def crf_log_likelihood( sequence_lengths: A [batch_size] vector of true sequence lengths. transition_params: A [num_tags, num_tags] transition matrix, if available. + Returns: log_likelihood: A [batch_size] `Tensor` containing the log-likelihood of each example, given the sequence of tag indices. diff --git a/rasa/utils/tensorflow/data_generator.py b/rasa/utils/tensorflow/data_generator.py index 9157ea7252ca..a696f607c026 100644 --- a/rasa/utils/tensorflow/data_generator.py +++ b/rasa/utils/tensorflow/data_generator.py @@ -1,3 +1,4 @@ +import math from typing import List, Union, Text, Optional, Any, Tuple, Dict, cast import logging @@ -380,7 +381,11 @@ def __len__(self) -> int: # data was rebalanced, so need to recalculate number of examples num_examples = self.model_data.number_of_examples(self._data) batch_size = self._current_batch_size - return num_examples // batch_size + int(num_examples % batch_size > 0) + # keep last batch only if it has at least half a batch size of examples + last_batch_half_full = num_examples % batch_size >= math.ceil(batch_size / 2) + num_batches = num_examples // batch_size + int(last_batch_half_full) + # Return at least 1 if there is an example + return max(num_batches, int(num_examples > 0)) def __getitem__(self, index: int) -> Tuple[Any, Any]: """Gets batch at position `index`. diff --git a/rasa/utils/tensorflow/environment.py b/rasa/utils/tensorflow/environment.py index 15984d980bea..74c5ad3cd5f4 100644 --- a/rasa/utils/tensorflow/environment.py +++ b/rasa/utils/tensorflow/environment.py @@ -53,7 +53,6 @@ def _allocate_gpu_memory( logical_memory: Absolute amount of memory to be allocated to the new logical device. """ - from tensorflow import config as tf_config try: @@ -84,7 +83,6 @@ def _parse_gpu_config(gpu_memory_config: Text) -> Dict[int, int]: Parsed configuration as a dictionary with GPU IDs as keys and requested memory as the value. """ - # gpu_config is of format "gpu_id_1:gpu_id_1_memory, gpu_id_2: gpu_id_2_memory" # Parse it and store in a dictionary parsed_gpu_config: Dict[int, int] = {} diff --git a/rasa/utils/tensorflow/layers.py b/rasa/utils/tensorflow/layers.py index 6ba29ec2a32f..7fbc9590f3c5 100644 --- a/rasa/utils/tensorflow/layers.py +++ b/rasa/utils/tensorflow/layers.py @@ -67,7 +67,6 @@ def call( Raises: A ValueError if inputs is not a sparse tensor """ - if not isinstance(inputs, tf.SparseTensor): raise ValueError("Input tensor should be sparse.") @@ -510,7 +509,6 @@ def call( Returns: A tuple of masked inputs and boolean mask. """ - if training is None: training = K.learning_phase() @@ -651,7 +649,6 @@ def loss( Negative mean log-likelihood of all examples, given the sequence of tag indices. 
""" - log_likelihood, _ = crf_log_likelihood( logits, tag_indices, sequence_lengths, self.transition_params ) @@ -664,8 +661,7 @@ def loss( def f1_score( self, tag_ids: tf.Tensor, pred_ids: tf.Tensor, mask: tf.Tensor ) -> tf.Tensor: - """Calculates f1 score for train predictions""" - + """Calculates f1 score for train predictions.""" mask_bool = tf.cast(mask[:, :, 0], tf.bool) # pick only non padding values and flatten sequences @@ -950,7 +946,6 @@ def _sample_negatives( all_labels: tf.Tensor, ) -> Tuple[tf.Tensor, tf.Tensor, tf.Tensor, tf.Tensor, tf.Tensor, tf.Tensor]: """Sample negative examples.""" - pos_inputs_embed = tf.expand_dims(inputs_embed, axis=-2) pos_labels_embed = tf.expand_dims(labels_embed, axis=-2) @@ -980,7 +975,6 @@ def _train_sim( mask: Optional[tf.Tensor], ) -> Tuple[tf.Tensor, tf.Tensor, tf.Tensor, tf.Tensor, tf.Tensor]: """Define similarity.""" - # calculate similarity with several # embedded actions for the loss neg_inf = tf.constant(-1e9) @@ -1024,7 +1018,6 @@ def _loss_margin( mask: Optional[tf.Tensor], ) -> tf.Tensor: """Define max margin loss.""" - # loss for maximizing similarity with correct action loss = tf.maximum(0.0, self.mu_pos - tf.squeeze(sim_pos, axis=-1)) diff --git a/rasa/utils/tensorflow/metrics.py b/rasa/utils/tensorflow/metrics.py index 7face21ff2b2..5a247485fb9a 100644 --- a/rasa/utils/tensorflow/metrics.py +++ b/rasa/utils/tensorflow/metrics.py @@ -185,7 +185,6 @@ def result(self) -> TensorLike: def get_config(self) -> Dict[str, Any]: """Returns the serializable config of the metric.""" - config = { "num_classes": self.num_classes, "average": self.average, @@ -269,7 +268,7 @@ class F1Score(FBetaScore): def __init__( self, num_classes: TensorLike, - average: str = None, + average: Optional[str] = None, threshold: Optional[TensorLike] = None, name: str = "f1_score", dtype: Any = None, diff --git a/rasa/utils/tensorflow/model_data.py b/rasa/utils/tensorflow/model_data.py index 128ff6cbd575..126985cb6f19 100644 --- a/rasa/utils/tensorflow/model_data.py +++ b/rasa/utils/tensorflow/model_data.py @@ -270,8 +270,7 @@ def __init__( label_sub_key: Optional[Text] = None, data: Optional[Data] = None, ) -> None: - """ - Initializes the RasaModelData object. + """Initializes the RasaModelData object. Args: label_key: the key of a label used for balancing, etc. @@ -400,7 +399,6 @@ def does_feature_not_exist(self, key: Text, sub_key: Optional[Text] = None) -> b def is_empty(self) -> bool: """Checks if data is set.""" - return not self.data def number_of_examples(self, data: Optional[Data] = None) -> int: diff --git a/rasa/utils/tensorflow/transformer.py b/rasa/utils/tensorflow/transformer.py index f2a2d66db563..cf267d15c4e0 100644 --- a/rasa/utils/tensorflow/transformer.py +++ b/rasa/utils/tensorflow/transformer.py @@ -161,7 +161,6 @@ def _relative_to_absolute_position(self, x: tf.Tensor) -> tf.Tensor: A tensor of shape (batch, num_heads, length, length, depth) or (batch, num_heads, length, length) """ - x_dim = len(x.shape) if x_dim < 4 or x_dim > 5: @@ -286,7 +285,6 @@ def _scaled_dot_product_attention( output: A tensor with shape (..., length, depth). attention_weights: A tensor with shape (..., length, length). 
""" - matmul_qk = tf.matmul(query, key, transpose_b=True) # (..., length, length) if self.use_key_relative_position: @@ -320,7 +318,6 @@ def _split_heads(self, x: tf.Tensor) -> tf.Tensor: Transpose the result such that the shape is (batch_size, num_heads, length, depth) """ - x = tf.reshape(x, (tf.shape(x)[0], -1, self.num_heads, self._depth)) return tf.transpose(x, perm=[0, 2, 1, 3]) @@ -333,7 +330,6 @@ def _combine_heads(self, x: tf.Tensor) -> tf.Tensor: Returns: A Tensor with shape [batch, length, units] """ - # (batch_size, length, num_heads, depth) x = tf.transpose(x, perm=[0, 2, 1, 3]) # (batch_size, length, units) diff --git a/rasa/validator.py b/rasa/validator.py index 0a06bb575268..285afdf640c7 100644 --- a/rasa/validator.py +++ b/rasa/validator.py @@ -1,12 +1,19 @@ import logging +import re +import string from collections import defaultdict -from typing import Set, Text, Optional, Dict, Any, List +from typing import Set, Text, Optional, Dict, Any, List, Tuple + +from pypred import Predicate import rasa.core.training.story_conflict from rasa.shared.core.flows.flow import ( ActionFlowStep, + BranchFlowStep, CollectInformationFlowStep, FlowsList, + IfFlowLink, + SetSlotsFlowStep, ) import rasa.shared.nlu.constants from rasa.shared.constants import ( @@ -15,7 +22,6 @@ CONFIG_MANDATORY_KEYS, DOCS_URL_DOMAINS, DOCS_URL_FORMS, - UTTER_ASK_PREFIX, UTTER_PREFIX, DOCS_URL_ACTIONS, REQUIRED_SLOTS_KEY, @@ -27,6 +33,7 @@ from rasa.shared.core.domain import Domain from rasa.shared.core.generator import TrainingDataGenerator from rasa.shared.core.constants import SlotMappingType, MAPPING_TYPE +from rasa.shared.core.slots import ListSlot, Slot from rasa.shared.core.training_data.structures import StoryGraph from rasa.shared.importers.importer import TrainingDataImporter from rasa.shared.nlu.training_data.training_data import TrainingData @@ -108,7 +115,6 @@ def verify_example_repetition_in_intents( self, ignore_warnings: bool = True ) -> bool: """Checks if there is no duplicated example in different intents.""" - everything_is_alright = True duplication_hash = defaultdict(set) @@ -133,8 +139,8 @@ def verify_intents_in_stories(self, ignore_warnings: bool = True) -> bool: """Checks intents used in stories. Verifies if the intents used in the stories are valid, and whether - all valid intents are used in the stories.""" - + all valid intents are used in the stories. + """ everything_is_alright = self.verify_intents(ignore_warnings=ignore_warnings) stories_intents = { @@ -221,39 +227,23 @@ def _utterances_used_in_stories(self) -> Set[str]: stories_utterances.add(event.action_name) return stories_utterances - def _utterances_used_in_flows(self) -> Set[str]: - """Return all utterances which are used in flows.""" - flow_utterances = set() - - for flow in self.flows.underlying_flows: - for step in flow.steps: - if isinstance(step, ActionFlowStep) and step.action.startswith( - UTTER_PREFIX - ): - flow_utterances.add(step.action) - if isinstance(step, CollectInformationFlowStep): - flow_utterances.add(UTTER_ASK_PREFIX + step.collect) - return flow_utterances - def verify_utterances_in_dialogues(self, ignore_warnings: bool = True) -> bool: """Verifies usage of utterances in stories or flows. Checks whether utterances used in the stories are valid, and whether all valid utterances are used in stories. 
""" - everything_is_alright = True - utterance_actions = self._gather_utterance_actions() stories_utterances = self._utterances_used_in_stories() - flow_utterances = self._utterances_used_in_flows() + flow_utterances = self.flows.utterances all_used_utterances = flow_utterances.union(stories_utterances) everything_is_alright = ( ignore_warnings or self._does_story_only_use_valid_actions( - stories_utterances, utterance_actions + stories_utterances, list(utterance_actions) ) ) @@ -345,7 +335,6 @@ def verify_story_structure( `False` is a conflict was found and `ignore_warnings` is `False`. `True` otherwise. """ - logger.info("Story structure validation...") trackers = TrainingDataGenerator( @@ -370,7 +359,6 @@ def verify_story_structure( def verify_nlu(self, ignore_warnings: bool = True) -> bool: """Runs all the validations on intents and utterances.""" - logger.info("Validating intents...") intents_are_valid = self.verify_intents_in_stories(ignore_warnings) @@ -483,3 +471,216 @@ def warn_if_config_mandatory_keys_are_not_set(self) -> None: f"'{ASSISTANT_ID_KEY}' mandatory key. Please replace the default " f"placeholder value with a unique identifier." ) + + @staticmethod + def _log_error_if_slot_not_in_domain( + slot_name: str, + domain_slots: Dict[Text, Slot], + step_id: str, + flow_id: str, + all_good: bool, + ) -> bool: + if slot_name not in domain_slots: + logger.error( + f"The slot '{slot_name}' is used in the " + f"step '{step_id}' of flow id '{flow_id}', but it " + f"is not listed in the domain slots. " + f"You should add it to your domain file!", + ) + all_good = False + + return all_good + + @staticmethod + def _log_error_if_list_slot( + slot: Slot, step_id: str, flow_id: str, all_good: bool + ) -> bool: + if isinstance(slot, ListSlot): + logger.error( + f"The slot '{slot.name}' is used in the " + f"step '{step_id}' of flow id '{flow_id}', but it " + f"is a list slot. List slots are currently not " + f"supported in flows. You should change it to a " + f"text, boolean or float slot in your domain file!", + ) + all_good = False + + return all_good + + @staticmethod + def _log_error_if_dialogue_stack_slot( + slot: Slot, step_id: str, flow_id: str, all_good: bool + ) -> bool: + if slot.name == constants.DIALOGUE_STACK_SLOT: + logger.error( + f"The slot '{constants.DIALOGUE_STACK_SLOT}' is used in the " + f"step '{step_id}' of flow id '{flow_id}', but it " + f"is a reserved slot. 
You must not use reserved slots in " f"your flows.", + ) + all_good = False + + return all_good + + def verify_flows_steps_against_domain(self) -> bool: + """Checks flows steps' references against the domain file.""" + all_good = True + domain_slots = {slot.name: slot for slot in self.domain.slots} + for flow in self.flows.underlying_flows: + for step in flow.steps: + if isinstance(step, CollectInformationFlowStep): + all_good = self._log_error_if_slot_not_in_domain( + step.collect, domain_slots, step.id, flow.id, all_good + ) + current_slot = domain_slots.get(step.collect) + if not current_slot: + continue + + all_good = self._log_error_if_list_slot( + current_slot, step.id, flow.id, all_good + ) + all_good = self._log_error_if_dialogue_stack_slot( + current_slot, step.id, flow.id, all_good + ) + + elif isinstance(step, SetSlotsFlowStep): + for slot in step.slots: + slot_name = slot["key"] + all_good = self._log_error_if_slot_not_in_domain( + slot_name, domain_slots, step.id, flow.id, all_good + ) + current_slot = domain_slots.get(slot_name) + if not current_slot: + continue + + all_good = self._log_error_if_list_slot( + current_slot, step.id, flow.id, all_good + ) + all_good = self._log_error_if_dialogue_stack_slot( + current_slot, step.id, flow.id, all_good + ) + + elif isinstance(step, ActionFlowStep): + regex = r"{context\..+?}" + matches = re.findall(regex, step.action) + if matches: + logger.warning( + f"An interpolated action name '{step.action}' was " + f"found at step '{step.id}' of flow id '{flow.id}'. " + f"Skipping validation for this step. " + f"Please make sure that the action name is " + f"listed in your domain responses or actions." + ) + elif step.action not in self.domain.action_names_or_texts: + logger.error( + f"The action '{step.action}' is used in the step " + f"'{step.id}' of flow id '{flow.id}', but it " + f"is not listed in the domain file. " + f"You should add it to your domain file!", + ) + all_good = False + return all_good + + def verify_unique_flows(self) -> bool: + """Checks if all flows have unique names and descriptions.""" + all_good = True + flow_names = set() + flow_descriptions = set() + punctuation_table = str.maketrans({i: "" for i in string.punctuation}) + + for flow in self.flows.underlying_flows: + flow_description = flow.description + cleaned_description = flow_description.translate(punctuation_table) # type: ignore[union-attr] # noqa: E501 + if cleaned_description in flow_descriptions: + logger.error( + f"Detected duplicate flow description for flow id '{flow.id}'. " + f"Flow descriptions must be unique. " + f"Please make sure that all flows have different descriptions." + ) + all_good = False + + if not flow.name: + logger.error(f"Flow with flow id '{flow.id}' has an empty name.") + all_good = False + + if flow.name in flow_names: + logger.error( + f"Detected duplicate flow name '{flow.name}' for flow " + f"id '{flow.id}'. Flow names must be unique. " + f"Please make sure that all flows have different names." + ) + all_good = False + + flow_names.add(flow.name) + flow_descriptions.add(cleaned_description) + + return all_good + + @staticmethod + def _construct_predicate( + predicate: Optional[str], step_id: str, all_good: bool = True + ) -> Tuple[Optional[Predicate], bool]: + try: + pred = Predicate(predicate) + except Exception as exception: + logger.error( + f"Could not initialize the predicate found under step " + f"'{step_id}': {exception}."
+ ) + pred = None + all_good = False + + return pred, all_good + + def verify_predicates(self) -> bool: + """Checks that predicates used in branch flow steps or `collect` steps are valid.""" # noqa: E501 + all_good = True + for flow in self.flows.underlying_flows: + for step in flow.steps: + if isinstance(step, BranchFlowStep): + for link in step.next.links: + if isinstance(link, IfFlowLink): + predicate, all_good = Validator._construct_predicate( + link.condition, step.id + ) + if predicate and not predicate.is_valid(): + logger.error( + f"Detected invalid condition '{link.condition}' " + f"at step '{step.id}' for flow id '{flow.id}'. " + f"Please make sure that all conditions are valid." + ) + all_good = False + elif isinstance(step, CollectInformationFlowStep): + predicates = [predicate.if_ for predicate in step.rejections] + for predicate in predicates: + pred, all_good = Validator._construct_predicate( + predicate, step.id + ) + if pred and not pred.is_valid(): + logger.error( + f"Detected invalid rejection '{predicate}' " + f"at `collect` step '{step.id}' " + f"for flow id '{flow.id}'. " + f"Please make sure that all conditions are valid." + ) + all_good = False + return all_good + + def verify_flows(self) -> bool: + """Checks for inconsistencies across flows.""" + logger.info("Validating flows...") + + if self.flows.is_empty(): + logger.warning( + "No flows were found in the data files. " + "Will not proceed with flow validation.", + ) + return True + + condition_one = self.verify_flows_steps_against_domain() + condition_two = self.verify_unique_flows() + condition_three = self.verify_predicates() + + all_good = all([condition_one, condition_two, condition_three]) + + return all_good diff --git a/rasa/version.py b/rasa/version.py index 6e999d3f7755..d15a4a5d8a6b 100644 --- a/rasa/version.py +++ b/rasa/version.py @@ -1,3 +1,3 @@ # this file will automatically be changed, # do not add anything but the version number here! 
-__version__ = "3.8.0a7" +__version__ = "3.8.0a11" diff --git a/tests/cdu/generator/test_llm_command_generator.py b/tests/cdu/generator/test_llm_command_generator.py new file mode 100644 index 000000000000..d2cd06d266fe --- /dev/null +++ b/tests/cdu/generator/test_llm_command_generator.py @@ -0,0 +1,41 @@ +import uuid + +import pytest +from _pytest.tmpdir import TempPathFactory + +from rasa.dialogue_understanding.generator.llm_command_generator import ( + LLMCommandGenerator, +) +from rasa.engine.storage.local_model_storage import LocalModelStorage +from rasa.engine.storage.resource import Resource +from rasa.engine.storage.storage import ModelStorage + + +@pytest.fixture(scope="session") +def resource() -> Resource: + return Resource(uuid.uuid4().hex) + + +@pytest.fixture(scope="session") +def model_storage(tmp_path_factory: TempPathFactory) -> ModelStorage: + return LocalModelStorage(tmp_path_factory.mktemp(uuid.uuid4().hex)) + + +async def test_llm_command_generator_prompt_init_custom( + model_storage: ModelStorage, resource: Resource +) -> None: + generator = LLMCommandGenerator( + {"prompt": "data/test_prompt_templates/test_prompt.jinja2"}, + model_storage, + resource, + ) + assert generator.prompt_template.startswith("This is a test prompt.") + + +async def test_llm_command_generator_prompt_init_default( + model_storage: ModelStorage, resource: Resource +) -> None: + generator = LLMCommandGenerator({}, model_storage, resource) + assert generator.prompt_template.startswith( + "Your task is to analyze the current conversation" + ) diff --git a/tests/cdu/__init__.py b/tests/cli/arguments/__init__.py similarity index 100% rename from tests/cdu/__init__.py rename to tests/cli/arguments/__init__.py diff --git a/tests/cli/arguments/test_run.py b/tests/cli/arguments/test_run.py new file mode 100644 index 000000000000..5d555e6db2b2 --- /dev/null +++ b/tests/cli/arguments/test_run.py @@ -0,0 +1,188 @@ +from typing import List, Dict + +import argparse +import pytest +from _pytest.monkeypatch import MonkeyPatch + +from rasa.cli.arguments.run import add_jwt_arguments, add_server_settings_arguments +from rasa.env import ( + JWT_SECRET_ENV, + JWT_METHOD_ENV, + JWT_PRIVATE_KEY_ENV, + DEFAULT_JWT_METHOD, + AUTH_TOKEN_ENV, +) + + +@pytest.mark.parametrize( + "env_variables, input_args, expected", + [ + ( + # all env variables are set + { + JWT_SECRET_ENV: "secret", + JWT_METHOD_ENV: "HS256", + JWT_PRIVATE_KEY_ENV: "private_key", + }, + [], + argparse.Namespace( + jwt_secret="secret", + jwt_method="HS256", + jwt_private_key="private_key", + ), + ), + ( + # no JWT_SECRET_ENV and --jwt-secret is set + { + JWT_METHOD_ENV: "HS256", + JWT_PRIVATE_KEY_ENV: "private_key", + }, + ["--jwt-secret", "secret"], + argparse.Namespace( + jwt_secret="secret", + jwt_method="HS256", + jwt_private_key="private_key", + ), + ), + ( + # no JWT_METHOD_ENV and --jwt-method is set + { + JWT_SECRET_ENV: "secret", + JWT_PRIVATE_KEY_ENV: "private_key", + }, + ["--jwt-method", "HS256"], + argparse.Namespace( + jwt_secret="secret", + jwt_method="HS256", + jwt_private_key="private_key", + ), + ), + ( + # no JWT_PRIVATE_KEY_ENV and --jwt-private-key is set + { + JWT_SECRET_ENV: "secret", + JWT_METHOD_ENV: "HS256", + }, + ["--jwt-private-key", "private_key"], + argparse.Namespace( + jwt_secret="secret", + jwt_method="HS256", + jwt_private_key="private_key", + ), + ), + ( + # no JWT_SECRET_ENV and no --jwt-secret + { + JWT_METHOD_ENV: "HS256", + JWT_PRIVATE_KEY_ENV: "private_key", + }, + [], + argparse.Namespace( + jwt_secret=None, + 
jwt_method="HS256", + jwt_private_key="private_key", + ), + ), + ( + # no JWT_METHOD_ENV and no --jwt-method + { + JWT_SECRET_ENV: "secret", + JWT_PRIVATE_KEY_ENV: "private_key", + }, + [], + argparse.Namespace( + jwt_secret="secret", + jwt_method=DEFAULT_JWT_METHOD, + jwt_private_key="private_key", + ), + ), + ( + # no JWT_PRIVATE_KEY_ENV and no --jwt-private-key + { + JWT_SECRET_ENV: "secret", + JWT_METHOD_ENV: "HS256", + }, + [], + argparse.Namespace( + jwt_secret="secret", + jwt_method="HS256", + jwt_private_key=None, + ), + ), + ( + # no env variables and no arguments + {}, + [], + argparse.Namespace( + jwt_secret=None, + jwt_method="HS256", + jwt_private_key=None, + ), + ), + ], +) +def test_jwt_argument_parsing( + env_variables: Dict[str, str], + input_args: List[str], + expected: argparse.Namespace, + monkeypatch: MonkeyPatch, +) -> None: + """Tests parsing of the JWT arguments.""" + parser = argparse.ArgumentParser() + + for env_name, env_value in env_variables.items(): + monkeypatch.setenv(env_name, env_value) + + add_jwt_arguments(parser) + args = parser.parse_args(input_args) + + assert args.jwt_secret == expected.jwt_secret + assert args.jwt_method == expected.jwt_method + assert args.jwt_private_key == expected.jwt_private_key + + +@pytest.mark.parametrize( + "env_variables, input_args, expected", + [ + ( + { + AUTH_TOKEN_ENV: "secret", + }, + [], + argparse.Namespace( + auth_token="secret", + ), + ), + ( + {}, + ["--auth-token", "secret"], + argparse.Namespace( + auth_token="secret", + ), + ), + ( + {}, + [], + argparse.Namespace( + auth_token=None, + ), + ), + ], +) +def test_add_server_settings_arguments( + env_variables: Dict[str, str], + input_args: List[str], + expected: argparse.Namespace, + monkeypatch: MonkeyPatch, +) -> None: + """Tests parsing of the server settings arguments.""" + parser = argparse.ArgumentParser() + + for env_name, env_value in env_variables.items(): + monkeypatch.setenv(env_name, env_value) + + add_server_settings_arguments(parser) + + args = parser.parse_args(input_args) + + assert args.auth_token == expected.auth_token diff --git a/tests/cli/test_cli.py b/tests/cli/test_cli.py index f22a89400158..028c7ba785b5 100644 --- a/tests/cli/test_cli.py +++ b/tests/cli/test_cli.py @@ -43,7 +43,7 @@ def test_data_convert_help(run: Callable[..., RunResult]): output = run("--help") help_text = f"""usage: {RASA_EXE} [-h] [--version] - {{init,run,shell,chat,train,interactive,telemetry,test,visualize,data,export,x,evaluate}} + {{init,run,shell,train,interactive,telemetry,test,visualize,data,export,x,evaluate}} ...""" lines = help_text.split("\n") diff --git a/tests/cli/test_rasa_data.py b/tests/cli/test_rasa_data.py index e8c2f869b9ca..b9644a7ab5c8 100644 --- a/tests/cli/test_rasa_data.py +++ b/tests/cli/test_rasa_data.py @@ -4,6 +4,8 @@ from _pytest.fixtures import FixtureRequest from _pytest.pytester import RunResult + +from rasa.shared.constants import LATEST_TRAINING_DATA_FORMAT_VERSION from rasa.shared.nlu.training_data.formats import RasaYAMLReader import rasa.shared.utils.io @@ -139,7 +141,7 @@ def test_data_validate_help(run: Callable[..., RunResult]): [--max-history MAX_HISTORY] [-c CONFIG] [--fail-on-warnings] [-d DOMAIN] [--data DATA [DATA ...]] - {{stories}} ...""" + {{stories,flows}} ...""" lines = help_text.split("\n") # expected help text lines should appear somewhere in the output @@ -255,3 +257,41 @@ def test_data_split_stories(run_in_simple_project: Callable[..., RunResult]): test_data = rasa.shared.utils.io.read_yaml_file(test_file) assert 
len(test_data.get("stories", [])) == 1 assert test_data["stories"][0].get("story") == "story 2" + + +def test_rasa_data_validate_flows_success( + run_in_simple_project: Callable[..., RunResult] +) -> None: + flows_yaml = f""" +version: "{LATEST_TRAINING_DATA_FORMAT_VERSION}" +flows: + transfer_money: + description: This flow lets users send money. + name: transfer money + steps: + - id: "ask_recipient" + collect: "transfer_recipient" + next: "ask_amount" + - id: "ask_amount" + collect: "transfer_amount" + next: "execute_transfer" + - id: "execute_transfer" + action: action_transfer_money""" + + Path("data/flows.yml").write_text(flows_yaml) + + domain_yaml = """ + actions: + - action_transfer_money + intents: + - transfer_money + slots: + transfer_recipient: + type: text + mappings: [] + transfer_amount: + type: float + mappings: []""" + Path("domain.yml").write_text(domain_yaml) + result = run_in_simple_project("data", "validate", "flows") + assert result.ret == 0 diff --git a/tests/cli/test_utils.py b/tests/cli/test_utils.py index 153d9fc8ad23..c30616a01e59 100644 --- a/tests/cli/test_utils.py +++ b/tests/cli/test_utils.py @@ -319,7 +319,7 @@ def test_validate_assistant_id_in_config(config_file: Text) -> None: copy_config_data = copy.deepcopy(rasa.shared.utils.io.read_yaml_file(config_file)) warning_message = ( - f"The config file '{str(config_file)}' is missing a " + f"The config file '{config_file!s}' is missing a " f"unique value for the '{ASSISTANT_ID_KEY}' mandatory key." ) with pytest.warns(UserWarning, match=warning_message): diff --git a/tests/core/actions/test_action_trigger_chitchat.py b/tests/core/actions/test_action_trigger_chitchat.py new file mode 100644 index 000000000000..98f58ac31836 --- /dev/null +++ b/tests/core/actions/test_action_trigger_chitchat.py @@ -0,0 +1,22 @@ +from rasa.core.actions.action_trigger_chitchat import ActionTriggerChitchat +from rasa.core.channels import CollectingOutputChannel +from rasa.core.nlg import TemplatedNaturalLanguageGenerator +from rasa.dialogue_understanding.stack.frames import ChitChatStackFrame +from rasa.shared.core.constants import DIALOGUE_STACK_SLOT +from rasa.shared.core.domain import Domain +from rasa.shared.core.events import SlotSet +from rasa.shared.core.trackers import DialogueStateTracker + + +async def test_action_trigger_chitchat(): + tracker = DialogueStateTracker.from_events("test", []) + action = ActionTriggerChitchat() + channel = CollectingOutputChannel() + nlg = TemplatedNaturalLanguageGenerator({}) + events = await action.run(channel, nlg, tracker, Domain.empty()) + assert len(events) == 1 + event = events[0] + assert isinstance(event, SlotSet) + assert event.key == DIALOGUE_STACK_SLOT + assert len(event.value) == 1 + assert event.value[0]["type"] == ChitChatStackFrame.type() diff --git a/tests/core/actions/test_action_trigger_search.py b/tests/core/actions/test_action_trigger_search.py new file mode 100644 index 000000000000..fc9545c84b4f --- /dev/null +++ b/tests/core/actions/test_action_trigger_search.py @@ -0,0 +1,22 @@ +from rasa.core.actions.action_trigger_search import ActionTriggerSearch +from rasa.core.channels import CollectingOutputChannel +from rasa.core.nlg import TemplatedNaturalLanguageGenerator +from rasa.dialogue_understanding.stack.frames import SearchStackFrame +from rasa.shared.core.constants import DIALOGUE_STACK_SLOT +from rasa.shared.core.domain import Domain +from rasa.shared.core.events import SlotSet +from rasa.shared.core.trackers import DialogueStateTracker + + +async def 
test_action_trigger_search(): + tracker = DialogueStateTracker.from_events("test", []) + action = ActionTriggerSearch() + channel = CollectingOutputChannel() + nlg = TemplatedNaturalLanguageGenerator({}) + events = await action.run(channel, nlg, tracker, Domain.empty()) + assert len(events) == 1 + event = events[0] + assert isinstance(event, SlotSet) + assert event.key == DIALOGUE_STACK_SLOT + assert len(event.value) == 1 + assert event.value[0]["type"] == SearchStackFrame.type() diff --git a/tests/core/featurizers/test_precomputation.py b/tests/core/featurizers/test_precomputation.py index 465fb3b61f70..9d05fe60966d 100644 --- a/tests/core/featurizers/test_precomputation.py +++ b/tests/core/featurizers/test_precomputation.py @@ -443,7 +443,7 @@ def _check_messages_contain_attribute_which_is_key_attribute(messages: List[Mess for message in messages: assert len(message.data) == 1 assert ( - list(message.data.keys())[0] + list(message.data.keys())[0] # noqa: RUF015 in MessageContainerForCoreFeaturization.KEY_ATTRIBUTES ) diff --git a/tests/core/featurizers/test_tracker_featurizer.py b/tests/core/featurizers/test_tracker_featurizer.py index c65bf18e1a60..2066a65d6cf4 100644 --- a/tests/core/featurizers/test_tracker_featurizer.py +++ b/tests/core/featurizers/test_tracker_featurizer.py @@ -179,14 +179,25 @@ def test_featurize_trackers_with_full_dialogue_tracker_featurizer( }, ] ] - assert actual_features is not None assert len(actual_features) == len(expected_features) for actual, expected in zip(actual_features, expected_features): assert compare_featurized_states(actual, expected) - expected_labels = np.array([[0, 21, 0, 18, 19, 0, 20]]) + expected_labels = np.array( + [ + [ + 0, + moodbot_domain.action_names_or_texts.index("utter_greet"), + 0, + moodbot_domain.action_names_or_texts.index("utter_cheer_up"), + moodbot_domain.action_names_or_texts.index("utter_did_that_help"), + 0, + moodbot_domain.action_names_or_texts.index("utter_goodbye"), + ] + ] + ) assert actual_labels is not None assert len(actual_labels) == 1 for actual, expected in zip(actual_labels, expected_labels): @@ -255,7 +266,19 @@ def test_trackers_ignore_action_unlikely_intent_with_full_dialogue_tracker_featu for actual, expected in zip(actual_features, expected_features): assert compare_featurized_states(actual, expected) - expected_labels = np.array([[0, 21, 0, 18, 19, 0, 20]]) + expected_labels = np.array( + [ + [ + 0, + moodbot_domain.action_names_or_texts.index("utter_greet"), + 0, + moodbot_domain.action_names_or_texts.index("utter_cheer_up"), + moodbot_domain.action_names_or_texts.index("utter_did_that_help"), + 0, + moodbot_domain.action_names_or_texts.index("utter_goodbye"), + ] + ] + ) assert actual_labels is not None assert len(actual_labels) == 1 for actual, expected in zip(actual_labels, expected_labels): @@ -324,7 +347,22 @@ def test_trackers_keep_action_unlikely_intent_with_full_dialogue_tracker_featuri for actual, expected in zip(actual_features, expected_features): assert compare_featurized_states(actual, expected) - expected_labels = np.array([[0, 9, 21, 0, 9, 18, 19, 0, 9, 20]]) + expected_labels = np.array( + [ + [ + 0, + moodbot_domain.action_names_or_texts.index("action_unlikely_intent"), + moodbot_domain.action_names_or_texts.index("utter_greet"), + 0, + moodbot_domain.action_names_or_texts.index("action_unlikely_intent"), + moodbot_domain.action_names_or_texts.index("utter_cheer_up"), + moodbot_domain.action_names_or_texts.index("utter_did_that_help"), + 0, + 
moodbot_domain.action_names_or_texts.index("action_unlikely_intent"), + moodbot_domain.action_names_or_texts.index("utter_goodbye"), + ] + ] + ) assert actual_labels is not None assert len(actual_labels) == 1 for actual, expected in zip(actual_labels, expected_labels): @@ -832,7 +870,19 @@ def test_featurize_trackers_with_max_history_tracker_featurizer( for actual, expected in zip(actual_features, expected_features): assert compare_featurized_states(actual, expected) - expected_labels = np.array([[0, 21, 0, 18, 19, 0, 20]]).T + expected_labels = np.array( + [ + [ + 0, + moodbot_domain.action_names_or_texts.index("utter_greet"), + 0, + moodbot_domain.action_names_or_texts.index("utter_cheer_up"), + moodbot_domain.action_names_or_texts.index("utter_did_that_help"), + 0, + moodbot_domain.action_names_or_texts.index("utter_goodbye"), + ] + ] + ).T assert actual_labels is not None assert actual_labels.shape == expected_labels.shape @@ -899,7 +949,15 @@ def test_featurize_trackers_ignore_action_unlikely_intent_max_history_featurizer for actual, expected in zip(actual_features, expected_features): assert compare_featurized_states(actual, expected) - expected_labels = np.array([[0, 21, 0]]).T + expected_labels = np.array( + [ + [ + 0, + moodbot_domain.action_names_or_texts.index("utter_greet"), + 0, + ] + ] + ).T assert actual_labels.shape == expected_labels.shape for actual, expected in zip(actual_labels, expected_labels): assert np.all(actual == expected) @@ -971,7 +1029,16 @@ def test_featurize_trackers_keep_action_unlikely_intent_max_history_featurizer( for actual, expected in zip(actual_features, expected_features): assert compare_featurized_states(actual, expected) - expected_labels = np.array([[0, 9, 21, 0]]).T + expected_labels = np.array( + [ + [ + 0, + moodbot_domain.action_names_or_texts.index("action_unlikely_intent"), + moodbot_domain.action_names_or_texts.index("utter_greet"), + 0, + ] + ] + ).T assert actual_labels is not None assert actual_labels.shape == expected_labels.shape for actual, expected in zip(actual_labels, expected_labels): @@ -1088,7 +1155,19 @@ def test_deduplicate_featurize_trackers_with_max_history_tracker_featurizer( for actual, expected in zip(actual_features, expected_features): assert compare_featurized_states(actual, expected) - expected_labels = np.array([[0, 21, 0, 18, 19, 0, 20]]).T + expected_labels = np.array( + [ + [ + 0, + moodbot_domain.action_names_or_texts.index("utter_greet"), + 0, + moodbot_domain.action_names_or_texts.index("utter_cheer_up"), + moodbot_domain.action_names_or_texts.index("utter_did_that_help"), + 0, + moodbot_domain.action_names_or_texts.index("utter_goodbye"), + ] + ] + ).T if not remove_duplicates: expected_labels = np.vstack([expected_labels] * 2) diff --git a/tests/core/flows/test_flow.py b/tests/core/flows/test_flow.py index 4192ce132962..9d4f6b7692c1 100644 --- a/tests/core/flows/test_flow.py +++ b/tests/core/flows/test_flow.py @@ -1,5 +1,6 @@ from rasa.shared.core.flows.flow import FlowsList -from tests.utilities import flows_from_str +from rasa.shared.core.flows.yaml_flows_io import flows_from_str +from rasa.shared.importers.importer import FlowSyncImporter def test_non_pattern_flows(): @@ -34,3 +35,40 @@ def test_non_pattern_flows_handles_patterns_only(): """ ) assert all_flows.non_pattern_flows() == [] + + +def test_collecting_flow_utterances(): + all_flows = flows_from_str( + """ + flows: + foo: + steps: + - action: utter_welcome + - action: setup + - collect: age + rejections: + - if: age<18 + utter: utter_too_young + - 
if: age>100 + utter: utter_too_old + bar: + steps: + - action: utter_hello + - collect: income + utter: utter_ask_income_politely + """ + ) + assert all_flows.utterances == { + "utter_ask_age", + "utter_ask_income_politely", + "utter_hello", + "utter_welcome", + "utter_too_young", + "utter_too_old", + } + + +def test_default_flows_have_non_empty_names(): + default_flows = FlowSyncImporter.load_default_pattern_flows() + for flow in default_flows.underlying_flows: + assert flow.name diff --git a/tests/core/nlg/test_response.py b/tests/core/nlg/test_response.py index 7bbccb45d61d..9c0583b3f7bd 100644 --- a/tests/core/nlg/test_response.py +++ b/tests/core/nlg/test_response.py @@ -9,6 +9,7 @@ from rasa.shared.core.domain import Domain from rasa.shared.core.slots import TextSlot, AnySlot, CategoricalSlot, BooleanSlot from rasa.shared.core.trackers import DialogueStateTracker +from rasa.shared.utils.validation import YamlValidationException async def test_nlg_conditional_response_variations_with_no_slots(): @@ -625,3 +626,93 @@ async def test_nlg_conditional_response_variations_condition_logging( "[condition 2] type: slot | name: test_B | value: B" in message for message in caplog.messages ) + + +async def test_nlg_response_with_no_text(): + with pytest.raises(YamlValidationException): + Domain.from_yaml( + f""" + version: "{LATEST_TRAINING_DATA_FORMAT_VERSION}" + responses: + utter_flow_xyz: + - buttons: + - payload: "yes" + title: Yes + - payload: "no" + title: No + + """ + ) + + +async def test_nlg_response_with_default_template_engine(): + domain = Domain.from_yaml( + f""" + version: "{LATEST_TRAINING_DATA_FORMAT_VERSION}" + responses: + utter_flow_xyz: + - text: "Do you want to update the values?" + """ + ) + t = TemplatedNaturalLanguageGenerator(domain.responses) + r = t.generate_from_slots( + "utter_flow_xyz", + {"tm": "50"}, + { + "frame_id": "XYYZABCD", + "corrected_slots": {"tm": "100"}, + }, + "", + ) + assert r.get("text") == "Do you want to update the values?" + + +async def test_nlg_response_with_jinja_template(): + domain = Domain.from_yaml( + f""" + version: "{LATEST_TRAINING_DATA_FORMAT_VERSION}" + responses: + utter_flow_xyz: + - text: "Do you want to update the + {{{{ context.corrected_slots.keys()|join(', ') }}}}?" + metadata: + rephrase: true + template: jinja + """ + ) + t = TemplatedNaturalLanguageGenerator(domain.responses) + r = t.generate_from_slots( + "utter_flow_xyz", + {"tm": "50"}, + { + "frame_id": "XYYZABCD", + "corrected_slots": {"tm": "100"}, + }, + "", + ) + assert r.get("text") == "Do you want to update the tm?" + + +async def test_nlg_response_with_unknown_var_jinja_template(): + domain = Domain.from_yaml( + f""" + version: "{LATEST_TRAINING_DATA_FORMAT_VERSION}" + responses: + utter_flow_xyz: + - text: "Do you want to update the {{{{ context.unknown_key }}}}?" + metadata: + rephrase: true + template: jinja + """ + ) + t = TemplatedNaturalLanguageGenerator(domain.responses) + r = t.generate_from_slots( + "utter_flow_xyz", + {"tm": "50"}, + { + "frame_id": "XYYZABCD", + "corrected_slots": {"tm": "100"}, + }, + "", + ) + assert r.get("text") == "Do you want to update the ?" 
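The three jinja-templated response tests above all exercise the same rendering contract. As a reading aid, here is a minimal, self-contained sketch of that contract using plain jinja2; it illustrates the behaviour the tests assert and is not the actual `TemplatedNaturalLanguageGenerator` code path:

from jinja2 import Template

# Responses whose metadata set `template: jinja` have their text rendered with a
# `context` object exposed to the template (values copied from the tests above).
template = Template(
    "Do you want to update the {{ context.corrected_slots.keys()|join(', ') }}?"
)
rendered = template.render(
    context={"frame_id": "XYYZABCD", "corrected_slots": {"tm": "100"}}
)
assert rendered == "Do you want to update the tm?"

# Unknown context keys resolve to jinja2's default `Undefined`, which renders as
# an empty string, matching the "unknown_var" test above.
assert (
    Template("Do you want to update the {{ context.unknown_key }}?").render(context={})
    == "Do you want to update the ?"
)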
diff --git a/tests/core/policies/test_flow_policy.py b/tests/core/policies/test_flow_policy.py index 4bfcf918237b..6575a4f679e6 100644 --- a/tests/core/policies/test_flow_policy.py +++ b/tests/core/policies/test_flow_policy.py @@ -1,13 +1,70 @@ import textwrap from typing import List, Optional, Text, Tuple +import pytest + from rasa.core.policies.flow_policy import ( + FlowCircuitBreakerTrippedException, FlowExecutor, + FlowPolicy, ) +from rasa.dialogue_understanding.stack.dialogue_stack import DialogueStack +from rasa.engine.graph import ExecutionContext +from rasa.engine.storage.resource import Resource +from rasa.engine.storage.storage import ModelStorage from rasa.shared.core.domain import Domain -from rasa.shared.core.events import ActionExecuted, Event +from rasa.shared.core.events import ActionExecuted, Event, SlotSet +from rasa.shared.core.flows.flow import FlowsList from rasa.shared.core.flows.yaml_flows_io import YAMLFlowsReader from rasa.shared.core.trackers import DialogueStateTracker +from rasa.dialogue_understanding.stack.frames import ( + UserFlowStackFrame, + SearchStackFrame, +) +from tests.utilities import ( + flows_default_domain, + flows_from_str, + flows_from_str_with_defaults, +) + + +@pytest.fixture() +def resource() -> Resource: + return Resource("flow_policy") + + +@pytest.fixture() +def default_flow_policy( + resource: Resource, + default_model_storage: ModelStorage, + default_execution_context: ExecutionContext, +) -> FlowPolicy: + return FlowPolicy( + config={}, + model_storage=default_model_storage, + resource=resource, + execution_context=default_execution_context, + ) + + +@pytest.fixture() +def default_flows() -> FlowsList: + return flows_from_str( + """ + flows: + foo_flow: + steps: + - id: "1" + action: action_listen + next: "2" + - id: "2" + action: action_unlikely_intent # some action that exists by default + bar_flow: + steps: + - id: first_step + action: action_listen + """ + ) def _run_flow_until_listen( @@ -36,6 +93,7 @@ def _run_flow_until_listen( return actions, events +@pytest.mark.skip(reason="Skip until intent gets replaced by nlu_trigger") def test_select_next_action() -> None: flows = YAMLFlowsReader.read_from_string( textwrap.dedent( @@ -66,3 +124,207 @@ def test_select_next_action() -> None: assert actions == ["flow_test_flow", None] assert events == [] + + +def test_flow_policy_does_support_user_flowstack_frame(): + frame = UserFlowStackFrame(flow_id="foo", step_id="first_step", frame_id="some-id") + assert FlowPolicy.does_support_stack_frame(frame) + + +def test_flow_policy_does_not_support_search_frame(): + frame = SearchStackFrame( + frame_id="some-id", + ) + assert not FlowPolicy.does_support_stack_frame(frame) + + +def test_get_default_config(): + assert FlowPolicy.get_default_config() == {"priority": 1, "max_history": None} + + +def test_predict_action_probabilities_abstains_from_unsupported_frame( + default_flow_policy: FlowPolicy, +): + domain = Domain.empty() + + stack = DialogueStack(frames=[SearchStackFrame(frame_id="some-id")]) + # create a tracker with the stack set + tracker = DialogueStateTracker.from_events( + "test abstain", + domain=domain, + slots=domain.slots, + evts=[ActionExecuted(action_name="action_listen"), stack.persist_as_event()], + ) + + prediction = default_flow_policy.predict_action_probabilities( + tracker=tracker, + domain=Domain.empty(), + ) + + # check that the policy didn't predict anything + assert prediction.max_confidence == 0.0 + + +def test_predict_action_probabilities_advances_topmost_flow( + 
default_flow_policy: FlowPolicy, default_flows: FlowsList +): + domain = Domain.empty() + + stack = DialogueStack( + frames=[UserFlowStackFrame(flow_id="foo_flow", step_id="1", frame_id="some-id")] + ) + + tracker = DialogueStateTracker.from_events( + "test abstain", + domain=domain, + slots=domain.slots, + evts=[ActionExecuted(action_name="action_listen"), stack.persist_as_event()], + ) + + prediction = default_flow_policy.predict_action_probabilities( + tracker=tracker, domain=Domain.empty(), flows=default_flows + ) + + assert prediction.max_confidence == 1.0 + + predicted_idx = prediction.max_confidence_index + assert domain.action_names_or_texts[predicted_idx] == "action_unlikely_intent" + # check that the stack was updated + assert prediction.optional_events == [ + SlotSet( + "dialogue_stack", + [ + { + "frame_id": "some-id", + "flow_id": "foo_flow", + "step_id": "2", + "frame_type": "regular", + "type": "flow", + } + ], + ) + ] + + +def test_executor_trips_internal_circuit_breaker(): + flow_with_loop = flows_from_str( + """ + flows: + foo_flow: + steps: + - id: "1" + set_slots: + - foo: bar + next: "2" + - id: "2" + set_slots: + - foo: barbar + next: "1" + """ + ) + + domain = Domain.empty() + + stack = DialogueStack( + frames=[UserFlowStackFrame(flow_id="foo_flow", step_id="1", frame_id="some-id")] + ) + + tracker = DialogueStateTracker.from_events( + "test", + evts=[ActionExecuted(action_name="action_listen"), stack.persist_as_event()], + domain=domain, + slots=domain.slots, + ) + + executor = FlowExecutor.from_tracker(tracker, flow_with_loop, domain) + + with pytest.raises(FlowCircuitBreakerTrippedException): + executor.select_next_action(tracker) + + +def test_policy_triggers_error_pattern_if_internal_circuit_breaker_is_tripped( + default_flow_policy: FlowPolicy, +): + flow_with_loop = flows_from_str_with_defaults( + """ + flows: + foo_flow: + steps: + - id: "1" + set_slots: + - foo: bar + next: "2" + - id: "2" + set_slots: + - foo: barbar + next: "1" + """ + ) + + domain = flows_default_domain() + + stack = DialogueStack( + frames=[UserFlowStackFrame(flow_id="foo_flow", step_id="1", frame_id="some-id")] + ) + + tracker = DialogueStateTracker.from_events( + "test", + evts=[ActionExecuted(action_name="action_listen"), stack.persist_as_event()], + domain=domain, + slots=domain.slots, + ) + + prediction = default_flow_policy.predict_action_probabilities( + tracker=tracker, domain=domain, flows=flow_with_loop + ) + + assert prediction.max_confidence == 1.0 + + predicted_idx = prediction.max_confidence_index + assert domain.action_names_or_texts[predicted_idx] == "utter_internal_error_rasa" + # check that the stack was updated. 
+ assert len(prediction.optional_events) == 1 + assert isinstance(prediction.optional_events[0], SlotSet) + + assert prediction.optional_events[0].key == "dialogue_stack" + # the user flow should be on the stack as well as the error pattern + assert len(prediction.optional_events[0].value) == 2 + # the user flow should be about to end + assert prediction.optional_events[0].value[0]["step_id"] == "NEXT:END" + # the pattern should be the other frame + assert prediction.optional_events[0].value[1]["flow_id"] == "pattern_internal_error" + + +def test_executor_does_not_get_tripped_if_an_action_is_predicted_in_loop(): + flow_with_loop = flows_from_str( + """ + flows: + foo_flow: + steps: + - id: "1" + set_slots: + - foo: bar + next: "2" + - id: "2" + action: action_listen + next: "1" + """ + ) + + domain = Domain.empty() + + stack = DialogueStack( + frames=[UserFlowStackFrame(flow_id="foo_flow", step_id="1", frame_id="some-id")] + ) + + tracker = DialogueStateTracker.from_events( + "test", + evts=[ActionExecuted(action_name="action_listen"), stack.persist_as_event()], + domain=domain, + slots=domain.slots, + ) + + executor = FlowExecutor.from_tracker(tracker, flow_with_loop, domain) + + selection = executor.select_next_action(tracker) + assert selection.action_name == "action_listen" diff --git a/tests/core/test_actions.py b/tests/core/test_actions.py index c240d065f775..2b80edef8876 100644 --- a/tests/core/test_actions.py +++ b/tests/core/test_actions.py @@ -26,6 +26,7 @@ ActionSessionStart, ActionEndToEndResponse, ActionExtractSlots, + default_actions, ) from rasa.core.actions.forms import FormAction from rasa.core.channels import CollectingOutputChannel, OutputChannel @@ -72,31 +73,17 @@ import rasa.shared.utils.common from rasa.core.nlg.response import TemplatedNaturalLanguageGenerator from rasa.shared.core.constants import ( - ACTION_CANCEL_FLOW, - ACTION_CLARIFY_FLOWS, - ACTION_CORRECT_FLOW_SLOT, - ACTION_RUN_SLOT_REJECTIONS_NAME, USER_INTENT_SESSION_START, ACTION_LISTEN_NAME, - ACTION_RESTART_NAME, - ACTION_SESSION_START_NAME, - ACTION_DEFAULT_FALLBACK_NAME, - ACTION_DEACTIVATE_LOOP_NAME, - ACTION_REVERT_FALLBACK_EVENTS_NAME, - ACTION_DEFAULT_ASK_AFFIRMATION_NAME, - ACTION_DEFAULT_ASK_REPHRASE_NAME, - ACTION_BACK_NAME, - ACTION_TWO_STAGE_FALLBACK_NAME, - ACTION_UNLIKELY_INTENT_NAME, - RULE_SNIPPET_ACTION_NAME, - ACTION_SEND_TEXT_NAME, ACTIVE_LOOP, FOLLOWUP_ACTION, REQUESTED_SLOT, SESSION_START_METADATA_SLOT, - ACTION_EXTRACT_SLOTS, DIALOGUE_STACK_SLOT, RETURN_VALUE_SLOT, + FLOW_HASHES_SLOT, + DEFAULT_ACTION_NAMES, + RULE_SNIPPET_ACTION_NAME, ) from rasa.shared.core.trackers import DialogueStateTracker from rasa.shared.exceptions import RasaException @@ -145,29 +132,15 @@ def test_domain_action_instantiation(): action.action_for_name_or_text(action_name, domain, None) for action_name in domain.action_names_or_texts ] - - assert len(instantiated_actions) == 21 + expected_action_names = DEFAULT_ACTION_NAMES + [ + "my_module.ActionTest", + "utter_test", + "utter_chitchat", + ] + assert len(instantiated_actions) == len(expected_action_names) + for i, instantiated_action in enumerate(instantiated_actions): + assert instantiated_action.name() == expected_action_names[i] assert instantiated_actions[0].name() == ACTION_LISTEN_NAME - assert instantiated_actions[1].name() == ACTION_RESTART_NAME - assert instantiated_actions[2].name() == ACTION_SESSION_START_NAME - assert instantiated_actions[3].name() == ACTION_DEFAULT_FALLBACK_NAME - assert instantiated_actions[4].name() == 
ACTION_DEACTIVATE_LOOP_NAME - assert instantiated_actions[5].name() == ACTION_REVERT_FALLBACK_EVENTS_NAME - assert instantiated_actions[6].name() == ACTION_DEFAULT_ASK_AFFIRMATION_NAME - assert instantiated_actions[7].name() == ACTION_DEFAULT_ASK_REPHRASE_NAME - assert instantiated_actions[8].name() == ACTION_TWO_STAGE_FALLBACK_NAME - assert instantiated_actions[9].name() == ACTION_UNLIKELY_INTENT_NAME - assert instantiated_actions[10].name() == ACTION_BACK_NAME - assert instantiated_actions[11].name() == ACTION_SEND_TEXT_NAME - assert instantiated_actions[12].name() == RULE_SNIPPET_ACTION_NAME - assert instantiated_actions[13].name() == ACTION_EXTRACT_SLOTS - assert instantiated_actions[14].name() == ACTION_CANCEL_FLOW - assert instantiated_actions[15].name() == ACTION_CORRECT_FLOW_SLOT - assert instantiated_actions[16].name() == ACTION_CLARIFY_FLOWS - assert instantiated_actions[17].name() == ACTION_RUN_SLOT_REJECTIONS_NAME - assert instantiated_actions[18].name() == "my_module.ActionTest" - assert instantiated_actions[19].name() == "utter_test" - assert instantiated_actions[20].name() == "utter_chitchat" @pytest.mark.parametrize( @@ -248,6 +221,7 @@ async def test_remote_action_runs( "slots": { "name": None, REQUESTED_SLOT: None, + FLOW_HASHES_SLOT: None, SESSION_START_METADATA_SLOT: None, DIALOGUE_STACK_SLOT: None, RETURN_VALUE_SLOT: None, @@ -312,6 +286,7 @@ async def test_remote_action_logs_events( "slots": { "name": None, REQUESTED_SLOT: None, + FLOW_HASHES_SLOT: None, SESSION_START_METADATA_SLOT: None, DIALOGUE_STACK_SLOT: None, RETURN_VALUE_SLOT: None, @@ -3056,3 +3031,11 @@ async def test_action_send_text_handles_missing_metadata( ) assert events == [BotUttered("")] + + +def test_default_actions_and_names_consistency(): + names_of_default_actions = {action.name() for action in default_actions()} + names_of_executable_actions_in_constants = set(DEFAULT_ACTION_NAMES) - { + RULE_SNIPPET_ACTION_NAME + } + assert names_of_default_actions == names_of_executable_actions_in_constants diff --git a/tests/core/test_lock_store.py b/tests/core/test_lock_store.py index bba472696197..d90cd26a61b5 100644 --- a/tests/core/test_lock_store.py +++ b/tests/core/test_lock_store.py @@ -1,31 +1,30 @@ import asyncio import logging import sys +import time from pathlib import Path +from typing import Text +from unittest.mock import Mock, patch import numpy as np import pytest -import time - +import rasa.core.lock_store from _pytest.logging import LogCaptureFixture from _pytest.monkeypatch import MonkeyPatch -from unittest.mock import patch, Mock - from rasa.core.agent import Agent from rasa.core.channels import UserMessage from rasa.core.constants import DEFAULT_LOCK_LIFETIME -from rasa.shared.constants import INTENT_MESSAGE_PREFIX from rasa.core.lock import TicketLock -import rasa.core.lock_store from rasa.core.lock_store import ( + DEFAULT_REDIS_LOCK_STORE_KEY_PREFIX, InMemoryLockStore, LockError, LockStore, RedisLockStore, - DEFAULT_REDIS_LOCK_STORE_KEY_PREFIX, ) +from rasa.shared.constants import INTENT_MESSAGE_PREFIX from rasa.shared.exceptions import ConnectionException -from rasa.utils.endpoints import EndpointConfig +from rasa.utils.endpoints import EndpointConfig, read_endpoint_config class FakeRedisLockStore(RedisLockStore): @@ -384,3 +383,21 @@ async def test_redis_lock_store_with_valid_prefix(monkeypatch: MonkeyPatch): with pytest.raises(LockError): async with lock_store.lock("some sender"): pass + + +def test_create_lock_store_from_endpoint_config(endpoints_path: Text): + store = 
read_endpoint_config(endpoints_path, endpoint_type="lock_store") + tracker_store = RedisLockStore( + host="localhost", + port=6379, + db=0, + username="username", + password="password", + use_ssl=True, + ssl_keyfile="keyfile.key", + ssl_certfile="certfile.crt", + ssl_ca_certs="my-bundle.ca-bundle", + key_prefix="lock", + ) + + assert isinstance(tracker_store, type(LockStore.create(store))) diff --git a/tests/core/test_nlg.py b/tests/core/test_nlg.py index 17a8d062aed3..87a785a7578d 100644 --- a/tests/core/test_nlg.py +++ b/tests/core/test_nlg.py @@ -291,7 +291,7 @@ def test_nlg_fill_response_text_and_custom( "text": str(text_slot_value), "custom": { "field": str(cust_slot_value), - "properties": {"field_prefixed": f"prefix_{str(cust_slot_value)}"}, + "properties": {"field_prefixed": f"prefix_{cust_slot_value!s}"}, }, } diff --git a/tests/core/test_tracker_stores.py b/tests/core/test_tracker_stores.py index dd19b49c9051..8fe3419e766d 100644 --- a/tests/core/test_tracker_stores.py +++ b/tests/core/test_tracker_stores.py @@ -146,6 +146,7 @@ def test_tracker_store_endpoint_config_loading(endpoints_path: Text): "url": "localhost", "port": 6379, "db": 0, + "username": "username", "password": "password", "timeout": 30000, "use_ssl": True, @@ -165,6 +166,7 @@ def test_create_tracker_store_from_endpoint_config( host="localhost", port=6379, db=0, + username="username", password="password", record_exp=3000, use_ssl=True, diff --git a/tests/core/utilities.py b/tests/core/utilities.py index e11e0d18f4ce..68fcb17c5d1e 100644 --- a/tests/core/utilities.py +++ b/tests/core/utilities.py @@ -53,7 +53,7 @@ async def mocked_input(*args, **kwargs): def user_uttered( text: Text, confidence: float = 1.0, - metadata: Dict[Text, Any] = None, + metadata: Optional[Dict[Text, Any]] = None, timestamp: Optional[float] = None, ) -> UserUttered: parse_data = {"intent": {INTENT_NAME_KEY: text, "confidence": confidence}} diff --git a/tests/cdu/commands/__init__.py b/tests/dialogue_understanding/__init__.py similarity index 100% rename from tests/cdu/commands/__init__.py rename to tests/dialogue_understanding/__init__.py diff --git a/tests/cdu/generator/__init__.py b/tests/dialogue_understanding/commands/__init__.py similarity index 100% rename from tests/cdu/generator/__init__.py rename to tests/dialogue_understanding/commands/__init__.py diff --git a/tests/dialogue_understanding/commands/conftest.py b/tests/dialogue_understanding/commands/conftest.py new file mode 100644 index 000000000000..7741b820865b --- /dev/null +++ b/tests/dialogue_understanding/commands/conftest.py @@ -0,0 +1,43 @@ +import pytest + +from rasa.dialogue_understanding.commands import StartFlowCommand +from rasa.dialogue_understanding.processor.command_processor import execute_commands +from rasa.shared.core.events import UserUttered +from rasa.shared.core.flows.flow import FlowsList +from rasa.shared.core.trackers import DialogueStateTracker +from rasa.shared.nlu.constants import COMMANDS +from rasa.shared.core.flows.yaml_flows_io import flows_from_str + + +@pytest.fixture +def all_flows() -> FlowsList: + return flows_from_str( + """ + flows: + foo: + steps: + - id: first_step + action: action_listen + bar: + steps: + - id: also_first_step + action: action_listen + """ + ) + + +start_foo_user_uttered = UserUttered( + "start foo", None, None, {COMMANDS: [StartFlowCommand("foo").as_dict()]} +) + +start_bar_user_uttered = UserUttered( + "start bar", None, None, {COMMANDS: [StartFlowCommand("bar").as_dict()]} +) + + +@pytest.fixture +def 
tracker(all_flows: FlowsList) -> DialogueStateTracker: + # Creates a useful tracker that has a started flow and the current flows hashed + tracker = DialogueStateTracker.from_events("test", evts=[start_foo_user_uttered]) + execute_commands(tracker, all_flows) + return tracker diff --git a/tests/cdu/commands/test_can_not_handle_command.py b/tests/dialogue_understanding/commands/test_can_not_handle_command.py similarity index 100% rename from tests/cdu/commands/test_can_not_handle_command.py rename to tests/dialogue_understanding/commands/test_can_not_handle_command.py diff --git a/tests/cdu/commands/test_cancel_flow_command.py b/tests/dialogue_understanding/commands/test_cancel_flow_command.py similarity index 98% rename from tests/cdu/commands/test_cancel_flow_command.py rename to tests/dialogue_understanding/commands/test_cancel_flow_command.py index 6e902333cde9..890fb889222c 100644 --- a/tests/cdu/commands/test_cancel_flow_command.py +++ b/tests/dialogue_understanding/commands/test_cancel_flow_command.py @@ -8,7 +8,7 @@ from rasa.shared.core.constants import DIALOGUE_STACK_SLOT from rasa.shared.core.events import SlotSet from rasa.shared.core.trackers import DialogueStateTracker -from tests.utilities import flows_from_str +from rasa.shared.core.flows.yaml_flows_io import flows_from_str def test_command_name(): diff --git a/tests/cdu/commands/test_chit_chat_answer_command.py b/tests/dialogue_understanding/commands/test_chit_chat_answer_command.py similarity index 95% rename from tests/cdu/commands/test_chit_chat_answer_command.py rename to tests/dialogue_understanding/commands/test_chit_chat_answer_command.py index b222f91780af..d30983bcea68 100644 --- a/tests/cdu/commands/test_chit_chat_answer_command.py +++ b/tests/dialogue_understanding/commands/test_chit_chat_answer_command.py @@ -32,4 +32,4 @@ def test_run_command_on_tracker(): assert len(dialogue_stack_event.value) == 1 frame = dialogue_stack_event.value[0] - assert frame["type"] == "chitchat" + assert frame["type"] == "pattern_chitchat" diff --git a/tests/cdu/commands/test_clarify_command.py b/tests/dialogue_understanding/commands/test_clarify_command.py similarity index 97% rename from tests/cdu/commands/test_clarify_command.py rename to tests/dialogue_understanding/commands/test_clarify_command.py index c317830eeba0..b9630a5a618a 100644 --- a/tests/cdu/commands/test_clarify_command.py +++ b/tests/dialogue_understanding/commands/test_clarify_command.py @@ -3,7 +3,7 @@ from rasa.shared.core.constants import DIALOGUE_STACK_SLOT from rasa.shared.core.events import SlotSet from rasa.shared.core.trackers import DialogueStateTracker -from tests.utilities import flows_from_str +from rasa.shared.core.flows.yaml_flows_io import flows_from_str def test_command_name(): diff --git a/tests/cdu/commands/test_command.py b/tests/dialogue_understanding/commands/test_command.py similarity index 100% rename from tests/cdu/commands/test_command.py rename to tests/dialogue_understanding/commands/test_command.py diff --git a/tests/dialogue_understanding/commands/test_command_processor.py b/tests/dialogue_understanding/commands/test_command_processor.py new file mode 100644 index 000000000000..0e623c57c2a4 --- /dev/null +++ b/tests/dialogue_understanding/commands/test_command_processor.py @@ -0,0 +1,128 @@ +import pytest + +from rasa.dialogue_understanding.patterns.code_change import FLOW_PATTERN_CODE_CHANGE_ID +from rasa.dialogue_understanding.processor.command_processor import ( + execute_commands, + find_updated_flows, +) +from 
rasa.dialogue_understanding.stack.dialogue_stack import DialogueStack +from rasa.dialogue_understanding.stack.frames import ( + UserFlowStackFrame, + PatternFlowStackFrame, +) +from rasa.shared.core.constants import FLOW_HASHES_SLOT +from rasa.shared.core.flows.flow import FlowsList +from rasa.shared.core.trackers import DialogueStateTracker +from rasa.shared.core.flows.yaml_flows_io import flows_from_str +from tests.dialogue_understanding.commands.conftest import start_bar_user_uttered + + +def test_properly_prepared_tracker(tracker: DialogueStateTracker): + # flow hashes have been initialized + assert "foo" in tracker.get_slot(FLOW_HASHES_SLOT) + + # foo flow is on the stack + dialogue_stack = DialogueStack.from_tracker(tracker) + assert (top_frame := dialogue_stack.top()) + assert isinstance(top_frame, UserFlowStackFrame) + assert top_frame.flow_id == "foo" + + +def test_detects_no_changes_when_nothing_changed( + tracker: DialogueStateTracker, all_flows: FlowsList +): + assert find_updated_flows(tracker, all_flows) == set() + + +def test_detects_no_changes_for_not_started_flows( + tracker: DialogueStateTracker, +): + bar_changed_flows = flows_from_str( + """ + flows: + foo: + steps: + - id: first_step + action: action_listen + bar: + steps: + - id: also_first_step_BUT_CHANGED + action: action_listen + """ + ) + assert find_updated_flows(tracker, bar_changed_flows) == set() + + +change_cases = { + "step_id_changed": """ + flows: + foo: + steps: + - id: first_step_id_BUT_CHANGED + action: action_listen + bar: + steps: + - id: also_first_step + action: action_listen + """, + "action_changed": """ + flows: + foo: + steps: + - id: first_step_id + action: action_CHANGED + bar: + steps: + - id: also_first_step + action: action_listen + """, + "new_step": """ + flows: + foo: + steps: + - id: first_step_id + action: action_listen + next: second_step_id + - id: second_step_id + action: action_cool_stuff + bar: + steps: + - id: also_first_step + action: action_listen + """, + "flow_removed": """ + flows: + bar: + steps: + - id: also_first_step + action: action_listen + """, +} + + +@pytest.mark.parametrize("case, flow_yaml", list(change_cases.items())) +def test_detects_changes(case: str, flow_yaml: str, tracker: DialogueStateTracker): + all_flows = flows_from_str(flow_yaml) + assert find_updated_flows(tracker, all_flows) == {"foo"} + + +def test_starting_of_another_flow(tracker: DialogueStateTracker, all_flows: FlowsList): + """Tests that commands are not discarded when there is no change.""" + tracker.update_with_events([start_bar_user_uttered], None) + execute_commands(tracker, all_flows) + dialogue_stack = DialogueStack.from_tracker(tracker) + assert len(dialogue_stack.frames) == 2 + assert (top_frame := dialogue_stack.top()) + assert isinstance(top_frame, UserFlowStackFrame) + assert top_frame.flow_id == "bar" + + +def test_stack_cleaning_command_is_applied_on_changes(tracker: DialogueStateTracker): + all_flows = flows_from_str(change_cases["step_id_changed"]) + tracker.update_with_events([start_bar_user_uttered], None) + execute_commands(tracker, all_flows) + dialogue_stack = DialogueStack.from_tracker(tracker) + assert len(dialogue_stack.frames) == 2 + assert (top_frame := dialogue_stack.top()) + assert isinstance(top_frame, PatternFlowStackFrame) + assert top_frame.flow_id == FLOW_PATTERN_CODE_CHANGE_ID diff --git a/tests/cdu/commands/test_correct_slots_command.py b/tests/dialogue_understanding/commands/test_correct_slots_command.py similarity index 99% rename from 
tests/cdu/commands/test_correct_slots_command.py rename to tests/dialogue_understanding/commands/test_correct_slots_command.py index bf8578d766c9..72234cbd13a2 100644 --- a/tests/cdu/commands/test_correct_slots_command.py +++ b/tests/dialogue_understanding/commands/test_correct_slots_command.py @@ -15,7 +15,7 @@ from rasa.shared.core.constants import DIALOGUE_STACK_SLOT from rasa.shared.core.events import SlotSet from rasa.shared.core.trackers import DialogueStateTracker -from tests.utilities import flows_from_str +from rasa.shared.core.flows.yaml_flows_io import flows_from_str def test_command_name(): diff --git a/tests/cdu/commands/test_error_command.py b/tests/dialogue_understanding/commands/test_error_command.py similarity index 100% rename from tests/cdu/commands/test_error_command.py rename to tests/dialogue_understanding/commands/test_error_command.py diff --git a/tests/dialogue_understanding/commands/test_handle_code_change_command.py b/tests/dialogue_understanding/commands/test_handle_code_change_command.py new file mode 100644 index 000000000000..c1868d55f47c --- /dev/null +++ b/tests/dialogue_understanding/commands/test_handle_code_change_command.py @@ -0,0 +1,103 @@ +import pytest + +from rasa.core.channels import CollectingOutputChannel +from rasa.core.nlg import TemplatedNaturalLanguageGenerator +from rasa.dialogue_understanding.commands.handle_code_change_command import ( + HandleCodeChangeCommand, +) +from rasa.core.actions.action_clean_stack import ActionCleanStack + +from rasa.dialogue_understanding.patterns.code_change import FLOW_PATTERN_CODE_CHANGE_ID +from rasa.dialogue_understanding.processor.command_processor import execute_commands +from rasa.dialogue_understanding.stack.dialogue_stack import DialogueStack +from rasa.dialogue_understanding.stack.frames import ( + UserFlowStackFrame, + PatternFlowStackFrame, +) +from rasa.shared.core.domain import Domain +from rasa.shared.core.events import SlotSet +from rasa.shared.core.flows.flow import ( + FlowsList, + START_STEP, + ContinueFlowStep, + END_STEP, +) +from rasa.shared.core.trackers import DialogueStateTracker +from tests.dialogue_understanding.commands.test_command_processor import ( + start_bar_user_uttered, + change_cases, +) +from rasa.shared.core.flows.yaml_flows_io import flows_from_str + + +def test_name_of_command(): + # names of commands should not change as they are part of persisted + # trackers + assert HandleCodeChangeCommand.command() == "handle code change" + + +def test_from_dict(): + assert HandleCodeChangeCommand.from_dict({}) == HandleCodeChangeCommand() + + +def test_run_command_on_tracker(tracker: DialogueStateTracker, all_flows: FlowsList): + command = HandleCodeChangeCommand() + events = command.run_command_on_tracker(tracker, all_flows, tracker) + assert len(events) == 1 + dialogue_stack_event = events[0] + assert isinstance(dialogue_stack_event, SlotSet) + assert dialogue_stack_event.key == "dialogue_stack" + assert len(dialogue_stack_event.value) == 2 + + frame = dialogue_stack_event.value[1] + assert frame["type"] == FLOW_PATTERN_CODE_CHANGE_ID + + +@pytest.fixture +def about_to_be_cleaned_tracker(tracker: DialogueStateTracker, all_flows: FlowsList): + tracker.update_with_events([start_bar_user_uttered], None) + execute_commands(tracker, all_flows) + changed_flows = flows_from_str(change_cases["step_id_changed"]) + execute_commands(tracker, changed_flows) + dialogue_stack = DialogueStack.from_tracker(tracker) + assert len(dialogue_stack.frames) == 3 + + foo_frame = dialogue_stack.frames[0] 
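+ # the originally started foo flow should still sit at the bottom of the stack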
+ assert isinstance(foo_frame, UserFlowStackFrame) + assert foo_frame.flow_id == "foo" + assert foo_frame.step_id == START_STEP + + bar_frame = dialogue_stack.frames[1] + assert isinstance(bar_frame, UserFlowStackFrame) + assert bar_frame.flow_id == "bar" + assert bar_frame.step_id == START_STEP + + stack_clean_frame = dialogue_stack.frames[2] + assert isinstance(stack_clean_frame, PatternFlowStackFrame) + assert stack_clean_frame.flow_id == FLOW_PATTERN_CODE_CHANGE_ID + assert stack_clean_frame.step_id == START_STEP + + return tracker + + +async def test_stack_cleaning_action(about_to_be_cleaned_tracker: DialogueStateTracker): + events = await ActionCleanStack().run( + CollectingOutputChannel(), + TemplatedNaturalLanguageGenerator({}), + about_to_be_cleaned_tracker, + Domain.empty(), + ) + about_to_be_cleaned_tracker.update_with_events(events, None) + + dialogue_stack = DialogueStack.from_tracker(about_to_be_cleaned_tracker) + assert len(dialogue_stack.frames) == 3 + + foo_frame = dialogue_stack.frames[0] + assert isinstance(foo_frame, UserFlowStackFrame) + assert foo_frame.flow_id == "foo" + assert foo_frame.step_id == ContinueFlowStep.continue_step_for_id(END_STEP) + + bar_frame = dialogue_stack.frames[1] + assert isinstance(bar_frame, UserFlowStackFrame) + assert bar_frame.flow_id == "bar" + assert bar_frame.step_id == ContinueFlowStep.continue_step_for_id(END_STEP) diff --git a/tests/cdu/commands/test_human_handoff_command.py b/tests/dialogue_understanding/commands/test_human_handoff_command.py similarity index 100% rename from tests/cdu/commands/test_human_handoff_command.py rename to tests/dialogue_understanding/commands/test_human_handoff_command.py diff --git a/tests/cdu/commands/test_konwledge_answer_command.py b/tests/dialogue_understanding/commands/test_konwledge_answer_command.py similarity index 95% rename from tests/cdu/commands/test_konwledge_answer_command.py rename to tests/dialogue_understanding/commands/test_konwledge_answer_command.py index 6aa407f5e35e..84126253d05c 100644 --- a/tests/cdu/commands/test_konwledge_answer_command.py +++ b/tests/dialogue_understanding/commands/test_konwledge_answer_command.py @@ -32,4 +32,4 @@ def test_run_command_on_tracker(): assert len(dialogue_stack_event.value) == 1 frame = dialogue_stack_event.value[0] - assert frame["type"] == "search" + assert frame["type"] == "pattern_search" diff --git a/tests/cdu/commands/test_set_slot_command.py b/tests/dialogue_understanding/commands/test_set_slot_command.py similarity index 99% rename from tests/cdu/commands/test_set_slot_command.py rename to tests/dialogue_understanding/commands/test_set_slot_command.py index 370ff3de20d5..3b04c5c6dc88 100644 --- a/tests/cdu/commands/test_set_slot_command.py +++ b/tests/dialogue_understanding/commands/test_set_slot_command.py @@ -4,7 +4,7 @@ from rasa.shared.core.events import SlotSet from rasa.shared.core.flows.flow import FlowsList from rasa.shared.core.trackers import DialogueStateTracker -from tests.utilities import flows_from_str +from rasa.shared.core.flows.yaml_flows_io import flows_from_str def test_command_name(): diff --git a/tests/cdu/commands/test_start_flow_command.py b/tests/dialogue_understanding/commands/test_start_flow_command.py similarity index 98% rename from tests/cdu/commands/test_start_flow_command.py rename to tests/dialogue_understanding/commands/test_start_flow_command.py index 628cc0273379..8e411576dfa6 100644 --- a/tests/cdu/commands/test_start_flow_command.py +++ b/tests/dialogue_understanding/commands/test_start_flow_command.py 
@@ -3,7 +3,7 @@ from rasa.shared.core.constants import DIALOGUE_STACK_SLOT from rasa.shared.core.events import SlotSet from rasa.shared.core.trackers import DialogueStateTracker -from tests.utilities import flows_from_str +from rasa.shared.core.flows.yaml_flows_io import flows_from_str def test_command_name(): diff --git a/tests/cdu/stack/__init__.py b/tests/dialogue_understanding/generator/__init__.py similarity index 100% rename from tests/cdu/stack/__init__.py rename to tests/dialogue_understanding/generator/__init__.py diff --git a/tests/cdu/generator/test_command_generator.py b/tests/dialogue_understanding/generator/test_command_generator.py similarity index 100% rename from tests/cdu/generator/test_command_generator.py rename to tests/dialogue_understanding/generator/test_command_generator.py diff --git a/tests/cdu/stack/frames/__init__.py b/tests/dialogue_understanding/stack/__init__.py similarity index 100% rename from tests/cdu/stack/frames/__init__.py rename to tests/dialogue_understanding/stack/__init__.py diff --git a/tests/dialogue_understanding/stack/frames/__init__.py b/tests/dialogue_understanding/stack/frames/__init__.py new file mode 100644 index 000000000000..e69de29bb2d1 diff --git a/tests/cdu/stack/frames/test_chit_chat_frame.py b/tests/dialogue_understanding/stack/frames/test_chit_chat_frame.py similarity index 100% rename from tests/cdu/stack/frames/test_chit_chat_frame.py rename to tests/dialogue_understanding/stack/frames/test_chit_chat_frame.py diff --git a/tests/cdu/stack/frames/test_dialogue_stack_frame.py b/tests/dialogue_understanding/stack/frames/test_dialogue_stack_frame.py similarity index 100% rename from tests/cdu/stack/frames/test_dialogue_stack_frame.py rename to tests/dialogue_understanding/stack/frames/test_dialogue_stack_frame.py diff --git a/tests/cdu/stack/frames/test_flow_frame.py b/tests/dialogue_understanding/stack/frames/test_flow_frame.py similarity index 100% rename from tests/cdu/stack/frames/test_flow_frame.py rename to tests/dialogue_understanding/stack/frames/test_flow_frame.py diff --git a/tests/cdu/stack/frames/test_search_frame.py b/tests/dialogue_understanding/stack/frames/test_search_frame.py similarity index 100% rename from tests/cdu/stack/frames/test_search_frame.py rename to tests/dialogue_understanding/stack/frames/test_search_frame.py diff --git a/tests/cdu/stack/test_dialogue_stack.py b/tests/dialogue_understanding/stack/test_dialogue_stack.py similarity index 90% rename from tests/cdu/stack/test_dialogue_stack.py rename to tests/dialogue_understanding/stack/test_dialogue_stack.py index 25769624eb0e..fa0859f3a314 100644 --- a/tests/cdu/stack/test_dialogue_stack.py +++ b/tests/dialogue_understanding/stack/test_dialogue_stack.py @@ -4,6 +4,8 @@ ) from rasa.dialogue_understanding.stack.dialogue_stack import DialogueStack from rasa.dialogue_understanding.stack.frames.flow_stack_frame import UserFlowStackFrame +from rasa.shared.core.constants import DIALOGUE_STACK_SLOT +from rasa.shared.core.events import SlotSet def test_dialogue_stack_from_dict(): @@ -75,6 +77,24 @@ def test_dialogue_stack_as_dict(): ] +def test_dialogue_stack_as_event(): + # check that the stack gets persisted as an event storing the dict + stack = DialogueStack( + frames=[ + UserFlowStackFrame( + flow_id="foo", step_id="first_step", frame_id="some-frame-id" + ), + CollectInformationPatternFlowStackFrame( + collect="foo", + frame_id="some-other-id", + utter="utter_ask_foo", + ), + ] + ) + + assert stack.persist_as_event() == SlotSet(DIALOGUE_STACK_SLOT, 
stack.as_dict()) + + def test_dialogue_stack_as_dict_handles_empty(): stack = DialogueStack(frames=[]) assert stack.as_dict() == [] diff --git a/tests/cdu/stack/test_utils.py b/tests/dialogue_understanding/stack/test_utils.py similarity index 78% rename from tests/cdu/stack/test_utils.py rename to tests/dialogue_understanding/stack/test_utils.py index 4d7cf485522f..ecf4c206fa23 100644 --- a/tests/cdu/stack/test_utils.py +++ b/tests/dialogue_understanding/stack/test_utils.py @@ -5,12 +5,13 @@ from rasa.dialogue_understanding.stack.frames.chit_chat_frame import ChitChatStackFrame from rasa.dialogue_understanding.stack.frames.flow_stack_frame import UserFlowStackFrame from rasa.dialogue_understanding.stack.utils import ( + end_top_user_flow, filled_slots_for_active_flow, top_flow_frame, top_user_flow_frame, user_flows_on_the_stack, ) -from tests.utilities import flows_from_str +from rasa.shared.core.flows.yaml_flows_io import flows_from_str def test_top_flow_frame_ignores_pattern(): @@ -199,3 +200,52 @@ def test_filled_slots_for_active_flow_only_collects_till_top_most_user_flow_fram stack = DialogueStack(frames=[another_user_frame, user_frame]) assert filled_slots_for_active_flow(stack, all_flows) == {"foo", "bar"} + + +def test_end_top_user_flow(): + user_frame = UserFlowStackFrame( + flow_id="my_flow", step_id="collect_bar", frame_id="some-frame-id" + ) + pattern_frame = CollectInformationPatternFlowStackFrame( + collect="foo", frame_id="some-other-id" + ) + stack = DialogueStack(frames=[user_frame, pattern_frame]) + + end_top_user_flow(stack) + + assert len(stack.frames) == 2 + + assert stack.frames[0] == UserFlowStackFrame( + flow_id="my_flow", step_id="NEXT:END", frame_id="some-frame-id" + ) + assert stack.frames[1] == CollectInformationPatternFlowStackFrame( + collect="foo", frame_id="some-other-id", step_id="NEXT:END" + ) + + +def test_end_top_user_flow_only_ends_topmost_user_frame(): + user_frame = UserFlowStackFrame( + flow_id="my_flow", step_id="collect_bar", frame_id="some-frame-id" + ) + other_user_frame = UserFlowStackFrame( + flow_id="my_other_flow", step_id="collect_bar2", frame_id="some-other-id" + ) + stack = DialogueStack(frames=[other_user_frame, user_frame]) + + end_top_user_flow(stack) + + assert len(stack.frames) == 2 + + assert stack.frames[0] == UserFlowStackFrame( + flow_id="my_other_flow", step_id="collect_bar2", frame_id="some-other-id" + ) + assert stack.frames[1] == UserFlowStackFrame( + flow_id="my_flow", step_id="NEXT:END", frame_id="some-frame-id" + ) + + +def test_end_top_user_flow_handles_empty(): + stack = DialogueStack(frames=[]) + end_top_user_flow(stack) + + assert len(stack.frames) == 0 diff --git a/tests/graph_components/validators/test_default_recipe_validator.py b/tests/graph_components/validators/test_default_recipe_validator.py index f462afeeff3a..165909767d45 100644 --- a/tests/graph_components/validators/test_default_recipe_validator.py +++ b/tests/graph_components/validators/test_default_recipe_validator.py @@ -812,7 +812,7 @@ def test_core_raise_if_domain_contains_form_names_but_no_rule_policy_given( importer = DummyImporter(domain=domain_with_form) graph_schema = GraphSchema( { - "policy": SchemaNode({}, policy_type, "", "", {}) + "policy": SchemaNode({}, policy_type, "", "", {}) # noqa: RUF011 for policy_type in policy_types } ) diff --git a/tests/nlu/classifiers/test_diet_classifier.py b/tests/nlu/classifiers/test_diet_classifier.py index 52ae4336fb14..1f0c37a85faa 100644 --- a/tests/nlu/classifiers/test_diet_classifier.py +++ 
b/tests/nlu/classifiers/test_diet_classifier.py
@@ -23,6 +23,7 @@
PREDICTED_CONFIDENCE_KEY,
INTENT_NAME_KEY,
)
+from rasa.utils import train_utils
from rasa.utils.tensorflow.constants import (
LOSS_TYPE,
RANDOM_SEED,
@@ -966,3 +967,80 @@ async def test_no_bilou_when_entity_recognition_off(
diet.train(training_data=training_data)
assert all(msg.get(BILOU_ENTITIES) is None for msg in training_data.nlu_examples)
+
+
+@pytest.mark.timeout(120, func_only=True)
+@pytest.mark.parametrize(
+ "batch_size, expected_num_batches",
+ # the training dataset has 48 NLU examples
+ [
+ (1, 48),
+ (8, 6),
+ (15, 3),
+ (16, 3),
+ (18, 3),
+ (20, 2),
+ (32, 2),
+ (64, 1),
+ (128, 1),
+ (256, 1),
+ ],
+)
+async def test_dropping_of_last_partial_batch(
+ batch_size: int,
+ expected_num_batches: int,
+ create_diet: Callable[..., DIETClassifier],
+ train_and_preprocess: Callable[..., Tuple[TrainingData, List[GraphComponent]]],
+):
+ """Test that DIET's data processing produces the right number of batches.
+
+ We introduced a change to only keep the last incomplete batch if
+ 1. it contains at least 50% of the examples of a full batch
+ 2. or it is the only batch in the epoch
+ """
+
+ pipeline = [
+ {"component": WhitespaceTokenizer},
+ {"component": CountVectorsFeaturizer},
+ ]
+ diet = create_diet(
+ {ENTITY_RECOGNITION: False, RANDOM_SEED: 1, EPOCHS: 1, RUN_EAGERLY: True}
+ )
+ # This data set has 48 NLU examples
+ training_data, loaded_pipeline = train_and_preprocess(
+ pipeline, training_data="data/test/demo-rasa-no-ents.yml"
+ )
+
+ model_data = diet.preprocess_train_data(training_data)
+ data_generator, _ = train_utils.create_data_generators(model_data, batch_size, 1)
+
+ assert len(data_generator) == expected_num_batches
+
+
+@pytest.mark.timeout(120, func_only=True)
+async def test_dropping_of_last_partial_batch_empty_data(
+ create_diet: Callable[..., DIETClassifier],
+ train_and_preprocess: Callable[..., Tuple[TrainingData, List[GraphComponent]]],
+):
+ """Test that DIET's data processing produces the right number of batches.
+
+ We introduced a change to only keep the last incomplete batch if
+ 1. it contains at least 50% of the examples of a full batch
+ 2.
or it is the only batch in the epoch + """ + + pipeline = [ + {"component": WhitespaceTokenizer}, + {"component": CountVectorsFeaturizer}, + ] + diet = create_diet( + {ENTITY_RECOGNITION: False, RANDOM_SEED: 1, EPOCHS: 1, RUN_EAGERLY: True} + ) + training_data, loaded_pipeline = train_and_preprocess( + pipeline, training_data=TrainingData() + ) + + model_data = diet.preprocess_train_data(training_data) + data_generator, _ = train_utils.create_data_generators(model_data, 64, 1) + + assert len(data_generator) == 0 diff --git a/tests/nlu/featurizers/test_regex_featurizer.py b/tests/nlu/featurizers/test_regex_featurizer.py index a664c77162c1..b1b9fdc2bbf6 100644 --- a/tests/nlu/featurizers/test_regex_featurizer.py +++ b/tests/nlu/featurizers/test_regex_featurizer.py @@ -28,7 +28,7 @@ def create_featurizer( resource: Resource, ) -> Callable[..., RegexFeaturizer]: def inner( - config: Dict[Text, Any] = None, + config: Optional[Dict[Text, Any]] = None, known_patterns: Optional[List[Dict[Text, Any]]] = None, ) -> RegexFeaturizer: config = config or {} diff --git a/tests/shared/core/test_domain.py b/tests/shared/core/test_domain.py index b70e1dc5b3a8..08e8164ef81f 100644 --- a/tests/shared/core/test_domain.py +++ b/tests/shared/core/test_domain.py @@ -1,5 +1,6 @@ import copy import json +import logging import re import textwrap from pathlib import Path @@ -7,6 +8,7 @@ from typing import Dict, List, Text, Any, Union, Set, Optional import pytest +from pytest import LogCaptureFixture from pytest import WarningsRecorder from rasa.shared.exceptions import YamlSyntaxException, YamlException @@ -27,6 +29,7 @@ DEFAULT_KNOWLEDGE_BASE_ACTION, ENTITY_LABEL_SEPARATOR, DEFAULT_ACTION_NAMES, + DEFAULT_SLOT_NAMES, ) from rasa.shared.core.domain import ( InvalidDomain, @@ -888,10 +891,9 @@ def test_domain_from_multiple_files(): "utter_default": [{"text": "default message"}], "utter_amazement": [{"text": "awesomness!"}], } - expected_slots = [ + expected_slots = list(DEFAULT_SLOT_NAMES) + [ "activate_double_simulation", "activate_simulation", - "dialogue_stack", "display_cure_method", "display_drum_cure_horns", "display_method_artwork", @@ -914,9 +916,6 @@ def test_domain_from_multiple_files(): "humbleSelectionManagement", "humbleSelectionStatus", "offers", - "requested_slot", - "return_value", - "session_started_metadata", ] domain_slots = [] @@ -930,7 +929,7 @@ def test_domain_from_multiple_files(): assert expected_responses == domain.responses assert expected_forms == domain.forms assert domain.session_config.session_expiration_time == 360 - assert expected_slots == sorted(domain_slots) + assert sorted(expected_slots) == sorted(domain_slots) def test_domain_warnings(domain: Domain): @@ -2355,3 +2354,20 @@ def test_merge_yaml_domains_loads_actions_which_explicitly_need_domain(): def test_domain_responses_with_ids_are_loaded(domain_yaml, expected) -> None: domain = Domain.from_yaml(domain_yaml) assert domain.responses == expected + + +def test_domain_with_slots_without_mappings(caplog: LogCaptureFixture) -> None: + domain_yaml = """ + slots: + slot_without_mappings: + type: text + """ + with caplog.at_level(logging.WARN): + domain = Domain.from_yaml(domain_yaml) + + assert isinstance(domain.slots[0].mappings, list) + assert len(domain.slots[0].mappings) == 0 + assert ( + "Slot 'slot_without_mappings' has no mappings defined. " + "We will continue with an empty list of mappings." 
+ ) in caplog.text diff --git a/tests/shared/core/test_slot_mappings.py b/tests/shared/core/test_slot_mappings.py index 7e67f388b102..07be20e270e2 100644 --- a/tests/shared/core/test_slot_mappings.py +++ b/tests/shared/core/test_slot_mappings.py @@ -103,19 +103,6 @@ def test_slot_mappings_ignored_intents_during_active_loop(): ) -def test_missing_slot_mappings_raises(): - with pytest.raises(YamlValidationException): - Domain.from_yaml( - f""" - version: "{LATEST_TRAINING_DATA_FORMAT_VERSION}" - slots: - some_slot: - type: text - influence_conversation: False - """ - ) - - def test_slot_mappings_invalid_type_raises(): with pytest.raises(YamlValidationException): Domain.from_yaml( diff --git a/tests/shared/core/training_data/story_reader/test_yaml_story_reader.py b/tests/shared/core/training_data/story_reader/test_yaml_story_reader.py index 1bbb08bdd7d0..f875db2aa178 100644 --- a/tests/shared/core/training_data/story_reader/test_yaml_story_reader.py +++ b/tests/shared/core/training_data/story_reader/test_yaml_story_reader.py @@ -716,7 +716,7 @@ def test_can_read_test_story(domain: Domain): # this should be the story simple_story_with_only_end -> show_it_all # the generated stories are in a non stable order - therefore we need to # do some trickery to find the one we want to test - tracker = [t for t in trackers if len(t.events) == 5][0] + tracker = [t for t in trackers if len(t.events) == 5][0] # noqa: RUF015 assert tracker.events[0] == ActionExecuted("action_listen") assert tracker.events[1] == UserUttered( intent={INTENT_NAME_KEY: "simple", "confidence": 1.0}, @@ -775,12 +775,17 @@ def test_generate_training_data_with_cycles(domain: Domain): # deterministic way but should always be 3 or 4 assert len(training_trackers) == 3 or len(training_trackers) == 4 - # if we have 4 trackers, there is going to be one example more for label 10 - num_tens = len(training_trackers) - 1 - # if new default actions are added the keys of the actions will be changed + # if we have 4 trackers, there is going to be one example more for utter_default + num_utter_default = len(training_trackers) - 1 all_label_ids = [id for ids in label_ids for id in ids] - assert Counter(all_label_ids) == {0: 6, 20: 3, 19: num_tens, 1: 2, 21: 1} + assert Counter(all_label_ids) == { + 0: 6, + domain.action_names_or_texts.index("utter_goodbye"): 3, + domain.action_names_or_texts.index("utter_default"): num_utter_default, + 1: 2, + domain.action_names_or_texts.index("utter_greet"): 1, + } def test_generate_training_data_with_unused_checkpoints(domain: Domain): diff --git a/tests/shared/importers/test_rasa.py b/tests/shared/importers/test_rasa.py index 88025312edf9..a739f1c71c97 100644 --- a/tests/shared/importers/test_rasa.py +++ b/tests/shared/importers/test_rasa.py @@ -11,9 +11,8 @@ from rasa.shared.core.constants import ( DEFAULT_ACTION_NAMES, DEFAULT_INTENTS, - SESSION_START_METADATA_SLOT, - DIALOGUE_STACK_SLOT, - RETURN_VALUE_SLOT, + DEFAULT_SLOT_NAMES, + REQUESTED_SLOT, ) from rasa.shared.core.domain import Domain from rasa.shared.core.slots import AnySlot @@ -30,11 +29,14 @@ def test_rasa_file_importer(project: Text): domain = importer.get_domain() assert len(domain.intents) == 7 + len(DEFAULT_INTENTS) - assert domain.slots == [ - AnySlot(DIALOGUE_STACK_SLOT, mappings=[{}]), - AnySlot(RETURN_VALUE_SLOT, mappings=[{}]), - AnySlot(SESSION_START_METADATA_SLOT, mappings=[{}]), + default_slots = [ + AnySlot(slot_name, mappings=[{}]) + for slot_name in DEFAULT_SLOT_NAMES + if slot_name != REQUESTED_SLOT ] + assert sorted(domain.slots, 
key=lambda s: s.name) == sorted( + default_slots, key=lambda s: s.name + ) assert domain.entities == [] assert len(domain.action_names_or_texts) == 6 + len(DEFAULT_ACTION_NAMES) diff --git a/tests/shared/test_data.py b/tests/shared/test_data.py index 805eb68c56c3..2b5e122e4bc6 100644 --- a/tests/shared/test_data.py +++ b/tests/shared/test_data.py @@ -81,7 +81,7 @@ def test_get_core_nlu_files(project): [data_dir], YAMLStoryReader.is_stories_file ) assert len(nlu_files) == 1 - assert list(nlu_files)[0].endswith("nlu.yml") + assert list(nlu_files)[0].endswith("nlu.yml") # noqa: RUF015 assert len(core_files) == 2 assert any(file.endswith("stories.yml") for file in core_files) diff --git a/tests/shared/utils/test_validation.py b/tests/shared/utils/test_validation.py index c38f8230baea..e9e35b26a793 100644 --- a/tests/shared/utils/test_validation.py +++ b/tests/shared/utils/test_validation.py @@ -4,6 +4,7 @@ import pytest from pep440_version_utils import Version +from rasa.shared.core.flows.yaml_flows_io import FLOWS_SCHEMA_FILE from rasa.shared.exceptions import YamlException, SchemaValidationError import rasa.shared.utils.io @@ -16,7 +17,10 @@ LATEST_TRAINING_DATA_FORMAT_VERSION, ) from rasa.shared.nlu.training_data.formats.rasa_yaml import NLU_SCHEMA_FILE -from rasa.shared.utils.validation import KEY_TRAINING_DATA_FORMAT_VERSION +from rasa.shared.utils.validation import ( + KEY_TRAINING_DATA_FORMAT_VERSION, + validate_yaml_with_jsonschema, +) @pytest.mark.parametrize( @@ -380,3 +384,297 @@ def validate() -> None: thread.join() assert len(successful_results) == len(threads) + + +@pytest.mark.parametrize( + "flow_yaml", + [ + """flows: + replace_eligible_card: + description: Never predict StartFlow for this flow, users are not able to trigger. + name: replace eligible card + steps: + - collect: replacement_reason + next: + - if: replacement_reason == "lost" + then: + - collect: was_card_used_fraudulently + ask_before_filling: true + next: + - if: was_card_used_fraudulently + then: + - action: utter_report_fraud + next: END + - else: start_replacement + - if: "replacement_reason == 'damaged'" + then: start_replacement + - else: + - action: utter_unknown_replacement_reason_handover + next: END + - id: start_replacement + action: utter_will_cancel_and_send_new + - action: utter_new_card_has_been_ordered""", + """flows: + replace_card: + description: The user needs to replace their card. + name: replace_card + steps: + - collect: confirm_correct_card + ask_before_filling: true + next: + - if: "confirm_correct_card" + then: + - link: "replace_eligible_card" + - else: + - action: utter_relevant_card_not_linked + next: END + """, + f""" +version: "{LATEST_TRAINING_DATA_FORMAT_VERSION}" +flows: + transfer_money: + description: This flow lets users send money. 
+ name: transfer money + steps: + - id: "ask_recipient" + collect: transfer_recipient + next: "ask_amount" + - id: "ask_amount" + collect: transfer_amount + next: "execute_transfer" + - id: "execute_transfer" + action: action_transfer_money""", + """flows: + setup_recurrent_payment: + name: setup recurrent payment + steps: + - collect: recurrent_payment_type + rejections: + - if: not ({"direct debit" "standing order"} contains recurrent_payment_type) + utter: utter_invalid_recurrent_payment_type + description: the type of payment + - collect: recurrent_payment_recipient + utter: utter_ask_recipient + description: the name of a person + - collect: recurrent_payment_amount_of_money + description: the amount of money without any currency designation + - collect: recurrent_payment_frequency + description: the frequency of the payment + rejections: + - if: not ({"monthly" "yearly"} contains recurrent_payment_frequency) + utter: utter_invalid_recurrent_payment_frequency + - collect: recurrent_payment_start_date + description: the start date of the payment + - collect: recurrent_payment_end_date + description: the end date of the payment + rejections: + - if: recurrent_payment_end_date < recurrent_payment_start_date + utter: utter_invalid_recurrent_payment_end_date + - collect: recurrent_payment_confirmation + description: accepts True or False + ask_before_filling: true + next: + - if: not recurrent_payment_confirmation + then: + - action: utter_payment_cancelled + next: END + - else: "execute_payment" + - id: "execute_payment" + action: action_execute_recurrent_payment + next: + - if: setup_recurrent_payment_successful + then: + - action: utter_payment_complete + next: END + - else: "payment_failed" + - id: "payment_failed" + action: utter_payment_failed + - action: utter_failed_payment_handover + - action: utter_failed_handoff""", + """ + flows: + foo_flow: + steps: + - id: "1" + set_slots: + - foo: bar + next: "2" + - id: "2" + action: action_listen + next: "1" + """, + """ + flows: + test_flow: + description: Test flow + steps: + - id: "1" + action: action_xyz + next: "2" + - id: "2" + action: utter_ask_name""", + ], +) +def test_flow_validation_pass(flow_yaml: str) -> None: + # test fails if exception is raised + validate_yaml_with_jsonschema(flow_yaml, FLOWS_SCHEMA_FILE) + + +@pytest.mark.parametrize( + "flow_yaml, error_msg", + [ + ("""flows:""", "None is not of type 'object'."), + ( + """flows: + test: + name: test + steps:""", + ("None is not of type 'array'."), + ), + ( + """flows: + test: + - id: test""", + "[ordereddict([('id', 'test')])] is not of type 'object'.", + ), + ( + """flows: + test: + name: test + steps: + - collect: recurrent_payment_type + rejections: + - if: not ({"direct debit" "standing order"} contains recurrent_payment_type) + utter: utter_invalid_recurrent_payment_type + desc: the type of payment""", + ( + "('desc', 'the type of payment')]) is not valid" + " under any of the given schemas." 
+ ), + ), + ( # next is a Bool + """flows: + test: + name: test + steps: + - collect: confirm_correct_card + ask_before_filling: true + next: + - if: "confirm_correct_card" + then: + - link: "replace_eligible_card" + - else: + - action: utter_relevant_card_not_linked + next: True""", + "('next', True)])])])])]) is not valid under any of the given schemas.", + ), + ( # just next and ask_before_filling + """flows: + test: + name: test + steps: + - ask_before_filling: true + next: + - if: "confirm_correct_card" + then: + - link: "replace_eligible_card" + - else: + - action: utter_relevant_card_not_linked + next: END""", + ( + "('if', 'confirm_correct_card'), ('then'," + " [ordereddict([('link', 'replace_eligible_card')])])]), " + "ordereddict([('else', [ordereddict([('action', " + "'utter_relevant_card_not_linked'), ('next', 'END')])])])]" + " is not of type 'null'. Failed to validate data," + " make sure your data is valid." + ), + ), + ( # action added to collect + """flows: + test: + steps: + - collect: confirm_correct_card + action: utter_xyz + ask_before_filling: true""", + ( + "([('collect', 'confirm_correct_card'), ('action', 'utter_xyz')," + " ('ask_before_filling', True)])" + " is not valid under any of the given schemas." + ), + ), + ( # random addition to action + """flows: + test: + steps: + - action: utter_xyz + random_xyz: true + next: END""", + "Failed validating 'type' in schema[2]['properties']['next']", + ), + ( # random addition to collect + """flows: + test: + steps: + - collect: confirm_correct_card + random_xyz: utter_xyz + ask_before_filling: true""", + ( + "ordereddict([('collect', 'confirm_correct_card'), " + "('random_xyz', 'utter_xyz'), ('ask_before_filling', True)])" + " is not valid under any of the given schemas." + ), + ), + ( # random addition to flow definition + """flows: + test: + random_xyz: True + steps: + - action: utter_xyz + next: id-21312""", + "Additional properties are not allowed ('random_xyz' was unexpected).", + ), + ( + """flows: + test: + steps: + - action: True + next: id-2132""", + ( + "ordereddict([('action', True), ('next', 'id-2132')])" + " is not valid under any of the given schemas." + ), + ), + ( # next is a step + """flows: + test: + steps: + - action: xyz + next: + - action: utter_xyz""", + ( + "([('action', 'xyz'), ('next'," + " [ordereddict([('action', 'utter_xyz')])])])" + " is not valid under any of the given schemas." + ), + ), + ( # next is without then + """flows: + test: + steps: + - action: xyz + next: + - if: xyz""", + ( + "([('action', 'xyz'), ('next', [ordereddict([('if', 'xyz')])])])" + " is not valid under any of the given schemas." 
+ ), + ), + ], +) +def test_flow_validation_fail(flow_yaml: str, error_msg: str) -> None: + with pytest.raises(SchemaValidationError) as e: + rasa.shared.utils.validation.validate_yaml_with_jsonschema( + flow_yaml, FLOWS_SCHEMA_FILE + ) + assert error_msg in str(e.value) diff --git a/tests/test_server.py b/tests/test_server.py index 8467693ed24e..cb9962a56073 100644 --- a/tests/test_server.py +++ b/tests/test_server.py @@ -57,6 +57,7 @@ SESSION_START_METADATA_SLOT, DIALOGUE_STACK_SLOT, RETURN_VALUE_SLOT, + FLOW_HASHES_SLOT, ) from rasa.shared.core.domain import Domain, SessionConfig from rasa.shared.core.events import ( @@ -1117,6 +1118,7 @@ async def test_requesting_non_existent_tracker(rasa_app: SanicASGITestClient): assert content["slots"] == { "name": None, REQUESTED_SLOT: None, + FLOW_HASHES_SLOT: None, SESSION_START_METADATA_SLOT: None, DIALOGUE_STACK_SLOT: None, RETURN_VALUE_SLOT: None, @@ -2214,7 +2216,7 @@ async def test_get_tracker_with_query_param_include_events_after_restart( serialized_actual_events = tracker["events"] - restarted_event = [ + restarted_event = [ # noqa: RUF015 event for event in events_to_store if isinstance(event, Restarted) ][0] truncated_events = events_to_store[events_to_store.index(restarted_event) + 1 :] @@ -2245,11 +2247,11 @@ async def test_get_tracker_with_query_param_include_events_applied( serialized_actual_events = tracker["events"] - restarted_event = [ + restarted_event = [ # noqa: RUF015 event for event in events_to_store if isinstance(event, Restarted) ][0] truncated_events = events_to_store[events_to_store.index(restarted_event) + 1 :] - session_started = [ + session_started = [ # noqa: RUF015 event for event in truncated_events if isinstance(event, SessionStarted) ][0] truncated_events = truncated_events[truncated_events.index(session_started) + 1 :] diff --git a/tests/test_validator.py b/tests/test_validator.py index d901d73fb333..d55535d71982 100644 --- a/tests/test_validator.py +++ b/tests/test_validator.py @@ -1,6 +1,7 @@ +import logging import textwrap import warnings -from typing import Text +from typing import Any, Dict, List, Text import pytest from _pytest.logging import LogCaptureFixture @@ -856,3 +857,601 @@ def test_warn_if_config_mandatory_keys_are_not_set_invalid_paths( with pytest.warns(UserWarning, match=message): validator.warn_if_config_mandatory_keys_are_not_set() + + +@pytest.mark.parametrize( + "domain_actions, domain_slots, log_message", + [ + # set_slot slot is not listed in the domain + ( + ["action_transfer_money"], + {"transfer_amount": {"type": "float", "mappings": []}}, + "The slot 'account_type' is used in the step 'set_account_type' " + "of flow id 'transfer_money', but it is not listed in the domain slots.", + ), + # collect slot is not listed in the domain + ( + ["action_transfer_money"], + {"account_type": {"type": "text", "mappings": []}}, + "The slot 'transfer_amount' is used in the step 'ask_amount' " + "of flow id 'transfer_money', but it is not listed in the domain slots.", + ), + # action name is not listed in the domain + ( + [], + { + "account_type": {"type": "text", "mappings": []}, + "transfer_amount": {"type": "float", "mappings": []}, + }, + "The action 'action_transfer_money' is used in the step 'execute_transfer' " + "of flow id 'transfer_money', but it is not listed in the domain file.", + ), + ], +) +def test_verify_flow_steps_against_domain_fail( + tmp_path: Path, + nlu_data_path: Path, + domain_actions: List[Text], + domain_slots: Dict[Text, Any], + log_message: Text, + caplog: 
LogCaptureFixture, +) -> None: + flows_file = tmp_path / "flows.yml" + with open(flows_file, "w") as file: + file.write( + f""" + version: "{LATEST_TRAINING_DATA_FORMAT_VERSION}" + flows: + transfer_money: + description: This flow lets users send money. + name: transfer money + steps: + - id: "ask_amount" + collect: transfer_amount + next: "set_account_type" + - id: "set_account_type" + set_slots: + - account_type: "debit" + next: "execute_transfer" + - id: "execute_transfer" + action: action_transfer_money + """ + ) + domain_file = tmp_path / "domain.yml" + with open(domain_file, "w") as file: + file.write( + f""" + version: "{LATEST_TRAINING_DATA_FORMAT_VERSION}" + intents: + - greet + slots: + {domain_slots} + actions: {domain_actions} + """ + ) + importer = RasaFileImporter( + config_file="data/test_moodbot/config.yml", + domain_path=str(domain_file), + training_data_paths=[str(flows_file), str(nlu_data_path)], + ) + + validator = Validator.from_importer(importer) + + with caplog.at_level(logging.ERROR): + assert not validator.verify_flows_steps_against_domain() + + assert log_message in caplog.text + + +def test_verify_flow_steps_against_domain_disallowed_list_slot( + tmp_path: Path, + nlu_data_path: Path, + caplog: LogCaptureFixture, +) -> None: + flows_file = tmp_path / "flows.yml" + with open(flows_file, "w") as file: + file.write( + f""" + version: "{LATEST_TRAINING_DATA_FORMAT_VERSION}" + flows: + order_pizza: + description: This flow lets users order their favourite pizza. + name: order pizza + steps: + - id: "ask_pizza_toppings" + collect: pizza_toppings + next: "ask_address" + - id: "ask_address" + collect: address + """ + ) + domain_file = tmp_path / "domain.yml" + with open(domain_file, "w") as file: + file.write( + f""" + version: "{LATEST_TRAINING_DATA_FORMAT_VERSION}" + intents: + - greet + slots: + pizza_toppings: + type: list + mappings: [] + address: + type: text + mappings: [] + """ + ) + importer = RasaFileImporter( + config_file="data/test_moodbot/config.yml", + domain_path=str(domain_file), + training_data_paths=[str(flows_file), str(nlu_data_path)], + ) + + validator = Validator.from_importer(importer) + + with caplog.at_level(logging.ERROR): + assert not validator.verify_flows_steps_against_domain() + + assert ( + "The slot 'pizza_toppings' is used in the step 'ask_pizza_toppings' " + "of flow id 'order_pizza', but it is a list slot. List slots are " + "currently not supported in flows." in caplog.text + ) + + +def test_verify_flow_steps_against_domain_dialogue_stack_slot( + tmp_path: Path, + nlu_data_path: Path, + caplog: LogCaptureFixture, +) -> None: + flows_file = tmp_path / "flows.yml" + with open(flows_file, "w") as file: + file.write( + f""" + version: "{LATEST_TRAINING_DATA_FORMAT_VERSION}" + flows: + my_flow: + description: Test that dialogue stack is not modified in flows. 
+ name: test flow + steps: + - id: "ask_internal_slot" + collect: dialogue_stack + """ + ) + domain_file = tmp_path / "domain.yml" + with open(domain_file, "w") as file: + file.write( + f""" + version: "{LATEST_TRAINING_DATA_FORMAT_VERSION}" + intents: + - greet + """ + ) + importer = RasaFileImporter( + config_file="data/test_moodbot/config.yml", + domain_path=str(domain_file), + training_data_paths=[str(flows_file), str(nlu_data_path)], + ) + + validator = Validator.from_importer(importer) + + with caplog.at_level(logging.ERROR): + assert not validator.verify_flows_steps_against_domain() + + assert ( + "The slot 'dialogue_stack' is used in the step 'ask_internal_slot' " + "of flow id 'my_flow', but it is a reserved slot." in caplog.text + ) + + +def test_verify_flow_steps_against_domain_interpolated_action_name( + caplog: LogCaptureFixture, + tmp_path: Path, + nlu_data_path: Path, +) -> None: + flows_file = tmp_path / "flows.yml" + with open(flows_file, "w") as file: + file.write( + f""" + version: "{LATEST_TRAINING_DATA_FORMAT_VERSION}" + flows: + pattern_collect_information: + description: Test that interpolated names log a warning. + name: test flow + steps: + - id: "validate" + action: "validate_{{context.collect}}" + """ + ) + domain_file = tmp_path / "domain.yml" + with open(domain_file, "w") as file: + file.write( + f""" + version: "{LATEST_TRAINING_DATA_FORMAT_VERSION}" + intents: + - greet + """ + ) + importer = RasaFileImporter( + config_file="data/test_moodbot/config.yml", + domain_path=str(domain_file), + training_data_paths=[str(flows_file), str(nlu_data_path)], + ) + + validator = Validator.from_importer(importer) + + with caplog.at_level(logging.WARNING): + assert validator.verify_flows_steps_against_domain() + assert ( + "An interpolated action name 'validate_{context.collect}' was found " + "at step 'validate' of flow id 'pattern_collect_information'. " + "Skipping validation for this step." in caplog.text + ) + + +def test_verify_unique_flows_duplicate_names( + tmp_path: Path, + nlu_data_path: Path, + caplog: LogCaptureFixture, +) -> None: + duplicate_flow_name = "transfer money" + flows_file = tmp_path / "flows.yml" + with open(flows_file, "w") as file: + file.write( + f""" + version: "{LATEST_TRAINING_DATA_FORMAT_VERSION}" + flows: + transfer_money: + description: This flow lets users send money. + name: {duplicate_flow_name} + steps: + - id: "ask_recipient" + collect: transfer_recipient + next: "ask_amount" + - id: "ask_amount" + collect: transfer_amount + next: "execute_transfer" + - id: "execute_transfer" + action: action_transfer_money + recurrent_payment: + description: This flow sets up a recurrent payment. 
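+ # deliberately reuses the same name as transfer_money above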
+ name: {duplicate_flow_name} + steps: + - id: "set_up_recurrence" + action: action_set_up_recurrent_payment + """ + ) + domain_file = tmp_path / "domain.yml" + with open(domain_file, "w") as file: + file.write( + f""" + version: "{LATEST_TRAINING_DATA_FORMAT_VERSION}" + intents: + - greet + slots: + transfer_recipient: + type: text + mappings: [] + transfer_amount: + type: float + mappings: [] + actions: + - action_transfer_money + - action_set_up_recurrent_payment + """ + ) + importer = RasaFileImporter( + config_file="data/test_moodbot/config.yml", + domain_path=str(domain_file), + training_data_paths=[str(flows_file), str(nlu_data_path)], + ) + + validator = Validator.from_importer(importer) + + with caplog.at_level(logging.ERROR): + assert not validator.verify_unique_flows() + + assert ( + f"Detected duplicate flow name '{duplicate_flow_name}' for " + f"flow id 'recurrent_payment'. Flow names must be unique. " + f"Please make sure that all flows have different names." + ) in caplog.text + + +def test_verify_flow_names_non_empty( + tmp_path: Path, + nlu_data_path: Path, + caplog: LogCaptureFixture, +) -> None: + flows_file = tmp_path / "flows.yml" + with open(flows_file, "w") as file: + file.write( + f""" + version: "{LATEST_TRAINING_DATA_FORMAT_VERSION}" + flows: + transfer_money: + description: This flow lets users send money. + name: "" + steps: + - collect: transfer_recipient + """ + ) + domain_file = tmp_path / "domain.yml" + with open(domain_file, "w") as file: + file.write( + f""" + version: "{LATEST_TRAINING_DATA_FORMAT_VERSION}" + slots: + transfer_recipient: + type: text + mappings: [] + """ + ) + importer = RasaFileImporter( + config_file="data/test_moodbot/config.yml", + domain_path=str(domain_file), + training_data_paths=[str(flows_file), str(nlu_data_path)], + ) + + validator = Validator.from_importer(importer) + + with caplog.at_level(logging.ERROR): + assert not validator.verify_unique_flows() + + assert "empty name" in caplog.text + assert "transfer_money" in caplog.text + + +def test_verify_unique_flows_duplicate_descriptions( + tmp_path: Path, + nlu_data_path: Path, + caplog: LogCaptureFixture, +) -> None: + duplicate_flow_description_with_punctuation = "This flow lets users send money." 
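+ # identical wording; only the trailing punctuation differs, which
+ # the validator should still flag as a duplicate description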
+    duplicate_flow_description = "This flow lets users send money"
+    flows_file = tmp_path / "flows.yml"
+    with open(flows_file, "w") as file:
+        file.write(
+            f"""
+            version: "{LATEST_TRAINING_DATA_FORMAT_VERSION}"
+            flows:
+              transfer_money:
+                description: {duplicate_flow_description_with_punctuation}
+                name: transfer money
+                steps:
+                  - id: "ask_recipient"
+                    collect: transfer_recipient
+                    next: "ask_amount"
+                  - id: "ask_amount"
+                    collect: transfer_amount
+                    next: "execute_transfer"
+                  - id: "execute_transfer"
+                    action: action_transfer_money
+              recurrent_payment:
+                description: {duplicate_flow_description}
+                name: setup recurrent payment
+                steps:
+                  - id: "set_up_recurrence"
+                    action: action_set_up_recurrent_payment
+            """
+        )
+    domain_file = tmp_path / "domain.yml"
+    with open(domain_file, "w") as file:
+        file.write(
+            f"""
+            version: "{LATEST_TRAINING_DATA_FORMAT_VERSION}"
+            intents:
+            - greet
+            slots:
+              transfer_recipient:
+                type: text
+                mappings: []
+              transfer_amount:
+                type: float
+                mappings: []
+            actions:
+            - action_transfer_money
+            - action_set_up_recurrent_payment
+            """
+        )
+    importer = RasaFileImporter(
+        config_file="data/test_moodbot/config.yml",
+        domain_path=str(domain_file),
+        training_data_paths=[str(flows_file), str(nlu_data_path)],
+    )
+
+    validator = Validator.from_importer(importer)
+
+    with caplog.at_level(logging.ERROR):
+        assert not validator.verify_unique_flows()
+
+    assert (
+        "Detected duplicate flow description for flow id 'recurrent_payment'. "
+        "Flow descriptions must be unique. "
+        "Please make sure that all flows have different descriptions."
+    ) in caplog.text
+
+
+def test_verify_predicates_invalid_rejection_if(
+    tmp_path: Path,
+    nlu_data_path: Path,
+    caplog: LogCaptureFixture,
+) -> None:
+    predicate = 'account_type not in {{"debit", "savings"}}'
+    error_log = (
+        f"Detected invalid rejection '{predicate}' "
+        f"at `collect` step 'ask_account_type' "
+        f"for flow id 'transfer_money'. "
+        f"Please make sure that all conditions are valid."
+    )
+    flows_file = tmp_path / "flows.yml"
+    with open(flows_file, "w") as file:
+        file.write(
+            f"""
+            version: "{LATEST_TRAINING_DATA_FORMAT_VERSION}"
+            flows:
+              transfer_money:
+                description: This flow lets users send money.
+                name: transfer money
+                steps:
+                  - id: "ask_account_type"
+                    collect: account_type
+                    rejections:
+                      - if: {predicate}
+                        utter: utter_invalid_account_type
+                    next: "ask_recipient"
+                  - id: "ask_recipient"
+                    collect: transfer_recipient
+                    next: "ask_amount"
+                  - id: "ask_amount"
+                    collect: transfer_amount
+                    next: "execute_transfer"
+                  - id: "execute_transfer"
+                    action: action_transfer_money
+              recurrent_payment:
+                description: This flow sets up recurrent payments.
+                name: setup recurrent payment
+                steps:
+                  - id: "set_up_recurrence"
+                    action: action_set_up_recurrent_payment
+            """
+        )
+    domain_file = tmp_path / "domain.yml"
+    with open(domain_file, "w") as file:
+        file.write(
+            f"""
+            version: "{LATEST_TRAINING_DATA_FORMAT_VERSION}"
+            intents:
+            - greet
+            slots:
+              transfer_recipient:
+                type: text
+                mappings: []
+              transfer_amount:
+                type: float
+                mappings: []
+            actions:
+            - action_transfer_money
+            - action_set_up_recurrent_payment
+            """
+        )
+    importer = RasaFileImporter(
+        config_file="data/test_moodbot/config.yml",
+        domain_path=str(domain_file),
+        training_data_paths=[str(flows_file), str(nlu_data_path)],
+    )
+
+    validator = Validator.from_importer(importer)
+
+    with caplog.at_level(logging.ERROR):
+        assert not validator.verify_predicates()
+
+    assert error_log in caplog.text
+
+
+@pytest.fixture
+def domain_file_name(tmp_path: Path) -> Path:
+    domain_file_name = tmp_path / "domain.yml"
+    with open(domain_file_name, "w") as file:
+        file.write(
+            f"""
+            version: "{LATEST_TRAINING_DATA_FORMAT_VERSION}"
+            responses:
+              utter_ask_recipient:
+              - text: "Who do you want to send money to?"
+              utter_ask_amount:
+              - text: "How much do you want to send?"
+              utter_amount_too_high:
+              - text: "Sorry, you can only send up to 1000."
+              utter_transfer_summary:
+              - text: You are sending {{amount}} to {{transfer_recipient}}.
+            """
+        )
+    return domain_file_name
+
+
+def test_verify_utterances_in_dialogues_finds_all_responses_in_flows(
+    tmp_path: Path, nlu_data_path: Path, domain_file_name: Path
+):
+    flows_file_name = tmp_path / "flows.yml"
+    with open(flows_file_name, "w") as file:
+        file.write(
+            f"""
+            version: "{LATEST_TRAINING_DATA_FORMAT_VERSION}"
+            flows:
+              transfer_money:
+                description: This flow lets users send money.
+                name: Transfer money
+                steps:
+                  - id: "ask_recipient"
+                    collect: transfer_recipient
+                    utter: utter_ask_recipient
+                    next: "ask_amount"
+                  - id: "ask_amount"
+                    collect: amount
+                    rejections:
+                      - if: amount > 1000
+                        utter: utter_amount_too_high
+                    next: "summarize_transfer"
+                  - id: "summarize_transfer"
+                    action: utter_transfer_summary
+            """
+        )
+
+    importer = RasaFileImporter(
+        config_file="data/test_moodbot/config.yml",
+        domain_path=str(domain_file_name),
+        training_data_paths=[str(flows_file_name), str(nlu_data_path)],
+    )
+
+    validator = Validator.from_importer(importer)
+
+    with warnings.catch_warnings():
+        # escalate warnings to errors so any unexpected warning fails the test
+        warnings.simplefilter("error")
+        # force validator to not ignore warnings (default is True)
+        assert validator.verify_utterances_in_dialogues(ignore_warnings=False)
+
+
+def test_verify_utterances_in_dialogues_missing_responses_in_flows(
+    tmp_path: Path, nlu_data_path: Path, domain_file_name: Path
+):
+    flows_file_name = tmp_path / "flows.yml"
+    # remove utter_ask_recipient from this flows file,
+    # but it is listed in the domain file
+    with open(flows_file_name, "w") as file:
+        file.write(
+            f"""
+            version: "{LATEST_TRAINING_DATA_FORMAT_VERSION}"
+            flows:
+              transfer_money:
+                description: This flow lets users send money.
+                name: Transfer money
+                steps:
+                  - id: "ask_recipient"
+                    collect: transfer_money_recipient
+                    next: "ask_amount"
+                  - id: "ask_amount"
+                    collect: transfer_money_amount
+                    rejections:
+                      - if: transfer_money_amount > 1000
+                        utter: utter_amount_too_high
+                    next: "summarize_transfer"
+                  - id: "summarize_transfer"
+                    action: utter_transfer_summary
+            """
+        )
+
+    importer = RasaFileImporter(
+        config_file="data/test_moodbot/config.yml",
+        domain_path=str(domain_file_name),
+        training_data_paths=[str(flows_file_name), str(nlu_data_path)],
+    )
+
+    validator = Validator.from_importer(importer)
+    match = (
+        "The utterance 'utter_ask_recipient' is not used in any story, rule or flow."
+    )
+    with pytest.warns(UserWarning, match=match):
+        # force validator to not ignore warnings (default is True)
+        validator.verify_utterances_in_dialogues(ignore_warnings=False)
diff --git a/tests/utilities.py b/tests/utilities.py
index f803ec1da0ab..0874918d7251 100644
--- a/tests/utilities.py
+++ b/tests/utilities.py
@@ -1,7 +1,8 @@
 from yarl import URL
-import textwrap
+from rasa.shared.core.domain import Domain
 from rasa.shared.core.flows.flow import FlowsList
-from rasa.shared.core.flows.yaml_flows_io import YAMLFlowsReader
+from rasa.shared.core.flows.yaml_flows_io import flows_from_str
+from rasa.shared.importers.importer import FlowSyncImporter
 
 
 def latest_request(mocked, request_type, path):
@@ -12,6 +13,11 @@ def json_of_latest_request(r):
     return r[-1].kwargs["json"]
 
 
-def flows_from_str(yaml_str: str) -> FlowsList:
-    """Reads flows from a YAML string."""
-    return YAMLFlowsReader.read_from_string(textwrap.dedent(yaml_str))
+def flows_from_str_with_defaults(yaml_str: str) -> FlowsList:
+    """Reads flows from a YAML string and includes built-in flows."""
+    return FlowSyncImporter.merge_with_default_flows(flows_from_str(yaml_str))
+
+
+def flows_default_domain() -> Domain:
+    """Returns the default domain for the default flows."""
+    return FlowSyncImporter.load_default_pattern_flows_domain()
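For context on the tests/utilities.py change above, here is a minimal usage sketch of the two new helpers. It is an illustration, not part of this diff: it assumes a checkout where rasa.shared and tests.utilities are importable, and the say_goodbye flow and utter_goodbye response are invented for the example.

    from rasa.shared.core.domain import Domain
    from rasa.shared.core.flows.flow import FlowsList
    from tests.utilities import flows_default_domain, flows_from_str_with_defaults

    # Parse a user-defined flow and merge in the built-in default pattern
    # flows, as FlowSyncImporter.merge_with_default_flows does.
    flows = flows_from_str_with_defaults(
        """
        flows:
          say_goodbye:  # hypothetical flow, not taken from this diff
            description: Bid the user farewell.
            steps:
              - action: utter_goodbye
        """
    )
    assert isinstance(flows, FlowsList)

    # Load the domain that backs the default pattern flows, e.g. when a
    # test needs a Validator over flows that include the defaults.
    domain = flows_default_domain()
    assert isinstance(domain, Domain)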