diff --git a/.dockerignore b/.dockerignore index 30b96ab92540..a5fb96ae22d3 100644 --- a/.dockerignore +++ b/.dockerignore @@ -30,12 +30,13 @@ env/ venv/ # Documentation artifacts -docs/api-ref/schema.json +docs/ site/ # UI artifacts src/prefect/server/ui/* ui/node_modules +ui-v2/ # Databases *.db @@ -49,3 +50,9 @@ dask-worker-space/ # Editors .idea/ .vscode/ + +# Other +tests/ +compat-tests/ +benches/ +build/ diff --git a/.github/dependabot.yml b/.github/dependabot.yml index dc3a7cc4b30e..b1c83e437c9c 100644 --- a/.github/dependabot.yml +++ b/.github/dependabot.yml @@ -11,7 +11,7 @@ updates: directory: '/ui/' schedule: interval: "weekly" - labels: ["ui", "development"] + labels: ["ui", "ui-dependency"] - package-ecosystem: "github-actions" directory: "/" diff --git a/.github/labeler.yml b/.github/labeler.yml index 313d304a0e02..2e9b92d069e6 100644 --- a/.github/labeler.yml +++ b/.github/labeler.yml @@ -15,3 +15,7 @@ upstream dependency: 2.x: - base-branch: '2.x' + +ui-replatform: + - changed-files: + - any-glob-to-any-file: ui-v2/** \ No newline at end of file diff --git a/.github/release.yml b/.github/release.yml index 8d07814866fb..5a71e4837e22 100644 --- a/.github/release.yml +++ b/.github/release.yml @@ -1,6 +1,10 @@ # .github/release.yml changelog: + exclude: + labels: + - ui-dependency # these dont affect user environments + - ui-replatform # lots of ongoing work, but not user facing... yet categories: - title: Breaking Changes โš ๏ธ labels: @@ -26,6 +30,8 @@ changelog: - title: Development & Tidiness ๐Ÿงน labels: - development + - title: Documentation ๐Ÿ““ + labels: - docs - title: Uncategorized labels: diff --git a/.github/workflows/docker-images.yaml b/.github/workflows/docker-images.yaml index 44ec28968e88..2630f5155626 100644 --- a/.github/workflows/docker-images.yaml +++ b/.github/workflows/docker-images.yaml @@ -101,6 +101,11 @@ jobs: flavor: | latest=false + - name: Get node version + id: get_node_version + run: | + echo "NODE_VERSION=$(cat .nvmrc)" >> $GITHUB_OUTPUT + - name: Build and push image uses: docker/build-push-action@v6 with: @@ -108,6 +113,7 @@ jobs: platforms: linux/amd64,linux/arm64 build-args: | PYTHON_VERSION=${{ matrix.python-version }} + NODE_VERSION=${{ steps.get_node_version.outputs.NODE_VERSION }} ${{ ( endsWith(matrix.flavor, 'conda') && 'BASE_IMAGE=prefect-conda' ) || '' }} ${{ ( endsWith(matrix.flavor, 'kubernetes') && 'PREFECT_EXTRAS=[kubernetes]' ) || '' }} tags: ${{ join(steps.metadata-dev.outputs.tags) }},${{ join(steps.metadata-prod.outputs.tags) }} diff --git a/.github/workflows/proxy-test.yaml b/.github/workflows/proxy-test.yaml new file mode 100644 index 000000000000..3d8a1512a107 --- /dev/null +++ b/.github/workflows/proxy-test.yaml @@ -0,0 +1,74 @@ +# This is a simple test to ensure we can make a websocket connection through a proxy server. It sets up a +# simple server and a squid proxy server. The proxy server is inaccessible from the host machine, only the proxy +# so we can confirm the proxy is actually working. 
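+# The API server lives on an internal-only Docker network, so the client running on the
+# host can reach it only by routing through the Squid proxy, which the test step points
+# at via the HTTP_PROXY and HTTPS_PROXY environment variables.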
+ +name: Proxy Test +on: + pull_request: + paths: + - .github/workflows/proxy-test.yaml + - scripts/proxy-test/* + - "src/prefect/events/clients.py" + - requirements.txt + - requirements-client.txt + - requirements-dev.txt + push: + branches: + - main + paths: + - .github/workflows/proxy-test.yaml + - scripts/proxy-test/* + - "src/prefect/events/clients.py" + - requirements.txt + - requirements-client.txt + - requirements-dev.txt + +jobs: + proxy-test: + name: Proxy Test + timeout-minutes: 10 + runs-on: ubuntu-latest + + steps: + - uses: actions/checkout@v4 + with: + persist-credentials: false + fetch-depth: 0 + + - name: Set up Python 3.10 + uses: actions/setup-python@v5 + id: setup_python + with: + python-version: "3.10" + + - name: Create Docker networks + run: | + docker network create internal_net --internal + docker network create external_net + + - name: Start API server container + working-directory: scripts/proxy-test + run: | + docker build -t api-server . + docker run -d --network internal_net --name server api-server + + - name: Start Squid Proxy container + run: | + docker run -d \ + --network internal_net \ + --network external_net \ + -p 3128:3128 \ + -v $(pwd)/scripts/proxy-test/squid.conf:/etc/squid/squid.conf \ + --name proxy \ + ubuntu/squid + + - name: Install Dependencies + run: | + python -m pip install -U uv + uv pip install --upgrade --system . + + - name: Run Proxy Tests + env: + HTTP_PROXY: http://localhost:3128 + HTTPS_PROXY: http://localhost:3128 + run: python scripts/proxy-test/client.py diff --git a/.github/workflows/python-tests.yaml b/.github/workflows/python-tests.yaml index 26d344a44641..33af87e3b9f4 100644 --- a/.github/workflows/python-tests.yaml +++ b/.github/workflows/python-tests.yaml @@ -16,6 +16,7 @@ on: - requirements-dev.txt - setup.cfg - Dockerfile + - scripts/entrypoint.sh push: branches: - main @@ -28,7 +29,7 @@ on: - requirements-dev.txt - setup.cfg - Dockerfile - + - scripts/entrypoint.sh permissions: contents: read actions: write @@ -266,6 +267,11 @@ jobs: tmp="sha-$SHORT_SHA-python${{ matrix.python-version }}" echo "image_tag=${tmp}" >> $GITHUB_OUTPUT + - name: Get node version + id: get_node_version + run: | + echo "NODE_VERSION=$(cat .nvmrc)" >> $GITHUB_OUTPUT + - name: Login to DockerHub uses: docker/login-action@v3 if: github.event.pull_request.head.repo.full_name == github.repository @@ -283,6 +289,7 @@ jobs: build-args: | PYTHON_VERSION=${{ matrix.python-version }} PREFECT_EXTRAS=[dev] + NODE_VERSION=${{ steps.get_node_version.outputs.NODE_VERSION }} tags: prefecthq/prefect-dev:${{ steps.get_image_tag.outputs.image_tag }} outputs: type=docker,dest=/tmp/image.tar diff --git a/.github/workflows/static-analysis.yaml b/.github/workflows/static-analysis.yaml index aba7bf3b6b25..96146dec5f89 100644 --- a/.github/workflows/static-analysis.yaml +++ b/.github/workflows/static-analysis.yaml @@ -67,3 +67,62 @@ jobs: - name: Run pre-commit run: | pre-commit run --show-diff-on-failure --color=always --all-files + + type-completeness-check: + name: Type completeness check + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v4 + with: + persist-credentials: false + fetch-depth: 0 + + - name: Set up uv + uses: astral-sh/setup-uv@v4 + with: + python-version: "3.12" + + - name: Calculate type completeness score + id: calculate_current_score + run: | + # `pyright` will exit with a non-zero status code if it finds any issues, + # so we need to explicitly ignore the exit code with `|| true`. + uv tool run --with-editable . 
pyright --verifytypes prefect --ignoreexternal --outputjson > prefect-analysis.json || true + SCORE=$(jq -r '.typeCompleteness.completenessScore' prefect-analysis.json) + echo "current_score=$SCORE" >> $GITHUB_OUTPUT + + - name: Checkout base branch + run: | + git checkout ${{ github.base_ref }} + + - name: Calculate base branch score + id: calculate_base_score + run: | + uv tool run --with-editable . pyright --verifytypes prefect --ignoreexternal --outputjson > prefect-analysis-base.json || true + BASE_SCORE=$(jq -r '.typeCompleteness.completenessScore' prefect-analysis-base.json) + echo "base_score=$BASE_SCORE" >> $GITHUB_OUTPUT + + - name: Compare scores + run: | + CURRENT_SCORE=$(echo ${{ steps.calculate_current_score.outputs.current_score }}) + BASE_SCORE=$(echo ${{ steps.calculate_base_score.outputs.base_score }}) + + if (( $(echo "$BASE_SCORE > $CURRENT_SCORE" | bc -l) )); then + echo "::notice title=Type Completeness Check::We noticed a decrease in type coverage with these changes. Check workflow summary for more details." + echo "### โ„น๏ธ Type Completeness Check" >> $GITHUB_STEP_SUMMARY + echo "We noticed a decrease in type coverage with these changes. To maintain our codebase quality, we aim to keep or improve type coverage with each change." >> $GITHUB_STEP_SUMMARY + echo "" >> $GITHUB_STEP_SUMMARY + echo "Need help? Ping @desertaxle or @zzstoatzz for assistance!" >> $GITHUB_STEP_SUMMARY + echo "" >> $GITHUB_STEP_SUMMARY + echo "Here's what changed:" >> $GITHUB_STEP_SUMMARY + echo "" >> $GITHUB_STEP_SUMMARY + uv run scripts/pyright_diff.py prefect-analysis-base.json prefect-analysis.json >> $GITHUB_STEP_SUMMARY + SCORE_DIFF=$(echo "$BASE_SCORE - $CURRENT_SCORE" | bc -l) + if (( $(echo "$SCORE_DIFF > 0.001" | bc -l) )); then + exit 1 + fi + elif (( $(echo "$BASE_SCORE < $CURRENT_SCORE" | bc -l) )); then + echo "๐ŸŽ‰ Great work! The type coverage has improved with these changes" >> $GITHUB_STEP_SUMMARY + else + echo "โœ… Type coverage maintained" >> $GITHUB_STEP_SUMMARY + fi diff --git a/.github/workflows/time-docker-build.yaml b/.github/workflows/time-docker-build.yaml new file mode 100644 index 000000000000..659f0584b8c9 --- /dev/null +++ b/.github/workflows/time-docker-build.yaml @@ -0,0 +1,112 @@ +name: Docker Build Time Benchmark + +on: + push: + branches: + - main + paths: + - "Dockerfile" + - ".dockerignore" + pull_request: + paths: + - "Dockerfile" + - ".dockerignore" + +jobs: + benchmark: + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v4 + with: + fetch-depth: 0 + + - name: Set up Docker Buildx + uses: docker/setup-buildx-action@v3 + + # For PRs, checkout the base branch to compare against + - name: Checkout base branch + if: github.base_ref + uses: actions/checkout@v4 + with: + ref: ${{ github.base_ref }} + clean: true + + - name: Clean Docker system + run: | + docker system prune -af + docker builder prune -af + + - name: Build base branch image + if: github.base_ref + id: base_build_time + run: | + start_time=$(date +%s) + DOCKER_BUILDKIT=1 docker build . 
--no-cache --progress=plain + end_time=$(date +%s) + base_time=$((end_time - start_time)) + echo "base_time=$base_time" >> $GITHUB_OUTPUT + + # For PRs, checkout back to the PR's HEAD + - name: Checkout PR branch + if: github.base_ref + uses: actions/checkout@v4 + with: + ref: ${{ github.head_ref }} + clean: true + + - name: Clean Docker system again + run: | + docker system prune -af + docker builder prune -af + + - name: Build and time Docker image + id: build_time + run: | + start_time=$(date +%s) + DOCKER_BUILDKIT=1 docker build . --no-cache --progress=plain + end_time=$(date +%s) + build_time=$((end_time - start_time)) + echo "build_time=$build_time" >> $GITHUB_OUTPUT + + - name: Compare build times + run: | + CURRENT_TIME=${{ steps.build_time.outputs.build_time }} + + if [ "${{ github.base_ref }}" != "" ]; then + BASE_TIME=${{ steps.base_build_time.outputs.base_time }} + + echo "### ๐Ÿ—๏ธ Docker Build Time Comparison" >> $GITHUB_STEP_SUMMARY + echo "" >> $GITHUB_STEP_SUMMARY + echo "| Branch | Build Time | Difference |" >> $GITHUB_STEP_SUMMARY + echo "|--------|------------|------------|" >> $GITHUB_STEP_SUMMARY + echo "| base (${{ github.base_ref }}) | ${BASE_TIME}s | - |" >> $GITHUB_STEP_SUMMARY + + DIFF=$((CURRENT_TIME - BASE_TIME)) + PERCENT=$(echo "scale=2; ($CURRENT_TIME - $BASE_TIME) * 100 / $BASE_TIME" | bc) + + if [ $DIFF -gt 0 ]; then + DIFF_TEXT="โฌ†๏ธ +${DIFF}s (+${PERCENT}%)" + elif [ $DIFF -lt 0 ]; then + DIFF_TEXT="โฌ‡๏ธ ${DIFF}s (${PERCENT}%)" + else + DIFF_TEXT="โœจ No change" + fi + + echo "| current (${{ github.head_ref }}) | ${CURRENT_TIME}s | $DIFF_TEXT |" >> $GITHUB_STEP_SUMMARY + + # Fail if build time increased by more than 5% + if (( $(echo "$PERCENT > 5" | bc -l) )); then + echo "" >> $GITHUB_STEP_SUMMARY + echo "โŒ **Build time increased by more than 5%!**" >> $GITHUB_STEP_SUMMARY + echo "This change significantly increases the build time. Please review the Dockerfile changes." >> $GITHUB_STEP_SUMMARY + exit 1 + elif (( $(echo "$PERCENT < 0" | bc -l) )); then + echo "" >> $GITHUB_STEP_SUMMARY + echo "โœ… **Build time decreased!**" >> $GITHUB_STEP_SUMMARY + echo "Great job optimizing the build!" 
>> $GITHUB_STEP_SUMMARY + fi + else + echo "### ๐Ÿ—๏ธ Docker Build Time" >> $GITHUB_STEP_SUMMARY + echo "" >> $GITHUB_STEP_SUMMARY + echo "Build completed in ${CURRENT_TIME} seconds" >> $GITHUB_STEP_SUMMARY + fi diff --git a/.gitignore b/.gitignore index 127e8e8c02a0..c247225328a5 100644 --- a/.gitignore +++ b/.gitignore @@ -52,7 +52,6 @@ src/prefect/server/ui_build/* # API artifacts - # MacOS .DS_Store @@ -76,4 +75,7 @@ libcairo.2.dylib # setuptools-scm generated files src/integrations/*/**/_version.py -*.log \ No newline at end of file +*.log + +# Pyright type analysis report +prefect-analysis.json diff --git a/.nvmrc b/.nvmrc index 6aab9b43fa34..02c8b485edb5 100644 --- a/.nvmrc +++ b/.nvmrc @@ -1 +1 @@ -v18.18.0 +18.18.0 diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 30a626c49756..bf4a16c4ca6a 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -45,9 +45,6 @@ repos: src/prefect/server/events/.*| scripts/generate_mintlify_openapi_docs.py )$ - - - repo: local - hooks: - id: generate-settings-schema name: Generating Settings Schema language: system @@ -59,9 +56,6 @@ repos: src/prefect/settings/models/.*| scripts/generate_settings_schema.py )$ - - - repo: local - hooks: - id: generate-settings-ref name: Generating Settings Reference language: system @@ -73,3 +67,40 @@ repos: src/prefect/settings/models/.*| scripts/generate_settings_ref.py )$ + - id: lint-ui-v2 + name: Lint UI v2 + language: system + entry: sh + args: ['-c', 'cd ui-v2 && npm i --no-upgrade --silent --no-progress && npm run lint'] + files: | + (?x)^( + .pre-commit-config.yaml| + ui-v2/.* + )$ + pass_filenames: false + - id: format-ui-v2 + name: Format UI v2 + language: system + entry: sh + args: ['-c', 'cd ui-v2 && npm i --no-upgrade --silent --no-progress && npm run format'] + files: | + (?x)^( + .pre-commit-config.yaml| + ui-v2/.* + )$ + pass_filenames: false + - id: service-sync-ui-v2-openapi + name: Sync UI v2 OpenAPI + language: system + entry: sh + args: ['-c', 'cd ui-v2 && npm i --no-upgrade --silent --no-progress && npm run service-sync'] + files: | + (?x)^( + .pre-commit-config.yaml| + .pre-commit-config.yaml| + src/prefect/server/api/.*| + src/prefect/server/schemas/.*| + src/prefect/server/events/.*| + ui-v2/package.json + )$ + pass_filenames: false \ No newline at end of file diff --git a/Dockerfile b/Dockerfile index 0f6b678dfaf7..542296331793 100644 --- a/Dockerfile +++ b/Dockerfile @@ -9,12 +9,12 @@ ARG BASE_IMAGE=python:${PYTHON_VERSION}-slim # The version used to build the Python distributable. ARG BUILD_PYTHON_VERSION=3.9 # THe version used to build the UI distributable. -ARG NODE_VERSION=16.15 +ARG NODE_VERSION=18.18.0 # Any extra Python requirements to install ARG EXTRA_PIP_PACKAGES="" # Build the UI distributable. 
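+# NODE_VERSION defaults to the version pinned in .nvmrc; the Docker image workflows
+# read .nvmrc and pass the same value through as a build arg.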
-FROM node:${NODE_VERSION}-bullseye-slim as ui-builder +FROM node:${NODE_VERSION}-bullseye-slim AS ui-builder WORKDIR /opt/ui @@ -61,7 +61,7 @@ RUN mv "dist/$(python setup.py --fullname).tar.gz" "dist/prefect.tar.gz" # Setup a base final image from miniconda -FROM continuumio/miniconda3 as prefect-conda +FROM continuumio/miniconda3 AS prefect-conda # Create a new conda environment with our required Python version ARG PYTHON_VERSION @@ -76,23 +76,23 @@ SHELL ["/bin/bash", "--login", "-c"] # Build the final image with Prefect installed and our entrypoint configured -FROM ${BASE_IMAGE} as final +FROM ${BASE_IMAGE} AS final -ENV LC_ALL C.UTF-8 -ENV LANG C.UTF-8 +ENV LC_ALL=C.UTF-8 +ENV LANG=C.UTF-8 -LABEL maintainer="help@prefect.io" -LABEL io.prefect.python-version=${PYTHON_VERSION} -LABEL org.label-schema.schema-version = "1.0" -LABEL org.label-schema.name="prefect" -LABEL org.label-schema.url="https://www.prefect.io/" +ENV UV_LINK_MODE=copy +ENV UV_SYSTEM_PYTHON=1 + +LABEL maintainer="help@prefect.io" \ + io.prefect.python-version=${PYTHON_VERSION} \ + org.label-schema.schema-version="1.0" \ + org.label-schema.name="prefect" \ + org.label-schema.url="https://www.prefect.io/" WORKDIR /opt/prefect -# Install requirements -# - tini: Used in the entrypoint -# - build-essential: Required for Python dependencies without wheels -# - git: Required for retrieving workflows from git sources +# Install system requirements RUN apt-get update && \ apt-get install --no-install-recommends -y \ tini=0.19.* \ @@ -100,25 +100,31 @@ RUN apt-get update && \ git=1:2.* \ && apt-get clean && rm -rf /var/lib/apt/lists/* -# Pin the pip version -RUN python -m pip install --no-cache-dir pip==24.2 +# Install UV from official image - pin to specific version for build caching +COPY --from=ghcr.io/astral-sh/uv:0.5.8 /uv /uvx /bin/ -# Install the base requirements separately so they cache -COPY requirements-client.txt requirements.txt ./ -RUN pip install --upgrade --upgrade-strategy eager --no-cache-dir -r requirements.txt +# Install dependencies using a temporary mount for requirements files +RUN --mount=type=bind,source=requirements-client.txt,target=/tmp/requirements-client.txt \ + --mount=type=bind,source=requirements.txt,target=/tmp/requirements.txt \ + --mount=type=cache,target=/root/.cache/uv \ + uv pip install --system -r /tmp/requirements.txt -r /tmp/requirements-client.txt # Install prefect from the sdist COPY --from=python-builder /opt/prefect/dist ./dist -# Extras to include during `pip install`. Must be wrapped in brackets, e.g. 
"[dev]" -ARG PREFECT_EXTRAS=${PREFECT_EXTRAS:-""} -RUN pip install --no-cache-dir "./dist/prefect.tar.gz${PREFECT_EXTRAS}" +# Extras to include during installation +ARG PREFECT_EXTRAS +RUN --mount=type=cache,target=/root/.cache/uv \ + uv pip install "./dist/prefect.tar.gz${PREFECT_EXTRAS:-""}" && \ + rm -rf dist/ -# Remove setuptools from the image -RUN pip uninstall -y setuptools +# Remove setuptools +RUN uv pip uninstall setuptools -ARG EXTRA_PIP_PACKAGES=${EXTRA_PIP_PACKAGES:-""} -RUN [ -z "${EXTRA_PIP_PACKAGES}" ] || pip install --no-cache-dir "${EXTRA_PIP_PACKAGES}" +# Install any extra packages +ARG EXTRA_PIP_PACKAGES +RUN --mount=type=cache,target=/root/.cache/uv \ + [ -z "${EXTRA_PIP_PACKAGES:-""}" ] || uv pip install "${EXTRA_PIP_PACKAGES}" # Smoke test RUN prefect version diff --git a/MANIFEST.in b/MANIFEST.in index 53e36053eb31..fd224ca8a6c8 100644 --- a/MANIFEST.in +++ b/MANIFEST.in @@ -25,9 +25,9 @@ include src/prefect/server/api/collections_data/views/*.json # Migrations include src/prefect/server/database/alembic.ini -include src/prefect/server/database/migrations/* -include src/prefect/server/database/migrations/versions/* -include src/prefect/server/database/migrations/versions/*/* +include src/prefect/server/database/_migrations/* +include src/prefect/server/database/_migrations/versions/* +include src/prefect/server/database/_migrations/versions/*/* # SQL templates graft src/prefect/server/database/sql diff --git a/compat-tests b/compat-tests index 9b5fc44426b6..3c5ec0111e2a 160000 --- a/compat-tests +++ b/compat-tests @@ -1 +1 @@ -Subproject commit 9b5fc44426b6a98a05408106fd6b5453ae9a0c76 +Subproject commit 3c5ec0111e2aa7b160f2b21cfd383d19448dfe13 diff --git a/docs/mint.json b/docs/mint.json index c65644f34a09..70481b0620aa 100644 --- a/docs/mint.json +++ b/docs/mint.json @@ -73,7 +73,8 @@ { "group": "For platform engineers", "pages": [ - "v3/tutorials/platform" + "v3/tutorials/platform", + "v3/tutorials/debug" ] } ], @@ -195,8 +196,7 @@ "v3/automate/events/events", "v3/automate/events/automations-triggers", "v3/automate/events/custom-triggers", - "v3/automate/events/webhook-triggers", - "v3/automate/incidents" + "v3/automate/events/webhook-triggers" ], "version": "v3" }, @@ -445,6 +445,7 @@ "v3/api-ref/rest-api/server/flow-runs/delete-flow-run-input", "v3/api-ref/rest-api/server/flow-runs/paginate-flow-runs", "v3/api-ref/rest-api/server/flow-runs/download-logs", + "v3/api-ref/rest-api/server/flow-runs/update-flow-run-labels", "v3/api-ref/rest-api/server/flow-runs/read-flow-run-history", "v3/api-ref/rest-api/server/flow-runs/count-task-runs-by-flow-run" ] diff --git a/docs/v3/api-ref/rest-api/server/flow-runs/download-logs.mdx b/docs/v3/api-ref/rest-api/server/flow-runs/download-logs.mdx index 025ea4c68981..f2a9f88c417b 100644 --- a/docs/v3/api-ref/rest-api/server/flow-runs/download-logs.mdx +++ b/docs/v3/api-ref/rest-api/server/flow-runs/download-logs.mdx @@ -1,3 +1,3 @@ --- -openapi: get /api/flow_runs/{id}/logs +openapi: get /api/flow_runs/{id}/logs/download --- \ No newline at end of file diff --git a/docs/v3/api-ref/rest-api/server/flow-runs/update-flow-run-labels.mdx b/docs/v3/api-ref/rest-api/server/flow-runs/update-flow-run-labels.mdx new file mode 100644 index 000000000000..482191e872c6 --- /dev/null +++ b/docs/v3/api-ref/rest-api/server/flow-runs/update-flow-run-labels.mdx @@ -0,0 +1,3 @@ +--- +openapi: patch /api/flow_runs/{id}/labels +--- \ No newline at end of file diff --git a/docs/v3/api-ref/rest-api/server/schema.json 
b/docs/v3/api-ref/rest-api/server/schema.json index 694f7648d060..62986c65c961 100644 --- a/docs/v3/api-ref/rest-api/server/schema.json +++ b/docs/v3/api-ref/rest-api/server/schema.json @@ -1521,6 +1521,66 @@ } } }, + "/api/flow_runs/{id}/labels": { + "patch": { + "tags": [ + "Flow Runs" + ], + "summary": "Update Flow Run Labels", + "description": "Update the labels of a flow run.", + "operationId": "update_flow_run_labels_flow_runs__id__labels_patch", + "parameters": [ + { + "name": "id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "format": "uuid", + "description": "The flow run id", + "title": "Id" + }, + "description": "The flow run id" + }, + { + "name": "x-prefect-api-version", + "in": "header", + "required": false, + "schema": { + "type": "string", + "title": "X-Prefect-Api-Version" + } + } + ], + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "type": "object", + "description": "The labels to update", + "title": "Labels" + } + } + } + }, + "responses": { + "204": { + "description": "Successful Response" + }, + "422": { + "description": "Validation Error", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/HTTPValidationError" + } + } + } + } + } + } + }, "/api/task_runs/": { "post": { "tags": [ diff --git a/docs/v3/deploy/infrastructure-examples/serverless.mdx b/docs/v3/deploy/infrastructure-examples/serverless.mdx index 3adcf30ff1d8..b455c7a139ca 100644 --- a/docs/v3/deploy/infrastructure-examples/serverless.mdx +++ b/docs/v3/deploy/infrastructure-examples/serverless.mdx @@ -678,6 +678,10 @@ With your deployment created, navigate to its detail page and create a new flow You'll see the flow start running without polling the work pool, because Prefect Cloud securely connected to your serverless infrastructure, created a job, ran the job, and reported on its execution. +## Usage Limits + +Push work pool usage is unlimited. However push work pools limit flow runs to 24 hours. + ## Next steps Learn more about [work pools](/v3/deploy/infrastructure-concepts/work-pools/) and [workers](/v3/deploy/infrastructure-concepts/workers/). diff --git a/docs/v3/develop/blocks.mdx b/docs/v3/develop/blocks.mdx index eb8eada8c7c4..075f11e6d970 100644 --- a/docs/v3/develop/blocks.mdx +++ b/docs/v3/develop/blocks.mdx @@ -405,6 +405,11 @@ You can create and use these block types through the UI without installing any a | SMB | `smb` | Store data as a file on a SMB share. | | Twilio SMS | `twilio-sms` | Send notifications through Twilio SMS. | + +Built-in blocks should be registered the first time you start a Prefect server. If the auto-registration fails, you can manually register the blocks using `prefect blocks register`. + +For example, to register all built-in notification blocks, run `prefect block register -m prefect.blocks.notifications`. + The `S3`, `Azure`, `GCS`, and `GitHub` blocks are deprecated in favor of the corresponding `S3Bucket`, diff --git a/docs/v3/develop/settings-ref.mdx b/docs/v3/develop/settings-ref.mdx index 62fb0d353300..1d85829fd752 100644 --- a/docs/v3/develop/settings-ref.mdx +++ b/docs/v3/develop/settings-ref.mdx @@ -467,6 +467,18 @@ Enables sending telemetry to Prefect Cloud. **Supported environment variables**: `PREFECT_EXPERIMENTS_TELEMETRY_ENABLED` +### `lineage_events_enabled` +If `True`, enables emitting lineage events. Set to `False` to disable lineage event emission. 
+ +**Type**: `boolean` + +**Default**: `False` + +**TOML dotted key path**: `experiments.lineage_events_enabled` + +**Supported environment variables**: +`PREFECT_EXPERIMENTS_LINEAGE_EVENTS_ENABLED` + --- ## FlowsSettings Settings for controlling flow behavior diff --git a/docs/v3/manage/cloud/rate-limits.mdx b/docs/v3/manage/cloud/rate-limits.mdx index 24d8c70091cf..ca4eaa8112df 100644 --- a/docs/v3/manage/cloud/rate-limits.mdx +++ b/docs/v3/manage/cloud/rate-limits.mdx @@ -20,7 +20,7 @@ The `flow_runs`, `task_runs`, and `flows` endpoints and their subroutes are limi - 400 requests per minute for Free accounts - 2,000 requests per minute for Pro accounts -These endpoints return a `429` response with an appropriate `Retry-After` header if this limit is triggered. +These endpoints return a `429` response with an appropriate `Retry-After` header if this limit is triggered. See [ClientSettings](/v3/develop/settings-ref#clientsettings) for more information on how retries are handled client-side and how to modify the default behavior. The `logs` endpoint is limited to: diff --git a/docs/v3/tutorials/debug.mdx b/docs/v3/tutorials/debug.mdx new file mode 100644 index 000000000000..5f1792ac6b83 --- /dev/null +++ b/docs/v3/tutorials/debug.mdx @@ -0,0 +1,120 @@ +--- +title: Debug a data pipeline +description: Learn how to troubleshoot flow runs that fail. +--- + +In the [Set up a platform for data pipelines](/v3/tutorials/platform) tutorial, you used Prefect Cloud to set up a platform for data pipelines. +In this tutorial, you'll learn what to do when those data pipelines fail. + + +This tutorial starts where the [previous tutorial](/v3/tutorials/platform) leaves off, so complete that one first. +You will need a paid Prefect Cloud account. + + +## Find failures + +You can use the Prefect Cloud dashboard to find failures. + +1. Sign in to Prefect Cloud +1. Use the workspace switcher to open the `staging` workspace that you created in the last tutorial. +1. Go to **Home**, and look for red bars in the **Flow Runs** section, these indicate failed flow runs. +1. Hover over a red bar to see more details about the flow run: name, deployment, duration, timestamp, and tags. + + +You can filter by a specific tag (e.g. `team-a`) if you're only interested in a specific set of flows. + + +## Debug a failure + +A single flow might experience failures on several runs. +When this happens, it can be helpful to inspect the first failure in the series. + +1. In the **Flow Runs** section on the **Home** page, expand the `data-pipeline` flow. +1. You will see a list of failing `data-pipeline` flow runs, in reverse chronological order. +1. Use the pagination controls to navigate to the last failure in the list, this is the first failure that occurred. +1. Click the name of the flow run to go to its detail page. +1. From the flow run detail page, scroll down to the **Logs** section in the right panel. +1. Look for an error message similar to the following: ++ +``` +File "/opt/prefect/demos/simulate_failures.py", line 12, in process_data + raise Exception(f"Run failed") +``` + +It looks like there's an error in the `simulate_failures.py` file. +Now that you've found the failure, the next step is to fix the underlying code. + +## Update the code + +Open the `simulate_failures.py` file and look at line 12. 
+ +```python simulate_failures.py {12} +import argparse +import asyncio +from typing import Optional + +from prefect import flow, task +from prefect.client.orchestration import get_client + + +@task +def process_data(run: int, fail_at_run: Optional[int] = None) -> bool: + """Simulate data processing with failures""" + + # Simulate persistent failures + if fail_at_run and run > fail_at_run: + raise Exception(f"Run failed") + + return True + +# ... +``` + +The `if` statement is the problem. +If you specify the `--fail_at_run` flag, once the flow runs more than `fail_at_run` times, the flow fails with an exception. +Remove the `if` statement to fix this failure. +We added this statement to give you something to fix. :) + +```python simulate_failures.py +import argparse +import asyncio +from typing import Optional +from prefect import flow, task +from prefect.client.orchestration import get_client + +@task +def process_data(run: int, fail_at_run: Optional[int] = None) -> bool: + """Simulate data processing with failures""" + + return True + +# ... +``` + +Now, all flow runs succeed in spite of the `--fail-at-run` flag. +Deploy the fix to the staging workspace to confirm this new behavior. + +```bash +prefect cloud workspace set --workspace "/staging" +python simulate_failures.py --fail-at-run 3 +``` + +After the script finishes, open the **Home** page in Prefect Cloud to verify that the flow run is no longer failing. + +You can now switch workspaces to update the code used in the production workspace as well. + +```bash +prefect cloud workspace set --workspace "/production" +python simulate_failures.py +``` + + +## Next steps + +In this tutorial, you successfully used Prefect Cloud to fix a failing data pipeline. + +To take this to the next level, learn how to [set up an alert](/v3/automate/events/automations-triggers) so that you get notified about failures automatically. + + +Need help? [Book a meeting](https://calendly.com/prefect-experts/prefect-product-advocates?utm_campaign=prefect_docs_cloud&utm_content=prefect_docs&utm_medium=docs&utm_source=docs) with a Prefect Product Advocate to get your questions answered. + diff --git a/docs/v3/tutorials/pipelines.mdx b/docs/v3/tutorials/pipelines.mdx index 65ef4a58eb24..5df61a230e53 100644 --- a/docs/v3/tutorials/pipelines.mdx +++ b/docs/v3/tutorials/pipelines.mdx @@ -22,14 +22,18 @@ The first improvement you can make is to add retries to your flow. Whenever an HTTP request fails, you can retry it a few times before giving up. 
```python +from typing import Any + +import httpx from prefect import task + @task(retries=3) -def fetch_stats(github_repo: str): +def fetch_stats(github_repo: str) -> dict[str, Any]: """Task 1: Fetch the statistics for a GitHub repo""" api_response = httpx.get(f"https://api.github.com/repos/{github_repo}") - api_response.raise_for_status() # Force a retry if you don't get a 2xx status code + api_response.raise_for_status() # Force a retry if not a 2xx status code return api_response.json() ``` @@ -37,10 +41,26 @@ def fetch_stats(github_repo: str): Run the following code to see retries in action: ```python -import httpx +from typing import Any +import httpx from prefect import flow, task # Prefect flow and task decorators +@task(retries=3) +def fetch_stats(github_repo: str) -> dict[str, Any]: + """Task 1: Fetch the statistics for a GitHub repo""" + + api_response = httpx.get(f"https://api.github.com/repos/{github_repo}") + api_response.raise_for_status() # Force a retry if not a 2xx status code + return api_response.json() + + +@task +def get_stars(repo_stats: dict[str, Any]) -> int: + """Task 2: Get the number of stars from GitHub repo statistics""" + + return repo_stats['stargazers_count'] + @flow(log_prints=True) def show_stars(github_repos: list[str]): @@ -57,21 +77,6 @@ def show_stars(github_repos: list[str]): print(f"{repo}: {stars} stars") -@task(retries=3) -def fetch_stats(github_repo: str): - """Task 1: Fetch the statistics for a GitHub repo""" - - api_response = httpx.get(f"https://api.github.com/repos/{github_repo}") - api_response.raise_for_status() # Force a retry if you don't get a 2xx status code - return api_response.json() - - -@task -def get_stars(repo_stats: dict): - """Task 2: Get the number of stars from GitHub repo statistics""" - - return repo_stats['stargazers_count'] - # Run the flow if __name__ == "__main__": @@ -86,82 +91,83 @@ if __name__ == "__main__": ## Concurrent execution of slow tasks If individual API requests are slow, you can speed them up in aggregate by making multiple requests concurrently. -When you call the `submit` method on a task, the task is submitted to a task runner for execution. +When you call the `map` method on a task, you submit a list of arguments to the task runner to run concurrently (alternatively, you could [`.submit()` each argument individually](/v3/develop/task-runners#access-results-from-submitted-tasks)). 
```python from prefect import flow @flow(log_prints=True) -def show_stars(github_repos: list[str]): - """Flow: Show the number of stars that GitHub repos have""" +def show_stars(github_repos: list[str]) -> None: + """Flow: Show number of GitHub repo stars""" # Task 1: Make HTTP requests concurrently - repo_stats = [] - for repo in github_repos: - repo_stats.append({ - 'repo': repo, - 'task': fetch_stats.submit(repo) # Submit each task to a task runner - }) - - # Task 2: Once each concurrent task completes, show the results - for repo in repo_stats: - repo_name = repo['repo'] - stars = get_stars(repo['task'].result()) # Block until the task has completed - print(f"{repo_name}: {stars} stars") + stats_futures = fetch_stats.map(github_repos) + + # Task 2: Once each concurrent task completes, get the star counts + stars = get_stars.map(stats_futures).result() + + # Show the results + for repo, star_count in zip(github_repos, stars): + print(f"{repo}: {star_count} stars") ``` Run the following code to see concurrent tasks in action: ```python -import httpx - -from prefect import flow, task # Prefect flow and task decorators - - -@flow(log_prints=True) -def show_stars(github_repos: list[str]): - """Flow: Show the number of stars that GitHub repos have""" - - # Task 1: Make HTTP requests concurrently - repo_stats = [] - for repo in github_repos: - repo_stats.append({ - 'repo': repo, - 'task': fetch_stats.submit(repo) # Submit each task to a task runner - }) +from typing import Any - # Task 2: Once each concurrent task completes, show the results - for repo in repo_stats: - repo_name = repo['repo'] - stars = get_stars(repo['task'].result()) # Block until the task has completed - print(f"{repo_name}: {stars} stars") +import httpx +from prefect import flow, task -@task -def fetch_stats(github_repo: str): +@task(retries=3) +def fetch_stats(github_repo: str) -> dict[str, Any]: """Task 1: Fetch the statistics for a GitHub repo""" - return httpx.get(f"https://api.github.com/repos/{github_repo}").json() @task -def get_stars(repo_stats: dict): +def get_stars(repo_stats: dict[str, Any]) -> int: """Task 2: Get the number of stars from GitHub repo statistics""" + return repo_stats["stargazers_count"] - return repo_stats['stargazers_count'] + +@flow(log_prints=True) +def show_stars(github_repos: list[str]) -> None: + """Flow: Show number of GitHub repo stars""" + + # Task 1: Make HTTP requests concurrently + stats_futures = fetch_stats.map(github_repos) + + # Task 2: Once each concurrent task completes, get the star counts + stars = get_stars.map(stats_futures).result() + + # Show the results + for repo, star_count in zip(github_repos, stars): + print(f"{repo}: {star_count} stars") -# Run the flow if __name__ == "__main__": - show_stars([ - "PrefectHQ/prefect", - "pydantic/pydantic", - "huggingface/transformers" - ]) + # Run the flow + show_stars( + [ + "PrefectHQ/prefect", + "pydantic/pydantic", + "huggingface/transformers" + ] + ) + ``` + +Calling `.result()` on the list of futures returned by `.map()` will block until all tasks are complete. + +Read more in the [`.map()` documentation](/v3/develop/task-runners#mapping-over-iterables). + + + ## Avoid getting rate limited One consequence of running tasks concurrently is that you're more likely to hit the rate limits of whatever API you're using. @@ -172,74 +178,56 @@ To avoid this, use Prefect to set a global concurrency limit. 
prefect gcl create github-api --limit 60 --slot-decay-per-second 0.016 ``` -Now, you can use this global concurrency limit in your code: +Now, you can use this global concurrency limit in your code to rate limit your API requests. ```python -from prefect import flow +from typing import Any +from prefect import task from prefect.concurrency.sync import rate_limit -@flow(log_prints=True) -def show_stars(github_repos: list[str]): - """Flow: Show the number of stars that GitHub repos have""" - - repo_stats = [] - for repo in github_repos: - # Apply the concurrency limit to this loop - rate_limit("github-api") - - # Call Task 1 - repo_stats.append({ - 'repo': repo, - 'task': fetch_stats.submit(repo) - }) - - # ... +@task +def fetch_stats(github_repo: str) -> dict[str, Any]: + """Task 1: Fetch the statistics for a GitHub repo""" + rate_limit("github-api") + return httpx.get(f"https://api.github.com/repos/{github_repo}").json() ``` Run the following code to see concurrency limits in action: ```python -import httpx +from typing import Any -from prefect import flow, task # Prefect flow and task decorators +import httpx +from prefect import flow, task from prefect.concurrency.sync import rate_limit - -@flow(log_prints=True) -def show_stars(github_repos: list[str]): - """Flow: Show the number of stars that GitHub repos have""" - - repo_stats = [] - for repo in github_repos: - # Apply the concurrency limit to this loop - rate_limit("github-api") - - # Call Task 1 - repo_stats.append({ - 'repo': repo, - 'task': fetch_stats.submit(repo) - }) - - # Call Task 2 - stars = get_stars(repo_stats) - - # Print the result - print(f"{repo}: {stars} stars") - - -@task -def fetch_stats(github_repo: str): +@task(retries=3) +def fetch_stats(github_repo: str) -> dict[str, Any]: """Task 1: Fetch the statistics for a GitHub repo""" - + rate_limit("github-api") return httpx.get(f"https://api.github.com/repos/{github_repo}").json() @task -def get_stars(repo_stats: dict): +def get_stars(repo_stats: dict[str, Any]) -> int: """Task 2: Get the number of stars from GitHub repo statistics""" + return repo_stats["stargazers_count"] - return repo_stats['stargazers_count'] + +@flow(log_prints=True) +def show_stars(github_repos: list[str]) -> None: + """Flow: Show number of GitHub repo stars""" + + # Task 1: Make HTTP requests concurrently + stats_futures = fetch_stats.map(github_repos) + + # Task 2: Once each concurrent task completes, get the star counts + stars = get_stars.map(stats_futures).result() + + # Show the results + for repo, star_count in zip(github_repos, stars): + print(f"{repo}: {star_count} stars") # Run the flow @@ -258,13 +246,14 @@ For efficiency, you can skip tasks that have already run. For example, if you don't want to fetch the number of stars for a given repository more than once per day, you can cache those results for a day. ```python +from typing import Any from datetime import timedelta from prefect import task from prefect.cache_policies import INPUTS @task(cache_policy=INPUTS, cache_expiration=timedelta(days=1)) -def fetch_stats(github_repo: str): +def fetch_stats(github_repo: str) -> dict[str, Any]: """Task 1: Fetch the statistics for a GitHub repo""" # ... 
``` @@ -273,40 +262,44 @@ def fetch_stats(github_repo: str): Run the following code to see caching in action: ```python +from typing import Any from datetime import timedelta -import httpx -from prefect import flow, task # Prefect flow and task decorators +import httpx +from prefect import flow, task from prefect.cache_policies import INPUTS +from prefect.concurrency.sync import rate_limit - -@flow(log_prints=True) -def show_stars(github_repos: list[str]): - """Flow: Show the number of stars that GitHub repos have""" - - for repo in github_repos: - # Call Task 1 - repo_stats = fetch_stats(repo) - - # Call Task 2 - stars = get_stars(repo_stats) - - # Print the result - print(f"{repo}: {stars} stars") - - -@task(cache_policy=INPUTS, cache_expiration=timedelta(days=1)) -def fetch_stats(github_repo: str): +@task( + retries=3, + cache_policy=INPUTS, + cache_expiration=timedelta(days=1) +) +def fetch_stats(github_repo: str) -> dict[str, Any]: """Task 1: Fetch the statistics for a GitHub repo""" - + rate_limit("github-api") return httpx.get(f"https://api.github.com/repos/{github_repo}").json() @task -def get_stars(repo_stats: dict): +def get_stars(repo_stats: dict[str, Any]) -> int: """Task 2: Get the number of stars from GitHub repo statistics""" + return repo_stats["stargazers_count"] - return repo_stats['stargazers_count'] + +@flow(log_prints=True) +def show_stars(github_repos: list[str]) -> None: + """Flow: Show number of GitHub repo stars""" + + # Task 1: Make HTTP requests concurrently + stats_futures = fetch_stats.map(github_repos) + + # Task 2: Once each concurrent task completes, get the star counts + stars = get_stars.map(stats_futures).result() + + # Show the results + for repo, star_count in zip(github_repos, stars): + print(f"{repo}: {star_count} stars") # Run the flow @@ -324,48 +317,44 @@ if __name__ == "__main__": This is what your flow looks like after applying all of these improvements: ```python my_data_pipeline.py +from typing import Any from datetime import timedelta -import httpx +import httpx from prefect import flow, task from prefect.cache_policies import INPUTS from prefect.concurrency.sync import rate_limit +@task( + retries=3, + cache_policy=INPUTS, + cache_expiration=timedelta(days=1) +) +def fetch_stats(github_repo: str) -> dict[str, Any]: + """Task 1: Fetch the statistics for a GitHub repo""" + rate_limit("github-api") + return httpx.get(f"https://api.github.com/repos/{github_repo}").json() -@flow(log_prints=True) -def show_stars(github_repos: list[str]): - """Flow: Show the number of stars that GitHub repos have""" - - # Task 1: Make HTTP requests concurrently while respecting concurrency limits - repo_stats = [] - for repo in github_repos: - rate_limit("github-api") - repo_stats.append({ - 'repo': repo, - 'task': fetch_stats.submit(repo) # Submit each task to a task runner - }) - - # Task 2: Once each concurrent task completes, show the results - for repo in repo_stats: - repo_name = repo['repo'] - stars = get_stars(repo['task'].result()) # Block until the task has completed - print(f"{repo_name}: {stars} stars") +@task +def get_stars(repo_stats: dict[str, Any]) -> int: + """Task 2: Get the number of stars from GitHub repo statistics""" + return repo_stats["stargazers_count"] -@task(retries=3, cache_policy=INPUTS, cache_expiration=timedelta(days=1)) -def fetch_stats(github_repo: str): - """Task 1: Fetch the statistics for a GitHub repo""" - api_response = httpx.get(f"https://api.github.com/repos/{github_repo}") - api_response.raise_for_status() # Force a retry if 
you don't get a 2xx status code - return api_response.json() +@flow(log_prints=True) +def show_stars(github_repos: list[str]) -> None: + """Flow: Show number of GitHub repo stars""" + # Task 1: Make HTTP requests concurrently + stats_futures = fetch_stats.map(github_repos) -@task -def get_stars(repo_stats: dict): - """Task 2: Get the number of stars from GitHub repo statistics""" + # Task 2: Once each concurrent task completes, get the star counts + stars = get_stars.map(stats_futures).result() - return repo_stats['stargazers_count'] + # Show the results + for repo, star_count in zip(github_repos, stars): + print(f"{repo}: {star_count} stars") # Run the flow @@ -383,25 +372,24 @@ Run your flow twice: once to run the tasks and cache the result, again to retrie # Run the tasks and cache the results python my_data_pipeline.py -# Retrieve the cached results +# Run again (notice the cached results) python my_data_pipeline.py ``` The terminal output from the second flow run should look like this: ```bash -09:08:12.265 | INFO | prefect.engine - Created flow run 'laughing-nightingale' for flow 'show-stars' -09:08:12.266 | INFO | prefect.engine - View at http://127.0.0.1:4200/runs/flow-run/541864e8-12f7-4890-9397-b2ed361f6b20 -09:08:12.322 | INFO | Task run 'fetch_stats-0c9' - Finished in state Cached(type=COMPLETED) -09:08:12.359 | INFO | Task run 'fetch_stats-e89' - Finished in state Cached(type=COMPLETED) -09:08:12.360 | INFO | Task run 'get_stars-b51' - Finished in state Completed() -09:08:12.361 | INFO | Flow run 'laughing-nightingale' - PrefectHQ/prefect: 17320 stars -09:08:12.372 | INFO | Task run 'fetch_stats-8ef' - Finished in state Cached(type=COMPLETED) -09:08:12.374 | INFO | Task run 'get_stars-08d' - Finished in state Completed() -09:08:12.374 | INFO | Flow run 'laughing-nightingale' - pydantic/pydantic: 186319 stars -09:08:12.387 | INFO | Task run 'get_stars-2af' - Finished in state Completed() -09:08:12.387 | INFO | Flow run 'laughing-nightingale' - huggingface/transformers: 134849 stars -09:08:12.404 | INFO | Flow run 'laughing-nightingale' - Finished in state Completed() +20:03:04.398 | INFO | prefect.engine - Created flow run 'laughing-nightingale' for flow 'show-stars' +20:03:05.146 | INFO | Task run 'fetch_stats-90f' - Finished in state Cached(type=COMPLETED) +20:03:05.149 | INFO | Task run 'fetch_stats-258' - Finished in state Cached(type=COMPLETED) +20:03:05.153 | INFO | Task run 'fetch_stats-924' - Finished in state Cached(type=COMPLETED) +20:03:05.159 | INFO | Task run 'get_stars-3a9' - Finished in state Completed() +20:03:05.159 | INFO | Task run 'get_stars-ed3' - Finished in state Completed() +20:03:05.161 | INFO | Task run 'get_stars-39c' - Finished in state Completed() +20:03:05.162 | INFO | Flow run 'laughing-nightingale' - PrefectHQ/prefect: 17756 stars +20:03:05.163 | INFO | Flow run 'laughing-nightingale' - pydantic/pydantic: 21613 stars +20:03:05.163 | INFO | Flow run 'laughing-nightingale' - huggingface/transformers: 136166 stars +20:03:05.339 | INFO | Flow run 'laughing-nightingale' - Finished in state Completed() ``` ## Next steps @@ -418,4 +406,4 @@ You'll use error handling, pagination, and nested flows to scrape data from GitH Need help? [Book a meeting](https://calendly.com/prefect-experts/prefect-product-advocates?utm_campaign=prefect_docs_cloud&utm_content=prefect_docs&utm_medium=docs&utm_source=docs) with a Prefect Product Advocate to get your questions answered. 
- + \ No newline at end of file diff --git a/docs/v3/tutorials/platform.mdx b/docs/v3/tutorials/platform.mdx index 07f5543642d7..e5f4836a04be 100644 --- a/docs/v3/tutorials/platform.mdx +++ b/docs/v3/tutorials/platform.mdx @@ -13,7 +13,7 @@ To complete this tutorial, you'll need: - Git - Docker - Python 3.9 or newer -- A forever free [Prefect Cloud](https://app.prefect.cloud/) account +- A paid [Prefect Cloud](https://app.prefect.cloud/) account (free accounts do not support multiple workspaces) You'll also need to clone the Prefect demo repository and install the prefect library: @@ -37,9 +37,9 @@ prefect --version We recommend one workspace per development environment. In this tutorial, we'll create a production workspace and a staging workspace. -To create a workspace on Prefect Cloud, you'll need a forever free account. +To create multiple workspaces on Prefect Cloud, you'll need a paid account. -1. Head to https://app.prefect.cloud/ and sign in or create an account. +1. Head to https://app.prefect.cloud/ and sign in to your paid account. 1. If you haven't created a workspace yet, you'll be prompted to choose a name for your workspace. Name your first workspace `production`. @@ -101,13 +101,13 @@ Run the following script from the demo repository to create flow runs in each wo ```bash # Run flows in the production workspace -prefect cloud workspace --set "/production" +prefect cloud workspace set --workspace "/production" python simulate_failures.py ``` ```bash # Run flows in the staging workspace -prefect cloud workspace --set "/staging" +prefect cloud workspace set --workspace "/staging" python simulate_failures.py --fail-at-run 3 ``` @@ -188,6 +188,8 @@ If this doesn't perfectly match your use case, here are some variations you can - You can [write flows from scratch](/v3/develop/write-flows). - You can [automate deployments with GitHub Actions](/v3/deploy/infrastructure-concepts/deploy-ci-cd). +Next, learn how to [debug a flow run](/v3/tutorials/debug) when things go wrong. + Need help? [Book a meeting](https://calendly.com/prefect-experts/prefect-product-advocates?utm_campaign=prefect_docs_cloud&utm_content=prefect_docs&utm_medium=docs&utm_source=docs) with a Prefect Product Advocate to get your questions answered. diff --git a/flows/docker_deploy.py b/flows/docker_deploy.py new file mode 100644 index 000000000000..8abd345ff6f9 --- /dev/null +++ b/flows/docker_deploy.py @@ -0,0 +1,105 @@ +import asyncio +import subprocess +import sys +from pathlib import Path +from textwrap import dedent + +from prefect import flow, get_client +from prefect.deployments import run_deployment +from prefect.docker.docker_image import DockerImage + + +async def read_flow_run(flow_run_id: str): + """Read a flow run's state using the Prefect client.""" + async with get_client() as client: + return await client.read_flow_run(flow_run_id) + + +@flow +def flow_that_needs_pandas() -> str: + """A flow that needs pandas.""" + import pandas + + df = pandas.DataFrame({"a": [1, 2, 3]}) + + assert isinstance(df, pandas.DataFrame) + + return "we're done" + + +def main(): + """ + Deploy and run a flow in a Docker container with runtime package installation using uv. + Demonstrates using EXTRA_PIP_PACKAGES to install dependencies at runtime. 
+ """ + try: + subprocess.check_call( + ["prefect", "work-pool", "create", "test-docker-pool", "-t", "docker"], + stdout=sys.stdout, + stderr=sys.stderr, + ) + + dockerfile = Path("docker-deploy.Dockerfile") + + dockerfile.write_text( + dedent( + """ + FROM prefecthq/prefect:3-latest + COPY flows/docker_deploy.py /opt/prefect/flows/docker_deploy.py + """ + ) + ) + + flow_that_needs_pandas.deploy( + name="docker-demo-deployment", + work_pool_name="test-docker-pool", + job_variables={ + "env": {"EXTRA_PIP_PACKAGES": "pandas"}, + "image": "prefect-integration-test-docker-deploy", + }, + image=DockerImage( + name="prefect-integration-test-docker-deploy", + dockerfile=str(dockerfile), + ), + build=True, + push=False, + ) + + dockerfile.unlink() + + flow_run = run_deployment( + "flow-that-needs-pandas/docker-demo-deployment", + timeout=0, + ) + + # Execute the flow run + subprocess.check_call( + [ + "prefect", + "worker", + "start", + "--pool", + "test-docker-pool", + "--run-once", + "--install-policy", + "if-not-present", + ], + stdout=sys.stdout, + stderr=sys.stderr, + ) + + # Check the flow run state + flow_run = asyncio.run(read_flow_run(flow_run.id)) + assert flow_run.state.is_completed(), flow_run.state + + finally: + # Cleanup + subprocess.check_call( + ["prefect", "--no-prompt", "work-pool", "delete", "test-docker-pool"], + stdout=sys.stdout, + stderr=sys.stderr, + ) + + +if __name__ == "__main__": + main() diff --git a/flows/worker.py b/flows/worker.py index 8a73f44029dd..9c8e109a7a0d 100644 --- a/flows/worker.py +++ b/flows/worker.py @@ -4,11 +4,10 @@ from threading import Thread from typing import List -from pydantic_extra_types.pendulum_dt import DateTime - from prefect.events import Event from prefect.events.clients import get_events_subscriber from prefect.events.filters import EventFilter, EventNameFilter, EventOccurredFilter +from prefect.types import DateTime async def watch_worker_events(events: List[Event]): diff --git a/requirements-client.txt b/requirements-client.txt index de5e2b5ab1e5..e5424a1c85c1 100644 --- a/requirements-client.txt +++ b/requirements-client.txt @@ -26,6 +26,7 @@ pydantic_extra_types >= 2.8.2, < 3.0.0 pydantic_settings > 2.2.1 python_dateutil >= 2.8.2, < 3.0.0 python-slugify >= 5.0, < 9.0 +python-socks[asyncio] >= 2.5.3, < 3.0 pyyaml >= 5.4.1, < 7.0.0 rfc3339-validator >= 0.1.4, < 0.2.0 rich >= 11.0, < 14.0 diff --git a/schemas/settings.schema.json b/schemas/settings.schema.json index 686c847b0d2e..dafc013944e1 100644 --- a/schemas/settings.schema.json +++ b/schemas/settings.schema.json @@ -327,6 +327,15 @@ ], "title": "Telemetry Enabled", "type": "boolean" + }, + "lineage_events_enabled": { + "default": false, + "description": "If `True`, enables emitting lineage events. Set to `False` to disable lineage event emission.", + "supported_environment_variables": [ + "PREFECT_EXPERIMENTS_LINEAGE_EVENTS_ENABLED" + ], + "title": "Lineage Events Enabled", + "type": "boolean" } }, "title": "ExperimentsSettings", diff --git a/scripts/entrypoint.sh b/scripts/entrypoint.sh index 1846618dbce1..3c780321ef1d 100755 --- a/scripts/entrypoint.sh +++ b/scripts/entrypoint.sh @@ -8,8 +8,8 @@ if [ -f ~/.bashrc ]; then fi if [ ! 
-z "$EXTRA_PIP_PACKAGES" ]; then - echo "+pip install $EXTRA_PIP_PACKAGES" - pip install $EXTRA_PIP_PACKAGES + echo "+uv pip install $EXTRA_PIP_PACKAGES" + uv pip install --system $EXTRA_PIP_PACKAGES fi if [ -z "$*" ]; then diff --git a/scripts/generate_sdk_docs.py b/scripts/generate_sdk_docs.py index 4fd21871ef39..7a9847356650 100644 --- a/scripts/generate_sdk_docs.py +++ b/scripts/generate_sdk_docs.py @@ -15,7 +15,7 @@ def docs_path() -> Path: SKIPPED = [ "prefect._internal", - "prefect.server.database.migrations", + "prefect.server.database._migrations", ] diff --git a/scripts/proxy-test/Dockerfile b/scripts/proxy-test/Dockerfile new file mode 100644 index 000000000000..93b6c4db9107 --- /dev/null +++ b/scripts/proxy-test/Dockerfile @@ -0,0 +1,11 @@ +FROM python:3.11-slim + +WORKDIR /app + +COPY requirements.txt . +RUN pip install uv +RUN uv pip install --no-cache-dir --system -r requirements.txt + +COPY server.py . + +CMD ["uvicorn", "server:app", "--host", "0.0.0.0", "--port", "8000"] diff --git a/scripts/proxy-test/README.md b/scripts/proxy-test/README.md new file mode 100644 index 000000000000..76a8c8fdc55f --- /dev/null +++ b/scripts/proxy-test/README.md @@ -0,0 +1,9 @@ +This is a simple test to ensure we can make a websocket connection through a proxy server. It sets up a +simple server and a squid proxy server. The proxy server is inaccessible from the host machine, so we +can confirm the proxy connection is working. + +``` +$ uv pip install -r requirements.txt +$ docker compose up --build +$ python client.py +``` diff --git a/scripts/proxy-test/client.py b/scripts/proxy-test/client.py new file mode 100644 index 000000000000..e07e8450b29a --- /dev/null +++ b/scripts/proxy-test/client.py @@ -0,0 +1,28 @@ +import asyncio +import os + +from prefect.events.clients import websocket_connect + +PROXY_URL = "http://localhost:3128" +WS_SERVER_URL = "ws://server:8000/ws" + + +async def test_websocket_proxy_with_compat(): + """WebSocket through proxy with proxy compatibility code - should work""" + os.environ["HTTP_PROXY"] = PROXY_URL + + async with websocket_connect(WS_SERVER_URL) as websocket: + message = "Hello!" + await websocket.send(message) + response = await websocket.recv() + print("Response: ", response) + assert response == f"Server received: {message}" + + +async def main(): + print("Testing WebSocket through proxy with compatibility code") + await test_websocket_proxy_with_compat() + + +if __name__ == "__main__": + asyncio.run(main()) diff --git a/scripts/proxy-test/docker-compose.yml b/scripts/proxy-test/docker-compose.yml new file mode 100644 index 000000000000..ab26fdaae98c --- /dev/null +++ b/scripts/proxy-test/docker-compose.yml @@ -0,0 +1,20 @@ +services: + server: + build: . 
+ networks: + - internal_net + + forward_proxy: + image: ubuntu/squid + ports: + - "3128:3128" + volumes: + - ./squid.conf:/etc/squid/squid.conf + networks: + - internal_net + - external_net + +networks: + internal_net: + internal: true + external_net: diff --git a/scripts/proxy-test/requirements.txt b/scripts/proxy-test/requirements.txt new file mode 100644 index 000000000000..9cfd79360c5a --- /dev/null +++ b/scripts/proxy-test/requirements.txt @@ -0,0 +1,6 @@ +fastapi==0.111.1 +uvicorn==0.28.1 +uv==0.5.7 +websockets==13.1 +python-socks==2.5.3 +httpx==0.28.1 diff --git a/scripts/proxy-test/server.py b/scripts/proxy-test/server.py new file mode 100644 index 000000000000..4f4498d04a7a --- /dev/null +++ b/scripts/proxy-test/server.py @@ -0,0 +1,10 @@ +from fastapi import FastAPI, WebSocket + +app = FastAPI() + + +@app.websocket("/ws") +async def websocket_endpoint(websocket: WebSocket): + await websocket.accept() + async for data in websocket.iter_text(): + await websocket.send_text(f"Server received: {data}") diff --git a/scripts/proxy-test/squid.conf b/scripts/proxy-test/squid.conf new file mode 100644 index 000000000000..8c9261d02d61 --- /dev/null +++ b/scripts/proxy-test/squid.conf @@ -0,0 +1,5 @@ +http_port 3128 +acl CONNECT method CONNECT +acl SSL_ports port 443 8000 +http_access allow CONNECT SSL_ports +http_access allow all diff --git a/scripts/pyright_diff.py b/scripts/pyright_diff.py new file mode 100644 index 000000000000..17dee6f93172 --- /dev/null +++ b/scripts/pyright_diff.py @@ -0,0 +1,87 @@ +import json +import sys +from typing import Any, Dict, NamedTuple + + +class Diagnostic(NamedTuple): + """Structured representation of a diagnostic for easier table formatting.""" + + file: str + line: int + character: int + severity: str + message: str + + +def normalize_diagnostic(diagnostic: Dict[Any, Any]) -> Dict[Any, Any]: + """Normalize a diagnostic by removing or standardizing volatile fields.""" + normalized = diagnostic.copy() + normalized.pop("time", None) + normalized.pop("version", None) + return normalized + + +def load_and_normalize_file(file_path: str) -> Dict[Any, Any]: + """Load a JSON file and normalize its contents.""" + with open(file_path, "r") as f: + data = json.load(f) + return normalize_diagnostic(data) + + +def parse_diagnostic(diag: Dict[Any, Any]) -> Diagnostic: + """Convert a diagnostic dict into a Diagnostic object.""" + file = diag.get("file", "unknown_file") + message = diag.get("message", "no message") + range_info = diag.get("range", {}) + start = range_info.get("start", {}) + line = start.get("line", 0) + char = start.get("character", 0) + severity = diag.get("severity", "unknown") + + return Diagnostic(file, line, char, severity, message) + + +def format_markdown_table(diagnostics: list[Diagnostic]) -> str: + """Format list of diagnostics as a markdown table.""" + if not diagnostics: + return "\nNo new errors found!" + + table = ["| File | Location | Message |", "|------|----------|---------|"] + + for diag in sorted(diagnostics, key=lambda x: (x.file, x.line, x.character)): + # Escape pipe characters and replace newlines with HTML breaks + message = diag.message.replace("|", "\\|").replace("\n", "
") + location = f"L{diag.line}:{diag.character}" + table.append(f"| {diag.file} | {location} | {message} |") + + return "\n".join(table) + + +def compare_pyright_outputs(base_file: str, new_file: str) -> None: + """Compare two pyright JSON output files and display only new errors.""" + base_data = load_and_normalize_file(base_file) + new_data = load_and_normalize_file(new_file) + + # Group diagnostics by file + base_diags = set() + new_diags = set() + + # Process diagnostics from type completeness symbols + for data, diag_set in [(base_data, base_diags), (new_data, new_diags)]: + for symbol in data.get("typeCompleteness", {}).get("symbols", []): + for diag in symbol.get("diagnostics", []): + if diag.get("severity", "") == "error": + diag_set.add(parse_diagnostic(diag)) + + # Find new errors + new_errors = list(new_diags - base_diags) + + print(format_markdown_table(new_errors)) + + +if __name__ == "__main__": + if len(sys.argv) != 3: + print("Usage: python pyright_diff.py ") + sys.exit(1) + + compare_pyright_outputs(sys.argv[1], sys.argv[2]) diff --git a/setup.cfg b/setup.cfg index fec375d062b9..f32eb182605d 100644 --- a/setup.cfg +++ b/setup.cfg @@ -98,7 +98,8 @@ branch = True exclude_lines = # Don't complain about abstract methods, they aren't run: @(abc\.)?abstractmethod - + # if TYPE_CHECKING: lines are never nun + if TYPE_CHECKING: omit = src/prefect/server/database/migrations/versions/* diff --git a/src/integrations/prefect-azure/pyproject.toml b/src/integrations/prefect-azure/pyproject.toml index 60bb213454e3..84d0cb3deb87 100644 --- a/src/integrations/prefect-azure/pyproject.toml +++ b/src/integrations/prefect-azure/pyproject.toml @@ -27,7 +27,7 @@ dependencies = [ "azure_identity>=1.10", "azure_mgmt_containerinstance>=10.0", "azure-mgmt-resource>=21.2", - "prefect>=3.0.0", + "prefect>=3.1.1", "setuptools", #required in 3.12 to get pkg_resources (used by azureml.core) ] dynamic = ["version"] diff --git a/src/integrations/prefect-dbt/prefect_dbt/cli/configs/base.py b/src/integrations/prefect-dbt/prefect_dbt/cli/configs/base.py index 4aff4a005c1c..1104fc0b7a6c 100644 --- a/src/integrations/prefect-dbt/prefect_dbt/cli/configs/base.py +++ b/src/integrations/prefect-dbt/prefect_dbt/cli/configs/base.py @@ -2,11 +2,13 @@ import abc from pathlib import Path -from typing import Any, Dict, Optional +from typing import Any, Dict, Optional, Type from pydantic import BaseModel, Field +from typing_extensions import Self from prefect.blocks.core import Block +from prefect_dbt.utilities import load_profiles_yml class DbtConfigs(Block, abc.ABC): @@ -147,6 +149,79 @@ class TargetConfigs(BaseTargetConfigs): _logo_url = "https://images.ctfassets.net/gm98wzqotmnx/5zE9lxfzBHjw3tnEup4wWL/9a001902ed43a84c6c96d23b24622e19/dbt-bit_tm.png?h=250" # noqa _documentation_url = "https://docs.prefect.io/integrations/prefect-dbt" # noqa + @classmethod + def from_profiles_yml( + cls: Type[Self], + profile_name: Optional[str] = None, + target_name: Optional[str] = None, + profiles_dir: Optional[str] = None, + allow_field_overrides: bool = False, + ) -> "TargetConfigs": + """ + Create a TargetConfigs instance from a dbt profiles.yml file. + + Args: + profile_name: Name of the profile to use from profiles.yml. + If None, uses the first profile. + target_name: Name of the target to use from the profile. + If None, uses the default target in the selected profile. + profiles_dir: Path to the directory containing profiles.yml. + If None, uses the default profiles directory. 
+ allow_field_overrides: If enabled, fields from dbt target configs + will override fields provided in extras and credentials. + + Returns: + A TargetConfigs instance populated from the profiles.yml target. + + Raises: + ValueError: If profiles.yml is not found or if profile/target is invalid + """ + profiles = load_profiles_yml(profiles_dir) + + # If no profile specified, use first non-config one + if profile_name is None: + for name in profiles: + if name != "config": + profile_name = name + break + elif profile_name not in profiles: + raise ValueError(f"Profile {profile_name} not found in profiles.yml") + + profile = profiles[profile_name] + if "outputs" not in profile: + raise ValueError(f"No outputs found in profile {profile_name}") + + outputs = profile["outputs"] + + # If no target specified, use default target + if target_name is None: + target_name = profile["target"] + elif target_name not in outputs: + raise ValueError( + f"Target {target_name} not found in profile {profile_name}" + ) + + target_config = outputs[target_name] + + type = target_config.pop("type") + schema = None + possible_keys = ["schema", "path", "dataset", "database"] + for key in possible_keys: + if key in target_config: + schema = target_config.pop(key) + break + + if schema is None: + raise ValueError(f"No schema found. Expected one of: {possible_keys}") + threads = target_config.pop("threads", 4) + return cls( + type=type, + schema=schema, + threads=threads, + extras=target_config or None, + allow_field_overrides=allow_field_overrides, + ) + class GlobalConfigs(DbtConfigs): """ diff --git a/src/integrations/prefect-dbt/prefect_dbt/utilities.py b/src/integrations/prefect-dbt/prefect_dbt/utilities.py new file mode 100644 index 000000000000..9430e869ef0a --- /dev/null +++ b/src/integrations/prefect-dbt/prefect_dbt/utilities.py @@ -0,0 +1,40 @@ +""" +Utility functions for prefect-dbt +""" +import os +from typing import Any, Dict, Optional + +import yaml + + +def get_profiles_dir() -> str: + """Get the dbt profiles directory from environment or default location.""" + profiles_dir = os.getenv("DBT_PROFILES_DIR") + if not profiles_dir: + profiles_dir = os.path.expanduser("~/.dbt") + return profiles_dir + + +def load_profiles_yml(profiles_dir: Optional[str]) -> Dict[str, Any]: + """ + Load and parse the profiles.yml file. + + Args: + profiles_dir: Path to the directory containing profiles.yml. + If None, uses the default profiles directory. 
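A small, hypothetical usage sketch of the helpers added in `prefect_dbt/utilities.py` (the path is a placeholder; behavior follows the docstrings here, where `DBT_PROFILES_DIR` is consulted first, `~/.dbt` is the fallback, and a missing profiles.yml raises `ValueError`):

```
import os

from prefect_dbt.utilities import get_profiles_dir, load_profiles_yml

os.environ["DBT_PROFILES_DIR"] = "/opt/dbt"  # optional override; default is ~/.dbt
profiles = load_profiles_yml(None)  # None falls back to get_profiles_dir()
print(get_profiles_dir(), list(profiles))  # profile names defined in profiles.yml
```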
+ + Returns: + Dict containing the parsed profiles.yml contents + + Raises: + ValueError: If profiles.yml is not found + """ + if profiles_dir is None: + profiles_dir = get_profiles_dir() + + profiles_path = os.path.join(profiles_dir, "profiles.yml") + if not os.path.exists(profiles_path): + raise ValueError(f"No profiles.yml found at {profiles_path}") + + with open(profiles_path, "r") as f: + return yaml.safe_load(f) diff --git a/src/integrations/prefect-dbt/tests/cli/configs/test_base.py b/src/integrations/prefect-dbt/tests/cli/configs/test_base.py index dafd1aa625c1..3d5dd39a68c5 100644 --- a/src/integrations/prefect-dbt/tests/cli/configs/test_base.py +++ b/src/integrations/prefect-dbt/tests/cli/configs/test_base.py @@ -1,8 +1,48 @@ from pathlib import Path +from unittest.mock import patch import pytest from prefect_dbt.cli.configs.base import GlobalConfigs, TargetConfigs +SAMPLE_PROFILES = { + "jaffle_shop": { + "outputs": { + "dev": { + "type": "duckdb", + "path": "jaffle_shop.duckdb", + "schema": "main", + "threads": 4, + }, + "prod": { + "type": "duckdb", + "path": "/data/prod/jaffle_shop.duckdb", + "schema": "main", + "threads": 8, + }, + }, + "target": "prod", + }, + "other_project": { + "outputs": { + "dev": { + "type": "duckdb", + "path": "other_project.duckdb", + "schema": "analytics", + "threads": 4, + } + }, + "target": "dev", + }, + "config": {"partial_parse": True}, +} + + +@pytest.fixture +def mock_load_profiles(): + with patch("prefect_dbt.cli.configs.base.load_profiles_yml") as mock: + mock.return_value = SAMPLE_PROFILES + yield mock + def test_target_configs_get_configs(): target_configs = TargetConfigs( @@ -41,3 +81,79 @@ def test_global_configs(): global_configs = GlobalConfigs(log_format="json", send_anonymous_usage_stats=False) assert global_configs.log_format == "json" assert global_configs.send_anonymous_usage_stats is False + + +def test_from_profiles_yml_default_profile_target(mock_load_profiles): + target_configs = TargetConfigs.from_profiles_yml() + + assert target_configs.type == "duckdb" + assert target_configs.schema_ == "main" + assert target_configs.threads == 8 + assert target_configs.extras == {"path": "/data/prod/jaffle_shop.duckdb"} + + +def test_from_profiles_yml_explicit_profile_target(mock_load_profiles): + target_configs = TargetConfigs.from_profiles_yml( + profile_name="other_project", target_name="dev" + ) + + assert target_configs.type == "duckdb" + assert target_configs.schema_ == "analytics" + assert target_configs.threads == 4 + assert target_configs.extras == {"path": "other_project.duckdb"} + + +def test_from_profiles_yml_invalid_profile(mock_load_profiles): + with pytest.raises(ValueError, match="Profile invalid_profile not found"): + TargetConfigs.from_profiles_yml(profile_name="invalid_profile") + + +def test_from_profiles_yml_invalid_target(mock_load_profiles): + with pytest.raises(ValueError, match="Target invalid_target not found"): + TargetConfigs.from_profiles_yml( + profile_name="jaffle_shop", target_name="invalid_target" + ) + + +def test_from_profiles_yml_no_outputs(mock_load_profiles): + mock_load_profiles.return_value = {"broken": {"some_other_key": {}}} + with pytest.raises(ValueError, match="No outputs found in profile broken"): + TargetConfigs.from_profiles_yml(profile_name="broken") + + +def test_from_profiles_yml_no_schema(mock_load_profiles): + mock_load_profiles.return_value = { + "test": { + "outputs": { + "dev": { + "type": "postgres", + "threads": 4, + # Missing schema field + "host": "localhost", + } + }, + 
"target": "dev", + } + } + with pytest.raises(ValueError, match="No schema found"): + TargetConfigs.from_profiles_yml(profile_name="test") + + +def test_from_profiles_yml_alternative_schema_keys(mock_load_profiles): + mock_profiles = { + "test": { + "outputs": { + "dev": { + "type": "bigquery", + "threads": 4, + "dataset": "my_dataset", # Alternative to schema + "project": "my_project", + } + }, + "target": "dev", + } + } + mock_load_profiles.return_value = mock_profiles + + target_configs = TargetConfigs.from_profiles_yml(profile_name="test") + assert target_configs.schema_ == "my_dataset" diff --git a/src/integrations/prefect-dbt/tests/cloud/test_jobs.py b/src/integrations/prefect-dbt/tests/cloud/test_jobs.py index 621dd27e260d..28d33bc2ebd0 100644 --- a/src/integrations/prefect-dbt/tests/cloud/test_jobs.py +++ b/src/integrations/prefect-dbt/tests/cloud/test_jobs.py @@ -2,6 +2,7 @@ import os import pytest +import respx from httpx import Response from prefect_dbt.cloud.credentials import DbtCloudCredentials from prefect_dbt.cloud.exceptions import ( @@ -37,12 +38,6 @@ def dbt_cloud_job(dbt_cloud_credentials): return DbtCloudJob(job_id=10000, dbt_cloud_credentials=dbt_cloud_credentials) -@pytest.fixture -def respx_mock_with_pass_through(respx_mock): - respx_mock.route(host="127.0.0.1").pass_through() - return respx_mock - - HEADERS = { "Authorization": "Bearer my_api_key", "x-dbt-partner-source": "prefect", @@ -51,403 +46,390 @@ def respx_mock_with_pass_through(respx_mock): class TestTriggerDbtCloudJobRun: - async def test_get_dbt_cloud_job_info( - self, respx_mock_with_pass_through, dbt_cloud_credentials - ): - respx_mock_with_pass_through.route(host="127.0.0.1").pass_through() - respx_mock_with_pass_through.get( - "https://cloud.getdbt.com/api/v2/accounts/123456789/jobs/12/", - headers=HEADERS, - ).mock(return_value=Response(200, json={"data": {"id": 10000}})) - - response = await get_dbt_cloud_job_info.fn( - dbt_cloud_credentials=dbt_cloud_credentials, - job_id=12, - order_by="id", - ) - - assert response == {"id": 10000} - - async def test_trigger_job_with_no_options( - self, respx_mock_with_pass_through, dbt_cloud_credentials - ): - respx_mock_with_pass_through.post( - "https://cloud.getdbt.com/api/v2/accounts/123456789/jobs/1/run/", - headers=HEADERS, - ).mock( - return_value=Response( - 200, json={"data": {"id": 10000, "project_id": 12345}} + async def test_get_dbt_cloud_job_info(self, dbt_cloud_credentials): + with respx.mock(using="httpx", assert_all_called=False) as respx_mock: + respx_mock.route(host="127.0.0.1").pass_through() + respx_mock.get( + "https://cloud.getdbt.com/api/v2/accounts/123456789/jobs/12/", + headers=HEADERS, + ).mock(return_value=Response(200, json={"data": {"id": 10000}})) + + response = await get_dbt_cloud_job_info.fn( + dbt_cloud_credentials=dbt_cloud_credentials, + job_id=12, + order_by="id", ) - ) - with disable_run_logger(): - result = await trigger_dbt_cloud_job_run.fn( - dbt_cloud_credentials=dbt_cloud_credentials, - job_id=1, + assert response == {"id": 10000} + + async def test_trigger_job_with_no_options(self, dbt_cloud_credentials): + with respx.mock(using="httpx") as respx_mock: + respx_mock.post( + "https://cloud.getdbt.com/api/v2/accounts/123456789/jobs/1/run/", + headers=HEADERS, + ).mock( + return_value=Response( + 200, json={"data": {"id": 10000, "project_id": 12345}} + ) ) - assert result == {"id": 10000, "project_id": 12345} + with disable_run_logger(): + result = await trigger_dbt_cloud_job_run.fn( + 
dbt_cloud_credentials=dbt_cloud_credentials, + job_id=1, + ) - request_body = json.loads( - respx_mock_with_pass_through.calls.last.request.content.decode() - ) - assert "Triggered via Prefect" in request_body["cause"] + assert result == {"id": 10000, "project_id": 12345} - async def test_trigger_with_custom_options( - self, respx_mock_with_pass_through, dbt_cloud_credentials - ): - respx_mock_with_pass_through.route(host="127.0.0.1").pass_through() - respx_mock_with_pass_through.post( - "https://cloud.getdbt.com/api/v2/accounts/123456789/jobs/1/run/", - headers=HEADERS, - json={ - "cause": "This is a custom cause", - "git_branch": "staging", - "schema_override": "dbt_cloud_pr_123", - "dbt_version_override": "0.18.0", - "threads_override": 8, - "target_name_override": "staging", - "generate_docs_override": True, - "timeout_seconds_override": 3000, - "steps_override": [ - "dbt seed", - "dbt run --fail-fast", - "dbt test --fail fast", - ], - }, - ).mock( - return_value=Response( - 200, json={"data": {"id": 10000, "project_id": 12345}} - ) - ) + request_body = json.loads(respx_mock.calls.last.request.content.decode()) + assert "Triggered via Prefect" in request_body["cause"] - @flow - async def test_trigger_with_custom_options(): - return await trigger_dbt_cloud_job_run( - dbt_cloud_credentials=dbt_cloud_credentials, - job_id=1, - options=TriggerJobRunOptions( - cause="This is a custom cause", - git_branch="staging", - schema_override="dbt_cloud_pr_123", - dbt_version_override="0.18.0", - target_name_override="staging", - timeout_seconds_override=3000, - generate_docs_override=True, - threads_override=8, - steps_override=[ + async def test_trigger_with_custom_options(self, dbt_cloud_credentials): + with respx.mock(using="httpx") as respx_mock: + respx_mock.route(host="127.0.0.1").pass_through() + respx_mock.post( + "https://cloud.getdbt.com/api/v2/accounts/123456789/jobs/1/run/", + headers=HEADERS, + json={ + "cause": "This is a custom cause", + "git_branch": "staging", + "schema_override": "dbt_cloud_pr_123", + "dbt_version_override": "0.18.0", + "threads_override": 8, + "target_name_override": "staging", + "generate_docs_override": True, + "timeout_seconds_override": 3000, + "steps_override": [ "dbt seed", "dbt run --fail-fast", "dbt test --fail fast", ], - ), - ) - - result = await test_trigger_with_custom_options() - assert result == {"id": 10000, "project_id": 12345} - - async def test_trigger_nonexistent_job( - self, respx_mock_with_pass_through, dbt_cloud_credentials - ): - respx_mock_with_pass_through.route(host="127.0.0.1").pass_through() - respx_mock_with_pass_through.post( - "https://cloud.getdbt.com/api/v2/accounts/123456789/jobs/1/run/", - headers=HEADERS, - ).mock( - return_value=Response(404, json={"status": {"user_message": "Not found!"}}) - ) - - @flow - async def test_trigger_nonexistent_job(): - task_shorter_retry = trigger_dbt_cloud_job_run.with_options( - retries=1, retry_delay_seconds=1 - ) - await task_shorter_retry( - dbt_cloud_credentials=dbt_cloud_credentials, - job_id=1, - ) - - with pytest.raises(DbtCloudJobRunTriggerFailed, match="Not found!"): - await test_trigger_nonexistent_job() + }, + ).mock( + return_value=Response( + 200, json={"data": {"id": 10000, "project_id": 12345}} + ) + ) + + @flow + async def test_trigger_with_custom_options(): + return await trigger_dbt_cloud_job_run( + dbt_cloud_credentials=dbt_cloud_credentials, + job_id=1, + options=TriggerJobRunOptions( + cause="This is a custom cause", + git_branch="staging", + 
schema_override="dbt_cloud_pr_123", + dbt_version_override="0.18.0", + target_name_override="staging", + timeout_seconds_override=3000, + generate_docs_override=True, + threads_override=8, + steps_override=[ + "dbt seed", + "dbt run --fail-fast", + "dbt test --fail fast", + ], + ), + ) + + result = await test_trigger_with_custom_options() + assert result == {"id": 10000, "project_id": 12345} + + async def test_trigger_nonexistent_job(self, dbt_cloud_credentials): + with respx.mock(using="httpx", assert_all_called=False) as respx_mock: + respx_mock.route(host="127.0.0.1").pass_through() + respx_mock.post( + "https://cloud.getdbt.com/api/v2/accounts/123456789/jobs/1/run/", + headers=HEADERS, + ).mock( + return_value=Response( + 404, json={"status": {"user_message": "Not found!"}} + ) + ) + + @flow + async def test_trigger_nonexistent_job(): + task_shorter_retry = trigger_dbt_cloud_job_run.with_options( + retries=1, retry_delay_seconds=1 + ) + await task_shorter_retry( + dbt_cloud_credentials=dbt_cloud_credentials, + job_id=1, + ) + + with pytest.raises(DbtCloudJobRunTriggerFailed, match="Not found!"): + await test_trigger_nonexistent_job() async def test_trigger_nonexistent_run_id_no_logs( - self, respx_mock_with_pass_through, dbt_cloud_credentials, caplog + self, dbt_cloud_credentials, caplog ): - respx_mock_with_pass_through.route(host="127.0.0.1").pass_through() - respx_mock_with_pass_through.post( - "https://cloud.getdbt.com/api/v2/accounts/123456789/jobs/1/run/", - headers=HEADERS, - ).mock(return_value=Response(200, json={"data": {"project_id": 12345}})) - - @flow - async def trigger_nonexistent_run_id(): - task_shorter_retry = trigger_dbt_cloud_job_run.with_options( - retries=1, retry_delay_seconds=1 - ) - await task_shorter_retry( - dbt_cloud_credentials=dbt_cloud_credentials, - job_id=1, - ) - - await trigger_nonexistent_run_id() + with respx.mock(using="httpx", assert_all_called=False) as respx_mock: + respx_mock.route(host="127.0.0.1").pass_through() + respx_mock.post( + "https://cloud.getdbt.com/api/v2/accounts/123456789/jobs/1/run/", + headers=HEADERS, + ).mock(return_value=Response(200, json={"data": {"project_id": 12345}})) + + @flow + async def trigger_nonexistent_run_id(): + task_shorter_retry = trigger_dbt_cloud_job_run.with_options( + retries=1, retry_delay_seconds=1 + ) + await task_shorter_retry( + dbt_cloud_credentials=dbt_cloud_credentials, + job_id=1, + ) + + await trigger_nonexistent_run_id() class TestTriggerDbtCloudJobRunAndWaitForCompletion: - @pytest.mark.respx(assert_all_called=True) - async def test_run_success( - self, respx_mock_with_pass_through, dbt_cloud_credentials - ): - respx_mock_with_pass_through.route(host="127.0.0.1").pass_through() - respx_mock_with_pass_through.post( - "https://cloud.getdbt.com/api/v2/accounts/123456789/jobs/1/run/", - headers=HEADERS, - ).mock( - return_value=Response( - 200, json={"data": {"id": 10000, "project_id": 12345}} - ) - ) - respx_mock_with_pass_through.get( - "https://cloud.getdbt.com/api/v2/accounts/123456789/runs/10000/", - headers=HEADERS, - ).mock(return_value=Response(200, json={"data": {"id": 10000, "status": 10}})) - respx_mock_with_pass_through.get( - "https://cloud.getdbt.com/api/v2/accounts/123456789/runs/10000/artifacts/", - headers=HEADERS, - ).mock(return_value=Response(200, json={"data": ["manifest.json"]})) - - result = await trigger_dbt_cloud_job_run_and_wait_for_completion( - dbt_cloud_credentials=dbt_cloud_credentials, job_id=1 - ) - assert result == { - "id": 10000, - "status": 10, - 
"artifact_paths": ["manifest.json"], - } - - @pytest.mark.respx(assert_all_called=True) - async def test_run_success_with_wait( - self, respx_mock_with_pass_through, dbt_cloud_credentials - ): - respx_mock_with_pass_through.route(host="127.0.0.1").pass_through() - respx_mock_with_pass_through.post( - "https://cloud.getdbt.com/api/v2/accounts/123456789/jobs/1/run/", - headers=HEADERS, - ).mock( - return_value=Response( - 200, json={"data": {"id": 10000, "project_id": 12345}} - ) - ) - respx_mock_with_pass_through.get( - "https://cloud.getdbt.com/api/v2/accounts/123456789/runs/10000/", - headers=HEADERS, - ).mock( - side_effect=[ - Response(200, json={"data": {"id": 10000, "status": 1}}), - Response(200, json={"data": {"id": 10000, "status": 3}}), - Response(200, json={"data": {"id": 10000, "status": 10}}), - ] - ) - respx_mock_with_pass_through.get( - "https://cloud.getdbt.com/api/v2/accounts/123456789/runs/10000/artifacts/", - headers=HEADERS, - ).mock(return_value=Response(200, json={"data": ["manifest.json"]})) - - result = await trigger_dbt_cloud_job_run_and_wait_for_completion( - dbt_cloud_credentials=dbt_cloud_credentials, - job_id=1, - poll_frequency_seconds=1, - ) - assert result == { - "id": 10000, - "status": 10, - "artifact_paths": ["manifest.json"], - } - - @pytest.mark.respx(assert_all_called=True) - async def test_run_failure_with_wait_and_retry( - self, respx_mock_with_pass_through, dbt_cloud_credentials - ): - respx_mock_with_pass_through.route(host="127.0.0.1").pass_through() - respx_mock_with_pass_through.post( - "https://cloud.getdbt.com/api/v2/accounts/123456789/jobs/1/run/", - headers=HEADERS, - ).mock( - return_value=Response( - 200, json={"data": {"id": 10000, "project_id": 12345}} - ) - ) - respx_mock_with_pass_through.get( - "https://cloud.getdbt.com/api/v2/accounts/123456789/runs/10000/", - headers=HEADERS, - ).mock( - side_effect=[ - Response(200, json={"data": {"id": 10000, "status": 1}}), - Response(200, json={"data": {"id": 10000, "status": 3}}), - Response( - 200, json={"data": {"id": 10000, "status": 20}} - ), # failed status - ] - ) - - with pytest.raises(DbtCloudJobRunFailed): - await trigger_dbt_cloud_job_run_and_wait_for_completion( - dbt_cloud_credentials=dbt_cloud_credentials, - job_id=1, - poll_frequency_seconds=1, - retry_filtered_models_attempts=1, - ) - - @pytest.mark.respx(assert_all_called=True) - async def test_run_with_unexpected_status( - self, respx_mock_with_pass_through, dbt_cloud_credentials - ): - respx_mock_with_pass_through.route(host="127.0.0.1").pass_through() - respx_mock_with_pass_through.post( - "https://cloud.getdbt.com/api/v2/accounts/123456789/jobs/1/run/", - headers=HEADERS, - ).mock( - return_value=Response( - 200, json={"data": {"id": 10000, "project_id": 12345}} - ) - ) - respx_mock_with_pass_through.get( - "https://cloud.getdbt.com/api/v2/accounts/123456789/runs/10000/", - headers=HEADERS, - ).mock( - side_effect=[ - Response(200, json={"data": {"id": 10000, "status": 1}}), - Response(200, json={"data": {"id": 10000, "status": 3}}), - Response( - 200, json={"data": {"id": 10000, "status": 42}} - ), # unknown status - ] - ) - - with pytest.raises(ValueError, match="42 is not a valid DbtCloudJobRunStatus"): - await trigger_dbt_cloud_job_run_and_wait_for_completion( - dbt_cloud_credentials=dbt_cloud_credentials, - job_id=1, - poll_frequency_seconds=1, - retry_filtered_models_attempts=0, - ) - - @pytest.mark.respx(assert_all_called=True) - async def test_run_failure_no_run_id( - self, respx_mock_with_pass_through, 
dbt_cloud_credentials - ): - respx_mock_with_pass_through.route(host="127.0.0.1").pass_through() - respx_mock_with_pass_through.post( - "https://cloud.getdbt.com/api/v2/accounts/123456789/jobs/1/run/", - headers=HEADERS, - ).mock(return_value=Response(200, json={"data": {"project_id": 12345}})) - - with pytest.raises(RuntimeError, match="Unable to determine run ID"): - await trigger_dbt_cloud_job_run_and_wait_for_completion( - dbt_cloud_credentials=dbt_cloud_credentials, - job_id=1, - poll_frequency_seconds=1, - ) - - @pytest.mark.respx(assert_all_called=True) - async def test_run_cancelled_with_wait( - self, respx_mock_with_pass_through, dbt_cloud_credentials - ): - respx_mock_with_pass_through.route(host="127.0.0.1").pass_through() - respx_mock_with_pass_through.post( - "https://cloud.getdbt.com/api/v2/accounts/123456789/jobs/1/run/", - headers=HEADERS, - ).mock( - return_value=Response( - 200, json={"data": {"id": 10000, "project_id": 12345}} - ) - ) - respx_mock_with_pass_through.get( - "https://cloud.getdbt.com/api/v2/accounts/123456789/runs/10000/", - headers=HEADERS, - ).mock( - side_effect=[ - Response(200, json={"data": {"id": 10000, "status": 1}}), - Response(200, json={"data": {"id": 10000, "status": 3}}), - Response(200, json={"data": {"id": 10000, "status": 30}}), - ] - ) - - with pytest.raises(DbtCloudJobRunCancelled): - await trigger_dbt_cloud_job_run_and_wait_for_completion( - dbt_cloud_credentials=dbt_cloud_credentials, - job_id=1, - poll_frequency_seconds=1, - retry_filtered_models_attempts=0, - ) - - @pytest.mark.respx(assert_all_called=True) - async def test_run_timed_out( - self, respx_mock_with_pass_through, dbt_cloud_credentials - ): - respx_mock_with_pass_through.post( - "https://cloud.getdbt.com/api/v2/accounts/123456789/jobs/1/run/", - headers=HEADERS, - ).mock( - return_value=Response( - 200, json={"data": {"id": 10000, "project_id": 12345}} - ) - ) - respx_mock_with_pass_through.get( - "https://cloud.getdbt.com/api/v2/accounts/123456789/runs/10000/", - headers=HEADERS, - ).mock( - side_effect=[ - Response(200, json={"data": {"id": 10000, "status": 1}}), - Response(200, json={"data": {"id": 10000, "status": 3}}), - Response(200, json={"data": {"id": 10000, "status": 3}}), - Response(200, json={"data": {"id": 10000, "status": 3}}), - ] - ) - - with pytest.raises(DbtCloudJobRunTimedOut): - await trigger_dbt_cloud_job_run_and_wait_for_completion( + async def test_run_success(self, dbt_cloud_credentials): + with respx.mock(using="httpx") as respx_mock: + respx_mock.route(host="127.0.0.1").pass_through() + respx_mock.post( + "https://cloud.getdbt.com/api/v2/accounts/123456789/jobs/1/run/", + headers=HEADERS, + ).mock( + return_value=Response( + 200, json={"data": {"id": 10000, "project_id": 12345}} + ) + ) + respx_mock.get( + "https://cloud.getdbt.com/api/v2/accounts/123456789/runs/10000/", + headers=HEADERS, + ).mock( + return_value=Response(200, json={"data": {"id": 10000, "status": 10}}) + ) + respx_mock.get( + "https://cloud.getdbt.com/api/v2/accounts/123456789/runs/10000/artifacts/", + headers=HEADERS, + ).mock(return_value=Response(200, json={"data": ["manifest.json"]})) + + result = await trigger_dbt_cloud_job_run_and_wait_for_completion( + dbt_cloud_credentials=dbt_cloud_credentials, job_id=1 + ) + assert result == { + "id": 10000, + "status": 10, + "artifact_paths": ["manifest.json"], + } + + async def test_run_success_with_wait(self, dbt_cloud_credentials): + with respx.mock(using="httpx") as respx_mock: + respx_mock.route(host="127.0.0.1").pass_through() + 
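The recurring change across these test modules is dropping the shared `respx_mock_with_pass_through` fixture in favor of an explicit, context-managed mock. Distilled to its core, the pattern is the sketch below; the test name and route are placeholders, `using="httpx"` mocks the httpx transport, and the pass-through route leaves requests to the local Prefect API at 127.0.0.1 untouched:

```
import respx
from httpx import Response


async def test_some_dbt_cloud_call(dbt_cloud_credentials):
    with respx.mock(using="httpx", assert_all_called=False) as respx_mock:
        # Let calls to the ephemeral Prefect API through; mock only dbt Cloud.
        respx_mock.route(host="127.0.0.1").pass_through()
        respx_mock.get(
            "https://cloud.getdbt.com/api/v2/accounts/123456789/jobs/12/"
        ).mock(return_value=Response(200, json={"data": {"id": 10000}}))
        ...  # invoke the task under test and assert on the mocked response
```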
respx_mock.post( + "https://cloud.getdbt.com/api/v2/accounts/123456789/jobs/1/run/", + headers=HEADERS, + ).mock( + return_value=Response( + 200, json={"data": {"id": 10000, "project_id": 12345}} + ) + ) + respx_mock.get( + "https://cloud.getdbt.com/api/v2/accounts/123456789/runs/10000/", + headers=HEADERS, + ).mock( + side_effect=[ + Response(200, json={"data": {"id": 10000, "status": 1}}), + Response(200, json={"data": {"id": 10000, "status": 3}}), + Response(200, json={"data": {"id": 10000, "status": 10}}), + ] + ) + respx_mock.get( + "https://cloud.getdbt.com/api/v2/accounts/123456789/runs/10000/artifacts/", + headers=HEADERS, + ).mock(return_value=Response(200, json={"data": ["manifest.json"]})) + + result = await trigger_dbt_cloud_job_run_and_wait_for_completion( dbt_cloud_credentials=dbt_cloud_credentials, job_id=1, poll_frequency_seconds=1, - max_wait_seconds=3, - retry_filtered_models_attempts=0, - ) - - @pytest.mark.respx(assert_all_called=True) - async def test_run_success_failed_artifacts( - self, respx_mock_with_pass_through, dbt_cloud_credentials - ): - respx_mock_with_pass_through.post( - "https://cloud.getdbt.com/api/v2/accounts/123456789/jobs/1/run/", - headers=HEADERS, - ).mock( - return_value=Response( - 200, json={"data": {"id": 10000, "project_id": 12345}} - ) - ) - respx_mock_with_pass_through.get( - "https://cloud.getdbt.com/api/v2/accounts/123456789/runs/10000/", - headers=HEADERS, - ).mock(return_value=Response(200, json={"data": {"id": 10000, "status": 10}})) - respx_mock_with_pass_through.get( - "https://cloud.getdbt.com/api/v2/accounts/123456789/runs/10000/artifacts/", - headers=HEADERS, - ).mock( - return_value=Response( - 500, json={"status": {"user_message": "This is what went wrong"}} ) - ) - - result = await trigger_dbt_cloud_job_run_and_wait_for_completion( - dbt_cloud_credentials=dbt_cloud_credentials, job_id=1 - ) - assert result == {"id": 10000, "status": 10} + assert result == { + "id": 10000, + "status": 10, + "artifact_paths": ["manifest.json"], + } + + async def test_run_failure_with_wait_and_retry(self, dbt_cloud_credentials): + with respx.mock(using="httpx") as respx_mock: + respx_mock.route(host="127.0.0.1").pass_through() + respx_mock.post( + "https://cloud.getdbt.com/api/v2/accounts/123456789/jobs/1/run/", + headers=HEADERS, + ).mock( + return_value=Response( + 200, json={"data": {"id": 10000, "project_id": 12345}} + ) + ) + respx_mock.get( + "https://cloud.getdbt.com/api/v2/accounts/123456789/runs/10000/", + headers=HEADERS, + ).mock( + side_effect=[ + Response(200, json={"data": {"id": 10000, "status": 1}}), + Response(200, json={"data": {"id": 10000, "status": 3}}), + Response( + 200, json={"data": {"id": 10000, "status": 20}} + ), # failed status + ] + ) + + with pytest.raises(DbtCloudJobRunFailed): + await trigger_dbt_cloud_job_run_and_wait_for_completion( + dbt_cloud_credentials=dbt_cloud_credentials, + job_id=1, + poll_frequency_seconds=1, + retry_filtered_models_attempts=1, + ) + + async def test_run_with_unexpected_status(self, dbt_cloud_credentials): + with respx.mock(using="httpx") as respx_mock: + respx_mock.route(host="127.0.0.1").pass_through() + respx_mock.post( + "https://cloud.getdbt.com/api/v2/accounts/123456789/jobs/1/run/", + headers=HEADERS, + ).mock( + return_value=Response( + 200, json={"data": {"id": 10000, "project_id": 12345}} + ) + ) + respx_mock.get( + "https://cloud.getdbt.com/api/v2/accounts/123456789/runs/10000/", + headers=HEADERS, + ).mock( + side_effect=[ + Response(200, json={"data": {"id": 10000, "status": 
1}}), + Response(200, json={"data": {"id": 10000, "status": 3}}), + Response( + 200, json={"data": {"id": 10000, "status": 42}} + ), # unknown status + ] + ) + + with pytest.raises( + ValueError, match="42 is not a valid DbtCloudJobRunStatus" + ): + await trigger_dbt_cloud_job_run_and_wait_for_completion( + dbt_cloud_credentials=dbt_cloud_credentials, + job_id=1, + poll_frequency_seconds=1, + retry_filtered_models_attempts=0, + ) + + async def test_run_failure_no_run_id(self, dbt_cloud_credentials): + with respx.mock(using="httpx") as respx_mock: + respx_mock.route(host="127.0.0.1").pass_through() + respx_mock.post( + "https://cloud.getdbt.com/api/v2/accounts/123456789/jobs/1/run/", + headers=HEADERS, + ).mock(return_value=Response(200, json={"data": {"project_id": 12345}})) + + with pytest.raises(RuntimeError, match="Unable to determine run ID"): + await trigger_dbt_cloud_job_run_and_wait_for_completion( + dbt_cloud_credentials=dbt_cloud_credentials, + job_id=1, + poll_frequency_seconds=1, + ) + + async def test_run_cancelled_with_wait(self, dbt_cloud_credentials): + with respx.mock(using="httpx") as respx_mock: + respx_mock.route(host="127.0.0.1").pass_through() + respx_mock.post( + "https://cloud.getdbt.com/api/v2/accounts/123456789/jobs/1/run/", + headers=HEADERS, + ).mock( + return_value=Response( + 200, json={"data": {"id": 10000, "project_id": 12345}} + ) + ) + respx_mock.get( + "https://cloud.getdbt.com/api/v2/accounts/123456789/runs/10000/", + headers=HEADERS, + ).mock( + side_effect=[ + Response(200, json={"data": {"id": 10000, "status": 1}}), + Response(200, json={"data": {"id": 10000, "status": 3}}), + Response(200, json={"data": {"id": 10000, "status": 30}}), + ] + ) + + with pytest.raises(DbtCloudJobRunCancelled): + await trigger_dbt_cloud_job_run_and_wait_for_completion( + dbt_cloud_credentials=dbt_cloud_credentials, + job_id=1, + poll_frequency_seconds=1, + retry_filtered_models_attempts=0, + ) + + async def test_run_timed_out(self, dbt_cloud_credentials): + with respx.mock(using="httpx") as respx_mock: + respx_mock.route(host="127.0.0.1").pass_through() + respx_mock.post( + "https://cloud.getdbt.com/api/v2/accounts/123456789/jobs/1/run/", + headers=HEADERS, + ).mock( + return_value=Response( + 200, json={"data": {"id": 10000, "project_id": 12345}} + ) + ) + respx_mock.get( + "https://cloud.getdbt.com/api/v2/accounts/123456789/runs/10000/", + headers=HEADERS, + ).mock( + side_effect=[ + Response(200, json={"data": {"id": 10000, "status": 1}}), + Response(200, json={"data": {"id": 10000, "status": 3}}), + Response(200, json={"data": {"id": 10000, "status": 3}}), + Response(200, json={"data": {"id": 10000, "status": 3}}), + ] + ) + + with pytest.raises(DbtCloudJobRunTimedOut): + await trigger_dbt_cloud_job_run_and_wait_for_completion( + dbt_cloud_credentials=dbt_cloud_credentials, + job_id=1, + poll_frequency_seconds=1, + max_wait_seconds=3, + retry_filtered_models_attempts=0, + ) + + async def test_run_success_failed_artifacts(self, dbt_cloud_credentials): + with respx.mock(using="httpx") as respx_mock: + respx_mock.route(host="127.0.0.1").pass_through() + respx_mock.post( + "https://cloud.getdbt.com/api/v2/accounts/123456789/jobs/1/run/", + headers=HEADERS, + ).mock( + return_value=Response( + 200, json={"data": {"id": 10000, "project_id": 12345}} + ) + ) + respx_mock.get( + "https://cloud.getdbt.com/api/v2/accounts/123456789/runs/10000/", + headers=HEADERS, + ).mock( + return_value=Response(200, json={"data": {"id": 10000, "status": 10}}) + ) + respx_mock.get( + 
"https://cloud.getdbt.com/api/v2/accounts/123456789/runs/10000/artifacts/", + headers=HEADERS, + ).mock( + return_value=Response( + 500, json={"status": {"user_message": "This is what went wrong"}} + ) + ) + + result = await trigger_dbt_cloud_job_run_and_wait_for_completion( + dbt_cloud_credentials=dbt_cloud_credentials, job_id=1 + ) + assert result == {"id": 10000, "status": 10} class TestRetryDbtCloudRunJobSubsetAndWaitForCompletion: - async def test_run_steps_override_error( - self, respx_mock_with_pass_through, dbt_cloud_credentials - ): + async def test_run_steps_override_error(self, dbt_cloud_credentials): with pytest.raises(ValueError, match="Do not set `steps_override"): await retry_dbt_cloud_job_run_subset_and_wait_for_completion( dbt_cloud_credentials=dbt_cloud_credentials, @@ -467,116 +449,117 @@ async def test_retry_run( self, trigger_job_run_options, exe_command, - respx_mock_with_pass_through, dbt_cloud_credentials, ): - respx_mock_with_pass_through.get( - "https://cloud.getdbt.com/api/v2/accounts/123456789/jobs/1/", - headers=HEADERS, - ).mock( - return_value=Response( - 200, - json={ - "data": { - "id": 10000, - "generate_docs": False, - "generate_sources": False, - } - }, - ) - ) - - # mock get_dbt_cloud_run_info - respx_mock_with_pass_through.get( - "https://cloud.getdbt.com/api/v2/accounts/123456789/runs/10000/", - headers=HEADERS, - ).mock( - return_value=Response( - 200, - json={ - "data": { - "id": 10000, - "status": 20, # failed status - "run_steps": [ - { - "id": 432100123, - "run_id": 10000, - "account_id": 123456789, - "index": 1, - "name": "Clone Git Repository", - "status_humanized": "Success", - }, - { - "id": 432100124, - "run_id": 10000, - "account_id": 123456789, - "index": 2, - "name": "Create Profile from Connection Snowflake ", - "status_humanized": "Success", - }, - { - "id": 432100125, - "run_id": 10000, - "account_id": 123456789, - "index": 3, - "name": "Invoke dbt with `dbt deps`", - "status_humanized": "Success", - }, + with respx.mock(using="httpx", assert_all_called=False) as respx_mock: + respx_mock.route(host="127.0.0.1").pass_through() + respx_mock.get( + "https://cloud.getdbt.com/api/v2/accounts/123456789/jobs/1/", + headers=HEADERS, + ).mock( + return_value=Response( + 200, + json={ + "data": { + "id": 10000, + "generate_docs": False, + "generate_sources": False, + } + }, + ) + ) + + # mock get_dbt_cloud_run_info + respx_mock.get( + "https://cloud.getdbt.com/api/v2/accounts/123456789/runs/10000/", + headers=HEADERS, + ).mock( + return_value=Response( + 200, + json={ + "data": { + "id": 10000, + "status": 20, # failed status + "run_steps": [ + { + "id": 432100123, + "run_id": 10000, + "account_id": 123456789, + "index": 1, + "name": "Clone Git Repository", + "status_humanized": "Success", + }, + { + "id": 432100124, + "run_id": 10000, + "account_id": 123456789, + "index": 2, + "name": "Create Profile from Connection Snowflake ", + "status_humanized": "Success", + }, + { + "id": 432100125, + "run_id": 10000, + "account_id": 123456789, + "index": 3, + "name": "Invoke dbt with `dbt deps`", + "status_humanized": "Success", + }, + { + "run_id": 10000, + "account_id": 123456789, + "index": 4, + "name": f"Invoke dbt with `dbt {exe_command}`", + "status_humanized": "Error", + }, + ], + "job_id": "1", + } + }, + ) + ) + + # mock list_dbt_cloud_run_artifacts + respx_mock.get( + "https://cloud.getdbt.com/api/v2/accounts/123456789/runs/10000/artifacts/", + headers=HEADERS, + ).mock(return_value=Response(200, json={"data": ["run_results.json"]})) + + # mock 
get_dbt_cloud_run_artifact + respx_mock.get( + "https://cloud.getdbt.com/api/v2/accounts/123456789/runs/10000/artifacts/run_results.json", # noqa + headers=HEADERS, + ).mock( + return_value=Response( + 200, + json={ + "metadata": {"env": {"DBT_CLOUD_JOB_ID": "1"}}, + "results": [ { - "run_id": 10000, - "account_id": 123456789, - "index": 4, - "name": f"Invoke dbt with `dbt {exe_command}`", - "status_humanized": "Error", + "status": "fail", + "message": "FAIL 1", + "failures": None, + "unique_id": "model.jaffle_shop.stg_customers", }, ], - "job_id": "1", - } - }, - ) - ) - - # mock list_dbt_cloud_run_artifacts - respx_mock_with_pass_through.get( - "https://cloud.getdbt.com/api/v2/accounts/123456789/runs/10000/artifacts/", - headers=HEADERS, - ).mock(return_value=Response(200, json={"data": ["run_results.json"]})) - - # mock get_dbt_cloud_run_artifact - respx_mock_with_pass_through.get( - "https://cloud.getdbt.com/api/v2/accounts/123456789/runs/10000/artifacts/run_results.json", # noqa - headers=HEADERS, - ).mock( - return_value=Response( - 200, - json={ - "metadata": {"env": {"DBT_CLOUD_JOB_ID": "1"}}, - "results": [ - { - "status": "fail", - "message": "FAIL 1", - "failures": None, - "unique_id": "model.jaffle_shop.stg_customers", - }, - ], - }, + }, + ) ) - ) - respx_mock_with_pass_through.post( - "https://cloud.getdbt.com/api/v2/accounts/123456789/jobs/1/run/", - headers=HEADERS, - ).mock( - return_value=Response( - 200, json={"data": {"id": 10000, "project_id": 12345}} + respx_mock.post( + "https://cloud.getdbt.com/api/v2/accounts/123456789/jobs/1/run/", + headers=HEADERS, + ).mock( + return_value=Response( + 200, json={"data": {"id": 10000, "project_id": 12345}} + ) ) - ) - with pytest.raises(DbtCloudJobRunFailed, match="Triggered job run with"): - await retry_dbt_cloud_job_run_subset_and_wait_for_completion( - dbt_cloud_credentials=dbt_cloud_credentials, - run_id=10000, - trigger_job_run_options=trigger_job_run_options, - ) + with pytest.raises(DbtCloudJobRunFailed, match="Triggered job run with"): + await retry_dbt_cloud_job_run_subset_and_wait_for_completion( + dbt_cloud_credentials=dbt_cloud_credentials, + run_id=10000, + trigger_job_run_options=trigger_job_run_options, + ) @pytest.fixture @@ -627,228 +610,253 @@ def test_fail(self): class TestTriggerWaitRetryDbtCloudJobRun: - @pytest.mark.respx(assert_all_called=True) - async def test_run_success(self, respx_mock_with_pass_through, dbt_cloud_job): - respx_mock_with_pass_through.post( - "https://cloud.getdbt.com/api/v2/accounts/123456789/jobs/10000/run/", - headers=HEADERS, - ).mock( - return_value=Response( - 200, json={"data": {"id": 10000, "project_id": 12345}} - ) - ) - respx_mock_with_pass_through.get( - "https://cloud.getdbt.com/api/v2/accounts/123456789/runs/10000/", - headers=HEADERS, - ).mock(return_value=Response(200, json={"data": {"id": 10000, "status": 10}})) - respx_mock_with_pass_through.get( - "https://cloud.getdbt.com/api/v2/accounts/123456789/runs/10000/artifacts/", - headers=HEADERS, - ).mock(return_value=Response(200, json={"data": ["manifest.json"]})) - - result = await run_dbt_cloud_job(dbt_cloud_job=dbt_cloud_job) - assert result == { - "id": 10000, - "status": 10, - "artifact_paths": ["manifest.json"], - } - - @pytest.mark.respx(assert_all_called=True) - async def test_run_timeout(self, respx_mock_with_pass_through, dbt_cloud_job): - respx_mock_with_pass_through.post( - "https://cloud.getdbt.com/api/v2/accounts/123456789/jobs/10000/run/", - headers=HEADERS, - ).mock( - return_value=Response( - 200, 
json={"data": {"id": 10000, "project_id": 12345}} - ) - ) - respx_mock_with_pass_through.get( - "https://cloud.getdbt.com/api/v2/accounts/123456789/runs/10000/", - headers=HEADERS, - ).mock(return_value=Response(200, json={"data": {"id": 10000, "status": 3}})) - - dbt_cloud_job.timeout_seconds = 1 - with pytest.raises(DbtCloudJobRunTimedOut, match="Max wait time of 1"): - await run_dbt_cloud_job(dbt_cloud_job=dbt_cloud_job) + async def test_run_success(self, dbt_cloud_job): + with respx.mock(using="httpx") as respx_mock: + respx_mock.route(host="127.0.0.1").pass_through() + respx_mock.post( + "https://cloud.getdbt.com/api/v2/accounts/123456789/jobs/10000/run/", + headers=HEADERS, + ).mock( + return_value=Response( + 200, json={"data": {"id": 10000, "project_id": 12345}} + ) + ) + respx_mock.get( + "https://cloud.getdbt.com/api/v2/accounts/123456789/runs/10000/", + headers=HEADERS, + ).mock( + return_value=Response(200, json={"data": {"id": 10000, "status": 10}}) + ) + respx_mock.get( + "https://cloud.getdbt.com/api/v2/accounts/123456789/runs/10000/artifacts/", + headers=HEADERS, + ).mock(return_value=Response(200, json={"data": ["manifest.json"]})) + + result = await run_dbt_cloud_job(dbt_cloud_job=dbt_cloud_job) + assert result == { + "id": 10000, + "status": 10, + "artifact_paths": ["manifest.json"], + } + + async def test_run_timeout(self, dbt_cloud_job): + with respx.mock(using="httpx") as respx_mock: + respx_mock.route(host="127.0.0.1").pass_through() + respx_mock.post( + "https://cloud.getdbt.com/api/v2/accounts/123456789/jobs/10000/run/", + headers=HEADERS, + ).mock( + return_value=Response( + 200, json={"data": {"id": 10000, "project_id": 12345}} + ) + ) + respx_mock.get( + "https://cloud.getdbt.com/api/v2/accounts/123456789/runs/10000/", + headers=HEADERS, + ).mock( + return_value=Response(200, json={"data": {"id": 10000, "status": 3}}) + ) + + dbt_cloud_job.timeout_seconds = 1 + with pytest.raises(DbtCloudJobRunTimedOut, match="Max wait time of 1"): + await run_dbt_cloud_job(dbt_cloud_job=dbt_cloud_job) @pytest.mark.parametrize( "exe_command", ["run", "run-operation"], ) - async def test_fail(self, respx_mock_with_pass_through, dbt_cloud_job, exe_command): - respx_mock_with_pass_through.get( - "https://cloud.getdbt.com/api/v2/accounts/123456789/jobs/10000/", - headers=HEADERS, - ).mock( - return_value=Response( - 200, - json={"data": {"id": 10000, "project_id": 12345, "run_steps": [""]}}, - ) - ) - respx_mock_with_pass_through.post( - "https://cloud.getdbt.com/api/v2/accounts/123456789/jobs/10000/run/", - headers=HEADERS, - ).mock( - return_value=Response( - 200, json={"data": {"id": 10000, "project_id": 12345}} - ) - ) - respx_mock_with_pass_through.get( - "https://cloud.getdbt.com/api/v2/accounts/123456789/runs/10000/", - headers=HEADERS, - ).mock(return_value=Response(200, json={"data": {"id": 10000, "status": 20}})) - - respx_mock_with_pass_through.get( - "https://cloud.getdbt.com/api/v2/accounts/123456789/jobs/100000/", - headers=HEADERS, - ).mock( - return_value=Response( - 200, - json={ - "data": { - "id": 10000, - "generate_docs": False, - "generate_sources": False, - } - }, - ) - ) - - # mock get_dbt_cloud_run_info - respx_mock_with_pass_through.get( - "https://cloud.getdbt.com/api/v2/accounts/123456789/runs/10000/", - headers=HEADERS, - ).mock( - return_value=Response( - 200, - json={ - "data": { - "id": 10000, - "status": 20, # failed status - "run_steps": [ - { - "id": 432100123, - "run_id": 10000, - "account_id": 123456789, - "index": 1, - "name": "Clone Git 
Repository", - "status_humanized": "Success", - }, - { - "id": 432100124, - "run_id": 10000, - "account_id": 123456789, - "index": 2, - "name": "Create Profile from Connection Snowflake ", - "status_humanized": "Success", - }, + async def test_fail(self, dbt_cloud_job, exe_command): + with respx.mock(using="httpx", assert_all_called=False) as respx_mock: + respx_mock.route(host="127.0.0.1").pass_through() + respx_mock.get( + "https://cloud.getdbt.com/api/v2/accounts/123456789/jobs/10000/", + headers=HEADERS, + ).mock( + return_value=Response( + 200, + json={ + "data": {"id": 10000, "project_id": 12345, "run_steps": [""]} + }, + ) + ) + respx_mock.post( + "https://cloud.getdbt.com/api/v2/accounts/123456789/jobs/10000/run/", + headers=HEADERS, + ).mock( + return_value=Response( + 200, json={"data": {"id": 10000, "project_id": 12345}} + ) + ) + respx_mock.get( + "https://cloud.getdbt.com/api/v2/accounts/123456789/runs/10000/", + headers=HEADERS, + ).mock( + return_value=Response(200, json={"data": {"id": 10000, "status": 20}}) + ) + + respx_mock.get( + "https://cloud.getdbt.com/api/v2/accounts/123456789/jobs/100000/", + headers=HEADERS, + ).mock( + return_value=Response( + 200, + json={ + "data": { + "id": 10000, + "generate_docs": False, + "generate_sources": False, + } + }, + ) + ) + + # mock get_dbt_cloud_run_info + respx_mock.get( + "https://cloud.getdbt.com/api/v2/accounts/123456789/runs/10000/", + headers=HEADERS, + ).mock( + return_value=Response( + 200, + json={ + "data": { + "id": 10000, + "status": 20, # failed status + "run_steps": [ + { + "id": 432100123, + "run_id": 10000, + "account_id": 123456789, + "index": 1, + "name": "Clone Git Repository", + "status_humanized": "Success", + }, + { + "id": 432100124, + "run_id": 10000, + "account_id": 123456789, + "index": 2, + "name": "Create Profile from Connection Snowflake ", + "status_humanized": "Success", + }, + { + "id": 432100125, + "run_id": 10000, + "account_id": 123456789, + "index": 3, + "name": "Invoke dbt with `dbt deps`", + "status_humanized": "Success", + }, + { + "run_id": 10000, + "account_id": 123456789, + "index": 4, + "name": f"Invoke dbt with `dbt {exe_command}`", + "status_humanized": "Error", + }, + ], + "job_id": "1", + } + }, + ) + ) + + # mock list_dbt_cloud_run_artifacts + respx_mock.get( + "https://cloud.getdbt.com/api/v2/accounts/123456789/runs/10000/artifacts/", + headers=HEADERS, + ).mock(return_value=Response(200, json={"data": ["run_results.json"]})) + + # mock get_dbt_cloud_run_artifact + respx_mock.get( + "https://cloud.getdbt.com/api/v2/accounts/123456789/runs/10000/artifacts/run_results.json", # noqa + headers=HEADERS, + ).mock( + return_value=Response( + 200, + json={ + "metadata": {"env": {"DBT_CLOUD_JOB_ID": "1"}}, + "results": [ { - "id": 432100125, - "run_id": 10000, - "account_id": 123456789, - "index": 3, - "name": "Invoke dbt with `dbt deps`", - "status_humanized": "Success", - }, - { - "run_id": 10000, - "account_id": 123456789, - "index": 4, - "name": f"Invoke dbt with `dbt {exe_command}`", - "status_humanized": "Error", + "status": "fail", + "message": "FAIL 1", + "failures": None, + "unique_id": "model.jaffle_shop.stg_customers", }, ], - "job_id": "1", - } - }, - ) - ) - - # mock list_dbt_cloud_run_artifacts - respx_mock_with_pass_through.get( - "https://cloud.getdbt.com/api/v2/accounts/123456789/runs/10000/artifacts/", - headers=HEADERS, - ).mock(return_value=Response(200, json={"data": ["run_results.json"]})) - - # mock get_dbt_cloud_run_artifact - respx_mock_with_pass_through.get( - 
"https://cloud.getdbt.com/api/v2/accounts/123456789/runs/10000/artifacts/run_results.json", # noqa - headers=HEADERS, - ).mock( - return_value=Response( - 200, - json={ - "metadata": {"env": {"DBT_CLOUD_JOB_ID": "1"}}, - "results": [ - { - "status": "fail", - "message": "FAIL 1", - "failures": None, - "unique_id": "model.jaffle_shop.stg_customers", - }, - ], - }, - ) - ) - - with pytest.raises(DbtCloudJobRunFailed, match="dbt Cloud job 10000"): - await run_dbt_cloud_job(dbt_cloud_job=dbt_cloud_job) - - @pytest.mark.respx(assert_all_called=True) - async def test_cancel(self, respx_mock_with_pass_through, dbt_cloud_job): - respx_mock_with_pass_through.post( - "https://cloud.getdbt.com/api/v2/accounts/123456789/jobs/10000/run/", - headers=HEADERS, - ).mock( - return_value=Response( - 200, json={"data": {"id": 10000, "project_id": 12345}} - ) - ) - respx_mock_with_pass_through.get( - "https://cloud.getdbt.com/api/v2/accounts/123456789/runs/10000/", - headers=HEADERS, - ).mock(return_value=Response(200, json={"data": {"id": 10000, "status": 30}})) - - with pytest.raises(DbtCloudJobRunCancelled, match="dbt Cloud job 10000"): - await run_dbt_cloud_job(dbt_cloud_job=dbt_cloud_job) - - @pytest.mark.respx(assert_all_called=True) - async def test_fetch_result_running(self, respx_mock, dbt_cloud_job): - respx_mock.post( - "https://cloud.getdbt.com/api/v2/accounts/123456789/jobs/10000/run/", + }, + ) + ) + + with pytest.raises(DbtCloudJobRunFailed, match="dbt Cloud job 10000"): + await run_dbt_cloud_job(dbt_cloud_job=dbt_cloud_job) + + async def test_cancel(self, dbt_cloud_job): + with respx.mock(using="httpx") as respx_mock: + respx_mock.route(host="127.0.0.1").pass_through() + respx_mock.post( + "https://cloud.getdbt.com/api/v2/accounts/123456789/jobs/10000/run/", + headers=HEADERS, + ).mock( + return_value=Response( + 200, json={"data": {"id": 10000, "project_id": 12345}} + ) + ) + respx_mock.get( + "https://cloud.getdbt.com/api/v2/accounts/123456789/runs/10000/", + headers=HEADERS, + ).mock( + return_value=Response(200, json={"data": {"id": 10000, "status": 30}}) + ) + + with pytest.raises(DbtCloudJobRunCancelled, match="dbt Cloud job 10000"): + await run_dbt_cloud_job(dbt_cloud_job=dbt_cloud_job) + + async def test_fetch_result_running(self, dbt_cloud_job): + with respx.mock(using="httpx", assert_all_called=False) as respx_mock: + respx_mock.route(host="127.0.0.1").pass_through() + respx_mock.post( + "https://cloud.getdbt.com/api/v2/accounts/123456789/jobs/10000/run/", + headers=HEADERS, + ).mock( + return_value=Response( + 200, json={"data": {"id": 10000, "project_id": 12345}} + ) + ) + respx_mock.get( + "https://cloud.getdbt.com/api/v2/accounts/123456789/runs/10000/", + headers=HEADERS, + ).mock( + return_value=Response(200, json={"data": {"id": 10000, "status": 3}}) + ) + + with pytest.raises(DbtCloudJobRunIncomplete, match="dbt Cloud job 10000"): + run = await dbt_cloud_job.trigger() + await run.fetch_result() + + async def test_fail_auth(self, dbt_cloud_job): + with respx.mock(using="httpx") as respx_mock: + respx_mock.route(host="127.0.0.1").pass_through() + respx_mock.post( + "https://cloud.getdbt.com/api/v2/accounts/123456789/jobs/10000/run/", + headers=HEADERS, + ).mock( + return_value=Response( + 404, json={"status": {"user_message": "Not found"}} + ) + ) + with pytest.raises(DbtCloudJobRunTriggerFailed, match="Not found"): + await run_dbt_cloud_job(dbt_cloud_job=dbt_cloud_job, targeted_retries=0) + + +def test_get_job(dbt_cloud_job): + with respx.mock(using="httpx", 
assert_all_called=False) as respx_mock: + respx_mock.route(host="127.0.0.1").pass_through() + respx_mock.get( + "https://cloud.getdbt.com/api/v2/accounts/123456789/jobs/10000/", headers=HEADERS, ).mock( return_value=Response( 200, json={"data": {"id": 10000, "project_id": 12345}} ) ) - respx_mock.get( - "https://cloud.getdbt.com/api/v2/accounts/123456789/runs/10000/", - headers=HEADERS, - ).mock(return_value=Response(200, json={"data": {"id": 10000, "status": 3}})) - - with pytest.raises(DbtCloudJobRunIncomplete, match="dbt Cloud job 10000"): - run = await dbt_cloud_job.trigger() - await run.fetch_result() - - @pytest.mark.respx(assert_all_called=True) - async def test_fail_auth(self, respx_mock_with_pass_through, dbt_cloud_job): - respx_mock_with_pass_through.post( - "https://cloud.getdbt.com/api/v2/accounts/123456789/jobs/10000/run/", - headers=HEADERS, - ).mock( - return_value=Response(404, json={"status": {"user_message": "Not found"}}) - ) - with pytest.raises(DbtCloudJobRunTriggerFailed, match="Not found"): - await run_dbt_cloud_job(dbt_cloud_job=dbt_cloud_job, targeted_retries=0) - - -def test_get_job(respx_mock_with_pass_through, dbt_cloud_job): - respx_mock_with_pass_through.get( - "https://cloud.getdbt.com/api/v2/accounts/123456789/jobs/10000/", - headers=HEADERS, - ).mock( - return_value=Response(200, json={"data": {"id": 10000, "project_id": 12345}}) - ) - assert dbt_cloud_job.get_job()["id"] == 10000 + assert dbt_cloud_job.get_job()["id"] == 10000 diff --git a/src/integrations/prefect-dbt/tests/cloud/test_runs.py b/src/integrations/prefect-dbt/tests/cloud/test_runs.py index cbb4fb713d1a..85fe89225cff 100644 --- a/src/integrations/prefect-dbt/tests/cloud/test_runs.py +++ b/src/integrations/prefect-dbt/tests/cloud/test_runs.py @@ -1,4 +1,5 @@ import pytest +import respx from httpx import Response from prefect_dbt.cloud.runs import ( DbtCloudGetRunArtifactFailed, @@ -11,163 +12,176 @@ class TestGetDbtCloudRunInfo: - async def test_get_dbt_cloud_run_info(self, respx_mock, dbt_cloud_credentials): - respx_mock.get( - "https://cloud.getdbt.com/api/v2/accounts/123456789/runs/12/", - headers={"Authorization": "Bearer my_api_key"}, - ).mock(return_value=Response(200, json={"data": {"id": 10000}})) - - response = await get_dbt_cloud_run_info.fn( - dbt_cloud_credentials=dbt_cloud_credentials, - run_id=12, - ) - - assert response == {"id": 10000} - - async def test_get_nonexistent_run(self, respx_mock, dbt_cloud_credentials): - respx_mock.get( - "https://cloud.getdbt.com/api/v2/accounts/123456789/runs/12/", - headers={"Authorization": "Bearer my_api_key"}, - ).mock( - return_value=Response(404, json={"status": {"user_message": "Not found!"}}) - ) - with pytest.raises(DbtCloudGetRunFailed, match="Not found!"): - await get_dbt_cloud_run_info.fn( + async def test_get_dbt_cloud_run_info(self, dbt_cloud_credentials): + with respx.mock(using="httpx") as respx_mock: + respx_mock.get( + "https://cloud.getdbt.com/api/v2/accounts/123456789/runs/12/", + headers={"Authorization": "Bearer my_api_key"}, + ).mock(return_value=Response(200, json={"data": {"id": 10000}})) + + response = await get_dbt_cloud_run_info.fn( dbt_cloud_credentials=dbt_cloud_credentials, run_id=12, ) + assert response == {"id": 10000} + + async def test_get_nonexistent_run(self, dbt_cloud_credentials): + with respx.mock(using="httpx") as respx_mock: + respx_mock.get( + "https://cloud.getdbt.com/api/v2/accounts/123456789/runs/12/", + headers={"Authorization": "Bearer my_api_key"}, + ).mock( + return_value=Response( + 404, 
json={"status": {"user_message": "Not found!"}} + ) + ) + with pytest.raises(DbtCloudGetRunFailed, match="Not found!"): + await get_dbt_cloud_run_info.fn( + dbt_cloud_credentials=dbt_cloud_credentials, + run_id=12, + ) + class TestDbtCloudListRunArtifacts: - async def test_list_artifacts_success(self, respx_mock, dbt_cloud_credentials): - respx_mock.get( - "https://cloud.getdbt.com/api/v2/accounts/123456789/runs/12/artifacts/", - headers={"Authorization": "Bearer my_api_key"}, - ).mock(return_value=Response(200, json={"data": ["manifest.json"]})) - - response = await list_dbt_cloud_run_artifacts.fn( - dbt_cloud_credentials=dbt_cloud_credentials, - run_id=12, - ) - - assert response == ["manifest.json"] - - async def test_list_artifacts_with_step(self, respx_mock, dbt_cloud_credentials): - respx_mock.get( - "https://cloud.getdbt.com/api/v2/accounts/123456789/runs/12/artifacts/?step=1", # noqa - headers={"Authorization": "Bearer my_api_key"}, - ).mock(return_value=Response(200, json={"data": ["manifest.json"]})) - - response = await list_dbt_cloud_run_artifacts.fn( - dbt_cloud_credentials=dbt_cloud_credentials, run_id=12, step=1 - ) - - assert response == ["manifest.json"] - - async def test_list_artifacts_failure(self, respx_mock, dbt_cloud_credentials): - respx_mock.get( - "https://cloud.getdbt.com/api/v2/accounts/123456789/runs/12/artifacts/", - headers={"Authorization": "Bearer my_api_key"}, - ).mock( - return_value=Response( - 500, json={"status": {"user_message": "This is what went wrong"}} - ) - ) - with pytest.raises( - DbtCloudListRunArtifactsFailed, match="This is what went wrong" - ): - await list_dbt_cloud_run_artifacts.fn( + async def test_list_artifacts_success(self, dbt_cloud_credentials): + with respx.mock(using="httpx") as respx_mock: + respx_mock.get( + "https://cloud.getdbt.com/api/v2/accounts/123456789/runs/12/artifacts/", + headers={"Authorization": "Bearer my_api_key"}, + ).mock(return_value=Response(200, json={"data": ["manifest.json"]})) + + response = await list_dbt_cloud_run_artifacts.fn( dbt_cloud_credentials=dbt_cloud_credentials, run_id=12, ) + assert response == ["manifest.json"] + + async def test_list_artifacts_with_step(self, dbt_cloud_credentials): + with respx.mock(using="httpx") as respx_mock: + respx_mock.get( + "https://cloud.getdbt.com/api/v2/accounts/123456789/runs/12/artifacts/?step=1", # noqa + headers={"Authorization": "Bearer my_api_key"}, + ).mock(return_value=Response(200, json={"data": ["manifest.json"]})) + + response = await list_dbt_cloud_run_artifacts.fn( + dbt_cloud_credentials=dbt_cloud_credentials, run_id=12, step=1 + ) + + assert response == ["manifest.json"] + + async def test_list_artifacts_failure(self, dbt_cloud_credentials): + with respx.mock(using="httpx") as respx_mock: + respx_mock.get( + "https://cloud.getdbt.com/api/v2/accounts/123456789/runs/12/artifacts/", + headers={"Authorization": "Bearer my_api_key"}, + ).mock( + return_value=Response( + 500, json={"status": {"user_message": "This is what went wrong"}} + ) + ) + with pytest.raises( + DbtCloudListRunArtifactsFailed, match="This is what went wrong" + ): + await list_dbt_cloud_run_artifacts.fn( + dbt_cloud_credentials=dbt_cloud_credentials, + run_id=12, + ) + class TestDbtCloudGetRunArtifact: - async def test_get_artifact_success(self, respx_mock, dbt_cloud_credentials): - respx_mock.get( - "https://cloud.getdbt.com/api/v2/accounts/123456789/runs/12/artifacts/manifest.json", # noqa - headers={"Authorization": "Bearer my_api_key"}, - ).mock( - return_value=Response( - 200, - 
json={ - "metadata": { - "dbt_schema_version": "https://schemas.getdbt.com/dbt/catalog/v1.json", # noqa - "dbt_version": "1.1.1", - } - }, + async def test_get_artifact_success(self, dbt_cloud_credentials): + with respx.mock(using="httpx") as respx_mock: + respx_mock.get( + "https://cloud.getdbt.com/api/v2/accounts/123456789/runs/12/artifacts/manifest.json", # noqa + headers={"Authorization": "Bearer my_api_key"}, + ).mock( + return_value=Response( + 200, + json={ + "metadata": { + "dbt_schema_version": "https://schemas.getdbt.com/dbt/catalog/v1.json", # noqa + "dbt_version": "1.1.1", + } + }, + ) ) - ) - response = await get_dbt_cloud_run_artifact.fn( - dbt_cloud_credentials=dbt_cloud_credentials, run_id=12, path="manifest.json" - ) + response = await get_dbt_cloud_run_artifact.fn( + dbt_cloud_credentials=dbt_cloud_credentials, + run_id=12, + path="manifest.json", + ) - assert response == { - "metadata": { - "dbt_schema_version": "https://schemas.getdbt.com/dbt/catalog/v1.json", - "dbt_version": "1.1.1", + assert response == { + "metadata": { + "dbt_schema_version": "https://schemas.getdbt.com/dbt/catalog/v1.json", + "dbt_version": "1.1.1", + } } - } - - async def test_get_non_json_artifact(self, respx_mock, dbt_cloud_credentials): - respx_mock.get( - "https://cloud.getdbt.com/api/v2/accounts/123456789/runs/12/artifacts/compiled/dbt_artifacts/models/dim_dbt__current_models.sql", # noqa - headers={"Authorization": "Bearer my_api_key"}, - ).mock(return_value=Response(200, text="Hi! I'm some SQL!")) - - response = await get_dbt_cloud_run_artifact.fn( - dbt_cloud_credentials=dbt_cloud_credentials, - run_id=12, - path="compiled/dbt_artifacts/models/dim_dbt__current_models.sql", - ) - - assert response == "Hi! I'm some SQL!" - - async def test_get_artifact_with_step(self, respx_mock, dbt_cloud_credentials): - respx_mock.get( - "https://cloud.getdbt.com/api/v2/accounts/123456789/runs/12/artifacts/manifest.json?step=1", # noqa - headers={"Authorization": "Bearer my_api_key"}, - ).mock( - return_value=Response( - 200, - json={ - "metadata": { - "dbt_schema_version": "https://schemas.getdbt.com/dbt/catalog/v1.json", # noqa - "dbt_version": "1.1.1", - } - }, + + async def test_get_non_json_artifact(self, dbt_cloud_credentials): + with respx.mock(using="httpx") as respx_mock: + respx_mock.get( + "https://cloud.getdbt.com/api/v2/accounts/123456789/runs/12/artifacts/compiled/dbt_artifacts/models/dim_dbt__current_models.sql", # noqa + headers={"Authorization": "Bearer my_api_key"}, + ).mock(return_value=Response(200, text="Hi! I'm some SQL!")) + + response = await get_dbt_cloud_run_artifact.fn( + dbt_cloud_credentials=dbt_cloud_credentials, + run_id=12, + path="compiled/dbt_artifacts/models/dim_dbt__current_models.sql", ) - ) - - response = await get_dbt_cloud_run_artifact.fn( - dbt_cloud_credentials=dbt_cloud_credentials, - run_id=12, - path="manifest.json", - step=1, - ) - - assert response == { - "metadata": { - "dbt_schema_version": "https://schemas.getdbt.com/dbt/catalog/v1.json", - "dbt_version": "1.1.1", - } - } - - async def test_get_artifact_failure(self, respx_mock, dbt_cloud_credentials): - respx_mock.get( - "https://cloud.getdbt.com/api/v2/accounts/123456789/runs/12/artifacts/manifest.json", # noqa - headers={"Authorization": "Bearer my_api_key"}, - ).mock( - return_value=Response( - 500, json={"status": {"user_message": "This is what went wrong"}} + + assert response == "Hi! I'm some SQL!" 
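# Illustrative aside, not part of the diff: the bare-bones respx pattern these
# tests are migrating to. Instead of relying on a `respx_mock` pytest fixture,
# each test now opens an explicit `respx.mock(using="httpx")` context manager
# scoped to the test body. The URL and payload below are placeholders only.
import httpx
import respx
from httpx import Response

with respx.mock(using="httpx") as respx_mock:
    respx_mock.get("https://cloud.getdbt.com/api/v2/accounts/1/runs/1/").mock(
        return_value=Response(200, json={"data": {"id": 1}})
    )
    # Any httpx request issued inside the context is intercepted by the router.
    response = httpx.get("https://cloud.getdbt.com/api/v2/accounts/1/runs/1/")
    assert response.json() == {"data": {"id": 1}}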
+ + async def test_get_artifact_with_step(self, dbt_cloud_credentials): + with respx.mock(using="httpx") as respx_mock: + respx_mock.get( + "https://cloud.getdbt.com/api/v2/accounts/123456789/runs/12/artifacts/manifest.json?step=1", # noqa + headers={"Authorization": "Bearer my_api_key"}, + ).mock( + return_value=Response( + 200, + json={ + "metadata": { + "dbt_schema_version": "https://schemas.getdbt.com/dbt/catalog/v1.json", # noqa + "dbt_version": "1.1.1", + } + }, + ) ) - ) - with pytest.raises( - DbtCloudGetRunArtifactFailed, match="This is what went wrong" - ): - await get_dbt_cloud_run_artifact.fn( + + response = await get_dbt_cloud_run_artifact.fn( dbt_cloud_credentials=dbt_cloud_credentials, run_id=12, path="manifest.json", + step=1, + ) + + assert response == { + "metadata": { + "dbt_schema_version": "https://schemas.getdbt.com/dbt/catalog/v1.json", + "dbt_version": "1.1.1", + } + } + + async def test_get_artifact_failure(self, dbt_cloud_credentials): + with respx.mock(using="httpx") as respx_mock: + respx_mock.get( + "https://cloud.getdbt.com/api/v2/accounts/123456789/runs/12/artifacts/manifest.json", # noqa + headers={"Authorization": "Bearer my_api_key"}, + ).mock( + return_value=Response( + 500, json={"status": {"user_message": "This is what went wrong"}} + ) ) + with pytest.raises( + DbtCloudGetRunArtifactFailed, match="This is what went wrong" + ): + await get_dbt_cloud_run_artifact.fn( + dbt_cloud_credentials=dbt_cloud_credentials, + run_id=12, + path="manifest.json", + ) diff --git a/src/integrations/prefect-dbt/tests/cloud/test_utils.py b/src/integrations/prefect-dbt/tests/cloud/test_utils.py index b3fee6f13661..7f7f3561a867 100644 --- a/src/integrations/prefect-dbt/tests/cloud/test_utils.py +++ b/src/integrations/prefect-dbt/tests/cloud/test_utils.py @@ -1,4 +1,5 @@ import pytest +import respx from httpx import Response from prefect_dbt.cloud.utils import ( DbtCloudAdministrativeApiCallFailed, @@ -7,64 +8,67 @@ class TestCallDbtCloudAdministrativeApiEndpoint: - async def test_endpoint_returns_json(self, dbt_cloud_credentials, respx_mock): - respx_mock.get( - "https://cloud.getdbt.com/api/v2/accounts/123456789/projects/", - headers={"Authorization": "Bearer my_api_key"}, - ).mock( - return_value=Response( - 200, - json={ - "status": { - "code": 200, - "is_success": True, - "user_message": "Success!", - "developer_message": "", + async def test_endpoint_returns_json(self, dbt_cloud_credentials): + with respx.mock(using="httpx") as respx_mock: + respx_mock.get( + "https://cloud.getdbt.com/api/v2/accounts/123456789/projects/", + headers={"Authorization": "Bearer my_api_key"}, + ).mock( + return_value=Response( + 200, + json={ + "status": { + "code": 200, + "is_success": True, + "user_message": "Success!", + "developer_message": "", + }, + "data": [], }, - "data": [], - }, + ) ) - ) - - result = await call_dbt_cloud_administrative_api_endpoint.fn( - dbt_cloud_credentials=dbt_cloud_credentials, - path="/projects/", - http_method="GET", - ) - - assert result == { - "status": { - "code": 200, - "is_success": True, - "user_message": "Success!", - "developer_message": "", - }, - "data": [], - } - - async def test_endpoint_returns_text(self, dbt_cloud_credentials, respx_mock): - respx_mock.get( - "https://cloud.getdbt.com/api/v2/accounts/123456789/runs/12/artifacts/compiled/dbt_artifacts/models/dim_dbt__current_models.sql", # noqa - headers={"Authorization": "Bearer my_api_key"}, - ).mock(return_value=Response(200, text="Hi! 
I'm some SQL!")) - result = await call_dbt_cloud_administrative_api_endpoint.fn( - dbt_cloud_credentials=dbt_cloud_credentials, - path="/runs/12/artifacts/compiled/dbt_artifacts/models/dim_dbt__current_models.sql", # noqa - http_method="GET", - ) + result = await call_dbt_cloud_administrative_api_endpoint.fn( + dbt_cloud_credentials=dbt_cloud_credentials, + path="/projects/", + http_method="GET", + ) - assert result == "Hi! I'm some SQL!" + assert result == { + "status": { + "code": 200, + "is_success": True, + "user_message": "Success!", + "developer_message": "", + }, + "data": [], + } - async def test_failure(self, dbt_cloud_credentials, respx_mock): - respx_mock.get( - "https://cloud.getdbt.com/api/v2/accounts/123456789/projects/", - headers={"Authorization": "Bearer my_api_key"}, - ).mock(return_value=Response(500, json={})) + async def test_endpoint_returns_text(self, dbt_cloud_credentials): + with respx.mock(using="httpx") as respx_mock: + respx_mock.get( + "https://cloud.getdbt.com/api/v2/accounts/123456789/runs/12/artifacts/compiled/dbt_artifacts/models/dim_dbt__current_models.sql", # noqa + headers={"Authorization": "Bearer my_api_key"}, + ).mock(return_value=Response(200, text="Hi! I'm some SQL!")) - with pytest.raises(DbtCloudAdministrativeApiCallFailed): - await call_dbt_cloud_administrative_api_endpoint.fn( + result = await call_dbt_cloud_administrative_api_endpoint.fn( dbt_cloud_credentials=dbt_cloud_credentials, - path="/projects/", + path="/runs/12/artifacts/compiled/dbt_artifacts/models/dim_dbt__current_models.sql", # noqa http_method="GET", ) + + assert result == "Hi! I'm some SQL!" + + async def test_failure(self, dbt_cloud_credentials): + with respx.mock(using="httpx") as respx_mock: + respx_mock.get( + "https://cloud.getdbt.com/api/v2/accounts/123456789/projects/", + headers={"Authorization": "Bearer my_api_key"}, + ).mock(return_value=Response(500, json={})) + + with pytest.raises(DbtCloudAdministrativeApiCallFailed): + await call_dbt_cloud_administrative_api_endpoint.fn( + dbt_cloud_credentials=dbt_cloud_credentials, + path="/projects/", + http_method="GET", + ) diff --git a/src/integrations/prefect-dbt/tests/test_utilities.py b/src/integrations/prefect-dbt/tests/test_utilities.py new file mode 100644 index 000000000000..736c04386cac --- /dev/null +++ b/src/integrations/prefect-dbt/tests/test_utilities.py @@ -0,0 +1,74 @@ +import os +from pathlib import Path + +import pytest +import yaml +from prefect_dbt.utilities import get_profiles_dir, load_profiles_yml + +SAMPLE_PROFILES = { + "jaffle_shop": { + "outputs": { + "dev": { + "type": "duckdb", + "path": "jaffle_shop.duckdb", + "schema": "main", + "threads": 4, + } + } + } +} + + +@pytest.fixture +def temp_profiles_dir(tmp_path): + profiles_dir = tmp_path / ".dbt" + profiles_dir.mkdir() + + profiles_path = profiles_dir / "profiles.yml" + with open(profiles_path, "w") as f: + yaml.dump(SAMPLE_PROFILES, f) + + return str(profiles_dir) + + +def test_get_profiles_dir_default(): + if "DBT_PROFILES_DIR" in os.environ: + del os.environ["DBT_PROFILES_DIR"] + + expected = os.path.expanduser("~/.dbt") + assert get_profiles_dir() == expected + + +def test_get_profiles_dir_from_env(monkeypatch): + test_path = "/custom/path" + monkeypatch.setenv("DBT_PROFILES_DIR", test_path) + assert get_profiles_dir() == test_path + + +def test_load_profiles_yml_success(temp_profiles_dir): + profiles = load_profiles_yml(temp_profiles_dir) + assert profiles == SAMPLE_PROFILES + + +def test_load_profiles_yml_default_dir(monkeypatch, 
temp_profiles_dir): + monkeypatch.setenv("DBT_PROFILES_DIR", temp_profiles_dir) + profiles = load_profiles_yml(None) + assert profiles == SAMPLE_PROFILES + + +def test_load_profiles_yml_file_not_found(): + nonexistent_dir = "/path/that/does/not/exist" + with pytest.raises( + ValueError, + match=f"No profiles.yml found at {os.path.join(nonexistent_dir, 'profiles.yml')}", + ): + load_profiles_yml(nonexistent_dir) + + +def test_load_profiles_yml_invalid_yaml(temp_profiles_dir): + profiles_path = Path(temp_profiles_dir) / "profiles.yml" + with open(profiles_path, "w") as f: + f.write("invalid: yaml: content:\nindentation error") + + with pytest.raises(yaml.YAMLError): + load_profiles_yml(temp_profiles_dir) diff --git a/src/integrations/prefect-docker/pyproject.toml b/src/integrations/prefect-docker/pyproject.toml index e947a46f52f9..3afb5cb89440 100644 --- a/src/integrations/prefect-docker/pyproject.toml +++ b/src/integrations/prefect-docker/pyproject.toml @@ -22,7 +22,7 @@ classifiers = [ "Programming Language :: Python :: 3.12", "Topic :: Software Development :: Libraries", ] -dependencies = ["prefect>=3.0.0", "docker>=6.1.1", "exceptiongroup"] +dependencies = ["prefect>=3.1.1", "docker>=6.1.1", "exceptiongroup"] dynamic = ["version"] [project.optional-dependencies] diff --git a/src/integrations/prefect-gcp/pyproject.toml b/src/integrations/prefect-gcp/pyproject.toml index 21c9e5b1e855..2e8fdbb79b08 100644 --- a/src/integrations/prefect-gcp/pyproject.toml +++ b/src/integrations/prefect-gcp/pyproject.toml @@ -23,7 +23,7 @@ classifiers = [ "Topic :: Software Development :: Libraries", ] dependencies = [ - "prefect>=3.0.0", + "prefect>=3.1.1", "google-api-python-client>=2.20.0", "google-cloud-storage>=2.0.0", "tenacity>=8.0.0", diff --git a/src/integrations/prefect-kubernetes/pyproject.toml b/src/integrations/prefect-kubernetes/pyproject.toml index 427f4c839aea..e6ac3b037af4 100644 --- a/src/integrations/prefect-kubernetes/pyproject.toml +++ b/src/integrations/prefect-kubernetes/pyproject.toml @@ -4,7 +4,7 @@ build-backend = "setuptools.build_meta" [project] dependencies = [ - "prefect>=3.1.0", + "prefect>=3.1.1", "kubernetes-asyncio>=29.0.0", "tenacity>=8.2.3", "exceptiongroup", diff --git a/src/prefect/_experimental/__init__.py b/src/prefect/_experimental/__init__.py new file mode 100644 index 000000000000..e69de29bb2d1 diff --git a/src/prefect/_experimental/lineage.py b/src/prefect/_experimental/lineage.py new file mode 100644 index 000000000000..b26474efffdc --- /dev/null +++ b/src/prefect/_experimental/lineage.py @@ -0,0 +1,181 @@ +from typing import TYPE_CHECKING, Any, Dict, Literal, Optional, Sequence, Union + +from prefect.events.related import related_resources_from_run_context +from prefect.events.schemas.events import RelatedResource, Resource +from prefect.events.utilities import emit_event +from prefect.settings import get_current_settings + +if TYPE_CHECKING: + from prefect.results import ResultStore + +UpstreamResources = Sequence[Union[RelatedResource, dict[str, str]]] +DownstreamResources = Sequence[Union[Resource, dict[str, str]]] + +# Map block types to their URI schemes +STORAGE_URI_SCHEMES = { + "local-file-system": "file://{path}", + "s3-bucket": "s3://{storage.bucket_name}/{path}", + "gcs-bucket": "gs://{storage.bucket}/{path}", + "azure-blob-storage": "azure-blob://{storage.container_name}/{path}", +} + + +def get_result_resource_uri( + store: "ResultStore", + key: str, +) -> Optional[str]: + """ + Generate a URI for a result based on its storage backend. 
+ + Args: + store: A `ResultStore` instance. + key: The key of the result to generate a URI for. + """ + storage = store.result_storage + if storage is None: + return + + path = store._resolved_key_path(key) + + block_type = storage.get_block_type_slug() + if block_type and block_type in STORAGE_URI_SCHEMES: + return STORAGE_URI_SCHEMES[block_type].format(storage=storage, path=path) + + # Generic fallback + return f"prefect://{block_type}/{path}" + + +async def emit_lineage_event( + event_name: str, + upstream_resources: Optional[UpstreamResources] = None, + downstream_resources: Optional[DownstreamResources] = None, + direction_of_run_from_event: Literal["upstream", "downstream"] = "downstream", +) -> None: + """Emit lineage events showing relationships between resources. + + Args: + event_name: The name of the event to emit + upstream_resources: Optional list of RelatedResources that were upstream of + the event + downstream_resources: Optional list of Resources that were downstream + of the event + direction_of_run_from_event: The direction of the current run from + the event. E.g., if we're in a flow run and + `direction_of_run_from_event` is "downstream", then the flow run is + considered downstream of the resource's event. + """ + from prefect.client.orchestration import get_client # Avoid a circular import + + if not get_current_settings().experiments.lineage_events_enabled: + return + + upstream_resources = list(upstream_resources) if upstream_resources else [] + downstream_resources = list(downstream_resources) if downstream_resources else [] + + async with get_client() as client: + related_resources = await related_resources_from_run_context(client) + + # NOTE: We handle adding run-related resources to the event here instead of in + # the EventsWorker because not all run-related resources are upstream from + # every lineage event (they might be downstream). The EventsWorker only adds + # related resources to the "related" field in the event, which, for + # lineage-related events, tracks upstream resources only. For downstream + # resources, we need to emit an event for each downstream resource. + if direction_of_run_from_event == "downstream": + downstream_resources.extend(related_resources) + else: + upstream_resources.extend(related_resources) + + # Emit an event for each downstream resource. This is necessary because + # our event schema allows one primary resource and many related resources, + # and for the purposes of lineage, related resources can only represent + # upstream resources. + for resource in downstream_resources: + # Downstream lineage resources need to have the + # prefect.resource.lineage-group label. All upstram resources from a + # downstream resource with this label will be considered lineage-related + # resources. + if "prefect.resource.lineage-group" not in resource: + resource["prefect.resource.lineage-group"] = "global" + + emit_kwargs: Dict[str, Any] = { + "event": event_name, + "resource": resource, + "related": upstream_resources, + } + + emit_event(**emit_kwargs) + + +async def emit_result_read_event( + store: "ResultStore", + result_key: str, + downstream_resources: Optional[DownstreamResources] = None, + cached: bool = False, +) -> None: + """ + Emit a lineage event showing a task or flow result was read. + + Args: + store: A `ResultStore` instance. + result_key: The key of the result to generate a URI for. + downstream_resources: List of resources that were + downstream of the event's resource. 
+ """ + if not get_current_settings().experiments.lineage_events_enabled: + return + + result_resource_uri = get_result_resource_uri(store, result_key) + if result_resource_uri: + upstream_resources = [ + RelatedResource( + root={ + "prefect.resource.id": result_resource_uri, + "prefect.resource.role": "result", + } + ) + ] + event_name = "prefect.result.read" + if cached: + event_name += ".cached" + + await emit_lineage_event( + event_name=event_name, + upstream_resources=upstream_resources, + downstream_resources=downstream_resources, + direction_of_run_from_event="downstream", + ) + + +async def emit_result_write_event( + store: "ResultStore", + result_key: str, + upstream_resources: Optional[UpstreamResources] = None, +) -> None: + """ + Emit a lineage event showing a task or flow result was written. + + Args: + store: A `ResultStore` instance. + result_key: The key of the result to generate a URI for. + upstream_resources: Optional list of resources that were + upstream of the event's resource. + """ + if not get_current_settings().experiments.lineage_events_enabled: + return + + result_resource_uri = get_result_resource_uri(store, result_key) + if result_resource_uri: + downstream_resources = [ + { + "prefect.resource.id": result_resource_uri, + "prefect.resource.role": "result", + "prefect.resource.lineage-group": "global", + } + ] + await emit_lineage_event( + event_name="prefect.result.write", + upstream_resources=upstream_resources, + downstream_resources=downstream_resources, + direction_of_run_from_event="upstream", + ) diff --git a/src/prefect/_internal/compatibility/async_dispatch.py b/src/prefect/_internal/compatibility/async_dispatch.py index 12422d696d0e..a0a3ffc67666 100644 --- a/src/prefect/_internal/compatibility/async_dispatch.py +++ b/src/prefect/_internal/compatibility/async_dispatch.py @@ -1,11 +1,12 @@ import asyncio import inspect from functools import wraps -from typing import Any, Callable, Coroutine, Optional, TypeVar, Union +from typing import TYPE_CHECKING, Any, Callable, Coroutine, Optional, TypeVar, Union from typing_extensions import ParamSpec -from prefect.tasks import Task +if TYPE_CHECKING: + from prefect.tasks import Task R = TypeVar("R") P = ParamSpec("P") @@ -13,20 +14,46 @@ def is_in_async_context() -> bool: """ - Returns True if called from within an async context (coroutine or running event loop) + Returns True if called from within an async context. + + An async context is one of: + - a coroutine + - a running event loop + - a task or flow that is async """ + from prefect.context import get_run_context + from prefect.exceptions import MissingContextError + try: - asyncio.get_running_loop() - return True - except RuntimeError: - return False + run_ctx = get_run_context() + parent_obj = getattr(run_ctx, "task", None) + if not parent_obj: + parent_obj = getattr(run_ctx, "flow", None) + return getattr(parent_obj, "isasync", True) + except MissingContextError: + # not in an execution context, make best effort to + # decide whether to syncify + try: + asyncio.get_running_loop() + return True + except RuntimeError: + return False -def _is_acceptable_callable(obj: Union[Callable, Task]) -> bool: +def _is_acceptable_callable( + obj: Union[Callable[P, R], "Task[P, R]", classmethod], +) -> bool: if inspect.iscoroutinefunction(obj): return True - if isinstance(obj, Task) and inspect.iscoroutinefunction(obj.fn): + + # Check if a task or flow. Need to avoid importing `Task` or `Flow` here + # due to circular imports. 
+ if (fn := getattr(obj, "fn", None)) and inspect.iscoroutinefunction(fn): return True + + if isinstance(obj, classmethod) and inspect.iscoroutinefunction(obj.__func__): + return True + return False @@ -56,6 +83,8 @@ def wrapper( if should_run_sync: return sync_fn(*args, **kwargs) + if isinstance(async_impl, classmethod): + return async_impl.__func__(*args, **kwargs) return async_impl(*args, **kwargs) return wrapper # type: ignore diff --git a/src/prefect/_internal/compatibility/migration.py b/src/prefect/_internal/compatibility/migration.py index f160990d77dc..f39739f2c9df 100644 --- a/src/prefect/_internal/compatibility/migration.py +++ b/src/prefect/_internal/compatibility/migration.py @@ -86,7 +86,7 @@ # See src/prefect/filesystems.py for an example -def import_string_class_method(new_location: str) -> Callable: +def import_string_class_method(new_location: str) -> Callable[..., Any]: """ Handle moved class methods. diff --git a/src/prefect/_internal/concurrency/api.py b/src/prefect/_internal/concurrency/api.py index 6b9eadaa02eb..f263e61b6def 100644 --- a/src/prefect/_internal/concurrency/api.py +++ b/src/prefect/_internal/concurrency/api.py @@ -6,51 +6,48 @@ import asyncio import concurrent.futures import contextlib -from typing import ( - Awaitable, - Callable, - ContextManager, - Iterable, - Optional, - TypeVar, - Union, -) +from collections.abc import Awaitable, Iterable +from contextlib import AbstractContextManager +from typing import Any, Callable, Optional, Union, cast -from typing_extensions import ParamSpec +from typing_extensions import ParamSpec, TypeAlias, TypeVar from prefect._internal.concurrency.threads import ( WorkerThread, get_global_loop, in_global_loop, ) -from prefect._internal.concurrency.waiters import ( - AsyncWaiter, - Call, - SyncWaiter, -) +from prefect._internal.concurrency.waiters import AsyncWaiter, Call, SyncWaiter P = ParamSpec("P") -T = TypeVar("T") -Future = Union[concurrent.futures.Future, asyncio.Future] +T = TypeVar("T", infer_variance=True) +Future = Union[concurrent.futures.Future[T], asyncio.Future[T]] + +_SyncOrAsyncCallable: TypeAlias = Callable[P, Union[T, Awaitable[T]]] -def create_call(__fn: Callable[P, T], *args: P.args, **kwargs: P.kwargs) -> Call[T]: - return Call.new(__fn, *args, **kwargs) +def create_call( + __fn: _SyncOrAsyncCallable[P, T], *args: P.args, **kwargs: P.kwargs +) -> Call[T]: + return Call[T].new(__fn, *args, **kwargs) -def _cast_to_call(call_like: Union[Callable[[], T], Call[T]]) -> Call[T]: +def cast_to_call( + call_like: Union["_SyncOrAsyncCallable[[], T]", Call[T]], +) -> Call[T]: if isinstance(call_like, Call): - return call_like + return cast(Call[T], call_like) else: return create_call(call_like) class _base(abc.ABC): - @abc.abstractstaticmethod + @staticmethod + @abc.abstractmethod def wait_for_call_in_loop_thread( - __call: Union[Callable[[], T], Call[T]], + __call: Union["_SyncOrAsyncCallable[[], Any]", Call[T]], timeout: Optional[float] = None, - done_callbacks: Optional[Iterable[Call]] = None, + done_callbacks: Optional[Iterable[Call[Any]]] = None, ) -> T: """ Schedule a function in the global worker thread and wait for completion. 
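# Illustrative aside, not part of the diff: a rough sketch of how the retyped
# `create_call` helper is used together with the `from_sync` waiters defined
# later in this module, assuming the runtime behavior is unchanged by the
# typing work above. `fetch_greeting` is a hypothetical coroutine function.
from prefect._internal.concurrency.api import create_call, from_sync

async def fetch_greeting(name: str) -> str:
    return f"hello {name}"

# Wrap the callable and its arguments into a Call[str], then run it on the
# global event-loop thread from synchronous code and block for the result.
call = create_call(fetch_greeting, "marvin")
result = from_sync.wait_for_call_in_loop_thread(call)
assert result == "hello marvin"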
@@ -59,11 +56,12 @@ def wait_for_call_in_loop_thread( """ raise NotImplementedError() - @abc.abstractstaticmethod + @staticmethod + @abc.abstractmethod def wait_for_call_in_new_thread( - __call: Union[Callable[[], T], Call[T]], + __call: Union["_SyncOrAsyncCallable[[], T]", Call[T]], timeout: Optional[float] = None, - done_callbacks: Optional[Iterable[Call]] = None, + done_callbacks: Optional[Iterable[Call[Any]]] = None, ) -> T: """ Schedule a function in a new worker thread. @@ -74,14 +72,15 @@ def wait_for_call_in_new_thread( @staticmethod def call_soon_in_new_thread( - __call: Union[Callable[[], T], Call[T]], timeout: Optional[float] = None + __call: Union["_SyncOrAsyncCallable[[], T]", Call[T]], + timeout: Optional[float] = None, ) -> Call[T]: """ Schedule a call for execution in a new worker thread. Returns the submitted call. """ - call = _cast_to_call(__call) + call = cast_to_call(__call) runner = WorkerThread(run_once=True) call.set_timeout(timeout) runner.submit(call) @@ -89,7 +88,7 @@ def call_soon_in_new_thread( @staticmethod def call_soon_in_loop_thread( - __call: Union[Callable[[], Awaitable[T]], Call[Awaitable[T]]], + __call: Union["_SyncOrAsyncCallable[[], T]", Call[T]], timeout: Optional[float] = None, ) -> Call[T]: """ @@ -97,7 +96,7 @@ def call_soon_in_loop_thread( Returns the submitted call. """ - call = _cast_to_call(__call) + call = cast_to_call(__call) runner = get_global_loop() call.set_timeout(timeout) runner.submit(call) @@ -116,7 +115,7 @@ def call_in_new_thread( @staticmethod def call_in_loop_thread( - __call: Union[Callable[[], Awaitable[T]], Call[Awaitable[T]]], + __call: Union[Callable[[], Awaitable[T]], Call[T]], timeout: Optional[float] = None, ) -> T: """ @@ -130,12 +129,12 @@ def call_in_loop_thread( class from_async(_base): @staticmethod async def wait_for_call_in_loop_thread( - __call: Union[Callable[[], Awaitable[T]], Call[Awaitable[T]]], + __call: Union[Callable[[], Awaitable[T]], Call[T]], timeout: Optional[float] = None, - done_callbacks: Optional[Iterable[Call]] = None, - contexts: Optional[Iterable[ContextManager]] = None, - ) -> Awaitable[T]: - call = _cast_to_call(__call) + done_callbacks: Optional[Iterable[Call[Any]]] = None, + contexts: Optional[Iterable[AbstractContextManager[Any]]] = None, + ) -> T: + call = cast_to_call(__call) waiter = AsyncWaiter(call) for callback in done_callbacks or []: waiter.add_done_callback(callback) @@ -150,9 +149,9 @@ async def wait_for_call_in_loop_thread( async def wait_for_call_in_new_thread( __call: Union[Callable[[], T], Call[T]], timeout: Optional[float] = None, - done_callbacks: Optional[Iterable[Call]] = None, + done_callbacks: Optional[Iterable[Call[Any]]] = None, ) -> T: - call = _cast_to_call(__call) + call = cast_to_call(__call) waiter = AsyncWaiter(call=call) for callback in done_callbacks or []: waiter.add_done_callback(callback) @@ -169,7 +168,7 @@ def call_in_new_thread( @staticmethod def call_in_loop_thread( - __call: Union[Callable[[], Awaitable[T]], Call[Awaitable[T]]], + __call: Union[Callable[[], Awaitable[T]], Call[T]], timeout: Optional[float] = None, ) -> Awaitable[T]: call = _base.call_soon_in_loop_thread(__call, timeout=timeout) @@ -181,13 +180,13 @@ class from_sync(_base): def wait_for_call_in_loop_thread( __call: Union[ Callable[[], Awaitable[T]], - Union[Callable[[], Awaitable[T]], Call[Awaitable[T]]], + Call[T], ], timeout: Optional[float] = None, - done_callbacks: Optional[Iterable[Call]] = None, - contexts: Optional[Iterable[ContextManager]] = None, - ) -> Awaitable[T]: - call = 
_cast_to_call(__call) + done_callbacks: Optional[Iterable[Call[T]]] = None, + contexts: Optional[Iterable[AbstractContextManager[Any]]] = None, + ) -> T: + call = cast_to_call(__call) waiter = SyncWaiter(call) _base.call_soon_in_loop_thread(call, timeout=timeout) for callback in done_callbacks or []: @@ -202,9 +201,9 @@ def wait_for_call_in_loop_thread( def wait_for_call_in_new_thread( __call: Union[Callable[[], T], Call[T]], timeout: Optional[float] = None, - done_callbacks: Optional[Iterable[Call]] = None, - ) -> Call[T]: - call = _cast_to_call(__call) + done_callbacks: Optional[Iterable[Call[T]]] = None, + ) -> T: + call = cast_to_call(__call) waiter = SyncWaiter(call=call) for callback in done_callbacks or []: waiter.add_done_callback(callback) @@ -214,20 +213,21 @@ def wait_for_call_in_new_thread( @staticmethod def call_in_new_thread( - __call: Union[Callable[[], T], Call[T]], timeout: Optional[float] = None + __call: Union["_SyncOrAsyncCallable[[], T]", Call[T]], + timeout: Optional[float] = None, ) -> T: call = _base.call_soon_in_new_thread(__call, timeout=timeout) return call.result() @staticmethod def call_in_loop_thread( - __call: Union[Callable[[], Awaitable[T]], Call[Awaitable[T]]], + __call: Union["_SyncOrAsyncCallable[[], T]", Call[T]], timeout: Optional[float] = None, - ) -> T: + ) -> Union[Awaitable[T], T]: if in_global_loop(): # Avoid deadlock where the call is submitted to the loop then the loop is # blocked waiting for the call - call = _cast_to_call(__call) + call = cast_to_call(__call) return call() call = _base.call_soon_in_loop_thread(__call, timeout=timeout) diff --git a/src/prefect/_internal/concurrency/calls.py b/src/prefect/_internal/concurrency/calls.py index 5e8b675bd23e..4a715ac90491 100644 --- a/src/prefect/_internal/concurrency/calls.py +++ b/src/prefect/_internal/concurrency/calls.py @@ -12,18 +12,20 @@ import inspect import threading import weakref +from collections.abc import Awaitable, Generator from concurrent.futures._base import ( CANCELLED, CANCELLED_AND_NOTIFIED, FINISHED, RUNNING, ) -from typing import Any, Awaitable, Callable, Dict, Generic, Optional, Tuple, TypeVar +from typing import TYPE_CHECKING, Any, Callable, Generic, Optional, Union -from typing_extensions import ParamSpec +from typing_extensions import ParamSpec, Self, TypeAlias, TypeVar, TypeVarTuple from prefect._internal.concurrency import logger from prefect._internal.concurrency.cancellation import ( + AsyncCancelScope, CancelledError, cancel_async_at, cancel_sync_at, @@ -31,9 +33,13 @@ ) from prefect._internal.concurrency.event_loop import get_running_loop -T = TypeVar("T") +T = TypeVar("T", infer_variance=True) +Ts = TypeVarTuple("Ts") P = ParamSpec("P") +_SyncOrAsyncCallable: TypeAlias = Callable[P, Union[T, Awaitable[T]]] + + # Tracks the current call being executed. Note that storing the `Call` # object for an async call directly in the contextvar appears to create a # memory leak, despite the fact that we `reset` when leaving the context @@ -41,16 +47,16 @@ # we already have strong references to the `Call` objects in other places # and b) this is used for performance optimizations where we have fallback # behavior if this weakref is garbage collected. A fix for issue #10952. 
-current_call: contextvars.ContextVar["weakref.ref[Call]"] = ( # novm +current_call: contextvars.ContextVar["weakref.ref[Call[Any]]"] = ( # novm contextvars.ContextVar("current_call") ) # Create a strong reference to tasks to prevent destruction during execution errors -_ASYNC_TASK_REFS = set() +_ASYNC_TASK_REFS: set[asyncio.Task[None]] = set() @contextlib.contextmanager -def set_current_call(call: "Call"): +def set_current_call(call: "Call[Any]") -> Generator[None, Any, None]: token = current_call.set(weakref.ref(call)) try: yield @@ -58,7 +64,7 @@ def set_current_call(call: "Call"): current_call.reset(token) -class Future(concurrent.futures.Future): +class Future(concurrent.futures.Future[T]): """ Extension of `concurrent.futures.Future` with support for cancellation of running futures. @@ -70,7 +76,7 @@ def __init__(self, name: Optional[str] = None) -> None: super().__init__() self._cancel_scope = None self._deadline = None - self._cancel_callbacks = [] + self._cancel_callbacks: list[Callable[[], None]] = [] self._name = name self._timed_out = False @@ -79,7 +85,7 @@ def set_running_or_notify_cancel(self, timeout: Optional[float] = None): return super().set_running_or_notify_cancel() @contextlib.contextmanager - def enforce_async_deadline(self): + def enforce_async_deadline(self) -> Generator[AsyncCancelScope]: with cancel_async_at(self._deadline, name=self._name) as self._cancel_scope: for callback in self._cancel_callbacks: self._cancel_scope.add_cancel_callback(callback) @@ -92,7 +98,7 @@ def enforce_sync_deadline(self): self._cancel_scope.add_cancel_callback(callback) yield self._cancel_scope - def add_cancel_callback(self, callback: Callable[[], None]): + def add_cancel_callback(self, callback: Callable[[], Any]) -> None: """ Add a callback to be enforced on cancellation. @@ -113,7 +119,7 @@ def timedout(self) -> bool: with self._condition: return self._timed_out - def cancel(self): + def cancel(self) -> bool: """Cancel the future if possible. Returns True if the future was cancelled, False otherwise. A future cannot be @@ -147,7 +153,12 @@ def cancel(self): self._invoke_callbacks() return True - def result(self, timeout=None): + if TYPE_CHECKING: + + def __get_result(self) -> T: + ... + + def result(self, timeout: Optional[float] = None) -> T: """Return the result of the call that the future represents. Args: @@ -186,7 +197,9 @@ def result(self, timeout=None): # Break a reference cycle with the exception in self._exception self = None - def _invoke_callbacks(self): + _done_callbacks: list[Callable[[Self], object]] + + def _invoke_callbacks(self) -> None: """ Invoke our done callbacks and clean up cancel scopes and cancel callbacks. Fixes a memory leak that hung on to Call objects, @@ -206,7 +219,7 @@ def _invoke_callbacks(self): self._cancel_callbacks = [] if self._cancel_scope: - self._cancel_scope._callbacks = [] + setattr(self._cancel_scope, "_callbacks", []) self._cancel_scope = None @@ -216,16 +229,21 @@ class Call(Generic[T]): A deferred function call. """ - future: Future - fn: Callable[..., T] - args: Tuple - kwargs: Dict[str, Any] + future: Future[T] + fn: "_SyncOrAsyncCallable[..., T]" + args: tuple[Any, ...] 
+ kwargs: dict[str, Any] context: contextvars.Context - timeout: float + timeout: Optional[float] runner: Optional["Portal"] = None @classmethod - def new(cls, __fn: Callable[P, T], *args: P.args, **kwargs: P.kwargs) -> "Call[T]": + def new( + cls, + __fn: _SyncOrAsyncCallable[P, T], + *args: P.args, + **kwargs: P.kwargs, + ) -> Self: return cls( future=Future(name=getattr(__fn, "__name__", str(__fn))), fn=__fn, @@ -255,7 +273,7 @@ def set_runner(self, portal: "Portal") -> None: self.runner = portal - def run(self) -> Optional[Awaitable[T]]: + def run(self) -> Optional[Awaitable[None]]: """ Execute the call and place the result on the future. @@ -337,7 +355,7 @@ def timedout(self) -> bool: def cancel(self) -> bool: return self.future.cancel() - def _run_sync(self): + def _run_sync(self) -> Optional[Awaitable[T]]: cancel_scope = None try: with set_current_call(self): @@ -348,8 +366,8 @@ def _run_sync(self): # Forget this call's arguments in order to free up any memory # that may be referenced by them; after a call has happened, # there's no need to keep a reference to them - self.args = None - self.kwargs = None + with contextlib.suppress(AttributeError): + del self.args, self.kwargs # Return the coroutine for async execution if inspect.isawaitable(result): @@ -357,8 +375,10 @@ def _run_sync(self): except CancelledError: # Report cancellation + if TYPE_CHECKING: + assert cancel_scope is not None if cancel_scope.timedout(): - self.future._timed_out = True + setattr(self.future, "_timed_out", True) self.future.cancel() elif cancel_scope.cancelled(): self.future.cancel() @@ -374,8 +394,8 @@ def _run_sync(self): self.future.set_result(result) # noqa: F821 logger.debug("Finished call %r", self) # noqa: F821 - async def _run_async(self, coro): - cancel_scope = None + async def _run_async(self, coro: Awaitable[T]) -> None: + cancel_scope = result = None try: with set_current_call(self): with self.future.enforce_async_deadline() as cancel_scope: @@ -385,12 +405,14 @@ async def _run_async(self, coro): # Forget this call's arguments in order to free up any memory # that may be referenced by them; after a call has happened, # there's no need to keep a reference to them - self.args = None - self.kwargs = None + with contextlib.suppress(AttributeError): + del self.args, self.kwargs except CancelledError: # Report cancellation + if TYPE_CHECKING: + assert cancel_scope is not None if cancel_scope.timedout(): - self.future._timed_out = True + setattr(self.future, "_timed_out", True) self.future.cancel() elif cancel_scope.cancelled(): self.future.cancel() @@ -403,10 +425,11 @@ async def _run_async(self, coro): # Prevent reference cycle in `exc` del self else: + # F821 ignored because Ruff gets confused about the del self above. self.future.set_result(result) # noqa: F821 logger.debug("Finished async call %r", self) # noqa: F821 - def __call__(self) -> T: + def __call__(self) -> Union[T, Awaitable[T]]: """ Execute the call and return its result. 
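# Illustrative aside, not part of the diff: minimal direct use of `Call`,
# matching the `Call.new(...)` constructor and the `__call__` behavior shown
# above. In a synchronous context `call()` runs the wrapped function
# immediately and returns its result; for an async callable it would hand back
# an awaitable instead. `add` is a hypothetical function used for illustration.
from prefect._internal.concurrency.calls import Call

def add(x: int, y: int) -> int:
    return x + y

call = Call.new(add, 1, 2)  # a Call[int] holding the function and arguments
assert call() == 3          # executed here; the result also lands on call.future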
@@ -417,7 +440,7 @@ def __call__(self) -> T: # Return an awaitable if in an async context if coro is not None: - async def run_and_return_result(): + async def run_and_return_result() -> T: await coro return self.result() @@ -428,8 +451,9 @@ async def run_and_return_result(): def __repr__(self) -> str: name = getattr(self.fn, "__name__", str(self.fn)) - args, kwargs = self.args, self.kwargs - if args is None or kwargs is None: + try: + args, kwargs = self.args, self.kwargs + except AttributeError: call_args = "" else: call_args = ", ".join( @@ -450,7 +474,7 @@ class Portal(abc.ABC): """ @abc.abstractmethod - def submit(self, call: "Call") -> "Call": + def submit(self, call: "Call[T]") -> "Call[T]": """ Submit a call to execute elsewhere. diff --git a/src/prefect/_internal/concurrency/cancellation.py b/src/prefect/_internal/concurrency/cancellation.py index 372f016d1216..57564ba4c9f5 100644 --- a/src/prefect/_internal/concurrency/cancellation.py +++ b/src/prefect/_internal/concurrency/cancellation.py @@ -12,14 +12,15 @@ import sys import threading import time -from typing import Callable, Dict, Optional, Type +from types import TracebackType +from typing import TYPE_CHECKING, Any, Callable, Optional, overload import anyio from prefect._internal.concurrency import logger from prefect._internal.concurrency.event_loop import get_running_loop -_THREAD_SHIELDS: Dict[threading.Thread, "ThreadShield"] = {} +_THREAD_SHIELDS: dict[threading.Thread, "ThreadShield"] = {} _THREAD_SHIELDS_LOCK = threading.Lock() @@ -42,14 +43,14 @@ def __init__(self, owner: threading.Thread): # Uses the Python implementation of the RLock instead of the C implementation # because we need to inspect `_count` directly to check if the lock is active # which is needed for delayed exception raising during alarms - self._lock = threading._RLock() + self._lock = threading._RLock() # type: ignore # yes, we want the private version self._exception = None self._owner = owner def __enter__(self) -> None: self._lock.__enter__() - def __exit__(self, *exc_info): + def __exit__(self, *exc_info: Any): retval = self._lock.__exit__(*exc_info) # Raise the exception if this is the last shield to exit in the owner thread @@ -65,14 +66,14 @@ def __exit__(self, *exc_info): return retval - def set_exception(self, exc: Exception): + def set_exception(self, exc: BaseException): self._exception = exc def active(self) -> bool: """ Returns true if the shield is active. """ - return self._lock._count > 0 + return getattr(self._lock, "_count") > 0 class CancelledError(asyncio.CancelledError): @@ -82,7 +83,7 @@ class CancelledError(asyncio.CancelledError): pass -def _get_thread_shield(thread) -> ThreadShield: +def _get_thread_shield(thread: threading.Thread) -> ThreadShield: with _THREAD_SHIELDS_LOCK: if thread not in _THREAD_SHIELDS: _THREAD_SHIELDS[thread] = ThreadShield(thread) @@ -139,7 +140,7 @@ def __init__( self._end_time = None self._timeout = timeout self._lock = threading.Lock() - self._callbacks = [] + self._callbacks: list[Callable[[], None]] = [] super().__init__() def __enter__(self): @@ -151,7 +152,9 @@ def __enter__(self): logger.debug("%r entered", self) return self - def __exit__(self, *_): + def __exit__( + self, exc_type: type[BaseException], exc_val: Exception, exc_tb: TracebackType + ) -> Optional[bool]: with self._lock: if not self._cancelled: self._completed = True @@ -195,7 +198,7 @@ def cancel(self, throw: bool = True) -> bool: throw the cancelled error. 
""" with self._lock: - if not self.started: + if not self._started: raise RuntimeError("Scope has not been entered.") if self._completed: @@ -247,7 +250,6 @@ def __init__( self, name: Optional[str] = None, timeout: Optional[float] = None ) -> None: super().__init__(name=name, timeout=timeout) - self.loop = None def __enter__(self): self.loop = asyncio.get_running_loop() @@ -262,7 +264,9 @@ def __enter__(self): return self - def __exit__(self, exc_type, exc_val, exc_tb): + def __exit__( + self, exc_type: type[BaseException], exc_val: Exception, exc_tb: TracebackType + ) -> bool: if self._anyio_scope.cancel_called: # Mark as cancelled self.cancel(throw=False) @@ -310,7 +314,7 @@ def __init__( super().__init__(name, timeout) self.reason = reason or "null cancel scope" - def cancel(self): + def cancel(self, throw: bool = True) -> bool: logger.warning("%r cannot cancel %s.", self, self.reason) return False @@ -355,7 +359,7 @@ def __enter__(self): return self - def _sigalarm_to_error(self, *args): + def _sigalarm_to_error(self, *args: object) -> None: logger.debug("%r captured alarm raising as cancelled error", self) if self.cancel(throw=False): shield = _get_thread_shield(threading.main_thread()) @@ -365,11 +369,13 @@ def _sigalarm_to_error(self, *args): else: raise CancelledError() - def __exit__(self, *_): + def __exit__(self, *_: Any) -> Optional[bool]: retval = super().__exit__(*_) if self.timeout is not None: # Restore the previous timer + if TYPE_CHECKING: + assert self._previous_timer is not None signal.setitimer(signal.ITIMER_REAL, *self._previous_timer) # Restore the previous signal handler @@ -417,7 +423,7 @@ def __enter__(self): return self - def __exit__(self, *_): + def __exit__(self, *_: Any) -> Optional[bool]: retval = super().__exit__(*_) self._event.set() if self._enforcer_thread: @@ -466,7 +472,17 @@ def cancel(self, throw: bool = True): return True -def get_deadline(timeout: Optional[float]): +@overload +def get_deadline(timeout: float) -> float: + ... + + +@overload +def get_deadline(timeout: None) -> None: + ... + + +def get_deadline(timeout: Optional[float]) -> Optional[float]: """ Compute an deadline given a timeout. @@ -572,7 +588,7 @@ def cancel_sync_after(timeout: Optional[float], name: Optional[str] = None): yield scope -def _send_exception_to_thread(thread: threading.Thread, exc_type: Type[BaseException]): +def _send_exception_to_thread(thread: threading.Thread, exc_type: type[BaseException]): """ Raise an exception in a thread. diff --git a/src/prefect/_internal/concurrency/event_loop.py b/src/prefect/_internal/concurrency/event_loop.py index b3a8c0026056..5b690d53f01b 100644 --- a/src/prefect/_internal/concurrency/event_loop.py +++ b/src/prefect/_internal/concurrency/event_loop.py @@ -5,7 +5,8 @@ import asyncio import concurrent.futures import functools -from typing import Awaitable, Callable, Coroutine, Optional, TypeVar +from collections.abc import Coroutine +from typing import Any, Callable, Optional, TypeVar from typing_extensions import ParamSpec @@ -13,7 +14,7 @@ T = TypeVar("T") -def get_running_loop() -> Optional[asyncio.BaseEventLoop]: +def get_running_loop() -> Optional[asyncio.AbstractEventLoop]: """ Get the current running loop. @@ -30,7 +31,7 @@ def call_soon_in_loop( __fn: Callable[P, T], *args: P.args, **kwargs: P.kwargs, -) -> concurrent.futures.Future: +) -> concurrent.futures.Future[T]: """ Run a synchronous call in an event loop's thread from another thread. 
@@ -38,7 +39,7 @@ def call_soon_in_loop( Returns a future that can be used to retrieve the result of the call. """ - future = concurrent.futures.Future() + future: concurrent.futures.Future[T] = concurrent.futures.Future() @functools.wraps(__fn) def wrapper() -> None: @@ -62,8 +63,8 @@ def wrapper() -> None: async def run_coroutine_in_loop_from_async( - __loop: asyncio.AbstractEventLoop, __coro: Coroutine -) -> Awaitable: + __loop: asyncio.AbstractEventLoop, __coro: Coroutine[Any, Any, T] +) -> T: """ Run an asynchronous call in an event loop from an asynchronous context. diff --git a/src/prefect/_internal/concurrency/threads.py b/src/prefect/_internal/concurrency/threads.py index 301cc386d513..c3f5e4589428 100644 --- a/src/prefect/_internal/concurrency/threads.py +++ b/src/prefect/_internal/concurrency/threads.py @@ -8,7 +8,9 @@ import itertools import queue import threading -from typing import List, Optional +from typing import Any, Optional + +from typing_extensions import TypeVar from prefect._internal.concurrency import logger from prefect._internal.concurrency.calls import Call, Portal @@ -16,6 +18,8 @@ from prefect._internal.concurrency.event_loop import get_running_loop from prefect._internal.concurrency.primitives import Event +T = TypeVar("T", infer_variance=True) + class WorkerThread(Portal): """ @@ -33,7 +37,7 @@ def __init__( self.thread = threading.Thread( name=name, daemon=daemon, target=self._entrypoint ) - self._queue = queue.Queue() + self._queue: queue.Queue[Optional[Call[Any]]] = queue.Queue() self._run_once: bool = run_once self._started: bool = False self._submitted_count: int = 0 @@ -42,7 +46,7 @@ def __init__( if not daemon: atexit.register(self.shutdown) - def start(self): + def start(self) -> None: """ Start the worker thread. """ @@ -51,7 +55,7 @@ def start(self): self._started = True self.thread.start() - def submit(self, call: Call) -> Call: + def submit(self, call: Call[T]) -> Call[T]: if self._submitted_count > 0 and self._run_once: raise RuntimeError( "Worker configured to only run once. A call has already been submitted." @@ -83,7 +87,7 @@ def shutdown(self) -> None: def name(self) -> str: return self.thread.name - def _entrypoint(self): + def _entrypoint(self) -> None: """ Entrypoint for the thread. """ @@ -129,12 +133,14 @@ def __init__( self.thread = threading.Thread( name=name, daemon=daemon, target=self._entrypoint ) - self._ready_future = concurrent.futures.Future() + self._ready_future: concurrent.futures.Future[ + bool + ] = concurrent.futures.Future() self._loop: Optional[asyncio.AbstractEventLoop] = None self._shutdown_event: Event = Event() self._run_once: bool = run_once self._submitted_count: int = 0 - self._on_shutdown: List[Call] = [] + self._on_shutdown: list[Call[Any]] = [] self._lock = threading.Lock() if not daemon: @@ -149,7 +155,7 @@ def start(self): self.thread.start() self._ready_future.result() - def submit(self, call: Call) -> Call: + def submit(self, call: Call[T]) -> Call[T]: if self._loop is None: self.start() @@ -167,6 +173,7 @@ def submit(self, call: Call) -> Call: call.set_runner(self) # Submit the call to the event loop + assert self._loop is not None asyncio.run_coroutine_threadsafe(self._run_call(call), self._loop) self._submitted_count += 1 @@ -180,15 +187,16 @@ def shutdown(self) -> None: Shutdown the worker thread. Does not wait for the thread to stop. 
""" with self._lock: - if self._shutdown_event is None: - return - self._shutdown_event.set() @property def name(self) -> str: return self.thread.name + @property + def running(self) -> bool: + return not self._shutdown_event.is_set() + def _entrypoint(self): """ Entrypoint for the thread. @@ -218,12 +226,12 @@ async def _run_until_shutdown(self): # Empty the list to allow calls to be garbage collected. Issue #10338. self._on_shutdown = [] - async def _run_call(self, call: Call) -> None: + async def _run_call(self, call: Call[Any]) -> None: task = call.run() if task is not None: await task - def add_shutdown_call(self, call: Call) -> None: + def add_shutdown_call(self, call: Call[Any]) -> None: self._on_shutdown.append(call) def __enter__(self): @@ -235,9 +243,9 @@ def __exit__(self, *_): # the GLOBAL LOOP is used for background services, like logs -GLOBAL_LOOP: Optional[EventLoopThread] = None +_global_loop: Optional[EventLoopThread] = None # the RUN SYNC LOOP is used exclusively for running async functions in a sync context via asyncutils.run_sync -RUN_SYNC_LOOP: Optional[EventLoopThread] = None +_run_sync_loop: Optional[EventLoopThread] = None def get_global_loop() -> EventLoopThread: @@ -246,29 +254,29 @@ def get_global_loop() -> EventLoopThread: Creates a new one if there is not one available. """ - global GLOBAL_LOOP + global _global_loop # Create a new worker on first call or if the existing worker is dead if ( - GLOBAL_LOOP is None - or not GLOBAL_LOOP.thread.is_alive() - or GLOBAL_LOOP._shutdown_event.is_set() + _global_loop is None + or not _global_loop.thread.is_alive() + or not _global_loop.running ): - GLOBAL_LOOP = EventLoopThread(daemon=True, name="GlobalEventLoopThread") - GLOBAL_LOOP.start() + _global_loop = EventLoopThread(daemon=True, name="GlobalEventLoopThread") + _global_loop.start() - return GLOBAL_LOOP + return _global_loop def in_global_loop() -> bool: """ Check if called from the global loop. """ - if GLOBAL_LOOP is None: + if _global_loop is None: # Avoid creating a global loop if there isn't one return False - return get_global_loop()._loop == get_running_loop() + return getattr(get_global_loop(), "_loop") == get_running_loop() def get_run_sync_loop() -> EventLoopThread: @@ -277,29 +285,29 @@ def get_run_sync_loop() -> EventLoopThread: Creates a new one if there is not one available. """ - global RUN_SYNC_LOOP + global _run_sync_loop # Create a new worker on first call or if the existing worker is dead if ( - RUN_SYNC_LOOP is None - or not RUN_SYNC_LOOP.thread.is_alive() - or RUN_SYNC_LOOP._shutdown_event.is_set() + _run_sync_loop is None + or not _run_sync_loop.thread.is_alive() + or not _run_sync_loop.running ): - RUN_SYNC_LOOP = EventLoopThread(daemon=True, name="RunSyncEventLoopThread") - RUN_SYNC_LOOP.start() + _run_sync_loop = EventLoopThread(daemon=True, name="RunSyncEventLoopThread") + _run_sync_loop.start() - return RUN_SYNC_LOOP + return _run_sync_loop def in_run_sync_loop() -> bool: """ Check if called from the global loop. 
""" - if RUN_SYNC_LOOP is None: + if _run_sync_loop is None: # Avoid creating a global loop if there isn't one return False - return get_run_sync_loop()._loop == get_running_loop() + return getattr(get_run_sync_loop(), "_loop") == get_running_loop() def wait_for_global_loop_exit(timeout: Optional[float] = None) -> None: diff --git a/src/prefect/_internal/concurrency/waiters.py b/src/prefect/_internal/concurrency/waiters.py index 3925e3b25691..07522992100d 100644 --- a/src/prefect/_internal/concurrency/waiters.py +++ b/src/prefect/_internal/concurrency/waiters.py @@ -10,7 +10,8 @@ import queue import threading from collections import deque -from typing import Awaitable, Generic, List, Optional, TypeVar, Union +from collections.abc import Awaitable +from typing import TYPE_CHECKING, Any, Generic, Optional, TypeVar from weakref import WeakKeyDictionary import anyio @@ -24,12 +25,12 @@ # Waiters are stored in a stack for each thread -_WAITERS_BY_THREAD: "WeakKeyDictionary[threading.Thread, deque[Waiter]]" = ( +_WAITERS_BY_THREAD: "WeakKeyDictionary[threading.Thread, deque[Waiter[Any]]]" = ( WeakKeyDictionary() ) -def add_waiter_for_thread(waiter: "Waiter", thread: threading.Thread): +def add_waiter_for_thread(waiter: "Waiter[Any]", thread: threading.Thread) -> None: """ Add a waiter for a thread. """ @@ -62,7 +63,7 @@ def call_is_done(self) -> bool: return self._call.future.done() @abc.abstractmethod - def wait(self) -> Union[Awaitable[None], None]: + def wait(self) -> T: """ Wait for the call to finish. @@ -71,7 +72,7 @@ def wait(self) -> Union[Awaitable[None], None]: raise NotImplementedError() @abc.abstractmethod - def add_done_callback(self, callback: Call) -> Call: + def add_done_callback(self, callback: Call[Any]) -> None: """ Schedule a call to run when the waiter is done waiting. @@ -91,11 +92,11 @@ class SyncWaiter(Waiter[T]): def __init__(self, call: Call[T]) -> None: super().__init__(call=call) - self._queue: queue.Queue = queue.Queue() - self._done_callbacks = [] + self._queue: queue.Queue[Optional[Call[T]]] = queue.Queue() + self._done_callbacks: list[Call[Any]] = [] self._done_event = threading.Event() - def submit(self, call: Call): + def submit(self, call: Call[T]) -> Call[T]: """ Submit a callback to execute while waiting. 
""" @@ -109,7 +110,7 @@ def submit(self, call: Call): def _handle_waiting_callbacks(self): logger.debug("Waiter %r watching for callbacks", self) while True: - callback: Call = self._queue.get() + callback = self._queue.get() if callback is None: break @@ -130,13 +131,13 @@ def _handle_done_callbacks(self): if callback: callback.run() - def add_done_callback(self, callback: Call): + def add_done_callback(self, callback: Call[Any]) -> None: if self._done_event.is_set(): raise RuntimeError("Cannot add done callbacks to done waiters.") else: self._done_callbacks.append(callback) - def wait(self) -> T: + def wait(self) -> Call[T]: # Stop watching for work once the future is done self._call.future.add_done_callback(lambda _: self._queue.put_nowait(None)) self._call.future.add_done_callback(lambda _: self._done_event.set()) @@ -159,13 +160,13 @@ def __init__(self, call: Call[T]) -> None: # Delay instantiating loop and queue as there may not be a loop present yet self._loop: Optional[asyncio.AbstractEventLoop] = None - self._queue: Optional[asyncio.Queue] = None - self._early_submissions: List[Call] = [] - self._done_callbacks = [] + self._queue: Optional[asyncio.Queue[Optional[Call[T]]]] = None + self._early_submissions: list[Call[T]] = [] + self._done_callbacks: list[Call[Any]] = [] self._done_event = Event() self._done_waiting = False - def submit(self, call: Call): + def submit(self, call: Call[T]) -> Call[T]: """ Submit a callback to execute while waiting. """ @@ -180,11 +181,15 @@ def submit(self, call: Call): return call # We must put items in the queue from the event loop that owns it + if TYPE_CHECKING: + assert self._loop is not None call_soon_in_loop(self._loop, self._queue.put_nowait, call) return call - def _resubmit_early_submissions(self): - assert self._queue + def _resubmit_early_submissions(self) -> None: + if TYPE_CHECKING: + assert self._queue is not None + assert self._loop is not None for call in self._early_submissions: # We must put items in the queue from the event loop that owns it call_soon_in_loop(self._loop, self._queue.put_nowait, call) @@ -192,11 +197,11 @@ def _resubmit_early_submissions(self): async def _handle_waiting_callbacks(self): logger.debug("Waiter %r watching for callbacks", self) - tasks = [] + tasks: list[Awaitable[None]] = [] try: while True: - callback: Call = await self._queue.get() + callback = await self._queue.get() if callback is None: break @@ -228,12 +233,12 @@ async def _handle_done_callbacks(self): with anyio.CancelScope(shield=True): await self._run_done_callback(callback) - async def _run_done_callback(self, callback: Call): + async def _run_done_callback(self, callback: Call[Any]) -> None: coro = callback.run() if coro: await coro - def add_done_callback(self, callback: Call): + def add_done_callback(self, callback: Call[Any]) -> None: if self._done_event.is_set(): raise RuntimeError("Cannot add done callbacks to done waiters.") else: @@ -243,6 +248,8 @@ def _signal_stop_waiting(self): # Only send a `None` to the queue if the waiter is still blocked reading from # the queue. Otherwise, it's possible that the event loop is stopped. 
if not self._done_waiting: + assert self._loop is not None + assert self._queue is not None call_soon_in_loop(self._loop, self._queue.put_nowait, None) async def wait(self) -> Call[T]: diff --git a/src/prefect/_internal/pydantic/v1_schema.py b/src/prefect/_internal/pydantic/v1_schema.py index b94b0fc5973a..2fc409ae2625 100644 --- a/src/prefect/_internal/pydantic/v1_schema.py +++ b/src/prefect/_internal/pydantic/v1_schema.py @@ -6,7 +6,7 @@ from pydantic.v1 import BaseModel as V1BaseModel -def is_v1_model(v) -> bool: +def is_v1_model(v: typing.Any) -> bool: with warnings.catch_warnings(): warnings.filterwarnings( "ignore", category=pydantic.warnings.PydanticDeprecatedSince20 @@ -23,7 +23,7 @@ def is_v1_model(v) -> bool: return False -def is_v1_type(v) -> bool: +def is_v1_type(v: typing.Any) -> bool: if is_v1_model(v): return True diff --git a/src/prefect/_internal/pydantic/v2_schema.py b/src/prefect/_internal/pydantic/v2_schema.py index c9d7fa08b5e1..da22799c742b 100644 --- a/src/prefect/_internal/pydantic/v2_schema.py +++ b/src/prefect/_internal/pydantic/v2_schema.py @@ -16,7 +16,7 @@ from prefect._internal.pydantic.schemas import GenerateEmptySchemaForUserClasses -def is_v2_model(v) -> bool: +def is_v2_model(v: t.Any) -> bool: if isinstance(v, V2BaseModel): return True try: @@ -28,7 +28,7 @@ def is_v2_model(v) -> bool: return False -def is_v2_type(v) -> bool: +def is_v2_type(v: t.Any) -> bool: if is_v2_model(v): return True @@ -56,9 +56,9 @@ def process_v2_params( param: inspect.Parameter, *, position: int, - docstrings: t.Dict[str, str], - aliases: t.Dict, -) -> t.Tuple[str, t.Any, "pydantic.Field"]: + docstrings: dict[str, str], + aliases: dict[str, str], +) -> tuple[str, t.Any, t.Any]: """ Generate a sanitized name, type, and pydantic.Field for a given parameter. @@ -72,7 +72,7 @@ def process_v2_params( else: name = param.name - type_ = t.Any if param.annotation is inspect._empty else param.annotation + type_ = t.Any if param.annotation is inspect.Parameter.empty else param.annotation # Replace pendulum type annotations with our own so that they are pydantic compatible if type_ == pendulum.DateTime: @@ -95,12 +95,13 @@ def process_v2_params( def create_v2_schema( name_: str, model_cfg: t.Optional[ConfigDict] = None, - model_base: t.Optional[t.Type[V2BaseModel]] = None, - **model_fields, -): + model_base: t.Optional[type[V2BaseModel]] = None, + model_fields: t.Optional[dict[str, t.Any]] = None, +) -> dict[str, t.Any]: """ Create a pydantic v2 model and craft a v1 compatible schema from it. """ + model_fields = model_fields or {} model = create_model( name_, __config__=model_cfg, __base__=model_base, **model_fields ) diff --git a/src/prefect/_internal/pydantic/v2_validated_func.py b/src/prefect/_internal/pydantic/v2_validated_func.py index 382f129f42d9..fed37832e38c 100644 --- a/src/prefect/_internal/pydantic/v2_validated_func.py +++ b/src/prefect/_internal/pydantic/v2_validated_func.py @@ -6,16 +6,17 @@ arguments. 
""" -from typing import TYPE_CHECKING, Any, Dict, List, Optional, Type, Union +from typing import TYPE_CHECKING, Any, ClassVar, Optional, Union # importing directly from v2 to be able to create a v2 model from pydantic import BaseModel, ConfigDict, create_model, field_validator from pydantic.v1.decorator import ValidatedFunction from pydantic.v1.errors import ConfigError from pydantic.v1.utils import to_camel +from typing_extensions import TypeAlias if TYPE_CHECKING: - ConfigType = Union[None, Type[Any], Dict[str, Any]] + ConfigType: TypeAlias = Union[None, type[Any], dict[str, Any]] V_POSITIONAL_ONLY_NAME = "v__positional_only" V_DUPLICATE_KWARGS = "v__duplicate_kwargs" @@ -24,13 +25,17 @@ class V2ValidatedFunction(ValidatedFunction): def create_model( self, - fields: Dict[str, Any], + fields: dict[str, Any], takes_args: bool, takes_kwargs: bool, - config: ConfigDict, + config: "ConfigType", ) -> None: pos_args = len(self.arg_mapping) + config = {} if config is None else config + if not isinstance(config, dict): + raise TypeError(f"config must be None or a dict, got {type(config)}") + if config.get("fields") or config.get("alias_generator"): raise ConfigError( 'Setting the "fields" and "alias_generator" property on custom Config' @@ -42,11 +47,11 @@ def create_model( # This is the key change -- inheriting the BaseModel class from v2 class DecoratorBaseModel(BaseModel): - model_config = config + model_config: ClassVar[ConfigDict] = ConfigDict(**config) @field_validator(self.v_args_name, check_fields=False) @classmethod - def check_args(cls, v: Optional[List[Any]]) -> Optional[List[Any]]: + def check_args(cls, v: Optional[list[Any]]) -> Optional[list[Any]]: if takes_args or v is None: return v @@ -58,8 +63,8 @@ def check_args(cls, v: Optional[List[Any]]) -> Optional[List[Any]]: @field_validator(self.v_kwargs_name, check_fields=False) @classmethod def check_kwargs( - cls, v: Optional[Dict[str, Any]] - ) -> Optional[Dict[str, Any]]: + cls, v: Optional[dict[str, Any]] + ) -> Optional[dict[str, Any]]: if takes_kwargs or v is None: return v @@ -69,7 +74,7 @@ def check_kwargs( @field_validator(V_POSITIONAL_ONLY_NAME, check_fields=False) @classmethod - def check_positional_only(cls, v: Optional[List[str]]) -> None: + def check_positional_only(cls, v: Optional[list[str]]) -> None: if v is None: return @@ -82,7 +87,7 @@ def check_positional_only(cls, v: Optional[List[str]]) -> None: @field_validator(V_DUPLICATE_KWARGS, check_fields=False) @classmethod - def check_duplicate_kwargs(cls, v: Optional[List[str]]) -> None: + def check_duplicate_kwargs(cls, v: Optional[list[str]]) -> None: if v is None: return diff --git a/src/prefect/_internal/retries.py b/src/prefect/_internal/retries.py index 097305e3e7c2..08cc21e9a252 100644 --- a/src/prefect/_internal/retries.py +++ b/src/prefect/_internal/retries.py @@ -1,10 +1,15 @@ import asyncio from functools import wraps -from typing import Any, Callable, Tuple, Type +from typing import Callable, Optional, Tuple, Type, TypeVar + +from typing_extensions import ParamSpec from prefect._internal._logging import logger from prefect.utilities.math import clamped_poisson_interval +P = ParamSpec("P") +R = TypeVar("R") + def exponential_backoff_with_jitter( attempt: int, base_delay: float, max_delay: float @@ -21,7 +26,8 @@ def retry_async_fn( base_delay: float = 1, max_delay: float = 10, retry_on_exceptions: Tuple[Type[Exception], ...] 
= (Exception,), -): + operation_name: Optional[str] = None, +) -> Callable[[Callable[P, R]], Callable[P, R]]: """A decorator for retrying an async function. Args: @@ -33,23 +39,26 @@ def retry_async_fn( max_delay: The maximum delay to use for the last attempt. retry_on_exceptions: A tuple of exception types to retry on. Defaults to retrying on all exceptions. + operation_name: Optional name to use for logging the operation instead of + the function name. If None, uses the function name. """ - def decorator(func): + def decorator(func: Callable[P, R]) -> Callable[P, R]: @wraps(func) - async def wrapper(*args: Any, **kwargs: Any) -> Any: + async def wrapper(*args: P.args, **kwargs: P.kwargs) -> R: + name = operation_name or func.__name__ for attempt in range(max_attempts): try: return await func(*args, **kwargs) except retry_on_exceptions as e: if attempt == max_attempts - 1: logger.exception( - f"Function {func.__name__!r} failed after {max_attempts} attempts" + f"Function {name!r} failed after {max_attempts} attempts" ) raise delay = backoff_strategy(attempt, base_delay, max_delay) logger.warning( - f"Attempt {attempt + 1} of function {func.__name__!r} failed with {type(e).__name__}. " + f"Attempt {attempt + 1} of function {name!r} failed with {type(e).__name__}: {str(e)}. " f"Retrying in {delay:.2f} seconds..." ) await asyncio.sleep(delay) diff --git a/src/prefect/_internal/schemas/bases.py b/src/prefect/_internal/schemas/bases.py index 1c41ad033e0d..62804f8b478a 100644 --- a/src/prefect/_internal/schemas/bases.py +++ b/src/prefect/_internal/schemas/bases.py @@ -4,7 +4,7 @@ import datetime import os -from typing import Any, ClassVar, Optional, Set, TypeVar +from typing import Any, ClassVar, Generator, Optional, Set, TypeVar, cast from uuid import UUID, uuid4 import pendulum @@ -13,9 +13,10 @@ ConfigDict, Field, ) -from pydantic_extra_types.pendulum_dt import DateTime from typing_extensions import Self +from prefect.types import DateTime + T = TypeVar("T") @@ -33,7 +34,7 @@ class PrefectBaseModel(BaseModel): _reset_fields: ClassVar[Set[str]] = set() - model_config = ConfigDict( + model_config: ClassVar[ConfigDict] = ConfigDict( ser_json_timedelta="float", defer_build=True, extra=( @@ -58,7 +59,7 @@ def __eq__(self, other: Any) -> bool: else: return copy_dict == other - def __rich_repr__(self): + def __rich_repr__(self) -> Generator[tuple[str, Any, Any], None, None]: # Display all of the fields in the model if they differ from the default value for name, field in self.model_fields.items(): value = getattr(self, name) @@ -71,9 +72,11 @@ def __rich_repr__(self): and name == "timestamp" and value ): - value = pendulum.instance(value).isoformat() + value = cast(pendulum.DateTime, pendulum.instance(value)).isoformat() elif isinstance(field.annotation, datetime.datetime) and value: - value = pendulum.instance(value).diff_for_humans() + value = cast( + pendulum.DateTime, pendulum.instance(value) + ).diff_for_humans() yield name, value, field.get_default() @@ -113,11 +116,11 @@ class ObjectBaseModel(IDBaseModel): """ _reset_fields: ClassVar[Set[str]] = {"id", "created", "updated"} - model_config = ConfigDict(from_attributes=True) + model_config: ClassVar[ConfigDict] = ConfigDict(from_attributes=True) created: Optional[DateTime] = Field(default=None, repr=False) updated: Optional[DateTime] = Field(default=None, repr=False) class ActionBaseModel(PrefectBaseModel): - model_config: ConfigDict = ConfigDict(extra="forbid") + model_config: ClassVar[ConfigDict] = ConfigDict(extra="forbid") diff --git 
a/src/prefect/_internal/schemas/validators.py b/src/prefect/_internal/schemas/validators.py index 9bda7fc5edff..52380cce951c 100644 --- a/src/prefect/_internal/schemas/validators.py +++ b/src/prefect/_internal/schemas/validators.py @@ -13,13 +13,14 @@ from copy import copy from pathlib import Path from typing import TYPE_CHECKING, Any, Dict, List, Mapping, Optional, Tuple, Union +from uuid import UUID import jsonschema import pendulum import yaml -from pydantic_extra_types.pendulum_dt import DateTime from prefect.exceptions import InvalidRepositoryURLError +from prefect.types import DateTime from prefect.utilities.collections import isiterable from prefect.utilities.dockerutils import get_prefect_image_name from prefect.utilities.filesystem import relative_path_to_current_platform @@ -32,6 +33,7 @@ if TYPE_CHECKING: from prefect.blocks.core import Block + from prefect.serializers import Serializer from prefect.utilities.callables import ParameterSchema @@ -577,7 +579,7 @@ def validate_picklelib_and_modules(values: dict) -> dict: return values -def validate_dump_kwargs(value: dict) -> dict: +def validate_dump_kwargs(value: dict[str, Any]) -> dict[str, Any]: # `default` is set by `object_encoder`. A user provided callable would make this # class unserializable anyway. if "default" in value: @@ -585,7 +587,7 @@ def validate_dump_kwargs(value: dict) -> dict: return value -def validate_load_kwargs(value: dict) -> dict: +def validate_load_kwargs(value: dict[str, Any]) -> dict[str, Any]: # `object_hook` is set by `object_decoder`. A user provided callable would make # this class unserializable anyway. if "object_hook" in value: @@ -595,7 +597,7 @@ def validate_load_kwargs(value: dict) -> dict: return value -def cast_type_names_to_serializers(value): +def cast_type_names_to_serializers(value: Union[str, "Serializer"]) -> "Serializer": from prefect.serializers import Serializer if isinstance(value, str): @@ -653,7 +655,7 @@ def validate_message_template_variables(v: Optional[str]) -> Optional[str]: return v -def validate_default_queue_id_not_none(v: Optional[str]) -> Optional[str]: +def validate_default_queue_id_not_none(v: Optional[UUID]) -> UUID: if v is None: raise ValueError( "`default_queue_id` is a required field. 
If you are " diff --git a/src/prefect/automations.py b/src/prefect/automations.py index 86799ec59cd6..a37c5a3a45dd 100644 --- a/src/prefect/automations.py +++ b/src/prefect/automations.py @@ -1,10 +1,10 @@ -from typing import Optional +from typing import Optional, Type from uuid import UUID from pydantic import Field from typing_extensions import Self -from prefect.client.utilities import get_or_create_client +from prefect.client.orchestration import get_client from prefect.events.actions import ( CallWebhook, CancelFlowRun, @@ -99,10 +99,10 @@ async def create(self: Self) -> Self: ) created_automation = auto_to_create.create() """ - client, _ = get_or_create_client() - automation = AutomationCore(**self.model_dump(exclude={"id"})) - self.id = await client.create_automation(automation=automation) - return self + async with get_client() as client: + automation = AutomationCore(**self.model_dump(exclude={"id"})) + self.id = await client.create_automation(automation=automation) + return self @sync_compatible async def update(self: Self): @@ -112,15 +112,16 @@ async def update(self: Self): auto.name = "new name" auto.update() """ - - client, _ = get_or_create_client() - automation = AutomationCore(**self.model_dump(exclude={"id", "owner_resource"})) - await client.update_automation(automation_id=self.id, automation=automation) + async with get_client() as client: + automation = AutomationCore( + **self.model_dump(exclude={"id", "owner_resource"}) + ) + await client.update_automation(automation_id=self.id, automation=automation) @classmethod @sync_compatible async def read( - cls: Self, id: Optional[UUID] = None, name: Optional[str] = None + cls: Type[Self], id: Optional[UUID] = None, name: Optional[str] = None ) -> Self: """ Read an automation by ID or name. 
@@ -134,20 +135,25 @@ async def read( raise ValueError("Only one of id or name can be provided") if not id and not name: raise ValueError("One of id or name must be provided") - client, _ = get_or_create_client() - if id: - try: - automation = await client.read_automation(automation_id=id) - except PrefectHTTPStatusError as exc: - if exc.response.status_code == 404: + async with get_client() as client: + if id: + try: + automation = await client.read_automation(automation_id=id) + except PrefectHTTPStatusError as exc: + if exc.response.status_code == 404: + raise ValueError(f"Automation with ID {id!r} not found") + raise + if automation is None: raise ValueError(f"Automation with ID {id!r} not found") - return Automation(**automation.model_dump()) - else: - automation = await client.read_automations_by_name(name=name) - if len(automation) > 0: - return Automation(**automation[0].model_dump()) if automation else None + return Automation(**automation.model_dump()) else: - raise ValueError(f"Automation with name {name!r} not found") + automation = await client.read_automations_by_name(name=name) + if len(automation) > 0: + return ( + Automation(**automation[0].model_dump()) if automation else None + ) + else: + raise ValueError(f"Automation with name {name!r} not found") @sync_compatible async def delete(self: Self) -> bool: @@ -155,14 +161,14 @@ async def delete(self: Self) -> bool: auto = Automation.read(id = 123) auto.delete() """ - try: - client, _ = get_or_create_client() - await client.delete_automation(self.id) - return True - except PrefectHTTPStatusError as exc: - if exc.response.status_code == 404: - return False - raise + async with get_client() as client: + try: + await client.delete_automation(self.id) + return True + except PrefectHTTPStatusError as exc: + if exc.response.status_code == 404: + return False + raise @sync_compatible async def disable(self: Self) -> bool: @@ -171,14 +177,14 @@ async def disable(self: Self) -> bool: auto = Automation.read(id = 123) auto.disable() """ - try: - client, _ = get_or_create_client() - await client.pause_automation(self.id) - return True - except PrefectHTTPStatusError as exc: - if exc.response.status_code == 404: - return False - raise + async with get_client() as client: + try: + await client.pause_automation(self.id) + return True + except PrefectHTTPStatusError as exc: + if exc.response.status_code == 404: + return False + raise @sync_compatible async def enable(self: Self) -> bool: @@ -187,11 +193,11 @@ async def enable(self: Self) -> bool: auto = Automation.read(id = 123) auto.enable() """ - try: - client, _ = get_or_create_client() - await client.resume_automation("asd") - return True - except PrefectHTTPStatusError as exc: - if exc.response.status_code == 404: - return False - raise + async with get_client() as client: + try: + await client.resume_automation(self.id) + return True + except PrefectHTTPStatusError as exc: + if exc.response.status_code == 404: + return False + raise diff --git a/src/prefect/blocks/abstract.py b/src/prefect/blocks/abstract.py index c35a8bf5a462..cf30d546a1d1 100644 --- a/src/prefect/blocks/abstract.py +++ b/src/prefect/blocks/abstract.py @@ -1,6 +1,6 @@ from abc import ABC, abstractmethod from contextlib import contextmanager -from logging import Logger +from logging import Logger, LoggerAdapter from pathlib import Path from typing import ( Any, @@ -15,7 +15,7 @@ Union, ) -from typing_extensions import Self +from typing_extensions import Self, TypeAlias from prefect.blocks.core import Block from 
prefect.exceptions import MissingContextError @@ -23,6 +23,8 @@ T = TypeVar("T") +LoggerOrAdapter: TypeAlias = Union[Logger, LoggerAdapter] + class CredentialsBlock(Block, ABC): """ @@ -34,7 +36,7 @@ class CredentialsBlock(Block, ABC): """ @property - def logger(self) -> Logger: + def logger(self) -> LoggerOrAdapter: """ Returns a logger based on whether the CredentialsBlock is called from within a flow or task run context. @@ -73,10 +75,10 @@ class NotificationBlock(Block, ABC): """ _block_schema_capabilities = ["notify"] - _events_excluded_methods = Block._events_excluded_methods.default + ["notify"] + _events_excluded_methods = Block._events_excluded_methods + ["notify"] @property - def logger(self) -> Logger: + def logger(self) -> LoggerOrAdapter: """ Returns a logger based on whether the NotificationBlock is called from within a flow or task run context. @@ -123,7 +125,7 @@ class JobRun(ABC, Generic[T]): # not a block """ @property - def logger(self) -> Logger: + def logger(self) -> LoggerOrAdapter: """ Returns a logger based on whether the JobRun is called from within a flow or task run context. @@ -158,7 +160,7 @@ class JobBlock(Block, ABC): """ @property - def logger(self) -> Logger: + def logger(self) -> LoggerOrAdapter: """ Returns a logger based on whether the JobBlock is called from within a flow or task run context. @@ -202,7 +204,7 @@ class DatabaseBlock(Block, ABC): """ @property - def logger(self) -> Logger: + def logger(self) -> LoggerOrAdapter: """ Returns a logger based on whether the DatabaseBlock is called from within a flow or task run context. @@ -337,7 +339,7 @@ class ObjectStorageBlock(Block, ABC): """ @property - def logger(self) -> Logger: + def logger(self) -> LoggerOrAdapter: """ Returns a logger based on whether the ObjectStorageBlock is called from within a flow or task run context. @@ -469,7 +471,7 @@ class SecretBlock(Block, ABC): """ @property - def logger(self) -> Logger: + def logger(self) -> LoggerOrAdapter: """ Returns a logger based on whether the SecretBlock is called from within a flow or task run context. 
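The `LoggerOrAdapter` alias above captures that each `logger` property hands back a plain `logging.Logger` outside a run and a `logging.LoggerAdapter` when a flow or task run context supplies one. A minimal sketch of a subclass leaning on that property; the `DemoCredentials` class, its `token` field, and the `get_client` body are illustrative assumptions, not part of this change:

```python
from typing import Any

from prefect.blocks.abstract import CredentialsBlock


class DemoCredentials(CredentialsBlock):
    """Hypothetical credentials block used only to exercise the typed logger property."""

    token: str = "not-a-real-token"

    def get_client(self, **kwargs: Any) -> object:
        # `self.logger` is a Logger outside run contexts and a LoggerAdapter inside one;
        # both expose the standard logging API, so callers need no isinstance checks.
        self.logger.info("Creating client with a %d-character token", len(self.token))
        return object()


if __name__ == "__main__":
    DemoCredentials().get_client()
```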
diff --git a/src/prefect/blocks/core.py b/src/prefect/blocks/core.py index 5b0b6388bf8c..0dc4e1b9e97d 100644 --- a/src/prefect/blocks/core.py +++ b/src/prefect/blocks/core.py @@ -41,6 +41,7 @@ from typing_extensions import Literal, ParamSpec, Self, get_args import prefect.exceptions +from prefect._internal.compatibility.async_dispatch import async_dispatch from prefect.client.schemas import ( DEFAULT_BLOCK_SCHEMA_VERSION, BlockDocument, @@ -53,7 +54,7 @@ from prefect.logging.loggers import disable_logger from prefect.plugins import load_prefect_collections from prefect.types import SecretDict -from prefect.utilities.asyncutils import sync_compatible +from prefect.utilities.asyncutils import run_coro_as_sync, sync_compatible from prefect.utilities.collections import listrepr, remove_nested_keys, visit_collection from prefect.utilities.dispatch import lookup_type, register_base_type from prefect.utilities.hashing import hash_objects @@ -64,7 +65,7 @@ if TYPE_CHECKING: from pydantic.main import IncEx - from prefect.client.orchestration import PrefectClient + from prefect.client.orchestration import PrefectClient, SyncPrefectClient R = TypeVar("R") P = ParamSpec("P") @@ -280,7 +281,7 @@ class Block(BaseModel, ABC): json_schema_extra=schema_extra, ) - def __init__(self, *args, **kwargs): + def __init__(self, *args: Any, **kwargs: Any): super().__init__(*args, **kwargs) self.block_initialization() @@ -326,7 +327,9 @@ def block_initialization(self) -> None: # Exclude `save` as it uses the `sync_compatible` decorator and needs to be # decorated directly. - _events_excluded_methods = ["block_initialization", "save", "dict"] + _events_excluded_methods: ClassVar[List[str]] = PrivateAttr( + default=["block_initialization", "save", "dict"] + ) @classmethod def __dispatch_key__(cls): @@ -626,7 +629,8 @@ def _generate_code_example(cls) -> str: """Generates a default code example for the current class""" qualified_name = to_qualified_name(cls) module_str = ".".join(qualified_name.split(".")[:-1]) - class_name = cls.__name__ + origin = cls.__pydantic_generic_metadata__.get("origin") or cls + class_name = origin.__name__ block_variable_name = f'{cls.get_block_type_slug().replace("-", "_")}_block' return dedent( @@ -775,12 +779,11 @@ def _define_metadata_on_nested_blocks( ) @classmethod - @inject_client - async def _get_block_document( + async def _aget_block_document( cls, name: str, - client: Optional["PrefectClient"] = None, - ): + client: "PrefectClient", + ) -> tuple[BlockDocument, str]: if cls.__name__ == "Block": block_type_slug, block_document_name = name.split("/", 1) else: @@ -799,6 +802,30 @@ async def _get_block_document( return block_document, block_document_name + @classmethod + def _get_block_document( + cls, + name: str, + client: "SyncPrefectClient", + ) -> tuple[BlockDocument, str]: + if cls.__name__ == "Block": + block_type_slug, block_document_name = name.split("/", 1) + else: + block_type_slug = cls.get_block_type_slug() + block_document_name = name + + try: + block_document = client.read_block_document_by_name( + name=block_document_name, block_type_slug=block_type_slug + ) + except prefect.exceptions.ObjectNotFound as e: + raise ValueError( + f"Unable to find block document named {block_document_name} for block" + f" type {block_type_slug}" + ) from e + + return block_document, block_document_name + @classmethod @sync_compatible @inject_client @@ -827,9 +854,97 @@ async def _get_block_document_by_id( return block_document, block_document.name @classmethod - @sync_compatible 
@inject_client - async def load( + async def aload( + cls, + name: str, + validate: bool = True, + client: Optional["PrefectClient"] = None, + ) -> "Self": + """ + Retrieves data from the block document with the given name for the block type + that corresponds with the current class and returns an instantiated version of + the current class with the data stored in the block document. + + If a block document for a given block type is saved with a different schema + than the current class calling `aload`, a warning will be raised. + + If the current class schema is a subset of the block document schema, the block + can be loaded as normal using the default `validate = True`. + + If the current class schema is a superset of the block document schema, `aload` + must be called with `validate` set to False to prevent a validation error. In + this case, the block attributes will default to `None` and must be set manually + and saved to a new block document before the block can be used as expected. + + Args: + name: The name or slug of the block document. A block document slug is a + string with the format / + validate: If False, the block document will be loaded without Pydantic + validating the block schema. This is useful if the block schema has + changed client-side since the block document referred to by `name` was saved. + client: The client to use to load the block document. If not provided, the + default client will be injected. + + Raises: + ValueError: If the requested block document is not found. + + Returns: + An instance of the current class hydrated with the data stored in the + block document with the specified name. + + Examples: + Load from a Block subclass with a block document name: + ```python + class Custom(Block): + message: str + + Custom(message="Hello!").save("my-custom-message") + + loaded_block = await Custom.aload("my-custom-message") + ``` + + Load from Block with a block document slug: + ```python + class Custom(Block): + message: str + + Custom(message="Hello!").save("my-custom-message") + + loaded_block = await Block.aload("custom/my-custom-message") + ``` + + Migrate a block document to a new schema: + ```python + # original class + class Custom(Block): + message: str + + Custom(message="Hello!").save("my-custom-message") + + # Updated class with new required field + class Custom(Block): + message: str + number_of_ducks: int + + loaded_block = await Custom.aload("my-custom-message", validate=False) + + # Prints UserWarning about schema mismatch + + loaded_block.number_of_ducks = 42 + + loaded_block.save("my-custom-message", overwrite=True) + ``` + """ + if TYPE_CHECKING: + assert isinstance(client, PrefectClient) + block_document, _ = await cls._aget_block_document(name, client=client) + + return cls._load_from_block_document(block_document, validate=validate) + + @classmethod + @async_dispatch(aload) + def load( cls, name: str, validate: bool = True, @@ -910,9 +1025,19 @@ class Custom(Block): loaded_block.save("my-custom-message", overwrite=True) ``` """ - block_document, block_document_name = await cls._get_block_document( - name, client=client - ) + # Need to use a `PrefectClient` here to ensure `Block.load` and `Block.aload` signatures match + # TODO: replace with only sync client once all internal calls are updated to use `Block.aload` and `@async_dispatch` is removed + if client is None: + # If a client wasn't provided, we get to use a sync client + from prefect.client.orchestration import get_client + + with get_client(sync_client=True) as sync_client: + 
block_document, _ = cls._get_block_document(name, client=sync_client) + else: + # If a client was provided, reuse it, even though it's async, to avoid excessive client creation + block_document, _ = run_coro_as_sync( + cls._aget_block_document(name, client=client) + ) return cls._load_from_block_document(block_document, validate=validate) @@ -966,14 +1091,16 @@ async def load_from_ref( """ block_document = None if isinstance(ref, (str, UUID)): - block_document, _ = await cls._get_block_document_by_id(ref) + block_document, _ = await cls._get_block_document_by_id(ref, client=client) elif isinstance(ref, dict): if block_document_id := ref.get("block_document_id"): block_document, _ = await cls._get_block_document_by_id( - block_document_id + block_document_id, client=client ) elif block_document_slug := ref.get("block_document_slug"): - block_document, _ = await cls._get_block_document(block_document_slug) + block_document, _ = await cls._get_block_document( + block_document_slug, client=client + ) if not block_document: raise ValueError(f"Invalid reference format {ref!r}.") @@ -1218,7 +1345,9 @@ async def delete( name: str, client: Optional["PrefectClient"] = None, ): - block_document, block_document_name = await cls._get_block_document(name) + if TYPE_CHECKING: + assert isinstance(client, PrefectClient) + block_document, _ = await cls._aget_block_document(name, client=client) await client.delete_block_document(block_document.id) diff --git a/src/prefect/blocks/system.py b/src/prefect/blocks/system.py index 135e7f21e0f1..828fbced738a 100644 --- a/src/prefect/blocks/system.py +++ b/src/prefect/blocks/system.py @@ -9,10 +9,10 @@ field_validator, ) from pydantic import Secret as PydanticSecret -from pydantic_extra_types.pendulum_dt import DateTime as PydanticDateTime from prefect._internal.compatibility.deprecated import deprecated_class from prefect.blocks.core import Block +from prefect.types import DateTime as PydanticDateTime _SecretValueType = Union[ Annotated[StrictStr, Field(title="string")], @@ -130,6 +130,7 @@ class Secret(Block, Generic[T]): _logo_url = "https://cdn.sanity.io/images/3ugk85nk/production/c6f20e556dd16effda9df16551feecfb5822092b-48x48.png" _documentation_url = "https://docs.prefect.io/latest/develop/blocks" + _description = "A block that represents a secret value. The value stored in this block will be obfuscated when this block is viewed or edited in the UI." 
value: Union[SecretStr, PydanticSecret[T]] = Field( default=..., diff --git a/src/prefect/cache_policies.py b/src/prefect/cache_policies.py index 50717e5ceaea..746f8561cdfa 100644 --- a/src/prefect/cache_policies.py +++ b/src/prefect/cache_policies.py @@ -75,12 +75,12 @@ def compute_key( task_ctx: TaskRunContext, inputs: Dict[str, Any], flow_parameters: Dict[str, Any], - **kwargs, + **kwargs: Any, ) -> Optional[str]: raise NotImplementedError def __sub__(self, other: str) -> "CachePolicy": - if not isinstance(other, str): + if not isinstance(other, str): # type: ignore[reportUnnecessaryIsInstance] raise TypeError("Can only subtract strings from key policies.") new = Inputs(exclude=[other]) return CompoundCachePolicy(policies=[self, new]) @@ -140,7 +140,7 @@ def compute_key( task_ctx: TaskRunContext, inputs: Dict[str, Any], flow_parameters: Dict[str, Any], - **kwargs, + **kwargs: Any, ) -> Optional[str]: if self.cache_key_fn: return self.cache_key_fn(task_ctx, inputs) @@ -162,9 +162,9 @@ def compute_key( task_ctx: TaskRunContext, inputs: Dict[str, Any], flow_parameters: Dict[str, Any], - **kwargs, + **kwargs: Any, ) -> Optional[str]: - keys = [] + keys: list[str] = [] for policy in self.policies: policy_key = policy.compute_key( task_ctx=task_ctx, @@ -191,7 +191,7 @@ def compute_key( task_ctx: TaskRunContext, inputs: Dict[str, Any], flow_parameters: Dict[str, Any], - **kwargs, + **kwargs: Any, ) -> Optional[str]: return None @@ -211,7 +211,7 @@ def compute_key( task_ctx: TaskRunContext, inputs: Optional[Dict[str, Any]], flow_parameters: Optional[Dict[str, Any]], - **kwargs, + **kwargs: Any, ) -> Optional[str]: if not task_ctx: return None @@ -238,7 +238,7 @@ def compute_key( task_ctx: TaskRunContext, inputs: Dict[str, Any], flow_parameters: Dict[str, Any], - **kwargs, + **kwargs: Any, ) -> Optional[str]: if not flow_parameters: return None @@ -257,7 +257,7 @@ def compute_key( task_ctx: TaskRunContext, inputs: Dict[str, Any], flow_parameters: Dict[str, Any], - **kwargs, + **kwargs: Any, ) -> Optional[str]: if not task_ctx: return None @@ -280,7 +280,7 @@ def compute_key( task_ctx: TaskRunContext, inputs: Dict[str, Any], flow_parameters: Dict[str, Any], - **kwargs, + **kwargs: Any, ) -> Optional[str]: hashed_inputs = {} inputs = inputs or {} @@ -307,7 +307,7 @@ def compute_key( raise ValueError(msg) from exc def __sub__(self, other: str) -> "CachePolicy": - if not isinstance(other, str): + if not isinstance(other, str): # type: ignore[reportUnnecessaryIsInstance] raise TypeError("Can only subtract strings from key policies.") return Inputs(exclude=self.exclude + [other]) diff --git a/src/prefect/cli/_prompts.py b/src/prefect/cli/_prompts.py index 1d18507eb5a0..9fc542d5ac68 100644 --- a/src/prefect/cli/_prompts.py +++ b/src/prefect/cli/_prompts.py @@ -503,7 +503,7 @@ async def prompt_push_custom_docker_image( docker_registry_creds_name = f"deployment-{slugify(deployment_config['name'])}-{slugify(deployment_config['work_pool']['name'])}-registry-creds" create_new_block = False try: - await credentials_block.load(docker_registry_creds_name) + await credentials_block.aload(docker_registry_creds_name) if not confirm( ( "Would you like to use the existing Docker registry credentials" diff --git a/src/prefect/cli/cloud/__init__.py b/src/prefect/cli/cloud/__init__.py index 5bc9e9ec02c6..f698fbd58160 100644 --- a/src/prefect/cli/cloud/__init__.py +++ b/src/prefect/cli/cloud/__init__.py @@ -41,7 +41,6 @@ ) from prefect.utilities.asyncutils import run_sync_in_worker_thread from 
prefect.utilities.collections import listrepr -from prefect.utilities.compat import raise_signal from pydantic import BaseModel diff --git a/src/prefect/cli/deploy.py b/src/prefect/cli/deploy.py index 63d9e079e8df..0c5ec0851855 100644 --- a/src/prefect/cli/deploy.py +++ b/src/prefect/cli/deploy.py @@ -1028,7 +1028,7 @@ async def _generate_git_clone_pull_step( ) try: - await Secret.load(token_secret_block_name) + await Secret.aload(token_secret_block_name) if not confirm( ( "We found an existing token saved for this deployment. Would" diff --git a/src/prefect/client/__init__.py b/src/prefect/client/__init__.py index 5d2fc25a2a9f..df0bfd34dcab 100644 --- a/src/prefect/client/__init__.py +++ b/src/prefect/client/__init__.py @@ -16,6 +16,8 @@ """ +from collections.abc import Callable +from typing import Any from prefect._internal.compatibility.migration import getattr_migration -__getattr__ = getattr_migration(__name__) +__getattr__: Callable[[str], Any] = getattr_migration(__name__) diff --git a/src/prefect/client/base.py b/src/prefect/client/base.py index 4a018e2f7352..7eed8a92a497 100644 --- a/src/prefect/client/base.py +++ b/src/prefect/client/base.py @@ -4,22 +4,11 @@ import time import uuid from collections import defaultdict +from collections.abc import AsyncGenerator, Awaitable, MutableMapping from contextlib import asynccontextmanager from datetime import datetime, timezone -from typing import ( - Any, - AsyncGenerator, - Awaitable, - Callable, - Dict, - MutableMapping, - Optional, - Protocol, - Set, - Tuple, - Type, - runtime_checkable, -) +from logging import Logger +from typing import TYPE_CHECKING, Any, Callable, Optional, Protocol, runtime_checkable import anyio import httpx @@ -46,14 +35,14 @@ # Datastores for lifespan management, keys should be a tuple of thread and app # identities. -APP_LIFESPANS: Dict[Tuple[int, int], LifespanManager] = {} -APP_LIFESPANS_REF_COUNTS: Dict[Tuple[int, int], int] = {} +APP_LIFESPANS: dict[tuple[int, int], LifespanManager] = {} +APP_LIFESPANS_REF_COUNTS: dict[tuple[int, int], int] = {} # Blocks concurrent access to the above dicts per thread. The index should be the thread # identity. -APP_LIFESPANS_LOCKS: Dict[int, anyio.Lock] = defaultdict(anyio.Lock) +APP_LIFESPANS_LOCKS: dict[int, anyio.Lock] = defaultdict(anyio.Lock) -logger = get_logger("client") +logger: Logger = get_logger("client") # Define ASGI application types for type checking @@ -161,7 +150,7 @@ class PrefectResponse(httpx.Response): Provides more informative error messages. """ - def raise_for_status(self) -> None: + def raise_for_status(self) -> Response: """ Raise an exception if the response contains an HTTPStatusError. @@ -174,9 +163,9 @@ def raise_for_status(self) -> None: raise PrefectHTTPStatusError.from_httpx_error(exc) from exc.__cause__ @classmethod - def from_httpx_response(cls: Type[Self], response: httpx.Response) -> Self: + def from_httpx_response(cls: type[Self], response: httpx.Response) -> Response: """ - Create a `PrefectReponse` from an `httpx.Response`. + Create a `PrefectResponse` from an `httpx.Response`. 
By changing the `__class__` attribute of the Response, we change the method resolution order to look for methods defined in PrefectResponse, while leaving @@ -200,10 +189,10 @@ class PrefectHttpxAsyncClient(httpx.AsyncClient): def __init__( self, - *args, + *args: Any, enable_csrf_support: bool = False, raise_on_all_errors: bool = True, - **kwargs, + **kwargs: Any, ): self.enable_csrf_support: bool = enable_csrf_support self.csrf_token: Optional[str] = None @@ -222,10 +211,10 @@ async def _send_with_retry( self, request: Request, send: Callable[[Request], Awaitable[Response]], - send_args: Tuple, - send_kwargs: Dict, - retry_codes: Set[int] = set(), - retry_exceptions: Tuple[Exception, ...] = tuple(), + send_args: tuple[Any, ...], + send_kwargs: dict[str, Any], + retry_codes: set[int] = set(), + retry_exceptions: tuple[type[Exception], ...] = tuple(), ): """ Send a request and retry it if it fails. @@ -240,6 +229,11 @@ async def _send_with_retry( try_count = 0 response = None + if TYPE_CHECKING: + # older httpx versions type method as str | bytes | Unknown + # but in reality it is always a string. + assert isinstance(request.method, str) # type: ignore + is_change_request = request.method.lower() in {"post", "put", "patch", "delete"} if self.enable_csrf_support and is_change_request: @@ -297,7 +291,7 @@ async def _send_with_retry( if exc_info else ( "Received response with retryable status code" - f" {response.status_code}. " + f" {response.status_code if response else 'unknown'}. " ) ) + f"Another attempt will be made in {retry_seconds}s. " @@ -314,7 +308,7 @@ async def _send_with_retry( # We ran out of retries, return the failed response return response - async def send(self, request: Request, *args, **kwargs) -> Response: + async def send(self, request: Request, *args: Any, **kwargs: Any) -> Response: """ Send a request with automatic retry behavior for the following status codes: @@ -414,10 +408,10 @@ class PrefectHttpxSyncClient(httpx.Client): def __init__( self, - *args, + *args: Any, enable_csrf_support: bool = False, raise_on_all_errors: bool = True, - **kwargs, + **kwargs: Any, ): self.enable_csrf_support: bool = enable_csrf_support self.csrf_token: Optional[str] = None @@ -436,10 +430,10 @@ def _send_with_retry( self, request: Request, send: Callable[[Request], Response], - send_args: Tuple, - send_kwargs: Dict, - retry_codes: Set[int] = set(), - retry_exceptions: Tuple[Exception, ...] = tuple(), + send_args: tuple[Any, ...], + send_kwargs: dict[str, Any], + retry_codes: set[int] = set(), + retry_exceptions: tuple[type[Exception], ...] = tuple(), ): """ Send a request and retry it if it fails. @@ -454,6 +448,11 @@ def _send_with_retry( try_count = 0 response = None + if TYPE_CHECKING: + # older httpx versions type method as str | bytes | Unknown + # but in reality it is always a string. + assert isinstance(request.method, str) # type: ignore + is_change_request = request.method.lower() in {"post", "put", "patch", "delete"} if self.enable_csrf_support and is_change_request: @@ -511,7 +510,7 @@ def _send_with_retry( if exc_info else ( "Received response with retryable status code" - f" {response.status_code}. " + f" {response.status_code if response else 'unknown'}. " ) ) + f"Another attempt will be made in {retry_seconds}s. 
" @@ -528,7 +527,7 @@ def _send_with_retry( # We ran out of retries, return the failed response return response - def send(self, request: Request, *args, **kwargs) -> Response: + def send(self, request: Request, *args: Any, **kwargs: Any) -> Response: """ Send a request with automatic retry behavior for the following status codes: diff --git a/src/prefect/client/cloud.py b/src/prefect/client/cloud.py index 38a69150e922..90cae81b0f51 100644 --- a/src/prefect/client/cloud.py +++ b/src/prefect/client/cloud.py @@ -1,11 +1,12 @@ import re -from typing import Any, Dict, List, Optional, cast +from typing import Any, NoReturn, Optional, cast from uuid import UUID import anyio import httpx import pydantic from starlette import status +from typing_extensions import Self import prefect.context import prefect.settings @@ -30,7 +31,7 @@ def get_cloud_client( host: Optional[str] = None, api_key: Optional[str] = None, - httpx_settings: Optional[dict] = None, + httpx_settings: Optional[dict[str, Any]] = None, infer_cloud_url: bool = False, ) -> "CloudClient": """ @@ -45,6 +46,9 @@ def get_cloud_client( configured_url = prefect.settings.PREFECT_API_URL.value() host = re.sub(PARSE_API_URL_REGEX, "", configured_url) + if host is None: + raise ValueError("Host was not provided and could not be inferred") + return CloudClient( host=host, api_key=api_key or PREFECT_API_KEY.value(), @@ -59,11 +63,14 @@ class CloudUnauthorizedError(PrefectException): class CloudClient: + account_id: Optional[str] = None + workspace_id: Optional[str] = None + def __init__( self, host: str, api_key: str, - httpx_settings: Optional[Dict[str, Any]] = None, + httpx_settings: Optional[dict[str, Any]] = None, ) -> None: httpx_settings = httpx_settings or dict() httpx_settings.setdefault("headers", dict()) @@ -76,7 +83,7 @@ def __init__( **httpx_settings, enable_csrf_support=False ) - api_url = prefect.settings.PREFECT_API_URL.value() or "" + api_url: str = prefect.settings.PREFECT_API_URL.value() or "" if match := ( re.search(PARSE_API_URL_REGEX, host) or re.search(PARSE_API_URL_REGEX, api_url) @@ -97,7 +104,7 @@ def workspace_base_url(self) -> str: return f"{self.account_base_url}/workspaces/{self.workspace_id}" - async def api_healthcheck(self): + async def api_healthcheck(self) -> None: """ Attempts to connect to the Cloud API and raises the encountered exception if not successful. 
@@ -107,8 +114,8 @@ async def api_healthcheck(self): with anyio.fail_after(10): await self.read_workspaces() - async def read_workspaces(self) -> List[Workspace]: - workspaces = pydantic.TypeAdapter(List[Workspace]).validate_python( + async def read_workspaces(self) -> list[Workspace]: + workspaces = pydantic.TypeAdapter(list[Workspace]).validate_python( await self.get("/me/workspaces") ) return workspaces @@ -121,17 +128,17 @@ async def read_current_workspace(self) -> Workspace: return workspace raise ValueError("Current workspace not found") - async def read_worker_metadata(self) -> Dict[str, Any]: + async def read_worker_metadata(self) -> dict[str, Any]: response = await self.get( f"{self.workspace_base_url}/collections/work_pool_types" ) - return cast(Dict[str, Any], response) + return cast(dict[str, Any], response) - async def read_account_settings(self) -> Dict[str, Any]: + async def read_account_settings(self) -> dict[str, Any]: response = await self.get(f"{self.account_base_url}/settings") - return cast(Dict[str, Any], response) + return cast(dict[str, Any], response) - async def update_account_settings(self, settings: Dict[str, Any]): + async def update_account_settings(self, settings: dict[str, Any]) -> None: await self.request( "PATCH", f"{self.account_base_url}/settings", @@ -142,7 +149,7 @@ async def read_account_ip_allowlist(self) -> IPAllowlist: response = await self.get(f"{self.account_base_url}/ip_allowlist") return IPAllowlist.model_validate(response) - async def update_account_ip_allowlist(self, updated_allowlist: IPAllowlist): + async def update_account_ip_allowlist(self, updated_allowlist: IPAllowlist) -> None: await self.request( "PUT", f"{self.account_base_url}/ip_allowlist", @@ -172,26 +179,26 @@ async def update_flow_run_labels( json=labels, ) - async def __aenter__(self): + async def __aenter__(self) -> Self: await self._client.__aenter__() return self - async def __aexit__(self, *exc_info): + async def __aexit__(self, *exc_info: Any) -> None: return await self._client.__aexit__(*exc_info) - def __enter__(self): + def __enter__(self) -> NoReturn: raise RuntimeError( "The `CloudClient` must be entered with an async context. Use 'async " "with CloudClient(...)' not 'with CloudClient(...)'" ) - def __exit__(self, *_): + def __exit__(self, *_: object) -> NoReturn: assert False, "This should never be called but must be defined for __enter__" - async def get(self, route, **kwargs): + async def get(self, route: str, **kwargs: Any) -> Any: return await self.request("GET", route, **kwargs) - async def request(self, method, route, **kwargs): + async def request(self, method: str, route: str, **kwargs: Any) -> Any: try: res = await self._client.request(method, route, **kwargs) res.raise_for_status() diff --git a/src/prefect/client/collections.py b/src/prefect/client/collections.py index 12285d50a3d1..e5bd79f04325 100644 --- a/src/prefect/client/collections.py +++ b/src/prefect/client/collections.py @@ -13,12 +13,12 @@ async def read_worker_metadata(self) -> Dict[str, Any]: async def __aenter__(self) -> "CollectionsMetadataClient": ... - async def __aexit__(self, *exc_info) -> Any: + async def __aexit__(self, *exc_info: Any) -> Any: ... 
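`get_collections_metadata_client` (defined just below) returns an object satisfying the `CollectionsMetadataClient` protocol, so the typed `__aenter__`/`__aexit__` above describe exactly what an `async with` call site exercises. A small usage sketch; whether the metadata comes from Cloud or an OSS server depends on the active profile:

```python
import asyncio

from prefect.client.collections import get_collections_metadata_client


async def main() -> None:
    async with get_collections_metadata_client() as client:
        metadata = await client.read_worker_metadata()
        print(f"Known work pool types: {sorted(metadata)}")


asyncio.run(main())
```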
def get_collections_metadata_client( - httpx_settings: Optional[Dict] = None, + httpx_settings: Optional[Dict[str, Any]] = None, ) -> "CollectionsMetadataClient": """ Creates a client that can be used to fetch metadata for diff --git a/src/prefect/client/orchestration.py b/src/prefect/client/orchestration.py index ab83a4dcbeb9..197a3bbf87f1 100644 --- a/src/prefect/client/orchestration.py +++ b/src/prefect/client/orchestration.py @@ -2,21 +2,10 @@ import datetime import ssl import warnings +from collections.abc import Iterable from contextlib import AsyncExitStack -from typing import ( - TYPE_CHECKING, - Any, - Dict, - Iterable, - List, - Literal, - Optional, - Set, - Tuple, - TypeVar, - Union, - overload, -) +from logging import Logger +from typing import TYPE_CHECKING, Any, Literal, NoReturn, Optional, Union, overload from uuid import UUID, uuid4 import certifi @@ -27,7 +16,7 @@ from asgi_lifespan import LifespanManager from packaging import version from starlette import status -from typing_extensions import ParamSpec +from typing_extensions import ParamSpec, Self, TypeVar import prefect import prefect.exceptions @@ -138,6 +127,7 @@ PREFECT_TESTING_UNIT_TEST_MODE, get_current_settings, ) +from prefect.types import KeyValueLabelsField if TYPE_CHECKING: from prefect.flows import Flow as FlowObject @@ -152,26 +142,29 @@ ) P = ParamSpec("P") -R = TypeVar("R") +R = TypeVar("R", infer_variance=True) +T = TypeVar("T") @overload def get_client( - httpx_settings: Optional[Dict[str, Any]] = None, sync_client: Literal[False] = False + *, + httpx_settings: Optional[dict[str, Any]] = ..., + sync_client: Literal[False] = False, ) -> "PrefectClient": ... @overload def get_client( - httpx_settings: Optional[Dict[str, Any]] = None, sync_client: Literal[True] = True + *, httpx_settings: Optional[dict[str, Any]] = ..., sync_client: Literal[True] = ... ) -> "SyncPrefectClient": ... def get_client( - httpx_settings: Optional[Dict[str, Any]] = None, sync_client: bool = False -): + httpx_settings: Optional[dict[str, Any]] = None, sync_client: bool = False +) -> Union["SyncPrefectClient", "PrefectClient"]: """ Retrieve a HTTP client for communicating with the Prefect REST API. 
@@ -200,18 +193,21 @@ def get_client( if sync_client: if client_ctx := prefect.context.SyncClientContext.get(): - if client_ctx.client and client_ctx._httpx_settings == httpx_settings: + if ( + client_ctx.client + and getattr(client_ctx, "_httpx_settings", None) == httpx_settings + ): return client_ctx.client else: if client_ctx := prefect.context.AsyncClientContext.get(): if ( client_ctx.client - and client_ctx._httpx_settings == httpx_settings - and loop in (client_ctx.client._loop, None) + and getattr(client_ctx, "_httpx_settings", None) == httpx_settings + and loop in (getattr(client_ctx.client, "_loop", None), None) ): return client_ctx.client - api = PREFECT_API_URL.value() + api: str = PREFECT_API_URL.value() server_type = None if not api and PREFECT_SERVER_ALLOW_EPHEMERAL_MODE: @@ -277,7 +273,7 @@ def __init__( *, api_key: Optional[str] = None, api_version: Optional[str] = None, - httpx_settings: Optional[Dict[str, Any]] = None, + httpx_settings: Optional[dict[str, Any]] = None, server_type: Optional[ServerType] = None, ) -> None: httpx_settings = httpx_settings.copy() if httpx_settings else {} @@ -357,7 +353,7 @@ def __init__( ) # Connect to an in-process application - elif isinstance(api, ASGIApp): + else: self._ephemeral_app = api self.server_type = ServerType.EPHEMERAL @@ -377,12 +373,6 @@ def __init__( ) httpx_settings.setdefault("base_url", "http://ephemeral-prefect/api") - else: - raise TypeError( - f"Unexpected type {type(api).__name__!r} for argument `api`. Expected" - " 'str' or 'ASGIApp/FastAPI'" - ) - # See https://www.python-httpx.org/advanced/#timeout-configuration httpx_settings.setdefault( "timeout", @@ -426,9 +416,9 @@ def __init__( if isinstance(server_transport, httpx.AsyncHTTPTransport): pool = getattr(server_transport, "_pool", None) if isinstance(pool, httpcore.AsyncConnectionPool): - pool._retries = 3 + setattr(pool, "_retries", 3) - self.logger = get_logger("client") + self.logger: Logger = get_logger("client") @property def api_url(self) -> httpx.URL: @@ -458,7 +448,7 @@ async def hello(self) -> httpx.Response: """ return await self._client.get("/hello") - async def create_flow(self, flow: "FlowObject") -> UUID: + async def create_flow(self, flow: "FlowObject[Any, Any]") -> UUID: """ Create a flow in the Prefect API. @@ -511,19 +501,37 @@ async def read_flow(self, flow_id: UUID) -> Flow: response = await self._client.get(f"/flows/{flow_id}") return Flow.model_validate(response.json()) + async def delete_flow(self, flow_id: UUID) -> None: + """ + Delete a flow by UUID. 
+ + Args: + flow_id: ID of the flow to be deleted + Raises: + prefect.exceptions.ObjectNotFound: If request returns 404 + httpx.RequestError: If requests fail + """ + try: + await self._client.delete(f"/flows/{flow_id}") + except httpx.HTTPStatusError as e: + if e.response.status_code == status.HTTP_404_NOT_FOUND: + raise prefect.exceptions.ObjectNotFound(http_exc=e) from e + else: + raise + async def read_flows( self, *, - flow_filter: FlowFilter = None, - flow_run_filter: FlowRunFilter = None, - task_run_filter: TaskRunFilter = None, - deployment_filter: DeploymentFilter = None, - work_pool_filter: WorkPoolFilter = None, - work_queue_filter: WorkQueueFilter = None, - sort: FlowSort = None, + flow_filter: Optional[FlowFilter] = None, + flow_run_filter: Optional[FlowRunFilter] = None, + task_run_filter: Optional[TaskRunFilter] = None, + deployment_filter: Optional[DeploymentFilter] = None, + work_pool_filter: Optional[WorkPoolFilter] = None, + work_queue_filter: Optional[WorkQueueFilter] = None, + sort: Optional[FlowSort] = None, limit: Optional[int] = None, offset: int = 0, - ) -> List[Flow]: + ) -> list[Flow]: """ Query the Prefect API for flows. Only flows matching all criteria will be returned. @@ -542,7 +550,7 @@ async def read_flows( Returns: a list of Flow model representations of the flows """ - body = { + body: dict[str, Any] = { "flows": flow_filter.model_dump(mode="json") if flow_filter else None, "flow_runs": ( flow_run_filter.model_dump(mode="json", exclude_unset=True) @@ -567,7 +575,7 @@ async def read_flows( } response = await self._client.post("/flows/filter", json=body) - return pydantic.TypeAdapter(List[Flow]).validate_python(response.json()) + return pydantic.TypeAdapter(list[Flow]).validate_python(response.json()) async def read_flow_by_name( self, @@ -589,15 +597,15 @@ async def create_flow_run_from_deployment( self, deployment_id: UUID, *, - parameters: Optional[Dict[str, Any]] = None, - context: Optional[Dict[str, Any]] = None, - state: Optional[prefect.states.State] = None, + parameters: Optional[dict[str, Any]] = None, + context: Optional[dict[str, Any]] = None, + state: Optional[prefect.states.State[Any]] = None, name: Optional[str] = None, tags: Optional[Iterable[str]] = None, idempotency_key: Optional[str] = None, parent_task_run_id: Optional[UUID] = None, work_queue_name: Optional[str] = None, - job_variables: Optional[Dict[str, Any]] = None, + job_variables: Optional[dict[str, Any]] = None, ) -> FlowRun: """ Create a flow run for a deployment. @@ -638,7 +646,7 @@ async def create_flow_run_from_deployment( parameters=parameters, context=context, state=state.to_state_create(), - tags=tags, + tags=list(tags), name=name, idempotency_key=idempotency_key, parent_task_run_id=parent_task_run_id, @@ -657,13 +665,13 @@ async def create_flow_run_from_deployment( async def create_flow_run( self, - flow: "FlowObject", + flow: "FlowObject[Any, R]", name: Optional[str] = None, - parameters: Optional[Dict[str, Any]] = None, - context: Optional[Dict[str, Any]] = None, + parameters: Optional[dict[str, Any]] = None, + context: Optional[dict[str, Any]] = None, tags: Optional[Iterable[str]] = None, parent_task_run_id: Optional[UUID] = None, - state: Optional["prefect.states.State"] = None, + state: Optional["prefect.states.State[R]"] = None, ) -> FlowRun: """ Create a flow run for a flow. 
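The new `delete_flow` helper follows the same 404-to-`ObjectNotFound` convention as the client's other delete methods. A hedged usage sketch; the UUID below is only a placeholder:

```python
import asyncio
from uuid import UUID

from prefect.client.orchestration import get_client
from prefect.exceptions import ObjectNotFound


async def delete_flow_if_present(flow_id: UUID) -> None:
    async with get_client() as client:
        try:
            await client.delete_flow(flow_id)
            print(f"Deleted flow {flow_id}")
        except ObjectNotFound:
            print(f"No flow found with id {flow_id}")


# Placeholder ID, for illustration only.
asyncio.run(delete_flow_if_present(UUID("00000000-0000-0000-0000-000000000000")))
```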
@@ -705,7 +713,7 @@ async def create_flow_run( state=state.to_state_create(), empirical_policy=FlowRunPolicy( retries=flow.retries, - retry_delay=flow.retry_delay_seconds, + retry_delay=int(flow.retry_delay_seconds or 0), ), ) @@ -723,12 +731,12 @@ async def update_flow_run( self, flow_run_id: UUID, flow_version: Optional[str] = None, - parameters: Optional[dict] = None, + parameters: Optional[dict[str, Any]] = None, name: Optional[str] = None, tags: Optional[Iterable[str]] = None, empirical_policy: Optional[FlowRunPolicy] = None, infrastructure_pid: Optional[str] = None, - job_variables: Optional[dict] = None, + job_variables: Optional[dict[str, Any]] = None, ) -> httpx.Response: """ Update a flow run's details. @@ -749,7 +757,7 @@ async def update_flow_run( Returns: an `httpx.Response` object from the PATCH request """ - params = {} + params: dict[str, Any] = {} if flow_version is not None: params["flow_version"] = flow_version if parameters is not None: @@ -832,7 +840,7 @@ async def create_concurrency_limit( async def read_concurrency_limit_by_tag( self, tag: str, - ): + ) -> ConcurrencyLimit: """ Read the concurrency limit set on a specific tag. @@ -868,7 +876,7 @@ async def read_concurrency_limits( self, limit: int, offset: int, - ): + ) -> list[ConcurrencyLimit]: """ Lists concurrency limits set on task run tags. @@ -886,15 +894,15 @@ async def read_concurrency_limits( } response = await self._client.post("/concurrency_limits/filter", json=body) - return pydantic.TypeAdapter(List[ConcurrencyLimit]).validate_python( + return pydantic.TypeAdapter(list[ConcurrencyLimit]).validate_python( response.json() ) async def reset_concurrency_limit_by_tag( self, tag: str, - slot_override: Optional[List[Union[UUID, str]]] = None, - ): + slot_override: Optional[list[Union[UUID, str]]] = None, + ) -> None: """ Resets the concurrency limit slots set on a specific tag. @@ -927,7 +935,7 @@ async def reset_concurrency_limit_by_tag( async def delete_concurrency_limit_by_tag( self, tag: str, - ): + ) -> None: """ Delete the concurrency limit set on a specific tag. @@ -951,7 +959,7 @@ async def delete_concurrency_limit_by_tag( async def increment_v1_concurrency_slots( self, - names: List[str], + names: list[str], task_run_id: UUID, ) -> httpx.Response: """ @@ -961,7 +969,7 @@ async def increment_v1_concurrency_slots( names (List[str]): A list of limit names for which to increment limits. task_run_id (UUID): The task run ID incrementing the limits. """ - data = { + data: dict[str, Any] = { "names": names, "task_run_id": str(task_run_id), } @@ -973,7 +981,7 @@ async def increment_v1_concurrency_slots( async def decrement_v1_concurrency_slots( self, - names: List[str], + names: list[str], task_run_id: UUID, occupancy_seconds: float, ) -> httpx.Response: @@ -989,7 +997,7 @@ async def decrement_v1_concurrency_slots( Returns: httpx.Response: The HTTP response from the server. """ - data = { + data: dict[str, Any] = { "names": names, "task_run_id": str(task_run_id), "occupancy_seconds": occupancy_seconds, @@ -1089,7 +1097,7 @@ async def read_work_queue_by_name( return WorkQueue.model_validate(response.json()) - async def update_work_queue(self, id: UUID, **kwargs): + async def update_work_queue(self, id: UUID, **kwargs: Any) -> None: """ Update properties of a work queue. 
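The concurrency-limit methods above now carry concrete return types (`ConcurrencyLimit`, `list[ConcurrencyLimit]`, `None`). A short round-trip sketch, assuming the `create_concurrency_limit` helper referenced in the hunk header behaves as its name suggests; the tag name is chosen only for illustration:

```python
import asyncio

from prefect.client.orchestration import get_client


async def main() -> None:
    async with get_client() as client:
        await client.create_concurrency_limit(tag="demo-database", concurrency_limit=2)
        limit = await client.read_concurrency_limit_by_tag("demo-database")
        print(limit.concurrency_limit)
        await client.delete_concurrency_limit_by_tag("demo-database")


asyncio.run(main())
```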
@@ -1119,8 +1127,8 @@ async def get_runs_in_work_queue( self, id: UUID, limit: int = 10, - scheduled_before: datetime.datetime = None, - ) -> List[FlowRun]: + scheduled_before: Optional[datetime.datetime] = None, + ) -> list[FlowRun]: """ Read flow runs off a work queue. @@ -1153,7 +1161,7 @@ async def get_runs_in_work_queue( raise prefect.exceptions.ObjectNotFound(http_exc=e) from e else: raise - return pydantic.TypeAdapter(List[FlowRun]).validate_python(response.json()) + return pydantic.TypeAdapter(list[FlowRun]).validate_python(response.json()) async def read_work_queue( self, @@ -1209,9 +1217,9 @@ async def read_work_queue_status( async def match_work_queues( self, - prefixes: List[str], + prefixes: list[str], work_pool_name: Optional[str] = None, - ) -> List[WorkQueue]: + ) -> list[WorkQueue]: """ Query the Prefect API for work queues with names with a specific prefix. @@ -1225,7 +1233,7 @@ async def match_work_queues( """ page_length = 100 current_page = 0 - work_queues = [] + work_queues: list[WorkQueue] = [] while True: new_queues = await self.read_work_queues( @@ -1246,7 +1254,7 @@ async def match_work_queues( async def delete_work_queue_by_id( self, id: UUID, - ): + ) -> None: """ Delete a work queue by its ID. @@ -1343,7 +1351,7 @@ async def update_block_document( self, block_document_id: UUID, block_document: BlockDocumentUpdate, - ): + ) -> None: """ Update a block document in the Prefect API. """ @@ -1362,7 +1370,7 @@ async def update_block_document( else: raise - async def delete_block_document(self, block_document_id: UUID): + async def delete_block_document(self, block_document_id: UUID) -> None: """ Delete a block document. """ @@ -1405,7 +1413,9 @@ async def read_block_schema_by_checksum( raise return BlockSchema.model_validate(response.json()) - async def update_block_type(self, block_type_id: UUID, block_type: BlockTypeUpdate): + async def update_block_type( + self, block_type_id: UUID, block_type: BlockTypeUpdate + ) -> None: """ Update a block document in the Prefect API. """ @@ -1424,7 +1434,7 @@ async def update_block_type(self, block_type_id: UUID, block_type: BlockTypeUpda else: raise - async def delete_block_type(self, block_type_id: UUID): + async def delete_block_type(self, block_type_id: UUID) -> None: """ Delete a block type. """ @@ -1444,7 +1454,7 @@ async def delete_block_type(self, block_type_id: UUID): else: raise - async def read_block_types(self) -> List[BlockType]: + async def read_block_types(self) -> list[BlockType]: """ Read all block types Raises: @@ -1454,9 +1464,9 @@ async def read_block_types(self) -> List[BlockType]: List of BlockTypes. """ response = await self._client.post("/block_types/filter", json={}) - return pydantic.TypeAdapter(List[BlockType]).validate_python(response.json()) + return pydantic.TypeAdapter(list[BlockType]).validate_python(response.json()) - async def read_block_schemas(self) -> List[BlockSchema]: + async def read_block_schemas(self) -> list[BlockSchema]: """ Read all block schemas Raises: @@ -1466,7 +1476,7 @@ async def read_block_schemas(self) -> List[BlockSchema]: A BlockSchema. 
""" response = await self._client.post("/block_schemas/filter", json={}) - return pydantic.TypeAdapter(List[BlockSchema]).validate_python(response.json()) + return pydantic.TypeAdapter(list[BlockSchema]).validate_python(response.json()) async def get_most_recent_block_schema_for_block_type( self, @@ -1502,7 +1512,7 @@ async def read_block_document( self, block_document_id: UUID, include_secrets: bool = True, - ): + ) -> BlockDocument: """ Read the block document with the specified ID. @@ -1580,7 +1590,7 @@ async def read_block_documents( offset: Optional[int] = None, limit: Optional[int] = None, include_secrets: bool = True, - ): + ) -> list[BlockDocument]: """ Read block documents @@ -1607,7 +1617,7 @@ async def read_block_documents( include_secrets=include_secrets, ), ) - return pydantic.TypeAdapter(List[BlockDocument]).validate_python( + return pydantic.TypeAdapter(list[BlockDocument]).validate_python( response.json() ) @@ -1617,7 +1627,7 @@ async def read_block_documents_by_type( offset: Optional[int] = None, limit: Optional[int] = None, include_secrets: bool = True, - ) -> List[BlockDocument]: + ) -> list[BlockDocument]: """Retrieve block documents by block type slug. Args: @@ -1638,7 +1648,7 @@ async def read_block_documents_by_type( ), ) - return pydantic.TypeAdapter(List[BlockDocument]).validate_python( + return pydantic.TypeAdapter(list[BlockDocument]).validate_python( response.json() ) @@ -1647,23 +1657,23 @@ async def create_deployment( flow_id: UUID, name: str, version: Optional[str] = None, - schedules: Optional[List[DeploymentScheduleCreate]] = None, + schedules: Optional[list[DeploymentScheduleCreate]] = None, concurrency_limit: Optional[int] = None, concurrency_options: Optional[ConcurrencyOptions] = None, - parameters: Optional[Dict[str, Any]] = None, + parameters: Optional[dict[str, Any]] = None, description: Optional[str] = None, work_queue_name: Optional[str] = None, work_pool_name: Optional[str] = None, - tags: Optional[List[str]] = None, + tags: Optional[list[str]] = None, storage_document_id: Optional[UUID] = None, path: Optional[str] = None, entrypoint: Optional[str] = None, infrastructure_document_id: Optional[UUID] = None, - parameter_openapi_schema: Optional[Dict[str, Any]] = None, + parameter_openapi_schema: Optional[dict[str, Any]] = None, paused: Optional[bool] = None, - pull_steps: Optional[List[dict]] = None, + pull_steps: Optional[list[dict[str, Any]]] = None, enforce_parameter_schema: Optional[bool] = None, - job_variables: Optional[Dict[str, Any]] = None, + job_variables: Optional[dict[str, Any]] = None, ) -> UUID: """ Create a deployment. @@ -1743,7 +1753,9 @@ async def create_deployment( return UUID(deployment_id) - async def set_deployment_paused_state(self, deployment_id: UUID, paused: bool): + async def set_deployment_paused_state( + self, deployment_id: UUID, paused: bool + ) -> None: await self._client.patch( f"/deployments/{deployment_id}", json={"paused": paused} ) @@ -1752,7 +1764,7 @@ async def update_deployment( self, deployment_id: UUID, deployment: DeploymentUpdate, - ): + ) -> None: await self._client.patch( f"/deployments/{deployment_id}", json=deployment.model_dump(mode="json", exclude_unset=True), @@ -1775,7 +1787,7 @@ async def _create_deployment_from_schema(self, schema: DeploymentCreate) -> UUID async def read_deployment( self, - deployment_id: UUID, + deployment_id: Union[UUID, str], ) -> DeploymentResponse: """ Query the Prefect API for a deployment by id. 
@@ -1868,7 +1880,7 @@ async def read_deployments( limit: Optional[int] = None, sort: Optional[DeploymentSort] = None, offset: int = 0, - ) -> List[DeploymentResponse]: + ) -> list[DeploymentResponse]: """ Query the Prefect API for deployments. Only deployments matching all the provided criteria will be returned. @@ -1887,7 +1899,7 @@ async def read_deployments( a list of Deployment model representations of the deployments """ - body = { + body: dict[str, Any] = { "flows": flow_filter.model_dump(mode="json") if flow_filter else None, "flow_runs": ( flow_run_filter.model_dump(mode="json", exclude_unset=True) @@ -1912,14 +1924,14 @@ async def read_deployments( } response = await self._client.post("/deployments/filter", json=body) - return pydantic.TypeAdapter(List[DeploymentResponse]).validate_python( + return pydantic.TypeAdapter(list[DeploymentResponse]).validate_python( response.json() ) async def delete_deployment( self, deployment_id: UUID, - ): + ) -> None: """ Delete deployment by id. @@ -1940,8 +1952,8 @@ async def delete_deployment( async def create_deployment_schedules( self, deployment_id: UUID, - schedules: List[Tuple[SCHEDULE_TYPES, bool]], - ) -> List[DeploymentSchedule]: + schedules: list[tuple[SCHEDULE_TYPES, bool]], + ) -> list[DeploymentSchedule]: """ Create deployment schedules. @@ -1968,14 +1980,14 @@ async def create_deployment_schedules( response = await self._client.post( f"/deployments/{deployment_id}/schedules", json=json ) - return pydantic.TypeAdapter(List[DeploymentSchedule]).validate_python( + return pydantic.TypeAdapter(list[DeploymentSchedule]).validate_python( response.json() ) async def read_deployment_schedules( self, deployment_id: UUID, - ) -> List[DeploymentSchedule]: + ) -> list[DeploymentSchedule]: """ Query the Prefect API for a deployment's schedules. @@ -1992,7 +2004,7 @@ async def read_deployment_schedules( raise prefect.exceptions.ObjectNotFound(http_exc=e) from e else: raise - return pydantic.TypeAdapter(List[DeploymentSchedule]).validate_python( + return pydantic.TypeAdapter(list[DeploymentSchedule]).validate_python( response.json() ) @@ -2002,7 +2014,7 @@ async def update_deployment_schedule( schedule_id: UUID, active: Optional[bool] = None, schedule: Optional[SCHEDULE_TYPES] = None, - ): + ) -> None: """ Update a deployment schedule by ID. @@ -2012,7 +2024,7 @@ async def update_deployment_schedule( active: whether or not the schedule should be active schedule: the cron, rrule, or interval schedule this deployment schedule should use """ - kwargs = {} + kwargs: dict[str, Any] = {} if active is not None: kwargs["active"] = active if schedule is not None: @@ -2076,8 +2088,8 @@ async def read_flow_run(self, flow_run_id: UUID) -> FlowRun: return FlowRun.model_validate(response.json()) async def resume_flow_run( - self, flow_run_id: UUID, run_input: Optional[Dict] = None - ) -> OrchestrationResult: + self, flow_run_id: UUID, run_input: Optional[dict[str, Any]] = None + ) -> OrchestrationResult[Any]: """ Resumes a paused flow run. 
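`read_deployments` above now returns `list[DeploymentResponse]`. A hedged usage sketch, assuming the usual `get_client()` helper from `prefect.client.orchestration` (not part of this diff) and a reachable Prefect API:

```python
import asyncio

from prefect.client.orchestration import get_client


async def list_deployment_names(limit: int = 5) -> list[str]:
    # With no filters supplied, read_deployments simply returns up to `limit`
    # deployments for the connected server or workspace.
    async with get_client() as client:
        deployments = await client.read_deployments(limit=limit)
        return [deployment.name for deployment in deployments]


if __name__ == "__main__":
    print(asyncio.run(list_deployment_names()))
```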
@@ -2095,21 +2107,24 @@ async def resume_flow_run( except httpx.HTTPStatusError: raise - return OrchestrationResult.model_validate(response.json()) + result: OrchestrationResult[Any] = OrchestrationResult.model_validate( + response.json() + ) + return result async def read_flow_runs( self, *, - flow_filter: FlowFilter = None, - flow_run_filter: FlowRunFilter = None, - task_run_filter: TaskRunFilter = None, - deployment_filter: DeploymentFilter = None, - work_pool_filter: WorkPoolFilter = None, - work_queue_filter: WorkQueueFilter = None, - sort: FlowRunSort = None, + flow_filter: Optional[FlowFilter] = None, + flow_run_filter: Optional[FlowRunFilter] = None, + task_run_filter: Optional[TaskRunFilter] = None, + deployment_filter: Optional[DeploymentFilter] = None, + work_pool_filter: Optional[WorkPoolFilter] = None, + work_queue_filter: Optional[WorkQueueFilter] = None, + sort: Optional[FlowRunSort] = None, limit: Optional[int] = None, offset: int = 0, - ) -> List[FlowRun]: + ) -> list[FlowRun]: """ Query the Prefect API for flow runs. Only flow runs matching all criteria will be returned. @@ -2129,7 +2144,7 @@ async def read_flow_runs( a list of Flow Run model representations of the flow runs """ - body = { + body: dict[str, Any] = { "flows": flow_filter.model_dump(mode="json") if flow_filter else None, "flow_runs": ( flow_run_filter.model_dump(mode="json", exclude_unset=True) @@ -2154,14 +2169,14 @@ async def read_flow_runs( } response = await self._client.post("/flow_runs/filter", json=body) - return pydantic.TypeAdapter(List[FlowRun]).validate_python(response.json()) + return pydantic.TypeAdapter(list[FlowRun]).validate_python(response.json()) async def set_flow_run_state( self, - flow_run_id: UUID, - state: "prefect.states.State", + flow_run_id: Union[UUID, str], + state: "prefect.states.State[T]", force: bool = False, - ) -> OrchestrationResult: + ) -> OrchestrationResult[T]: """ Set the state of a flow run. 
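Many of the signature changes here, `read_flow_runs` above included, replace implicit-Optional defaults such as `flow_filter: FlowFilter = None` with explicit `Optional[...]` annotations. A small illustration using a hypothetical function rather than the client method itself:

```python
import datetime
from typing import Optional


# Implicit Optional: the annotation says datetime, but the default is None.
# Strict type checkers reject this form outright.
def runs_before_old(scheduled_before: datetime.datetime = None):
    return scheduled_before


# Explicit Optional: the None default is part of the declared type, matching
# the style adopted throughout this diff.
def runs_before_new(scheduled_before: Optional[datetime.datetime] = None):
    return scheduled_before
```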
@@ -2194,11 +2209,14 @@ async def set_flow_run_state( else: raise - return OrchestrationResult.model_validate(response.json()) + result: OrchestrationResult[T] = OrchestrationResult.model_validate( + response.json() + ) + return result async def read_flow_run_states( self, flow_run_id: UUID - ) -> List[prefect.states.State]: + ) -> list[prefect.states.State]: """ Query for the states of a flow run @@ -2212,18 +2230,18 @@ async def read_flow_run_states( response = await self._client.get( "/flow_run_states/", params=dict(flow_run_id=str(flow_run_id)) ) - return pydantic.TypeAdapter(List[prefect.states.State]).validate_python( + return pydantic.TypeAdapter(list[prefect.states.State]).validate_python( response.json() ) - async def set_flow_run_name(self, flow_run_id: UUID, name: str): + async def set_flow_run_name(self, flow_run_id: UUID, name: str) -> httpx.Response: flow_run_data = FlowRunUpdate(name=name) return await self._client.patch( f"/flow_runs/{flow_run_id}", json=flow_run_data.model_dump(mode="json", exclude_unset=True), ) - async def set_task_run_name(self, task_run_id: UUID, name: str): + async def set_task_run_name(self, task_run_id: UUID, name: str) -> httpx.Response: task_run_data = TaskRunUpdate(name=name) return await self._client.patch( f"/task_runs/{task_run_id}", @@ -2240,9 +2258,9 @@ async def create_task_run( extra_tags: Optional[Iterable[str]] = None, state: Optional[prefect.states.State[R]] = None, task_inputs: Optional[ - Dict[ + dict[ str, - List[ + list[ Union[ TaskRunResult, Parameter, @@ -2276,6 +2294,12 @@ async def create_task_run( if state is None: state = prefect.states.Pending() + retry_delay = task.retry_delay_seconds + if isinstance(retry_delay, list): + retry_delay = [int(rd) for rd in retry_delay] + elif isinstance(retry_delay, float): + retry_delay = int(retry_delay) + task_run_data = TaskRunCreate( id=id, name=name, @@ -2286,7 +2310,7 @@ async def create_task_run( task_version=task.version, empirical_policy=TaskRunPolicy( retries=task.retries, - retry_delay=task.retry_delay_seconds, + retry_delay=retry_delay, retry_jitter_factor=task.retry_jitter_factor, ), state=state.to_state_create(), @@ -2319,14 +2343,14 @@ async def read_task_run(self, task_run_id: UUID) -> TaskRun: async def read_task_runs( self, *, - flow_filter: FlowFilter = None, - flow_run_filter: FlowRunFilter = None, - task_run_filter: TaskRunFilter = None, - deployment_filter: DeploymentFilter = None, - sort: TaskRunSort = None, + flow_filter: Optional[FlowFilter] = None, + flow_run_filter: Optional[FlowRunFilter] = None, + task_run_filter: Optional[TaskRunFilter] = None, + deployment_filter: Optional[DeploymentFilter] = None, + sort: Optional[TaskRunSort] = None, limit: Optional[int] = None, offset: int = 0, - ) -> List[TaskRun]: + ) -> list[TaskRun]: """ Query the Prefect API for task runs. Only task runs matching all criteria will be returned. 
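The `create_task_run` hunk above normalizes `task.retry_delay_seconds` before building `TaskRunPolicy`, since the task-level setting may be a float, a list of delays, or None while the create schema expects integers. The coercion in isolation (the `Union` shape below is inferred from the code shown, not a documented contract):

```python
from typing import Optional, Union


def normalize_retry_delay(
    retry_delay_seconds: Union[int, float, list[float], None],
) -> Optional[Union[int, list[int]]]:
    # Lists are coerced element-wise, floats are truncated to ints, and ints
    # or None pass through unchanged.
    if isinstance(retry_delay_seconds, list):
        return [int(delay) for delay in retry_delay_seconds]
    if isinstance(retry_delay_seconds, float):
        return int(retry_delay_seconds)
    return retry_delay_seconds


assert normalize_retry_delay(2.5) == 2
assert normalize_retry_delay([1.0, 2.0, 4.0]) == [1, 2, 4]
assert normalize_retry_delay(None) is None
```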
@@ -2344,7 +2368,7 @@ async def read_task_runs( a list of Task Run model representations of the task runs """ - body = { + body: dict[str, Any] = { "flows": flow_filter.model_dump(mode="json") if flow_filter else None, "flow_runs": ( flow_run_filter.model_dump(mode="json", exclude_unset=True) @@ -2362,7 +2386,7 @@ async def read_task_runs( "offset": offset, } response = await self._client.post("/task_runs/filter", json=body) - return pydantic.TypeAdapter(List[TaskRun]).validate_python(response.json()) + return pydantic.TypeAdapter(list[TaskRun]).validate_python(response.json()) async def delete_task_run(self, task_run_id: UUID) -> None: """ @@ -2385,9 +2409,9 @@ async def delete_task_run(self, task_run_id: UUID) -> None: async def set_task_run_state( self, task_run_id: UUID, - state: prefect.states.State, + state: prefect.states.State[T], force: bool = False, - ) -> OrchestrationResult: + ) -> OrchestrationResult[T]: """ Set the state of a task run. @@ -2406,11 +2430,14 @@ async def set_task_run_state( f"/task_runs/{task_run_id}/set_state", json=dict(state=state_create.model_dump(mode="json"), force=force), ) - return OrchestrationResult.model_validate(response.json()) + result: OrchestrationResult[T] = OrchestrationResult.model_validate( + response.json() + ) + return result async def read_task_run_states( self, task_run_id: UUID - ) -> List[prefect.states.State]: + ) -> list[prefect.states.State]: """ Query for the states of a task run @@ -2423,11 +2450,13 @@ async def read_task_run_states( response = await self._client.get( "/task_run_states/", params=dict(task_run_id=str(task_run_id)) ) - return pydantic.TypeAdapter(List[prefect.states.State]).validate_python( + return pydantic.TypeAdapter(list[prefect.states.State]).validate_python( response.json() ) - async def create_logs(self, logs: Iterable[Union[LogCreate, dict]]) -> None: + async def create_logs( + self, logs: Iterable[Union[LogCreate, dict[str, Any]]] + ) -> None: """ Create logs for a flow or task run @@ -2444,8 +2473,8 @@ async def create_flow_run_notification_policy( self, block_document_id: UUID, is_active: bool = True, - tags: List[str] = None, - state_names: List[str] = None, + tags: Optional[list[str]] = None, + state_names: Optional[list[str]] = None, message_template: Optional[str] = None, ) -> UUID: """ @@ -2507,8 +2536,8 @@ async def update_flow_run_notification_policy( id: UUID, block_document_id: Optional[UUID] = None, is_active: Optional[bool] = None, - tags: Optional[List[str]] = None, - state_names: Optional[List[str]] = None, + tags: Optional[list[str]] = None, + state_names: Optional[list[str]] = None, message_template: Optional[str] = None, ) -> None: """ @@ -2525,7 +2554,7 @@ async def update_flow_run_notification_policy( prefect.exceptions.ObjectNotFound: If request returns 404 httpx.RequestError: If requests fails """ - params = {} + params: dict[str, Any] = {} if block_document_id is not None: params["block_document_id"] = block_document_id if is_active is not None: @@ -2555,7 +2584,7 @@ async def read_flow_run_notification_policies( flow_run_notification_policy_filter: FlowRunNotificationPolicyFilter, limit: Optional[int] = None, offset: int = 0, - ) -> List[FlowRunNotificationPolicy]: + ) -> list[FlowRunNotificationPolicy]: """ Query the Prefect API for flow run notification policies. Only policies matching all criteria will be returned. 
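Several hunks, `set_task_run_state` above among them, stop returning `OrchestrationResult.model_validate(...)` directly and bind it to an explicitly parameterized name first. A minimal sketch of why, using a hypothetical generic model in place of the real `OrchestrationResult`:

```python
from typing import Any, Generic, TypeVar

from pydantic import BaseModel

T = TypeVar("T")


class ResultStub(BaseModel, Generic[T]):
    """Illustrative stand-in for the real OrchestrationResult model."""

    state: T


def parse_result(payload: dict[str, Any]) -> ResultStub[str]:
    # model_validate is called on the unparameterized class, so annotating the
    # intermediate name is what pins the type parameter for static checkers.
    result: ResultStub[str] = ResultStub.model_validate(payload)
    return result


print(parse_result({"state": "COMPLETED"}).state)
```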
@@ -2569,7 +2598,7 @@ async def read_flow_run_notification_policies( a list of FlowRunNotificationPolicy model representations of the notification policies """ - body = { + body: dict[str, Any] = { "flow_run_notification_policy_filter": ( flow_run_notification_policy_filter.model_dump(mode="json") if flow_run_notification_policy_filter @@ -2581,7 +2610,7 @@ async def read_flow_run_notification_policies( response = await self._client.post( "/flow_run_notification_policies/filter", json=body ) - return pydantic.TypeAdapter(List[FlowRunNotificationPolicy]).validate_python( + return pydantic.TypeAdapter(list[FlowRunNotificationPolicy]).validate_python( response.json() ) @@ -2591,11 +2620,11 @@ async def read_logs( limit: Optional[int] = None, offset: Optional[int] = None, sort: LogSort = LogSort.TIMESTAMP_ASC, - ) -> List[Log]: + ) -> list[Log]: """ Read flow and task run logs. """ - body = { + body: dict[str, Any] = { "logs": log_filter.model_dump(mode="json") if log_filter else None, "limit": limit, "offset": offset, @@ -2603,7 +2632,7 @@ async def read_logs( } response = await self._client.post("/logs/filter", json=body) - return pydantic.TypeAdapter(List[Log]).validate_python(response.json()) + return pydantic.TypeAdapter(list[Log]).validate_python(response.json()) async def send_worker_heartbeat( self, @@ -2622,7 +2651,7 @@ async def send_worker_heartbeat( return_id: Whether to return the worker ID. Note: will return `None` if the connected server does not support returning worker IDs, even if `return_id` is `True`. worker_metadata: Metadata about the worker to send to the server. """ - params = { + params: dict[str, Any] = { "name": worker_name, "heartbeat_interval_seconds": heartbeat_interval_seconds, } @@ -2654,7 +2683,7 @@ async def read_workers_for_work_pool( worker_filter: Optional[WorkerFilter] = None, offset: Optional[int] = None, limit: Optional[int] = None, - ) -> List[Worker]: + ) -> list[Worker]: """ Reads workers for a given work pool. @@ -2678,7 +2707,7 @@ async def read_workers_for_work_pool( }, ) - return pydantic.TypeAdapter(List[Worker]).validate_python(response.json()) + return pydantic.TypeAdapter(list[Worker]).validate_python(response.json()) async def read_work_pool(self, work_pool_name: str) -> WorkPool: """ @@ -2705,7 +2734,7 @@ async def read_work_pools( limit: Optional[int] = None, offset: int = 0, work_pool_filter: Optional[WorkPoolFilter] = None, - ) -> List[WorkPool]: + ) -> list[WorkPool]: """ Reads work pools. @@ -2718,7 +2747,7 @@ async def read_work_pools( A list of work pools. """ - body = { + body: dict[str, Any] = { "limit": limit, "offset": offset, "work_pools": ( @@ -2726,7 +2755,7 @@ async def read_work_pools( ), } response = await self._client.post("/work_pools/filter", json=body) - return pydantic.TypeAdapter(List[WorkPool]).validate_python(response.json()) + return pydantic.TypeAdapter(list[WorkPool]).validate_python(response.json()) async def create_work_pool( self, @@ -2776,7 +2805,7 @@ async def update_work_pool( self, work_pool_name: str, work_pool: WorkPoolUpdate, - ): + ) -> None: """ Updates a work pool. @@ -2798,7 +2827,7 @@ async def update_work_pool( async def delete_work_pool( self, work_pool_name: str, - ): + ) -> None: """ Deletes a work pool. @@ -2819,7 +2848,7 @@ async def read_work_queues( work_queue_filter: Optional[WorkQueueFilter] = None, limit: Optional[int] = None, offset: Optional[int] = None, - ) -> List[WorkQueue]: + ) -> list[WorkQueue]: """ Retrieves queues for a work pool. 
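The filter endpoints in these hunks now annotate their JSON payloads as `dict[str, Any]` instead of relying on inference. A short sketch of the failure mode the annotation avoids (the payload keys are illustrative, not the exact API shape):

```python
from typing import Any, Optional


def build_filter_body(limit: Optional[int] = None, offset: int = 0) -> dict[str, Any]:
    # Without the annotation, a checker infers dict[str, int | None] from the
    # first two entries and then rejects the nested filter added below.
    body: dict[str, Any] = {
        "limit": limit,
        "offset": offset,
    }
    body["work_pools"] = {"name": {"any_": ["default"]}}
    return body


print(build_filter_body(limit=10))
```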
@@ -2832,7 +2861,7 @@ async def read_work_queues( Returns: List of queues for the specified work pool. """ - json = { + json: dict[str, Any] = { "work_queues": ( work_queue_filter.model_dump(mode="json", exclude_unset=True) if work_queue_filter @@ -2856,15 +2885,15 @@ async def read_work_queues( else: response = await self._client.post("/work_queues/filter", json=json) - return pydantic.TypeAdapter(List[WorkQueue]).validate_python(response.json()) + return pydantic.TypeAdapter(list[WorkQueue]).validate_python(response.json()) async def get_scheduled_flow_runs_for_deployments( self, - deployment_ids: List[UUID], + deployment_ids: list[UUID], scheduled_before: Optional[datetime.datetime] = None, limit: Optional[int] = None, - ) -> List[FlowRunResponse]: - body: Dict[str, Any] = dict(deployment_ids=[str(id) for id in deployment_ids]) + ) -> list[FlowRunResponse]: + body: dict[str, Any] = dict(deployment_ids=[str(id) for id in deployment_ids]) if scheduled_before: body["scheduled_before"] = str(scheduled_before) if limit: @@ -2875,16 +2904,16 @@ async def get_scheduled_flow_runs_for_deployments( json=body, ) - return pydantic.TypeAdapter(List[FlowRunResponse]).validate_python( + return pydantic.TypeAdapter(list[FlowRunResponse]).validate_python( response.json() ) async def get_scheduled_flow_runs_for_work_pool( self, work_pool_name: str, - work_queue_names: Optional[List[str]] = None, + work_queue_names: Optional[list[str]] = None, scheduled_before: Optional[datetime.datetime] = None, - ) -> List[WorkerFlowRunResponse]: + ) -> list[WorkerFlowRunResponse]: """ Retrieves scheduled flow runs for the provided set of work pool queues. @@ -2900,7 +2929,7 @@ async def get_scheduled_flow_runs_for_work_pool( A list of worker flow run responses containing information about the retrieved flow runs. """ - body: Dict[str, Any] = {} + body: dict[str, Any] = {} if work_queue_names is not None: body["work_queue_names"] = list(work_queue_names) if scheduled_before: @@ -2910,7 +2939,7 @@ async def get_scheduled_flow_runs_for_work_pool( f"/work_pools/{work_pool_name}/get_scheduled_flow_runs", json=body, ) - return pydantic.TypeAdapter(List[WorkerFlowRunResponse]).validate_python( + return pydantic.TypeAdapter(list[WorkerFlowRunResponse]).validate_python( response.json() ) @@ -2956,13 +2985,13 @@ async def update_artifact( async def read_artifacts( self, *, - artifact_filter: ArtifactFilter = None, - flow_run_filter: FlowRunFilter = None, - task_run_filter: TaskRunFilter = None, - sort: ArtifactSort = None, + artifact_filter: Optional[ArtifactFilter] = None, + flow_run_filter: Optional[FlowRunFilter] = None, + task_run_filter: Optional[TaskRunFilter] = None, + sort: Optional[ArtifactSort] = None, limit: Optional[int] = None, offset: int = 0, - ) -> List[Artifact]: + ) -> list[Artifact]: """ Query the Prefect API for artifacts. Only artifacts matching all criteria will be returned. 
@@ -2976,7 +3005,7 @@ async def read_artifacts( Returns: a list of Artifact model representations of the artifacts """ - body = { + body: dict[str, Any] = { "artifacts": ( artifact_filter.model_dump(mode="json") if artifact_filter else None ), @@ -2991,18 +3020,18 @@ async def read_artifacts( "offset": offset, } response = await self._client.post("/artifacts/filter", json=body) - return pydantic.TypeAdapter(List[Artifact]).validate_python(response.json()) + return pydantic.TypeAdapter(list[Artifact]).validate_python(response.json()) async def read_latest_artifacts( self, *, - artifact_filter: ArtifactCollectionFilter = None, - flow_run_filter: FlowRunFilter = None, - task_run_filter: TaskRunFilter = None, - sort: ArtifactCollectionSort = None, + artifact_filter: Optional[ArtifactCollectionFilter] = None, + flow_run_filter: Optional[FlowRunFilter] = None, + task_run_filter: Optional[TaskRunFilter] = None, + sort: Optional[ArtifactCollectionSort] = None, limit: Optional[int] = None, offset: int = 0, - ) -> List[ArtifactCollection]: + ) -> list[ArtifactCollection]: """ Query the Prefect API for artifacts. Only artifacts matching all criteria will be returned. @@ -3016,7 +3045,7 @@ async def read_latest_artifacts( Returns: a list of Artifact model representations of the artifacts """ - body = { + body: dict[str, Any] = { "artifacts": ( artifact_filter.model_dump(mode="json") if artifact_filter else None ), @@ -3031,7 +3060,7 @@ async def read_latest_artifacts( "offset": offset, } response = await self._client.post("/artifacts/latest/filter", json=body) - return pydantic.TypeAdapter(List[ArtifactCollection]).validate_python( + return pydantic.TypeAdapter(list[ArtifactCollection]).validate_python( response.json() ) @@ -3090,7 +3119,7 @@ async def read_variable_by_name(self, name: str) -> Optional[Variable]: else: raise - async def delete_variable_by_name(self, name: str): + async def delete_variable_by_name(self, name: str) -> None: """Deletes a variable by name.""" try: await self._client.delete(f"/variables/name/{name}") @@ -3100,12 +3129,12 @@ async def delete_variable_by_name(self, name: str): else: raise - async def read_variables(self, limit: Optional[int] = None) -> List[Variable]: + async def read_variables(self, limit: Optional[int] = None) -> list[Variable]: """Reads all variables.""" response = await self._client.post("/variables/filter", json={"limit": limit}) - return pydantic.TypeAdapter(List[Variable]).validate_python(response.json()) + return pydantic.TypeAdapter(list[Variable]).validate_python(response.json()) - async def read_worker_metadata(self) -> Dict[str, Any]: + async def read_worker_metadata(self) -> dict[str, Any]: """Reads worker metadata stored in Prefect collection registry.""" response = await self._client.get("collections/views/aggregate-worker-metadata") response.raise_for_status() @@ -3113,7 +3142,7 @@ async def read_worker_metadata(self) -> Dict[str, Any]: async def increment_concurrency_slots( self, - names: List[str], + names: list[str], slots: int, mode: str, create_if_missing: Optional[bool] = None, @@ -3129,7 +3158,7 @@ async def increment_concurrency_slots( ) async def release_concurrency_slots( - self, names: List[str], slots: int, occupancy_seconds: float + self, names: list[str], slots: int, occupancy_seconds: float ) -> httpx.Response: """ Release concurrency slots for the specified limits. 
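`read_variable_by_name` above keeps its `Optional[Variable]` return, so callers still own the `None` branch. A hedged sketch of that handling, again assuming the `get_client()` helper and a configured API (neither shown in this diff):

```python
from typing import Any

from prefect.client.orchestration import get_client


async def variable_value_or_default(name: str, default: Any = None) -> Any:
    async with get_client() as client:
        variable = await client.read_variable_by_name(name)
        # A missing variable comes back as None rather than raising.
        return default if variable is None else variable.value
```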
@@ -3201,7 +3230,9 @@ async def read_global_concurrency_limit_by_name( else: raise - async def upsert_global_concurrency_limit_by_name(self, name: str, limit: int): + async def upsert_global_concurrency_limit_by_name( + self, name: str, limit: int + ) -> None: """Creates a global concurrency limit with the given name and limit if one does not already exist. If one does already exist matching the name then update it's limit if it is different. @@ -3227,7 +3258,7 @@ async def upsert_global_concurrency_limit_by_name(self, name: str, limit: int): async def read_global_concurrency_limits( self, limit: int = 10, offset: int = 0 - ) -> List[GlobalConcurrencyLimitResponse]: + ) -> list[GlobalConcurrencyLimitResponse]: response = await self._client.post( "/v2/concurrency_limits/filter", json={ @@ -3236,12 +3267,12 @@ async def read_global_concurrency_limits( }, ) return pydantic.TypeAdapter( - List[GlobalConcurrencyLimitResponse] + list[GlobalConcurrencyLimitResponse] ).validate_python(response.json()) async def create_flow_run_input( self, flow_run_id: UUID, key: str, value: str, sender: Optional[str] = None - ): + ) -> None: """ Creates a flow run input. @@ -3262,8 +3293,8 @@ async def create_flow_run_input( response.raise_for_status() async def filter_flow_run_input( - self, flow_run_id: UUID, key_prefix: str, limit: int, exclude_keys: Set[str] - ) -> List[FlowRunInput]: + self, flow_run_id: UUID, key_prefix: str, limit: int, exclude_keys: set[str] + ) -> list[FlowRunInput]: response = await self._client.post( f"/flow_runs/{flow_run_id}/input/filter", json={ @@ -3273,7 +3304,7 @@ async def filter_flow_run_input( }, ) response.raise_for_status() - return pydantic.TypeAdapter(List[FlowRunInput]).validate_python(response.json()) + return pydantic.TypeAdapter(list[FlowRunInput]).validate_python(response.json()) async def read_flow_run_input(self, flow_run_id: UUID, key: str) -> str: """ @@ -3287,7 +3318,7 @@ async def read_flow_run_input(self, flow_run_id: UUID, key: str) -> str: response.raise_for_status() return response.content.decode() - async def delete_flow_run_input(self, flow_run_id: UUID, key: str): + async def delete_flow_run_input(self, flow_run_id: UUID, key: str) -> None: """ Deletes a flow run input. 
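The flow run input hunks above keep string keys and values on both sides: `create_flow_run_input` takes `value: str` and `read_flow_run_input` returns `str`. A hedged round-trip sketch, assuming `get_client()` and an existing flow run ID:

```python
from uuid import UUID

from prefect.client.orchestration import get_client


async def set_and_read_input(flow_run_id: UUID) -> str:
    # Both calls target a specific flow run; the value is stored and returned
    # as a plain string per the annotations in the hunks above.
    async with get_client() as client:
        await client.create_flow_run_input(flow_run_id, key="approved", value="true")
        return await client.read_flow_run_input(flow_run_id, key="approved")
```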
@@ -3307,7 +3338,9 @@ async def create_automation(self, automation: AutomationCore) -> UUID: return UUID(response.json()["id"]) - async def update_automation(self, automation_id: UUID, automation: AutomationCore): + async def update_automation( + self, automation_id: UUID, automation: AutomationCore + ) -> None: """Updates an automation in Prefect Cloud.""" response = await self._client.put( f"/automations/{automation_id}", @@ -3315,21 +3348,23 @@ async def update_automation(self, automation_id: UUID, automation: AutomationCor ) response.raise_for_status - async def read_automations(self) -> List[Automation]: + async def read_automations(self) -> list[Automation]: response = await self._client.post("/automations/filter") response.raise_for_status() - return pydantic.TypeAdapter(List[Automation]).validate_python(response.json()) + return pydantic.TypeAdapter(list[Automation]).validate_python(response.json()) async def find_automation( self, id_or_name: Union[str, UUID] ) -> Optional[Automation]: if isinstance(id_or_name, str): + name = id_or_name try: id = UUID(id_or_name) except ValueError: id = None - elif isinstance(id_or_name, UUID): + else: id = id_or_name + name = str(id) if id: try: @@ -3343,24 +3378,26 @@ async def find_automation( # Look for it by an exact name for automation in automations: - if automation.name == id_or_name: + if automation.name == name: return automation # Look for it by a case-insensitive name for automation in automations: - if automation.name.lower() == id_or_name.lower(): + if automation.name.lower() == name.lower(): return automation return None - async def read_automation(self, automation_id: UUID) -> Optional[Automation]: + async def read_automation( + self, automation_id: Union[UUID, str] + ) -> Optional[Automation]: response = await self._client.get(f"/automations/{automation_id}") if response.status_code == 404: return None response.raise_for_status() return Automation.model_validate(response.json()) - async def read_automations_by_name(self, name: str) -> List[Automation]: + async def read_automations_by_name(self, name: str) -> list[Automation]: """ Query the Prefect API for an automation by name. Only automations matching the provided name will be returned. 
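The reworked `find_automation` above resolves a `Union[str, UUID]` argument by first trying to parse the string as a UUID and otherwise treating it as a name. The parsing step on its own:

```python
from typing import Optional
from uuid import UUID


def coerce_to_uuid(id_or_name: str) -> Optional[UUID]:
    # Mirrors the lookup order in find_automation: a parseable UUID wins, and
    # anything else is left for name matching by the caller.
    try:
        return UUID(id_or_name)
    except ValueError:
        return None


assert coerce_to_uuid("not-a-uuid") is None
assert coerce_to_uuid("12345678-1234-5678-1234-567812345678") is not None
```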
@@ -3370,7 +3407,9 @@ async def read_automations_by_name(self, name: str) -> List[Automation]: Returns: a list of Automation model representations of the automations """ - automation_filter = filters.AutomationFilter(name=dict(any_=[name])) + automation_filter = filters.AutomationFilter( + name=filters.AutomationFilterName(any_=[name]) + ) response = await self._client.post( "/automations/filter", @@ -3384,21 +3423,21 @@ async def read_automations_by_name(self, name: str) -> List[Automation]: response.raise_for_status() - return pydantic.TypeAdapter(List[Automation]).validate_python(response.json()) + return pydantic.TypeAdapter(list[Automation]).validate_python(response.json()) - async def pause_automation(self, automation_id: UUID): + async def pause_automation(self, automation_id: UUID) -> None: response = await self._client.patch( f"/automations/{automation_id}", json={"enabled": False} ) response.raise_for_status() - async def resume_automation(self, automation_id: UUID): + async def resume_automation(self, automation_id: UUID) -> None: response = await self._client.patch( f"/automations/{automation_id}", json={"enabled": True} ) response.raise_for_status() - async def delete_automation(self, automation_id: UUID): + async def delete_automation(self, automation_id: UUID) -> None: response = await self._client.delete(f"/automations/{automation_id}") if response.status_code == 404: return @@ -3407,12 +3446,12 @@ async def delete_automation(self, automation_id: UUID): async def read_resource_related_automations( self, resource_id: str - ) -> List[Automation]: + ) -> list[Automation]: response = await self._client.get(f"/automations/related-to/{resource_id}") response.raise_for_status() - return pydantic.TypeAdapter(List[Automation]).validate_python(response.json()) + return pydantic.TypeAdapter(list[Automation]).validate_python(response.json()) - async def delete_resource_owned_automations(self, resource_id: str): + async def delete_resource_owned_automations(self, resource_id: str) -> None: await self._client.delete(f"/automations/owned-by/{resource_id}") async def api_version(self) -> str: @@ -3422,7 +3461,7 @@ async def api_version(self) -> str: def client_version(self) -> str: return prefect.__version__ - async def raise_for_api_version_mismatch(self): + async def raise_for_api_version_mismatch(self) -> None: # Cloud is always compatible as a server if self.server_type == ServerType.CLOUD: return @@ -3441,7 +3480,19 @@ async def raise_for_api_version_mismatch(self): f"Major versions must match." ) - async def __aenter__(self): + async def update_flow_run_labels( + self, flow_run_id: UUID, labels: KeyValueLabelsField + ) -> None: + """ + Updates the labels of a flow run. + """ + + response = await self._client.patch( + f"/flow_runs/{flow_run_id}/labels", json=labels + ) + response.raise_for_status() + + async def __aenter__(self) -> Self: """ Start the client. @@ -3488,7 +3539,7 @@ async def __aenter__(self): return self - async def __aexit__(self, *exc_info): + async def __aexit__(self, *exc_info: Any) -> Optional[bool]: """ Shutdown the client. """ @@ -3499,13 +3550,13 @@ async def __aexit__(self, *exc_info): self._closed = True return await self._exit_stack.__aexit__(*exc_info) - def __enter__(self): + def __enter__(self) -> NoReturn: raise RuntimeError( "The `PrefectClient` must be entered with an async context. 
Use 'async " "with PrefectClient(...)' not 'with PrefectClient(...)'" ) - def __exit__(self, *_): + def __exit__(self, *_: object) -> NoReturn: assert False, "This should never be called but must be defined for __enter__" @@ -3541,7 +3592,7 @@ def __init__( *, api_key: Optional[str] = None, api_version: Optional[str] = None, - httpx_settings: Optional[Dict[str, Any]] = None, + httpx_settings: Optional[dict[str, Any]] = None, server_type: Optional[ServerType] = None, ) -> None: httpx_settings = httpx_settings.copy() if httpx_settings else {} @@ -3617,16 +3668,10 @@ def __init__( ) # Connect to an in-process application - elif isinstance(api, ASGIApp): + else: self._ephemeral_app = api self.server_type = ServerType.EPHEMERAL - else: - raise TypeError( - f"Unexpected type {type(api).__name__!r} for argument `api`. Expected" - " 'str' or 'ASGIApp/FastAPI'" - ) - # See https://www.python-httpx.org/advanced/#timeout-configuration httpx_settings.setdefault( "timeout", @@ -3669,9 +3714,9 @@ def __init__( if isinstance(server_transport, httpx.HTTPTransport): pool = getattr(server_transport, "_pool", None) if isinstance(pool, httpcore.ConnectionPool): - pool._retries = 3 + setattr(pool, "_retries", 3) - self.logger = get_logger("client") + self.logger: Logger = get_logger("client") @property def api_url(self) -> httpx.URL: @@ -3709,7 +3754,7 @@ def __enter__(self) -> "SyncPrefectClient": return self - def __exit__(self, *exc_info) -> None: + def __exit__(self, *exc_info: Any) -> None: """ Shutdown the client. """ @@ -3747,7 +3792,7 @@ def api_version(self) -> str: def client_version(self) -> str: return prefect.__version__ - def raise_for_api_version_mismatch(self): + def raise_for_api_version_mismatch(self) -> None: # Cloud is always compatible as a server if self.server_type == ServerType.CLOUD: return @@ -3766,7 +3811,7 @@ def raise_for_api_version_mismatch(self): f"Major versions must match." ) - def create_flow(self, flow: "FlowObject") -> UUID: + def create_flow(self, flow: "FlowObject[Any, Any]") -> UUID: """ Create a flow in the Prefect API. @@ -3806,13 +3851,13 @@ def create_flow_from_name(self, flow_name: str) -> UUID: def create_flow_run( self, - flow: "FlowObject", + flow: "FlowObject[Any, R]", name: Optional[str] = None, - parameters: Optional[Dict[str, Any]] = None, - context: Optional[Dict[str, Any]] = None, + parameters: Optional[dict[str, Any]] = None, + context: Optional[dict[str, Any]] = None, tags: Optional[Iterable[str]] = None, parent_task_run_id: Optional[UUID] = None, - state: Optional["prefect.states.State"] = None, + state: Optional["prefect.states.State[R]"] = None, ) -> FlowRun: """ Create a flow run for a flow. @@ -3854,7 +3899,7 @@ def create_flow_run( state=state.to_state_create(), empirical_policy=FlowRunPolicy( retries=flow.retries, - retry_delay=flow.retry_delay_seconds, + retry_delay=int(flow.retry_delay_seconds or 0), ), ) @@ -3872,12 +3917,12 @@ def update_flow_run( self, flow_run_id: UUID, flow_version: Optional[str] = None, - parameters: Optional[dict] = None, + parameters: Optional[dict[str, Any]] = None, name: Optional[str] = None, tags: Optional[Iterable[str]] = None, empirical_policy: Optional[FlowRunPolicy] = None, infrastructure_pid: Optional[str] = None, - job_variables: Optional[dict] = None, + job_variables: Optional[dict[str, Any]] = None, ) -> httpx.Response: """ Update a flow run's details. 
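The `__enter__`/`__exit__` hunks above preserve the rule that the async `PrefectClient` is only usable via `async with`, while `SyncPrefectClient` takes a plain `with`. A hedged sketch of the two entry styles, assuming `get_client()` and its `sync_client` flag behave as in current Prefect releases (the flag is not part of this diff):

```python
import asyncio

from prefect.client.orchestration import get_client


async def show_api_version_async() -> None:
    # A bare `with` on the async client raises RuntimeError, per the
    # __enter__ override shown above.
    async with get_client() as client:
        print(await client.api_version())


def show_api_version_sync() -> None:
    with get_client(sync_client=True) as client:
        print(client.api_version())


if __name__ == "__main__":
    asyncio.run(show_api_version_async())
    show_api_version_sync()
```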
@@ -3898,7 +3943,7 @@ def update_flow_run( Returns: an `httpx.Response` object from the PATCH request """ - params = {} + params: dict[str, Any] = {} if flow_version is not None: params["flow_version"] = flow_version if parameters is not None: @@ -3954,7 +3999,7 @@ def read_flow_runs( sort: Optional[FlowRunSort] = None, limit: Optional[int] = None, offset: int = 0, - ) -> List[FlowRun]: + ) -> list[FlowRun]: """ Query the Prefect API for flow runs. Only flow runs matching all criteria will be returned. @@ -3974,7 +4019,7 @@ def read_flow_runs( a list of Flow Run model representations of the flow runs """ - body = { + body: dict[str, Any] = { "flows": flow_filter.model_dump(mode="json") if flow_filter else None, "flow_runs": ( flow_run_filter.model_dump(mode="json", exclude_unset=True) @@ -3999,14 +4044,14 @@ def read_flow_runs( } response = self._client.post("/flow_runs/filter", json=body) - return pydantic.TypeAdapter(List[FlowRun]).validate_python(response.json()) + return pydantic.TypeAdapter(list[FlowRun]).validate_python(response.json()) def set_flow_run_state( self, flow_run_id: UUID, - state: "prefect.states.State", + state: "prefect.states.State[T]", force: bool = False, - ) -> OrchestrationResult: + ) -> OrchestrationResult[T]: """ Set the state of a flow run. @@ -4036,16 +4081,19 @@ def set_flow_run_state( else: raise - return OrchestrationResult.model_validate(response.json()) + result: OrchestrationResult[T] = OrchestrationResult.model_validate( + response.json() + ) + return result - def set_flow_run_name(self, flow_run_id: UUID, name: str): + def set_flow_run_name(self, flow_run_id: UUID, name: str) -> httpx.Response: flow_run_data = FlowRunUpdate(name=name) return self._client.patch( f"/flow_runs/{flow_run_id}", json=flow_run_data.model_dump(mode="json", exclude_unset=True), ) - def set_task_run_name(self, task_run_id: UUID, name: str): + def set_task_run_name(self, task_run_id: UUID, name: str) -> httpx.Response: task_run_data = TaskRunUpdate(name=name) return self._client.patch( f"/task_runs/{task_run_id}", @@ -4062,9 +4110,9 @@ def create_task_run( extra_tags: Optional[Iterable[str]] = None, state: Optional[prefect.states.State[R]] = None, task_inputs: Optional[ - Dict[ + dict[ str, - List[ + list[ Union[ TaskRunResult, Parameter, @@ -4098,6 +4146,12 @@ def create_task_run( if state is None: state = prefect.states.Pending() + retry_delay = task.retry_delay_seconds + if isinstance(retry_delay, list): + retry_delay = [int(rd) for rd in retry_delay] + elif isinstance(retry_delay, float): + retry_delay = int(retry_delay) + task_run_data = TaskRunCreate( id=id, name=name, @@ -4108,7 +4162,7 @@ def create_task_run( task_version=task.version, empirical_policy=TaskRunPolicy( retries=task.retries, - retry_delay=task.retry_delay_seconds, + retry_delay=retry_delay, retry_jitter_factor=task.retry_jitter_factor, ), state=state.to_state_create(), @@ -4142,14 +4196,14 @@ def read_task_run(self, task_run_id: UUID) -> TaskRun: def read_task_runs( self, *, - flow_filter: FlowFilter = None, - flow_run_filter: FlowRunFilter = None, - task_run_filter: TaskRunFilter = None, - deployment_filter: DeploymentFilter = None, - sort: TaskRunSort = None, + flow_filter: Optional[FlowFilter] = None, + flow_run_filter: Optional[FlowRunFilter] = None, + task_run_filter: Optional[TaskRunFilter] = None, + deployment_filter: Optional[DeploymentFilter] = None, + sort: Optional[TaskRunSort] = None, limit: Optional[int] = None, offset: int = 0, - ) -> List[TaskRun]: + ) -> list[TaskRun]: """ Query the Prefect 
API for task runs. Only task runs matching all criteria will be returned. @@ -4167,7 +4221,7 @@ def read_task_runs( a list of Task Run model representations of the task runs """ - body = { + body: dict[str, Any] = { "flows": flow_filter.model_dump(mode="json") if flow_filter else None, "flow_runs": ( flow_run_filter.model_dump(mode="json", exclude_unset=True) @@ -4185,14 +4239,14 @@ def read_task_runs( "offset": offset, } response = self._client.post("/task_runs/filter", json=body) - return pydantic.TypeAdapter(List[TaskRun]).validate_python(response.json()) + return pydantic.TypeAdapter(list[TaskRun]).validate_python(response.json()) def set_task_run_state( self, task_run_id: UUID, - state: prefect.states.State, + state: prefect.states.State[Any], force: bool = False, - ) -> OrchestrationResult: + ) -> OrchestrationResult[Any]: """ Set the state of a task run. @@ -4211,9 +4265,12 @@ def set_task_run_state( f"/task_runs/{task_run_id}/set_state", json=dict(state=state_create.model_dump(mode="json"), force=force), ) - return OrchestrationResult.model_validate(response.json()) + result: OrchestrationResult[Any] = OrchestrationResult.model_validate( + response.json() + ) + return result - def read_task_run_states(self, task_run_id: UUID) -> List[prefect.states.State]: + def read_task_run_states(self, task_run_id: UUID) -> list[prefect.states.State]: """ Query for the states of a task run @@ -4226,7 +4283,7 @@ def read_task_run_states(self, task_run_id: UUID) -> List[prefect.states.State]: response = self._client.get( "/task_run_states/", params=dict(task_run_id=str(task_run_id)) ) - return pydantic.TypeAdapter(List[prefect.states.State]).validate_python( + return pydantic.TypeAdapter(list[prefect.states.State]).validate_python( response.json() ) @@ -4300,7 +4357,7 @@ def create_artifact( return Artifact.model_validate(response.json()) def release_concurrency_slots( - self, names: List[str], slots: int, occupancy_seconds: float + self, names: list[str], slots: int, occupancy_seconds: float ) -> httpx.Response: """ Release concurrency slots for the specified limits. @@ -4324,7 +4381,7 @@ def release_concurrency_slots( ) def decrement_v1_concurrency_slots( - self, names: List[str], occupancy_seconds: float, task_run_id: UUID + self, names: list[str], occupancy_seconds: float, task_run_id: UUID ) -> httpx.Response: """ Release the specified concurrency limits. @@ -4346,3 +4403,103 @@ def decrement_v1_concurrency_slots( "task_run_id": str(task_run_id), }, ) + + def update_flow_run_labels( + self, flow_run_id: UUID, labels: KeyValueLabelsField + ) -> None: + """ + Updates the labels of a flow run. + """ + response = self._client.patch( + f"/flow_runs/{flow_run_id}/labels", + json=labels, + ) + response.raise_for_status() + + def read_block_document_by_name( + self, + name: str, + block_type_slug: str, + include_secrets: bool = True, + ) -> BlockDocument: + """ + Read the block document with the specified name that corresponds to a + specific block type name. + + Args: + name: The block document name. + block_type_slug: The block type slug. + include_secrets (bool): whether to include secret values + on the Block, corresponding to Pydantic's `SecretStr` and + `SecretBytes` fields. These fields are automatically obfuscated + by Pydantic, but users can additionally choose not to receive + their values from the API. Note that any business logic on the + Block may not work if this is `False`. 
+ + Raises: + httpx.RequestError: if the block document was not found for any reason + + Returns: + A block document or None. + """ + try: + response = self._client.get( + f"/block_types/slug/{block_type_slug}/block_documents/name/{name}", + params=dict(include_secrets=include_secrets), + ) + except httpx.HTTPStatusError as e: + if e.response.status_code == status.HTTP_404_NOT_FOUND: + raise prefect.exceptions.ObjectNotFound(http_exc=e) from e + else: + raise + return BlockDocument.model_validate(response.json()) + + def create_variable(self, variable: VariableCreate) -> Variable: + """ + Creates an variable with the provided configuration. + + Args: + variable: Desired configuration for the new variable. + Returns: + Information about the newly created variable. + """ + response = self._client.post( + "/variables/", + json=variable.model_dump(mode="json", exclude_unset=True), + ) + return Variable(**response.json()) + + def update_variable(self, variable: VariableUpdate) -> None: + """ + Updates a variable with the provided configuration. + + Args: + variable: Desired configuration for the updated variable. + Returns: + Information about the updated variable. + """ + self._client.patch( + f"/variables/name/{variable.name}", + json=variable.model_dump(mode="json", exclude_unset=True), + ) + + def read_variable_by_name(self, name: str) -> Optional[Variable]: + """Reads a variable by name. Returns None if no variable is found.""" + try: + response = self._client.get(f"/variables/name/{name}") + return Variable(**response.json()) + except httpx.HTTPStatusError as e: + if e.response.status_code == status.HTTP_404_NOT_FOUND: + return None + else: + raise + + def delete_variable_by_name(self, name: str) -> None: + """Deletes a variable by name.""" + try: + self._client.delete(f"/variables/name/{name}") + except httpx.HTTPStatusError as e: + if e.response.status_code == 404: + raise prefect.exceptions.ObjectNotFound(http_exc=e) from e + else: + raise diff --git a/src/prefect/client/schemas/__init__.py b/src/prefect/client/schemas/__init__.py index c5335d4906b0..2a35e6a1f3c0 100644 --- a/src/prefect/client/schemas/__init__.py +++ b/src/prefect/client/schemas/__init__.py @@ -25,3 +25,27 @@ StateAcceptDetails, StateRejectDetails, ) + +__all__ = ( + "BlockDocument", + "BlockSchema", + "BlockType", + "BlockTypeUpdate", + "DEFAULT_BLOCK_SCHEMA_VERSION", + "FlowRun", + "FlowRunPolicy", + "OrchestrationResult", + "SetStateStatus", + "State", + "StateAbortDetails", + "StateAcceptDetails", + "StateCreate", + "StateDetails", + "StateRejectDetails", + "StateType", + "TaskRun", + "TaskRunInput", + "TaskRunPolicy", + "TaskRunResult", + "Workspace", +) diff --git a/src/prefect/client/schemas/actions.py b/src/prefect/client/schemas/actions.py index 9e0dd4bd3052..28be60e39cc5 100644 --- a/src/prefect/client/schemas/actions.py +++ b/src/prefect/client/schemas/actions.py @@ -1,10 +1,9 @@ from copy import deepcopy -from typing import TYPE_CHECKING, Any, Dict, List, Optional, TypeVar, Union +from typing import TYPE_CHECKING, Any, Optional, TypeVar, Union from uuid import UUID, uuid4 import jsonschema from pydantic import Field, field_validator, model_validator -from pydantic_extra_types.pendulum_dt import DateTime import prefect.client.schemas.objects as objects from prefect._internal.schemas.bases import ActionBaseModel @@ -27,6 +26,7 @@ from prefect.settings import PREFECT_DEPLOYMENT_SCHEDULE_MAX_SCHEDULED_RUNS from prefect.types import ( MAX_VARIABLE_NAME_LENGTH, + DateTime, KeyValueLabelsField, Name, 
NonEmptyishName, @@ -51,7 +51,7 @@ class StateCreate(ActionBaseModel): name: Optional[str] = Field(default=None) message: Optional[str] = Field(default=None, examples=["Run started"]) state_details: StateDetails = Field(default_factory=StateDetails) - data: Union["BaseResult[R]", "ResultRecordMetadata", Any] = Field( + data: Union["BaseResult[Any]", "ResultRecordMetadata", Any] = Field( default=None, ) @@ -62,18 +62,19 @@ class FlowCreate(ActionBaseModel): name: str = Field( default=..., description="The name of the flow", examples=["my-flow"] ) - tags: List[str] = Field( + tags: list[str] = Field( default_factory=list, description="A list of flow tags", examples=[["tag-1", "tag-2"]], ) - labels: KeyValueLabelsField + + labels: KeyValueLabelsField = Field(default_factory=dict) class FlowUpdate(ActionBaseModel): """Data used by the Prefect REST API to update a flow.""" - tags: List[str] = Field( + tags: list[str] = Field( default_factory=list, description="A list of flow tags", examples=[["tag-1", "tag-2"]], @@ -94,7 +95,7 @@ class DeploymentScheduleCreate(ActionBaseModel): @field_validator("max_scheduled_runs") @classmethod - def validate_max_scheduled_runs(cls, v): + def validate_max_scheduled_runs(cls, v: Optional[int]) -> Optional[int]: return validate_schedule_max_scheduled_runs( v, PREFECT_DEPLOYMENT_SCHEDULE_MAX_SCHEDULED_RUNS.value() ) @@ -115,7 +116,7 @@ class DeploymentScheduleUpdate(ActionBaseModel): @field_validator("max_scheduled_runs") @classmethod - def validate_max_scheduled_runs(cls, v): + def validate_max_scheduled_runs(cls, v: Optional[int]) -> Optional[int]: return validate_schedule_max_scheduled_runs( v, PREFECT_DEPLOYMENT_SCHEDULE_MAX_SCHEDULED_RUNS.value() ) @@ -126,18 +127,20 @@ class DeploymentCreate(ActionBaseModel): @model_validator(mode="before") @classmethod - def remove_old_fields(cls, values): + def remove_old_fields(cls, values: dict[str, Any]) -> dict[str, Any]: return remove_old_deployment_fields(values) @field_validator("description", "tags", mode="before") @classmethod - def convert_to_strings(cls, values): + def convert_to_strings( + cls, values: Optional[Union[str, list[str]]] + ) -> Union[str, list[str]]: return convert_to_strings(values) name: str = Field(..., description="The name of the deployment.") flow_id: UUID = Field(..., description="The ID of the flow to deploy.") - paused: Optional[bool] = Field(None) - schedules: List[DeploymentScheduleCreate] = Field( + paused: Optional[bool] = Field(default=None) + schedules: list[DeploymentScheduleCreate] = Field( default_factory=list, description="A list of schedules for the deployment.", ) @@ -155,33 +158,33 @@ def convert_to_strings(cls, values): "Whether or not the deployment should enforce the parameter schema." 
), ) - parameter_openapi_schema: Optional[Dict[str, Any]] = Field(default_factory=dict) - parameters: Dict[str, Any] = Field( + parameter_openapi_schema: Optional[dict[str, Any]] = Field(default_factory=dict) + parameters: dict[str, Any] = Field( default_factory=dict, description="Parameters for flow runs scheduled by the deployment.", ) - tags: List[str] = Field(default_factory=list) - labels: KeyValueLabelsField - pull_steps: Optional[List[dict]] = Field(None) + tags: list[str] = Field(default_factory=list) + labels: KeyValueLabelsField = Field(default_factory=dict) + pull_steps: Optional[list[dict[str, Any]]] = Field(default=None) - work_queue_name: Optional[str] = Field(None) + work_queue_name: Optional[str] = Field(default=None) work_pool_name: Optional[str] = Field( default=None, description="The name of the deployment's work pool.", examples=["my-work-pool"], ) - storage_document_id: Optional[UUID] = Field(None) - infrastructure_document_id: Optional[UUID] = Field(None) - description: Optional[str] = Field(None) - path: Optional[str] = Field(None) - version: Optional[str] = Field(None) - entrypoint: Optional[str] = Field(None) - job_variables: Dict[str, Any] = Field( + storage_document_id: Optional[UUID] = Field(default=None) + infrastructure_document_id: Optional[UUID] = Field(default=None) + description: Optional[str] = Field(default=None) + path: Optional[str] = Field(default=None) + version: Optional[str] = Field(default=None) + entrypoint: Optional[str] = Field(default=None) + job_variables: dict[str, Any] = Field( default_factory=dict, description="Overrides to apply to flow run infrastructure at runtime.", ) - def check_valid_configuration(self, base_job_template: dict): + def check_valid_configuration(self, base_job_template: dict[str, Any]) -> None: """Check that the combination of base_job_template defaults and job_variables conforms to the specified schema. """ @@ -206,19 +209,19 @@ class DeploymentUpdate(ActionBaseModel): @model_validator(mode="before") @classmethod - def remove_old_fields(cls, values): + def remove_old_fields(cls, values: dict[str, Any]) -> dict[str, Any]: return remove_old_deployment_fields(values) - version: Optional[str] = Field(None) - description: Optional[str] = Field(None) - parameters: Optional[Dict[str, Any]] = Field( + version: Optional[str] = Field(default=None) + description: Optional[str] = Field(default=None) + parameters: Optional[dict[str, Any]] = Field( default=None, description="Parameters for flow runs scheduled by the deployment.", ) paused: Optional[bool] = Field( default=None, description="Whether or not the deployment is paused." 
) - schedules: Optional[List[DeploymentScheduleCreate]] = Field( + schedules: Optional[list[DeploymentScheduleCreate]] = Field( default=None, description="A list of schedules for the deployment.", ) @@ -230,21 +233,21 @@ def remove_old_fields(cls, values): default=None, description="The concurrency options for the deployment.", ) - tags: List[str] = Field(default_factory=list) - work_queue_name: Optional[str] = Field(None) + tags: list[str] = Field(default_factory=list) + work_queue_name: Optional[str] = Field(default=None) work_pool_name: Optional[str] = Field( default=None, description="The name of the deployment's work pool.", examples=["my-work-pool"], ) - path: Optional[str] = Field(None) - job_variables: Optional[Dict[str, Any]] = Field( + path: Optional[str] = Field(default=None) + job_variables: Optional[dict[str, Any]] = Field( default_factory=dict, description="Overrides to apply to flow run infrastructure at runtime.", ) - entrypoint: Optional[str] = Field(None) - storage_document_id: Optional[UUID] = Field(None) - infrastructure_document_id: Optional[UUID] = Field(None) + entrypoint: Optional[str] = Field(default=None) + storage_document_id: Optional[UUID] = Field(default=None) + infrastructure_document_id: Optional[UUID] = Field(default=None) enforce_parameter_schema: Optional[bool] = Field( default=None, description=( @@ -252,7 +255,7 @@ def remove_old_fields(cls, values): ), ) - def check_valid_configuration(self, base_job_template: dict): + def check_valid_configuration(self, base_job_template: dict[str, Any]) -> None: """Check that the combination of base_job_template defaults and job_variables conforms to the specified schema. """ @@ -276,15 +279,15 @@ def check_valid_configuration(self, base_job_template: dict): class FlowRunUpdate(ActionBaseModel): """Data used by the Prefect REST API to update a flow run.""" - name: Optional[str] = Field(None) - flow_version: Optional[str] = Field(None) - parameters: Optional[Dict[str, Any]] = Field(default_factory=dict) + name: Optional[str] = Field(default=None) + flow_version: Optional[str] = Field(default=None) + parameters: Optional[dict[str, Any]] = Field(default_factory=dict) empirical_policy: objects.FlowRunPolicy = Field( default_factory=objects.FlowRunPolicy ) - tags: List[str] = Field(default_factory=list) - infrastructure_pid: Optional[str] = Field(None) - job_variables: Optional[Dict[str, Any]] = Field(None) + tags: list[str] = Field(default_factory=list) + infrastructure_pid: Optional[str] = Field(default=None) + job_variables: Optional[dict[str, Any]] = Field(default=None) class TaskRunCreate(ActionBaseModel): @@ -300,7 +303,7 @@ class TaskRunCreate(ActionBaseModel): default=None, description="The name of the task run", ) - flow_run_id: Optional[UUID] = Field(None) + flow_run_id: Optional[UUID] = Field(default=None) task_key: str = Field( default=..., description="A unique identifier for the task being run." ) @@ -311,17 +314,17 @@ class TaskRunCreate(ActionBaseModel): " within the same flow run." 
), ) - cache_key: Optional[str] = Field(None) - cache_expiration: Optional[objects.DateTime] = Field(None) - task_version: Optional[str] = Field(None) + cache_key: Optional[str] = Field(default=None) + cache_expiration: Optional[objects.DateTime] = Field(default=None) + task_version: Optional[str] = Field(default=None) empirical_policy: objects.TaskRunPolicy = Field( default_factory=objects.TaskRunPolicy, ) - tags: List[str] = Field(default_factory=list) - labels: KeyValueLabelsField - task_inputs: Dict[ + tags: list[str] = Field(default_factory=list) + labels: KeyValueLabelsField = Field(default_factory=dict) + task_inputs: dict[ str, - List[ + list[ Union[ objects.TaskRunResult, objects.Parameter, @@ -334,7 +337,7 @@ class TaskRunCreate(ActionBaseModel): class TaskRunUpdate(ActionBaseModel): """Data used by the Prefect REST API to update a task run""" - name: Optional[str] = Field(None) + name: Optional[str] = Field(default=None) class FlowRunCreate(ActionBaseModel): @@ -347,22 +350,23 @@ class FlowRunCreate(ActionBaseModel): name: Optional[str] = Field(default=None, description="The name of the flow run.") flow_id: UUID = Field(default=..., description="The id of the flow being run.") - deployment_id: Optional[UUID] = Field(None) - flow_version: Optional[str] = Field(None) - parameters: Dict[str, Any] = Field( + deployment_id: Optional[UUID] = Field(default=None) + flow_version: Optional[str] = Field(default=None) + parameters: dict[str, Any] = Field( default_factory=dict, description="The parameters for the flow run." ) - context: Dict[str, Any] = Field( + context: dict[str, Any] = Field( default_factory=dict, description="The context for the flow run." ) - parent_task_run_id: Optional[UUID] = Field(None) - infrastructure_document_id: Optional[UUID] = Field(None) + parent_task_run_id: Optional[UUID] = Field(default=None) + infrastructure_document_id: Optional[UUID] = Field(default=None) empirical_policy: objects.FlowRunPolicy = Field( default_factory=objects.FlowRunPolicy ) - tags: List[str] = Field(default_factory=list) - labels: KeyValueLabelsField - idempotency_key: Optional[str] = Field(None) + tags: list[str] = Field(default_factory=list) + idempotency_key: Optional[str] = Field(default=None) + + labels: KeyValueLabelsField = Field(default_factory=dict) class DeploymentFlowRunCreate(ActionBaseModel): @@ -374,32 +378,33 @@ class DeploymentFlowRunCreate(ActionBaseModel): ) name: Optional[str] = Field(default=None, description="The name of the flow run.") - parameters: Dict[str, Any] = Field( + parameters: dict[str, Any] = Field( default_factory=dict, description="The parameters for the flow run." ) enforce_parameter_schema: Optional[bool] = Field( default=None, description="Whether or not to enforce the parameter schema on this run.", ) - context: Dict[str, Any] = Field( + context: dict[str, Any] = Field( default_factory=dict, description="The context for the flow run." 
) - infrastructure_document_id: Optional[UUID] = Field(None) + infrastructure_document_id: Optional[UUID] = Field(default=None) empirical_policy: objects.FlowRunPolicy = Field( default_factory=objects.FlowRunPolicy ) - tags: List[str] = Field(default_factory=list) + tags: list[str] = Field(default_factory=list) idempotency_key: Optional[str] = Field(None) parent_task_run_id: Optional[UUID] = Field(None) work_queue_name: Optional[str] = Field(None) job_variables: Optional[dict] = Field(None) + labels: KeyValueLabelsField = Field(default_factory=dict) class SavedSearchCreate(ActionBaseModel): """Data used by the Prefect REST API to create a saved search.""" name: str = Field(default=..., description="The name of the saved search.") - filters: List[objects.SavedSearchFilter] = Field( + filters: list[objects.SavedSearchFilter] = Field( default_factory=list, description="The filter set for the saved search." ) @@ -436,12 +441,12 @@ class ConcurrencyLimitV2Create(ActionBaseModel): class ConcurrencyLimitV2Update(ActionBaseModel): """Data used by the Prefect REST API to update a v2 concurrency limit.""" - active: Optional[bool] = Field(None) - name: Optional[Name] = Field(None) - limit: Optional[NonNegativeInteger] = Field(None) - active_slots: Optional[NonNegativeInteger] = Field(None) - denied_slots: Optional[NonNegativeInteger] = Field(None) - slot_decay_per_second: Optional[NonNegativeFloat] = Field(None) + active: Optional[bool] = Field(default=None) + name: Optional[Name] = Field(default=None) + limit: Optional[NonNegativeInteger] = Field(default=None) + active_slots: Optional[NonNegativeInteger] = Field(default=None) + denied_slots: Optional[NonNegativeInteger] = Field(default=None) + slot_decay_per_second: Optional[NonNegativeFloat] = Field(default=None) class BlockTypeCreate(ActionBaseModel): @@ -471,24 +476,24 @@ class BlockTypeCreate(ActionBaseModel): class BlockTypeUpdate(ActionBaseModel): """Data used by the Prefect REST API to update a block type.""" - logo_url: Optional[objects.HttpUrl] = Field(None) - documentation_url: Optional[objects.HttpUrl] = Field(None) - description: Optional[str] = Field(None) - code_example: Optional[str] = Field(None) + logo_url: Optional[objects.HttpUrl] = Field(default=None) + documentation_url: Optional[objects.HttpUrl] = Field(default=None) + description: Optional[str] = Field(default=None) + code_example: Optional[str] = Field(default=None) @classmethod - def updatable_fields(cls) -> set: + def updatable_fields(cls) -> set[str]: return get_class_fields_only(cls) class BlockSchemaCreate(ActionBaseModel): """Data used by the Prefect REST API to create a block schema.""" - fields: Dict[str, Any] = Field( + fields: dict[str, Any] = Field( default_factory=dict, description="The block schema's field schema" ) - block_type_id: Optional[UUID] = Field(None) - capabilities: List[str] = Field( + block_type_id: Optional[UUID] = Field(default=None) + capabilities: list[str] = Field( default_factory=list, description="A list of Block capabilities", ) @@ -504,7 +509,7 @@ class BlockDocumentCreate(ActionBaseModel): name: Optional[Name] = Field( default=None, description="The name of the block document" ) - data: Dict[str, Any] = Field( + data: dict[str, Any] = Field( default_factory=dict, description="The block document's data" ) block_schema_id: UUID = Field( @@ -524,7 +529,9 @@ class BlockDocumentCreate(ActionBaseModel): _validate_name_format = field_validator("name")(validate_block_document_name) @model_validator(mode="before") - def 
validate_name_is_present_if_not_anonymous(cls, values): + def validate_name_is_present_if_not_anonymous( + cls, values: dict[str, Any] + ) -> dict[str, Any]: return validate_name_present_on_nonanonymous_blocks(values) @@ -534,7 +541,7 @@ class BlockDocumentUpdate(ActionBaseModel): block_schema_id: Optional[UUID] = Field( default=None, description="A block schema ID" ) - data: Dict[str, Any] = Field( + data: dict[str, Any] = Field( default_factory=dict, description="The block document's data" ) merge_existing_data: bool = Field( @@ -565,11 +572,11 @@ class LogCreate(ActionBaseModel): level: int = Field(default=..., description="The log level.") message: str = Field(default=..., description="The log message.") timestamp: DateTime = Field(default=..., description="The log timestamp.") - flow_run_id: Optional[UUID] = Field(None) - task_run_id: Optional[UUID] = Field(None) - worker_id: Optional[UUID] = Field(None) + flow_run_id: Optional[UUID] = Field(default=None) + task_run_id: Optional[UUID] = Field(default=None) + worker_id: Optional[UUID] = Field(default=None) - def model_dump(self, *args, **kwargs): + def model_dump(self, *args: Any, **kwargs: Any) -> dict[str, Any]: """ The worker_id field is only included in logs sent to Prefect Cloud. If it's unset, we should not include it in the log payload. @@ -586,11 +593,11 @@ class WorkPoolCreate(ActionBaseModel): name: NonEmptyishName = Field( description="The name of the work pool.", ) - description: Optional[str] = Field(None) + description: Optional[str] = Field(default=None) type: str = Field( description="The work pool type.", default="prefect-agent" ) # TODO: change default - base_job_template: Dict[str, Any] = Field( + base_job_template: dict[str, Any] = Field( default_factory=dict, description="The base job template for the work pool.", ) @@ -606,17 +613,17 @@ class WorkPoolCreate(ActionBaseModel): class WorkPoolUpdate(ActionBaseModel): """Data used by the Prefect REST API to update a work pool.""" - description: Optional[str] = Field(None) - is_paused: Optional[bool] = Field(None) - base_job_template: Optional[Dict[str, Any]] = Field(None) - concurrency_limit: Optional[int] = Field(None) + description: Optional[str] = Field(default=None) + is_paused: Optional[bool] = Field(default=None) + base_job_template: Optional[dict[str, Any]] = Field(default=None) + concurrency_limit: Optional[int] = Field(default=None) class WorkQueueCreate(ActionBaseModel): """Data used by the Prefect REST API to create a work queue.""" name: str = Field(default=..., description="The name of the work queue.") - description: Optional[str] = Field(None) + description: Optional[str] = Field(default=None) is_paused: bool = Field( default=False, description="Whether the work queue is paused.", @@ -644,16 +651,16 @@ class WorkQueueCreate(ActionBaseModel): class WorkQueueUpdate(ActionBaseModel): """Data used by the Prefect REST API to update a work queue.""" - name: Optional[str] = Field(None) - description: Optional[str] = Field(None) + name: Optional[str] = Field(default=None) + description: Optional[str] = Field(default=None) is_paused: bool = Field( default=False, description="Whether or not the work queue is paused." ) - concurrency_limit: Optional[NonNegativeInteger] = Field(None) + concurrency_limit: Optional[NonNegativeInteger] = Field(default=None) priority: Optional[PositiveInteger] = Field( None, description="The queue's priority." 
) - last_polled: Optional[DateTime] = Field(None) + last_polled: Optional[DateTime] = Field(default=None) # DEPRECATED @@ -670,10 +677,10 @@ class FlowRunNotificationPolicyCreate(ActionBaseModel): is_active: bool = Field( default=True, description="Whether the policy is currently active" ) - state_names: List[str] = Field( + state_names: list[str] = Field( default=..., description="The flow run states that trigger notifications" ) - tags: List[str] = Field( + tags: list[str] = Field( default=..., description="The flow run tags that trigger notifications (set [] to disable)", ) @@ -695,7 +702,7 @@ class FlowRunNotificationPolicyCreate(ActionBaseModel): @field_validator("message_template") @classmethod - def validate_message_template_variables(cls, v): + def validate_message_template_variables(cls, v: Optional[str]) -> Optional[str]: return validate_message_template_variables(v) @@ -703,8 +710,8 @@ class FlowRunNotificationPolicyUpdate(ActionBaseModel): """Data used by the Prefect REST API to update a flow run notification policy.""" is_active: Optional[bool] = Field(default=None) - state_names: Optional[List[str]] = Field(default=None) - tags: Optional[List[str]] = Field(default=None) + state_names: Optional[list[str]] = Field(default=None) + tags: Optional[list[str]] = Field(default=None) block_document_id: Optional[UUID] = Field(default=None) message_template: Optional[str] = Field(default=None) @@ -715,8 +722,8 @@ class ArtifactCreate(ActionBaseModel): key: Optional[str] = Field(default=None) type: Optional[str] = Field(default=None) description: Optional[str] = Field(default=None) - data: Optional[Union[Dict[str, Any], Any]] = Field(default=None) - metadata_: Optional[Dict[str, str]] = Field(default=None) + data: Optional[Union[dict[str, Any], Any]] = Field(default=None) + metadata_: Optional[dict[str, str]] = Field(default=None) flow_run_id: Optional[UUID] = Field(default=None) task_run_id: Optional[UUID] = Field(default=None) @@ -726,9 +733,9 @@ class ArtifactCreate(ActionBaseModel): class ArtifactUpdate(ActionBaseModel): """Data used by the Prefect REST API to update an artifact.""" - data: Optional[Union[Dict[str, Any], Any]] = Field(None) - description: Optional[str] = Field(None) - metadata_: Optional[Dict[str, str]] = Field(None) + data: Optional[Union[dict[str, Any], Any]] = Field(default=None) + description: Optional[str] = Field(default=None) + metadata_: Optional[dict[str, str]] = Field(default=None) class VariableCreate(ActionBaseModel): @@ -745,7 +752,7 @@ class VariableCreate(ActionBaseModel): description="The value of the variable", examples=["my-value"], ) - tags: Optional[List[str]] = Field(default=None) + tags: Optional[list[str]] = Field(default=None) # validators _validate_name_format = field_validator("name")(validate_variable_name) @@ -765,7 +772,7 @@ class VariableUpdate(ActionBaseModel): description="The value of the variable", examples=["my-value"], ) - tags: Optional[List[str]] = Field(default=None) + tags: Optional[list[str]] = Field(default=None) # validators _validate_name_format = field_validator("name")(validate_variable_name) @@ -801,8 +808,8 @@ class GlobalConcurrencyLimitCreate(ActionBaseModel): class GlobalConcurrencyLimitUpdate(ActionBaseModel): """Data used by the Prefect REST API to update a global concurrency limit.""" - name: Optional[Name] = Field(None) - limit: Optional[NonNegativeInteger] = Field(None) - active: Optional[bool] = Field(None) - active_slots: Optional[NonNegativeInteger] = Field(None) - slot_decay_per_second: 
Optional[NonNegativeFloat] = Field(None) + name: Optional[Name] = Field(default=None) + limit: Optional[NonNegativeInteger] = Field(default=None) + active: Optional[bool] = Field(default=None) + active_slots: Optional[NonNegativeInteger] = Field(default=None) + slot_decay_per_second: Optional[NonNegativeFloat] = Field(default=None) diff --git a/src/prefect/client/schemas/filters.py b/src/prefect/client/schemas/filters.py index 5a4726e75367..52bb7258e700 100644 --- a/src/prefect/client/schemas/filters.py +++ b/src/prefect/client/schemas/filters.py @@ -6,10 +6,10 @@ from uuid import UUID from pydantic import Field -from pydantic_extra_types.pendulum_dt import DateTime from prefect._internal.schemas.bases import PrefectBaseModel from prefect.client.schemas.objects import StateType +from prefect.types import DateTime from prefect.utilities.collections import AutoEnum diff --git a/src/prefect/client/schemas/objects.py b/src/prefect/client/schemas/objects.py index df4661d65d1b..ece3042c02ce 100644 --- a/src/prefect/client/schemas/objects.py +++ b/src/prefect/client/schemas/objects.py @@ -1,15 +1,16 @@ import datetime import warnings +from collections.abc import Callable, Mapping from functools import partial from typing import ( TYPE_CHECKING, Annotated, Any, - Dict, + ClassVar, Generic, - List, Optional, Union, + cast, overload, ) from uuid import UUID, uuid4 @@ -23,13 +24,12 @@ HttpUrl, IPvAnyNetwork, SerializationInfo, + SerializerFunctionWrapHandler, Tag, field_validator, model_serializer, model_validator, ) -from pydantic.functional_validators import ModelWrapValidatorHandler -from pydantic_extra_types.pendulum_dt import DateTime from typing_extensions import Literal, Self, TypeVar from prefect._internal.compatibility import deprecated @@ -64,8 +64,13 @@ from prefect.utilities.pydantic import handle_secret_render if TYPE_CHECKING: + from prefect.client.schemas.actions import StateCreate from prefect.results import BaseResult, ResultRecordMetadata + DateTime = pendulum.DateTime +else: + from prefect.types import DateTime + R = TypeVar("R", default=Any) @@ -180,7 +185,7 @@ class StateDetails(PrefectBaseModel): pause_timeout: Optional[DateTime] = None pause_reschedule: bool = False pause_key: Optional[str] = None - run_input_keyset: Optional[Dict[str, str]] = None + run_input_keyset: Optional[dict[str, str]] = None refresh_cache: Optional[bool] = None retriable: Optional[bool] = None transition_id: Optional[UUID] = None @@ -215,11 +220,21 @@ class State(ObjectBaseModel, Generic[R]): ] = Field(default=None) @overload - def result(self: "State[R]", raise_on_failure: bool = True) -> R: + def result( + self: "State[R]", + raise_on_failure: Literal[True] = ..., + fetch: bool = ..., + retry_result_failure: bool = ..., + ) -> R: ... @overload - def result(self: "State[R]", raise_on_failure: bool = False) -> Union[R, Exception]: + def result( + self: "State[R]", + raise_on_failure: Literal[False] = False, + fetch: bool = ..., + retry_result_failure: bool = ..., + ) -> Union[R, Exception]: ... @deprecated.deprecated_parameter( @@ -311,7 +326,7 @@ def result( retry_result_failure=retry_result_failure, ) - def to_state_create(self): + def to_state_create(self) -> "StateCreate": """ Convert this state to a `StateCreate` type which can be used to set the state of a run in the API. 
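The `Literal` overloads added to `State.result` in the hunk above are what let type checkers narrow the return type based on `raise_on_failure`. A minimal, self-contained sketch of the same pattern, using a hypothetical `DemoState` rather than Prefect's actual `State`:

```python
from typing import Generic, Literal, TypeVar, Union, overload

R = TypeVar("R")


class DemoState(Generic[R]):
    """Toy result container used only to illustrate the overload pattern."""

    def __init__(self, value: Union[R, Exception]) -> None:
        self._value = value

    @overload
    def result(self, raise_on_failure: Literal[True] = ...) -> R: ...

    @overload
    def result(self, raise_on_failure: Literal[False] = False) -> Union[R, Exception]: ...

    def result(self, raise_on_failure: bool = True) -> Union[R, Exception]:
        # Runtime behavior is unchanged; only the checker-visible signatures differ.
        if raise_on_failure and isinstance(self._value, Exception):
            raise self._value
        return self._value


state: DemoState[int] = DemoState(42)
narrowed: int = state.result()                # checkers infer `int`
loose = state.result(raise_on_failure=False)  # checkers infer `int | Exception`
print(narrowed, loose)
```

With the old single signature (`raise_on_failure: bool = True`), both call sites above would have been typed as `Union[R, Exception]`.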
@@ -327,7 +342,7 @@ def to_state_create(self): ) if isinstance(self.data, BaseResult): - data = self.data + data = cast(BaseResult[R], self.data) elif isinstance(self.data, ResultRecord) and should_persist_result(): data = self.data.metadata else: @@ -348,14 +363,14 @@ def default_name_from_type(self) -> Self: # validation check and an error will be raised after this function is called name = self.name if name is None and self.type: - self.name = " ".join([v.capitalize() for v in self.type.value.split("_")]) + self.name = " ".join([v.capitalize() for v in self.type.split("_")]) return self @model_validator(mode="after") def default_scheduled_start_time(self) -> Self: if self.type == StateType.SCHEDULED: if not self.state_details.scheduled_time: - self.state_details.scheduled_time = DateTime.now("utc") + self.state_details.scheduled_time = pendulum.DateTime.now("utc") return self @model_validator(mode="after") @@ -395,17 +410,19 @@ def is_paused(self) -> bool: return self.type == StateType.PAUSED def model_copy( - self, *, update: Optional[Dict[str, Any]] = None, deep: bool = False - ): + self, *, update: Optional[Mapping[str, Any]] = None, deep: bool = False + ) -> Self: """ Copying API models should return an object that could be inserted into the database again. The 'timestamp' is reset using the default factory. """ - update = update or {} - update.setdefault("timestamp", self.model_fields["timestamp"].get_default()) + update = { + "timestamp": self.model_fields["timestamp"].get_default(), + **(update or {}), + } return super().model_copy(update=update, deep=deep) - def fresh_copy(self, **kwargs) -> Self: + def fresh_copy(self, **kwargs: Any) -> Self: """ Return a fresh copy of the state with a new ID. """ @@ -443,12 +460,14 @@ def __str__(self) -> str: `MyCompletedState("my message", type=COMPLETED)` """ - display = [] + display: list[str] = [] if self.message: display.append(repr(self.message)) - if self.type.value.lower() != self.name.lower(): + if TYPE_CHECKING: + assert self.name is not None + if self.type.lower() != self.name.lower(): display.append(f"type={self.type.value}") return f"{self.name}({', '.join(display)})" @@ -487,7 +506,7 @@ class FlowRunPolicy(PrefectBaseModel): retry_delay: Optional[int] = Field( default=None, description="The delay time between retries, in seconds." ) - pause_keys: Optional[set] = Field( + pause_keys: Optional[set[str]] = Field( default_factory=set, description="Tracks pauses this run has observed." ) resuming: Optional[bool] = Field( @@ -499,7 +518,7 @@ class FlowRunPolicy(PrefectBaseModel): @model_validator(mode="before") @classmethod - def populate_deprecated_fields(cls, values: Any): + def populate_deprecated_fields(cls, values: Any) -> Any: if isinstance(values, dict): return set_run_policy_deprecated_fields(values) return values @@ -536,7 +555,7 @@ class FlowRun(ObjectBaseModel): description="The version of the flow executed in this flow run.", examples=["1.0"], ) - parameters: Dict[str, Any] = Field( + parameters: dict[str, Any] = Field( default_factory=dict, description="Parameters for the flow run." ) idempotency_key: Optional[str] = Field( @@ -546,7 +565,7 @@ class FlowRun(ObjectBaseModel): " run is not created multiple times." 
), ) - context: Dict[str, Any] = Field( + context: dict[str, Any] = Field( default_factory=dict, description="Additional context for the flow run.", examples=[{"my_var": "my_val"}], @@ -554,12 +573,12 @@ class FlowRun(ObjectBaseModel): empirical_policy: FlowRunPolicy = Field( default_factory=FlowRunPolicy, ) - tags: List[str] = Field( + tags: list[str] = Field( default_factory=list, description="A list of tags on the flow run", examples=[["tag-1", "tag-2"]], ) - labels: KeyValueLabelsField + labels: KeyValueLabelsField = Field(default_factory=dict) parent_task_run_id: Optional[UUID] = Field( default=None, description=( @@ -632,7 +651,7 @@ class FlowRun(ObjectBaseModel): description="The state of the flow run.", examples=["State(type=StateType.COMPLETED)"], ) - job_variables: Optional[dict] = Field( + job_variables: Optional[dict[str, Any]] = Field( default=None, description="Job variables for the flow run.", ) @@ -663,7 +682,7 @@ def __eq__(self, other: Any) -> bool: @field_validator("name", mode="before") @classmethod - def set_default_name(cls, name): + def set_default_name(cls, name: Optional[str]) -> str: return get_or_create_run_name(name) @@ -687,7 +706,7 @@ class TaskRunPolicy(PrefectBaseModel): deprecated=True, ) retries: Optional[int] = Field(default=None, description="The number of retries.") - retry_delay: Union[None, int, List[int]] = Field( + retry_delay: Union[None, int, list[int]] = Field( default=None, description="A delay time or list of delay times between retries, in seconds.", ) @@ -710,18 +729,20 @@ def populate_deprecated_fields(self): self.retries = self.max_retries if not self.retry_delay and self.retry_delay_seconds != 0: - self.retry_delay = self.retry_delay_seconds + self.retry_delay = int(self.retry_delay_seconds) return self @field_validator("retry_delay") @classmethod - def validate_configured_retry_delays(cls, v): + def validate_configured_retry_delays( + cls, v: Optional[list[float]] + ) -> Optional[list[float]]: return list_length_50_or_less(v) @field_validator("retry_jitter_factor") @classmethod - def validate_jitter_factor(cls, v): + def validate_jitter_factor(cls, v: Optional[float]) -> Optional[float]: return validate_not_negative(v) @@ -731,9 +752,11 @@ class TaskRunInput(PrefectBaseModel): could include, constants, parameters, or other task runs. """ - model_config = ConfigDict(frozen=True) + model_config: ClassVar[ConfigDict] = ConfigDict(frozen=True) - input_type: str + if not TYPE_CHECKING: + # subclasses provide the concrete type for this field + input_type: str class TaskRunResult(TaskRunInput): @@ -791,16 +814,16 @@ class TaskRun(ObjectBaseModel): empirical_policy: TaskRunPolicy = Field( default_factory=TaskRunPolicy, ) - tags: List[str] = Field( + tags: list[str] = Field( default_factory=list, description="A list of tags for the task run.", examples=[["tag-1", "tag-2"]], ) - labels: KeyValueLabelsField + labels: KeyValueLabelsField = Field(default_factory=dict) state_id: Optional[UUID] = Field( default=None, description="The id of the current task run state." ) - task_inputs: Dict[str, List[Union[TaskRunResult, Parameter, Constant]]] = Field( + task_inputs: dict[str, list[Union[TaskRunResult, Parameter, Constant]]] = Field( default_factory=dict, description=( "Tracks the source of inputs to a task run. Used for internal bookkeeping. 
" @@ -865,7 +888,7 @@ class TaskRun(ObjectBaseModel): @field_validator("name", mode="before") @classmethod - def set_default_name(cls, name): + def set_default_name(cls, name: Optional[str]) -> Name: return get_or_create_run_name(name) @@ -883,7 +906,7 @@ class Workspace(PrefectBaseModel): workspace_name: str = Field(..., description="The workspace name.") workspace_description: str = Field(..., description="Description of the workspace.") workspace_handle: str = Field(..., description="The workspace's unique handle.") - model_config = ConfigDict(extra="ignore") + model_config: ClassVar[ConfigDict] = ConfigDict(extra="ignore") @property def handle(self) -> str: @@ -912,7 +935,7 @@ def ui_url(self) -> str: f"/workspace/{self.workspace_id}" ) - def __hash__(self): + def __hash__(self) -> int: return hash(self.handle) @@ -935,7 +958,7 @@ class IPAllowlist(PrefectBaseModel): Expected payload for an IP allowlist from the Prefect Cloud API. """ - entries: List[IPAllowlistEntry] + entries: list[IPAllowlistEntry] class IPAllowlistMyAccessResponse(PrefectBaseModel): @@ -973,14 +996,14 @@ class BlockSchema(ObjectBaseModel): """A representation of a block schema.""" checksum: str = Field(default=..., description="The block schema's unique checksum") - fields: Dict[str, Any] = Field( + fields: dict[str, Any] = Field( default_factory=dict, description="The block schema's field schema" ) block_type_id: Optional[UUID] = Field(default=..., description="A block type ID") block_type: Optional[BlockType] = Field( default=None, description="The associated block type" ) - capabilities: List[str] = Field( + capabilities: list[str] = Field( default_factory=list, description="A list of Block capabilities", ) @@ -999,7 +1022,7 @@ class BlockDocument(ObjectBaseModel): "The block document's name. Not required for anonymous block documents." ), ) - data: Dict[str, Any] = Field( + data: dict[str, Any] = Field( default_factory=dict, description="The block document's data" ) block_schema_id: UUID = Field(default=..., description="A block schema ID") @@ -1011,7 +1034,7 @@ class BlockDocument(ObjectBaseModel): block_type: Optional[BlockType] = Field( default=None, description="The associated block type" ) - block_document_references: Dict[str, Dict[str, Any]] = Field( + block_document_references: dict[str, dict[str, Any]] = Field( default_factory=dict, description="Record of the block document's references" ) is_anonymous: bool = Field( @@ -1026,13 +1049,15 @@ class BlockDocument(ObjectBaseModel): @model_validator(mode="before") @classmethod - def validate_name_is_present_if_not_anonymous(cls, values): + def validate_name_is_present_if_not_anonymous( + cls, values: dict[str, Any] + ) -> dict[str, Any]: return validate_name_present_on_nonanonymous_blocks(values) @model_serializer(mode="wrap") def serialize_data( - self, handler: ModelWrapValidatorHandler, info: SerializationInfo - ): + self, handler: SerializerFunctionWrapHandler, info: SerializationInfo + ) -> Any: self.data = visit_collection( self.data, visit_fn=partial(handle_secret_render, context=info.context or {}), @@ -1047,7 +1072,7 @@ class Flow(ObjectBaseModel): name: Name = Field( default=..., description="The name of the flow", examples=["my-flow"] ) - tags: List[str] = Field( + tags: list[str] = Field( default_factory=list, description="A list of flow tags", examples=[["tag-1", "tag-2"]], @@ -1091,22 +1116,22 @@ class Deployment(ObjectBaseModel): concurrency_limit: Optional[int] = Field( default=None, description="The concurrency limit for the deployment." 
) - schedules: List[DeploymentSchedule] = Field( + schedules: list[DeploymentSchedule] = Field( default_factory=list, description="A list of schedules for the deployment." ) - job_variables: Dict[str, Any] = Field( + job_variables: dict[str, Any] = Field( default_factory=dict, description="Overrides to apply to flow run infrastructure at runtime.", ) - parameters: Dict[str, Any] = Field( + parameters: dict[str, Any] = Field( default_factory=dict, description="Parameters for flow runs scheduled by the deployment.", ) - pull_steps: Optional[List[dict]] = Field( + pull_steps: Optional[list[dict[str, Any]]] = Field( default=None, description="Pull steps for cloning and running this deployment.", ) - tags: List[str] = Field( + tags: list[str] = Field( default_factory=list, description="A list of tags for the deployment", examples=[["tag-1", "tag-2"]], @@ -1123,7 +1148,7 @@ class Deployment(ObjectBaseModel): default=None, description="The last time the deployment was polled for status updates.", ) - parameter_openapi_schema: Optional[Dict[str, Any]] = Field( + parameter_openapi_schema: Optional[dict[str, Any]] = Field( default=None, description="The parameter schema of the flow, including defaults.", ) @@ -1177,7 +1202,7 @@ class ConcurrencyLimit(ObjectBaseModel): default=..., description="A tag the concurrency limit is applied to." ) concurrency_limit: int = Field(default=..., description="The concurrency limit.") - active_slots: List[UUID] = Field( + active_slots: list[UUID] = Field( default_factory=list, description="A list of active run ids using a concurrency slot", ) @@ -1224,7 +1249,7 @@ class BlockDocumentReference(ObjectBaseModel): @model_validator(mode="before") @classmethod - def validate_parent_and_ref_are_different(cls, values): + def validate_parent_and_ref_are_different(cls, values: Any) -> Any: if isinstance(values, dict): return validate_parent_and_ref_diff(values) return values @@ -1234,7 +1259,7 @@ class Configuration(ObjectBaseModel): """An ORM representation of account info.""" key: str = Field(default=..., description="Account info key") - value: Dict[str, Any] = Field(default=..., description="Account info") + value: dict[str, Any] = Field(default=..., description="Account info") class SavedSearchFilter(PrefectBaseModel): @@ -1258,7 +1283,7 @@ class SavedSearch(ObjectBaseModel): """An ORM representation of saved search data. Represents a set of filter criteria.""" name: str = Field(default=..., description="The name of the saved search.") - filters: List[SavedSearchFilter] = Field( + filters: list[SavedSearchFilter] = Field( default_factory=list, description="The filter set for the saved search." ) @@ -1281,11 +1306,11 @@ class Log(ObjectBaseModel): class QueueFilter(PrefectBaseModel): """Filter criteria definition for a work queue.""" - tags: Optional[List[str]] = Field( + tags: Optional[list[str]] = Field( default=None, description="Only include flow runs with these tags in the work queue.", ) - deployment_ids: Optional[List[UUID]] = Field( + deployment_ids: Optional[list[UUID]] = Field( default=None, description="Only include flow runs from these deployments in the work queue.", ) @@ -1345,7 +1370,7 @@ class WorkQueueHealthPolicy(PrefectBaseModel): ) def evaluate_health_status( - self, late_runs_count: int, last_polled: Optional[DateTime] = None + self, late_runs_count: int, last_polled: Optional[pendulum.DateTime] = None ) -> bool: """ Given empirical information about the state of the work queue, evaluate its health status. 
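Two mechanical changes recur throughout these schema hunks: `model_config` is annotated as `ClassVar[ConfigDict]` so type checkers stop treating it as a model field, and `typing.Dict`/`typing.List` give way to the built-in `dict`/`list` generics. A small sketch of both, using a hypothetical `ExampleModel` that is not part of this diff:

```python
from typing import ClassVar

from pydantic import BaseModel, ConfigDict, Field


class ExampleModel(BaseModel):
    # ClassVar marks this as class-level configuration rather than a field;
    # pydantic already treats `model_config` specially at runtime either way.
    model_config: ClassVar[ConfigDict] = ConfigDict(extra="ignore")

    name: str
    # Built-in generics replace typing.Dict, matching the hunks above.
    labels: dict[str, str] = Field(default_factory=dict)


print(ExampleModel(name="demo", unexpected="dropped"))  # the extra key is ignored
```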
@@ -1397,10 +1422,10 @@ class FlowRunNotificationPolicy(ObjectBaseModel): is_active: bool = Field( default=True, description="Whether the policy is currently active" ) - state_names: List[str] = Field( + state_names: list[str] = Field( default=..., description="The flow run states that trigger notifications" ) - tags: List[str] = Field( + tags: list[str] = Field( default=..., description="The flow run tags that trigger notifications (set [] to disable)", ) @@ -1422,7 +1447,7 @@ class FlowRunNotificationPolicy(ObjectBaseModel): @field_validator("message_template") @classmethod - def validate_message_template_variables(cls, v): + def validate_message_template_variables(cls, v: Optional[str]) -> Optional[str]: return validate_message_template_variables(v) @@ -1454,7 +1479,7 @@ class WorkPool(ObjectBaseModel): default=None, description="A description of the work pool." ) type: str = Field(description="The work pool type.") - base_job_template: Dict[str, Any] = Field( + base_job_template: dict[str, Any] = Field( default_factory=dict, description="The work pool's base job template." ) is_paused: bool = Field( @@ -1469,10 +1494,12 @@ class WorkPool(ObjectBaseModel): ) # this required field has a default of None so that the custom validator - # below will be called and produce a more helpful error message - default_queue_id: UUID = Field( - None, description="The id of the pool's default queue." - ) + # below will be called and produce a more helpful error message. Because + # the field metadata is attached via an annotation, the default is hidden + # from type checkers. + default_queue_id: Annotated[ + UUID, Field(default=None, description="The id of the pool's default queue.") + ] @property def is_push_pool(self) -> bool: @@ -1484,7 +1511,7 @@ def is_managed_pool(self) -> bool: @field_validator("default_queue_id") @classmethod - def helpful_error_for_missing_default_queue_id(cls, v): + def helpful_error_for_missing_default_queue_id(cls, v: Optional[UUID]) -> UUID: return validate_default_queue_id_not_none(v) @@ -1495,8 +1522,8 @@ class Worker(ObjectBaseModel): work_pool_id: UUID = Field( description="The work pool with which the queue is associated." ) - last_heartbeat_time: datetime.datetime = Field( - None, description="The last time the worker process sent a heartbeat." + last_heartbeat_time: Optional[datetime.datetime] = Field( + default=None, description="The last time the worker process sent a heartbeat." ) heartbeat_interval_seconds: Optional[int] = Field( default=None, @@ -1529,14 +1556,14 @@ class Artifact(ObjectBaseModel): default=None, description="A markdown-enabled description of the artifact." ) # data will eventually be typed as `Optional[Union[Result, Any]]` - data: Optional[Union[Dict[str, Any], Any]] = Field( + data: Optional[Union[dict[str, Any], Any]] = Field( default=None, description=( "Data associated with the artifact, e.g. a result.; structure depends on" " the artifact type." ), ) - metadata_: Optional[Dict[str, str]] = Field( + metadata_: Optional[dict[str, str]] = Field( default=None, description=( "User-defined artifact metadata. 
Content must be string key and value" @@ -1552,7 +1579,9 @@ class Artifact(ObjectBaseModel): @field_validator("metadata_") @classmethod - def validate_metadata_length(cls, v): + def validate_metadata_length( + cls, v: Optional[dict[str, str]] + ) -> Optional[dict[str, str]]: return validate_max_metadata_length(v) @@ -1571,14 +1600,14 @@ class ArtifactCollection(ObjectBaseModel): description: Optional[str] = Field( default=None, description="A markdown-enabled description of the artifact." ) - data: Optional[Union[Dict[str, Any], Any]] = Field( + data: Optional[Union[dict[str, Any], Any]] = Field( default=None, description=( "Data associated with the artifact, e.g. a result.; structure depends on" " the artifact type." ), ) - metadata_: Optional[Dict[str, str]] = Field( + metadata_: Optional[dict[str, str]] = Field( default=None, description=( "User-defined artifact metadata. Content must be string key and value" @@ -1605,7 +1634,7 @@ class Variable(ObjectBaseModel): description="The value of the variable", examples=["my_value"], ) - tags: List[str] = Field( + tags: list[str] = Field( default_factory=list, description="A list of variable tags", examples=[["tag-1", "tag-2"]], @@ -1630,7 +1659,7 @@ def decoded_value(self) -> Any: @field_validator("key", check_fields=False) @classmethod - def validate_name_characters(cls, v): + def validate_name_characters(cls, v: str) -> str: raise_on_name_alphanumeric_dashes_only(v) return v @@ -1675,7 +1704,7 @@ class CsrfToken(ObjectBaseModel): ) -__getattr__ = getattr_migration(__name__) +__getattr__: Callable[[str], Any] = getattr_migration(__name__) class Integration(PrefectBaseModel): @@ -1693,7 +1722,7 @@ class WorkerMetadata(PrefectBaseModel): should support flexible metadata. """ - integrations: List[Integration] = Field( + integrations: list[Integration] = Field( default=..., description="Prefect integrations installed in the worker." ) - model_config = ConfigDict(extra="allow") + model_config: ClassVar[ConfigDict] = ConfigDict(extra="allow") diff --git a/src/prefect/client/schemas/responses.py b/src/prefect/client/schemas/responses.py index 29102b65f022..ce76537001f0 100644 --- a/src/prefect/client/schemas/responses.py +++ b/src/prefect/client/schemas/responses.py @@ -1,19 +1,18 @@ import datetime -from typing import Any, Dict, List, Optional, TypeVar, Union +from typing import Any, ClassVar, Generic, Optional, TypeVar, Union from uuid import UUID from pydantic import ConfigDict, Field -from pydantic_extra_types.pendulum_dt import DateTime from typing_extensions import Literal import prefect.client.schemas.objects as objects from prefect._internal.schemas.bases import ObjectBaseModel, PrefectBaseModel from prefect._internal.schemas.fields import CreatedBy, UpdatedBy -from prefect.types import KeyValueLabelsField +from prefect.types import DateTime, KeyValueLabelsField from prefect.utilities.collections import AutoEnum from prefect.utilities.names import generate_slug -R = TypeVar("R") +T = TypeVar("T") class SetStateStatus(AutoEnum): @@ -120,7 +119,7 @@ class HistoryResponse(PrefectBaseModel): interval_end: DateTime = Field( default=..., description="The end date of the interval." ) - states: List[HistoryResponseState] = Field( + states: list[HistoryResponseState] = Field( default=..., description="A list of state histories during the interval." 
) @@ -130,18 +129,18 @@ class HistoryResponse(PrefectBaseModel): ] -class OrchestrationResult(PrefectBaseModel): +class OrchestrationResult(PrefectBaseModel, Generic[T]): """ A container for the output of state orchestration. """ - state: Optional[objects.State] + state: Optional[objects.State[T]] status: SetStateStatus details: StateResponseDetails class WorkerFlowRunResponse(PrefectBaseModel): - model_config = ConfigDict(arbitrary_types_allowed=True) + model_config: ClassVar[ConfigDict] = ConfigDict(arbitrary_types_allowed=True) work_pool_id: UUID work_queue_id: UUID @@ -179,7 +178,7 @@ class FlowRunResponse(ObjectBaseModel): description="The version of the flow executed in this flow run.", examples=["1.0"], ) - parameters: Dict[str, Any] = Field( + parameters: dict[str, Any] = Field( default_factory=dict, description="Parameters for the flow run." ) idempotency_key: Optional[str] = Field( @@ -189,7 +188,7 @@ class FlowRunResponse(ObjectBaseModel): " run is not created multiple times." ), ) - context: Dict[str, Any] = Field( + context: dict[str, Any] = Field( default_factory=dict, description="Additional context for the flow run.", examples=[{"my_var": "my_val"}], @@ -197,7 +196,7 @@ class FlowRunResponse(ObjectBaseModel): empirical_policy: objects.FlowRunPolicy = Field( default_factory=objects.FlowRunPolicy, ) - tags: List[str] = Field( + tags: list[str] = Field( default_factory=list, description="A list of tags on the flow run", examples=[["tag-1", "tag-2"]], @@ -275,7 +274,7 @@ class FlowRunResponse(ObjectBaseModel): description="The state of the flow run.", examples=["objects.State(type=objects.StateType.COMPLETED)"], ) - job_variables: Optional[dict] = Field( + job_variables: Optional[dict[str, Any]] = Field( default=None, description="Job variables for the flow run." ) @@ -335,22 +334,22 @@ class DeploymentResponse(ObjectBaseModel): default=None, description="The concurrency options for the deployment.", ) - schedules: List[objects.DeploymentSchedule] = Field( + schedules: list[objects.DeploymentSchedule] = Field( default_factory=list, description="A list of schedules for the deployment." ) - job_variables: Dict[str, Any] = Field( + job_variables: dict[str, Any] = Field( default_factory=dict, description="Overrides to apply to flow run infrastructure at runtime.", ) - parameters: Dict[str, Any] = Field( + parameters: dict[str, Any] = Field( default_factory=dict, description="Parameters for flow runs scheduled by the deployment.", ) - pull_steps: Optional[List[dict]] = Field( + pull_steps: Optional[list[dict[str, Any]]] = Field( default=None, description="Pull steps for cloning and running this deployment.", ) - tags: List[str] = Field( + tags: list[str] = Field( default_factory=list, description="A list of tags for the deployment", examples=[["tag-1", "tag-2"]], @@ -367,7 +366,7 @@ class DeploymentResponse(ObjectBaseModel): default=None, description="The last time the deployment was polled for status updates.", ) - parameter_openapi_schema: Optional[Dict[str, Any]] = Field( + parameter_openapi_schema: Optional[dict[str, Any]] = Field( default=None, description="The parameter schema of the flow, including defaults.", ) @@ -400,7 +399,7 @@ class DeploymentResponse(ObjectBaseModel): default=None, description="Optional information about the updater of this deployment.", ) - work_queue_id: UUID = Field( + work_queue_id: Optional[UUID] = Field( default=None, description=( "The id of the work pool queue to which this deployment is assigned." 
@@ -423,7 +422,7 @@ class DeploymentResponse(ObjectBaseModel): class MinimalConcurrencyLimitResponse(PrefectBaseModel): - model_config = ConfigDict(extra="ignore") + model_config: ClassVar[ConfigDict] = ConfigDict(extra="ignore") id: UUID name: str diff --git a/src/prefect/client/schemas/schedules.py b/src/prefect/client/schemas/schedules.py index 1a2b97a74f8f..2437e194ad83 100644 --- a/src/prefect/client/schemas/schedules.py +++ b/src/prefect/client/schemas/schedules.py @@ -3,13 +3,13 @@ """ import datetime -from typing import Annotated, Any, Optional, Union +from typing import TYPE_CHECKING, Annotated, Any, ClassVar, Optional, Union import dateutil import dateutil.rrule +import dateutil.tz import pendulum from pydantic import AfterValidator, ConfigDict, Field, field_validator, model_validator -from pydantic_extra_types.pendulum_dt import DateTime from typing_extensions import TypeAlias, TypeGuard from prefect._internal.schemas.bases import PrefectBaseModel @@ -20,6 +20,14 @@ validate_rrule_string, ) +if TYPE_CHECKING: + # type checkers have difficulty accepting that + # pydantic_extra_types.pendulum_dt and pendulum.DateTime can be used + # together. + DateTime = pendulum.DateTime +else: + from prefect.types import DateTime + MAX_ITERATIONS = 1000 # approx. 1 years worth of RDATEs + buffer MAX_RRULE_LENGTH = 6500 @@ -54,7 +62,7 @@ class IntervalSchedule(PrefectBaseModel): timezone (str, optional): a valid timezone string """ - model_config = ConfigDict(extra="forbid", exclude_none=True) + model_config: ClassVar[ConfigDict] = ConfigDict(extra="forbid") interval: datetime.timedelta = Field(gt=datetime.timedelta(0)) anchor_date: Annotated[DateTime, AfterValidator(default_anchor_date)] = Field( @@ -68,6 +76,19 @@ def validate_timezone(self): self.timezone = default_timezone(self.timezone, self.model_dump()) return self + if TYPE_CHECKING: + # The model accepts str or datetime values for `anchor_date` + def __init__( + self, + /, + interval: datetime.timedelta, + anchor_date: Optional[ + Union[pendulum.DateTime, datetime.datetime, str] + ] = None, + timezone: Optional[str] = None, + ) -> None: + ... 
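`IntervalSchedule` gains a `TYPE_CHECKING`-only `__init__` stub so editors and type checkers see the looser input types (`str`, `datetime`, or `pendulum.DateTime`) that pydantic already coerces at runtime. A sketch of that stub pattern with a hypothetical `DemoSchedule` model, assuming a plain `datetime` field:

```python
import datetime
from typing import TYPE_CHECKING, Optional, Union

from pydantic import BaseModel

if TYPE_CHECKING:
    import pendulum


class DemoSchedule(BaseModel):
    # At runtime pydantic coerces ISO strings and datetimes into this field.
    anchor_date: datetime.datetime
    timezone: Optional[str] = None

    if TYPE_CHECKING:
        # Checker-only constructor stub: it advertises the accepted input types
        # without changing runtime behavior, because this block never executes.
        def __init__(
            self,
            /,
            anchor_date: Union["pendulum.DateTime", datetime.datetime, str],
            timezone: Optional[str] = None,
        ) -> None: ...


print(DemoSchedule(anchor_date="2020-01-01T00:00:00Z"))
```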
+ class CronSchedule(PrefectBaseModel): """ @@ -94,7 +115,7 @@ class CronSchedule(PrefectBaseModel): """ - model_config = ConfigDict(extra="forbid") + model_config: ClassVar[ConfigDict] = ConfigDict(extra="forbid") cron: str = Field(default=..., examples=["0 0 * * *"]) timezone: Optional[str] = Field(default=None, examples=["America/New_York"]) @@ -107,18 +128,36 @@ class CronSchedule(PrefectBaseModel): @field_validator("timezone") @classmethod - def valid_timezone(cls, v): + def valid_timezone(cls, v: Optional[str]) -> str: return default_timezone(v) @field_validator("cron") @classmethod - def valid_cron_string(cls, v): + def valid_cron_string(cls, v: str) -> str: return validate_cron_string(v) DEFAULT_ANCHOR_DATE = pendulum.date(2020, 1, 1) +def _rrule_dt( + rrule: dateutil.rrule.rrule, name: str = "_dtstart" +) -> Optional[datetime.datetime]: + return getattr(rrule, name, None) + + +def _rrule( + rruleset: dateutil.rrule.rruleset, name: str = "_rrule" +) -> list[dateutil.rrule.rrule]: + return getattr(rruleset, name, []) + + +def _rdates( + rrule: dateutil.rrule.rruleset, name: str = "_rdate" +) -> list[datetime.datetime]: + return getattr(rrule, name, []) + + class RRuleSchedule(PrefectBaseModel): """ RRule schedule, based on the iCalendar standard @@ -139,7 +178,7 @@ class RRuleSchedule(PrefectBaseModel): timezone (str, optional): a valid timezone string """ - model_config = ConfigDict(extra="forbid") + model_config: ClassVar[ConfigDict] = ConfigDict(extra="forbid") rrule: str timezone: Optional[str] = Field( @@ -148,58 +187,60 @@ class RRuleSchedule(PrefectBaseModel): @field_validator("rrule") @classmethod - def validate_rrule_str(cls, v): + def validate_rrule_str(cls, v: str) -> str: return validate_rrule_string(v) @classmethod - def from_rrule(cls, rrule: dateutil.rrule.rrule): + def from_rrule( + cls, rrule: Union[dateutil.rrule.rrule, dateutil.rrule.rruleset] + ) -> "RRuleSchedule": if isinstance(rrule, dateutil.rrule.rrule): - if rrule._dtstart.tzinfo is not None: - timezone = rrule._dtstart.tzinfo.name + dtstart = _rrule_dt(rrule) + if dtstart and dtstart.tzinfo is not None: + timezone = dtstart.tzinfo.tzname(dtstart) else: timezone = "UTC" return RRuleSchedule(rrule=str(rrule), timezone=timezone) - elif isinstance(rrule, dateutil.rrule.rruleset): - dtstarts = [rr._dtstart for rr in rrule._rrule if rr._dtstart is not None] - unique_dstarts = set(pendulum.instance(d).in_tz("UTC") for d in dtstarts) - unique_timezones = set(d.tzinfo for d in dtstarts if d.tzinfo is not None) - - if len(unique_timezones) > 1: - raise ValueError( - f"rruleset has too many dtstart timezones: {unique_timezones}" - ) - - if len(unique_dstarts) > 1: - raise ValueError(f"rruleset has too many dtstarts: {unique_dstarts}") - - if unique_dstarts and unique_timezones: - timezone = dtstarts[0].tzinfo.name - else: - timezone = "UTC" - - rruleset_string = "" - if rrule._rrule: - rruleset_string += "\n".join(str(r) for r in rrule._rrule) - if rrule._exrule: - rruleset_string += "\n" if rruleset_string else "" - rruleset_string += "\n".join(str(r) for r in rrule._exrule).replace( - "RRULE", "EXRULE" - ) - if rrule._rdate: - rruleset_string += "\n" if rruleset_string else "" - rruleset_string += "RDATE:" + ",".join( - rd.strftime("%Y%m%dT%H%M%SZ") for rd in rrule._rdate - ) - if rrule._exdate: - rruleset_string += "\n" if rruleset_string else "" - rruleset_string += "EXDATE:" + ",".join( - exd.strftime("%Y%m%dT%H%M%SZ") for exd in rrule._exdate - ) - return RRuleSchedule(rrule=rruleset_string, timezone=timezone) 
+ rrules = _rrule(rrule) + dtstarts = [dts for rr in rrules if (dts := _rrule_dt(rr)) is not None] + unique_dstarts = set(pendulum.instance(d).in_tz("UTC") for d in dtstarts) + unique_timezones = set(d.tzinfo for d in dtstarts if d.tzinfo is not None) + + if len(unique_timezones) > 1: + raise ValueError( + f"rruleset has too many dtstart timezones: {unique_timezones}" + ) + + if len(unique_dstarts) > 1: + raise ValueError(f"rruleset has too many dtstarts: {unique_dstarts}") + + if unique_dstarts and unique_timezones: + [unique_tz] = unique_timezones + timezone = unique_tz.tzname(dtstarts[0]) else: - raise ValueError(f"Invalid RRule object: {rrule}") - - def to_rrule(self) -> dateutil.rrule.rrule: + timezone = "UTC" + + rruleset_string = "" + if rrules: + rruleset_string += "\n".join(str(r) for r in rrules) + if exrule := _rrule(rrule, "_exrule"): + rruleset_string += "\n" if rruleset_string else "" + rruleset_string += "\n".join(str(r) for r in exrule).replace( + "RRULE", "EXRULE" + ) + if rdates := _rdates(rrule): + rruleset_string += "\n" if rruleset_string else "" + rruleset_string += "RDATE:" + ",".join( + rd.strftime("%Y%m%dT%H%M%SZ") for rd in rdates + ) + if exdates := _rdates(rrule, "_exdate"): + rruleset_string += "\n" if rruleset_string else "" + rruleset_string += "EXDATE:" + ",".join( + exd.strftime("%Y%m%dT%H%M%SZ") for exd in exdates + ) + return RRuleSchedule(rrule=rruleset_string, timezone=timezone) + + def to_rrule(self) -> Union[dateutil.rrule.rrule, dateutil.rrule.rruleset]: """ Since rrule doesn't properly serialize/deserialize timezones, we localize dates here @@ -211,51 +252,53 @@ def to_rrule(self) -> dateutil.rrule.rrule: ) timezone = dateutil.tz.gettz(self.timezone) if isinstance(rrule, dateutil.rrule.rrule): - kwargs = dict(dtstart=rrule._dtstart.replace(tzinfo=timezone)) - if rrule._until: + dtstart = _rrule_dt(rrule) + assert dtstart is not None + kwargs: dict[str, Any] = dict(dtstart=dtstart.replace(tzinfo=timezone)) + if until := _rrule_dt(rrule, "_until"): kwargs.update( - until=rrule._until.replace(tzinfo=timezone), + until=until.replace(tzinfo=timezone), ) return rrule.replace(**kwargs) - elif isinstance(rrule, dateutil.rrule.rruleset): - # update rrules - localized_rrules = [] - for rr in rrule._rrule: - kwargs = dict(dtstart=rr._dtstart.replace(tzinfo=timezone)) - if rr._until: - kwargs.update( - until=rr._until.replace(tzinfo=timezone), - ) - localized_rrules.append(rr.replace(**kwargs)) - rrule._rrule = localized_rrules - - # update exrules - localized_exrules = [] - for exr in rrule._exrule: - kwargs = dict(dtstart=exr._dtstart.replace(tzinfo=timezone)) - if exr._until: - kwargs.update( - until=exr._until.replace(tzinfo=timezone), - ) - localized_exrules.append(exr.replace(**kwargs)) - rrule._exrule = localized_exrules - - # update rdates - localized_rdates = [] - for rd in rrule._rdate: - localized_rdates.append(rd.replace(tzinfo=timezone)) - rrule._rdate = localized_rdates - - # update exdates - localized_exdates = [] - for exd in rrule._exdate: - localized_exdates.append(exd.replace(tzinfo=timezone)) - rrule._exdate = localized_exdates - - return rrule + + # update rrules + localized_rrules: list[dateutil.rrule.rrule] = [] + for rr in _rrule(rrule): + dtstart = _rrule_dt(rr) + assert dtstart is not None + kwargs: dict[str, Any] = dict(dtstart=dtstart.replace(tzinfo=timezone)) + if until := _rrule_dt(rr, "_until"): + kwargs.update(until=until.replace(tzinfo=timezone)) + localized_rrules.append(rr.replace(**kwargs)) + setattr(rrule, "_rrule", 
localized_rrules) + + # update exrules + localized_exrules: list[dateutil.rrule.rruleset] = [] + for exr in _rrule(rrule, "_exrule"): + dtstart = _rrule_dt(exr) + assert dtstart is not None + kwargs = dict(dtstart=dtstart.replace(tzinfo=timezone)) + if until := _rrule_dt(exr, "_until"): + kwargs.update(until=until.replace(tzinfo=timezone)) + localized_exrules.append(exr.replace(**kwargs)) + setattr(rrule, "_exrule", localized_exrules) + + # update rdates + localized_rdates: list[datetime.datetime] = [] + for rd in _rdates(rrule): + localized_rdates.append(rd.replace(tzinfo=timezone)) + setattr(rrule, "_rdate", localized_rdates) + + # update exdates + localized_exdates: list[datetime.datetime] = [] + for exd in _rdates(rrule, "_exdate"): + localized_exdates.append(exd.replace(tzinfo=timezone)) + setattr(rrule, "_exdate", localized_exdates) + + return rrule @field_validator("timezone") - def valid_timezone(cls, v): + def valid_timezone(cls, v: Optional[str]) -> str: """ Validate that the provided timezone is a valid IANA timezone. @@ -277,7 +320,7 @@ def valid_timezone(cls, v): class NoSchedule(PrefectBaseModel): - model_config = ConfigDict(extra="forbid") + model_config: ClassVar[ConfigDict] = ConfigDict(extra="forbid") SCHEDULE_TYPES: TypeAlias = Union[ @@ -326,7 +369,7 @@ def construct_schedule( if isinstance(interval, (int, float)): interval = datetime.timedelta(seconds=interval) if not anchor_date: - anchor_date = DateTime.now() + anchor_date = pendulum.DateTime.now() schedule = IntervalSchedule( interval=interval, anchor_date=anchor_date, timezone=timezone ) diff --git a/src/prefect/client/subscriptions.py b/src/prefect/client/subscriptions.py index c2ebf0ab673e..8e04b3735e8a 100644 --- a/src/prefect/client/subscriptions.py +++ b/src/prefect/client/subscriptions.py @@ -1,5 +1,7 @@ import asyncio -from typing import Any, Dict, Generic, Iterable, Optional, Type, TypeVar +from collections.abc import Iterable +from logging import Logger +from typing import Any, Generic, Optional, TypeVar import orjson import websockets @@ -11,7 +13,7 @@ from prefect.logging import get_logger from prefect.settings import PREFECT_API_KEY -logger = get_logger(__name__) +logger: Logger = get_logger(__name__) S = TypeVar("S", bound=IDBaseModel) @@ -19,7 +21,7 @@ class Subscription(Generic[S]): def __init__( self, - model: Type[S], + model: type[S], path: str, keys: Iterable[str], client_id: Optional[str] = None, @@ -27,27 +29,33 @@ def __init__( ): self.model = model self.client_id = client_id - base_url = base_url.replace("http", "ws", 1) - self.subscription_url = f"{base_url}{path}" + base_url = base_url.replace("http", "ws", 1) if base_url else None + self.subscription_url: str = f"{base_url}{path}" - self.keys = list(keys) + self.keys: list[str] = list(keys) self._connect = websockets.connect( self.subscription_url, - subprotocols=["prefect"], + subprotocols=[websockets.Subprotocol("prefect")], ) self._websocket = None def __aiter__(self) -> Self: return self + @property + def websocket(self) -> websockets.WebSocketClientProtocol: + if not self._websocket: + raise RuntimeError("Subscription is not connected") + return self._websocket + async def __anext__(self) -> S: while True: try: await self._ensure_connected() - message = await self._websocket.recv() + message = await self.websocket.recv() - await self._websocket.send(orjson.dumps({"type": "ack"}).decode()) + await self.websocket.send(orjson.dumps({"type": "ack"}).decode()) return self.model.model_validate_json(message) except ( @@ -72,10 +80,10 @@ 
async def _ensure_connected(self): ).decode() ) - auth: Dict[str, Any] = orjson.loads(await websocket.recv()) + auth: dict[str, Any] = orjson.loads(await websocket.recv()) assert auth["type"] == "auth_success", auth.get("message") - message = {"type": "subscribe", "keys": self.keys} + message: dict[str, Any] = {"type": "subscribe", "keys": self.keys} if self.client_id: message.update({"client_id": self.client_id}) @@ -84,13 +92,19 @@ async def _ensure_connected(self): AssertionError, websockets.exceptions.ConnectionClosedError, ) as e: - if isinstance(e, AssertionError) or e.rcvd.code == WS_1008_POLICY_VIOLATION: + if isinstance(e, AssertionError) or ( + e.rcvd and e.rcvd.code == WS_1008_POLICY_VIOLATION + ): if isinstance(e, AssertionError): reason = e.args[0] - elif isinstance(e, websockets.exceptions.ConnectionClosedError): + elif e.rcvd and e.rcvd.reason: reason = e.rcvd.reason + else: + reason = "unknown" + else: + reason = None - if isinstance(e, AssertionError) or e.rcvd.code == WS_1008_POLICY_VIOLATION: + if reason: raise Exception( "Unable to authenticate to the subscription. Please " "ensure the provided `PREFECT_API_KEY` you are using is " diff --git a/src/prefect/client/utilities.py b/src/prefect/client/utilities.py index ffe42e63195f..75bbd24b5d14 100644 --- a/src/prefect/client/utilities.py +++ b/src/prefect/client/utilities.py @@ -5,31 +5,32 @@ # This module must not import from `prefect.client` when it is imported to avoid # circular imports for decorators such as `inject_client` which are widely used. +from collections.abc import Awaitable, Coroutine from functools import wraps -from typing import ( - TYPE_CHECKING, - Any, - Awaitable, - Callable, - Coroutine, - Optional, - Tuple, - TypeVar, - cast, -) - -from typing_extensions import Concatenate, ParamSpec +from typing import TYPE_CHECKING, Any, Callable, Optional, Union, overload + +from typing_extensions import Concatenate, ParamSpec, TypeGuard, TypeVar if TYPE_CHECKING: - from prefect.client.orchestration import PrefectClient + from prefect.client.orchestration import PrefectClient, SyncPrefectClient P = ParamSpec("P") -R = TypeVar("R") +R = TypeVar("R", infer_variance=True) + + +def _current_async_client( + client: Union["PrefectClient", "SyncPrefectClient"], +) -> TypeGuard["PrefectClient"]: + """Determine if the client is a PrefectClient instance attached to the current loop""" + from prefect._internal.concurrency.event_loop import get_running_loop + + # Only a PrefectClient will have a _loop attribute that is the current loop + return getattr(client, "_loop", None) == get_running_loop() def get_or_create_client( client: Optional["PrefectClient"] = None, -) -> Tuple["PrefectClient", bool]: +) -> tuple["PrefectClient", bool]: """ Returns provided client, infers a client from context if available, or creates a new client. 
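`get_or_create_client` now funnels its loop check through the `_current_async_client` helper, whose `TypeGuard` return type lets checkers narrow a `PrefectClient | SyncPrefectClient` union inside the `if` branch. A standalone sketch of the narrowing, with hypothetical stand-in client classes and a string in place of the running event loop:

```python
from typing import Union

from typing_extensions import TypeGuard


class AsyncClientStub:
    """Stand-in for a client bound to the currently running event loop."""

    _loop: str = "current-loop"


class SyncClientStub:
    """Stand-in for a synchronous client with no attached loop."""


def is_current_async_client(
    client: Union[AsyncClientStub, SyncClientStub],
) -> TypeGuard[AsyncClientStub]:
    # Mirrors the guard above: only the async client carries a `_loop`
    # attribute that matches the loop we are running on.
    return getattr(client, "_loop", None) == "current-loop"


def pick_client(client: Union[AsyncClientStub, SyncClientStub]) -> None:
    if is_current_async_client(client):
        # Checkers narrow `client` to AsyncClientStub in this branch.
        print("reusing client on loop:", client._loop)
    else:
        print("creating a fresh client")


pick_client(AsyncClientStub())
pick_client(SyncClientStub())
```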
@@ -41,29 +42,22 @@ def get_or_create_client( """ if client is not None: return client, True - from prefect._internal.concurrency.event_loop import get_running_loop + from prefect.context import AsyncClientContext, FlowRunContext, TaskRunContext async_client_context = AsyncClientContext.get() flow_run_context = FlowRunContext.get() task_run_context = TaskRunContext.get() - if async_client_context and async_client_context.client._loop == get_running_loop(): - return async_client_context.client, True - elif ( - flow_run_context - and getattr(flow_run_context.client, "_loop", None) == get_running_loop() - ): - return flow_run_context.client, True - elif ( - task_run_context - and getattr(task_run_context.client, "_loop", None) == get_running_loop() - ): - return task_run_context.client, True - else: - from prefect.client.orchestration import get_client as get_httpx_client + for context in (async_client_context, flow_run_context, task_run_context): + if context is None: + continue + if _current_async_client(context_client := context.client): + return context_client, True - return get_httpx_client(), False + from prefect.client.orchestration import get_client as get_httpx_client + + return get_httpx_client(), False def client_injector( @@ -77,9 +71,23 @@ async def wrapper(*args: P.args, **kwargs: P.kwargs) -> R: return wrapper +@overload def inject_client( fn: Callable[P, Coroutine[Any, Any, R]], ) -> Callable[P, Coroutine[Any, Any, R]]: + ... + + +@overload +def inject_client( + fn: Callable[P, R], +) -> Callable[P, R]: + ... + + +def inject_client( + fn: Callable[P, Union[Coroutine[Any, Any, R], R]], +) -> Callable[P, Union[Coroutine[Any, Any, R], R]]: """ Simple helper to provide a context managed client to an asynchronous function. @@ -90,16 +98,18 @@ def inject_client( @wraps(fn) async def with_injected_client(*args: P.args, **kwargs: P.kwargs) -> R: - client = cast(Optional["PrefectClient"], kwargs.pop("client", None)) - client, inferred = get_or_create_client(client) + given = kwargs.pop("client", None) + if TYPE_CHECKING: + assert given is None or isinstance(given, PrefectClient) + client, inferred = get_or_create_client(given) if not inferred: context = client else: from prefect.utilities.asyncutils import asyncnullcontext - context = asyncnullcontext() + context = asyncnullcontext(client) async with context as new_client: - kwargs.setdefault("client", new_client or client) + kwargs |= {"client": new_client} return await fn(*args, **kwargs) return with_injected_client diff --git a/src/prefect/concurrency/asyncio.py b/src/prefect/concurrency/asyncio.py index cb8a751f8aa1..5d419a6c079f 100644 --- a/src/prefect/concurrency/asyncio.py +++ b/src/prefect/concurrency/asyncio.py @@ -17,7 +17,6 @@ from prefect.client.orchestration import get_client from prefect.client.schemas.responses import MinimalConcurrencyLimitResponse from prefect.logging.loggers import get_run_logger -from prefect.utilities.asyncutils import sync_compatible from .context import ConcurrencyContext from .events import ( @@ -79,7 +78,7 @@ async def main(): names = names if isinstance(names, list) else [names] - limits = await _acquire_concurrency_slots( + limits = await _aacquire_concurrency_slots( names, occupy, timeout_seconds=timeout_seconds, @@ -95,7 +94,7 @@ async def main(): finally: occupancy_period = cast(Interval, (pendulum.now("UTC") - acquisition_time)) try: - await _release_concurrency_slots( + await _arelease_concurrency_slots( names, occupy, occupancy_period.total_seconds() ) except 
anyio.get_cancelled_exc_class(): @@ -138,7 +137,7 @@ async def rate_limit( names = names if isinstance(names, list) else [names] - limits = await _acquire_concurrency_slots( + limits = await _aacquire_concurrency_slots( names, occupy, mode="rate_limit", @@ -149,7 +148,6 @@ async def rate_limit( _emit_concurrency_acquisition_events(limits, occupy) -@sync_compatible @deprecated_parameter( name="create_if_missing", start_date="Sep 2024", @@ -157,10 +155,10 @@ async def rate_limit( when=lambda x: x is not None, help="Limits must be explicitly created before acquiring concurrency slots; see `strict` if you want to enforce this behavior.", ) -async def _acquire_concurrency_slots( +async def _aacquire_concurrency_slots( names: List[str], slots: int, - mode: Union[Literal["concurrency"], Literal["rate_limit"]] = "concurrency", + mode: Literal["concurrency", "rate_limit"] = "concurrency", timeout_seconds: Optional[float] = None, create_if_missing: Optional[bool] = None, max_retries: Optional[int] = None, @@ -199,8 +197,7 @@ async def _acquire_concurrency_slots( return retval -@sync_compatible -async def _release_concurrency_slots( +async def _arelease_concurrency_slots( names: List[str], slots: int, occupancy_seconds: float ) -> List[MinimalConcurrencyLimitResponse]: async with get_client() as client: diff --git a/src/prefect/concurrency/sync.py b/src/prefect/concurrency/sync.py index f7e02415743d..2f6bf47a3df6 100644 --- a/src/prefect/concurrency/sync.py +++ b/src/prefect/concurrency/sync.py @@ -9,6 +9,9 @@ ) import pendulum +from typing_extensions import Literal + +from prefect.utilities.asyncutils import run_coro_as_sync try: from pendulum import Interval @@ -19,8 +22,8 @@ from prefect.client.schemas.responses import MinimalConcurrencyLimitResponse from .asyncio import ( - _acquire_concurrency_slots, - _release_concurrency_slots, + _aacquire_concurrency_slots, + _arelease_concurrency_slots, ) from .events import ( _emit_concurrency_acquisition_events, @@ -30,6 +33,36 @@ T = TypeVar("T") +def _release_concurrency_slots( + names: List[str], slots: int, occupancy_seconds: float +) -> List[MinimalConcurrencyLimitResponse]: + result = run_coro_as_sync( + _arelease_concurrency_slots(names, slots, occupancy_seconds) + ) + if result is None: + raise RuntimeError("Failed to release concurrency slots") + return result + + +def _acquire_concurrency_slots( + names: List[str], + slots: int, + mode: Literal["concurrency", "rate_limit"] = "concurrency", + timeout_seconds: Optional[float] = None, + create_if_missing: Optional[bool] = None, + max_retries: Optional[int] = None, + strict: bool = False, +) -> List[MinimalConcurrencyLimitResponse]: + result = run_coro_as_sync( + _aacquire_concurrency_slots( + names, slots, mode, timeout_seconds, create_if_missing, max_retries, strict + ) + ) + if result is None: + raise RuntimeError("Failed to acquire concurrency slots") + return result + + @contextmanager def concurrency( names: Union[str, List[str]], @@ -81,7 +114,6 @@ def main(): create_if_missing=create_if_missing, strict=strict, max_retries=max_retries, - _sync=True, ) acquisition_time = pendulum.now("UTC") emitted_events = _emit_concurrency_acquisition_events(limits, occupy) @@ -94,7 +126,6 @@ def main(): names, occupy, occupancy_period.total_seconds(), - _sync=True, ) _emit_concurrency_release_events(limits, occupy, emitted_events) @@ -134,6 +165,5 @@ def rate_limit( timeout_seconds=timeout_seconds, create_if_missing=create_if_missing, strict=strict, - _sync=True, ) 
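The sync concurrency module now builds its own `_acquire_concurrency_slots` / `_release_concurrency_slots` wrappers on `run_coro_as_sync` instead of relying on `@sync_compatible` and `_sync=True`. A simplified sketch of that sync-facade-over-async pattern, with hypothetical names and plain `asyncio.run` standing in for `run_coro_as_sync` (which, unlike `asyncio.run`, also copes with an already-running loop):

```python
import asyncio
from typing import List


async def _aacquire_slots_demo(names: List[str], slots: int) -> List[str]:
    # Stand-in for the async API call the real helper makes.
    await asyncio.sleep(0)
    return [f"{name}:{slots}" for name in names]


def acquire_slots_demo(names: List[str], slots: int) -> List[str]:
    # Thin synchronous facade over the async implementation; callers no longer
    # need to pass a `_sync=True` flag to get blocking behavior.
    return asyncio.run(_aacquire_slots_demo(names, slots))


print(acquire_slots_demo(["db", "gpu"], slots=1))
```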
_emit_concurrency_acquisition_events(limits, occupy) diff --git a/src/prefect/context.py b/src/prefect/context.py index 2b391f57b1b9..287b9b58e138 100644 --- a/src/prefect/context.py +++ b/src/prefect/context.py @@ -25,9 +25,7 @@ Union, ) -import pendulum from pydantic import BaseModel, ConfigDict, Field, PrivateAttr -from pydantic_extra_types.pendulum_dt import DateTime from typing_extensions import Self import prefect.logging @@ -44,12 +42,17 @@ get_default_persist_setting_for_tasks, ) from prefect.settings import Profile, Settings -from prefect.settings.legacy import _get_settings_fields +from prefect.settings.legacy import ( + _get_settings_fields, # type: ignore[reportPrivateUsage] +) from prefect.states import State from prefect.task_runners import TaskRunner +from prefect.types import DateTime from prefect.utilities.services import start_client_metrics_server T = TypeVar("T") +P = TypeVar("P") +R = TypeVar("R") if TYPE_CHECKING: from prefect.flows import Flow @@ -121,8 +124,8 @@ class ContextModel(BaseModel): """ # The context variable for storing data must be defined by the child class - __var__: ContextVar - _token: Optional[Token] = PrivateAttr(None) + __var__: ContextVar[Self] + _token: Optional[Token[Self]] = PrivateAttr(None) model_config = ConfigDict( arbitrary_types_allowed=True, extra="forbid", @@ -150,7 +153,7 @@ def get(cls: Type[Self]) -> Optional[Self]: return cls.__var__.get(None) def model_copy( - self: Self, *, update: Optional[Dict[str, Any]] = None, deep: bool = False + self: Self, *, update: Optional[Mapping[str, Any]] = None, deep: bool = False ): """ Duplicate the context model, optionally choosing which fields to include, exclude, or change. @@ -199,14 +202,14 @@ class SyncClientContext(ContextModel): assert c1 is ctx.client """ - __var__ = ContextVar("sync-client-context") + __var__: ContextVar[Self] = ContextVar("sync-client-context") client: SyncPrefectClient _httpx_settings: Optional[dict[str, Any]] = PrivateAttr(None) _context_stack: int = PrivateAttr(0) def __init__(self, httpx_settings: Optional[dict[str, Any]] = None): super().__init__( - client=get_client(sync_client=True, httpx_settings=httpx_settings), + client=get_client(sync_client=True, httpx_settings=httpx_settings), # type: ignore[reportCallIssue] ) self._httpx_settings = httpx_settings self._context_stack = 0 @@ -220,11 +223,11 @@ def __enter__(self): else: return self - def __exit__(self, *exc_info): + def __exit__(self, *exc_info: Any): self._context_stack -= 1 if self._context_stack == 0: - self.client.__exit__(*exc_info) - return super().__exit__(*exc_info) + self.client.__exit__(*exc_info) # type: ignore[reportUnknownMemberType] + return super().__exit__(*exc_info) # type: ignore[reportUnknownMemberType] @classmethod @contextmanager @@ -264,12 +267,12 @@ class AsyncClientContext(ContextModel): def __init__(self, httpx_settings: Optional[dict[str, Any]] = None): super().__init__( - client=get_client(sync_client=False, httpx_settings=httpx_settings), + client=get_client(sync_client=False, httpx_settings=httpx_settings), # type: ignore[reportCallIssue] ) self._httpx_settings = httpx_settings self._context_stack = 0 - async def __aenter__(self): + async def __aenter__(self: Self) -> Self: self._context_stack += 1 if self._context_stack == 1: await self.client.__aenter__() @@ -278,11 +281,11 @@ async def __aenter__(self): else: return self - async def __aexit__(self, *exc_info): + async def __aexit__(self: Self, *exc_info: Any) -> None: self._context_stack -= 1 if self._context_stack == 0: - 
await self.client.__aexit__(*exc_info) - return super().__exit__(*exc_info) + await self.client.__aexit__(*exc_info) # type: ignore[reportUnknownMemberType] + return super().__exit__(*exc_info) # type: ignore[reportUnknownMemberType] @classmethod @asynccontextmanager @@ -305,19 +308,20 @@ class RunContext(ContextModel): client: The Prefect client instance being used for API communication """ - def __init__(self, *args, **kwargs): + def __init__(self, *args: Any, **kwargs: Any): super().__init__(*args, **kwargs) start_client_metrics_server() - start_time: DateTime = Field(default_factory=lambda: pendulum.now("UTC")) + start_time: DateTime = Field(default_factory=lambda: DateTime.now("UTC")) input_keyset: Optional[Dict[str, Dict[str, str]]] = None client: Union[PrefectClient, SyncPrefectClient] - def serialize(self): + def serialize(self: Self, include_secrets: bool = True) -> Dict[str, Any]: return self.model_dump( include={"start_time", "input_keyset"}, exclude_unset=True, + context={"include_secrets": include_secrets}, ) @@ -336,9 +340,9 @@ class EngineContext(RunContext): flow_run_states: A list of states for flow runs created within this flow run """ - flow: Optional["Flow"] = None + flow: Optional["Flow[Any, Any]"] = None flow_run: Optional[FlowRun] = None - task_runner: TaskRunner + task_runner: TaskRunner[Any] log_prints: bool = False parameters: Optional[Dict[str, Any]] = None @@ -351,21 +355,21 @@ class EngineContext(RunContext): persist_result: bool = Field(default_factory=get_default_persist_setting) # Counter for task calls allowing unique - task_run_dynamic_keys: Dict[str, int] = Field(default_factory=dict) + task_run_dynamic_keys: Dict[str, Union[str, int]] = Field(default_factory=dict) # Counter for flow pauses observed_flow_pauses: Dict[str, int] = Field(default_factory=dict) # Tracking for result from task runs in this flow run for dependency tracking # Holds the ID of the object returned by the task run and task run state - task_run_results: Mapping[int, State] = Field(default_factory=dict) + task_run_results: dict[int, State] = Field(default_factory=dict) # Events worker to emit events events: Optional[EventsWorker] = None - __var__: ContextVar = ContextVar("flow_run") + __var__: ContextVar[Self] = ContextVar("flow_run") - def serialize(self): + def serialize(self: Self, include_secrets: bool = True) -> Dict[str, Any]: return self.model_dump( include={ "flow_run", @@ -379,6 +383,7 @@ def serialize(self): }, exclude_unset=True, serialize_as_any=True, + context={"include_secrets": include_secrets}, ) @@ -395,7 +400,7 @@ class TaskRunContext(RunContext): task_run: The API metadata for this task run """ - task: "Task" + task: "Task[Any, Any]" task_run: TaskRun log_prints: bool = False parameters: Dict[str, Any] @@ -406,7 +411,7 @@ class TaskRunContext(RunContext): __var__ = ContextVar("task_run") - def serialize(self): + def serialize(self: Self, include_secrets: bool = True) -> Dict[str, Any]: return self.model_dump( include={ "task_run", @@ -420,6 +425,7 @@ def serialize(self): }, exclude_unset=True, serialize_as_any=True, + context={"include_secrets": include_secrets}, ) @@ -438,7 +444,7 @@ def get(cls) -> "TagsContext": # Return an empty `TagsContext` instead of `None` if no context exists return cls.__var__.get(TagsContext()) - __var__: ContextVar = ContextVar("tags") + __var__: ContextVar[Self] = ContextVar("tags") class SettingsContext(ContextModel): @@ -455,9 +461,9 @@ class SettingsContext(ContextModel): profile: Profile settings: Settings - __var__: ContextVar = 
ContextVar("settings") + __var__: ContextVar[Self] = ContextVar("settings") - def __hash__(self) -> int: + def __hash__(self: Self) -> int: return hash(self.settings) @classmethod @@ -564,7 +570,7 @@ def tags(*new_tags: str) -> Generator[Set[str], None, None]: @contextmanager def use_profile( - profile: Union[Profile, str], + profile: Union[Profile, str, Any], override_environment_variables: bool = False, include_current_context: bool = True, ): @@ -664,7 +670,7 @@ def root_settings_context(): # an override in the `SettingsContext.get` method. -GLOBAL_SETTINGS_CONTEXT: SettingsContext = root_settings_context() +GLOBAL_SETTINGS_CONTEXT: SettingsContext = root_settings_context() # type: ignore[reportConstantRedefinition] # 2024-07-02: This surfaces an actionable error message for removed objects diff --git a/src/prefect/deployments/flow_runs.py b/src/prefect/deployments/flow_runs.py index 8c66b5d87bf9..07971410410e 100644 --- a/src/prefect/deployments/flow_runs.py +++ b/src/prefect/deployments/flow_runs.py @@ -113,10 +113,8 @@ async def run_deployment( task_run_ctx = TaskRunContext.get() if as_subflow and (flow_run_ctx or task_run_ctx): # TODO: this logic can likely be simplified by using `Task.create_run` - from prefect.utilities.engine import ( - _dynamic_key_for_task_run, - collect_task_run_inputs, - ) + from prefect.utilities._engine import dynamic_key_for_task_run + from prefect.utilities.engine import collect_task_run_inputs # This was called from a flow. Link the flow run as a subflow. task_inputs = { @@ -143,7 +141,7 @@ async def run_deployment( else task_run_ctx.task_run.flow_run_id ) dynamic_key = ( - _dynamic_key_for_task_run(flow_run_ctx, dummy_task) + dynamic_key_for_task_run(flow_run_ctx, dummy_task) if flow_run_ctx else task_run_ctx.task_run.dynamic_key ) diff --git a/src/prefect/deployments/runner.py b/src/prefect/deployments/runner.py index 56b7af2d5682..77b014703fe7 100644 --- a/src/prefect/deployments/runner.py +++ b/src/prefect/deployments/runner.py @@ -33,7 +33,7 @@ def fast_flow(): import tempfile from datetime import datetime, timedelta from pathlib import Path -from typing import TYPE_CHECKING, Any, Dict, Iterable, List, Optional, Union +from typing import TYPE_CHECKING, Any, Iterable, List, Optional, Union from uuid import UUID from pydantic import ( @@ -160,7 +160,7 @@ class RunnerDeployment(BaseModel): paused: Optional[bool] = Field( default=None, description="Whether or not the deployment is paused." ) - parameters: Dict[str, Any] = Field(default_factory=dict) + parameters: dict[str, Any] = Field(default_factory=dict) entrypoint: Optional[str] = Field( default=None, description=( @@ -198,7 +198,7 @@ class RunnerDeployment(BaseModel): " the deployment is registered with a built runner." 
), ) - job_variables: Dict[str, Any] = Field( + job_variables: dict[str, Any] = Field( default_factory=dict, description=( "Job variables used to override the default values of a work pool" @@ -280,7 +280,7 @@ async def apply( async with get_client() as client: flow_id = await client.create_flow_from_name(self.flow_name) - create_payload = dict( + create_payload: dict[str, Any] = dict( flow_id=flow_id, name=self.name, work_queue_name=self.work_queue_name, @@ -428,7 +428,7 @@ def _construct_deployment_schedules( else: return [create_deployment_schedule_create(schedule)] - def _set_defaults_from_flow(self, flow: "Flow"): + def _set_defaults_from_flow(self, flow: "Flow[..., Any]"): self._parameter_openapi_schema = parameter_schema(flow) if not self.version: @@ -439,7 +439,7 @@ def _set_defaults_from_flow(self, flow: "Flow"): @classmethod def from_flow( cls, - flow: "Flow", + flow: "Flow[..., Any]", name: str, interval: Optional[ Union[Iterable[Union[int, float, timedelta]], int, float, timedelta] @@ -449,7 +449,7 @@ def from_flow( paused: Optional[bool] = None, schedules: Optional["FlexibleScheduleList"] = None, concurrency_limit: Optional[Union[int, ConcurrencyLimitConfig, None]] = None, - parameters: Optional[dict] = None, + parameters: Optional[dict[str, Any]] = None, triggers: Optional[List[Union[DeploymentTriggerTypes, TriggerTypes]]] = None, description: Optional[str] = None, tags: Optional[List[str]] = None, @@ -457,7 +457,7 @@ def from_flow( enforce_parameter_schema: bool = True, work_pool_name: Optional[str] = None, work_queue_name: Optional[str] = None, - job_variables: Optional[Dict[str, Any]] = None, + job_variables: Optional[dict[str, Any]] = None, entrypoint_type: EntrypointType = EntrypointType.FILE_PATH, ) -> "RunnerDeployment": """ @@ -588,7 +588,7 @@ def from_entrypoint( paused: Optional[bool] = None, schedules: Optional["FlexibleScheduleList"] = None, concurrency_limit: Optional[Union[int, ConcurrencyLimitConfig, None]] = None, - parameters: Optional[dict] = None, + parameters: Optional[dict[str, Any]] = None, triggers: Optional[List[Union[DeploymentTriggerTypes, TriggerTypes]]] = None, description: Optional[str] = None, tags: Optional[List[str]] = None, @@ -596,7 +596,7 @@ def from_entrypoint( enforce_parameter_schema: bool = True, work_pool_name: Optional[str] = None, work_queue_name: Optional[str] = None, - job_variables: Optional[Dict[str, Any]] = None, + job_variables: Optional[dict[str, Any]] = None, ) -> "RunnerDeployment": """ Configure a deployment for a given flow located at a given entrypoint. 
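For context, a hedged usage sketch of the classmethod whose annotations are tightened above; the flow, deployment name, and values are illustrative, and it assumes a flow defined in a saved script:

```python
from prefect import flow
from prefect.deployments.runner import RunnerDeployment


@flow
def daily_etl(day: str = "today") -> None:
    print(f"processing {day}")


# from_flow is a plain sync classmethod (see the hunk above); parameters
# and job_variables now carry dict[str, Any] annotations
deployment = RunnerDeployment.from_flow(
    daily_etl,
    name="daily-etl",                              # hypothetical deployment name
    parameters={"day": "2024-01-01"},
    job_variables={"env": {"LOG_LEVEL": "DEBUG"}},
)
```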
@@ -689,7 +689,7 @@ async def from_storage( paused: Optional[bool] = None, schedules: Optional["FlexibleScheduleList"] = None, concurrency_limit: Optional[Union[int, ConcurrencyLimitConfig, None]] = None, - parameters: Optional[dict] = None, + parameters: Optional[dict[str, Any]] = None, triggers: Optional[List[Union[DeploymentTriggerTypes, TriggerTypes]]] = None, description: Optional[str] = None, tags: Optional[List[str]] = None, @@ -697,7 +697,7 @@ async def from_storage( enforce_parameter_schema: bool = True, work_pool_name: Optional[str] = None, work_queue_name: Optional[str] = None, - job_variables: Optional[Dict[str, Any]] = None, + job_variables: Optional[dict[str, Any]] = None, ): """ Create a RunnerDeployment from a flow located at a given entrypoint and stored in a @@ -945,8 +945,8 @@ def local_flow(): console.print(f"Successfully pushed image {image.reference!r}", style="green") - deployment_exceptions = [] - deployment_ids = [] + deployment_exceptions: list[dict[str, Any]] = [] + deployment_ids: list[UUID] = [] image_ref = image.reference if image else None for deployment in track( deployments, diff --git a/src/prefect/deployments/steps/core.py b/src/prefect/deployments/steps/core.py index 8938ad46a6e6..ef6118b297a9 100644 --- a/src/prefect/deployments/steps/core.py +++ b/src/prefect/deployments/steps/core.py @@ -99,7 +99,9 @@ def _get_function_for_step( return step_func -async def run_step(step: Dict, upstream_outputs: Optional[Dict] = None) -> Dict: +async def run_step( + step: dict[str, Any], upstream_outputs: Optional[dict[str, Any]] = None +) -> dict[str, Any]: """ Runs a step, returns the step's output. diff --git a/src/prefect/deployments/steps/pull.py b/src/prefect/deployments/steps/pull.py index 8f2a82f54cb9..55a2b17347cf 100644 --- a/src/prefect/deployments/steps/pull.py +++ b/src/prefect/deployments/steps/pull.py @@ -6,10 +6,11 @@ from pathlib import Path from typing import TYPE_CHECKING, Any, Optional +from prefect._internal.compatibility.async_dispatch import async_dispatch from prefect._internal.retries import retry_async_fn from prefect.logging.loggers import get_logger from prefect.runner.storage import BlockStorageAdapter, GitRepository, RemoteStorage -from prefect.utilities.asyncutils import sync_compatible +from prefect.utilities.asyncutils import run_coro_as_sync deployment_logger = get_logger("deployment") @@ -17,7 +18,7 @@ from prefect.blocks.core import Block -def set_working_directory(directory: str) -> dict: +def set_working_directory(directory: str) -> dict[str, str]: """ Sets the working directory; works with both absolute and relative paths. @@ -37,15 +38,64 @@ def set_working_directory(directory: str) -> dict: base_delay=1, max_delay=10, retry_on_exceptions=(RuntimeError,), + operation_name="git_clone", ) -@sync_compatible -async def git_clone( +async def _pull_git_repository_with_retries(repo: GitRepository): + await repo.pull_code() + + +async def agit_clone( + repository: str, + branch: Optional[str] = None, + include_submodules: bool = False, + access_token: Optional[str] = None, + credentials: Optional["Block"] = None, +) -> dict[str, str]: + """ + Asynchronously clones a git repository into the current working directory. 
+ + Args: + repository: the URL of the repository to clone + branch: the branch to clone; if not provided, the default branch will be used + include_submodules (bool): whether to include git submodules when cloning the repository + access_token: an access token to use for cloning the repository; if not provided + the repository will be cloned using the default git credentials + credentials: a GitHubCredentials, GitLabCredentials, or BitBucketCredentials block can be used to specify the + credentials to use for cloning the repository. + + Returns: + dict: a dictionary containing a `directory` key of the new directory that was created + + Raises: + subprocess.CalledProcessError: if the git clone command fails for any reason + """ + if access_token and credentials: + raise ValueError( + "Please provide either an access token or credentials but not both." + ) + + _credentials = {"access_token": access_token} if access_token else credentials + + storage = GitRepository( + url=repository, + credentials=_credentials, + branch=branch, + include_submodules=include_submodules, + ) + + await _pull_git_repository_with_retries(storage) + + return dict(directory=str(storage.destination.relative_to(Path.cwd()))) + + +@async_dispatch(agit_clone) +def git_clone( repository: str, branch: Optional[str] = None, include_submodules: bool = False, access_token: Optional[str] = None, credentials: Optional["Block"] = None, -) -> dict: +) -> dict[str, str]: """ Clones a git repository into the current working directory. @@ -120,20 +170,18 @@ async def git_clone( "Please provide either an access token or credentials but not both." ) - credentials = {"access_token": access_token} if access_token else credentials + _credentials = {"access_token": access_token} if access_token else credentials storage = GitRepository( url=repository, - credentials=credentials, + credentials=_credentials, branch=branch, include_submodules=include_submodules, ) - await storage.pull_code() + run_coro_as_sync(_pull_git_repository_with_retries(storage)) - directory = str(storage.destination.relative_to(Path.cwd())) - deployment_logger.info(f"Cloned repository {repository!r} into {directory!r}") - return {"directory": directory} + return dict(directory=str(storage.destination.relative_to(Path.cwd()))) async def pull_from_remote_storage(url: str, **settings: Any): @@ -190,7 +238,7 @@ async def pull_with_block(block_document_name: str, block_type_slug: str): full_slug = f"{block_type_slug}/{block_document_name}" try: - block = await Block.load(full_slug) + block = await Block.aload(full_slug) except Exception: deployment_logger.exception("Unable to load block '%s'", full_slug) raise diff --git a/src/prefect/events/clients.py b/src/prefect/events/clients.py index bd09eb3ab20c..4efd7ba1107a 100644 --- a/src/prefect/events/clients.py +++ b/src/prefect/events/clients.py @@ -1,11 +1,13 @@ import abc import asyncio +import os from types import TracebackType from typing import ( TYPE_CHECKING, Any, ClassVar, Dict, + Generator, List, MutableMapping, Optional, @@ -13,20 +15,22 @@ Type, cast, ) +from urllib.parse import urlparse from uuid import UUID import orjson import pendulum from cachetools import TTLCache from prometheus_client import Counter +from python_socks.async_.asyncio import Proxy from typing_extensions import Self from websockets import Subprotocol -from websockets.client import WebSocketClientProtocol, connect from websockets.exceptions import ( ConnectionClosed, ConnectionClosedError, ConnectionClosedOK, ) +from websockets.legacy.client 
import Connect, WebSocketClientProtocol from prefect.events import Event from prefect.logging import get_logger @@ -80,6 +84,53 @@ def events_out_socket_from_api_url(url: str): return http_to_ws(url) + "/events/out" +class WebsocketProxyConnect(Connect): + def __init__(self: Self, uri: str, **kwargs: Any): + # super() is intentionally deferred to the _proxy_connect method + # to allow for the socket to be established first + + self.uri = uri + self._kwargs = kwargs + + u = urlparse(uri) + host = u.hostname + + if u.scheme == "ws": + port = u.port or 80 + proxy_url = os.environ.get("HTTP_PROXY") + elif u.scheme == "wss": + port = u.port or 443 + proxy_url = os.environ.get("HTTPS_PROXY") + kwargs["server_hostname"] = host + else: + raise ValueError( + "Unsupported scheme %s. Expected 'ws' or 'wss'. " % u.scheme + ) + + self._proxy = Proxy.from_url(proxy_url) if proxy_url else None + self._host = host + self._port = port + + async def _proxy_connect(self: Self) -> WebSocketClientProtocol: + if self._proxy: + sock = await self._proxy.connect( + dest_host=self._host, + dest_port=self._port, + ) + self._kwargs["sock"] = sock + + super().__init__(self.uri, **self._kwargs) + proto = await self.__await_impl__() + return proto + + def __await__(self: Self) -> Generator[Any, None, WebSocketClientProtocol]: + return self._proxy_connect().__await__() + + +def websocket_connect(uri: str, **kwargs: Any) -> WebsocketProxyConnect: + return WebsocketProxyConnect(uri, **kwargs) + + def get_events_client( reconnection_attempts: int = 10, checkpoint_every: int = 700, @@ -265,7 +316,7 @@ def __init__( ) self._events_socket_url = events_in_socket_from_api_url(api_url) - self._connect = connect(self._events_socket_url) + self._connect = websocket_connect(self._events_socket_url) self._websocket = None self._reconnection_attempts = reconnection_attempts self._unconfirmed_events = [] @@ -435,7 +486,7 @@ def __init__( reconnection_attempts=reconnection_attempts, checkpoint_every=checkpoint_every, ) - self._connect = connect( + self._connect = websocket_connect( self._events_socket_url, extra_headers={"Authorization": f"bearer {api_key}"}, ) @@ -494,7 +545,7 @@ def __init__( logger.debug("Connecting to %s", socket_url) - self._connect = connect( + self._connect = websocket_connect( socket_url, subprotocols=[Subprotocol("prefect")], ) diff --git a/src/prefect/events/filters.py b/src/prefect/events/filters.py index 9143c43a8689..f969e9ccb651 100644 --- a/src/prefect/events/filters.py +++ b/src/prefect/events/filters.py @@ -3,9 +3,9 @@ import pendulum from pydantic import Field, PrivateAttr -from pydantic_extra_types.pendulum_dt import DateTime from prefect._internal.schemas.bases import PrefectBaseModel +from prefect.types import DateTime from prefect.utilities.collections import AutoEnum from .schemas.events import Event, Resource, ResourceSpecification diff --git a/src/prefect/events/related.py b/src/prefect/events/related.py index c2218e92903b..ee36db860352 100644 --- a/src/prefect/events/related.py +++ b/src/prefect/events/related.py @@ -15,7 +15,8 @@ from uuid import UUID import pendulum -from pendulum.datetime import DateTime + +from prefect.types import DateTime from .schemas.events import RelatedResource diff --git a/src/prefect/events/schemas/events.py b/src/prefect/events/schemas/events.py index f143c959be5b..7e7ddc6b9c5a 100644 --- a/src/prefect/events/schemas/events.py +++ b/src/prefect/events/schemas/events.py @@ -20,7 +20,6 @@ RootModel, model_validator, ) -from pydantic_extra_types.pendulum_dt import 
DateTime from typing_extensions import Annotated, Self from prefect._internal.schemas.bases import PrefectBaseModel @@ -28,6 +27,7 @@ from prefect.settings import ( PREFECT_EVENTS_MAXIMUM_LABELS_PER_RESOURCE, ) +from prefect.types import DateTime from .labelling import Labelled diff --git a/src/prefect/events/utilities.py b/src/prefect/events/utilities.py index 106f479090e6..b1a04a96a725 100644 --- a/src/prefect/events/utilities.py +++ b/src/prefect/events/utilities.py @@ -3,7 +3,6 @@ from uuid import UUID import pendulum -from pydantic_extra_types.pendulum_dt import DateTime from .clients import ( AssertingEventsClient, @@ -20,11 +19,12 @@ def emit_event( event: str, resource: Dict[str, str], - occurred: Optional[DateTime] = None, + occurred: Optional[pendulum.DateTime] = None, related: Optional[Union[List[Dict[str, str]], List[RelatedResource]]] = None, payload: Optional[Dict[str, Any]] = None, id: Optional[UUID] = None, follows: Optional[Event] = None, + **kwargs: Optional[Dict[str, Any]], ) -> Optional[Event]: """ Send an event to Prefect Cloud. @@ -63,6 +63,7 @@ def emit_event( event_kwargs: Dict[str, Any] = { "event": event, "resource": resource, + **kwargs, } if occurred is None: diff --git a/src/prefect/events/worker.py b/src/prefect/events/worker.py index b1fa30baebf1..0adb06a549db 100644 --- a/src/prefect/events/worker.py +++ b/src/prefect/events/worker.py @@ -83,6 +83,14 @@ async def _handle(self, event: Event): await self._client.emit(event) async def attach_related_resources_from_context(self, event: Event): + if "prefect.resource.lineage-group" in event.resource: + # We attach related resources to lineage events in `emit_lineage_event`, + # instead of the worker, because not all run-related resources are + # upstream from every lineage event (they might be downstream). + # The "related" field in the event schema tracks upstream resources + # only. 
+ return + exclude = {resource.id for resource in event.involved_resources} event.related += await related_resources_from_run_context( client=self._orchestration_client, exclude=exclude diff --git a/src/prefect/filesystems.py b/src/prefect/filesystems.py index 333665fd5679..97b7ee1e2e26 100644 --- a/src/prefect/filesystems.py +++ b/src/prefect/filesystems.py @@ -1,6 +1,7 @@ import abc import urllib.parse from pathlib import Path +from shutil import copytree from typing import Any, Dict, Optional import anyio @@ -13,7 +14,6 @@ ) from prefect.blocks.core import Block from prefect.utilities.asyncutils import run_sync_in_worker_thread, sync_compatible -from prefect.utilities.compat import copytree from prefect.utilities.filesystem import filter_files from ._internal.compatibility.migration import getattr_migration @@ -158,7 +158,7 @@ async def get_directory( copytree(from_path, local_path, dirs_exist_ok=True, ignore=ignore_func) async def _get_ignore_func(self, local_path: str, ignore_file: str): - with open(ignore_file, "r") as f: + with open(ignore_file) as f: ignore_patterns = f.readlines() included_files = filter_files(root=local_path, ignore_patterns=ignore_patterns) @@ -348,7 +348,7 @@ async def put_directory( included_files = None if ignore_file: - with open(ignore_file, "r") as f: + with open(ignore_file) as f: ignore_patterns = f.readlines() included_files = filter_files( diff --git a/src/prefect/flow_engine.py b/src/prefect/flow_engine.py index 239d9306ffb1..8ac8b1200e56 100644 --- a/src/prefect/flow_engine.py +++ b/src/prefect/flow_engine.py @@ -2,7 +2,7 @@ import logging import os import time -from contextlib import ExitStack, asynccontextmanager, contextmanager +from contextlib import ExitStack, asynccontextmanager, contextmanager, nullcontext from dataclasses import dataclass, field from typing import ( Any, @@ -23,11 +23,9 @@ from uuid import UUID from anyio import CancelScope -from opentelemetry import trace -from opentelemetry.trace import Tracer, get_tracer +from opentelemetry import propagate, trace from typing_extensions import ParamSpec -import prefect from prefect import Task from prefect.client.orchestration import PrefectClient, SyncPrefectClient, get_client from prefect.client.schemas import FlowRun, TaskRun @@ -72,6 +70,14 @@ exception_to_failed_state, return_value_to_state, ) +from prefect.telemetry.run_telemetry import ( + LABELS_TRACEPARENT_KEY, + TRACEPARENT_KEY, + OTELSetter, + RunTelemetry, +) +from prefect.types import KeyValueLabels +from prefect.utilities._engine import get_hook_name, resolve_custom_flow_run_name from prefect.utilities.annotations import NotSet from prefect.utilities.asyncutils import run_coro_as_sync from prefect.utilities.callables import ( @@ -81,8 +87,6 @@ ) from prefect.utilities.collections import visit_collection from prefect.utilities.engine import ( - _get_hook_name, - _resolve_custom_flow_run_name, capture_sigterm, link_state_to_result, propose_state, @@ -133,10 +137,7 @@ class BaseFlowRunEngine(Generic[P, R]): _is_started: bool = False short_circuit: bool = False _flow_run_name_set: bool = False - _tracer: Tracer = field( - default_factory=lambda: get_tracer("prefect", prefect.__version__) - ) - _span: Optional[trace.Span] = None + _telemetry: RunTelemetry = field(default_factory=RunTelemetry) def __post_init__(self): if self.flow is None and self.flow_run_id is None: @@ -149,21 +150,6 @@ def __post_init__(self): def state(self) -> State: return self.flow_run.state # type: ignore - def _end_span_on_success(self): - if not 
self._span: - return - self._span.set_status(trace.Status(trace.StatusCode.OK)) - self._span.end(time.time_ns()) - self._span = None - - def _end_span_on_error(self, exc: BaseException, description: Optional[str]): - if not self._span: - return - self._span.record_exception(exc) - self._span.set_status(trace.Status(trace.StatusCode.ERROR, description)) - self._span.end(time.time_ns()) - self._span = None - def is_running(self) -> bool: if getattr(self, "flow_run", None) is None: return False @@ -178,6 +164,39 @@ def cancel_all_tasks(self): if hasattr(self.flow.task_runner, "cancel_all"): self.flow.task_runner.cancel_all() # type: ignore + def _update_otel_labels( + self, span: trace.Span, client: Union[SyncPrefectClient, PrefectClient] + ): + parent_flow_run_ctx = FlowRunContext.get() + + if parent_flow_run_ctx and parent_flow_run_ctx.flow_run: + if traceparent := parent_flow_run_ctx.flow_run.labels.get( + LABELS_TRACEPARENT_KEY + ): + carrier: KeyValueLabels = {TRACEPARENT_KEY: traceparent} + propagate.get_global_textmap().inject( + carrier={TRACEPARENT_KEY: traceparent}, + setter=OTELSetter(), + ) + + else: + carrier: KeyValueLabels = {} + propagate.get_global_textmap().inject( + carrier, + context=trace.set_span_in_context(span), + setter=OTELSetter(), + ) + if carrier.get(TRACEPARENT_KEY): + if self.flow_run: + client.update_flow_run_labels( + flow_run_id=self.flow_run.id, + labels={LABELS_TRACEPARENT_KEY: carrier[TRACEPARENT_KEY]}, + ) + else: + self.logger.info( + f"Tried to set traceparent {carrier[TRACEPARENT_KEY]} for flow run, but None was found" + ) + @dataclass class FlowRunEngine(BaseFlowRunEngine[P, R]): @@ -281,16 +300,7 @@ def set_state(self, state: State, force: bool = False) -> State: self.flow_run.state_name = state.name # type: ignore self.flow_run.state_type = state.type # type: ignore - if self._span: - self._span.add_event( - state.name, - { - "prefect.state.message": state.message or "", - "prefect.state.type": state.type, - "prefect.state.name": state.name or state.type, - "prefect.state.id": str(state.id), - }, - ) + self._telemetry.update_state(state) return state def result(self, raise_on_failure: bool = True) -> "Union[R, State, None]": @@ -340,7 +350,7 @@ def handle_success(self, result: R) -> R: self.set_state(terminal_state) self._return_value = resolved_result - self._end_span_on_success() + self._telemetry.end_span_on_success() return result @@ -372,8 +382,8 @@ def handle_exception( ) state = self.set_state(Running()) self._raised = exc - - self._end_span_on_error(exc, state.message) + self._telemetry.record_exception(exc) + self._telemetry.end_span_on_failure(state.message) return state @@ -392,8 +402,8 @@ def handle_timeout(self, exc: TimeoutError) -> None: ) self.set_state(state) self._raised = exc - - self._end_span_on_error(exc, message) + self._telemetry.record_exception(exc) + self._telemetry.end_span_on_failure(message) def handle_crash(self, exc: BaseException) -> None: state = run_coro_as_sync(exception_to_crashed_state(exc)) @@ -401,8 +411,8 @@ def handle_crash(self, exc: BaseException) -> None: self.logger.debug("Crash details:", exc_info=exc) self.set_state(state, force=True) self._raised = exc - - self._end_span_on_error(exc, state.message) + self._telemetry.record_exception(exc) + self._telemetry.end_span_on_failure(state.message if state else None) def load_subflow_run( self, @@ -537,7 +547,7 @@ def call_hooks(self, state: Optional[State] = None): hooks = None for hook in hooks or []: - hook_name = _get_hook_name(hook) + hook_name = 
get_hook_name(hook) try: self.logger.info( @@ -600,7 +610,7 @@ def setup_run_context(self, client: Optional[SyncPrefectClient] = None): # update the flow run name if necessary if not self._flow_run_name_set and self.flow.flow_run_name: - flow_run_name = _resolve_custom_flow_run_name( + flow_run_name = resolve_custom_flow_run_name( flow=self.flow, parameters=self.parameters ) self.client.set_flow_run_name( @@ -646,16 +656,17 @@ def initialize_run(self): flow_version=self.flow.version, empirical_policy=self.flow_run.empirical_policy, ) - - self._span = self._tracer.start_span( - name=self.flow_run.name, - attributes={ - **self.flow_run.labels, - "prefect.run.type": "flow", - "prefect.run.id": str(self.flow_run.id), - "prefect.tags": self.flow_run.tags, - "prefect.flow.name": self.flow.name, - }, + parent_flow_run = FlowRunContext.get() + parent_labels = {} + if parent_flow_run and parent_flow_run.flow_run: + parent_labels = parent_flow_run.flow_run.labels + + self._telemetry.start_span( + name=self.flow.name, + run=self.flow_run, + client=self.client, + parameters=self.parameters, + parent_labels=parent_labels, ) try: @@ -698,12 +709,15 @@ def initialize_run(self): @contextmanager def start(self) -> Generator[None, None, None]: - with self.initialize_run(), trace.use_span(self._span): - self.begin_run() + with self.initialize_run(): + with trace.use_span( + self._telemetry.span + ) if self._telemetry.span else nullcontext(): + self.begin_run() - if self.state.is_running(): - self.call_hooks() - yield + if self.state.is_running(): + self.call_hooks() + yield @contextmanager def run_context(self): @@ -854,16 +868,7 @@ async def set_state(self, state: State, force: bool = False) -> State: self.flow_run.state_name = state.name # type: ignore self.flow_run.state_type = state.type # type: ignore - if self._span: - self._span.add_event( - state.name, - { - "prefect.state.message": state.message or "", - "prefect.state.type": state.type, - "prefect.state.name": state.name or state.type, - "prefect.state.id": str(state.id), - }, - ) + self._telemetry.update_state(state) return state async def result(self, raise_on_failure: bool = True) -> "Union[R, State, None]": @@ -911,7 +916,7 @@ async def handle_success(self, result: R) -> R: await self.set_state(terminal_state) self._return_value = resolved_result - self._end_span_on_success() + self._telemetry.end_span_on_success() return result @@ -941,8 +946,8 @@ async def handle_exception( ) state = await self.set_state(Running()) self._raised = exc - - self._end_span_on_error(exc, state.message) + self._telemetry.record_exception(exc) + self._telemetry.end_span_on_failure(state.message) return state @@ -962,7 +967,8 @@ async def handle_timeout(self, exc: TimeoutError) -> None: await self.set_state(state) self._raised = exc - self._end_span_on_error(exc, message) + self._telemetry.record_exception(exc) + self._telemetry.end_span_on_failure(message) async def handle_crash(self, exc: BaseException) -> None: # need to shield from asyncio cancellation to ensure we update the state @@ -974,7 +980,8 @@ async def handle_crash(self, exc: BaseException) -> None: await self.set_state(state, force=True) self._raised = exc - self._end_span_on_error(exc, state.message) + self._telemetry.record_exception(exc) + self._telemetry.end_span_on_failure(state.message) async def load_subflow_run( self, @@ -1107,7 +1114,7 @@ async def call_hooks(self, state: Optional[State] = None): hooks = None for hook in hooks or []: - hook_name = _get_hook_name(hook) + hook_name = 
get_hook_name(hook) try: self.logger.info( @@ -1170,7 +1177,7 @@ async def setup_run_context(self, client: Optional[PrefectClient] = None): # update the flow run name if necessary if not self._flow_run_name_set and self.flow.flow_run_name: - flow_run_name = _resolve_custom_flow_run_name( + flow_run_name = resolve_custom_flow_run_name( flow=self.flow, parameters=self.parameters ) await self.client.set_flow_run_name( @@ -1216,16 +1223,17 @@ async def initialize_run(self): flow_version=self.flow.version, empirical_policy=self.flow_run.empirical_policy, ) - - self._span = self._tracer.start_span( - name=self.flow_run.name, - attributes={ - **self.flow_run.labels, - "prefect.run.type": "flow", - "prefect.run.id": str(self.flow_run.id), - "prefect.tags": self.flow_run.tags, - "prefect.flow.name": self.flow.name, - }, + parent_flow_run = FlowRunContext.get() + parent_labels = {} + if parent_flow_run and parent_flow_run.flow_run: + parent_labels = parent_flow_run.flow_run.labels + + await self._telemetry.async_start_span( + name=self.flow.name, + run=self.flow_run, + client=self.client, + parameters=self.parameters, + parent_labels=parent_labels, ) try: @@ -1269,7 +1277,9 @@ async def initialize_run(self): @asynccontextmanager async def start(self) -> AsyncGenerator[None, None]: async with self.initialize_run(): - with trace.use_span(self._span): + with trace.use_span( + self._telemetry.span + ) if self._telemetry.span else nullcontext(): await self.begin_run() if self.state.is_running(): @@ -1392,7 +1402,7 @@ async def run_generator_flow_async( flow: Flow[P, R], flow_run: Optional[FlowRun] = None, parameters: Optional[Dict[str, Any]] = None, - wait_for: Optional[Iterable[PrefectFuture]] = None, + wait_for: Optional[Iterable[PrefectFuture[R]]] = None, return_type: Literal["state", "result"] = "result", ) -> AsyncGenerator[R, None]: if return_type != "result": @@ -1430,7 +1440,7 @@ def run_flow( flow: Flow[P, R], flow_run: Optional[FlowRun] = None, parameters: Optional[Dict[str, Any]] = None, - wait_for: Optional[Iterable[PrefectFuture]] = None, + wait_for: Optional[Iterable[PrefectFuture[R]]] = None, return_type: Literal["state", "result"] = "result", ) -> Union[R, State, None]: kwargs = dict( diff --git a/src/prefect/flow_runs.py b/src/prefect/flow_runs.py index 46b6c4d7bce2..526f0793d6b1 100644 --- a/src/prefect/flow_runs.py +++ b/src/prefect/flow_runs.py @@ -1,6 +1,6 @@ from typing import ( TYPE_CHECKING, - Dict, + Any, Optional, Type, TypeVar, @@ -307,7 +307,7 @@ async def suspend_flow_run( flow_run_id: Optional[UUID] = None, timeout: Optional[int] = 3600, key: Optional[str] = None, - client: PrefectClient = None, + client: Optional[PrefectClient] = None, ) -> None: ... @@ -318,7 +318,7 @@ async def suspend_flow_run( flow_run_id: Optional[UUID] = None, timeout: Optional[int] = 3600, key: Optional[str] = None, - client: PrefectClient = None, + client: Optional[PrefectClient] = None, ) -> T: ... @@ -330,7 +330,7 @@ async def suspend_flow_run( flow_run_id: Optional[UUID] = None, timeout: Optional[int] = 3600, key: Optional[str] = None, - client: PrefectClient = None, + client: Optional[PrefectClient] = None, ) -> Optional[T]: """ Suspends a flow run by stopping code execution until resumed. @@ -430,7 +430,9 @@ async def suspend_flow_run( @sync_compatible -async def resume_flow_run(flow_run_id, run_input: Optional[Dict] = None): +async def resume_flow_run( + flow_run_id: UUID, run_input: Optional[dict[str, Any]] = None +) -> None: """ Resumes a paused flow. 
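The suspend/resume helpers keep their `@sync_compatible` behavior; only the annotations change (`client` becomes `Optional[PrefectClient]`, `run_input` a `dict[str, Any]`). A small hedged sketch of resuming a suspended run with run input; the run id is hypothetical:

```python
from uuid import UUID

from prefect.flow_runs import resume_flow_run

# hypothetical id of a run that was previously paused or suspended
flow_run_id = UUID("00000000-0000-0000-0000-000000000000")

# sync-compatible: callable directly from synchronous code
resume_flow_run(flow_run_id, run_input={"approved": True})
```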
diff --git a/src/prefect/flows.py b/src/prefect/flows.py index fcc4e675bda6..4f402499a94c 100644 --- a/src/prefect/flows.py +++ b/src/prefect/flows.py @@ -23,13 +23,10 @@ Awaitable, Callable, Coroutine, - Dict, Generic, Iterable, - List, NoReturn, Optional, - Set, Tuple, Type, TypeVar, @@ -45,7 +42,7 @@ from pydantic.v1.decorator import ValidatedFunction as V1ValidatedFunction from pydantic.v1.errors import ConfigError # TODO from rich.console import Console -from typing_extensions import Literal, ParamSpec, Self +from typing_extensions import Literal, ParamSpec, TypeAlias from prefect._internal.concurrency.api import create_call, from_async from prefect.blocks.core import Block @@ -107,7 +104,11 @@ T = TypeVar("T") # Generic type var for capturing the inner return type of async funcs R = TypeVar("R") # The return type of the user's function P = ParamSpec("P") # The parameters of the flow -F = TypeVar("F", bound="Flow") # The type of the flow +F = TypeVar("F", bound="Flow[Any, Any]") # The type of the flow + +StateHookCallable: TypeAlias = Callable[ + [FlowSchema, FlowRun, State], Union[Awaitable[None], None] +] logger = get_logger("flows") @@ -186,7 +187,9 @@ def __init__( flow_run_name: Optional[Union[Callable[[], str], str]] = None, retries: Optional[int] = None, retry_delay_seconds: Optional[Union[int, float]] = None, - task_runner: Union[Type[TaskRunner], TaskRunner, None] = None, + task_runner: Union[ + Type[TaskRunner[PrefectFuture[R]]], TaskRunner[PrefectFuture[R]], None + ] = None, description: Optional[str] = None, timeout_seconds: Union[int, float, None] = None, validate_parameters: bool = True, @@ -195,15 +198,11 @@ def __init__( result_serializer: Optional[ResultSerializer] = None, cache_result_in_memory: bool = True, log_prints: Optional[bool] = None, - on_completion: Optional[ - List[Callable[[FlowSchema, FlowRun, State], None]] - ] = None, - on_failure: Optional[List[Callable[[FlowSchema, FlowRun, State], None]]] = None, - on_cancellation: Optional[ - List[Callable[[FlowSchema, FlowRun, State], None]] - ] = None, - on_crashed: Optional[List[Callable[[FlowSchema, FlowRun, State], None]]] = None, - on_running: Optional[List[Callable[[FlowSchema, FlowRun, State], None]]] = None, + on_completion: Optional[list[StateHookCallable]] = None, + on_failure: Optional[list[StateHookCallable]] = None, + on_cancellation: Optional[list[StateHookCallable]] = None, + on_crashed: Optional[list[StateHookCallable]] = None, + on_running: Optional[list[StateHookCallable]] = None, ): if name is not None and not isinstance(name, str): raise TypeError( @@ -375,7 +374,7 @@ def __init__( def ismethod(self) -> bool: return hasattr(self.fn, "__prefect_self__") - def __get__(self, instance, owner): + def __get__(self, instance: Any, owner: Any): """ Implement the descriptor protocol so that the flow can be used as an instance method. 
When an instance method is loaded, this method is called with the "self" instance as @@ -402,7 +401,9 @@ def with_options( retry_delay_seconds: Optional[Union[int, float]] = None, description: Optional[str] = None, flow_run_name: Optional[Union[Callable[[], str], str]] = None, - task_runner: Union[Type[TaskRunner], TaskRunner, None] = None, + task_runner: Union[ + Type[TaskRunner[PrefectFuture[R]]], TaskRunner[PrefectFuture[R]], None + ] = None, timeout_seconds: Union[int, float, None] = None, validate_parameters: Optional[bool] = None, persist_result: Optional[bool] = NotSet, # type: ignore @@ -410,16 +411,12 @@ def with_options( result_serializer: Optional[ResultSerializer] = NotSet, # type: ignore cache_result_in_memory: Optional[bool] = None, log_prints: Optional[bool] = NotSet, # type: ignore - on_completion: Optional[ - List[Callable[[FlowSchema, FlowRun, State], None]] - ] = None, - on_failure: Optional[List[Callable[[FlowSchema, FlowRun, State], None]]] = None, - on_cancellation: Optional[ - List[Callable[[FlowSchema, FlowRun, State], None]] - ] = None, - on_crashed: Optional[List[Callable[[FlowSchema, FlowRun, State], None]]] = None, - on_running: Optional[List[Callable[[FlowSchema, FlowRun, State], None]]] = None, - ) -> Self: + on_completion: Optional[list[StateHookCallable]] = None, + on_failure: Optional[list[StateHookCallable]] = None, + on_cancellation: Optional[list[StateHookCallable]] = None, + on_crashed: Optional[list[StateHookCallable]] = None, + on_running: Optional[list[StateHookCallable]] = None, + ) -> "Flow[P, R]": """ Create a new flow from the current object, updating provided options. @@ -522,7 +519,7 @@ def with_options( new_flow._entrypoint = self._entrypoint return new_flow - def validate_parameters(self, parameters: Dict[str, Any]) -> Dict[str, Any]: + def validate_parameters(self, parameters: dict[str, Any]) -> dict[str, Any]: """ Validate parameters for compatibility with the flow by attempting to cast the inputs to the associated types specified by the function's type annotations. @@ -567,14 +564,12 @@ def resolve_block_reference(data: Any) -> Any: "Cannot mix Pydantic v1 and v2 types as arguments to a flow." ) + validated_fn_kwargs = dict(arbitrary_types_allowed=True) + if has_v1_models: - validated_fn = V1ValidatedFunction( - self.fn, config={"arbitrary_types_allowed": True} - ) + validated_fn = V1ValidatedFunction(self.fn, config=validated_fn_kwargs) else: - validated_fn = V2ValidatedFunction( - self.fn, config=pydantic.ConfigDict(arbitrary_types_allowed=True) - ) + validated_fn = V2ValidatedFunction(self.fn, config=validated_fn_kwargs) try: with warnings.catch_warnings(): @@ -599,7 +594,7 @@ def resolve_block_reference(data: Any) -> Any: } return cast_parameters - def serialize_parameters(self, parameters: Dict[str, Any]) -> Dict[str, Any]: + def serialize_parameters(self, parameters: dict[str, Any]) -> dict[str, Any]: """ Convert parameters to a serializable form. 
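The hook parameters above now share the `StateHookCallable` alias, i.e. any callable taking `(Flow, FlowRun, State)` and returning `None` or an awaitable. A minimal sketch of a hook registered through `on_completion` (names are illustrative):

```python
from prefect import flow
from prefect.client.schemas.objects import FlowRun
from prefect.states import State


def notify(flow, flow_run: FlowRun, state: State) -> None:
    # matches StateHookCallable: (Flow, FlowRun, State) -> None | Awaitable[None]
    print(f"{flow.name} finished run {flow_run.id} with state {state.name}")


@flow(on_completion=[notify])
def my_flow() -> int:
    return 42


if __name__ == "__main__":
    my_flow()
```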
@@ -645,15 +640,15 @@ async def to_deployment( paused: Optional[bool] = None, schedules: Optional["FlexibleScheduleList"] = None, concurrency_limit: Optional[Union[int, ConcurrencyLimitConfig, None]] = None, - parameters: Optional[dict] = None, - triggers: Optional[List[Union[DeploymentTriggerTypes, TriggerTypes]]] = None, + parameters: Optional[dict[str, Any]] = None, + triggers: Optional[list[Union[DeploymentTriggerTypes, TriggerTypes]]] = None, description: Optional[str] = None, - tags: Optional[List[str]] = None, + tags: Optional[list[str]] = None, version: Optional[str] = None, enforce_parameter_schema: bool = True, work_pool_name: Optional[str] = None, work_queue_name: Optional[str] = None, - job_variables: Optional[Dict[str, Any]] = None, + job_variables: Optional[dict[str, Any]] = None, entrypoint_type: EntrypointType = EntrypointType.FILE_PATH, ) -> "RunnerDeployment": """ @@ -755,33 +750,23 @@ def my_other_flow(name): entrypoint_type=entrypoint_type, ) - def on_completion( - self, fn: Callable[["Flow", FlowRun, State], None] - ) -> Callable[["Flow", FlowRun, State], None]: + def on_completion(self, fn: StateHookCallable) -> StateHookCallable: self.on_completion_hooks.append(fn) return fn - def on_cancellation( - self, fn: Callable[["Flow", FlowRun, State], None] - ) -> Callable[["Flow", FlowRun, State], None]: + def on_cancellation(self, fn: StateHookCallable) -> StateHookCallable: self.on_cancellation_hooks.append(fn) return fn - def on_crashed( - self, fn: Callable[["Flow", FlowRun, State], None] - ) -> Callable[["Flow", FlowRun, State], None]: + def on_crashed(self, fn: StateHookCallable) -> StateHookCallable: self.on_crashed_hooks.append(fn) return fn - def on_running( - self, fn: Callable[["Flow", FlowRun, State], None] - ) -> Callable[["Flow", FlowRun, State], None]: + def on_running(self, fn: StateHookCallable) -> StateHookCallable: self.on_running_hooks.append(fn) return fn - def on_failure( - self, fn: Callable[["Flow", FlowRun, State], None] - ) -> Callable[["Flow", FlowRun, State], None]: + def on_failure(self, fn: StateHookCallable) -> StateHookCallable: self.on_failure_hooks.append(fn) return fn @@ -801,10 +786,10 @@ def serve( paused: Optional[bool] = None, schedules: Optional["FlexibleScheduleList"] = None, global_limit: Optional[Union[int, ConcurrencyLimitConfig, None]] = None, - triggers: Optional[List[Union[DeploymentTriggerTypes, TriggerTypes]]] = None, - parameters: Optional[dict] = None, + triggers: Optional[list[Union[DeploymentTriggerTypes, TriggerTypes]]] = None, + parameters: Optional[dict[str, Any]] = None, description: Optional[str] = None, - tags: Optional[List[str]] = None, + tags: Optional[list[str]] = None, version: Optional[str] = None, enforce_parameter_schema: bool = True, pause_on_shutdown: bool = True, @@ -1039,8 +1024,11 @@ def my_flow(name: str = "world"): await storage.pull_code() full_entrypoint = str(storage.destination / entrypoint) - flow: Flow = await from_async.wait_for_call_in_new_thread( - create_call(load_flow_from_entrypoint, full_entrypoint) + flow = cast( + Flow[P, R], + await from_async.wait_for_call_in_new_thread( + create_call(load_flow_from_entrypoint, full_entrypoint) + ), ) flow._storage = storage flow._entrypoint = entrypoint @@ -1056,17 +1044,17 @@ async def deploy( build: bool = True, push: bool = True, work_queue_name: Optional[str] = None, - job_variables: Optional[dict] = None, + job_variables: Optional[dict[str, Any]] = None, interval: Optional[Union[int, float, datetime.timedelta]] = None, cron: Optional[str] = 
None, rrule: Optional[str] = None, paused: Optional[bool] = None, - schedules: Optional[List[DeploymentScheduleCreate]] = None, + schedules: Optional[list[DeploymentScheduleCreate]] = None, concurrency_limit: Optional[Union[int, ConcurrencyLimitConfig, None]] = None, - triggers: Optional[List[Union[DeploymentTriggerTypes, TriggerTypes]]] = None, - parameters: Optional[dict] = None, + triggers: Optional[list[Union[DeploymentTriggerTypes, TriggerTypes]]] = None, + parameters: Optional[dict[str, Any]] = None, description: Optional[str] = None, - tags: Optional[List[str]] = None, + tags: Optional[list[str]] = None, version: Optional[str] = None, enforce_parameter_schema: bool = True, entrypoint_type: EntrypointType = EntrypointType.FILE_PATH, @@ -1289,7 +1277,7 @@ def __call__( self, *args: "P.args", return_state: bool = False, - wait_for: Optional[Iterable[PrefectFuture]] = None, + wait_for: Optional[Iterable[PrefectFuture[Any]]] = None, **kwargs: "P.kwargs", ): """ @@ -1361,7 +1349,7 @@ def __call__( ) @sync_compatible - async def visualize(self, *args, **kwargs): + async def visualize(self, *args: "P.args", **kwargs: "P.kwargs"): """ Generates a graphviz object representing the current flow. In IPython notebooks, it's rendered inline, otherwise in a new window as a PNG. @@ -1390,7 +1378,7 @@ async def visualize(self, *args, **kwargs): try: with TaskVizTracker() as tracker: if self.isasync: - await self.fn(*args, **kwargs) + await self.fn(*args, **kwargs) # type: ignore[reportGeneralTypeIssues] else: self.fn(*args, **kwargs) @@ -1433,7 +1421,7 @@ def flow( flow_run_name: Optional[Union[Callable[[], str], str]] = None, retries: Optional[int] = None, retry_delay_seconds: Optional[Union[int, float]] = None, - task_runner: Optional[TaskRunner] = None, + task_runner: Optional[TaskRunner[PrefectFuture[R]]] = None, description: Optional[str] = None, timeout_seconds: Union[int, float, None] = None, validate_parameters: bool = True, @@ -1442,30 +1430,24 @@ def flow( result_serializer: Optional[ResultSerializer] = None, cache_result_in_memory: bool = True, log_prints: Optional[bool] = None, - on_completion: Optional[ - List[Callable[[FlowSchema, FlowRun, State], Union[Awaitable[None], None]]] - ] = None, - on_failure: Optional[ - List[Callable[[FlowSchema, FlowRun, State], Union[Awaitable[None], None]]] - ] = None, - on_cancellation: Optional[ - List[Callable[[FlowSchema, FlowRun, State], None]] - ] = None, - on_crashed: Optional[List[Callable[[FlowSchema, FlowRun, State], None]]] = None, - on_running: Optional[List[Callable[[FlowSchema, FlowRun, State], None]]] = None, + on_completion: Optional[list[StateHookCallable]] = None, + on_failure: Optional[list[StateHookCallable]] = None, + on_cancellation: Optional[list[StateHookCallable]] = None, + on_crashed: Optional[list[StateHookCallable]] = None, + on_running: Optional[list[StateHookCallable]] = None, ) -> Callable[[Callable[P, R]], Flow[P, R]]: ... 
def flow( - __fn=None, + __fn: Optional[Callable[P, R]] = None, *, name: Optional[str] = None, version: Optional[str] = None, flow_run_name: Optional[Union[Callable[[], str], str]] = None, retries: Optional[int] = None, retry_delay_seconds: Union[int, float, None] = None, - task_runner: Optional[TaskRunner] = None, + task_runner: Optional[TaskRunner[PrefectFuture[R]]] = None, description: Optional[str] = None, timeout_seconds: Union[int, float, None] = None, validate_parameters: bool = True, @@ -1474,17 +1456,11 @@ def flow( result_serializer: Optional[ResultSerializer] = None, cache_result_in_memory: bool = True, log_prints: Optional[bool] = None, - on_completion: Optional[ - List[Callable[[FlowSchema, FlowRun, State], Union[Awaitable[None], None]]] - ] = None, - on_failure: Optional[ - List[Callable[[FlowSchema, FlowRun, State], Union[Awaitable[None], None]]] - ] = None, - on_cancellation: Optional[ - List[Callable[[FlowSchema, FlowRun, State], None]] - ] = None, - on_crashed: Optional[List[Callable[[FlowSchema, FlowRun, State], None]]] = None, - on_running: Optional[List[Callable[[FlowSchema, FlowRun, State], None]]] = None, + on_completion: Optional[list[StateHookCallable]] = None, + on_failure: Optional[list[StateHookCallable]] = None, + on_cancellation: Optional[list[StateHookCallable]] = None, + on_crashed: Optional[list[StateHookCallable]] = None, + on_running: Optional[list[StateHookCallable]] = None, ): """ Decorator to designate a function as a Prefect workflow. @@ -1593,30 +1569,27 @@ def flow( if isinstance(__fn, (classmethod, staticmethod)): method_decorator = type(__fn).__name__ raise TypeError(f"@{method_decorator} should be applied on top of @flow") - return cast( - Flow[P, R], - Flow( - fn=__fn, - name=name, - version=version, - flow_run_name=flow_run_name, - task_runner=task_runner, - description=description, - timeout_seconds=timeout_seconds, - validate_parameters=validate_parameters, - retries=retries, - retry_delay_seconds=retry_delay_seconds, - persist_result=persist_result, - result_storage=result_storage, - result_serializer=result_serializer, - cache_result_in_memory=cache_result_in_memory, - log_prints=log_prints, - on_completion=on_completion, - on_failure=on_failure, - on_cancellation=on_cancellation, - on_crashed=on_crashed, - on_running=on_running, - ), + return Flow( + fn=__fn, + name=name, + version=version, + flow_run_name=flow_run_name, + task_runner=task_runner, + description=description, + timeout_seconds=timeout_seconds, + validate_parameters=validate_parameters, + retries=retries, + retry_delay_seconds=retry_delay_seconds, + persist_result=persist_result, + result_storage=result_storage, + result_serializer=result_serializer, + cache_result_in_memory=cache_result_in_memory, + log_prints=log_prints, + on_completion=on_completion, + on_failure=on_failure, + on_cancellation=on_cancellation, + on_crashed=on_crashed, + on_running=on_running, ) else: return cast( @@ -1668,10 +1641,10 @@ def _raise_on_name_with_banned_characters(name: Optional[str]) -> Optional[str]: def select_flow( - flows: Iterable[Flow], + flows: Iterable[Flow[P, R]], flow_name: Optional[str] = None, from_message: Optional[str] = None, -) -> Flow: +) -> Flow[P, R]: """ Select the only flow in an iterable or a flow specified by name. @@ -1716,7 +1689,7 @@ def select_flow( def load_flow_from_entrypoint( entrypoint: str, use_placeholder_flow: bool = True, -) -> Flow: +) -> Flow[P, Any]: """ Extract a flow object from a script at an entrypoint by running all of the code in the file. 
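A hedged usage sketch for `load_flow_from_entrypoint`, whose return type now carries the `Flow[P, Any]` parameterization; the script path is hypothetical:

```python
from prefect.flows import load_flow_from_entrypoint

# entrypoint format is "<path_to_script>:<flow_function_name>" -- hypothetical path
etl = load_flow_from_entrypoint("flows/etl.py:daily_etl")

# the loaded object is an ordinary Flow and can be called directly
etl()
```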
@@ -1740,7 +1713,7 @@ def load_flow_from_entrypoint( else: path, func_name = entrypoint.rsplit(".", maxsplit=1) try: - flow = import_object(entrypoint) + flow: Flow[P, Any] = import_object(entrypoint) # pyright: ignore[reportRedeclaration] except AttributeError as exc: raise MissingFlowError( f"Flow function with name {func_name!r} not found in {path!r}. " @@ -1749,13 +1722,13 @@ def load_flow_from_entrypoint( # If the flow has dependencies that are not installed in the current # environment, fallback to loading the flow via AST parsing. if use_placeholder_flow: - flow = safe_load_flow_from_entrypoint(entrypoint) + flow: Optional[Flow[P, Any]] = safe_load_flow_from_entrypoint(entrypoint) if flow is None: raise else: raise - if not isinstance(flow, Flow): + if not isinstance(flow, Flow): # pyright: ignore[reportUnnecessaryIsInstance] raise MissingFlowError( f"Function with name {func_name!r} is not a flow. Make sure that it is " "decorated with '@flow'." @@ -1769,8 +1742,8 @@ def serve( pause_on_shutdown: bool = True, print_starting_message: bool = True, limit: Optional[int] = None, - **kwargs, -): + **kwargs: Any, +) -> None: """ Serve the provided list of deployments. @@ -1839,8 +1812,8 @@ async def aserve( pause_on_shutdown: bool = True, print_starting_message: bool = True, limit: Optional[int] = None, - **kwargs, -): + **kwargs: Any, +) -> None: """ Asynchronously serve the provided list of deployments. @@ -1945,7 +1918,7 @@ async def load_flow_from_flow_run( ignore_storage: bool = False, storage_base_path: Optional[str] = None, use_placeholder_flow: bool = True, -) -> Flow: +) -> Flow[P, Any]: """ Load a flow from the location/script provided in a deployment's storage document. @@ -2024,7 +1997,7 @@ async def load_flow_from_flow_run( return flow -def load_placeholder_flow(entrypoint: str, raises: Exception): +def load_placeholder_flow(entrypoint: str, raises: Exception) -> Flow[P, Any]: """ Load a placeholder flow that is initialized with the same arguments as the flow specified in the entrypoint. If called the flow will raise `raises`. @@ -2041,10 +2014,10 @@ def load_placeholder_flow(entrypoint: str, raises: Exception): def _base_placeholder(): raise raises - def sync_placeholder_flow(*args, **kwargs): + def sync_placeholder_flow(*args: "P.args", **kwargs: "P.kwargs"): _base_placeholder() - async def async_placeholder_flow(*args, **kwargs): + async def async_placeholder_flow(*args: "P.args", **kwargs: "P.kwargs"): _base_placeholder() placeholder_flow = ( @@ -2059,7 +2032,7 @@ async def async_placeholder_flow(*args, **kwargs): return Flow(**arguments) -def safe_load_flow_from_entrypoint(entrypoint: str) -> Optional[Flow]: +def safe_load_flow_from_entrypoint(entrypoint: str) -> Optional[Flow[P, Any]]: """ Load a flow from an entrypoint and return None if an exception is raised. @@ -2084,8 +2057,8 @@ def safe_load_flow_from_entrypoint(entrypoint: str) -> Optional[Flow]: def _sanitize_and_load_flow( - func_def: Union[ast.FunctionDef, ast.AsyncFunctionDef], namespace: Dict[str, Any] -) -> Optional[Flow]: + func_def: Union[ast.FunctionDef, ast.AsyncFunctionDef], namespace: dict[str, Any] +) -> Optional[Flow[P, Any]]: """ Attempt to load a flow from the function definition after sanitizing the annotations and defaults that can't be compiled. 
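`serve` and `aserve` now accept `**kwargs: Any` and return `None` explicitly; calling conventions are unchanged. A short sketch of the usual pattern, with an illustrative deployment name:

```python
from prefect import flow, serve


@flow
def say_hello(name: str = "world") -> None:
    print(f"hello {name}")


if __name__ == "__main__":
    # to_deployment builds a RunnerDeployment; serve blocks and executes
    # scheduled runs, here capped at 5 concurrent runs via `limit`
    serve(say_hello.to_deployment(name="hello"), limit=5)
```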
@@ -2122,7 +2095,7 @@ def _sanitize_and_load_flow( arg.annotation = None # Remove defaults that can't be compiled - new_defaults = [] + new_defaults: list[Any] = [] for default in func_def.args.defaults: try: code = compile(ast.Expression(default), "", "eval") @@ -2142,7 +2115,7 @@ def _sanitize_and_load_flow( func_def.args.defaults = new_defaults # Remove kw_defaults that can't be compiled - new_kw_defaults = [] + new_kw_defaults: list[Any] = [] for default in func_def.args.kw_defaults: if default is not None: try: @@ -2201,7 +2174,7 @@ def _sanitize_and_load_flow( def load_flow_arguments_from_entrypoint( - entrypoint: str, arguments: Optional[Union[List[str], Set[str]]] = None + entrypoint: str, arguments: Optional[Union[list[str], set[str]]] = None ) -> dict[str, Any]: """ Extract flow arguments from an entrypoint string. @@ -2235,7 +2208,7 @@ def load_flow_arguments_from_entrypoint( "log_prints", } - result = {} + result: dict[str, Any] = {} for decorator in func_def.decorator_list: if ( @@ -2248,7 +2221,7 @@ def load_flow_arguments_from_entrypoint( if isinstance(keyword.value, ast.Constant): # Use the string value of the argument - result[keyword.arg] = str(keyword.value.value) + result[cast(str, keyword.arg)] = str(keyword.value.value) continue # if the arg value is not a raw str (i.e. a variable or expression), @@ -2261,7 +2234,7 @@ def load_flow_arguments_from_entrypoint( try: evaluated_value = eval(cleaned_value, namespace) # type: ignore - result[keyword.arg] = str(evaluated_value) + result[cast(str, keyword.arg)] = str(evaluated_value) except Exception as e: logger.info( "Failed to parse @flow argument: `%s=%s` due to the following error. Ignoring and falling back to default behavior.", diff --git a/src/prefect/logging/configuration.py b/src/prefect/logging/configuration.py index da56d14e0063..9b666668e33d 100644 --- a/src/prefect/logging/configuration.py +++ b/src/prefect/logging/configuration.py @@ -58,7 +58,7 @@ def load_logging_config(path: Path) -> dict: return flatdict_to_dict(flat_config) -def setup_logging(incremental: Optional[bool] = None) -> dict: +def setup_logging(incremental: Optional[bool] = None) -> dict[str, Any]: """ Sets up logging. diff --git a/src/prefect/logging/highlighters.py b/src/prefect/logging/highlighters.py index 7b4bd1da2752..b842f7c95240 100644 --- a/src/prefect/logging/highlighters.py +++ b/src/prefect/logging/highlighters.py @@ -45,8 +45,7 @@ class StateHighlighter(RegexHighlighter): base_style = "state." 
highlights = [ - rf"(?P<{state.value.lower()}_state>{state.value.title()})" - for state in StateType + rf"(?P<{state.lower()}_state>{state.title()})" for state in StateType ] + [ r"(?PCached)(?=\(type=COMPLETED\))" # Highlight only "Cached" ] diff --git a/src/prefect/logging/loggers.py b/src/prefect/logging/loggers.py index 0f6d8b6f0a2b..4a9211a8d95b 100644 --- a/src/prefect/logging/loggers.py +++ b/src/prefect/logging/loggers.py @@ -4,12 +4,11 @@ from builtins import print from contextlib import contextmanager from functools import lru_cache -from logging import LoggerAdapter, LogRecord -from typing import TYPE_CHECKING, Dict, List, Optional, Union +from logging import LogRecord +from typing import TYPE_CHECKING, Any, Dict, List, Optional, Union from typing_extensions import Self -import prefect from prefect.exceptions import MissingContextError from prefect.logging.filters import ObfuscateApiKeyFilter from prefect.telemetry.logging import add_telemetry_log_handler @@ -22,8 +21,13 @@ from prefect.tasks import Task from prefect.workers.base import BaseWorker +if sys.version_info >= (3, 12): + LoggingAdapter = logging.LoggerAdapter[logging.Logger] +else: + LoggingAdapter = logging.LoggerAdapter -class PrefectLogAdapter(logging.LoggerAdapter): + +class PrefectLogAdapter(LoggingAdapter): """ Adapter that ensures extra kwargs are passed through correctly; without this the `extra` fields set on the adapter would overshadow any provided on a @@ -83,8 +87,8 @@ def get_logger(name: Optional[str] = None) -> logging.Logger: def get_run_logger( - context: Optional["RunContext"] = None, **kwargs: str -) -> Union[logging.Logger, logging.LoggerAdapter]: + context: Optional["RunContext"] = None, **kwargs: Any +) -> Union[logging.Logger, LoggingAdapter]: """ Get a Prefect logger for the current task run or flow run. @@ -103,15 +107,17 @@ def get_run_logger( Raises: MissingContextError: If no context can be found """ + from prefect.context import FlowRunContext, TaskRunContext + # Check for existing contexts - task_run_context = prefect.context.TaskRunContext.get() - flow_run_context = prefect.context.FlowRunContext.get() + task_run_context = TaskRunContext.get() + flow_run_context = FlowRunContext.get() # Apply the context override if context: - if isinstance(context, prefect.context.FlowRunContext): + if isinstance(context, FlowRunContext): flow_run_context = context - elif isinstance(context, prefect.context.TaskRunContext): + elif isinstance(context, TaskRunContext): task_run_context = context else: raise TypeError( @@ -130,7 +136,9 @@ def get_run_logger( ) elif flow_run_context: logger = flow_run_logger( - flow_run=flow_run_context.flow_run, flow=flow_run_context.flow, **kwargs + flow_run=flow_run_context.flow_run, # type: ignore + flow=flow_run_context.flow, + **kwargs, ) elif ( get_logger("prefect.flow_run").disabled @@ -151,9 +159,9 @@ def get_run_logger( def flow_run_logger( flow_run: Union["FlowRun", "ClientFlowRun"], - flow: Optional["Flow"] = None, + flow: Optional["Flow[Any, Any]"] = None, **kwargs: str, -) -> LoggerAdapter: +) -> LoggingAdapter: """ Create a flow run logger with the run's metadata attached. @@ -177,10 +185,10 @@ def flow_run_logger( def task_run_logger( task_run: "TaskRun", - task: "Task" = None, - flow_run: "FlowRun" = None, - flow: "Flow" = None, - **kwargs: str, + task: Optional["Task[Any, Any]"] = None, + flow_run: Optional["FlowRun"] = None, + flow: Optional["Flow[Any, Any]"] = None, + **kwargs: Any, ): """ Create a task run logger with the run's metadata attached. 
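Two ideas from the `loggers.py` changes above are worth a small illustration: the version-gated alias that makes `logging.LoggerAdapter` usable as a generic only on Python 3.12+, and an adapter whose `process` merges per-call `extra` with the adapter's own `extra` (the behavior `PrefectLogAdapter` documents). `MergingAdapter` below is a hedged, stdlib-only stand-in, not Prefect's class.

```python
import logging
import sys
from typing import Any, MutableMapping

# Only Python 3.12+ ships a generic logging.LoggerAdapter, hence the gate.
if sys.version_info >= (3, 12):
    _Adapter = logging.LoggerAdapter[logging.Logger]
else:
    _Adapter = logging.LoggerAdapter


class MergingAdapter(_Adapter):
    def process(
        self, msg: Any, kwargs: MutableMapping[str, Any]
    ) -> tuple[Any, MutableMapping[str, Any]]:
        # Merge the adapter's extra with any extra supplied at the call site
        # instead of letting one overwrite the other.
        kwargs["extra"] = {**(self.extra or {}), **(kwargs.get("extra") or {})}
        return msg, kwargs


logging.basicConfig(level=logging.INFO)
adapter = MergingAdapter(logging.getLogger("demo"), {"run_id": "abc"})
adapter.info("hello", extra={"attempt": 1})  # record carries run_id and attempt
```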
@@ -193,8 +201,10 @@ def task_run_logger( If only the flow run context is available, it will be used for default values of `flow_run` and `flow`. """ + from prefect.context import FlowRunContext + if not flow_run or not flow: - flow_run_context = prefect.context.FlowRunContext.get() + flow_run_context = FlowRunContext.get() if flow_run_context: flow_run = flow_run or flow_run_context.flow_run flow = flow or flow_run_context.flow @@ -269,7 +279,7 @@ def disable_run_logger(): yield -def print_as_log(*args, **kwargs): +def print_as_log(*args: Any, **kwargs: Any) -> None: """ A patch for `print` to send printed messages to the Prefect run logger. @@ -333,7 +343,7 @@ class LogEavesdropper(logging.Handler): # Outputs: "Hello, world!\nAnother one!" """ - _target_logger: logging.Logger + _target_logger: Optional[logging.Logger] _lines: List[str] def __init__(self, eavesdrop_on: str, level: int = logging.NOTSET): diff --git a/src/prefect/main.py b/src/prefect/main.py index 4fea3999e2ad..637e8be63fe2 100644 --- a/src/prefect/main.py +++ b/src/prefect/main.py @@ -1,4 +1,6 @@ # Import user-facing API +from typing import Any + from prefect.deployments import deploy from prefect.states import State from prefect.logging import get_run_logger @@ -9,8 +11,8 @@ from prefect.utilities.annotations import unmapped, allow_failure from prefect.results import BaseResult, ResultRecordMetadata from prefect.flow_runs import pause_flow_run, resume_flow_run, suspend_flow_run -from prefect.client.orchestration import get_client, PrefectClient -from prefect.client.cloud import get_cloud_client, CloudClient +from prefect.client.orchestration import get_client +from prefect.client.cloud import get_cloud_client import prefect.variables import prefect.runtime @@ -25,28 +27,17 @@ # Perform any forward-ref updates needed for Pydantic models import prefect.client.schemas -prefect.context.FlowRunContext.model_rebuild( - _types_namespace={ - "Flow": Flow, - "BaseResult": BaseResult, - "ResultRecordMetadata": ResultRecordMetadata, - } -) -prefect.context.TaskRunContext.model_rebuild( - _types_namespace={"Task": Task, "BaseResult": BaseResult} -) -prefect.client.schemas.State.model_rebuild( - _types_namespace={ - "BaseResult": BaseResult, - "ResultRecordMetadata": ResultRecordMetadata, - } -) -prefect.client.schemas.StateCreate.model_rebuild( - _types_namespace={ - "BaseResult": BaseResult, - "ResultRecordMetadata": ResultRecordMetadata, - } +_types: dict[str, Any] = dict( + Task=Task, + Flow=Flow, + BaseResult=BaseResult, + ResultRecordMetadata=ResultRecordMetadata, ) +prefect.context.FlowRunContext.model_rebuild(_types_namespace=_types) +prefect.context.TaskRunContext.model_rebuild(_types_namespace=_types) +prefect.client.schemas.State.model_rebuild(_types_namespace=_types) +prefect.client.schemas.StateCreate.model_rebuild(_types_namespace=_types) +prefect.client.schemas.OrchestrationResult.model_rebuild(_types_namespace=_types) Transaction.model_rebuild() # Configure logging @@ -76,6 +67,7 @@ "flow", "Flow", "get_client", + "get_cloud_client", "get_run_logger", "State", "tags", diff --git a/src/prefect/results.py b/src/prefect/results.py index dd17f614953d..19308a86b602 100644 --- a/src/prefect/results.py +++ b/src/prefect/results.py @@ -35,10 +35,13 @@ model_validator, ) from pydantic_core import PydanticUndefinedType -from pydantic_extra_types.pendulum_dt import DateTime from typing_extensions import ParamSpec, Self import prefect +from prefect._experimental.lineage import ( + emit_result_read_event, + 
emit_result_write_event, +) from prefect._internal.compatibility import deprecated from prefect._internal.compatibility.deprecated import deprecated_field from prefect.blocks.core import Block @@ -57,6 +60,7 @@ from prefect.logging import get_logger from prefect.serializers import PickleSerializer, Serializer from prefect.settings.context import get_current_settings +from prefect.types import DateTime from prefect.utilities.annotations import NotSet from prefect.utilities.asyncutils import sync_compatible from prefect.utilities.pydantic import get_dispatch_key, lookup_type, register_base_type @@ -129,7 +133,7 @@ async def resolve_result_storage( elif isinstance(result_storage, Path): storage_block = LocalFileSystem(basepath=str(result_storage)) elif isinstance(result_storage, str): - storage_block = await Block.load(result_storage, client=client) + storage_block = await Block.aload(result_storage, client=client) storage_block_id = storage_block._block_document_id assert storage_block_id is not None, "Loaded storage blocks must have ids" elif isinstance(result_storage, UUID): @@ -168,7 +172,7 @@ async def get_or_create_default_task_scheduling_storage() -> ResultStorage: default_block = settings.tasks.scheduling.default_storage_block if default_block is not None: - return await Block.load(default_block) + return await Block.aload(default_block) # otherwise, use the local file system basepath = settings.results.local_storage_path @@ -232,6 +236,10 @@ def _format_user_supplied_storage_key(key: str) -> str: T = TypeVar("T") +def default_cache() -> LRUCache[str, "ResultRecord[Any]"]: + return LRUCache(maxsize=1000) + + def result_storage_discriminator(x: Any) -> str: if isinstance(x, dict): if "block_type_slug" in x: @@ -284,7 +292,7 @@ class ResultStore(BaseModel): cache_result_in_memory: bool = Field(default=True) serializer: Serializer = Field(default_factory=get_default_result_serializer) storage_key_fn: Callable[[], str] = Field(default=DEFAULT_STORAGE_KEY_FN) - cache: LRUCache = Field(default_factory=lambda: LRUCache(maxsize=1000)) + cache: LRUCache[str, "ResultRecord[Any]"] = Field(default_factory=default_cache) # Deprecated fields persist_result: Optional[bool] = Field(default=None) @@ -319,7 +327,7 @@ async def update_for_flow(self, flow: "Flow") -> Self: return self.model_copy(update=update) @sync_compatible - async def update_for_task(self: Self, task: "Task") -> Self: + async def update_for_task(self: Self, task: "Task[P, R]") -> Self: """ Create a new result store for a task. @@ -446,8 +454,15 @@ async def aexists(self, key: str) -> bool: """ return await self._exists(key=key, _sync=False) + def _resolved_key_path(self, key: str) -> str: + if self.result_storage_block_id is None and hasattr( + self.result_storage, "_resolve_path" + ): + return str(self.result_storage._resolve_path(key)) + return key + @sync_compatible - async def _read(self, key: str, holder: str) -> "ResultRecord": + async def _read(self, key: str, holder: str) -> "ResultRecord[Any]": """ Read a result record from storage. 
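The new `default_cache()` factory above exists so each `ResultStore` instance gets its own bounded cache. A minimal sketch of that pattern, assuming `cachetools` and Pydantic v2 with `arbitrary_types_allowed`; the model name `TinyStore` is hypothetical:

```python
from cachetools import LRUCache
from pydantic import BaseModel, ConfigDict, Field


def default_cache() -> LRUCache:
    # A fresh, bounded cache per call; used as a default_factory below.
    return LRUCache(maxsize=1000)


class TinyStore(BaseModel):
    model_config = ConfigDict(arbitrary_types_allowed=True)

    cache: LRUCache = Field(default_factory=default_cache)


a, b = TinyStore(), TinyStore()
a.cache["key"] = "value"
print("key" in b.cache)  # False: each store owns an independent cache
```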
@@ -465,8 +480,12 @@ async def _read(self, key: str, holder: str) -> "ResultRecord": if self.lock_manager is not None and not self.is_lock_holder(key, holder): await self.await_for_lock(key) - if key in self.cache: - return self.cache[key] + resolved_key_path = self._resolved_key_path(key) + + if resolved_key_path in self.cache: + cached_result = self.cache[resolved_key_path] + await emit_result_read_event(self, resolved_key_path, cached=True) + return cached_result if self.result_storage is None: self.result_storage = await get_default_result_storage() @@ -478,31 +497,28 @@ async def _read(self, key: str, holder: str) -> "ResultRecord": metadata.storage_key is not None ), "Did not find storage key in metadata" result_content = await self.result_storage.read_path(metadata.storage_key) - result_record = ResultRecord.deserialize_from_result_and_metadata( + result_record: ResultRecord[ + Any + ] = ResultRecord.deserialize_from_result_and_metadata( result=result_content, metadata=metadata_content ) + await emit_result_read_event(self, resolved_key_path) else: content = await self.result_storage.read_path(key) - result_record = ResultRecord.deserialize( + result_record: ResultRecord[Any] = ResultRecord.deserialize( content, backup_serializer=self.serializer ) + await emit_result_read_event(self, resolved_key_path) if self.cache_result_in_memory: - if self.result_storage_block_id is None and hasattr( - self.result_storage, "_resolve_path" - ): - cache_key = str(self.result_storage._resolve_path(key)) - else: - cache_key = key - - self.cache[cache_key] = result_record + self.cache[resolved_key_path] = result_record return result_record def read( self, key: str, holder: Optional[str] = None, - ) -> "ResultRecord": + ) -> "ResultRecord[Any]": """ Read a result record from storage. @@ -520,7 +536,7 @@ async def aread( self, key: str, holder: Optional[str] = None, - ) -> "ResultRecord": + ) -> "ResultRecord[Any]": """ Read a result record from storage. 
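The reworked `_read` above resolves the storage key once, serves cache hits under the resolved path, and emits lineage read events either way. The sketch below mirrors only the resolved-key read-through caching, with invented names (`MiniStore`) and the event emission reduced to comments:

```python
from pathlib import Path
from typing import Optional


class MiniStore:
    def __init__(self, basepath: Path) -> None:
        self.basepath = basepath
        self.cache: dict[str, bytes] = {}

    def _resolved_key_path(self, key: str) -> str:
        # Cache entries are keyed on the fully resolved path, not the raw key.
        return str(self.basepath / key)

    def read(self, key: str) -> Optional[bytes]:
        resolved = self._resolved_key_path(key)
        if resolved in self.cache:
            # (Prefect also emits a lineage "result read" event here, cached=True.)
            return self.cache[resolved]
        path = Path(resolved)
        if not path.exists():
            return None
        content = path.read_bytes()
        # (A lineage "result read" event would be emitted here as well.)
        self.cache[resolved] = content
        return content


store = MiniStore(Path("."))
print(store.read("missing-key"))  # None: nothing cached or stored yet
```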
@@ -663,12 +679,13 @@ async def _persist_result_record(self, result_record: "ResultRecord", holder: st base_key, content=result_record.serialize_metadata(), ) + await emit_result_write_event(self, result_record.metadata.storage_key) # Otherwise, write the result metadata and result together else: await self.result_storage.write_path( result_record.metadata.storage_key, content=result_record.serialize() ) - + await emit_result_write_event(self, result_record.metadata.storage_key) if self.cache_result_in_memory: self.cache[key] = result_record @@ -898,7 +915,11 @@ async def store_parameters(self, identifier: UUID, parameters: Dict[str, Any]): ) @sync_compatible - async def read_parameters(self, identifier: UUID) -> Dict[str, Any]: + async def read_parameters(self, identifier: UUID) -> dict[str, Any]: + if self.result_storage is None: + raise ValueError( + "Result store is not configured - must have a result storage block to read parameters" + ) record = ResultRecord.deserialize( await self.result_storage.read_path(f"parameters/{identifier}") ) diff --git a/src/prefect/runner/runner.py b/src/prefect/runner/runner.py index 71622028bbd6..3e225117b2a1 100644 --- a/src/prefect/runner/runner.py +++ b/src/prefect/runner/runner.py @@ -43,7 +43,17 @@ def fast_flow(): from copy import deepcopy from functools import partial from pathlib import Path -from typing import TYPE_CHECKING, Callable, Dict, Iterable, List, Optional, Set, Union +from typing import ( + TYPE_CHECKING, + Any, + Callable, + Dict, + Iterable, + List, + Optional, + Set, + Union, +) from uuid import UUID, uuid4 import anyio @@ -190,7 +200,7 @@ def goodbye_flow(name): self._cancelling_flow_run_ids = set() self._scheduled_task_scopes = set() self._deployment_ids: Set[UUID] = set() - self._flow_run_process_map: Dict[UUID, Dict] = dict() + self._flow_run_process_map: dict[UUID, dict[str, Any]] = dict() self._tmp_dir: Path = ( Path(tempfile.gettempdir()) / "runner_storage" / str(uuid4()) @@ -432,10 +442,14 @@ def goodbye_flow(name): ) ) - def execute_in_background(self, func, *args, **kwargs): + def execute_in_background( + self, func: Callable[..., Any], *args: Any, **kwargs: Any + ): """ Executes a function in the background. 
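`execute_in_background` above hands a coroutine to `asyncio.run_coroutine_threadsafe` against the runner's loop. Below is a stdlib-only sketch of that mechanism, with a placeholder coroutine (`add`) and the loop running in a background thread:

```python
import asyncio
import threading

# A loop owned by a background thread, like the runner's internal loop.
loop = asyncio.new_event_loop()
threading.Thread(target=loop.run_forever, daemon=True).start()


async def add(a: int, b: int) -> int:
    await asyncio.sleep(0.1)
    return a + b


# Submit the coroutine from synchronous code and wait on the returned Future.
future = asyncio.run_coroutine_threadsafe(add(2, 3), loop)
print(future.result(timeout=5))  # 5
loop.call_soon_threadsafe(loop.stop)
```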
""" + if TYPE_CHECKING: + assert self._loop is not None return asyncio.run_coroutine_threadsafe(func(*args, **kwargs), self._loop) @@ -536,7 +550,7 @@ def _get_flow_run_logger(self, flow_run: "FlowRun") -> PrefectLogAdapter: async def _run_process( self, flow_run: "FlowRun", - task_status: Optional[anyio.abc.TaskStatus] = None, + task_status: Optional[anyio.abc.TaskStatus[Any]] = None, entrypoint: Optional[str] = None, ): """ @@ -723,7 +737,9 @@ async def _get_and_submit_flow_runs(self): return await self._submit_scheduled_flow_runs(flow_run_response=runs_response) async def _check_for_cancelled_flow_runs( - self, should_stop: Callable = lambda: False, on_stop: Callable = lambda: None + self, + should_stop: Callable[[], bool] = lambda: False, + on_stop: Callable[[], None] = lambda: None, ): """ Checks for flow runs with CANCELLING a cancelling state and attempts to @@ -862,31 +878,37 @@ def _emit_flow_run_cancelled_event( flow: "Optional[APIFlow]", deployment: "Optional[Deployment]", ): - related = [] - tags = [] + related: list[RelatedResource] = [] + tags: list[str] = [] if deployment: related.append( - { - "prefect.resource.id": f"prefect.deployment.{deployment.id}", - "prefect.resource.role": "deployment", - "prefect.resource.name": deployment.name, - } + RelatedResource( + { + "prefect.resource.id": f"prefect.deployment.{deployment.id}", + "prefect.resource.role": "deployment", + "prefect.resource.name": deployment.name, + } + ) ) tags.extend(deployment.tags) if flow: related.append( + RelatedResource( + { + "prefect.resource.id": f"prefect.flow.{flow.id}", + "prefect.resource.role": "flow", + "prefect.resource.name": flow.name, + } + ) + ) + related.append( + RelatedResource( { - "prefect.resource.id": f"prefect.flow.{flow.id}", - "prefect.resource.role": "flow", - "prefect.resource.name": flow.name, + "prefect.resource.id": f"prefect.flow-run.{flow_run.id}", + "prefect.resource.role": "flow-run", + "prefect.resource.name": flow_run.name, } ) - related.append( - { - "prefect.resource.id": f"prefect.flow-run.{flow_run.id}", - "prefect.resource.role": "flow-run", - "prefect.resource.name": flow_run.name, - } ) tags.extend(flow_run.tags) diff --git a/src/prefect/runner/server.py b/src/prefect/runner/server.py index 8a8f6ca756f9..9a3688b09b5c 100644 --- a/src/prefect/runner/server.py +++ b/src/prefect/runner/server.py @@ -1,5 +1,5 @@ import uuid -from typing import TYPE_CHECKING, Any, Callable, Dict, Optional, Tuple +from typing import TYPE_CHECKING, Any, Callable, Coroutine, Hashable, Optional, Tuple import pendulum import uvicorn @@ -22,7 +22,7 @@ PREFECT_RUNNER_SERVER_MISSED_POLLS_TOLERANCE, PREFECT_RUNNER_SERVER_PORT, ) -from prefect.utilities.asyncutils import sync_compatible +from prefect.utilities.asyncutils import run_coro_as_sync from prefect.utilities.importtools import load_script_as_module if TYPE_CHECKING: @@ -38,11 +38,13 @@ class RunnerGenericFlowRunRequest(BaseModel): entrypoint: str - parameters: Optional[Dict[str, Any]] = None + parameters: Optional[dict[str, Any]] = None parent_task_run_id: Optional[uuid.UUID] = None -def perform_health_check(runner, delay_threshold: Optional[int] = None) -> JSONResponse: +def perform_health_check( + runner: "Runner", delay_threshold: Optional[int] = None +) -> Callable[..., JSONResponse]: if delay_threshold is None: delay_threshold = ( PREFECT_RUNNER_SERVER_MISSED_POLLS_TOLERANCE.value() @@ -63,15 +65,15 @@ def _health_check(): return _health_check -def run_count(runner) -> int: - def _run_count(): - run_count = 
len(runner._flow_run_process_map) +def run_count(runner: "Runner") -> Callable[..., int]: + def _run_count() -> int: + run_count = len(runner._flow_run_process_map) # pyright: ignore[reportPrivateUsage] return run_count return _run_count -def shutdown(runner) -> int: +def shutdown(runner: "Runner") -> Callable[..., JSONResponse]: def _shutdown(): runner.stop() return JSONResponse(status_code=status.HTTP_200_OK, content={"message": "OK"}) @@ -81,9 +83,9 @@ def _shutdown(): async def _build_endpoint_for_deployment( deployment: "DeploymentResponse", runner: "Runner" -) -> Callable: +) -> Callable[..., Coroutine[Any, Any, JSONResponse]]: async def _create_flow_run_for_deployment( - body: Optional[Dict[Any, Any]] = None, + body: Optional[dict[Any, Any]] = None, ) -> JSONResponse: body = body or {} if deployment.enforce_parameter_schema and deployment.parameter_openapi_schema: @@ -116,11 +118,11 @@ async def _create_flow_run_for_deployment( async def get_deployment_router( runner: "Runner", -) -> Tuple[APIRouter, Dict[str, Dict]]: +) -> Tuple[APIRouter, dict[Hashable, Any]]: router = APIRouter() - schemas = {} + schemas: dict[Hashable, Any] = {} async with get_client() as client: - for deployment_id in runner._deployment_ids: + for deployment_id in runner._deployment_ids: # pyright: ignore[reportPrivateUsage] deployment = await client.read_deployment(deployment_id) router.add_api_route( f"/deployment/{deployment.id}/run", @@ -142,21 +144,21 @@ async def get_deployment_router( return router, schemas -async def get_subflow_schemas(runner: "Runner") -> Dict[str, Dict]: +async def get_subflow_schemas(runner: "Runner") -> dict[str, dict[str, Any]]: """ Load available subflow schemas by filtering for only those subflows in the deployment entrypoint's import space. """ - schemas = {} + schemas: dict[str, dict[str, Any]] = {} async with get_client() as client: - for deployment_id in runner._deployment_ids: + for deployment_id in runner._deployment_ids: # pyright: ignore[reportPrivateUsage] deployment = await client.read_deployment(deployment_id) if deployment.entrypoint is None: continue script = deployment.entrypoint.split(":")[0] module = load_script_as_module(script) - subflows = [ + subflows: list[Flow[Any, Any]] = [ obj for obj in module.__dict__.values() if isinstance(obj, Flow) ] for flow in subflows: @@ -165,7 +167,7 @@ async def get_subflow_schemas(runner: "Runner") -> Dict[str, Dict]: return schemas -def _flow_in_schemas(flow: Flow, schemas: Dict[str, Dict]) -> bool: +def _flow_in_schemas(flow: Flow[Any, Any], schemas: dict[str, dict[str, Any]]) -> bool: """ Check if a flow is in the schemas dict, either by name or by name with dashes replaced with underscores. @@ -174,7 +176,9 @@ def _flow_in_schemas(flow: Flow, schemas: Dict[str, Dict]) -> bool: return flow.name in schemas or flow_name_with_dashes in schemas -def _flow_schema_changed(flow: Flow, schemas: Dict[str, Dict]) -> bool: +def _flow_schema_changed( + flow: Flow[Any, Any], schemas: dict[str, dict[str, Any]] +) -> bool: """ Check if a flow's schemas have changed, either by bame of by name with dashes replaced with underscores. 
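The runner webserver helpers above (`run_count`, `shutdown`, the deployment endpoints) are factories that close over a `Runner` and return the actual route handlers, which is why their return types are now spelled as `Callable[..., int]` or `Callable[..., JSONResponse]`. A hedged sketch of that closure-factory pattern with a stripped-down, hypothetical `Runner`:

```python
from typing import Any

from fastapi import APIRouter, FastAPI


class Runner:
    # Stripped-down stand-in for the real runner, which tracks flow run processes.
    def __init__(self) -> None:
        self.flow_run_process_map: dict[Any, dict[str, Any]] = {}


def run_count(runner: Runner):
    # Factory: the returned closure is the actual route handler.
    def _run_count() -> int:
        return len(runner.flow_run_process_map)

    return _run_count


runner = Runner()
router = APIRouter()
router.add_api_route("/run_count", run_count(runner), methods=["GET"])

app = FastAPI()
app.include_router(router)
```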
@@ -188,8 +192,8 @@ def _flow_schema_changed(flow: Flow, schemas: Dict[str, Dict]) -> bool: def _build_generic_endpoint_for_flows( - runner: "Runner", schemas: Dict[str, Dict] -) -> Callable: + runner: "Runner", schemas: dict[str, dict[str, Any]] +) -> Callable[..., Coroutine[Any, Any, JSONResponse]]: async def _create_flow_run_for_flow_from_fqn( body: RunnerGenericFlowRunRequest, ) -> JSONResponse: @@ -241,7 +245,6 @@ async def _create_flow_run_for_flow_from_fqn( return _create_flow_run_for_flow_from_fqn -@sync_compatible async def build_server(runner: "Runner") -> FastAPI: """ Build a FastAPI server for a runner. @@ -297,16 +300,11 @@ def start_webserver(runner: "Runner", log_level: Optional[str] = None) -> None: host = PREFECT_RUNNER_SERVER_HOST.value() port = PREFECT_RUNNER_SERVER_PORT.value() log_level = log_level or PREFECT_RUNNER_SERVER_LOG_LEVEL.value() - webserver = build_server(runner) + webserver = run_coro_as_sync(build_server(runner)) + if TYPE_CHECKING: + assert webserver is not None, "webserver should be built" + assert log_level is not None, "log_level should be set" + uvicorn.run( webserver, host=host, port=port, log_level=log_level.lower() ) # Uvicorn supports only lowercase log_level - # From the Uvicorn config file: - # LOG_LEVELS: dict[str, int] = { - # "critical": logging.CRITICAL, - # "error": logging.ERROR, - # "warning": logging.WARNING, - # "info": logging.INFO, - # "debug": logging.DEBUG, - # "trace": TRACE_LOG_LEVEL, - # } diff --git a/src/prefect/runner/storage.py b/src/prefect/runner/storage.py index 8a77b044482d..6de697d2462b 100644 --- a/src/prefect/runner/storage.py +++ b/src/prefect/runner/storage.py @@ -53,14 +53,14 @@ async def pull_code(self): """ ... - def to_pull_step(self) -> dict: + def to_pull_step(self) -> dict[str, Any]: """ Returns a dictionary representation of the storage object that can be used as a deployment pull step. """ ... - def __eq__(self, __value) -> bool: + def __eq__(self, __value: Any) -> bool: """ Equality check for runner storage objects. """ @@ -69,7 +69,7 @@ def __eq__(self, __value) -> bool: class GitCredentials(TypedDict, total=False): username: str - access_token: Union[str, Secret] + access_token: Union[str, Secret[str]] class GitRepository: diff --git a/src/prefect/runner/submit.py b/src/prefect/runner/submit.py index f57d9ccc10cf..ec42a4029a79 100644 --- a/src/prefect/runner/submit.py +++ b/src/prefect/runner/submit.py @@ -42,11 +42,8 @@ async def _submit_flow_to_runner( Returns: A `FlowRun` object representing the flow run that was submitted. 
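With `@sync_compatible` removed from `build_server`, `start_webserver` now builds the app via `run_coro_as_sync(build_server(runner))`. The sketch below shows the same shape with the standard library's `asyncio.run` standing in for Prefect's helper; it assumes no event loop is already running, and `build_app` is an invented name:

```python
import asyncio

from fastapi import FastAPI


async def build_app() -> FastAPI:
    app = FastAPI()

    @app.get("/health")
    async def health() -> dict[str, str]:
        return {"status": "ok"}

    return app


# The synchronous caller gets a fully built app; Prefect uses its own
# run_coro_as_sync helper in place of asyncio.run.
app = asyncio.run(build_app())
```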
""" - from prefect.utilities.engine import ( - _dynamic_key_for_task_run, - collect_task_run_inputs, - resolve_inputs, - ) + from prefect.utilities._engine import dynamic_key_for_task_run + from prefect.utilities.engine import collect_task_run_inputs, resolve_inputs async with get_client() as client: if not retry_failed_submissions: @@ -67,7 +64,7 @@ async def _submit_flow_to_runner( parent_flow_run_context.flow_run.id if parent_flow_run_context else None ), dynamic_key=( - _dynamic_key_for_task_run(parent_flow_run_context, dummy_task) + dynamic_key_for_task_run(parent_flow_run_context, dummy_task) if parent_flow_run_context else str(uuid.uuid4()) ), diff --git a/src/prefect/runner/utils.py b/src/prefect/runner/utils.py index bdc554524952..9f710f9b9d65 100644 --- a/src/prefect/runner/utils.py +++ b/src/prefect/runner/utils.py @@ -1,5 +1,5 @@ from copy import deepcopy -from typing import Any, Dict +from typing import Any from fastapi import FastAPI from fastapi.openapi.utils import get_openapi @@ -8,8 +8,8 @@ def inject_schemas_into_openapi( - webserver: FastAPI, schemas_to_inject: Dict[str, Any] -) -> Dict[str, Any]: + webserver: FastAPI, schemas_to_inject: dict[str, Any] +) -> dict[str, Any]: """ Augments the webserver's OpenAPI schema with additional schemas from deployments / flows / tasks. @@ -29,8 +29,8 @@ def inject_schemas_into_openapi( def merge_definitions( - injected_schemas: Dict[str, Any], openapi_schema: Dict[str, Any] -) -> Dict[str, Any]: + injected_schemas: dict[str, Any], openapi_schema: dict[str, Any] +) -> dict[str, Any]: """ Integrates definitions from injected schemas into the OpenAPI components. @@ -69,7 +69,7 @@ def update_refs_in_schema(schema_item: Any, new_ref: str) -> None: update_refs_in_schema(item, new_ref) -def update_refs_to_components(openapi_schema: Dict[str, Any]) -> Dict[str, Any]: +def update_refs_to_components(openapi_schema: dict[str, Any]) -> dict[str, Any]: """ Updates all `$ref` fields in the OpenAPI schema to reference the components section. 
diff --git a/src/prefect/serializers.py b/src/prefect/serializers.py index c89eb68031af..bbd188cd5907 100644 --- a/src/prefect/serializers.py +++ b/src/prefect/serializers.py @@ -13,7 +13,7 @@ import abc import base64 -from typing import Any, Dict, Generic, Optional, Type +from typing import Any, Generic, Optional, Type, Union from pydantic import ( BaseModel, @@ -23,7 +23,7 @@ ValidationError, field_validator, ) -from typing_extensions import Literal, Self, TypeVar +from typing_extensions import Self, TypeVar from prefect._internal.schemas.validators import ( cast_type_names_to_serializers, @@ -54,7 +54,7 @@ def prefect_json_object_encoder(obj: Any) -> Any: } -def prefect_json_object_decoder(result: dict): +def prefect_json_object_decoder(result: dict[str, Any]): """ `JSONDecoder.object_hook` for decoding objects from JSON when previously encoded with `prefect_json_object_encoder` @@ -80,12 +80,16 @@ def __init__(self, **data: Any) -> None: data.setdefault("type", type_string) super().__init__(**data) - def __new__(cls: Type[Self], **kwargs) -> Self: + def __new__(cls: Type[Self], **kwargs: Any) -> Self: if "type" in kwargs: try: subcls = lookup_type(cls, dispatch_key=kwargs["type"]) except KeyError as exc: - raise ValidationError(errors=[exc], model=cls) + raise ValidationError.from_exception_data( + title=cls.__name__, + line_errors=[{"type": str(exc), "input": kwargs["type"]}], + input_type="python", + ) return super().__new__(subcls) else: @@ -104,7 +108,7 @@ def loads(self, blob: bytes) -> D: model_config = ConfigDict(extra="forbid") @classmethod - def __dispatch_key__(cls) -> str: + def __dispatch_key__(cls) -> Optional[str]: type_str = cls.model_fields["type"].default return type_str if isinstance(type_str, str) else None @@ -119,19 +123,15 @@ class PickleSerializer(Serializer): - Wraps pickles in base64 for safe transmission. """ - type: Literal["pickle"] = "pickle" + type: str = Field(default="pickle", frozen=True) picklelib: str = "cloudpickle" picklelib_version: Optional[str] = None @field_validator("picklelib") - def check_picklelib(cls, value): + def check_picklelib(cls, value: str) -> str: return validate_picklelib(value) - # @model_validator(mode="before") - # def check_picklelib_version(cls, values): - # return validate_picklelib_version(values) - def dumps(self, obj: Any) -> bytes: pickler = from_qualified_name(self.picklelib) blob = pickler.dumps(obj) @@ -151,7 +151,7 @@ class JSONSerializer(Serializer): Wraps the `json` library to serialize to UTF-8 bytes instead of string types. """ - type: Literal["json"] = "json" + type: str = Field(default="json", frozen=True) jsonlib: str = "json" object_encoder: Optional[str] = Field( @@ -171,23 +171,27 @@ class JSONSerializer(Serializer): "by our default `object_encoder`." 
), ) - dumps_kwargs: Dict[str, Any] = Field(default_factory=dict) - loads_kwargs: Dict[str, Any] = Field(default_factory=dict) + dumps_kwargs: dict[str, Any] = Field(default_factory=dict) + loads_kwargs: dict[str, Any] = Field(default_factory=dict) @field_validator("dumps_kwargs") - def dumps_kwargs_cannot_contain_default(cls, value): + def dumps_kwargs_cannot_contain_default( + cls, value: dict[str, Any] + ) -> dict[str, Any]: return validate_dump_kwargs(value) @field_validator("loads_kwargs") - def loads_kwargs_cannot_contain_object_hook(cls, value): + def loads_kwargs_cannot_contain_object_hook( + cls, value: dict[str, Any] + ) -> dict[str, Any]: return validate_load_kwargs(value) - def dumps(self, data: Any) -> bytes: + def dumps(self, obj: Any) -> bytes: json = from_qualified_name(self.jsonlib) kwargs = self.dumps_kwargs.copy() if self.object_encoder: kwargs["default"] = from_qualified_name(self.object_encoder) - result = json.dumps(data, **kwargs) + result = json.dumps(obj, **kwargs) if isinstance(result, str): # The standard library returns str but others may return bytes directly result = result.encode() @@ -213,17 +217,17 @@ class CompressedSerializer(Serializer): level: If not null, the level of compression to pass to `compress`. """ - type: Literal["compressed"] = "compressed" + type: str = Field(default="compressed", frozen=True) serializer: Serializer compressionlib: str = "lzma" @field_validator("serializer", mode="before") - def validate_serializer(cls, value): + def validate_serializer(cls, value: Union[str, Serializer]) -> Serializer: return cast_type_names_to_serializers(value) @field_validator("compressionlib") - def check_compressionlib(cls, value): + def check_compressionlib(cls, value: str) -> str: return validate_compressionlib(value) def dumps(self, obj: Any) -> bytes: @@ -242,7 +246,7 @@ class CompressedPickleSerializer(CompressedSerializer): A compressed serializer preconfigured to use the pickle serializer. """ - type: Literal["compressed/pickle"] = "compressed/pickle" + type: str = Field(default="compressed/pickle", frozen=True) serializer: Serializer = Field(default_factory=PickleSerializer) @@ -252,6 +256,6 @@ class CompressedJSONSerializer(CompressedSerializer): A compressed serializer preconfigured to use the json serializer. 
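The serializer `type` fields above move from `Literal[...]` annotations to plain `str` fields with `Field(default=..., frozen=True)`, so subclasses can still override the default while instances keep the value immutable. A small sketch with stand-in class names (`DemoSerializer`, `DemoCompressedJSON`), assuming Pydantic v2:

```python
from pydantic import BaseModel, Field


class DemoSerializer(BaseModel):
    # A plain str field with a frozen, class-specific default replaces Literal[...].
    type: str = Field(default="json", frozen=True)


class DemoCompressedJSON(DemoSerializer):
    type: str = Field(default="compressed/json", frozen=True)


s = DemoCompressedJSON()
print(s.type)  # "compressed/json"
# s.type = "other"  # would raise: the field is frozen after instantiation
```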
""" - type: Literal["compressed/json"] = "compressed/json" + type: str = Field(default="compressed/json", frozen=True) serializer: Serializer = Field(default_factory=JSONSerializer) diff --git a/src/prefect/server/api/deployments.py b/src/prefect/server/api/deployments.py index a690b602322b..347b8f33a60a 100644 --- a/src/prefect/server/api/deployments.py +++ b/src/prefect/server/api/deployments.py @@ -9,7 +9,6 @@ import jsonschema.exceptions import pendulum from fastapi import Body, Depends, HTTPException, Path, Response, status -from pydantic_extra_types.pendulum_dt import DateTime from starlette.background import BackgroundTasks import prefect.server.api.dependencies as dependencies @@ -27,6 +26,7 @@ from prefect.server.models.workers import DEFAULT_AGENT_WORK_POOL_NAME from prefect.server.schemas.responses import DeploymentPaginationResponse from prefect.server.utilities.server import PrefectRouter +from prefect.types import DateTime from prefect.utilities.schema_tools.hydration import ( HydrationContext, HydrationError, diff --git a/src/prefect/server/api/flow_runs.py b/src/prefect/server/api/flow_runs.py index 7473a815ba29..bace90d0f8e4 100644 --- a/src/prefect/server/api/flow_runs.py +++ b/src/prefect/server/api/flow_runs.py @@ -21,7 +21,6 @@ status, ) from fastapi.responses import ORJSONResponse, PlainTextResponse, StreamingResponse -from pydantic_extra_types.pendulum_dt import DateTime from sqlalchemy.exc import IntegrityError import prefect.server.api.dependencies as dependencies @@ -45,6 +44,7 @@ OrchestrationResult, ) from prefect.server.utilities.server import PrefectRouter +from prefect.types import DateTime from prefect.utilities import schema_tools logger = get_logger("server.api") @@ -56,12 +56,12 @@ async def create_flow_run( flow_run: schemas.actions.FlowRunCreate, db: PrefectDBInterface = Depends(provide_database_interface), - response: Response = None, + response: Response = None, # type: ignore created_by: Optional[schemas.core.CreatedBy] = Depends(dependencies.get_created_by), orchestration_parameters: Dict[str, Any] = Depends( orchestration_dependencies.provide_flow_orchestration_parameters ), - api_version=Depends(dependencies.provide_request_api_version), + api_version: str = Depends(dependencies.provide_request_api_version), ) -> schemas.responses.FlowRunResponse: """ Create a flow run. If a flow run with the same flow_id and @@ -70,20 +70,22 @@ async def create_flow_run( If no state is provided, the flow run will be created in a PENDING state. 
""" # hydrate the input model into a full flow run / state model - flow_run = schemas.core.FlowRun(**flow_run.model_dump(), created_by=created_by) + flow_run_object = schemas.core.FlowRun( + **flow_run.model_dump(), created_by=created_by + ) # pass the request version to the orchestration engine to support compatibility code orchestration_parameters.update({"api-version": api_version}) - if not flow_run.state: - flow_run.state = schemas.states.Pending() + if not flow_run_object.state: + flow_run_object.state = schemas.states.Pending() now = pendulum.now("UTC") async with db.session_context(begin_transaction=True) as session: model = await models.flow_runs.create_flow_run( session=session, - flow_run=flow_run, + flow_run=flow_run_object, orchestration_parameters=orchestration_parameters, ) if model.created >= now: @@ -838,3 +840,18 @@ async def generate(): "Content-Disposition": f"attachment; filename={flow_run.name}-logs.csv" }, ) + + +@router.patch("/{id}/labels", status_code=status.HTTP_204_NO_CONTENT) +async def update_flow_run_labels( + flow_run_id: UUID = Path(..., description="The flow run id", alias="id"), + labels: Dict[str, Any] = Body(..., description="The labels to update"), + db: PrefectDBInterface = Depends(provide_database_interface), +): + """ + Update the labels of a flow run. + """ + async with db.session_context(begin_transaction=True) as session: + await models.flow_runs.update_flow_run_labels( + session=session, flow_run_id=flow_run_id, labels=labels + ) diff --git a/src/prefect/server/api/run_history.py b/src/prefect/server/api/run_history.py index fe7a1b8cfd97..f70976f4a1e9 100644 --- a/src/prefect/server/api/run_history.py +++ b/src/prefect/server/api/run_history.py @@ -8,7 +8,6 @@ import pydantic import sqlalchemy as sa -from pydantic_extra_types.pendulum_dt import DateTime from typing_extensions import Literal import prefect.server.models as models @@ -16,6 +15,7 @@ from prefect.logging import get_logger from prefect.server.database.dependencies import db_injector from prefect.server.database.interface import PrefectDBInterface +from prefect.types import DateTime logger = get_logger("server.api") diff --git a/src/prefect/server/api/task_runs.py b/src/prefect/server/api/task_runs.py index b75b2abc7ad4..8912b3f5fdf0 100644 --- a/src/prefect/server/api/task_runs.py +++ b/src/prefect/server/api/task_runs.py @@ -17,7 +17,6 @@ WebSocket, status, ) -from pydantic_extra_types.pendulum_dt import DateTime from starlette.websockets import WebSocketDisconnect import prefect.server.api.dependencies as dependencies @@ -34,6 +33,7 @@ from prefect.server.task_queue import MultiQueue, TaskQueue from prefect.server.utilities import subscriptions from prefect.server.utilities.server import PrefectRouter +from prefect.types import DateTime logger = get_logger("server.api") diff --git a/src/prefect/server/api/ui/flow_runs.py b/src/prefect/server/api/ui/flow_runs.py index ee254c4b00b9..c76db8719dca 100644 --- a/src/prefect/server/api/ui/flow_runs.py +++ b/src/prefect/server/api/ui/flow_runs.py @@ -5,7 +5,6 @@ import sqlalchemy as sa from fastapi import Body, Depends from pydantic import Field -from pydantic_extra_types.pendulum_dt import DateTime import prefect.server.schemas as schemas from prefect._internal.schemas.bases import PrefectBaseModel @@ -15,6 +14,7 @@ from prefect.server.database.dependencies import provide_database_interface from prefect.server.database.interface import PrefectDBInterface from prefect.server.utilities.server import PrefectRouter +from prefect.types import 
DateTime logger = get_logger("server.api.ui.flow_runs") diff --git a/src/prefect/server/api/ui/flows.py b/src/prefect/server/api/ui/flows.py index 57261e459614..8128abd45ccf 100644 --- a/src/prefect/server/api/ui/flows.py +++ b/src/prefect/server/api/ui/flows.py @@ -6,7 +6,6 @@ import sqlalchemy as sa from fastapi import Body, Depends from pydantic import Field, field_validator -from pydantic_extra_types.pendulum_dt import DateTime from prefect.logging import get_logger from prefect.server.database import orm_models @@ -16,6 +15,7 @@ from prefect.server.utilities.database import UUID as UUIDTypeDecorator from prefect.server.utilities.schemas import PrefectBaseModel from prefect.server.utilities.server import PrefectRouter +from prefect.types import DateTime logger = get_logger() diff --git a/src/prefect/server/api/ui/task_runs.py b/src/prefect/server/api/ui/task_runs.py index 11d97a72e07d..b8f4bb778240 100644 --- a/src/prefect/server/api/ui/task_runs.py +++ b/src/prefect/server/api/ui/task_runs.py @@ -6,7 +6,6 @@ import sqlalchemy as sa from fastapi import Depends, HTTPException, status from pydantic import Field, model_serializer -from pydantic_extra_types.pendulum_dt import DateTime import prefect.server.schemas as schemas from prefect._internal.schemas.bases import PrefectBaseModel @@ -15,6 +14,7 @@ from prefect.server.database.dependencies import provide_database_interface from prefect.server.database.interface import PrefectDBInterface from prefect.server.utilities.server import PrefectRouter +from prefect.types import DateTime logger = get_logger("orion.api.ui.task_runs") diff --git a/src/prefect/server/api/work_queues.py b/src/prefect/server/api/work_queues.py index 7733691d521f..101b717c6081 100644 --- a/src/prefect/server/api/work_queues.py +++ b/src/prefect/server/api/work_queues.py @@ -15,7 +15,6 @@ Path, status, ) -from pydantic_extra_types.pendulum_dt import DateTime import prefect.server.api.dependencies as dependencies import prefect.server.models as models @@ -29,6 +28,7 @@ ) from prefect.server.schemas.statuses import WorkQueueStatus from prefect.server.utilities.server import PrefectRouter +from prefect.types import DateTime router = PrefectRouter(prefix="/work_queues", tags=["Work Queues"]) diff --git a/src/prefect/server/api/workers.py b/src/prefect/server/api/workers.py index cc85aa6f4df3..cc40dea8a9d8 100644 --- a/src/prefect/server/api/workers.py +++ b/src/prefect/server/api/workers.py @@ -15,7 +15,6 @@ Path, status, ) -from pydantic_extra_types.pendulum_dt import DateTime from sqlalchemy.ext.asyncio import AsyncSession import prefect.server.api.dependencies as dependencies @@ -32,6 +31,7 @@ from prefect.server.models.workers import emit_work_pool_status_event from prefect.server.schemas.statuses import WorkQueueStatus from prefect.server.utilities.server import PrefectRouter +from prefect.types import DateTime if TYPE_CHECKING: from prefect.server.database.orm_models import ORMWorkQueue diff --git a/src/prefect/server/database/migrations/MIGRATION-NOTES.md b/src/prefect/server/database/_migrations/MIGRATION-NOTES.md similarity index 98% rename from src/prefect/server/database/migrations/MIGRATION-NOTES.md rename to src/prefect/server/database/_migrations/MIGRATION-NOTES.md index 285a6f88599f..06e796e63c8f 100644 --- a/src/prefect/server/database/migrations/MIGRATION-NOTES.md +++ b/src/prefect/server/database/_migrations/MIGRATION-NOTES.md @@ -8,6 +8,10 @@ Each time a database migration is written, an entry is included here with: This gives us a history of changes 
and will create merge conflicts if two migrations are made at once, flagging situations where a branch needs to be updated before merging. +# Bring ORM models and migrations back in sync +SQLite: `a49711513ad4` +Postgres: `5d03c01be85e` + # Add `labels` column to Flow, FlowRun, TaskRun, and Deployment SQLite: `5952a5498b51` Postgres: `68a44144428d` diff --git a/src/prefect/server/database/migrations/env.py b/src/prefect/server/database/_migrations/env.py similarity index 84% rename from src/prefect/server/database/migrations/env.py rename to src/prefect/server/database/_migrations/env.py index da92f392335e..a64574bcf85e 100644 --- a/src/prefect/server/database/migrations/env.py +++ b/src/prefect/server/database/_migrations/env.py @@ -2,6 +2,7 @@ # https://alembic.sqlalchemy.org/en/latest/tutorial.html#creating-an-environment import contextlib +from typing import Optional import sqlalchemy from alembic import context @@ -23,7 +24,7 @@ def include_object( name: str, type_: str, reflected: bool, - compare_to: sqlalchemy.schema.SchemaItem, + compare_to: Optional[sqlalchemy.schema.SchemaItem], ) -> bool: """ Determines whether or not alembic should include an object when autogenerating @@ -53,13 +54,34 @@ def include_object( # * functional indexes (ending in 'desc', 'asc'), if an index with the same name already exists # * trigram indexes that already exist # * case_insensitive indexes that already exist + # * indexes that don't yet exist but have .ddl_if(dialect=...) metadata that doesn't match + # the current dialect. if type_ == "index": - if not reflected and any([name.endswith(suffix) for suffix in {"asc", "desc"}]): - return compare_to is None or object.name != compare_to.name - elif reflected and ( - name.startswith("gin") or name.endswith("case_insensitive") - ): - return False + if not reflected: + if name.endswith(("asc", "desc")): + return compare_to is None or object.name != compare_to.name + if (ddl_if := object._ddl_if) is not None and ddl_if.dialect is not None: + desired: set[str] = ( + {ddl_if.dialect} + if isinstance(ddl_if.dialect, str) + else set(ddl_if.dialect) + ) + return dialect.name in desired + + else: # reflected + if name.startswith("gin") or name.endswith("case_insensitive"): + return False + + # SQLite doesn't have an enum type, so reflection always comes back with + # a VARCHAR column, which doesn't match. 
Skip columns where the type + # doesn't match + if ( + dialect.name == "sqlite" + and type_ == "column" + and object.type.__visit_name__ == "enum" + and compare_to is not None + ): + return compare_to.type.__visit_name__ == "enum" return True diff --git a/src/prefect/server/database/migrations/script.py.mako b/src/prefect/server/database/_migrations/script.py.mako similarity index 100% rename from src/prefect/server/database/migrations/script.py.mako rename to src/prefect/server/database/_migrations/script.py.mako diff --git a/src/prefect/server/database/migrations/versions/postgresql/2021_01_20_122127_25f4b90a7a42_initial_migration.py b/src/prefect/server/database/_migrations/versions/postgresql/2021_01_20_122127_25f4b90a7a42_initial_migration.py similarity index 100% rename from src/prefect/server/database/migrations/versions/postgresql/2021_01_20_122127_25f4b90a7a42_initial_migration.py rename to src/prefect/server/database/_migrations/versions/postgresql/2021_01_20_122127_25f4b90a7a42_initial_migration.py diff --git a/src/prefect/server/database/migrations/versions/postgresql/2022_02_13_125213_5f376def75c3_block_data.py b/src/prefect/server/database/_migrations/versions/postgresql/2022_02_13_125213_5f376def75c3_block_data.py similarity index 100% rename from src/prefect/server/database/migrations/versions/postgresql/2022_02_13_125213_5f376def75c3_block_data.py rename to src/prefect/server/database/_migrations/versions/postgresql/2022_02_13_125213_5f376def75c3_block_data.py diff --git a/src/prefect/server/database/migrations/versions/postgresql/2022_02_13_125213_679e695af6ba_add_configurations.py b/src/prefect/server/database/_migrations/versions/postgresql/2022_02_13_125213_679e695af6ba_add_configurations.py similarity index 100% rename from src/prefect/server/database/migrations/versions/postgresql/2022_02_13_125213_679e695af6ba_add_configurations.py rename to src/prefect/server/database/_migrations/versions/postgresql/2022_02_13_125213_679e695af6ba_add_configurations.py diff --git a/src/prefect/server/database/migrations/versions/postgresql/2022_02_17_140821_5bff7878e700_add_agents_and_work_queue.py b/src/prefect/server/database/_migrations/versions/postgresql/2022_02_17_140821_5bff7878e700_add_agents_and_work_queue.py similarity index 100% rename from src/prefect/server/database/migrations/versions/postgresql/2022_02_17_140821_5bff7878e700_add_agents_and_work_queue.py rename to src/prefect/server/database/_migrations/versions/postgresql/2022_02_17_140821_5bff7878e700_add_agents_and_work_queue.py diff --git a/src/prefect/server/database/migrations/versions/postgresql/2022_02_19_205543_d9d98a9ebb6f_rename_block_data_table.py b/src/prefect/server/database/_migrations/versions/postgresql/2022_02_19_205543_d9d98a9ebb6f_rename_block_data_table.py similarity index 100% rename from src/prefect/server/database/migrations/versions/postgresql/2022_02_19_205543_d9d98a9ebb6f_rename_block_data_table.py rename to src/prefect/server/database/_migrations/versions/postgresql/2022_02_19_205543_d9d98a9ebb6f_rename_block_data_table.py diff --git a/src/prefect/server/database/migrations/versions/postgresql/2022_02_20_103844_4799f657a6a1_add_block_spec_table.py b/src/prefect/server/database/_migrations/versions/postgresql/2022_02_20_103844_4799f657a6a1_add_block_spec_table.py similarity index 100% rename from src/prefect/server/database/migrations/versions/postgresql/2022_02_20_103844_4799f657a6a1_add_block_spec_table.py rename to 
src/prefect/server/database/_migrations/versions/postgresql/2022_02_20_103844_4799f657a6a1_add_block_spec_table.py diff --git a/src/prefect/server/database/migrations/versions/postgresql/2022_02_21_111050_d115556a8ab6_index_flowrun_flow_runner_type.py b/src/prefect/server/database/_migrations/versions/postgresql/2022_02_21_111050_d115556a8ab6_index_flowrun_flow_runner_type.py similarity index 100% rename from src/prefect/server/database/migrations/versions/postgresql/2022_02_21_111050_d115556a8ab6_index_flowrun_flow_runner_type.py rename to src/prefect/server/database/_migrations/versions/postgresql/2022_02_21_111050_d115556a8ab6_index_flowrun_flow_runner_type.py diff --git a/src/prefect/server/database/migrations/versions/postgresql/2022_02_21_150017_b68b3cad6b8a_add_block_spec_id_to_blocks.py b/src/prefect/server/database/_migrations/versions/postgresql/2022_02_21_150017_b68b3cad6b8a_add_block_spec_id_to_blocks.py similarity index 100% rename from src/prefect/server/database/migrations/versions/postgresql/2022_02_21_150017_b68b3cad6b8a_add_block_spec_id_to_blocks.py rename to src/prefect/server/database/_migrations/versions/postgresql/2022_02_21_150017_b68b3cad6b8a_add_block_spec_id_to_blocks.py diff --git a/src/prefect/server/database/migrations/versions/postgresql/2022_03_10_102713_2e7e1428ffce_index_flow_created.py b/src/prefect/server/database/_migrations/versions/postgresql/2022_03_10_102713_2e7e1428ffce_index_flow_created.py similarity index 100% rename from src/prefect/server/database/migrations/versions/postgresql/2022_03_10_102713_2e7e1428ffce_index_flow_created.py rename to src/prefect/server/database/_migrations/versions/postgresql/2022_03_10_102713_2e7e1428ffce_index_flow_created.py diff --git a/src/prefect/server/database/migrations/versions/postgresql/2022_04_20_113011_605ebb4e9155_add_flow_run_state_name.py b/src/prefect/server/database/_migrations/versions/postgresql/2022_04_20_113011_605ebb4e9155_add_flow_run_state_name.py similarity index 100% rename from src/prefect/server/database/migrations/versions/postgresql/2022_04_20_113011_605ebb4e9155_add_flow_run_state_name.py rename to src/prefect/server/database/_migrations/versions/postgresql/2022_04_20_113011_605ebb4e9155_add_flow_run_state_name.py diff --git a/src/prefect/server/database/migrations/versions/postgresql/2022_04_21_095519_14dc68cc5853_backfill_state_name.py b/src/prefect/server/database/_migrations/versions/postgresql/2022_04_21_095519_14dc68cc5853_backfill_state_name.py similarity index 100% rename from src/prefect/server/database/migrations/versions/postgresql/2022_04_21_095519_14dc68cc5853_backfill_state_name.py rename to src/prefect/server/database/_migrations/versions/postgresql/2022_04_21_095519_14dc68cc5853_backfill_state_name.py diff --git a/src/prefect/server/database/migrations/versions/postgresql/2022_04_23_132803_d38c5e6a9115_rename_block_to_blockbasis_and_.py b/src/prefect/server/database/_migrations/versions/postgresql/2022_04_23_132803_d38c5e6a9115_rename_block_to_blockbasis_and_.py similarity index 100% rename from src/prefect/server/database/migrations/versions/postgresql/2022_04_23_132803_d38c5e6a9115_rename_block_to_blockbasis_and_.py rename to src/prefect/server/database/_migrations/versions/postgresql/2022_04_23_132803_d38c5e6a9115_rename_block_to_blockbasis_and_.py diff --git a/src/prefect/server/database/migrations/versions/postgresql/2022_05_10_145956_1c9390e2f9c6_replace_version_with_checksum_and_.py 
b/src/prefect/server/database/_migrations/versions/postgresql/2022_05_10_145956_1c9390e2f9c6_replace_version_with_checksum_and_.py similarity index 100% rename from src/prefect/server/database/migrations/versions/postgresql/2022_05_10_145956_1c9390e2f9c6_replace_version_with_checksum_and_.py rename to src/prefect/server/database/_migrations/versions/postgresql/2022_05_10_145956_1c9390e2f9c6_replace_version_with_checksum_and_.py diff --git a/src/prefect/server/database/migrations/versions/postgresql/2022_05_12_202952_dc7a3c6fd3e9_add_flow_run_alerts.py b/src/prefect/server/database/_migrations/versions/postgresql/2022_05_12_202952_dc7a3c6fd3e9_add_flow_run_alerts.py similarity index 100% rename from src/prefect/server/database/migrations/versions/postgresql/2022_05_12_202952_dc7a3c6fd3e9_add_flow_run_alerts.py rename to src/prefect/server/database/_migrations/versions/postgresql/2022_05_12_202952_dc7a3c6fd3e9_add_flow_run_alerts.py diff --git a/src/prefect/server/database/migrations/versions/postgresql/2022_05_26_135743_724e6dcc6b5d_add_block_schema_capabilities.py b/src/prefect/server/database/_migrations/versions/postgresql/2022_05_26_135743_724e6dcc6b5d_add_block_schema_capabilities.py similarity index 100% rename from src/prefect/server/database/migrations/versions/postgresql/2022_05_26_135743_724e6dcc6b5d_add_block_schema_capabilities.py rename to src/prefect/server/database/_migrations/versions/postgresql/2022_05_26_135743_724e6dcc6b5d_add_block_schema_capabilities.py diff --git a/src/prefect/server/database/migrations/versions/postgresql/2022_05_28_081821_2fe6fe6ca16e_adds_block_schema_references_and_block_.py b/src/prefect/server/database/_migrations/versions/postgresql/2022_05_28_081821_2fe6fe6ca16e_adds_block_schema_references_and_block_.py similarity index 100% rename from src/prefect/server/database/migrations/versions/postgresql/2022_05_28_081821_2fe6fe6ca16e_adds_block_schema_references_and_block_.py rename to src/prefect/server/database/_migrations/versions/postgresql/2022_05_28_081821_2fe6fe6ca16e_adds_block_schema_references_and_block_.py diff --git a/src/prefect/server/database/migrations/versions/postgresql/2022_05_30_112549_cdcb4018dd0e_rename_run_alerts_to_run_notifications.py b/src/prefect/server/database/_migrations/versions/postgresql/2022_05_30_112549_cdcb4018dd0e_rename_run_alerts_to_run_notifications.py similarity index 100% rename from src/prefect/server/database/migrations/versions/postgresql/2022_05_30_112549_cdcb4018dd0e_rename_run_alerts_to_run_notifications.py rename to src/prefect/server/database/_migrations/versions/postgresql/2022_05_30_112549_cdcb4018dd0e_rename_run_alerts_to_run_notifications.py diff --git a/src/prefect/server/database/migrations/versions/postgresql/2022_06_04_133535_d60c18774a5d_add_indexes_for_partial_name_matches.py b/src/prefect/server/database/_migrations/versions/postgresql/2022_06_04_133535_d60c18774a5d_add_indexes_for_partial_name_matches.py similarity index 100% rename from src/prefect/server/database/migrations/versions/postgresql/2022_06_04_133535_d60c18774a5d_add_indexes_for_partial_name_matches.py rename to src/prefect/server/database/_migrations/versions/postgresql/2022_06_04_133535_d60c18774a5d_add_indexes_for_partial_name_matches.py diff --git a/src/prefect/server/database/migrations/versions/postgresql/2022_06_08_121753_3a7c41d3b464_adds_description_and_code_example_to_.py b/src/prefect/server/database/_migrations/versions/postgresql/2022_06_08_121753_3a7c41d3b464_adds_description_and_code_example_to_.py similarity 
index 100% rename from src/prefect/server/database/migrations/versions/postgresql/2022_06_08_121753_3a7c41d3b464_adds_description_and_code_example_to_.py rename to src/prefect/server/database/_migrations/versions/postgresql/2022_06_08_121753_3a7c41d3b464_adds_description_and_code_example_to_.py diff --git a/src/prefect/server/database/migrations/versions/postgresql/2022_06_13_104234_61c76ee09e02_add_anonymous_column_for_block_documents.py b/src/prefect/server/database/_migrations/versions/postgresql/2022_06_13_104234_61c76ee09e02_add_anonymous_column_for_block_documents.py similarity index 100% rename from src/prefect/server/database/migrations/versions/postgresql/2022_06_13_104234_61c76ee09e02_add_anonymous_column_for_block_documents.py rename to src/prefect/server/database/_migrations/versions/postgresql/2022_06_13_104234_61c76ee09e02_add_anonymous_column_for_block_documents.py diff --git a/src/prefect/server/database/migrations/versions/postgresql/2022_06_17_204409_d335ad57d5ba_add_block_schema_indexes.py b/src/prefect/server/database/_migrations/versions/postgresql/2022_06_17_204409_d335ad57d5ba_add_block_schema_indexes.py similarity index 100% rename from src/prefect/server/database/migrations/versions/postgresql/2022_06_17_204409_d335ad57d5ba_add_block_schema_indexes.py rename to src/prefect/server/database/_migrations/versions/postgresql/2022_06_17_204409_d335ad57d5ba_add_block_schema_indexes.py diff --git a/src/prefect/server/database/migrations/versions/postgresql/2022_06_20_123921_7296741dff68_add_protected_column_for_block_types.py b/src/prefect/server/database/_migrations/versions/postgresql/2022_06_20_123921_7296741dff68_add_protected_column_for_block_types.py similarity index 100% rename from src/prefect/server/database/migrations/versions/postgresql/2022_06_20_123921_7296741dff68_add_protected_column_for_block_types.py rename to src/prefect/server/database/_migrations/versions/postgresql/2022_06_20_123921_7296741dff68_add_protected_column_for_block_types.py diff --git a/src/prefect/server/database/migrations/versions/postgresql/2022_06_21_093732_29ad9bef6147_adds_indexes_for_block_filtering.py b/src/prefect/server/database/_migrations/versions/postgresql/2022_06_21_093732_29ad9bef6147_adds_indexes_for_block_filtering.py similarity index 100% rename from src/prefect/server/database/migrations/versions/postgresql/2022_06_21_093732_29ad9bef6147_adds_indexes_for_block_filtering.py rename to src/prefect/server/database/_migrations/versions/postgresql/2022_06_21_093732_29ad9bef6147_adds_indexes_for_block_filtering.py diff --git a/src/prefect/server/database/migrations/versions/postgresql/2022_06_29_135432_813ddf14e2de_add_descriptions_to_deployments.py b/src/prefect/server/database/_migrations/versions/postgresql/2022_06_29_135432_813ddf14e2de_add_descriptions_to_deployments.py similarity index 100% rename from src/prefect/server/database/migrations/versions/postgresql/2022_06_29_135432_813ddf14e2de_add_descriptions_to_deployments.py rename to src/prefect/server/database/_migrations/versions/postgresql/2022_06_29_135432_813ddf14e2de_add_descriptions_to_deployments.py diff --git a/src/prefect/server/database/migrations/versions/postgresql/2022_06_29_152219_2f46fc3f3beb_remove_name_column_for_notification_.py b/src/prefect/server/database/_migrations/versions/postgresql/2022_06_29_152219_2f46fc3f3beb_remove_name_column_for_notification_.py similarity index 100% rename from 
src/prefect/server/database/migrations/versions/postgresql/2022_06_29_152219_2f46fc3f3beb_remove_name_column_for_notification_.py rename to src/prefect/server/database/_migrations/versions/postgresql/2022_06_29_152219_2f46fc3f3beb_remove_name_column_for_notification_.py diff --git a/src/prefect/server/database/migrations/versions/postgresql/2022_07_06_152528_4cdc2ba709a4_migrates_block_schemas_with_new_secrets_.py b/src/prefect/server/database/_migrations/versions/postgresql/2022_07_06_152528_4cdc2ba709a4_migrates_block_schemas_with_new_secrets_.py similarity index 100% rename from src/prefect/server/database/migrations/versions/postgresql/2022_07_06_152528_4cdc2ba709a4_migrates_block_schemas_with_new_secrets_.py rename to src/prefect/server/database/_migrations/versions/postgresql/2022_07_06_152528_4cdc2ba709a4_migrates_block_schemas_with_new_secrets_.py diff --git a/src/prefect/server/database/migrations/versions/postgresql/2022_07_07_112809_e905fd199258_removes_debugprintnotification_block_.py b/src/prefect/server/database/_migrations/versions/postgresql/2022_07_07_112809_e905fd199258_removes_debugprintnotification_block_.py similarity index 100% rename from src/prefect/server/database/migrations/versions/postgresql/2022_07_07_112809_e905fd199258_removes_debugprintnotification_block_.py rename to src/prefect/server/database/_migrations/versions/postgresql/2022_07_07_112809_e905fd199258_removes_debugprintnotification_block_.py diff --git a/src/prefect/server/database/migrations/versions/postgresql/2022_07_11_170700_112c68143fc3_add_infrastructure_document_id_to_.py b/src/prefect/server/database/_migrations/versions/postgresql/2022_07_11_170700_112c68143fc3_add_infrastructure_document_id_to_.py similarity index 100% rename from src/prefect/server/database/migrations/versions/postgresql/2022_07_11_170700_112c68143fc3_add_infrastructure_document_id_to_.py rename to src/prefect/server/database/_migrations/versions/postgresql/2022_07_11_170700_112c68143fc3_add_infrastructure_document_id_to_.py diff --git a/src/prefect/server/database/migrations/versions/postgresql/2022_07_14_114039_0f27d462bf6d_removing_default_storage_block_document.py b/src/prefect/server/database/_migrations/versions/postgresql/2022_07_14_114039_0f27d462bf6d_removing_default_storage_block_document.py similarity index 100% rename from src/prefect/server/database/migrations/versions/postgresql/2022_07_14_114039_0f27d462bf6d_removing_default_storage_block_document.py rename to src/prefect/server/database/_migrations/versions/postgresql/2022_07_14_114039_0f27d462bf6d_removing_default_storage_block_document.py diff --git a/src/prefect/server/database/migrations/versions/postgresql/2022_07_19_160058_bb4dc90d3e29_renames_existing_block_types.py b/src/prefect/server/database/_migrations/versions/postgresql/2022_07_19_160058_bb4dc90d3e29_renames_existing_block_types.py similarity index 100% rename from src/prefect/server/database/migrations/versions/postgresql/2022_07_19_160058_bb4dc90d3e29_renames_existing_block_types.py rename to src/prefect/server/database/_migrations/versions/postgresql/2022_07_19_160058_bb4dc90d3e29_renames_existing_block_types.py diff --git a/src/prefect/server/database/migrations/versions/postgresql/2022_07_21_133134_e085c9cbf8ce_remove_flow_runners.py b/src/prefect/server/database/_migrations/versions/postgresql/2022_07_21_133134_e085c9cbf8ce_remove_flow_runners.py similarity index 100% rename from 
src/prefect/server/database/migrations/versions/postgresql/2022_07_21_133134_e085c9cbf8ce_remove_flow_runners.py rename to src/prefect/server/database/_migrations/versions/postgresql/2022_07_21_133134_e085c9cbf8ce_remove_flow_runners.py diff --git a/src/prefect/server/database/migrations/versions/postgresql/2022_07_21_205820_0cf7311d6ea6_add_crashed_state_type.py b/src/prefect/server/database/_migrations/versions/postgresql/2022_07_21_205820_0cf7311d6ea6_add_crashed_state_type.py similarity index 100% rename from src/prefect/server/database/migrations/versions/postgresql/2022_07_21_205820_0cf7311d6ea6_add_crashed_state_type.py rename to src/prefect/server/database/_migrations/versions/postgresql/2022_07_21_205820_0cf7311d6ea6_add_crashed_state_type.py diff --git a/src/prefect/server/database/migrations/versions/postgresql/2022_07_25_214717_4ff2f2bf81f4_adds_block_type_slug.py b/src/prefect/server/database/_migrations/versions/postgresql/2022_07_25_214717_4ff2f2bf81f4_adds_block_type_slug.py similarity index 100% rename from src/prefect/server/database/migrations/versions/postgresql/2022_07_25_214717_4ff2f2bf81f4_adds_block_type_slug.py rename to src/prefect/server/database/_migrations/versions/postgresql/2022_07_25_214717_4ff2f2bf81f4_adds_block_type_slug.py diff --git a/src/prefect/server/database/migrations/versions/postgresql/2022_07_25_233637_add97ce1937d_update_deployments_to_include_more_.py b/src/prefect/server/database/_migrations/versions/postgresql/2022_07_25_233637_add97ce1937d_update_deployments_to_include_more_.py similarity index 100% rename from src/prefect/server/database/migrations/versions/postgresql/2022_07_25_233637_add97ce1937d_update_deployments_to_include_more_.py rename to src/prefect/server/database/_migrations/versions/postgresql/2022_07_25_233637_add97ce1937d_update_deployments_to_include_more_.py diff --git a/src/prefect/server/database/migrations/versions/postgresql/2022_07_29_181713_fa985d474982_add_index_to_flow_run_infrastructure_.py b/src/prefect/server/database/_migrations/versions/postgresql/2022_07_29_181713_fa985d474982_add_index_to_flow_run_infrastructure_.py similarity index 100% rename from src/prefect/server/database/migrations/versions/postgresql/2022_07_29_181713_fa985d474982_add_index_to_flow_run_infrastructure_.py rename to src/prefect/server/database/_migrations/versions/postgresql/2022_07_29_181713_fa985d474982_add_index_to_flow_run_infrastructure_.py diff --git a/src/prefect/server/database/migrations/versions/postgresql/2022_08_01_211251_97e212ea6545_add_deployment_version.py b/src/prefect/server/database/_migrations/versions/postgresql/2022_08_01_211251_97e212ea6545_add_deployment_version.py similarity index 100% rename from src/prefect/server/database/migrations/versions/postgresql/2022_08_01_211251_97e212ea6545_add_deployment_version.py rename to src/prefect/server/database/_migrations/versions/postgresql/2022_08_01_211251_97e212ea6545_add_deployment_version.py diff --git a/src/prefect/server/database/migrations/versions/postgresql/2022_08_06_145817_60e428f92a75_expand_deployment_schema_for_improved_ux.py b/src/prefect/server/database/_migrations/versions/postgresql/2022_08_06_145817_60e428f92a75_expand_deployment_schema_for_improved_ux.py similarity index 100% rename from src/prefect/server/database/migrations/versions/postgresql/2022_08_06_145817_60e428f92a75_expand_deployment_schema_for_improved_ux.py rename to 
src/prefect/server/database/_migrations/versions/postgresql/2022_08_06_145817_60e428f92a75_expand_deployment_schema_for_improved_ux.py diff --git a/src/prefect/server/database/migrations/versions/postgresql/2022_08_07_134410_77eb737fc759_add_work_queue_name_to_runs.py b/src/prefect/server/database/_migrations/versions/postgresql/2022_08_07_134410_77eb737fc759_add_work_queue_name_to_runs.py similarity index 100% rename from src/prefect/server/database/migrations/versions/postgresql/2022_08_07_134410_77eb737fc759_add_work_queue_name_to_runs.py rename to src/prefect/server/database/_migrations/versions/postgresql/2022_08_07_134410_77eb737fc759_add_work_queue_name_to_runs.py diff --git a/src/prefect/server/database/migrations/versions/postgresql/2022_08_07_154550_7737221bf8a4_fix_concurrency_limit_tag_index_name.py b/src/prefect/server/database/_migrations/versions/postgresql/2022_08_07_154550_7737221bf8a4_fix_concurrency_limit_tag_index_name.py similarity index 100% rename from src/prefect/server/database/migrations/versions/postgresql/2022_08_07_154550_7737221bf8a4_fix_concurrency_limit_tag_index_name.py rename to src/prefect/server/database/_migrations/versions/postgresql/2022_08_07_154550_7737221bf8a4_fix_concurrency_limit_tag_index_name.py diff --git a/src/prefect/server/database/migrations/versions/postgresql/2022_08_18_102804_2d5e000696f1_adds_block_schema_version.py b/src/prefect/server/database/_migrations/versions/postgresql/2022_08_18_102804_2d5e000696f1_adds_block_schema_version.py similarity index 100% rename from src/prefect/server/database/migrations/versions/postgresql/2022_08_18_102804_2d5e000696f1_adds_block_schema_version.py rename to src/prefect/server/database/_migrations/versions/postgresql/2022_08_18_102804_2d5e000696f1_adds_block_schema_version.py diff --git a/src/prefect/server/database/migrations/versions/postgresql/2022_10_12_102048_22b7cb02e593_add_state_timestamp.py b/src/prefect/server/database/_migrations/versions/postgresql/2022_10_12_102048_22b7cb02e593_add_state_timestamp.py similarity index 100% rename from src/prefect/server/database/migrations/versions/postgresql/2022_10_12_102048_22b7cb02e593_add_state_timestamp.py rename to src/prefect/server/database/_migrations/versions/postgresql/2022_10_12_102048_22b7cb02e593_add_state_timestamp.py diff --git a/src/prefect/server/database/migrations/versions/postgresql/2022_10_14_172612_ad4b1b4d1e9d_index_deployment_created.py b/src/prefect/server/database/_migrations/versions/postgresql/2022_10_14_172612_ad4b1b4d1e9d_index_deployment_created.py similarity index 100% rename from src/prefect/server/database/migrations/versions/postgresql/2022_10_14_172612_ad4b1b4d1e9d_index_deployment_created.py rename to src/prefect/server/database/_migrations/versions/postgresql/2022_10_14_172612_ad4b1b4d1e9d_index_deployment_created.py diff --git a/src/prefect/server/database/migrations/versions/postgresql/2022_10_19_093902_6d548701edef_add_created_by.py b/src/prefect/server/database/_migrations/versions/postgresql/2022_10_19_093902_6d548701edef_add_created_by.py similarity index 100% rename from src/prefect/server/database/migrations/versions/postgresql/2022_10_19_093902_6d548701edef_add_created_by.py rename to src/prefect/server/database/_migrations/versions/postgresql/2022_10_19_093902_6d548701edef_add_created_by.py diff --git a/src/prefect/server/database/migrations/versions/postgresql/2022_10_19_165110_8ea825da948d_track_retries_restarts.py 
b/src/prefect/server/database/_migrations/versions/postgresql/2022_10_19_165110_8ea825da948d_track_retries_restarts.py similarity index 100% rename from src/prefect/server/database/migrations/versions/postgresql/2022_10_19_165110_8ea825da948d_track_retries_restarts.py rename to src/prefect/server/database/_migrations/versions/postgresql/2022_10_19_165110_8ea825da948d_track_retries_restarts.py diff --git a/src/prefect/server/database/migrations/versions/postgresql/2022_10_20_101423_3ced59d8806b_add_last_polled.py b/src/prefect/server/database/_migrations/versions/postgresql/2022_10_20_101423_3ced59d8806b_add_last_polled.py similarity index 100% rename from src/prefect/server/database/migrations/versions/postgresql/2022_10_20_101423_3ced59d8806b_add_last_polled.py rename to src/prefect/server/database/_migrations/versions/postgresql/2022_10_20_101423_3ced59d8806b_add_last_polled.py diff --git a/src/prefect/server/database/migrations/versions/postgresql/2022_10_31_161719_41e5ed9e1034_.py b/src/prefect/server/database/_migrations/versions/postgresql/2022_10_31_161719_41e5ed9e1034_.py similarity index 100% rename from src/prefect/server/database/migrations/versions/postgresql/2022_10_31_161719_41e5ed9e1034_.py rename to src/prefect/server/database/_migrations/versions/postgresql/2022_10_31_161719_41e5ed9e1034_.py diff --git a/src/prefect/server/database/migrations/versions/postgresql/2022_11_05_180555_54c1876c68ae_add_index_for_scheduled_deployments.py b/src/prefect/server/database/_migrations/versions/postgresql/2022_11_05_180555_54c1876c68ae_add_index_for_scheduled_deployments.py similarity index 100% rename from src/prefect/server/database/migrations/versions/postgresql/2022_11_05_180555_54c1876c68ae_add_index_for_scheduled_deployments.py rename to src/prefect/server/database/_migrations/versions/postgresql/2022_11_05_180555_54c1876c68ae_add_index_for_scheduled_deployments.py diff --git a/src/prefect/server/database/migrations/versions/postgresql/2022_11_10_171740_8caf7c1fd82c_add_coalesced_start_time_indices.py b/src/prefect/server/database/_migrations/versions/postgresql/2022_11_10_171740_8caf7c1fd82c_add_coalesced_start_time_indices.py similarity index 100% rename from src/prefect/server/database/migrations/versions/postgresql/2022_11_10_171740_8caf7c1fd82c_add_coalesced_start_time_indices.py rename to src/prefect/server/database/_migrations/versions/postgresql/2022_11_10_171740_8caf7c1fd82c_add_coalesced_start_time_indices.py diff --git a/src/prefect/server/database/migrations/versions/postgresql/2022_11_18_161056_5d526270ddb4_add_flowrun_infrastructure_pid.py b/src/prefect/server/database/_migrations/versions/postgresql/2022_11_18_161056_5d526270ddb4_add_flowrun_infrastructure_pid.py similarity index 100% rename from src/prefect/server/database/migrations/versions/postgresql/2022_11_18_161056_5d526270ddb4_add_flowrun_infrastructure_pid.py rename to src/prefect/server/database/_migrations/versions/postgresql/2022_11_18_161056_5d526270ddb4_add_flowrun_infrastructure_pid.py diff --git a/src/prefect/server/database/migrations/versions/postgresql/2022_11_23_092449_5e4f924ff96c_add_paused_state_type.py b/src/prefect/server/database/_migrations/versions/postgresql/2022_11_23_092449_5e4f924ff96c_add_paused_state_type.py similarity index 100% rename from src/prefect/server/database/migrations/versions/postgresql/2022_11_23_092449_5e4f924ff96c_add_paused_state_type.py rename to src/prefect/server/database/_migrations/versions/postgresql/2022_11_23_092449_5e4f924ff96c_add_paused_state_type.py diff 
--git a/src/prefect/server/database/migrations/versions/postgresql/2022_11_24_143620_f7587d6c5776_add_worker_tables.py b/src/prefect/server/database/_migrations/versions/postgresql/2022_11_24_143620_f7587d6c5776_add_worker_tables.py similarity index 100% rename from src/prefect/server/database/migrations/versions/postgresql/2022_11_24_143620_f7587d6c5776_add_worker_tables.py rename to src/prefect/server/database/_migrations/versions/postgresql/2022_11_24_143620_f7587d6c5776_add_worker_tables.py diff --git a/src/prefect/server/database/migrations/versions/postgresql/2023_01_08_180142_d481d5058a19_rename_worker_pools_to_work_pools.py b/src/prefect/server/database/_migrations/versions/postgresql/2023_01_08_180142_d481d5058a19_rename_worker_pools_to_work_pools.py similarity index 100% rename from src/prefect/server/database/migrations/versions/postgresql/2023_01_08_180142_d481d5058a19_rename_worker_pools_to_work_pools.py rename to src/prefect/server/database/_migrations/versions/postgresql/2023_01_08_180142_d481d5058a19_rename_worker_pools_to_work_pools.py diff --git a/src/prefect/server/database/migrations/versions/postgresql/2023_01_25_164028_9326a6aee18b_add_cancelling_to_state_type_enum.py b/src/prefect/server/database/_migrations/versions/postgresql/2023_01_25_164028_9326a6aee18b_add_cancelling_to_state_type_enum.py similarity index 100% rename from src/prefect/server/database/migrations/versions/postgresql/2023_01_25_164028_9326a6aee18b_add_cancelling_to_state_type_enum.py rename to src/prefect/server/database/_migrations/versions/postgresql/2023_01_25_164028_9326a6aee18b_add_cancelling_to_state_type_enum.py diff --git a/src/prefect/server/database/migrations/versions/postgresql/2023_01_26_045500_2882cd2df463_implement_artifact_table.py b/src/prefect/server/database/_migrations/versions/postgresql/2023_01_26_045500_2882cd2df463_implement_artifact_table.py similarity index 100% rename from src/prefect/server/database/migrations/versions/postgresql/2023_01_26_045500_2882cd2df463_implement_artifact_table.py rename to src/prefect/server/database/_migrations/versions/postgresql/2023_01_26_045500_2882cd2df463_implement_artifact_table.py diff --git a/src/prefect/server/database/migrations/versions/postgresql/2023_01_26_045501_2882cd2df464_create_migration_index.py b/src/prefect/server/database/_migrations/versions/postgresql/2023_01_26_045501_2882cd2df464_create_migration_index.py similarity index 100% rename from src/prefect/server/database/migrations/versions/postgresql/2023_01_26_045501_2882cd2df464_create_migration_index.py rename to src/prefect/server/database/_migrations/versions/postgresql/2023_01_26_045501_2882cd2df464_create_migration_index.py diff --git a/src/prefect/server/database/migrations/versions/postgresql/2023_01_26_045501_2882cd2df465_migrate_artifact_data.py b/src/prefect/server/database/_migrations/versions/postgresql/2023_01_26_045501_2882cd2df465_migrate_artifact_data.py similarity index 100% rename from src/prefect/server/database/migrations/versions/postgresql/2023_01_26_045501_2882cd2df465_migrate_artifact_data.py rename to src/prefect/server/database/_migrations/versions/postgresql/2023_01_26_045501_2882cd2df465_migrate_artifact_data.py diff --git a/src/prefect/server/database/migrations/versions/postgresql/2023_01_26_045502_2882cd2df466_cleanup_artifact_migration.py b/src/prefect/server/database/_migrations/versions/postgresql/2023_01_26_045502_2882cd2df466_cleanup_artifact_migration.py similarity index 100% rename from 
src/prefect/server/database/migrations/versions/postgresql/2023_01_26_045502_2882cd2df466_cleanup_artifact_migration.py rename to src/prefect/server/database/_migrations/versions/postgresql/2023_01_26_045502_2882cd2df466_cleanup_artifact_migration.py diff --git a/src/prefect/server/database/migrations/versions/postgresql/2023_01_26_152801_0a1250a5aa25_expand_work_queue_table.py b/src/prefect/server/database/_migrations/versions/postgresql/2023_01_26_152801_0a1250a5aa25_expand_work_queue_table.py similarity index 100% rename from src/prefect/server/database/migrations/versions/postgresql/2023_01_26_152801_0a1250a5aa25_expand_work_queue_table.py rename to src/prefect/server/database/_migrations/versions/postgresql/2023_01_26_152801_0a1250a5aa25_expand_work_queue_table.py diff --git a/src/prefect/server/database/migrations/versions/postgresql/2023_01_31_110543_f98ae6d8e2cc_work_queue_data_migration.py b/src/prefect/server/database/_migrations/versions/postgresql/2023_01_31_110543_f98ae6d8e2cc_work_queue_data_migration.py similarity index 100% rename from src/prefect/server/database/migrations/versions/postgresql/2023_01_31_110543_f98ae6d8e2cc_work_queue_data_migration.py rename to src/prefect/server/database/_migrations/versions/postgresql/2023_01_31_110543_f98ae6d8e2cc_work_queue_data_migration.py diff --git a/src/prefect/server/database/migrations/versions/postgresql/2023_01_31_133052_2a88656f4a23_clean_up_work_queue_migration.py b/src/prefect/server/database/_migrations/versions/postgresql/2023_01_31_133052_2a88656f4a23_clean_up_work_queue_migration.py similarity index 100% rename from src/prefect/server/database/migrations/versions/postgresql/2023_01_31_133052_2a88656f4a23_clean_up_work_queue_migration.py rename to src/prefect/server/database/_migrations/versions/postgresql/2023_01_31_133052_2a88656f4a23_clean_up_work_queue_migration.py diff --git a/src/prefect/server/database/migrations/versions/postgresql/2023_02_08_151958_cfdfec5d7557_remove_artifact_fk.py b/src/prefect/server/database/_migrations/versions/postgresql/2023_02_08_151958_cfdfec5d7557_remove_artifact_fk.py similarity index 100% rename from src/prefect/server/database/migrations/versions/postgresql/2023_02_08_151958_cfdfec5d7557_remove_artifact_fk.py rename to src/prefect/server/database/_migrations/versions/postgresql/2023_02_08_151958_cfdfec5d7557_remove_artifact_fk.py diff --git a/src/prefect/server/database/migrations/versions/postgresql/2023_03_01_154651_7d918a392297_remove_flowrun_deployment_fk.py b/src/prefect/server/database/_migrations/versions/postgresql/2023_03_01_154651_7d918a392297_remove_flowrun_deployment_fk.py similarity index 100% rename from src/prefect/server/database/migrations/versions/postgresql/2023_03_01_154651_7d918a392297_remove_flowrun_deployment_fk.py rename to src/prefect/server/database/_migrations/versions/postgresql/2023_03_01_154651_7d918a392297_remove_flowrun_deployment_fk.py diff --git a/src/prefect/server/database/migrations/versions/postgresql/2023_03_15_153039_4a1a0e4f89de_add_artifact_description_col.py b/src/prefect/server/database/_migrations/versions/postgresql/2023_03_15_153039_4a1a0e4f89de_add_artifact_description_col.py similarity index 100% rename from src/prefect/server/database/migrations/versions/postgresql/2023_03_15_153039_4a1a0e4f89de_add_artifact_description_col.py rename to src/prefect/server/database/_migrations/versions/postgresql/2023_03_15_153039_4a1a0e4f89de_add_artifact_description_col.py diff --git 
a/src/prefect/server/database/migrations/versions/postgresql/2023_03_20_175243_aa84ac237ce8_remove_artifact_uq.py b/src/prefect/server/database/_migrations/versions/postgresql/2023_03_20_175243_aa84ac237ce8_remove_artifact_uq.py similarity index 100% rename from src/prefect/server/database/migrations/versions/postgresql/2023_03_20_175243_aa84ac237ce8_remove_artifact_uq.py rename to src/prefect/server/database/_migrations/versions/postgresql/2023_03_20_175243_aa84ac237ce8_remove_artifact_uq.py diff --git a/src/prefect/server/database/migrations/versions/postgresql/2023_03_20_185238_d20618ce678e_add_artifact_collection_table.py b/src/prefect/server/database/_migrations/versions/postgresql/2023_03_20_185238_d20618ce678e_add_artifact_collection_table.py similarity index 100% rename from src/prefect/server/database/migrations/versions/postgresql/2023_03_20_185238_d20618ce678e_add_artifact_collection_table.py rename to src/prefect/server/database/_migrations/versions/postgresql/2023_03_20_185238_d20618ce678e_add_artifact_collection_table.py diff --git a/src/prefect/server/database/migrations/versions/postgresql/2023_03_20_185610_46bd82c6279a_add_index_on_artifact.py b/src/prefect/server/database/_migrations/versions/postgresql/2023_03_20_185610_46bd82c6279a_add_index_on_artifact.py similarity index 100% rename from src/prefect/server/database/migrations/versions/postgresql/2023_03_20_185610_46bd82c6279a_add_index_on_artifact.py rename to src/prefect/server/database/_migrations/versions/postgresql/2023_03_20_185610_46bd82c6279a_add_index_on_artifact.py diff --git a/src/prefect/server/database/migrations/versions/postgresql/2023_04_04_132534_3bf47e3ce2dd_add_index_on_log.py b/src/prefect/server/database/_migrations/versions/postgresql/2023_04_04_132534_3bf47e3ce2dd_add_index_on_log.py similarity index 100% rename from src/prefect/server/database/migrations/versions/postgresql/2023_04_04_132534_3bf47e3ce2dd_add_index_on_log.py rename to src/prefect/server/database/_migrations/versions/postgresql/2023_04_04_132534_3bf47e3ce2dd_add_index_on_log.py diff --git a/src/prefect/server/database/migrations/versions/postgresql/2023_04_04_172310_6a1eb3d442e4_add_cols_to_artifact_collection.py b/src/prefect/server/database/_migrations/versions/postgresql/2023_04_04_172310_6a1eb3d442e4_add_cols_to_artifact_collection.py similarity index 100% rename from src/prefect/server/database/migrations/versions/postgresql/2023_04_04_172310_6a1eb3d442e4_add_cols_to_artifact_collection.py rename to src/prefect/server/database/_migrations/versions/postgresql/2023_04_04_172310_6a1eb3d442e4_add_cols_to_artifact_collection.py diff --git a/src/prefect/server/database/migrations/versions/postgresql/2023_04_05_130406_43c94d4c7aa3_add_pull_steps_column_to_deployment.py b/src/prefect/server/database/_migrations/versions/postgresql/2023_04_05_130406_43c94d4c7aa3_add_pull_steps_column_to_deployment.py similarity index 100% rename from src/prefect/server/database/migrations/versions/postgresql/2023_04_05_130406_43c94d4c7aa3_add_pull_steps_column_to_deployment.py rename to src/prefect/server/database/_migrations/versions/postgresql/2023_04_05_130406_43c94d4c7aa3_add_pull_steps_column_to_deployment.py diff --git a/src/prefect/server/database/migrations/versions/postgresql/2023_04_05_134520_310dda75f561_add_variables.py b/src/prefect/server/database/_migrations/versions/postgresql/2023_04_05_134520_310dda75f561_add_variables.py similarity index 100% rename from 
src/prefect/server/database/migrations/versions/postgresql/2023_04_05_134520_310dda75f561_add_variables.py rename to src/prefect/server/database/_migrations/versions/postgresql/2023_04_05_134520_310dda75f561_add_variables.py diff --git a/src/prefect/server/database/migrations/versions/postgresql/2023_04_06_122716_15f5083c16bd_migrate_artifact_data.py b/src/prefect/server/database/_migrations/versions/postgresql/2023_04_06_122716_15f5083c16bd_migrate_artifact_data.py similarity index 100% rename from src/prefect/server/database/migrations/versions/postgresql/2023_04_06_122716_15f5083c16bd_migrate_artifact_data.py rename to src/prefect/server/database/_migrations/versions/postgresql/2023_04_06_122716_15f5083c16bd_migrate_artifact_data.py diff --git a/src/prefect/server/database/migrations/versions/postgresql/2023_08_02_133838_5f623ddbf7fe_create_concurrency_limit_v2_table.py b/src/prefect/server/database/_migrations/versions/postgresql/2023_08_02_133838_5f623ddbf7fe_create_concurrency_limit_v2_table.py similarity index 100% rename from src/prefect/server/database/migrations/versions/postgresql/2023_08_02_133838_5f623ddbf7fe_create_concurrency_limit_v2_table.py rename to src/prefect/server/database/_migrations/versions/postgresql/2023_08_02_133838_5f623ddbf7fe_create_concurrency_limit_v2_table.py diff --git a/src/prefect/server/database/migrations/versions/postgresql/2023_09_06_085747_50f8c182c3ca_.py b/src/prefect/server/database/_migrations/versions/postgresql/2023_09_06_085747_50f8c182c3ca_.py similarity index 100% rename from src/prefect/server/database/migrations/versions/postgresql/2023_09_06_085747_50f8c182c3ca_.py rename to src/prefect/server/database/_migrations/versions/postgresql/2023_09_06_085747_50f8c182c3ca_.py diff --git a/src/prefect/server/database/migrations/versions/postgresql/2023_09_20_134544_db0eb3973a54_adds_enforce_parameter_schema_column_to_.py b/src/prefect/server/database/_migrations/versions/postgresql/2023_09_20_134544_db0eb3973a54_adds_enforce_parameter_schema_column_to_.py similarity index 100% rename from src/prefect/server/database/migrations/versions/postgresql/2023_09_20_134544_db0eb3973a54_adds_enforce_parameter_schema_column_to_.py rename to src/prefect/server/database/_migrations/versions/postgresql/2023_09_20_134544_db0eb3973a54_adds_enforce_parameter_schema_column_to_.py diff --git a/src/prefect/server/database/migrations/versions/postgresql/2023_09_21_130125_4e9a6f93eb6c_make_slot_decay_per_second_not_nullable.py b/src/prefect/server/database/_migrations/versions/postgresql/2023_09_21_130125_4e9a6f93eb6c_make_slot_decay_per_second_not_nullable.py similarity index 100% rename from src/prefect/server/database/migrations/versions/postgresql/2023_09_21_130125_4e9a6f93eb6c_make_slot_decay_per_second_not_nullable.py rename to src/prefect/server/database/_migrations/versions/postgresql/2023_09_21_130125_4e9a6f93eb6c_make_slot_decay_per_second_not_nullable.py diff --git a/src/prefect/server/database/migrations/versions/postgresql/2023_09_25_121806_05ea6f882b1d_remove_flow_run_id_requirement_from_task_run.py b/src/prefect/server/database/_migrations/versions/postgresql/2023_09_25_121806_05ea6f882b1d_remove_flow_run_id_requirement_from_task_run.py similarity index 100% rename from src/prefect/server/database/migrations/versions/postgresql/2023_09_25_121806_05ea6f882b1d_remove_flow_run_id_requirement_from_task_run.py rename to src/prefect/server/database/_migrations/versions/postgresql/2023_09_25_121806_05ea6f882b1d_remove_flow_run_id_requirement_from_task_run.py 
diff --git a/src/prefect/server/database/migrations/versions/postgresql/2023_10_12_224511_bfe653bbf62e_add_last_polled_to_deployment.py b/src/prefect/server/database/_migrations/versions/postgresql/2023_10_12_224511_bfe653bbf62e_add_last_polled_to_deployment.py similarity index 100% rename from src/prefect/server/database/migrations/versions/postgresql/2023_10_12_224511_bfe653bbf62e_add_last_polled_to_deployment.py rename to src/prefect/server/database/_migrations/versions/postgresql/2023_10_12_224511_bfe653bbf62e_add_last_polled_to_deployment.py diff --git a/src/prefect/server/database/migrations/versions/postgresql/2023_10_30_075026_cef24af2ec34_add_block_type_name_to_block_document.py b/src/prefect/server/database/_migrations/versions/postgresql/2023_10_30_075026_cef24af2ec34_add_block_type_name_to_block_document.py similarity index 100% rename from src/prefect/server/database/migrations/versions/postgresql/2023_10_30_075026_cef24af2ec34_add_block_type_name_to_block_document.py rename to src/prefect/server/database/_migrations/versions/postgresql/2023_10_30_075026_cef24af2ec34_add_block_type_name_to_block_document.py diff --git a/src/prefect/server/database/migrations/versions/postgresql/2023_11_20_084708_9c493c02ca6d_add_trgm_index_to_block_document_name.py b/src/prefect/server/database/_migrations/versions/postgresql/2023_11_20_084708_9c493c02ca6d_add_trgm_index_to_block_document_name.py similarity index 100% rename from src/prefect/server/database/migrations/versions/postgresql/2023_11_20_084708_9c493c02ca6d_add_trgm_index_to_block_document_name.py rename to src/prefect/server/database/_migrations/versions/postgresql/2023_11_20_084708_9c493c02ca6d_add_trgm_index_to_block_document_name.py diff --git a/src/prefect/server/database/migrations/versions/postgresql/2023_12_07_095320_733ca1903976_create_flow_run_input_table.py b/src/prefect/server/database/_migrations/versions/postgresql/2023_12_07_095320_733ca1903976_create_flow_run_input_table.py similarity index 100% rename from src/prefect/server/database/migrations/versions/postgresql/2023_12_07_095320_733ca1903976_create_flow_run_input_table.py rename to src/prefect/server/database/_migrations/versions/postgresql/2023_12_07_095320_733ca1903976_create_flow_run_input_table.py diff --git a/src/prefect/server/database/migrations/versions/postgresql/2023_12_07_121416_7c453555d3a5_make_flowruninput_flow_run_id_a_foreign_.py b/src/prefect/server/database/_migrations/versions/postgresql/2023_12_07_121416_7c453555d3a5_make_flowruninput_flow_run_id_a_foreign_.py similarity index 100% rename from src/prefect/server/database/migrations/versions/postgresql/2023_12_07_121416_7c453555d3a5_make_flowruninput_flow_run_id_a_foreign_.py rename to src/prefect/server/database/_migrations/versions/postgresql/2023_12_07_121416_7c453555d3a5_make_flowruninput_flow_run_id_a_foreign_.py diff --git a/src/prefect/server/database/migrations/versions/postgresql/2024_01_05_101034_6b63c51c31b4_add_sender_to_flowruninput.py b/src/prefect/server/database/_migrations/versions/postgresql/2024_01_05_101034_6b63c51c31b4_add_sender_to_flowruninput.py similarity index 100% rename from src/prefect/server/database/migrations/versions/postgresql/2024_01_05_101034_6b63c51c31b4_add_sender_to_flowruninput.py rename to src/prefect/server/database/_migrations/versions/postgresql/2024_01_05_101034_6b63c51c31b4_add_sender_to_flowruninput.py diff --git a/src/prefect/server/database/migrations/versions/postgresql/2024_01_22_120615_8cf4d4933848_create_deployment_schedule_and_add_.py 
b/src/prefect/server/database/_migrations/versions/postgresql/2024_01_22_120615_8cf4d4933848_create_deployment_schedule_and_add_.py similarity index 100% rename from src/prefect/server/database/migrations/versions/postgresql/2024_01_22_120615_8cf4d4933848_create_deployment_schedule_and_add_.py rename to src/prefect/server/database/_migrations/versions/postgresql/2024_01_22_120615_8cf4d4933848_create_deployment_schedule_and_add_.py diff --git a/src/prefect/server/database/migrations/versions/postgresql/2024_03_05_122228_121699507574_add_job_variables_column_to_flow_runs.py b/src/prefect/server/database/_migrations/versions/postgresql/2024_03_05_122228_121699507574_add_job_variables_column_to_flow_runs.py similarity index 100% rename from src/prefect/server/database/migrations/versions/postgresql/2024_03_05_122228_121699507574_add_job_variables_column_to_flow_runs.py rename to src/prefect/server/database/_migrations/versions/postgresql/2024_03_05_122228_121699507574_add_job_variables_column_to_flow_runs.py diff --git a/src/prefect/server/database/migrations/versions/postgresql/2024_03_13_111215_7a653837d9ba_create_csrf_token_toble.py b/src/prefect/server/database/_migrations/versions/postgresql/2024_03_13_111215_7a653837d9ba_create_csrf_token_toble.py similarity index 100% rename from src/prefect/server/database/migrations/versions/postgresql/2024_03_13_111215_7a653837d9ba_create_csrf_token_toble.py rename to src/prefect/server/database/_migrations/versions/postgresql/2024_03_13_111215_7a653837d9ba_create_csrf_token_toble.py diff --git a/src/prefect/server/database/migrations/versions/postgresql/2024_04_03_112409_aeea5ee6f070_automations_models.py b/src/prefect/server/database/_migrations/versions/postgresql/2024_04_03_112409_aeea5ee6f070_automations_models.py similarity index 100% rename from src/prefect/server/database/migrations/versions/postgresql/2024_04_03_112409_aeea5ee6f070_automations_models.py rename to src/prefect/server/database/_migrations/versions/postgresql/2024_04_03_112409_aeea5ee6f070_automations_models.py diff --git a/src/prefect/server/database/migrations/versions/postgresql/2024_04_04_094418_bd6efa529f03_add_deployment_version_to_flow_run.py b/src/prefect/server/database/_migrations/versions/postgresql/2024_04_04_094418_bd6efa529f03_add_deployment_version_to_flow_run.py similarity index 100% rename from src/prefect/server/database/migrations/versions/postgresql/2024_04_04_094418_bd6efa529f03_add_deployment_version_to_flow_run.py rename to src/prefect/server/database/_migrations/versions/postgresql/2024_04_04_094418_bd6efa529f03_add_deployment_version_to_flow_run.py diff --git a/src/prefect/server/database/migrations/versions/postgresql/2024_04_09_125658_916718e8330f_automation_event_follower.py b/src/prefect/server/database/_migrations/versions/postgresql/2024_04_09_125658_916718e8330f_automation_event_follower.py similarity index 100% rename from src/prefect/server/database/migrations/versions/postgresql/2024_04_09_125658_916718e8330f_automation_event_follower.py rename to src/prefect/server/database/_migrations/versions/postgresql/2024_04_09_125658_916718e8330f_automation_event_follower.py diff --git a/src/prefect/server/database/migrations/versions/postgresql/2024_04_09_132036_954db7517015_trigger_in_index.py b/src/prefect/server/database/_migrations/versions/postgresql/2024_04_09_132036_954db7517015_trigger_in_index.py similarity index 100% rename from src/prefect/server/database/migrations/versions/postgresql/2024_04_09_132036_954db7517015_trigger_in_index.py rename 
to src/prefect/server/database/_migrations/versions/postgresql/2024_04_09_132036_954db7517015_trigger_in_index.py diff --git a/src/prefect/server/database/migrations/versions/postgresql/2024_04_10_194742_15768c2ec702_add_events_and_event_resources_tables.py b/src/prefect/server/database/_migrations/versions/postgresql/2024_04_10_194742_15768c2ec702_add_events_and_event_resources_tables.py similarity index 100% rename from src/prefect/server/database/migrations/versions/postgresql/2024_04_10_194742_15768c2ec702_add_events_and_event_resources_tables.py rename to src/prefect/server/database/_migrations/versions/postgresql/2024_04_10_194742_15768c2ec702_add_events_and_event_resources_tables.py diff --git a/src/prefect/server/database/migrations/versions/postgresql/2024_04_23_094748_7ae9e431e67a_work_status_fields.py b/src/prefect/server/database/_migrations/versions/postgresql/2024_04_23_094748_7ae9e431e67a_work_status_fields.py similarity index 100% rename from src/prefect/server/database/migrations/versions/postgresql/2024_04_23_094748_7ae9e431e67a_work_status_fields.py rename to src/prefect/server/database/_migrations/versions/postgresql/2024_04_23_094748_7ae9e431e67a_work_status_fields.py diff --git a/src/prefect/server/database/migrations/versions/postgresql/2024_04_25_155240_8905262ec07f_worker_status_field.py b/src/prefect/server/database/_migrations/versions/postgresql/2024_04_25_155240_8905262ec07f_worker_status_field.py similarity index 100% rename from src/prefect/server/database/migrations/versions/postgresql/2024_04_25_155240_8905262ec07f_worker_status_field.py rename to src/prefect/server/database/_migrations/versions/postgresql/2024_04_25_155240_8905262ec07f_worker_status_field.py diff --git a/src/prefect/server/database/migrations/versions/postgresql/2024_05_01_105401_b23c83a12cb4_add_catchup_fields_to_deploymentschedule.py b/src/prefect/server/database/_migrations/versions/postgresql/2024_05_01_105401_b23c83a12cb4_add_catchup_fields_to_deploymentschedule.py similarity index 100% rename from src/prefect/server/database/migrations/versions/postgresql/2024_05_01_105401_b23c83a12cb4_add_catchup_fields_to_deploymentschedule.py rename to src/prefect/server/database/_migrations/versions/postgresql/2024_05_01_105401_b23c83a12cb4_add_catchup_fields_to_deploymentschedule.py diff --git a/src/prefect/server/database/migrations/versions/postgresql/2024_05_21_101457_94622c1663e8_json_variables.py b/src/prefect/server/database/_migrations/versions/postgresql/2024_05_21_101457_94622c1663e8_json_variables.py similarity index 100% rename from src/prefect/server/database/migrations/versions/postgresql/2024_05_21_101457_94622c1663e8_json_variables.py rename to src/prefect/server/database/_migrations/versions/postgresql/2024_05_21_101457_94622c1663e8_json_variables.py diff --git a/src/prefect/server/database/migrations/versions/postgresql/2024_07_15_145240_7495a5013e7e_adding_scope_to_followers.py b/src/prefect/server/database/_migrations/versions/postgresql/2024_07_15_145240_7495a5013e7e_adding_scope_to_followers.py similarity index 100% rename from src/prefect/server/database/migrations/versions/postgresql/2024_07_15_145240_7495a5013e7e_adding_scope_to_followers.py rename to src/prefect/server/database/_migrations/versions/postgresql/2024_07_15_145240_7495a5013e7e_adding_scope_to_followers.py diff --git a/src/prefect/server/database/migrations/versions/postgresql/2024_08_14_150111_97429116795e_add_deployment_concurrency_limit.py 
b/src/prefect/server/database/_migrations/versions/postgresql/2024_08_14_150111_97429116795e_add_deployment_concurrency_limit.py similarity index 100% rename from src/prefect/server/database/migrations/versions/postgresql/2024_08_14_150111_97429116795e_add_deployment_concurrency_limit.py rename to src/prefect/server/database/_migrations/versions/postgresql/2024_08_14_150111_97429116795e_add_deployment_concurrency_limit.py diff --git a/src/prefect/server/database/migrations/versions/postgresql/2024_09_11_090317_555ed31b284d_add_concurrency_options.py b/src/prefect/server/database/_migrations/versions/postgresql/2024_09_11_090317_555ed31b284d_add_concurrency_options.py similarity index 100% rename from src/prefect/server/database/migrations/versions/postgresql/2024_09_11_090317_555ed31b284d_add_concurrency_options.py rename to src/prefect/server/database/_migrations/versions/postgresql/2024_09_11_090317_555ed31b284d_add_concurrency_options.py diff --git a/src/prefect/server/database/migrations/versions/postgresql/2024_09_16_152051_eaec5004771f_add_deployment_to_global_concurrency_.py b/src/prefect/server/database/_migrations/versions/postgresql/2024_09_16_152051_eaec5004771f_add_deployment_to_global_concurrency_.py similarity index 100% rename from src/prefect/server/database/migrations/versions/postgresql/2024_09_16_152051_eaec5004771f_add_deployment_to_global_concurrency_.py rename to src/prefect/server/database/_migrations/versions/postgresql/2024_09_16_152051_eaec5004771f_add_deployment_to_global_concurrency_.py diff --git a/src/prefect/server/database/migrations/versions/postgresql/2024_11_15_150706_68a44144428d_add_labels_column_to_flow_flowrun_.py b/src/prefect/server/database/_migrations/versions/postgresql/2024_11_15_150706_68a44144428d_add_labels_column_to_flow_flowrun_.py similarity index 100% rename from src/prefect/server/database/migrations/versions/postgresql/2024_11_15_150706_68a44144428d_add_labels_column_to_flow_flowrun_.py rename to src/prefect/server/database/_migrations/versions/postgresql/2024_11_15_150706_68a44144428d_add_labels_column_to_flow_flowrun_.py diff --git a/src/prefect/server/database/_migrations/versions/postgresql/2024_12_04_165333_5d03c01be85e_sync_orm_models_and_migrations.py b/src/prefect/server/database/_migrations/versions/postgresql/2024_12_04_165333_5d03c01be85e_sync_orm_models_and_migrations.py new file mode 100644 index 000000000000..40dc90ae5dd2 --- /dev/null +++ b/src/prefect/server/database/_migrations/versions/postgresql/2024_12_04_165333_5d03c01be85e_sync_orm_models_and_migrations.py @@ -0,0 +1,111 @@ +"""Sync ORM models and migrations + +Revision ID: 5d03c01be85e +Revises: 68a44144428d +Create Date: 2024-12-04 16:53:33.015870 + +""" + +import sqlalchemy as sa +from alembic import op +from sqlalchemy.dialects import postgresql + +# revision identifiers, used by Alembic. +revision = "5d03c01be85e" +down_revision = "68a44144428d" +branch_labels = None +depends_on = None + + +def upgrade(): + # Column is non-null in the ORM and in SQLite. + op.execute( + "UPDATE artifact_collection SET latest_id=GEN_RANDOM_UUID() WHERE latest_id IS NULL" + ) + op.alter_column( + "artifact_collection", "latest_id", existing_type=sa.UUID(), nullable=False + ) + + # table added in 027c123512befd2bd00a0ef28bd44215e77bece6 but index was + # never created in a migration. 
+ op.create_index( + op.f("ix_artifact_collection__updated"), + "artifact_collection", + ["updated"], + unique=False, + ) + + # columns removed in c53b00bfa1f6850ab43e168c92c627350c090647 + op.drop_column("deployment", "schedule") + op.drop_column("deployment", "is_schedule_active") + + # column removed in 5784c637e7e11a8e88e2b3146e54e9b6c97d50ef + op.drop_column("deployment", "flow_data") + + # Column is no longer a FK since d10c7471a69403bcf88f401091497a2dc8963885 + op.drop_index("ix_flow_run__deployment_id", table_name="flow_run") + + # column removed in eaa7a5063c73718dff56ce4aeb66e53fcafe60e5 + op.drop_column("deployment", "manifest_path") + + # columns removed from orm models in 0b62de684447c6955e04c722c276edac4002fd40 + op.drop_column("deployment_schedule", "catchup") + op.drop_column("deployment_schedule", "max_active_runs") + + +def downgrade(): + op.create_index( + "ix_flow_run__deployment_id", "flow_run", ["deployment_id"], unique=False + ) + op.add_column( + "deployment_schedule", + sa.Column("max_active_runs", sa.INTEGER(), autoincrement=False, nullable=True), + ) + op.add_column( + "deployment_schedule", + sa.Column( + "catchup", + sa.BOOLEAN(), + server_default=sa.text("false"), + autoincrement=False, + nullable=False, + ), + ) + op.add_column( + "deployment", + sa.Column( + "flow_data", + postgresql.JSONB(astext_type=sa.Text()), + autoincrement=False, + nullable=True, + ), + ) + op.add_column( + "deployment", + sa.Column( + "is_schedule_active", + sa.BOOLEAN(), + server_default=sa.text("true"), + autoincrement=False, + nullable=False, + ), + ) + op.add_column( + "deployment", + sa.Column("manifest_path", sa.VARCHAR(), autoincrement=False, nullable=True), + ) + op.add_column( + "deployment", + sa.Column( + "schedule", + postgresql.JSONB(astext_type=sa.Text()), + autoincrement=False, + nullable=True, + ), + ) + op.drop_index( + op.f("ix_artifact_collection__updated"), table_name="artifact_collection" + ) + op.alter_column( + "artifact_collection", "latest_id", existing_type=sa.UUID(), nullable=True + ) diff --git a/src/prefect/server/database/migrations/versions/sqlite/2022_01_20_115236_9725c1cbee35_initial_migration.py b/src/prefect/server/database/_migrations/versions/sqlite/2022_01_20_115236_9725c1cbee35_initial_migration.py similarity index 100% rename from src/prefect/server/database/migrations/versions/sqlite/2022_01_20_115236_9725c1cbee35_initial_migration.py rename to src/prefect/server/database/_migrations/versions/sqlite/2022_01_20_115236_9725c1cbee35_initial_migration.py diff --git a/src/prefect/server/database/migrations/versions/sqlite/2022_02_04_093838_619bea85701a_block_data.py b/src/prefect/server/database/_migrations/versions/sqlite/2022_02_04_093838_619bea85701a_block_data.py similarity index 100% rename from src/prefect/server/database/migrations/versions/sqlite/2022_02_04_093838_619bea85701a_block_data.py rename to src/prefect/server/database/_migrations/versions/sqlite/2022_02_04_093838_619bea85701a_block_data.py diff --git a/src/prefect/server/database/migrations/versions/sqlite/2022_02_15_211737_28ae48128c75_add_configurations.py b/src/prefect/server/database/_migrations/versions/sqlite/2022_02_15_211737_28ae48128c75_add_configurations.py similarity index 100% rename from src/prefect/server/database/migrations/versions/sqlite/2022_02_15_211737_28ae48128c75_add_configurations.py rename to src/prefect/server/database/_migrations/versions/sqlite/2022_02_15_211737_28ae48128c75_add_configurations.py diff --git 
a/src/prefect/server/database/migrations/versions/sqlite/2022_02_17_151416_7c91cb86dc4e_add_agents_and_work_queues.py b/src/prefect/server/database/_migrations/versions/sqlite/2022_02_17_151416_7c91cb86dc4e_add_agents_and_work_queues.py similarity index 100% rename from src/prefect/server/database/migrations/versions/sqlite/2022_02_17_151416_7c91cb86dc4e_add_agents_and_work_queues.py rename to src/prefect/server/database/_migrations/versions/sqlite/2022_02_17_151416_7c91cb86dc4e_add_agents_and_work_queues.py diff --git a/src/prefect/server/database/migrations/versions/sqlite/2022_02_19_210255_4c4a6a138053_rename_block_data_table.py b/src/prefect/server/database/_migrations/versions/sqlite/2022_02_19_210255_4c4a6a138053_rename_block_data_table.py similarity index 100% rename from src/prefect/server/database/migrations/versions/sqlite/2022_02_19_210255_4c4a6a138053_rename_block_data_table.py rename to src/prefect/server/database/_migrations/versions/sqlite/2022_02_19_210255_4c4a6a138053_rename_block_data_table.py diff --git a/src/prefect/server/database/migrations/versions/sqlite/2022_02_20_103610_e1ff4973a9eb_add_block_spec_table.py b/src/prefect/server/database/_migrations/versions/sqlite/2022_02_20_103610_e1ff4973a9eb_add_block_spec_table.py similarity index 100% rename from src/prefect/server/database/migrations/versions/sqlite/2022_02_20_103610_e1ff4973a9eb_add_block_spec_table.py rename to src/prefect/server/database/_migrations/versions/sqlite/2022_02_20_103610_e1ff4973a9eb_add_block_spec_table.py diff --git a/src/prefect/server/database/migrations/versions/sqlite/2022_02_21_111238_f327e877e423_index_flowrun_flow_runner_type.py b/src/prefect/server/database/_migrations/versions/sqlite/2022_02_21_111238_f327e877e423_index_flowrun_flow_runner_type.py similarity index 100% rename from src/prefect/server/database/migrations/versions/sqlite/2022_02_21_111238_f327e877e423_index_flowrun_flow_runner_type.py rename to src/prefect/server/database/_migrations/versions/sqlite/2022_02_21_111238_f327e877e423_index_flowrun_flow_runner_type.py diff --git a/src/prefect/server/database/migrations/versions/sqlite/2022_02_21_145916_c8ff35f94028_add_block_spec_id_to_blocks.py b/src/prefect/server/database/_migrations/versions/sqlite/2022_02_21_145916_c8ff35f94028_add_block_spec_id_to_blocks.py similarity index 100% rename from src/prefect/server/database/migrations/versions/sqlite/2022_02_21_145916_c8ff35f94028_add_block_spec_id_to_blocks.py rename to src/prefect/server/database/_migrations/versions/sqlite/2022_02_21_145916_c8ff35f94028_add_block_spec_id_to_blocks.py diff --git a/src/prefect/server/database/migrations/versions/sqlite/2022_03_10_102500_71a57ec351d1_index_flow_created.py b/src/prefect/server/database/_migrations/versions/sqlite/2022_03_10_102500_71a57ec351d1_index_flow_created.py similarity index 100% rename from src/prefect/server/database/migrations/versions/sqlite/2022_03_10_102500_71a57ec351d1_index_flow_created.py rename to src/prefect/server/database/_migrations/versions/sqlite/2022_03_10_102500_71a57ec351d1_index_flow_created.py diff --git a/src/prefect/server/database/migrations/versions/sqlite/2022_04_19_181604_7f5f335cace3_add_flow_run_state_name.py b/src/prefect/server/database/_migrations/versions/sqlite/2022_04_19_181604_7f5f335cace3_add_flow_run_state_name.py similarity index 100% rename from src/prefect/server/database/migrations/versions/sqlite/2022_04_19_181604_7f5f335cace3_add_flow_run_state_name.py rename to 
src/prefect/server/database/_migrations/versions/sqlite/2022_04_19_181604_7f5f335cace3_add_flow_run_state_name.py diff --git a/src/prefect/server/database/migrations/versions/sqlite/2022_04_21_113057_db6bde582447_backfill_state_name.py b/src/prefect/server/database/_migrations/versions/sqlite/2022_04_21_113057_db6bde582447_backfill_state_name.py similarity index 100% rename from src/prefect/server/database/migrations/versions/sqlite/2022_04_21_113057_db6bde582447_backfill_state_name.py rename to src/prefect/server/database/_migrations/versions/sqlite/2022_04_21_113057_db6bde582447_backfill_state_name.py diff --git a/src/prefect/server/database/migrations/versions/sqlite/2022_04_23_114831_fd966d4ad99c_rename_block_to_blockbasis_and_.py b/src/prefect/server/database/_migrations/versions/sqlite/2022_04_23_114831_fd966d4ad99c_rename_block_to_blockbasis_and_.py similarity index 100% rename from src/prefect/server/database/migrations/versions/sqlite/2022_04_23_114831_fd966d4ad99c_rename_block_to_blockbasis_and_.py rename to src/prefect/server/database/_migrations/versions/sqlite/2022_04_23_114831_fd966d4ad99c_rename_block_to_blockbasis_and_.py diff --git a/src/prefect/server/database/migrations/versions/sqlite/2022_04_25_135207_b75d279ba985_replace_version_with_checksum.py b/src/prefect/server/database/_migrations/versions/sqlite/2022_04_25_135207_b75d279ba985_replace_version_with_checksum.py similarity index 100% rename from src/prefect/server/database/migrations/versions/sqlite/2022_04_25_135207_b75d279ba985_replace_version_with_checksum.py rename to src/prefect/server/database/_migrations/versions/sqlite/2022_04_25_135207_b75d279ba985_replace_version_with_checksum.py diff --git a/src/prefect/server/database/migrations/versions/sqlite/2022_05_12_203158_888a0bb0df7b_add_flow_run_alerts.py b/src/prefect/server/database/_migrations/versions/sqlite/2022_05_12_203158_888a0bb0df7b_add_flow_run_alerts.py similarity index 100% rename from src/prefect/server/database/migrations/versions/sqlite/2022_05_12_203158_888a0bb0df7b_add_flow_run_alerts.py rename to src/prefect/server/database/_migrations/versions/sqlite/2022_05_12_203158_888a0bb0df7b_add_flow_run_alerts.py diff --git a/src/prefect/server/database/migrations/versions/sqlite/2022_05_19_165808_33439667aeea_add_block_schema_capabilities.py b/src/prefect/server/database/_migrations/versions/sqlite/2022_05_19_165808_33439667aeea_add_block_schema_capabilities.py similarity index 100% rename from src/prefect/server/database/migrations/versions/sqlite/2022_05_19_165808_33439667aeea_add_block_schema_capabilities.py rename to src/prefect/server/database/_migrations/versions/sqlite/2022_05_19_165808_33439667aeea_add_block_schema_capabilities.py diff --git a/src/prefect/server/database/migrations/versions/sqlite/2022_05_28_081650_e73c6f1fe752_adds_block_schema_referecnes_and_block_.py b/src/prefect/server/database/_migrations/versions/sqlite/2022_05_28_081650_e73c6f1fe752_adds_block_schema_referecnes_and_block_.py similarity index 100% rename from src/prefect/server/database/migrations/versions/sqlite/2022_05_28_081650_e73c6f1fe752_adds_block_schema_referecnes_and_block_.py rename to src/prefect/server/database/_migrations/versions/sqlite/2022_05_28_081650_e73c6f1fe752_adds_block_schema_referecnes_and_block_.py diff --git a/src/prefect/server/database/migrations/versions/sqlite/2022_05_30_100855_d76326ed0d06_rename_run_alerts_to_run_notifications.py 
b/src/prefect/server/database/_migrations/versions/sqlite/2022_05_30_100855_d76326ed0d06_rename_run_alerts_to_run_notifications.py similarity index 100% rename from src/prefect/server/database/migrations/versions/sqlite/2022_05_30_100855_d76326ed0d06_rename_run_alerts_to_run_notifications.py rename to src/prefect/server/database/_migrations/versions/sqlite/2022_05_30_100855_d76326ed0d06_rename_run_alerts_to_run_notifications.py diff --git a/src/prefect/server/database/migrations/versions/sqlite/2022_06_04_104048_f65b6ad0b869_add_indexes_for_partial_name_matches.py b/src/prefect/server/database/_migrations/versions/sqlite/2022_06_04_104048_f65b6ad0b869_add_indexes_for_partial_name_matches.py similarity index 100% rename from src/prefect/server/database/migrations/versions/sqlite/2022_06_04_104048_f65b6ad0b869_add_indexes_for_partial_name_matches.py rename to src/prefect/server/database/_migrations/versions/sqlite/2022_06_04_104048_f65b6ad0b869_add_indexes_for_partial_name_matches.py diff --git a/src/prefect/server/database/migrations/versions/sqlite/2022_06_08_121702_84892301571a_adds_description_and_code_example_to_.py b/src/prefect/server/database/_migrations/versions/sqlite/2022_06_08_121702_84892301571a_adds_description_and_code_example_to_.py similarity index 100% rename from src/prefect/server/database/migrations/versions/sqlite/2022_06_08_121702_84892301571a_adds_description_and_code_example_to_.py rename to src/prefect/server/database/_migrations/versions/sqlite/2022_06_08_121702_84892301571a_adds_description_and_code_example_to_.py diff --git a/src/prefect/server/database/migrations/versions/sqlite/2022_06_13_103943_2d900af9cd07_add_anonymous_column_for_block_documents.py b/src/prefect/server/database/_migrations/versions/sqlite/2022_06_13_103943_2d900af9cd07_add_anonymous_column_for_block_documents.py similarity index 100% rename from src/prefect/server/database/migrations/versions/sqlite/2022_06_13_103943_2d900af9cd07_add_anonymous_column_for_block_documents.py rename to src/prefect/server/database/_migrations/versions/sqlite/2022_06_13_103943_2d900af9cd07_add_anonymous_column_for_block_documents.py diff --git a/src/prefect/server/database/migrations/versions/sqlite/2022_06_17_204530_9e2a1c08c6f1_add_block_schema_indexes.py b/src/prefect/server/database/_migrations/versions/sqlite/2022_06_17_204530_9e2a1c08c6f1_add_block_schema_indexes.py similarity index 100% rename from src/prefect/server/database/migrations/versions/sqlite/2022_06_17_204530_9e2a1c08c6f1_add_block_schema_indexes.py rename to src/prefect/server/database/_migrations/versions/sqlite/2022_06_17_204530_9e2a1c08c6f1_add_block_schema_indexes.py diff --git a/src/prefect/server/database/migrations/versions/sqlite/2022_06_20_123823_dff8da7a6c2c_add_protected_column_for_block_types.py b/src/prefect/server/database/_migrations/versions/sqlite/2022_06_20_123823_dff8da7a6c2c_add_protected_column_for_block_types.py similarity index 100% rename from src/prefect/server/database/migrations/versions/sqlite/2022_06_20_123823_dff8da7a6c2c_add_protected_column_for_block_types.py rename to src/prefect/server/database/_migrations/versions/sqlite/2022_06_20_123823_dff8da7a6c2c_add_protected_column_for_block_types.py diff --git a/src/prefect/server/database/migrations/versions/sqlite/2022_06_21_093640_a205b458d997_adds_indexes_for_block_filtering.py b/src/prefect/server/database/_migrations/versions/sqlite/2022_06_21_093640_a205b458d997_adds_indexes_for_block_filtering.py similarity index 100% rename from 
src/prefect/server/database/migrations/versions/sqlite/2022_06_21_093640_a205b458d997_adds_indexes_for_block_filtering.py rename to src/prefect/server/database/_migrations/versions/sqlite/2022_06_21_093640_a205b458d997_adds_indexes_for_block_filtering.py diff --git a/src/prefect/server/database/migrations/versions/sqlite/2022_06_29_133432_3bd87ecdac38_add_descriptions_to_deployments.py b/src/prefect/server/database/_migrations/versions/sqlite/2022_06_29_133432_3bd87ecdac38_add_descriptions_to_deployments.py similarity index 100% rename from src/prefect/server/database/migrations/versions/sqlite/2022_06_29_133432_3bd87ecdac38_add_descriptions_to_deployments.py rename to src/prefect/server/database/_migrations/versions/sqlite/2022_06_29_133432_3bd87ecdac38_add_descriptions_to_deployments.py diff --git a/src/prefect/server/database/migrations/versions/sqlite/2022_06_29_151832_42762c37b7bc_remove_name_column_for_notification_.py b/src/prefect/server/database/_migrations/versions/sqlite/2022_06_29_151832_42762c37b7bc_remove_name_column_for_notification_.py similarity index 100% rename from src/prefect/server/database/migrations/versions/sqlite/2022_06_29_151832_42762c37b7bc_remove_name_column_for_notification_.py rename to src/prefect/server/database/_migrations/versions/sqlite/2022_06_29_151832_42762c37b7bc_remove_name_column_for_notification_.py diff --git a/src/prefect/server/database/migrations/versions/sqlite/2022_07_06_142824_e2dae764a603_migrates_block_schemas_with_new_secrets_.py b/src/prefect/server/database/_migrations/versions/sqlite/2022_07_06_142824_e2dae764a603_migrates_block_schemas_with_new_secrets_.py similarity index 100% rename from src/prefect/server/database/migrations/versions/sqlite/2022_07_06_142824_e2dae764a603_migrates_block_schemas_with_new_secrets_.py rename to src/prefect/server/database/_migrations/versions/sqlite/2022_07_06_142824_e2dae764a603_migrates_block_schemas_with_new_secrets_.py diff --git a/src/prefect/server/database/migrations/versions/sqlite/2022_07_07_111208_061c7e518b40_removes_debugprintnotification_block_.py b/src/prefect/server/database/_migrations/versions/sqlite/2022_07_07_111208_061c7e518b40_removes_debugprintnotification_block_.py similarity index 100% rename from src/prefect/server/database/migrations/versions/sqlite/2022_07_07_111208_061c7e518b40_removes_debugprintnotification_block_.py rename to src/prefect/server/database/_migrations/versions/sqlite/2022_07_07_111208_061c7e518b40_removes_debugprintnotification_block_.py diff --git a/src/prefect/server/database/migrations/versions/sqlite/2022_07_11_113314_638cbcc2a158_add_infrastructure_block_id_to_.py b/src/prefect/server/database/_migrations/versions/sqlite/2022_07_11_113314_638cbcc2a158_add_infrastructure_block_id_to_.py similarity index 100% rename from src/prefect/server/database/migrations/versions/sqlite/2022_07_11_113314_638cbcc2a158_add_infrastructure_block_id_to_.py rename to src/prefect/server/database/_migrations/versions/sqlite/2022_07_11_113314_638cbcc2a158_add_infrastructure_block_id_to_.py diff --git a/src/prefect/server/database/migrations/versions/sqlite/2022_07_14_113138_56be24fdb383_removing_default_storage_block_document.py b/src/prefect/server/database/_migrations/versions/sqlite/2022_07_14_113138_56be24fdb383_removing_default_storage_block_document.py similarity index 100% rename from src/prefect/server/database/migrations/versions/sqlite/2022_07_14_113138_56be24fdb383_removing_default_storage_block_document.py rename to 
src/prefect/server/database/_migrations/versions/sqlite/2022_07_14_113138_56be24fdb383_removing_default_storage_block_document.py diff --git a/src/prefect/server/database/migrations/versions/sqlite/2022_07_19_153432_628a873f0d1a_renames_existing_block_types.py b/src/prefect/server/database/_migrations/versions/sqlite/2022_07_19_153432_628a873f0d1a_renames_existing_block_types.py similarity index 100% rename from src/prefect/server/database/migrations/versions/sqlite/2022_07_19_153432_628a873f0d1a_renames_existing_block_types.py rename to src/prefect/server/database/_migrations/versions/sqlite/2022_07_19_153432_628a873f0d1a_renames_existing_block_types.py diff --git a/src/prefect/server/database/migrations/versions/sqlite/2022_07_20_113451_2fe8ef6a6514_remove_flow_runners.py b/src/prefect/server/database/_migrations/versions/sqlite/2022_07_20_113451_2fe8ef6a6514_remove_flow_runners.py similarity index 100% rename from src/prefect/server/database/migrations/versions/sqlite/2022_07_20_113451_2fe8ef6a6514_remove_flow_runners.py rename to src/prefect/server/database/_migrations/versions/sqlite/2022_07_20_113451_2fe8ef6a6514_remove_flow_runners.py diff --git a/src/prefect/server/database/migrations/versions/sqlite/2022_07_25_142515_f335f9633eec_adds_block_type_slug.py b/src/prefect/server/database/_migrations/versions/sqlite/2022_07_25_142515_f335f9633eec_adds_block_type_slug.py similarity index 100% rename from src/prefect/server/database/migrations/versions/sqlite/2022_07_25_142515_f335f9633eec_adds_block_type_slug.py rename to src/prefect/server/database/_migrations/versions/sqlite/2022_07_25_142515_f335f9633eec_adds_block_type_slug.py diff --git a/src/prefect/server/database/migrations/versions/sqlite/2022_07_25_151028_88c2112b668f_update_deployments_to_include_more_.py b/src/prefect/server/database/_migrations/versions/sqlite/2022_07_25_151028_88c2112b668f_update_deployments_to_include_more_.py similarity index 100% rename from src/prefect/server/database/migrations/versions/sqlite/2022_07_25_151028_88c2112b668f_update_deployments_to_include_more_.py rename to src/prefect/server/database/_migrations/versions/sqlite/2022_07_25_151028_88c2112b668f_update_deployments_to_include_more_.py diff --git a/src/prefect/server/database/migrations/versions/sqlite/2022_07_29_181111_905134444e17_add_index_to_flow_run_infrastructure_.py b/src/prefect/server/database/_migrations/versions/sqlite/2022_07_29_181111_905134444e17_add_index_to_flow_run_infrastructure_.py similarity index 100% rename from src/prefect/server/database/migrations/versions/sqlite/2022_07_29_181111_905134444e17_add_index_to_flow_run_infrastructure_.py rename to src/prefect/server/database/_migrations/versions/sqlite/2022_07_29_181111_905134444e17_add_index_to_flow_run_infrastructure_.py diff --git a/src/prefect/server/database/migrations/versions/sqlite/2022_08_01_211039_24bb2e4a195c_add_deployment_version.py b/src/prefect/server/database/_migrations/versions/sqlite/2022_08_01_211039_24bb2e4a195c_add_deployment_version.py similarity index 100% rename from src/prefect/server/database/migrations/versions/sqlite/2022_08_01_211039_24bb2e4a195c_add_deployment_version.py rename to src/prefect/server/database/_migrations/versions/sqlite/2022_08_01_211039_24bb2e4a195c_add_deployment_version.py diff --git a/src/prefect/server/database/migrations/versions/sqlite/2022_08_06_130009_296e2665785f_expand_deployment_schema_for_improved_ux.py 
b/src/prefect/server/database/_migrations/versions/sqlite/2022_08_06_130009_296e2665785f_expand_deployment_schema_for_improved_ux.py similarity index 100% rename from src/prefect/server/database/migrations/versions/sqlite/2022_08_06_130009_296e2665785f_expand_deployment_schema_for_improved_ux.py rename to src/prefect/server/database/_migrations/versions/sqlite/2022_08_06_130009_296e2665785f_expand_deployment_schema_for_improved_ux.py diff --git a/src/prefect/server/database/migrations/versions/sqlite/2022_08_07_134138_575634b7acd4_add_work_queue_name_to_runs.py b/src/prefect/server/database/_migrations/versions/sqlite/2022_08_07_134138_575634b7acd4_add_work_queue_name_to_runs.py similarity index 100% rename from src/prefect/server/database/migrations/versions/sqlite/2022_08_07_134138_575634b7acd4_add_work_queue_name_to_runs.py rename to src/prefect/server/database/_migrations/versions/sqlite/2022_08_07_134138_575634b7acd4_add_work_queue_name_to_runs.py diff --git a/src/prefect/server/database/migrations/versions/sqlite/2022_08_07_154319_53c19b31aa09_fix_name_on_concurrency_limit_tag_idx.py b/src/prefect/server/database/_migrations/versions/sqlite/2022_08_07_154319_53c19b31aa09_fix_name_on_concurrency_limit_tag_idx.py similarity index 100% rename from src/prefect/server/database/migrations/versions/sqlite/2022_08_07_154319_53c19b31aa09_fix_name_on_concurrency_limit_tag_idx.py rename to src/prefect/server/database/_migrations/versions/sqlite/2022_08_07_154319_53c19b31aa09_fix_name_on_concurrency_limit_tag_idx.py diff --git a/src/prefect/server/database/migrations/versions/sqlite/2022_08_18_102527_e757138e954a_adds_block_schema_version.py b/src/prefect/server/database/_migrations/versions/sqlite/2022_08_18_102527_e757138e954a_adds_block_schema_version.py similarity index 100% rename from src/prefect/server/database/migrations/versions/sqlite/2022_08_18_102527_e757138e954a_adds_block_schema_version.py rename to src/prefect/server/database/_migrations/versions/sqlite/2022_08_18_102527_e757138e954a_adds_block_schema_version.py diff --git a/src/prefect/server/database/migrations/versions/sqlite/2022_10_12_102048_22b7cb02e593_add_state_timestamp.py b/src/prefect/server/database/_migrations/versions/sqlite/2022_10_12_102048_22b7cb02e593_add_state_timestamp.py similarity index 100% rename from src/prefect/server/database/migrations/versions/sqlite/2022_10_12_102048_22b7cb02e593_add_state_timestamp.py rename to src/prefect/server/database/_migrations/versions/sqlite/2022_10_12_102048_22b7cb02e593_add_state_timestamp.py diff --git a/src/prefect/server/database/migrations/versions/sqlite/2022_10_14_172612_ad4b1b4d1e9d_index_deployment_created.py b/src/prefect/server/database/_migrations/versions/sqlite/2022_10_14_172612_ad4b1b4d1e9d_index_deployment_created.py similarity index 100% rename from src/prefect/server/database/migrations/versions/sqlite/2022_10_14_172612_ad4b1b4d1e9d_index_deployment_created.py rename to src/prefect/server/database/_migrations/versions/sqlite/2022_10_14_172612_ad4b1b4d1e9d_index_deployment_created.py diff --git a/src/prefect/server/database/migrations/versions/sqlite/2022_10_19_093542_fa319f214160_add_created_by.py b/src/prefect/server/database/_migrations/versions/sqlite/2022_10_19_093542_fa319f214160_add_created_by.py similarity index 100% rename from src/prefect/server/database/migrations/versions/sqlite/2022_10_19_093542_fa319f214160_add_created_by.py rename to src/prefect/server/database/_migrations/versions/sqlite/2022_10_19_093542_fa319f214160_add_created_by.py diff 
--git a/src/prefect/server/database/migrations/versions/sqlite/2022_10_19_155810_af52717cf201_track_retries_restarts.py b/src/prefect/server/database/_migrations/versions/sqlite/2022_10_19_155810_af52717cf201_track_retries_restarts.py similarity index 100% rename from src/prefect/server/database/migrations/versions/sqlite/2022_10_19_155810_af52717cf201_track_retries_restarts.py rename to src/prefect/server/database/_migrations/versions/sqlite/2022_10_19_155810_af52717cf201_track_retries_restarts.py diff --git a/src/prefect/server/database/migrations/versions/sqlite/2022_10_20_101423_3ced59d8806b_add_last_polled.py b/src/prefect/server/database/_migrations/versions/sqlite/2022_10_20_101423_3ced59d8806b_add_last_polled.py similarity index 100% rename from src/prefect/server/database/migrations/versions/sqlite/2022_10_20_101423_3ced59d8806b_add_last_polled.py rename to src/prefect/server/database/_migrations/versions/sqlite/2022_10_20_101423_3ced59d8806b_add_last_polled.py diff --git a/src/prefect/server/database/migrations/versions/sqlite/2022_11_05_180619_a0284438370e_add_index_for_scheduled_deployments.py b/src/prefect/server/database/_migrations/versions/sqlite/2022_11_05_180619_a0284438370e_add_index_for_scheduled_deployments.py similarity index 100% rename from src/prefect/server/database/migrations/versions/sqlite/2022_11_05_180619_a0284438370e_add_index_for_scheduled_deployments.py rename to src/prefect/server/database/_migrations/versions/sqlite/2022_11_05_180619_a0284438370e_add_index_for_scheduled_deployments.py diff --git a/src/prefect/server/database/migrations/versions/sqlite/2022_11_10_165921_4f90ad6349bd_add_coalesced_start_time_indices.py b/src/prefect/server/database/_migrations/versions/sqlite/2022_11_10_165921_4f90ad6349bd_add_coalesced_start_time_indices.py similarity index 100% rename from src/prefect/server/database/migrations/versions/sqlite/2022_11_10_165921_4f90ad6349bd_add_coalesced_start_time_indices.py rename to src/prefect/server/database/_migrations/versions/sqlite/2022_11_10_165921_4f90ad6349bd_add_coalesced_start_time_indices.py diff --git a/src/prefect/server/database/migrations/versions/sqlite/2022_11_18_161332_7201de756d85_add_flowrun_infrastructure_pid.py b/src/prefect/server/database/_migrations/versions/sqlite/2022_11_18_161332_7201de756d85_add_flowrun_infrastructure_pid.py similarity index 100% rename from src/prefect/server/database/migrations/versions/sqlite/2022_11_18_161332_7201de756d85_add_flowrun_infrastructure_pid.py rename to src/prefect/server/database/_migrations/versions/sqlite/2022_11_18_161332_7201de756d85_add_flowrun_infrastructure_pid.py diff --git a/src/prefect/server/database/migrations/versions/sqlite/2022_11_24_143302_fe77ad0dda06_add_worker_tables.py b/src/prefect/server/database/_migrations/versions/sqlite/2022_11_24_143302_fe77ad0dda06_add_worker_tables.py similarity index 100% rename from src/prefect/server/database/migrations/versions/sqlite/2022_11_24_143302_fe77ad0dda06_add_worker_tables.py rename to src/prefect/server/database/_migrations/versions/sqlite/2022_11_24_143302_fe77ad0dda06_add_worker_tables.py diff --git a/src/prefect/server/database/migrations/versions/sqlite/2023_01_08_175327_bb38729c471a_rename_worker_pools_to_work_pools.py b/src/prefect/server/database/_migrations/versions/sqlite/2023_01_08_175327_bb38729c471a_rename_worker_pools_to_work_pools.py similarity index 100% rename from src/prefect/server/database/migrations/versions/sqlite/2023_01_08_175327_bb38729c471a_rename_worker_pools_to_work_pools.py rename to 
src/prefect/server/database/_migrations/versions/sqlite/2023_01_08_175327_bb38729c471a_rename_worker_pools_to_work_pools.py diff --git a/src/prefect/server/database/migrations/versions/sqlite/2023_01_12_000042_f92143d30c24_implement_artifact_table.py b/src/prefect/server/database/_migrations/versions/sqlite/2023_01_12_000042_f92143d30c24_implement_artifact_table.py similarity index 100% rename from src/prefect/server/database/migrations/versions/sqlite/2023_01_12_000042_f92143d30c24_implement_artifact_table.py rename to src/prefect/server/database/_migrations/versions/sqlite/2023_01_12_000042_f92143d30c24_implement_artifact_table.py diff --git a/src/prefect/server/database/migrations/versions/sqlite/2023_01_12_000043_f92143d30c25_create_migration_index.py b/src/prefect/server/database/_migrations/versions/sqlite/2023_01_12_000043_f92143d30c25_create_migration_index.py similarity index 100% rename from src/prefect/server/database/migrations/versions/sqlite/2023_01_12_000043_f92143d30c25_create_migration_index.py rename to src/prefect/server/database/_migrations/versions/sqlite/2023_01_12_000043_f92143d30c25_create_migration_index.py diff --git a/src/prefect/server/database/migrations/versions/sqlite/2023_01_12_000043_f92143d30c26_migrate_artifact_data.py b/src/prefect/server/database/_migrations/versions/sqlite/2023_01_12_000043_f92143d30c26_migrate_artifact_data.py similarity index 100% rename from src/prefect/server/database/migrations/versions/sqlite/2023_01_12_000043_f92143d30c26_migrate_artifact_data.py rename to src/prefect/server/database/_migrations/versions/sqlite/2023_01_12_000043_f92143d30c26_migrate_artifact_data.py diff --git a/src/prefect/server/database/migrations/versions/sqlite/2023_01_12_000044_f92143d30c27_cleanup_artifact_migration.py b/src/prefect/server/database/_migrations/versions/sqlite/2023_01_12_000044_f92143d30c27_cleanup_artifact_migration.py similarity index 100% rename from src/prefect/server/database/migrations/versions/sqlite/2023_01_12_000044_f92143d30c27_cleanup_artifact_migration.py rename to src/prefect/server/database/_migrations/versions/sqlite/2023_01_12_000044_f92143d30c27_cleanup_artifact_migration.py diff --git a/src/prefect/server/database/migrations/versions/sqlite/2023_01_25_114348_b9bda9f142f1_expand_work_queue_table.py b/src/prefect/server/database/_migrations/versions/sqlite/2023_01_25_114348_b9bda9f142f1_expand_work_queue_table.py similarity index 100% rename from src/prefect/server/database/migrations/versions/sqlite/2023_01_25_114348_b9bda9f142f1_expand_work_queue_table.py rename to src/prefect/server/database/_migrations/versions/sqlite/2023_01_25_114348_b9bda9f142f1_expand_work_queue_table.py diff --git a/src/prefect/server/database/migrations/versions/sqlite/2023_01_31_105442_1678f2fb8b33_work_queue_data_migration.py b/src/prefect/server/database/_migrations/versions/sqlite/2023_01_31_105442_1678f2fb8b33_work_queue_data_migration.py similarity index 100% rename from src/prefect/server/database/migrations/versions/sqlite/2023_01_31_105442_1678f2fb8b33_work_queue_data_migration.py rename to src/prefect/server/database/_migrations/versions/sqlite/2023_01_31_105442_1678f2fb8b33_work_queue_data_migration.py diff --git a/src/prefect/server/database/migrations/versions/sqlite/2023_01_31_132409_bfe42b7090d6_clean_up_work_queue_migration.py b/src/prefect/server/database/_migrations/versions/sqlite/2023_01_31_132409_bfe42b7090d6_clean_up_work_queue_migration.py similarity index 100% rename from 
src/prefect/server/database/migrations/versions/sqlite/2023_01_31_132409_bfe42b7090d6_clean_up_work_queue_migration.py rename to src/prefect/server/database/_migrations/versions/sqlite/2023_01_31_132409_bfe42b7090d6_clean_up_work_queue_migration.py diff --git a/src/prefect/server/database/migrations/versions/sqlite/2023_02_08_152028_8d148e44e669_remove_artifact_fk.py b/src/prefect/server/database/_migrations/versions/sqlite/2023_02_08_152028_8d148e44e669_remove_artifact_fk.py similarity index 100% rename from src/prefect/server/database/migrations/versions/sqlite/2023_02_08_152028_8d148e44e669_remove_artifact_fk.py rename to src/prefect/server/database/_migrations/versions/sqlite/2023_02_08_152028_8d148e44e669_remove_artifact_fk.py diff --git a/src/prefect/server/database/migrations/versions/sqlite/2023_03_01_165551_f3df94dca3cc_remove_flowrun_deployment_fk.py b/src/prefect/server/database/_migrations/versions/sqlite/2023_03_01_165551_f3df94dca3cc_remove_flowrun_deployment_fk.py similarity index 100% rename from src/prefect/server/database/migrations/versions/sqlite/2023_03_01_165551_f3df94dca3cc_remove_flowrun_deployment_fk.py rename to src/prefect/server/database/_migrations/versions/sqlite/2023_03_01_165551_f3df94dca3cc_remove_flowrun_deployment_fk.py diff --git a/src/prefect/server/database/migrations/versions/sqlite/2023_03_15_123850_cf1159bd0d3c_add_artifact_description_col.py b/src/prefect/server/database/_migrations/versions/sqlite/2023_03_15_123850_cf1159bd0d3c_add_artifact_description_col.py similarity index 100% rename from src/prefect/server/database/migrations/versions/sqlite/2023_03_15_123850_cf1159bd0d3c_add_artifact_description_col.py rename to src/prefect/server/database/_migrations/versions/sqlite/2023_03_15_123850_cf1159bd0d3c_add_artifact_description_col.py diff --git a/src/prefect/server/database/migrations/versions/sqlite/2023_03_20_153925_1d7441c031d0_remove_uq_from_artifact_table.py b/src/prefect/server/database/_migrations/versions/sqlite/2023_03_20_153925_1d7441c031d0_remove_uq_from_artifact_table.py similarity index 100% rename from src/prefect/server/database/migrations/versions/sqlite/2023_03_20_153925_1d7441c031d0_remove_uq_from_artifact_table.py rename to src/prefect/server/database/_migrations/versions/sqlite/2023_03_20_153925_1d7441c031d0_remove_uq_from_artifact_table.py diff --git a/src/prefect/server/database/migrations/versions/sqlite/2023_03_20_184534_b9aafc3ab936_add_artifact_collection_table.py b/src/prefect/server/database/_migrations/versions/sqlite/2023_03_20_184534_b9aafc3ab936_add_artifact_collection_table.py similarity index 100% rename from src/prefect/server/database/migrations/versions/sqlite/2023_03_20_184534_b9aafc3ab936_add_artifact_collection_table.py rename to src/prefect/server/database/_migrations/versions/sqlite/2023_03_20_184534_b9aafc3ab936_add_artifact_collection_table.py diff --git a/src/prefect/server/database/migrations/versions/sqlite/2023_03_20_194204_422f8ba9541d_add_artifact_idx.py b/src/prefect/server/database/_migrations/versions/sqlite/2023_03_20_194204_422f8ba9541d_add_artifact_idx.py similarity index 100% rename from src/prefect/server/database/migrations/versions/sqlite/2023_03_20_194204_422f8ba9541d_add_artifact_idx.py rename to src/prefect/server/database/_migrations/versions/sqlite/2023_03_20_194204_422f8ba9541d_add_artifact_idx.py diff --git a/src/prefect/server/database/migrations/versions/sqlite/2023_04_04_115150_553920ec20e9_add_index_on_log.py 
b/src/prefect/server/database/_migrations/versions/sqlite/2023_04_04_115150_553920ec20e9_add_index_on_log.py similarity index 100% rename from src/prefect/server/database/migrations/versions/sqlite/2023_04_04_115150_553920ec20e9_add_index_on_log.py rename to src/prefect/server/database/_migrations/versions/sqlite/2023_04_04_115150_553920ec20e9_add_index_on_log.py diff --git a/src/prefect/server/database/migrations/versions/sqlite/2023_04_04_172555_3e1eb8281d5e_add_cols_to_artifact_collection.py b/src/prefect/server/database/_migrations/versions/sqlite/2023_04_04_172555_3e1eb8281d5e_add_cols_to_artifact_collection.py similarity index 100% rename from src/prefect/server/database/migrations/versions/sqlite/2023_04_04_172555_3e1eb8281d5e_add_cols_to_artifact_collection.py rename to src/prefect/server/database/_migrations/versions/sqlite/2023_04_04_172555_3e1eb8281d5e_add_cols_to_artifact_collection.py diff --git a/src/prefect/server/database/migrations/versions/sqlite/2023_04_05_120713_340f457b315f_add_column_to_deployments_for_pull_steps.py b/src/prefect/server/database/_migrations/versions/sqlite/2023_04_05_120713_340f457b315f_add_column_to_deployments_for_pull_steps.py similarity index 100% rename from src/prefect/server/database/migrations/versions/sqlite/2023_04_05_120713_340f457b315f_add_column_to_deployments_for_pull_steps.py rename to src/prefect/server/database/_migrations/versions/sqlite/2023_04_05_120713_340f457b315f_add_column_to_deployments_for_pull_steps.py diff --git a/src/prefect/server/database/migrations/versions/sqlite/2023_04_05_134301_3d46e23593d6_add_variables.py b/src/prefect/server/database/_migrations/versions/sqlite/2023_04_05_134301_3d46e23593d6_add_variables.py similarity index 100% rename from src/prefect/server/database/migrations/versions/sqlite/2023_04_05_134301_3d46e23593d6_add_variables.py rename to src/prefect/server/database/_migrations/versions/sqlite/2023_04_05_134301_3d46e23593d6_add_variables.py diff --git a/src/prefect/server/database/migrations/versions/sqlite/2023_04_06_122659_2dbcec43c857_migrate_artifact_data.py b/src/prefect/server/database/_migrations/versions/sqlite/2023_04_06_122659_2dbcec43c857_migrate_artifact_data.py similarity index 100% rename from src/prefect/server/database/migrations/versions/sqlite/2023_04_06_122659_2dbcec43c857_migrate_artifact_data.py rename to src/prefect/server/database/_migrations/versions/sqlite/2023_04_06_122659_2dbcec43c857_migrate_artifact_data.py diff --git a/src/prefect/server/database/migrations/versions/sqlite/2023_08_02_113813_5b0bd3b41a23_create_concurrency_limit_v2_table.py b/src/prefect/server/database/_migrations/versions/sqlite/2023_08_02_113813_5b0bd3b41a23_create_concurrency_limit_v2_table.py similarity index 100% rename from src/prefect/server/database/migrations/versions/sqlite/2023_08_02_113813_5b0bd3b41a23_create_concurrency_limit_v2_table.py rename to src/prefect/server/database/_migrations/versions/sqlite/2023_08_02_113813_5b0bd3b41a23_create_concurrency_limit_v2_table.py diff --git a/src/prefect/server/database/migrations/versions/sqlite/2023_09_06_084729_c2d001b7dd06_.py b/src/prefect/server/database/_migrations/versions/sqlite/2023_09_06_084729_c2d001b7dd06_.py similarity index 100% rename from src/prefect/server/database/migrations/versions/sqlite/2023_09_06_084729_c2d001b7dd06_.py rename to src/prefect/server/database/_migrations/versions/sqlite/2023_09_06_084729_c2d001b7dd06_.py diff --git 
a/src/prefect/server/database/migrations/versions/sqlite/2023_09_20_134145_ef674d598dd3_adds_enforce_parameter_schema_column_to_.py b/src/prefect/server/database/_migrations/versions/sqlite/2023_09_20_134145_ef674d598dd3_adds_enforce_parameter_schema_column_to_.py similarity index 100% rename from src/prefect/server/database/migrations/versions/sqlite/2023_09_20_134145_ef674d598dd3_adds_enforce_parameter_schema_column_to_.py rename to src/prefect/server/database/_migrations/versions/sqlite/2023_09_20_134145_ef674d598dd3_adds_enforce_parameter_schema_column_to_.py diff --git a/src/prefect/server/database/migrations/versions/sqlite/2023_09_21_121806_8167af8df781_make_slot_decay_per_second_not_nullable.py b/src/prefect/server/database/_migrations/versions/sqlite/2023_09_21_121806_8167af8df781_make_slot_decay_per_second_not_nullable.py similarity index 100% rename from src/prefect/server/database/migrations/versions/sqlite/2023_09_21_121806_8167af8df781_make_slot_decay_per_second_not_nullable.py rename to src/prefect/server/database/_migrations/versions/sqlite/2023_09_21_121806_8167af8df781_make_slot_decay_per_second_not_nullable.py diff --git a/src/prefect/server/database/migrations/versions/sqlite/2023_09_25_121806_8167af8df781_remove_flow_run_id_requirement_from_task_run.py b/src/prefect/server/database/_migrations/versions/sqlite/2023_09_25_121806_8167af8df781_remove_flow_run_id_requirement_from_task_run.py similarity index 100% rename from src/prefect/server/database/migrations/versions/sqlite/2023_09_25_121806_8167af8df781_remove_flow_run_id_requirement_from_task_run.py rename to src/prefect/server/database/_migrations/versions/sqlite/2023_09_25_121806_8167af8df781_remove_flow_run_id_requirement_from_task_run.py diff --git a/src/prefect/server/database/migrations/versions/sqlite/2023_10_12_175815_f3165ae0a213_add_last_polled_to_deployment.py b/src/prefect/server/database/_migrations/versions/sqlite/2023_10_12_175815_f3165ae0a213_add_last_polled_to_deployment.py similarity index 100% rename from src/prefect/server/database/migrations/versions/sqlite/2023_10_12_175815_f3165ae0a213_add_last_polled_to_deployment.py rename to src/prefect/server/database/_migrations/versions/sqlite/2023_10_12_175815_f3165ae0a213_add_last_polled_to_deployment.py diff --git a/src/prefect/server/database/migrations/versions/sqlite/2023_10_30_075026_cef24af2ec34_add_block_type_name_to_block_document.py b/src/prefect/server/database/_migrations/versions/sqlite/2023_10_30_075026_cef24af2ec34_add_block_type_name_to_block_document.py similarity index 100% rename from src/prefect/server/database/migrations/versions/sqlite/2023_10_30_075026_cef24af2ec34_add_block_type_name_to_block_document.py rename to src/prefect/server/database/_migrations/versions/sqlite/2023_10_30_075026_cef24af2ec34_add_block_type_name_to_block_document.py diff --git a/src/prefect/server/database/migrations/versions/sqlite/2023_10_30_103720_22ef3915ccd8_index_and_backfill_block_type_name.py b/src/prefect/server/database/_migrations/versions/sqlite/2023_10_30_103720_22ef3915ccd8_index_and_backfill_block_type_name.py similarity index 100% rename from src/prefect/server/database/migrations/versions/sqlite/2023_10_30_103720_22ef3915ccd8_index_and_backfill_block_type_name.py rename to src/prefect/server/database/_migrations/versions/sqlite/2023_10_30_103720_22ef3915ccd8_index_and_backfill_block_type_name.py diff --git a/src/prefect/server/database/migrations/versions/sqlite/2023_11_20_084708_9c493c02ca6d_add_trgm_index_to_block_document_name.py 
b/src/prefect/server/database/_migrations/versions/sqlite/2023_11_20_084708_9c493c02ca6d_add_trgm_index_to_block_document_name.py similarity index 100% rename from src/prefect/server/database/migrations/versions/sqlite/2023_11_20_084708_9c493c02ca6d_add_trgm_index_to_block_document_name.py rename to src/prefect/server/database/_migrations/versions/sqlite/2023_11_20_084708_9c493c02ca6d_add_trgm_index_to_block_document_name.py diff --git a/src/prefect/server/database/migrations/versions/sqlite/2023_12_07_095112_a299308852a7_create_flow_run_input_table.py b/src/prefect/server/database/_migrations/versions/sqlite/2023_12_07_095112_a299308852a7_create_flow_run_input_table.py similarity index 100% rename from src/prefect/server/database/migrations/versions/sqlite/2023_12_07_095112_a299308852a7_create_flow_run_input_table.py rename to src/prefect/server/database/_migrations/versions/sqlite/2023_12_07_095112_a299308852a7_create_flow_run_input_table.py diff --git a/src/prefect/server/database/migrations/versions/sqlite/2023_12_07_121624_35659cc49969_make_flowruninput_flow_run_id_a_foreign_.py b/src/prefect/server/database/_migrations/versions/sqlite/2023_12_07_121624_35659cc49969_make_flowruninput_flow_run_id_a_foreign_.py similarity index 100% rename from src/prefect/server/database/migrations/versions/sqlite/2023_12_07_121624_35659cc49969_make_flowruninput_flow_run_id_a_foreign_.py rename to src/prefect/server/database/_migrations/versions/sqlite/2023_12_07_121624_35659cc49969_make_flowruninput_flow_run_id_a_foreign_.py diff --git a/src/prefect/server/database/migrations/versions/sqlite/2024_01_05_101041_c63a0a6dc787_add_sender_to_flowruninput.py b/src/prefect/server/database/_migrations/versions/sqlite/2024_01_05_101041_c63a0a6dc787_add_sender_to_flowruninput.py similarity index 100% rename from src/prefect/server/database/migrations/versions/sqlite/2024_01_05_101041_c63a0a6dc787_add_sender_to_flowruninput.py rename to src/prefect/server/database/_migrations/versions/sqlite/2024_01_05_101041_c63a0a6dc787_add_sender_to_flowruninput.py diff --git a/src/prefect/server/database/migrations/versions/sqlite/2024_01_22_120214_265eb1a2da4c_create_deployment_schedule_and_add_.py b/src/prefect/server/database/_migrations/versions/sqlite/2024_01_22_120214_265eb1a2da4c_create_deployment_schedule_and_add_.py similarity index 100% rename from src/prefect/server/database/migrations/versions/sqlite/2024_01_22_120214_265eb1a2da4c_create_deployment_schedule_and_add_.py rename to src/prefect/server/database/_migrations/versions/sqlite/2024_01_22_120214_265eb1a2da4c_create_deployment_schedule_and_add_.py diff --git a/src/prefect/server/database/migrations/versions/sqlite/2024_03_05_115258_342220764f0b_add_job_variables_column_to_flow_runs.py b/src/prefect/server/database/_migrations/versions/sqlite/2024_03_05_115258_342220764f0b_add_job_variables_column_to_flow_runs.py similarity index 100% rename from src/prefect/server/database/migrations/versions/sqlite/2024_03_05_115258_342220764f0b_add_job_variables_column_to_flow_runs.py rename to src/prefect/server/database/_migrations/versions/sqlite/2024_03_05_115258_342220764f0b_add_job_variables_column_to_flow_runs.py diff --git a/src/prefect/server/database/migrations/versions/sqlite/2024_03_13_111316_bacc60edce16_create_csrf_token_toble.py b/src/prefect/server/database/_migrations/versions/sqlite/2024_03_13_111316_bacc60edce16_create_csrf_token_toble.py similarity index 100% rename from 
src/prefect/server/database/migrations/versions/sqlite/2024_03_13_111316_bacc60edce16_create_csrf_token_toble.py rename to src/prefect/server/database/_migrations/versions/sqlite/2024_03_13_111316_bacc60edce16_create_csrf_token_toble.py diff --git a/src/prefect/server/database/migrations/versions/sqlite/2024_04_03_111618_07ed05dfd4ec_automations_models.py b/src/prefect/server/database/_migrations/versions/sqlite/2024_04_03_111618_07ed05dfd4ec_automations_models.py similarity index 100% rename from src/prefect/server/database/migrations/versions/sqlite/2024_04_03_111618_07ed05dfd4ec_automations_models.py rename to src/prefect/server/database/_migrations/versions/sqlite/2024_04_03_111618_07ed05dfd4ec_automations_models.py diff --git a/src/prefect/server/database/migrations/versions/sqlite/2024_04_04_114538_8644a9595a08_add_deployment_version_to_flow_run.py b/src/prefect/server/database/_migrations/versions/sqlite/2024_04_04_114538_8644a9595a08_add_deployment_version_to_flow_run.py similarity index 100% rename from src/prefect/server/database/migrations/versions/sqlite/2024_04_04_114538_8644a9595a08_add_deployment_version_to_flow_run.py rename to src/prefect/server/database/_migrations/versions/sqlite/2024_04_04_114538_8644a9595a08_add_deployment_version_to_flow_run.py diff --git a/src/prefect/server/database/migrations/versions/sqlite/2024_04_09_125712_cc510aec4689_automation_event_follower.py b/src/prefect/server/database/_migrations/versions/sqlite/2024_04_09_125712_cc510aec4689_automation_event_follower.py similarity index 100% rename from src/prefect/server/database/migrations/versions/sqlite/2024_04_09_125712_cc510aec4689_automation_event_follower.py rename to src/prefect/server/database/_migrations/versions/sqlite/2024_04_09_125712_cc510aec4689_automation_event_follower.py diff --git a/src/prefect/server/database/migrations/versions/sqlite/2024_04_09_131832_2b6c2b548f95_trigger_in_index.py b/src/prefect/server/database/_migrations/versions/sqlite/2024_04_09_131832_2b6c2b548f95_trigger_in_index.py similarity index 100% rename from src/prefect/server/database/migrations/versions/sqlite/2024_04_09_131832_2b6c2b548f95_trigger_in_index.py rename to src/prefect/server/database/_migrations/versions/sqlite/2024_04_09_131832_2b6c2b548f95_trigger_in_index.py diff --git a/src/prefect/server/database/migrations/versions/sqlite/2024_04_10_104304_824e9edafa60_adds_events_tables.py b/src/prefect/server/database/_migrations/versions/sqlite/2024_04_10_104304_824e9edafa60_adds_events_tables.py similarity index 100% rename from src/prefect/server/database/migrations/versions/sqlite/2024_04_10_104304_824e9edafa60_adds_events_tables.py rename to src/prefect/server/database/_migrations/versions/sqlite/2024_04_10_104304_824e9edafa60_adds_events_tables.py diff --git a/src/prefect/server/database/migrations/versions/sqlite/2024_04_23_094701_75c8f17b8b51_work_status_fields.py b/src/prefect/server/database/_migrations/versions/sqlite/2024_04_23_094701_75c8f17b8b51_work_status_fields.py similarity index 100% rename from src/prefect/server/database/migrations/versions/sqlite/2024_04_23_094701_75c8f17b8b51_work_status_fields.py rename to src/prefect/server/database/_migrations/versions/sqlite/2024_04_23_094701_75c8f17b8b51_work_status_fields.py diff --git a/src/prefect/server/database/migrations/versions/sqlite/2024_04_25_155120_a8e62d4c72cf_worker_status_field.py b/src/prefect/server/database/_migrations/versions/sqlite/2024_04_25_155120_a8e62d4c72cf_worker_status_field.py similarity index 100% rename from 
src/prefect/server/database/migrations/versions/sqlite/2024_04_25_155120_a8e62d4c72cf_worker_status_field.py rename to src/prefect/server/database/_migrations/versions/sqlite/2024_04_25_155120_a8e62d4c72cf_worker_status_field.py diff --git a/src/prefect/server/database/migrations/versions/sqlite/2024_05_01_103824_20fbd53b3cef_add_catchup_fields_to_deploymentschedule.py b/src/prefect/server/database/_migrations/versions/sqlite/2024_05_01_103824_20fbd53b3cef_add_catchup_fields_to_deploymentschedule.py similarity index 100% rename from src/prefect/server/database/migrations/versions/sqlite/2024_05_01_103824_20fbd53b3cef_add_catchup_fields_to_deploymentschedule.py rename to src/prefect/server/database/_migrations/versions/sqlite/2024_05_01_103824_20fbd53b3cef_add_catchup_fields_to_deploymentschedule.py diff --git a/src/prefect/server/database/migrations/versions/sqlite/2024_05_21_123101_2ac65f1758c2_json_variables.py b/src/prefect/server/database/_migrations/versions/sqlite/2024_05_21_123101_2ac65f1758c2_json_variables.py similarity index 100% rename from src/prefect/server/database/migrations/versions/sqlite/2024_05_21_123101_2ac65f1758c2_json_variables.py rename to src/prefect/server/database/_migrations/versions/sqlite/2024_05_21_123101_2ac65f1758c2_json_variables.py diff --git a/src/prefect/server/database/migrations/versions/sqlite/2024_07_15_145350_354f1ede7e9f_adding_scope_to_followers.py b/src/prefect/server/database/_migrations/versions/sqlite/2024_07_15_145350_354f1ede7e9f_adding_scope_to_followers.py similarity index 100% rename from src/prefect/server/database/migrations/versions/sqlite/2024_07_15_145350_354f1ede7e9f_adding_scope_to_followers.py rename to src/prefect/server/database/_migrations/versions/sqlite/2024_07_15_145350_354f1ede7e9f_adding_scope_to_followers.py diff --git a/src/prefect/server/database/migrations/versions/sqlite/2024_08_14_145052_f93e1439f022_add_deployment_concurrency_limit.py b/src/prefect/server/database/_migrations/versions/sqlite/2024_08_14_145052_f93e1439f022_add_deployment_concurrency_limit.py similarity index 100% rename from src/prefect/server/database/migrations/versions/sqlite/2024_08_14_145052_f93e1439f022_add_deployment_concurrency_limit.py rename to src/prefect/server/database/_migrations/versions/sqlite/2024_08_14_145052_f93e1439f022_add_deployment_concurrency_limit.py diff --git a/src/prefect/server/database/migrations/versions/sqlite/2024_09_11_090106_7d6350aea855_add_concurrency_options.py b/src/prefect/server/database/_migrations/versions/sqlite/2024_09_11_090106_7d6350aea855_add_concurrency_options.py similarity index 100% rename from src/prefect/server/database/migrations/versions/sqlite/2024_09_11_090106_7d6350aea855_add_concurrency_options.py rename to src/prefect/server/database/_migrations/versions/sqlite/2024_09_11_090106_7d6350aea855_add_concurrency_options.py diff --git a/src/prefect/server/database/migrations/versions/sqlite/2024_09_16_162719_4ad4658cbefe_add_deployment_to_global_concurrency_.py b/src/prefect/server/database/_migrations/versions/sqlite/2024_09_16_162719_4ad4658cbefe_add_deployment_to_global_concurrency_.py similarity index 100% rename from src/prefect/server/database/migrations/versions/sqlite/2024_09_16_162719_4ad4658cbefe_add_deployment_to_global_concurrency_.py rename to src/prefect/server/database/_migrations/versions/sqlite/2024_09_16_162719_4ad4658cbefe_add_deployment_to_global_concurrency_.py diff --git 
a/src/prefect/server/database/migrations/versions/sqlite/2024_11_15_151042_5952a5498b51_add_labels_column_to_flow_flowrun_.py b/src/prefect/server/database/_migrations/versions/sqlite/2024_11_15_151042_5952a5498b51_add_labels_column_to_flow_flowrun_.py similarity index 100% rename from src/prefect/server/database/migrations/versions/sqlite/2024_11_15_151042_5952a5498b51_add_labels_column_to_flow_flowrun_.py rename to src/prefect/server/database/_migrations/versions/sqlite/2024_11_15_151042_5952a5498b51_add_labels_column_to_flow_flowrun_.py diff --git a/src/prefect/server/database/_migrations/versions/sqlite/2024_12_04_144924_a49711513ad4_sync_orm_models_and_migrations.py b/src/prefect/server/database/_migrations/versions/sqlite/2024_12_04_144924_a49711513ad4_sync_orm_models_and_migrations.py new file mode 100644 index 000000000000..f94cb7b6bed8 --- /dev/null +++ b/src/prefect/server/database/_migrations/versions/sqlite/2024_12_04_144924_a49711513ad4_sync_orm_models_and_migrations.py @@ -0,0 +1,127 @@ +"""Sync ORM models and migrations + +Revision ID: a49711513ad4 +Revises: 5952a5498b51 +Create Date: 2024-12-04 14:49:24.099491 + +""" + +import sqlalchemy as sa +from alembic import op +from sqlalchemy.dialects import sqlite + +# revision identifiers, used by Alembic. +revision = "a49711513ad4" +down_revision = "5952a5498b51" +branch_labels = None +depends_on = None + + +def upgrade(): + with op.batch_alter_table("artifact_collection", schema=None) as batch_op: + # table added in 027c123512befd2bd00a0ef28bd44215e77bece6 but index was + # never created in a migration. + batch_op.create_index( + batch_op.f("ix_artifact_collection__updated"), ["updated"], unique=False + ) + # index created on the wrong table in ca9f93463a4c38fce8be972d91e808b5935e5d9c + batch_op.drop_index("ix_artifact__key_created_desc") + + with op.batch_alter_table("artifact", schema=None) as batch_op: + # index created on the wrong table in ca9f93463a4c38fce8be972d91e808b5935e5d9c + batch_op.create_index( + "ix_artifact__key_created_desc", + ["key", sa.text("created DESC")], + unique=False, + postgresql_include=["id", "updated", "type", "task_run_id", "flow_run_id"], + ) + + with op.batch_alter_table("block_document", schema=None) as batch_op: + # Renamed index to remain consistent with PostgreSQL + batch_op.drop_index("ix_block_document__block_type_name_name") + batch_op.create_index( + "ix_block_document__block_type_name__name", + ["block_type_name", "name"], + unique=False, + ) + + with op.batch_alter_table("deployment", schema=None) as batch_op: + # columns removed in c53b00bfa1f6850ab43e168c92c627350c090647 + batch_op.drop_column("schedule") + batch_op.drop_column("is_schedule_active") + + # column removed in 5784c637e7e11a8e88e2b3146e54e9b6c97d50ef + batch_op.drop_column("flow_data") + + # column removed in eaa7a5063c73718dff56ce4aeb66e53fcafe60e5 + batch_op.drop_column("manifest_path") + + with op.batch_alter_table("deployment_schedule", schema=None) as batch_op: + # columns removed from orm models in 0b62de684447c6955e04c722c276edac4002fd40 + batch_op.drop_column("catchup") + batch_op.drop_column("max_active_runs") + + with op.batch_alter_table("flow_run", schema=None) as batch_op: + # Column is no longer a FK since d10c7471a69403bcf88f401091497a2dc8963885 + batch_op.drop_index("ix_flow_run__deployment_id") + # Index accidentally dropped in 519a2ed6e31e2b60136e1a1a163a9cd0a8d3d5c4 + batch_op.create_index( + "ix_flow_run__scheduler_deployment_id_auto_scheduled_next_schedu", + ["deployment_id", "auto_scheduled", 
"next_scheduled_start_time"], + unique=False, + postgresql_where=sa.text("state_type = 'SCHEDULED'::state_type"), + sqlite_where=sa.text("state_type = 'SCHEDULED'"), + ) + + +def downgrade(): + with op.batch_alter_table("flow_run", schema=None) as batch_op: + batch_op.drop_index( + "ix_flow_run__scheduler_deployment_id_auto_scheduled_next_schedu", + postgresql_where=sa.text("state_type = 'SCHEDULED'::state_type"), + sqlite_where=sa.text("state_type = 'SCHEDULED'"), + ) + batch_op.create_index( + "ix_flow_run__deployment_id", ["deployment_id"], unique=False + ) + + with op.batch_alter_table("deployment_schedule", schema=None) as batch_op: + batch_op.add_column(sa.Column("max_active_runs", sa.INTEGER(), nullable=True)) + batch_op.add_column( + sa.Column( + "catchup", sa.BOOLEAN(), server_default=sa.text("'0'"), nullable=False + ) + ) + + with op.batch_alter_table("deployment", schema=None) as batch_op: + batch_op.add_column( + sa.Column( + "is_schedule_active", + sa.BOOLEAN(), + server_default=sa.text("'1'"), + nullable=False, + ) + ) + batch_op.add_column(sa.Column("flow_data", sqlite.JSON(), nullable=True)) + batch_op.add_column(sa.Column("schedule", sqlite.JSON(), nullable=True)) + batch_op.add_column(sa.Column("manifest_path", sa.VARCHAR(), nullable=True)) + + with op.batch_alter_table("block_document", schema=None) as batch_op: + batch_op.drop_index("ix_block_document__block_type_name__name") + batch_op.create_index( + "ix_block_document__block_type_name_name", + ["block_type_name", "name"], + unique=False, + ) + + with op.batch_alter_table("artifact", schema=None) as batch_op: + batch_op.drop_index( + "ix_artifact__key_created_desc", + postgresql_include=["id", "updated", "type", "task_run_id", "flow_run_id"], + ) + + with op.batch_alter_table("artifact_collection", schema=None) as batch_op: + batch_op.create_index( + "ix_artifact__key_created_desc", ["key", "created"], unique=False + ) + batch_op.drop_index(batch_op.f("ix_artifact_collection__updated")) diff --git a/src/prefect/server/database/alembic.ini b/src/prefect/server/database/alembic.ini index 5d8eb592d893..a834d0348775 100644 --- a/src/prefect/server/database/alembic.ini +++ b/src/prefect/server/database/alembic.ini @@ -1,5 +1,5 @@ [alembic] -script_location = prefect:server:database:migrations +script_location = prefect:server:database:_migrations prepend_sys_path = . 
revision_environment = true diff --git a/src/prefect/server/database/alembic_commands.py b/src/prefect/server/database/alembic_commands.py index 37aa6af5e7e9..bfeec523ee90 100644 --- a/src/prefect/server/database/alembic_commands.py +++ b/src/prefect/server/database/alembic_commands.py @@ -6,7 +6,7 @@ from sqlalchemy.exc import SAWarning -import prefect.server +import prefect.server.database ALEMBIC_LOCK = Lock() diff --git a/src/prefect/server/database/orm_models.py b/src/prefect/server/database/orm_models.py index cfdc3c0645cd..49d89e6b6559 100644 --- a/src/prefect/server/database/orm_models.py +++ b/src/prefect/server/database/orm_models.py @@ -2,11 +2,22 @@ import uuid from abc import ABC, abstractmethod from pathlib import Path -from typing import Any, Dict, Hashable, List, Tuple, Union, cast +from typing import ( + TYPE_CHECKING, + Any, + ClassVar, + Dict, + Hashable, + Iterable, + Optional, + Union, + cast, +) import pendulum import sqlalchemy as sa from sqlalchemy import FetchedValue +from sqlalchemy.ext.asyncio import AsyncSession from sqlalchemy.ext.hybrid import hybrid_property from sqlalchemy.orm import ( DeclarativeBase, @@ -14,12 +25,12 @@ declared_attr, mapped_column, registry, + relationship, synonym, ) -from sqlalchemy.sql.expression import ColumnElement +from sqlalchemy.sql import roles from sqlalchemy.sql.functions import coalesce -import prefect import prefect.server.schemas as schemas from prefect.server.events.actions import ServerActionTypes from prefect.server.events.schemas.automations import ( @@ -55,7 +66,7 @@ class Base(DeclarativeBase): and provides ID, created, and updated columns """ - registry = registry( + registry: ClassVar[sa.orm.registry] = registry( metadata=sa.schema.MetaData( # define naming conventions for our Base class to use # sqlalchemy will use the following templated strings @@ -82,6 +93,7 @@ class Base(DeclarativeBase): ), type_annotation_map={ uuid.UUID: UUID, + pendulum.DateTime: Timestamp, }, ) @@ -93,13 +105,13 @@ class Base(DeclarativeBase): # an INSERT, for example # # https://docs.sqlalchemy.org/en/14/orm/extensions/asyncio.html#preventing-implicit-io-when-using-asyncsession - __mapper_args__ = {"eager_defaults": True} + __mapper_args__: dict[str, Any] = {"eager_defaults": True} - def __repr__(self): + def __repr__(self) -> str: return f"{self.__class__.__name__}(id={self.id})" - @declared_attr - def __tablename__(cls): + @declared_attr.directive + def __tablename__(cls) -> str: """ By default, turn the model's camel-case class name into a snake-case table name. Override by providing @@ -114,18 +126,13 @@ def __tablename__(cls): ) created: Mapped[pendulum.DateTime] = mapped_column( - Timestamp(), - nullable=False, - server_default=now(), - default=lambda: pendulum.now("UTC"), + server_default=now(), default=lambda: pendulum.now("UTC") ) # onupdate is only called when statements are actually issued # against the database. 
until COMMIT is issued, this column # will not be updated - updated = sa.Column( - Timestamp(), - nullable=False, + updated: Mapped[pendulum.DateTime] = mapped_column( index=True, server_default=now(), default=lambda: pendulum.now("UTC"), @@ -137,93 +144,87 @@ def __tablename__(cls): class Flow(Base): """SQLAlchemy mixin of a flow.""" - name = sa.Column(sa.String, nullable=False) - tags: Mapped[List[str]] = mapped_column( - JSON, server_default="[]", default=list, nullable=False + name: Mapped[str] + tags: Mapped[list[str]] = mapped_column(JSON, server_default="[]", default=list) + labels: Mapped[Optional[schemas.core.KeyValueLabels]] = mapped_column(JSON) + + flow_runs: Mapped[list["FlowRun"]] = relationship( + back_populates="flow", lazy="raise" ) - labels: Mapped[Union[schemas.core.KeyValueLabels, None]] = mapped_column( - JSON, nullable=True + deployments: Mapped[list["Deployment"]] = relationship( + back_populates="flow", lazy="raise" ) - flow_runs = sa.orm.relationship("FlowRun", back_populates="flow", lazy="raise") - deployments = sa.orm.relationship("Deployment", back_populates="flow", lazy="raise") - - __table_args__ = ( + __table_args__: Any = ( sa.UniqueConstraint("name"), sa.Index("ix_flow__created", "created"), + sa.Index("trgm_ix_flow_name", "name", postgresql_using="gin").ddl_if( + dialect="postgresql" + ), ) class FlowRunState(Base): """SQLAlchemy mixin of a flow run state.""" - flow_run_id = sa.Column( - UUID(), sa.ForeignKey("flow_run.id", ondelete="cascade"), nullable=False + flow_run_id: Mapped[uuid.UUID] = mapped_column( + sa.ForeignKey("flow_run.id", ondelete="cascade") ) - type = sa.Column( - sa.Enum(schemas.states.StateType, name="state_type"), nullable=False, index=True + type: Mapped[schemas.states.StateType] = mapped_column( + sa.Enum(schemas.states.StateType, name="state_type"), index=True ) - timestamp = sa.Column( - Timestamp(), - nullable=False, - server_default=now(), - default=lambda: pendulum.now("UTC"), + timestamp: Mapped[pendulum.DateTime] = mapped_column( + server_default=now(), default=lambda: pendulum.now("UTC") ) - name = sa.Column(sa.String, nullable=False, index=True) - message = sa.Column(sa.String) - state_details = sa.Column( + name: Mapped[str] = mapped_column(index=True) + message: Mapped[Optional[str]] + state_details: Mapped[schemas.states.StateDetails] = mapped_column( Pydantic(schemas.states.StateDetails), server_default="{}", default=schemas.states.StateDetails, - nullable=False, ) - _data = sa.Column(sa.JSON, nullable=True, name="data") + _data: Mapped[Optional[Any]] = mapped_column(JSON, name="data") - result_artifact_id = sa.Column( - UUID(), - sa.ForeignKey( - "artifact.id", - ondelete="SET NULL", - use_alter=True, - ), + result_artifact_id: Mapped[Optional[uuid.UUID]] = mapped_column( + sa.ForeignKey("artifact.id", ondelete="SET NULL", use_alter=True), index=True, ) - _result_artifact = sa.orm.relationship( - "Artifact", + _result_artifact: Mapped[Optional["Artifact"]] = relationship( lazy="selectin", foreign_keys=[result_artifact_id], primaryjoin="Artifact.id==FlowRunState.result_artifact_id", ) @hybrid_property - def data(self): + def data(self) -> Optional[Any]: if self._data: # ensures backwards compatibility for results stored on state objects return self._data if not self.result_artifact_id: # do not try to load the relationship if there's no artifact id return None + if TYPE_CHECKING: + assert self._result_artifact is not None return self._result_artifact.data - flow_run = sa.orm.relationship( - "FlowRun", - lazy="raise", - 
foreign_keys=[flow_run_id], - ) + flow_run: Mapped["FlowRun"] = relationship(lazy="raise", foreign_keys=[flow_run_id]) def as_state(self) -> schemas.states.State: return schemas.states.State.model_validate(self, from_attributes=True) - __table_args__ = ( - sa.Index( - "uq_flow_run_state__flow_run_id_timestamp_desc", - "flow_run_id", - sa.desc("timestamp"), - unique=True, - ), - ) + @declared_attr.directive + @classmethod + def __table_args__(cls) -> Iterable[sa.Index]: + return ( + sa.Index( + "uq_flow_run_state__flow_run_id_timestamp_desc", + cls.flow_run_id, + cls.timestamp.desc(), + unique=True, + ), + ) class TaskRunState(Base): @@ -231,73 +232,63 @@ class TaskRunState(Base): # this column isn't explicitly indexed because it is included in # the unique compound index on (task_run_id, timestamp) - task_run_id = sa.Column( - UUID(), sa.ForeignKey("task_run.id", ondelete="cascade"), nullable=False + task_run_id: Mapped[uuid.UUID] = mapped_column( + sa.ForeignKey("task_run.id", ondelete="cascade") ) - type = sa.Column( - sa.Enum(schemas.states.StateType, name="state_type"), nullable=False, index=True + type: Mapped[schemas.states.StateType] = mapped_column( + sa.Enum(schemas.states.StateType, name="state_type"), index=True ) - timestamp = sa.Column( - Timestamp(), - nullable=False, - server_default=now(), - default=lambda: pendulum.now("UTC"), + timestamp: Mapped[pendulum.DateTime] = mapped_column( + server_default=now(), default=lambda: pendulum.now("UTC") ) - name = sa.Column(sa.String, nullable=False, index=True) - message = sa.Column(sa.String) - state_details = sa.Column( + name: Mapped[str] = mapped_column(index=True) + message: Mapped[Optional[str]] + state_details: Mapped[schemas.states.StateDetails] = mapped_column( Pydantic(schemas.states.StateDetails), server_default="{}", default=schemas.states.StateDetails, - nullable=False, ) - _data = sa.Column(sa.JSON, nullable=True, name="data") + _data: Mapped[Optional[Any]] = mapped_column(JSON, name="data") - result_artifact_id = sa.Column( - UUID(), - sa.ForeignKey( - "artifact.id", - ondelete="SET NULL", - use_alter=True, - ), - index=True, + result_artifact_id: Mapped[Optional[uuid.UUID]] = mapped_column( + sa.ForeignKey("artifact.id", ondelete="SET NULL", use_alter=True), index=True ) - _result_artifact = sa.orm.relationship( - "Artifact", + _result_artifact: Mapped[Optional["Artifact"]] = relationship( lazy="selectin", foreign_keys=[result_artifact_id], primaryjoin="Artifact.id==TaskRunState.result_artifact_id", ) @hybrid_property - def data(self): + def data(self) -> Optional[Any]: if self._data: # ensures backwards compatibility for results stored on state objects return self._data if not self.result_artifact_id: # do not try to load the relationship if there's no artifact id return None + if TYPE_CHECKING: + assert self._result_artifact is not None return self._result_artifact.data - task_run = sa.orm.relationship( - "TaskRun", - lazy="raise", - foreign_keys=[task_run_id], - ) + task_run: Mapped["TaskRun"] = relationship(lazy="raise", foreign_keys=[task_run_id]) def as_state(self) -> schemas.states.State: return schemas.states.State.model_validate(self, from_attributes=True) - __table_args__ = ( - sa.Index( - "uq_task_run_state__task_run_id_timestamp_desc", - "task_run_id", - sa.desc("timestamp"), - unique=True, - ), - ) + @declared_attr.directive + @classmethod + def __table_args__(cls) -> Iterable[sa.Index]: + return ( + sa.Index( + "uq_task_run_state__task_run_id_timestamp_desc", + cls.task_run_id, + cls.timestamp.desc(), + 
unique=True, + ), + ) class Artifact(Base): @@ -305,63 +296,57 @@ class Artifact(Base): SQLAlchemy model of artifacts. """ - key = sa.Column( - sa.String, - nullable=True, - index=True, - ) + key: Mapped[Optional[str]] = mapped_column(index=True) - task_run_id = sa.Column( - UUID(), - nullable=True, - index=True, - ) + task_run_id: Mapped[Optional[uuid.UUID]] = mapped_column(index=True) - flow_run_id = sa.Column( - UUID(), - nullable=True, - index=True, - ) + flow_run_id: Mapped[Optional[uuid.UUID]] = mapped_column(index=True) - type = sa.Column(sa.String) - data = sa.Column(sa.JSON, nullable=True) - description = sa.Column(sa.String, nullable=True) + type: Mapped[Optional[str]] + data: Mapped[Optional[Any]] = mapped_column(sa.JSON) + description: Mapped[Optional[str]] # Suffixed with underscore as attribute name 'metadata' is reserved for the MetaData instance when using a declarative base class. - metadata_ = sa.Column(sa.JSON, nullable=True) + metadata_: Mapped[Optional[dict[str, str]]] = mapped_column(sa.JSON) - __table_args__ = ( - sa.Index( - "ix_artifact__key", - "key", - ), - ) + @declared_attr.directive + @classmethod + def __table_args__(cls) -> Iterable[sa.Index]: + return ( + sa.Index( + "ix_artifact__key", + cls.key, + ), + sa.Index( + "ix_artifact__key_created_desc", + cls.key, + cls.created.desc(), + postgresql_include=[ + "id", + "updated", + "type", + "task_run_id", + "flow_run_id", + ], + ), + ) class ArtifactCollection(Base): - key = sa.Column( - sa.String, - nullable=False, - ) + key: Mapped[str] - latest_id: Mapped[UUID] = mapped_column(UUID(), nullable=False) + latest_id: Mapped[uuid.UUID] - task_run_id = sa.Column( - UUID(), - nullable=True, - ) + task_run_id: Mapped[Optional[uuid.UUID]] - flow_run_id = sa.Column( - UUID(), - nullable=True, - ) + flow_run_id: Mapped[Optional[uuid.UUID]] - type = sa.Column(sa.String) - data = sa.Column(sa.JSON, nullable=True) - description = sa.Column(sa.String, nullable=True) - metadata_ = sa.Column(sa.JSON, nullable=True) + type: Mapped[Optional[str]] + data: Mapped[Optional[Any]] = mapped_column(sa.JSON) + description: Mapped[Optional[str]] + metadata_: Mapped[Optional[dict[str, str]]] = mapped_column(sa.JSON) - __table_args__ = ( + __table_args__: Any = ( sa.UniqueConstraint("key"), sa.Index( "ix_artifact_collection__key_latest_id", @@ -376,20 +361,20 @@ class TaskRunStateCache(Base): SQLAlchemy model of a task run state cache. 
""" - cache_key = sa.Column(sa.String, nullable=False) - cache_expiration = sa.Column( - Timestamp(), - nullable=True, - ) - task_run_state_id = sa.Column(UUID(), nullable=False) + cache_key: Mapped[str] = mapped_column() + cache_expiration: Mapped[Optional[pendulum.DateTime]] + task_run_state_id: Mapped[uuid.UUID] - __table_args__ = ( - sa.Index( - "ix_task_run_state_cache__cache_key_created_desc", - "cache_key", - sa.desc("created"), - ), - ) + @declared_attr.directive + @classmethod + def __table_args__(cls) -> Iterable[sa.Index]: + return ( + sa.Index( + "ix_task_run_state_cache__cache_key_created_desc", + cls.cache_key, + cls.created.desc(), + ), + ) class Run(Base): @@ -399,41 +384,36 @@ class Run(Base): __abstract__ = True - name: Mapped[str] = mapped_column( - sa.String, - default=lambda: generate_slug(2), - nullable=False, - index=True, - ) - state_type = sa.Column(sa.Enum(schemas.states.StateType, name="state_type")) - state_name = sa.Column(sa.String, nullable=True) - state_timestamp: Mapped[Union[pendulum.DateTime, None]] = mapped_column( - Timestamp(), nullable=True - ) - run_count = sa.Column(sa.Integer, server_default="0", default=0, nullable=False) - expected_start_time: Mapped[pendulum.DateTime] = mapped_column(Timestamp()) - next_scheduled_start_time = sa.Column(Timestamp()) - start_time: Mapped[pendulum.DateTime] = mapped_column(Timestamp()) - end_time: Mapped[pendulum.DateTime] = mapped_column(Timestamp()) + name: Mapped[str] = mapped_column(default=lambda: generate_slug(2), index=True) + state_type: Mapped[Optional[schemas.states.StateType]] = mapped_column( + sa.Enum(schemas.states.StateType, name="state_type") + ) + state_name: Mapped[Optional[str]] + state_timestamp: Mapped[Optional[pendulum.DateTime]] + run_count: Mapped[int] = mapped_column(server_default="0", default=0) + expected_start_time: Mapped[Optional[pendulum.DateTime]] + next_scheduled_start_time: Mapped[Optional[pendulum.DateTime]] + start_time: Mapped[Optional[pendulum.DateTime]] + end_time: Mapped[Optional[pendulum.DateTime]] total_run_time: Mapped[datetime.timedelta] = mapped_column( - sa.Interval(), - server_default="0", - default=datetime.timedelta(0), - nullable=False, + server_default="0", default=datetime.timedelta(0) ) @hybrid_property - def estimated_run_time(self): + def estimated_run_time(self) -> datetime.timedelta: """Total run time is incremented in the database whenever a RUNNING state is exited. 
To give up-to-date estimates, we estimate incremental run time for any runs currently in a RUNNING state.""" if self.state_type and self.state_type == schemas.states.StateType.RUNNING: + if TYPE_CHECKING: + assert self.state_timestamp is not None return self.total_run_time + (pendulum.now("UTC") - self.state_timestamp) else: return self.total_run_time - @estimated_run_time.expression - def estimated_run_time(cls): + @estimated_run_time.inplace.expression + @classmethod + def _estimated_run_time_expression(cls) -> sa.Label[datetime.timedelta]: return ( sa.select( sa.case( @@ -460,7 +440,11 @@ def estimated_start_time_delta(self) -> datetime.timedelta: give up-to-date estimates, we estimate lateness for any runs that don't have a start time and are not in a final state and were expected to start already.""" - if self.start_time and self.start_time > self.expected_start_time: + if ( + self.start_time + and self.expected_start_time is not None + and self.start_time > (self.expected_start_time) + ): return self.start_time - self.expected_start_time elif ( self.start_time is None @@ -472,8 +456,11 @@ def estimated_start_time_delta(self) -> datetime.timedelta: else: return datetime.timedelta(0) - @estimated_start_time_delta.expression - def estimated_start_time_delta(cls): + @estimated_start_time_delta.inplace.expression + @classmethod + def _estimated_start_time_delta_expression( + cls, + ) -> sa.SQLColumnExpression[datetime.timedelta]: return sa.case( ( cls.start_time > cls.expected_start_time, @@ -495,97 +482,72 @@ class FlowRun(Run): """SQLAlchemy model of a flow run.""" flow_id: Mapped[uuid.UUID] = mapped_column( - UUID(), - sa.ForeignKey("flow.id", ondelete="cascade"), - nullable=False, - index=True, + sa.ForeignKey("flow.id", ondelete="cascade"), index=True ) - deployment_id: Mapped[Union[uuid.UUID, None]] = mapped_column(UUID(), nullable=True) - work_queue_name = sa.Column(sa.String, index=True) - flow_version = sa.Column(sa.String, index=True) - deployment_version = sa.Column(sa.String, index=True) - parameters = sa.Column(JSON, server_default="{}", default=dict, nullable=False) - idempotency_key = sa.Column(sa.String) - context = sa.Column(JSON, server_default="{}", default=dict, nullable=False) - empirical_policy = sa.Column( + deployment_id: Mapped[Optional[uuid.UUID]] = mapped_column() + work_queue_name: Mapped[Optional[str]] = mapped_column(index=True) + flow_version: Mapped[Optional[str]] = mapped_column(index=True) + deployment_version: Mapped[Optional[str]] = mapped_column(index=True) + parameters: Mapped[dict[str, Any]] = mapped_column( + JSON, server_default="{}", default=dict + ) + idempotency_key: Mapped[Optional[str]] = mapped_column() + context: Mapped[dict[str, Any]] = mapped_column( + JSON, server_default="{}", default=dict + ) + empirical_policy: Mapped[schemas.core.FlowRunPolicy] = mapped_column( Pydantic(schemas.core.FlowRunPolicy), server_default="{}", default=schemas.core.FlowRunPolicy, - nullable=False, - ) - tags: Mapped[List[str]] = mapped_column( - JSON, server_default="[]", default=list, nullable=False - ) - labels: Mapped[Union[schemas.core.KeyValueLabels, None]] = mapped_column( - JSON, nullable=True ) + tags: Mapped[list[str]] = mapped_column(JSON, server_default="[]", default=list) + labels: Mapped[Optional[schemas.core.KeyValueLabels]] = mapped_column(JSON) - created_by: Mapped[Union[schemas.core.CreatedBy, None]] = mapped_column( - Pydantic(schemas.core.CreatedBy), - server_default=None, - default=None, - nullable=True, + created_by: 
Mapped[Optional[schemas.core.CreatedBy]] = mapped_column( + Pydantic(schemas.core.CreatedBy) ) - infrastructure_pid = sa.Column(sa.String) - job_variables = sa.Column(JSON, server_default="{}", default=dict, nullable=True) - - infrastructure_document_id = sa.Column( - UUID, - sa.ForeignKey("block_document.id", ondelete="CASCADE"), - nullable=True, - index=True, + infrastructure_pid: Mapped[Optional[str]] + job_variables: Mapped[Optional[dict[str, Any]]] = mapped_column( + JSON, server_default="{}", default=dict ) - parent_task_run_id: Mapped[uuid.UUID] = mapped_column( - UUID(), - sa.ForeignKey( - "task_run.id", - ondelete="SET NULL", - use_alter=True, - ), - index=True, + infrastructure_document_id: Mapped[Optional[uuid.UUID]] = mapped_column( + sa.ForeignKey("block_document.id", ondelete="CASCADE"), index=True ) - auto_scheduled = sa.Column( - sa.Boolean, server_default="0", default=False, nullable=False + parent_task_run_id: Mapped[Optional[uuid.UUID]] = mapped_column( + sa.ForeignKey("task_run.id", ondelete="SET NULL", use_alter=True), index=True ) + auto_scheduled: Mapped[bool] = mapped_column(server_default="0", default=False) + # TODO remove this foreign key for significant delete performance gains - state_id = sa.Column( - UUID(), - sa.ForeignKey( - "flow_run_state.id", - ondelete="SET NULL", - use_alter=True, - ), + state_id: Mapped[Optional[uuid.UUID]] = mapped_column( + sa.ForeignKey("flow_run_state.id", ondelete="SET NULL", use_alter=True), index=True, ) - work_queue_id: Mapped[Union[uuid.UUID, None]] = mapped_column( - UUID, - sa.ForeignKey("work_queue.id", ondelete="SET NULL"), - nullable=True, - index=True, + work_queue_id: Mapped[Optional[uuid.UUID]] = mapped_column( + sa.ForeignKey("work_queue.id", ondelete="SET NULL"), index=True ) # -------------------------- relationships # current states are eagerly loaded unless otherwise specified - _state = sa.orm.relationship( - "FlowRunState", + _state: Mapped[Optional["FlowRunState"]] = relationship( lazy="selectin", foreign_keys=[state_id], primaryjoin="FlowRunState.id==FlowRun.state_id", ) @hybrid_property - def state(self): + def state(self) -> Optional[FlowRunState]: return self._state - @state.setter - def state(self, value): + @state.inplace.setter + def _set_state(self, value: Optional[FlowRunState]) -> None: # because this is a slightly non-standard SQLAlchemy relationship, we # prefer an explicit setter method to a setter property, because # user expectations about SQLAlchemy attribute assignment might not be @@ -594,7 +556,7 @@ def state(self, value): # still works because the ORM model's __init__ depends on it. return self.set_state(value) - def set_state(self, state): + def set_state(self, state: Optional[FlowRunState]) -> None: """ If a state is assigned to this run, populate its run id. 
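Aside on the `orm_models.py` hunks above and below: the models move from `sa.Column(...)` declarations to SQLAlchemy 2.0's annotation-driven `Mapped[...]` / `mapped_column()` style, where `Optional[...]` implies `nullable=True`, the registry's `type_annotation_map` routes `uuid.UUID` and `pendulum.DateTime` to the custom `UUID` and `Timestamp` types, and hybrid properties switch to the typed `.inplace` decorators. A rough, generic sketch of those mechanics, using a hypothetical `Note` model with stdlib `datetime` rather than Prefect's types:

```python
# Illustrative only: SQLAlchemy 2.0 annotation-driven mapping, as adopted in
# orm_models.py. The Note model and its columns are hypothetical.
import datetime
import uuid
from typing import Optional

import sqlalchemy as sa
from sqlalchemy.ext.hybrid import hybrid_property
from sqlalchemy.orm import DeclarativeBase, Mapped, mapped_column


class Base(DeclarativeBase):
    # Annotations are resolved to column types through this map, so models can
    # write `Mapped[datetime.datetime]` without repeating the explicit type.
    type_annotation_map = {
        uuid.UUID: sa.Uuid,
        datetime.datetime: sa.TIMESTAMP(timezone=True),
    }


class Note(Base):
    __tablename__ = "note"

    id: Mapped[uuid.UUID] = mapped_column(primary_key=True, default=uuid.uuid4)
    title: Mapped[str]                # not Optional -> NOT NULL inferred
    body: Mapped[Optional[str]]       # Optional -> nullable=True inferred
    created_at: Mapped[datetime.datetime] = mapped_column(
        server_default=sa.func.now()  # column type comes from type_annotation_map
    )

    @hybrid_property
    def title_length(self) -> int:
        # Python-side evaluation for loaded instances.
        return len(self.title)

    @title_length.inplace.expression
    @classmethod
    def _title_length_expression(cls) -> sa.ColumnElement[int]:
        # SQL-side expression; `.inplace` is the typed 2.0 form used in the PR.
        return sa.func.length(cls.title)
```

The same `.inplace` family also covers setters, which is how the explicit `@state.inplace.setter` / `_set_state` methods on `FlowRun` and `TaskRun` keep their existing behavior while gaining annotations.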
@@ -606,150 +568,137 @@ def set_state(self, state): state.flow_run_id = self.id self._state = state - flow = sa.orm.relationship("Flow", back_populates="flow_runs", lazy="raise") + flow: Mapped["Flow"] = relationship(back_populates="flow_runs", lazy="raise") - task_runs = sa.orm.relationship( - "TaskRun", + task_runs: Mapped[list["TaskRun"]] = relationship( back_populates="flow_run", lazy="raise", # foreign_keys=lambda: [flow_run_id], primaryjoin="TaskRun.flow_run_id==FlowRun.id", ) - parent_task_run = sa.orm.relationship( - "TaskRun", + parent_task_run: Mapped[Optional["TaskRun"]] = relationship( back_populates="subflow_run", lazy="raise", foreign_keys=[parent_task_run_id], ) - work_queue = sa.orm.relationship( - "WorkQueue", - lazy="selectin", - foreign_keys=[work_queue_id], + work_queue: Mapped[Optional["WorkQueue"]] = relationship( + lazy="selectin", foreign_keys=[work_queue_id] ) - __table_args__ = ( - sa.Index( - "uq_flow_run__flow_id_idempotency_key", - "flow_id", - "idempotency_key", - unique=True, - ), - sa.Index( - "ix_flow_run__coalesce_start_time_expected_start_time_desc", - sa.desc(coalesce("start_time", "expected_start_time")), - ), - sa.Index( - "ix_flow_run__coalesce_start_time_expected_start_time_asc", - sa.asc(coalesce("start_time", "expected_start_time")), - ), - sa.Index( - "ix_flow_run__expected_start_time_desc", - sa.desc("expected_start_time"), - ), - sa.Index( - "ix_flow_run__next_scheduled_start_time_asc", - sa.asc("next_scheduled_start_time"), - ), - sa.Index( - "ix_flow_run__end_time_desc", - sa.desc("end_time"), - ), - sa.Index( - "ix_flow_run__start_time", - "start_time", - ), - sa.Index( - "ix_flow_run__state_type", - "state_type", - ), - sa.Index( - "ix_flow_run__state_name", - "state_name", - ), - sa.Index( - "ix_flow_run__state_timestamp", - "state_timestamp", - ), - ) + @declared_attr.directive + @classmethod + def __table_args__(cls) -> Iterable[sa.Index]: + return ( + sa.Index( + "uq_flow_run__flow_id_idempotency_key", + cls.flow_id, + cls.idempotency_key, + unique=True, + ), + sa.Index( + "ix_flow_run__coalesce_start_time_expected_start_time_desc", + coalesce(cls.start_time, cls.expected_start_time).desc(), + ), + sa.Index( + "ix_flow_run__coalesce_start_time_expected_start_time_asc", + coalesce(cls.start_time, cls.expected_start_time).asc(), + ), + sa.Index( + "ix_flow_run__expected_start_time_desc", + cls.expected_start_time.desc(), + ), + sa.Index( + "ix_flow_run__next_scheduled_start_time_asc", + cls.next_scheduled_start_time.asc(), + ), + sa.Index( + "ix_flow_run__end_time_desc", + cls.end_time.desc(), + ), + sa.Index( + "ix_flow_run__start_time", + cls.start_time, + ), + sa.Index( + "ix_flow_run__state_type", + cls.state_type, + ), + sa.Index( + "ix_flow_run__state_name", + cls.state_name, + ), + sa.Index( + "ix_flow_run__state_timestamp", + cls.state_timestamp, + ), + sa.Index("trgm_ix_flow_run_name", cls.name, postgresql_using="gin").ddl_if( + dialect="postgresql" + ), + sa.Index( + # index names are at most 63 characters long. 
+ "ix_flow_run__scheduler_deployment_id_auto_scheduled_next_schedu", + cls.deployment_id, + cls.auto_scheduled, + cls.next_scheduled_start_time, + postgresql_where=cls.state_type == schemas.states.StateType.SCHEDULED, + sqlite_where=cls.state_type == schemas.states.StateType.SCHEDULED, + ), + ) + + +_TaskInput = Union[ + schemas.core.TaskRunResult, schemas.core.Parameter, schemas.core.Constant +] +_TaskInputs = dict[str, list[_TaskInput]] class TaskRun(Run): """SQLAlchemy model of a task run.""" - flow_run_id = sa.Column( - UUID(), - sa.ForeignKey("flow_run.id", ondelete="cascade"), - nullable=True, - index=True, + flow_run_id: Mapped[Optional[uuid.UUID]] = mapped_column( + sa.ForeignKey("flow_run.id", ondelete="cascade"), index=True ) - task_key = sa.Column(sa.String, nullable=False) - dynamic_key = sa.Column(sa.String, nullable=False) - cache_key = sa.Column(sa.String) - cache_expiration = sa.Column(Timestamp()) - task_version = sa.Column(sa.String) - flow_run_run_count = sa.Column( - sa.Integer, server_default="0", default=0, nullable=False - ) - empirical_policy = sa.Column( + task_key: Mapped[str] = mapped_column() + dynamic_key: Mapped[str] = mapped_column() + cache_key: Mapped[Optional[str]] + cache_expiration: Mapped[Optional[pendulum.DateTime]] + task_version: Mapped[Optional[str]] + flow_run_run_count: Mapped[int] = mapped_column(server_default="0", default=0) + empirical_policy: Mapped[schemas.core.TaskRunPolicy] = mapped_column( Pydantic(schemas.core.TaskRunPolicy), server_default="{}", default=schemas.core.TaskRunPolicy, - nullable=False, - ) - task_inputs = sa.Column( - Pydantic( - Dict[ - str, - List[ - Union[ - schemas.core.TaskRunResult, - schemas.core.Parameter, - schemas.core.Constant, - ] - ], - ] - ), - server_default="{}", - default=dict, - nullable=False, - ) - tags: Mapped[List[str]] = mapped_column( - JSON, server_default="[]", default=list, nullable=False ) - labels: Mapped[Union[schemas.core.KeyValueLabels, None]] = mapped_column( - JSON, nullable=True + task_inputs: Mapped[_TaskInputs] = mapped_column( + Pydantic(_TaskInputs), server_default="{}", default=dict ) + tags: Mapped[list[str]] = mapped_column(JSON, server_default="[]", default=list) + labels: Mapped[Optional[schemas.core.KeyValueLabels]] = mapped_column(JSON) # TODO remove this foreign key for significant delete performance gains - state_id = sa.Column( - UUID(), - sa.ForeignKey( - "task_run_state.id", - ondelete="SET NULL", - use_alter=True, - ), + state_id: Mapped[Optional[uuid.UUID]] = mapped_column( + sa.ForeignKey("task_run_state.id", ondelete="SET NULL", use_alter=True), index=True, ) # -------------------------- relationships # current states are eagerly loaded unless otherwise specified - _state = sa.orm.relationship( - "TaskRunState", + _state: Mapped[Optional[TaskRunState]] = relationship( lazy="selectin", foreign_keys=[state_id], primaryjoin="TaskRunState.id==TaskRun.state_id", ) @hybrid_property - def state(self): + def state(self) -> Optional[TaskRunState]: return self._state - @state.setter - def state(self, value): + @state.inplace.setter + def _set_state(self, value: Optional[TaskRunState]) -> None: # because this is a slightly non-standard SQLAlchemy relationship, we # prefer an explicit setter method to a setter property, because # user expectations about SQLAlchemy attribute assignment might not be @@ -758,7 +707,7 @@ def state(self, value): # still works because the ORM model's __init__ depends on it. 
return self.set_state(value) - def set_state(self, state): + def set_state(self, state: Optional[TaskRunState]) -> None: """ If a state is assigned to this run, populate its run id. @@ -770,15 +719,13 @@ def set_state(self, state): state.task_run_id = self.id self._state = state - flow_run = sa.orm.relationship( - "FlowRun", + flow_run: Mapped[Optional["FlowRun"]] = relationship( back_populates="task_runs", lazy="raise", foreign_keys=[flow_run_id], ) - subflow_run = sa.orm.relationship( - "FlowRun", + subflow_run: Mapped["FlowRun"] = relationship( back_populates="parent_task_run", lazy="raise", # foreign_keys=["FlowRun.parent_task_run_id"], @@ -786,120 +733,112 @@ def set_state(self, state): uselist=False, ) - __table_args__ = ( - sa.Index( - "uq_task_run__flow_run_id_task_key_dynamic_key", - "flow_run_id", - "task_key", - "dynamic_key", - unique=True, - ), - sa.Index( - "ix_task_run__expected_start_time_desc", - sa.desc("expected_start_time"), - ), - sa.Index( - "ix_task_run__next_scheduled_start_time_asc", - sa.asc("next_scheduled_start_time"), - ), - sa.Index( - "ix_task_run__end_time_desc", - sa.desc("end_time"), - ), - sa.Index( - "ix_task_run__start_time", - "start_time", - ), - sa.Index( - "ix_task_run__state_type", - "state_type", - ), - sa.Index( - "ix_task_run__state_name", - "state_name", - ), - sa.Index( - "ix_task_run__state_timestamp", - "state_timestamp", - ), - ) + @declared_attr.directive + @classmethod + def __table_args__(cls) -> Iterable[sa.Index]: + return ( + sa.Index( + "uq_task_run__flow_run_id_task_key_dynamic_key", + cls.flow_run_id, + cls.task_key, + cls.dynamic_key, + unique=True, + ), + sa.Index( + "ix_task_run__expected_start_time_desc", + cls.expected_start_time.desc(), + ), + sa.Index( + "ix_task_run__next_scheduled_start_time_asc", + cls.next_scheduled_start_time.asc(), + ), + sa.Index( + "ix_task_run__end_time_desc", + cls.end_time.desc(), + ), + sa.Index( + "ix_task_run__start_time", + cls.start_time, + ), + sa.Index( + "ix_task_run__state_type", + cls.state_type, + ), + sa.Index( + "ix_task_run__state_name", + cls.state_name, + ), + sa.Index( + "ix_task_run__state_timestamp", + cls.state_timestamp, + ), + sa.Index("trgm_ix_task_run_name", cls.name, postgresql_using="gin").ddl_if( + dialect="postgresql" + ), + ) class DeploymentSchedule(Base): - deployment_id = sa.Column( - UUID(), - sa.ForeignKey("deployment.id", ondelete="CASCADE"), - nullable=False, - index=True, + deployment_id: Mapped[uuid.UUID] = mapped_column( + sa.ForeignKey("deployment.id", ondelete="CASCADE"), index=True ) - schedule = sa.Column(Pydantic(schemas.schedules.SCHEDULE_TYPES), nullable=False) - active = sa.Column(sa.Boolean, nullable=False, default=True) - max_scheduled_runs = sa.Column(sa.Integer, nullable=True) + schedule: Mapped[schemas.schedules.SCHEDULE_TYPES] = mapped_column( + Pydantic(schemas.schedules.SCHEDULE_TYPES) + ) + active: Mapped[bool] = mapped_column(default=True) + max_scheduled_runs: Mapped[Optional[int]] class Deployment(Base): """SQLAlchemy model of a deployment.""" - name = sa.Column(sa.String, nullable=False) - version = sa.Column(sa.String, nullable=True) - description = sa.Column(sa.Text(), nullable=True) - work_queue_name = sa.Column(sa.String, nullable=True, index=True) - infra_overrides = sa.Column(JSON, server_default="{}", default=dict, nullable=False) - path = sa.Column(sa.String, nullable=True) - entrypoint = sa.Column(sa.String, nullable=True) + name: Mapped[str] + version: Mapped[Optional[str]] + description: Mapped[Optional[str]] = 
mapped_column(sa.Text()) + work_queue_name: Mapped[Optional[str]] = mapped_column(index=True) + infra_overrides: Mapped[dict[str, Any]] = mapped_column( + JSON, server_default="{}", default=dict + ) + path: Mapped[Optional[str]] + entrypoint: Mapped[Optional[str]] - last_polled = sa.Column(Timestamp(), nullable=True) - status = sa.Column( + last_polled: Mapped[Optional[pendulum.DateTime]] + status: Mapped[DeploymentStatus] = mapped_column( sa.Enum(DeploymentStatus, name="deployment_status"), - nullable=False, default=DeploymentStatus.NOT_READY, server_default="NOT_READY", ) @declared_attr - def job_variables(self): + def job_variables(self) -> Mapped[dict[str, Any]]: return synonym("infra_overrides") flow_id: Mapped[uuid.UUID] = mapped_column( - UUID, - sa.ForeignKey("flow.id", ondelete="CASCADE"), - nullable=False, - index=True, + sa.ForeignKey("flow.id", ondelete="CASCADE"), index=True ) - work_queue_id: Mapped[uuid.UUID] = mapped_column( - UUID, - sa.ForeignKey("work_queue.id", ondelete="SET NULL"), - nullable=True, - index=True, - ) - paused = sa.Column( - sa.Boolean, nullable=False, server_default="0", default=False, index=True + work_queue_id: Mapped[Optional[uuid.UUID]] = mapped_column( + sa.ForeignKey("work_queue.id", ondelete="SET NULL"), index=True ) + paused: Mapped[bool] = mapped_column(server_default="0", default=False, index=True) - schedules = sa.orm.relationship( - "DeploymentSchedule", - lazy="selectin", - order_by=sa.desc(sa.text("updated")), + schedules: Mapped[list["DeploymentSchedule"]] = relationship( + lazy="selectin", order_by=sa.desc(sa.text("updated")) ) # deprecated in favor of `concurrency_limit_id` FK - _concurrency_limit: Mapped[Union[int, None]] = mapped_column( - sa.Integer, default=None, nullable=True, name="concurrency_limit" - ) - concurrency_limit_id: Mapped[Union[uuid.UUID, None]] = mapped_column( - UUID, + _concurrency_limit: Mapped[Optional[int]] = mapped_column(name="concurrency_limit") + concurrency_limit_id: Mapped[Optional[uuid.UUID]] = mapped_column( sa.ForeignKey("concurrency_limit_v2.id", ondelete="SET NULL"), - nullable=True, ) global_concurrency_limit: Mapped[ - Union["ConcurrencyLimitV2", None] + Optional["ConcurrencyLimitV2"] ] = sa.orm.relationship( lazy="selectin", ) concurrency_options: Mapped[ - Union[schemas.core.ConcurrencyOptions, None] + Optional[schemas.core.ConcurrencyOptions] ] = mapped_column( Pydantic(schemas.core.ConcurrencyOptions), server_default=None, @@ -907,52 +846,45 @@ def job_variables(self): default=None, ) - tags: Mapped[List[str]] = mapped_column( - JSON, server_default="[]", default=list, nullable=False + tags: Mapped[list[str]] = mapped_column(JSON, server_default="[]", default=list) + labels: Mapped[Optional[schemas.core.KeyValueLabels]] = mapped_column(JSON) + parameters: Mapped[dict[str, Any]] = mapped_column( + JSON, server_default="{}", default=dict ) - labels: Mapped[Union[schemas.core.KeyValueLabels, None]] = mapped_column( - JSON, nullable=True + pull_steps: Mapped[Optional[list[dict[str, Any]]]] = mapped_column( + JSON, default=list ) - parameters = sa.Column(JSON, server_default="{}", default=dict, nullable=False) - pull_steps = sa.Column(JSON, default=list, nullable=True) - parameter_openapi_schema = sa.Column(JSON, default=dict, nullable=True) - enforce_parameter_schema = sa.Column( - sa.Boolean, default=True, server_default="0", nullable=False + parameter_openapi_schema: Mapped[Optional[dict[str, Any]]] = mapped_column( + JSON, default=dict ) - created_by = sa.Column( - 
Pydantic(schemas.core.CreatedBy), - server_default=None, - default=None, - nullable=True, + enforce_parameter_schema: Mapped[bool] = mapped_column( + default=True, server_default="0" ) - updated_by = sa.Column( - Pydantic(schemas.core.UpdatedBy), - server_default=None, - default=None, - nullable=True, + created_by: Mapped[Optional[schemas.core.CreatedBy]] = mapped_column( + Pydantic(schemas.core.CreatedBy) + ) + updated_by: Mapped[Optional[schemas.core.UpdatedBy]] = mapped_column( + Pydantic(schemas.core.UpdatedBy) ) - infrastructure_document_id = sa.Column( - UUID, - sa.ForeignKey("block_document.id", ondelete="CASCADE"), - nullable=True, - index=False, + infrastructure_document_id: Mapped[Optional[uuid.UUID]] = mapped_column( + sa.ForeignKey("block_document.id", ondelete="CASCADE"), index=False ) - storage_document_id = sa.Column( - UUID, + storage_document_id: Mapped[Optional[uuid.UUID]] = mapped_column( sa.ForeignKey("block_document.id", ondelete="CASCADE"), - nullable=True, index=False, ) - flow = sa.orm.relationship("Flow", back_populates="deployments", lazy="raise") + flow: Mapped["Flow"] = relationship( + "Flow", back_populates="deployments", lazy="raise" + ) - work_queue = sa.orm.relationship( - "WorkQueue", lazy="selectin", foreign_keys=[work_queue_id] + work_queue: Mapped[Optional["WorkQueue"]] = relationship( + lazy="selectin", foreign_keys=[work_queue_id] ) - __table_args__ = ( + __table_args__: Any = ( sa.Index( "uq_deployment__flow_id_name", "flow_id", @@ -963,6 +895,9 @@ def job_variables(self): "ix_deployment__created", "created", ), + sa.Index("trgm_ix_deployment_name", "name", postgresql_using="gin").ddl_if( + dialect="postgresql" + ), ) @@ -971,16 +906,16 @@ class Log(Base): SQLAlchemy model of a logging statement. """ - name = sa.Column(sa.String, nullable=False) - level = sa.Column(sa.SmallInteger, nullable=False, index=True) - flow_run_id = sa.Column(UUID(), nullable=True, index=True) - task_run_id = sa.Column(UUID(), nullable=True, index=True) - message = sa.Column(sa.Text, nullable=False) + name: Mapped[str] + level: Mapped[int] = mapped_column(sa.SmallInteger, index=True) + flow_run_id: Mapped[Optional[uuid.UUID]] = mapped_column(index=True) + task_run_id: Mapped[Optional[uuid.UUID]] = mapped_column(index=True) + message: Mapped[str] = mapped_column(sa.Text) # The client-side timestamp of this logged statement. 
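For illustration, a minimal sketch of the annotation-driven `mapped_column` conventions the rewritten models above rely on: `Mapped[Optional[X]]` yields a nullable column, `Mapped[X]` a NOT NULL one, and a bare annotation needs no `mapped_column()` call unless it passes options such as `index=True` or `server_default=...`. Prefect's `Base` presumably supplies a `type_annotation_map` for types like `pendulum.DateTime` and `uuid.UUID`; the hypothetical `Example` model below uses a small map of its own instead.

import datetime
import uuid
from typing import Optional

import sqlalchemy as sa
from sqlalchemy.orm import DeclarativeBase, Mapped, mapped_column


class Base(DeclarativeBase):
    # Stand-in for Prefect's custom Timestamp/UUID column types.
    type_annotation_map = {
        datetime.datetime: sa.TIMESTAMP(timezone=True),
        uuid.UUID: sa.Uuid(),
    }


class Example(Base):
    __tablename__ = "example"

    id: Mapped[uuid.UUID] = mapped_column(primary_key=True, default=uuid.uuid4)
    name: Mapped[str]                   # NOT NULL, no mapped_column() needed
    description: Mapped[Optional[str]]  # nullable
    created: Mapped[datetime.datetime] = mapped_column(index=True)
    parent_id: Mapped[Optional[uuid.UUID]] = mapped_column(
        sa.ForeignKey("example.id", ondelete="SET NULL"), index=True
    )


# Nullability is derived from the annotation:
assert Example.__table__.c.name.nullable is False
assert Example.__table__.c.description.nullable is True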
- timestamp = sa.Column(Timestamp(), nullable=False, index=True) + timestamp: Mapped[pendulum.DateTime] = mapped_column(index=True) - __table_args__ = ( + __table_args__: Any = ( sa.Index( "ix_log__flow_run_id_timestamp", "flow_run_id", @@ -990,68 +925,68 @@ class Log(Base): class ConcurrencyLimit(Base): - tag = sa.Column(sa.String, nullable=False) - concurrency_limit = sa.Column(sa.Integer, nullable=False) - active_slots: Mapped[List[str]] = mapped_column( - JSON, server_default="[]", default=list, nullable=False + tag: Mapped[str] + concurrency_limit: Mapped[int] + active_slots: Mapped[list[str]] = mapped_column( + JSON, server_default="[]", default=list ) - __table_args__ = (sa.Index("uq_concurrency_limit__tag", "tag", unique=True),) + __table_args__: Any = (sa.Index("uq_concurrency_limit__tag", "tag", unique=True),) class ConcurrencyLimitV2(Base): - active = sa.Column(sa.Boolean, nullable=False, default=True) - name = sa.Column(sa.String, nullable=False) - limit = sa.Column(sa.Integer, nullable=False) - active_slots = sa.Column(sa.Integer, nullable=False, default=0) - denied_slots = sa.Column(sa.Integer, nullable=False, default=0) + active: Mapped[bool] = mapped_column(default=True) + name: Mapped[str] + limit: Mapped[int] + active_slots: Mapped[int] = mapped_column(default=0) + denied_slots: Mapped[int] = mapped_column(default=0) - slot_decay_per_second = sa.Column(sa.Float, default=0.0, nullable=False) - avg_slot_occupancy_seconds = sa.Column(sa.Float, default=2.0, nullable=False) + slot_decay_per_second: Mapped[float] = mapped_column(default=0.0) + avg_slot_occupancy_seconds: Mapped[float] = mapped_column(default=2.0) - __table_args__ = (sa.UniqueConstraint("name"),) + __table_args__: Any = (sa.UniqueConstraint("name"),) class BlockType(Base): - name = sa.Column(sa.String, nullable=False) - slug = sa.Column(sa.String, nullable=False) - logo_url = sa.Column(sa.String, nullable=True) - documentation_url = sa.Column(sa.String, nullable=True) - description = sa.Column(sa.String, nullable=True) - code_example = sa.Column(sa.String, nullable=True) - is_protected = sa.Column( - sa.Boolean, nullable=False, server_default="0", default=False - ) - - __table_args__ = ( + name: Mapped[str] + slug: Mapped[str] + logo_url: Mapped[Optional[str]] + documentation_url: Mapped[Optional[str]] + description: Mapped[Optional[str]] + code_example: Mapped[Optional[str]] + is_protected: Mapped[bool] = mapped_column(server_default="0", default=False) + + __table_args__: Any = ( sa.Index( "uq_block_type__slug", "slug", unique=True, ), + sa.Index("trgm_ix_block_type_name", "name", postgresql_using="gin").ddl_if( + dialect="postgresql" + ), ) class BlockSchema(Base): - checksum = sa.Column(sa.String, nullable=False) - fields = sa.Column(JSON, server_default="{}", default=dict, nullable=False) - capabilities = sa.Column(JSON, server_default="[]", default=list, nullable=False) - version = sa.Column( - sa.String, + checksum: Mapped[str] + fields: Mapped[dict[str, Any]] = mapped_column( + JSON, server_default="{}", default=dict + ) + capabilities: Mapped[list[str]] = mapped_column( + JSON, server_default="[]", default=list + ) + version: Mapped[str] = mapped_column( server_default=schemas.core.DEFAULT_BLOCK_SCHEMA_VERSION, - nullable=False, ) - block_type_id: Mapped[UUID] = mapped_column( - UUID(), - sa.ForeignKey("block_type.id", ondelete="cascade"), - nullable=False, - index=True, + block_type_id: Mapped[uuid.UUID] = mapped_column( + sa.ForeignKey("block_type.id", ondelete="cascade"), index=True ) - block_type = 
sa.orm.relationship("BlockType", lazy="selectin") + block_type: Mapped["BlockType"] = relationship(lazy="selectin") - __table_args__ = ( + __table_args__: Any = ( sa.Index( "uq_block_schema__checksum_version", "checksum", @@ -1059,58 +994,57 @@ class BlockSchema(Base): unique=True, ), sa.Index("ix_block_schema__created", "created"), + sa.Index( + "ix_block_schema__capabilities", "capabilities", postgresql_using="gin" + ).ddl_if(dialect="postgresql"), ) class BlockSchemaReference(Base): - name = sa.Column(sa.String, nullable=False) + name: Mapped[str] - parent_block_schema_id = sa.Column( - UUID(), - sa.ForeignKey("block_schema.id", ondelete="cascade"), - nullable=False, + parent_block_schema_id: Mapped[uuid.UUID] = mapped_column( + sa.ForeignKey("block_schema.id", ondelete="cascade") ) - reference_block_schema_id = sa.Column( - UUID(), - sa.ForeignKey("block_schema.id", ondelete="cascade"), - nullable=False, + reference_block_schema_id: Mapped[uuid.UUID] = mapped_column( + sa.ForeignKey("block_schema.id", ondelete="cascade") ) class BlockDocument(Base): - name = sa.Column(sa.String, nullable=False, index=True) - data = sa.Column(JSON, server_default="{}", default=dict, nullable=False) - is_anonymous = sa.Column(sa.Boolean, server_default="0", index=True, nullable=False) + name: Mapped[str] = mapped_column(index=True) + data: Mapped[Any] = mapped_column(JSON, server_default="{}", default=dict) + is_anonymous: Mapped[bool] = mapped_column(server_default="0", index=True) - block_type_name = sa.Column(sa.String, nullable=True) + block_type_name: Mapped[Optional[str]] - block_type_id = sa.Column( - UUID(), - sa.ForeignKey("block_type.id", ondelete="cascade"), - nullable=False, + block_type_id: Mapped[uuid.UUID] = mapped_column( + sa.ForeignKey("block_type.id", ondelete="cascade") ) - block_type = sa.orm.relationship("BlockType", lazy="selectin") + block_type: Mapped["BlockType"] = relationship(lazy="selectin") - block_schema_id = sa.Column( - UUID(), - sa.ForeignKey("block_schema.id", ondelete="cascade"), - nullable=False, + block_schema_id: Mapped[uuid.UUID] = mapped_column( + sa.ForeignKey("block_schema.id", ondelete="cascade") ) - block_schema = sa.orm.relationship("BlockSchema", lazy="selectin") + block_schema: Mapped["BlockSchema"] = relationship(lazy="selectin") - __table_args__ = ( + __table_args__: Any = ( sa.Index( "uq_block__type_id_name", "block_type_id", "name", unique=True, ), + sa.Index("ix_block_document__block_type_name__name", "block_type_name", "name"), + sa.Index("trgm_ix_block_document_name", "name", postgresql_using="gin").ddl_if( + dialect="postgresql" + ), ) - async def encrypt_data(self, session, data): + async def encrypt_data(self, session: AsyncSession, data: dict[str, Any]) -> None: """ Store encrypted data on the ORM model @@ -1118,7 +1052,7 @@ async def encrypt_data(self, session, data): """ self.data = await encrypt_fernet(session, data) - async def decrypt_data(self, session): + async def decrypt_data(self, session: AsyncSession) -> dict[str, Any]: """ Retrieve decrypted data from the ORM model. 
@@ -1128,190 +1062,159 @@ async def decrypt_data(self, session): class BlockDocumentReference(Base): - name: Mapped[str] = mapped_column(sa.String, nullable=False) + name: Mapped[str] - parent_block_document_id: Mapped[UUID] = mapped_column( - UUID(), + parent_block_document_id: Mapped[uuid.UUID] = mapped_column( sa.ForeignKey("block_document.id", ondelete="cascade"), - nullable=False, ) - reference_block_document_id: Mapped[UUID] = mapped_column( - UUID(), + reference_block_document_id: Mapped[uuid.UUID] = mapped_column( sa.ForeignKey("block_document.id", ondelete="cascade"), - nullable=False, ) class Configuration(Base): - key = sa.Column(sa.String, nullable=False, index=True) - value: Mapped[Dict[str, Any]] = mapped_column(JSON, nullable=False) + key: Mapped[str] = mapped_column(index=True) + value: Mapped[Dict[str, Any]] = mapped_column(JSON) - __table_args__ = (sa.UniqueConstraint("key"),) + __table_args__: Any = (sa.UniqueConstraint("key"),) class SavedSearch(Base): """SQLAlchemy model of a saved search.""" - name = sa.Column(sa.String, nullable=False) - filters = sa.Column( - JSON, - server_default="[]", - default=list, - nullable=False, + name: Mapped[str] + filters: Mapped[list[dict[str, Any]]] = mapped_column( + JSON, server_default="[]", default=list ) - __table_args__ = (sa.UniqueConstraint("name"),) + __table_args__: Any = (sa.UniqueConstraint("name"),) class WorkQueue(Base): """SQLAlchemy model of a work queue""" - name = sa.Column(sa.String, nullable=False) + name: Mapped[str] - filter = sa.Column( - Pydantic(schemas.core.QueueFilter), - server_default=None, - default=None, - nullable=True, + filter: Mapped[Optional[schemas.core.QueueFilter]] = mapped_column( + Pydantic(schemas.core.QueueFilter) ) - description = sa.Column(sa.String, nullable=False, default="", server_default="") - is_paused = sa.Column(sa.Boolean, nullable=False, server_default="0", default=False) - concurrency_limit: Mapped[int] = mapped_column( - sa.Integer, - nullable=True, - ) - priority: Mapped[int] = mapped_column(sa.Integer, index=True, nullable=False) + description: Mapped[str] = mapped_column(default="", server_default="") + is_paused: Mapped[bool] = mapped_column(server_default="0", default=False) + concurrency_limit: Mapped[Optional[int]] + priority: Mapped[int] - last_polled: Mapped[Union[pendulum.DateTime, None]] = mapped_column( - Timestamp(), - nullable=True, - ) - status = sa.Column( + last_polled: Mapped[Optional[pendulum.DateTime]] + status: Mapped[WorkQueueStatus] = mapped_column( sa.Enum(WorkQueueStatus, name="work_queue_status"), - nullable=False, default=WorkQueueStatus.NOT_READY, - server_default=WorkQueueStatus.NOT_READY.value, + server_default=WorkQueueStatus.NOT_READY, ) - __table_args__ = (sa.UniqueConstraint("work_pool_id", "name"),) - work_pool_id: Mapped[uuid.UUID] = mapped_column( - UUID, - sa.ForeignKey("work_pool.id", ondelete="cascade"), - nullable=False, - index=True, + sa.ForeignKey("work_pool.id", ondelete="cascade"), index=True ) - work_pool = sa.orm.relationship( - "WorkPool", - lazy="selectin", - foreign_keys=[work_pool_id], + work_pool: Mapped["WorkPool"] = relationship( + lazy="selectin", foreign_keys=[work_pool_id] + ) + + __table_args__ = ( + sa.UniqueConstraint("work_pool_id", "name"), + sa.Index("ix_work_queue__work_pool_id_priority", "work_pool_id", "priority"), + sa.Index("trgm_ix_work_queue_name", "name", postgresql_using="gin").ddl_if( + dialect="postgresql" + ), ) class WorkPool(Base): """SQLAlchemy model of an worker""" - name = sa.Column(sa.String, 
nullable=False) - description = sa.Column(sa.String) - type: Mapped[str] = mapped_column(sa.String) - base_job_template = sa.Column(JSON, nullable=False, server_default="{}", default={}) - is_paused: Mapped[bool] = mapped_column( - sa.Boolean, nullable=False, server_default="0", default=False + name: Mapped[str] + description: Mapped[Optional[str]] + type: Mapped[str] = mapped_column(index=True) + base_job_template: Mapped[dict[str, Any]] = mapped_column( + JSON, server_default="{}", default={} ) - default_queue_id: Mapped[UUID] = mapped_column(UUID, nullable=True) - concurrency_limit = sa.Column( - sa.Integer, + is_paused: Mapped[bool] = mapped_column(server_default="0", default=False) + default_queue_id: Mapped[Optional[uuid.UUID]] = mapped_column( + UUID, + sa.ForeignKey("work_queue.id", ondelete="RESTRICT", use_alter=True), nullable=True, ) + concurrency_limit: Mapped[Optional[int]] status: Mapped[WorkPoolStatus] = mapped_column( sa.Enum(WorkPoolStatus, name="work_pool_status"), - nullable=False, default=WorkPoolStatus.NOT_READY, - server_default=WorkPoolStatus.NOT_READY.value, + server_default=WorkPoolStatus.NOT_READY, ) - last_transitioned_status_at: Mapped[Union[pendulum.DateTime, None]] = mapped_column( - Timestamp(), nullable=True - ) - last_status_event_id: Mapped[uuid.UUID] = mapped_column(UUID, nullable=True) + last_transitioned_status_at: Mapped[Optional[pendulum.DateTime]] + last_status_event_id: Mapped[Optional[uuid.UUID]] - __table_args__ = (sa.UniqueConstraint("name"),) + __table_args__: Any = (sa.UniqueConstraint("name"),) class Worker(Base): """SQLAlchemy model of an worker""" - @declared_attr - def work_pool_id(cls): - return sa.Column( - UUID, - sa.ForeignKey("work_pool.id", ondelete="cascade"), - nullable=False, - index=True, - ) + work_pool_id: Mapped[uuid.UUID] = mapped_column( + sa.ForeignKey("work_pool.id", ondelete="cascade"), index=True + ) - name = sa.Column(sa.String, nullable=False) - last_heartbeat_time = sa.Column( - Timestamp(), - nullable=False, - server_default=now(), - default=lambda: pendulum.now("UTC"), - index=True, + name: Mapped[str] + last_heartbeat_time: Mapped[pendulum.DateTime] = mapped_column( + server_default=now(), default=lambda: pendulum.now("UTC") ) - heartbeat_interval_seconds = sa.Column(sa.Integer, nullable=True) + heartbeat_interval_seconds: Mapped[Optional[int]] - status = sa.Column( + status: Mapped[WorkerStatus] = mapped_column( sa.Enum(WorkerStatus, name="worker_status"), - nullable=False, default=WorkerStatus.OFFLINE, - server_default=WorkerStatus.OFFLINE.value, + server_default=WorkerStatus.OFFLINE, ) - __table_args__ = (sa.UniqueConstraint("work_pool_id", "name"),) + __table_args__: Any = ( + sa.UniqueConstraint("work_pool_id", "name"), + sa.Index( + "ix_worker__work_pool_id_last_heartbeat_time", + "work_pool_id", + "last_heartbeat_time", + ), + ) class Agent(Base): """SQLAlchemy model of an agent""" - name = sa.Column(sa.String, nullable=False) + name: Mapped[str] - work_queue_id = sa.Column( - UUID, - sa.ForeignKey("work_queue.id"), - nullable=False, - index=True, + work_queue_id: Mapped[uuid.UUID] = mapped_column( + sa.ForeignKey("work_queue.id"), index=True ) - last_activity_time = sa.Column( - Timestamp(), - nullable=False, - server_default=now(), - default=lambda: pendulum.now("UTC"), + last_activity_time: Mapped[pendulum.DateTime] = mapped_column( + server_default=now(), default=lambda: pendulum.now("UTC") ) - __table_args__ = (sa.UniqueConstraint("name"),) + __table_args__: Any = (sa.UniqueConstraint("name"),) class 
FlowRunNotificationPolicy(Base): - is_active = sa.Column(sa.Boolean, server_default="1", default=True, nullable=False) - state_names = sa.Column(JSON, server_default="[]", default=[], nullable=False) - tags: Mapped[List[str]] = mapped_column( - JSON, server_default="[]", default=[], nullable=False + is_active: Mapped[bool] = mapped_column(server_default="1", default=True) + state_names: Mapped[list[str]] = mapped_column( + JSON, server_default="[]", default=[] ) - message_template = sa.Column(sa.String, nullable=True) + tags: Mapped[list[str]] = mapped_column(JSON, server_default="[]", default=[]) + message_template: Mapped[Optional[str]] - block_document_id = sa.Column( - UUID(), - sa.ForeignKey("block_document.id", ondelete="cascade"), - nullable=False, + block_document_id: Mapped[uuid.UUID] = mapped_column( + sa.ForeignKey("block_document.id", ondelete="cascade") ) - block_document = sa.orm.relationship( - "BlockDocument", - lazy="selectin", - foreign_keys=[block_document_id], + block_document: Mapped["BlockDocument"] = relationship( + lazy="selectin", foreign_keys=[block_document_id] ) @@ -1319,64 +1222,60 @@ class FlowRunNotificationQueue(Base): # these are both foreign keys but there is no need to enforce that constraint # as this is just a queue for service workers; if the keys don't match at the # time work is pulled, the work can be discarded - flow_run_notification_policy_id = sa.Column(UUID, nullable=False) - flow_run_state_id = sa.Column(UUID, nullable=False) + flow_run_notification_policy_id: Mapped[uuid.UUID] + flow_run_state_id: Mapped[uuid.UUID] class Variable(Base): - name = sa.Column(sa.String, nullable=False) - value = sa.Column(sa.JSON, nullable=False) - tags = sa.Column(JSON, server_default="[]", default=list, nullable=False) + name: Mapped[str] + value: Mapped[Optional[Any]] = mapped_column(JSON) + tags: Mapped[list[str]] = mapped_column(JSON, server_default="[]", default=list) - __table_args__ = (sa.UniqueConstraint("name"),) + __table_args__: Any = (sa.UniqueConstraint("name"),) class FlowRunInput(Base): - flow_run_id = sa.Column( - UUID(), sa.ForeignKey("flow_run.id", ondelete="cascade"), nullable=False + flow_run_id: Mapped[uuid.UUID] = mapped_column( + sa.ForeignKey("flow_run.id", ondelete="cascade") ) - key = sa.Column(sa.String, nullable=False) - value = sa.Column(sa.Text(), nullable=False) - sender = sa.Column(sa.String, nullable=True) + key: Mapped[str] + value: Mapped[str] = mapped_column(sa.Text()) + sender: Mapped[Optional[str]] - __table_args__ = (sa.UniqueConstraint("flow_run_id", "key"),) + __table_args__: Any = (sa.UniqueConstraint("flow_run_id", "key"),) class CsrfToken(Base): - token = sa.Column(sa.String, nullable=False) - client = sa.Column(sa.String, nullable=False, unique=True) - expiration = sa.Column(Timestamp(), nullable=False) + token: Mapped[str] + client: Mapped[str] = mapped_column(unique=True) + expiration: Mapped[pendulum.DateTime] class Automation(Base): - name = sa.Column(sa.String, nullable=False) - description = sa.Column(sa.String, nullable=False, default="") + name: Mapped[str] + description: Mapped[str] = mapped_column(default="") - enabled = sa.Column(sa.Boolean, nullable=False, server_default="1", default=True) + enabled: Mapped[bool] = mapped_column(server_default="1", default=True) - trigger = sa.Column(Pydantic(ServerTriggerTypes), nullable=False) + trigger: Mapped[ServerTriggerTypes] = mapped_column(Pydantic(ServerTriggerTypes)) - actions = sa.Column(Pydantic(List[ServerActionTypes]), nullable=False) - actions_on_trigger = 
sa.Column( - Pydantic(List[ServerActionTypes]), - server_default="[]", - default=list, - nullable=False, + actions: Mapped[ServerActionTypes] = mapped_column( + Pydantic(list[ServerActionTypes]) + ) + actions_on_trigger: Mapped[list[ServerActionTypes]] = mapped_column( + Pydantic(list[ServerActionTypes]), server_default="[]", default=list ) - actions_on_resolve = sa.Column( - Pydantic(List[ServerActionTypes]), - server_default="[]", - default=list, - nullable=False, + actions_on_resolve: Mapped[list[ServerActionTypes]] = mapped_column( + Pydantic(list[ServerActionTypes]), server_default="[]", default=list ) - related_resources = sa.orm.relationship( + related_resources: Mapped[list["AutomationRelatedResource"]] = relationship( "AutomationRelatedResource", back_populates="automation", lazy="raise" ) @classmethod - def sort_expression(cls, value: AutomationSort) -> ColumnElement: + def sort_expression(cls, value: AutomationSort) -> sa.ColumnExpressionArgument[Any]: """Return an expression used to sort Automations""" sort_mapping = { AutomationSort.CREATED_DESC: cls.created.desc(), @@ -1388,7 +1287,7 @@ def sort_expression(cls, value: AutomationSort) -> ColumnElement: class AutomationBucket(Base): - __table_args__ = ( + __table_args__: Any = ( sa.Index( "uq_automation_bucket__automation_id__trigger_id__bucketing_key", "automation_id", @@ -1403,28 +1302,30 @@ class AutomationBucket(Base): ), ) - automation_id = sa.Column( - UUID(), sa.ForeignKey("automation.id", ondelete="CASCADE"), nullable=False + automation_id: Mapped[uuid.UUID] = mapped_column( + sa.ForeignKey("automation.id", ondelete="CASCADE") ) - trigger_id = sa.Column(UUID, nullable=False) + trigger_id: Mapped[uuid.UUID] - bucketing_key = sa.Column(JSON, server_default="[]", default=list, nullable=False) + bucketing_key: Mapped[list[str]] = mapped_column( + JSON, server_default="[]", default=list + ) - last_event = sa.Column(Pydantic(ReceivedEvent), nullable=True) + last_event: Mapped[Optional[ReceivedEvent]] = mapped_column(Pydantic(ReceivedEvent)) - start = sa.Column(Timestamp(), nullable=False) - end = sa.Column(Timestamp(), nullable=False) + start: Mapped[pendulum.DateTime] + end: Mapped[pendulum.DateTime] - count = sa.Column(sa.Integer, nullable=False) + count: Mapped[int] - last_operation = sa.Column(sa.String, nullable=True) + last_operation: Mapped[Optional[str]] - triggered_at = sa.Column(Timestamp(), nullable=True) + triggered_at: Mapped[Optional[pendulum.DateTime]] class AutomationRelatedResource(Base): - __table_args__ = ( + __table_args__: Any = ( sa.Index( "uq_automation_related_resource__automation_id__resource_id", "automation_id", @@ -1433,22 +1334,22 @@ class AutomationRelatedResource(Base): ), ) - automation_id = sa.Column( - UUID(), sa.ForeignKey("automation.id", ondelete="CASCADE"), nullable=False + automation_id: Mapped[uuid.UUID] = mapped_column( + sa.ForeignKey("automation.id", ondelete="CASCADE") ) - resource_id = sa.Column(sa.String, index=True) - automation_owned_by_resource = sa.Column( - sa.Boolean, nullable=False, default=False, server_default="0" + resource_id: Mapped[Optional[str]] = mapped_column(index=True) + automation_owned_by_resource: Mapped[bool] = mapped_column( + default=False, server_default="0" ) - automation = sa.orm.relationship( + automation: Mapped["Automation"] = relationship( "Automation", back_populates="related_resources", lazy="raise" ) class CompositeTriggerChildFiring(Base): - __table_args__ = ( + __table_args__: Any = ( sa.Index( 
"uq_composite_trigger_child_firing__a_id__pt_id__ct__id", "automation_id", @@ -1458,20 +1359,20 @@ class CompositeTriggerChildFiring(Base): ), ) - automation_id = sa.Column( - UUID(), sa.ForeignKey("automation.id", ondelete="CASCADE"), nullable=False + automation_id: Mapped[uuid.UUID] = mapped_column( + sa.ForeignKey("automation.id", ondelete="CASCADE") ) - parent_trigger_id = sa.Column(UUID(), nullable=False) + parent_trigger_id: Mapped[uuid.UUID] - child_trigger_id = sa.Column(UUID(), nullable=False) - child_firing_id = sa.Column(UUID(), nullable=False) - child_fired_at = sa.Column(Timestamp()) - child_firing = sa.Column(Pydantic(Firing), nullable=False) + child_trigger_id: Mapped[uuid.UUID] + child_firing_id: Mapped[uuid.UUID] + child_fired_at: Mapped[Optional[pendulum.DateTime]] + child_firing: Mapped[Firing] = mapped_column(Pydantic(Firing)) class AutomationEventFollower(Base): - __table_args__ = ( + __table_args__: Any = ( sa.Index( "uq_follower_for_scope", "scope", @@ -1479,19 +1380,19 @@ class AutomationEventFollower(Base): unique=True, ), ) - scope = sa.Column(sa.String, nullable=False, default="", index=True) - leader_event_id = sa.Column(UUID(), nullable=False, index=True) - follower_event_id = sa.Column(UUID(), nullable=False) - received = sa.Column(Timestamp(), nullable=False, index=True) - follower = sa.Column(Pydantic(ReceivedEvent), nullable=False) + scope: Mapped[str] = mapped_column(default="", index=True) + leader_event_id: Mapped[uuid.UUID] = mapped_column(index=True) + follower_event_id: Mapped[uuid.UUID] + received: Mapped[pendulum.DateTime] = mapped_column(index=True) + follower: Mapped[ReceivedEvent] = mapped_column(Pydantic(ReceivedEvent)) class Event(Base): - @declared_attr - def __tablename__(cls): + @declared_attr.directive + def __tablename__(cls) -> str: return "events" - __table_args__ = ( + __table_args__: Any = ( sa.Index("ix_events__related_resource_ids", "related_resource_ids"), sa.Index("ix_events__occurred", "occurred"), sa.Index("ix_events__event__id", "event", "id"), @@ -1506,26 +1407,28 @@ def __tablename__(cls): sa.Index("ix_events__event_related_occurred", "event", "related", "occurred"), ) - occurred = sa.Column(Timestamp(), nullable=False) - event = sa.Column(sa.Text(), nullable=False) - resource_id = sa.Column(sa.Text(), nullable=False) - resource = sa.Column(JSON(), nullable=False) - related_resource_ids = sa.Column( - JSON(), server_default="[]", default=list, nullable=False + occurred: Mapped[pendulum.DateTime] + event: Mapped[str] = mapped_column(sa.Text()) + resource_id: Mapped[str] = mapped_column(sa.Text()) + resource: Mapped[dict[str, Any]] = mapped_column(JSON()) + related_resource_ids: Mapped[list[str]] = mapped_column( + JSON(), server_default="[]", default=list ) - related = sa.Column(JSON(), server_default="[]", default=list, nullable=False) - payload = sa.Column(JSON(), nullable=False) - received = sa.Column(Timestamp(), nullable=False) - recorded = sa.Column(Timestamp(), nullable=False) - follows = sa.Column(UUID(), nullable=True) + related: Mapped[list[dict[str, Any]]] = mapped_column( + JSON(), server_default="[]", default=list + ) + payload: Mapped[dict[str, Any]] = mapped_column(JSON()) + received: Mapped[pendulum.DateTime] + recorded: Mapped[pendulum.DateTime] + follows: Mapped[Optional[uuid.UUID]] class EventResource(Base): - @declared_attr - def __tablename__(cls): + @declared_attr.directive + def __tablename__(cls) -> str: return "event_resources" - __table_args__ = ( + __table_args__: Any = ( sa.Index( 
"ix_event_resources__resource_id__occurred", "resource_id", @@ -1533,11 +1436,11 @@ def __tablename__(cls): ), ) - occurred = sa.Column("occurred", Timestamp(), nullable=False) - resource_id = sa.Column("resource_id", sa.Text(), nullable=False) - resource_role = sa.Column("resource_role", sa.Text(), nullable=False) - resource = sa.Column("resource", sa.JSON(), nullable=False) - event_id = sa.Column("event_id", UUID(), nullable=False) + occurred: Mapped[pendulum.DateTime] + resource_id: Mapped[str] = mapped_column(sa.Text()) + resource_role: Mapped[str] = mapped_column(sa.Text()) + resource: Mapped[dict[str, Any]] = mapped_column(sa.JSON()) + event_id: Mapped[uuid.UUID] # These are temporary until we've migrated all the references to the new, @@ -1582,6 +1485,9 @@ def __tablename__(cls): ORMEventResource = EventResource +_UpsertColumns = Iterable[Union[str, "sa.Column[Any]", roles.DDLConstraintColumnRole]] + + class BaseORMConfiguration(ABC): """ Abstract base class used to inject database-specific ORM configuration into Prefect. @@ -1590,7 +1496,7 @@ class BaseORMConfiguration(ABC): Use with caution. """ - def _unique_key(self) -> Tuple[Hashable, ...]: + def _unique_key(self) -> tuple[Hashable, ...]: """ Returns a key used to determine whether to instantiate a new DB interface. """ @@ -1598,52 +1504,52 @@ def _unique_key(self) -> Tuple[Hashable, ...]: @property @abstractmethod - def versions_dir(self): + def versions_dir(self) -> Path: """Directory containing migrations""" ... @property - def deployment_unique_upsert_columns(self): + def deployment_unique_upsert_columns(self) -> _UpsertColumns: """Unique columns for upserting a Deployment""" return [Deployment.flow_id, Deployment.name] @property - def concurrency_limit_unique_upsert_columns(self): + def concurrency_limit_unique_upsert_columns(self) -> _UpsertColumns: """Unique columns for upserting a ConcurrencyLimit""" return [ConcurrencyLimit.tag] @property - def flow_run_unique_upsert_columns(self): + def flow_run_unique_upsert_columns(self) -> _UpsertColumns: """Unique columns for upserting a FlowRun""" return [FlowRun.flow_id, FlowRun.idempotency_key] @property - def block_type_unique_upsert_columns(self): + def block_type_unique_upsert_columns(self) -> _UpsertColumns: """Unique columns for upserting a BlockType""" return [BlockType.slug] @property - def artifact_collection_unique_upsert_columns(self): + def artifact_collection_unique_upsert_columns(self) -> _UpsertColumns: """Unique columns for upserting an ArtifactCollection""" return [ArtifactCollection.key] @property - def block_schema_unique_upsert_columns(self): + def block_schema_unique_upsert_columns(self) -> _UpsertColumns: """Unique columns for upserting a BlockSchema""" return [BlockSchema.checksum, BlockSchema.version] @property - def flow_unique_upsert_columns(self): + def flow_unique_upsert_columns(self) -> _UpsertColumns: """Unique columns for upserting a Flow""" return [Flow.name] @property - def saved_search_unique_upsert_columns(self): + def saved_search_unique_upsert_columns(self) -> _UpsertColumns: """Unique columns for upserting a SavedSearch""" return [SavedSearch.name] @property - def task_run_unique_upsert_columns(self): + def task_run_unique_upsert_columns(self) -> _UpsertColumns: """Unique columns for upserting a TaskRun""" return [ TaskRun.flow_run_id, @@ -1652,7 +1558,7 @@ def task_run_unique_upsert_columns(self): ] @property - def block_document_unique_upsert_columns(self): + def block_document_unique_upsert_columns(self) -> _UpsertColumns: """Unique 
columns for upserting a BlockDocument""" return [BlockDocument.block_type_id, BlockDocument.name] @@ -1663,9 +1569,11 @@ class AsyncPostgresORMConfiguration(BaseORMConfiguration): @property def versions_dir(self) -> Path: """Directory containing migrations""" + import prefect.server.database + return ( Path(prefect.server.database.__file__).parent - / "migrations" + / "_migrations" / "versions" / "postgresql" ) @@ -1677,9 +1585,11 @@ class AioSqliteORMConfiguration(BaseORMConfiguration): @property def versions_dir(self) -> Path: """Directory containing migrations""" + import prefect.server.database + return ( Path(prefect.server.database.__file__).parent - / "migrations" + / "_migrations" / "versions" / "sqlite" ) diff --git a/src/prefect/server/events/counting.py b/src/prefect/server/events/counting.py index aa8d833c4bac..72d6051c0d19 100644 --- a/src/prefect/server/events/counting.py +++ b/src/prefect/server/events/counting.py @@ -4,12 +4,12 @@ import pendulum import sqlalchemy as sa -from pendulum.datetime import DateTime from sqlalchemy.sql.selectable import Select from prefect.server.database.dependencies import provide_database_interface from prefect.server.database.interface import PrefectDBInterface from prefect.server.utilities.database import json_extract +from prefect.types import DateTime from prefect.utilities.collections import AutoEnum if TYPE_CHECKING: diff --git a/src/prefect/server/events/filters.py b/src/prefect/server/events/filters.py index 2736b6290719..d4f2453b09e4 100644 --- a/src/prefect/server/events/filters.py +++ b/src/prefect/server/events/filters.py @@ -7,7 +7,6 @@ import pendulum import sqlalchemy as sa from pydantic import Field, PrivateAttr -from pydantic_extra_types.pendulum_dt import DateTime from sqlalchemy.sql import Select from prefect._internal.schemas.bases import PrefectBaseModel @@ -17,6 +16,7 @@ PrefectOperatorFilterBaseModel, ) from prefect.server.utilities.database import json_extract +from prefect.types import DateTime from prefect.utilities.collections import AutoEnum from .schemas.events import Event, Resource, ResourceSpecification diff --git a/src/prefect/server/events/schemas/automations.py b/src/prefect/server/events/schemas/automations.py index 548db796142a..8f54426990ab 100644 --- a/src/prefect/server/events/schemas/automations.py +++ b/src/prefect/server/events/schemas/automations.py @@ -25,7 +25,6 @@ field_validator, model_validator, ) -from pydantic_extra_types.pendulum_dt import DateTime from typing_extensions import Self, TypeAlias from prefect.logging import get_logger @@ -39,6 +38,7 @@ ) from prefect.server.schemas.actions import ActionBaseModel from prefect.server.utilities.schemas import ORMBaseModel, PrefectBaseModel +from prefect.types import DateTime from prefect.utilities.collections import AutoEnum logger = get_logger(__name__) diff --git a/src/prefect/server/events/schemas/events.py b/src/prefect/server/events/schemas/events.py index e01e79ec68dd..d72073b6e42f 100644 --- a/src/prefect/server/events/schemas/events.py +++ b/src/prefect/server/events/schemas/events.py @@ -23,7 +23,6 @@ field_validator, model_validator, ) -from pydantic_extra_types.pendulum_dt import DateTime from typing_extensions import Annotated, Self from prefect.logging import get_logger @@ -33,6 +32,7 @@ PREFECT_EVENTS_MAXIMUM_LABELS_PER_RESOURCE, PREFECT_EVENTS_MAXIMUM_RELATED_RESOURCES, ) +from prefect.types import DateTime logger = get_logger(__name__) diff --git a/src/prefect/server/events/triggers.py b/src/prefect/server/events/triggers.py index 
2c722901bab8..9a839bc5e66a 100644 --- a/src/prefect/server/events/triggers.py +++ b/src/prefect/server/events/triggers.py @@ -19,7 +19,6 @@ import pendulum import sqlalchemy as sa -from pendulum.datetime import DateTime from sqlalchemy.ext.asyncio import AsyncSession from typing_extensions import Literal, TypeAlias @@ -57,6 +56,7 @@ from prefect.server.events.schemas.events import ReceivedEvent from prefect.server.utilities.messaging import Message, MessageHandler from prefect.settings import PREFECT_EVENTS_EXPIRED_BUCKET_BUFFER +from prefect.types import DateTime if TYPE_CHECKING: from prefect.server.database.orm_models import ORMAutomationBucket diff --git a/src/prefect/server/models/flow_runs.py b/src/prefect/server/models/flow_runs.py index fbb6f522f61b..a454fff8f1d4 100644 --- a/src/prefect/server/models/flow_runs.py +++ b/src/prefect/server/models/flow_runs.py @@ -29,6 +29,7 @@ import prefect.server.models as models import prefect.server.schemas as schemas +from prefect.logging.loggers import get_logger from prefect.server.database import orm_models from prefect.server.database.dependencies import db_injector from prefect.server.database.interface import PrefectDBInterface @@ -46,6 +47,13 @@ PREFECT_API_MAX_FLOW_RUN_GRAPH_ARTIFACTS, PREFECT_API_MAX_FLOW_RUN_GRAPH_NODES, ) +from prefect.types import KeyValueLabels + +logger = get_logger("flow_runs") + + +logger = get_logger("flow_runs") + T = TypeVar("T", bound=tuple) @@ -633,3 +641,41 @@ async def with_system_labels_for_flow_run( ) return parent_labels | default_labels | user_supplied_labels + + +async def update_flow_run_labels( + session: AsyncSession, + flow_run_id: UUID, + labels: KeyValueLabels, +) -> bool: + """ + Update flow run labels by patching existing labels with new values. + Args: + session: A database session + flow_run_id: the flow run id to update + labels: the new labels to patch into existing labels + Returns: + bool: whether the update was successful + """ + # First read the existing flow run to get current labels + flow_run: Optional[orm_models.FlowRun] = await read_flow_run(session, flow_run_id) + if not flow_run: + raise ObjectNotFoundError(f"Flow run with id {flow_run_id} not found") + + # Merge existing labels with new labels + current_labels = flow_run.labels or {} + updated_labels = {**current_labels, **labels} + + try: + # Update the flow run with merged labels + result = await session.execute( + sa.update(orm_models.FlowRun) + .where(orm_models.FlowRun.id == flow_run_id) + .values(labels=updated_labels) + ) + success = result.rowcount > 0 + if success: + await session.commit() # Explicitly commit + return success + except Exception: + raise diff --git a/src/prefect/server/models/task_workers.py b/src/prefect/server/models/task_workers.py index c1213ae41381..899eba98b4d9 100644 --- a/src/prefect/server/models/task_workers.py +++ b/src/prefect/server/models/task_workers.py @@ -3,9 +3,10 @@ from typing import Dict, List, Set from pydantic import BaseModel -from pydantic_extra_types.pendulum_dt import DateTime from typing_extensions import TypeAlias +from prefect.types import DateTime + TaskKey: TypeAlias = str WorkerId: TypeAlias = str diff --git a/src/prefect/server/schemas/actions.py b/src/prefect/server/schemas/actions.py index e95efe7c741d..3b61e4f821a5 100644 --- a/src/prefect/server/schemas/actions.py +++ b/src/prefect/server/schemas/actions.py @@ -9,7 +9,6 @@ import pendulum from pydantic import ConfigDict, Field, field_validator, model_validator -from pydantic_extra_types.pendulum_dt import DateTime 
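For illustration, a usage sketch for the `update_flow_run_labels` helper added above: it merges the supplied labels into the flow run's existing labels (new keys win on conflict), commits, and returns whether a row was updated, raising `ObjectNotFoundError` for an unknown id. The caller, session, and label values below are hypothetical.

from uuid import UUID

from sqlalchemy.ext.asyncio import AsyncSession

from prefect.server.models.flow_runs import update_flow_run_labels


async def tag_run_with_environment(session: AsyncSession, flow_run_id: UUID) -> bool:
    # Existing labels are preserved; keys supplied here overwrite duplicates.
    return await update_flow_run_labels(
        session,
        flow_run_id,
        {"environment": "staging", "team": "data-platform"},
    )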
import prefect.server.schemas as schemas from prefect._internal.schemas.validators import ( @@ -31,6 +30,7 @@ from prefect.settings import PREFECT_DEPLOYMENT_SCHEDULE_MAX_SCHEDULED_RUNS from prefect.types import ( MAX_VARIABLE_NAME_LENGTH, + DateTime, Name, NonEmptyishName, NonNegativeFloat, diff --git a/src/prefect/server/schemas/core.py b/src/prefect/server/schemas/core.py index 496941340a3d..50cd8d0c40c6 100644 --- a/src/prefect/server/schemas/core.py +++ b/src/prefect/server/schemas/core.py @@ -17,7 +17,6 @@ field_validator, model_validator, ) -from pydantic_extra_types.pendulum_dt import DateTime from typing_extensions import Literal, Self from prefect._internal.schemas.validators import ( @@ -43,6 +42,7 @@ from prefect.settings import PREFECT_DEPLOYMENT_SCHEDULE_MAX_SCHEDULED_RUNS from prefect.types import ( MAX_VARIABLE_NAME_LENGTH, + DateTime, LaxUrl, Name, NameOrEmpty, diff --git a/src/prefect/server/schemas/filters.py b/src/prefect/server/schemas/filters.py index 6c6d2892465d..e5e1112c40a9 100644 --- a/src/prefect/server/schemas/filters.py +++ b/src/prefect/server/schemas/filters.py @@ -8,11 +8,11 @@ from uuid import UUID from pydantic import ConfigDict, Field -from pydantic_extra_types.pendulum_dt import DateTime import prefect.server.schemas as schemas from prefect.server.database import orm_models from prefect.server.utilities.schemas.bases import PrefectBaseModel +from prefect.types import DateTime from prefect.utilities.collections import AutoEnum from prefect.utilities.importtools import lazy_import diff --git a/src/prefect/server/schemas/responses.py b/src/prefect/server/schemas/responses.py index 207631d9473e..1249d7c37573 100644 --- a/src/prefect/server/schemas/responses.py +++ b/src/prefect/server/schemas/responses.py @@ -8,7 +8,6 @@ import pendulum from pydantic import BaseModel, ConfigDict, Field, model_validator -from pydantic_extra_types.pendulum_dt import DateTime from typing_extensions import Literal, Self import prefect.server.schemas as schemas @@ -19,7 +18,7 @@ WorkQueueStatusDetail, ) from prefect.server.utilities.schemas.bases import ORMBaseModel, PrefectBaseModel -from prefect.types import KeyValueLabelsField +from prefect.types import DateTime, KeyValueLabelsField from prefect.utilities.collections import AutoEnum from prefect.utilities.names import generate_slug diff --git a/src/prefect/server/schemas/schedules.py b/src/prefect/server/schemas/schedules.py index cfce1a320a33..ff35b0f219fe 100644 --- a/src/prefect/server/schemas/schedules.py +++ b/src/prefect/server/schemas/schedules.py @@ -11,7 +11,6 @@ import pytz from croniter import croniter from pydantic import AfterValidator, ConfigDict, Field, field_validator, model_validator -from pydantic_extra_types.pendulum_dt import DateTime from prefect._internal.schemas.validators import ( default_anchor_date, @@ -20,7 +19,7 @@ validate_rrule_string, ) from prefect.server.utilities.schemas.bases import PrefectBaseModel -from prefect.types import TimeZone +from prefect.types import DateTime, TimeZone MAX_ITERATIONS = 1000 diff --git a/src/prefect/server/schemas/states.py b/src/prefect/server/schemas/states.py index 69f6b058e3a1..da41805b1956 100644 --- a/src/prefect/server/schemas/states.py +++ b/src/prefect/server/schemas/states.py @@ -9,13 +9,13 @@ import pendulum from pydantic import ConfigDict, Field, field_validator, model_validator -from pydantic_extra_types.pendulum_dt import DateTime from typing_extensions import Self from prefect.server.utilities.schemas.bases import ( IDBaseModel, PrefectBaseModel, ) 
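For illustration, what the `DateTime` import swap above means for a schema definition: only the import changes, and the field presumably still validates to a timezone-aware pendulum datetime, matching the `pydantic_extra_types` annotation it replaces. The `Window` model is a hypothetical stand-in.

from pydantic import BaseModel, Field

from prefect.types import DateTime


class Window(BaseModel):
    # Previously: from pydantic_extra_types.pendulum_dt import DateTime
    since: DateTime = Field(description="Start of the window")
    until: DateTime = Field(description="End of the window")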
+from prefect.types import DateTime from prefect.utilities.collections import AutoEnum if TYPE_CHECKING: diff --git a/src/prefect/server/utilities/schemas/bases.py b/src/prefect/server/utilities/schemas/bases.py index da871d38c862..50568a4e100f 100644 --- a/src/prefect/server/utilities/schemas/bases.py +++ b/src/prefect/server/utilities/schemas/bases.py @@ -18,9 +18,10 @@ ConfigDict, Field, ) -from pydantic_extra_types.pendulum_dt import DateTime from typing_extensions import Self +from prefect.types import DateTime + if TYPE_CHECKING: from pydantic.main import IncEx diff --git a/src/prefect/settings/constants.py b/src/prefect/settings/constants.py index ac7520492b61..70d00ccd9394 100644 --- a/src/prefect/settings/constants.py +++ b/src/prefect/settings/constants.py @@ -1,8 +1,8 @@ from pathlib import Path -from typing import Tuple, Type +from typing import Any, Tuple, Type from pydantic import Secret, SecretStr DEFAULT_PREFECT_HOME = Path.home() / ".prefect" DEFAULT_PROFILES_PATH = Path(__file__).parent.joinpath("profiles.toml") -_SECRET_TYPES: Tuple[Type, ...] = (Secret, SecretStr) +_SECRET_TYPES: Tuple[Type[Any], ...] = (Secret, SecretStr) diff --git a/src/prefect/settings/legacy.py b/src/prefect/settings/legacy.py index 17f76e3f1404..6bc496fe1aee 100644 --- a/src/prefect/settings/legacy.py +++ b/src/prefect/settings/legacy.py @@ -8,7 +8,7 @@ from typing_extensions import Self from prefect.settings.base import PrefectBaseSettings -from prefect.settings.constants import _SECRET_TYPES +from prefect.settings.constants import _SECRET_TYPES # type: ignore[reportPrivateUsage] from prefect.settings.context import get_current_settings from prefect.settings.models.root import Settings diff --git a/src/prefect/settings/models/experiments.py b/src/prefect/settings/models/experiments.py index 218128c3dcf1..1ff11c7a13e2 100644 --- a/src/prefect/settings/models/experiments.py +++ b/src/prefect/settings/models/experiments.py @@ -22,3 +22,8 @@ class ExperimentsSettings(PrefectBaseSettings): default=False, description="Enables sending telemetry to Prefect Cloud.", ) + + lineage_events_enabled: bool = Field( + default=False, + description="If `True`, enables emitting lineage events. 
Set to `False` to disable lineage event emission.", + ) diff --git a/src/prefect/task_engine.py b/src/prefect/task_engine.py index 5accda613f34..46c8d12a9efd 100644 --- a/src/prefect/task_engine.py +++ b/src/prefect/task_engine.py @@ -4,7 +4,7 @@ import threading import time from asyncio import CancelledError -from contextlib import ExitStack, asynccontextmanager, contextmanager +from contextlib import ExitStack, asynccontextmanager, contextmanager, nullcontext from dataclasses import dataclass, field from functools import partial from textwrap import dedent @@ -29,6 +29,7 @@ import anyio import pendulum +from opentelemetry import trace from typing_extensions import ParamSpec from prefect import Task @@ -79,13 +80,14 @@ exception_to_failed_state, return_value_to_state, ) +from prefect.telemetry.run_telemetry import RunTelemetry from prefect.transactions import IsolationLevel, Transaction, transaction +from prefect.utilities._engine import get_hook_name from prefect.utilities.annotations import NotSet from prefect.utilities.asyncutils import run_coro_as_sync from prefect.utilities.callables import call_with_parameters, parameters_to_args_kwargs from prefect.utilities.collections import visit_collection from prefect.utilities.engine import ( - _get_hook_name, emit_task_run_state_change_event, link_state_to_result, resolve_to_final_result, @@ -120,6 +122,7 @@ class BaseTaskRunEngine(Generic[P, R]): _is_started: bool = False _task_name_set: bool = False _last_event: Optional[PrefectEvent] = None + _telemetry: RunTelemetry = field(default_factory=RunTelemetry) def __post_init__(self): if self.parameters is None: @@ -193,11 +196,11 @@ def _resolve_parameters(self): self.parameters = resolved_parameters def _set_custom_task_run_name(self): - from prefect.utilities.engine import _resolve_custom_task_run_name + from prefect.utilities._engine import resolve_custom_task_run_name # update the task run name if necessary if not self._task_name_set and self.task.task_run_name: - task_run_name = _resolve_custom_task_run_name( + task_run_name = resolve_custom_task_run_name( task=self.task, parameters=self.parameters or {} ) @@ -351,7 +354,7 @@ def call_hooks(self, state: Optional[State] = None): hooks = None for hook in hooks or []: - hook_name = _get_hook_name(hook) + hook_name = get_hook_name(hook) try: self.logger.info( @@ -465,7 +468,7 @@ def set_state(self, state: State, force: bool = False) -> State: validated_state=self.task_run.state, follows=self._last_event, ) - + self._telemetry.update_state(new_state) return new_state def result(self, raise_on_failure: bool = True) -> "Union[R, State, None]": @@ -519,6 +522,8 @@ def handle_success(self, result: R, transaction: Transaction) -> R: self.record_terminal_state_timing(terminal_state) self.set_state(terminal_state) self._return_value = result + + self._telemetry.end_span_on_success() return result def handle_retry(self, exc: Exception) -> bool: @@ -567,6 +572,7 @@ def handle_retry(self, exc: Exception) -> bool: def handle_exception(self, exc: Exception) -> None: # If the task fails, and we have retries left, set the task to retrying. + self._telemetry.record_exception(exc) if not self.handle_retry(exc): # If the task has no retries left, or the retry condition is not met, set the task to failed. 
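For illustration, the span lifecycle the telemetry hooks above (and their async counterparts below) establish, restated as a standalone driver: start the span when the run is initialized, mirror every state change into it, and close it exactly once on success or failure. The `Telemetry` protocol and `run_with_telemetry` function are hypothetical stand-ins that reuse only the method names from the diff, with simplified signatures.

from typing import Callable, Optional, Protocol


class Telemetry(Protocol):
    def start_span(self) -> None: ...
    def update_state(self, state: str) -> None: ...
    def record_exception(self, exc: BaseException) -> None: ...
    def end_span_on_success(self) -> None: ...
    def end_span_on_failure(self, message: Optional[str]) -> None: ...


def run_with_telemetry(telemetry: Telemetry, work: Callable[[], None]) -> None:
    telemetry.start_span()                 # initialize_run()
    telemetry.update_state("RUNNING")      # set_state() calls update_state()
    try:
        work()
        telemetry.update_state("COMPLETED")
        telemetry.end_span_on_success()    # handle_success()
    except Exception as exc:
        telemetry.record_exception(exc)    # handle_exception() / handle_crash()
        telemetry.update_state("FAILED")
        telemetry.end_span_on_failure(str(exc))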
state = run_coro_as_sync( @@ -580,6 +586,7 @@ def handle_exception(self, exc: Exception) -> None: self.record_terminal_state_timing(state) self.set_state(state) self._raised = exc + self._telemetry.end_span_on_failure(state.message if state else None) def handle_timeout(self, exc: TimeoutError) -> None: if not self.handle_retry(exc): @@ -593,6 +600,7 @@ def handle_timeout(self, exc: TimeoutError) -> None: message=message, name="TimedOut", ) + self.record_terminal_state_timing(state) self.set_state(state) self._raised = exc @@ -603,6 +611,8 @@ def handle_crash(self, exc: BaseException) -> None: self.record_terminal_state_timing(state) self.set_state(state, force=True) self._raised = exc + self._telemetry.record_exception(exc) + self._telemetry.end_span_on_failure(state.message if state else None) @contextmanager def setup_run_context(self, client: Optional[SyncPrefectClient] = None): @@ -660,14 +670,17 @@ def initialize_run( with SyncClientContext.get_or_create() as client_ctx: self._client = client_ctx.client self._is_started = True + parent_flow_run_context = FlowRunContext.get() + parent_task_run_context = TaskRunContext.get() + try: if not self.task_run: self.task_run = run_coro_as_sync( self.task.create_local_run( id=task_run_id, parameters=self.parameters, - flow_run_context=FlowRunContext.get(), - parent_task_run_context=TaskRunContext.get(), + flow_run_context=parent_flow_run_context, + parent_task_run_context=parent_task_run_context, wait_for=self.wait_for, extra_task_inputs=dependencies, ) @@ -684,6 +697,19 @@ def initialize_run( self.logger.debug( f"Created task run {self.task_run.name!r} for task {self.task.name!r}" ) + + parent_labels = {} + if parent_flow_run_context and parent_flow_run_context.flow_run: + parent_labels = parent_flow_run_context.flow_run.labels + + self._telemetry.start_span( + run=self.task_run, + name=self.task.name, + client=self.client, + parameters=self.parameters, + parent_labels=parent_labels, + ) + yield self except TerminationSignal as exc: @@ -735,11 +761,14 @@ def start( dependencies: Optional[Dict[str, Set[TaskRunInput]]] = None, ) -> Generator[None, None, None]: with self.initialize_run(task_run_id=task_run_id, dependencies=dependencies): - self.begin_run() - try: - yield - finally: - self.call_hooks() + with trace.use_span( + self._telemetry.span + ) if self._telemetry.span else nullcontext(): + self.begin_run() + try: + yield + finally: + self.call_hooks() @contextmanager def transaction_context(self) -> Generator[Transaction, None, None]: @@ -871,7 +900,7 @@ async def call_hooks(self, state: Optional[State] = None): hooks = None for hook in hooks or []: - hook_name = _get_hook_name(hook) + hook_name = get_hook_name(hook) try: self.logger.info( @@ -987,6 +1016,7 @@ async def set_state(self, state: State, force: bool = False) -> State: follows=self._last_event, ) + self._telemetry.update_state(new_state) return new_state async def result(self, raise_on_failure: bool = True) -> "Union[R, State, None]": @@ -1035,6 +1065,9 @@ async def handle_success(self, result: R, transaction: Transaction) -> R: self.record_terminal_state_timing(terminal_state) await self.set_state(terminal_state) self._return_value = result + + self._telemetry.end_span_on_success() + return result async def handle_retry(self, exc: Exception) -> bool: @@ -1083,6 +1116,7 @@ async def handle_retry(self, exc: Exception) -> bool: async def handle_exception(self, exc: Exception) -> None: # If the task fails, and we have retries left, set the task to retrying. 
+ self._telemetry.record_exception(exc) if not await self.handle_retry(exc): # If the task has no retries left, or the retry condition is not met, set the task to failed. state = await exception_to_failed_state( @@ -1094,7 +1128,10 @@ async def handle_exception(self, exc: Exception) -> None: await self.set_state(state) self._raised = exc + self._telemetry.end_span_on_failure(state.message) + async def handle_timeout(self, exc: TimeoutError) -> None: + self._telemetry.record_exception(exc) if not await self.handle_retry(exc): if isinstance(exc, TaskRunTimeoutError): message = f"Task run exceeded timeout of {self.task.timeout_seconds} second(s)" @@ -1106,8 +1143,10 @@ async def handle_timeout(self, exc: TimeoutError) -> None: message=message, name="TimedOut", ) + self.record_terminal_state_timing(state) await self.set_state(state) self._raised = exc + self._telemetry.end_span_on_failure(state.message) async def handle_crash(self, exc: BaseException) -> None: state = await exception_to_crashed_state(exc) @@ -1117,6 +1156,9 @@ async def handle_crash(self, exc: BaseException) -> None: await self.set_state(state, force=True) self._raised = exc + self._telemetry.record_exception(exc) + self._telemetry.end_span_on_failure(state.message) + @asynccontextmanager async def setup_run_context(self, client: Optional[PrefectClient] = None): from prefect.utilities.engine import ( @@ -1172,13 +1214,16 @@ async def initialize_run( async with AsyncClientContext.get_or_create(): self._client = get_client() self._is_started = True + parent_flow_run_context = FlowRunContext.get() + parent_task_run_context = TaskRunContext.get() + try: if not self.task_run: self.task_run = await self.task.create_local_run( id=task_run_id, parameters=self.parameters, - flow_run_context=FlowRunContext.get(), - parent_task_run_context=TaskRunContext.get(), + flow_run_context=parent_flow_run_context, + parent_task_run_context=parent_task_run_context, wait_for=self.wait_for, extra_task_inputs=dependencies, ) @@ -1194,6 +1239,19 @@ async def initialize_run( self.logger.debug( f"Created task run {self.task_run.name!r} for task {self.task.name!r}" ) + + parent_labels = {} + if parent_flow_run_context and parent_flow_run_context.flow_run: + parent_labels = parent_flow_run_context.flow_run.labels + + await self._telemetry.async_start_span( + run=self.task_run, + name=self.task.name, + client=self.client, + parameters=self.parameters, + parent_labels=parent_labels, + ) + yield self except TerminationSignal as exc: @@ -1247,11 +1305,14 @@ async def start( async with self.initialize_run( task_run_id=task_run_id, dependencies=dependencies ): - await self.begin_run() - try: - yield - finally: - await self.call_hooks() + with trace.use_span( + self._telemetry.span + ) if self._telemetry.span else nullcontext(): + await self.begin_run() + try: + yield + finally: + await self.call_hooks() @asynccontextmanager async def transaction_context(self) -> AsyncGenerator[Transaction, None]: @@ -1327,7 +1388,7 @@ def run_task_sync( task_run_id: Optional[UUID] = None, task_run: Optional[TaskRun] = None, parameters: Optional[Dict[str, Any]] = None, - wait_for: Optional[Iterable[PrefectFuture]] = None, + wait_for: Optional[Iterable[PrefectFuture[R]]] = None, return_type: Literal["state", "result"] = "result", dependencies: Optional[Dict[str, Set[TaskRunInput]]] = None, context: Optional[Dict[str, Any]] = None, @@ -1354,7 +1415,7 @@ async def run_task_async( task_run_id: Optional[UUID] = None, task_run: Optional[TaskRun] = None, parameters: Optional[Dict[str, 
Any]] = None, - wait_for: Optional[Iterable[PrefectFuture]] = None, + wait_for: Optional[Iterable[PrefectFuture[R]]] = None, return_type: Literal["state", "result"] = "result", dependencies: Optional[Dict[str, Set[TaskRunInput]]] = None, context: Optional[Dict[str, Any]] = None, @@ -1381,7 +1442,7 @@ def run_generator_task_sync( task_run_id: Optional[UUID] = None, task_run: Optional[TaskRun] = None, parameters: Optional[Dict[str, Any]] = None, - wait_for: Optional[Iterable[PrefectFuture]] = None, + wait_for: Optional[Iterable[PrefectFuture[R]]] = None, return_type: Literal["state", "result"] = "result", dependencies: Optional[Dict[str, Set[TaskRunInput]]] = None, context: Optional[Dict[str, Any]] = None, @@ -1436,7 +1497,7 @@ async def run_generator_task_async( task_run_id: Optional[UUID] = None, task_run: Optional[TaskRun] = None, parameters: Optional[Dict[str, Any]] = None, - wait_for: Optional[Iterable[PrefectFuture]] = None, + wait_for: Optional[Iterable[PrefectFuture[R]]] = None, return_type: Literal["state", "result"] = "result", dependencies: Optional[Dict[str, Set[TaskRunInput]]] = None, context: Optional[Dict[str, Any]] = None, @@ -1492,7 +1553,7 @@ def run_task( task_run_id: Optional[UUID] = None, task_run: Optional[TaskRun] = None, parameters: Optional[Dict[str, Any]] = None, - wait_for: Optional[Iterable[PrefectFuture]] = None, + wait_for: Optional[Iterable[PrefectFuture[R]]] = None, return_type: Literal["state", "result"] = "result", dependencies: Optional[Dict[str, Set[TaskRunInput]]] = None, context: Optional[Dict[str, Any]] = None, diff --git a/src/prefect/task_runners.py b/src/prefect/task_runners.py index 734bcf73b560..497e34c1fbf5 100644 --- a/src/prefect/task_runners.py +++ b/src/prefect/task_runners.py @@ -97,9 +97,9 @@ def submit( def map( self, - task: "Task", + task: "Task[P, R]", parameters: Dict[str, Any], - wait_for: Optional[Iterable[PrefectFuture]] = None, + wait_for: Optional[Iterable[PrefectFuture[R]]] = None, ) -> PrefectFutureList[F]: """ Submit multiple tasks to the task run engine. 
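
The signature changes above narrow `wait_for` from `Iterable[PrefectFuture]` to `Iterable[PrefectFuture[R]]` across the task engine entrypoints and `TaskRunner.map`. A minimal sketch of the calling pattern these annotations describe, using hypothetical `extract`/`load` tasks that are not part of this diff:

```python
from prefect import flow, task


@task
def extract() -> int:
    return 42


@task
def load(value: int) -> None:
    print(f"loaded {value}")


@flow
def pipeline() -> None:
    upstream = extract.submit()
    # `wait_for` accepts an iterable of PrefectFuture objects; the downstream
    # task run does not start until every listed future has completed.
    load.submit(0, wait_for=[upstream])
```
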
diff --git a/src/prefect/task_worker.py b/src/prefect/task_worker.py index 6c75bd31f98f..2afae1032cf4 100644 --- a/src/prefect/task_worker.py +++ b/src/prefect/task_worker.py @@ -7,7 +7,7 @@ from concurrent.futures import ThreadPoolExecutor from contextlib import AsyncExitStack from contextvars import copy_context -from typing import Optional +from typing import TYPE_CHECKING, Any, Optional from uuid import UUID import anyio @@ -16,6 +16,7 @@ import uvicorn from exceptiongroup import BaseExceptionGroup # novermin from fastapi import FastAPI +from typing_extensions import ParamSpec, TypeVar from websockets.exceptions import InvalidStatusCode from prefect import Task @@ -35,12 +36,17 @@ from prefect.utilities.annotations import NotSet from prefect.utilities.asyncutils import asyncnullcontext, sync_compatible from prefect.utilities.engine import emit_task_run_state_change_event -from prefect.utilities.processutils import _register_signal +from prefect.utilities.processutils import ( + _register_signal, # pyright: ignore[reportPrivateUsage] +) from prefect.utilities.services import start_client_metrics_server from prefect.utilities.urls import url_for logger = get_logger("task_worker") +P = ParamSpec("P") +R = TypeVar("R", infer_variance=True) + class StopTaskWorker(Exception): """Raised when the task worker is stopped.""" @@ -48,8 +54,10 @@ class StopTaskWorker(Exception): pass -def should_try_to_read_parameters(task: Task, task_run: TaskRun) -> bool: +def should_try_to_read_parameters(task: Task[P, R], task_run: TaskRun) -> bool: """Determines whether a task run should read parameters from the result store.""" + if TYPE_CHECKING: + assert task_run.state is not None new_enough_state_details = hasattr( task_run.state.state_details, "task_parameters_id" ) @@ -76,20 +84,23 @@ class TaskWorker: def __init__( self, - *tasks: Task, + *tasks: Task[P, R], limit: Optional[int] = 10, ): - self.tasks = [] + self.tasks: list["Task[..., Any]"] = [] for t in tasks: - if isinstance(t, Task): - if t.cache_policy in [None, NONE, NotSet]: - self.tasks.append( - t.with_options(persist_result=True, cache_policy=DEFAULT) - ) - else: - self.tasks.append(t.with_options(persist_result=True)) + if not TYPE_CHECKING: + if not isinstance(t, Task): + continue - self.task_keys = set(t.task_key for t in tasks if isinstance(t, Task)) + if t.cache_policy in [None, NONE, NotSet]: + self.tasks.append( + t.with_options(persist_result=True, cache_policy=DEFAULT) + ) + else: + self.tasks.append(t.with_options(persist_result=True)) + + self.task_keys = set(t.task_key for t in tasks if isinstance(t, Task)) # pyright: ignore[reportUnnecessaryIsInstance] self._started_at: Optional[pendulum.DateTime] = None self.stopping: bool = False @@ -97,7 +108,9 @@ def __init__( self._client = get_client() self._exit_stack = AsyncExitStack() - if not asyncio.get_event_loop().is_running(): + try: + asyncio.get_running_loop() + except RuntimeError: raise RuntimeError( "TaskWorker must be initialized within an async context." ) @@ -141,7 +154,7 @@ def current_tasks(self) -> Optional[int]: def available_tasks(self) -> Optional[int]: return int(self._limiter.available_tokens) if self._limiter else None - def handle_sigterm(self, signum, frame): + def handle_sigterm(self, signum: int, frame: object): """ Shuts down the task worker when a SIGTERM is received. 
""" @@ -252,6 +265,8 @@ async def _safe_submit_scheduled_task_run(self, task_run: TaskRun): self._release_token(task_run.id) async def _submit_scheduled_task_run(self, task_run: TaskRun): + if TYPE_CHECKING: + assert task_run.state is not None logger.debug( f"Found task run: {task_run.name!r} in state: {task_run.state.name!r}" ) @@ -280,7 +295,7 @@ async def _submit_scheduled_task_run(self, task_run: TaskRun): result_storage=await get_or_create_default_task_scheduling_storage() ).update_for_task(task) try: - run_data = await store.read_parameters(parameters_id) + run_data: dict[str, Any] = await store.read_parameters(parameters_id) parameters = run_data.get("parameters", {}) wait_for = run_data.get("wait_for", []) run_context = run_data.get("context", None) @@ -350,7 +365,7 @@ async def execute_task_run(self, task_run: TaskRun): async def __aenter__(self): logger.debug("Starting task worker...") - if self._client._closed: + if self._client._closed: # pyright: ignore[reportPrivateUsage] self._client = get_client() self._runs_task_group = anyio.create_task_group() @@ -362,7 +377,7 @@ async def __aenter__(self): self._started_at = pendulum.now() return self - async def __aexit__(self, *exc_info): + async def __aexit__(self, *exc_info: Any) -> None: logger.debug("Stopping task worker...") self._started_at = None await self._exit_stack.__aexit__(*exc_info) @@ -372,7 +387,9 @@ def create_status_server(task_worker: TaskWorker) -> FastAPI: status_app = FastAPI() @status_app.get("/status") - def status(): + def status(): # pyright: ignore[reportUnusedFunction] + if TYPE_CHECKING: + assert task_worker.started_at is not None return { "client_id": task_worker.client_id, "started_at": task_worker.started_at.isoformat(), @@ -393,11 +410,13 @@ def status(): @sync_compatible async def serve( - *tasks: Task, limit: Optional[int] = 10, status_server_port: Optional[int] = None + *tasks: Task[P, R], + limit: Optional[int] = 10, + status_server_port: Optional[int] = None, ): - """Serve the provided tasks so that their runs may be submitted to and executed. - in the engine. Tasks do not need to be within a flow run context to be submitted. - You must `.submit` the same task object that you pass to `serve`. + """Serve the provided tasks so that their runs may be submitted to + and executed in the engine. Tasks do not need to be within a flow run context to be + submitted. You must `.submit` the same task object that you pass to `serve`. Args: - tasks: A list of tasks to serve. 
When a scheduled task run is found for a @@ -422,8 +441,7 @@ def yell(message: str): print(message.upper()) # starts a long-lived process that listens for scheduled runs of these tasks - if __name__ == "__main__": - serve(say, yell) + serve(say, yell) ``` """ task_worker = TaskWorker(*tasks, limit=limit) diff --git a/src/prefect/tasks.py b/src/prefect/tasks.py index cc271ec226c2..0d6610009847 100644 --- a/src/prefect/tasks.py +++ b/src/prefect/tasks.py @@ -21,6 +21,7 @@ List, NoReturn, Optional, + Protocol, Set, Tuple, Type, @@ -31,7 +32,7 @@ ) from uuid import UUID, uuid4 -from typing_extensions import Literal, ParamSpec +from typing_extensions import Literal, ParamSpec, Self, TypeAlias, TypeIs import prefect.states from prefect.cache_policies import DEFAULT, NONE, CachePolicy @@ -63,10 +64,7 @@ ) from prefect.states import Pending, Scheduled, State from prefect.utilities.annotations import NotSet -from prefect.utilities.asyncutils import ( - run_coro_as_sync, - sync_compatible, -) +from prefect.utilities.asyncutils import run_coro_as_sync, sync_compatible from prefect.utilities.callables import ( expand_mapping_parameters, get_call_parameters, @@ -89,6 +87,10 @@ logger = get_logger("tasks") +StateHookCallable: TypeAlias = Callable[ + ["Task[..., Any]", TaskRun, State], Union[Awaitable[None], None] +] + def task_input_hash( context: "TaskRunContext", arguments: Dict[str, Any] @@ -141,8 +143,8 @@ def retry_backoff_callable(retries: int) -> List[float]: def _infer_parent_task_runs( flow_run_context: Optional[FlowRunContext], task_run_context: Optional[TaskRunContext], - parameters: Dict[str, Any], -): + parameters: dict[str, Any], +) -> list[TaskRunResult]: """ Attempt to infer the parent task runs for this task run based on the provided flow run and task run contexts, as well as any parameters. It is @@ -151,7 +153,7 @@ def _infer_parent_task_runs( a parent. This is expected to happen when task inputs are yielded from generator tasks. """ - parents = [] + parents: list[TaskRunResult] = [] # check if this task has a parent task run based on running in another # task run's existing context. A task run is only considered a parent if @@ -223,6 +225,16 @@ def _generate_task_key(fn: Callable[..., Any]) -> str: return f"{qualname}-{code_hash}" +class TaskRunNameCallbackWithParameters(Protocol): + @classmethod + def is_callback_with_parameters(cls, callable: Callable[..., str]) -> TypeIs[Self]: + sig = inspect.signature(callable) + return "parameters" in sig.parameters + + def __call__(self, parameters: dict[str, Any]) -> str: + ... + + class Task(Generic[P, R]): """ A Prefect task definition. 
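
The preceding hunk introduces the `TaskRunNameCallbackWithParameters` protocol: `is_callback_with_parameters` inspects a `task_run_name` callable's signature, and when it accepts a `parameters` argument the engine (see `resolve_custom_task_run_name` later in this diff) calls it with the resolved task parameters. A sketch of the two supported callable shapes, with illustrative task and parameter names:

```python
from typing import Any

from prefect import task


def static_name() -> str:
    # Zero-argument callables are invoked with no arguments.
    return "nightly-refresh"


def name_from_parameters(parameters: dict[str, Any]) -> str:
    # Callables accepting `parameters` receive the task run's parameters,
    # matching TaskRunNameCallbackWithParameters.
    return f"refresh-{parameters['table']}"


@task(task_run_name=static_name)
def refresh_all() -> None: ...


@task(task_run_name=name_from_parameters)
def refresh(table: str) -> None: ...
```
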
@@ -311,7 +323,7 @@ def __init__( ] = None, cache_expiration: Optional[datetime.timedelta] = None, task_run_name: Optional[ - Union[Callable[[], str], Callable[[Dict[str, Any]], str], str] + Union[Callable[[], str], TaskRunNameCallbackWithParameters, str] ] = None, retries: Optional[int] = None, retry_delay_seconds: Optional[ @@ -331,11 +343,13 @@ def __init__( timeout_seconds: Union[int, float, None] = None, log_prints: Optional[bool] = False, refresh_cache: Optional[bool] = None, - on_completion: Optional[List[Callable[["Task", TaskRun, State], None]]] = None, - on_failure: Optional[List[Callable[["Task", TaskRun, State], None]]] = None, - on_rollback: Optional[List[Callable[["Transaction"], None]]] = None, - on_commit: Optional[List[Callable[["Transaction"], None]]] = None, - retry_condition_fn: Optional[Callable[["Task", TaskRun, State], bool]] = None, + on_completion: Optional[list[StateHookCallable]] = None, + on_failure: Optional[list[StateHookCallable]] = None, + on_rollback: Optional[list[Callable[["Transaction"], None]]] = None, + on_commit: Optional[list[Callable[["Transaction"], None]]] = None, + retry_condition_fn: Optional[ + Callable[["Task[..., Any]", TaskRun, State], bool] + ] = None, viz_return_value: Optional[Any] = None, ): # Validate if hook passed is list and contains callables @@ -460,6 +474,10 @@ def __init__( if callable(retry_delay_seconds): self.retry_delay_seconds = retry_delay_seconds(retries) + elif not isinstance(retry_delay_seconds, (list, int, float, type(None))): + raise TypeError( + f"Invalid `retry_delay_seconds` provided; must be an int, float, list or callable. Received type {type(retry_delay_seconds)}" + ) else: self.retry_delay_seconds = retry_delay_seconds @@ -505,7 +523,7 @@ def __init__( def ismethod(self) -> bool: return hasattr(self.fn, "__prefect_self__") - def __get__(self, instance, owner): + def __get__(self, instance: Any, owner: Any): """ Implement the descriptor protocol so that the task can be used as an instance method. 
When an instance method is loaded, this method is called with the "self" instance as @@ -534,7 +552,9 @@ def with_options( Callable[["TaskRunContext", Dict[str, Any]], Optional[str]] ] = None, task_run_name: Optional[ - Union[Callable[[], str], Callable[[Dict[str, Any]], str], str, Type[NotSet]] + Union[ + Callable[[], str], TaskRunNameCallbackWithParameters, str, Type[NotSet] + ] ] = NotSet, cache_expiration: Optional[datetime.timedelta] = None, retries: Union[int, Type[NotSet]] = NotSet, @@ -554,13 +574,11 @@ def with_options( timeout_seconds: Union[int, float, None] = None, log_prints: Union[bool, Type[NotSet]] = NotSet, refresh_cache: Union[bool, Type[NotSet]] = NotSet, - on_completion: Optional[ - List[Callable[["Task", TaskRun, State], Union[Awaitable[None], None]]] + on_completion: Optional[list[StateHookCallable]] = None, + on_failure: Optional[list[StateHookCallable]] = None, + retry_condition_fn: Optional[ + Callable[["Task[..., Any]", TaskRun, State], bool] ] = None, - on_failure: Optional[ - List[Callable[["Task", TaskRun, State], Union[Awaitable[None], None]]] - ] = None, - retry_condition_fn: Optional[Callable[["Task", TaskRun, State], bool]] = None, viz_return_value: Optional[Any] = None, ): """ @@ -697,15 +715,11 @@ def with_options( viz_return_value=viz_return_value or self.viz_return_value, ) - def on_completion( - self, fn: Callable[["Task", TaskRun, State], None] - ) -> Callable[["Task", TaskRun, State], None]: + def on_completion(self, fn: StateHookCallable) -> StateHookCallable: self.on_completion_hooks.append(fn) return fn - def on_failure( - self, fn: Callable[["Task", TaskRun, State], None] - ) -> Callable[["Task", TaskRun, State], None]: + def on_failure(self, fn: StateHookCallable) -> StateHookCallable: self.on_failure_hooks.append(fn) return fn @@ -728,14 +742,12 @@ async def create_run( parameters: Optional[Dict[str, Any]] = None, flow_run_context: Optional[FlowRunContext] = None, parent_task_run_context: Optional[TaskRunContext] = None, - wait_for: Optional[Iterable[PrefectFuture]] = None, + wait_for: Optional[Iterable[PrefectFuture[R]]] = None, extra_task_inputs: Optional[Dict[str, Set[TaskRunInput]]] = None, deferred: bool = False, ) -> TaskRun: - from prefect.utilities.engine import ( - _dynamic_key_for_task_run, - collect_task_run_inputs_sync, - ) + from prefect.utilities._engine import dynamic_key_for_task_run + from prefect.utilities.engine import collect_task_run_inputs_sync if flow_run_context is None: flow_run_context = FlowRunContext.get() @@ -751,7 +763,7 @@ async def create_run( dynamic_key = f"{self.task_key}-{str(uuid4().hex)}" task_run_name = self.name else: - dynamic_key = _dynamic_key_for_task_run( + dynamic_key = dynamic_key_for_task_run( context=flow_run_context, task=self ) task_run_name = f"{self.name}-{dynamic_key}" @@ -831,14 +843,12 @@ async def create_local_run( parameters: Optional[Dict[str, Any]] = None, flow_run_context: Optional[FlowRunContext] = None, parent_task_run_context: Optional[TaskRunContext] = None, - wait_for: Optional[Iterable[PrefectFuture]] = None, + wait_for: Optional[Iterable[PrefectFuture[R]]] = None, extra_task_inputs: Optional[Dict[str, Set[TaskRunInput]]] = None, deferred: bool = False, ) -> TaskRun: - from prefect.utilities.engine import ( - _dynamic_key_for_task_run, - collect_task_run_inputs_sync, - ) + from prefect.utilities._engine import dynamic_key_for_task_run + from prefect.utilities.engine import collect_task_run_inputs_sync if flow_run_context is None: flow_run_context = FlowRunContext.get() @@ -854,7 
+864,7 @@ async def create_local_run( dynamic_key = f"{self.task_key}-{str(uuid4().hex)}" task_run_name = self.name else: - dynamic_key = _dynamic_key_for_task_run( + dynamic_key = dynamic_key_for_task_run( context=flow_run_context, task=self, stable=False ) task_run_name = f"{self.name}-{dynamic_key[:3]}" @@ -974,7 +984,7 @@ def __call__( self, *args: P.args, return_state: bool = False, - wait_for: Optional[Iterable[PrefectFuture]] = None, + wait_for: Optional[Iterable[PrefectFuture[R]]] = None, **kwargs: P.kwargs, ): """ @@ -1054,7 +1064,7 @@ def submit( self, *args: Any, return_state: bool = False, - wait_for: Optional[Iterable[PrefectFuture]] = None, + wait_for: Optional[Iterable[PrefectFuture[R]]] = None, **kwargs: Any, ): """ @@ -1179,7 +1189,7 @@ def map( self: "Task[P, R]", *args: Any, return_state: Literal[True], - wait_for: Optional[Iterable[Union[PrefectFuture[T], T]]] = ..., + wait_for: Optional[Iterable[Union[PrefectFuture[R], R]]] = ..., deferred: bool = ..., **kwargs: Any, ) -> List[State[R]]: @@ -1189,7 +1199,7 @@ def map( def map( self: "Task[P, R]", *args: Any, - wait_for: Optional[Iterable[Union[PrefectFuture[T], T]]] = ..., + wait_for: Optional[Iterable[Union[PrefectFuture[R], R]]] = ..., deferred: bool = ..., **kwargs: Any, ) -> PrefectFutureList[R]: @@ -1200,7 +1210,7 @@ def map( self: "Task[P, R]", *args: Any, return_state: Literal[True], - wait_for: Optional[Iterable[Union[PrefectFuture[T], T]]] = ..., + wait_for: Optional[Iterable[Union[PrefectFuture[R], R]]] = ..., deferred: bool = ..., **kwargs: Any, ) -> List[State[R]]: @@ -1210,7 +1220,7 @@ def map( def map( self: "Task[P, R]", *args: Any, - wait_for: Optional[Iterable[Union[PrefectFuture[T], T]]] = ..., + wait_for: Optional[Iterable[Union[PrefectFuture[R], R]]] = ..., deferred: bool = ..., **kwargs: Any, ) -> PrefectFutureList[R]: @@ -1221,7 +1231,7 @@ def map( self: "Task[P, Coroutine[Any, Any, R]]", *args: Any, return_state: Literal[True], - wait_for: Optional[Iterable[Union[PrefectFuture[T], T]]] = ..., + wait_for: Optional[Iterable[Union[PrefectFuture[R], R]]] = ..., deferred: bool = ..., **kwargs: Any, ) -> List[State[R]]: @@ -1232,7 +1242,7 @@ def map( self: "Task[P, Coroutine[Any, Any, R]]", *args: Any, return_state: Literal[False], - wait_for: Optional[Iterable[Union[PrefectFuture[T], T]]] = ..., + wait_for: Optional[Iterable[Union[PrefectFuture[R], R]]] = ..., deferred: bool = ..., **kwargs: Any, ) -> PrefectFutureList[R]: @@ -1242,10 +1252,10 @@ def map( self, *args: Any, return_state: bool = False, - wait_for: Optional[Iterable[Union[PrefectFuture[T], T]]] = None, + wait_for: Optional[Iterable[Union[PrefectFuture[R], R]]] = None, deferred: bool = False, **kwargs: Any, - ): + ) -> Union[List[State[R]], PrefectFutureList[R]]: """ Submit a mapped run of the task to a worker. @@ -1394,7 +1404,7 @@ def map( " execution." ) if return_state: - states = [] + states: list[State[R]] = [] for future in futures: future.wait() states.append(future.state) @@ -1406,9 +1416,9 @@ def apply_async( self, args: Optional[Tuple[Any, ...]] = None, kwargs: Optional[Dict[str, Any]] = None, - wait_for: Optional[Iterable[PrefectFuture]] = None, + wait_for: Optional[Iterable[PrefectFuture[R]]] = None, dependencies: Optional[Dict[str, Set[TaskRunInput]]] = None, - ) -> PrefectDistributedFuture: + ) -> PrefectDistributedFuture[R]: """ Create a pending task run for a task worker to execute. 
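
The surrounding hunks parameterize the return type of `apply_async` and `delay` as `PrefectDistributedFuture[R]`, so type checkers can infer the result type of a deferred run. A sketch of the pattern, assuming a hypothetical `add` task that is served by a task worker and a reachable Prefect API:

```python
from prefect import task


@task
def add(x: int, y: int) -> int:
    return x + y


# Both calls create pending task runs for a task worker to pick up; with this
# change they are typed as PrefectDistributedFuture[int].
future = add.delay(1, 2)
same = add.apply_async((3, 4))
print(future.result())  # blocks until a task worker executes the run
```
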
@@ -1507,7 +1517,7 @@ def apply_async( return PrefectDistributedFuture(task_run_id=task_run.id) - def delay(self, *args: P.args, **kwargs: P.kwargs) -> PrefectDistributedFuture: + def delay(self, *args: P.args, **kwargs: P.kwargs) -> PrefectDistributedFuture[R]: """ An alias for `apply_async` with simpler calling semantics. @@ -1588,7 +1598,7 @@ def task( ] = None, cache_expiration: Optional[datetime.timedelta] = None, task_run_name: Optional[ - Union[Callable[[], str], Callable[[Dict[str, Any]], str], str] + Union[Callable[[], str], TaskRunNameCallbackWithParameters, str] ] = None, retries: int = 0, retry_delay_seconds: Union[ @@ -1606,16 +1616,18 @@ def task( timeout_seconds: Union[int, float, None] = None, log_prints: Optional[bool] = None, refresh_cache: Optional[bool] = None, - on_completion: Optional[List[Callable[["Task", TaskRun, State], None]]] = None, - on_failure: Optional[List[Callable[["Task", TaskRun, State], None]]] = None, - retry_condition_fn: Optional[Callable[["Task", TaskRun, State], bool]] = None, + on_completion: Optional[ + List[Callable[["Task[P, R]", TaskRun, State], None]] + ] = None, + on_failure: Optional[List[Callable[["Task[P, R]", TaskRun, State], None]]] = None, + retry_condition_fn: Optional[Callable[["Task[P, R]", TaskRun, State], bool]] = None, viz_return_value: Any = None, ) -> Callable[[Callable[P, R]], Task[P, R]]: ... def task( - __fn=None, + __fn: Optional[Callable[P, R]] = None, *, name: Optional[str] = None, description: Optional[str] = None, @@ -1627,7 +1639,7 @@ def task( ] = None, cache_expiration: Optional[datetime.timedelta] = None, task_run_name: Optional[ - Union[Callable[[], str], Callable[[Dict[str, Any]], str], str] + Union[Callable[[], str], TaskRunNameCallbackWithParameters, str] ] = None, retries: Optional[int] = None, retry_delay_seconds: Union[ @@ -1642,9 +1654,11 @@ def task( timeout_seconds: Union[int, float, None] = None, log_prints: Optional[bool] = None, refresh_cache: Optional[bool] = None, - on_completion: Optional[List[Callable[["Task", TaskRun, State], None]]] = None, - on_failure: Optional[List[Callable[["Task", TaskRun, State], None]]] = None, - retry_condition_fn: Optional[Callable[["Task", TaskRun, State], bool]] = None, + on_completion: Optional[ + List[Callable[["Task[P, R]", TaskRun, State], None]] + ] = None, + on_failure: Optional[List[Callable[["Task[P, R]", TaskRun, State], None]]] = None, + retry_condition_fn: Optional[Callable[["Task[P, R]", TaskRun, State], bool]] = None, viz_return_value: Any = None, ): """ diff --git a/src/prefect/telemetry/bootstrap.py b/src/prefect/telemetry/bootstrap.py index 89aedad095f3..0f6fe656cf48 100644 --- a/src/prefect/telemetry/bootstrap.py +++ b/src/prefect/telemetry/bootstrap.py @@ -23,10 +23,23 @@ def setup_telemetry() -> ( if server_type != ServerType.CLOUD: return None, None, None - assert settings.api.key + if not settings.api.key: + raise ValueError( + "A Prefect Cloud API key is required to enable telemetry. Please set " + "the `PREFECT_API_KEY` environment variable or authenticate with " + "Prefect Cloud via the `prefect cloud login` command." + ) + assert settings.api.url # This import is here to defer importing of the `opentelemetry` packages. - from .instrumentation import setup_exporters + try: + from .instrumentation import setup_exporters + except ImportError as exc: + raise ValueError( + "Unable to import OpenTelemetry instrumentation libraries. 
Please " + "ensure you have installed the `otel` extra when installing Prefect: " + "`pip install 'prefect[otel]'`" + ) from exc return setup_exporters(settings.api.url, settings.api.key.get_secret_value()) diff --git a/src/prefect/telemetry/instrumentation.py b/src/prefect/telemetry/instrumentation.py index bb1ddbfcb425..f1f458b785c1 100644 --- a/src/prefect/telemetry/instrumentation.py +++ b/src/prefect/telemetry/instrumentation.py @@ -55,7 +55,7 @@ def _url_join(base_url: str, path: str) -> str: def setup_exporters( api_url: str, api_key: str -) -> tuple[TracerProvider, MeterProvider, "LoggerProvider"]: +) -> "tuple[TracerProvider, MeterProvider, LoggerProvider]": account_id, workspace_id = extract_account_and_workspace_id(api_url) telemetry_url = _url_join(api_url, "telemetry/") diff --git a/src/prefect/telemetry/processors.py b/src/prefect/telemetry/processors.py index f5f1dc663e9c..03a33ab0f2b6 100644 --- a/src/prefect/telemetry/processors.py +++ b/src/prefect/telemetry/processors.py @@ -1,14 +1,17 @@ import time from threading import Event, Lock, Thread -from typing import Dict, Optional +from typing import TYPE_CHECKING, Dict, Optional from opentelemetry.context import Context -from opentelemetry.sdk.trace import ReadableSpan, Span, SpanProcessor -from opentelemetry.sdk.trace.export import SpanExporter +from opentelemetry.sdk.trace import Span, SpanProcessor + +if TYPE_CHECKING: + from opentelemetry.sdk.trace import ReadableSpan, Span + from opentelemetry.sdk.trace.export import SpanExporter class InFlightSpanProcessor(SpanProcessor): - def __init__(self, span_exporter: SpanExporter): + def __init__(self, span_exporter: "SpanExporter"): self.span_exporter = span_exporter self._in_flight: Dict[int, Span] = {} self._lock = Lock() @@ -26,7 +29,7 @@ def _export_periodically(self) -> None: if to_export: self.span_exporter.export(to_export) - def _readable_span(self, span: Span) -> ReadableSpan: + def _readable_span(self, span: "Span") -> "ReadableSpan": readable = span._readable_span() readable._end_time = time.time_ns() readable._attributes = { @@ -35,13 +38,13 @@ def _readable_span(self, span: Span) -> ReadableSpan: } return readable - def on_start(self, span: Span, parent_context: Optional[Context] = None) -> None: + def on_start(self, span: "Span", parent_context: Optional[Context] = None) -> None: if not span.context or not span.context.trace_flags.sampled: return with self._lock: self._in_flight[span.context.span_id] = span - def on_end(self, span: ReadableSpan) -> None: + def on_end(self, span: "ReadableSpan") -> None: if not span.context or not span.context.trace_flags.sampled: return with self._lock: diff --git a/src/prefect/telemetry/run_telemetry.py b/src/prefect/telemetry/run_telemetry.py new file mode 100644 index 000000000000..ab7ef7aee729 --- /dev/null +++ b/src/prefect/telemetry/run_telemetry.py @@ -0,0 +1,224 @@ +import time +from dataclasses import dataclass, field +from typing import TYPE_CHECKING, Any, Optional, Union + +from opentelemetry import propagate, trace +from opentelemetry.context import Context +from opentelemetry.propagators.textmap import Setter +from opentelemetry.trace import ( + Span, + Status, + StatusCode, + get_tracer, +) +from typing_extensions import TypeAlias + +import prefect +from prefect.client.orchestration import PrefectClient, SyncPrefectClient +from prefect.client.schemas import FlowRun, TaskRun +from prefect.client.schemas.objects import State +from prefect.context import FlowRunContext +from prefect.types import KeyValueLabels + +if 
TYPE_CHECKING: + from opentelemetry.trace import Tracer + +LABELS_TRACEPARENT_KEY = "__OTEL_TRACEPARENT" +TRACEPARENT_KEY = "traceparent" + +FlowOrTaskRun: TypeAlias = Union[FlowRun, TaskRun] + + +class OTELSetter(Setter[KeyValueLabels]): + """ + A setter for OpenTelemetry that supports Prefect's custom labels. + """ + + def set(self, carrier: KeyValueLabels, key: str, value: str) -> None: + carrier[key] = value + + +@dataclass +class RunTelemetry: + """ + A class for managing the telemetry of runs. + """ + + _tracer: "Tracer" = field( + default_factory=lambda: get_tracer("prefect", prefect.__version__) + ) + span: Optional[Span] = None + + async def async_start_span( + self, + run: FlowOrTaskRun, + client: PrefectClient, + name: Optional[str] = None, + parameters: Optional[dict[str, Any]] = None, + parent_labels: Optional[dict[str, Any]] = None, + ): + should_set_traceparent = self._should_set_traceparent(run) + traceparent, span = self._start_span(run, name, parameters, parent_labels) + + if should_set_traceparent and traceparent: + await client.update_flow_run_labels( + run.id, {LABELS_TRACEPARENT_KEY: traceparent} + ) + + return span + + def start_span( + self, + run: FlowOrTaskRun, + client: SyncPrefectClient, + name: Optional[str] = None, + parameters: Optional[dict[str, Any]] = None, + parent_labels: Optional[dict[str, Any]] = None, + ): + should_set_traceparent = self._should_set_traceparent(run) + traceparent, span = self._start_span(run, name, parameters, parent_labels) + + if should_set_traceparent and traceparent: + client.update_flow_run_labels(run.id, {LABELS_TRACEPARENT_KEY: traceparent}) + + return span + + def _start_span( + self, + run: FlowOrTaskRun, + name: Optional[str] = None, + parameters: Optional[dict[str, Any]] = None, + parent_labels: Optional[dict[str, Any]] = None, + ) -> tuple[Optional[str], Span]: + """ + Start a span for a task run. + """ + if parameters is None: + parameters = {} + if parent_labels is None: + parent_labels = {} + parameter_attributes = { + f"prefect.run.parameter.{k}": type(v).__name__ + for k, v in parameters.items() + } + + traceparent, context = self._traceparent_and_context_from_labels( + {**parent_labels, **run.labels} + ) + run_type = self._run_type(run) + + self.span = self._tracer.start_span( + name=name or run.name, + context=context, + attributes={ + f"prefect.{run_type}.name": name or run.name, + "prefect.run.type": run_type, + "prefect.run.id": str(run.id), + "prefect.tags": run.tags, + **parameter_attributes, + **parent_labels, + }, + ) + + if not traceparent: + traceparent = self._traceparent_from_span(self.span) + + if traceparent and LABELS_TRACEPARENT_KEY not in run.labels: + run.labels[LABELS_TRACEPARENT_KEY] = traceparent + + return traceparent, self.span + + def _run_type(self, run: FlowOrTaskRun) -> str: + return "task" if isinstance(run, TaskRun) else "flow" + + def _should_set_traceparent(self, run: FlowOrTaskRun) -> bool: + # If the run is a flow run and it doesn't already have a traceparent, + # we need to update its labels with the traceparent so that its + # propagated to child runs. Task runs are updated via events so we + # don't need to update them via the client in the same way. 
+ return ( + LABELS_TRACEPARENT_KEY not in run.labels and self._run_type(run) == "flow" + ) + + def _traceparent_and_context_from_labels( + self, labels: Optional[KeyValueLabels] + ) -> tuple[Optional[str], Optional[Context]]: + """Get trace context from run labels if it exists.""" + if not labels or LABELS_TRACEPARENT_KEY not in labels: + return None, None + traceparent = labels[LABELS_TRACEPARENT_KEY] + carrier = {TRACEPARENT_KEY: traceparent} + return str(traceparent), propagate.extract(carrier) + + def _traceparent_from_span(self, span: Span) -> Optional[str]: + carrier = {} + propagate.inject(carrier, context=trace.set_span_in_context(span)) + return carrier.get(TRACEPARENT_KEY) + + def end_span_on_success(self) -> None: + """ + End a span for a task run on success. + """ + if self.span: + self.span.set_status(Status(StatusCode.OK)) + self.span.end(time.time_ns()) + self.span = None + + def end_span_on_failure(self, terminal_message: Optional[str] = None) -> None: + """ + End a span for a task run on failure. + """ + if self.span: + self.span.set_status( + Status(StatusCode.ERROR, terminal_message or "Run failed") + ) + self.span.end(time.time_ns()) + self.span = None + + def record_exception(self, exc: BaseException) -> None: + """ + Record an exception on a span. + """ + if self.span: + self.span.record_exception(exc) + + def update_state(self, new_state: State) -> None: + """ + Update a span with the state of a task run. + """ + if self.span: + self.span.add_event( + new_state.name or new_state.type, + { + "prefect.state.message": new_state.message or "", + "prefect.state.type": new_state.type, + "prefect.state.name": new_state.name or new_state.type, + "prefect.state.id": str(new_state.id), + }, + ) + + def propagate_traceparent(self) -> Optional[KeyValueLabels]: + """ + Propagate a traceparent to a span. 
+ """ + parent_flow_run_ctx = FlowRunContext.get() + + if parent_flow_run_ctx and parent_flow_run_ctx.flow_run: + if traceparent := parent_flow_run_ctx.flow_run.labels.get( + LABELS_TRACEPARENT_KEY + ): + carrier: KeyValueLabels = {TRACEPARENT_KEY: traceparent} + propagate.get_global_textmap().inject( + carrier={TRACEPARENT_KEY: traceparent}, + setter=OTELSetter(), + ) + return carrier + else: + if self.span: + carrier: KeyValueLabels = {} + propagate.get_global_textmap().inject( + carrier, + context=trace.set_span_in_context(self.span), + setter=OTELSetter(), + ) + return carrier diff --git a/src/prefect/testing/fixtures.py b/src/prefect/testing/fixtures.py index 545778427ac1..07352f872afc 100644 --- a/src/prefect/testing/fixtures.py +++ b/src/prefect/testing/fixtures.py @@ -27,6 +27,7 @@ from prefect.server.events.pipeline import EventsPipeline from prefect.settings import ( PREFECT_API_URL, + PREFECT_EXPERIMENTS_LINEAGE_EVENTS_ENABLED, PREFECT_SERVER_ALLOW_EPHEMERAL_MODE, PREFECT_SERVER_CSRF_PROTECTION_ENABLED, get_current_settings, @@ -452,3 +453,10 @@ def reset_worker_events(asserting_events_worker: EventsWorker): yield assert isinstance(asserting_events_worker._client, AssertingEventsClient) asserting_events_worker._client.events = [] + + +@pytest.fixture +def enable_lineage_events(): + """A fixture that ensures lineage events are enabled.""" + with temporary_settings(updates={PREFECT_EXPERIMENTS_LINEAGE_EVENTS_ENABLED: True}): + yield diff --git a/src/prefect/testing/standard_test_suites/blocks.py b/src/prefect/testing/standard_test_suites/blocks.py index 9fa2e53440c2..99b001ab023b 100644 --- a/src/prefect/testing/standard_test_suites/blocks.py +++ b/src/prefect/testing/standard_test_suites/blocks.py @@ -1,6 +1,5 @@ import re from abc import ABC, abstractmethod -from typing import Type from urllib.request import urlopen import pytest @@ -12,16 +11,16 @@ class BlockStandardTestSuite(ABC): @pytest.fixture @abstractmethod - def block(self) -> Type[Block]: + def block(self) -> type[Block]: pass - def test_has_a_description(self, block: Type[Block]): + def test_has_a_description(self, block: type[Block]): assert block.get_description() - def test_has_a_documentation_url(self, block: Type[Block]): + def test_has_a_documentation_url(self, block: type[Block]): assert block._documentation_url - def test_all_fields_have_a_description(self, block: Type[Block]): + def test_all_fields_have_a_description(self, block: type[Block]): for name, field in block.model_fields.items(): if Block.annotation_refers_to_block_class(field.annotation): # TODO: Block field descriptions aren't currently handled by the UI, so @@ -35,21 +34,28 @@ def test_all_fields_have_a_description(self, block: Type[Block]): "." 
), f"{name} description on {block.__name__} does not end with a period" - def test_has_a_valid_code_example(self, block: Type[Block]): + def test_has_a_valid_code_example(self, block: type[Block]): code_example = block.get_code_example() assert code_example is not None, f"{block.__name__} is missing a code example" - import_pattern = rf"from .* import {block.__name__}" + + # Extract base name without generic parameters + base_name = block.__name__.partition("[")[0] + + # Check for proper import statement + import_pattern = rf"from .* import {base_name}" assert re.search(import_pattern, code_example) is not None, ( - f"The code example for {block.__name__} is missing an import statement" + f"The code example for {base_name} is missing an import statement" f" matching the pattern {import_pattern}" ) - block_load_pattern = rf'.* = {block.__name__}\.load\("BLOCK_NAME"\)' + + # Check for proper load statement + block_load_pattern = rf'.* = {base_name}\.load\("BLOCK_NAME"\)' assert re.search(block_load_pattern, code_example), ( - f"The code example for {block.__name__} is missing a .load statement" + f"The code example for {base_name} is missing a .load statement" f" matching the pattern {block_load_pattern}" ) - def test_has_a_valid_image(self, block: Type[Block]): + def test_has_a_valid_image(self, block: type[Block]): logo_url = block._logo_url assert ( logo_url is not None diff --git a/src/prefect/testing/utilities.py b/src/prefect/testing/utilities.py index d73cb66f0adf..068ecb3a8a5a 100644 --- a/src/prefect/testing/utilities.py +++ b/src/prefect/testing/utilities.py @@ -249,7 +249,7 @@ async def assert_uses_result_storage( ( storage if isinstance(storage, Block) - else await Block.load(storage, client=client) + else await Block.aload(storage, client=client) ), ) else: @@ -260,7 +260,7 @@ async def assert_uses_result_storage( ( storage if isinstance(storage, Block) - else await Block.load(storage, client=client) + else await Block.aload(storage, client=client) ), ) diff --git a/src/prefect/transactions.py b/src/prefect/transactions.py index 172b5c7d5b39..a426999fc2c0 100644 --- a/src/prefect/transactions.py +++ b/src/prefect/transactions.py @@ -23,7 +23,7 @@ MissingContextError, SerializationError, ) -from prefect.logging.loggers import get_logger, get_run_logger +from prefect.logging.loggers import LoggingAdapter, get_logger, get_run_logger from prefect.records import RecordStore from prefect.records.base import TransactionRecord from prefect.results import ( @@ -32,9 +32,9 @@ ResultStore, get_result_store, ) +from prefect.utilities._engine import get_hook_name from prefect.utilities.annotations import NotSet from prefect.utilities.collections import AutoEnum -from prefect.utilities.engine import _get_hook_name class IsolationLevel(AutoEnum): @@ -72,13 +72,13 @@ class Transaction(ContextModel): default_factory=list ) overwrite: bool = False - logger: Union[logging.Logger, logging.LoggerAdapter] = Field( + logger: Union[logging.Logger, LoggingAdapter] = Field( default_factory=partial(get_logger, "transactions") ) write_on_commit: bool = True _stored_values: Dict[str, Any] = PrivateAttr(default_factory=dict) _staged_value: Any = None - __var__: ContextVar = ContextVar("transaction") + __var__: ContextVar[Self] = ContextVar("transaction") def set(self, name: str, value: Any) -> None: """ @@ -209,7 +209,7 @@ def __enter__(self): self._token = self.__var__.set(self) return self - def __exit__(self, *exc_info): + def __exit__(self, *exc_info: Any): exc_type, exc_val, _ = exc_info if not 
self._token: raise RuntimeError( @@ -254,7 +254,7 @@ def begin(self): ): self.state = TransactionState.COMMITTED - def read(self) -> Union["BaseResult", ResultRecord, None]: + def read(self) -> Union["BaseResult[Any]", ResultRecord[Any], None]: if self.store and self.key: record = self.store.read(key=self.key) if isinstance(record, ResultRecord): @@ -354,8 +354,8 @@ def commit(self) -> bool: self.rollback() return False - def run_hook(self, hook, hook_type: str) -> None: - hook_name = _get_hook_name(hook) + def run_hook(self, hook: Callable[..., Any], hook_type: str) -> None: + hook_name = get_hook_name(hook) # Undocumented way to disable logging for a hook. Subject to change. should_log = getattr(hook, "log_on_run", True) @@ -379,8 +379,8 @@ def run_hook(self, hook, hook_type: str) -> None: def stage( self, value: Any, - on_rollback_hooks: Optional[List] = None, - on_commit_hooks: Optional[List] = None, + on_rollback_hooks: Optional[list[Callable[..., Any]]] = None, + on_commit_hooks: Optional[list[Callable[..., Any]]] = None, ) -> None: """ Stage a value to be committed later. @@ -441,7 +441,7 @@ def transaction( isolation_level: Optional[IsolationLevel] = None, overwrite: bool = False, write_on_commit: bool = True, - logger: Union[logging.Logger, logging.LoggerAdapter, None] = None, + logger: Optional[Union[logging.Logger, LoggingAdapter]] = None, ) -> Generator[Transaction, None, None]: """ A context manager for opening and managing a transaction. @@ -465,9 +465,9 @@ def transaction( store = get_result_store() try: - logger = logger or get_run_logger() + _logger: Union[logging.Logger, LoggingAdapter] = logger or get_run_logger() except MissingContextError: - logger = get_logger("transactions") + _logger = get_logger("transactions") with Transaction( key=key, @@ -476,6 +476,6 @@ def transaction( isolation_level=isolation_level, overwrite=overwrite, write_on_commit=write_on_commit, - logger=logger, + logger=_logger, ) as txn: yield txn diff --git a/src/prefect/types/__init__.py b/src/prefect/types/__init__.py index 934af32441f8..f36622f5a3df 100644 --- a/src/prefect/types/__init__.py +++ b/src/prefect/types/__init__.py @@ -3,7 +3,8 @@ from typing_extensions import Literal, TypeAlias import orjson import pydantic - +from pydantic_extra_types.pendulum_dt import DateTime as PydanticDateTime +from pydantic_extra_types.pendulum_dt import Date as PydanticDate from pydantic import ( BeforeValidator, Field, @@ -34,6 +35,8 @@ ), ] +DateTime: TypeAlias = PydanticDateTime +Date: TypeAlias = PydanticDate BANNED_CHARACTERS = ["/", "%", "&", ">", "<"] diff --git a/src/prefect/utilities/_engine.py b/src/prefect/utilities/_engine.py new file mode 100644 index 000000000000..c3a99676dcc4 --- /dev/null +++ b/src/prefect/utilities/_engine.py @@ -0,0 +1,96 @@ +"""Internal engine utilities""" + + +from collections.abc import Callable +from functools import partial +from typing import TYPE_CHECKING, Any, Union +from uuid import uuid4 + +from prefect.context import FlowRunContext +from prefect.flows import Flow +from prefect.tasks import Task, TaskRunNameCallbackWithParameters + + +def dynamic_key_for_task_run( + context: FlowRunContext, task: "Task[..., Any]", stable: bool = True +) -> Union[int, str]: + if ( + stable is False or context.detached + ): # this task is running on remote infrastructure + return str(uuid4()) + elif context.flow_run is None: # this is an autonomous task run + context.task_run_dynamic_keys[task.task_key] = getattr( + task, "dynamic_key", str(uuid4()) + ) + + elif task.task_key not in 
context.task_run_dynamic_keys: + context.task_run_dynamic_keys[task.task_key] = 0 + else: + dynamic_key = context.task_run_dynamic_keys[task.task_key] + if TYPE_CHECKING: + assert isinstance(dynamic_key, int) + context.task_run_dynamic_keys[task.task_key] = dynamic_key + 1 + + return context.task_run_dynamic_keys[task.task_key] + + +def resolve_custom_flow_run_name( + flow: "Flow[..., Any]", parameters: dict[str, Any] +) -> str: + if callable(flow.flow_run_name): + flow_run_name = flow.flow_run_name() + if not TYPE_CHECKING: + if not isinstance(flow_run_name, str): + raise TypeError( + f"Callable {flow.flow_run_name} for 'flow_run_name' returned type" + f" {type(flow_run_name).__name__} but a string is required." + ) + elif isinstance(flow.flow_run_name, str): + flow_run_name = flow.flow_run_name.format(**parameters) + else: + raise TypeError( + "Expected string or callable for 'flow_run_name'; got" + f" {type(flow.flow_run_name).__name__} instead." + ) + + return flow_run_name + + +def resolve_custom_task_run_name( + task: "Task[..., Any]", parameters: dict[str, Any] +) -> str: + if callable(task.task_run_name): + # If the callable accepts a 'parameters' kwarg, pass the entire parameters dict + if TaskRunNameCallbackWithParameters.is_callback_with_parameters( + task.task_run_name + ): + task_run_name = task.task_run_name(parameters=parameters) + else: + # If it doesn't expect parameters, call it without arguments + task_run_name = task.task_run_name() + + if not TYPE_CHECKING: + if not isinstance(task_run_name, str): + raise TypeError( + f"Callable {task.task_run_name} for 'task_run_name' returned type" + f" {type(task_run_name).__name__} but a string is required." + ) + elif isinstance(task.task_run_name, str): + task_run_name = task.task_run_name.format(**parameters) + else: + raise TypeError( + "Expected string or callable for 'task_run_name'; got" + f" {type(task.task_run_name).__name__} instead." + ) + + return task_run_name + + +def get_hook_name(hook: Callable[..., Any]) -> str: + return ( + hook.__name__ + if hasattr(hook, "__name__") + else ( + hook.func.__name__ if isinstance(hook, partial) else hook.__class__.__name__ + ) + ) diff --git a/src/prefect/utilities/annotations.py b/src/prefect/utilities/annotations.py index 6d9bd73ed475..2e264f334d74 100644 --- a/src/prefect/utilities/annotations.py +++ b/src/prefect/utilities/annotations.py @@ -1,33 +1,40 @@ import warnings -from abc import ABC -from collections import namedtuple -from typing import Generic, TypeVar +from operator import itemgetter +from typing import Any, cast -T = TypeVar("T") +from typing_extensions import Self, TypeVar +T = TypeVar("T", infer_variance=True) -class BaseAnnotation( - namedtuple("BaseAnnotation", field_names="value"), ABC, Generic[T] -): + +class BaseAnnotation(tuple[T]): """ Base class for Prefect annotation types. - Inherits from `namedtuple` for unpacking support in another tools. + Inherits from `tuple` for unpacking support in other tools. 
""" + __slots__ = () + + def __new__(cls, value: T) -> Self: + return super().__new__(cls, (value,)) + + # use itemgetter to minimise overhead, just like namedtuple generated code would + value: T = cast(T, property(itemgetter(0))) + def unwrap(self) -> T: - return self.value + return self[0] - def rewrap(self, value: T) -> "BaseAnnotation[T]": + def rewrap(self, value: T) -> Self: return type(self)(value) - def __eq__(self, other: "BaseAnnotation[T]") -> bool: + def __eq__(self, other: Any) -> bool: if type(self) is not type(other): return False - return self.unwrap() == other.unwrap() + return super().__eq__(other) def __repr__(self) -> str: - return f"{type(self).__name__}({self.value!r})" + return f"{type(self).__name__}({self[0]!r})" class unmapped(BaseAnnotation[T]): @@ -38,9 +45,9 @@ class unmapped(BaseAnnotation[T]): operation instead of being split. """ - def __getitem__(self, _) -> T: + def __getitem__(self, _: object) -> T: # type: ignore[override] # pyright: ignore[reportIncompatibleMethodOverride] # Internally, this acts as an infinite array where all items are the same value - return self.unwrap() + return super().__getitem__(0) class allow_failure(BaseAnnotation[T]): @@ -87,14 +94,14 @@ def unquote(self) -> T: # Backwards compatibility stub for `Quote` class -class Quote(quote): - def __init__(self, expr): +class Quote(quote[T]): + def __new__(cls, expr: T) -> Self: warnings.warn( "Use of `Quote` is deprecated. Use `quote` instead.", DeprecationWarning, stacklevel=2, ) - super().__init__(expr) + return super().__new__(cls, expr) class NotSet: diff --git a/src/prefect/utilities/asyncutils.py b/src/prefect/utilities/asyncutils.py index 3939632e1641..e2c1a2472eec 100644 --- a/src/prefect/utilities/asyncutils.py +++ b/src/prefect/utilities/asyncutils.py @@ -6,23 +6,12 @@ import inspect import threading import warnings -from concurrent.futures import ThreadPoolExecutor -from contextlib import asynccontextmanager -from contextvars import ContextVar, copy_context +from collections.abc import AsyncGenerator, Awaitable, Coroutine +from contextlib import AbstractAsyncContextManager, asynccontextmanager +from contextvars import ContextVar from functools import partial, wraps -from typing import ( - Any, - Awaitable, - Callable, - Coroutine, - Dict, - List, - Optional, - TypeVar, - Union, - cast, - overload, -) +from logging import Logger +from typing import TYPE_CHECKING, Any, Callable, NoReturn, Optional, Union, overload from uuid import UUID, uuid4 import anyio @@ -30,9 +19,18 @@ import anyio.from_thread import anyio.to_thread import sniffio -from typing_extensions import Literal, ParamSpec, TypeGuard +from typing_extensions import ( + Literal, + ParamSpec, + Self, + TypeAlias, + TypeGuard, + TypeVar, + TypeVarTuple, + Unpack, +) -from prefect._internal.concurrency.api import _cast_to_call, from_sync +from prefect._internal.concurrency.api import cast_to_call, from_sync from prefect._internal.concurrency.threads import ( get_run_sync_loop, in_run_sync_loop, @@ -41,62 +39,65 @@ T = TypeVar("T") P = ParamSpec("P") -R = TypeVar("R") +R = TypeVar("R", infer_variance=True) F = TypeVar("F", bound=Callable[..., Any]) Async = Literal[True] Sync = Literal[False] A = TypeVar("A", Async, Sync, covariant=True) +PosArgsT = TypeVarTuple("PosArgsT") + +_SyncOrAsyncCallable: TypeAlias = Callable[P, Union[R, Awaitable[R]]] # Global references to prevent garbage collection for `add_event_loop_shutdown_callback` -EVENT_LOOP_GC_REFS = {} +EVENT_LOOP_GC_REFS: dict[int, AsyncGenerator[None, Any]] = {} 
-PREFECT_THREAD_LIMITER: Optional[anyio.CapacityLimiter] = None RUNNING_IN_RUN_SYNC_LOOP_FLAG = ContextVar("running_in_run_sync_loop", default=False) RUNNING_ASYNC_FLAG = ContextVar("run_async", default=False) -BACKGROUND_TASKS: set[asyncio.Task] = set() -background_task_lock = threading.Lock() +BACKGROUND_TASKS: set[asyncio.Task[Any]] = set() +background_task_lock: threading.Lock = threading.Lock() # Thread-local storage to keep track of worker thread state _thread_local = threading.local() -logger = get_logger() +logger: Logger = get_logger() + + +_prefect_thread_limiter: Optional[anyio.CapacityLimiter] = None -def get_thread_limiter(): - global PREFECT_THREAD_LIMITER +def get_thread_limiter() -> anyio.CapacityLimiter: + global _prefect_thread_limiter - if PREFECT_THREAD_LIMITER is None: - PREFECT_THREAD_LIMITER = anyio.CapacityLimiter(250) + if _prefect_thread_limiter is None: + _prefect_thread_limiter = anyio.CapacityLimiter(250) - return PREFECT_THREAD_LIMITER + return _prefect_thread_limiter def is_async_fn( - func: Union[Callable[P, R], Callable[P, Awaitable[R]]], + func: _SyncOrAsyncCallable[P, R], ) -> TypeGuard[Callable[P, Awaitable[R]]]: """ Returns `True` if a function returns a coroutine. See https://github.com/microsoft/pyright/issues/2142 for an example use """ - while hasattr(func, "__wrapped__"): - func = func.__wrapped__ - + func = inspect.unwrap(func) return asyncio.iscoroutinefunction(func) -def is_async_gen_fn(func): +def is_async_gen_fn( + func: Callable[P, Any], +) -> TypeGuard[Callable[P, AsyncGenerator[Any, Any]]]: """ Returns `True` if a function is an async generator. """ - while hasattr(func, "__wrapped__"): - func = func.__wrapped__ - + func = inspect.unwrap(func) return inspect.isasyncgenfunction(func) -def create_task(coroutine: Coroutine) -> asyncio.Task: +def create_task(coroutine: Coroutine[Any, Any, R]) -> asyncio.Task[R]: """ Replacement for asyncio.create_task that will ensure that tasks aren't garbage collected before they complete. Allows for "fire and forget" @@ -122,68 +123,32 @@ def create_task(coroutine: Coroutine) -> asyncio.Task: return task -def _run_sync_in_new_thread(coroutine: Coroutine[Any, Any, T]) -> T: - """ - Note: this is an OLD implementation of `run_coro_as_sync` which liberally created - new threads and new loops. This works, but prevents sharing any objects - across coroutines, in particular httpx clients, which are very expensive to - instantiate. - - This is here for historical purposes and can be removed if/when it is no - longer needed for reference. - - --- - - Runs a coroutine from a synchronous context. A thread will be spawned to run - the event loop if necessary, which allows coroutines to run in environments - like Jupyter notebooks where the event loop runs on the main thread. - - Args: - coroutine: The coroutine to run. - - Returns: - The return value of the coroutine. - - Example: - Basic usage: ```python async def my_async_function(x: int) -> int: - return x + 1 - - run_sync(my_async_function(1)) ``` - """ +@overload +def run_coro_as_sync( + coroutine: Coroutine[Any, Any, R], + *, + force_new_thread: bool = ..., + wait_for_result: Literal[True] = ..., +) -> R: + ... - # ensure context variables are properly copied to the async frame - async def context_local_wrapper(): - """ - Wrapper that is submitted using copy_context().run to ensure - the RUNNING_ASYNC_FLAG mutations are tightly scoped to this coroutine's frame. 
- """ - token = RUNNING_ASYNC_FLAG.set(True) - try: - result = await coroutine - finally: - RUNNING_ASYNC_FLAG.reset(token) - return result - context = copy_context() - try: - loop = asyncio.get_running_loop() - except RuntimeError: - loop = None - - if loop and loop.is_running(): - with ThreadPoolExecutor() as executor: - future = executor.submit(context.run, asyncio.run, context_local_wrapper()) - result = cast(T, future.result()) - else: - result = context.run(asyncio.run, context_local_wrapper()) - return result +@overload +def run_coro_as_sync( + coroutine: Coroutine[Any, Any, R], + *, + force_new_thread: bool = ..., + wait_for_result: Literal[False] = False, +) -> R: + ... def run_coro_as_sync( - coroutine: Awaitable[R], + coroutine: Coroutine[Any, Any, R], + *, force_new_thread: bool = False, wait_for_result: bool = True, -) -> Union[R, None]: +) -> Optional[R]: """ Runs a coroutine from a synchronous context, as if it were a synchronous function. @@ -210,7 +175,7 @@ def run_coro_as_sync( The result of the coroutine if wait_for_result is True, otherwise None. """ - async def coroutine_wrapper() -> Union[R, None]: + async def coroutine_wrapper() -> Optional[R]: """ Set flags so that children (and grandchildren...) of this task know they are running in a new thread and do not try to run on the run_sync thread, which would cause a @@ -231,12 +196,13 @@ async def coroutine_wrapper() -> Union[R, None]: # that is running in the run_sync loop, we need to run this coroutine in a # new thread if in_run_sync_loop() or RUNNING_IN_RUN_SYNC_LOOP_FLAG.get() or force_new_thread: - return from_sync.call_in_new_thread(coroutine_wrapper) + result = from_sync.call_in_new_thread(coroutine_wrapper) + return result # otherwise, we can run the coroutine in the run_sync loop # and wait for the result else: - call = _cast_to_call(coroutine_wrapper) + call = cast_to_call(coroutine_wrapper) runner = get_run_sync_loop() runner.submit(call) try: @@ -249,8 +215,8 @@ async def coroutine_wrapper() -> Union[R, None]: async def run_sync_in_worker_thread( - __fn: Callable[..., T], *args: Any, **kwargs: Any -) -> T: + __fn: Callable[P, R], *args: P.args, **kwargs: P.kwargs +) -> R: """ Runs a sync function in a new worker thread so that the main thread's event loop is not blocked. @@ -274,14 +240,14 @@ async def run_sync_in_worker_thread( RUNNING_ASYNC_FLAG.reset(token) -def call_with_mark(call): +def call_with_mark(call: Callable[..., R]) -> R: mark_as_worker_thread() return call() def run_async_from_worker_thread( - __fn: Callable[..., Awaitable[T]], *args: Any, **kwargs: Any -) -> T: + __fn: Callable[P, Awaitable[R]], *args: P.args, **kwargs: P.kwargs +) -> R: """ Runs an async function in the main thread's event loop, blocking the worker thread until completion @@ -290,11 +256,13 @@ def run_async_from_worker_thread( return anyio.from_thread.run(call) -def run_async_in_new_loop(__fn: Callable[..., Awaitable[T]], *args: Any, **kwargs: Any): +def run_async_in_new_loop( + __fn: Callable[P, Awaitable[R]], *args: P.args, **kwargs: P.kwargs +) -> R: return anyio.run(partial(__fn, *args, **kwargs)) -def mark_as_worker_thread(): +def mark_as_worker_thread() -> None: _thread_local.is_worker_thread = True @@ -312,23 +280,9 @@ def in_async_main_thread() -> bool: return not in_async_worker_thread() -@overload -def sync_compatible( - async_fn: Callable[..., Coroutine[Any, Any, R]], -) -> Callable[..., R]: - ... 
- - -@overload def sync_compatible( - async_fn: Callable[..., Coroutine[Any, Any, R]], -) -> Callable[..., Coroutine[Any, Any, R]]: - ... - - -def sync_compatible( - async_fn: Callable[..., Coroutine[Any, Any, R]], -) -> Callable[..., Union[R, Coroutine[Any, Any, R]]]: + async_fn: Callable[P, Coroutine[Any, Any, R]], +) -> Callable[P, Union[R, Coroutine[Any, Any, R]]]: """ Converts an async function into a dual async and sync function. @@ -393,7 +347,7 @@ async def ctx_call(): if _sync is True: return run_coro_as_sync(ctx_call()) - elif _sync is False or RUNNING_ASYNC_FLAG.get() or is_async: + elif RUNNING_ASYNC_FLAG.get() or is_async: return ctx_call() else: return run_coro_as_sync(ctx_call()) @@ -409,8 +363,24 @@ async def ctx_call(): return wrapper +@overload +def asyncnullcontext( + value: None = None, *args: Any, **kwargs: Any +) -> AbstractAsyncContextManager[None, None]: + ... + + +@overload +def asyncnullcontext( + value: R, *args: Any, **kwargs: Any +) -> AbstractAsyncContextManager[R, None]: + ... + + @asynccontextmanager -async def asyncnullcontext(value=None, *args, **kwargs): +async def asyncnullcontext( + value: Optional[R] = None, *args: Any, **kwargs: Any +) -> AsyncGenerator[Any, Optional[R]]: yield value @@ -426,7 +396,7 @@ def sync(__async_fn: Callable[P, Awaitable[T]], *args: P.args, **kwargs: P.kwarg "`sync` called from an asynchronous context; " "you should `await` the async function directly instead." ) - with anyio.start_blocking_portal() as portal: + with anyio.from_thread.start_blocking_portal() as portal: return portal.call(partial(__async_fn, *args, **kwargs)) elif in_async_worker_thread(): # In a sync context but we can access the event loop thread; send the async @@ -438,7 +408,9 @@ def sync(__async_fn: Callable[P, Awaitable[T]], *args: P.args, **kwargs: P.kwarg return run_async_in_new_loop(__async_fn, *args, **kwargs) -async def add_event_loop_shutdown_callback(coroutine_fn: Callable[[], Awaitable]): +async def add_event_loop_shutdown_callback( + coroutine_fn: Callable[[], Awaitable[Any]], +) -> None: """ Adds a callback to the given callable on event loop closure. The callable must be a coroutine function. It will be awaited when the current event loop is shutting @@ -454,7 +426,7 @@ async def add_event_loop_shutdown_callback(coroutine_fn: Callable[[], Awaitable] loop is about to close. """ - async def on_shutdown(key): + async def on_shutdown(key: int) -> AsyncGenerator[None, Any]: # It appears that EVENT_LOOP_GC_REFS is somehow being garbage collected early. # We hold a reference to it so as to preserve it, at least for the lifetime of # this coroutine. See the issue below for the initial report/discussion: @@ -493,7 +465,7 @@ class GatherTaskGroup(anyio.abc.TaskGroup): """ A task group that gathers results. - AnyIO does not include support `gather`. This class extends the `TaskGroup` + AnyIO does not include `gather` support. This class extends the `TaskGroup` interface to allow simple gathering. 
See https://github.com/agronholm/anyio/issues/100 @@ -502,21 +474,31 @@ class GatherTaskGroup(anyio.abc.TaskGroup): """ def __init__(self, task_group: anyio.abc.TaskGroup): - self._results: Dict[UUID, Any] = {} + self._results: dict[UUID, Any] = {} # The concrete task group implementation to use self._task_group: anyio.abc.TaskGroup = task_group - async def _run_and_store(self, key, fn, args): + async def _run_and_store( + self, + key: UUID, + fn: Callable[[Unpack[PosArgsT]], Awaitable[Any]], + *args: Unpack[PosArgsT], + ) -> None: self._results[key] = await fn(*args) - def start_soon(self, fn, *args) -> UUID: + def start_soon( # pyright: ignore[reportIncompatibleMethodOverride] + self, + func: Callable[[Unpack[PosArgsT]], Awaitable[Any]], + *args: Unpack[PosArgsT], + name: object = None, + ) -> UUID: key = uuid4() # Put a placeholder in-case the result is retrieved earlier self._results[key] = GatherIncomplete - self._task_group.start_soon(self._run_and_store, key, fn, args) + self._task_group.start_soon(self._run_and_store, key, func, *args, name=name) return key - async def start(self, fn, *args): + async def start(self, func: object, *args: object, name: object = None) -> NoReturn: """ Since `start` returns the result of `task_status.started()` but here we must return the key instead, we just won't support this method for now. @@ -532,11 +514,11 @@ def get_result(self, key: UUID) -> Any: ) return result - async def __aenter__(self): + async def __aenter__(self) -> Self: await self._task_group.__aenter__() return self - async def __aexit__(self, *tb): + async def __aexit__(self, *tb: Any) -> Optional[bool]: try: retval = await self._task_group.__aexit__(*tb) return retval @@ -552,14 +534,14 @@ def create_gather_task_group() -> GatherTaskGroup: return GatherTaskGroup(anyio.create_task_group()) -async def gather(*calls: Callable[[], Coroutine[Any, Any, T]]) -> List[T]: +async def gather(*calls: Callable[[], Coroutine[Any, Any, T]]) -> list[T]: """ Run calls concurrently and gather their results. Unlike `asyncio.gather` this expects to receive _callables_ not _coroutines_. This matches `anyio` semantics. 
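# Usage sketch for gather above (assumes prefect is importable): pass zero-argument
# callables that return coroutines rather than coroutine objects; results are collected
# per call and returned in call order.
import asyncio
from functools import partial
from prefect.utilities.asyncutils import gather

async def double(x: int) -> int:
    return x * 2

async def main() -> None:
    results = await gather(partial(double, 1), partial(double, 2))
    assert results == [2, 4]

asyncio.run(main())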
""" - keys = [] + keys: list[UUID] = [] async with create_gather_task_group() as tg: for call in calls: keys.append(tg.start_soon(call)) @@ -567,19 +549,23 @@ async def gather(*calls: Callable[[], Coroutine[Any, Any, T]]) -> List[T]: class LazySemaphore: - def __init__(self, initial_value_func): - self._semaphore = None + def __init__(self, initial_value_func: Callable[[], int]) -> None: + self._semaphore: Optional[asyncio.Semaphore] = None self._initial_value_func = initial_value_func - async def __aenter__(self): + async def __aenter__(self) -> asyncio.Semaphore: self._initialize_semaphore() + if TYPE_CHECKING: + assert self._semaphore is not None await self._semaphore.__aenter__() return self._semaphore - async def __aexit__(self, exc_type, exc, tb): - await self._semaphore.__aexit__(exc_type, exc, tb) + async def __aexit__(self, *args: Any) -> None: + if TYPE_CHECKING: + assert self._semaphore is not None + await self._semaphore.__aexit__(*args) - def _initialize_semaphore(self): + def _initialize_semaphore(self) -> None: if self._semaphore is None: initial_value = self._initial_value_func() self._semaphore = asyncio.Semaphore(initial_value) diff --git a/src/prefect/utilities/callables.py b/src/prefect/utilities/callables.py index 382f5cd6a224..9489ec25f061 100644 --- a/src/prefect/utilities/callables.py +++ b/src/prefect/utilities/callables.py @@ -6,14 +6,17 @@ import importlib.util import inspect import warnings +from collections import OrderedDict +from collections.abc import Iterable from functools import partial +from logging import Logger from pathlib import Path -from typing import Any, Callable, Dict, Iterable, List, Optional, Tuple +from typing import Any, Callable, Optional, Union, cast -import cloudpickle +import cloudpickle # type: ignore # no stubs available import pydantic from griffe import Docstring, DocstringSectionKind, Parser, parse -from typing_extensions import Literal +from typing_extensions import Literal, TypeVar from prefect._internal.pydantic.v1_schema import has_v1_type_as_param from prefect._internal.pydantic.v2_schema import ( @@ -32,15 +35,17 @@ from prefect.utilities.collections import isiterable from prefect.utilities.importtools import safe_load_namespace -logger = get_logger(__name__) +logger: Logger = get_logger(__name__) + +R = TypeVar("R", infer_variance=True) def get_call_parameters( - fn: Callable, - call_args: Tuple[Any, ...], - call_kwargs: Dict[str, Any], + fn: Callable[..., Any], + call_args: tuple[Any, ...], + call_kwargs: dict[str, Any], apply_defaults: bool = True, -) -> Dict[str, Any]: +) -> dict[str, Any]: """ Bind a call to a function to get parameter/value mapping. Default values on the signature will be included if not overridden. @@ -57,7 +62,7 @@ def get_call_parameters( function """ if hasattr(fn, "__prefect_self__"): - call_args = (fn.__prefect_self__,) + call_args + call_args = (getattr(fn, "__prefect_self__"), *call_args) try: bound_signature = inspect.signature(fn).bind(*call_args, **call_kwargs) @@ -74,14 +79,14 @@ def get_call_parameters( def get_parameter_defaults( - fn: Callable, -) -> Dict[str, Any]: + fn: Callable[..., Any], +) -> dict[str, Any]: """ Get default parameter values for a callable. 
""" signature = inspect.signature(fn) - parameter_defaults = {} + parameter_defaults: dict[str, Any] = {} for name, param in signature.parameters.items(): if param.default is not signature.empty: @@ -91,8 +96,8 @@ def get_parameter_defaults( def explode_variadic_parameter( - fn: Callable, parameters: Dict[str, Any] -) -> Dict[str, Any]: + fn: Callable[..., Any], parameters: dict[str, Any] +) -> dict[str, Any]: """ Given a parameter dictionary, move any parameters stored in a variadic keyword argument parameter (i.e. **kwargs) into the top level. @@ -125,8 +130,8 @@ def foo(a, b, **kwargs): def collapse_variadic_parameters( - fn: Callable, parameters: Dict[str, Any] -) -> Dict[str, Any]: + fn: Callable[..., Any], parameters: dict[str, Any] +) -> dict[str, Any]: """ Given a parameter dictionary, move any parameters stored not present in the signature into the variadic keyword argument. @@ -151,50 +156,47 @@ def foo(a, b, **kwargs): missing_parameters = set(parameters.keys()) - set(signature_parameters.keys()) - if not variadic_key and missing_parameters: + if not missing_parameters: + # no missing parameters, return parameters unchanged + return parameters + + if not variadic_key: raise ValueError( f"Signature for {fn} does not include any variadic keyword argument " "but parameters were given that are not present in the signature." ) - if variadic_key and not missing_parameters: - # variadic key is present but no missing parameters, return parameters unchanged - return parameters - new_parameters = parameters.copy() - if variadic_key: - new_parameters[variadic_key] = {} - - for key in missing_parameters: - new_parameters[variadic_key][key] = new_parameters.pop(key) - + new_parameters[variadic_key] = { + key: new_parameters.pop(key) for key in missing_parameters + } return new_parameters def parameters_to_args_kwargs( - fn: Callable, - parameters: Dict[str, Any], -) -> Tuple[Tuple[Any, ...], Dict[str, Any]]: + fn: Callable[..., Any], + parameters: dict[str, Any], +) -> tuple[tuple[Any, ...], dict[str, Any]]: """ Convert a `parameters` dictionary to positional and keyword arguments The function _must_ have an identical signature to the original function or this will return an empty tuple and dict. """ - function_params = dict(inspect.signature(fn).parameters).keys() + function_params = inspect.signature(fn).parameters.keys() # Check for parameters that are not present in the function signature unknown_params = parameters.keys() - function_params if unknown_params: raise SignatureMismatchError.from_bad_params( - list(function_params), list(parameters.keys()) + list(function_params), list(parameters) ) bound_signature = inspect.signature(fn).bind_partial() - bound_signature.arguments = parameters + bound_signature.arguments = OrderedDict(parameters) return bound_signature.args, bound_signature.kwargs -def call_with_parameters(fn: Callable, parameters: Dict[str, Any]): +def call_with_parameters(fn: Callable[..., R], parameters: dict[str, Any]) -> R: """ Call a function with parameters extracted with `get_call_parameters` @@ -207,7 +209,7 @@ def call_with_parameters(fn: Callable, parameters: Dict[str, Any]): def cloudpickle_wrapped_call( - __fn: Callable, *args: Any, **kwargs: Any + __fn: Callable[..., Any], *args: Any, **kwargs: Any ) -> Callable[[], bytes]: """ Serializes a function call using cloudpickle then returns a callable which will @@ -217,18 +219,18 @@ def cloudpickle_wrapped_call( built-in pickler (e.g. 
`anyio.to_process` and `multiprocessing`) but may require a wider range of pickling support. """ - payload = cloudpickle.dumps((__fn, args, kwargs)) + payload = cloudpickle.dumps((__fn, args, kwargs)) # type: ignore # no stubs available return partial(_run_serialized_call, payload) -def _run_serialized_call(payload) -> bytes: +def _run_serialized_call(payload: bytes) -> bytes: """ Defined at the top-level so it can be pickled by the Python pickler. Used by `cloudpickle_wrapped_call`. """ fn, args, kwargs = cloudpickle.loads(payload) retval = fn(*args, **kwargs) - return cloudpickle.dumps(retval) + return cloudpickle.dumps(retval) # type: ignore # no stubs available class ParameterSchema(pydantic.BaseModel): @@ -236,18 +238,18 @@ class ParameterSchema(pydantic.BaseModel): title: Literal["Parameters"] = "Parameters" type: Literal["object"] = "object" - properties: Dict[str, Any] = pydantic.Field(default_factory=dict) - required: List[str] = pydantic.Field(default_factory=list) - definitions: Dict[str, Any] = pydantic.Field(default_factory=dict) + properties: dict[str, Any] = pydantic.Field(default_factory=dict) + required: list[str] = pydantic.Field(default_factory=list) + definitions: dict[str, Any] = pydantic.Field(default_factory=dict) - def model_dump_for_openapi(self) -> Dict[str, Any]: + def model_dump_for_openapi(self) -> dict[str, Any]: result = self.model_dump(mode="python", exclude_none=True) if "required" in result and not result["required"]: del result["required"] return result -def parameter_docstrings(docstring: Optional[str]) -> Dict[str, str]: +def parameter_docstrings(docstring: Optional[str]) -> dict[str, str]: """ Given a docstring in Google docstring format, parse the parameter section and return a dictionary that maps parameter names to docstring. @@ -258,7 +260,7 @@ def parameter_docstrings(docstring: Optional[str]) -> Dict[str, str]: Returns: Mapping from parameter names to docstrings. """ - param_docstrings = {} + param_docstrings: dict[str, str] = {} if not docstring: return param_docstrings @@ -279,9 +281,9 @@ def process_v1_params( param: inspect.Parameter, *, position: int, - docstrings: Dict[str, str], - aliases: Dict, -) -> Tuple[str, Any, "pydantic.Field"]: + docstrings: dict[str, str], + aliases: dict[str, str], +) -> tuple[str, Any, Any]: # Pydantic model creation will fail if names collide with the BaseModel type if hasattr(pydantic.BaseModel, param.name): name = param.name + "__" @@ -289,13 +291,13 @@ def process_v1_params( else: name = param.name - type_ = Any if param.annotation is inspect._empty else param.annotation + type_ = Any if param.annotation is inspect.Parameter.empty else param.annotation with warnings.catch_warnings(): warnings.filterwarnings( "ignore", category=pydantic.warnings.PydanticDeprecatedSince20 ) - field = pydantic.Field( + field: Any = pydantic.Field( # type: ignore # this uses the v1 signature, not v2 default=... 
if param.default is param.empty else param.default, title=param.name, description=docstrings.get(param.name, None), @@ -305,19 +307,24 @@ def process_v1_params( return name, type_, field -def create_v1_schema(name_: str, model_cfg, **model_fields): +def create_v1_schema( + name_: str, model_cfg: type[Any], model_fields: Optional[dict[str, Any]] = None +) -> dict[str, Any]: with warnings.catch_warnings(): warnings.filterwarnings( "ignore", category=pydantic.warnings.PydanticDeprecatedSince20 ) - model: "pydantic.BaseModel" = pydantic.create_model( - name_, __config__=model_cfg, **model_fields + model_fields = model_fields or {} + model: type[pydantic.BaseModel] = pydantic.create_model( # type: ignore # this uses the v1 signature, not v2 + name_, + __config__=model_cfg, # type: ignore # this uses the v1 signature, not v2 + **model_fields, ) - return model.schema(by_alias=True) + return model.schema(by_alias=True) # type: ignore # this uses the v1 signature, not v2 -def parameter_schema(fn: Callable) -> ParameterSchema: +def parameter_schema(fn: Callable[..., Any]) -> ParameterSchema: """Given a function, generates an OpenAPI-compatible description of the function's arguments, including: - name @@ -378,7 +385,7 @@ def parameter_schema_from_entrypoint(entrypoint: str) -> ParameterSchema: def generate_parameter_schema( - signature: inspect.Signature, docstrings: Dict[str, str] + signature: inspect.Signature, docstrings: dict[str, str] ) -> ParameterSchema: """ Generate a parameter schema from a function signature and docstrings. @@ -394,22 +401,22 @@ def generate_parameter_schema( ParameterSchema: The parameter schema. """ - model_fields = {} - aliases = {} + model_fields: dict[str, Any] = {} + aliases: dict[str, str] = {} if not has_v1_type_as_param(signature): - create_schema = create_v2_schema + config = pydantic.ConfigDict(arbitrary_types_allowed=True) + + create_schema = partial(create_v2_schema, model_cfg=config) process_params = process_v2_params - config = pydantic.ConfigDict(arbitrary_types_allowed=True) else: - create_schema = create_v1_schema - process_params = process_v1_params class ModelConfig: arbitrary_types_allowed = True - config = ModelConfig + create_schema = partial(create_v1_schema, model_cfg=ModelConfig) + process_params = process_v1_params for position, param in enumerate(signature.parameters.values()): name, type_, field = process_params( @@ -418,24 +425,26 @@ class ModelConfig: # Generate a Pydantic model at each step so we can check if this parameter # type supports schema generation try: - create_schema("CheckParameter", model_cfg=config, **{name: (type_, field)}) + create_schema("CheckParameter", model_fields={name: (type_, field)}) except (ValueError, TypeError): # This field's type is not valid for schema creation, update it to `Any` type_ = Any model_fields[name] = (type_, field) # Generate the final model and schema - schema = create_schema("Parameters", model_cfg=config, **model_fields) + schema = create_schema("Parameters", model_fields=model_fields) return ParameterSchema(**schema) -def raise_for_reserved_arguments(fn: Callable, reserved_arguments: Iterable[str]): +def raise_for_reserved_arguments( + fn: Callable[..., Any], reserved_arguments: Iterable[str] +) -> None: """Raise a ReservedArgumentError if `fn` has any parameters that conflict with the names contained in `reserved_arguments`.""" - function_paremeters = inspect.signature(fn).parameters + function_parameters = inspect.signature(fn).parameters for argument in reserved_arguments: - if argument in 
function_paremeters: + if argument in function_parameters: raise ReservedArgumentError( f"{argument!r} is a reserved argument name and cannot be used." ) @@ -479,7 +488,7 @@ def _generate_signature_from_source( ) if func_def is None: raise ValueError(f"Function {func_name} not found in source code") - parameters = [] + parameters: list[inspect.Parameter] = [] # Handle annotations for positional only args e.g. def func(a, /, b, c) for arg in func_def.args.posonlyargs: @@ -642,8 +651,8 @@ def _get_docstring_from_source(source_code: str, func_name: str) -> Optional[str def expand_mapping_parameters( - func: Callable, parameters: Dict[str, Any] -) -> List[Dict[str, Any]]: + func: Callable[..., Any], parameters: dict[str, Any] +) -> list[dict[str, Any]]: """ Generates a list of call parameters to be used for individual calls in a mapping operation. @@ -653,29 +662,29 @@ def expand_mapping_parameters( parameters: A dictionary of parameters with iterables to be mapped over Returns: - List: A list of dictionaries to be used as parameters for each + list: A list of dictionaries to be used as parameters for each call in the mapping operation """ # Ensure that any parameters in kwargs are expanded before this check parameters = explode_variadic_parameter(func, parameters) - iterable_parameters = {} - static_parameters = {} - annotated_parameters = {} + iterable_parameters: dict[str, list[Any]] = {} + static_parameters: dict[str, Any] = {} + annotated_parameters: dict[str, Union[allow_failure[Any], quote[Any]]] = {} for key, val in parameters.items(): if isinstance(val, (allow_failure, quote)): # Unwrap annotated parameters to determine if they are iterable annotated_parameters[key] = val - val = val.unwrap() + val: Any = val.unwrap() if isinstance(val, unmapped): - static_parameters[key] = val.value + static_parameters[key] = cast(unmapped[Any], val).value elif isiterable(val): iterable_parameters[key] = list(val) else: static_parameters[key] = val - if not len(iterable_parameters): + if not iterable_parameters: raise MappingMissingIterable( "No iterable parameters were received. Parameters for map must " f"include at least one iterable. 
Parameters: {parameters}" @@ -693,7 +702,7 @@ def expand_mapping_parameters( map_length = list(lengths)[0] - call_parameters_list = [] + call_parameters_list: list[dict[str, Any]] = [] for i in range(map_length): call_parameters = {key: value[i] for key, value in iterable_parameters.items()} call_parameters.update({key: value for key, value in static_parameters.items()}) diff --git a/src/prefect/utilities/collections.py b/src/prefect/utilities/collections.py index 5b2ae453c67e..3588b6f48a73 100644 --- a/src/prefect/utilities/collections.py +++ b/src/prefect/utilities/collections.py @@ -6,33 +6,40 @@ import itertools import types import warnings -from collections import OrderedDict, defaultdict -from collections.abc import Iterator as IteratorABC -from collections.abc import Sequence -from dataclasses import fields, is_dataclass -from enum import Enum, auto -from typing import ( - Any, +from collections import OrderedDict +from collections.abc import ( Callable, - Dict, + Collection, Generator, Hashable, Iterable, - List, - Optional, + Iterator, + Sequence, Set, - Tuple, - Type, - TypeVar, +) +from dataclasses import fields, is_dataclass, replace +from enum import Enum, auto +from typing import ( + TYPE_CHECKING, + Any, + Literal, + Optional, Union, cast, + overload, ) from unittest.mock import Mock import pydantic +from typing_extensions import TypeAlias, TypeVar # Quote moved to `prefect.utilities.annotations` but preserved here for compatibility -from prefect.utilities.annotations import BaseAnnotation, Quote, quote # noqa +from prefect.utilities.annotations import BaseAnnotation as BaseAnnotation +from prefect.utilities.annotations import Quote as Quote +from prefect.utilities.annotations import quote as quote + +if TYPE_CHECKING: + pass class AutoEnum(str, Enum): @@ -55,11 +62,12 @@ class MyEnum(AutoEnum): ``` """ - def _generate_next_value_(name, start, count, last_values): + @staticmethod + def _generate_next_value_(name: str, *_: object, **__: object) -> str: return name @staticmethod - def auto(): + def auto() -> str: """ Exposes `enum.auto()` to avoid requiring a second import to use `AutoEnum` """ @@ -70,12 +78,15 @@ def __repr__(self) -> str: KT = TypeVar("KT") -VT = TypeVar("VT") +VT = TypeVar("VT", infer_variance=True) +VT1 = TypeVar("VT1", infer_variance=True) +VT2 = TypeVar("VT2", infer_variance=True) +R = TypeVar("R", infer_variance=True) +NestedDict: TypeAlias = dict[KT, Union[VT, "NestedDict[KT, VT]"]] +HashableT = TypeVar("HashableT", bound=Hashable) -def dict_to_flatdict( - dct: Dict[KT, Union[Any, Dict[KT, Any]]], _parent: Tuple[KT, ...] = None -) -> Dict[Tuple[KT, ...], Any]: +def dict_to_flatdict(dct: NestedDict[KT, VT]) -> dict[tuple[KT, ...], VT]: """Converts a (nested) dictionary to a flattened representation. Each key of the flat dict will be a CompoundKey tuple containing the "chain of keys" @@ -83,28 +94,28 @@ def dict_to_flatdict( Args: dct (dict): The dictionary to flatten - _parent (Tuple, optional): The current parent for recursion Returns: A flattened dict of the same type as dct """ - typ = cast(Type[Dict[Tuple[KT, ...], Any]], type(dct)) - items: List[Tuple[Tuple[KT, ...], Any]] = [] - parent = _parent or tuple() - - for k, v in dct.items(): - k_parent = tuple(parent + (k,)) - # if v is a non-empty dict, recurse - if isinstance(v, dict) and v: - items.extend(dict_to_flatdict(v, _parent=k_parent).items()) - else: - items.append((k_parent, v)) - return typ(items) + def flatten( + dct: NestedDict[KT, VT], _parent: tuple[KT, ...] 
= () + ) -> Iterator[tuple[tuple[KT, ...], VT]]: + parent = _parent or () + for k, v in dct.items(): + k_parent = (*parent, k) + # if v is a non-empty dict, recurse + if isinstance(v, dict) and v: + yield from flatten(cast(NestedDict[KT, VT], v), _parent=k_parent) + else: + yield (k_parent, cast(VT, v)) -def flatdict_to_dict( - dct: Dict[Tuple[KT, ...], VT], -) -> Dict[KT, Union[VT, Dict[KT, VT]]]: + type_ = cast(type[dict[tuple[KT, ...], VT]], type(dct)) + return type_(flatten(dct)) + + +def flatdict_to_dict(dct: dict[tuple[KT, ...], VT]) -> NestedDict[KT, VT]: """Converts a flattened dictionary back to a nested dictionary. Args: @@ -114,16 +125,26 @@ def flatdict_to_dict( Returns A nested dict of the same type as dct """ - typ = type(dct) - result = cast(Dict[KT, Union[VT, Dict[KT, VT]]], typ()) + + type_ = cast(type[NestedDict[KT, VT]], type(dct)) + + def new(type_: type[NestedDict[KT, VT]] = type_) -> NestedDict[KT, VT]: + return type_() + + result = new() for key_tuple, value in dct.items(): - current_dict = result - for prefix_key in key_tuple[:-1]: + current = result + *prefix_keys, last_key = key_tuple + for prefix_key in prefix_keys: # Build nested dictionaries up for the current key tuple - # Use `setdefault` in case the nested dict has already been created - current_dict = current_dict.setdefault(prefix_key, typ()) # type: ignore + try: + current = cast(NestedDict[KT, VT], current[prefix_key]) + except KeyError: + new_dict = current[prefix_key] = new() + current = new_dict + # Set the value - current_dict[key_tuple[-1]] = value + current[last_key] = value return result @@ -148,9 +169,9 @@ def isiterable(obj: Any) -> bool: return not isinstance(obj, (str, bytes, io.IOBase)) -def ensure_iterable(obj: Union[T, Iterable[T]]) -> Iterable[T]: +def ensure_iterable(obj: Union[T, Iterable[T]]) -> Collection[T]: if isinstance(obj, Sequence) or isinstance(obj, Set): - return obj + return cast(Collection[T], obj) obj = cast(T, obj) # No longer in the iterable case return [obj] @@ -160,9 +181,9 @@ def listrepr(objs: Iterable[Any], sep: str = " ") -> str: def extract_instances( - objects: Iterable, - types: Union[Type[T], Tuple[Type[T], ...]] = object, -) -> Union[List[T], Dict[Type[T], T]]: + objects: Iterable[Any], + types: Union[type[T], tuple[type[T], ...]] = object, +) -> Union[list[T], dict[type[T], list[T]]]: """ Extract objects from a file and returns a dict of type -> instances @@ -174,26 +195,27 @@ def extract_instances( If a single type is given: a list of instances of that type If a tuple of types is given: a mapping of type to a list of instances """ - types = ensure_iterable(types) + types_collection = ensure_iterable(types) # Create a mapping of type -> instance from the exec values - ret = defaultdict(list) + ret: dict[type[T], list[Any]] = {} for o in objects: # We iterate here so that the key is the passed type rather than type(o) - for type_ in types: + for type_ in types_collection: if isinstance(o, type_): - ret[type_].append(o) + ret.setdefault(type_, []).append(o) - if len(types) == 1: - return ret[types[0]] + if len(types_collection) == 1: + [type_] = types_collection + return ret[type_] return ret def batched_iterable( iterable: Iterable[T], size: int -) -> Generator[Tuple[T, ...], None, None]: +) -> Generator[tuple[T, ...], None, None]: """ Yield batches of a certain size from an iterable @@ -221,15 +243,86 @@ class StopVisiting(BaseException): """ +@overload +def visit_collection( + expr: Any, + visit_fn: Callable[[Any, dict[str, VT]], Any], + *, + return_data: 
Literal[True] = ..., + max_depth: int = ..., + context: dict[str, VT] = ..., + remove_annotations: bool = ..., + _seen: Optional[set[int]] = ..., +) -> Any: + ... + + +@overload +def visit_collection( + expr: Any, + visit_fn: Callable[[Any], Any], + *, + return_data: Literal[True] = ..., + max_depth: int = ..., + context: None = None, + remove_annotations: bool = ..., + _seen: Optional[set[int]] = ..., +) -> Any: + ... + + +@overload +def visit_collection( + expr: Any, + visit_fn: Callable[[Any, dict[str, VT]], Any], + *, + return_data: bool = ..., + max_depth: int = ..., + context: dict[str, VT] = ..., + remove_annotations: bool = ..., + _seen: Optional[set[int]] = ..., +) -> Optional[Any]: + ... + + +@overload +def visit_collection( + expr: Any, + visit_fn: Callable[[Any], Any], + *, + return_data: bool = ..., + max_depth: int = ..., + context: None = None, + remove_annotations: bool = ..., + _seen: Optional[set[int]] = ..., +) -> Optional[Any]: + ... + + +@overload +def visit_collection( + expr: Any, + visit_fn: Callable[[Any, dict[str, VT]], Any], + *, + return_data: Literal[False] = False, + max_depth: int = ..., + context: dict[str, VT] = ..., + remove_annotations: bool = ..., + _seen: Optional[set[int]] = ..., +) -> None: + ... + + def visit_collection( expr: Any, - visit_fn: Union[Callable[[Any, Optional[dict]], Any], Callable[[Any], Any]], + visit_fn: Union[Callable[[Any, dict[str, VT]], Any], Callable[[Any], Any]], + *, return_data: bool = False, max_depth: int = -1, - context: Optional[dict] = None, + context: Optional[dict[str, VT]] = None, remove_annotations: bool = False, - _seen: Optional[Set[int]] = None, -) -> Any: + _seen: Optional[set[int]] = None, +) -> Optional[Any]: """ Visits and potentially transforms every element of an arbitrary Python collection. @@ -289,24 +382,39 @@ def visit_collection( if _seen is None: _seen = set() - def visit_nested(expr): - # Utility for a recursive call, preserving options and updating the depth. - return visit_collection( - expr, - visit_fn=visit_fn, - return_data=return_data, - remove_annotations=remove_annotations, - max_depth=max_depth - 1, - # Copy the context on nested calls so it does not "propagate up" - context=context.copy() if context is not None else None, - _seen=_seen, - ) - - def visit_expression(expr): - if context is not None: - return visit_fn(expr, context) - else: - return visit_fn(expr) + if context is not None: + _callback = cast(Callable[[Any, dict[str, VT]], Any], visit_fn) + + def visit_nested(expr: Any) -> Optional[Any]: + return visit_collection( + expr, + _callback, + return_data=return_data, + remove_annotations=remove_annotations, + max_depth=max_depth - 1, + # Copy the context on nested calls so it does not "propagate up" + context=context.copy(), + _seen=_seen, + ) + + def visit_expression(expr: Any) -> Any: + return _callback(expr, context) + else: + _callback = cast(Callable[[Any], Any], visit_fn) + + def visit_nested(expr: Any) -> Optional[Any]: + # Utility for a recursive call, preserving options and updating the depth. + return visit_collection( + expr, + _callback, + return_data=return_data, + remove_annotations=remove_annotations, + max_depth=max_depth - 1, + _seen=_seen, + ) + + def visit_expression(expr: Any) -> Any: + return _callback(expr) # --- 1. 
Visit every expression try: @@ -329,10 +437,6 @@ def visit_expression(expr): else: _seen.add(id(expr)) - # Get the expression type; treat iterators like lists - typ = list if isinstance(expr, IteratorABC) and isiterable(expr) else type(expr) - typ = cast(type, typ) # mypy treats this as 'object' otherwise and complains - # Then visit every item in the expression if it is a collection # presume that the result is the original expression. @@ -354,9 +458,10 @@ def visit_expression(expr): # --- Annotations (unmapped, quote, etc.) elif isinstance(expr, BaseAnnotation): + annotated = cast(BaseAnnotation[Any], expr) if context is not None: - context["annotation"] = expr - unwrapped = expr.unwrap() + context["annotation"] = cast(VT, annotated) + unwrapped = annotated.unwrap() value = visit_nested(unwrapped) if return_data: @@ -365,47 +470,49 @@ def visit_expression(expr): result = value # if the value was modified, rewrap it elif value is not unwrapped: - result = expr.rewrap(value) + result = annotated.rewrap(value) # otherwise return the expr # --- Sequences elif isinstance(expr, (list, tuple, set)): - items = [visit_nested(o) for o in expr] + seq = cast(Union[list[Any], tuple[Any], set[Any]], expr) + items = [visit_nested(o) for o in seq] if return_data: - modified = any(item is not orig for item, orig in zip(items, expr)) + modified = any(item is not orig for item, orig in zip(items, seq)) if modified: - result = typ(items) + result = type(seq)(items) # --- Dictionaries - elif typ in (dict, OrderedDict): - assert isinstance(expr, (dict, OrderedDict)) # typecheck assertion - items = [(visit_nested(k), visit_nested(v)) for k, v in expr.items()] + elif isinstance(expr, (dict, OrderedDict)): + mapping = cast(dict[Any, Any], expr) + items = [(visit_nested(k), visit_nested(v)) for k, v in mapping.items()] if return_data: modified = any( k1 is not k2 or v1 is not v2 - for (k1, v1), (k2, v2) in zip(items, expr.items()) + for (k1, v1), (k2, v2) in zip(items, mapping.items()) ) if modified: - result = typ(items) + result = type(mapping)(items) # --- Dataclasses elif is_dataclass(expr) and not isinstance(expr, type): - values = [visit_nested(getattr(expr, f.name)) for f in fields(expr)] + expr_fields = fields(expr) + values = [visit_nested(getattr(expr, f.name)) for f in expr_fields] if return_data: modified = any( - getattr(expr, f.name) is not v for f, v in zip(fields(expr), values) + getattr(expr, f.name) is not v for f, v in zip(expr_fields, values) ) if modified: - result = typ(**{f.name: v for f, v in zip(fields(expr), values)}) + result = replace( + expr, **{f.name: v for f, v in zip(expr_fields, values)} + ) # --- Pydantic models elif isinstance(expr, pydantic.BaseModel): - typ = cast(Type[pydantic.BaseModel], typ) - # when extra=allow, fields not in model_fields may be in model_fields_set model_fields = expr.model_fields_set.union(expr.model_fields.keys()) @@ -424,7 +531,7 @@ def visit_expression(expr): ) if modified: # Use construct to avoid validation and handle immutability - model_instance = typ.model_construct( + model_instance = expr.model_construct( _fields_set=expr.model_fields_set, **updated_data ) for private_attr in expr.__private_attributes__: @@ -435,7 +542,21 @@ def visit_expression(expr): return result -def remove_nested_keys(keys_to_remove: List[Hashable], obj): +@overload +def remove_nested_keys( + keys_to_remove: list[HashableT], obj: NestedDict[HashableT, VT] +) -> NestedDict[HashableT, VT]: + ... 
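# Usage sketch for visit_collection above (assumes prefect is importable): with
# return_data=True a new structure is returned, rebuilt only along the paths where the
# visit function changed a value.
from prefect.utilities.collections import visit_collection

def double_ints(value):
    return value * 2 if isinstance(value, int) else value

original = {"a": [1, 2], "b": {"c": 3}, "d": "text"}
result = visit_collection(original, double_ints, return_data=True)
assert result == {"a": [2, 4], "b": {"c": 6}, "d": "text"}
assert original == {"a": [1, 2], "b": {"c": 3}, "d": "text"}  # the input is not mutated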
+ + +@overload +def remove_nested_keys(keys_to_remove: list[HashableT], obj: Any) -> Any: + ... + + +def remove_nested_keys( + keys_to_remove: list[HashableT], obj: Union[NestedDict[HashableT, VT], Any] +) -> Union[NestedDict[HashableT, VT], Any]: """ Recurses a dictionary returns a copy without all keys that match an entry in `key_to_remove`. Return `obj` unchanged if not a dictionary. @@ -452,24 +573,56 @@ def remove_nested_keys(keys_to_remove: List[Hashable], obj): return obj return { key: remove_nested_keys(keys_to_remove, value) - for key, value in obj.items() + for key, value in cast(NestedDict[HashableT, VT], obj).items() if key not in keys_to_remove } +@overload +def distinct(iterable: Iterable[HashableT], key: None = None) -> Iterator[HashableT]: + ... + + +@overload +def distinct(iterable: Iterable[T], key: Callable[[T], Hashable]) -> Iterator[T]: + ... + + def distinct( - iterable: Iterable[T], - key: Callable[[T], Any] = (lambda i: i), -) -> Generator[T, None, None]: - seen: Set = set() + iterable: Iterable[Union[T, HashableT]], + key: Optional[Callable[[T], Hashable]] = None, +) -> Iterator[Union[T, HashableT]]: + def _key(__i: Any) -> Hashable: + return __i + + if key is not None: + _key = cast(Callable[[Any], Hashable], key) + + seen: set[Hashable] = set() for item in iterable: - if key(item) in seen: + if _key(item) in seen: continue - seen.add(key(item)) + seen.add(_key(item)) yield item -def get_from_dict(dct: Dict, keys: Union[str, List[str]], default: Any = None) -> Any: +@overload +def get_from_dict( + dct: NestedDict[str, VT], keys: Union[str, list[str]], default: None = None +) -> Optional[VT]: + ... + + +@overload +def get_from_dict( + dct: NestedDict[str, VT], keys: Union[str, list[str]], default: R +) -> Union[VT, R]: + ... + + +def get_from_dict( + dct: NestedDict[str, VT], keys: Union[str, list[str]], default: Optional[R] = None +) -> Union[VT, R, None]: """ Fetch a value from a nested dictionary or list using a sequence of keys. @@ -500,6 +653,7 @@ def get_from_dict(dct: Dict, keys: Union[str, List[str]], default: Any = None) - """ if isinstance(keys, str): keys = keys.replace("[", ".").replace("]", "").split(".") + value = dct try: for key in keys: try: @@ -509,13 +663,15 @@ def get_from_dict(dct: Dict, keys: Union[str, List[str]], default: Any = None) - # If it's not an int, use the key as-is # for dict lookup pass - dct = dct[key] - return dct + value = value[key] # type: ignore + return cast(VT, value) except (TypeError, KeyError, IndexError): return default -def set_in_dict(dct: Dict, keys: Union[str, List[str]], value: Any): +def set_in_dict( + dct: NestedDict[str, VT], keys: Union[str, list[str]], value: VT +) -> None: """ Sets a value in a nested dictionary using a sequence of keys. @@ -543,11 +699,13 @@ def set_in_dict(dct: Dict, keys: Union[str, List[str]], value: Any): raise TypeError(f"Key path exists and contains a non-dict value: {keys}") if k not in dct: dct[k] = {} - dct = dct[k] + dct = cast(NestedDict[str, VT], dct[k]) dct[keys[-1]] = value -def deep_merge(dct: Dict, merge: Dict): +def deep_merge( + dct: NestedDict[str, VT1], merge: NestedDict[str, VT2] +) -> NestedDict[str, Union[VT1, VT2]]: """ Recursively merges `merge` into `dct`. @@ -558,18 +716,21 @@ def deep_merge(dct: Dict, merge: Dict): Returns: A new dictionary with the merged contents. 
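# Usage sketch for get_from_dict/set_in_dict above (assumes prefect is importable): dotted
# key paths may include list indices in square brackets.
from prefect.utilities.collections import get_from_dict, set_in_dict

config = {"db": {"hosts": [{"name": "primary"}]}}
assert get_from_dict(config, "db.hosts[0].name") == "primary"
assert get_from_dict(config, "db.port", default=5432) == 5432

set_in_dict(config, "db.options.timeout", 30)
assert config["db"]["options"]["timeout"] == 30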
""" - result = dct.copy() # Start with keys and values from `dct` + result: dict[str, Any] = dct.copy() # Start with keys and values from `dct` for key, value in merge.items(): if key in result and isinstance(result[key], dict) and isinstance(value, dict): # If both values are dictionaries, merge them recursively - result[key] = deep_merge(result[key], value) + result[key] = deep_merge( + cast(NestedDict[str, VT1], result[key]), + cast(NestedDict[str, VT2], value), + ) else: # Otherwise, overwrite with the new value - result[key] = value + result[key] = cast(Union[VT2, NestedDict[str, VT2]], value) return result -def deep_merge_dicts(*dicts): +def deep_merge_dicts(*dicts: NestedDict[str, Any]) -> NestedDict[str, Any]: """ Recursively merges multiple dictionaries. @@ -579,7 +740,7 @@ def deep_merge_dicts(*dicts): Returns: A new dictionary with the merged contents. """ - result = {} + result: NestedDict[str, Any] = {} for dictionary in dicts: result = deep_merge(result, dictionary) return result diff --git a/src/prefect/utilities/compat.py b/src/prefect/utilities/compat.py index 3eadafb3edf3..6bf8f34c46ca 100644 --- a/src/prefect/utilities/compat.py +++ b/src/prefect/utilities/compat.py @@ -3,29 +3,21 @@ """ # Please organize additions to this file by version -import asyncio import sys -from shutil import copytree -from signal import raise_signal if sys.version_info < (3, 10): - import importlib_metadata - from importlib_metadata import EntryPoint, EntryPoints, entry_points + import importlib_metadata as importlib_metadata + from importlib_metadata import ( + EntryPoint as EntryPoint, + EntryPoints as EntryPoints, + entry_points as entry_points, + ) else: - import importlib.metadata as importlib_metadata - from importlib.metadata import EntryPoint, EntryPoints, entry_points + import importlib.metadata + from importlib.metadata import ( + EntryPoint as EntryPoint, + EntryPoints as EntryPoints, + entry_points as entry_points, + ) -if sys.version_info < (3, 9): - # https://docs.python.org/3/library/asyncio-task.html#asyncio.to_thread - - import functools - - async def asyncio_to_thread(fn, *args, **kwargs): - loop = asyncio.get_running_loop() - return await loop.run_in_executor(None, functools.partial(fn, *args, **kwargs)) - -else: - from asyncio import to_thread as asyncio_to_thread - -if sys.platform != "win32": - from asyncio import ThreadedChildWatcher + importlib_metadata = importlib.metadata diff --git a/src/prefect/utilities/context.py b/src/prefect/utilities/context.py index 3bd87f975f40..c475f3eea453 100644 --- a/src/prefect/utilities/context.py +++ b/src/prefect/utilities/context.py @@ -1,6 +1,7 @@ +from collections.abc import Generator from contextlib import contextmanager from contextvars import Context, ContextVar, Token -from typing import TYPE_CHECKING, Dict, Optional, Tuple, cast +from typing import TYPE_CHECKING, Any, Optional, cast from uuid import UUID if TYPE_CHECKING: @@ -8,8 +9,8 @@ @contextmanager -def temporary_context(context: Context): - tokens: Dict[ContextVar, Token] = {} +def temporary_context(context: Context) -> Generator[None, Any, None]: + tokens: dict[ContextVar[Any], Token[Any]] = {} for key, value in context.items(): token = key.set(value) tokens[key] = token @@ -38,11 +39,11 @@ def get_flow_run_id() -> Optional[UUID]: return None -def get_task_and_flow_run_ids() -> Tuple[Optional[UUID], Optional[UUID]]: +def get_task_and_flow_run_ids() -> tuple[Optional[UUID], Optional[UUID]]: """ Get the task run and flow run ids from the context, if available. 
Returns: - Tuple[Optional[UUID], Optional[UUID]]: a tuple of the task run id and flow run id + tuple[Optional[UUID], Optional[UUID]]: a tuple of the task run id and flow run id """ return get_task_run_id(), get_flow_run_id() diff --git a/src/prefect/utilities/dispatch.py b/src/prefect/utilities/dispatch.py index 599a9afafda4..603c24aecbd1 100644 --- a/src/prefect/utilities/dispatch.py +++ b/src/prefect/utilities/dispatch.py @@ -23,28 +23,39 @@ class Foo(Base): import abc import inspect import warnings -from typing import Any, Dict, Optional, Type, TypeVar +from typing import Any, Literal, Optional, TypeVar, overload -T = TypeVar("T", bound=Type) +T = TypeVar("T", bound=type[Any]) -_TYPE_REGISTRIES: Dict[Type, Dict[str, Type]] = {} +_TYPE_REGISTRIES: dict[Any, dict[str, Any]] = {} -def get_registry_for_type(cls: T) -> Optional[Dict[str, T]]: +def get_registry_for_type(cls: T) -> Optional[dict[str, T]]: """ Get the first matching registry for a class or any of its base classes. If not found, `None` is returned. """ return next( - filter( - lambda registry: registry is not None, - (_TYPE_REGISTRIES.get(cls) for cls in cls.mro()), - ), + (reg for cls in cls.mro() if (reg := _TYPE_REGISTRIES.get(cls)) is not None), None, ) +@overload +def get_dispatch_key( + cls_or_instance: Any, allow_missing: Literal[False] = False +) -> str: + ... + + +@overload +def get_dispatch_key( + cls_or_instance: Any, allow_missing: Literal[True] = ... +) -> Optional[str]: + ... + + def get_dispatch_key( cls_or_instance: Any, allow_missing: bool = False ) -> Optional[str]: @@ -89,14 +100,14 @@ def get_dispatch_key( @classmethod -def _register_subclass_of_base_type(cls, **kwargs): +def _register_subclass_of_base_type(cls: type[Any], **kwargs: Any) -> None: if hasattr(cls, "__init_subclass_original__"): cls.__init_subclass_original__(**kwargs) elif hasattr(cls, "__pydantic_init_subclass_original__"): cls.__pydantic_init_subclass_original__(**kwargs) # Do not register abstract base classes - if abc.ABC in getattr(cls, "__bases__", []): + if abc.ABC in cls.__bases__: return register_type(cls) @@ -123,7 +134,7 @@ def register_base_type(cls: T) -> T: cls.__pydantic_init_subclass__ = _register_subclass_of_base_type else: cls.__init_subclass_original__ = getattr(cls, "__init_subclass__") - cls.__init_subclass__ = _register_subclass_of_base_type + setattr(cls, "__init_subclass__", _register_subclass_of_base_type) return cls @@ -190,7 +201,7 @@ def lookup_type(cls: T, dispatch_key: str) -> T: Look up a dispatch key in the type registry for the given class. 
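# Usage sketch for the dispatch registry above (assumes prefect is importable; the
# Serializer classes here are hypothetical): a registered base type auto-registers its
# subclasses by their __dispatch_key__, and lookup_type resolves a key back to a class.
from prefect.utilities.dispatch import get_dispatch_key, lookup_type, register_base_type

@register_base_type
class Serializer:
    pass

class JSONSerializer(Serializer):
    __dispatch_key__ = "json"

assert get_dispatch_key(JSONSerializer) == "json"
assert lookup_type(Serializer, "json") is JSONSerializer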
""" # Get the first matching registry for the class or one of its bases - registry = get_registry_for_type(cls) + registry = get_registry_for_type(cls) or {} # Look up this type in the registry subcls = registry.get(dispatch_key) diff --git a/src/prefect/utilities/dockerutils.py b/src/prefect/utilities/dockerutils.py index 72575d81d3a0..eb6bd18b024b 100644 --- a/src/prefect/utilities/dockerutils.py +++ b/src/prefect/utilities/dockerutils.py @@ -3,22 +3,12 @@ import shutil import sys import warnings +from collections.abc import Generator, Iterable, Iterator from contextlib import contextmanager from pathlib import Path, PurePosixPath from tempfile import TemporaryDirectory from types import TracebackType -from typing import ( - TYPE_CHECKING, - Any, - Generator, - Iterable, - List, - Optional, - TextIO, - Tuple, - Type, - Union, -) +from typing import TYPE_CHECKING, Any, Optional, TextIO, Union, cast from urllib.parse import urlsplit import pendulum @@ -29,6 +19,12 @@ from prefect.utilities.importtools import lazy_import from prefect.utilities.slugify import slugify +if TYPE_CHECKING: + import docker + import docker.errors + from docker import DockerClient + from docker.models.images import Image + CONTAINER_LABELS = { "io.prefect.version": prefect.__version__, } @@ -102,10 +98,7 @@ def silence_docker_warnings() -> Generator[None, None, None]: # want to have those popping up in various modules and test suites. Instead, # consolidate the imports we need here, and expose them via this module. with silence_docker_warnings(): - if TYPE_CHECKING: - import docker - from docker import DockerClient - else: + if not TYPE_CHECKING: docker = lazy_import("docker") @@ -123,7 +116,8 @@ def docker_client() -> Generator["DockerClient", None, None]: "This error is often thrown because Docker is not running. Please ensure Docker is running." 
) from exc finally: - client is not None and client.close() + if client is not None: + client.close() # type: ignore # typing stub is not complete class BuildError(Exception): @@ -207,14 +201,15 @@ class ImageBuilder: base_directory: Path context: Optional[Path] platform: Optional[str] - dockerfile_lines: List[str] + dockerfile_lines: list[str] + temporary_directory: Optional[TemporaryDirectory[str]] def __init__( self, base_image: str, - base_directory: Path = None, + base_directory: Optional[Path] = None, platform: Optional[str] = None, - context: Path = None, + context: Optional[Path] = None, ): """Create an ImageBuilder @@ -250,7 +245,7 @@ def __enter__(self) -> Self: return self def __exit__( - self, exc: Type[BaseException], value: BaseException, traceback: TracebackType + self, exc: type[BaseException], value: BaseException, traceback: TracebackType ) -> None: if not self.temporary_directory: return @@ -267,7 +262,9 @@ def add_lines(self, lines: Iterable[str]) -> None: """Add lines to this image's Dockerfile""" self.dockerfile_lines.extend(lines) - def copy(self, source: Union[str, Path], destination: Union[str, PurePosixPath]): + def copy( + self, source: Union[str, Path], destination: Union[str, PurePosixPath] + ) -> None: """Copy a file to this image""" if not self.context: raise Exception("No context available") @@ -291,7 +288,7 @@ def copy(self, source: Union[str, Path], destination: Union[str, PurePosixPath]) self.add_line(f"COPY {source} {destination}") - def write_text(self, text: str, destination: Union[str, PurePosixPath]): + def write_text(self, text: str, destination: Union[str, PurePosixPath]) -> None: if not self.context: raise Exception("No context available") @@ -315,6 +312,7 @@ def build( Returns: The image ID """ + assert self.context is not None dockerfile_path: Path = self.context / "Dockerfile" with dockerfile_path.open("w") as dockerfile: @@ -436,9 +434,12 @@ def push_image( repository = f"{registry}/{name}" with docker_client() as client: - image: "docker.Image" = client.images.get(image_id) - image.tag(repository, tag=tag) - events = client.api.push(repository, tag=tag, stream=True, decode=True) + image: "Image" = client.images.get(image_id) + image.tag(repository, tag=tag) # type: ignore # typing stub is not complete + events = cast( + Iterator[dict[str, Any]], + client.api.push(repository, tag=tag, stream=True, decode=True), # type: ignore # typing stub is not complete + ) try: for event in events: if "status" in event: @@ -452,12 +453,12 @@ def push_image( elif "error" in event: raise PushError(event["error"]) finally: - client.api.remove_image(f"{repository}:{tag}", noprune=True) + client.api.remove_image(f"{repository}:{tag}", noprune=True) # type: ignore # typing stub is not complete return f"{repository}:{tag}" -def to_run_command(command: List[str]) -> str: +def to_run_command(command: list[str]) -> str: """ Convert a process-style list of command arguments to a single Dockerfile RUN instruction. 
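# Usage sketch for ImageBuilder above (assumes a running Docker daemon and prefect's
# docker extras; the base image, file contents, and build options are placeholders):
# Dockerfile lines and inline files are staged into a temporary context, then build()
# returns the resulting image ID.
from prefect.utilities.dockerutils import ImageBuilder

with ImageBuilder("python:3.12-slim") as builder:
    builder.add_lines(["WORKDIR /app", "RUN pip install --no-cache-dir prefect"])
    builder.write_text("placeholder application file\n", "/app/README.txt")
    image_id = builder.build()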
@@ -481,7 +482,7 @@ def to_run_command(command: List[str]) -> str: return run_command -def parse_image_tag(name: str) -> Tuple[str, Optional[str]]: +def parse_image_tag(name: str) -> tuple[str, Optional[str]]: """ Parse Docker Image String @@ -519,7 +520,7 @@ def parse_image_tag(name: str) -> Tuple[str, Optional[str]]: return image_name, tag -def split_repository_path(repository_path: str) -> Tuple[Optional[str], str]: +def split_repository_path(repository_path: str) -> tuple[Optional[str], str]: """ Splits a Docker repository path into its namespace and repository components. @@ -550,7 +551,7 @@ def split_repository_path(repository_path: str) -> Tuple[Optional[str], str]: return namespace, repository -def format_outlier_version_name(version: str): +def format_outlier_version_name(version: str) -> str: """ Formats outlier docker version names to pass `packaging.version.parse` validation - Current cases are simple, but creates stub for more complicated formatting if eventually needed. @@ -580,7 +581,7 @@ def generate_default_dockerfile(context: Optional[Path] = None): """ if not context: context = Path.cwd() - lines = [] + lines: list[str] = [] base_image = get_prefect_image_name() lines.append(f"FROM {base_image}") dir_name = context.name diff --git a/src/prefect/utilities/engine.py b/src/prefect/utilities/engine.py index fd94104d3773..7551c0d5091f 100644 --- a/src/prefect/utilities/engine.py +++ b/src/prefect/utilities/engine.py @@ -1,40 +1,39 @@ import asyncio import contextlib -import inspect import os import signal import time +from collections.abc import Awaitable, Callable, Generator from functools import partial +from logging import Logger from typing import ( TYPE_CHECKING, Any, - Callable, - Dict, - Iterable, + NoReturn, Optional, - Set, TypeVar, Union, + cast, ) -from uuid import UUID, uuid4 +from uuid import UUID import anyio -from typing_extensions import Literal +from typing_extensions import TypeIs import prefect import prefect.context +import prefect.exceptions import prefect.plugins from prefect._internal.concurrency.cancellation import get_deadline from prefect.client.schemas import OrchestrationResult, TaskRun -from prefect.client.schemas.objects import ( - StateType, - TaskRunInput, - TaskRunResult, -) -from prefect.client.schemas.responses import SetStateStatus -from prefect.context import ( - FlowRunContext, +from prefect.client.schemas.objects import TaskRunInput, TaskRunResult +from prefect.client.schemas.responses import ( + SetStateStatus, + StateAbortDetails, + StateRejectDetails, + StateWaitDetails, ) +from prefect.context import FlowRunContext from prefect.events import Event, emit_event from prefect.exceptions import ( Pause, @@ -44,37 +43,26 @@ ) from prefect.flows import Flow from prefect.futures import PrefectFuture -from prefect.logging.loggers import ( - get_logger, - task_run_logger, -) +from prefect.logging.loggers import get_logger from prefect.results import BaseResult, ResultRecord, should_persist_result -from prefect.settings import ( - PREFECT_LOGGING_LOG_PRINTS, -) -from prefect.states import ( - State, - get_state_exception, -) +from prefect.settings import PREFECT_LOGGING_LOG_PRINTS +from prefect.states import State from prefect.tasks import Task from prefect.utilities.annotations import allow_failure, quote -from prefect.utilities.asyncutils import ( - gather, - run_coro_as_sync, -) +from prefect.utilities.asyncutils import run_coro_as_sync from prefect.utilities.collections import StopVisiting, visit_collection from prefect.utilities.text 
import truncated_to if TYPE_CHECKING: from prefect.client.orchestration import PrefectClient, SyncPrefectClient -API_HEALTHCHECKS = {} -UNTRACKABLE_TYPES = {bool, type(None), type(...), type(NotImplemented)} -engine_logger = get_logger("engine") +API_HEALTHCHECKS: dict[str, float] = {} +UNTRACKABLE_TYPES: set[type[Any]] = {bool, type(None), type(...), type(NotImplemented)} +engine_logger: Logger = get_logger("engine") T = TypeVar("T") -async def collect_task_run_inputs(expr: Any, max_depth: int = -1) -> Set[TaskRunInput]: +async def collect_task_run_inputs(expr: Any, max_depth: int = -1) -> set[TaskRunInput]: """ This function recurses through an expression to generate a set of any discernible task run inputs it finds in the data structure. It produces a set of all inputs @@ -87,14 +75,11 @@ async def collect_task_run_inputs(expr: Any, max_depth: int = -1) -> Set[TaskRun """ # TODO: This function needs to be updated to detect parameters and constants - inputs = set() - futures = set() + inputs: set[TaskRunInput] = set() - def add_futures_and_states_to_inputs(obj): + def add_futures_and_states_to_inputs(obj: Any) -> None: if isinstance(obj, PrefectFuture): - # We need to wait for futures to be submitted before we can get the task - # run id but we want to do so asynchronously - futures.add(obj) + inputs.add(TaskRunResult(id=obj.task_run_id)) elif isinstance(obj, State): if obj.state_details.task_run_id: inputs.add(TaskRunResult(id=obj.state_details.task_run_id)) @@ -113,16 +98,12 @@ def add_futures_and_states_to_inputs(obj): max_depth=max_depth, ) - await asyncio.gather(*[future._wait_for_submission() for future in futures]) - for future in futures: - inputs.add(TaskRunResult(id=future.task_run.id)) - return inputs def collect_task_run_inputs_sync( expr: Any, future_cls: Any = PrefectFuture, max_depth: int = -1 -) -> Set[TaskRunInput]: +) -> set[TaskRunInput]: """ This function recurses through an expression to generate a set of any discernible task run inputs it finds in the data structure. It produces a set of all inputs @@ -135,9 +116,9 @@ def collect_task_run_inputs_sync( """ # TODO: This function needs to be updated to detect parameters and constants - inputs = set() + inputs: set[TaskRunInput] = set() - def add_futures_and_states_to_inputs(obj): + def add_futures_and_states_to_inputs(obj: Any) -> None: if isinstance(obj, future_cls) and hasattr(obj, "task_run_id"): inputs.add(TaskRunResult(id=obj.task_run_id)) elif isinstance(obj, State): @@ -161,58 +142,9 @@ def add_futures_and_states_to_inputs(obj): return inputs -async def wait_for_task_runs_and_report_crashes( - task_run_futures: Iterable[PrefectFuture], client: "PrefectClient" -) -> Literal[True]: - crash_exceptions = [] - - # Gather states concurrently first - states = await gather(*(future._wait for future in task_run_futures)) - - for future, state in zip(task_run_futures, states): - logger = task_run_logger(future.task_run) - - if not state.type == StateType.CRASHED: - continue - - # We use this utility instead of `state.result` for type checking - exception = await get_state_exception(state) - - task_run = await client.read_task_run(future.task_run.id) - if not task_run.state.is_crashed(): - logger.info(f"Crash detected! 
{state.message}") - logger.debug("Crash details:", exc_info=exception) - - # Update the state of the task run - result = await client.set_task_run_state( - task_run_id=future.task_run.id, state=state, force=True - ) - if result.status == SetStateStatus.ACCEPT: - engine_logger.debug( - f"Reported crashed task run {future.name!r} successfully." - ) - else: - engine_logger.warning( - f"Failed to report crashed task run {future.name!r}. " - f"Orchestrator did not accept state: {result!r}" - ) - else: - # Populate the state details on the local state - future._final_state.state_details = task_run.state.state_details - - crash_exceptions.append(exception) - - # Now that we've finished reporting crashed tasks, reraise any exit exceptions - for exception in crash_exceptions: - if isinstance(exception, (KeyboardInterrupt, SystemExit)): - raise exception - - return True - - @contextlib.contextmanager -def capture_sigterm(): - def cancel_flow_run(*args): +def capture_sigterm() -> Generator[None, Any, None]: + def cancel_flow_run(*args: object) -> NoReturn: raise TerminationSignal(signal=signal.SIGTERM) original_term_handler = None @@ -241,8 +173,8 @@ def cancel_flow_run(*args): async def resolve_inputs( - parameters: Dict[str, Any], return_data: bool = True, max_depth: int = -1 -) -> Dict[str, Any]: + parameters: dict[str, Any], return_data: bool = True, max_depth: int = -1 +) -> dict[str, Any]: """ Resolve any `Quote`, `PrefectFuture`, or `State` types nested in parameters into data. @@ -254,24 +186,26 @@ async def resolve_inputs( UpstreamTaskError: If any of the upstream states are not `COMPLETED` """ - futures = set() - states = set() - result_by_state = {} + futures: set[PrefectFuture[Any]] = set() + states: set[State[Any]] = set() + result_by_state: dict[State[Any], Any] = {} if not parameters: return {} - def collect_futures_and_states(expr, context): + def collect_futures_and_states(expr: Any, context: dict[str, Any]) -> Any: # Expressions inside quotes should not be traversed if isinstance(context.get("annotation"), quote): raise StopVisiting() if isinstance(expr, PrefectFuture): - futures.add(expr) + fut: PrefectFuture[Any] = expr + futures.add(fut) if isinstance(expr, State): - states.add(expr) + state: State[Any] = expr + states.add(state) - return expr + return cast(Any, expr) visit_collection( parameters, @@ -281,32 +215,27 @@ def collect_futures_and_states(expr, context): context={}, ) - # Wait for all futures so we do not block when we retrieve the state in `resolve_input` - states.update(await asyncio.gather(*[future._wait() for future in futures])) - # Only retrieve the result if requested as it may be expensive if return_data: finished_states = [state for state in states if state.is_final()] - state_results = await asyncio.gather( - *[ - state.result(raise_on_failure=False, fetch=True) - for state in finished_states - ] - ) + state_results = [ + state.result(raise_on_failure=False, fetch=True) + for state in finished_states + ] for state, result in zip(finished_states, state_results): result_by_state[state] = result - def resolve_input(expr, context): - state = None + def resolve_input(expr: Any, context: dict[str, Any]) -> Any: + state: Optional[State[Any]] = None # Expressions inside quotes should not be modified if isinstance(context.get("annotation"), quote): raise StopVisiting() if isinstance(expr, PrefectFuture): - state = expr._final_state + state = expr.state elif isinstance(expr, State): state = expr else: @@ -329,7 +258,7 @@ def resolve_input(expr, context): return 
result_by_state.get(state) - resolved_parameters = {} + resolved_parameters: dict[str, Any] = {} for parameter, value in parameters.items(): try: resolved_parameters[parameter] = visit_collection( @@ -353,13 +282,21 @@ def resolve_input(expr, context): return resolved_parameters +def _is_base_result(data: Any) -> TypeIs[BaseResult[Any]]: + return isinstance(data, BaseResult) + + +def _is_result_record(data: Any) -> TypeIs[ResultRecord[Any]]: + return isinstance(data, ResultRecord) + + async def propose_state( client: "PrefectClient", - state: State[object], + state: State[Any], force: bool = False, task_run_id: Optional[UUID] = None, flow_run_id: Optional[UUID] = None, -) -> State[object]: +) -> State[Any]: """ Propose a new state for a flow run or task run, invoking Prefect orchestration logic. @@ -396,11 +333,12 @@ async def propose_state( # Handle task and sub-flow tracing if state.is_final(): - if isinstance(state.data, BaseResult) and state.data.has_cached_object(): + result: Any + if _is_base_result(state.data) and state.data.has_cached_object(): # Avoid fetching the result unless it is cached, otherwise we defeat # the purpose of disabling `cache_result_in_memory` - result = await state.result(raise_on_failure=False, fetch=True) - elif isinstance(state.data, ResultRecord): + result = state.result(raise_on_failure=False, fetch=True) + elif _is_result_record(state.data): result = state.data.result else: result = state.data @@ -409,9 +347,13 @@ async def propose_state( # Handle repeated WAITs in a loop instead of recursively, to avoid # reaching max recursion depth in extreme cases. - async def set_state_and_handle_waits(set_state_func) -> OrchestrationResult: + async def set_state_and_handle_waits( + set_state_func: Callable[[], Awaitable[OrchestrationResult[Any]]], + ) -> OrchestrationResult[Any]: response = await set_state_func() while response.status == SetStateStatus.WAIT: + if TYPE_CHECKING: + assert isinstance(response.details, StateWaitDetails) engine_logger.debug( f"Received wait instruction for {response.details.delay_seconds}s: " f"{response.details.reason}" @@ -436,6 +378,8 @@ async def set_state_and_handle_waits(set_state_func) -> OrchestrationResult: # Parse the response to return the new state if response.status == SetStateStatus.ACCEPT: # Update the state with the details if provided + if TYPE_CHECKING: + assert response.state is not None state.id = response.state.id state.timestamp = response.state.timestamp if response.state.state_details: @@ -443,9 +387,16 @@ async def set_state_and_handle_waits(set_state_func) -> OrchestrationResult: return state elif response.status == SetStateStatus.ABORT: + if TYPE_CHECKING: + assert isinstance(response.details, StateAbortDetails) + raise prefect.exceptions.Abort(response.details.reason) elif response.status == SetStateStatus.REJECT: + if TYPE_CHECKING: + assert response.state is not None + assert isinstance(response.details, StateRejectDetails) + if response.state.is_paused(): raise Pause(response.details.reason, state=response.state) return response.state @@ -458,11 +409,11 @@ async def set_state_and_handle_waits(set_state_func) -> OrchestrationResult: def propose_state_sync( client: "SyncPrefectClient", - state: State[object], + state: State[Any], force: bool = False, task_run_id: Optional[UUID] = None, flow_run_id: Optional[UUID] = None, -) -> State[object]: +) -> State[Any]: """ Propose a new state for a flow run or task run, invoking Prefect orchestration logic. 
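
For readers unfamiliar with the `TypeIs` narrowing used by the new `_is_base_result` and `_is_result_record` helpers above, a minimal standalone sketch follows. It assumes a `typing_extensions` version that exports `TypeIs`; the `Record` class and `describe` function are hypothetical stand-ins, not Prefect types.

from typing_extensions import TypeIs


class Record:
    """A stand-in for BaseResult/ResultRecord; purely illustrative."""

    value: int = 0


def is_record(obj: object) -> TypeIs[Record]:
    # At runtime this is a plain isinstance check; the TypeIs return type
    # lets static checkers narrow `obj` to Record when this returns True
    # and exclude Record when it returns False.
    return isinstance(obj, Record)


def describe(obj: object) -> str:
    if is_record(obj):
        return f"record with value {obj.value}"  # obj is narrowed to Record here
    return f"non-record of type {type(obj).__name__}"


print(describe(Record()))
print(describe(42))
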
@@ -499,13 +450,13 @@ def propose_state_sync( # Handle task and sub-flow tracing if state.is_final(): - if isinstance(state.data, BaseResult) and state.data.has_cached_object(): + if _is_base_result(state.data) and state.data.has_cached_object(): # Avoid fetching the result unless it is cached, otherwise we defeat # the purpose of disabling `cache_result_in_memory` result = state.result(raise_on_failure=False, fetch=True) if asyncio.iscoroutine(result): result = run_coro_as_sync(result) - elif isinstance(state.data, ResultRecord): + elif _is_result_record(state.data): result = state.data.result else: result = state.data @@ -514,9 +465,13 @@ def propose_state_sync( # Handle repeated WAITs in a loop instead of recursively, to avoid # reaching max recursion depth in extreme cases. - def set_state_and_handle_waits(set_state_func) -> OrchestrationResult: + def set_state_and_handle_waits( + set_state_func: Callable[[], OrchestrationResult[Any]], + ) -> OrchestrationResult[Any]: response = set_state_func() while response.status == SetStateStatus.WAIT: + if TYPE_CHECKING: + assert isinstance(response.details, StateWaitDetails) engine_logger.debug( f"Received wait instruction for {response.details.delay_seconds}s: " f"{response.details.reason}" @@ -540,6 +495,8 @@ def set_state_and_handle_waits(set_state_func) -> OrchestrationResult: # Parse the response to return the new state if response.status == SetStateStatus.ACCEPT: + if TYPE_CHECKING: + assert response.state is not None # Update the state with the details if provided state.id = response.state.id state.timestamp = response.state.timestamp @@ -548,9 +505,14 @@ def set_state_and_handle_waits(set_state_func) -> OrchestrationResult: return state elif response.status == SetStateStatus.ABORT: + if TYPE_CHECKING: + assert isinstance(response.details, StateAbortDetails) raise prefect.exceptions.Abort(response.details.reason) elif response.status == SetStateStatus.REJECT: + if TYPE_CHECKING: + assert response.state is not None + assert isinstance(response.details, StateRejectDetails) if response.state.is_paused(): raise Pause(response.details.reason, state=response.state) return response.state @@ -561,26 +523,6 @@ def set_state_and_handle_waits(set_state_func) -> OrchestrationResult: ) -def _dynamic_key_for_task_run( - context: FlowRunContext, task: Task, stable: bool = True -) -> Union[int, str]: - if ( - stable is False or context.detached - ): # this task is running on remote infrastructure - return str(uuid4()) - elif context.flow_run is None: # this is an autonomous task run - context.task_run_dynamic_keys[task.task_key] = getattr( - task, "dynamic_key", str(uuid4()) - ) - - elif task.task_key not in context.task_run_dynamic_keys: - context.task_run_dynamic_keys[task.task_key] = 0 - else: - context.task_run_dynamic_keys[task.task_key] += 1 - - return context.task_run_dynamic_keys[task.task_key] - - def get_state_for_result(obj: Any) -> Optional[State]: """ Get the state related to a result object. @@ -631,28 +573,29 @@ def link_state_to_result(state: State, result: Any) -> None: # Holding large user objects in memory can cause memory bloat linked_state = state.model_copy(update={"data": None}) - def link_if_trackable(obj: Any) -> None: - """Track connection between a task run result and its associated state if it has a unique ID. + if flow_run_context: - We cannot track booleans, Ellipsis, None, NotImplemented, or the integers from -5 to 256 - because they are singletons. 
+ def link_if_trackable(obj: Any) -> None: + """Track connection between a task run result and its associated state if it has a unique ID. - This function will mutate the State if the object is an untrackable type by setting the value - for `State.state_details.untrackable_result` to `True`. + We cannot track booleans, Ellipsis, None, NotImplemented, or the integers from -5 to 256 + because they are singletons. - """ - if (type(obj) in UNTRACKABLE_TYPES) or ( - isinstance(obj, int) and (-5 <= obj <= 256) - ): - state.state_details.untrackable_result = True - return - flow_run_context.task_run_results[id(obj)] = linked_state + This function will mutate the State if the object is an untrackable type by setting the value + for `State.state_details.untrackable_result` to `True`. + + """ + if (type(obj) in UNTRACKABLE_TYPES) or ( + isinstance(obj, int) and (-5 <= obj <= 256) + ): + state.state_details.untrackable_result = True + return + flow_run_context.task_run_results[id(obj)] = linked_state - if flow_run_context: visit_collection(expr=result, visit_fn=link_if_trackable, max_depth=1) -def should_log_prints(flow_or_task: Union[Flow, Task]) -> bool: +def should_log_prints(flow_or_task: Union["Flow[..., Any]", "Task[..., Any]"]) -> bool: flow_run_context = FlowRunContext.get() if flow_or_task.log_prints is None: @@ -664,63 +607,7 @@ def should_log_prints(flow_or_task: Union[Flow, Task]) -> bool: return flow_or_task.log_prints -def _resolve_custom_flow_run_name(flow: Flow, parameters: Dict[str, Any]) -> str: - if callable(flow.flow_run_name): - flow_run_name = flow.flow_run_name() - if not isinstance(flow_run_name, str): - raise TypeError( - f"Callable {flow.flow_run_name} for 'flow_run_name' returned type" - f" {type(flow_run_name).__name__} but a string is required." - ) - elif isinstance(flow.flow_run_name, str): - flow_run_name = flow.flow_run_name.format(**parameters) - else: - raise TypeError( - "Expected string or callable for 'flow_run_name'; got" - f" {type(flow.flow_run_name).__name__} instead." - ) - - return flow_run_name - - -def _resolve_custom_task_run_name(task: Task, parameters: Dict[str, Any]) -> str: - if callable(task.task_run_name): - sig = inspect.signature(task.task_run_name) - - # If the callable accepts a 'parameters' kwarg, pass the entire parameters dict - if "parameters" in sig.parameters: - task_run_name = task.task_run_name(parameters=parameters) - else: - # If it doesn't expect parameters, call it without arguments - task_run_name = task.task_run_name() - - if not isinstance(task_run_name, str): - raise TypeError( - f"Callable {task.task_run_name} for 'task_run_name' returned type" - f" {type(task_run_name).__name__} but a string is required." - ) - elif isinstance(task.task_run_name, str): - task_run_name = task.task_run_name.format(**parameters) - else: - raise TypeError( - "Expected string or callable for 'task_run_name'; got" - f" {type(task.task_run_name).__name__} instead." 
- ) - - return task_run_name - - -def _get_hook_name(hook: Callable) -> str: - return ( - hook.__name__ - if hasattr(hook, "__name__") - else ( - hook.func.__name__ if isinstance(hook, partial) else hook.__class__.__name__ - ) - ) - - -async def check_api_reachable(client: "PrefectClient", fail_message: str): +async def check_api_reachable(client: "PrefectClient", fail_message: str) -> None: # Do not perform a healthcheck if it exists and is not expired api_url = str(client.api_url) if api_url in API_HEALTHCHECKS: @@ -740,15 +627,15 @@ async def check_api_reachable(client: "PrefectClient", fail_message: str): def emit_task_run_state_change_event( task_run: TaskRun, - initial_state: Optional[State], - validated_state: State, + initial_state: Optional[State[Any]], + validated_state: State[Any], follows: Optional[Event] = None, -) -> Event: +) -> Optional[Event]: state_message_truncation_length = 100_000 - if isinstance(validated_state.data, ResultRecord) and should_persist_result(): + if _is_result_record(validated_state.data) and should_persist_result(): data = validated_state.data.metadata.model_dump(mode="json") - elif isinstance(validated_state.data, BaseResult): + elif _is_base_result(validated_state.data): data = validated_state.data.model_dump(mode="json") else: data = None @@ -830,20 +717,20 @@ def emit_task_run_state_change_event( ) -def resolve_to_final_result(expr, context): +def resolve_to_final_result(expr: Any, context: dict[str, Any]) -> Any: """ Resolve any `PrefectFuture`, or `State` types nested in parameters into data. Designed to be use with `visit_collection`. """ - state = None + state: Optional[State[Any]] = None # Expressions inside quotes should not be modified if isinstance(context.get("annotation"), quote): raise StopVisiting() if isinstance(expr, PrefectFuture): - upstream_task_run = context.get("current_task_run") - upstream_task = context.get("current_task") + upstream_task_run: Optional[TaskRun] = context.get("current_task_run") + upstream_task: Optional["Task[..., Any]"] = context.get("current_task") if ( upstream_task and upstream_task_run @@ -877,15 +764,15 @@ def resolve_to_final_result(expr, context): " 'COMPLETED' state." ) - _result = state.result(raise_on_failure=False, fetch=True) - if asyncio.iscoroutine(_result): - _result = run_coro_as_sync(_result) - return _result + result = state.result(raise_on_failure=False, fetch=True) + if asyncio.iscoroutine(result): + result = run_coro_as_sync(result) + return result def resolve_inputs_sync( - parameters: Dict[str, Any], return_data: bool = True, max_depth: int = -1 -) -> Dict[str, Any]: + parameters: dict[str, Any], return_data: bool = True, max_depth: int = -1 +) -> dict[str, Any]: """ Resolve any `Quote`, `PrefectFuture`, or `State` types nested in parameters into data. 
@@ -900,7 +787,7 @@ def resolve_inputs_sync( if not parameters: return {} - resolved_parameters = {} + resolved_parameters: dict[str, Any] = {} for parameter, value in parameters.items(): try: resolved_parameters[parameter] = visit_collection( diff --git a/src/prefect/utilities/filesystem.py b/src/prefect/utilities/filesystem.py index 68a87a410039..1301a1178c91 100644 --- a/src/prefect/utilities/filesystem.py +++ b/src/prefect/utilities/filesystem.py @@ -5,14 +5,16 @@ import os import pathlib import threading +from collections.abc import Iterable from contextlib import contextmanager from pathlib import Path, PureWindowsPath -from typing import Optional, Union, cast +from typing import AnyStr, Optional, Union, cast -import fsspec +# fsspec has no stubs, see https://github.com/fsspec/filesystem_spec/issues/625 +import fsspec # type: ignore import pathspec -from fsspec.core import OpenFile -from fsspec.implementations.local import LocalFileSystem +from fsspec.core import OpenFile # type: ignore +from fsspec.implementations.local import LocalFileSystem # type: ignore import prefect @@ -33,8 +35,10 @@ def create_default_ignore_file(path: str) -> bool: def filter_files( - root: str = ".", ignore_patterns: Optional[list] = None, include_dirs: bool = True -) -> set: + root: str = ".", + ignore_patterns: Optional[Iterable[AnyStr]] = None, + include_dirs: bool = True, +) -> set[str]: """ This function accepts a root directory path and a list of file patterns to ignore, and returns a list of files that excludes those that should be ignored. @@ -51,7 +55,7 @@ def filter_files( return included_files -chdir_lock = threading.Lock() +chdir_lock: threading.Lock = threading.Lock() def _normalize_path(path: Union[str, Path]) -> str: @@ -103,33 +107,32 @@ def tmpchdir(path: str): def filename(path: str) -> str: """Extract the file name from a path with remote file system support""" try: - of: OpenFile = cast(OpenFile, fsspec.open(path)) - sep = of.fs.sep + of: OpenFile = cast(OpenFile, fsspec.open(path)) # type: ignore # no typing stubs available + sep = cast(str, of.fs.sep) # type: ignore # no typing stubs available except (ImportError, AttributeError): sep = "\\" if "\\" in path else "/" return path.split(sep)[-1] -def is_local_path(path: Union[str, pathlib.Path, OpenFile]): +def is_local_path(path: Union[str, pathlib.Path, OpenFile]) -> bool: """Check if the given path points to a local or remote file system""" if isinstance(path, str): try: - of = fsspec.open(path) + of = cast(OpenFile, fsspec.open(path)) # type: ignore # no typing stubs available except ImportError: # The path is a remote file system that uses a lib that is not installed return False elif isinstance(path, pathlib.Path): return True - elif isinstance(path, OpenFile): - of = path else: - raise TypeError(f"Invalid path of type {type(path).__name__!r}") + of = path return isinstance(of.fs, LocalFileSystem) def to_display_path( - path: Union[pathlib.Path, str], relative_to: Union[pathlib.Path, str] = None + path: Union[pathlib.Path, str], + relative_to: Optional[Union[pathlib.Path, str]] = None, ) -> str: """ Convert a path to a displayable path. 
The absolute path or relative path to the diff --git a/src/prefect/utilities/hashing.py b/src/prefect/utilities/hashing.py index 2724cb38c3f4..f131e4898314 100644 --- a/src/prefect/utilities/hashing.py +++ b/src/prefect/utilities/hashing.py @@ -1,21 +1,17 @@ import hashlib -import sys from functools import partial from pathlib import Path -from typing import Optional, Union +from typing import Any, Callable, Optional, Union -import cloudpickle +import cloudpickle # type: ignore # no stubs available from prefect.exceptions import HashError from prefect.serializers import JSONSerializer -if sys.version_info[:2] >= (3, 9): - _md5 = partial(hashlib.md5, usedforsecurity=False) -else: - _md5 = hashlib.md5 +_md5 = partial(hashlib.md5, usedforsecurity=False) -def stable_hash(*args: Union[str, bytes], hash_algo=_md5) -> str: +def stable_hash(*args: Union[str, bytes], hash_algo: Callable[..., Any] = _md5) -> str: """Given some arguments, produces a stable 64-bit hash of their contents. Supports bytes and strings. Strings will be UTF-8 encoded. @@ -35,7 +31,7 @@ def stable_hash(*args: Union[str, bytes], hash_algo=_md5) -> str: return h.hexdigest() -def file_hash(path: str, hash_algo=_md5) -> str: +def file_hash(path: str, hash_algo: Callable[..., Any] = _md5) -> str: """Given a path to a file, produces a stable hash of the file contents. Args: @@ -50,7 +46,10 @@ def file_hash(path: str, hash_algo=_md5) -> str: def hash_objects( - *args, hash_algo=_md5, raise_on_failure: bool = False, **kwargs + *args: Any, + hash_algo: Callable[..., Any] = _md5, + raise_on_failure: bool = False, + **kwargs: Any, ) -> Optional[str]: """ Attempt to hash objects by dumping to JSON or serializing with cloudpickle. @@ -77,7 +76,7 @@ def hash_objects( json_error = str(e) try: - return stable_hash(cloudpickle.dumps((args, kwargs)), hash_algo=hash_algo) + return stable_hash(cloudpickle.dumps((args, kwargs)), hash_algo=hash_algo) # type: ignore[reportUnknownMemberType] except Exception as e: pickle_error = str(e) diff --git a/src/prefect/utilities/importtools.py b/src/prefect/utilities/importtools.py index 7cbfce9d51de..d22deee762cb 100644 --- a/src/prefect/utilities/importtools.py +++ b/src/prefect/utilities/importtools.py @@ -5,20 +5,23 @@ import runpy import sys import warnings +from collections.abc import Iterable, Sequence from importlib.abc import Loader, MetaPathFinder from importlib.machinery import ModuleSpec +from io import TextIOWrapper +from logging import Logger from pathlib import Path from tempfile import NamedTemporaryFile from types import ModuleType -from typing import Any, Callable, Dict, Iterable, NamedTuple, Optional, Union +from typing import TYPE_CHECKING, Any, Callable, NamedTuple, Optional, Union -import fsspec +import fsspec # type: ignore # no typing stubs available from prefect.exceptions import ScriptError from prefect.logging.loggers import get_logger from prefect.utilities.filesystem import filename, is_local_path, tmpchdir -logger = get_logger(__name__) +logger: Logger = get_logger(__name__) def to_qualified_name(obj: Any) -> str: @@ -70,7 +73,9 @@ def from_qualified_name(name: str) -> Any: return getattr(module, attr_name) -def objects_from_script(path: str, text: Union[str, bytes] = None) -> Dict[str, Any]: +def objects_from_script( + path: str, text: Optional[Union[str, bytes]] = None +) -> dict[str, Any]: """ Run a python script and return all the global variables @@ -97,7 +102,7 @@ def objects_from_script(path: str, text: Union[str, bytes] = None) -> Dict[str, ScriptError: if the script 
raises an exception during execution """ - def run_script(run_path: str): + def run_script(run_path: str) -> dict[str, Any]: # Cast to an absolute path before changing directories to ensure relative paths # are not broken abs_run_path = os.path.abspath(run_path) @@ -120,7 +125,9 @@ def run_script(run_path: str): else: if not is_local_path(path): # Remote paths need to be local to run - with fsspec.open(path) as f: + with fsspec.open(path) as f: # type: ignore # no typing stubs available + if TYPE_CHECKING: + assert isinstance(f, TextIOWrapper) contents = f.read() return objects_from_script(path, contents) else: @@ -156,6 +163,10 @@ def load_script_as_module(path: str) -> ModuleType: # Support explicit relative imports i.e. `from .foo import bar` submodule_search_locations=[parent_path, working_directory], ) + if TYPE_CHECKING: + assert spec is not None + assert spec.loader is not None + module = importlib.util.module_from_spec(spec) sys.modules["__prefect_loader__"] = module @@ -189,7 +200,7 @@ def load_module(module_name: str) -> ModuleType: sys.path.remove(working_directory) -def import_object(import_path: str): +def import_object(import_path: str) -> Any: """ Load an object from an import path. @@ -228,22 +239,20 @@ class DelayedImportErrorModule(ModuleType): [1]: https://github.com/scientific-python/lazy_loader """ - def __init__(self, error_message, help_message, *args, **kwargs): + def __init__(self, error_message: str, help_message: Optional[str] = None) -> None: self.__error_message = error_message - self.__help_message = ( - help_message or "Import errors for this module are only reported when used." - ) - super().__init__(*args, **kwargs) + if not help_message: + help_message = "Import errors for this module are only reported when used." + super().__init__("DelayedImportErrorModule", help_message) - def __getattr__(self, attr): - if attr in ("__class__", "__file__", "__help_message"): - super().__getattr__(attr) - else: - raise ModuleNotFoundError(self.__error_message) + def __getattr__(self, attr: str) -> Any: + if attr == "__file__": # not set but should result in an attribute error? + return super().__getattr__(attr) + raise ModuleNotFoundError(self.__error_message) def lazy_import( - name: str, error_on_import: bool = False, help_message: str = "" + name: str, error_on_import: bool = False, help_message: Optional[str] = None ) -> ModuleType: """ Create a lazily-imported module to use in place of the module of the given name. @@ -282,13 +291,13 @@ def lazy_import( if error_on_import: raise ModuleNotFoundError(import_error_message) - return DelayedImportErrorModule( - import_error_message, help_message, "DelayedImportErrorModule" - ) + return DelayedImportErrorModule(import_error_message, help_message) module = importlib.util.module_from_spec(spec) sys.modules[name] = module + if TYPE_CHECKING: + assert spec.loader is not None loader = importlib.util.LazyLoader(spec.loader) loader.exec_module(module) @@ -317,13 +326,13 @@ def __init__(self, aliases: Iterable[AliasedModuleDefinition]): Aliases apply to all modules nested within an alias. """ - self.aliases = aliases + self.aliases: list[AliasedModuleDefinition] = list(aliases) def find_spec( self, fullname: str, - path=None, - target=None, + path: Optional[Sequence[str]] = None, + target: Optional[ModuleType] = None, ) -> Optional[ModuleSpec]: """ The fullname is the imported path, e.g. "foo.bar". 
If there is an alias "phi" @@ -334,6 +343,7 @@ def find_spec( if fullname.startswith(alias): # Retrieve the spec of the real module real_spec = importlib.util.find_spec(fullname.replace(alias, real, 1)) + assert real_spec is not None # Create a new spec for the alias return ModuleSpec( fullname, @@ -354,7 +364,7 @@ def __init__( self.callback = callback self.real_spec = real_spec - def exec_module(self, _: ModuleType) -> None: + def exec_module(self, module: ModuleType) -> None: root_module = importlib.import_module(self.real_spec.name) if self.callback is not None: self.callback(self.alias) @@ -363,7 +373,7 @@ def exec_module(self, _: ModuleType) -> None: def safe_load_namespace( source_code: str, filepath: Optional[str] = None -) -> Dict[str, Any]: +) -> dict[str, Any]: """ Safely load a namespace from source code, optionally handling relative imports. @@ -380,7 +390,7 @@ def safe_load_namespace( """ parsed_code = ast.parse(source_code) - namespace: Dict[str, Any] = {"__name__": "prefect_safe_namespace_loader"} + namespace: dict[str, Any] = {"__name__": "prefect_safe_namespace_loader"} # Remove the body of the if __name__ == "__main__": block new_body = [node for node in parsed_code.body if not _is_main_block(node)] @@ -427,6 +437,9 @@ def safe_load_namespace( try: if node.level > 0: # For relative imports, use the parent package to inform the import + if TYPE_CHECKING: + assert temp_module is not None + assert temp_module.__package__ is not None package_parts = temp_module.__package__.split(".") if len(package_parts) < node.level: raise ImportError( diff --git a/src/prefect/utilities/math.py b/src/prefect/utilities/math.py index 2ece5eb85fa3..9daca1c74186 100644 --- a/src/prefect/utilities/math.py +++ b/src/prefect/utilities/math.py @@ -2,7 +2,9 @@ import random -def poisson_interval(average_interval, lower=0, upper=1): +def poisson_interval( + average_interval: float, lower: float = 0, upper: float = 1 +) -> float: """ Generates an "inter-arrival time" for a Poisson process. @@ -16,12 +18,12 @@ def poisson_interval(average_interval, lower=0, upper=1): return -math.log(max(1 - random.uniform(lower, upper), 1e-10)) * average_interval -def exponential_cdf(x, average_interval): +def exponential_cdf(x: float, average_interval: float) -> float: ld = 1 / average_interval return 1 - math.exp(-ld * x) -def lower_clamp_multiple(k): +def lower_clamp_multiple(k: float) -> float: """ Computes a lower clamp multiple that can be used to bound a random variate drawn from an exponential distribution. @@ -38,7 +40,9 @@ def lower_clamp_multiple(k): return math.log(max(2**k / (2**k - 1), 1e-10), 2) -def clamped_poisson_interval(average_interval, clamping_factor=0.3): +def clamped_poisson_interval( + average_interval: float, clamping_factor: float = 0.3 +) -> float: """ Bounds Poisson "inter-arrival times" to a range defined by the clamping factor. @@ -57,7 +61,7 @@ def clamped_poisson_interval(average_interval, clamping_factor=0.3): return poisson_interval(average_interval, lower_rv, upper_rv) -def bounded_poisson_interval(lower_bound, upper_bound): +def bounded_poisson_interval(lower_bound: float, upper_bound: float) -> float: """ Bounds Poisson "inter-arrival times" to a range. 
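
As a usage note on the newly annotated jitter helpers, here is a small sketch. It assumes only the `prefect.utilities.math` functions shown in the hunk above; the numeric values are illustrative.

from prefect.utilities.math import (
    bounded_poisson_interval,
    clamped_poisson_interval,
    poisson_interval,
)

average = 10.0  # desired mean inter-arrival time, in seconds

# Unbounded Poisson inter-arrival time with mean ~10s.
print(f"poisson: {poisson_interval(average):.2f}s")

# Same distribution, but kept within a band around the mean by the clamping factor.
print(f"clamped: {clamped_poisson_interval(average, clamping_factor=0.3):.2f}s")

# Inter-arrival time constrained to an explicit [lower, upper] range.
print(f"bounded: {bounded_poisson_interval(5.0, 15.0):.2f}s")
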
diff --git a/src/prefect/utilities/names.py b/src/prefect/utilities/names.py index 6be2b93ab707..baeb2b1b6475 100644 --- a/src/prefect/utilities/names.py +++ b/src/prefect/utilities/names.py @@ -1,6 +1,6 @@ from typing import Any -import coolname +import coolname # type: ignore # the version after coolname 2.2.0 should have stubs. OBFUSCATED_PREFIX = "****" @@ -42,7 +42,7 @@ def generate_slug(n_words: int) -> str: return "-".join(words) -def obfuscate(s: Any, show_tail=False) -> str: +def obfuscate(s: Any, show_tail: bool = False) -> str: """ Obfuscates any data type's string representation. See `obfuscate_string`. """ @@ -52,7 +52,7 @@ def obfuscate(s: Any, show_tail=False) -> str: return obfuscate_string(str(s), show_tail=show_tail) -def obfuscate_string(s: str, show_tail=False) -> str: +def obfuscate_string(s: str, show_tail: bool = False) -> str: """ Obfuscates a string by returning a new string of 8 characters. If the input string is longer than 10 characters and show_tail is True, then up to 4 of diff --git a/src/prefect/utilities/processutils.py b/src/prefect/utilities/processutils.py index aeb1a37e83ed..7951fac4cae5 100644 --- a/src/prefect/utilities/processutils.py +++ b/src/prefect/utilities/processutils.py @@ -4,28 +4,35 @@ import subprocess import sys import threading +from collections.abc import AsyncGenerator, Mapping from contextlib import asynccontextmanager from dataclasses import dataclass from functools import partial +from types import FrameType from typing import ( IO, + TYPE_CHECKING, Any, + AnyStr, Callable, - List, - Mapping, Optional, - Sequence, TextIO, - Tuple, Union, + cast, + overload, ) import anyio import anyio.abc from anyio.streams.text import TextReceiveStream, TextSendStream +from typing_extensions import TypeAlias, TypeVar -TextSink = Union[anyio.AsyncFile, TextIO, TextSendStream] +if TYPE_CHECKING: + from _typeshed import StrOrBytesPath +TextSink: TypeAlias = Union[anyio.AsyncFile[AnyStr], TextIO, TextSendStream] +PrintFn: TypeAlias = Callable[[str], object] +T = TypeVar("T", infer_variance=True) if sys.platform == "win32": from ctypes import WINFUNCTYPE, c_int, c_uint, windll @@ -33,7 +40,7 @@ _windows_process_group_pids = set() @WINFUNCTYPE(c_int, c_uint) - def _win32_ctrl_handler(dwCtrlType): + def _win32_ctrl_handler(dwCtrlType: object) -> int: """ A callback function for handling CTRL events cleanly on Windows. When called, this function will terminate all running win32 subprocesses the current @@ -125,16 +132,16 @@ def stderr(self) -> Union[anyio.abc.ByteReceiveStream, None]: return self._stderr async def _open_anyio_process( - command: Union[str, bytes, Sequence[Union[str, bytes]]], + command: Union[str, bytes, list["StrOrBytesPath"]], *, stdin: Union[int, IO[Any], None] = None, stdout: Union[int, IO[Any], None] = None, stderr: Union[int, IO[Any], None] = None, - cwd: Union[str, bytes, os.PathLike, None] = None, - env: Union[Mapping[str, str], None] = None, + cwd: Optional["StrOrBytesPath"] = None, + env: Optional[Mapping[str, str]] = None, start_new_session: bool = False, - **kwargs, - ): + **kwargs: Any, + ) -> Process: """ Open a subprocess and return a `Process` object. 
@@ -179,7 +186,9 @@ async def _open_anyio_process( @asynccontextmanager -async def open_process(command: List[str], **kwargs): +async def open_process( + command: list[str], **kwargs: Any +) -> AsyncGenerator[anyio.abc.Process, Any]: """ Like `anyio.open_process` but with: - Support for Windows command joining @@ -189,11 +198,12 @@ async def open_process(command: List[str], **kwargs): # Passing a string to open_process is equivalent to shell=True which is # generally necessary for Unix-like commands on Windows but otherwise should # be avoided - if not isinstance(command, list): - raise TypeError( - "The command passed to open process must be a list. You passed the command" - f"'{command}', which is type '{type(command)}'." - ) + if not TYPE_CHECKING: + if not isinstance(command, list): + raise TypeError( + "The command passed to open process must be a list. You passed the command" + f"'{command}', which is type '{type(command)}'." + ) if sys.platform == "win32": command = " ".join(command) @@ -222,7 +232,7 @@ async def open_process(command: List[str], **kwargs): finally: try: process.terminate() - if win32_process_group: + if sys.platform == "win32" and win32_process_group: _windows_process_group_pids.remove(process.pid) except OSError: @@ -236,13 +246,58 @@ async def open_process(command: List[str], **kwargs): await process.aclose() +@overload +async def run_process( + command: list[str], + *, + stream_output: Union[ + bool, tuple[Optional[TextSink[str]], Optional[TextSink[str]]] + ] = ..., + task_status: anyio.abc.TaskStatus[T] = ..., + task_status_handler: Callable[[anyio.abc.Process], T] = ..., + **kwargs: Any, +) -> anyio.abc.Process: + ... + + +@overload +async def run_process( + command: list[str], + *, + stream_output: Union[ + bool, tuple[Optional[TextSink[str]], Optional[TextSink[str]]] + ] = ..., + task_status: Optional[anyio.abc.TaskStatus[int]] = ..., + task_status_handler: None = None, + **kwargs: Any, +) -> anyio.abc.Process: + ... + + +@overload +async def run_process( + command: list[str], + *, + stream_output: Union[ + bool, tuple[Optional[TextSink[str]], Optional[TextSink[str]]] + ] = False, + task_status: Optional[anyio.abc.TaskStatus[T]] = None, + task_status_handler: Optional[Callable[[anyio.abc.Process], T]] = None, + **kwargs: Any, +) -> anyio.abc.Process: + ... 
+ + async def run_process( - command: List[str], - stream_output: Union[bool, Tuple[Optional[TextSink], Optional[TextSink]]] = False, - task_status: Optional[anyio.abc.TaskStatus] = None, - task_status_handler: Optional[Callable[[anyio.abc.Process], Any]] = None, - **kwargs, -): + command: list[str], + *, + stream_output: Union[ + bool, tuple[Optional[TextSink[str]], Optional[TextSink[str]]] + ] = False, + task_status: Optional[anyio.abc.TaskStatus[T]] = None, + task_status_handler: Optional[Callable[[anyio.abc.Process], T]] = None, + **kwargs: Any, +) -> anyio.abc.Process: """ Like `anyio.run_process` but with: @@ -262,12 +317,10 @@ async def run_process( **kwargs, ) as process: if task_status is not None: - if not task_status_handler: - - def task_status_handler(process): - return process.pid - - task_status.started(task_status_handler(process)) + value: T = cast(T, process.pid) + if task_status_handler: + value = task_status_handler(process) + task_status.started(value) if stream_output: await consume_process_output( @@ -280,31 +333,36 @@ def task_status_handler(process): async def consume_process_output( - process, - stdout_sink: Optional[TextSink] = None, - stderr_sink: Optional[TextSink] = None, -): + process: anyio.abc.Process, + stdout_sink: Optional[TextSink[str]] = None, + stderr_sink: Optional[TextSink[str]] = None, +) -> None: async with anyio.create_task_group() as tg: - tg.start_soon( - stream_text, - TextReceiveStream(process.stdout), - stdout_sink, - ) - tg.start_soon( - stream_text, - TextReceiveStream(process.stderr), - stderr_sink, - ) + if process.stdout is not None: + tg.start_soon( + stream_text, + TextReceiveStream(process.stdout), + stdout_sink, + ) + if process.stderr is not None: + tg.start_soon( + stream_text, + TextReceiveStream(process.stderr), + stderr_sink, + ) -async def stream_text(source: TextReceiveStream, *sinks: TextSink): +async def stream_text( + source: TextReceiveStream, *sinks: Optional[TextSink[str]] +) -> None: wrapped_sinks = [ ( - anyio.wrap_file(sink) + anyio.wrap_file(cast(IO[str], sink)) if hasattr(sink, "write") and hasattr(sink, "flush") else sink ) for sink in sinks + if sink is not None ] async for item in source: for sink in wrapped_sinks: @@ -313,30 +371,32 @@ async def stream_text(source: TextReceiveStream, *sinks: TextSink): elif isinstance(sink, anyio.AsyncFile): await sink.write(item) await sink.flush() - elif sink is None: - pass # Consume the item but perform no action - else: - raise TypeError(f"Unsupported sink type {type(sink).__name__}") -def _register_signal(signum: int, handler: Callable): +def _register_signal( + signum: int, + handler: Optional[ + Union[Callable[[int, Optional[FrameType]], Any], int, signal.Handlers] + ], +) -> None: if threading.current_thread() is threading.main_thread(): signal.signal(signum, handler) def forward_signal_handler( - pid: int, signum: int, *signums: int, process_name: str, print_fn: Callable -): + pid: int, signum: int, *signums: int, process_name: str, print_fn: PrintFn +) -> None: """Forward subsequent signum events (e.g. 
interrupts) to respective signums.""" current_signal, future_signals = signums[0], signums[1:] # avoid RecursionError when setting up a direct signal forward to the same signal for the main pid + original_handler = None avoid_infinite_recursion = signum == current_signal and pid == os.getpid() if avoid_infinite_recursion: # store the vanilla handler so it can be temporarily restored below original_handler = signal.getsignal(current_signal) - def handler(*args): + def handler(*arg: Any) -> None: print_fn( f"Received {getattr(signum, 'name', signum)}. " f"Sending {getattr(current_signal, 'name', current_signal)} to" @@ -358,7 +418,9 @@ def handler(*args): _register_signal(signum, handler) -def setup_signal_handlers_server(pid: int, process_name: str, print_fn: Callable): +def setup_signal_handlers_server( + pid: int, process_name: str, print_fn: PrintFn +) -> None: """Handle interrupts of the server gracefully.""" setup_handler = partial( forward_signal_handler, pid, process_name=process_name, print_fn=print_fn @@ -375,7 +437,7 @@ def setup_signal_handlers_server(pid: int, process_name: str, print_fn: Callable setup_handler(signal.SIGTERM, signal.SIGTERM, signal.SIGKILL) -def setup_signal_handlers_agent(pid: int, process_name: str, print_fn: Callable): +def setup_signal_handlers_agent(pid: int, process_name: str, print_fn: PrintFn) -> None: """Handle interrupts of the agent gracefully.""" setup_handler = partial( forward_signal_handler, pid, process_name=process_name, print_fn=print_fn @@ -393,7 +455,9 @@ def setup_signal_handlers_agent(pid: int, process_name: str, print_fn: Callable) setup_handler(signal.SIGTERM, signal.SIGINT, signal.SIGKILL) -def setup_signal_handlers_worker(pid: int, process_name: str, print_fn: Callable): +def setup_signal_handlers_worker( + pid: int, process_name: str, print_fn: PrintFn +) -> None: """Handle interrupts of workers gracefully.""" setup_handler = partial( forward_signal_handler, pid, process_name=process_name, print_fn=print_fn diff --git a/src/prefect/utilities/pydantic.py b/src/prefect/utilities/pydantic.py index 8f09afaaa4e1..6086931fbbbd 100644 --- a/src/prefect/utilities/pydantic.py +++ b/src/prefect/utilities/pydantic.py @@ -1,18 +1,18 @@ -from functools import partial from typing import ( Any, Callable, - Dict, Generic, Optional, - Type, TypeVar, + Union, cast, get_origin, overload, ) -from jsonpatch import JsonPatch as JsonPatchBase +from jsonpatch import ( # type: ignore # no typing stubs available, see https://github.com/stefankoegl/python-json-patch/issues/158 + JsonPatch as JsonPatchBase, +) from pydantic import ( BaseModel, GetJsonSchemaHandler, @@ -33,7 +33,7 @@ T = TypeVar("T", bound=Any) -def _reduce_model(model: BaseModel): +def _reduce_model(self: BaseModel) -> tuple[Any, ...]: """ Helper for serializing a cythonized model with cloudpickle. @@ -43,31 +43,33 @@ def _reduce_model(model: BaseModel): return ( _unreduce_model, ( - to_qualified_name(type(model)), - model.model_dump_json(**getattr(model, "__reduce_kwargs__", {})), + to_qualified_name(type(self)), + self.model_dump_json(**getattr(self, "__reduce_kwargs__", {})), ), ) -def _unreduce_model(model_name, json): +def _unreduce_model(model_name: str, json: str) -> Any: """Helper for restoring model after serialization""" model = from_qualified_name(model_name) return model.model_validate_json(json) @overload -def add_cloudpickle_reduction(__model_cls: Type[M]) -> Type[M]: +def add_cloudpickle_reduction(__model_cls: type[M]) -> type[M]: ... 
@overload def add_cloudpickle_reduction( - **kwargs: Any, -) -> Callable[[Type[M]], Type[M]]: + __model_cls: None = None, **kwargs: Any +) -> Callable[[type[M]], type[M]]: ... -def add_cloudpickle_reduction(__model_cls: Optional[Type[M]] = None, **kwargs: Any): +def add_cloudpickle_reduction( + __model_cls: Optional[type[M]] = None, **kwargs: Any +) -> Union[type[M], Callable[[type[M]], type[M]]]: """ Adds a `__reducer__` to the given class that ensures it is cloudpickle compatible. @@ -85,25 +87,22 @@ def add_cloudpickle_reduction(__model_cls: Optional[Type[M]] = None, **kwargs: A """ if __model_cls: __model_cls.__reduce__ = _reduce_model - __model_cls.__reduce_kwargs__ = kwargs + setattr(__model_cls, "__reduce_kwargs__", kwargs) return __model_cls - else: - return cast( - Callable[[Type[M]], Type[M]], - partial( - add_cloudpickle_reduction, - **kwargs, - ), - ) + + def reducer_with_kwargs(__model_cls: type[M]) -> type[M]: + return add_cloudpickle_reduction(__model_cls, **kwargs) + + return reducer_with_kwargs -def get_class_fields_only(model: Type[BaseModel]) -> set: +def get_class_fields_only(model: type[BaseModel]) -> set[str]: """ Gets all the field names defined on the model class but not any parent classes. Any fields that are on the parent but redefined on the subclass are included. """ subclass_class_fields = set(model.__annotations__.keys()) - parent_class_fields = set() + parent_class_fields: set[str] = set() for base in model.__class__.__bases__: if issubclass(base, BaseModel): @@ -114,7 +113,7 @@ def get_class_fields_only(model: Type[BaseModel]) -> set: ) -def add_type_dispatch(model_cls: Type[M]) -> Type[M]: +def add_type_dispatch(model_cls: type[M]) -> type[M]: """ Extend a Pydantic model to add a 'type' field that is used as a discriminator field to dynamically determine the subtype that when deserializing models. @@ -149,7 +148,7 @@ def add_type_dispatch(model_cls: Type[M]) -> Type[M]: elif not defines_dispatch_key and defines_type_field: field_type_annotation = model_cls.model_fields["type"].annotation - if field_type_annotation != str: + if field_type_annotation != str and field_type_annotation is not None: raise TypeError( f"Model class {model_cls.__name__!r} defines a 'type' field with " f"type {field_type_annotation.__name__!r} but it must be 'str'." 
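
The `add_cloudpickle_reduction` overloads above follow the common "decorator usable with or without keyword arguments" pattern. A generic, self-contained sketch of that pattern is shown below; the `tag` decorator and its `label` keyword are hypothetical examples, not Prefect APIs.

from typing import Callable, Optional, TypeVar, Union, overload

F = TypeVar("F", bound=Callable[..., object])


@overload
def tag(__fn: F) -> F: ...


@overload
def tag(__fn: None = None, *, label: str = "") -> Callable[[F], F]: ...


def tag(
    __fn: Optional[F] = None, *, label: str = ""
) -> Union[F, Callable[[F], F]]:
    # Bare usage: `@tag` receives the function directly.
    if __fn is not None:
        setattr(__fn, "__label__", label)
        return __fn

    # Parameterized usage: `@tag(label="x")` returns a decorator to apply later.
    def apply(fn: F) -> F:
        setattr(fn, "__label__", label)
        return fn

    return apply


@tag
def plain() -> None: ...


@tag(label="important")
def labelled() -> None: ...
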
@@ -157,10 +156,10 @@ def add_type_dispatch(model_cls: Type[M]) -> Type[M]: # Set the dispatch key to retrieve the value from the type field @classmethod - def dispatch_key_from_type_field(cls): + def dispatch_key_from_type_field(cls: type[M]) -> str: return cls.model_fields["type"].default - model_cls.__dispatch_key__ = dispatch_key_from_type_field + setattr(model_cls, "__dispatch_key__", dispatch_key_from_type_field) else: raise ValueError( @@ -171,7 +170,7 @@ def dispatch_key_from_type_field(cls): cls_init = model_cls.__init__ cls_new = model_cls.__new__ - def __init__(__pydantic_self__, **data: Any) -> None: + def __init__(__pydantic_self__: M, **data: Any) -> None: type_string = ( get_dispatch_key(__pydantic_self__) if type(__pydantic_self__) != model_cls @@ -180,12 +179,16 @@ def __init__(__pydantic_self__, **data: Any) -> None: data.setdefault("type", type_string) cls_init(__pydantic_self__, **data) - def __new__(cls: Type[M], **kwargs: Any) -> M: + def __new__(cls: type[M], **kwargs: Any) -> M: if "type" in kwargs: try: subcls = lookup_type(cls, dispatch_key=kwargs["type"]) except KeyError as exc: - raise ValidationError(errors=[exc], model=cls) + raise ValidationError.from_exception_data( + title=cls.__name__, + line_errors=[{"type": str(exc), "input": kwargs["type"]}], + input_type="python", + ) return cls_new(subcls) else: return cls_new(cls) @@ -221,7 +224,7 @@ class PartialModel(Generic[M]): >>> model = partial_model.finalize(z=3.0) """ - def __init__(self, __model_cls: Type[M], **kwargs: Any) -> None: + def __init__(self, __model_cls: type[M], **kwargs: Any) -> None: self.fields = kwargs # Set fields first to avoid issues if `fields` is also set on the `model_cls` # in our custom `setattr` implementation. @@ -236,11 +239,11 @@ def finalize(self, **kwargs: Any) -> M: self.raise_if_not_in_model(name) return self.model_cls(**self.fields, **kwargs) - def raise_if_already_set(self, name): + def raise_if_already_set(self, name: str) -> None: if name in self.fields: raise ValueError(f"Field {name!r} has already been set.") - def raise_if_not_in_model(self, name): + def raise_if_not_in_model(self, name: str) -> None: if name not in self.model_cls.model_fields: raise ValueError(f"Field {name!r} is not present in the model.") @@ -290,7 +293,7 @@ def __get_pydantic_json_schema__( def custom_pydantic_encoder( - type_encoders: Optional[Dict[Any, Callable[[Type[Any]], Any]]], obj: Any + type_encoders: dict[Any, Callable[[type[Any]], Any]], obj: Any ) -> Any: # Check the class type and its superclasses for a matching encoder for base in obj.__class__.__mro__[:-1]: @@ -359,8 +362,10 @@ class ExampleModel(BaseModel): """ adapter = TypeAdapter(type_) - if get_origin(type_) is list and isinstance(data, dict): - data = next(iter(data.values())) + origin: Optional[Any] = get_origin(type_) + if origin is list and isinstance(data, dict): + values_dict: dict[Any, Any] = data + data = next(iter(values_dict.values())) parser: Callable[[Any], T] = getattr(adapter, f"validate_{mode}") diff --git a/src/prefect/utilities/render_swagger.py b/src/prefect/utilities/render_swagger.py index ac02ee985fc6..82008ed3192f 100644 --- a/src/prefect/utilities/render_swagger.py +++ b/src/prefect/utilities/render_swagger.py @@ -8,10 +8,13 @@ import string import urllib.parse from pathlib import Path +from typing import Any, Optional, cast from xml.sax.saxutils import escape import mkdocs.plugins -from mkdocs.structure.files import File +from mkdocs.config.defaults import MkDocsConfig +from mkdocs.structure.files import 
File, Files +from mkdocs.structure.pages import Page USAGE_MSG = ( "Usage: '!!swagger !!' or '!!swagger-http !!'. " @@ -50,7 +53,7 @@ TOKEN_HTTP = re.compile(r"!!swagger-http(?: (?Phttps?://[^\s]+))?!!") -def swagger_lib(config) -> dict: +def swagger_lib(config: MkDocsConfig) -> dict[str, Any]: """ Provides the actual swagger library used """ @@ -59,11 +62,14 @@ def swagger_lib(config) -> dict: "js": "https://unpkg.com/swagger-ui-dist@3/swagger-ui-bundle.js", } - extra_javascript = config.get("extra_javascript", []) - extra_css = config.get("extra_css", []) + extra_javascript = config.extra_javascript + extra_css = cast(list[str], config.extra_css) for lib in extra_javascript: - if os.path.basename(urllib.parse.urlparse(lib).path) == "swagger-ui-bundle.js": - lib_swagger["js"] = lib + if ( + os.path.basename(urllib.parse.urlparse(str(lib)).path) + == "swagger-ui-bundle.js" + ): + lib_swagger["js"] = str(lib) break for css in extra_css: @@ -73,8 +79,10 @@ def swagger_lib(config) -> dict: return lib_swagger -class SwaggerPlugin(mkdocs.plugins.BasePlugin): - def on_page_markdown(self, markdown, page, config, files): +class SwaggerPlugin(mkdocs.plugins.BasePlugin[Any]): + def on_page_markdown( + self, markdown: str, /, *, page: Page, config: MkDocsConfig, files: Files + ) -> Optional[str]: is_http = False match = TOKEN.search(markdown) @@ -88,7 +96,7 @@ def on_page_markdown(self, markdown, page, config, files): pre_token = markdown[: match.start()] post_token = markdown[match.end() :] - def _error(message): + def _error(message: str) -> str: return ( pre_token + escape(ERROR_TEMPLATE.substitute(error=message)) @@ -103,8 +111,10 @@ def _error(message): if is_http: url = path else: + base = page.file.abs_src_path + assert base is not None try: - api_file = Path(page.file.abs_src_path).with_name(path) + api_file = Path(base).with_name(path) except ValueError as exc: return _error(f"Invalid path. {exc.args[0]}") @@ -114,7 +124,7 @@ def _error(message): src_dir = api_file.parent dest_dir = Path(page.file.abs_dest_path).parent - new_file = File(api_file.name, src_dir, dest_dir, False) + new_file = File(api_file.name, str(src_dir), str(dest_dir), False) files.append(new_file) url = Path(new_file.abs_dest_path).name @@ -129,4 +139,4 @@ def _error(message): ) # If multiple swaggers exist. 
- return self.on_page_markdown(markdown, page, config, files) + return self.on_page_markdown(markdown, page=page, config=config, files=files) diff --git a/src/prefect/utilities/schema_tools/__init__.py b/src/prefect/utilities/schema_tools/__init__.py index 1e6e73fc372a..bfd382af2b5f 100644 --- a/src/prefect/utilities/schema_tools/__init__.py +++ b/src/prefect/utilities/schema_tools/__init__.py @@ -2,8 +2,8 @@ from .validation import ( CircularSchemaRefError, ValidationError, - validate, is_valid_schema, + validate, ) __all__ = [ @@ -12,5 +12,6 @@ "HydrationError", "ValidationError", "hydrate", + "is_valid_schema", "validate", ] diff --git a/src/prefect/utilities/schema_tools/hydration.py b/src/prefect/utilities/schema_tools/hydration.py index 49bd1bc8b33d..3cbb8e97804d 100644 --- a/src/prefect/utilities/schema_tools/hydration.py +++ b/src/prefect/utilities/schema_tools/hydration.py @@ -1,10 +1,12 @@ import json -from typing import Any, Callable, Dict, Optional +from abc import ABC, abstractmethod +from collections.abc import Callable, Sequence +from typing import Any, Optional, cast import jinja2 from pydantic import BaseModel, Field from sqlalchemy.ext.asyncio import AsyncSession -from typing_extensions import TypeAlias +from typing_extensions import Self, TypeAlias, TypeIs from prefect.server.utilities.user_templates import ( TemplateSecurityError, @@ -15,14 +17,14 @@ class HydrationContext(BaseModel): - workspace_variables: Dict[ + workspace_variables: dict[ str, StrictVariableValue, ] = Field(default_factory=dict) render_workspace_variables: bool = Field(default=False) raise_on_error: bool = Field(default=False) render_jinja: bool = Field(default=False) - jinja_context: Dict[str, Any] = Field(default_factory=dict) + jinja_context: dict[str, Any] = Field(default_factory=dict) @classmethod async def build( @@ -31,9 +33,11 @@ async def build( raise_on_error: bool = False, render_jinja: bool = False, render_workspace_variables: bool = False, - ) -> "HydrationContext": + ) -> Self: + from prefect.server.database.orm_models import Variable from prefect.server.models.variables import read_variables + variables: Sequence[Variable] if render_workspace_variables: variables = await read_variables( session=session, @@ -51,14 +55,14 @@ async def build( ) -Handler: TypeAlias = Callable[[dict, HydrationContext], Any] +Handler: TypeAlias = Callable[[dict[str, Any], HydrationContext], Any] PrefectKind: TypeAlias = Optional[str] -_handlers: Dict[PrefectKind, Handler] = {} +_handlers: dict[PrefectKind, Handler] = {} class Placeholder: - def __eq__(self, other): + def __eq__(self, other: Any) -> bool: return isinstance(other, type(self)) @property @@ -70,11 +74,11 @@ class RemoveValue(Placeholder): pass -def _remove_value(value) -> bool: +def _remove_value(value: Any) -> TypeIs[RemoveValue]: return isinstance(value, RemoveValue) -class HydrationError(Placeholder, Exception): +class HydrationError(Placeholder, Exception, ABC): def __init__(self, detail: Optional[str] = None): self.detail = detail @@ -83,47 +87,49 @@ def is_error(self) -> bool: return True @property - def message(self): + @abstractmethod + def message(self) -> str: raise NotImplementedError("Must be implemented by subclass") - def __eq__(self, other): + def __eq__(self, other: Any) -> bool: return isinstance(other, type(self)) and self.message == other.message - def __str__(self): + def __str__(self) -> str: return self.message class KeyNotFound(HydrationError): @property - def message(self): + def message(self) -> str: return f"Missing 
'{self.key}' key in __prefect object" @property + @abstractmethod def key(self) -> str: raise NotImplementedError("Must be implemented by subclass") class ValueNotFound(KeyNotFound): @property - def key(self): + def key(self) -> str: return "value" class TemplateNotFound(KeyNotFound): @property - def key(self): + def key(self) -> str: return "template" class VariableNameNotFound(KeyNotFound): @property - def key(self): + def key(self) -> str: return "variable_name" class InvalidJSON(HydrationError): @property - def message(self): + def message(self) -> str: message = "Invalid JSON" if self.detail: message += f": {self.detail}" @@ -132,7 +138,7 @@ def message(self): class InvalidJinja(HydrationError): @property - def message(self): + def message(self) -> str: message = "Invalid jinja" if self.detail: message += f": {self.detail}" @@ -146,29 +152,29 @@ def variable_name(self) -> str: return self.detail @property - def message(self): + def message(self) -> str: return f"Variable '{self.detail}' not found in workspace." class WorkspaceVariable(Placeholder): - def __init__(self, variable_name: str): + def __init__(self, variable_name: str) -> None: self.variable_name = variable_name - def __eq__(self, other): + def __eq__(self, other: Any) -> bool: return ( isinstance(other, type(self)) and self.variable_name == other.variable_name ) class ValidJinja(Placeholder): - def __init__(self, template: str): + def __init__(self, template: str) -> None: self.template = template - def __eq__(self, other): + def __eq__(self, other: Any) -> bool: return isinstance(other, type(self)) and self.template == other.template -def handler(kind: PrefectKind) -> Callable: +def handler(kind: PrefectKind) -> Callable[[Handler], Handler]: def decorator(func: Handler) -> Handler: _handlers[kind] = func return func @@ -176,9 +182,9 @@ def decorator(func: Handler) -> Handler: return decorator -def call_handler(kind: PrefectKind, obj: dict, ctx: HydrationContext) -> Any: +def call_handler(kind: PrefectKind, obj: dict[str, Any], ctx: HydrationContext) -> Any: if kind not in _handlers: - return (obj or {}).get("value", None) + return obj.get("value", None) res = _handlers[kind](obj, ctx) if ctx.raise_on_error and isinstance(res, HydrationError): @@ -187,7 +193,7 @@ def call_handler(kind: PrefectKind, obj: dict, ctx: HydrationContext) -> Any: @handler("none") -def null_handler(obj: dict, ctx: HydrationContext): +def null_handler(obj: dict[str, Any], ctx: HydrationContext): if "value" in obj: # null handler is a pass through, so we want to continue to hydrate return _hydrate(obj["value"], ctx) @@ -196,7 +202,7 @@ def null_handler(obj: dict, ctx: HydrationContext): @handler("json") -def json_handler(obj: dict, ctx: HydrationContext): +def json_handler(obj: dict[str, Any], ctx: HydrationContext): if "value" in obj: if isinstance(obj["value"], dict): dehydrated_json = _hydrate(obj["value"], ctx) @@ -222,7 +228,7 @@ def json_handler(obj: dict, ctx: HydrationContext): @handler("jinja") -def jinja_handler(obj: dict, ctx: HydrationContext): +def jinja_handler(obj: dict[str, Any], ctx: HydrationContext) -> Any: if "template" in obj: if isinstance(obj["template"], dict): dehydrated_jinja = _hydrate(obj["template"], ctx) @@ -247,7 +253,7 @@ def jinja_handler(obj: dict, ctx: HydrationContext): @handler("workspace_variable") -def workspace_variable_handler(obj: dict, ctx: HydrationContext): +def workspace_variable_handler(obj: dict[str, Any], ctx: HydrationContext) -> Any: if "variable_name" in obj: if isinstance(obj["variable_name"], 
dict): dehydrated_variable = _hydrate(obj["variable_name"], ctx) @@ -259,7 +265,7 @@ def workspace_variable_handler(obj: dict, ctx: HydrationContext): return dehydrated_variable if not ctx.render_workspace_variables: - return WorkspaceVariable(variable_name=obj["variable_name"]) + return WorkspaceVariable(variable_name=dehydrated_variable) if dehydrated_variable in ctx.workspace_variables: return ctx.workspace_variables[dehydrated_variable] @@ -277,35 +283,36 @@ def workspace_variable_handler(obj: dict, ctx: HydrationContext): return RemoveValue() -def hydrate(obj: dict, ctx: Optional[HydrationContext] = None): - res = _hydrate(obj, ctx) +def hydrate( + obj: dict[str, Any], ctx: Optional[HydrationContext] = None +) -> dict[str, Any]: + res: dict[str, Any] = _hydrate(obj, ctx) if _remove_value(res): - return {} + res = {} return res -def _hydrate(obj, ctx: Optional[HydrationContext] = None) -> Any: +def _hydrate(obj: Any, ctx: Optional[HydrationContext] = None) -> Any: if ctx is None: ctx = HydrationContext() - prefect_object = isinstance(obj, dict) and "__prefect_kind" in obj - - if prefect_object: - prefect_kind = obj.get("__prefect_kind") - return call_handler(prefect_kind, obj, ctx) + if isinstance(obj, dict) and "__prefect_kind" in obj: + obj_dict: dict[str, Any] = obj + prefect_kind = obj_dict["__prefect_kind"] + return call_handler(prefect_kind, obj_dict, ctx) else: if isinstance(obj, dict): return { key: hydrated_value - for key, value in obj.items() + for key, value in cast(dict[str, Any], obj).items() if not _remove_value(hydrated_value := _hydrate(value, ctx)) } elif isinstance(obj, list): return [ hydrated_element - for element in obj + for element in cast(list[Any], obj) if not _remove_value(hydrated_element := _hydrate(element, ctx)) ] else: diff --git a/src/prefect/utilities/schema_tools/validation.py b/src/prefect/utilities/schema_tools/validation.py index e94204331125..1dfdd7a3607b 100644 --- a/src/prefect/utilities/schema_tools/validation.py +++ b/src/prefect/utilities/schema_tools/validation.py @@ -1,14 +1,19 @@ from collections import defaultdict, deque +from collections.abc import Callable, Iterable, Iterator from copy import deepcopy -from typing import Any, Dict, List +from typing import TYPE_CHECKING, Any, cast import jsonschema from jsonschema.exceptions import ValidationError as JSONSchemaValidationError from jsonschema.validators import Draft202012Validator, create +from referencing.jsonschema import ObjectSchema, Schema from prefect.utilities.collections import remove_nested_keys from prefect.utilities.schema_tools.hydration import HydrationError, Placeholder +if TYPE_CHECKING: + from jsonschema.validators import _Validator # type: ignore + class CircularSchemaRefError(Exception): pass @@ -21,12 +26,16 @@ class ValidationError(Exception): PLACEHOLDERS_VALIDATOR_NAME = "_placeholders" -def _build_validator(): - def _applicable_validators(schema): +def _build_validator() -> type["_Validator"]: + def _applicable_validators(schema: Schema) -> Iterable[tuple[str, Any]]: # the default implementation returns `schema.items()` - return {**schema, PLACEHOLDERS_VALIDATOR_NAME: None}.items() + assert not isinstance(schema, bool) + schema = {**schema, PLACEHOLDERS_VALIDATOR_NAME: None} + return schema.items() - def _placeholders(validator, _, instance, schema): + def _placeholders( + _validator: "_Validator", _property: object, instance: Any, _schema: Schema + ) -> Iterator[JSONSchemaValidationError]: if isinstance(instance, HydrationError): yield 
JSONSchemaValidationError(instance.message) @@ -43,7 +52,9 @@ def _placeholders(validator, _, instance, schema): version="prefect", type_checker=Draft202012Validator.TYPE_CHECKER, format_checker=Draft202012Validator.FORMAT_CHECKER, - id_of=Draft202012Validator.ID_OF, + id_of=cast( # the stub for create() is wrong here; id_of accepts (Schema) -> str | None + Callable[[Schema], str], Draft202012Validator.ID_OF + ), applicable_validators=_applicable_validators, ) @@ -51,24 +62,23 @@ def _placeholders(validator, _, instance, schema): _VALIDATOR = _build_validator() -def is_valid_schema(schema: Dict, preprocess: bool = True): +def is_valid_schema(schema: ObjectSchema, preprocess: bool = True) -> None: if preprocess: schema = preprocess_schema(schema) try: - if schema is not None: - _VALIDATOR.check_schema(schema, format_checker=_VALIDATOR.FORMAT_CHECKER) + _VALIDATOR.check_schema(schema, format_checker=_VALIDATOR.FORMAT_CHECKER) except jsonschema.SchemaError as exc: raise ValueError(f"Invalid schema: {exc.message}") from exc def validate( - obj: Dict, - schema: Dict, + obj: dict[str, Any], + schema: ObjectSchema, raise_on_error: bool = False, preprocess: bool = True, ignore_required: bool = False, allow_none_with_default: bool = False, -) -> List[JSONSchemaValidationError]: +) -> list[JSONSchemaValidationError]: if preprocess: schema = preprocess_schema(schema, allow_none_with_default) @@ -93,32 +103,31 @@ def validate( else: try: validator = _VALIDATOR(schema, format_checker=_VALIDATOR.FORMAT_CHECKER) - errors = list(validator.iter_errors(obj)) + errors = list(validator.iter_errors(obj)) # type: ignore except RecursionError: raise CircularSchemaRefError return errors -def is_valid( - obj: Dict, - schema: Dict, -) -> bool: +def is_valid(obj: dict[str, Any], schema: ObjectSchema) -> bool: errors = validate(obj, schema) - return len(errors) == 0 + return not errors -def prioritize_placeholder_errors(errors): - errors_by_path = defaultdict(list) +def prioritize_placeholder_errors( + errors: list[JSONSchemaValidationError], +) -> list[JSONSchemaValidationError]: + errors_by_path: dict[str, list[JSONSchemaValidationError]] = defaultdict(list) for error in errors: path_str = "->".join(str(p) for p in error.relative_path) errors_by_path[path_str].append(error) - filtered_errors = [] - for path, grouped_errors in errors_by_path.items(): + filtered_errors: list[JSONSchemaValidationError] = [] + for grouped_errors in errors_by_path.values(): placeholders_errors = [ error for error in grouped_errors - if error.validator == PLACEHOLDERS_VALIDATOR_NAME + if error.validator == PLACEHOLDERS_VALIDATOR_NAME # type: ignore # typing stubs are incomplete ] if placeholders_errors: @@ -129,8 +138,8 @@ def prioritize_placeholder_errors(errors): return filtered_errors -def build_error_obj(errors: List[JSONSchemaValidationError]) -> Dict: - error_response: Dict[str, Any] = {"errors": []} +def build_error_obj(errors: list[JSONSchemaValidationError]) -> dict[str, Any]: + error_response: dict[str, Any] = {"errors": []} # If multiple errors are present for the same path and one of them # is a placeholder error, we want only want to use the placeholder error. @@ -145,11 +154,11 @@ def build_error_obj(errors: List[JSONSchemaValidationError]) -> Dict: # Required errors should be moved one level down to the property # they're associated with, so we add an extra level to the path. 
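# Illustrative sketch (not part of the diff): how the schema_tools helpers annotated
# above fit together. The schema and payloads below are hypothetical, and the exact
# error wording comes from jsonschema, so only the "valid" flag is asserted here.
from prefect.utilities.schema_tools.hydration import HydrationContext, hydrate
from prefect.utilities.schema_tools.validation import build_error_obj, validate

schema = {
    "type": "object",
    "properties": {"retries": {"type": "integer"}},
    "required": ["retries"],
}

# A "__prefect_kind" placeholder is resolved by its registered handler first...
hydrated = hydrate({"retries": {"__prefect_kind": "json", "value": "3"}}, HydrationContext())
assert hydrated == {"retries": 3}

# ...then the hydrated object is validated against the (preprocessed) schema.
errors = validate({"retries": "three"}, schema)  # list[JSONSchemaValidationError]
assert build_error_obj(errors)["valid"] is False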
- if error.validator == "required": - required_field = error.message.split(" ")[0].strip("'") + if error.validator == "required": # type: ignore + required_field = error.message.partition(" ")[0].strip("'") path.append(required_field) - current = error_response["errors"] + current: list[Any] = error_response["errors"] # error at the root, just append the error message if not path: @@ -163,10 +172,10 @@ def build_error_obj(errors: List[JSONSchemaValidationError]) -> Dict: else: for entry in current: if entry.get("index") == part: - current = entry["errors"] + current = cast(list[Any], entry["errors"]) break else: - new_entry = {"index": part, "errors": []} + new_entry: dict[str, Any] = {"index": part, "errors": []} current.append(new_entry) current = new_entry["errors"] else: @@ -182,7 +191,7 @@ def build_error_obj(errors: List[JSONSchemaValidationError]) -> Dict: current.append(new_entry) current = new_entry["errors"] - valid = len(error_response["errors"]) == 0 + valid = not bool(error_response["errors"]) error_response["valid"] = valid return error_response @@ -190,10 +199,10 @@ def build_error_obj(errors: List[JSONSchemaValidationError]) -> Dict: def _fix_null_typing( key: str, - schema: Dict, - required_fields: List[str], + schema: dict[str, Any], + required_fields: list[str], allow_none_with_default: bool = False, -): +) -> None: """ Pydantic V1 does not generate a valid Draft2020-12 schema for null types. """ @@ -207,7 +216,7 @@ def _fix_null_typing( del schema["type"] -def _fix_tuple_items(schema: Dict): +def _fix_tuple_items(schema: dict[str, Any]) -> None: """ Pydantic V1 does not generate a valid Draft2020-12 schema for tuples. """ @@ -216,15 +225,15 @@ def _fix_tuple_items(schema: Dict): and isinstance(schema["items"], list) and not schema.get("prefixItems") ): - schema["prefixItems"] = deepcopy(schema["items"]) + schema["prefixItems"] = deepcopy(cast(list[Any], schema["items"])) del schema["items"] def process_properties( - properties: Dict, - required_fields: List[str], + properties: dict[str, dict[str, Any]], + required_fields: list[str], allow_none_with_default: bool = False, -): +) -> None: for key, schema in properties.items(): _fix_null_typing(key, schema, required_fields, allow_none_with_default) _fix_tuple_items(schema) @@ -235,9 +244,9 @@ def process_properties( def preprocess_schema( - schema: Dict, + schema: ObjectSchema, allow_none_with_default: bool = False, -): +) -> ObjectSchema: schema = deepcopy(schema) if "properties" in schema: @@ -247,7 +256,8 @@ def preprocess_schema( ) if "definitions" in schema: # Also process definitions for reused models - for definition in (schema["definitions"] or {}).values(): + definitions = cast(dict[str, Any], schema["definitions"]) + for definition in definitions.values(): if "properties" in definition: required_fields = definition.get("required", []) process_properties( diff --git a/src/prefect/utilities/services.py b/src/prefect/utilities/services.py index 60dd0cf61c8e..de183c73c555 100644 --- a/src/prefect/utilities/services.py +++ b/src/prefect/utilities/services.py @@ -1,9 +1,10 @@ -import sys import threading from collections import deque +from collections.abc import Coroutine +from logging import Logger from traceback import format_exception from types import TracebackType -from typing import Callable, Coroutine, Deque, Optional, Tuple +from typing import Any, Callable, Optional from wsgiref.simple_server import WSGIServer import anyio @@ -14,11 +15,11 @@ from prefect.utilities.collections import distinct from 
prefect.utilities.math import clamped_poisson_interval -logger = get_logger("utilities.services.critical_service_loop") +logger: Logger = get_logger("utilities.services.critical_service_loop") async def critical_service_loop( - workload: Callable[..., Coroutine], + workload: Callable[..., Coroutine[Any, Any, Any]], interval: float, memory: int = 10, consecutive: int = 3, @@ -26,7 +27,7 @@ async def critical_service_loop( printer: Callable[..., None] = print, run_once: bool = False, jitter_range: Optional[float] = None, -): +) -> None: """ Runs the given `workload` function on the specified `interval`, while being forgiving of intermittent issues like temporary HTTP errors. If more than a certain @@ -50,8 +51,8 @@ async def critical_service_loop( between `interval * (1 - range) < rv < interval * (1 + range)` """ - track_record: Deque[bool] = deque([True] * consecutive, maxlen=consecutive) - failures: Deque[Tuple[Exception, TracebackType]] = deque(maxlen=memory) + track_record: deque[bool] = deque([True] * consecutive, maxlen=consecutive) + failures: deque[tuple[Exception, Optional[TracebackType]]] = deque(maxlen=memory) backoff_count = 0 while True: @@ -78,7 +79,7 @@ async def critical_service_loop( # or Prefect Cloud is having an outage (which will be covered by the # exception clause below) track_record.append(False) - failures.append((exc, sys.exc_info()[-1])) + failures.append((exc, exc.__traceback__)) logger.debug( f"Run of {workload!r} failed with TransportError", exc_info=exc ) @@ -88,7 +89,7 @@ async def critical_service_loop( # likely to be temporary and transient. Don't quit over these unless # it is prolonged. track_record.append(False) - failures.append((exc, sys.exc_info()[-1])) + failures.append((exc, exc.__traceback__)) logger.debug( f"Run of {workload!r} failed with HTTPStatusError", exc_info=exc ) @@ -155,10 +156,10 @@ async def critical_service_loop( await anyio.sleep(sleep) -_metrics_server: Optional[Tuple[WSGIServer, threading.Thread]] = None +_metrics_server: Optional[tuple[WSGIServer, threading.Thread]] = None -def start_client_metrics_server(): +def start_client_metrics_server() -> None: """Start the process-wide Prometheus metrics server for client metrics (if enabled with `PREFECT_CLIENT_METRICS_ENABLED`) on the port `PREFECT_CLIENT_METRICS_PORT`.""" if not PREFECT_CLIENT_METRICS_ENABLED: @@ -173,7 +174,7 @@ def start_client_metrics_server(): _metrics_server = start_http_server(port=PREFECT_CLIENT_METRICS_PORT.value()) -def stop_client_metrics_server(): +def stop_client_metrics_server() -> None: """Start the process-wide Prometheus metrics server for client metrics, if it has previously been started""" global _metrics_server diff --git a/src/prefect/utilities/templating.py b/src/prefect/utilities/templating.py index 3e46337ba8f7..6e597a309786 100644 --- a/src/prefect/utilities/templating.py +++ b/src/prefect/utilities/templating.py @@ -1,17 +1,7 @@ import enum import os import re -from typing import ( - TYPE_CHECKING, - Any, - Dict, - NamedTuple, - Optional, - Set, - Type, - TypeVar, - Union, -) +from typing import TYPE_CHECKING, Any, NamedTuple, Optional, TypeVar, Union, cast from prefect.client.utilities import inject_client from prefect.utilities.annotations import NotSet @@ -21,7 +11,7 @@ from prefect.client.orchestration import PrefectClient -T = TypeVar("T", str, int, float, bool, dict, list, None) +T = TypeVar("T", str, int, float, bool, dict[Any, Any], list[Any], None) PLACEHOLDER_CAPTURE_REGEX = re.compile(r"({{\s*([\w\.\-\[\]$]+)\s*}})") 
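# Illustrative sketch (not part of the diff): using the retyped critical_service_loop
# above to poll a workload while tolerating transient HTTP errors. check_health and
# its URL are hypothetical; run_once=True keeps the sketch from looping indefinitely.
import anyio
import httpx

from prefect.utilities.services import critical_service_loop


async def check_health() -> None:
    # httpx.TransportError / HTTPStatusError raised here is recorded in the loop's
    # track record rather than immediately crashing the service.
    async with httpx.AsyncClient() as client:
        response = await client.get("http://localhost:4200/api/health")
        response.raise_for_status()


async def main() -> None:
    await critical_service_loop(workload=check_health, interval=5, run_once=True)


if __name__ == "__main__":
    anyio.run(main)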
BLOCK_DOCUMENT_PLACEHOLDER_PREFIX = "prefect.blocks." @@ -62,7 +52,7 @@ def determine_placeholder_type(name: str) -> PlaceholderType: return PlaceholderType.STANDARD -def find_placeholders(template: T) -> Set[Placeholder]: +def find_placeholders(template: T) -> set[Placeholder]: """ Finds all placeholders in a template. @@ -72,8 +62,9 @@ def find_placeholders(template: T) -> Set[Placeholder]: Returns: A set of all placeholders in the template """ + seed: set[Placeholder] = set() if isinstance(template, (int, float, bool)): - return set() + return seed if isinstance(template, str): result = PLACEHOLDER_CAPTURE_REGEX.findall(template) return { @@ -81,18 +72,16 @@ def find_placeholders(template: T) -> Set[Placeholder]: for full_match, name in result } elif isinstance(template, dict): - return set().union( - *[find_placeholders(value) for key, value in template.items()] - ) + return seed.union(*[find_placeholders(value) for value in template.values()]) elif isinstance(template, list): - return set().union(*[find_placeholders(item) for item in template]) + return seed.union(*[find_placeholders(item) for item in template]) else: raise ValueError(f"Unexpected type: {type(template)}") def apply_values( - template: T, values: Dict[str, Any], remove_notset: bool = True -) -> Union[T, Type[NotSet]]: + template: T, values: dict[str, Any], remove_notset: bool = True +) -> Union[T, type[NotSet]]: """ Replaces placeholders in a template with values from a supplied dictionary. @@ -120,7 +109,7 @@ def apply_values( Returns: The template with the values applied """ - if isinstance(template, (int, float, bool, type(NotSet), type(None))): + if template in (NotSet, None) or isinstance(template, (int, float)): return template if isinstance(template, str): placeholders = find_placeholders(template) @@ -155,7 +144,7 @@ def apply_values( return template elif isinstance(template, dict): - updated_template = {} + updated_template: dict[str, Any] = {} for key, value in template.items(): updated_value = apply_values(value, values, remove_notset=remove_notset) if updated_value is not NotSet: @@ -163,22 +152,22 @@ def apply_values( elif not remove_notset: updated_template[key] = value - return updated_template + return cast(T, updated_template) elif isinstance(template, list): - updated_list = [] + updated_list: list[Any] = [] for value in template: updated_value = apply_values(value, values, remove_notset=remove_notset) if updated_value is not NotSet: updated_list.append(updated_value) - return updated_list + return cast(T, updated_list) else: raise ValueError(f"Unexpected template type {type(template).__name__!r}") @inject_client async def resolve_block_document_references( - template: T, client: "PrefectClient" = None -) -> Union[T, Dict[str, Any]]: + template: T, client: Optional["PrefectClient"] = None +) -> Union[T, dict[str, Any]]: """ Resolve block document references in a template by replacing each reference with the data of the block document. @@ -242,12 +231,17 @@ async def resolve_block_document_references( Returns: The template with block documents resolved """ + if TYPE_CHECKING: + # The @inject_client decorator takes care of providing the client, but + # the function signature must mark it as optional to callers. 
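# Illustrative sketch (not part of the diff): behaviour of apply_values as annotated
# above. The placeholder names and values are hypothetical.
from prefect.utilities.templating import apply_values

template = {"name": "run-{{ env }}", "retries": "{{ retries }}"}

# A placeholder embedded in a longer string is substituted as text, while a value
# that is exactly one placeholder keeps the replacement's original type.
assert apply_values(template, {"env": "prod", "retries": 3}) == {
    "name": "run-prod",
    "retries": 3,
}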
+ assert client is not None + if isinstance(template, dict): block_document_id = template.get("$ref", {}).get("block_document_id") if block_document_id: block_document = await client.read_block_document(block_document_id) return block_document.data - updated_template = {} + updated_template: dict[str, Any] = {} for key, value in template.items(): updated_value = await resolve_block_document_references( value, client=client @@ -265,7 +259,7 @@ async def resolve_block_document_references( placeholder.type is PlaceholderType.BLOCK_DOCUMENT for placeholder in placeholders ) - if len(placeholders) == 0 or not has_block_document_placeholder: + if not (placeholders and has_block_document_placeholder): return template elif ( len(placeholders) == 1 @@ -274,31 +268,32 @@ async def resolve_block_document_references( ): # value_keypath will be a list containing a dot path if additional # attributes are accessed and an empty list otherwise. - block_type_slug, block_document_name, *value_keypath = ( - list(placeholders)[0] - .name.replace(BLOCK_DOCUMENT_PLACEHOLDER_PREFIX, "") - .split(".", 2) - ) + [placeholder] = placeholders + parts = placeholder.name.replace( + BLOCK_DOCUMENT_PLACEHOLDER_PREFIX, "" + ).split(".", 2) + block_type_slug, block_document_name, *value_keypath = parts block_document = await client.read_block_document_by_name( name=block_document_name, block_type_slug=block_type_slug ) - value = block_document.data + data = block_document.data + value: Union[T, dict[str, Any]] = data # resolving system blocks to their data for backwards compatibility - if len(value) == 1 and "value" in value: + if len(data) == 1 and "value" in data: # only resolve the value if the keypath is not already pointing to "value" - if len(value_keypath) == 0 or value_keypath[0][:5] != "value": - value = value["value"] + if not (value_keypath and value_keypath[0].startswith("value")): + data = value = value["value"] # resolving keypath/block attributes - if len(value_keypath) > 0: - value_keypath: str = value_keypath[0] - value = get_from_dict(value, value_keypath, default=NotSet) - if value is NotSet: + if value_keypath: + from_dict: Any = get_from_dict(data, value_keypath[0], default=NotSet) + if from_dict is NotSet: raise ValueError( f"Invalid template: {template!r}. Could not resolve the" " keypath in the block document data." ) + value = from_dict return value else: @@ -311,7 +306,7 @@ async def resolve_block_document_references( @inject_client -async def resolve_variables(template: T, client: Optional["PrefectClient"] = None): +async def resolve_variables(template: T, client: Optional["PrefectClient"] = None) -> T: """ Resolve variables in a template by replacing each variable placeholder with the value of the variable. @@ -326,6 +321,11 @@ async def resolve_variables(template: T, client: Optional["PrefectClient"] = Non Returns: The template with variables resolved """ + if TYPE_CHECKING: + # The @inject_client decorator takes care of providing the client, but + # the function signature must mark it as optional to callers. 
+ assert client is not None + if isinstance(template, str): placeholders = find_placeholders(template) has_variable_placeholder = any( @@ -346,7 +346,7 @@ async def resolve_variables(template: T, client: Optional["PrefectClient"] = Non if variable is None: return "" else: - return variable.value + return cast(T, variable.value) else: for full_match, name, placeholder_type in placeholders: if placeholder_type is PlaceholderType.VARIABLE: @@ -355,7 +355,7 @@ async def resolve_variables(template: T, client: Optional["PrefectClient"] = Non if variable is None: template = template.replace(full_match, "") else: - template = template.replace(full_match, variable.value) + template = template.replace(full_match, str(variable.value)) return template elif isinstance(template, dict): return { diff --git a/src/prefect/utilities/text.py b/src/prefect/utilities/text.py index 14637c72f49b..3f37d2c719d2 100644 --- a/src/prefect/utilities/text.py +++ b/src/prefect/utilities/text.py @@ -1,5 +1,6 @@ import difflib -from typing import Iterable, Optional +from collections.abc import Iterable +from typing import Optional def truncated_to(length: int, value: Optional[str]) -> str: diff --git a/src/prefect/utilities/timeout.py b/src/prefect/utilities/timeout.py index 0074a4d337ae..596ad5c92568 100644 --- a/src/prefect/utilities/timeout.py +++ b/src/prefect/utilities/timeout.py @@ -1,6 +1,6 @@ from asyncio import CancelledError from contextlib import contextmanager -from typing import Optional, Type +from typing import Optional from prefect._internal.concurrency.cancellation import ( cancel_async_after, @@ -8,7 +8,7 @@ ) -def fail_if_not_timeout_error(timeout_exc_type: Type[Exception]) -> None: +def fail_if_not_timeout_error(timeout_exc_type: type[Exception]) -> None: if not issubclass(timeout_exc_type, TimeoutError): raise ValueError( "The `timeout_exc_type` argument must be a subclass of `TimeoutError`." @@ -17,7 +17,7 @@ def fail_if_not_timeout_error(timeout_exc_type: Type[Exception]) -> None: @contextmanager def timeout_async( - seconds: Optional[float] = None, timeout_exc_type: Type[TimeoutError] = TimeoutError + seconds: Optional[float] = None, timeout_exc_type: type[TimeoutError] = TimeoutError ): fail_if_not_timeout_error(timeout_exc_type) @@ -34,7 +34,7 @@ def timeout_async( @contextmanager def timeout( - seconds: Optional[float] = None, timeout_exc_type: Type[TimeoutError] = TimeoutError + seconds: Optional[float] = None, timeout_exc_type: type[TimeoutError] = TimeoutError ): fail_if_not_timeout_error(timeout_exc_type) diff --git a/src/prefect/utilities/urls.py b/src/prefect/utilities/urls.py index 7b99f645b648..eadaaa106426 100644 --- a/src/prefect/utilities/urls.py +++ b/src/prefect/utilities/urls.py @@ -2,6 +2,7 @@ import ipaddress import socket import urllib.parse +from logging import Logger from string import Formatter from typing import TYPE_CHECKING, Any, Literal, Optional, Union from urllib.parse import urlparse @@ -19,7 +20,7 @@ from prefect.futures import PrefectFuture from prefect.variables import Variable -logger = get_logger("utilities.urls") +logger: Logger = get_logger("utilities.urls") # The following objects are excluded from UI URL generation because we lack a # directly-addressable URL: @@ -64,7 +65,7 @@ RUN_TYPES = {"flow-run", "task-run"} -def validate_restricted_url(url: str): +def validate_restricted_url(url: str) -> None: """ Validate that the provided URL is safe for outbound requests. 
This prevents attacks like SSRF (Server Side Request Forgery), where an attacker can make @@ -123,7 +124,7 @@ def convert_class_to_name(obj: Any) -> str: def url_for( obj: Union[ - "PrefectFuture", + "PrefectFuture[Any]", "Block", "Variable", "Automation", @@ -163,6 +164,7 @@ def url_for( url_for("flow-run", obj_id="123e4567-e89b-12d3-a456-426614174000") """ from prefect.blocks.core import Block + from prefect.client.schemas.objects import WorkPool from prefect.events.schemas.automations import Automation from prefect.events.schemas.events import ReceivedEvent, Resource from prefect.futures import PrefectFuture @@ -228,8 +230,10 @@ def url_for( elif name == "block": # Blocks are client-side objects whose API representation is a # BlockDocument. - obj_id = obj._block_document_id + obj_id = getattr(obj, "_block_document_id") elif name in ("variable", "work-pool"): + if TYPE_CHECKING: + assert isinstance(obj, (Variable, WorkPool)) obj_id = obj.name elif isinstance(obj, Resource): obj_id = obj.id.rpartition(".")[2] @@ -244,6 +248,7 @@ def url_for( url_format = ( UI_URL_FORMATS.get(name) if url_type == "ui" else API_URL_FORMATS.get(name) ) + assert url_format is not None if isinstance(obj, ReceivedEvent): url = url_format.format( diff --git a/src/prefect/utilities/visualization.py b/src/prefect/utilities/visualization.py index 0a6bd73b2774..b149fa42806e 100644 --- a/src/prefect/utilities/visualization.py +++ b/src/prefect/utilities/visualization.py @@ -2,10 +2,12 @@ Utilities for working with Flow.visualize() """ +from collections.abc import Coroutine from functools import partial -from typing import Any, List, Optional +from typing import Any, Literal, Optional, Union, overload -import graphviz +import graphviz # type: ignore # no typing stubs available +from typing_extensions import Self from prefect._internal.concurrency.api import from_async @@ -19,7 +21,7 @@ class VisualizationUnsupportedError(Exception): class TaskVizTrackerState: - current = None + current: Optional["TaskVizTracker"] = None class GraphvizImportError(Exception): @@ -30,16 +32,36 @@ class GraphvizExecutableNotFoundError(Exception): pass -def get_task_viz_tracker(): +def get_task_viz_tracker() -> Optional["TaskVizTracker"]: return TaskVizTrackerState.current +@overload +def track_viz_task( + is_async: Literal[True], + task_name: str, + parameters: dict[str, Any], + viz_return_value: Optional[Any] = None, +) -> Coroutine[Any, Any, Any]: + ... + + +@overload +def track_viz_task( + is_async: Literal[False], + task_name: str, + parameters: dict[str, Any], + viz_return_value: Optional[Any] = None, +) -> Any: + ... 
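# Illustrative sketch (not part of the diff): calling url_for as annotated above. The
# UUID comes from the function's own docstring; the url_type keyword is inferred from
# the ui/api branch shown in the diff, and None is returned when no UI/API URL is set.
from prefect.utilities.urls import url_for

flow_run_id = "123e4567-e89b-12d3-a456-426614174000"
ui_url = url_for("flow-run", obj_id=flow_run_id)                   # link into the UI
api_url = url_for("flow-run", obj_id=flow_run_id, url_type="api")  # link into the API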
+ + def track_viz_task( is_async: bool, task_name: str, - parameters: dict, + parameters: dict[str, Any], viz_return_value: Optional[Any] = None, -): +) -> Union[Coroutine[Any, Any, Any], Any]: """Return a result if sync otherwise return a coroutine that returns the result""" if is_async: return from_async.wait_for_call_in_loop_thread( @@ -50,14 +72,14 @@ def track_viz_task( def _track_viz_task( - task_name, - parameters, - viz_return_value=None, + task_name: str, + parameters: dict[str, Any], + viz_return_value: Optional[Any] = None, ) -> Any: task_run_tracker = get_task_viz_tracker() if task_run_tracker: - upstream_tasks = [] - for k, v in parameters.items(): + upstream_tasks: list[VizTask] = [] + for _, v in parameters.items(): if isinstance(v, VizTask): upstream_tasks.append(v) # if it's an object that we've already seen, @@ -85,19 +107,19 @@ class VizTask: def __init__( self, name: str, - upstream_tasks: Optional[List["VizTask"]] = None, + upstream_tasks: Optional[list["VizTask"]] = None, ): self.name = name - self.upstream_tasks = upstream_tasks if upstream_tasks else [] + self.upstream_tasks: list[VizTask] = upstream_tasks if upstream_tasks else [] class TaskVizTracker: def __init__(self): - self.tasks = [] - self.dynamic_task_counter = {} - self.object_id_to_task = {} + self.tasks: list[VizTask] = [] + self.dynamic_task_counter: dict[str, int] = {} + self.object_id_to_task: dict[int, VizTask] = {} - def add_task(self, task: VizTask): + def add_task(self, task: VizTask) -> None: if task.name not in self.dynamic_task_counter: self.dynamic_task_counter[task.name] = 0 else: @@ -106,11 +128,11 @@ def add_task(self, task: VizTask): task.name = f"{task.name}-{self.dynamic_task_counter[task.name]}" self.tasks.append(task) - def __enter__(self): + def __enter__(self) -> Self: TaskVizTrackerState.current = self return self - def __exit__(self, exc_type, exc_val, exc_tb): + def __exit__(self, exc_type: Any, exc_val: Any, exc_tb: Any) -> None: TaskVizTrackerState.current = None def link_viz_return_value_to_viz_task( @@ -129,7 +151,7 @@ def link_viz_return_value_to_viz_task( self.object_id_to_task[id(viz_return_value)] = viz_task -def build_task_dependencies(task_run_tracker: TaskVizTracker): +def build_task_dependencies(task_run_tracker: TaskVizTracker) -> graphviz.Digraph: """ Constructs a Graphviz directed graph object that represents the dependencies between tasks in the given TaskVizTracker. @@ -151,9 +173,9 @@ def build_task_dependencies(task_run_tracker: TaskVizTracker): try: g = graphviz.Digraph() for task in task_run_tracker.tasks: - g.node(task.name) + g.node(task.name) # type: ignore[reportUnknownMemberType] for upstream in task.upstream_tasks: - g.edge(upstream.name, task.name) + g.edge(upstream.name, task.name) # type: ignore[reportUnknownMemberType] return g except ImportError as exc: raise GraphvizImportError from exc @@ -166,7 +188,7 @@ def build_task_dependencies(task_run_tracker: TaskVizTracker): ) -def visualize_task_dependencies(graph: graphviz.Digraph, flow_run_name: str): +def visualize_task_dependencies(graph: graphviz.Digraph, flow_run_name: str) -> None: """ Renders and displays a Graphviz directed graph representing task dependencies. @@ -184,7 +206,7 @@ def visualize_task_dependencies(graph: graphviz.Digraph, flow_run_name: str): specifying a `viz_return_value`. 
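# Illustrative sketch (not part of the diff): the visualization helpers typed above
# record VizTask nodes and turn them into a Graphviz digraph (requires the graphviz
# package). The task names are hypothetical.
from prefect.utilities.visualization import (
    TaskVizTracker,
    VizTask,
    build_task_dependencies,
)

with TaskVizTracker() as tracker:
    extract = VizTask(name="extract")
    load = VizTask(name="load", upstream_tasks=[extract])
    tracker.add_task(extract)
    tracker.add_task(load)

graph = build_task_dependencies(tracker)  # graphviz.Digraph with an extract -> load edge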
""" try: - graph.render(filename=flow_run_name, view=True, format="png", cleanup=True) + graph.render(filename=flow_run_name, view=True, format="png", cleanup=True) # type: ignore[reportUnknownMemberType] except graphviz.backend.ExecutableNotFound as exc: msg = ( "It appears you do not have Graphviz installed, or it is not on your " diff --git a/src/prefect/variables.py b/src/prefect/variables.py index 1e785baa2166..9218b858a08b 100644 --- a/src/prefect/variables.py +++ b/src/prefect/variables.py @@ -1,13 +1,14 @@ -from typing import List, Optional +from typing import Optional from pydantic import BaseModel, Field +from prefect._internal.compatibility.async_dispatch import async_dispatch from prefect._internal.compatibility.migration import getattr_migration +from prefect.client.orchestration import get_client from prefect.client.schemas.actions import VariableCreate, VariableUpdate from prefect.client.utilities import get_or_create_client from prefect.exceptions import ObjectNotFound from prefect.types import MAX_VARIABLE_NAME_LENGTH, StrictVariableValue -from prefect.utilities.asyncutils import sync_compatible class Variable(BaseModel): @@ -31,19 +32,18 @@ class Variable(BaseModel): description="The value of the variable", examples=["my-value"], ) - tags: Optional[List[str]] = Field(default=None) + tags: Optional[list[str]] = Field(default=None) @classmethod - @sync_compatible - async def set( + async def aset( cls, name: str, value: StrictVariableValue, - tags: Optional[List[str]] = None, + tags: Optional[list[str]] = None, overwrite: bool = False, ) -> "Variable": """ - Sets a new variable. If one exists with the same name, must pass `overwrite=True` + Asynchronously sets a new variable. If one exists with the same name, must pass `overwrite=True` Returns the newly set variable object. @@ -60,8 +60,8 @@ async def set( from prefect.variables import Variable @flow - def my_flow(): - Variable.set(name="my_var",value="test_value", tags=["hi", "there"], overwrite=True) + async def my_flow(): + await Variable.aset(name="my_var",value="test_value", tags=["hi", "there"], overwrite=True) ``` """ client, _ = get_or_create_client() @@ -73,27 +73,76 @@ def my_flow(): raise ValueError( f"Variable {name!r} already exists. Use `overwrite=True` to update it." ) - await client.update_variable(variable=VariableUpdate(**var_dict)) + await client.update_variable( + variable=VariableUpdate.model_validate(var_dict) + ) variable = await client.read_variable_by_name(name) - var_dict = { - "name": variable.name, - "value": variable.value, - "tags": variable.tags or [], - } + for key in var_dict.keys(): + var_dict.update({key: getattr(variable, key)}) else: - await client.create_variable(variable=VariableCreate(**var_dict)) + await client.create_variable( + variable=VariableCreate.model_validate(var_dict) + ) + + return cls.model_validate(var_dict) + + @classmethod + @async_dispatch(aset) + def set( + cls, + name: str, + value: StrictVariableValue, + tags: Optional[list[str]] = None, + overwrite: bool = False, + ) -> "Variable": + """ + Sets a new variable. If one exists with the same name, must pass `overwrite=True` + + Returns the newly set variable object. + + Args: + - name: The name of the variable to set. + - value: The value of the variable to set. + - tags: An optional list of strings to associate with the variable. + - overwrite: Whether to overwrite the variable if it already exists. + + Example: + Set a new variable and overwrite it if it already exists. 
+ + ``` + from prefect.variables import Variable + + @flow + def my_flow(): + Variable.set(name="my_var",value="test_value", tags=["hi", "there"], overwrite=True) + ``` + """ + with get_client(sync_client=True) as client: + variable_exists = client.read_variable_by_name(name) + var_dict = {"name": name, "value": value, "tags": tags or []} - return cls(**var_dict) + if variable_exists: + if not overwrite: + raise ValueError( + f"Variable {name!r} already exists. Use `overwrite=True` to update it." + ) + client.update_variable(variable=VariableUpdate.model_validate(var_dict)) + variable = client.read_variable_by_name(name) + for key in var_dict.keys(): + var_dict.update({key: getattr(variable, key)}) + else: + client.create_variable(variable=VariableCreate.model_validate(var_dict)) + + return cls.model_validate(var_dict) @classmethod - @sync_compatible - async def get( + async def aget( cls, name: str, default: StrictVariableValue = None, ) -> StrictVariableValue: """ - Get a variable's value by name. + Asynchronously get a variable's value by name. If the variable does not exist, return the default value. @@ -108,8 +157,8 @@ async def get( from prefect.variables import Variable @flow - def my_flow(): - var = Variable.get("my_var") + async def my_flow(): + var = await Variable.aget("my_var") ``` """ client, _ = get_or_create_client() @@ -118,10 +167,41 @@ def my_flow(): return variable.value if variable else default @classmethod - @sync_compatible - async def unset(cls, name: str) -> bool: + @async_dispatch(aget) + def get( + cls, + name: str, + default: StrictVariableValue = None, + ) -> StrictVariableValue: """ - Unset a variable by name. + Get a variable's value by name. + + If the variable does not exist, return the default value. + + Args: + - name: The name of the variable value to get. + - default: The default value to return if the variable does not exist. + + Example: + Get a variable's value by name. + ```python + from prefect import flow + from prefect.variables import Variable + + @flow + def my_flow(): + var = Variable.get("my_var") + ``` + """ + with get_client(sync_client=True) as client: + variable = client.read_variable_by_name(name) + + return variable.value if variable else default + + @classmethod + async def aunset(cls, name: str) -> bool: + """ + Asynchronously unset a variable by name. Args: - name: The name of the variable to unset. @@ -135,8 +215,8 @@ async def unset(cls, name: str) -> bool: from prefect.variables import Variable @flow - def my_flow(): - Variable.unset("my_var") + async def my_flow(): + await Variable.aunset("my_var") ``` """ client, _ = get_or_create_client() @@ -146,5 +226,34 @@ def my_flow(): except ObjectNotFound: return False + @classmethod + @async_dispatch(aunset) + def unset(cls, name: str) -> bool: + """ + Unset a variable by name. + + Args: + - name: The name of the variable to unset. + + Returns `True` if the variable was deleted, `False` if the variable did not exist. + + Example: + Unset a variable by name. 
+ ```python + from prefect import flow + from prefect.variables import Variable + + @flow + def my_flow(): + Variable.unset("my_var") + ``` + """ + with get_client(sync_client=True) as client: + try: + client.delete_variable_by_name(name=name) + return True + except ObjectNotFound: + return False + __getattr__ = getattr_migration(__name__) diff --git a/src/prefect/workers/base.py b/src/prefect/workers/base.py index 7220c2e83d23..188646baaaac 100644 --- a/src/prefect/workers/base.py +++ b/src/prefect/workers/base.py @@ -53,6 +53,7 @@ Pending, exception_to_failed_state, ) +from prefect.types import KeyValueLabels from prefect.utilities.dispatch import get_registry_for_type, register_base_type from prefect.utilities.engine import propose_state from prefect.utilities.services import critical_service_loop @@ -988,7 +989,6 @@ async def _submit_run_and_capture_errors( try: configuration = await self._get_configuration(flow_run) submitted_event = self._emit_flow_run_submitted_event(configuration) - await self._give_worker_labels_to_flow_run(flow_run.id) result = await self.run( flow_run=flow_run, task_status=task_status, @@ -1222,13 +1222,20 @@ async def _give_worker_labels_to_flow_run(self, flow_run_id: UUID): Give this worker's identifying labels to the specified flow run. """ if self._cloud_client: - await self._cloud_client.update_flow_run_labels( - flow_run_id, - { - "prefect.worker.name": self.name, - "prefect.worker.type": self.type, - }, - ) + labels: KeyValueLabels = { + "prefect.worker.name": self.name, + "prefect.worker.type": self.type, + } + + if self._work_pool: + labels.update( + { + "prefect.work-pool.name": self._work_pool.name, + "prefect.work-pool.id": str(self._work_pool.id), + } + ) + + await self._cloud_client.update_flow_run_labels(flow_run_id, labels) async def __aenter__(self): self._logger.debug("Entering worker context...") diff --git a/tests/_internal/compatibility/test_async_dispatch.py b/tests/_internal/compatibility/test_async_dispatch.py index ea7b10c4387b..c2427a728d3e 100644 --- a/tests/_internal/compatibility/test_async_dispatch.py +++ b/tests/_internal/compatibility/test_async_dispatch.py @@ -7,7 +7,9 @@ async_dispatch, is_in_async_context, ) -from prefect.utilities.asyncutils import run_sync_in_worker_thread +from prefect.flows import flow +from prefect.tasks import task +from prefect.utilities.asyncutils import run_coro_as_sync, run_sync_in_worker_thread class TestAsyncDispatchBasicUsage: @@ -58,6 +60,38 @@ def my_function() -> None: await my_function(_sync=False) assert data == ["async"] + async def test_works_with_classmethods(self): + """Verify that async_impl can be a classmethod""" + + class MyClass: + @classmethod + async def my_amethod(cls) -> str: + return "async" + + @classmethod + @async_dispatch(my_amethod) + def my_method(cls) -> str: + return "sync" + + assert await MyClass.my_amethod() == "async" + assert MyClass.my_method(_sync=True) == "sync" + assert await MyClass.my_method() == "async" + + def test_works_with_classmethods_in_sync_context(self): + """Verify that classmethods work in sync context""" + + class MyClass: + @classmethod + async def my_amethod(cls) -> str: + return "async" + + @classmethod + @async_dispatch(my_amethod) + def my_method(cls) -> str: + return "sync" + + assert MyClass.my_method() == "sync" + class TestAsyncDispatchValidation: def test_async_compatible_requires_async_implementation(self): @@ -182,3 +216,89 @@ def check_context() -> None: assert ( is_in_async_context() is False ), "the loop should be closed and not 
considered an async context" + + +class TestIsInARunContext: + def test_dispatches_to_async_in_async_flow(self): + """ + Verify that async_dispatch dispatches to async in async flow + + The test function is sync, but the flow is async, so we should dispatch to + the async implementation. + """ + + async def my_afunction() -> str: + return "async" + + @async_dispatch(my_afunction) + def my_function() -> str: + return "sync" + + @flow + async def my_flow() -> str: + return await my_function() + + assert run_coro_as_sync(my_flow()) == "async" + + async def test_dispatches_to_sync_in_sync_flow(self): + """ + Verify that async_dispatch dispatches to sync in sync flow + + The test function is async, but the flow is sync, so we should dispatch to + the sync implementation. + """ + + async def my_afunction() -> str: + return "async" + + @async_dispatch(my_afunction) + def my_function() -> str: + return "sync" + + @flow + def my_flow() -> str: + return my_function() + + assert my_flow() == "sync" + + def test_dispatches_to_async_in_an_async_task(self): + """ + Verify that async_dispatch dispatches to async in an async task + + The test function is sync, but the task is async, so we should dispatch to + the async implementation. + """ + + async def my_afunction() -> str: + return "async" + + @async_dispatch(my_afunction) + def my_function() -> str: + return "sync" + + @task + async def my_task() -> str: + return await my_function() + + assert run_coro_as_sync(my_task()) == "async" + + async def test_dispatches_to_sync_in_a_sync_task(self): + """ + Verify that async_dispatch dispatches to sync in a sync task + + The test function is async, but the task is sync, so we should dispatch to + the sync implementation. + """ + + async def my_afunction() -> str: + return "async" + + @async_dispatch(my_afunction) + def my_function() -> str: + return "sync" + + @task + def my_task() -> str: + return my_function() + + assert my_task() == "sync" diff --git a/tests/_internal/test_retries.py b/tests/_internal/test_retries.py index d79b6e07e1f7..0d4376baa6ab 100644 --- a/tests/_internal/test_retries.py +++ b/tests/_internal/test_retries.py @@ -74,9 +74,9 @@ async def fail_func(): with pytest.raises(ValueError, match="Test error"), caplog.at_level("WARNING"): await fail_func() - assert ( - "Attempt 1 of function 'fail_func' failed with ValueError. 
Retrying in" - in caplog.text + assert all( + substr in caplog.text + for substr in ["Attempt 1 of function", "Test error", "Retrying in"] ) assert "'fail_func' failed after 2 attempts" in caplog.text assert mock_sleep.call_count == 1 diff --git a/tests/blocks/test_core.py b/tests/blocks/test_core.py index 80a7e2dd75ab..d70c6d12cc16 100644 --- a/tests/blocks/test_core.py +++ b/tests/blocks/test_core.py @@ -887,8 +887,8 @@ class ParentBlock(Block): async def test_block_load( self, test_block, block_document, in_memory_prefect_client ): - my_block = await test_block.load( - block_document.name, client=in_memory_prefect_client + my_block = test_block.load( + block_document.name, client=in_memory_prefect_client, _sync=True ) assert my_block._block_document_name == block_document.name @@ -897,6 +897,16 @@ async def test_block_load( assert my_block._block_schema_id == block_document.block_schema_id assert my_block.foo == "bar" + my_aloaded_block = await test_block.aload( + block_document.name, client=in_memory_prefect_client + ) + + assert my_aloaded_block._block_document_name == block_document.name + assert my_aloaded_block._block_document_id == block_document.id + assert my_aloaded_block._block_type_id == block_document.block_type_id + assert my_aloaded_block._block_schema_id == block_document.block_schema_id + assert my_aloaded_block.foo == "bar" + @patch("prefect.blocks.core.load_prefect_collections") async def test_block_load_loads_collections( self, @@ -905,7 +915,16 @@ async def test_block_load_loads_collections( block_document: BlockDocument, in_memory_prefect_client, ): - await Block.load( + Block.load( + block_document.block_type.slug + "/" + block_document.name, + client=in_memory_prefect_client, + _sync=True, + ) + mock_load_prefect_collections.assert_called_once() + + mock_load_prefect_collections.reset_mock() + + await Block.aload( block_document.block_type.slug + "/" + block_document.name, client=in_memory_prefect_client, ) @@ -918,9 +937,12 @@ class Custom(Block): my_custom_block = Custom(message="hello") await my_custom_block.save("my-custom-block") - loaded_block = await Block.load("custom/my-custom-block") + loaded_block = Block.load("custom/my-custom-block", _sync=True) assert loaded_block.message == "hello" + aloaded_block = await Block.aload("custom/my-custom-block") + assert aloaded_block.message == "hello" + async def test_load_nested_block(self, session, in_memory_prefect_client): class B(Block): _block_schema_type = "abc" @@ -1018,8 +1040,8 @@ class E(Block): await session.commit() - block_instance = await E.load( - "outer-block-document", client=in_memory_prefect_client + block_instance = E.load( + "outer-block-document", client=in_memory_prefect_client, _sync=True ) assert isinstance(block_instance, E) assert isinstance(block_instance.c, C) @@ -1051,12 +1073,56 @@ class E(Block): "block_type_slug": "d", } + aloaded_block_instance = await E.aload( + "outer-block-document", client=in_memory_prefect_client + ) + assert isinstance(aloaded_block_instance, E) + assert isinstance(aloaded_block_instance.c, C) + assert isinstance(aloaded_block_instance.d, D) + + assert aloaded_block_instance._block_document_name == outer_block_document.name + assert aloaded_block_instance._block_document_id == outer_block_document.id + assert ( + aloaded_block_instance._block_type_id == outer_block_document.block_type_id + ) + assert ( + aloaded_block_instance._block_schema_id + == outer_block_document.block_schema_id + ) + assert aloaded_block_instance.c.model_dump() == { + "y": 2, + 
"_block_document_id": middle_block_document_1.id, + "_block_document_name": "middle-block-document-1", + "_is_anonymous": False, + "block_type_slug": "c", + } + assert aloaded_block_instance.d.model_dump() == { + "b": { + "x": 1, + "_block_document_id": inner_block_document.id, + "_block_document_name": "inner-block-document", + "_is_anonymous": False, + "block_type_slug": "b", + }, + "z": "ztop", + "_block_document_id": middle_block_document_2.id, + "_block_document_name": "middle-block-document-2", + "_is_anonymous": False, + "block_type_slug": "d", + } + async def test_create_block_from_nonexistent_name(self, test_block): with pytest.raises( ValueError, match="Unable to find block document named blocky for block type x", ): - await test_block.load("blocky") + test_block.load("blocky", _sync=True) + + with pytest.raises( + ValueError, + match="Unable to find block document named blocky for block type x", + ): + await test_block.aload("blocky") async def test_save_block_from_flow(self): class Test(Block): diff --git a/tests/blocks/test_system.py b/tests/blocks/test_system.py index fbfa60a3c29b..57e332c803c0 100644 --- a/tests/blocks/test_system.py +++ b/tests/blocks/test_system.py @@ -2,9 +2,9 @@ import pytest from pydantic import Secret as PydanticSecret from pydantic import SecretStr -from pydantic_extra_types.pendulum_dt import DateTime as PydanticDateTime from prefect.blocks.system import DateTime, Secret +from prefect.types import DateTime as PydanticDateTime def test_datetime(ignore_prefect_deprecation_warnings): diff --git a/tests/cli/deployment/test_deployment_run.py b/tests/cli/deployment/test_deployment_run.py index 1152755fa708..f388b9b027c3 100644 --- a/tests/cli/deployment/test_deployment_run.py +++ b/tests/cli/deployment/test_deployment_run.py @@ -4,7 +4,6 @@ import pendulum import pytest -from pendulum.datetime import DateTime from pendulum.duration import Duration import prefect @@ -12,6 +11,7 @@ from prefect.exceptions import FlowRunWaitTimeout from prefect.states import Completed, Failed from prefect.testing.cli import invoke_and_assert +from prefect.types import DateTime from prefect.utilities.asyncutils import run_sync_in_worker_thread diff --git a/tests/cli/test_flow_run.py b/tests/cli/test_flow_run.py index 6da46bafba03..50eeb1125876 100644 --- a/tests/cli/test_flow_run.py +++ b/tests/cli/test_flow_run.py @@ -1,7 +1,6 @@ from uuid import UUID, uuid4 import pytest -from pydantic_extra_types.pendulum_dt import DateTime import prefect.exceptions from prefect import flow @@ -23,6 +22,7 @@ StateType, ) from prefect.testing.cli import invoke_and_assert +from prefect.types import DateTime from prefect.utilities.asyncutils import run_sync_in_worker_thread, sync_compatible diff --git a/tests/client/schemas/test_schedules.py b/tests/client/schemas/test_schedules.py index cbded66d27f7..48680aeba94a 100644 --- a/tests/client/schemas/test_schedules.py +++ b/tests/client/schemas/test_schedules.py @@ -2,7 +2,6 @@ from itertools import combinations import pytest -from pydantic_extra_types.pendulum_dt import DateTime from prefect.client.schemas.schedules import ( CronSchedule, @@ -10,6 +9,7 @@ RRuleSchedule, construct_schedule, ) +from prefect.types import DateTime class TestConstructSchedule: diff --git a/tests/client/test_prefect_client.py b/tests/client/test_prefect_client.py index 6791adc95855..4cf9b9550bb7 100644 --- a/tests/client/test_prefect_client.py +++ b/tests/client/test_prefect_client.py @@ -18,7 +18,6 @@ import respx from fastapi import Depends, FastAPI, status from 
fastapi.security import HTTPBearer -from pydantic_extra_types.pendulum_dt import DateTime import prefect.client.schemas as client_schemas import prefect.context @@ -90,6 +89,7 @@ from prefect.states import Completed, Pending, Running, Scheduled, State from prefect.tasks import task from prefect.testing.utilities import AsyncMock, exceptions_equal +from prefect.types import DateTime from prefect.utilities.pydantic import parse_obj_as @@ -631,6 +631,19 @@ def foo(): assert lookup.name == foo.name +async def test_create_then_delete_flow(prefect_client): + @flow + def foo(): + pass + + flow_id = await prefect_client.create_flow(foo) + assert isinstance(flow_id, UUID) + + await prefect_client.delete_flow(flow_id) + with pytest.raises(prefect.exceptions.PrefectHTTPStatusError, match="404"): + await prefect_client.read_flow(flow_id) + + async def test_create_then_read_deployment(prefect_client, storage_document_id): @flow def foo(): diff --git a/tests/concurrency/test_acquire_concurrency_slots.py b/tests/concurrency/test_acquire_concurrency_slots.py index c8a267f6cf78..a6d1817051a7 100644 --- a/tests/concurrency/test_acquire_concurrency_slots.py +++ b/tests/concurrency/test_acquire_concurrency_slots.py @@ -4,7 +4,9 @@ from httpx import Response from prefect.client.schemas.responses import MinimalConcurrencyLimitResponse -from prefect.concurrency.asyncio import _acquire_concurrency_slots +from prefect.concurrency.asyncio import ( + _aacquire_concurrency_slots, +) async def test_calls_increment_client_method(): @@ -21,7 +23,7 @@ async def test_calls_increment_client_method(): ) increment_concurrency_slots.return_value = response - await _acquire_concurrency_slots( + await _aacquire_concurrency_slots( names=["test-1", "test-2"], slots=1, mode="concurrency" ) increment_concurrency_slots.assert_called_once_with( @@ -46,5 +48,5 @@ async def test_returns_minimal_concurrency_limit(): ) increment_concurrency_slots.return_value = response - result = await _acquire_concurrency_slots(["test-1", "test-2"], 1) + result = await _aacquire_concurrency_slots(["test-1", "test-2"], 1) assert result == limits diff --git a/tests/concurrency/test_concurrency_asyncio.py b/tests/concurrency/test_concurrency_asyncio.py index 56bd4af523aa..ff1306e95e01 100644 --- a/tests/concurrency/test_concurrency_asyncio.py +++ b/tests/concurrency/test_concurrency_asyncio.py @@ -7,8 +7,8 @@ from prefect import flow, task from prefect.concurrency.asyncio import ( ConcurrencySlotAcquisitionError, - _acquire_concurrency_slots, - _release_concurrency_slots, + _aacquire_concurrency_slots, + _arelease_concurrency_slots, concurrency, rate_limit, ) @@ -28,12 +28,12 @@ async def resource_heavy(): assert not executed with mock.patch( - "prefect.concurrency.asyncio._acquire_concurrency_slots", - wraps=_acquire_concurrency_slots, + "prefect.concurrency.asyncio._aacquire_concurrency_slots", + wraps=_aacquire_concurrency_slots, ) as acquire_spy: with mock.patch( - "prefect.concurrency.asyncio._release_concurrency_slots", - wraps=_release_concurrency_slots, + "prefect.concurrency.asyncio._arelease_concurrency_slots", + wraps=_arelease_concurrency_slots, ) as release_spy: await resource_heavy() @@ -93,7 +93,7 @@ async def my_flow(): state = await my_flow(return_state=True) assert state.is_failed() with pytest.raises(ConcurrencySlotAcquisitionError): - await state.result() + await state.result() # type: ignore[reportGeneralTypeIssues] async def test_concurrency_emits_events( @@ -112,7 +112,7 @@ async def resource_heavy(): await resource_heavy() - await 
asserting_events_worker.drain() + await asserting_events_worker.drain() # type: ignore[reportGeneralTypeIssues] assert isinstance(asserting_events_worker._client, AssertingEventsClient) assert len(asserting_events_worker._client.events) == 4 # 2 acquire, 2 release @@ -221,12 +221,12 @@ async def resource_heavy(): assert not executed with mock.patch( - "prefect.concurrency.asyncio._acquire_concurrency_slots", - wraps=_acquire_concurrency_slots, + "prefect.concurrency.asyncio._aacquire_concurrency_slots", + wraps=_aacquire_concurrency_slots, ) as acquire_spy: with mock.patch( - "prefect.concurrency.asyncio._release_concurrency_slots", - wraps=_release_concurrency_slots, + "prefect.concurrency.asyncio._arelease_concurrency_slots", + wraps=_arelease_concurrency_slots, ) as release_spy: await resource_heavy() @@ -281,7 +281,7 @@ async def my_flow(): state = await my_flow(return_state=True) assert state.is_failed() with pytest.raises(ConcurrencySlotAcquisitionError): - await state.result() + await state.result() # type: ignore[reportGeneralTypeIssues] async def test_rate_limit_emits_events( @@ -296,7 +296,7 @@ async def resource_heavy(): await resource_heavy() - await asserting_events_worker.drain() + await asserting_events_worker.drain() # type: ignore[reportGeneralTypeIssues] assert isinstance(asserting_events_worker._client, AssertingEventsClient) assert len(asserting_events_worker._client.events) == 2 @@ -373,11 +373,11 @@ async def resource_heavy(): assert not executed with mock.patch( - "prefect.concurrency.asyncio._acquire_concurrency_slots", + "prefect.concurrency.sync._acquire_concurrency_slots", wraps=lambda *args, **kwargs: None, ) as acquire_spy: with mock.patch( - "prefect.concurrency.asyncio._release_concurrency_slots", + "prefect.concurrency.sync._arelease_concurrency_slots", wraps=lambda *args, **kwargs: None, ) as release_spy: await resource_heavy() @@ -401,12 +401,12 @@ async def resource_heavy(): assert not executed with mock.patch( - "prefect.concurrency.asyncio._acquire_concurrency_slots", - wraps=_acquire_concurrency_slots, + "prefect.concurrency.asyncio._aacquire_concurrency_slots", + wraps=_aacquire_concurrency_slots, ) as acquire_spy: with mock.patch( - "prefect.concurrency.asyncio._release_concurrency_slots", - wraps=_release_concurrency_slots, + "prefect.concurrency.asyncio._arelease_concurrency_slots", + wraps=_arelease_concurrency_slots, ) as release_spy: await resource_heavy() @@ -443,11 +443,11 @@ async def resource_heavy(): assert not executed with mock.patch( - "prefect.concurrency.asyncio._acquire_concurrency_slots", + "prefect.concurrency.sync._acquire_concurrency_slots", wraps=lambda *args, **kwargs: None, ) as acquire_spy: with mock.patch( - "prefect.concurrency.asyncio._release_concurrency_slots", + "prefect.concurrency.sync._arelease_concurrency_slots", wraps=lambda *args, **kwargs: None, ) as release_spy: await resource_heavy() diff --git a/tests/concurrency/test_concurrency_sync.py b/tests/concurrency/test_concurrency_sync.py index 8be01fba5ed8..13724a097214 100644 --- a/tests/concurrency/test_concurrency_sync.py +++ b/tests/concurrency/test_concurrency_sync.py @@ -5,12 +5,13 @@ from starlette import status from prefect import flow, task -from prefect.concurrency.asyncio import ( - ConcurrencySlotAcquisitionError, +from prefect.concurrency.asyncio import ConcurrencySlotAcquisitionError +from prefect.concurrency.sync import ( _acquire_concurrency_slots, _release_concurrency_slots, + concurrency, + rate_limit, ) -from prefect.concurrency.sync import 
concurrency, rate_limit from prefect.events.clients import AssertingEventsClient from prefect.events.worker import EventsWorker from prefect.server.schemas.core import ConcurrencyLimitV2 @@ -43,7 +44,6 @@ def resource_heavy(): create_if_missing=None, max_retries=None, strict=False, - _sync=True, ) # On release we calculate how many seconds the slots were occupied @@ -275,7 +275,6 @@ def resource_heavy(): timeout_seconds=None, create_if_missing=None, strict=False, - _sync=True, ) # When used as a rate limit concurrency slots are not explicitly diff --git a/tests/concurrency/test_release_concurrency_slots.py b/tests/concurrency/test_release_concurrency_slots.py index a5f66f2d1a81..98d477f724f3 100644 --- a/tests/concurrency/test_release_concurrency_slots.py +++ b/tests/concurrency/test_release_concurrency_slots.py @@ -4,7 +4,9 @@ from httpx import Response from prefect.client.schemas.responses import MinimalConcurrencyLimitResponse -from prefect.concurrency.asyncio import _release_concurrency_slots +from prefect.concurrency.asyncio import ( + _arelease_concurrency_slots, +) async def test_calls_release_client_method(): @@ -21,7 +23,7 @@ async def test_calls_release_client_method(): ) client_release_concurrency_slots.return_value = response - await _release_concurrency_slots( + await _arelease_concurrency_slots( names=["test-1", "test-2"], slots=1, occupancy_seconds=1.0 ) client_release_concurrency_slots.assert_called_once_with( @@ -45,5 +47,5 @@ async def test_returns_minimal_concurrency_limit(): ) client_release_concurrency_slots.return_value = response - result = await _release_concurrency_slots(["test-1", "test-2"], 1, 1.0) + result = await _arelease_concurrency_slots(["test-1", "test-2"], 1, 1.0) assert result == limits diff --git a/tests/concurrency/v1/test_increment_concurrency_limits.py b/tests/concurrency/v1/test_increment_concurrency_limits.py index 364403410c19..1ee3f0894349 100644 --- a/tests/concurrency/v1/test_increment_concurrency_limits.py +++ b/tests/concurrency/v1/test_increment_concurrency_limits.py @@ -4,7 +4,9 @@ from httpx import Response from prefect.client.schemas.responses import MinimalConcurrencyLimitResponse -from prefect.concurrency.asyncio import _acquire_concurrency_slots +from prefect.concurrency.asyncio import ( + _aacquire_concurrency_slots, +) async def test_calls_increment_client_method(): @@ -25,7 +27,7 @@ async def test_calls_increment_client_method(): ) increment_concurrency_slots.return_value = response - await _acquire_concurrency_slots( + await _aacquire_concurrency_slots( names=["test-1", "test-2"], slots=1, mode="concurrency" ) increment_concurrency_slots.assert_called_once_with( @@ -54,5 +56,5 @@ async def test_returns_minimal_concurrency_limit(): ) increment_concurrency_slots.return_value = response - result = await _acquire_concurrency_slots(["test-1", "test-2"], 1) + result = await _aacquire_concurrency_slots(["test-1", "test-2"], 1) assert result == limits diff --git a/tests/deployment/test_steps.py b/tests/deployment/test_steps.py index 9da22636da35..51d123e926cb 100644 --- a/tests/deployment/test_steps.py +++ b/tests/deployment/test_steps.py @@ -15,6 +15,7 @@ from prefect.client.orchestration import PrefectClient from prefect.deployments.steps import run_step from prefect.deployments.steps.core import StepExecutionError, run_steps +from prefect.deployments.steps.pull import agit_clone from prefect.deployments.steps.utility import run_shell_script from prefect.testing.utilities import AsyncMock, MagicMock from prefect.utilities.filesystem 
import tmpchdir @@ -517,25 +518,151 @@ async def mock_sleep(seconds): monkeypatch.setattr("asyncio.sleep", mock_sleep) + with caplog.at_level("WARNING"): + result = await agit_clone(repository="https://github.com/org/repo.git") + + assert "Octocat went out to lunch" in caplog.text + assert "Octocat is playing chess in the break room" in caplog.text + + assert result == {"directory": "repo"} + + expected_call = call( + url="https://github.com/org/repo.git", + credentials=None, + branch=None, + include_submodules=False, + ) + + assert mock_git_repo.call_args_list == [expected_call] + + async def test_agit_clone_basic(self, git_repository_mock): + """Test basic async git clone functionality""" + output = await agit_clone(repository="https://github.com/org/repo.git") + + assert output["directory"] == "repo" + git_repository_mock.assert_called_once_with( + url="https://github.com/org/repo.git", + credentials=None, + branch=None, + include_submodules=False, + ) + git_repository_mock.return_value.pull_code.assert_awaited_once() + + async def test_agit_clone_with_all_options(self, git_repository_mock): + """Test async git clone with all options specified""" + await Secret(value="my-access-token").save(name="test-token") + + output = await agit_clone( + repository="https://github.com/org/repo.git", + branch="dev", + include_submodules=True, + access_token="my-access-token", + ) + + assert output["directory"] == "repo" + git_repository_mock.assert_called_once_with( + url="https://github.com/org/repo.git", + credentials={"access_token": "my-access-token"}, + branch="dev", + include_submodules=True, + ) + git_repository_mock.return_value.pull_code.assert_awaited_once() + + async def test_agit_clone_with_credentials_block(self, git_repository_mock): + """Test async git clone with credentials block""" + + class MockGitCredentials(Block): + username: str + password: str + + creds = MockGitCredentials(username="marvin42", password="hunter2") + + output = await agit_clone( + repository="https://github.com/org/repo.git", credentials=creds + ) + + assert output["directory"] == "repo" + git_repository_mock.assert_called_once_with( + url="https://github.com/org/repo.git", + credentials=creds, + branch=None, + include_submodules=False, + ) + git_repository_mock.return_value.pull_code.assert_awaited_once() + + async def test_agit_clone_raises_on_both_auth_methods(self): + """Test that providing both access_token and credentials raises an error""" + with pytest.raises( + ValueError, + match="Please provide either an access token or credentials but not both", + ): + await agit_clone( + repository="https://github.com/org/repo.git", + access_token="token", + credentials=MagicMock(), + ) + + async def test_agit_clone_retry(self, monkeypatch, caplog): + """Test retry behavior of async git clone""" + mock_git_repo = MagicMock() + mock_git_repo.return_value.pull_code = AsyncMock( + side_effect=[ + RuntimeError("Network timeout"), + RuntimeError("Server busy"), + None, # Success on third try + ] + ) + mock_git_repo.return_value.destination.relative_to.return_value = "repo" + monkeypatch.setattr( + "prefect.deployments.steps.pull.GitRepository", mock_git_repo + ) + + async def mock_sleep(seconds): + pass + + monkeypatch.setattr("asyncio.sleep", mock_sleep) + + with caplog.at_level("WARNING"): + result = await agit_clone(repository="https://github.com/org/repo.git") + + assert result == {"directory": "repo"} + assert mock_git_repo.return_value.pull_code.await_count == 3 + assert "Network timeout" in caplog.text + assert 
"Server busy" in caplog.text + + async def test_agit_clone_via_steps(self, monkeypatch, caplog): + """Test that async-only steps work when called via step syntax""" + mock_git_repo = MagicMock() + mock_git_repo.return_value.pull_code = AsyncMock( + side_effect=[ + RuntimeError("Network timeout"), + RuntimeError("Server busy"), + None, # Success on third try + ] + ) + mock_git_repo.return_value.destination.relative_to.return_value = "repo" + monkeypatch.setattr( + "prefect.deployments.steps.pull.GitRepository", mock_git_repo + ) + + async def mock_sleep(seconds): + pass + + monkeypatch.setattr("asyncio.sleep", mock_sleep) + with caplog.at_level("WARNING"): result = await run_step( { - "prefect.deployments.steps.git_clone": { + "prefect.deployments.steps.pull.agit_clone": { "repository": "https://github.com/org/repo.git" } } ) - assert ( - "Attempt 1 of function 'git_clone' failed with RuntimeError. Retrying in " - in caplog.text - ) - assert ( - "Attempt 2 of function 'git_clone' failed with RuntimeError. Retrying in " - in caplog.text - ) - assert result == {"directory": "repo"} + assert mock_git_repo.return_value.pull_code.await_count == 3 + assert "Network timeout" in caplog.text + assert "Server busy" in caplog.text expected_call = call( url="https://github.com/org/repo.git", @@ -543,8 +670,7 @@ async def mock_sleep(seconds): branch=None, include_submodules=False, ) - - assert mock_git_repo.call_args_list == [expected_call] * 3 + assert mock_git_repo.call_args_list == [expected_call] class TestPullFromRemoteStorage: diff --git a/tests/events/client/test_events_client.py b/tests/events/client/test_events_client.py index 9acf2ef82b2b..5b6970ae7e3b 100644 --- a/tests/events/client/test_events_client.py +++ b/tests/events/client/test_events_client.py @@ -359,7 +359,7 @@ async def __aexit__(self, exc_type, exc_val, exc_tb): def mock_connect(*args, **kwargs): return MockConnect() - monkeypatch.setattr("prefect.events.clients.connect", mock_connect) + monkeypatch.setattr("prefect.events.clients.websocket_connect", mock_connect) with caplog.at_level(logging.WARNING): with pytest.raises(Exception, match="Connection failed"): diff --git a/tests/events/client/test_events_emit_event.py b/tests/events/client/test_events_emit_event.py index e31e21592fdc..50a0a4a23bae 100644 --- a/tests/events/client/test_events_emit_event.py +++ b/tests/events/client/test_events_emit_event.py @@ -3,7 +3,6 @@ from uuid import UUID import pendulum -from pydantic_extra_types.pendulum_dt import DateTime from prefect.events import emit_event from prefect.events.clients import AssertingEventsClient @@ -12,6 +11,7 @@ PREFECT_API_URL, temporary_settings, ) +from prefect.types import DateTime def test_emits_simple_event(asserting_events_worker: EventsWorker, reset_worker_events): diff --git a/tests/events/client/test_events_schema.py b/tests/events/client/test_events_schema.py index c5124ce27c6d..a29d58dae83c 100644 --- a/tests/events/client/test_events_schema.py +++ b/tests/events/client/test_events_schema.py @@ -3,9 +3,9 @@ from uuid import UUID, uuid4 import pytest -from pydantic_extra_types.pendulum_dt import DateTime from prefect.events import Event, RelatedResource, Resource +from prefect.types import DateTime def test_client_events_generate_an_id_by_default(): diff --git a/tests/events/client/test_events_worker.py b/tests/events/client/test_events_worker.py index b338c49dd1dd..5170f45a9b8c 100644 --- a/tests/events/client/test_events_worker.py +++ b/tests/events/client/test_events_worker.py @@ -9,6 +9,7 @@ 
AssertingEventsClient, PrefectEventsClient, ) +from prefect.events.utilities import emit_event from prefect.events.worker import EventsWorker from prefect.settings import ( PREFECT_API_URL, @@ -88,3 +89,30 @@ def emitting_flow(): assert event.related[1].id == f"prefect.flow.{db_flow.id}" assert event.related[1].role == "flow" assert event.related[1]["prefect.resource.name"] == db_flow.name + + +async def test_does_not_include_related_resources_from_run_context_for_lineage_events( + asserting_events_worker: EventsWorker, + reset_worker_events, + prefect_client, +): + @flow + def emitting_flow(): + emit_event( + event="s3.read", + resource={ + "prefect.resource.id": "s3://bucket-name/key-name", + "prefect.resource.role": "data-source", + "prefect.resource.lineage-group": "global", + }, + ) + + emitting_flow(return_state=True) + + await asserting_events_worker.drain() + + assert len(asserting_events_worker._client.events) == 1 + event = asserting_events_worker._client.events[0] + assert event.event == "s3.read" + assert event.resource.id == "s3://bucket-name/key-name" + assert len(event.related) == 0 diff --git a/tests/events/server/actions/test_actions_service.py b/tests/events/server/actions/test_actions_service.py index 4f98488e44f6..ef250327d415 100644 --- a/tests/events/server/actions/test_actions_service.py +++ b/tests/events/server/actions/test_actions_service.py @@ -3,13 +3,13 @@ import pendulum import pytest -from pendulum.datetime import DateTime from prefect.server.events import actions from prefect.server.events.clients import AssertingEventsClient from prefect.server.events.schemas.automations import TriggeredAction from prefect.server.utilities.messaging import MessageHandler from prefect.server.utilities.messaging.memory import MemoryMessage +from prefect.types import DateTime @pytest.fixture diff --git a/tests/events/server/actions/test_calling_webhook.py b/tests/events/server/actions/test_calling_webhook.py index 2c4ea5f1cae6..bf75ddaec2af 100644 --- a/tests/events/server/actions/test_calling_webhook.py +++ b/tests/events/server/actions/test_calling_webhook.py @@ -8,7 +8,6 @@ import pendulum import pytest from httpx import Response -from pendulum.datetime import DateTime from pydantic import TypeAdapter from sqlalchemy.ext.asyncio import AsyncSession @@ -35,6 +34,7 @@ from prefect.server.models import deployments, flow_runs, flows, work_queues from prefect.server.schemas.actions import WorkQueueCreate from prefect.server.schemas.core import Deployment, Flow, FlowRun, WorkQueue +from prefect.types import DateTime @pytest.fixture diff --git a/tests/events/server/actions/test_jinja_templated_action.py b/tests/events/server/actions/test_jinja_templated_action.py index fca5279c9be1..793c91a8ebb8 100644 --- a/tests/events/server/actions/test_jinja_templated_action.py +++ b/tests/events/server/actions/test_jinja_templated_action.py @@ -6,7 +6,6 @@ import pendulum import pytest -from pendulum.datetime import DateTime from pydantic import Field, ValidationInfo, field_validator from sqlalchemy.ext.asyncio import AsyncSession @@ -48,6 +47,7 @@ from prefect.server.schemas.responses import FlowRunResponse from prefect.server.schemas.states import State, StateType from prefect.settings import PREFECT_UI_URL, temporary_settings +from prefect.types import DateTime @pytest.fixture(autouse=True) diff --git a/tests/events/server/actions/test_pausing_resuming_automation.py b/tests/events/server/actions/test_pausing_resuming_automation.py index 6a092be21e14..3c4b0af243a9 100644 --- 
a/tests/events/server/actions/test_pausing_resuming_automation.py +++ b/tests/events/server/actions/test_pausing_resuming_automation.py @@ -4,7 +4,6 @@ import pendulum import pytest -from pendulum.datetime import DateTime from pydantic import ValidationError from sqlalchemy.ext.asyncio import AsyncSession @@ -25,6 +24,7 @@ PREFECT_API_SERVICES_TRIGGERS_ENABLED, temporary_settings, ) +from prefect.types import DateTime from prefect.utilities.pydantic import parse_obj_as diff --git a/tests/events/server/actions/test_pausing_resuming_deployment.py b/tests/events/server/actions/test_pausing_resuming_deployment.py index fe5c669c6f71..7e0bb4d66fe2 100644 --- a/tests/events/server/actions/test_pausing_resuming_deployment.py +++ b/tests/events/server/actions/test_pausing_resuming_deployment.py @@ -4,7 +4,6 @@ import pendulum import pytest -from pendulum.datetime import DateTime from pydantic import ValidationError from sqlalchemy.ext.asyncio import AsyncSession @@ -23,6 +22,7 @@ from prefect.server.schemas.actions import DeploymentScheduleCreate from prefect.server.schemas.core import Deployment, Flow from prefect.server.schemas.schedules import IntervalSchedule +from prefect.types import DateTime from prefect.utilities.pydantic import parse_obj_as diff --git a/tests/events/server/actions/test_pausing_resuming_work_pool.py b/tests/events/server/actions/test_pausing_resuming_work_pool.py index 7f8ad8b162e3..d9cf20cc2471 100644 --- a/tests/events/server/actions/test_pausing_resuming_work_pool.py +++ b/tests/events/server/actions/test_pausing_resuming_work_pool.py @@ -3,7 +3,6 @@ from uuid import UUID, uuid4 import pytest -from pendulum.datetime import DateTime from pydantic import ValidationError from sqlalchemy.ext.asyncio import AsyncSession @@ -21,6 +20,7 @@ from prefect.server.events.schemas.events import ReceivedEvent, RelatedResource from prefect.server.models import workers from prefect.server.schemas.actions import WorkPoolCreate +from prefect.types import DateTime from prefect.utilities.pydantic import parse_obj_as if TYPE_CHECKING: diff --git a/tests/events/server/actions/test_pausing_resuming_work_queue.py b/tests/events/server/actions/test_pausing_resuming_work_queue.py index 395e0a146470..283ed8fe35be 100644 --- a/tests/events/server/actions/test_pausing_resuming_work_queue.py +++ b/tests/events/server/actions/test_pausing_resuming_work_queue.py @@ -4,7 +4,6 @@ import pendulum import pytest -from pendulum.datetime import DateTime from pydantic import ValidationError from sqlalchemy.ext.asyncio import AsyncSession @@ -22,6 +21,7 @@ from prefect.server.models import work_queues from prefect.server.schemas.actions import WorkQueueCreate, WorkQueueUpdate from prefect.server.schemas.core import WorkQueue +from prefect.types import DateTime from prefect.utilities.pydantic import parse_obj_as diff --git a/tests/events/server/conftest.py b/tests/events/server/conftest.py index 41759cce7072..e66541d695dd 100644 --- a/tests/events/server/conftest.py +++ b/tests/events/server/conftest.py @@ -6,7 +6,6 @@ import pendulum import pytest import sqlalchemy as sa -from pendulum.datetime import DateTime from sqlalchemy.ext.asyncio import AsyncSession from prefect.server.database.interface import PrefectDBInterface @@ -21,6 +20,7 @@ ) from prefect.server.events.schemas.events import ReceivedEvent from prefect.server.utilities.messaging import Message +from prefect.types import DateTime from prefect.utilities.pydantic import parse_obj_as diff --git 
a/tests/events/server/models/test_composite_trigger_child_firing.py b/tests/events/server/models/test_composite_trigger_child_firing.py index 5a592e035852..4a1d776d6fd5 100644 --- a/tests/events/server/models/test_composite_trigger_child_firing.py +++ b/tests/events/server/models/test_composite_trigger_child_firing.py @@ -2,7 +2,6 @@ from uuid import uuid4 import pytest -from pendulum.datetime import DateTime from sqlalchemy.ext.asyncio import AsyncSession from prefect.server.events import actions @@ -23,6 +22,7 @@ ) from prefect.server.events.schemas.events import ReceivedEvent from prefect.server.events.triggers import load_automation +from prefect.types import DateTime @pytest.fixture diff --git a/tests/events/server/storage/test_event_persister.py b/tests/events/server/storage/test_event_persister.py index 8d764300194e..a7ea13d018ba 100644 --- a/tests/events/server/storage/test_event_persister.py +++ b/tests/events/server/storage/test_event_persister.py @@ -6,7 +6,6 @@ import pytest import sqlalchemy as sa from pydantic import ValidationError -from pydantic_extra_types.pendulum_dt import DateTime from sqlalchemy.ext.asyncio import AsyncSession from prefect.server.database.dependencies import db_injector @@ -17,6 +16,7 @@ from prefect.server.events.storage.database import query_events, write_events from prefect.server.utilities.messaging import CapturedMessage, Message, MessageHandler from prefect.settings import PREFECT_EVENTS_RETENTION_PERIOD, temporary_settings +from prefect.types import DateTime if TYPE_CHECKING: from prefect.server.database.orm_models import ORMEventResource diff --git a/tests/events/server/test_automations_api.py b/tests/events/server/test_automations_api.py index befc3140ffc6..d02cf6df92a4 100644 --- a/tests/events/server/test_automations_api.py +++ b/tests/events/server/test_automations_api.py @@ -11,7 +11,6 @@ import sqlalchemy as sa from fastapi.applications import FastAPI from httpx import ASGITransport, AsyncClient -from pendulum.datetime import DateTime from sqlalchemy.ext.asyncio import AsyncSession from prefect.server import models as server_models @@ -40,6 +39,7 @@ PREFECT_API_SERVICES_TRIGGERS_ENABLED, temporary_settings, ) +from prefect.types import DateTime from prefect.utilities.pydantic import parse_obj_as diff --git a/tests/events/server/test_clients.py b/tests/events/server/test_clients.py index 841ce6cda6e0..1f71bddb3abc 100644 --- a/tests/events/server/test_clients.py +++ b/tests/events/server/test_clients.py @@ -5,7 +5,6 @@ import pendulum import pytest -from pendulum.datetime import DateTime from prefect.server.events.clients import ( AssertingEventsClient, @@ -14,6 +13,7 @@ ) from prefect.server.events.schemas.events import Event, ReceivedEvent, RelatedResource from prefect.server.utilities.messaging import CapturingPublisher +from prefect.types import DateTime @pytest.fixture diff --git a/tests/events/server/test_events_api.py b/tests/events/server/test_events_api.py index d068251441b4..f7dc36961abb 100644 --- a/tests/events/server/test_events_api.py +++ b/tests/events/server/test_events_api.py @@ -7,7 +7,6 @@ import pendulum import pytest from httpx import AsyncClient -from pendulum.datetime import DateTime from pydantic.networks import AnyHttpUrl from prefect.server.events.counting import Countable, TimeUnit @@ -23,6 +22,7 @@ Resource, ) from prefect.server.events.storage import INTERACTIVE_PAGE_SIZE, InvalidTokenError +from prefect.types import DateTime from prefect.utilities.pydantic import parse_obj_as diff --git 
a/tests/events/server/test_events_counts.py b/tests/events/server/test_events_counts.py index 7e10960b9d50..7eb2c313f496 100644 --- a/tests/events/server/test_events_counts.py +++ b/tests/events/server/test_events_counts.py @@ -5,7 +5,6 @@ import pendulum import pytest -from pydantic_extra_types.pendulum_dt import Date, DateTime from sqlalchemy.ext.asyncio import AsyncSession from prefect.server.events.counting import PIVOT_DATETIME, Countable, TimeUnit @@ -18,6 +17,7 @@ count_events, write_events, ) +from prefect.types import Date, DateTime # Note: the counts in this module are sensitive to the number and shape of events # we produce in conftest.py and may need to be adjusted if we make changes. diff --git a/tests/events/server/test_events_schema.py b/tests/events/server/test_events_schema.py index 2d12c23fcfc4..a7f5ccf43ca7 100644 --- a/tests/events/server/test_events_schema.py +++ b/tests/events/server/test_events_schema.py @@ -4,7 +4,6 @@ import pendulum import pytest -from pendulum.datetime import DateTime from pydantic import ValidationError from prefect.server.events.schemas.events import ( @@ -13,6 +12,7 @@ RelatedResource, Resource, ) +from prefect.types import DateTime def test_client_events_do_not_have_defaults_for_the_fields_it_seems_they_should(): diff --git a/tests/events/server/triggers/test_basics.py b/tests/events/server/triggers/test_basics.py index 6c3fe9e7fefa..e5a9d9f83c45 100644 --- a/tests/events/server/triggers/test_basics.py +++ b/tests/events/server/triggers/test_basics.py @@ -4,7 +4,6 @@ from uuid import uuid4 import pytest -from pendulum.datetime import DateTime from sqlalchemy.ext.asyncio import AsyncSession from prefect.server.events import actions, triggers @@ -18,6 +17,7 @@ ) from prefect.server.events.schemas.events import ReceivedEvent, matches from prefect.settings import PREFECT_EVENTS_EXPIRED_BUCKET_BUFFER +from prefect.types import DateTime def test_triggers_have_identifiers(arachnophobia: Automation): diff --git a/tests/events/server/triggers/test_composite_triggers.py b/tests/events/server/triggers/test_composite_triggers.py index e0d79d99bb65..728bc1fa88f3 100644 --- a/tests/events/server/triggers/test_composite_triggers.py +++ b/tests/events/server/triggers/test_composite_triggers.py @@ -5,7 +5,6 @@ import pendulum import pytest -from pendulum.datetime import DateTime from sqlalchemy.ext.asyncio import AsyncSession from prefect.server.database.interface import PrefectDBInterface @@ -21,6 +20,7 @@ TriggerState, ) from prefect.server.events.schemas.events import ReceivedEvent +from prefect.types import DateTime @pytest.fixture diff --git a/tests/events/server/triggers/test_flow_run_slas.py b/tests/events/server/triggers/test_flow_run_slas.py index 50e9aaec6a01..e01ec83e31cb 100644 --- a/tests/events/server/triggers/test_flow_run_slas.py +++ b/tests/events/server/triggers/test_flow_run_slas.py @@ -5,7 +5,6 @@ import pendulum import pytest -from pendulum.datetime import DateTime from sqlalchemy.ext.asyncio import AsyncSession from prefect.server.events import actions, triggers @@ -18,6 +17,7 @@ TriggerState, ) from prefect.server.events.schemas.events import Event, ReceivedEvent +from prefect.types import DateTime @pytest.fixture diff --git a/tests/events/server/triggers/test_service.py b/tests/events/server/triggers/test_service.py index c1c4d9f49afb..171d683b382e 100644 --- a/tests/events/server/triggers/test_service.py +++ b/tests/events/server/triggers/test_service.py @@ -7,7 +7,6 @@ import pendulum import pytest -from pendulum.datetime import 
DateTime from sqlalchemy.ext.asyncio import AsyncSession from prefect.server.events import actions, triggers @@ -22,6 +21,7 @@ ) from prefect.server.utilities.messaging import MessageHandler from prefect.server.utilities.messaging.memory import MemoryMessage +from prefect.types import DateTime async def test_acting_publishes_an_action_message_from_a_reactive_event( diff --git a/tests/experimental/test_lineage.py b/tests/experimental/test_lineage.py new file mode 100644 index 000000000000..77a33e6da7d4 --- /dev/null +++ b/tests/experimental/test_lineage.py @@ -0,0 +1,339 @@ +from unittest.mock import patch + +import pytest + +from prefect._experimental.lineage import ( + emit_lineage_event, + emit_result_read_event, + emit_result_write_event, + get_result_resource_uri, +) +from prefect.events.schemas.events import RelatedResource +from prefect.filesystems import ( + LocalFileSystem, + WritableDeploymentStorage, + WritableFileSystem, +) +from prefect.results import ResultStore + + +@pytest.fixture +async def local_storage(tmp_path): + return LocalFileSystem(basepath=str(tmp_path)) + + +@pytest.fixture +def result_store(local_storage): + return ResultStore(result_storage=local_storage) + + +@pytest.fixture +def mock_emit_event(): + """Mock the emit_event function used by all lineage event emission.""" + with patch("prefect._experimental.lineage.emit_event") as mock: + yield mock + + +class CustomStorage(WritableFileSystem, WritableDeploymentStorage): + _block_type_slug = "custom-storage" + + def _resolve_path(self, path): + return path + + @classmethod + def get_block_type_slug(cls): + return "custom-storage" + + def get_directory(self, path: str) -> str: + raise NotImplementedError + + def put_directory(self, path: str, directory_path: str) -> None: + raise NotImplementedError + + def read_path(self, path: str) -> bytes: + raise NotImplementedError + + def write_path(self, path: str, contents: bytes) -> None: + raise NotImplementedError + + +async def test_get_result_resource_uri_with_local_storage(local_storage): + uri = get_result_resource_uri(ResultStore(result_storage=local_storage), "test-key") + assert uri is not None + assert uri.startswith("file://") + assert uri.endswith("/test-key") + + +async def test_get_resource_uri_with_none_storage(): + store = ResultStore(result_storage=None) + uri = get_result_resource_uri(store, "test-key") + assert uri is None + + +async def test_get_resource_uri_with_unknown_storage(): + store = ResultStore(result_storage=CustomStorage()) + uri = get_result_resource_uri(store, "test-key") + assert uri == "prefect://custom-storage/test-key" + + +@pytest.mark.parametrize( + "block_type,expected_prefix", + [ + ("local-file-system", "file://"), + ("s3-bucket", "s3://"), + ("gcs-bucket", "gs://"), + ("azure-blob-storage", "azure-blob://"), + ], +) +async def test_get_resource_uri_block_type_mapping(block_type, expected_prefix): + if block_type == "local-file-system": + cls = LocalFileSystem + else: + + class MockStorage(CustomStorage): + _block_type_slug = block_type + + def _resolve_path(self, path): + return path + + @classmethod + def get_block_type_slug(cls): + return block_type + + # Add required attributes based on block type + bucket_name: str = "test-bucket" + bucket: str = "test-bucket" + container_name: str = "test-container" + + cls = MockStorage + + store = ResultStore(result_storage=cls()) + uri = get_result_resource_uri(store, "test-key") + assert uri is not None + assert uri.startswith(expected_prefix), f"Failed for {block_type}" + + +class 
TestEmitLineageEvent: + async def test_emit_lineage_event_with_upstream_and_downstream( + self, enable_lineage_events, mock_emit_event + ): + await emit_lineage_event( + event_name="test.event", + upstream_resources=[ + { + "prefect.resource.id": "upstream1", + "prefect.resource.role": "some-purpose", + }, + { + "prefect.resource.id": "upstream2", + "prefect.resource.role": "some-purpose", + }, + ], + downstream_resources=[ + { + "prefect.resource.id": "downstream1", + "prefect.resource.lineage-group": "global", + }, + { + "prefect.resource.id": "downstream2", + "prefect.resource.lineage-group": "global", + }, + ], + ) + + assert mock_emit_event.call_count == 2 # One call per downstream resource + + # Check first downstream resource event + first_call = mock_emit_event.call_args_list[0] + assert first_call.kwargs["event"] == "test.event" + assert first_call.kwargs["resource"] == { + "prefect.resource.id": "downstream1", + "prefect.resource.lineage-group": "global", + } + assert first_call.kwargs["related"] == [ + { + "prefect.resource.id": "upstream1", + "prefect.resource.role": "some-purpose", + }, + { + "prefect.resource.id": "upstream2", + "prefect.resource.role": "some-purpose", + }, + ] + + # Check second downstream resource event + second_call = mock_emit_event.call_args_list[1] + assert second_call.kwargs["event"] == "test.event" + assert second_call.kwargs["resource"] == { + "prefect.resource.id": "downstream2", + "prefect.resource.lineage-group": "global", + } + assert second_call.kwargs["related"] == [ + { + "prefect.resource.id": "upstream1", + "prefect.resource.role": "some-purpose", + }, + { + "prefect.resource.id": "upstream2", + "prefect.resource.role": "some-purpose", + }, + ] + + async def test_emit_lineage_event_with_no_resources( + self, enable_lineage_events, mock_emit_event + ): + await emit_lineage_event(event_name="test.event") + mock_emit_event.assert_not_called() + + async def test_emit_lineage_event_disabled(self, mock_emit_event): + await emit_lineage_event( + event_name="test.event", + upstream_resources=[ + { + "prefect.resource.id": "upstream", + "prefect.resource.role": "some-purpose", + } + ], + downstream_resources=[ + { + "prefect.resource.id": "downstream", + "prefect.resource.lineage-group": "global", + "prefect.resource.role": "result", + } + ], + ) + mock_emit_event.assert_not_called() + + +class TestEmitResultEvents: + async def test_emit_result_read_event( + self, result_store, enable_lineage_events, mock_emit_event + ): + await emit_result_read_event( + result_store, + "test-key", + [ + { + "prefect.resource.id": "downstream", + "prefect.resource.role": "flow-run", + } + ], + ) + + mock_emit_event.assert_called_once() + call_args = mock_emit_event.call_args.kwargs + assert call_args["event"] == "prefect.result.read" + resource_uri = get_result_resource_uri(result_store, "test-key") + assert resource_uri is not None + assert call_args["resource"] == { + "prefect.resource.id": "downstream", + "prefect.resource.lineage-group": "global", + "prefect.resource.role": "flow-run", + } + assert call_args["related"] == [ + RelatedResource( + root={ + "prefect.resource.id": resource_uri, + "prefect.resource.role": "result", + } + ) + ] + + async def test_emit_result_write_event( + self, result_store, enable_lineage_events, mock_emit_event + ): + await emit_result_write_event(result_store, "test-key") + + mock_emit_event.assert_called_once() + call_args = mock_emit_event.call_args.kwargs + assert call_args["event"] == "prefect.result.write" + assert 
call_args["resource"] == { + "prefect.resource.id": get_result_resource_uri(result_store, "test-key"), + "prefect.resource.lineage-group": "global", + "prefect.resource.role": "result", + } + assert call_args["related"] == [] + + async def test_emit_result_read_event_with_none_uri( + self, enable_lineage_events, mock_emit_event + ): + store = ResultStore(result_storage=None) + await emit_result_read_event( + store, + "test-key", + [ + { + "prefect.resource.id": "downstream", + "prefect.resource.role": "flow-run", + } + ], + ) + mock_emit_event.assert_not_called() + + async def test_emit_result_write_event_with_none_uri( + self, enable_lineage_events, mock_emit_event + ): + store = ResultStore(result_storage=None) + await emit_result_write_event(store, "test-key") + mock_emit_event.assert_not_called() + + async def test_emit_result_read_event_with_downstream_resources( + self, result_store, enable_lineage_events, mock_emit_event + ): + await emit_result_read_event( + result_store, + "test-key", + downstream_resources=[ + {"prefect.resource.id": "downstream1"}, + {"prefect.resource.id": "downstream2"}, + ], + ) + + calls = mock_emit_event.call_args_list + assert len(calls) == 2 + + for i, call in enumerate(calls): + resource_uri = get_result_resource_uri(result_store, "test-key") + assert resource_uri is not None + assert call.kwargs["event"] == "prefect.result.read" + assert call.kwargs["resource"] == { + "prefect.resource.id": f"downstream{i+1}", + "prefect.resource.lineage-group": "global", + } + assert call.kwargs["related"] == [ + RelatedResource( + root={ + "prefect.resource.id": resource_uri, + "prefect.resource.role": "result", + } + ) + ] + + async def test_emit_result_write_event_with_upstream_resources( + self, result_store, enable_lineage_events, mock_emit_event + ): + await emit_result_write_event( + result_store, + "test-key", + upstream_resources=[ + { + "prefect.resource.id": "upstream", + "prefect.resource.role": "my-role", + } + ], + ) + + resolved_key_path = result_store._resolved_key_path("test-key") + resource_uri = get_result_resource_uri(result_store, resolved_key_path) + + mock_emit_event.assert_called_once_with( + event="prefect.result.write", + resource={ + "prefect.resource.id": resource_uri, + "prefect.resource.lineage-group": "global", + "prefect.resource.role": "result", + }, + related=[ + {"prefect.resource.id": "upstream", "prefect.resource.role": "my-role"}, + ], + ) diff --git a/tests/results/test_result_store.py b/tests/results/test_result_store.py index f516af249c9d..5ec34a2720e1 100644 --- a/tests/results/test_result_store.py +++ b/tests/results/test_result_store.py @@ -1,3 +1,5 @@ +from unittest import mock + import pytest import prefect.exceptions @@ -889,3 +891,72 @@ async def test_deprecation_warning_on_persist_result(): with pytest.warns(DeprecationWarning): ResultStore(persist_result=False) + + +class TestResultStoreEmitsEvents: + async def test_result_store_emits_write_event( + self, tmp_path, enable_lineage_events + ): + filesystem = LocalFileSystem(basepath=tmp_path) + result_store = ResultStore(result_storage=filesystem) + + with mock.patch("prefect.results.emit_result_write_event") as mock_emit: + await result_store.awrite(key="test", obj="test") + resolved_key_path = result_store._resolved_key_path("test") + mock_emit.assert_called_once_with(result_store, resolved_key_path) + + async def test_result_store_emits_read_event(self, tmp_path, enable_lineage_events): + filesystem = LocalFileSystem(basepath=tmp_path) + result_store = 
ResultStore(result_storage=filesystem) + await result_store.awrite(key="test", obj="test") + + # Reading from a different result store allows us to test the read + # without the store's in-memory cache. + other_result_store = ResultStore(result_storage=filesystem) + + with mock.patch("prefect.results.emit_result_read_event") as mock_emit: + await other_result_store.aread(key="test") + resolved_key_path = other_result_store._resolved_key_path("test") + mock_emit.assert_called_once_with(other_result_store, resolved_key_path) + + async def test_result_store_emits_cached_read_event( + self, tmp_path, enable_lineage_events + ): + result_store = ResultStore( + cache_result_in_memory=True, + ) + await result_store.awrite(key="test", obj="test") + + with mock.patch("prefect.results.emit_result_read_event") as mock_emit: + await result_store.aread(key="test") # cached read + resolved_key_path = result_store._resolved_key_path("test") + mock_emit.assert_called_once_with( + result_store, + resolved_key_path, + cached=True, + ) + + async def test_result_store_does_not_emit_lineage_write_events_when_disabled( + self, tmp_path + ): + filesystem = LocalFileSystem(basepath=tmp_path) + result_store = ResultStore(result_storage=filesystem) + + with mock.patch( + "prefect._experimental.lineage.emit_lineage_event" + ) as mock_emit: + await result_store.awrite(key="test", obj="test") + mock_emit.assert_not_called() + + async def test_result_store_does_not_emit_lineage_read_events_when_disabled( + self, tmp_path + ): + filesystem = LocalFileSystem(basepath=tmp_path) + result_store = ResultStore(result_storage=filesystem) + await result_store.awrite(key="test", obj="test") + + with mock.patch( + "prefect._experimental.lineage.emit_lineage_event" + ) as mock_emit: + await result_store.aread(key="test") + mock_emit.assert_not_called() diff --git a/tests/runner/test_utils.py b/tests/runner/test_utils.py index 20d879225056..d27c846591b1 100644 --- a/tests/runner/test_utils.py +++ b/tests/runner/test_utils.py @@ -1,3 +1,4 @@ +from typing import Any, Callable from unittest.mock import create_autospec import pytest @@ -13,7 +14,7 @@ class MockRoute(APIRoute): - def __init__(self, path: str, endpoint: callable): + def __init__(self, path: str, endpoint: Callable[..., Any]): super().__init__(path, endpoint) diff --git a/tests/server/models/test_flow_runs.py b/tests/server/models/test_flow_runs.py index 7b68055cbd23..1023d329baa9 100644 --- a/tests/server/models/test_flow_runs.py +++ b/tests/server/models/test_flow_runs.py @@ -3,10 +3,13 @@ import pendulum import pytest import sqlalchemy as sa +from sqlalchemy.ext.asyncio import AsyncSession from prefect.server import models, schemas +from prefect.server.database import orm_models from prefect.server.exceptions import ObjectNotFoundError from prefect.server.schemas.core import TaskRunResult +from prefect.types import KeyValueLabels class TestCreateFlowRun: @@ -255,6 +258,77 @@ async def test_update_flow_run_returns_false_if_flow_run_does_not_exist( ) ) + async def test_update_flow_run_labels( + self, flow: orm_models.Flow, session: AsyncSession + ): + """Test that flow run labels can be updated by patching existing labels""" + + # Create a flow run with initial labels + initial_labels: KeyValueLabels = {"env": "test", "version": "1.0"} + flow_run = await models.flow_runs.create_flow_run( + session=session, + flow_run=schemas.core.FlowRun(flow_id=flow.id, labels=initial_labels), + ) + + # Update with new labels + new_labels: KeyValueLabels = {"version": "2.0", "new_key": 
"new_value"} + update_success = await models.flow_runs.update_flow_run_labels( + session=session, flow_run_id=flow_run.id, labels=new_labels + ) + assert update_success is True + + # Read the flow run back and verify labels were merged correctly + updated_flow_run = await models.flow_runs.read_flow_run( + session=session, flow_run_id=flow_run.id + ) + assert updated_flow_run + assert updated_flow_run.labels == { + "prefect.flow.id": str(flow.id), + "env": "test", # Kept from initial labels + "version": "2.0", # Updated from new labels + "new_key": "new_value", # Added from new labels + } + + async def test_update_flow_run_labels_raises_if_flow_run_does_not_exist( + self, session: AsyncSession, caplog: pytest.LogCaptureFixture + ): + """Test that updating labels for a non-existent flow run raises""" + with pytest.raises(ObjectNotFoundError) as exc: + await models.flow_runs.update_flow_run_labels( + session=session, flow_run_id=uuid4(), labels={"test": "label"} + ) + assert "Flow run with id" in str(exc.value) + + async def test_update_flow_run_labels_with_empty_initial_labels( + self, flow: orm_models.Flow, session: AsyncSession + ): + """Test that labels can be added to a flow run with no existing labels""" + + # Create a flow run with no labels + flow_run = await models.flow_runs.create_flow_run( + session=session, + flow_run=schemas.core.FlowRun( + flow_id=flow.id, + ), + ) + + # Update with new labels + new_labels: KeyValueLabels = {"env": "test", "version": "1.0"} + update_success = await models.flow_runs.update_flow_run_labels( + session=session, flow_run_id=flow_run.id, labels=new_labels + ) + assert update_success is True + + # Read the flow run back and verify labels were added + updated_flow_run = await models.flow_runs.read_flow_run( + session=session, flow_run_id=flow_run.id + ) + assert updated_flow_run + assert updated_flow_run.labels == { + "prefect.flow.id": str(flow.id), + **new_labels, + } + class TestReadFlowRun: async def test_read_flow_run(self, flow, session): diff --git a/tests/server/orchestration/api/ui/test_flows.py b/tests/server/orchestration/api/ui/test_flows.py index 68dd1adbfb73..1e57a694e594 100644 --- a/tests/server/orchestration/api/ui/test_flows.py +++ b/tests/server/orchestration/api/ui/test_flows.py @@ -1,10 +1,10 @@ import pendulum import pytest -from pydantic_extra_types.pendulum_dt import DateTime from prefect.server import models, schemas from prefect.server.api.ui.flows import SimpleNextFlowRun from prefect.server.database import orm_models +from prefect.types import DateTime @pytest.fixture diff --git a/tests/server/orchestration/api/ui/test_task_runs.py b/tests/server/orchestration/api/ui/test_task_runs.py index 0c736d5c6bf3..17c0a759449d 100644 --- a/tests/server/orchestration/api/ui/test_task_runs.py +++ b/tests/server/orchestration/api/ui/test_task_runs.py @@ -3,12 +3,12 @@ import pendulum import pytest from httpx import AsyncClient -from pydantic_extra_types.pendulum_dt import DateTime from sqlalchemy.ext.asyncio import AsyncSession from prefect.server import models from prefect.server.api.ui.task_runs import TaskRunCount from prefect.server.schemas import core, filters, states +from prefect.types import DateTime class TestReadDashboardTaskRunCounts: diff --git a/tests/telemetry/test_instrumentation.py b/tests/telemetry/test_instrumentation.py index a86e4f725b9e..b01c18a8ae4e 100644 --- a/tests/telemetry/test_instrumentation.py +++ b/tests/telemetry/test_instrumentation.py @@ -1,5 +1,6 @@ import os -from uuid import UUID +from typing import 
Literal +from uuid import UUID, uuid4 import pytest from opentelemetry import metrics, trace @@ -11,11 +12,21 @@ from opentelemetry.sdk.metrics import MeterProvider from opentelemetry.sdk.metrics.export import PeriodicExportingMetricReader from opentelemetry.sdk.trace import TracerProvider - +from tests.telemetry.instrumentation_tester import InstrumentationTester + +import prefect +from prefect import flow, task +from prefect.client.orchestration import SyncPrefectClient +from prefect.context import FlowRunContext +from prefect.flow_engine import run_flow_async, run_flow_sync +from prefect.task_engine import run_task_async, run_task_sync from prefect.telemetry.bootstrap import setup_telemetry -from prefect.telemetry.instrumentation import extract_account_and_workspace_id +from prefect.telemetry.instrumentation import ( + extract_account_and_workspace_id, +) from prefect.telemetry.logging import get_log_handler from prefect.telemetry.processors import InFlightSpanProcessor +from prefect.telemetry.run_telemetry import LABELS_TRACEPARENT_KEY def test_extract_account_and_workspace_id_valid_url( @@ -160,3 +171,505 @@ def test_logger_provider( log_handler = get_log_handler() assert isinstance(log_handler, LoggingHandler) assert log_handler._logger_provider == logger_provider + + +class TestFlowRunInstrumentation: + @pytest.fixture(params=["async", "sync"]) + async def engine_type( + self, request: pytest.FixtureRequest + ) -> Literal["async", "sync"]: + return request.param + + async def test_traceparent_propagates_from_server_side( + self, + engine_type: Literal["async", "sync"], + instrumentation: InstrumentationTester, + sync_prefect_client: SyncPrefectClient, + ): + """Test that a traceparent already set on the flow run by the server is adopted as the parent of the flow run's span""" + + @flow + async def my_async_flow(): + pass + + @flow + def my_sync_flow(): + pass + + if engine_type == "async": + the_flow = my_async_flow + else: + the_flow = my_sync_flow + + flow_run = sync_prefect_client.create_flow_run(the_flow) # type: ignore + + # Give the flow run a traceparent. This can occur when the server has + # already created a trace for the run, likely because it was Late.
+ # + # Trace ID: 314419354619557650326501540139523824930 + # Span ID: 5357380918965115138 + sync_prefect_client.update_flow_run_labels( + flow_run.id, + { + LABELS_TRACEPARENT_KEY: "00-ec8af70b445d54387035c27eb182dd22-4a593d8fa95f1902-01" + }, + ) + + flow_run = sync_prefect_client.read_flow_run(flow_run.id) + assert flow_run.labels[LABELS_TRACEPARENT_KEY] == ( + "00-ec8af70b445d54387035c27eb182dd22-4a593d8fa95f1902-01" + ) + + if engine_type == "async": + await run_flow_async(the_flow, flow_run=flow_run) # type: ignore + else: + run_flow_sync(the_flow, flow_run=flow_run) # type: ignore + + assert flow_run.labels[LABELS_TRACEPARENT_KEY] == ( + "00-ec8af70b445d54387035c27eb182dd22-4a593d8fa95f1902-01" + ) + + spans = instrumentation.get_finished_spans() + assert len(spans) == 1 + span = spans[0] + + span_context = span.get_span_context() + assert span_context is not None + assert span_context.trace_id == 314419354619557650326501540139523824930 + + assert span.parent is not None + assert span.parent.trace_id == 314419354619557650326501540139523824930 + assert span.parent.span_id == 5357380918965115138 + + async def test_flow_run_creates_and_stores_otel_traceparent( + self, + engine_type: Literal["async", "sync"], + instrumentation: InstrumentationTester, + sync_prefect_client: SyncPrefectClient, + ): + """Test that when no parent traceparent exists, the flow run stores its own span's traceparent""" + + @flow(name="child-flow") + async def async_child_flow() -> str: + return "hello from child" + + @flow(name="child-flow") + def sync_child_flow() -> str: + return "hello from child" + + @flow(name="parent-flow") + async def async_parent_flow() -> str: + return await async_child_flow() + + @flow(name="parent-flow") + def sync_parent_flow() -> str: + return sync_child_flow() + + if engine_type == "async": + await async_parent_flow() + else: + sync_parent_flow() + + spans = instrumentation.get_finished_spans() + + next( + span + for span in spans + if span.attributes.get("prefect.flow.name") == "parent-flow" + ) + child_span = next( + span + for span in spans + if span.attributes.get("prefect.flow.name") == "child-flow" + ) + + # Get the child flow run + child_flow_run_id = child_span.attributes.get("prefect.run.id") + child_flow_run = sync_prefect_client.read_flow_run(UUID(child_flow_run_id)) + + # Verify the child flow run has its span's traceparent in its labels + assert "__OTEL_TRACEPARENT" in child_flow_run.labels + assert child_flow_run.labels["__OTEL_TRACEPARENT"].startswith("00-") + trace_id_hex = child_flow_run.labels["__OTEL_TRACEPARENT"].split("-")[1] + assert int(trace_id_hex, 16) == child_span.context.trace_id + + async def test_flow_run_propagates_otel_traceparent_to_subflow( + self, + engine_type: Literal["async", "sync"], + instrumentation: InstrumentationTester, + ): + """Test that OTEL traceparent gets propagated from parent flow to child flow""" + + @flow(name="child-flow") + async def async_child_flow() -> str: + return "hello from child" + + @flow(name="child-flow") + def sync_child_flow() -> str: + return "hello from child" + + @flow(name="parent-flow") + async def async_parent_flow(): + await async_child_flow() + + @flow(name="parent-flow") + def sync_parent_flow(): + sync_child_flow() + + parent_flow = async_parent_flow if engine_type == "async" else sync_parent_flow + await parent_flow() if engine_type == "async" else parent_flow() + + spans = instrumentation.get_finished_spans() + + parent_span = next( + span + for span in spans + if 
span.attributes.get("prefect.flow.name") == "parent-flow" + ) + child_span = next( + span + for span in spans + if span.attributes.get("prefect.flow.name") == "child-flow" + ) + + assert parent_span is not None + assert child_span is not None + assert child_span.context.trace_id == parent_span.context.trace_id + + async def test_flow_run_instrumentation( + self, + engine_type: Literal["async", "sync"], + instrumentation: InstrumentationTester, + ): + @flow(name="instrumented-flow") + async def async_flow() -> str: + return 42 + + @flow(name="instrumented-flow") + def sync_flow() -> str: + return 42 + + test_flow = async_flow if engine_type == "async" else sync_flow + await test_flow() if engine_type == "async" else test_flow() + + spans = instrumentation.get_finished_spans() + assert len(spans) == 1 + + span = spans[0] + assert span is not None + instrumentation.assert_span_instrumented_for(span, prefect) + + instrumentation.assert_has_attributes( + span, + { + "prefect.flow.name": "instrumented-flow", + "prefect.run.type": "flow", + }, + ) + + async def test_flow_run_inherits_parent_labels( + self, + engine_type: Literal["async", "sync"], + instrumentation: InstrumentationTester, + sync_prefect_client: SyncPrefectClient, + ): + """Test that parent flow labels get propagated to child flow spans""" + + @flow(name="child-flow") + async def async_child_flow() -> str: + return "hello from child" + + @flow(name="child-flow") + def sync_child_flow() -> str: + return "hello from child" + + @flow(name="parent-flow") + async def async_parent_flow() -> str: + # Set custom labels in parent flow + flow_run = FlowRunContext.get().flow_run + flow_run.labels.update( + {"test.label": "test-value", "environment": "testing"} + ) + return await async_child_flow() + + @flow(name="parent-flow") + def sync_parent_flow() -> str: + # Set custom labels in parent flow + flow_run = FlowRunContext.get().flow_run + flow_run.labels.update( + {"test.label": "test-value", "environment": "testing"} + ) + return sync_child_flow() + + if engine_type == "async": + state = await async_parent_flow(return_state=True) + else: + state = sync_parent_flow(return_state=True) + + spans = instrumentation.get_finished_spans() + child_spans = [ + span + for span in spans + if span.attributes.get("prefect.flow.name") == "child-flow" + ] + assert len(child_spans) == 1 + + # Get the parent flow run + parent_flow_run = sync_prefect_client.read_flow_run( + state.state_details.flow_run_id + ) + + # Verify the child span has the parent flow's labels + instrumentation.assert_has_attributes( + child_spans[0], + { + **parent_flow_run.labels, + "prefect.run.type": "flow", + "prefect.flow.name": "child-flow", + }, + ) + + +class TestTaskRunInstrumentation: + @pytest.fixture(params=["async", "sync"]) + async def engine_type( + self, request: pytest.FixtureRequest + ) -> Literal["async", "sync"]: + return request.param + + async def run_task(self, task, task_run_id, parameters, engine_type): + if engine_type == "async": + return await run_task_async( + task, task_run_id=task_run_id, parameters=parameters + ) + else: + return run_task_sync(task, task_run_id=task_run_id, parameters=parameters) + + async def test_span_creation( + self, + engine_type: Literal["async", "sync"], + instrumentation: InstrumentationTester, + ): + @task + async def async_task(x: int, y: int): + return x + y + + @task + def sync_task(x: int, y: int): + return x + y + + task_fn = async_task if engine_type == "async" else sync_task + task_run_id = uuid4() + + await self.run_task( 
+ task_fn, + task_run_id=task_run_id, + parameters={"x": 1, "y": 2}, + engine_type=engine_type, + ) + + spans = instrumentation.get_finished_spans() + assert len(spans) == 1 + span = spans[0] + + instrumentation.assert_has_attributes( + span, {"prefect.run.id": str(task_run_id), "prefect.run.type": "task"} + ) + assert spans[0].name == task_fn.name + + async def test_span_attributes( + self, + engine_type: Literal["async", "sync"], + instrumentation: InstrumentationTester, + ): + @task + async def async_task(x: int, y: int): + return x + y + + @task + def sync_task(x: int, y: int): + return x + y + + task_fn = async_task if engine_type == "async" else sync_task + task_run_id = uuid4() + + await self.run_task( + task_fn, + task_run_id=task_run_id, + parameters={"x": 1, "y": 2}, + engine_type=engine_type, + ) + + spans = instrumentation.get_finished_spans() + assert len(spans) == 1 + instrumentation.assert_has_attributes( + spans[0], + { + "prefect.run.id": str(task_run_id), + "prefect.run.type": "task", + "prefect.run.parameter.x": "int", + "prefect.run.parameter.y": "int", + }, + ) + assert spans[0].name == task_fn.__name__ + + async def test_span_events( + self, + engine_type: Literal["async", "sync"], + instrumentation: InstrumentationTester, + ): + @task + async def async_task(x: int, y: int): + return x + y + + @task + def sync_task(x: int, y: int): + return x + y + + task_fn = async_task if engine_type == "async" else sync_task + task_run_id = uuid4() + + await self.run_task( + task_fn, + task_run_id=task_run_id, + parameters={"x": 1, "y": 2}, + engine_type=engine_type, + ) + + spans = instrumentation.get_finished_spans() + events = spans[0].events + assert len(events) == 2 + assert events[0].name == "Running" + assert events[1].name == "Completed" + + async def test_span_status_on_success( + self, + engine_type: Literal["async", "sync"], + instrumentation: InstrumentationTester, + ): + @task + async def async_task(x: int, y: int): + return x + y + + @task + def sync_task(x: int, y: int): + return x + y + + task_fn = async_task if engine_type == "async" else sync_task + task_run_id = uuid4() + + await self.run_task( + task_fn, + task_run_id=task_run_id, + parameters={"x": 1, "y": 2}, + engine_type=engine_type, + ) + + spans = instrumentation.get_finished_spans() + assert len(spans) == 1 + assert spans[0].status.status_code == trace.StatusCode.OK + + async def test_span_status_on_failure( + self, + engine_type: Literal["async", "sync"], + instrumentation: InstrumentationTester, + ): + @task + async def async_task(x: int, y: int): + raise ValueError("Test error") + + @task + def sync_task(x: int, y: int): + raise ValueError("Test error") + + task_fn = async_task if engine_type == "async" else sync_task + task_run_id = uuid4() + + with pytest.raises(ValueError, match="Test error"): + await self.run_task( + task_fn, + task_run_id=task_run_id, + parameters={"x": 1, "y": 2}, + engine_type=engine_type, + ) + + spans = instrumentation.get_finished_spans() + assert len(spans) == 1 + assert spans[0].status.status_code == trace.StatusCode.ERROR + assert "Test error" in spans[0].status.description + + async def test_span_exception_recording( + self, + engine_type: Literal["async", "sync"], + instrumentation: InstrumentationTester, + ): + @task + async def async_task(x: int, y: int): + raise Exception("Test error") + + @task + def sync_task(x: int, y: int): + raise Exception("Test error") + + task_fn = async_task if engine_type == "async" else sync_task + task_run_id = uuid4() + + with 
pytest.raises(Exception, match="Test error"): + await self.run_task( + task_fn, + task_run_id=task_run_id, + parameters={"x": 1, "y": 2}, + engine_type=engine_type, + ) + + spans = instrumentation.get_finished_spans() + assert len(spans) == 1 + + events = spans[0].events + assert any(event.name == "exception" for event in events) + exception_event = next(event for event in events if event.name == "exception") + assert exception_event.attributes["exception.type"] == "Exception" + assert exception_event.attributes["exception.message"] == "Test error" + + async def test_flow_labels( + self, + engine_type: Literal["async", "sync"], + instrumentation: InstrumentationTester, + sync_prefect_client: SyncPrefectClient, + ): + """Test that parent flow ID gets propagated to task spans""" + + @task + async def async_child_task(): + return 1 + + @task + def sync_child_task(): + return 1 + + @flow + async def async_parent_flow(): + return await async_child_task() + + @flow + def sync_parent_flow(): + return sync_child_task() + + if engine_type == "async": + state = await async_parent_flow(return_state=True) + else: + state = sync_parent_flow(return_state=True) + + spans = instrumentation.get_finished_spans() + task_spans = [ + span for span in spans if span.attributes.get("prefect.run.type") == "task" + ] + assert len(task_spans) == 1 + + assert state.state_details.flow_run_id is not None + flow_run = sync_prefect_client.read_flow_run(state.state_details.flow_run_id) + + # Verify the task span has the parent flow's ID + instrumentation.assert_has_attributes( + task_spans[0], {**flow_run.labels, "prefect.run.type": "task"} + ) diff --git a/tests/test_artifacts.py b/tests/test_artifacts.py index 227f54be0ce2..d5d38083a6c7 100644 --- a/tests/test_artifacts.py +++ b/tests/test_artifacts.py @@ -43,6 +43,7 @@ async def my_flow(): artifact_id = await my_flow() response = await client.get(f"/artifacts/{artifact_id}") + assert response.status_code == 200 result = schemas.core.Artifact.model_validate(response.json()) assert result.data == f"[{link_text}]({my_link})" @@ -67,6 +68,7 @@ def my_flow(): my_artifact_id, flow_run_id, task_run_id = my_flow() response = await client.get(f"/artifacts/{my_artifact_id}") + assert response.status_code == 200 my_link_artifact = schemas.core.Artifact.model_validate(response.json()) assert my_link_artifact.flow_run_id == flow_run_id @@ -88,6 +90,7 @@ def my_flow(): my_artifact_id, flow_run_id = my_flow() response = await client.get(f"/artifacts/{my_artifact_id}") + assert response.status_code == 200 my_link_artifact = schemas.core.Artifact.model_validate(response.json()) assert my_link_artifact.flow_run_id == flow_run_id @@ -114,6 +117,7 @@ def my_flow(): my_artifact_id, flow_run_id = my_flow() response = await client.get(f"/artifacts/{my_artifact_id}") + assert response.status_code == 200 my_link_artifact = schemas.core.Artifact.model_validate(response.json()) assert my_link_artifact.flow_run_id == flow_run_id @@ -164,6 +168,7 @@ def my_flow(): my_artifact_id, flow_run_id, task_run_id = my_flow() response = await client.get(f"/artifacts/{my_artifact_id}") + assert response.status_code == 200 my_markdown_artifact = schemas.core.Artifact.model_validate(response.json()) assert my_markdown_artifact.flow_run_id == flow_run_id @@ -185,6 +190,7 @@ def my_flow(): my_artifact_id, flow_run_id = my_flow() response = await client.get(f"/artifacts/{my_artifact_id}") + assert response.status_code == 200 my_markdown_artifact = schemas.core.Artifact.model_validate(response.json()) assert 
my_markdown_artifact.flow_run_id == flow_run_id @@ -210,6 +216,7 @@ def my_flow(): my_artifact_id, flow_run_id = my_flow() response = await client.get(f"/artifacts/{my_artifact_id}") + assert response.status_code == 200 my_markdown_artifact = schemas.core.Artifact.model_validate(response.json()) assert my_markdown_artifact.flow_run_id == flow_run_id @@ -252,6 +259,7 @@ async def my_flow(): artifact_id = await my_flow() response = await client.get(f"/artifacts/{artifact_id}") + assert response.status_code == 200 result = schemas.core.Artifact.model_validate(response.json()) result_data = json.loads(result.data) assert result_data == my_table @@ -271,6 +279,7 @@ async def my_flow(): artifact_id = await my_flow() response = await client.get(f"/artifacts/{artifact_id}") + assert response.status_code == 200 result = schemas.core.Artifact.model_validate(response.json()) result_data = json.loads(result.data) @@ -291,6 +300,7 @@ async def my_flow(): artifact_id = await my_flow() response = await client.get(f"/artifacts/{artifact_id}") + assert response.status_code == 200 result = schemas.core.Artifact.model_validate(response.json()) result_data = json.loads(result.data) assert result_data == my_table @@ -341,6 +351,7 @@ def my_flow(): my_artifact_id, flow_run_id = my_flow() response = await client.get(f"/artifacts/{my_artifact_id}") + assert response.status_code == 200 my_table_artifact = schemas.core.Artifact.model_validate(response.json()) assert my_table_artifact.flow_run_id == flow_run_id @@ -369,6 +380,7 @@ def my_flow(): my_artifact_id, flow_run_id = my_flow() response = await client.get(f"/artifacts/{my_artifact_id}") + assert response.status_code == 200 my_table_artifact = schemas.core.Artifact.model_validate(response.json()) assert my_table_artifact.flow_run_id == flow_run_id @@ -427,6 +439,7 @@ async def my_flow(): artifact_id = await my_flow() response = await client.get(f"/artifacts/{artifact_id}") + assert response.status_code == 200 my_artifact = schemas.core.Artifact.model_validate(response.json()) my_data = json.loads(my_artifact.data) assert my_data == {"a": [1, 3], "b": [2, None]} @@ -463,6 +476,7 @@ async def my_flow(): artifact_id = await my_flow() response = await client.get(f"/artifacts/{artifact_id}") + assert response.status_code == 200 my_artifact = schemas.core.Artifact.model_validate(response.json()) my_data = json.loads(my_artifact.data) assert my_data == [ @@ -481,6 +495,7 @@ async def my_flow(): artifact_id = await my_flow() response = await client.get(f"/artifacts/{artifact_id}") + assert response.status_code == 200 my_artifact = schemas.core.Artifact.model_validate(response.json()) assert my_artifact.data == progress assert my_artifact.type == "progress" @@ -497,6 +512,7 @@ async def my_flow(): artifact_id = await my_flow() response = await client.get(f"/artifacts/{artifact_id}") + assert response.status_code == 200 my_artifact = schemas.core.Artifact.model_validate(response.json()) assert my_artifact.data == progress assert my_artifact.type == "progress" @@ -524,6 +540,7 @@ def my_flow(): my_artifact_id, flow_run_id, task_run_id = my_flow() response = await client.get(f"/artifacts/{my_artifact_id}") + assert response.status_code == 200 my_progress_artifact = schemas.core.Artifact.model_validate(response.json()) assert my_progress_artifact.flow_run_id == flow_run_id @@ -548,6 +565,7 @@ def my_flow(): my_artifact_id, flow_run_id = my_flow() response = await client.get(f"/artifacts/{my_artifact_id}") + assert response.status_code == 200 my_progress_artifact = 
schemas.core.Artifact.model_validate(response.json()) assert my_progress_artifact.flow_run_id == flow_run_id @@ -577,6 +595,7 @@ def my_flow(): my_artifact_id, flow_run_id, task_run_id = my_flow() response = await client.get(f"/artifacts/{my_artifact_id}") + assert response.status_code == 200 my_image_artifact = schemas.core.Artifact.model_validate(response.json()) assert my_image_artifact.flow_run_id == flow_run_id @@ -604,6 +623,7 @@ def my_flow(): my_artifact_id, flow_run_id = my_flow() response = await client.get(f"/artifacts/{my_artifact_id}") + assert response.status_code == 200 my_image_artifact = schemas.core.Artifact.model_validate(response.json()) assert my_image_artifact.flow_run_id == flow_run_id @@ -636,6 +656,7 @@ async def my_flow(): new_progress = 50.0 await update_progress_artifact(artifact_id, new_progress) response = await client.get(f"/artifacts/{artifact_id}") + assert response.status_code == 200 my_artifact = schemas.core.Artifact.model_validate(response.json()) assert my_artifact.data == new_progress @@ -663,6 +684,7 @@ def my_flow(): my_artifact_id, flow_run_id, task_run_id = my_flow() response = await client.get(f"/artifacts/{my_artifact_id}") + assert response.status_code == 200 my_progress_artifact = schemas.core.Artifact.model_validate(response.json()) assert my_progress_artifact.flow_run_id == flow_run_id @@ -688,6 +710,7 @@ def my_flow(): my_artifact_id, flow_run_id = my_flow() response = await client.get(f"/artifacts/{my_artifact_id}") + assert response.status_code == 200 my_progress_artifact = schemas.core.Artifact.model_validate(response.json()) assert my_progress_artifact.flow_run_id == flow_run_id diff --git a/tests/test_automations.py b/tests/test_automations.py index c9efa5e45eb2..96781cf99e18 100644 --- a/tests/test_automations.py +++ b/tests/test_automations.py @@ -183,3 +183,13 @@ async def test_nonexistent_id_raises_value_error(): async def test_nonexistent_name_raises_value_error(): with pytest.raises(ValueError): await Automation.read(name="nonexistent_name") + + +async def test_disabled_automation_can_be_enabled( + prefect_client, automation: Automation +): + await automation.disable() + await automation.enable() + + updated_automation = await Automation.read(id=automation.id) + assert updated_automation.enabled is True diff --git a/tests/test_context.py b/tests/test_context.py index 84309f1aba34..8d51fddbb104 100644 --- a/tests/test_context.py +++ b/tests/test_context.py @@ -5,7 +5,6 @@ from unittest.mock import MagicMock import pytest -from pendulum.datetime import DateTime import prefect.settings from prefect import flow, task @@ -40,6 +39,7 @@ ) from prefect.states import Running from prefect.task_runners import ThreadPoolTaskRunner +from prefect.types import DateTime class ExampleContext(ContextModel): diff --git a/tests/test_flow_engine.py b/tests/test_flow_engine.py index d1bc1a9bbdfd..9ceeae8241bd 100644 --- a/tests/test_flow_engine.py +++ b/tests/test_flow_engine.py @@ -10,9 +10,7 @@ import anyio import pydantic import pytest -from opentelemetry import trace -import prefect from prefect import Flow, __development_base_path__, flow, task from prefect.client.orchestration import PrefectClient, SyncPrefectClient from prefect.client.schemas.filters import FlowFilter, FlowRunFilter @@ -49,8 +47,6 @@ from prefect.utilities.callables import get_call_parameters from prefect.utilities.filesystem import tmpchdir -from .telemetry.instrumentation_tester import InstrumentationTester - @flow async def foo(): @@ -615,7 +611,7 @@ def my_flow(): # after 
a flow run retry, the stale value will be pulled from the cache. async def test_flow_retry_with_no_error_in_flow_and_one_failed_child_flow( - self, sync_prefect_client: SyncPrefectClient + self, sync_prefect_client ): child_run_count = 0 flow_run_count = 0 @@ -641,7 +637,6 @@ async def parent_flow(): assert await state.result() == "hello" assert flow_run_count == 2 assert child_run_count == 2, "Child flow should be reset and run again" - # Ensure that the tracking task run for the subflow is reset and tracked task_runs = sync_prefect_client.read_task_runs( flow_run_filter=FlowRunFilter( @@ -1854,243 +1849,3 @@ async def expensive_flow(): concurrency_limit_v2.name ) assert response.active_slots == 0 - - -class TestFlowRunInstrumentation: - def test_flow_run_instrumentation(self, instrumentation: InstrumentationTester): - @flow - def instrumented_flow(): - from prefect.states import Completed - - return Completed(message="The flow is with you") - - instrumented_flow() - - spans = instrumentation.get_finished_spans() - assert len(spans) == 1 - span = spans[0] - assert span is not None - instrumentation.assert_span_instrumented_for(span, prefect) - - instrumentation.assert_has_attributes( - span, - { - "prefect.run.type": "flow", - "prefect.tags": (), - "prefect.flow.name": "instrumented-flow", - "prefect.run.id": mock.ANY, - }, - ) - assert span.status.status_code == trace.StatusCode.OK - - assert len(span.events) == 2 - assert span.events[0].name == "Running" - instrumentation.assert_has_attributes( - span.events[0], - { - "prefect.state.message": "", - "prefect.state.type": StateType.RUNNING, - "prefect.state.name": "Running", - "prefect.state.id": mock.ANY, - }, - ) - - assert span.events[1].name == "Completed" - instrumentation.assert_has_attributes( - span.events[1], - { - "prefect.state.message": "The flow is with you", - "prefect.state.type": StateType.COMPLETED, - "prefect.state.name": "Completed", - "prefect.state.id": mock.ANY, - }, - ) - - def test_flow_run_instrumentation_captures_tags( - self, - instrumentation: InstrumentationTester, - ): - from prefect import tags - - @flow - def instrumented_flow(): - pass - - with tags("foo", "bar"): - instrumented_flow() - - spans = instrumentation.get_finished_spans() - assert len(spans) == 1 - span = spans[0] - assert span is not None - instrumentation.assert_span_instrumented_for(span, prefect) - - instrumentation.assert_has_attributes( - span, - { - "prefect.run.type": "flow", - "prefect.flow.name": "instrumented-flow", - "prefect.run.id": mock.ANY, - }, - ) - # listy span attributes are serialized to tuples -- order seems nondeterministic so ignore rather than flake - assert set(span.attributes.get("prefect.tags")) == {"foo", "bar"} # type: ignore - assert span.status.status_code == trace.StatusCode.OK - - def test_flow_run_instrumentation_captures_labels( - self, - instrumentation: InstrumentationTester, - sync_prefect_client: SyncPrefectClient, - ): - @flow - def instrumented_flow(): - pass - - state = instrumented_flow(return_state=True) - - assert state.state_details.flow_run_id is not None - flow_run = sync_prefect_client.read_flow_run(state.state_details.flow_run_id) - - spans = instrumentation.get_finished_spans() - assert len(spans) == 1 - span = spans[0] - assert span is not None - - instrumentation.assert_has_attributes( - span, - { - **flow_run.labels, - "prefect.run.type": "flow", - "prefect.flow.name": "instrumented-flow", - "prefect.run.id": mock.ANY, - }, - ) - - def test_flow_run_instrumentation_on_exception( - self, 
instrumentation: InstrumentationTester - ): - @flow - def a_broken_flow(): - raise Exception("This flow broke!") - - with pytest.raises(Exception): - a_broken_flow() - - spans = instrumentation.get_finished_spans() - assert len(spans) == 1 - span = spans[0] - assert span is not None - instrumentation.assert_span_instrumented_for(span, prefect) - - instrumentation.assert_has_attributes( - span, - { - "prefect.run.type": "flow", - "prefect.tags": (), - "prefect.flow.name": "a-broken-flow", - "prefect.run.id": mock.ANY, - }, - ) - - assert span.status.status_code == trace.StatusCode.ERROR - assert ( - span.status.description - == "Flow run encountered an exception: Exception: This flow broke!" - ) - - assert len(span.events) == 3 - assert span.events[0].name == "Running" - instrumentation.assert_has_attributes( - span.events[0], - { - "prefect.state.message": "", - "prefect.state.type": StateType.RUNNING, - "prefect.state.name": "Running", - "prefect.state.id": mock.ANY, - }, - ) - - assert span.events[1].name == "Failed" - instrumentation.assert_has_attributes( - span.events[1], - { - "prefect.state.message": "Flow run encountered an exception: Exception: This flow broke!", - "prefect.state.type": StateType.FAILED, - "prefect.state.name": "Failed", - "prefect.state.id": mock.ANY, - }, - ) - - assert span.events[2].name == "exception" - instrumentation.assert_has_attributes( - span.events[2], - { - "exception.type": "Exception", - "exception.message": "This flow broke!", - "exception.stacktrace": mock.ANY, - "exception.escaped": "False", - }, - ) - - def test_flow_run_instrumentation_on_timeout( - self, instrumentation: InstrumentationTester - ): - @flow(timeout_seconds=0.1) - def a_slow_flow(): - time.sleep(1) - - with pytest.raises(TimeoutError): - a_slow_flow() - - spans = instrumentation.get_finished_spans() - assert len(spans) == 1 - span = spans[0] - assert span is not None - instrumentation.assert_span_instrumented_for(span, prefect) - - instrumentation.assert_has_attributes( - span, - { - "prefect.run.type": "flow", - "prefect.tags": (), - "prefect.flow.name": "a-slow-flow", - "prefect.run.id": mock.ANY, - }, - ) - - assert span.status.status_code == trace.StatusCode.ERROR - assert span.status.description == "Flow run exceeded timeout of 0.1 second(s)" - - assert len(span.events) == 3 - assert span.events[0].name == "Running" - instrumentation.assert_has_attributes( - span.events[0], - { - "prefect.state.message": "", - "prefect.state.type": StateType.RUNNING, - "prefect.state.name": "Running", - "prefect.state.id": mock.ANY, - }, - ) - - assert span.events[1].name == "TimedOut" - instrumentation.assert_has_attributes( - span.events[1], - { - "prefect.state.message": "Flow run exceeded timeout of 0.1 second(s)", - "prefect.state.type": StateType.FAILED, - "prefect.state.name": "TimedOut", - "prefect.state.id": mock.ANY, - }, - ) - - assert span.events[2].name == "exception" - instrumentation.assert_has_attributes( - span.events[2], - { - "exception.type": "prefect.flow_engine.FlowRunTimeoutError", - "exception.message": "Scope timed out after 0.1 second(s).", - "exception.stacktrace": mock.ANY, - "exception.escaped": "False", - }, - ) diff --git a/tests/test_flows.py b/tests/test_flows.py index b68de0d96708..40793fe38662 100644 --- a/tests/test_flows.py +++ b/tests/test_flows.py @@ -1534,9 +1534,10 @@ def timeout_noticing_task(): @flow(timeout_seconds=0.1) def my_subflow(): - time.sleep(0.5) + start = time.monotonic() + while time.monotonic() - start < 0.5: + pass 
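The added lines just above replace a blocking time.sleep(0.5) with a busy-wait on a monotonic clock inside a subflow declared with timeout_seconds=0.1. A likely reason, though the diff itself does not state it, is that the flow-run timeout is delivered as an exception that can only land while Python bytecode is executing, so a blocking sleep may not be interrupted promptly, whereas a spin loop is. A minimal, self-contained sketch of the same pattern (illustration only, not taken from the patch; busy_wait is a hypothetical helper name):

    import time

    def busy_wait(seconds: float) -> None:
        # Spin on a monotonic clock instead of blocking in time.sleep(), so any
        # surrounding timeout/cancellation machinery can interrupt the loop promptly.
        start = time.monotonic()
        while time.monotonic() - start < seconds:
            pass

    busy_wait(0.5)  # roughly equivalent to time.sleep(0.5), but interruptible between iterations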
timeout_noticing_task() - time.sleep(10) nonlocal completed completed = True diff --git a/tests/test_settings.py b/tests/test_settings.py index a42c32b0482d..c054e114e58d 100644 --- a/tests/test_settings.py +++ b/tests/test_settings.py @@ -231,6 +231,7 @@ "PREFECT_EXPERIMENTAL_WARN": {"test_value": True, "legacy": True}, "PREFECT_EXPERIMENTS_TELEMETRY_ENABLED": {"test_value": False}, "PREFECT_EXPERIMENTS_WARN": {"test_value": True}, + "PREFECT_EXPERIMENTS_LINEAGE_EVENTS_ENABLED": {"test_value": True}, "PREFECT_FLOW_DEFAULT_RETRIES": {"test_value": 10, "legacy": True}, "PREFECT_FLOWS_DEFAULT_RETRIES": {"test_value": 10}, "PREFECT_FLOW_DEFAULT_RETRY_DELAY_SECONDS": {"test_value": 10, "legacy": True}, diff --git a/tests/test_task_engine.py b/tests/test_task_engine.py index 185045fb9455..c4f1847b160c 100644 --- a/tests/test_task_engine.py +++ b/tests/test_task_engine.py @@ -1511,6 +1511,58 @@ async def foo(): assert run.end_time == failed.timestamp assert run.total_run_time == failed.timestamp - running.timestamp + async def test_sync_task_sets_end_time_on_failed_timedout( + self, prefect_client, events_pipeline + ): + ID = None + + @task + def foo(): + nonlocal ID + ID = TaskRunContext.get().task_run.id + raise TimeoutError + + with pytest.raises(TimeoutError): + run_task_sync(foo) + + await events_pipeline.process_events() + + run = await prefect_client.read_task_run(ID) + + states = await prefect_client.read_task_run_states(ID) + running = [state for state in states if state.type == StateType.RUNNING][0] + failed = [state for state in states if state.type == StateType.FAILED][0] + + assert failed.name == "TimedOut" + assert run.end_time + assert run.end_time == failed.timestamp + assert run.total_run_time == failed.timestamp - running.timestamp + + async def test_async_task_sets_end_time_on_failed_timedout( + self, prefect_client, events_pipeline + ): + ID = None + + @task + async def foo(): + nonlocal ID + ID = TaskRunContext.get().task_run.id + raise TimeoutError + + with pytest.raises(TimeoutError): + await run_task_async(foo) + + await events_pipeline.process_events() + run = await prefect_client.read_task_run(ID) + states = await prefect_client.read_task_run_states(ID) + running = [state for state in states if state.type == StateType.RUNNING][0] + failed = [state for state in states if state.type == StateType.FAILED][0] + + assert failed.name == "TimedOut" + assert run.end_time + assert run.end_time == failed.timestamp + assert run.total_run_time == failed.timestamp - running.timestamp + async def test_sync_task_sets_end_time_on_crashed( self, prefect_client, events_pipeline ): diff --git a/tests/test_task_worker.py b/tests/test_task_worker.py index 5e64d241a5da..a7d730128f1b 100644 --- a/tests/test_task_worker.py +++ b/tests/test_task_worker.py @@ -106,9 +106,10 @@ async def test_task_worker_basic_context_management(): async def test_handle_sigterm(mock_create_subscription): task_worker = TaskWorker(...) 
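The rewrites just below in tests/test_task_worker.py change only formatting: comma-separated context managers spread across continuation lines become a single parenthesized with-statement, a form supported since Python 3.10. A minimal sketch of that syntax using stand-in context managers (illustration only; nullcontext is used here purely as a placeholder):

    from contextlib import nullcontext

    # Parenthesized context managers (Python 3.10+): several managers in one
    # with-statement, no backslash continuations needed.
    with (
        nullcontext("first") as a,
        nullcontext("second") as b,
    ):
        print(a, b)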
- with patch("sys.exit") as mock_exit, patch.object( - task_worker, "stop", new_callable=AsyncMock - ) as mock_stop: + with ( + patch("sys.exit") as mock_exit, + patch.object(task_worker, "stop", new_callable=AsyncMock) as mock_stop, + ): await task_worker.start() mock_create_subscription.assert_called_once() @@ -120,8 +121,9 @@ async def test_handle_sigterm(mock_create_subscription): async def test_task_worker_client_id_is_set(): - with patch("socket.gethostname", return_value="foo"), patch( - "os.getpid", return_value=42 + with ( + patch("socket.gethostname", return_value="foo"), + patch("os.getpid", return_value=42), ): task_worker = TaskWorker(...) task_worker._client = MagicMock(api_url="http://localhost:4200") @@ -227,6 +229,13 @@ async def test_task_worker_can_execute_a_single_sync_single_task_run( assert await updated_task_run.state.result() == 42 +def test_task_worker_cannot_be_instantiated_outside_of_async_context(foo_task): + with pytest.raises( + RuntimeError, match="TaskWorker must be initialized within an async context." + ): + TaskWorker(foo_task).start() + + class TestTaskWorkerTaskRunRetries: async def test_task_run_via_task_worker_respects_retry_policy( self, prefect_client, events_pipeline diff --git a/tests/test_tasks.py b/tests/test_tasks.py index 88cc1c4d086a..06d41abd6423 100644 --- a/tests/test_tasks.py +++ b/tests/test_tasks.py @@ -4172,6 +4172,21 @@ def my_flow(): class TestTaskConstructorValidation: + async def test_task_cannot_configure_poorly_typed_retry_delay(self): + with pytest.raises(TypeError, match="Invalid"): + + @task(retries=42, retry_delay_seconds=dict(x=4)) + async def insanity(): + raise RuntimeError("try again!") + + with pytest.raises(TypeError, match="Invalid"): + + @task(retries=42, retry_delay_seconds=2) + async def sanity(): + raise RuntimeError("try again!") + + more_insanity = sanity.with_options(retry_delay_seconds=dict(x=4)) # noqa: F841 + async def test_task_cannot_configure_too_many_custom_retry_delays(self): with pytest.raises(ValueError, match="Can not configure more"): diff --git a/tests/test_variables.py b/tests/test_variables.py index 7720c78b964d..07a0417af313 100644 --- a/tests/test_variables.py +++ b/tests/test_variables.py @@ -5,12 +5,12 @@ @pytest.fixture -async def variable(): - var = await Variable.set(name="my_variable", value="my-value", tags=["123", "456"]) +async def variable() -> Variable: + var = await Variable.aset(name="my_variable", value="my-value", tags=["123", "456"]) return var -async def test_set_sync(variable): +async def test_set_sync(variable: Variable): @flow def test_flow(): # confirm variable doesn't exist @@ -46,7 +46,7 @@ def test_flow(): test_flow() -async def test_set_async(variable): +async def test_set_async(variable: Variable): # confirm variable doesn't exist variable_doesnt_exist = await Variable.get("my_new_variable") assert variable_doesnt_exist is None @@ -96,7 +96,7 @@ async def test_json_types(value): assert set_value.value == value -async def test_get(variable): +async def test_get(variable: Variable): # get value value = await Variable.get(variable.name) assert value == variable.value @@ -110,7 +110,7 @@ async def test_get(variable): assert doesnt_exist_default == 42 -async def test_get_async(variable): +async def test_get_async(variable: Variable): # get value value = await Variable.get(variable.name) assert value == variable.value @@ -124,7 +124,7 @@ async def test_get_async(variable): assert doesnt_exist_default == 42 -async def test_unset(variable): +async def test_unset(variable: 
Variable): # unset a variable unset = await Variable.unset(variable.name) assert unset is True @@ -138,7 +138,7 @@ async def test_unset(variable): assert unset_doesnt_exist is False -async def test_unset_async(variable): +async def test_unset_async(variable: Variable): # unset a variable unset = await Variable.unset(variable.name) assert unset is True @@ -152,7 +152,7 @@ async def test_unset_async(variable): assert unset_doesnt_exist is False -def test_get_in_sync_flow(variable): +def test_get_in_sync_flow(variable: Variable): @flow def foo(): var = Variable.get("my_variable") @@ -162,7 +162,7 @@ def foo(): assert value == variable.value -async def test_get_in_async_flow(variable): +async def test_get_in_async_flow(variable: Variable): @flow async def foo(): var = await Variable.get("my_variable") @@ -192,7 +192,7 @@ async def foo(): assert res.value == "my-value" -async def test_unset_in_sync_flow(variable): +async def test_unset_in_sync_flow(variable: Variable): @flow def foo(): Variable.unset(variable.name) @@ -202,7 +202,7 @@ def foo(): assert value is None -async def test_unset_in_async_flow(variable): +async def test_unset_in_async_flow(variable: Variable): @flow async def foo(): await Variable.unset(variable.name) @@ -210,3 +210,19 @@ async def foo(): await foo() value = await Variable.get(variable.name) assert value is None + + +async def test_explicit_async_methods_from_async_context(variable: Variable): + variable = await Variable.aset("some_variable", value="my value", tags=["marvin"]) + assert variable.value == "my value" + assert variable.tags == ["marvin"] + assert variable.name == "some_variable" + + updated = await Variable.aset( + "some_variable", value="my updated value", overwrite=True + ) + assert updated.value == "my updated value" + assert updated.tags == [] + + await Variable.aunset("some_variable") + assert await Variable.aget("some_variable") is None diff --git a/tests/utilities/test_proxy.py b/tests/utilities/test_proxy.py new file mode 100644 index 000000000000..49723aea11fb --- /dev/null +++ b/tests/utilities/test_proxy.py @@ -0,0 +1,42 @@ +from unittest.mock import Mock + +from prefect.events.clients import WebsocketProxyConnect + + +def test_init_ws_without_proxy(): + client = WebsocketProxyConnect("ws://example.com") + assert client.uri == "ws://example.com" + assert client._host == "example.com" + assert client._port == 80 + assert client._proxy is None + + +def test_init_wss_without_proxy(): + client = WebsocketProxyConnect("wss://example.com") + assert client.uri == "wss://example.com" + assert client._host == "example.com" + assert client._port == 443 + assert "server_hostname" in client._kwargs + assert client._proxy is None + + +def test_init_ws_with_proxy(monkeypatch): + monkeypatch.setenv("HTTP_PROXY", "http://proxy:3128") + mock_proxy = Mock() + monkeypatch.setattr("prefect.events.clients.Proxy", mock_proxy) + + client = WebsocketProxyConnect("ws://example.com") + + mock_proxy.from_url.assert_called_once_with("http://proxy:3128") + assert client._proxy is not None + + +def test_init_wss_with_proxy(monkeypatch): + monkeypatch.setenv("HTTPS_PROXY", "https://proxy:3128") + mock_proxy = Mock() + monkeypatch.setattr("prefect.events.clients.Proxy", mock_proxy) + + client = WebsocketProxyConnect("wss://example.com") + + mock_proxy.from_url.assert_called_once_with("https://proxy:3128") + assert client._proxy is not None diff --git a/tests/workers/test_base_worker.py b/tests/workers/test_base_worker.py index dc870a4a07b6..538801a0cc35 100644 --- 
a/tests/workers/test_base_worker.py +++ b/tests/workers/test_base_worker.py @@ -2083,6 +2083,7 @@ async def _worker_metadata(self) -> WorkerMetadata: assert worker._worker_metadata_sent +@pytest.mark.skip(reason="Passing worker labels to flow run is temporarily disabled") async def test_worker_gives_labels_to_flow_runs_when_using_cloud_api( prefect_client: PrefectClient, worker_deployment_wq1, work_pool ): @@ -2109,10 +2110,16 @@ def create_run_with_deployment(state): CloudClientMock.update_flow_run_labels.assert_awaited_once_with( flow_run.id, - {"prefect.worker.name": worker.name, "prefect.worker.type": worker.type}, + { + "prefect.worker.name": worker.name, + "prefect.worker.type": worker.type, + "prefect.work-pool.name": work_pool.name, + "prefect.work-pool.id": str(work_pool.id), + }, ) +@pytest.mark.skip(reason="Passing worker labels to flow run is temporarily disabled") async def test_worker_does_not_give_labels_to_flow_runs_when_not_using_cloud_api( prefect_client: PrefectClient, worker_deployment_wq1, work_pool ): diff --git a/tests/workers/test_process_worker.py b/tests/workers/test_process_worker.py index 975e6b64b9bd..ba0cfd270705 100644 --- a/tests/workers/test_process_worker.py +++ b/tests/workers/test_process_worker.py @@ -13,7 +13,6 @@ import pytest from exceptiongroup import ExceptionGroup, catch from pydantic import BaseModel -from pydantic_extra_types.pendulum_dt import DateTime from sqlalchemy.ext.asyncio import AsyncSession import prefect @@ -30,6 +29,7 @@ ) from prefect.states import Cancelled, Cancelling, Completed, Pending, Running, Scheduled from prefect.testing.utilities import AsyncMock, MagicMock +from prefect.types import DateTime from prefect.workers.process import ( ProcessWorker, ProcessWorkerResult, diff --git a/ui-v2/.husky/pre-commit b/ui-v2/.husky/pre-commit deleted file mode 100644 index 9cb33f011be4..000000000000 --- a/ui-v2/.husky/pre-commit +++ /dev/null @@ -1,3 +0,0 @@ -cd ui-v2 -npm run lint -npm run format diff --git a/ui-v2/biome.json b/ui-v2/biome.json index 4af4c5e34f5c..6e4f7ed07111 100644 --- a/ui-v2/biome.json +++ b/ui-v2/biome.json @@ -7,14 +7,15 @@ }, "files": { "ignoreUnknown": false, - "ignore": [] + "ignore": ["src/api/prefect.ts", "src/routeTree.gen.ts"] }, "formatter": { "enabled": true, "indentStyle": "tab" }, "organizeImports": { - "enabled": true + "enabled": true, + "ignore": ["src/api/prefect.ts", "src/routeTree.gen.ts"] }, "linter": { "enabled": false, diff --git a/ui-v2/eslint.config.js b/ui-v2/eslint.config.js index 214762a6992a..970fa95b8cc9 100644 --- a/ui-v2/eslint.config.js +++ b/ui-v2/eslint.config.js @@ -1,13 +1,13 @@ import js from "@eslint/js"; +import pluginQuery from "@tanstack/eslint-plugin-query"; import pluginRouter from "@tanstack/eslint-plugin-router"; +import jestDom from "eslint-plugin-jest-dom"; import react from "eslint-plugin-react"; import reactHooks from "eslint-plugin-react-hooks"; import reactRefresh from "eslint-plugin-react-refresh"; +import testingLibrary from "eslint-plugin-testing-library"; import globals from "globals"; import tseslint from "typescript-eslint"; -import testingLibrary from "eslint-plugin-testing-library"; -import jestDom from "eslint-plugin-jest-dom"; -import pluginQuery from "@tanstack/eslint-plugin-query"; export default tseslint.config( { ignores: ["dist", "src/api/prefect.ts"] }, diff --git a/ui-v2/package-lock.json b/ui-v2/package-lock.json index 3f1cee99dea3..298785f1f361 100644 --- a/ui-v2/package-lock.json +++ b/ui-v2/package-lock.json @@ -22,6 +22,7 @@ 
"@radix-ui/react-select": "^2.1.2", "@radix-ui/react-separator": "^1.1.0", "@radix-ui/react-slot": "^1.1.0", + "@radix-ui/react-switch": "^1.1.1", "@radix-ui/react-tabs": "^1.1.1", "@radix-ui/react-toast": "^1.2.2", "@radix-ui/react-tooltip": "^1.1.3", @@ -48,6 +49,7 @@ "devDependencies": { "@biomejs/biome": "1.9.4", "@eslint/js": "^9.12.0", + "@faker-js/faker": "^9.3.0", "@storybook/addon-essentials": "^8.4.2", "@storybook/addon-interactions": "^8.4.2", "@storybook/blocks": "^8.4.2", @@ -66,6 +68,7 @@ "@types/react": "^18.3.12", "@types/react-dom": "^18.3.1", "@vitejs/plugin-react-swc": "^3.5.0", + "@vitest/coverage-v8": "^2.1.4", "autoprefixer": "^10.4.20", "eslint": "^9.12.0", "eslint-plugin-jest-dom": "^5.4.0", @@ -76,7 +79,6 @@ "eslint-plugin-testing-library": "^6.4.0", "eslint-plugin-unused-imports": "^4.1.4", "globals": "^15.10.0", - "husky": "^9.1.6", "jsdom": "^25.0.1", "msw": "^2.6.0", "postcss": "^8.4.47", @@ -392,6 +394,12 @@ "node": ">=6.9.0" } }, + "node_modules/@bcoe/v8-coverage": { + "version": "0.2.3", + "resolved": "https://registry.npmjs.org/@bcoe/v8-coverage/-/v8-coverage-0.2.3.tgz", + "integrity": "sha512-0hYQ8SB4Db5zvZB4axdMHGwEaQjkZzFjQiN9LVYvIFB2nSUHW9tYpxWriPrWDASIxiaXax83REcLxuSdnGPZtw==", + "dev": true + }, "node_modules/@biomejs/biome": { "version": "1.9.4", "resolved": "https://registry.npmjs.org/@biomejs/biome/-/biome-1.9.4.tgz", @@ -1197,6 +1205,22 @@ "node": "^18.18.0 || ^20.9.0 || >=21.1.0" } }, + "node_modules/@faker-js/faker": { + "version": "9.3.0", + "resolved": "https://registry.npmjs.org/@faker-js/faker/-/faker-9.3.0.tgz", + "integrity": "sha512-r0tJ3ZOkMd9xsu3VRfqlFR6cz0V/jFYRswAIpC+m/DIfAUXq7g8N7wTAlhSANySXYGKzGryfDXwtwsY8TxEIDw==", + "dev": true, + "funding": [ + { + "type": "opencollective", + "url": "https://opencollective.com/fakerjs" + } + ], + "engines": { + "node": ">=18.0.0", + "npm": ">=9.0.0" + } + }, "node_modules/@floating-ui/core": { "version": "1.6.8", "resolved": "https://registry.npmjs.org/@floating-ui/core/-/core-1.6.8.tgz", @@ -1415,6 +1439,15 @@ "node": ">=12" } }, + "node_modules/@istanbuljs/schema": { + "version": "0.1.3", + "resolved": "https://registry.npmjs.org/@istanbuljs/schema/-/schema-0.1.3.tgz", + "integrity": "sha512-ZXRY4jNvVgSVQ8DL3LTcakaAtXwTVUxE81hslsyD2AtoXW/wVob10HkOJ1X/pAlcI7D+2YoZKg5do8G/w6RYgA==", + "dev": true, + "engines": { + "node": ">=8" + } + }, "node_modules/@joshwooding/vite-plugin-react-docgen-typescript": { "version": "0.3.0", "resolved": "https://registry.npmjs.org/@joshwooding/vite-plugin-react-docgen-typescript/-/vite-plugin-react-docgen-typescript-0.3.0.tgz", @@ -2378,6 +2411,35 @@ } } }, + "node_modules/@radix-ui/react-switch": { + "version": "1.1.1", + "resolved": "https://registry.npmjs.org/@radix-ui/react-switch/-/react-switch-1.1.1.tgz", + "integrity": "sha512-diPqDDoBcZPSicYoMWdWx+bCPuTRH4QSp9J+65IvtdS0Kuzt67bI6n32vCj8q6NZmYW/ah+2orOtMwcX5eQwIg==", + "license": "MIT", + "dependencies": { + "@radix-ui/primitive": "1.1.0", + "@radix-ui/react-compose-refs": "1.1.0", + "@radix-ui/react-context": "1.1.1", + "@radix-ui/react-primitive": "2.0.0", + "@radix-ui/react-use-controllable-state": "1.1.0", + "@radix-ui/react-use-previous": "1.1.0", + "@radix-ui/react-use-size": "1.1.0" + }, + "peerDependencies": { + "@types/react": "*", + "@types/react-dom": "*", + "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc", + "react-dom": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + }, + "@types/react-dom": { + "optional": true + 
} + } + }, "node_modules/@radix-ui/react-tabs": { "version": "1.1.1", "resolved": "https://registry.npmjs.org/@radix-ui/react-tabs/-/react-tabs-1.1.1.tgz", @@ -3785,152 +3847,6 @@ "eslint": "^8.57.0 || ^9.0.0" } }, - "node_modules/@tanstack/eslint-plugin-query/node_modules/@typescript-eslint/scope-manager": { - "version": "8.16.0", - "resolved": "https://registry.npmjs.org/@typescript-eslint/scope-manager/-/scope-manager-8.16.0.tgz", - "integrity": "sha512-mwsZWubQvBki2t5565uxF0EYvG+FwdFb8bMtDuGQLdCCnGPrDEDvm1gtfynuKlnpzeBRqdFCkMf9jg1fnAK8sg==", - "dev": true, - "license": "MIT", - "dependencies": { - "@typescript-eslint/types": "8.16.0", - "@typescript-eslint/visitor-keys": "8.16.0" - }, - "engines": { - "node": "^18.18.0 || ^20.9.0 || >=21.1.0" - }, - "funding": { - "type": "opencollective", - "url": "https://opencollective.com/typescript-eslint" - } - }, - "node_modules/@tanstack/eslint-plugin-query/node_modules/@typescript-eslint/types": { - "version": "8.16.0", - "resolved": "https://registry.npmjs.org/@typescript-eslint/types/-/types-8.16.0.tgz", - "integrity": "sha512-NzrHj6thBAOSE4d9bsuRNMvk+BvaQvmY4dDglgkgGC0EW/tB3Kelnp3tAKH87GEwzoxgeQn9fNGRyFJM/xd+GQ==", - "dev": true, - "license": "MIT", - "engines": { - "node": "^18.18.0 || ^20.9.0 || >=21.1.0" - }, - "funding": { - "type": "opencollective", - "url": "https://opencollective.com/typescript-eslint" - } - }, - "node_modules/@tanstack/eslint-plugin-query/node_modules/@typescript-eslint/typescript-estree": { - "version": "8.16.0", - "resolved": "https://registry.npmjs.org/@typescript-eslint/typescript-estree/-/typescript-estree-8.16.0.tgz", - "integrity": "sha512-E2+9IzzXMc1iaBy9zmo+UYvluE3TW7bCGWSF41hVWUE01o8nzr1rvOQYSxelxr6StUvRcTMe633eY8mXASMaNw==", - "dev": true, - "license": "BSD-2-Clause", - "dependencies": { - "@typescript-eslint/types": "8.16.0", - "@typescript-eslint/visitor-keys": "8.16.0", - "debug": "^4.3.4", - "fast-glob": "^3.3.2", - "is-glob": "^4.0.3", - "minimatch": "^9.0.4", - "semver": "^7.6.0", - "ts-api-utils": "^1.3.0" - }, - "engines": { - "node": "^18.18.0 || ^20.9.0 || >=21.1.0" - }, - "funding": { - "type": "opencollective", - "url": "https://opencollective.com/typescript-eslint" - }, - "peerDependenciesMeta": { - "typescript": { - "optional": true - } - } - }, - "node_modules/@tanstack/eslint-plugin-query/node_modules/@typescript-eslint/utils": { - "version": "8.16.0", - "resolved": "https://registry.npmjs.org/@typescript-eslint/utils/-/utils-8.16.0.tgz", - "integrity": "sha512-C1zRy/mOL8Pj157GiX4kaw7iyRLKfJXBR3L82hk5kS/GyHcOFmy4YUq/zfZti72I9wnuQtA/+xzft4wCC8PJdA==", - "dev": true, - "license": "MIT", - "dependencies": { - "@eslint-community/eslint-utils": "^4.4.0", - "@typescript-eslint/scope-manager": "8.16.0", - "@typescript-eslint/types": "8.16.0", - "@typescript-eslint/typescript-estree": "8.16.0" - }, - "engines": { - "node": "^18.18.0 || ^20.9.0 || >=21.1.0" - }, - "funding": { - "type": "opencollective", - "url": "https://opencollective.com/typescript-eslint" - }, - "peerDependencies": { - "eslint": "^8.57.0 || ^9.0.0" - }, - "peerDependenciesMeta": { - "typescript": { - "optional": true - } - } - }, - "node_modules/@tanstack/eslint-plugin-query/node_modules/@typescript-eslint/visitor-keys": { - "version": "8.16.0", - "resolved": "https://registry.npmjs.org/@typescript-eslint/visitor-keys/-/visitor-keys-8.16.0.tgz", - "integrity": "sha512-pq19gbaMOmFE3CbL0ZB8J8BFCo2ckfHBfaIsaOZgBIF4EoISJIdLX5xRhd0FGB0LlHReNRuzoJoMGpTjq8F2CQ==", - "dev": true, - "license": "MIT", - "dependencies": { - 
"@typescript-eslint/types": "8.16.0", - "eslint-visitor-keys": "^4.2.0" - }, - "engines": { - "node": "^18.18.0 || ^20.9.0 || >=21.1.0" - }, - "funding": { - "type": "opencollective", - "url": "https://opencollective.com/typescript-eslint" - } - }, - "node_modules/@tanstack/eslint-plugin-query/node_modules/brace-expansion": { - "version": "2.0.1", - "resolved": "https://registry.npmjs.org/brace-expansion/-/brace-expansion-2.0.1.tgz", - "integrity": "sha512-XnAIvQ8eM+kC6aULx6wuQiwVsnzsi9d3WxzV3FpWTGA19F621kwdbsAcFKXgKUHZWsy+mY6iL1sHTxWEFCytDA==", - "dev": true, - "license": "MIT", - "dependencies": { - "balanced-match": "^1.0.0" - } - }, - "node_modules/@tanstack/eslint-plugin-query/node_modules/minimatch": { - "version": "9.0.5", - "resolved": "https://registry.npmjs.org/minimatch/-/minimatch-9.0.5.tgz", - "integrity": "sha512-G6T0ZX48xgozx7587koeX9Ys2NYy6Gmv//P89sEte9V9whIapMNF4idKxnW2QtCcLiTWlb/wfCabAtAFWhhBow==", - "dev": true, - "license": "ISC", - "dependencies": { - "brace-expansion": "^2.0.1" - }, - "engines": { - "node": ">=16 || 14 >=14.17" - }, - "funding": { - "url": "https://github.com/sponsors/isaacs" - } - }, - "node_modules/@tanstack/eslint-plugin-query/node_modules/semver": { - "version": "7.6.3", - "resolved": "https://registry.npmjs.org/semver/-/semver-7.6.3.tgz", - "integrity": "sha512-oVekP1cKtI+CTDvHWYFUcMtsK/00wmAEfyqKfNdARm8u1wNVhSgaX7A8d4UuIlUI5e84iEwOhs7ZPYRmzU9U6A==", - "dev": true, - "license": "ISC", - "bin": { - "semver": "bin/semver.js" - }, - "engines": { - "node": ">=10" - } - }, "node_modules/@tanstack/eslint-plugin-router": { "version": "1.77.7", "resolved": "https://registry.npmjs.org/@tanstack/eslint-plugin-router/-/eslint-plugin-router-1.77.7.tgz", @@ -4568,6 +4484,28 @@ } } }, + "node_modules/@typescript-eslint/eslint-plugin/node_modules/@typescript-eslint/utils": { + "version": "8.12.2", + "resolved": "https://registry.npmjs.org/@typescript-eslint/utils/-/utils-8.12.2.tgz", + "integrity": "sha512-UTTuDIX3fkfAz6iSVa5rTuSfWIYZ6ATtEocQ/umkRSyC9O919lbZ8dcH7mysshrCdrAM03skJOEYaBugxN+M6A==", + "dev": true, + "dependencies": { + "@eslint-community/eslint-utils": "^4.4.0", + "@typescript-eslint/scope-manager": "8.12.2", + "@typescript-eslint/types": "8.12.2", + "@typescript-eslint/typescript-estree": "8.12.2" + }, + "engines": { + "node": "^18.18.0 || ^20.9.0 || >=21.1.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/typescript-eslint" + }, + "peerDependencies": { + "eslint": "^8.57.0 || ^9.0.0" + } + }, "node_modules/@typescript-eslint/parser": { "version": "8.12.2", "resolved": "https://registry.npmjs.org/@typescript-eslint/parser/-/parser-8.12.2.tgz", @@ -4637,6 +4575,28 @@ } } }, + "node_modules/@typescript-eslint/type-utils/node_modules/@typescript-eslint/utils": { + "version": "8.12.2", + "resolved": "https://registry.npmjs.org/@typescript-eslint/utils/-/utils-8.12.2.tgz", + "integrity": "sha512-UTTuDIX3fkfAz6iSVa5rTuSfWIYZ6ATtEocQ/umkRSyC9O919lbZ8dcH7mysshrCdrAM03skJOEYaBugxN+M6A==", + "dev": true, + "dependencies": { + "@eslint-community/eslint-utils": "^4.4.0", + "@typescript-eslint/scope-manager": "8.12.2", + "@typescript-eslint/types": "8.12.2", + "@typescript-eslint/typescript-estree": "8.12.2" + }, + "engines": { + "node": "^18.18.0 || ^20.9.0 || >=21.1.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/typescript-eslint" + }, + "peerDependencies": { + "eslint": "^8.57.0 || ^9.0.0" + } + }, "node_modules/@typescript-eslint/types": { "version": "8.12.2", "resolved": 
"https://registry.npmjs.org/@typescript-eslint/types/-/types-8.12.2.tgz", @@ -4715,15 +4675,15 @@ } }, "node_modules/@typescript-eslint/utils": { - "version": "8.12.2", - "resolved": "https://registry.npmjs.org/@typescript-eslint/utils/-/utils-8.12.2.tgz", - "integrity": "sha512-UTTuDIX3fkfAz6iSVa5rTuSfWIYZ6ATtEocQ/umkRSyC9O919lbZ8dcH7mysshrCdrAM03skJOEYaBugxN+M6A==", + "version": "8.17.0", + "resolved": "https://registry.npmjs.org/@typescript-eslint/utils/-/utils-8.17.0.tgz", + "integrity": "sha512-bQC8BnEkxqG8HBGKwG9wXlZqg37RKSMY7v/X8VEWD8JG2JuTHuNK0VFvMPMUKQcbk6B+tf05k+4AShAEtCtJ/w==", "dev": true, "dependencies": { "@eslint-community/eslint-utils": "^4.4.0", - "@typescript-eslint/scope-manager": "8.12.2", - "@typescript-eslint/types": "8.12.2", - "@typescript-eslint/typescript-estree": "8.12.2" + "@typescript-eslint/scope-manager": "8.17.0", + "@typescript-eslint/types": "8.17.0", + "@typescript-eslint/typescript-estree": "8.17.0" }, "engines": { "node": "^18.18.0 || ^20.9.0 || >=21.1.0" @@ -4734,6 +4694,122 @@ }, "peerDependencies": { "eslint": "^8.57.0 || ^9.0.0" + }, + "peerDependenciesMeta": { + "typescript": { + "optional": true + } + } + }, + "node_modules/@typescript-eslint/utils/node_modules/@typescript-eslint/scope-manager": { + "version": "8.17.0", + "resolved": "https://registry.npmjs.org/@typescript-eslint/scope-manager/-/scope-manager-8.17.0.tgz", + "integrity": "sha512-/ewp4XjvnxaREtqsZjF4Mfn078RD/9GmiEAtTeLQ7yFdKnqwTOgRMSvFz4et9U5RiJQ15WTGXPLj89zGusvxBg==", + "dev": true, + "dependencies": { + "@typescript-eslint/types": "8.17.0", + "@typescript-eslint/visitor-keys": "8.17.0" + }, + "engines": { + "node": "^18.18.0 || ^20.9.0 || >=21.1.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/typescript-eslint" + } + }, + "node_modules/@typescript-eslint/utils/node_modules/@typescript-eslint/types": { + "version": "8.17.0", + "resolved": "https://registry.npmjs.org/@typescript-eslint/types/-/types-8.17.0.tgz", + "integrity": "sha512-gY2TVzeve3z6crqh2Ic7Cr+CAv6pfb0Egee7J5UAVWCpVvDI/F71wNfolIim4FE6hT15EbpZFVUj9j5i38jYXA==", + "dev": true, + "engines": { + "node": "^18.18.0 || ^20.9.0 || >=21.1.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/typescript-eslint" + } + }, + "node_modules/@typescript-eslint/utils/node_modules/@typescript-eslint/typescript-estree": { + "version": "8.17.0", + "resolved": "https://registry.npmjs.org/@typescript-eslint/typescript-estree/-/typescript-estree-8.17.0.tgz", + "integrity": "sha512-JqkOopc1nRKZpX+opvKqnM3XUlM7LpFMD0lYxTqOTKQfCWAmxw45e3qlOCsEqEB2yuacujivudOFpCnqkBDNMw==", + "dev": true, + "dependencies": { + "@typescript-eslint/types": "8.17.0", + "@typescript-eslint/visitor-keys": "8.17.0", + "debug": "^4.3.4", + "fast-glob": "^3.3.2", + "is-glob": "^4.0.3", + "minimatch": "^9.0.4", + "semver": "^7.6.0", + "ts-api-utils": "^1.3.0" + }, + "engines": { + "node": "^18.18.0 || ^20.9.0 || >=21.1.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/typescript-eslint" + }, + "peerDependenciesMeta": { + "typescript": { + "optional": true + } + } + }, + "node_modules/@typescript-eslint/utils/node_modules/@typescript-eslint/visitor-keys": { + "version": "8.17.0", + "resolved": "https://registry.npmjs.org/@typescript-eslint/visitor-keys/-/visitor-keys-8.17.0.tgz", + "integrity": "sha512-1Hm7THLpO6ww5QU6H/Qp+AusUUl+z/CAm3cNZZ0jQvon9yicgO7Rwd+/WWRpMKLYV6p2UvdbR27c86rzCPpreg==", + "dev": true, + "dependencies": { + "@typescript-eslint/types": 
"8.17.0", + "eslint-visitor-keys": "^4.2.0" + }, + "engines": { + "node": "^18.18.0 || ^20.9.0 || >=21.1.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/typescript-eslint" + } + }, + "node_modules/@typescript-eslint/utils/node_modules/brace-expansion": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/brace-expansion/-/brace-expansion-2.0.1.tgz", + "integrity": "sha512-XnAIvQ8eM+kC6aULx6wuQiwVsnzsi9d3WxzV3FpWTGA19F621kwdbsAcFKXgKUHZWsy+mY6iL1sHTxWEFCytDA==", + "dev": true, + "dependencies": { + "balanced-match": "^1.0.0" + } + }, + "node_modules/@typescript-eslint/utils/node_modules/minimatch": { + "version": "9.0.5", + "resolved": "https://registry.npmjs.org/minimatch/-/minimatch-9.0.5.tgz", + "integrity": "sha512-G6T0ZX48xgozx7587koeX9Ys2NYy6Gmv//P89sEte9V9whIapMNF4idKxnW2QtCcLiTWlb/wfCabAtAFWhhBow==", + "dev": true, + "dependencies": { + "brace-expansion": "^2.0.1" + }, + "engines": { + "node": ">=16 || 14 >=14.17" + }, + "funding": { + "url": "https://github.com/sponsors/isaacs" + } + }, + "node_modules/@typescript-eslint/utils/node_modules/semver": { + "version": "7.6.3", + "resolved": "https://registry.npmjs.org/semver/-/semver-7.6.3.tgz", + "integrity": "sha512-oVekP1cKtI+CTDvHWYFUcMtsK/00wmAEfyqKfNdARm8u1wNVhSgaX7A8d4UuIlUI5e84iEwOhs7ZPYRmzU9U6A==", + "dev": true, + "bin": { + "semver": "bin/semver.js" + }, + "engines": { + "node": ">=10" } }, "node_modules/@typescript-eslint/visitor-keys": { @@ -4828,6 +4904,38 @@ "vite": "^4 || ^5" } }, + "node_modules/@vitest/coverage-v8": { + "version": "2.1.4", + "resolved": "https://registry.npmjs.org/@vitest/coverage-v8/-/coverage-v8-2.1.4.tgz", + "integrity": "sha512-FPKQuJfR6VTfcNMcGpqInmtJuVXFSCd9HQltYncfR01AzXhLucMEtQ5SinPdZxsT5x/5BK7I5qFJ5/ApGCmyTQ==", + "dev": true, + "dependencies": { + "@ampproject/remapping": "^2.3.0", + "@bcoe/v8-coverage": "^0.2.3", + "debug": "^4.3.7", + "istanbul-lib-coverage": "^3.2.2", + "istanbul-lib-report": "^3.0.1", + "istanbul-lib-source-maps": "^5.0.6", + "istanbul-reports": "^3.1.7", + "magic-string": "^0.30.12", + "magicast": "^0.3.5", + "std-env": "^3.7.0", + "test-exclude": "^7.0.1", + "tinyrainbow": "^1.2.0" + }, + "funding": { + "url": "https://opencollective.com/vitest" + }, + "peerDependencies": { + "@vitest/browser": "2.1.4", + "vitest": "2.1.4" + }, + "peerDependenciesMeta": { + "@vitest/browser": { + "optional": true + } + } + }, "node_modules/@vitest/expect": { "version": "2.1.4", "resolved": "https://registry.npmjs.org/@vitest/expect/-/expect-2.1.4.tgz", @@ -6527,9 +6635,9 @@ } }, "node_modules/eslint-plugin-testing-library": { - "version": "6.4.0", - "resolved": "https://registry.npmjs.org/eslint-plugin-testing-library/-/eslint-plugin-testing-library-6.4.0.tgz", - "integrity": "sha512-yeWF+YgCgvNyPNI9UKnG0FjeE2sk93N/3lsKqcmR8dSfeXJwFT5irnWo7NjLf152HkRzfoFjh3LsBUrhvFz4eA==", + "version": "6.5.0", + "resolved": "https://registry.npmjs.org/eslint-plugin-testing-library/-/eslint-plugin-testing-library-6.5.0.tgz", + "integrity": "sha512-Ls5TUfLm5/snocMAOlofSOJxNN0aKqwTlco7CrNtMjkTdQlkpSMaeTCDHCuXfzrI97xcx2rSCNeKeJjtpkNC1w==", "dev": true, "dependencies": { "@typescript-eslint/utils": "^5.62.0" @@ -7360,6 +7468,12 @@ "node": ">=18" } }, + "node_modules/html-escaper": { + "version": "2.0.2", + "resolved": "https://registry.npmjs.org/html-escaper/-/html-escaper-2.0.2.tgz", + "integrity": "sha512-H2iMtd0I4Mt5eYiapRdIDjp+XzelXQ0tFE4JS7YFwFevXXMmOp9myNrUvCg0D6ws8iqkRPBfKHgbwig1SmlLfg==", + "dev": true + }, "node_modules/http-proxy-agent": { 
"version": "7.0.2", "resolved": "https://registry.npmjs.org/http-proxy-agent/-/http-proxy-agent-7.0.2.tgz", @@ -7386,21 +7500,6 @@ "node": ">= 14" } }, - "node_modules/husky": { - "version": "9.1.6", - "resolved": "https://registry.npmjs.org/husky/-/husky-9.1.6.tgz", - "integrity": "sha512-sqbjZKK7kf44hfdE94EoX8MZNk0n7HeW37O4YrVGCF4wzgQjp+akPAkfUK5LZ6KuR/6sqeAVuXHji+RzQgOn5A==", - "dev": true, - "bin": { - "husky": "bin.js" - }, - "engines": { - "node": ">=18" - }, - "funding": { - "url": "https://github.com/sponsors/typicode" - } - }, "node_modules/iconv-lite": { "version": "0.6.3", "resolved": "https://registry.npmjs.org/iconv-lite/-/iconv-lite-0.6.3.tgz", @@ -7929,6 +8028,56 @@ "resolved": "https://registry.npmjs.org/isexe/-/isexe-2.0.0.tgz", "integrity": "sha512-RHxMLp9lnKHGHRng9QFhRCMbYAcVpn69smSGcq3f36xjgVVWThj4qqLbTLlq7Ssj8B+fIQ1EuCEGI2lKsyQeIw==" }, + "node_modules/istanbul-lib-coverage": { + "version": "3.2.2", + "resolved": "https://registry.npmjs.org/istanbul-lib-coverage/-/istanbul-lib-coverage-3.2.2.tgz", + "integrity": "sha512-O8dpsF+r0WV/8MNRKfnmrtCWhuKjxrq2w+jpzBL5UZKTi2LeVWnWOmWRxFlesJONmc+wLAGvKQZEOanko0LFTg==", + "dev": true, + "engines": { + "node": ">=8" + } + }, + "node_modules/istanbul-lib-report": { + "version": "3.0.1", + "resolved": "https://registry.npmjs.org/istanbul-lib-report/-/istanbul-lib-report-3.0.1.tgz", + "integrity": "sha512-GCfE1mtsHGOELCU8e/Z7YWzpmybrx/+dSTfLrvY8qRmaY6zXTKWn6WQIjaAFw069icm6GVMNkgu0NzI4iPZUNw==", + "dev": true, + "dependencies": { + "istanbul-lib-coverage": "^3.0.0", + "make-dir": "^4.0.0", + "supports-color": "^7.1.0" + }, + "engines": { + "node": ">=10" + } + }, + "node_modules/istanbul-lib-source-maps": { + "version": "5.0.6", + "resolved": "https://registry.npmjs.org/istanbul-lib-source-maps/-/istanbul-lib-source-maps-5.0.6.tgz", + "integrity": "sha512-yg2d+Em4KizZC5niWhQaIomgf5WlL4vOOjZ5xGCmF8SnPE/mDWWXgvRExdcpCgh9lLRRa1/fSYp2ymmbJ1pI+A==", + "dev": true, + "dependencies": { + "@jridgewell/trace-mapping": "^0.3.23", + "debug": "^4.1.1", + "istanbul-lib-coverage": "^3.0.0" + }, + "engines": { + "node": ">=10" + } + }, + "node_modules/istanbul-reports": { + "version": "3.1.7", + "resolved": "https://registry.npmjs.org/istanbul-reports/-/istanbul-reports-3.1.7.tgz", + "integrity": "sha512-BewmUXImeuRk2YY0PVbxgKAysvhRPUQE0h5QRM++nVWyubKGV0l8qQ5op8+B2DOmwSe63Jivj0BjkPQVf8fP5g==", + "dev": true, + "dependencies": { + "html-escaper": "^2.0.0", + "istanbul-lib-report": "^3.0.0" + }, + "engines": { + "node": ">=8" + } + }, "node_modules/iterator.prototype": { "version": "1.1.3", "resolved": "https://registry.npmjs.org/iterator.prototype/-/iterator.prototype-1.1.3.tgz", @@ -8204,6 +8353,44 @@ "@jridgewell/sourcemap-codec": "^1.5.0" } }, + "node_modules/magicast": { + "version": "0.3.5", + "resolved": "https://registry.npmjs.org/magicast/-/magicast-0.3.5.tgz", + "integrity": "sha512-L0WhttDl+2BOsybvEOLK7fW3UA0OQ0IQ2d6Zl2x/a6vVRs3bAY0ECOSHHeL5jD+SbOpOCUEi0y1DgHEn9Qn1AQ==", + "dev": true, + "dependencies": { + "@babel/parser": "^7.25.4", + "@babel/types": "^7.25.4", + "source-map-js": "^1.2.0" + } + }, + "node_modules/make-dir": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/make-dir/-/make-dir-4.0.0.tgz", + "integrity": "sha512-hXdUTZYIVOt1Ex//jAQi+wTZZpUpwBj/0QsOzqegb3rGMMeJiSEu5xLHnYfBrRV4RH2+OCSOO95Is/7x1WJ4bw==", + "dev": true, + "dependencies": { + "semver": "^7.5.3" + }, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + 
"node_modules/make-dir/node_modules/semver": { + "version": "7.6.3", + "resolved": "https://registry.npmjs.org/semver/-/semver-7.6.3.tgz", + "integrity": "sha512-oVekP1cKtI+CTDvHWYFUcMtsK/00wmAEfyqKfNdARm8u1wNVhSgaX7A8d4UuIlUI5e84iEwOhs7ZPYRmzU9U6A==", + "dev": true, + "bin": { + "semver": "bin/semver.js" + }, + "engines": { + "node": ">=10" + } + }, "node_modules/map-or-similar": { "version": "1.5.0", "resolved": "https://registry.npmjs.org/map-or-similar/-/map-or-similar-1.5.0.tgz", @@ -8371,9 +8558,9 @@ } }, "node_modules/nanoid": { - "version": "3.3.7", - "resolved": "https://registry.npmjs.org/nanoid/-/nanoid-3.3.7.tgz", - "integrity": "sha512-eSRppjcPIatRIMC1U6UngP8XFcz8MQWGQdt1MTBQ7NaAmvXDfvNxbvWV3x2y6CdEUciCSsDHDQZbhYaB8QEo2g==", + "version": "3.3.8", + "resolved": "https://registry.npmjs.org/nanoid/-/nanoid-3.3.8.tgz", + "integrity": "sha512-WNLf5Sd8oZxOm+TzppcYk8gVOgP+l58xNy58D0nbUnOxOWRWvlcCV4kUF7ltmI6PsrLl/BgKEyS4mqsGChFN0w==", "funding": [ { "type": "github", @@ -10144,6 +10331,44 @@ "url": "https://github.com/sponsors/ljharb" } }, + "node_modules/test-exclude": { + "version": "7.0.1", + "resolved": "https://registry.npmjs.org/test-exclude/-/test-exclude-7.0.1.tgz", + "integrity": "sha512-pFYqmTw68LXVjeWJMST4+borgQP2AyMNbg1BpZh9LbyhUeNkeaPF9gzfPGUAnSMV3qPYdWUwDIjjCLiSDOl7vg==", + "dev": true, + "dependencies": { + "@istanbuljs/schema": "^0.1.2", + "glob": "^10.4.1", + "minimatch": "^9.0.4" + }, + "engines": { + "node": ">=18" + } + }, + "node_modules/test-exclude/node_modules/brace-expansion": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/brace-expansion/-/brace-expansion-2.0.1.tgz", + "integrity": "sha512-XnAIvQ8eM+kC6aULx6wuQiwVsnzsi9d3WxzV3FpWTGA19F621kwdbsAcFKXgKUHZWsy+mY6iL1sHTxWEFCytDA==", + "dev": true, + "dependencies": { + "balanced-match": "^1.0.0" + } + }, + "node_modules/test-exclude/node_modules/minimatch": { + "version": "9.0.5", + "resolved": "https://registry.npmjs.org/minimatch/-/minimatch-9.0.5.tgz", + "integrity": "sha512-G6T0ZX48xgozx7587koeX9Ys2NYy6Gmv//P89sEte9V9whIapMNF4idKxnW2QtCcLiTWlb/wfCabAtAFWhhBow==", + "dev": true, + "dependencies": { + "brace-expansion": "^2.0.1" + }, + "engines": { + "node": ">=16 || 14 >=14.17" + }, + "funding": { + "url": "https://github.com/sponsors/isaacs" + } + }, "node_modules/text-table": { "version": "0.2.0", "resolved": "https://registry.npmjs.org/text-table/-/text-table-0.2.0.tgz", @@ -10492,6 +10717,28 @@ } } }, + "node_modules/typescript-eslint/node_modules/@typescript-eslint/utils": { + "version": "8.12.2", + "resolved": "https://registry.npmjs.org/@typescript-eslint/utils/-/utils-8.12.2.tgz", + "integrity": "sha512-UTTuDIX3fkfAz6iSVa5rTuSfWIYZ6ATtEocQ/umkRSyC9O919lbZ8dcH7mysshrCdrAM03skJOEYaBugxN+M6A==", + "dev": true, + "dependencies": { + "@eslint-community/eslint-utils": "^4.4.0", + "@typescript-eslint/scope-manager": "8.12.2", + "@typescript-eslint/types": "8.12.2", + "@typescript-eslint/typescript-estree": "8.12.2" + }, + "engines": { + "node": "^18.18.0 || ^20.9.0 || >=21.1.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/typescript-eslint" + }, + "peerDependencies": { + "eslint": "^8.57.0 || ^9.0.0" + } + }, "node_modules/unbox-primitive": { "version": "1.0.2", "resolved": "https://registry.npmjs.org/unbox-primitive/-/unbox-primitive-1.0.2.tgz", diff --git a/ui-v2/package.json b/ui-v2/package.json index 0a8658674491..db5afb5bc3fb 100644 --- a/ui-v2/package.json +++ b/ui-v2/package.json @@ -9,11 +9,10 @@ "test": "vitest", "lint": "eslint .", 
"lint:fix": "eslint . --fix", - "format:check": "biome format", - "format": "biome format --write", + "format:check": "biome check", + "format": "biome check --write", "preview": "vite preview", "service-sync": "uv run ../scripts/generate_oss_openapi_schema.py && npx openapi-typescript oss_schema.json -o src/api/prefect.ts && rm oss_schema.json", - "prepare": "cd .. && husky ui-v2/.husky", "storybook": "storybook dev -p 6006", "build-storybook": "storybook build" }, @@ -32,6 +31,7 @@ "@radix-ui/react-select": "^2.1.2", "@radix-ui/react-separator": "^1.1.0", "@radix-ui/react-slot": "^1.1.0", + "@radix-ui/react-switch": "^1.1.1", "@radix-ui/react-tabs": "^1.1.1", "@radix-ui/react-toast": "^1.2.2", "@radix-ui/react-tooltip": "^1.1.3", @@ -58,6 +58,7 @@ "devDependencies": { "@biomejs/biome": "1.9.4", "@eslint/js": "^9.12.0", + "@faker-js/faker": "^9.3.0", "@storybook/addon-essentials": "^8.4.2", "@storybook/addon-interactions": "^8.4.2", "@storybook/blocks": "^8.4.2", @@ -76,6 +77,7 @@ "@types/react": "^18.3.12", "@types/react-dom": "^18.3.1", "@vitejs/plugin-react-swc": "^3.5.0", + "@vitest/coverage-v8": "^2.1.4", "autoprefixer": "^10.4.20", "eslint": "^9.12.0", "eslint-plugin-jest-dom": "^5.4.0", @@ -86,7 +88,6 @@ "eslint-plugin-testing-library": "^6.4.0", "eslint-plugin-unused-imports": "^4.1.4", "globals": "^15.10.0", - "husky": "^9.1.6", "jsdom": "^25.0.1", "msw": "^2.6.0", "postcss": "^8.4.47", diff --git a/ui-v2/src/api/prefect.ts b/ui-v2/src/api/prefect.ts index f49fc08bb2ba..bcbff9e8b841 100644 --- a/ui-v2/src/api/prefect.ts +++ b/ui-v2/src/api/prefect.ts @@ -4,15898 +4,15705 @@ */ export interface paths { - "/health": { - parameters: { - query?: never; - header?: never; - path?: never; - cookie?: never; - }; - /** Health Check */ - get: operations["health_check_health_get"]; - put?: never; - post?: never; - delete?: never; - options?: never; - head?: never; - patch?: never; - trace?: never; - }; - "/version": { - parameters: { - query?: never; - header?: never; - path?: never; - cookie?: never; - }; - /** Server Version */ - get: operations["server_version_version_get"]; - put?: never; - post?: never; - delete?: never; - options?: never; - head?: never; - patch?: never; - trace?: never; - }; - "/flows/": { - parameters: { - query?: never; - header?: never; - path?: never; - cookie?: never; - }; - get?: never; - put?: never; - /** - * Create Flow - * @description Gracefully creates a new flow from the provided schema. If a flow with the - * same name already exists, the existing flow is returned. - */ - post: operations["create_flow_flows__post"]; - delete?: never; - options?: never; - head?: never; - patch?: never; - trace?: never; - }; - "/flows/{id}": { - parameters: { - query?: never; - header?: never; - path?: never; - cookie?: never; - }; - /** - * Read Flow - * @description Get a flow by id. - */ - get: operations["read_flow_flows__id__get"]; - put?: never; - post?: never; - /** - * Delete Flow - * @description Delete a flow by id. - */ - delete: operations["delete_flow_flows__id__delete"]; - options?: never; - head?: never; - /** - * Update Flow - * @description Updates a flow. - */ - patch: operations["update_flow_flows__id__patch"]; - trace?: never; - }; - "/flows/count": { - parameters: { - query?: never; - header?: never; - path?: never; - cookie?: never; - }; - get?: never; - put?: never; - /** - * Count Flows - * @description Count flows. 
- */ - post: operations["count_flows_flows_count_post"]; - delete?: never; - options?: never; - head?: never; - patch?: never; - trace?: never; - }; - "/flows/name/{name}": { - parameters: { - query?: never; - header?: never; - path?: never; - cookie?: never; - }; - /** - * Read Flow By Name - * @description Get a flow by name. - */ - get: operations["read_flow_by_name_flows_name__name__get"]; - put?: never; - post?: never; - delete?: never; - options?: never; - head?: never; - patch?: never; - trace?: never; - }; - "/flows/filter": { - parameters: { - query?: never; - header?: never; - path?: never; - cookie?: never; - }; - get?: never; - put?: never; - /** - * Read Flows - * @description Query for flows. - */ - post: operations["read_flows_flows_filter_post"]; - delete?: never; - options?: never; - head?: never; - patch?: never; - trace?: never; - }; - "/flows/paginate": { - parameters: { - query?: never; - header?: never; - path?: never; - cookie?: never; - }; - get?: never; - put?: never; - /** - * Paginate Flows - * @description Pagination query for flows. - */ - post: operations["paginate_flows_flows_paginate_post"]; - delete?: never; - options?: never; - head?: never; - patch?: never; - trace?: never; - }; - "/flow_runs/": { - parameters: { - query?: never; - header?: never; - path?: never; - cookie?: never; - }; - get?: never; - put?: never; - /** - * Create Flow Run - * @description Create a flow run. If a flow run with the same flow_id and - * idempotency key already exists, the existing flow run will be returned. - * - * If no state is provided, the flow run will be created in a PENDING state. - */ - post: operations["create_flow_run_flow_runs__post"]; - delete?: never; - options?: never; - head?: never; - patch?: never; - trace?: never; - }; - "/flow_runs/{id}": { - parameters: { - query?: never; - header?: never; - path?: never; - cookie?: never; - }; - /** - * Read Flow Run - * @description Get a flow run by id. - */ - get: operations["read_flow_run_flow_runs__id__get"]; - put?: never; - post?: never; - /** - * Delete Flow Run - * @description Delete a flow run by id. - */ - delete: operations["delete_flow_run_flow_runs__id__delete"]; - options?: never; - head?: never; - /** - * Update Flow Run - * @description Updates a flow run. - */ - patch: operations["update_flow_run_flow_runs__id__patch"]; - trace?: never; - }; - "/flow_runs/count": { - parameters: { - query?: never; - header?: never; - path?: never; - cookie?: never; - }; - get?: never; - put?: never; - /** - * Count Flow Runs - * @description Query for flow runs. - */ - post: operations["count_flow_runs_flow_runs_count_post"]; - delete?: never; - options?: never; - head?: never; - patch?: never; - trace?: never; - }; - "/flow_runs/lateness": { - parameters: { - query?: never; - header?: never; - path?: never; - cookie?: never; - }; - get?: never; - put?: never; - /** - * Average Flow Run Lateness - * @description Query for average flow-run lateness in seconds. - */ - post: operations["average_flow_run_lateness_flow_runs_lateness_post"]; - delete?: never; - options?: never; - head?: never; - patch?: never; - trace?: never; - }; - "/flow_runs/history": { - parameters: { - query?: never; - header?: never; - path?: never; - cookie?: never; - }; - get?: never; - put?: never; - /** - * Flow Run History - * @description Query for flow run history data across a given range and interval. 
- */ - post: operations["flow_run_history_flow_runs_history_post"]; - delete?: never; - options?: never; - head?: never; - patch?: never; - trace?: never; - }; - "/flow_runs/{id}/graph": { - parameters: { - query?: never; - header?: never; - path?: never; - cookie?: never; - }; - /** - * Read Flow Run Graph V1 - * @description Get a task run dependency map for a given flow run. - */ - get: operations["read_flow_run_graph_v1_flow_runs__id__graph_get"]; - put?: never; - post?: never; - delete?: never; - options?: never; - head?: never; - patch?: never; - trace?: never; - }; - "/flow_runs/{id}/graph-v2": { - parameters: { - query?: never; - header?: never; - path?: never; - cookie?: never; - }; - /** - * Read Flow Run Graph V2 - * @description Get a graph of the tasks and subflow runs for the given flow run - */ - get: operations["read_flow_run_graph_v2_flow_runs__id__graph_v2_get"]; - put?: never; - post?: never; - delete?: never; - options?: never; - head?: never; - patch?: never; - trace?: never; - }; - "/flow_runs/{id}/resume": { - parameters: { - query?: never; - header?: never; - path?: never; - cookie?: never; - }; - get?: never; - put?: never; - /** - * Resume Flow Run - * @description Resume a paused flow run. - */ - post: operations["resume_flow_run_flow_runs__id__resume_post"]; - delete?: never; - options?: never; - head?: never; - patch?: never; - trace?: never; - }; - "/flow_runs/filter": { - parameters: { - query?: never; - header?: never; - path?: never; - cookie?: never; - }; - get?: never; - put?: never; - /** - * Read Flow Runs - * @description Query for flow runs. - */ - post: operations["read_flow_runs_flow_runs_filter_post"]; - delete?: never; - options?: never; - head?: never; - patch?: never; - trace?: never; - }; - "/flow_runs/{id}/set_state": { - parameters: { - query?: never; - header?: never; - path?: never; - cookie?: never; - }; - get?: never; - put?: never; - /** - * Set Flow Run State - * @description Set a flow run state, invoking any orchestration rules. - */ - post: operations["set_flow_run_state_flow_runs__id__set_state_post"]; - delete?: never; - options?: never; - head?: never; - patch?: never; - trace?: never; - }; - "/flow_runs/{id}/input": { - parameters: { - query?: never; - header?: never; - path?: never; - cookie?: never; - }; - get?: never; - put?: never; - /** - * Create Flow Run Input - * @description Create a key/value input for a flow run. 
- */ - post: operations["create_flow_run_input_flow_runs__id__input_post"]; - delete?: never; - options?: never; - head?: never; - patch?: never; - trace?: never; - }; - "/flow_runs/{id}/input/filter": { - parameters: { - query?: never; - header?: never; - path?: never; - cookie?: never; - }; - get?: never; - put?: never; - /** - * Filter Flow Run Input - * @description Filter flow run inputs by key prefix - */ - post: operations["filter_flow_run_input_flow_runs__id__input_filter_post"]; - delete?: never; - options?: never; - head?: never; - patch?: never; - trace?: never; - }; - "/flow_runs/{id}/input/{key}": { - parameters: { - query?: never; - header?: never; - path?: never; - cookie?: never; - }; - /** - * Read Flow Run Input - * @description Create a value from a flow run input - */ - get: operations["read_flow_run_input_flow_runs__id__input__key__get"]; - put?: never; - post?: never; - /** - * Delete Flow Run Input - * @description Delete a flow run input - */ - delete: operations["delete_flow_run_input_flow_runs__id__input__key__delete"]; - options?: never; - head?: never; - patch?: never; - trace?: never; - }; - "/flow_runs/paginate": { - parameters: { - query?: never; - header?: never; - path?: never; - cookie?: never; - }; - get?: never; - put?: never; - /** - * Paginate Flow Runs - * @description Pagination query for flow runs. - */ - post: operations["paginate_flow_runs_flow_runs_paginate_post"]; - delete?: never; - options?: never; - head?: never; - patch?: never; - trace?: never; - }; - "/flow_runs/{id}/logs/download": { - parameters: { - query?: never; - header?: never; - path?: never; - cookie?: never; - }; - /** - * Download Logs - * @description Download all flow run logs as a CSV file, collecting all logs until there are no more logs to retrieve. - */ - get: operations["download_logs_flow_runs__id__logs_download_get"]; - put?: never; - post?: never; - delete?: never; - options?: never; - head?: never; - patch?: never; - trace?: never; - }; - "/task_runs/": { - parameters: { - query?: never; - header?: never; - path?: never; - cookie?: never; - }; - get?: never; - put?: never; - /** - * Create Task Run - * @description Create a task run. If a task run with the same flow_run_id, - * task_key, and dynamic_key already exists, the existing task - * run will be returned. - * - * If no state is provided, the task run will be created in a PENDING state. - */ - post: operations["create_task_run_task_runs__post"]; - delete?: never; - options?: never; - head?: never; - patch?: never; - trace?: never; - }; - "/task_runs/{id}": { - parameters: { - query?: never; - header?: never; - path?: never; - cookie?: never; - }; - /** - * Read Task Run - * @description Get a task run by id. - */ - get: operations["read_task_run_task_runs__id__get"]; - put?: never; - post?: never; - /** - * Delete Task Run - * @description Delete a task run by id. - */ - delete: operations["delete_task_run_task_runs__id__delete"]; - options?: never; - head?: never; - /** - * Update Task Run - * @description Updates a task run. - */ - patch: operations["update_task_run_task_runs__id__patch"]; - trace?: never; - }; - "/task_runs/count": { - parameters: { - query?: never; - header?: never; - path?: never; - cookie?: never; - }; - get?: never; - put?: never; - /** - * Count Task Runs - * @description Count task runs. 
- */ - post: operations["count_task_runs_task_runs_count_post"]; - delete?: never; - options?: never; - head?: never; - patch?: never; - trace?: never; - }; - "/task_runs/history": { - parameters: { - query?: never; - header?: never; - path?: never; - cookie?: never; - }; - get?: never; - put?: never; - /** - * Task Run History - * @description Query for task run history data across a given range and interval. - */ - post: operations["task_run_history_task_runs_history_post"]; - delete?: never; - options?: never; - head?: never; - patch?: never; - trace?: never; - }; - "/task_runs/filter": { - parameters: { - query?: never; - header?: never; - path?: never; - cookie?: never; - }; - get?: never; - put?: never; - /** - * Read Task Runs - * @description Query for task runs. - */ - post: operations["read_task_runs_task_runs_filter_post"]; - delete?: never; - options?: never; - head?: never; - patch?: never; - trace?: never; - }; - "/task_runs/{id}/set_state": { - parameters: { - query?: never; - header?: never; - path?: never; - cookie?: never; - }; - get?: never; - put?: never; - /** - * Set Task Run State - * @description Set a task run state, invoking any orchestration rules. - */ - post: operations["set_task_run_state_task_runs__id__set_state_post"]; - delete?: never; - options?: never; - head?: never; - patch?: never; - trace?: never; - }; - "/flow_run_states/{id}": { - parameters: { - query?: never; - header?: never; - path?: never; - cookie?: never; - }; - /** - * Read Flow Run State - * @description Get a flow run state by id. - */ - get: operations["read_flow_run_state_flow_run_states__id__get"]; - put?: never; - post?: never; - delete?: never; - options?: never; - head?: never; - patch?: never; - trace?: never; - }; - "/flow_run_states/": { - parameters: { - query?: never; - header?: never; - path?: never; - cookie?: never; - }; - /** - * Read Flow Run States - * @description Get states associated with a flow run. - */ - get: operations["read_flow_run_states_flow_run_states__get"]; - put?: never; - post?: never; - delete?: never; - options?: never; - head?: never; - patch?: never; - trace?: never; - }; - "/task_run_states/{id}": { - parameters: { - query?: never; - header?: never; - path?: never; - cookie?: never; - }; - /** - * Read Task Run State - * @description Get a task run state by id. - */ - get: operations["read_task_run_state_task_run_states__id__get"]; - put?: never; - post?: never; - delete?: never; - options?: never; - head?: never; - patch?: never; - trace?: never; - }; - "/task_run_states/": { - parameters: { - query?: never; - header?: never; - path?: never; - cookie?: never; - }; - /** - * Read Task Run States - * @description Get states associated with a task run. - */ - get: operations["read_task_run_states_task_run_states__get"]; - put?: never; - post?: never; - delete?: never; - options?: never; - head?: never; - patch?: never; - trace?: never; - }; - "/flow_run_notification_policies/": { - parameters: { - query?: never; - header?: never; - path?: never; - cookie?: never; - }; - get?: never; - put?: never; - /** - * Create Flow Run Notification Policy - * @description Creates a new flow run notification policy. 
- */ - post: operations["create_flow_run_notification_policy_flow_run_notification_policies__post"]; - delete?: never; - options?: never; - head?: never; - patch?: never; - trace?: never; - }; - "/flow_run_notification_policies/{id}": { - parameters: { - query?: never; - header?: never; - path?: never; - cookie?: never; - }; - /** - * Read Flow Run Notification Policy - * @description Get a flow run notification policy by id. - */ - get: operations["read_flow_run_notification_policy_flow_run_notification_policies__id__get"]; - put?: never; - post?: never; - /** - * Delete Flow Run Notification Policy - * @description Delete a flow run notification policy by id. - */ - delete: operations["delete_flow_run_notification_policy_flow_run_notification_policies__id__delete"]; - options?: never; - head?: never; - /** - * Update Flow Run Notification Policy - * @description Updates an existing flow run notification policy. - */ - patch: operations["update_flow_run_notification_policy_flow_run_notification_policies__id__patch"]; - trace?: never; - }; - "/flow_run_notification_policies/filter": { - parameters: { - query?: never; - header?: never; - path?: never; - cookie?: never; - }; - get?: never; - put?: never; - /** - * Read Flow Run Notification Policies - * @description Query for flow run notification policies. - */ - post: operations["read_flow_run_notification_policies_flow_run_notification_policies_filter_post"]; - delete?: never; - options?: never; - head?: never; - patch?: never; - trace?: never; - }; - "/deployments/": { - parameters: { - query?: never; - header?: never; - path?: never; - cookie?: never; - }; - get?: never; - put?: never; - /** - * Create Deployment - * @description Gracefully creates a new deployment from the provided schema. If a deployment with - * the same name and flow_id already exists, the deployment is updated. - * - * If the deployment has an active schedule, flow runs will be scheduled. - * When upserting, any scheduled runs from the existing deployment will be deleted. - */ - post: operations["create_deployment_deployments__post"]; - delete?: never; - options?: never; - head?: never; - patch?: never; - trace?: never; - }; - "/deployments/{id}": { - parameters: { - query?: never; - header?: never; - path?: never; - cookie?: never; - }; - /** - * Read Deployment - * @description Get a deployment by id. - */ - get: operations["read_deployment_deployments__id__get"]; - put?: never; - post?: never; - /** - * Delete Deployment - * @description Delete a deployment by id. - */ - delete: operations["delete_deployment_deployments__id__delete"]; - options?: never; - head?: never; - /** Update Deployment */ - patch: operations["update_deployment_deployments__id__patch"]; - trace?: never; - }; - "/deployments/name/{flow_name}/{deployment_name}": { - parameters: { - query?: never; - header?: never; - path?: never; - cookie?: never; - }; - /** - * Read Deployment By Name - * @description Get a deployment using the name of the flow and the deployment. - */ - get: operations["read_deployment_by_name_deployments_name__flow_name___deployment_name__get"]; - put?: never; - post?: never; - delete?: never; - options?: never; - head?: never; - patch?: never; - trace?: never; - }; - "/deployments/filter": { - parameters: { - query?: never; - header?: never; - path?: never; - cookie?: never; - }; - get?: never; - put?: never; - /** - * Read Deployments - * @description Query for deployments. 
- */ - post: operations["read_deployments_deployments_filter_post"]; - delete?: never; - options?: never; - head?: never; - patch?: never; - trace?: never; - }; - "/deployments/paginate": { - parameters: { - query?: never; - header?: never; - path?: never; - cookie?: never; - }; - get?: never; - put?: never; - /** - * Paginate Deployments - * @description Pagination query for flow runs. - */ - post: operations["paginate_deployments_deployments_paginate_post"]; - delete?: never; - options?: never; - head?: never; - patch?: never; - trace?: never; - }; - "/deployments/get_scheduled_flow_runs": { - parameters: { - query?: never; - header?: never; - path?: never; - cookie?: never; - }; - get?: never; - put?: never; - /** - * Get Scheduled Flow Runs For Deployments - * @description Get scheduled runs for a set of deployments. Used by a runner to poll for work. - */ - post: operations["get_scheduled_flow_runs_for_deployments_deployments_get_scheduled_flow_runs_post"]; - delete?: never; - options?: never; - head?: never; - patch?: never; - trace?: never; - }; - "/deployments/count": { - parameters: { - query?: never; - header?: never; - path?: never; - cookie?: never; - }; - get?: never; - put?: never; - /** - * Count Deployments - * @description Count deployments. - */ - post: operations["count_deployments_deployments_count_post"]; - delete?: never; - options?: never; - head?: never; - patch?: never; - trace?: never; - }; - "/deployments/{id}/schedule": { - parameters: { - query?: never; - header?: never; - path?: never; - cookie?: never; - }; - get?: never; - put?: never; - /** - * Schedule Deployment - * @description Schedule runs for a deployment. For backfills, provide start/end times in the past. - * - * This function will generate the minimum number of runs that satisfy the min - * and max times, and the min and max counts. Specifically, the following order - * will be respected. - * - * - Runs will be generated starting on or after the `start_time` - * - No more than `max_runs` runs will be generated - * - No runs will be generated after `end_time` is reached - * - At least `min_runs` runs will be generated - * - Runs will be generated until at least `start_time + min_time` is reached - */ - post: operations["schedule_deployment_deployments__id__schedule_post"]; - delete?: never; - options?: never; - head?: never; - patch?: never; - trace?: never; - }; - "/deployments/{id}/resume_deployment": { - parameters: { - query?: never; - header?: never; - path?: never; - cookie?: never; - }; - get?: never; - put?: never; - /** - * Resume Deployment - * @description Set a deployment schedule to active. Runs will be scheduled immediately. - */ - post: operations["resume_deployment_deployments__id__resume_deployment_post"]; - delete?: never; - options?: never; - head?: never; - patch?: never; - trace?: never; - }; - "/deployments/{id}/pause_deployment": { - parameters: { - query?: never; - header?: never; - path?: never; - cookie?: never; - }; - get?: never; - put?: never; - /** - * Pause Deployment - * @description Set a deployment schedule to inactive. Any auto-scheduled runs still in a Scheduled - * state will be deleted. 
- */ - post: operations["pause_deployment_deployments__id__pause_deployment_post"]; - delete?: never; - options?: never; - head?: never; - patch?: never; - trace?: never; - }; - "/deployments/{id}/create_flow_run": { - parameters: { - query?: never; - header?: never; - path?: never; - cookie?: never; - }; - get?: never; - put?: never; - /** - * Create Flow Run From Deployment - * @description Create a flow run from a deployment. - * - * Any parameters not provided will be inferred from the deployment's parameters. - * If tags are not provided, the deployment's tags will be used. - * - * If no state is provided, the flow run will be created in a SCHEDULED state. - */ - post: operations["create_flow_run_from_deployment_deployments__id__create_flow_run_post"]; - delete?: never; - options?: never; - head?: never; - patch?: never; - trace?: never; - }; - "/deployments/{id}/work_queue_check": { - parameters: { - query?: never; - header?: never; - path?: never; - cookie?: never; - }; - /** - * Work Queue Check For Deployment - * @deprecated - * @description Get list of work-queues that are able to pick up the specified deployment. - * - * This endpoint is intended to be used by the UI to provide users warnings - * about deployments that are unable to be executed because there are no work - * queues that will pick up their runs, based on existing filter criteria. It - * may be deprecated in the future because there is not a strict relationship - * between work queues and deployments. - */ - get: operations["work_queue_check_for_deployment_deployments__id__work_queue_check_get"]; - put?: never; - post?: never; - delete?: never; - options?: never; - head?: never; - patch?: never; - trace?: never; - }; - "/deployments/{id}/schedules": { - parameters: { - query?: never; - header?: never; - path?: never; - cookie?: never; - }; - /** Read Deployment Schedules */ - get: operations["read_deployment_schedules_deployments__id__schedules_get"]; - put?: never; - /** Create Deployment Schedules */ - post: operations["create_deployment_schedules_deployments__id__schedules_post"]; - delete?: never; - options?: never; - head?: never; - patch?: never; - trace?: never; - }; - "/deployments/{id}/schedules/{schedule_id}": { - parameters: { - query?: never; - header?: never; - path?: never; - cookie?: never; - }; - get?: never; - put?: never; - post?: never; - /** Delete Deployment Schedule */ - delete: operations["delete_deployment_schedule_deployments__id__schedules__schedule_id__delete"]; - options?: never; - head?: never; - /** Update Deployment Schedule */ - patch: operations["update_deployment_schedule_deployments__id__schedules__schedule_id__patch"]; - trace?: never; - }; - "/saved_searches/": { - parameters: { - query?: never; - header?: never; - path?: never; - cookie?: never; - }; - get?: never; - /** - * Create Saved Search - * @description Gracefully creates a new saved search from the provided schema. - * - * If a saved search with the same name already exists, the saved search's fields are - * replaced. - */ - put: operations["create_saved_search_saved_searches__put"]; - post?: never; - delete?: never; - options?: never; - head?: never; - patch?: never; - trace?: never; - }; - "/saved_searches/{id}": { - parameters: { - query?: never; - header?: never; - path?: never; - cookie?: never; - }; - /** - * Read Saved Search - * @description Get a saved search by id. 
- */ - get: operations["read_saved_search_saved_searches__id__get"]; - put?: never; - post?: never; - /** - * Delete Saved Search - * @description Delete a saved search by id. - */ - delete: operations["delete_saved_search_saved_searches__id__delete"]; - options?: never; - head?: never; - patch?: never; - trace?: never; - }; - "/saved_searches/filter": { - parameters: { - query?: never; - header?: never; - path?: never; - cookie?: never; - }; - get?: never; - put?: never; - /** - * Read Saved Searches - * @description Query for saved searches. - */ - post: operations["read_saved_searches_saved_searches_filter_post"]; - delete?: never; - options?: never; - head?: never; - patch?: never; - trace?: never; - }; - "/logs/": { - parameters: { - query?: never; - header?: never; - path?: never; - cookie?: never; - }; - get?: never; - put?: never; - /** - * Create Logs - * @description Create new logs from the provided schema. - */ - post: operations["create_logs_logs__post"]; - delete?: never; - options?: never; - head?: never; - patch?: never; - trace?: never; - }; - "/logs/filter": { - parameters: { - query?: never; - header?: never; - path?: never; - cookie?: never; - }; - get?: never; - put?: never; - /** - * Read Logs - * @description Query for logs. - */ - post: operations["read_logs_logs_filter_post"]; - delete?: never; - options?: never; - head?: never; - patch?: never; - trace?: never; - }; - "/concurrency_limits/": { - parameters: { - query?: never; - header?: never; - path?: never; - cookie?: never; - }; - get?: never; - put?: never; - /** Create Concurrency Limit */ - post: operations["create_concurrency_limit_concurrency_limits__post"]; - delete?: never; - options?: never; - head?: never; - patch?: never; - trace?: never; - }; - "/concurrency_limits/{id}": { - parameters: { - query?: never; - header?: never; - path?: never; - cookie?: never; - }; - /** - * Read Concurrency Limit - * @description Get a concurrency limit by id. - * - * The `active slots` field contains a list of TaskRun IDs currently using a - * concurrency slot for the specified tag. - */ - get: operations["read_concurrency_limit_concurrency_limits__id__get"]; - put?: never; - post?: never; - /** Delete Concurrency Limit */ - delete: operations["delete_concurrency_limit_concurrency_limits__id__delete"]; - options?: never; - head?: never; - patch?: never; - trace?: never; - }; - "/concurrency_limits/tag/{tag}": { - parameters: { - query?: never; - header?: never; - path?: never; - cookie?: never; - }; - /** - * Read Concurrency Limit By Tag - * @description Get a concurrency limit by tag. - * - * The `active slots` field contains a list of TaskRun IDs currently using a - * concurrency slot for the specified tag. - */ - get: operations["read_concurrency_limit_by_tag_concurrency_limits_tag__tag__get"]; - put?: never; - post?: never; - /** Delete Concurrency Limit By Tag */ - delete: operations["delete_concurrency_limit_by_tag_concurrency_limits_tag__tag__delete"]; - options?: never; - head?: never; - patch?: never; - trace?: never; - }; - "/concurrency_limits/filter": { - parameters: { - query?: never; - header?: never; - path?: never; - cookie?: never; - }; - get?: never; - put?: never; - /** - * Read Concurrency Limits - * @description Query for concurrency limits. - * - * For each concurrency limit the `active slots` field contains a list of TaskRun IDs - * currently using a concurrency slot for the specified tag. 
- */ - post: operations["read_concurrency_limits_concurrency_limits_filter_post"]; - delete?: never; - options?: never; - head?: never; - patch?: never; - trace?: never; - }; - "/concurrency_limits/tag/{tag}/reset": { - parameters: { - query?: never; - header?: never; - path?: never; - cookie?: never; - }; - get?: never; - put?: never; - /** Reset Concurrency Limit By Tag */ - post: operations["reset_concurrency_limit_by_tag_concurrency_limits_tag__tag__reset_post"]; - delete?: never; - options?: never; - head?: never; - patch?: never; - trace?: never; - }; - "/concurrency_limits/increment": { - parameters: { - query?: never; - header?: never; - path?: never; - cookie?: never; - }; - get?: never; - put?: never; - /** Increment Concurrency Limits V1 */ - post: operations["increment_concurrency_limits_v1_concurrency_limits_increment_post"]; - delete?: never; - options?: never; - head?: never; - patch?: never; - trace?: never; - }; - "/concurrency_limits/decrement": { - parameters: { - query?: never; - header?: never; - path?: never; - cookie?: never; - }; - get?: never; - put?: never; - /** Decrement Concurrency Limits V1 */ - post: operations["decrement_concurrency_limits_v1_concurrency_limits_decrement_post"]; - delete?: never; - options?: never; - head?: never; - patch?: never; - trace?: never; - }; - "/v2/concurrency_limits/": { - parameters: { - query?: never; - header?: never; - path?: never; - cookie?: never; - }; - get?: never; - put?: never; - /** Create Concurrency Limit V2 */ - post: operations["create_concurrency_limit_v2_v2_concurrency_limits__post"]; - delete?: never; - options?: never; - head?: never; - patch?: never; - trace?: never; - }; - "/v2/concurrency_limits/{id_or_name}": { - parameters: { - query?: never; - header?: never; - path?: never; - cookie?: never; - }; - /** Read Concurrency Limit V2 */ - get: operations["read_concurrency_limit_v2_v2_concurrency_limits__id_or_name__get"]; - put?: never; - post?: never; - /** Delete Concurrency Limit V2 */ - delete: operations["delete_concurrency_limit_v2_v2_concurrency_limits__id_or_name__delete"]; - options?: never; - head?: never; - /** Update Concurrency Limit V2 */ - patch: operations["update_concurrency_limit_v2_v2_concurrency_limits__id_or_name__patch"]; - trace?: never; - }; - "/v2/concurrency_limits/filter": { - parameters: { - query?: never; - header?: never; - path?: never; - cookie?: never; - }; - get?: never; - put?: never; - /** Read All Concurrency Limits V2 */ - post: operations["read_all_concurrency_limits_v2_v2_concurrency_limits_filter_post"]; - delete?: never; - options?: never; - head?: never; - patch?: never; - trace?: never; - }; - "/v2/concurrency_limits/increment": { - parameters: { - query?: never; - header?: never; - path?: never; - cookie?: never; - }; - get?: never; - put?: never; - /** Bulk Increment Active Slots */ - post: operations["bulk_increment_active_slots_v2_concurrency_limits_increment_post"]; - delete?: never; - options?: never; - head?: never; - patch?: never; - trace?: never; - }; - "/v2/concurrency_limits/decrement": { - parameters: { - query?: never; - header?: never; - path?: never; - cookie?: never; - }; - get?: never; - put?: never; - /** Bulk Decrement Active Slots */ - post: operations["bulk_decrement_active_slots_v2_concurrency_limits_decrement_post"]; - delete?: never; - options?: never; - head?: never; - patch?: never; - trace?: never; - }; - "/block_types/": { - parameters: { - query?: never; - header?: never; - path?: never; - cookie?: never; - }; - get?: never; - put?: 
never; - /** - * Create Block Type - * @description Create a new block type - */ - post: operations["create_block_type_block_types__post"]; - delete?: never; - options?: never; - head?: never; - patch?: never; - trace?: never; - }; - "/block_types/{id}": { - parameters: { - query?: never; - header?: never; - path?: never; - cookie?: never; - }; - /** - * Read Block Type By Id - * @description Get a block type by ID. - */ - get: operations["read_block_type_by_id_block_types__id__get"]; - put?: never; - post?: never; - /** Delete Block Type */ - delete: operations["delete_block_type_block_types__id__delete"]; - options?: never; - head?: never; - /** - * Update Block Type - * @description Update a block type. - */ - patch: operations["update_block_type_block_types__id__patch"]; - trace?: never; - }; - "/block_types/slug/{slug}": { - parameters: { - query?: never; - header?: never; - path?: never; - cookie?: never; - }; - /** - * Read Block Type By Slug - * @description Get a block type by name. - */ - get: operations["read_block_type_by_slug_block_types_slug__slug__get"]; - put?: never; - post?: never; - delete?: never; - options?: never; - head?: never; - patch?: never; - trace?: never; - }; - "/block_types/filter": { - parameters: { - query?: never; - header?: never; - path?: never; - cookie?: never; - }; - get?: never; - put?: never; - /** - * Read Block Types - * @description Gets all block types. Optionally limit return with limit and offset. - */ - post: operations["read_block_types_block_types_filter_post"]; - delete?: never; - options?: never; - head?: never; - patch?: never; - trace?: never; - }; - "/block_types/slug/{slug}/block_documents": { - parameters: { - query?: never; - header?: never; - path?: never; - cookie?: never; - }; - /** Read Block Documents For Block Type */ - get: operations["read_block_documents_for_block_type_block_types_slug__slug__block_documents_get"]; - put?: never; - post?: never; - delete?: never; - options?: never; - head?: never; - patch?: never; - trace?: never; - }; - "/block_types/slug/{slug}/block_documents/name/{block_document_name}": { - parameters: { - query?: never; - header?: never; - path?: never; - cookie?: never; - }; - /** Read Block Document By Name For Block Type */ - get: operations["read_block_document_by_name_for_block_type_block_types_slug__slug__block_documents_name__block_document_name__get"]; - put?: never; - post?: never; - delete?: never; - options?: never; - head?: never; - patch?: never; - trace?: never; - }; - "/block_types/install_system_block_types": { - parameters: { - query?: never; - header?: never; - path?: never; - cookie?: never; - }; - get?: never; - put?: never; - /** Install System Block Types */ - post: operations["install_system_block_types_block_types_install_system_block_types_post"]; - delete?: never; - options?: never; - head?: never; - patch?: never; - trace?: never; - }; - "/block_documents/": { - parameters: { - query?: never; - header?: never; - path?: never; - cookie?: never; - }; - get?: never; - put?: never; - /** - * Create Block Document - * @description Create a new block document. - */ - post: operations["create_block_document_block_documents__post"]; - delete?: never; - options?: never; - head?: never; - patch?: never; - trace?: never; - }; - "/block_documents/filter": { - parameters: { - query?: never; - header?: never; - path?: never; - cookie?: never; - }; - get?: never; - put?: never; - /** - * Read Block Documents - * @description Query for block documents. 
- */ - post: operations["read_block_documents_block_documents_filter_post"]; - delete?: never; - options?: never; - head?: never; - patch?: never; - trace?: never; - }; - "/block_documents/count": { - parameters: { - query?: never; - header?: never; - path?: never; - cookie?: never; - }; - get?: never; - put?: never; - /** - * Count Block Documents - * @description Count block documents. - */ - post: operations["count_block_documents_block_documents_count_post"]; - delete?: never; - options?: never; - head?: never; - patch?: never; - trace?: never; - }; - "/block_documents/{id}": { - parameters: { - query?: never; - header?: never; - path?: never; - cookie?: never; - }; - /** Read Block Document By Id */ - get: operations["read_block_document_by_id_block_documents__id__get"]; - put?: never; - post?: never; - /** Delete Block Document */ - delete: operations["delete_block_document_block_documents__id__delete"]; - options?: never; - head?: never; - /** Update Block Document Data */ - patch: operations["update_block_document_data_block_documents__id__patch"]; - trace?: never; - }; - "/work_pools/": { - parameters: { - query?: never; - header?: never; - path?: never; - cookie?: never; - }; - get?: never; - put?: never; - /** - * Create Work Pool - * @description Creates a new work pool. If a work pool with the same - * name already exists, an error will be raised. - */ - post: operations["create_work_pool_work_pools__post"]; - delete?: never; - options?: never; - head?: never; - patch?: never; - trace?: never; - }; - "/work_pools/{name}": { - parameters: { - query?: never; - header?: never; - path?: never; - cookie?: never; - }; - /** - * Read Work Pool - * @description Read a work pool by name - */ - get: operations["read_work_pool_work_pools__name__get"]; - put?: never; - post?: never; - /** - * Delete Work Pool - * @description Delete a work pool - */ - delete: operations["delete_work_pool_work_pools__name__delete"]; - options?: never; - head?: never; - /** - * Update Work Pool - * @description Update a work pool - */ - patch: operations["update_work_pool_work_pools__name__patch"]; - trace?: never; - }; - "/work_pools/filter": { - parameters: { - query?: never; - header?: never; - path?: never; - cookie?: never; - }; - get?: never; - put?: never; - /** - * Read Work Pools - * @description Read multiple work pools - */ - post: operations["read_work_pools_work_pools_filter_post"]; - delete?: never; - options?: never; - head?: never; - patch?: never; - trace?: never; - }; - "/work_pools/count": { - parameters: { - query?: never; - header?: never; - path?: never; - cookie?: never; - }; - get?: never; - put?: never; - /** - * Count Work Pools - * @description Count work pools - */ - post: operations["count_work_pools_work_pools_count_post"]; - delete?: never; - options?: never; - head?: never; - patch?: never; - trace?: never; - }; - "/work_pools/{name}/get_scheduled_flow_runs": { - parameters: { - query?: never; - header?: never; - path?: never; - cookie?: never; - }; - get?: never; - put?: never; - /** - * Get Scheduled Flow Runs - * @description Load scheduled runs for a worker - */ - post: operations["get_scheduled_flow_runs_work_pools__name__get_scheduled_flow_runs_post"]; - delete?: never; - options?: never; - head?: never; - patch?: never; - trace?: never; - }; - "/work_pools/{work_pool_name}/queues": { - parameters: { - query?: never; - header?: never; - path?: never; - cookie?: never; - }; - get?: never; - put?: never; - /** - * Create Work Queue - * @description Creates a new work pool 
queue. If a work pool queue with the same - * name already exists, an error will be raised. - */ - post: operations["create_work_queue_work_pools__work_pool_name__queues_post"]; - delete?: never; - options?: never; - head?: never; - patch?: never; - trace?: never; - }; - "/work_pools/{work_pool_name}/queues/{name}": { - parameters: { - query?: never; - header?: never; - path?: never; - cookie?: never; - }; - /** - * Read Work Queue - * @description Read a work pool queue - */ - get: operations["read_work_queue_work_pools__work_pool_name__queues__name__get"]; - put?: never; - post?: never; - /** - * Delete Work Queue - * @description Delete a work pool queue - */ - delete: operations["delete_work_queue_work_pools__work_pool_name__queues__name__delete"]; - options?: never; - head?: never; - /** - * Update Work Queue - * @description Update a work pool queue - */ - patch: operations["update_work_queue_work_pools__work_pool_name__queues__name__patch"]; - trace?: never; - }; - "/work_pools/{work_pool_name}/queues/filter": { - parameters: { - query?: never; - header?: never; - path?: never; - cookie?: never; - }; - get?: never; - put?: never; - /** - * Read Work Queues - * @description Read all work pool queues - */ - post: operations["read_work_queues_work_pools__work_pool_name__queues_filter_post"]; - delete?: never; - options?: never; - head?: never; - patch?: never; - trace?: never; - }; - "/work_pools/{work_pool_name}/workers/heartbeat": { - parameters: { - query?: never; - header?: never; - path?: never; - cookie?: never; - }; - get?: never; - put?: never; - /** Worker Heartbeat */ - post: operations["worker_heartbeat_work_pools__work_pool_name__workers_heartbeat_post"]; - delete?: never; - options?: never; - head?: never; - patch?: never; - trace?: never; - }; - "/work_pools/{work_pool_name}/workers/filter": { - parameters: { - query?: never; - header?: never; - path?: never; - cookie?: never; - }; - get?: never; - put?: never; - /** - * Read Workers - * @description Read all worker processes - */ - post: operations["read_workers_work_pools__work_pool_name__workers_filter_post"]; - delete?: never; - options?: never; - head?: never; - patch?: never; - trace?: never; - }; - "/work_pools/{work_pool_name}/workers/{name}": { - parameters: { - query?: never; - header?: never; - path?: never; - cookie?: never; - }; - get?: never; - put?: never; - post?: never; - /** - * Delete Worker - * @description Delete a work pool's worker - */ - delete: operations["delete_worker_work_pools__work_pool_name__workers__name__delete"]; - options?: never; - head?: never; - patch?: never; - trace?: never; - }; - "/task_workers/filter": { - parameters: { - query?: never; - header?: never; - path?: never; - cookie?: never; - }; - get?: never; - put?: never; - /** - * Read Task Workers - * @description Read active task workers. Optionally filter by task keys. - */ - post: operations["read_task_workers_task_workers_filter_post"]; - delete?: never; - options?: never; - head?: never; - patch?: never; - trace?: never; - }; - "/work_queues/": { - parameters: { - query?: never; - header?: never; - path?: never; - cookie?: never; - }; - get?: never; - put?: never; - /** - * Create Work Queue - * @description Creates a new work queue. - * - * If a work queue with the same name already exists, an error - * will be raised. 
- */ - post: operations["create_work_queue_work_queues__post"]; - delete?: never; - options?: never; - head?: never; - patch?: never; - trace?: never; - }; - "/work_queues/{id}": { - parameters: { - query?: never; - header?: never; - path?: never; - cookie?: never; - }; - /** - * Read Work Queue - * @description Get a work queue by id. - */ - get: operations["read_work_queue_work_queues__id__get"]; - put?: never; - post?: never; - /** - * Delete Work Queue - * @description Delete a work queue by id. - */ - delete: operations["delete_work_queue_work_queues__id__delete"]; - options?: never; - head?: never; - /** - * Update Work Queue - * @description Updates an existing work queue. - */ - patch: operations["update_work_queue_work_queues__id__patch"]; - trace?: never; - }; - "/work_queues/name/{name}": { - parameters: { - query?: never; - header?: never; - path?: never; - cookie?: never; - }; - /** - * Read Work Queue By Name - * @description Get a work queue by id. - */ - get: operations["read_work_queue_by_name_work_queues_name__name__get"]; - put?: never; - post?: never; - delete?: never; - options?: never; - head?: never; - patch?: never; - trace?: never; - }; - "/work_queues/{id}/get_runs": { - parameters: { - query?: never; - header?: never; - path?: never; - cookie?: never; - }; - get?: never; - put?: never; - /** - * Read Work Queue Runs - * @description Get flow runs from the work queue. - */ - post: operations["read_work_queue_runs_work_queues__id__get_runs_post"]; - delete?: never; - options?: never; - head?: never; - patch?: never; - trace?: never; - }; - "/work_queues/filter": { - parameters: { - query?: never; - header?: never; - path?: never; - cookie?: never; - }; - get?: never; - put?: never; - /** - * Read Work Queues - * @description Query for work queues. - */ - post: operations["read_work_queues_work_queues_filter_post"]; - delete?: never; - options?: never; - head?: never; - patch?: never; - trace?: never; - }; - "/work_queues/{id}/status": { - parameters: { - query?: never; - header?: never; - path?: never; - cookie?: never; - }; - /** - * Read Work Queue Status - * @description Get the status of a work queue. - */ - get: operations["read_work_queue_status_work_queues__id__status_get"]; - put?: never; - post?: never; - delete?: never; - options?: never; - head?: never; - patch?: never; - trace?: never; - }; - "/artifacts/": { - parameters: { - query?: never; - header?: never; - path?: never; - cookie?: never; - }; - get?: never; - put?: never; - /** Create Artifact */ - post: operations["create_artifact_artifacts__post"]; - delete?: never; - options?: never; - head?: never; - patch?: never; - trace?: never; - }; - "/artifacts/{id}": { - parameters: { - query?: never; - header?: never; - path?: never; - cookie?: never; - }; - /** - * Read Artifact - * @description Retrieve an artifact from the database. - */ - get: operations["read_artifact_artifacts__id__get"]; - put?: never; - post?: never; - /** - * Delete Artifact - * @description Delete an artifact from the database. - */ - delete: operations["delete_artifact_artifacts__id__delete"]; - options?: never; - head?: never; - /** - * Update Artifact - * @description Update an artifact in the database. - */ - patch: operations["update_artifact_artifacts__id__patch"]; - trace?: never; - }; - "/artifacts/{key}/latest": { - parameters: { - query?: never; - header?: never; - path?: never; - cookie?: never; - }; - /** - * Read Latest Artifact - * @description Retrieve the latest artifact from the artifact table. 
- */ - get: operations["read_latest_artifact_artifacts__key__latest_get"]; - put?: never; - post?: never; - delete?: never; - options?: never; - head?: never; - patch?: never; - trace?: never; - }; - "/artifacts/filter": { - parameters: { - query?: never; - header?: never; - path?: never; - cookie?: never; - }; - get?: never; - put?: never; - /** - * Read Artifacts - * @description Retrieve artifacts from the database. - */ - post: operations["read_artifacts_artifacts_filter_post"]; - delete?: never; - options?: never; - head?: never; - patch?: never; - trace?: never; - }; - "/artifacts/latest/filter": { - parameters: { - query?: never; - header?: never; - path?: never; - cookie?: never; - }; - get?: never; - put?: never; - /** - * Read Latest Artifacts - * @description Retrieve artifacts from the database. - */ - post: operations["read_latest_artifacts_artifacts_latest_filter_post"]; - delete?: never; - options?: never; - head?: never; - patch?: never; - trace?: never; - }; - "/artifacts/count": { - parameters: { - query?: never; - header?: never; - path?: never; - cookie?: never; - }; - get?: never; - put?: never; - /** - * Count Artifacts - * @description Count artifacts from the database. - */ - post: operations["count_artifacts_artifacts_count_post"]; - delete?: never; - options?: never; - head?: never; - patch?: never; - trace?: never; - }; - "/artifacts/latest/count": { - parameters: { - query?: never; - header?: never; - path?: never; - cookie?: never; - }; - get?: never; - put?: never; - /** - * Count Latest Artifacts - * @description Count artifacts from the database. - */ - post: operations["count_latest_artifacts_artifacts_latest_count_post"]; - delete?: never; - options?: never; - head?: never; - patch?: never; - trace?: never; - }; - "/block_schemas/": { - parameters: { - query?: never; - header?: never; - path?: never; - cookie?: never; - }; - get?: never; - put?: never; - /** Create Block Schema */ - post: operations["create_block_schema_block_schemas__post"]; - delete?: never; - options?: never; - head?: never; - patch?: never; - trace?: never; - }; - "/block_schemas/{id}": { - parameters: { - query?: never; - header?: never; - path?: never; - cookie?: never; - }; - /** - * Read Block Schema By Id - * @description Get a block schema by id. - */ - get: operations["read_block_schema_by_id_block_schemas__id__get"]; - put?: never; - post?: never; - /** - * Delete Block Schema - * @description Delete a block schema by id. 
- */ - delete: operations["delete_block_schema_block_schemas__id__delete"]; - options?: never; - head?: never; - patch?: never; - trace?: never; - }; - "/block_schemas/filter": { - parameters: { - query?: never; - header?: never; - path?: never; - cookie?: never; - }; - get?: never; - put?: never; - /** - * Read Block Schemas - * @description Read all block schemas, optionally filtered by type - */ - post: operations["read_block_schemas_block_schemas_filter_post"]; - delete?: never; - options?: never; - head?: never; - patch?: never; - trace?: never; - }; - "/block_schemas/checksum/{checksum}": { - parameters: { - query?: never; - header?: never; - path?: never; - cookie?: never; - }; - /** Read Block Schema By Checksum */ - get: operations["read_block_schema_by_checksum_block_schemas_checksum__checksum__get"]; - put?: never; - post?: never; - delete?: never; - options?: never; - head?: never; - patch?: never; - trace?: never; - }; - "/block_capabilities/": { - parameters: { - query?: never; - header?: never; - path?: never; - cookie?: never; - }; - /** Read Available Block Capabilities */ - get: operations["read_available_block_capabilities_block_capabilities__get"]; - put?: never; - post?: never; - delete?: never; - options?: never; - head?: never; - patch?: never; - trace?: never; - }; - "/collections/views/{view}": { - parameters: { - query?: never; - header?: never; - path?: never; - cookie?: never; - }; - /** - * Read View Content - * @description Reads the content of a view from the prefect-collection-registry. - */ - get: operations["read_view_content_collections_views__view__get"]; - put?: never; - post?: never; - delete?: never; - options?: never; - head?: never; - patch?: never; - trace?: never; - }; - "/variables/": { - parameters: { - query?: never; - header?: never; - path?: never; - cookie?: never; - }; - get?: never; - put?: never; - /** Create Variable */ - post: operations["create_variable_variables__post"]; - delete?: never; - options?: never; - head?: never; - patch?: never; - trace?: never; - }; - "/variables/{id}": { - parameters: { - query?: never; - header?: never; - path?: never; - cookie?: never; - }; - /** Read Variable */ - get: operations["read_variable_variables__id__get"]; - put?: never; - post?: never; - /** Delete Variable */ - delete: operations["delete_variable_variables__id__delete"]; - options?: never; - head?: never; - /** Update Variable */ - patch: operations["update_variable_variables__id__patch"]; - trace?: never; - }; - "/variables/name/{name}": { - parameters: { - query?: never; - header?: never; - path?: never; - cookie?: never; - }; - /** Read Variable By Name */ - get: operations["read_variable_by_name_variables_name__name__get"]; - put?: never; - post?: never; - /** Delete Variable By Name */ - delete: operations["delete_variable_by_name_variables_name__name__delete"]; - options?: never; - head?: never; - /** Update Variable By Name */ - patch: operations["update_variable_by_name_variables_name__name__patch"]; - trace?: never; - }; - "/variables/filter": { - parameters: { - query?: never; - header?: never; - path?: never; - cookie?: never; - }; - get?: never; - put?: never; - /** Read Variables */ - post: operations["read_variables_variables_filter_post"]; - delete?: never; - options?: never; - head?: never; - patch?: never; - trace?: never; - }; - "/variables/count": { - parameters: { - query?: never; - header?: never; - path?: never; - cookie?: never; - }; - get?: never; - put?: never; - /** Count Variables */ - post: 
operations["count_variables_variables_count_post"]; - delete?: never; - options?: never; - head?: never; - patch?: never; - trace?: never; - }; - "/csrf-token": { - parameters: { - query?: never; - header?: never; - path?: never; - cookie?: never; - }; - /** - * Create Csrf Token - * @description Create or update a CSRF token for a client - */ - get: operations["create_csrf_token_csrf_token_get"]; - put?: never; - post?: never; - delete?: never; - options?: never; - head?: never; - patch?: never; - trace?: never; - }; - "/events": { - parameters: { - query?: never; - header?: never; - path?: never; - cookie?: never; - }; - get?: never; - put?: never; - /** - * Create Events - * @description Record a batch of Events - */ - post: operations["create_events_events_post"]; - delete?: never; - options?: never; - head?: never; - patch?: never; - trace?: never; - }; - "/events/filter": { - parameters: { - query?: never; - header?: never; - path?: never; - cookie?: never; - }; - get?: never; - put?: never; - /** - * Read Events - * @description Queries for Events matching the given filter criteria in the given Account. Returns - * the first page of results, and the URL to request the next page (if there are more - * results). - */ - post: operations["read_events_events_filter_post"]; - delete?: never; - options?: never; - head?: never; - patch?: never; - trace?: never; - }; - "/events/filter/next": { - parameters: { - query?: never; - header?: never; - path?: never; - cookie?: never; - }; - /** - * Read Account Events Page - * @description Returns the next page of Events for a previous query against the given Account, and - * the URL to request the next page (if there are more results). - */ - get: operations["read_account_events_page_events_filter_next_get"]; - put?: never; - post?: never; - delete?: never; - options?: never; - head?: never; - patch?: never; - trace?: never; - }; - "/events/count-by/{countable}": { - parameters: { - query?: never; - header?: never; - path?: never; - cookie?: never; - }; - get?: never; - put?: never; - /** - * Count Account Events - * @description Returns distinct objects and the count of events associated with them. Objects - * that can be counted include the day the event occurred, the type of event, or - * the IDs of the resources associated with the event. 
- */ - post: operations["count_account_events_events_count_by__countable__post"]; - delete?: never; - options?: never; - head?: never; - patch?: never; - trace?: never; - }; - "/automations/": { - parameters: { - query?: never; - header?: never; - path?: never; - cookie?: never; - }; - get?: never; - put?: never; - /** Create Automation */ - post: operations["create_automation_automations__post"]; - delete?: never; - options?: never; - head?: never; - patch?: never; - trace?: never; - }; - "/automations/{id}": { - parameters: { - query?: never; - header?: never; - path?: never; - cookie?: never; - }; - /** Read Automation */ - get: operations["read_automation_automations__id__get"]; - /** Update Automation */ - put: operations["update_automation_automations__id__put"]; - post?: never; - /** Delete Automation */ - delete: operations["delete_automation_automations__id__delete"]; - options?: never; - head?: never; - /** Patch Automation */ - patch: operations["patch_automation_automations__id__patch"]; - trace?: never; - }; - "/automations/filter": { - parameters: { - query?: never; - header?: never; - path?: never; - cookie?: never; - }; - get?: never; - put?: never; - /** Read Automations */ - post: operations["read_automations_automations_filter_post"]; - delete?: never; - options?: never; - head?: never; - patch?: never; - trace?: never; - }; - "/automations/count": { - parameters: { - query?: never; - header?: never; - path?: never; - cookie?: never; - }; - get?: never; - put?: never; - /** Count Automations */ - post: operations["count_automations_automations_count_post"]; - delete?: never; - options?: never; - head?: never; - patch?: never; - trace?: never; - }; - "/automations/related-to/{resource_id}": { - parameters: { - query?: never; - header?: never; - path?: never; - cookie?: never; - }; - /** Read Automations Related To Resource */ - get: operations["read_automations_related_to_resource_automations_related_to__resource_id__get"]; - put?: never; - post?: never; - delete?: never; - options?: never; - head?: never; - patch?: never; - trace?: never; - }; - "/automations/owned-by/{resource_id}": { - parameters: { - query?: never; - header?: never; - path?: never; - cookie?: never; - }; - get?: never; - put?: never; - post?: never; - /** Delete Automations Owned By Resource */ - delete: operations["delete_automations_owned_by_resource_automations_owned_by__resource_id__delete"]; - options?: never; - head?: never; - patch?: never; - trace?: never; - }; - "/templates/validate": { - parameters: { - query?: never; - header?: never; - path?: never; - cookie?: never; - }; - get?: never; - put?: never; - /** Validate Template */ - post: operations["validate_template_templates_validate_post"]; - delete?: never; - options?: never; - head?: never; - patch?: never; - trace?: never; - }; - "/ui/flows/count-deployments": { - parameters: { - query?: never; - header?: never; - path?: never; - cookie?: never; - }; - get?: never; - put?: never; - /** - * Count Deployments By Flow - * @description Get deployment counts by flow id. - */ - post: operations["count_deployments_by_flow_ui_flows_count_deployments_post"]; - delete?: never; - options?: never; - head?: never; - patch?: never; - trace?: never; - }; - "/ui/flows/next-runs": { - parameters: { - query?: never; - header?: never; - path?: never; - cookie?: never; - }; - get?: never; - put?: never; - /** - * Next Runs By Flow - * @description Get the next flow run by flow id. 
- */ - post: operations["next_runs_by_flow_ui_flows_next_runs_post"]; - delete?: never; - options?: never; - head?: never; - patch?: never; - trace?: never; - }; - "/ui/flow_runs/history": { - parameters: { - query?: never; - header?: never; - path?: never; - cookie?: never; - }; - get?: never; - put?: never; - /** Read Flow Run History */ - post: operations["read_flow_run_history_ui_flow_runs_history_post"]; - delete?: never; - options?: never; - head?: never; - patch?: never; - trace?: never; - }; - "/ui/flow_runs/count-task-runs": { - parameters: { - query?: never; - header?: never; - path?: never; - cookie?: never; - }; - get?: never; - put?: never; - /** - * Count Task Runs By Flow Run - * @description Get task run counts by flow run id. - */ - post: operations["count_task_runs_by_flow_run_ui_flow_runs_count_task_runs_post"]; - delete?: never; - options?: never; - head?: never; - patch?: never; - trace?: never; - }; - "/ui/schemas/validate": { - parameters: { - query?: never; - header?: never; - path?: never; - cookie?: never; - }; - get?: never; - put?: never; - /** Validate Obj */ - post: operations["validate_obj_ui_schemas_validate_post"]; - delete?: never; - options?: never; - head?: never; - patch?: never; - trace?: never; - }; - "/ui/task_runs/dashboard/counts": { - parameters: { - query?: never; - header?: never; - path?: never; - cookie?: never; - }; - get?: never; - put?: never; - /** Read Dashboard Task Run Counts */ - post: operations["read_dashboard_task_run_counts_ui_task_runs_dashboard_counts_post"]; - delete?: never; - options?: never; - head?: never; - patch?: never; - trace?: never; - }; - "/ui/task_runs/count": { - parameters: { - query?: never; - header?: never; - path?: never; - cookie?: never; - }; - get?: never; - put?: never; - /** Read Task Run Counts By State */ - post: operations["read_task_run_counts_by_state_ui_task_runs_count_post"]; - delete?: never; - options?: never; - head?: never; - patch?: never; - trace?: never; - }; - "/admin/settings": { - parameters: { - query?: never; - header?: never; - path?: never; - cookie?: never; - }; - /** - * Read Settings - * @description Get the current Prefect REST API settings. - * - * Secret setting values will be obfuscated. - */ - get: operations["read_settings_admin_settings_get"]; - put?: never; - post?: never; - delete?: never; - options?: never; - head?: never; - patch?: never; - trace?: never; - }; - "/admin/version": { - parameters: { - query?: never; - header?: never; - path?: never; - cookie?: never; - }; - /** - * Read Version - * @description Returns the Prefect version number - */ - get: operations["read_version_admin_version_get"]; - put?: never; - post?: never; - delete?: never; - options?: never; - head?: never; - patch?: never; - trace?: never; - }; - "/admin/database/clear": { - parameters: { - query?: never; - header?: never; - path?: never; - cookie?: never; - }; - get?: never; - put?: never; - /** - * Clear Database - * @description Clear all database tables without dropping them. - */ - post: operations["clear_database_admin_database_clear_post"]; - delete?: never; - options?: never; - head?: never; - patch?: never; - trace?: never; - }; - "/admin/database/drop": { - parameters: { - query?: never; - header?: never; - path?: never; - cookie?: never; - }; - get?: never; - put?: never; - /** - * Drop Database - * @description Drop all database objects. 
- */ - post: operations["drop_database_admin_database_drop_post"]; - delete?: never; - options?: never; - head?: never; - patch?: never; - trace?: never; - }; - "/admin/database/create": { - parameters: { - query?: never; - header?: never; - path?: never; - cookie?: never; - }; - get?: never; - put?: never; - /** - * Create Database - * @description Create all database objects. - */ - post: operations["create_database_admin_database_create_post"]; - delete?: never; - options?: never; - head?: never; - patch?: never; - trace?: never; - }; - "/hello": { - parameters: { - query?: never; - header?: never; - path?: never; - cookie?: never; - }; - /** - * Hello - * @description Say hello! - */ - get: operations["hello_hello_get"]; - put?: never; - post?: never; - delete?: never; - options?: never; - head?: never; - patch?: never; - trace?: never; - }; - "/ready": { - parameters: { - query?: never; - header?: never; - path?: never; - cookie?: never; - }; - /** Perform Readiness Check */ - get: operations["perform_readiness_check_ready_get"]; - put?: never; - post?: never; - delete?: never; - options?: never; - head?: never; - patch?: never; - trace?: never; - }; + "/health": { + parameters: { + query?: never; + header?: never; + path?: never; + cookie?: never; + }; + /** Health Check */ + get: operations["health_check_health_get"]; + put?: never; + post?: never; + delete?: never; + options?: never; + head?: never; + patch?: never; + trace?: never; + }; + "/version": { + parameters: { + query?: never; + header?: never; + path?: never; + cookie?: never; + }; + /** Server Version */ + get: operations["server_version_version_get"]; + put?: never; + post?: never; + delete?: never; + options?: never; + head?: never; + patch?: never; + trace?: never; + }; + "/flows/": { + parameters: { + query?: never; + header?: never; + path?: never; + cookie?: never; + }; + get?: never; + put?: never; + /** + * Create Flow + * @description Gracefully creates a new flow from the provided schema. If a flow with the + * same name already exists, the existing flow is returned. + */ + post: operations["create_flow_flows__post"]; + delete?: never; + options?: never; + head?: never; + patch?: never; + trace?: never; + }; + "/flows/{id}": { + parameters: { + query?: never; + header?: never; + path?: never; + cookie?: never; + }; + /** + * Read Flow + * @description Get a flow by id. + */ + get: operations["read_flow_flows__id__get"]; + put?: never; + post?: never; + /** + * Delete Flow + * @description Delete a flow by id. + */ + delete: operations["delete_flow_flows__id__delete"]; + options?: never; + head?: never; + /** + * Update Flow + * @description Updates a flow. + */ + patch: operations["update_flow_flows__id__patch"]; + trace?: never; + }; + "/flows/count": { + parameters: { + query?: never; + header?: never; + path?: never; + cookie?: never; + }; + get?: never; + put?: never; + /** + * Count Flows + * @description Count flows. + */ + post: operations["count_flows_flows_count_post"]; + delete?: never; + options?: never; + head?: never; + patch?: never; + trace?: never; + }; + "/flows/name/{name}": { + parameters: { + query?: never; + header?: never; + path?: never; + cookie?: never; + }; + /** + * Read Flow By Name + * @description Get a flow by name. 
+ */ + get: operations["read_flow_by_name_flows_name__name__get"]; + put?: never; + post?: never; + delete?: never; + options?: never; + head?: never; + patch?: never; + trace?: never; + }; + "/flows/filter": { + parameters: { + query?: never; + header?: never; + path?: never; + cookie?: never; + }; + get?: never; + put?: never; + /** + * Read Flows + * @description Query for flows. + */ + post: operations["read_flows_flows_filter_post"]; + delete?: never; + options?: never; + head?: never; + patch?: never; + trace?: never; + }; + "/flows/paginate": { + parameters: { + query?: never; + header?: never; + path?: never; + cookie?: never; + }; + get?: never; + put?: never; + /** + * Paginate Flows + * @description Pagination query for flows. + */ + post: operations["paginate_flows_flows_paginate_post"]; + delete?: never; + options?: never; + head?: never; + patch?: never; + trace?: never; + }; + "/flow_runs/": { + parameters: { + query?: never; + header?: never; + path?: never; + cookie?: never; + }; + get?: never; + put?: never; + /** + * Create Flow Run + * @description Create a flow run. If a flow run with the same flow_id and + * idempotency key already exists, the existing flow run will be returned. + * + * If no state is provided, the flow run will be created in a PENDING state. + */ + post: operations["create_flow_run_flow_runs__post"]; + delete?: never; + options?: never; + head?: never; + patch?: never; + trace?: never; + }; + "/flow_runs/{id}": { + parameters: { + query?: never; + header?: never; + path?: never; + cookie?: never; + }; + /** + * Read Flow Run + * @description Get a flow run by id. + */ + get: operations["read_flow_run_flow_runs__id__get"]; + put?: never; + post?: never; + /** + * Delete Flow Run + * @description Delete a flow run by id. + */ + delete: operations["delete_flow_run_flow_runs__id__delete"]; + options?: never; + head?: never; + /** + * Update Flow Run + * @description Updates a flow run. + */ + patch: operations["update_flow_run_flow_runs__id__patch"]; + trace?: never; + }; + "/flow_runs/count": { + parameters: { + query?: never; + header?: never; + path?: never; + cookie?: never; + }; + get?: never; + put?: never; + /** + * Count Flow Runs + * @description Query for flow runs. + */ + post: operations["count_flow_runs_flow_runs_count_post"]; + delete?: never; + options?: never; + head?: never; + patch?: never; + trace?: never; + }; + "/flow_runs/lateness": { + parameters: { + query?: never; + header?: never; + path?: never; + cookie?: never; + }; + get?: never; + put?: never; + /** + * Average Flow Run Lateness + * @description Query for average flow-run lateness in seconds. + */ + post: operations["average_flow_run_lateness_flow_runs_lateness_post"]; + delete?: never; + options?: never; + head?: never; + patch?: never; + trace?: never; + }; + "/flow_runs/history": { + parameters: { + query?: never; + header?: never; + path?: never; + cookie?: never; + }; + get?: never; + put?: never; + /** + * Flow Run History + * @description Query for flow run history data across a given range and interval. + */ + post: operations["flow_run_history_flow_runs_history_post"]; + delete?: never; + options?: never; + head?: never; + patch?: never; + trace?: never; + }; + "/flow_runs/{id}/graph": { + parameters: { + query?: never; + header?: never; + path?: never; + cookie?: never; + }; + /** + * Read Flow Run Graph V1 + * @description Get a task run dependency map for a given flow run. 
+ */ + get: operations["read_flow_run_graph_v1_flow_runs__id__graph_get"]; + put?: never; + post?: never; + delete?: never; + options?: never; + head?: never; + patch?: never; + trace?: never; + }; + "/flow_runs/{id}/graph-v2": { + parameters: { + query?: never; + header?: never; + path?: never; + cookie?: never; + }; + /** + * Read Flow Run Graph V2 + * @description Get a graph of the tasks and subflow runs for the given flow run + */ + get: operations["read_flow_run_graph_v2_flow_runs__id__graph_v2_get"]; + put?: never; + post?: never; + delete?: never; + options?: never; + head?: never; + patch?: never; + trace?: never; + }; + "/flow_runs/{id}/resume": { + parameters: { + query?: never; + header?: never; + path?: never; + cookie?: never; + }; + get?: never; + put?: never; + /** + * Resume Flow Run + * @description Resume a paused flow run. + */ + post: operations["resume_flow_run_flow_runs__id__resume_post"]; + delete?: never; + options?: never; + head?: never; + patch?: never; + trace?: never; + }; + "/flow_runs/filter": { + parameters: { + query?: never; + header?: never; + path?: never; + cookie?: never; + }; + get?: never; + put?: never; + /** + * Read Flow Runs + * @description Query for flow runs. + */ + post: operations["read_flow_runs_flow_runs_filter_post"]; + delete?: never; + options?: never; + head?: never; + patch?: never; + trace?: never; + }; + "/flow_runs/{id}/set_state": { + parameters: { + query?: never; + header?: never; + path?: never; + cookie?: never; + }; + get?: never; + put?: never; + /** + * Set Flow Run State + * @description Set a flow run state, invoking any orchestration rules. + */ + post: operations["set_flow_run_state_flow_runs__id__set_state_post"]; + delete?: never; + options?: never; + head?: never; + patch?: never; + trace?: never; + }; + "/flow_runs/{id}/input": { + parameters: { + query?: never; + header?: never; + path?: never; + cookie?: never; + }; + get?: never; + put?: never; + /** + * Create Flow Run Input + * @description Create a key/value input for a flow run. + */ + post: operations["create_flow_run_input_flow_runs__id__input_post"]; + delete?: never; + options?: never; + head?: never; + patch?: never; + trace?: never; + }; + "/flow_runs/{id}/input/filter": { + parameters: { + query?: never; + header?: never; + path?: never; + cookie?: never; + }; + get?: never; + put?: never; + /** + * Filter Flow Run Input + * @description Filter flow run inputs by key prefix + */ + post: operations["filter_flow_run_input_flow_runs__id__input_filter_post"]; + delete?: never; + options?: never; + head?: never; + patch?: never; + trace?: never; + }; + "/flow_runs/{id}/input/{key}": { + parameters: { + query?: never; + header?: never; + path?: never; + cookie?: never; + }; + /** + * Read Flow Run Input + * @description Create a value from a flow run input + */ + get: operations["read_flow_run_input_flow_runs__id__input__key__get"]; + put?: never; + post?: never; + /** + * Delete Flow Run Input + * @description Delete a flow run input + */ + delete: operations["delete_flow_run_input_flow_runs__id__input__key__delete"]; + options?: never; + head?: never; + patch?: never; + trace?: never; + }; + "/flow_runs/paginate": { + parameters: { + query?: never; + header?: never; + path?: never; + cookie?: never; + }; + get?: never; + put?: never; + /** + * Paginate Flow Runs + * @description Pagination query for flow runs. 
+ */ + post: operations["paginate_flow_runs_flow_runs_paginate_post"]; + delete?: never; + options?: never; + head?: never; + patch?: never; + trace?: never; + }; + "/flow_runs/{id}/logs/download": { + parameters: { + query?: never; + header?: never; + path?: never; + cookie?: never; + }; + /** + * Download Logs + * @description Download all flow run logs as a CSV file, collecting all logs until there are no more logs to retrieve. + */ + get: operations["download_logs_flow_runs__id__logs_download_get"]; + put?: never; + post?: never; + delete?: never; + options?: never; + head?: never; + patch?: never; + trace?: never; + }; + "/flow_runs/{id}/labels": { + parameters: { + query?: never; + header?: never; + path?: never; + cookie?: never; + }; + get?: never; + put?: never; + post?: never; + delete?: never; + options?: never; + head?: never; + /** + * Update Flow Run Labels + * @description Update the labels of a flow run. + */ + patch: operations["update_flow_run_labels_flow_runs__id__labels_patch"]; + trace?: never; + }; + "/task_runs/": { + parameters: { + query?: never; + header?: never; + path?: never; + cookie?: never; + }; + get?: never; + put?: never; + /** + * Create Task Run + * @description Create a task run. If a task run with the same flow_run_id, + * task_key, and dynamic_key already exists, the existing task + * run will be returned. + * + * If no state is provided, the task run will be created in a PENDING state. + */ + post: operations["create_task_run_task_runs__post"]; + delete?: never; + options?: never; + head?: never; + patch?: never; + trace?: never; + }; + "/task_runs/{id}": { + parameters: { + query?: never; + header?: never; + path?: never; + cookie?: never; + }; + /** + * Read Task Run + * @description Get a task run by id. + */ + get: operations["read_task_run_task_runs__id__get"]; + put?: never; + post?: never; + /** + * Delete Task Run + * @description Delete a task run by id. + */ + delete: operations["delete_task_run_task_runs__id__delete"]; + options?: never; + head?: never; + /** + * Update Task Run + * @description Updates a task run. + */ + patch: operations["update_task_run_task_runs__id__patch"]; + trace?: never; + }; + "/task_runs/count": { + parameters: { + query?: never; + header?: never; + path?: never; + cookie?: never; + }; + get?: never; + put?: never; + /** + * Count Task Runs + * @description Count task runs. + */ + post: operations["count_task_runs_task_runs_count_post"]; + delete?: never; + options?: never; + head?: never; + patch?: never; + trace?: never; + }; + "/task_runs/history": { + parameters: { + query?: never; + header?: never; + path?: never; + cookie?: never; + }; + get?: never; + put?: never; + /** + * Task Run History + * @description Query for task run history data across a given range and interval. + */ + post: operations["task_run_history_task_runs_history_post"]; + delete?: never; + options?: never; + head?: never; + patch?: never; + trace?: never; + }; + "/task_runs/filter": { + parameters: { + query?: never; + header?: never; + path?: never; + cookie?: never; + }; + get?: never; + put?: never; + /** + * Read Task Runs + * @description Query for task runs. 
+ */ + post: operations["read_task_runs_task_runs_filter_post"]; + delete?: never; + options?: never; + head?: never; + patch?: never; + trace?: never; + }; + "/task_runs/{id}/set_state": { + parameters: { + query?: never; + header?: never; + path?: never; + cookie?: never; + }; + get?: never; + put?: never; + /** + * Set Task Run State + * @description Set a task run state, invoking any orchestration rules. + */ + post: operations["set_task_run_state_task_runs__id__set_state_post"]; + delete?: never; + options?: never; + head?: never; + patch?: never; + trace?: never; + }; + "/flow_run_states/{id}": { + parameters: { + query?: never; + header?: never; + path?: never; + cookie?: never; + }; + /** + * Read Flow Run State + * @description Get a flow run state by id. + */ + get: operations["read_flow_run_state_flow_run_states__id__get"]; + put?: never; + post?: never; + delete?: never; + options?: never; + head?: never; + patch?: never; + trace?: never; + }; + "/flow_run_states/": { + parameters: { + query?: never; + header?: never; + path?: never; + cookie?: never; + }; + /** + * Read Flow Run States + * @description Get states associated with a flow run. + */ + get: operations["read_flow_run_states_flow_run_states__get"]; + put?: never; + post?: never; + delete?: never; + options?: never; + head?: never; + patch?: never; + trace?: never; + }; + "/task_run_states/{id}": { + parameters: { + query?: never; + header?: never; + path?: never; + cookie?: never; + }; + /** + * Read Task Run State + * @description Get a task run state by id. + */ + get: operations["read_task_run_state_task_run_states__id__get"]; + put?: never; + post?: never; + delete?: never; + options?: never; + head?: never; + patch?: never; + trace?: never; + }; + "/task_run_states/": { + parameters: { + query?: never; + header?: never; + path?: never; + cookie?: never; + }; + /** + * Read Task Run States + * @description Get states associated with a task run. + */ + get: operations["read_task_run_states_task_run_states__get"]; + put?: never; + post?: never; + delete?: never; + options?: never; + head?: never; + patch?: never; + trace?: never; + }; + "/flow_run_notification_policies/": { + parameters: { + query?: never; + header?: never; + path?: never; + cookie?: never; + }; + get?: never; + put?: never; + /** + * Create Flow Run Notification Policy + * @description Creates a new flow run notification policy. + */ + post: operations["create_flow_run_notification_policy_flow_run_notification_policies__post"]; + delete?: never; + options?: never; + head?: never; + patch?: never; + trace?: never; + }; + "/flow_run_notification_policies/{id}": { + parameters: { + query?: never; + header?: never; + path?: never; + cookie?: never; + }; + /** + * Read Flow Run Notification Policy + * @description Get a flow run notification policy by id. + */ + get: operations["read_flow_run_notification_policy_flow_run_notification_policies__id__get"]; + put?: never; + post?: never; + /** + * Delete Flow Run Notification Policy + * @description Delete a flow run notification policy by id. + */ + delete: operations["delete_flow_run_notification_policy_flow_run_notification_policies__id__delete"]; + options?: never; + head?: never; + /** + * Update Flow Run Notification Policy + * @description Updates an existing flow run notification policy. 
+ */ + patch: operations["update_flow_run_notification_policy_flow_run_notification_policies__id__patch"]; + trace?: never; + }; + "/flow_run_notification_policies/filter": { + parameters: { + query?: never; + header?: never; + path?: never; + cookie?: never; + }; + get?: never; + put?: never; + /** + * Read Flow Run Notification Policies + * @description Query for flow run notification policies. + */ + post: operations["read_flow_run_notification_policies_flow_run_notification_policies_filter_post"]; + delete?: never; + options?: never; + head?: never; + patch?: never; + trace?: never; + }; + "/deployments/": { + parameters: { + query?: never; + header?: never; + path?: never; + cookie?: never; + }; + get?: never; + put?: never; + /** + * Create Deployment + * @description Gracefully creates a new deployment from the provided schema. If a deployment with + * the same name and flow_id already exists, the deployment is updated. + * + * If the deployment has an active schedule, flow runs will be scheduled. + * When upserting, any scheduled runs from the existing deployment will be deleted. + */ + post: operations["create_deployment_deployments__post"]; + delete?: never; + options?: never; + head?: never; + patch?: never; + trace?: never; + }; + "/deployments/{id}": { + parameters: { + query?: never; + header?: never; + path?: never; + cookie?: never; + }; + /** + * Read Deployment + * @description Get a deployment by id. + */ + get: operations["read_deployment_deployments__id__get"]; + put?: never; + post?: never; + /** + * Delete Deployment + * @description Delete a deployment by id. + */ + delete: operations["delete_deployment_deployments__id__delete"]; + options?: never; + head?: never; + /** Update Deployment */ + patch: operations["update_deployment_deployments__id__patch"]; + trace?: never; + }; + "/deployments/name/{flow_name}/{deployment_name}": { + parameters: { + query?: never; + header?: never; + path?: never; + cookie?: never; + }; + /** + * Read Deployment By Name + * @description Get a deployment using the name of the flow and the deployment. + */ + get: operations["read_deployment_by_name_deployments_name__flow_name___deployment_name__get"]; + put?: never; + post?: never; + delete?: never; + options?: never; + head?: never; + patch?: never; + trace?: never; + }; + "/deployments/filter": { + parameters: { + query?: never; + header?: never; + path?: never; + cookie?: never; + }; + get?: never; + put?: never; + /** + * Read Deployments + * @description Query for deployments. + */ + post: operations["read_deployments_deployments_filter_post"]; + delete?: never; + options?: never; + head?: never; + patch?: never; + trace?: never; + }; + "/deployments/paginate": { + parameters: { + query?: never; + header?: never; + path?: never; + cookie?: never; + }; + get?: never; + put?: never; + /** + * Paginate Deployments + * @description Pagination query for flow runs. + */ + post: operations["paginate_deployments_deployments_paginate_post"]; + delete?: never; + options?: never; + head?: never; + patch?: never; + trace?: never; + }; + "/deployments/get_scheduled_flow_runs": { + parameters: { + query?: never; + header?: never; + path?: never; + cookie?: never; + }; + get?: never; + put?: never; + /** + * Get Scheduled Flow Runs For Deployments + * @description Get scheduled runs for a set of deployments. Used by a runner to poll for work. 
+ */ + post: operations["get_scheduled_flow_runs_for_deployments_deployments_get_scheduled_flow_runs_post"]; + delete?: never; + options?: never; + head?: never; + patch?: never; + trace?: never; + }; + "/deployments/count": { + parameters: { + query?: never; + header?: never; + path?: never; + cookie?: never; + }; + get?: never; + put?: never; + /** + * Count Deployments + * @description Count deployments. + */ + post: operations["count_deployments_deployments_count_post"]; + delete?: never; + options?: never; + head?: never; + patch?: never; + trace?: never; + }; + "/deployments/{id}/schedule": { + parameters: { + query?: never; + header?: never; + path?: never; + cookie?: never; + }; + get?: never; + put?: never; + /** + * Schedule Deployment + * @description Schedule runs for a deployment. For backfills, provide start/end times in the past. + * + * This function will generate the minimum number of runs that satisfy the min + * and max times, and the min and max counts. Specifically, the following order + * will be respected. + * + * - Runs will be generated starting on or after the `start_time` + * - No more than `max_runs` runs will be generated + * - No runs will be generated after `end_time` is reached + * - At least `min_runs` runs will be generated + * - Runs will be generated until at least `start_time + min_time` is reached + */ + post: operations["schedule_deployment_deployments__id__schedule_post"]; + delete?: never; + options?: never; + head?: never; + patch?: never; + trace?: never; + }; + "/deployments/{id}/resume_deployment": { + parameters: { + query?: never; + header?: never; + path?: never; + cookie?: never; + }; + get?: never; + put?: never; + /** + * Resume Deployment + * @description Set a deployment schedule to active. Runs will be scheduled immediately. + */ + post: operations["resume_deployment_deployments__id__resume_deployment_post"]; + delete?: never; + options?: never; + head?: never; + patch?: never; + trace?: never; + }; + "/deployments/{id}/pause_deployment": { + parameters: { + query?: never; + header?: never; + path?: never; + cookie?: never; + }; + get?: never; + put?: never; + /** + * Pause Deployment + * @description Set a deployment schedule to inactive. Any auto-scheduled runs still in a Scheduled + * state will be deleted. + */ + post: operations["pause_deployment_deployments__id__pause_deployment_post"]; + delete?: never; + options?: never; + head?: never; + patch?: never; + trace?: never; + }; + "/deployments/{id}/create_flow_run": { + parameters: { + query?: never; + header?: never; + path?: never; + cookie?: never; + }; + get?: never; + put?: never; + /** + * Create Flow Run From Deployment + * @description Create a flow run from a deployment. + * + * Any parameters not provided will be inferred from the deployment's parameters. + * If tags are not provided, the deployment's tags will be used. + * + * If no state is provided, the flow run will be created in a SCHEDULED state. + */ + post: operations["create_flow_run_from_deployment_deployments__id__create_flow_run_post"]; + delete?: never; + options?: never; + head?: never; + patch?: never; + trace?: never; + }; + "/deployments/{id}/work_queue_check": { + parameters: { + query?: never; + header?: never; + path?: never; + cookie?: never; + }; + /** + * Work Queue Check For Deployment + * @deprecated + * @description Get list of work-queues that are able to pick up the specified deployment. 
+ * + * This endpoint is intended to be used by the UI to provide users warnings + * about deployments that are unable to be executed because there are no work + * queues that will pick up their runs, based on existing filter criteria. It + * may be deprecated in the future because there is not a strict relationship + * between work queues and deployments. + */ + get: operations["work_queue_check_for_deployment_deployments__id__work_queue_check_get"]; + put?: never; + post?: never; + delete?: never; + options?: never; + head?: never; + patch?: never; + trace?: never; + }; + "/deployments/{id}/schedules": { + parameters: { + query?: never; + header?: never; + path?: never; + cookie?: never; + }; + /** Read Deployment Schedules */ + get: operations["read_deployment_schedules_deployments__id__schedules_get"]; + put?: never; + /** Create Deployment Schedules */ + post: operations["create_deployment_schedules_deployments__id__schedules_post"]; + delete?: never; + options?: never; + head?: never; + patch?: never; + trace?: never; + }; + "/deployments/{id}/schedules/{schedule_id}": { + parameters: { + query?: never; + header?: never; + path?: never; + cookie?: never; + }; + get?: never; + put?: never; + post?: never; + /** Delete Deployment Schedule */ + delete: operations["delete_deployment_schedule_deployments__id__schedules__schedule_id__delete"]; + options?: never; + head?: never; + /** Update Deployment Schedule */ + patch: operations["update_deployment_schedule_deployments__id__schedules__schedule_id__patch"]; + trace?: never; + }; + "/saved_searches/": { + parameters: { + query?: never; + header?: never; + path?: never; + cookie?: never; + }; + get?: never; + /** + * Create Saved Search + * @description Gracefully creates a new saved search from the provided schema. + * + * If a saved search with the same name already exists, the saved search's fields are + * replaced. + */ + put: operations["create_saved_search_saved_searches__put"]; + post?: never; + delete?: never; + options?: never; + head?: never; + patch?: never; + trace?: never; + }; + "/saved_searches/{id}": { + parameters: { + query?: never; + header?: never; + path?: never; + cookie?: never; + }; + /** + * Read Saved Search + * @description Get a saved search by id. + */ + get: operations["read_saved_search_saved_searches__id__get"]; + put?: never; + post?: never; + /** + * Delete Saved Search + * @description Delete a saved search by id. + */ + delete: operations["delete_saved_search_saved_searches__id__delete"]; + options?: never; + head?: never; + patch?: never; + trace?: never; + }; + "/saved_searches/filter": { + parameters: { + query?: never; + header?: never; + path?: never; + cookie?: never; + }; + get?: never; + put?: never; + /** + * Read Saved Searches + * @description Query for saved searches. + */ + post: operations["read_saved_searches_saved_searches_filter_post"]; + delete?: never; + options?: never; + head?: never; + patch?: never; + trace?: never; + }; + "/logs/": { + parameters: { + query?: never; + header?: never; + path?: never; + cookie?: never; + }; + get?: never; + put?: never; + /** + * Create Logs + * @description Create new logs from the provided schema. + */ + post: operations["create_logs_logs__post"]; + delete?: never; + options?: never; + head?: never; + patch?: never; + trace?: never; + }; + "/logs/filter": { + parameters: { + query?: never; + header?: never; + path?: never; + cookie?: never; + }; + get?: never; + put?: never; + /** + * Read Logs + * @description Query for logs. 
+ */ + post: operations["read_logs_logs_filter_post"]; + delete?: never; + options?: never; + head?: never; + patch?: never; + trace?: never; + }; + "/concurrency_limits/": { + parameters: { + query?: never; + header?: never; + path?: never; + cookie?: never; + }; + get?: never; + put?: never; + /** Create Concurrency Limit */ + post: operations["create_concurrency_limit_concurrency_limits__post"]; + delete?: never; + options?: never; + head?: never; + patch?: never; + trace?: never; + }; + "/concurrency_limits/{id}": { + parameters: { + query?: never; + header?: never; + path?: never; + cookie?: never; + }; + /** + * Read Concurrency Limit + * @description Get a concurrency limit by id. + * + * The `active slots` field contains a list of TaskRun IDs currently using a + * concurrency slot for the specified tag. + */ + get: operations["read_concurrency_limit_concurrency_limits__id__get"]; + put?: never; + post?: never; + /** Delete Concurrency Limit */ + delete: operations["delete_concurrency_limit_concurrency_limits__id__delete"]; + options?: never; + head?: never; + patch?: never; + trace?: never; + }; + "/concurrency_limits/tag/{tag}": { + parameters: { + query?: never; + header?: never; + path?: never; + cookie?: never; + }; + /** + * Read Concurrency Limit By Tag + * @description Get a concurrency limit by tag. + * + * The `active slots` field contains a list of TaskRun IDs currently using a + * concurrency slot for the specified tag. + */ + get: operations["read_concurrency_limit_by_tag_concurrency_limits_tag__tag__get"]; + put?: never; + post?: never; + /** Delete Concurrency Limit By Tag */ + delete: operations["delete_concurrency_limit_by_tag_concurrency_limits_tag__tag__delete"]; + options?: never; + head?: never; + patch?: never; + trace?: never; + }; + "/concurrency_limits/filter": { + parameters: { + query?: never; + header?: never; + path?: never; + cookie?: never; + }; + get?: never; + put?: never; + /** + * Read Concurrency Limits + * @description Query for concurrency limits. + * + * For each concurrency limit the `active slots` field contains a list of TaskRun IDs + * currently using a concurrency slot for the specified tag. 
+ */ + post: operations["read_concurrency_limits_concurrency_limits_filter_post"]; + delete?: never; + options?: never; + head?: never; + patch?: never; + trace?: never; + }; + "/concurrency_limits/tag/{tag}/reset": { + parameters: { + query?: never; + header?: never; + path?: never; + cookie?: never; + }; + get?: never; + put?: never; + /** Reset Concurrency Limit By Tag */ + post: operations["reset_concurrency_limit_by_tag_concurrency_limits_tag__tag__reset_post"]; + delete?: never; + options?: never; + head?: never; + patch?: never; + trace?: never; + }; + "/concurrency_limits/increment": { + parameters: { + query?: never; + header?: never; + path?: never; + cookie?: never; + }; + get?: never; + put?: never; + /** Increment Concurrency Limits V1 */ + post: operations["increment_concurrency_limits_v1_concurrency_limits_increment_post"]; + delete?: never; + options?: never; + head?: never; + patch?: never; + trace?: never; + }; + "/concurrency_limits/decrement": { + parameters: { + query?: never; + header?: never; + path?: never; + cookie?: never; + }; + get?: never; + put?: never; + /** Decrement Concurrency Limits V1 */ + post: operations["decrement_concurrency_limits_v1_concurrency_limits_decrement_post"]; + delete?: never; + options?: never; + head?: never; + patch?: never; + trace?: never; + }; + "/v2/concurrency_limits/": { + parameters: { + query?: never; + header?: never; + path?: never; + cookie?: never; + }; + get?: never; + put?: never; + /** Create Concurrency Limit V2 */ + post: operations["create_concurrency_limit_v2_v2_concurrency_limits__post"]; + delete?: never; + options?: never; + head?: never; + patch?: never; + trace?: never; + }; + "/v2/concurrency_limits/{id_or_name}": { + parameters: { + query?: never; + header?: never; + path?: never; + cookie?: never; + }; + /** Read Concurrency Limit V2 */ + get: operations["read_concurrency_limit_v2_v2_concurrency_limits__id_or_name__get"]; + put?: never; + post?: never; + /** Delete Concurrency Limit V2 */ + delete: operations["delete_concurrency_limit_v2_v2_concurrency_limits__id_or_name__delete"]; + options?: never; + head?: never; + /** Update Concurrency Limit V2 */ + patch: operations["update_concurrency_limit_v2_v2_concurrency_limits__id_or_name__patch"]; + trace?: never; + }; + "/v2/concurrency_limits/filter": { + parameters: { + query?: never; + header?: never; + path?: never; + cookie?: never; + }; + get?: never; + put?: never; + /** Read All Concurrency Limits V2 */ + post: operations["read_all_concurrency_limits_v2_v2_concurrency_limits_filter_post"]; + delete?: never; + options?: never; + head?: never; + patch?: never; + trace?: never; + }; + "/v2/concurrency_limits/increment": { + parameters: { + query?: never; + header?: never; + path?: never; + cookie?: never; + }; + get?: never; + put?: never; + /** Bulk Increment Active Slots */ + post: operations["bulk_increment_active_slots_v2_concurrency_limits_increment_post"]; + delete?: never; + options?: never; + head?: never; + patch?: never; + trace?: never; + }; + "/v2/concurrency_limits/decrement": { + parameters: { + query?: never; + header?: never; + path?: never; + cookie?: never; + }; + get?: never; + put?: never; + /** Bulk Decrement Active Slots */ + post: operations["bulk_decrement_active_slots_v2_concurrency_limits_decrement_post"]; + delete?: never; + options?: never; + head?: never; + patch?: never; + trace?: never; + }; + "/block_types/": { + parameters: { + query?: never; + header?: never; + path?: never; + cookie?: never; + }; + get?: never; + put?: 
never; + /** + * Create Block Type + * @description Create a new block type + */ + post: operations["create_block_type_block_types__post"]; + delete?: never; + options?: never; + head?: never; + patch?: never; + trace?: never; + }; + "/block_types/{id}": { + parameters: { + query?: never; + header?: never; + path?: never; + cookie?: never; + }; + /** + * Read Block Type By Id + * @description Get a block type by ID. + */ + get: operations["read_block_type_by_id_block_types__id__get"]; + put?: never; + post?: never; + /** Delete Block Type */ + delete: operations["delete_block_type_block_types__id__delete"]; + options?: never; + head?: never; + /** + * Update Block Type + * @description Update a block type. + */ + patch: operations["update_block_type_block_types__id__patch"]; + trace?: never; + }; + "/block_types/slug/{slug}": { + parameters: { + query?: never; + header?: never; + path?: never; + cookie?: never; + }; + /** + * Read Block Type By Slug + * @description Get a block type by name. + */ + get: operations["read_block_type_by_slug_block_types_slug__slug__get"]; + put?: never; + post?: never; + delete?: never; + options?: never; + head?: never; + patch?: never; + trace?: never; + }; + "/block_types/filter": { + parameters: { + query?: never; + header?: never; + path?: never; + cookie?: never; + }; + get?: never; + put?: never; + /** + * Read Block Types + * @description Gets all block types. Optionally limit return with limit and offset. + */ + post: operations["read_block_types_block_types_filter_post"]; + delete?: never; + options?: never; + head?: never; + patch?: never; + trace?: never; + }; + "/block_types/slug/{slug}/block_documents": { + parameters: { + query?: never; + header?: never; + path?: never; + cookie?: never; + }; + /** Read Block Documents For Block Type */ + get: operations["read_block_documents_for_block_type_block_types_slug__slug__block_documents_get"]; + put?: never; + post?: never; + delete?: never; + options?: never; + head?: never; + patch?: never; + trace?: never; + }; + "/block_types/slug/{slug}/block_documents/name/{block_document_name}": { + parameters: { + query?: never; + header?: never; + path?: never; + cookie?: never; + }; + /** Read Block Document By Name For Block Type */ + get: operations["read_block_document_by_name_for_block_type_block_types_slug__slug__block_documents_name__block_document_name__get"]; + put?: never; + post?: never; + delete?: never; + options?: never; + head?: never; + patch?: never; + trace?: never; + }; + "/block_types/install_system_block_types": { + parameters: { + query?: never; + header?: never; + path?: never; + cookie?: never; + }; + get?: never; + put?: never; + /** Install System Block Types */ + post: operations["install_system_block_types_block_types_install_system_block_types_post"]; + delete?: never; + options?: never; + head?: never; + patch?: never; + trace?: never; + }; + "/block_documents/": { + parameters: { + query?: never; + header?: never; + path?: never; + cookie?: never; + }; + get?: never; + put?: never; + /** + * Create Block Document + * @description Create a new block document. + */ + post: operations["create_block_document_block_documents__post"]; + delete?: never; + options?: never; + head?: never; + patch?: never; + trace?: never; + }; + "/block_documents/filter": { + parameters: { + query?: never; + header?: never; + path?: never; + cookie?: never; + }; + get?: never; + put?: never; + /** + * Read Block Documents + * @description Query for block documents. 
+ */ + post: operations["read_block_documents_block_documents_filter_post"]; + delete?: never; + options?: never; + head?: never; + patch?: never; + trace?: never; + }; + "/block_documents/count": { + parameters: { + query?: never; + header?: never; + path?: never; + cookie?: never; + }; + get?: never; + put?: never; + /** + * Count Block Documents + * @description Count block documents. + */ + post: operations["count_block_documents_block_documents_count_post"]; + delete?: never; + options?: never; + head?: never; + patch?: never; + trace?: never; + }; + "/block_documents/{id}": { + parameters: { + query?: never; + header?: never; + path?: never; + cookie?: never; + }; + /** Read Block Document By Id */ + get: operations["read_block_document_by_id_block_documents__id__get"]; + put?: never; + post?: never; + /** Delete Block Document */ + delete: operations["delete_block_document_block_documents__id__delete"]; + options?: never; + head?: never; + /** Update Block Document Data */ + patch: operations["update_block_document_data_block_documents__id__patch"]; + trace?: never; + }; + "/work_pools/": { + parameters: { + query?: never; + header?: never; + path?: never; + cookie?: never; + }; + get?: never; + put?: never; + /** + * Create Work Pool + * @description Creates a new work pool. If a work pool with the same + * name already exists, an error will be raised. + */ + post: operations["create_work_pool_work_pools__post"]; + delete?: never; + options?: never; + head?: never; + patch?: never; + trace?: never; + }; + "/work_pools/{name}": { + parameters: { + query?: never; + header?: never; + path?: never; + cookie?: never; + }; + /** + * Read Work Pool + * @description Read a work pool by name + */ + get: operations["read_work_pool_work_pools__name__get"]; + put?: never; + post?: never; + /** + * Delete Work Pool + * @description Delete a work pool + */ + delete: operations["delete_work_pool_work_pools__name__delete"]; + options?: never; + head?: never; + /** + * Update Work Pool + * @description Update a work pool + */ + patch: operations["update_work_pool_work_pools__name__patch"]; + trace?: never; + }; + "/work_pools/filter": { + parameters: { + query?: never; + header?: never; + path?: never; + cookie?: never; + }; + get?: never; + put?: never; + /** + * Read Work Pools + * @description Read multiple work pools + */ + post: operations["read_work_pools_work_pools_filter_post"]; + delete?: never; + options?: never; + head?: never; + patch?: never; + trace?: never; + }; + "/work_pools/count": { + parameters: { + query?: never; + header?: never; + path?: never; + cookie?: never; + }; + get?: never; + put?: never; + /** + * Count Work Pools + * @description Count work pools + */ + post: operations["count_work_pools_work_pools_count_post"]; + delete?: never; + options?: never; + head?: never; + patch?: never; + trace?: never; + }; + "/work_pools/{name}/get_scheduled_flow_runs": { + parameters: { + query?: never; + header?: never; + path?: never; + cookie?: never; + }; + get?: never; + put?: never; + /** + * Get Scheduled Flow Runs + * @description Load scheduled runs for a worker + */ + post: operations["get_scheduled_flow_runs_work_pools__name__get_scheduled_flow_runs_post"]; + delete?: never; + options?: never; + head?: never; + patch?: never; + trace?: never; + }; + "/work_pools/{work_pool_name}/queues": { + parameters: { + query?: never; + header?: never; + path?: never; + cookie?: never; + }; + get?: never; + put?: never; + /** + * Create Work Queue + * @description Creates a new work pool 
queue. If a work pool queue with the same + * name already exists, an error will be raised. + */ + post: operations["create_work_queue_work_pools__work_pool_name__queues_post"]; + delete?: never; + options?: never; + head?: never; + patch?: never; + trace?: never; + }; + "/work_pools/{work_pool_name}/queues/{name}": { + parameters: { + query?: never; + header?: never; + path?: never; + cookie?: never; + }; + /** + * Read Work Queue + * @description Read a work pool queue + */ + get: operations["read_work_queue_work_pools__work_pool_name__queues__name__get"]; + put?: never; + post?: never; + /** + * Delete Work Queue + * @description Delete a work pool queue + */ + delete: operations["delete_work_queue_work_pools__work_pool_name__queues__name__delete"]; + options?: never; + head?: never; + /** + * Update Work Queue + * @description Update a work pool queue + */ + patch: operations["update_work_queue_work_pools__work_pool_name__queues__name__patch"]; + trace?: never; + }; + "/work_pools/{work_pool_name}/queues/filter": { + parameters: { + query?: never; + header?: never; + path?: never; + cookie?: never; + }; + get?: never; + put?: never; + /** + * Read Work Queues + * @description Read all work pool queues + */ + post: operations["read_work_queues_work_pools__work_pool_name__queues_filter_post"]; + delete?: never; + options?: never; + head?: never; + patch?: never; + trace?: never; + }; + "/work_pools/{work_pool_name}/workers/heartbeat": { + parameters: { + query?: never; + header?: never; + path?: never; + cookie?: never; + }; + get?: never; + put?: never; + /** Worker Heartbeat */ + post: operations["worker_heartbeat_work_pools__work_pool_name__workers_heartbeat_post"]; + delete?: never; + options?: never; + head?: never; + patch?: never; + trace?: never; + }; + "/work_pools/{work_pool_name}/workers/filter": { + parameters: { + query?: never; + header?: never; + path?: never; + cookie?: never; + }; + get?: never; + put?: never; + /** + * Read Workers + * @description Read all worker processes + */ + post: operations["read_workers_work_pools__work_pool_name__workers_filter_post"]; + delete?: never; + options?: never; + head?: never; + patch?: never; + trace?: never; + }; + "/work_pools/{work_pool_name}/workers/{name}": { + parameters: { + query?: never; + header?: never; + path?: never; + cookie?: never; + }; + get?: never; + put?: never; + post?: never; + /** + * Delete Worker + * @description Delete a work pool's worker + */ + delete: operations["delete_worker_work_pools__work_pool_name__workers__name__delete"]; + options?: never; + head?: never; + patch?: never; + trace?: never; + }; + "/task_workers/filter": { + parameters: { + query?: never; + header?: never; + path?: never; + cookie?: never; + }; + get?: never; + put?: never; + /** + * Read Task Workers + * @description Read active task workers. Optionally filter by task keys. + */ + post: operations["read_task_workers_task_workers_filter_post"]; + delete?: never; + options?: never; + head?: never; + patch?: never; + trace?: never; + }; + "/work_queues/": { + parameters: { + query?: never; + header?: never; + path?: never; + cookie?: never; + }; + get?: never; + put?: never; + /** + * Create Work Queue + * @description Creates a new work queue. + * + * If a work queue with the same name already exists, an error + * will be raised. 
+ */ + post: operations["create_work_queue_work_queues__post"]; + delete?: never; + options?: never; + head?: never; + patch?: never; + trace?: never; + }; + "/work_queues/{id}": { + parameters: { + query?: never; + header?: never; + path?: never; + cookie?: never; + }; + /** + * Read Work Queue + * @description Get a work queue by id. + */ + get: operations["read_work_queue_work_queues__id__get"]; + put?: never; + post?: never; + /** + * Delete Work Queue + * @description Delete a work queue by id. + */ + delete: operations["delete_work_queue_work_queues__id__delete"]; + options?: never; + head?: never; + /** + * Update Work Queue + * @description Updates an existing work queue. + */ + patch: operations["update_work_queue_work_queues__id__patch"]; + trace?: never; + }; + "/work_queues/name/{name}": { + parameters: { + query?: never; + header?: never; + path?: never; + cookie?: never; + }; + /** + * Read Work Queue By Name + * @description Get a work queue by id. + */ + get: operations["read_work_queue_by_name_work_queues_name__name__get"]; + put?: never; + post?: never; + delete?: never; + options?: never; + head?: never; + patch?: never; + trace?: never; + }; + "/work_queues/{id}/get_runs": { + parameters: { + query?: never; + header?: never; + path?: never; + cookie?: never; + }; + get?: never; + put?: never; + /** + * Read Work Queue Runs + * @description Get flow runs from the work queue. + */ + post: operations["read_work_queue_runs_work_queues__id__get_runs_post"]; + delete?: never; + options?: never; + head?: never; + patch?: never; + trace?: never; + }; + "/work_queues/filter": { + parameters: { + query?: never; + header?: never; + path?: never; + cookie?: never; + }; + get?: never; + put?: never; + /** + * Read Work Queues + * @description Query for work queues. + */ + post: operations["read_work_queues_work_queues_filter_post"]; + delete?: never; + options?: never; + head?: never; + patch?: never; + trace?: never; + }; + "/work_queues/{id}/status": { + parameters: { + query?: never; + header?: never; + path?: never; + cookie?: never; + }; + /** + * Read Work Queue Status + * @description Get the status of a work queue. + */ + get: operations["read_work_queue_status_work_queues__id__status_get"]; + put?: never; + post?: never; + delete?: never; + options?: never; + head?: never; + patch?: never; + trace?: never; + }; + "/artifacts/": { + parameters: { + query?: never; + header?: never; + path?: never; + cookie?: never; + }; + get?: never; + put?: never; + /** Create Artifact */ + post: operations["create_artifact_artifacts__post"]; + delete?: never; + options?: never; + head?: never; + patch?: never; + trace?: never; + }; + "/artifacts/{id}": { + parameters: { + query?: never; + header?: never; + path?: never; + cookie?: never; + }; + /** + * Read Artifact + * @description Retrieve an artifact from the database. + */ + get: operations["read_artifact_artifacts__id__get"]; + put?: never; + post?: never; + /** + * Delete Artifact + * @description Delete an artifact from the database. + */ + delete: operations["delete_artifact_artifacts__id__delete"]; + options?: never; + head?: never; + /** + * Update Artifact + * @description Update an artifact in the database. + */ + patch: operations["update_artifact_artifacts__id__patch"]; + trace?: never; + }; + "/artifacts/{key}/latest": { + parameters: { + query?: never; + header?: never; + path?: never; + cookie?: never; + }; + /** + * Read Latest Artifact + * @description Retrieve the latest artifact from the artifact table. 
+ */ + get: operations["read_latest_artifact_artifacts__key__latest_get"]; + put?: never; + post?: never; + delete?: never; + options?: never; + head?: never; + patch?: never; + trace?: never; + }; + "/artifacts/filter": { + parameters: { + query?: never; + header?: never; + path?: never; + cookie?: never; + }; + get?: never; + put?: never; + /** + * Read Artifacts + * @description Retrieve artifacts from the database. + */ + post: operations["read_artifacts_artifacts_filter_post"]; + delete?: never; + options?: never; + head?: never; + patch?: never; + trace?: never; + }; + "/artifacts/latest/filter": { + parameters: { + query?: never; + header?: never; + path?: never; + cookie?: never; + }; + get?: never; + put?: never; + /** + * Read Latest Artifacts + * @description Retrieve artifacts from the database. + */ + post: operations["read_latest_artifacts_artifacts_latest_filter_post"]; + delete?: never; + options?: never; + head?: never; + patch?: never; + trace?: never; + }; + "/artifacts/count": { + parameters: { + query?: never; + header?: never; + path?: never; + cookie?: never; + }; + get?: never; + put?: never; + /** + * Count Artifacts + * @description Count artifacts from the database. + */ + post: operations["count_artifacts_artifacts_count_post"]; + delete?: never; + options?: never; + head?: never; + patch?: never; + trace?: never; + }; + "/artifacts/latest/count": { + parameters: { + query?: never; + header?: never; + path?: never; + cookie?: never; + }; + get?: never; + put?: never; + /** + * Count Latest Artifacts + * @description Count artifacts from the database. + */ + post: operations["count_latest_artifacts_artifacts_latest_count_post"]; + delete?: never; + options?: never; + head?: never; + patch?: never; + trace?: never; + }; + "/block_schemas/": { + parameters: { + query?: never; + header?: never; + path?: never; + cookie?: never; + }; + get?: never; + put?: never; + /** Create Block Schema */ + post: operations["create_block_schema_block_schemas__post"]; + delete?: never; + options?: never; + head?: never; + patch?: never; + trace?: never; + }; + "/block_schemas/{id}": { + parameters: { + query?: never; + header?: never; + path?: never; + cookie?: never; + }; + /** + * Read Block Schema By Id + * @description Get a block schema by id. + */ + get: operations["read_block_schema_by_id_block_schemas__id__get"]; + put?: never; + post?: never; + /** + * Delete Block Schema + * @description Delete a block schema by id. 
+ */ + delete: operations["delete_block_schema_block_schemas__id__delete"]; + options?: never; + head?: never; + patch?: never; + trace?: never; + }; + "/block_schemas/filter": { + parameters: { + query?: never; + header?: never; + path?: never; + cookie?: never; + }; + get?: never; + put?: never; + /** + * Read Block Schemas + * @description Read all block schemas, optionally filtered by type + */ + post: operations["read_block_schemas_block_schemas_filter_post"]; + delete?: never; + options?: never; + head?: never; + patch?: never; + trace?: never; + }; + "/block_schemas/checksum/{checksum}": { + parameters: { + query?: never; + header?: never; + path?: never; + cookie?: never; + }; + /** Read Block Schema By Checksum */ + get: operations["read_block_schema_by_checksum_block_schemas_checksum__checksum__get"]; + put?: never; + post?: never; + delete?: never; + options?: never; + head?: never; + patch?: never; + trace?: never; + }; + "/block_capabilities/": { + parameters: { + query?: never; + header?: never; + path?: never; + cookie?: never; + }; + /** Read Available Block Capabilities */ + get: operations["read_available_block_capabilities_block_capabilities__get"]; + put?: never; + post?: never; + delete?: never; + options?: never; + head?: never; + patch?: never; + trace?: never; + }; + "/collections/views/{view}": { + parameters: { + query?: never; + header?: never; + path?: never; + cookie?: never; + }; + /** + * Read View Content + * @description Reads the content of a view from the prefect-collection-registry. + */ + get: operations["read_view_content_collections_views__view__get"]; + put?: never; + post?: never; + delete?: never; + options?: never; + head?: never; + patch?: never; + trace?: never; + }; + "/variables/": { + parameters: { + query?: never; + header?: never; + path?: never; + cookie?: never; + }; + get?: never; + put?: never; + /** Create Variable */ + post: operations["create_variable_variables__post"]; + delete?: never; + options?: never; + head?: never; + patch?: never; + trace?: never; + }; + "/variables/{id}": { + parameters: { + query?: never; + header?: never; + path?: never; + cookie?: never; + }; + /** Read Variable */ + get: operations["read_variable_variables__id__get"]; + put?: never; + post?: never; + /** Delete Variable */ + delete: operations["delete_variable_variables__id__delete"]; + options?: never; + head?: never; + /** Update Variable */ + patch: operations["update_variable_variables__id__patch"]; + trace?: never; + }; + "/variables/name/{name}": { + parameters: { + query?: never; + header?: never; + path?: never; + cookie?: never; + }; + /** Read Variable By Name */ + get: operations["read_variable_by_name_variables_name__name__get"]; + put?: never; + post?: never; + /** Delete Variable By Name */ + delete: operations["delete_variable_by_name_variables_name__name__delete"]; + options?: never; + head?: never; + /** Update Variable By Name */ + patch: operations["update_variable_by_name_variables_name__name__patch"]; + trace?: never; + }; + "/variables/filter": { + parameters: { + query?: never; + header?: never; + path?: never; + cookie?: never; + }; + get?: never; + put?: never; + /** Read Variables */ + post: operations["read_variables_variables_filter_post"]; + delete?: never; + options?: never; + head?: never; + patch?: never; + trace?: never; + }; + "/variables/count": { + parameters: { + query?: never; + header?: never; + path?: never; + cookie?: never; + }; + get?: never; + put?: never; + /** Count Variables */ + post: 
operations["count_variables_variables_count_post"]; + delete?: never; + options?: never; + head?: never; + patch?: never; + trace?: never; + }; + "/csrf-token": { + parameters: { + query?: never; + header?: never; + path?: never; + cookie?: never; + }; + /** + * Create Csrf Token + * @description Create or update a CSRF token for a client + */ + get: operations["create_csrf_token_csrf_token_get"]; + put?: never; + post?: never; + delete?: never; + options?: never; + head?: never; + patch?: never; + trace?: never; + }; + "/events": { + parameters: { + query?: never; + header?: never; + path?: never; + cookie?: never; + }; + get?: never; + put?: never; + /** + * Create Events + * @description Record a batch of Events + */ + post: operations["create_events_events_post"]; + delete?: never; + options?: never; + head?: never; + patch?: never; + trace?: never; + }; + "/events/filter": { + parameters: { + query?: never; + header?: never; + path?: never; + cookie?: never; + }; + get?: never; + put?: never; + /** + * Read Events + * @description Queries for Events matching the given filter criteria in the given Account. Returns + * the first page of results, and the URL to request the next page (if there are more + * results). + */ + post: operations["read_events_events_filter_post"]; + delete?: never; + options?: never; + head?: never; + patch?: never; + trace?: never; + }; + "/events/filter/next": { + parameters: { + query?: never; + header?: never; + path?: never; + cookie?: never; + }; + /** + * Read Account Events Page + * @description Returns the next page of Events for a previous query against the given Account, and + * the URL to request the next page (if there are more results). + */ + get: operations["read_account_events_page_events_filter_next_get"]; + put?: never; + post?: never; + delete?: never; + options?: never; + head?: never; + patch?: never; + trace?: never; + }; + "/events/count-by/{countable}": { + parameters: { + query?: never; + header?: never; + path?: never; + cookie?: never; + }; + get?: never; + put?: never; + /** + * Count Account Events + * @description Returns distinct objects and the count of events associated with them. Objects + * that can be counted include the day the event occurred, the type of event, or + * the IDs of the resources associated with the event. 
+ */ + post: operations["count_account_events_events_count_by__countable__post"]; + delete?: never; + options?: never; + head?: never; + patch?: never; + trace?: never; + }; + "/automations/": { + parameters: { + query?: never; + header?: never; + path?: never; + cookie?: never; + }; + get?: never; + put?: never; + /** Create Automation */ + post: operations["create_automation_automations__post"]; + delete?: never; + options?: never; + head?: never; + patch?: never; + trace?: never; + }; + "/automations/{id}": { + parameters: { + query?: never; + header?: never; + path?: never; + cookie?: never; + }; + /** Read Automation */ + get: operations["read_automation_automations__id__get"]; + /** Update Automation */ + put: operations["update_automation_automations__id__put"]; + post?: never; + /** Delete Automation */ + delete: operations["delete_automation_automations__id__delete"]; + options?: never; + head?: never; + /** Patch Automation */ + patch: operations["patch_automation_automations__id__patch"]; + trace?: never; + }; + "/automations/filter": { + parameters: { + query?: never; + header?: never; + path?: never; + cookie?: never; + }; + get?: never; + put?: never; + /** Read Automations */ + post: operations["read_automations_automations_filter_post"]; + delete?: never; + options?: never; + head?: never; + patch?: never; + trace?: never; + }; + "/automations/count": { + parameters: { + query?: never; + header?: never; + path?: never; + cookie?: never; + }; + get?: never; + put?: never; + /** Count Automations */ + post: operations["count_automations_automations_count_post"]; + delete?: never; + options?: never; + head?: never; + patch?: never; + trace?: never; + }; + "/automations/related-to/{resource_id}": { + parameters: { + query?: never; + header?: never; + path?: never; + cookie?: never; + }; + /** Read Automations Related To Resource */ + get: operations["read_automations_related_to_resource_automations_related_to__resource_id__get"]; + put?: never; + post?: never; + delete?: never; + options?: never; + head?: never; + patch?: never; + trace?: never; + }; + "/automations/owned-by/{resource_id}": { + parameters: { + query?: never; + header?: never; + path?: never; + cookie?: never; + }; + get?: never; + put?: never; + post?: never; + /** Delete Automations Owned By Resource */ + delete: operations["delete_automations_owned_by_resource_automations_owned_by__resource_id__delete"]; + options?: never; + head?: never; + patch?: never; + trace?: never; + }; + "/templates/validate": { + parameters: { + query?: never; + header?: never; + path?: never; + cookie?: never; + }; + get?: never; + put?: never; + /** Validate Template */ + post: operations["validate_template_templates_validate_post"]; + delete?: never; + options?: never; + head?: never; + patch?: never; + trace?: never; + }; + "/ui/flows/count-deployments": { + parameters: { + query?: never; + header?: never; + path?: never; + cookie?: never; + }; + get?: never; + put?: never; + /** + * Count Deployments By Flow + * @description Get deployment counts by flow id. + */ + post: operations["count_deployments_by_flow_ui_flows_count_deployments_post"]; + delete?: never; + options?: never; + head?: never; + patch?: never; + trace?: never; + }; + "/ui/flows/next-runs": { + parameters: { + query?: never; + header?: never; + path?: never; + cookie?: never; + }; + get?: never; + put?: never; + /** + * Next Runs By Flow + * @description Get the next flow run by flow id. 
+ */ + post: operations["next_runs_by_flow_ui_flows_next_runs_post"]; + delete?: never; + options?: never; + head?: never; + patch?: never; + trace?: never; + }; + "/ui/flow_runs/history": { + parameters: { + query?: never; + header?: never; + path?: never; + cookie?: never; + }; + get?: never; + put?: never; + /** Read Flow Run History */ + post: operations["read_flow_run_history_ui_flow_runs_history_post"]; + delete?: never; + options?: never; + head?: never; + patch?: never; + trace?: never; + }; + "/ui/flow_runs/count-task-runs": { + parameters: { + query?: never; + header?: never; + path?: never; + cookie?: never; + }; + get?: never; + put?: never; + /** + * Count Task Runs By Flow Run + * @description Get task run counts by flow run id. + */ + post: operations["count_task_runs_by_flow_run_ui_flow_runs_count_task_runs_post"]; + delete?: never; + options?: never; + head?: never; + patch?: never; + trace?: never; + }; + "/ui/schemas/validate": { + parameters: { + query?: never; + header?: never; + path?: never; + cookie?: never; + }; + get?: never; + put?: never; + /** Validate Obj */ + post: operations["validate_obj_ui_schemas_validate_post"]; + delete?: never; + options?: never; + head?: never; + patch?: never; + trace?: never; + }; + "/ui/task_runs/dashboard/counts": { + parameters: { + query?: never; + header?: never; + path?: never; + cookie?: never; + }; + get?: never; + put?: never; + /** Read Dashboard Task Run Counts */ + post: operations["read_dashboard_task_run_counts_ui_task_runs_dashboard_counts_post"]; + delete?: never; + options?: never; + head?: never; + patch?: never; + trace?: never; + }; + "/ui/task_runs/count": { + parameters: { + query?: never; + header?: never; + path?: never; + cookie?: never; + }; + get?: never; + put?: never; + /** Read Task Run Counts By State */ + post: operations["read_task_run_counts_by_state_ui_task_runs_count_post"]; + delete?: never; + options?: never; + head?: never; + patch?: never; + trace?: never; + }; + "/admin/settings": { + parameters: { + query?: never; + header?: never; + path?: never; + cookie?: never; + }; + /** + * Read Settings + * @description Get the current Prefect REST API settings. + * + * Secret setting values will be obfuscated. + */ + get: operations["read_settings_admin_settings_get"]; + put?: never; + post?: never; + delete?: never; + options?: never; + head?: never; + patch?: never; + trace?: never; + }; + "/admin/version": { + parameters: { + query?: never; + header?: never; + path?: never; + cookie?: never; + }; + /** + * Read Version + * @description Returns the Prefect version number + */ + get: operations["read_version_admin_version_get"]; + put?: never; + post?: never; + delete?: never; + options?: never; + head?: never; + patch?: never; + trace?: never; + }; + "/admin/database/clear": { + parameters: { + query?: never; + header?: never; + path?: never; + cookie?: never; + }; + get?: never; + put?: never; + /** + * Clear Database + * @description Clear all database tables without dropping them. + */ + post: operations["clear_database_admin_database_clear_post"]; + delete?: never; + options?: never; + head?: never; + patch?: never; + trace?: never; + }; + "/admin/database/drop": { + parameters: { + query?: never; + header?: never; + path?: never; + cookie?: never; + }; + get?: never; + put?: never; + /** + * Drop Database + * @description Drop all database objects. 
+ */ + post: operations["drop_database_admin_database_drop_post"]; + delete?: never; + options?: never; + head?: never; + patch?: never; + trace?: never; + }; + "/admin/database/create": { + parameters: { + query?: never; + header?: never; + path?: never; + cookie?: never; + }; + get?: never; + put?: never; + /** + * Create Database + * @description Create all database objects. + */ + post: operations["create_database_admin_database_create_post"]; + delete?: never; + options?: never; + head?: never; + patch?: never; + trace?: never; + }; + "/hello": { + parameters: { + query?: never; + header?: never; + path?: never; + cookie?: never; + }; + /** + * Hello + * @description Say hello! + */ + get: operations["hello_hello_get"]; + put?: never; + post?: never; + delete?: never; + options?: never; + head?: never; + patch?: never; + trace?: never; + }; + "/ready": { + parameters: { + query?: never; + header?: never; + path?: never; + cookie?: never; + }; + /** Perform Readiness Check */ + get: operations["perform_readiness_check_ready_get"]; + put?: never; + post?: never; + delete?: never; + options?: never; + head?: never; + patch?: never; + trace?: never; + }; } export type webhooks = Record; export interface components { - schemas: { - /** Artifact */ - Artifact: { - /** - * Id - * Format: uuid - */ - id?: string; - /** Created */ - created?: string | null; - /** Updated */ - updated?: string | null; - /** - * Key - * @description An optional unique reference key for this artifact. - */ - key?: string | null; - /** - * Type - * @description An identifier that describes the shape of the data field. e.g. 'result', 'table', 'markdown' - */ - type?: string | null; - /** - * Description - * @description A markdown-enabled description of the artifact. - */ - description?: string | null; - /** - * Data - * @description Data associated with the artifact, e.g. a result.; structure depends on the artifact type. - */ - data?: Record | unknown | null; - /** - * Metadata - * @description User-defined artifact metadata. Content must be string key and value pairs. - */ - metadata_?: { - [key: string]: string; - } | null; - /** - * Flow Run Id - * @description The flow run associated with the artifact. - */ - flow_run_id?: string | null; - /** - * Task Run Id - * @description The task run associated with the artifact. - */ - task_run_id?: string | null; - }; - /** ArtifactCollection */ - ArtifactCollection: { - /** - * Id - * Format: uuid - */ - id?: string; - /** Created */ - created?: string | null; - /** Updated */ - updated?: string | null; - /** - * Key - * @description An optional unique reference key for this artifact. - */ - key: string; - /** - * Latest Id - * Format: uuid - * @description The latest artifact ID associated with the key. - */ - latest_id: string; - /** - * Type - * @description An identifier that describes the shape of the data field. e.g. 'result', 'table', 'markdown' - */ - type?: string | null; - /** - * Description - * @description A markdown-enabled description of the artifact. - */ - description?: string | null; - /** - * Data - * @description Data associated with the artifact, e.g. a result.; structure depends on the artifact type. - */ - data?: Record | unknown | null; - /** - * Metadata - * @description User-defined artifact metadata. Content must be string key and value pairs. - */ - metadata_?: { - [key: string]: string; - } | null; - /** - * Flow Run Id - * @description The flow run associated with the artifact. 
- */ - flow_run_id?: string | null; - /** - * Task Run Id - * @description The task run associated with the artifact. - */ - task_run_id?: string | null; - }; - /** - * ArtifactCollectionFilter - * @description Filter artifact collections. Only artifact collections matching all criteria will be returned - */ - ArtifactCollectionFilter: { - /** - * @description Operator for combining filter criteria. Defaults to 'and_'. - * @default and_ - */ - operator: components["schemas"]["Operator"]; - /** @description Filter criteria for `Artifact.id` */ - latest_id?: - | components["schemas"]["ArtifactCollectionFilterLatestId"] - | null; - /** @description Filter criteria for `Artifact.key` */ - key?: components["schemas"]["ArtifactCollectionFilterKey"] | null; - /** @description Filter criteria for `Artifact.flow_run_id` */ - flow_run_id?: - | components["schemas"]["ArtifactCollectionFilterFlowRunId"] - | null; - /** @description Filter criteria for `Artifact.task_run_id` */ - task_run_id?: - | components["schemas"]["ArtifactCollectionFilterTaskRunId"] - | null; - /** @description Filter criteria for `Artifact.type` */ - type?: components["schemas"]["ArtifactCollectionFilterType"] | null; - }; - /** - * ArtifactCollectionFilterFlowRunId - * @description Filter by `ArtifactCollection.flow_run_id`. - */ - ArtifactCollectionFilterFlowRunId: { - /** - * Any - * @description A list of flow run IDs to include - */ - any_?: string[] | null; - }; - /** - * ArtifactCollectionFilterKey - * @description Filter by `ArtifactCollection.key`. - */ - ArtifactCollectionFilterKey: { - /** - * Any - * @description A list of artifact keys to include - */ - any_?: string[] | null; - /** - * Like - * @description A string to match artifact keys against. This can include SQL wildcard characters like `%` and `_`. - */ - like_?: string | null; - /** - * Exists - * @description If `true`, only include artifacts with a non-null key. If `false`, only include artifacts with a null key. Should return all rows in the ArtifactCollection table if specified. - */ - exists_?: boolean | null; - }; - /** - * ArtifactCollectionFilterLatestId - * @description Filter by `ArtifactCollection.latest_id`. - */ - ArtifactCollectionFilterLatestId: { - /** - * Any - * @description A list of artifact ids to include - */ - any_?: string[] | null; - }; - /** - * ArtifactCollectionFilterTaskRunId - * @description Filter by `ArtifactCollection.task_run_id`. - */ - ArtifactCollectionFilterTaskRunId: { - /** - * Any - * @description A list of task run IDs to include - */ - any_?: string[] | null; - }; - /** - * ArtifactCollectionFilterType - * @description Filter by `ArtifactCollection.type`. - */ - ArtifactCollectionFilterType: { - /** - * Any - * @description A list of artifact types to include - */ - any_?: string[] | null; - /** - * Not Any - * @description A list of artifact types to exclude - */ - not_any_?: string[] | null; - }; - /** - * ArtifactCollectionSort - * @description Defines artifact collection sorting options. - * @enum {string} - */ - ArtifactCollectionSort: - | "CREATED_DESC" - | "UPDATED_DESC" - | "ID_DESC" - | "KEY_DESC" - | "KEY_ASC"; - /** - * ArtifactCreate - * @description Data used by the Prefect REST API to create an artifact. - */ - ArtifactCreate: { - /** - * Key - * @description An optional unique reference key for this artifact. - */ - key?: string | null; - /** - * Type - * @description An identifier that describes the shape of the data field. e.g. 
'result', 'table', 'markdown' - */ - type?: string | null; - /** - * Description - * @description A markdown-enabled description of the artifact. - */ - description?: string | null; - /** - * Data - * @description Data associated with the artifact, e.g. a result.; structure depends on the artifact type. - */ - data?: Record | unknown | null; - /** - * Metadata - * @description User-defined artifact metadata. Content must be string key and value pairs. - */ - metadata_?: { - [key: string]: string; - } | null; - /** - * Flow Run Id - * @description The flow run associated with the artifact. - */ - flow_run_id?: string | null; - /** - * Task Run Id - * @description The task run associated with the artifact. - */ - task_run_id?: string | null; - }; - /** - * ArtifactFilter - * @description Filter artifacts. Only artifacts matching all criteria will be returned - */ - ArtifactFilter: { - /** - * @description Operator for combining filter criteria. Defaults to 'and_'. - * @default and_ - */ - operator: components["schemas"]["Operator"]; - /** @description Filter criteria for `Artifact.id` */ - id?: components["schemas"]["ArtifactFilterId"] | null; - /** @description Filter criteria for `Artifact.key` */ - key?: components["schemas"]["ArtifactFilterKey"] | null; - /** @description Filter criteria for `Artifact.flow_run_id` */ - flow_run_id?: components["schemas"]["ArtifactFilterFlowRunId"] | null; - /** @description Filter criteria for `Artifact.task_run_id` */ - task_run_id?: components["schemas"]["ArtifactFilterTaskRunId"] | null; - /** @description Filter criteria for `Artifact.type` */ - type?: components["schemas"]["ArtifactFilterType"] | null; - }; - /** - * ArtifactFilterFlowRunId - * @description Filter by `Artifact.flow_run_id`. - */ - ArtifactFilterFlowRunId: { - /** - * Any - * @description A list of flow run IDs to include - */ - any_?: string[] | null; - }; - /** - * ArtifactFilterId - * @description Filter by `Artifact.id`. - */ - ArtifactFilterId: { - /** - * Any - * @description A list of artifact ids to include - */ - any_?: string[] | null; - }; - /** - * ArtifactFilterKey - * @description Filter by `Artifact.key`. - */ - ArtifactFilterKey: { - /** - * Any - * @description A list of artifact keys to include - */ - any_?: string[] | null; - /** - * Like - * @description A string to match artifact keys against. This can include SQL wildcard characters like `%` and `_`. - */ - like_?: string | null; - /** - * Exists - * @description If `true`, only include artifacts with a non-null key. If `false`, only include artifacts with a null key. - */ - exists_?: boolean | null; - }; - /** - * ArtifactFilterTaskRunId - * @description Filter by `Artifact.task_run_id`. - */ - ArtifactFilterTaskRunId: { - /** - * Any - * @description A list of task run IDs to include - */ - any_?: string[] | null; - }; - /** - * ArtifactFilterType - * @description Filter by `Artifact.type`. - */ - ArtifactFilterType: { - /** - * Any - * @description A list of artifact types to include - */ - any_?: string[] | null; - /** - * Not Any - * @description A list of artifact types to exclude - */ - not_any_?: string[] | null; - }; - /** - * ArtifactSort - * @description Defines artifact sorting options. - * @enum {string} - */ - ArtifactSort: - | "CREATED_DESC" - | "UPDATED_DESC" - | "ID_DESC" - | "KEY_DESC" - | "KEY_ASC"; - /** - * ArtifactUpdate - * @description Data used by the Prefect REST API to update an artifact. 
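A sketch of how the artifact filter shapes in this hunk combine into a request body. The route is inferred from the generated operation name read_artifacts_artifacts_filter_post; the module path and base URL are assumptions.

    // Sketch: filter artifacts whose keys match a prefix.
    import type { components } from "./api-types"; // hypothetical module path

    type ArtifactsFilterBody =
      components["schemas"]["Body_read_artifacts_artifacts_filter_post"];

    const body: ArtifactsFilterBody = {
      sort: "CREATED_DESC",
      offset: 0,
      limit: 50,
      artifacts: { operator: "and_", key: { like_: "report-%" } },
    };

    const res = await fetch("http://127.0.0.1:4200/api/artifacts/filter", {
      method: "POST",
      headers: { "Content-Type": "application/json" },
      body: JSON.stringify(body),
    });
    // Assumed to return a list of Artifact objects.
    const artifacts: components["schemas"]["Artifact"][] = await res.json();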
- */ - ArtifactUpdate: { - /** Data */ - data?: Record | unknown | null; - /** Description */ - description?: string | null; - /** Metadata */ - metadata_?: { - [key: string]: string; - } | null; - }; - /** Automation */ - Automation: { - /** - * Name - * @description The name of this automation - */ - name: string; - /** - * Description - * @description A longer description of this automation - * @default - */ - description: string; - /** - * Enabled - * @description Whether this automation will be evaluated - * @default true - */ - enabled: boolean; - /** - * Trigger - * @description The criteria for which events this Automation covers and how it will respond to the presence or absence of those events - */ - trigger: - | components["schemas"]["EventTrigger"] - | components["schemas"]["CompoundTrigger-Output"] - | components["schemas"]["SequenceTrigger-Output"]; - /** - * Actions - * @description The actions to perform when this Automation triggers - */ - actions: ( - | components["schemas"]["DoNothing"] - | components["schemas"]["RunDeployment"] - | components["schemas"]["PauseDeployment"] - | components["schemas"]["ResumeDeployment"] - | components["schemas"]["CancelFlowRun"] - | components["schemas"]["ChangeFlowRunState"] - | components["schemas"]["PauseWorkQueue"] - | components["schemas"]["ResumeWorkQueue"] - | components["schemas"]["SendNotification"] - | components["schemas"]["CallWebhook"] - | components["schemas"]["PauseAutomation"] - | components["schemas"]["ResumeAutomation"] - | components["schemas"]["SuspendFlowRun"] - | components["schemas"]["ResumeFlowRun"] - | components["schemas"]["PauseWorkPool"] - | components["schemas"]["ResumeWorkPool"] - )[]; - /** - * Actions On Trigger - * @description The actions to perform when an Automation goes into a triggered state - */ - actions_on_trigger?: ( - | components["schemas"]["DoNothing"] - | components["schemas"]["RunDeployment"] - | components["schemas"]["PauseDeployment"] - | components["schemas"]["ResumeDeployment"] - | components["schemas"]["CancelFlowRun"] - | components["schemas"]["ChangeFlowRunState"] - | components["schemas"]["PauseWorkQueue"] - | components["schemas"]["ResumeWorkQueue"] - | components["schemas"]["SendNotification"] - | components["schemas"]["CallWebhook"] - | components["schemas"]["PauseAutomation"] - | components["schemas"]["ResumeAutomation"] - | components["schemas"]["SuspendFlowRun"] - | components["schemas"]["ResumeFlowRun"] - | components["schemas"]["PauseWorkPool"] - | components["schemas"]["ResumeWorkPool"] - )[]; - /** - * Actions On Resolve - * @description The actions to perform when an Automation goes into a resolving state - */ - actions_on_resolve?: ( - | components["schemas"]["DoNothing"] - | components["schemas"]["RunDeployment"] - | components["schemas"]["PauseDeployment"] - | components["schemas"]["ResumeDeployment"] - | components["schemas"]["CancelFlowRun"] - | components["schemas"]["ChangeFlowRunState"] - | components["schemas"]["PauseWorkQueue"] - | components["schemas"]["ResumeWorkQueue"] - | components["schemas"]["SendNotification"] - | components["schemas"]["CallWebhook"] - | components["schemas"]["PauseAutomation"] - | components["schemas"]["ResumeAutomation"] - | components["schemas"]["SuspendFlowRun"] - | components["schemas"]["ResumeFlowRun"] - | components["schemas"]["PauseWorkPool"] - | components["schemas"]["ResumeWorkPool"] - )[]; - /** - * Id - * Format: uuid - */ - id?: string; - /** Created */ - created?: string | null; - /** Updated */ - updated?: string | null; - }; - 
/** AutomationCreate */ - AutomationCreate: { - /** - * Name - * @description The name of this automation - */ - name: string; - /** - * Description - * @description A longer description of this automation - * @default - */ - description: string; - /** - * Enabled - * @description Whether this automation will be evaluated - * @default true - */ - enabled: boolean; - /** - * Trigger - * @description The criteria for which events this Automation covers and how it will respond to the presence or absence of those events - */ - trigger: - | components["schemas"]["EventTrigger"] - | components["schemas"]["CompoundTrigger-Input"] - | components["schemas"]["SequenceTrigger-Input"]; - /** - * Actions - * @description The actions to perform when this Automation triggers - */ - actions: ( - | components["schemas"]["DoNothing"] - | components["schemas"]["RunDeployment"] - | components["schemas"]["PauseDeployment"] - | components["schemas"]["ResumeDeployment"] - | components["schemas"]["CancelFlowRun"] - | components["schemas"]["ChangeFlowRunState"] - | components["schemas"]["PauseWorkQueue"] - | components["schemas"]["ResumeWorkQueue"] - | components["schemas"]["SendNotification"] - | components["schemas"]["CallWebhook"] - | components["schemas"]["PauseAutomation"] - | components["schemas"]["ResumeAutomation"] - | components["schemas"]["SuspendFlowRun"] - | components["schemas"]["ResumeFlowRun"] - | components["schemas"]["PauseWorkPool"] - | components["schemas"]["ResumeWorkPool"] - )[]; - /** - * Actions On Trigger - * @description The actions to perform when an Automation goes into a triggered state - */ - actions_on_trigger?: ( - | components["schemas"]["DoNothing"] - | components["schemas"]["RunDeployment"] - | components["schemas"]["PauseDeployment"] - | components["schemas"]["ResumeDeployment"] - | components["schemas"]["CancelFlowRun"] - | components["schemas"]["ChangeFlowRunState"] - | components["schemas"]["PauseWorkQueue"] - | components["schemas"]["ResumeWorkQueue"] - | components["schemas"]["SendNotification"] - | components["schemas"]["CallWebhook"] - | components["schemas"]["PauseAutomation"] - | components["schemas"]["ResumeAutomation"] - | components["schemas"]["SuspendFlowRun"] - | components["schemas"]["ResumeFlowRun"] - | components["schemas"]["PauseWorkPool"] - | components["schemas"]["ResumeWorkPool"] - )[]; - /** - * Actions On Resolve - * @description The actions to perform when an Automation goes into a resolving state - */ - actions_on_resolve?: ( - | components["schemas"]["DoNothing"] - | components["schemas"]["RunDeployment"] - | components["schemas"]["PauseDeployment"] - | components["schemas"]["ResumeDeployment"] - | components["schemas"]["CancelFlowRun"] - | components["schemas"]["ChangeFlowRunState"] - | components["schemas"]["PauseWorkQueue"] - | components["schemas"]["ResumeWorkQueue"] - | components["schemas"]["SendNotification"] - | components["schemas"]["CallWebhook"] - | components["schemas"]["PauseAutomation"] - | components["schemas"]["ResumeAutomation"] - | components["schemas"]["SuspendFlowRun"] - | components["schemas"]["ResumeFlowRun"] - | components["schemas"]["PauseWorkPool"] - | components["schemas"]["ResumeWorkPool"] - )[]; - /** - * Owner Resource - * @description The resource to which this automation belongs - */ - owner_resource?: string | null; - }; - /** AutomationFilter */ - AutomationFilter: { - /** - * @description Operator for combining filter criteria. Defaults to 'and_'. 
- * @default and_ - */ - operator: components["schemas"]["Operator"]; - /** @description Filter criteria for `Automation.name` */ - name?: components["schemas"]["AutomationFilterName"] | null; - /** @description Filter criteria for `Automation.created` */ - created?: components["schemas"]["AutomationFilterCreated"] | null; - }; - /** - * AutomationFilterCreated - * @description Filter by `Automation.created`. - */ - AutomationFilterCreated: { - /** - * Before - * @description Only include automations created before this datetime - */ - before_?: string | null; - }; - /** - * AutomationFilterName - * @description Filter by `Automation.created`. - */ - AutomationFilterName: { - /** - * Any - * @description Only include automations with names that match any of these strings - */ - any_?: string[] | null; - }; - /** AutomationPartialUpdate */ - AutomationPartialUpdate: { - /** - * Enabled - * @description Whether this automation will be evaluated - * @default true - */ - enabled: boolean; - }; - /** - * AutomationSort - * @description Defines automations sorting options. - * @enum {string} - */ - AutomationSort: "CREATED_DESC" | "UPDATED_DESC" | "NAME_ASC" | "NAME_DESC"; - /** AutomationUpdate */ - AutomationUpdate: { - /** - * Name - * @description The name of this automation - */ - name: string; - /** - * Description - * @description A longer description of this automation - * @default - */ - description: string; - /** - * Enabled - * @description Whether this automation will be evaluated - * @default true - */ - enabled: boolean; - /** - * Trigger - * @description The criteria for which events this Automation covers and how it will respond to the presence or absence of those events - */ - trigger: - | components["schemas"]["EventTrigger"] - | components["schemas"]["CompoundTrigger-Input"] - | components["schemas"]["SequenceTrigger-Input"]; - /** - * Actions - * @description The actions to perform when this Automation triggers - */ - actions: ( - | components["schemas"]["DoNothing"] - | components["schemas"]["RunDeployment"] - | components["schemas"]["PauseDeployment"] - | components["schemas"]["ResumeDeployment"] - | components["schemas"]["CancelFlowRun"] - | components["schemas"]["ChangeFlowRunState"] - | components["schemas"]["PauseWorkQueue"] - | components["schemas"]["ResumeWorkQueue"] - | components["schemas"]["SendNotification"] - | components["schemas"]["CallWebhook"] - | components["schemas"]["PauseAutomation"] - | components["schemas"]["ResumeAutomation"] - | components["schemas"]["SuspendFlowRun"] - | components["schemas"]["ResumeFlowRun"] - | components["schemas"]["PauseWorkPool"] - | components["schemas"]["ResumeWorkPool"] - )[]; - /** - * Actions On Trigger - * @description The actions to perform when an Automation goes into a triggered state - */ - actions_on_trigger?: ( - | components["schemas"]["DoNothing"] - | components["schemas"]["RunDeployment"] - | components["schemas"]["PauseDeployment"] - | components["schemas"]["ResumeDeployment"] - | components["schemas"]["CancelFlowRun"] - | components["schemas"]["ChangeFlowRunState"] - | components["schemas"]["PauseWorkQueue"] - | components["schemas"]["ResumeWorkQueue"] - | components["schemas"]["SendNotification"] - | components["schemas"]["CallWebhook"] - | components["schemas"]["PauseAutomation"] - | components["schemas"]["ResumeAutomation"] - | components["schemas"]["SuspendFlowRun"] - | components["schemas"]["ResumeFlowRun"] - | components["schemas"]["PauseWorkPool"] - | components["schemas"]["ResumeWorkPool"] - )[]; - /** - 
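A sketch pairing the AutomationFilter and AutomationFilterName shapes above with the read_automations filter body defined later in this hunk; the route follows the operation name, and the module path and base URL are assumptions.

    // Sketch: list automations by name.
    import type { components } from "./api-types"; // hypothetical module path

    type AutomationsFilterBody =
      components["schemas"]["Body_read_automations_automations_filter_post"];

    const body: AutomationsFilterBody = {
      sort: "NAME_ASC",
      offset: 0,
      automations: {
        operator: "and_",
        name: { any_: ["nightly-report", "cleanup"] },
      },
    };

    const res = await fetch("http://127.0.0.1:4200/api/automations/filter", {
      method: "POST",
      headers: { "Content-Type": "application/json" },
      body: JSON.stringify(body),
    });
    const automations: components["schemas"]["Automation"][] = await res.json();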
* Actions On Resolve - * @description The actions to perform when an Automation goes into a resolving state - */ - actions_on_resolve?: ( - | components["schemas"]["DoNothing"] - | components["schemas"]["RunDeployment"] - | components["schemas"]["PauseDeployment"] - | components["schemas"]["ResumeDeployment"] - | components["schemas"]["CancelFlowRun"] - | components["schemas"]["ChangeFlowRunState"] - | components["schemas"]["PauseWorkQueue"] - | components["schemas"]["ResumeWorkQueue"] - | components["schemas"]["SendNotification"] - | components["schemas"]["CallWebhook"] - | components["schemas"]["PauseAutomation"] - | components["schemas"]["ResumeAutomation"] - | components["schemas"]["SuspendFlowRun"] - | components["schemas"]["ResumeFlowRun"] - | components["schemas"]["PauseWorkPool"] - | components["schemas"]["ResumeWorkPool"] - )[]; - }; - /** - * BlockDocument - * @description An ORM representation of a block document. - */ - BlockDocument: { - /** - * Id - * Format: uuid - */ - id?: string; - /** Created */ - created?: string | null; - /** Updated */ - updated?: string | null; - /** - * Name - * @description The block document's name. Not required for anonymous block documents. - */ - name?: string | null; - /** - * Data - * @description The block document's data - */ - data?: Record; - /** - * Block Schema Id - * Format: uuid - * @description A block schema ID - */ - block_schema_id: string; - /** @description The associated block schema */ - block_schema?: components["schemas"]["BlockSchema"] | null; - /** - * Block Type Id - * Format: uuid - * @description A block type ID - */ - block_type_id: string; - /** - * Block Type Name - * @description The associated block type's name - */ - block_type_name?: string | null; - /** @description The associated block type */ - block_type?: components["schemas"]["BlockType"] | null; - /** - * Block Document References - * @description Record of the block document's references - */ - block_document_references?: { - [key: string]: Record; - }; - /** - * Is Anonymous - * @description Whether the block is anonymous (anonymous blocks are usually created by Prefect automatically) - * @default false - */ - is_anonymous: boolean; - }; - /** - * BlockDocumentCreate - * @description Data used by the Prefect REST API to create a block document. - */ - BlockDocumentCreate: { - /** - * Name - * @description The block document's name. Not required for anonymous block documents. - */ - name?: string | null; - /** - * Data - * @description The block document's data - */ - data?: Record; - /** - * Block Schema Id - * Format: uuid - * @description A block schema ID - */ - block_schema_id: string; - /** - * Block Type Id - * Format: uuid - * @description A block type ID - */ - block_type_id: string; - /** - * Is Anonymous - * @description Whether the block is anonymous (anonymous blocks are usually created by Prefect automatically) - * @default false - */ - is_anonymous: boolean; - }; - /** - * BlockDocumentFilter - * @description Filter BlockDocuments. Only BlockDocuments matching all criteria will be returned - */ - BlockDocumentFilter: { - /** - * @description Operator for combining filter criteria. Defaults to 'and_'. - * @default and_ - */ - operator: components["schemas"]["Operator"]; - /** @description Filter criteria for `BlockDocument.id` */ - id?: components["schemas"]["BlockDocumentFilterId"] | null; - /** - * @description Filter criteria for `BlockDocument.is_anonymous`. Defaults to excluding anonymous blocks. 
- * @default { - * "eq_": false - * } - */ - is_anonymous: - | components["schemas"]["BlockDocumentFilterIsAnonymous"] - | null; - /** @description Filter criteria for `BlockDocument.block_type_id` */ - block_type_id?: - | components["schemas"]["BlockDocumentFilterBlockTypeId"] - | null; - /** @description Filter criteria for `BlockDocument.name` */ - name?: components["schemas"]["BlockDocumentFilterName"] | null; - }; - /** - * BlockDocumentFilterBlockTypeId - * @description Filter by `BlockDocument.block_type_id`. - */ - BlockDocumentFilterBlockTypeId: { - /** - * Any - * @description A list of block type ids to include - */ - any_?: string[] | null; - }; - /** - * BlockDocumentFilterId - * @description Filter by `BlockDocument.id`. - */ - BlockDocumentFilterId: { - /** - * Any - * @description A list of block ids to include - */ - any_?: string[] | null; - }; - /** - * BlockDocumentFilterIsAnonymous - * @description Filter by `BlockDocument.is_anonymous`. - */ - BlockDocumentFilterIsAnonymous: { - /** - * Eq - * @description Filter block documents for only those that are or are not anonymous. - */ - eq_?: boolean | null; - }; - /** - * BlockDocumentFilterName - * @description Filter by `BlockDocument.name`. - */ - BlockDocumentFilterName: { - /** - * Any - * @description A list of block names to include - */ - any_?: string[] | null; - /** - * Like - * @description A string to match block names against. This can include SQL wildcard characters like `%` and `_`. - */ - like_?: string | null; - }; - /** - * BlockDocumentSort - * @description Defines block document sorting options. - * @enum {string} - */ - BlockDocumentSort: "NAME_DESC" | "NAME_ASC" | "BLOCK_TYPE_AND_NAME_ASC"; - /** - * BlockDocumentUpdate - * @description Data used by the Prefect REST API to update a block document. - */ - BlockDocumentUpdate: { - /** - * Block Schema Id - * @description A block schema ID - */ - block_schema_id?: string | null; - /** - * Data - * @description The block document's data - */ - data?: Record; - /** - * Merge Existing Data - * @default true - */ - merge_existing_data: boolean; - }; - /** - * BlockSchema - * @description An ORM representation of a block schema. - */ - BlockSchema: { - /** - * Id - * Format: uuid - */ - id?: string; - /** Created */ - created?: string | null; - /** Updated */ - updated?: string | null; - /** - * Checksum - * @description The block schema's unique checksum - */ - checksum: string; - /** - * Fields - * @description The block schema's field schema - */ - fields?: Record; - /** - * Block Type Id - * @description A block type ID - */ - block_type_id: string | null; - /** @description The associated block type */ - block_type?: components["schemas"]["BlockType"] | null; - /** - * Capabilities - * @description A list of Block capabilities - */ - capabilities?: string[]; - /** - * Version - * @description Human readable identifier for the block schema - * @default non-versioned - */ - version: string; - }; - /** - * BlockSchemaCreate - * @description Data used by the Prefect REST API to create a block schema. 
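A sketch of the BlockDocument filter shapes above in a request body; note that is_anonymous defaults to excluding anonymous blocks, so it is spelled out here. The filter body type is defined later in this hunk; module path and base URL are assumptions.

    // Sketch: fetch non-anonymous block documents whose names start with "aws-".
    import type { components } from "./api-types"; // hypothetical module path

    type BlockDocumentsFilterBody =
      components["schemas"]["Body_read_block_documents_block_documents_filter_post"];

    const body: BlockDocumentsFilterBody = {
      sort: "NAME_ASC",
      offset: 0,
      include_secrets: false,
      block_documents: {
        operator: "and_",
        is_anonymous: { eq_: false },
        name: { like_: "aws-%" },
      },
    };

    const res = await fetch("http://127.0.0.1:4200/api/block_documents/filter", {
      method: "POST",
      headers: { "Content-Type": "application/json" },
      body: JSON.stringify(body),
    });
    const blocks: components["schemas"]["BlockDocument"][] = await res.json();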
- */ - BlockSchemaCreate: { - /** - * Fields - * @description The block schema's field schema - */ - fields?: Record; - /** - * Block Type Id - * Format: uuid - * @description A block type ID - */ - block_type_id: string; - /** - * Capabilities - * @description A list of Block capabilities - */ - capabilities?: string[]; - /** - * Version - * @description Human readable identifier for the block schema - * @default non-versioned - */ - version: string; - }; - /** - * BlockSchemaFilter - * @description Filter BlockSchemas - */ - BlockSchemaFilter: { - /** - * @description Operator for combining filter criteria. Defaults to 'and_'. - * @default and_ - */ - operator: components["schemas"]["Operator"]; - /** @description Filter criteria for `BlockSchema.block_type_id` */ - block_type_id?: - | components["schemas"]["BlockSchemaFilterBlockTypeId"] - | null; - /** @description Filter criteria for `BlockSchema.capabilities` */ - block_capabilities?: - | components["schemas"]["BlockSchemaFilterCapabilities"] - | null; - /** @description Filter criteria for `BlockSchema.id` */ - id?: components["schemas"]["BlockSchemaFilterId"] | null; - /** @description Filter criteria for `BlockSchema.version` */ - version?: components["schemas"]["BlockSchemaFilterVersion"] | null; - }; - /** - * BlockSchemaFilterBlockTypeId - * @description Filter by `BlockSchema.block_type_id`. - */ - BlockSchemaFilterBlockTypeId: { - /** - * Any - * @description A list of block type ids to include - */ - any_?: string[] | null; - }; - /** - * BlockSchemaFilterCapabilities - * @description Filter by `BlockSchema.capabilities` - */ - BlockSchemaFilterCapabilities: { - /** - * All - * @description A list of block capabilities. Block entities will be returned only if an associated block schema has a superset of the defined capabilities. - */ - all_?: string[] | null; - }; - /** - * BlockSchemaFilterId - * @description Filter by BlockSchema.id - */ - BlockSchemaFilterId: { - /** - * Any - * @description A list of IDs to include - */ - any_?: string[] | null; - }; - /** - * BlockSchemaFilterVersion - * @description Filter by `BlockSchema.capabilities` - */ - BlockSchemaFilterVersion: { - /** - * Any - * @description A list of block schema versions. - */ - any_?: string[] | null; - }; - /** - * BlockType - * @description An ORM representation of a block type - */ - BlockType: { - /** - * Id - * Format: uuid - */ - id?: string; - /** Created */ - created?: string | null; - /** Updated */ - updated?: string | null; - /** - * Name - * @description A block type's name - */ - name: string; - /** - * Slug - * @description A block type's slug - */ - slug: string; - /** - * Logo Url - * @description Web URL for the block type's logo - */ - logo_url?: string | null; - /** - * Documentation Url - * @description Web URL for the block type's documentation - */ - documentation_url?: string | null; - /** - * Description - * @description A short blurb about the corresponding block's intended use - */ - description?: string | null; - /** - * Code Example - * @description A code snippet demonstrating use of the corresponding block - */ - code_example?: string | null; - /** - * Is Protected - * @description Protected block types cannot be modified via API. - * @default false - */ - is_protected: boolean; - }; - /** - * BlockTypeCreate - * @description Data used by the Prefect REST API to create a block type. 
- */ - BlockTypeCreate: { - /** - * Name - * @description A block type's name - */ - name: string; - /** - * Slug - * @description A block type's slug - */ - slug: string; - /** - * Logo Url - * @description Web URL for the block type's logo - */ - logo_url?: string | null; - /** - * Documentation Url - * @description Web URL for the block type's documentation - */ - documentation_url?: string | null; - /** - * Description - * @description A short blurb about the corresponding block's intended use - */ - description?: string | null; - /** - * Code Example - * @description A code snippet demonstrating use of the corresponding block - */ - code_example?: string | null; - }; - /** - * BlockTypeFilter - * @description Filter BlockTypes - */ - BlockTypeFilter: { - /** @description Filter criteria for `BlockType.name` */ - name?: components["schemas"]["BlockTypeFilterName"] | null; - /** @description Filter criteria for `BlockType.slug` */ - slug?: components["schemas"]["BlockTypeFilterSlug"] | null; - }; - /** - * BlockTypeFilterName - * @description Filter by `BlockType.name` - */ - BlockTypeFilterName: { - /** - * Like - * @description A case-insensitive partial match. For example, passing 'marvin' will match 'marvin', 'sad-Marvin', and 'marvin-robot'. - */ - like_?: string | null; - }; - /** - * BlockTypeFilterSlug - * @description Filter by `BlockType.slug` - */ - BlockTypeFilterSlug: { - /** - * Any - * @description A list of slugs to match - */ - any_?: string[] | null; - }; - /** - * BlockTypeUpdate - * @description Data used by the Prefect REST API to update a block type. - */ - BlockTypeUpdate: { - /** Logo Url */ - logo_url?: string | null; - /** Documentation Url */ - documentation_url?: string | null; - /** Description */ - description?: string | null; - /** Code Example */ - code_example?: string | null; - }; - /** Body_average_flow_run_lateness_flow_runs_lateness_post */ - Body_average_flow_run_lateness_flow_runs_lateness_post: { - flows?: components["schemas"]["FlowFilter"] | null; - flow_runs?: components["schemas"]["FlowRunFilter"] | null; - task_runs?: components["schemas"]["TaskRunFilter"] | null; - deployments?: components["schemas"]["DeploymentFilter"] | null; - work_pools?: components["schemas"]["WorkPoolFilter"] | null; - work_pool_queues?: components["schemas"]["WorkQueueFilter"] | null; - }; - /** Body_bulk_decrement_active_slots_v2_concurrency_limits_decrement_post */ - Body_bulk_decrement_active_slots_v2_concurrency_limits_decrement_post: { - /** Slots */ - slots: number; - /** Names */ - names: string[]; - /** Occupancy Seconds */ - occupancy_seconds?: number | null; - /** - * Create If Missing - * @default true - */ - create_if_missing: boolean; - }; - /** Body_bulk_increment_active_slots_v2_concurrency_limits_increment_post */ - Body_bulk_increment_active_slots_v2_concurrency_limits_increment_post: { - /** Slots */ - slots: number; - /** Names */ - names: string[]; - /** - * Mode - * @default concurrency - * @enum {string} - */ - mode: "concurrency" | "rate_limit"; - /** Create If Missing */ - create_if_missing?: boolean | null; - }; - /** Body_clear_database_admin_database_clear_post */ - Body_clear_database_admin_database_clear_post: { - /** - * Confirm - * @description Pass confirm=True to confirm you want to modify the database. 
- * @default false - */ - confirm: boolean; - }; - /** Body_count_account_events_events_count_by__countable__post */ - Body_count_account_events_events_count_by__countable__post: { - filter: components["schemas"]["EventFilter"]; - /** @default day */ - time_unit: components["schemas"]["TimeUnit"]; - /** - * Time Interval - * @default 1 - */ - time_interval: number; - }; - /** Body_count_artifacts_artifacts_count_post */ - Body_count_artifacts_artifacts_count_post: { - artifacts?: components["schemas"]["ArtifactFilter"]; - flow_runs?: components["schemas"]["FlowRunFilter"]; - task_runs?: components["schemas"]["TaskRunFilter"]; - flows?: components["schemas"]["FlowFilter"]; - deployments?: components["schemas"]["DeploymentFilter"]; - }; - /** Body_count_block_documents_block_documents_count_post */ - Body_count_block_documents_block_documents_count_post: { - block_documents?: components["schemas"]["BlockDocumentFilter"] | null; - block_types?: components["schemas"]["BlockTypeFilter"] | null; - block_schemas?: components["schemas"]["BlockSchemaFilter"] | null; - }; - /** Body_count_deployments_by_flow_ui_flows_count_deployments_post */ - Body_count_deployments_by_flow_ui_flows_count_deployments_post: { - /** Flow Ids */ - flow_ids: string[]; - }; - /** Body_count_deployments_deployments_count_post */ - Body_count_deployments_deployments_count_post: { - flows?: components["schemas"]["FlowFilter"]; - flow_runs?: components["schemas"]["FlowRunFilter"]; - task_runs?: components["schemas"]["TaskRunFilter"]; - deployments?: components["schemas"]["DeploymentFilter"]; - work_pools?: components["schemas"]["WorkPoolFilter"]; - work_pool_queues?: components["schemas"]["WorkQueueFilter"]; - }; - /** Body_count_flow_runs_flow_runs_count_post */ - Body_count_flow_runs_flow_runs_count_post: { - flows?: components["schemas"]["FlowFilter"]; - flow_runs?: components["schemas"]["FlowRunFilter"]; - task_runs?: components["schemas"]["TaskRunFilter"]; - deployments?: components["schemas"]["DeploymentFilter"]; - work_pools?: components["schemas"]["WorkPoolFilter"]; - work_pool_queues?: components["schemas"]["WorkQueueFilter"]; - }; - /** Body_count_flows_flows_count_post */ - Body_count_flows_flows_count_post: { - flows?: components["schemas"]["FlowFilter"]; - flow_runs?: components["schemas"]["FlowRunFilter"]; - task_runs?: components["schemas"]["TaskRunFilter"]; - deployments?: components["schemas"]["DeploymentFilter"]; - work_pools?: components["schemas"]["WorkPoolFilter"]; - }; - /** Body_count_latest_artifacts_artifacts_latest_count_post */ - Body_count_latest_artifacts_artifacts_latest_count_post: { - artifacts?: components["schemas"]["ArtifactCollectionFilter"]; - flow_runs?: components["schemas"]["FlowRunFilter"]; - task_runs?: components["schemas"]["TaskRunFilter"]; - flows?: components["schemas"]["FlowFilter"]; - deployments?: components["schemas"]["DeploymentFilter"]; - }; - /** Body_count_task_runs_by_flow_run_ui_flow_runs_count_task_runs_post */ - Body_count_task_runs_by_flow_run_ui_flow_runs_count_task_runs_post: { - /** Flow Run Ids */ - flow_run_ids: string[]; - }; - /** Body_count_task_runs_task_runs_count_post */ - Body_count_task_runs_task_runs_count_post: { - flows?: components["schemas"]["FlowFilter"]; - flow_runs?: components["schemas"]["FlowRunFilter"]; - task_runs?: components["schemas"]["TaskRunFilter"]; - deployments?: components["schemas"]["DeploymentFilter"]; - }; - /** Body_count_variables_variables_count_post */ - Body_count_variables_variables_count_post: { - variables?: 
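The admin database bodies above all share the same confirm flag, which defaults to false; a short sketch of sending it explicitly. The base URL is an assumption, and this call is destructive, so it is only meant for local or development servers.

    // Sketch: clear all tables on a local dev server (base URL assumed).
    const API = "http://127.0.0.1:4200/api";

    async function clearDatabase(): Promise<void> {
      const res = await fetch(`${API}/admin/database/clear`, {
        method: "POST",
        headers: { "Content-Type": "application/json" },
        // confirm defaults to false; pass true to confirm the modification.
        body: JSON.stringify({ confirm: true }),
      });
      if (!res.ok) throw new Error(`clear failed: ${res.status}`);
    }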
components["schemas"]["VariableFilter"] | null; - }; - /** Body_count_work_pools_work_pools_count_post */ - Body_count_work_pools_work_pools_count_post: { - work_pools?: components["schemas"]["WorkPoolFilter"] | null; - }; - /** Body_create_database_admin_database_create_post */ - Body_create_database_admin_database_create_post: { - /** - * Confirm - * @description Pass confirm=True to confirm you want to modify the database. - * @default false - */ - confirm: boolean; - }; - /** Body_create_flow_run_input_flow_runs__id__input_post */ - Body_create_flow_run_input_flow_runs__id__input_post: { - /** - * Key - * @description The input key - */ - key: string; - /** - * Value - * Format: binary - * @description The value of the input - */ - value: string; - /** - * Sender - * @description The sender of the input - */ - sender?: string | null; - }; - /** Body_decrement_concurrency_limits_v1_concurrency_limits_decrement_post */ - Body_decrement_concurrency_limits_v1_concurrency_limits_decrement_post: { - /** - * Names - * @description The tags to release a slot for - */ - names: string[]; - /** - * Task Run Id - * Format: uuid - * @description The ID of the task run releasing the slot - */ - task_run_id: string; - }; - /** Body_drop_database_admin_database_drop_post */ - Body_drop_database_admin_database_drop_post: { - /** - * Confirm - * @description Pass confirm=True to confirm you want to modify the database. - * @default false - */ - confirm: boolean; - }; - /** Body_filter_flow_run_input_flow_runs__id__input_filter_post */ - Body_filter_flow_run_input_flow_runs__id__input_filter_post: { - /** - * Prefix - * @description The input key prefix - */ - prefix: string; - /** - * Limit - * @description The maximum number of results to return - * @default 1 - */ - limit: number; - /** - * Exclude Keys - * @description Exclude inputs with these keys - * @default [] - */ - exclude_keys: string[]; - }; - /** Body_flow_run_history_flow_runs_history_post */ - Body_flow_run_history_flow_runs_history_post: { - /** - * History Start - * Format: date-time - * @description The history's start time. - */ - history_start: string; - /** - * History End - * Format: date-time - * @description The history's end time. - */ - history_end: string; - /** - * History Interval - * Format: time-delta - * @description The size of each history interval, in seconds. Must be at least 1 second. - */ - history_interval: number; - flows?: components["schemas"]["FlowFilter"]; - flow_runs?: components["schemas"]["FlowRunFilter"]; - task_runs?: components["schemas"]["TaskRunFilter"]; - deployments?: components["schemas"]["DeploymentFilter"]; - work_pools?: components["schemas"]["WorkPoolFilter"]; - work_queues?: components["schemas"]["WorkQueueFilter"]; - }; - /** Body_get_scheduled_flow_runs_for_deployments_deployments_get_scheduled_flow_runs_post */ - Body_get_scheduled_flow_runs_for_deployments_deployments_get_scheduled_flow_runs_post: { - /** - * Deployment Ids - * @description The deployment IDs to get scheduled runs for - */ - deployment_ids: string[]; - /** - * Scheduled Before - * Format: date-time - * @description The maximum time to look for scheduled flow runs - */ - scheduled_before?: string; - /** - * Limit - * @description Defaults to PREFECT_API_DEFAULT_LIMIT if not provided. 
- */ - limit?: number; - }; - /** Body_get_scheduled_flow_runs_work_pools__name__get_scheduled_flow_runs_post */ - Body_get_scheduled_flow_runs_work_pools__name__get_scheduled_flow_runs_post: { - /** - * Work Queue Names - * @description The names of work pool queues - */ - work_queue_names?: string[]; - /** - * Scheduled Before - * Format: date-time - * @description The maximum time to look for scheduled flow runs - */ - scheduled_before?: string; - /** - * Scheduled After - * Format: date-time - * @description The minimum time to look for scheduled flow runs - */ - scheduled_after?: string; - /** - * Limit - * @description Defaults to PREFECT_API_DEFAULT_LIMIT if not provided. - */ - limit?: number; - }; - /** Body_increment_concurrency_limits_v1_concurrency_limits_increment_post */ - Body_increment_concurrency_limits_v1_concurrency_limits_increment_post: { - /** - * Names - * @description The tags to acquire a slot for - */ - names: string[]; - /** - * Task Run Id - * Format: uuid - * @description The ID of the task run acquiring the slot - */ - task_run_id: string; - }; - /** Body_next_runs_by_flow_ui_flows_next_runs_post */ - Body_next_runs_by_flow_ui_flows_next_runs_post: { - /** Flow Ids */ - flow_ids: string[]; - }; - /** Body_paginate_deployments_deployments_paginate_post */ - Body_paginate_deployments_deployments_paginate_post: { - /** - * Page - * @default 1 - */ - page: number; - flows?: components["schemas"]["FlowFilter"]; - flow_runs?: components["schemas"]["FlowRunFilter"]; - task_runs?: components["schemas"]["TaskRunFilter"]; - deployments?: components["schemas"]["DeploymentFilter"]; - work_pools?: components["schemas"]["WorkPoolFilter"]; - work_pool_queues?: components["schemas"]["WorkQueueFilter"]; - /** @default NAME_ASC */ - sort: components["schemas"]["DeploymentSort"]; - /** - * Limit - * @description Defaults to PREFECT_API_DEFAULT_LIMIT if not provided. - */ - limit?: number; - }; - /** Body_paginate_flow_runs_flow_runs_paginate_post */ - Body_paginate_flow_runs_flow_runs_paginate_post: { - /** @default ID_DESC */ - sort: components["schemas"]["FlowRunSort"]; - /** - * Page - * @default 1 - */ - page: number; - flows?: components["schemas"]["FlowFilter"] | null; - flow_runs?: components["schemas"]["FlowRunFilter"] | null; - task_runs?: components["schemas"]["TaskRunFilter"] | null; - deployments?: components["schemas"]["DeploymentFilter"] | null; - work_pools?: components["schemas"]["WorkPoolFilter"] | null; - work_pool_queues?: components["schemas"]["WorkQueueFilter"] | null; - /** - * Limit - * @description Defaults to PREFECT_API_DEFAULT_LIMIT if not provided. - */ - limit?: number; - }; - /** Body_paginate_flows_flows_paginate_post */ - Body_paginate_flows_flows_paginate_post: { - /** - * Page - * @default 1 - */ - page: number; - flows?: components["schemas"]["FlowFilter"] | null; - flow_runs?: components["schemas"]["FlowRunFilter"] | null; - task_runs?: components["schemas"]["TaskRunFilter"] | null; - deployments?: components["schemas"]["DeploymentFilter"] | null; - work_pools?: components["schemas"]["WorkPoolFilter"] | null; - /** @default NAME_ASC */ - sort: components["schemas"]["FlowSort"]; - /** - * Limit - * @description Defaults to PREFECT_API_DEFAULT_LIMIT if not provided. 
- */ - limit?: number; - }; - /** Body_read_all_concurrency_limits_v2_v2_concurrency_limits_filter_post */ - Body_read_all_concurrency_limits_v2_v2_concurrency_limits_filter_post: { - /** - * Offset - * @default 0 - */ - offset: number; - /** - * Limit - * @description Defaults to PREFECT_API_DEFAULT_LIMIT if not provided. - */ - limit?: number; - }; - /** Body_read_artifacts_artifacts_filter_post */ - Body_read_artifacts_artifacts_filter_post: { - /** @default ID_DESC */ - sort: components["schemas"]["ArtifactSort"]; - /** - * Offset - * @default 0 - */ - offset: number; - artifacts?: components["schemas"]["ArtifactFilter"]; - flow_runs?: components["schemas"]["FlowRunFilter"]; - task_runs?: components["schemas"]["TaskRunFilter"]; - flows?: components["schemas"]["FlowFilter"]; - deployments?: components["schemas"]["DeploymentFilter"]; - /** - * Limit - * @description Defaults to PREFECT_API_DEFAULT_LIMIT if not provided. - */ - limit?: number; - }; - /** Body_read_automations_automations_filter_post */ - Body_read_automations_automations_filter_post: { - /** @default NAME_ASC */ - sort: components["schemas"]["AutomationSort"]; - /** - * Offset - * @default 0 - */ - offset: number; - automations?: components["schemas"]["AutomationFilter"] | null; - /** - * Limit - * @description Defaults to PREFECT_API_DEFAULT_LIMIT if not provided. - */ - limit?: number; - }; - /** Body_read_block_documents_block_documents_filter_post */ - Body_read_block_documents_block_documents_filter_post: { - block_documents?: components["schemas"]["BlockDocumentFilter"] | null; - block_types?: components["schemas"]["BlockTypeFilter"] | null; - block_schemas?: components["schemas"]["BlockSchemaFilter"] | null; - /** - * Include Secrets - * @description Whether to include sensitive values in the block document. - * @default false - */ - include_secrets: boolean; - /** @default NAME_ASC */ - sort: components["schemas"]["BlockDocumentSort"] | null; - /** - * Offset - * @default 0 - */ - offset: number; - /** - * Limit - * @description Defaults to PREFECT_API_DEFAULT_LIMIT if not provided. - */ - limit?: number; - }; - /** Body_read_block_schemas_block_schemas_filter_post */ - Body_read_block_schemas_block_schemas_filter_post: { - block_schemas?: components["schemas"]["BlockSchemaFilter"] | null; - /** - * Offset - * @default 0 - */ - offset: number; - /** - * Limit - * @description Defaults to PREFECT_API_DEFAULT_LIMIT if not provided. - */ - limit?: number; - }; - /** Body_read_block_types_block_types_filter_post */ - Body_read_block_types_block_types_filter_post: { - block_types?: components["schemas"]["BlockTypeFilter"] | null; - block_schemas?: components["schemas"]["BlockSchemaFilter"] | null; - /** - * Offset - * @default 0 - */ - offset: number; - /** - * Limit - * @description Defaults to PREFECT_API_DEFAULT_LIMIT if not provided. - */ - limit?: number; - }; - /** Body_read_concurrency_limits_concurrency_limits_filter_post */ - Body_read_concurrency_limits_concurrency_limits_filter_post: { - /** - * Offset - * @default 0 - */ - offset: number; - /** - * Limit - * @description Defaults to PREFECT_API_DEFAULT_LIMIT if not provided. 
- */ - limit?: number; - }; - /** Body_read_dashboard_task_run_counts_ui_task_runs_dashboard_counts_post */ - Body_read_dashboard_task_run_counts_ui_task_runs_dashboard_counts_post: { - task_runs: components["schemas"]["TaskRunFilter"]; - flows?: components["schemas"]["FlowFilter"] | null; - flow_runs?: components["schemas"]["FlowRunFilter"] | null; - deployments?: components["schemas"]["DeploymentFilter"] | null; - work_pools?: components["schemas"]["WorkPoolFilter"] | null; - work_queues?: components["schemas"]["WorkQueueFilter"] | null; - }; - /** Body_read_deployments_deployments_filter_post */ - Body_read_deployments_deployments_filter_post: { - /** - * Offset - * @default 0 - */ - offset: number; - flows?: components["schemas"]["FlowFilter"]; - flow_runs?: components["schemas"]["FlowRunFilter"]; - task_runs?: components["schemas"]["TaskRunFilter"]; - deployments?: components["schemas"]["DeploymentFilter"]; - work_pools?: components["schemas"]["WorkPoolFilter"]; - work_pool_queues?: components["schemas"]["WorkQueueFilter"]; - /** @default NAME_ASC */ - sort: components["schemas"]["DeploymentSort"]; - /** - * Limit - * @description Defaults to PREFECT_API_DEFAULT_LIMIT if not provided. - */ - limit?: number; - }; - /** Body_read_events_events_filter_post */ - Body_read_events_events_filter_post: { - /** @description Additional optional filter criteria to narrow down the set of Events */ - filter?: components["schemas"]["EventFilter"] | null; - /** - * Limit - * @description The number of events to return with each page - * @default 50 - */ - limit: number; - }; - /** Body_read_flow_run_history_ui_flow_runs_history_post */ - Body_read_flow_run_history_ui_flow_runs_history_post: { - /** @default EXPECTED_START_TIME_DESC */ - sort: components["schemas"]["FlowRunSort"]; - /** - * Limit - * @default 1000 - */ - limit: number; - /** - * Offset - * @default 0 - */ - offset: number; - flows?: components["schemas"]["FlowFilter"]; - flow_runs?: components["schemas"]["FlowRunFilter"]; - task_runs?: components["schemas"]["TaskRunFilter"]; - deployments?: components["schemas"]["DeploymentFilter"]; - work_pools?: components["schemas"]["WorkPoolFilter"]; - }; - /** Body_read_flow_run_notification_policies_flow_run_notification_policies_filter_post */ - Body_read_flow_run_notification_policies_flow_run_notification_policies_filter_post: { - flow_run_notification_policy_filter?: components["schemas"]["FlowRunNotificationPolicyFilter"]; - /** - * Offset - * @default 0 - */ - offset: number; - /** - * Limit - * @description Defaults to PREFECT_API_DEFAULT_LIMIT if not provided. - */ - limit?: number; - }; - /** Body_read_flow_runs_flow_runs_filter_post */ - Body_read_flow_runs_flow_runs_filter_post: { - /** @default ID_DESC */ - sort: components["schemas"]["FlowRunSort"]; - /** - * Offset - * @default 0 - */ - offset: number; - flows?: components["schemas"]["FlowFilter"] | null; - flow_runs?: components["schemas"]["FlowRunFilter"] | null; - task_runs?: components["schemas"]["TaskRunFilter"] | null; - deployments?: components["schemas"]["DeploymentFilter"] | null; - work_pools?: components["schemas"]["WorkPoolFilter"] | null; - work_pool_queues?: components["schemas"]["WorkQueueFilter"] | null; - /** - * Limit - * @description Defaults to PREFECT_API_DEFAULT_LIMIT if not provided. 
- */ - limit?: number; - }; - /** Body_read_flows_flows_filter_post */ - Body_read_flows_flows_filter_post: { - /** - * Offset - * @default 0 - */ - offset: number; - flows?: components["schemas"]["FlowFilter"]; - flow_runs?: components["schemas"]["FlowRunFilter"]; - task_runs?: components["schemas"]["TaskRunFilter"]; - deployments?: components["schemas"]["DeploymentFilter"]; - work_pools?: components["schemas"]["WorkPoolFilter"]; - /** @default NAME_ASC */ - sort: components["schemas"]["FlowSort"]; - /** - * Limit - * @description Defaults to PREFECT_API_DEFAULT_LIMIT if not provided. - */ - limit?: number; - }; - /** Body_read_latest_artifacts_artifacts_latest_filter_post */ - Body_read_latest_artifacts_artifacts_latest_filter_post: { - /** @default ID_DESC */ - sort: components["schemas"]["ArtifactCollectionSort"]; - /** - * Offset - * @default 0 - */ - offset: number; - artifacts?: components["schemas"]["ArtifactCollectionFilter"]; - flow_runs?: components["schemas"]["FlowRunFilter"]; - task_runs?: components["schemas"]["TaskRunFilter"]; - flows?: components["schemas"]["FlowFilter"]; - deployments?: components["schemas"]["DeploymentFilter"]; - /** - * Limit - * @description Defaults to PREFECT_API_DEFAULT_LIMIT if not provided. - */ - limit?: number; - }; - /** Body_read_logs_logs_filter_post */ - Body_read_logs_logs_filter_post: { - /** - * Offset - * @default 0 - */ - offset: number; - logs?: components["schemas"]["LogFilter"]; - /** @default TIMESTAMP_ASC */ - sort: components["schemas"]["LogSort"]; - /** - * Limit - * @description Defaults to PREFECT_API_DEFAULT_LIMIT if not provided. - */ - limit?: number; - }; - /** Body_read_saved_searches_saved_searches_filter_post */ - Body_read_saved_searches_saved_searches_filter_post: { - /** - * Offset - * @default 0 - */ - offset: number; - /** - * Limit - * @description Defaults to PREFECT_API_DEFAULT_LIMIT if not provided. - */ - limit?: number; - }; - /** Body_read_task_run_counts_by_state_ui_task_runs_count_post */ - Body_read_task_run_counts_by_state_ui_task_runs_count_post: { - flows?: components["schemas"]["FlowFilter"] | null; - flow_runs?: components["schemas"]["FlowRunFilter"] | null; - task_runs?: components["schemas"]["TaskRunFilter"] | null; - deployments?: components["schemas"]["DeploymentFilter"] | null; - }; - /** Body_read_task_runs_task_runs_filter_post */ - Body_read_task_runs_task_runs_filter_post: { - /** @default ID_DESC */ - sort: components["schemas"]["TaskRunSort"]; - /** - * Offset - * @default 0 - */ - offset: number; - flows?: components["schemas"]["FlowFilter"] | null; - flow_runs?: components["schemas"]["FlowRunFilter"] | null; - task_runs?: components["schemas"]["TaskRunFilter"] | null; - deployments?: components["schemas"]["DeploymentFilter"] | null; - /** - * Limit - * @description Defaults to PREFECT_API_DEFAULT_LIMIT if not provided. - */ - limit?: number; - }; - /** Body_read_task_workers_task_workers_filter_post */ - Body_read_task_workers_task_workers_filter_post: { - /** @description The task worker filter */ - task_worker_filter?: components["schemas"]["TaskWorkerFilter"] | null; - }; - /** Body_read_variables_variables_filter_post */ - Body_read_variables_variables_filter_post: { - /** - * Offset - * @default 0 - */ - offset: number; - variables?: components["schemas"]["VariableFilter"] | null; - /** @default NAME_ASC */ - sort: components["schemas"]["VariableSort"]; - /** - * Limit - * @description Defaults to PREFECT_API_DEFAULT_LIMIT if not provided. 
- */ - limit?: number; - }; - /** Body_read_work_pools_work_pools_filter_post */ - Body_read_work_pools_work_pools_filter_post: { - work_pools?: components["schemas"]["WorkPoolFilter"] | null; - /** - * Offset - * @default 0 - */ - offset: number; - /** - * Limit - * @description Defaults to PREFECT_API_DEFAULT_LIMIT if not provided. - */ - limit?: number; - }; - /** Body_read_work_queue_runs_work_queues__id__get_runs_post */ - Body_read_work_queue_runs_work_queues__id__get_runs_post: { - /** - * Scheduled Before - * Format: date-time - * @description Only flow runs scheduled to start before this time will be returned. - */ - scheduled_before?: string; - /** - * Agent Id - * @description An optional unique identifier for the agent making this query. If provided, the Prefect REST API will track the last time this agent polled the work queue. - */ - agent_id?: string | null; - /** - * Limit - * @description Defaults to PREFECT_API_DEFAULT_LIMIT if not provided. - */ - limit?: number; - }; - /** Body_read_work_queues_work_pools__work_pool_name__queues_filter_post */ - Body_read_work_queues_work_pools__work_pool_name__queues_filter_post: { - work_queues?: components["schemas"]["WorkQueueFilter"]; - /** - * Offset - * @default 0 - */ - offset: number; - /** - * Limit - * @description Defaults to PREFECT_API_DEFAULT_LIMIT if not provided. - */ - limit?: number; - }; - /** Body_read_work_queues_work_queues_filter_post */ - Body_read_work_queues_work_queues_filter_post: { - /** - * Offset - * @default 0 - */ - offset: number; - work_queues?: components["schemas"]["WorkQueueFilter"]; - /** - * Limit - * @description Defaults to PREFECT_API_DEFAULT_LIMIT if not provided. - */ - limit?: number; - }; - /** Body_read_workers_work_pools__work_pool_name__workers_filter_post */ - Body_read_workers_work_pools__work_pool_name__workers_filter_post: { - workers?: components["schemas"]["WorkerFilter"] | null; - /** - * Offset - * @default 0 - */ - offset: number; - /** - * Limit - * @description Defaults to PREFECT_API_DEFAULT_LIMIT if not provided. - */ - limit?: number; - }; - /** Body_reset_concurrency_limit_by_tag_concurrency_limits_tag__tag__reset_post */ - Body_reset_concurrency_limit_by_tag_concurrency_limits_tag__tag__reset_post: { - /** - * Slot Override - * @description Manual override for active concurrency limit slots. - */ - slot_override?: string[] | null; - }; - /** Body_resume_flow_run_flow_runs__id__resume_post */ - Body_resume_flow_run_flow_runs__id__resume_post: { - /** Run Input */ - run_input?: Record | null; - }; - /** Body_schedule_deployment_deployments__id__schedule_post */ - Body_schedule_deployment_deployments__id__schedule_post: { - /** - * Start Time - * Format: date-time - * @description The earliest date to schedule - */ - start_time?: string; - /** - * End Time - * Format: date-time - * @description The latest date to schedule - */ - end_time?: string; - /** - * Min Time - * Format: time-delta - * @description Runs will be scheduled until at least this long after the `start_time` - */ - min_time?: number; - /** - * Min Runs - * @description The minimum number of runs to schedule - */ - min_runs?: number; - /** - * Max Runs - * @description The maximum number of runs to schedule - */ - max_runs?: number; - }; - /** Body_set_flow_run_state_flow_runs__id__set_state_post */ - Body_set_flow_run_state_flow_runs__id__set_state_post: { - /** @description The intended state. 
*/ - state: components["schemas"]["StateCreate"]; - /** - * Force - * @description If false, orchestration rules will be applied that may alter or prevent the state transition. If True, orchestration rules are not applied. - * @default false - */ - force: boolean; - }; - /** Body_set_task_run_state_task_runs__id__set_state_post */ - Body_set_task_run_state_task_runs__id__set_state_post: { - /** @description The intended state. */ - state: components["schemas"]["StateCreate"]; - /** - * Force - * @description If false, orchestration rules will be applied that may alter or prevent the state transition. If True, orchestration rules are not applied. - * @default false - */ - force: boolean; - }; - /** Body_task_run_history_task_runs_history_post */ - Body_task_run_history_task_runs_history_post: { - /** - * History Start - * Format: date-time - * @description The history's start time. - */ - history_start: string; - /** - * History End - * Format: date-time - * @description The history's end time. - */ - history_end: string; - /** - * History Interval - * Format: time-delta - * @description The size of each history interval, in seconds. Must be at least 1 second. - */ - history_interval: number; - flows?: components["schemas"]["FlowFilter"]; - flow_runs?: components["schemas"]["FlowRunFilter"]; - task_runs?: components["schemas"]["TaskRunFilter"]; - deployments?: components["schemas"]["DeploymentFilter"]; - }; - /** Body_validate_obj_ui_schemas_validate_post */ - Body_validate_obj_ui_schemas_validate_post: { - /** Json Schema */ - json_schema: Record; - /** Values */ - values: Record; - }; - /** Body_worker_heartbeat_work_pools__work_pool_name__workers_heartbeat_post */ - Body_worker_heartbeat_work_pools__work_pool_name__workers_heartbeat_post: { - /** - * Name - * @description The worker process name - */ - name: string; - /** - * Heartbeat Interval Seconds - * @description The worker's heartbeat interval in seconds - */ - heartbeat_interval_seconds?: number | null; - }; - /** - * CallWebhook - * @description Call a webhook when an Automation is triggered. - */ - CallWebhook: { - /** - * Type - * @default call-webhook - * @constant - * @enum {string} - */ - type: "call-webhook"; - /** - * Block Document Id - * Format: uuid - * @description The identifier of the webhook block to use - */ - block_document_id: string; - /** - * Payload - * @description An optional templatable payload to send when calling the webhook. 
- * @default - */ - payload: string; - }; - /** - * CancelFlowRun - * @description Cancels a flow run associated with the trigger - */ - CancelFlowRun: { - /** - * Type - * @default cancel-flow-run - * @constant - * @enum {string} - */ - type: "cancel-flow-run"; - }; - /** - * ChangeFlowRunState - * @description Changes the state of a flow run associated with the trigger - */ - ChangeFlowRunState: { - /** - * Type - * @default change-flow-run-state - * @constant - * @enum {string} - */ - type: "change-flow-run-state"; - /** - * Name - * @description The name of the state to change the flow run to - */ - name?: string | null; - /** @description The type of the state to change the flow run to */ - state: components["schemas"]["StateType"]; - /** - * Message - * @description An optional message to associate with the state change - */ - message?: string | null; - }; - /** - * CompoundTrigger - * @description A composite trigger that requires some number of triggers to have - * fired within the given time period - */ - "CompoundTrigger-Input": { - /** - * Type - * @default compound - * @constant - * @enum {string} - */ - type: "compound"; - /** - * Id - * Format: uuid - * @description The unique ID of this trigger - */ - id?: string; - /** Triggers */ - triggers: ( - | components["schemas"]["EventTrigger"] - | components["schemas"]["CompoundTrigger-Input"] - | components["schemas"]["SequenceTrigger-Input"] - )[]; - /** Within */ - within: number | null; - /** Require */ - require: number | ("any" | "all"); - }; - /** - * CompoundTrigger - * @description A composite trigger that requires some number of triggers to have - * fired within the given time period - */ - "CompoundTrigger-Output": { - /** - * Type - * @default compound - * @constant - * @enum {string} - */ - type: "compound"; - /** - * Id - * Format: uuid - * @description The unique ID of this trigger - */ - id?: string; - /** Triggers */ - triggers: ( - | components["schemas"]["EventTrigger"] - | components["schemas"]["CompoundTrigger-Output"] - | components["schemas"]["SequenceTrigger-Output"] - )[]; - /** Within */ - within: number | null; - /** Require */ - require: number | ("any" | "all"); - }; - /** - * ConcurrencyLimit - * @description An ORM representation of a concurrency limit. - */ - ConcurrencyLimit: { - /** - * Id - * Format: uuid - */ - id?: string; - /** Created */ - created?: string | null; - /** Updated */ - updated?: string | null; - /** - * Tag - * @description A tag the concurrency limit is applied to. - */ - tag: string; - /** - * Concurrency Limit - * @description The concurrency limit. - */ - concurrency_limit: number; - /** - * Active Slots - * @description A list of active run ids using a concurrency slot - */ - active_slots?: string[]; - }; - /** - * ConcurrencyLimitCreate - * @description Data used by the Prefect REST API to create a concurrency limit. - */ - ConcurrencyLimitCreate: { - /** - * Tag - * @description A tag the concurrency limit is applied to. - */ - tag: string; - /** - * Concurrency Limit - * @description The concurrency limit. - */ - concurrency_limit: number; - }; - /** - * ConcurrencyLimitStrategy - * @description Enumeration of concurrency collision strategies. - * @enum {string} - */ - ConcurrencyLimitStrategy: "ENQUEUE" | "CANCEL_NEW"; - /** - * ConcurrencyLimitV2 - * @description An ORM representation of a v2 concurrency limit. 
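// Illustrative sketch (not part of the generated schema): an automation action
// payload using the ChangeFlowRunState schema above. "FAILED" is one of the
// StateType values enumerated elsewhere in this schema; `name` is optional.
// Import path is hypothetical.
import type { components } from "./prefect";

const failTheRun: components["schemas"]["ChangeFlowRunState"] = {
  type: "change-flow-run-state",
  state: "FAILED",
  message: "Failed by automation", // optional message recorded with the state change
};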
- */ - ConcurrencyLimitV2: { - /** - * Id - * Format: uuid - */ - id?: string; - /** Created */ - created?: string | null; - /** Updated */ - updated?: string | null; - /** - * Active - * @description Whether the concurrency limit is active. - * @default true - */ - active: boolean; - /** - * Name - * @description The name of the concurrency limit. - */ - name: string; - /** - * Limit - * @description The concurrency limit. - */ - limit: number; - /** - * Active Slots - * @description The number of active slots. - * @default 0 - */ - active_slots: number; - /** - * Denied Slots - * @description The number of denied slots. - * @default 0 - */ - denied_slots: number; - /** - * Slot Decay Per Second - * @description The decay rate for active slots when used as a rate limit. - * @default 0 - */ - slot_decay_per_second: number; - /** - * Avg Slot Occupancy Seconds - * @description The average amount of time a slot is occupied. - * @default 2 - */ - avg_slot_occupancy_seconds: number; - }; - /** - * ConcurrencyLimitV2Create - * @description Data used by the Prefect REST API to create a v2 concurrency limit. - */ - ConcurrencyLimitV2Create: { - /** - * Active - * @description Whether the concurrency limit is active. - * @default true - */ - active: boolean; - /** - * Name - * @description The name of the concurrency limit. - */ - name: string; - /** - * Limit - * @description The concurrency limit. - */ - limit: number; - /** - * Active Slots - * @description The number of active slots. - * @default 0 - */ - active_slots: number; - /** - * Denied Slots - * @description The number of denied slots. - * @default 0 - */ - denied_slots: number; - /** - * Slot Decay Per Second - * @description The decay rate for active slots when used as a rate limit. - * @default 0 - */ - slot_decay_per_second: number; - }; - /** - * ConcurrencyLimitV2Update - * @description Data used by the Prefect REST API to update a v2 concurrency limit. - */ - ConcurrencyLimitV2Update: { - /** Active */ - active?: boolean | null; - /** Name */ - name?: string | null; - /** Limit */ - limit?: number | null; - /** Active Slots */ - active_slots?: number | null; - /** Denied Slots */ - denied_slots?: number | null; - /** Slot Decay Per Second */ - slot_decay_per_second?: number | null; - }; - /** - * ConcurrencyOptions - * @description Class for storing the concurrency config in database. - */ - ConcurrencyOptions: { - collision_strategy: components["schemas"]["ConcurrencyLimitStrategy"]; - }; - /** - * Constant - * @description Represents constant input value to a task run. - */ - Constant: { - /** - * Input Type - * @default constant - * @constant - * @enum {string} - */ - input_type: "constant"; - /** Type */ - type: string; - }; - /** CountByState */ - CountByState: { - /** - * Completed - * @default 0 - */ - COMPLETED: number; - /** - * Pending - * @default 0 - */ - PENDING: number; - /** - * Running - * @default 0 - */ - RUNNING: number; - /** - * Failed - * @default 0 - */ - FAILED: number; - /** - * Cancelled - * @default 0 - */ - CANCELLED: number; - /** - * Crashed - * @default 0 - */ - CRASHED: number; - /** - * Paused - * @default 0 - */ - PAUSED: number; - /** - * Cancelling - * @default 0 - */ - CANCELLING: number; - /** - * Scheduled - * @default 0 - */ - SCHEDULED: number; - }; - /** - * Countable - * @enum {string} - */ - Countable: "day" | "time" | "event" | "resource"; - /** CreatedBy */ - CreatedBy: { - /** - * Id - * @description The id of the creator of the object. 
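// Illustrative sketch (not part of the generated schema): creating a v2
// concurrency limit. Fields with documented defaults are still required by the
// generated type, so they are written out explicitly; a non-zero
// `slot_decay_per_second` is how the limit behaves as a rate limit.
// Import path is hypothetical.
import type { components } from "./prefect";

const rateLimit: components["schemas"]["ConcurrencyLimitV2Create"] = {
  active: true,
  name: "external-api-calls",
  limit: 10,
  active_slots: 0,
  denied_slots: 0,
  slot_decay_per_second: 2, // decay rate for active slots when used as a rate limit
};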
- */ - id?: string | null; - /** - * Type - * @description The type of the creator of the object. - */ - type?: string | null; - /** - * Display Value - * @description The display value for the creator. - */ - display_value?: string | null; - }; - /** - * CronSchedule - * @description Cron schedule - * - * NOTE: If the timezone is a DST-observing one, then the schedule will adjust - * itself appropriately. Cron's rules for DST are based on schedule times, not - * intervals. This means that an hourly cron schedule will fire on every new - * schedule hour, not every elapsed hour; for example, when clocks are set back - * this will result in a two-hour pause as the schedule will fire *the first - * time* 1am is reached and *the first time* 2am is reached, 120 minutes later. - * Longer schedules, such as one that fires at 9am every morning, will - * automatically adjust for DST. - * - * Args: - * cron (str): a valid cron string - * timezone (str): a valid timezone string in IANA tzdata format (for example, - * America/New_York). - * day_or (bool, optional): Control how croniter handles `day` and `day_of_week` - * entries. Defaults to True, matching cron which connects those values using - * OR. If the switch is set to False, the values are connected using AND. This - * behaves like fcron and enables you to e.g. define a job that executes each - * 2nd friday of a month by setting the days of month and the weekday. - */ - CronSchedule: { - /** Cron */ - cron: string; - /** Timezone */ - timezone?: string | null; - /** - * Day Or - * @description Control croniter behavior for handling day and day_of_week entries. - * @default true - */ - day_or: boolean; - }; - /** CsrfToken */ - CsrfToken: { - /** - * Id - * Format: uuid - */ - id?: string; - /** Created */ - created?: string | null; - /** Updated */ - updated?: string | null; - /** - * Token - * @description The CSRF token - */ - token: string; - /** - * Client - * @description The client id associated with the CSRF token - */ - client: string; - /** - * Expiration - * Format: date-time - * @description The expiration time of the CSRF token - */ - expiration: string; - }; - /** DependencyResult */ - DependencyResult: { - /** - * Id - * Format: uuid - */ - id: string; - /** Name */ - name: string; - /** Upstream Dependencies */ - upstream_dependencies: components["schemas"]["TaskRunResult"][]; - state: components["schemas"]["State"] | null; - /** Expected Start Time */ - expected_start_time: string | null; - /** Start Time */ - start_time: string | null; - /** End Time */ - end_time: string | null; - /** Total Run Time */ - total_run_time: number | null; - /** Estimated Run Time */ - estimated_run_time: number | null; - /** Untrackable Result */ - untrackable_result: boolean; - }; - /** - * DeploymentCreate - * @description Data used by the Prefect REST API to create a deployment. - */ - DeploymentCreate: { - /** - * Name - * @description The name of the deployment. - */ - name: string; - /** - * Flow Id - * Format: uuid - * @description The ID of the flow associated with the deployment. - */ - flow_id: string; - /** - * Paused - * @description Whether or not the deployment is paused. - * @default false - */ - paused: boolean; - /** - * Schedules - * @description A list of schedules for the deployment. - */ - schedules?: components["schemas"]["DeploymentScheduleCreate"][]; - /** - * Concurrency Limit - * @description The deployment's concurrency limit. 
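// Illustrative sketch (not part of the generated schema): the CronSchedule
// documented above, firing at 9am every morning in a DST-observing timezone.
// Import path is hypothetical.
import type { components } from "./prefect";

const morningSchedule: components["schemas"]["CronSchedule"] = {
  cron: "0 9 * * *",            // 9am every morning; DST handled as described above
  timezone: "America/New_York", // IANA tzdata name
  day_or: true,                 // cron-style OR between day-of-month and day-of-week
};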
- */ - concurrency_limit?: number | null; - /** @description The deployment's concurrency options. */ - concurrency_options?: components["schemas"]["ConcurrencyOptions"] | null; - /** - * Enforce Parameter Schema - * @description Whether or not the deployment should enforce the parameter schema. - * @default true - */ - enforce_parameter_schema: boolean; - /** - * Parameter Openapi Schema - * @description The parameter schema of the flow, including defaults. - */ - parameter_openapi_schema?: Record | null; - /** - * Parameters - * @description Parameters for flow runs scheduled by the deployment. - */ - parameters?: Record; - /** - * Tags - * @description A list of deployment tags. - */ - tags?: string[]; - /** Pull Steps */ - pull_steps?: Record[] | null; - /** Work Queue Name */ - work_queue_name?: string | null; - /** - * Work Pool Name - * @description The name of the deployment's work pool. - */ - work_pool_name?: string | null; - /** Storage Document Id */ - storage_document_id?: string | null; - /** Infrastructure Document Id */ - infrastructure_document_id?: string | null; - /** Description */ - description?: string | null; - /** Path */ - path?: string | null; - /** Version */ - version?: string | null; - /** Entrypoint */ - entrypoint?: string | null; - /** - * Job Variables - * @description Overrides for the flow's infrastructure configuration. - */ - job_variables?: Record; - }; - /** - * DeploymentFilter - * @description Filter for deployments. Only deployments matching all criteria will be returned. - */ - DeploymentFilter: { - /** - * @description Operator for combining filter criteria. Defaults to 'and_'. - * @default and_ - */ - operator: components["schemas"]["Operator"]; - /** @description Filter criteria for `Deployment.id` */ - id?: components["schemas"]["DeploymentFilterId"] | null; - /** @description Filter criteria for `Deployment.name` */ - name?: components["schemas"]["DeploymentFilterName"] | null; - /** @description Filter criteria for `Deployment.name` or `Flow.name` */ - flow_or_deployment_name?: - | components["schemas"]["DeploymentOrFlowNameFilter"] - | null; - /** @description Filter criteria for `Deployment.paused` */ - paused?: components["schemas"]["DeploymentFilterPaused"] | null; - /** @description Filter criteria for `Deployment.tags` */ - tags?: components["schemas"]["DeploymentFilterTags"] | null; - /** @description Filter criteria for `Deployment.work_queue_name` */ - work_queue_name?: - | components["schemas"]["DeploymentFilterWorkQueueName"] - | null; - /** - * @deprecated - * @description DEPRECATED: Prefer `Deployment.concurrency_limit_id` over `Deployment.concurrency_limit`. If provided, will be ignored for backwards-compatibility. Will be removed after December 2024. - */ - concurrency_limit?: - | components["schemas"]["DeploymentFilterConcurrencyLimit"] - | null; - }; - /** - * DeploymentFilterConcurrencyLimit - * @description DEPRECATED: Prefer `Deployment.concurrency_limit_id` over `Deployment.concurrency_limit`. - */ - DeploymentFilterConcurrencyLimit: { - /** - * Ge - * @description Only include deployments with a concurrency limit greater than or equal to this value - */ - ge_?: number | null; - /** - * Le - * @description Only include deployments with a concurrency limit less than or equal to this value - */ - le_?: number | null; - /** - * Is Null - * @description If true, only include deployments without a concurrency limit - */ - is_null_?: boolean | null; - }; - /** - * DeploymentFilterId - * @description Filter by `Deployment.id`. 
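// Illustrative sketch (not part of the generated schema): a minimal
// DeploymentCreate body. `name`, `flow_id`, `paused`, and
// `enforce_parameter_schema` are the required fields of the generated type;
// the UUID and import path are placeholders.
import type { components } from "./prefect";

const newDeployment: components["schemas"]["DeploymentCreate"] = {
  name: "etl-nightly",
  flow_id: "00000000-0000-0000-0000-000000000000", // placeholder flow UUID
  paused: false,
  enforce_parameter_schema: true,
  work_pool_name: "default-pool", // optional: route runs to a work pool
  tags: ["etl"],                  // optional deployment tags
};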
- */ - DeploymentFilterId: { - /** - * Any - * @description A list of deployment ids to include - */ - any_?: string[] | null; - }; - /** - * DeploymentFilterName - * @description Filter by `Deployment.name`. - */ - DeploymentFilterName: { - /** - * Any - * @description A list of deployment names to include - */ - any_?: string[] | null; - /** - * Like - * @description A case-insensitive partial match. For example, passing 'marvin' will match 'marvin', 'sad-Marvin', and 'marvin-robot'. - */ - like_?: string | null; - }; - /** - * DeploymentFilterPaused - * @description Filter by `Deployment.paused`. - */ - DeploymentFilterPaused: { - /** - * Eq - * @description Only returns where deployment is/is not paused - */ - eq_?: boolean | null; - }; - /** - * DeploymentFilterTags - * @description Filter by `Deployment.tags`. - */ - DeploymentFilterTags: { - /** - * @description Operator for combining filter criteria. Defaults to 'and_'. - * @default and_ - */ - operator: components["schemas"]["Operator"]; - /** - * All - * @description A list of tags. Deployments will be returned only if their tags are a superset of the list - */ - all_?: string[] | null; - /** - * Is Null - * @description If true, only include deployments without tags - */ - is_null_?: boolean | null; - }; - /** - * DeploymentFilterWorkQueueName - * @description Filter by `Deployment.work_queue_name`. - */ - DeploymentFilterWorkQueueName: { - /** - * Any - * @description A list of work queue names to include - */ - any_?: string[] | null; - }; - /** - * DeploymentFlowRunCreate - * @description Data used by the Prefect REST API to create a flow run from a deployment. - */ - DeploymentFlowRunCreate: { - /** @description The state of the flow run to create */ - state?: components["schemas"]["StateCreate"] | null; - /** - * Name - * @description The name of the flow run. Defaults to a random slug if not specified. - */ - name?: string; - /** Parameters */ - parameters?: Record; - /** - * Enforce Parameter Schema - * @description Whether or not to enforce the parameter schema on this run. - */ - enforce_parameter_schema?: boolean | null; - /** Context */ - context?: Record; - /** Infrastructure Document Id */ - infrastructure_document_id?: string | null; - /** @description The empirical policy for the flow run. */ - empirical_policy?: components["schemas"]["FlowRunPolicy"]; - /** - * Tags - * @description A list of tags for the flow run. - */ - tags?: string[]; - /** - * Idempotency Key - * @description An optional idempotency key. If a flow run with the same idempotency key has already been created, the existing flow run will be returned. - */ - idempotency_key?: string | null; - /** Parent Task Run Id */ - parent_task_run_id?: string | null; - /** Work Queue Name */ - work_queue_name?: string | null; - /** Job Variables */ - job_variables?: Record | null; - }; - /** - * DeploymentOrFlowNameFilter - * @description Filter by `Deployment.name` or `Flow.name` with a single input string for ilike filtering. - */ - DeploymentOrFlowNameFilter: { - /** - * Like - * @description A case-insensitive partial match on deployment or flow names. For example, passing 'example' might match deployments or flows with 'example' in their names. 
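// Illustrative sketch (not part of the generated schema): combining the
// deployment filters above. `operator` is required and "and_" is the documented
// default; `like_` is a case-insensitive partial match, and `all_` requires the
// deployment's tags to be a superset of the list. Import path is hypothetical.
import type { components } from "./prefect";

const deploymentFilter: components["schemas"]["DeploymentFilter"] = {
  operator: "and_",
  name: { like_: "marvin" },
  tags: { operator: "and_", all_: ["prod"] },
  paused: { eq_: false },
};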
- */ - like_?: string | null; - }; - /** DeploymentPaginationResponse */ - DeploymentPaginationResponse: { - /** Results */ - results: components["schemas"]["DeploymentResponse"][]; - /** Count */ - count: number; - /** Limit */ - limit: number; - /** Pages */ - pages: number; - /** Page */ - page: number; - }; - /** DeploymentResponse */ - DeploymentResponse: { - /** - * Id - * Format: uuid - */ - id?: string; - /** Created */ - created?: string | null; - /** Updated */ - updated?: string | null; - /** - * Name - * @description The name of the deployment. - */ - name: string; - /** - * Version - * @description An optional version for the deployment. - */ - version?: string | null; - /** - * Description - * @description A description for the deployment. - */ - description?: string | null; - /** - * Flow Id - * Format: uuid - * @description The flow id associated with the deployment. - */ - flow_id: string; - /** - * Paused - * @description Whether or not the deployment is paused. - * @default false - */ - paused: boolean; - /** - * Schedules - * @description A list of schedules for the deployment. - */ - schedules?: components["schemas"]["DeploymentSchedule"][]; - /** - * Concurrency Limit - * @deprecated - * @description DEPRECATED: Prefer `global_concurrency_limit`. Will always be None for backwards compatibility. Will be removed after December 2024. - */ - concurrency_limit?: number | null; - /** @description The global concurrency limit object for enforcing the maximum number of flow runs that can be active at once. */ - global_concurrency_limit?: - | components["schemas"]["GlobalConcurrencyLimitResponse"] - | null; - /** @description The concurrency options for the deployment. */ - concurrency_options?: components["schemas"]["ConcurrencyOptions"] | null; - /** - * Job Variables - * @description Overrides to apply to the base infrastructure block at runtime. - */ - job_variables?: Record; - /** - * Parameters - * @description Parameters for flow runs scheduled by the deployment. - */ - parameters?: Record; - /** - * Tags - * @description A list of tags for the deployment - */ - tags?: string[]; - /** - * Work Queue Name - * @description The work queue for the deployment. If no work queue is set, work will not be scheduled. - */ - work_queue_name?: string | null; - /** - * Last Polled - * @description The last time the deployment was polled for status updates. - */ - last_polled?: string | null; - /** - * Parameter Openapi Schema - * @description The parameter schema of the flow, including defaults. - */ - parameter_openapi_schema?: Record | null; - /** - * Path - * @description The path to the working directory for the workflow, relative to remote storage or an absolute path. - */ - path?: string | null; - /** - * Pull Steps - * @description Pull steps for cloning and running this deployment. - */ - pull_steps?: Record[] | null; - /** - * Entrypoint - * @description The path to the entrypoint for the workflow, relative to the `path`. - */ - entrypoint?: string | null; - /** - * Storage Document Id - * @description The block document defining storage used for this flow. - */ - storage_document_id?: string | null; - /** - * Infrastructure Document Id - * @description The block document defining infrastructure to use for flow runs. - */ - infrastructure_document_id?: string | null; - /** @description Optional information about the creator of this deployment. 
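// Illustrative sketch (not part of the generated schema): consuming the
// paginated deployment response above. `results` holds DeploymentResponse
// objects, and `page`/`pages`/`count` describe the pagination window.
// Import path is hypothetical.
import type { components } from "./prefect";

function deploymentNames(
  response: components["schemas"]["DeploymentPaginationResponse"],
): string[] {
  const { results, page, pages, count } = response;
  console.log(`page ${page} of ${pages} (${count} deployments total)`);
  return results.map((deployment) => deployment.name);
}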
*/ - created_by?: components["schemas"]["CreatedBy"] | null; - /** @description Optional information about the updater of this deployment. */ - updated_by?: components["schemas"]["UpdatedBy"] | null; - /** - * Work Pool Name - * @description The name of the deployment's work pool. - */ - work_pool_name?: string | null; - /** - * @description Whether the deployment is ready to run flows. - * @default NOT_READY - */ - status: components["schemas"]["DeploymentStatus"] | null; - /** - * Enforce Parameter Schema - * @description Whether or not the deployment should enforce the parameter schema. - * @default true - */ - enforce_parameter_schema: boolean; - }; - /** DeploymentSchedule */ - DeploymentSchedule: { - /** - * Id - * Format: uuid - */ - id?: string; - /** Created */ - created?: string | null; - /** Updated */ - updated?: string | null; - /** - * Deployment Id - * @description The deployment id associated with this schedule. - */ - deployment_id?: string | null; - /** - * Schedule - * @description The schedule for the deployment. - */ - schedule: - | components["schemas"]["IntervalSchedule"] - | components["schemas"]["CronSchedule"] - | components["schemas"]["RRuleSchedule"]; - /** - * Active - * @description Whether or not the schedule is active. - * @default true - */ - active: boolean; - /** - * Max Scheduled Runs - * @description The maximum number of scheduled runs for the schedule. - */ - max_scheduled_runs?: number | null; - }; - /** DeploymentScheduleCreate */ - DeploymentScheduleCreate: { - /** - * Active - * @description Whether or not the schedule is active. - * @default true - */ - active: boolean; - /** - * Schedule - * @description The schedule for the deployment. - */ - schedule: - | components["schemas"]["IntervalSchedule"] - | components["schemas"]["CronSchedule"] - | components["schemas"]["RRuleSchedule"]; - /** - * Max Scheduled Runs - * @description The maximum number of scheduled runs for the schedule. - */ - max_scheduled_runs?: number | null; - }; - /** DeploymentScheduleUpdate */ - DeploymentScheduleUpdate: { - /** - * Active - * @description Whether or not the schedule is active. - */ - active?: boolean | null; - /** - * Schedule - * @description The schedule for the deployment. - */ - schedule?: - | components["schemas"]["IntervalSchedule"] - | components["schemas"]["CronSchedule"] - | components["schemas"]["RRuleSchedule"] - | null; - /** - * Max Scheduled Runs - * @description The maximum number of scheduled runs for the schedule. - */ - max_scheduled_runs?: number | null; - }; - /** - * DeploymentSort - * @description Defines deployment sorting options. - * @enum {string} - */ - DeploymentSort: "CREATED_DESC" | "UPDATED_DESC" | "NAME_ASC" | "NAME_DESC"; - /** - * DeploymentStatus - * @description Enumeration of deployment statuses. - * @enum {string} - */ - DeploymentStatus: "READY" | "NOT_READY"; - /** - * DeploymentUpdate - * @description Data used by the Prefect REST API to update a deployment. - */ - DeploymentUpdate: { - /** Version */ - version?: string | null; - /** Description */ - description?: string | null; - /** - * Paused - * @description Whether or not the deployment is paused. - * @default false - */ - paused: boolean; - /** - * Schedules - * @description A list of schedules for the deployment. - */ - schedules?: components["schemas"]["DeploymentScheduleCreate"][]; - /** - * Concurrency Limit - * @description The deployment's concurrency limit. - */ - concurrency_limit?: number | null; - /** @description The deployment's concurrency options. 
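// Illustrative sketch (not part of the generated schema): attaching a schedule
// to a deployment with DeploymentScheduleCreate. The `schedule` union accepts
// an IntervalSchedule, CronSchedule, or RRuleSchedule; a CronSchedule is shown.
// Import path is hypothetical.
import type { components } from "./prefect";

const nightlySchedule: components["schemas"]["DeploymentScheduleCreate"] = {
  active: true,
  schedule: { cron: "0 2 * * *", timezone: "UTC", day_or: true }, // CronSchedule variant
  max_scheduled_runs: 5, // optional maximum number of scheduled runs
};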
*/ - concurrency_options?: components["schemas"]["ConcurrencyOptions"] | null; - /** - * Parameters - * @description Parameters for flow runs scheduled by the deployment. - */ - parameters?: Record | null; - /** - * Tags - * @description A list of deployment tags. - */ - tags?: string[]; - /** Work Queue Name */ - work_queue_name?: string | null; - /** - * Work Pool Name - * @description The name of the deployment's work pool. - */ - work_pool_name?: string | null; - /** Path */ - path?: string | null; - /** - * Job Variables - * @description Overrides for the flow's infrastructure configuration. - */ - job_variables?: Record | null; - /** Entrypoint */ - entrypoint?: string | null; - /** Storage Document Id */ - storage_document_id?: string | null; - /** Infrastructure Document Id */ - infrastructure_document_id?: string | null; - /** - * Enforce Parameter Schema - * @description Whether or not the deployment should enforce the parameter schema. - */ - enforce_parameter_schema?: boolean | null; - }; - /** - * DoNothing - * @description Do nothing when an Automation is triggered - */ - DoNothing: { - /** - * Type - * @default do-nothing - * @constant - * @enum {string} - */ - type: "do-nothing"; - }; - /** Edge */ - Edge: { - /** - * Id - * Format: uuid - */ - id: string; - }; - /** - * Event - * @description The client-side view of an event that has happened to a Resource - */ - Event: { - /** - * Occurred - * Format: date-time - * @description When the event happened from the sender's perspective - */ - occurred: string; - /** - * Event - * @description The name of the event that happened - */ - event: string; - /** @description The primary Resource this event concerns */ - resource: components["schemas"]["Resource"]; - /** - * Related - * @description A list of additional Resources involved in this event - */ - related?: components["schemas"]["RelatedResource"][]; - /** - * Payload - * @description An open-ended set of data describing what happened - */ - payload?: Record; - /** - * Id - * Format: uuid - * @description The client-provided identifier of this event - */ - id: string; - /** - * Follows - * @description The ID of an event that is known to have occurred prior to this one. If set, this may be used to establish a more precise ordering of causally-related events when they occur close enough together in time that the system may receive them out-of-order. 
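// Illustrative sketch (not part of the generated schema): a partial
// DeploymentUpdate. Only the fields being changed need to be supplied, except
// `paused`, which the generated type requires (documented default: false).
// Import path is hypothetical.
import type { components } from "./prefect";

const pauseForMaintenance: components["schemas"]["DeploymentUpdate"] = {
  paused: true,
  description: "Paused during database maintenance", // optional
  tags: ["maintenance"],                              // optional
};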
- */ - follows?: string | null; - }; - /** EventAnyResourceFilter */ - EventAnyResourceFilter: { - /** - * Id - * @description Only include events for resources with these IDs - */ - id?: string[] | null; - /** - * Id Prefix - * @description Only include events for resources with IDs starting with these prefixes - */ - id_prefix?: string[] | null; - /** @description Only include events for related resources with these labels */ - labels?: components["schemas"]["ResourceSpecification"] | null; - }; - /** - * EventCount - * @description The count of events with the given filter value - */ - EventCount: { - /** - * Value - * @description The value to use for filtering - */ - value: string; - /** - * Label - * @description The value to display for this count - */ - label: string; - /** - * Count - * @description The count of matching events - */ - count: number; - /** - * Start Time - * Format: date-time - * @description The start time of this group of events - */ - start_time: string; - /** - * End Time - * Format: date-time - * @description The end time of this group of events - */ - end_time: string; - }; - /** EventFilter */ - EventFilter: { - /** @description Filter criteria for when the events occurred */ - occurred?: components["schemas"]["EventOccurredFilter"]; - /** @description Filter criteria for the event name */ - event?: components["schemas"]["EventNameFilter"] | null; - /** @description Filter criteria for any resource involved in the event */ - any_resource?: components["schemas"]["EventAnyResourceFilter"] | null; - /** @description Filter criteria for the resource of the event */ - resource?: components["schemas"]["EventResourceFilter"] | null; - /** @description Filter criteria for the related resources of the event */ - related?: components["schemas"]["EventRelatedFilter"] | null; - /** @description Filter criteria for the events' ID */ - id?: components["schemas"]["EventIDFilter"]; - /** - * @description The order to return filtered events - * @default DESC - */ - order: components["schemas"]["EventOrder"]; - }; - /** EventIDFilter */ - EventIDFilter: { - /** - * Id - * @description Only include events with one of these IDs - */ - id?: string[] | null; - }; - /** EventNameFilter */ - EventNameFilter: { - /** - * Prefix - * @description Only include events matching one of these prefixes - */ - prefix?: string[] | null; - /** - * Exclude Prefix - * @description Exclude events matching one of these prefixes - */ - exclude_prefix?: string[] | null; - /** - * Name - * @description Only include events matching one of these names exactly - */ - name?: string[] | null; - /** - * Exclude Name - * @description Exclude events matching one of these names exactly - */ - exclude_name?: string[] | null; - }; - /** EventOccurredFilter */ - EventOccurredFilter: { - /** - * Since - * Format: date-time - * @description Only include events after this time (inclusive) - */ - since?: string; - /** - * Until - * Format: date-time - * @description Only include events prior to this time (inclusive) - */ - until?: string; - }; - /** - * EventOrder - * @enum {string} - */ - EventOrder: "ASC" | "DESC"; - /** - * EventPage - * @description A single page of events returned from the API, with an optional link to the - * next page of results - */ - EventPage: { - /** - * Events - * @description The Events matching the query - */ - events: components["schemas"]["ReceivedEvent"][]; - /** - * Total - * @description The total number of matching Events - */ - total: number; - /** - * Next Page - * @description 
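// Illustrative sketch (not part of the generated schema): an EventFilter for
// recent flow-run events. `order` is required ("DESC" is the documented
// default); `since` is an inclusive lower bound; `prefix` matches event names
// such as the `prefect.flow-run.*` family mentioned below. Import path is
// hypothetical.
import type { components } from "./prefect";

const recentFlowRunEvents: components["schemas"]["EventFilter"] = {
  order: "DESC",
  occurred: { since: "2024-01-01T00:00:00Z" },
  event: { prefix: ["prefect.flow-run."] },
};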
The URL for the next page of results, if there are more - */ - next_page: string | null; - }; - /** EventRelatedFilter */ - EventRelatedFilter: { - /** - * Id - * @description Only include events for related resources with these IDs - */ - id?: string[] | null; - /** - * Role - * @description Only include events for related resources in these roles - */ - role?: string[] | null; - /** - * Resources In Roles - * @description Only include events with specific related resources in specific roles - */ - resources_in_roles?: [string, string][] | null; - /** @description Only include events for related resources with these labels */ - labels?: components["schemas"]["ResourceSpecification"] | null; - }; - /** EventResourceFilter */ - EventResourceFilter: { - /** - * Id - * @description Only include events for resources with these IDs - */ - id?: string[] | null; - /** - * Id Prefix - * @description Only include events for resources with IDs starting with these prefixes. - */ - id_prefix?: string[] | null; - /** @description Only include events for resources with these labels */ - labels?: components["schemas"]["ResourceSpecification"] | null; - /** - * Distinct - * @description Only include events for distinct resources - * @default false - */ - distinct: boolean; - }; - /** - * EventTrigger - * @description A trigger that fires based on the presence or absence of events within a given - * period of time. - */ - EventTrigger: { - /** - * Type - * @default event - * @constant - * @enum {string} - */ - type: "event"; - /** - * Id - * Format: uuid - * @description The unique ID of this trigger - */ - id?: string; - /** @description Labels for resources which this trigger will match. */ - match?: components["schemas"]["ResourceSpecification"]; - /** @description Labels for related resources which this trigger will match. */ - match_related?: components["schemas"]["ResourceSpecification"]; - /** - * After - * @description The event(s) which must first been seen to fire this trigger. If empty, then fire this trigger immediately. Events may include trailing wildcards, like `prefect.flow-run.*` - */ - after?: string[]; - /** - * Expect - * @description The event(s) this trigger is expecting to see. If empty, this trigger will match any event. Events may include trailing wildcards, like `prefect.flow-run.*` - */ - expect?: string[]; - /** - * For Each - * @description Evaluate the trigger separately for each distinct value of these labels on the resource. By default, labels refer to the primary resource of the triggering event. You may also refer to labels from related resources by specifying `related::
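// Illustrative sketch (not part of the generated schema): an EventResourceFilter
// narrowing events to flow-run resources. The resource-ID prefix shown is an
// assumed example; `distinct` is required by the generated type (documented
// default: false). Import path is hypothetical.
import type { components } from "./prefect";

const flowRunResources: components["schemas"]["EventResourceFilter"] = {
  id_prefix: ["prefect.flow-run."], // assumed resource-ID prefix for flow runs
  distinct: false,
  // labels: a ResourceSpecification (defined elsewhere in this schema) may also be supplied
};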