diff --git a/.github/workflows/deploy_docs.yml b/.github/workflows/deploy_docs.yml index 4dbfd3fb9a..b7a2834745 100644 --- a/.github/workflows/deploy_docs.yml +++ b/.github/workflows/deploy_docs.yml @@ -1,4 +1,4 @@ -name: deploy docs +name: docs | deploy docs on: schedule: diff --git a/.github/workflows/get_docs_changes.yml b/.github/workflows/get_docs_changes.yml index d0fd936e00..49c6a6aa0c 100644 --- a/.github/workflows/get_docs_changes.yml +++ b/.github/workflows/get_docs_changes.yml @@ -1,4 +1,4 @@ -name: get docs changes +name: util | get docs changes on: workflow_call: @@ -13,6 +13,7 @@ env: jobs: get_docs_changes: + name: docs changes runs-on: ubuntu-latest outputs: changes_outside_docs: ${{ steps.check_changes.outputs.changes_outside_docs }} diff --git a/.github/workflows/lint.yml b/.github/workflows/lint.yml index 317124f8c8..0c6ddcee73 100644 --- a/.github/workflows/lint.yml +++ b/.github/workflows/lint.yml @@ -1,5 +1,5 @@ -name: lint +name: lint | code & tests on: pull_request: @@ -14,10 +14,11 @@ concurrency: jobs: get_docs_changes: + name: docs changes uses: ./.github/workflows/get_docs_changes.yml run_lint: - name: Lint + name: lint needs: get_docs_changes if: needs.get_docs_changes.outputs.changes_outside_docs == 'true' strategy: @@ -65,12 +66,6 @@ jobs: export PATH=$PATH:"/c/Program Files/usr/bin" # needed for Windows make lint - # - name: print envs - # run: | - # echo "The GitHub Actor's username is: $GITHUB_ACTOR" - # echo "The GitHub repo owner is: $GITHUB_REPOSITORY_OWNER" - # echo "The GitHub repo is: $GITHUB_REPOSITORY" - matrix_job_required_check: name: Lint results needs: run_lint diff --git a/.github/workflows/test_airflow.yml b/.github/workflows/test_airflow.yml index 02513618d6..8e3a9cf3d8 100644 --- a/.github/workflows/test_airflow.yml +++ b/.github/workflows/test_airflow.yml @@ -1,4 +1,4 @@ -name: test airflow integration +name: tools | airflow on: pull_request: @@ -13,10 +13,11 @@ concurrency: jobs: get_docs_changes: + name: docs 
changes uses: ./.github/workflows/get_docs_changes.yml run_airflow: - name: Tests Airflow integration + name: test needs: get_docs_changes if: needs.get_docs_changes.outputs.changes_outside_docs == 'true' runs-on: ubuntu-latest diff --git a/.github/workflows/test_build_images.yml b/.github/workflows/test_build_images.yml index 489d776f40..c9a99eda2d 100644 --- a/.github/workflows/test_build_images.yml +++ b/.github/workflows/test_build_images.yml @@ -1,4 +1,4 @@ -name: test build docker images +name: tools | docker images on: pull_request: @@ -13,10 +13,11 @@ concurrency: jobs: get_docs_changes: + name: docs changes uses: ./.github/workflows/get_docs_changes.yml run_airflow: - name: Build alpine and airflow images + name: build needs: get_docs_changes if: needs.get_docs_changes.outputs.changes_outside_docs == 'true' runs-on: ubuntu-latest @@ -38,4 +39,4 @@ jobs: installer-parallel: true - name: Build images - run: make test-build-images \ No newline at end of file + run: make test-build-images diff --git a/.github/workflows/test_common.yml b/.github/workflows/test_common.yml index 2d96d2eb95..116d4e6b6a 100644 --- a/.github/workflows/test_common.yml +++ b/.github/workflows/test_common.yml @@ -1,4 +1,4 @@ -name: test common +name: common | common on: pull_request: @@ -16,10 +16,11 @@ env: jobs: get_docs_changes: + name: docs changes uses: ./.github/workflows/get_docs_changes.yml run_common: - name: Tests common dlt code + name: test needs: get_docs_changes if: needs.get_docs_changes.outputs.changes_outside_docs == 'true' strategy: diff --git a/.github/workflows/test_dbt_cloud.yml b/.github/workflows/test_dbt_cloud.yml index a123e051e8..0f5c169e6e 100644 --- a/.github/workflows/test_dbt_cloud.yml +++ b/.github/workflows/test_dbt_cloud.yml @@ -1,5 +1,5 @@ -name: test dbt cloud +name: tools | dbt cloud on: pull_request: @@ -22,22 +22,18 @@ env: jobs: get_docs_changes: + name: docs changes uses: ./.github/workflows/get_docs_changes.yml - if: ${{ 
!github.event.pull_request.head.repo.fork }} + if: ${{ !github.event.pull_request.head.repo.fork || contains(github.event.pull_request.labels.*.name, 'ci from fork')}} run_dbt_cloud: - name: Tests dbt cloud + name: test needs: get_docs_changes if: needs.get_docs_changes.outputs.changes_outside_docs == 'true' - strategy: - fail-fast: false - matrix: - os: ["ubuntu-latest"] - # os: ["ubuntu-latest", "macos-latest", "windows-latest"] defaults: run: shell: bash - runs-on: ${{ matrix.os }} + runs-on: "ubuntu-latest" steps: @@ -70,21 +66,4 @@ jobs: - run: | poetry run pytest tests/helpers/dbt_cloud_tests -k '(not venv)' - if: runner.os != 'Windows' name: Run dbt cloud - Linux/MAC - - run: | - poetry run pytest tests/helpers/dbt_cloud_tests -k "(not venv)" - if: runner.os == 'Windows' - name: Run dbt cloud - Windows - shell: cmd - - matrix_job_required_check: - name: dbt cloud tests - needs: run_dbt_cloud - runs-on: ubuntu-latest - if: always() - steps: - - name: Check matrix job results - if: contains(needs.*.result, 'failure') || contains(needs.*.result, 'cancelled') - run: | - echo "One or more matrix job tests failed or were cancelled. You may need to re-run them." 
&& exit 1 diff --git a/.github/workflows/test_dbt_runner.yml b/.github/workflows/test_dbt_runner.yml index 1c425f14e9..0ca784a1ae 100644 --- a/.github/workflows/test_dbt_runner.yml +++ b/.github/workflows/test_dbt_runner.yml @@ -1,5 +1,5 @@ -name: test dbt runner +name: tools | dbt runner on: pull_request: @@ -19,22 +19,18 @@ env: jobs: get_docs_changes: + name: docs changes uses: ./.github/workflows/get_docs_changes.yml - if: ${{ !github.event.pull_request.head.repo.fork }} + if: ${{ !github.event.pull_request.head.repo.fork || contains(github.event.pull_request.labels.*.name, 'ci from fork')}} run_dbt: - name: Tests dbt runner + name: test needs: get_docs_changes if: needs.get_docs_changes.outputs.changes_outside_docs == 'true' - strategy: - fail-fast: false - matrix: - os: ["ubuntu-latest"] - # os: ["ubuntu-latest", "macos-latest", "windows-latest"] defaults: run: shell: bash - runs-on: ${{ matrix.os }} + runs-on: "ubuntu-latest" steps: @@ -84,21 +80,4 @@ jobs: - run: | poetry run pytest tests/helpers/dbt_tests --ignore=tests/helpers/dbt_tests/local -k '(not local)' - if: runner.os != 'Windows' name: Run dbt runner with venv - Linux/MAC - - run: | - poetry run pytest tests/helpers/dbt_tests --ignore=tests/helpers/dbt_tests/local -m "not forked" -k "(not local)" - if: runner.os == 'Windows' - name: Run dbt runner with venv - Windows - shell: cmd - - matrix_job_required_check: - name: dbt runner tests - needs: run_dbt - runs-on: ubuntu-latest - if: always() - steps: - - name: Check matrix job results - if: contains(needs.*.result, 'failure') || contains(needs.*.result, 'cancelled') - run: | - echo "One or more matrix job tests failed or were cancelled. You may need to re-run them." 
&& exit 1 diff --git a/.github/workflows/test_destination_athena.yml b/.github/workflows/test_destination_athena.yml index b94bdc6ee2..959fffcfd4 100644 --- a/.github/workflows/test_destination_athena.yml +++ b/.github/workflows/test_destination_athena.yml @@ -1,5 +1,5 @@ -name: test athena +name: dest | athena on: pull_request: @@ -7,6 +7,8 @@ on: - master - devel workflow_dispatch: + schedule: + - cron: '0 2 * * *' concurrency: group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.ref }} @@ -20,27 +22,23 @@ env: RUNTIME__DLTHUB_TELEMETRY_SEGMENT_WRITE_KEY: TLJiyRkGVZGCi2TtjClamXpFcxAA1rSB ACTIVE_DESTINATIONS: "[\"athena\"]" ALL_FILESYSTEM_DRIVERS: "[\"memory\"]" - EXCLUDED_DESTINATION_CONFIGURATIONS: "[\"athena-parquet-staging-iceberg\"]" + EXCLUDED_DESTINATION_CONFIGURATIONS: "[\"athena-parquet-staging-iceberg\", \"athena-parquet-no-staging-iceberg\"]" jobs: get_docs_changes: + name: docs changes uses: ./.github/workflows/get_docs_changes.yml # Tests that require credentials do not run in forks - if: ${{ !github.event.pull_request.head.repo.fork }} + if: ${{ !github.event.pull_request.head.repo.fork || contains(github.event.pull_request.labels.*.name, 'ci from fork')}} run_loader: - name: test destination athena + name: test needs: get_docs_changes if: needs.get_docs_changes.outputs.changes_outside_docs == 'true' - strategy: - fail-fast: false - matrix: - os: ["ubuntu-latest"] - # os: ["ubuntu-latest", "macos-latest", "windows-latest"] defaults: run: shell: bash - runs-on: ${{ matrix.os }} + runs-on: "ubuntu-latest" steps: @@ -75,22 +73,11 @@ jobs: run: pwd && echo "$DLT_SECRETS_TOML" > tests/.dlt/secrets.toml - run: | - poetry run pytest tests/load - if: runner.os != 'Windows' - name: Run tests Linux/MAC + poetry run pytest tests/load -m "essential" + name: Run essential tests Linux + if: ${{ ! 
(contains(github.event.pull_request.labels.*.name, 'ci full') || github.event_name == 'schedule')}} + - run: | poetry run pytest tests/load - if: runner.os == 'Windows' - name: Run tests Windows - shell: cmd - - matrix_job_required_check: - name: Redshift, PostgreSQL and DuckDB tests - needs: run_loader - runs-on: ubuntu-latest - if: always() - steps: - - name: Check matrix job results - if: contains(needs.*.result, 'failure') || contains(needs.*.result, 'cancelled') - run: | - echo "One or more matrix job tests failed or were cancelled. You may need to re-run them." && exit 1 + name: Run all tests Linux + if: ${{ contains(github.event.pull_request.labels.*.name, 'ci full') || github.event_name == 'schedule'}} diff --git a/.github/workflows/test_destination_athena_iceberg.yml b/.github/workflows/test_destination_athena_iceberg.yml index acb5f35dfd..ea3cb3c06b 100644 --- a/.github/workflows/test_destination_athena_iceberg.yml +++ b/.github/workflows/test_destination_athena_iceberg.yml @@ -1,5 +1,5 @@ -name: test athena iceberg +name: dest | athena iceberg on: pull_request: @@ -7,6 +7,8 @@ on: - master - devel workflow_dispatch: + schedule: + - cron: '0 2 * * *' concurrency: group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.ref }} @@ -20,27 +22,23 @@ env: RUNTIME__DLTHUB_TELEMETRY_SEGMENT_WRITE_KEY: TLJiyRkGVZGCi2TtjClamXpFcxAA1rSB ACTIVE_DESTINATIONS: "[\"athena\"]" ALL_FILESYSTEM_DRIVERS: "[\"memory\"]" - EXCLUDED_DESTINATION_CONFIGURATIONS: "[\"athena-no-staging\"]" + EXCLUDED_DESTINATION_CONFIGURATIONS: "[\"athena-no-staging\", \"athena-parquet-no-staging\"]" jobs: get_docs_changes: + name: docs changes uses: ./.github/workflows/get_docs_changes.yml # Tests that require credentials do not run in forks - if: ${{ !github.event.pull_request.head.repo.fork }} + if: ${{ !github.event.pull_request.head.repo.fork || contains(github.event.pull_request.labels.*.name, 'ci from fork')}} run_loader: - name: test destination athena iceberg + name: 
test needs: get_docs_changes if: needs.get_docs_changes.outputs.changes_outside_docs == 'true' - strategy: - fail-fast: false - matrix: - os: ["ubuntu-latest"] - # os: ["ubuntu-latest", "macos-latest", "windows-latest"] defaults: run: shell: bash - runs-on: ${{ matrix.os }} + runs-on: "ubuntu-latest" steps: @@ -75,22 +73,11 @@ jobs: run: pwd && echo "$DLT_SECRETS_TOML" > tests/.dlt/secrets.toml - run: | - poetry run pytest tests/load - if: runner.os != 'Windows' - name: Run tests Linux/MAC + poetry run pytest tests/load -m "essential" + name: Run essential tests Linux + if: ${{ ! (contains(github.event.pull_request.labels.*.name, 'ci full') || github.event_name == 'schedule')}} + - run: | poetry run pytest tests/load - if: runner.os == 'Windows' - name: Run tests Windows - shell: cmd - - matrix_job_required_check: - name: Redshift, PostgreSQL and DuckDB tests - needs: run_loader - runs-on: ubuntu-latest - if: always() - steps: - - name: Check matrix job results - if: contains(needs.*.result, 'failure') || contains(needs.*.result, 'cancelled') - run: | - echo "One or more matrix job tests failed or were cancelled. You may need to re-run them." 
&& exit 1 + name: Run all tests Linux + if: ${{ contains(github.event.pull_request.labels.*.name, 'ci full') || github.event_name == 'schedule'}} diff --git a/.github/workflows/test_destination_bigquery.yml b/.github/workflows/test_destination_bigquery.yml index d11f7155d4..95f7edfb4d 100644 --- a/.github/workflows/test_destination_bigquery.yml +++ b/.github/workflows/test_destination_bigquery.yml @@ -1,5 +1,5 @@ -name: test bigquery +name: dest | bigquery on: pull_request: @@ -7,6 +7,8 @@ on: - master - devel workflow_dispatch: + schedule: + - cron: '0 2 * * *' concurrency: group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.ref }} @@ -23,22 +25,18 @@ env: jobs: get_docs_changes: + name: docs changes uses: ./.github/workflows/get_docs_changes.yml - if: ${{ !github.event.pull_request.head.repo.fork }} + if: ${{ !github.event.pull_request.head.repo.fork || contains(github.event.pull_request.labels.*.name, 'ci from fork')}} run_loader: - name: Tests BigQuery loader + name: test needs: get_docs_changes if: needs.get_docs_changes.outputs.changes_outside_docs == 'true' - strategy: - fail-fast: false - matrix: - os: ["ubuntu-latest"] - # os: ["ubuntu-latest", "macos-latest", "windows-latest"] defaults: run: shell: bash - runs-on: ${{ matrix.os }} + runs-on: "ubuntu-latest" steps: @@ -57,12 +55,6 @@ jobs: virtualenvs-in-project: true installer-parallel: true - # - name: Get pip cache dir - # id: pip-cache - # run: | - # echo "::set-output name=dir::$(poetry env info -p)" - # echo "$(poetry env info -p)" - - name: Load cached venv id: cached-poetry-dependencies uses: actions/cache@v3 @@ -77,24 +69,13 @@ jobs: - name: create secrets.toml run: pwd && echo "$DLT_SECRETS_TOML" > tests/.dlt/secrets.toml - + - run: | - poetry run pytest tests/helpers/providers tests/load - if: runner.os != 'Windows' - name: Run tests Linux/MAC + poetry run pytest tests/load -m "essential" + name: Run essential tests Linux + if: ${{ ! 
(contains(github.event.pull_request.labels.*.name, 'ci full') || github.event_name == 'schedule')}} + - run: | - poetry run pytest tests/helpers/providers tests/load - if: runner.os == 'Windows' - name: Run tests Windows - shell: cmd - - matrix_job_required_check: - name: BigQuery loader tests - needs: run_loader - runs-on: ubuntu-latest - if: always() - steps: - - name: Check matrix job results - if: contains(needs.*.result, 'failure') || contains(needs.*.result, 'cancelled') - run: | - echo "One or more matrix job tests failed or were cancelled. You may need to re-run them." && exit 1 + poetry run pytest tests/load + name: Run all tests Linux + if: ${{ contains(github.event.pull_request.labels.*.name, 'ci full') || github.event_name == 'schedule'}} diff --git a/.github/workflows/test_destination_databricks.yml b/.github/workflows/test_destination_databricks.yml index 2a2fa8e10d..95ce20fb90 100644 --- a/.github/workflows/test_destination_databricks.yml +++ b/.github/workflows/test_destination_databricks.yml @@ -1,5 +1,5 @@ -name: test databricks +name: dest | databricks on: pull_request: @@ -7,6 +7,8 @@ on: - master - devel workflow_dispatch: + schedule: + - cron: '0 2 * * *' concurrency: group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.ref }} @@ -23,22 +25,18 @@ env: jobs: get_docs_changes: + name: docs changes uses: ./.github/workflows/get_docs_changes.yml - if: ${{ !github.event.pull_request.head.repo.fork }} + if: ${{ !github.event.pull_request.head.repo.fork || contains(github.event.pull_request.labels.*.name, 'ci from fork')}} run_loader: - name: Tests Databricks loader + name: test needs: get_docs_changes if: needs.get_docs_changes.outputs.changes_outside_docs == 'true' - strategy: - fail-fast: false - matrix: - os: ["ubuntu-latest"] - # os: ["ubuntu-latest", "macos-latest", "windows-latest"] defaults: run: shell: bash - runs-on: ${{ matrix.os }} + runs-on: "ubuntu-latest" steps: @@ -71,22 +69,11 @@ jobs: run: pwd && echo 
"$DLT_SECRETS_TOML" > tests/.dlt/secrets.toml - run: | - poetry run pytest tests/load - if: runner.os != 'Windows' - name: Run tests Linux/MAC + poetry run pytest tests/load -m "essential" + name: Run essential tests Linux + if: ${{ ! (contains(github.event.pull_request.labels.*.name, 'ci full') || github.event_name == 'schedule')}} + - run: | poetry run pytest tests/load - if: runner.os == 'Windows' - name: Run tests Windows - shell: cmd - - matrix_job_required_check: - name: Databricks loader tests - needs: run_loader - runs-on: ubuntu-latest - if: always() - steps: - - name: Check matrix job results - if: contains(needs.*.result, 'failure') || contains(needs.*.result, 'cancelled') - run: | - echo "One or more matrix job tests failed or were cancelled. You may need to re-run them." && exit 1 + name: Run all tests Linux + if: ${{ contains(github.event.pull_request.labels.*.name, 'ci full') || github.event_name == 'schedule'}} diff --git a/.github/workflows/test_destination_mssql.yml b/.github/workflows/test_destination_mssql.yml index f96c64219d..adf7437f1b 100644 --- a/.github/workflows/test_destination_mssql.yml +++ b/.github/workflows/test_destination_mssql.yml @@ -1,5 +1,5 @@ -name: test MS SQL +name: dest | mssql on: pull_request: @@ -7,6 +7,8 @@ on: - master - devel workflow_dispatch: + schedule: + - cron: '0 2 * * *' concurrency: group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.ref }} @@ -24,21 +26,18 @@ env: jobs: get_docs_changes: + name: docs changes uses: ./.github/workflows/get_docs_changes.yml - if: ${{ !github.event.pull_request.head.repo.fork }} + if: ${{ !github.event.pull_request.head.repo.fork || contains(github.event.pull_request.labels.*.name, 'ci from fork')}} run_loader: - name: Tests MS SQL loader + name: test needs: get_docs_changes if: needs.get_docs_changes.outputs.changes_outside_docs == 'true' - strategy: - fail-fast: false - matrix: - os: ["ubuntu-latest"] defaults: run: shell: bash - runs-on: ${{ matrix.os 
}} + runs-on: "ubuntu-latest" steps: @@ -74,23 +73,6 @@ jobs: - name: create secrets.toml run: pwd && echo "$DLT_SECRETS_TOML" > tests/.dlt/secrets.toml - - run: | - poetry run pytest tests/load - if: runner.os != 'Windows' - name: Run tests Linux/MAC - - run: | - poetry run pytest tests/load - if: runner.os == 'Windows' - name: Run tests Windows - shell: cmd - - matrix_job_required_check: - name: MS SQL loader tests - needs: run_loader - runs-on: ubuntu-latest - if: always() - steps: - - name: Check matrix job results - if: contains(needs.*.result, 'failure') || contains(needs.*.result, 'cancelled') - run: | - echo "One or more matrix job tests failed or were cancelled. You may need to re-run them." && exit 1 + # always run full suite, also on branches + - run: poetry run pytest tests/load + name: Run tests Linux diff --git a/.github/workflows/test_destination_qdrant.yml b/.github/workflows/test_destination_qdrant.yml index 3237801dbf..9131e9c62d 100644 --- a/.github/workflows/test_destination_qdrant.yml +++ b/.github/workflows/test_destination_qdrant.yml @@ -1,4 +1,4 @@ -name: test Qdrant +name: dest | qdrant on: pull_request: @@ -6,6 +6,8 @@ on: - master - devel workflow_dispatch: + schedule: + - cron: '0 2 * * *' concurrency: group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.ref }} @@ -22,22 +24,18 @@ env: jobs: get_docs_changes: + name: docs changes uses: ./.github/workflows/get_docs_changes.yml - if: ${{ !github.event.pull_request.head.repo.fork }} + if: ${{ !github.event.pull_request.head.repo.fork || contains(github.event.pull_request.labels.*.name, 'ci from fork')}} run_loader: - name: Tests Qdrant loader + name: test needs: get_docs_changes if: needs.get_docs_changes.outputs.changes_outside_docs == 'true' - strategy: - fail-fast: false - matrix: - os: - - ubuntu-latest defaults: run: shell: bash - runs-on: ${{ matrix.os }} + runs-on: "ubuntu-latest" steps: - name: Check out @@ -67,17 +65,13 @@ jobs: - name: Install dependencies 
run: poetry install --no-interaction -E qdrant -E parquet --with sentry-sdk --with pipeline + - run: | - poetry run pytest tests/load/ - name: Run tests Linux + poetry run pytest tests/load -m "essential" + name: Run essential tests Linux + if: ${{ ! (contains(github.event.pull_request.labels.*.name, 'ci full') || github.event_name == 'schedule')}} - matrix_job_required_check: - name: Qdrant loader tests - needs: run_loader - runs-on: ubuntu-latest - if: always() - steps: - - name: Check matrix job results - if: contains(needs.*.result, 'failure') || contains(needs.*.result, 'cancelled') - run: | - echo "One or more matrix job tests failed or were cancelled. You may need to re-run them." && exit 1 + - run: | + poetry run pytest tests/load + name: Run all tests Linux + if: ${{ contains(github.event.pull_request.labels.*.name, 'ci full') || github.event_name == 'schedule'}} diff --git a/.github/workflows/test_destination_snowflake.yml b/.github/workflows/test_destination_snowflake.yml index 1ef290682c..c46ca95a6b 100644 --- a/.github/workflows/test_destination_snowflake.yml +++ b/.github/workflows/test_destination_snowflake.yml @@ -1,5 +1,5 @@ -name: test snowflake +name: dest | snowflake on: pull_request: @@ -7,6 +7,8 @@ on: - master - devel workflow_dispatch: + schedule: + - cron: '0 2 * * *' concurrency: group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.ref }} @@ -23,22 +25,18 @@ env: jobs: get_docs_changes: + name: docs changes uses: ./.github/workflows/get_docs_changes.yml - if: ${{ !github.event.pull_request.head.repo.fork }} + if: ${{ !github.event.pull_request.head.repo.fork || contains(github.event.pull_request.labels.*.name, 'ci from fork')}} run_loader: - name: Tests Snowflake loader + name: test needs: get_docs_changes if: needs.get_docs_changes.outputs.changes_outside_docs == 'true' - strategy: - fail-fast: false - matrix: - os: ["ubuntu-latest"] - # os: ["ubuntu-latest", "macos-latest", "windows-latest"] defaults: run: shell: 
bash - runs-on: ${{ matrix.os }} + runs-on: "ubuntu-latest" steps: @@ -71,22 +69,11 @@ jobs: run: pwd && echo "$DLT_SECRETS_TOML" > tests/.dlt/secrets.toml - run: | - poetry run pytest tests/load - if: runner.os != 'Windows' - name: Run tests Linux/MAC + poetry run pytest tests/load -m "essential" + name: Run essential tests Linux + if: ${{ ! (contains(github.event.pull_request.labels.*.name, 'ci full') || github.event_name == 'schedule')}} + - run: | poetry run pytest tests/load - if: runner.os == 'Windows' - name: Run tests Windows - shell: cmd - - matrix_job_required_check: - name: Snowflake loader tests - needs: run_loader - runs-on: ubuntu-latest - if: always() - steps: - - name: Check matrix job results - if: contains(needs.*.result, 'failure') || contains(needs.*.result, 'cancelled') - run: | - echo "One or more matrix job tests failed or were cancelled. You may need to re-run them." && exit 1 + name: Run all tests Linux + if: ${{ contains(github.event.pull_request.labels.*.name, 'ci full') || github.event_name == 'schedule'}} diff --git a/.github/workflows/test_destination_synapse.yml b/.github/workflows/test_destination_synapse.yml index 774c83314f..997b0a2903 100644 --- a/.github/workflows/test_destination_synapse.yml +++ b/.github/workflows/test_destination_synapse.yml @@ -1,4 +1,4 @@ -name: test synapse +name: dest | synapse on: pull_request: @@ -6,6 +6,8 @@ on: - master - devel workflow_dispatch: + schedule: + - cron: '0 2 * * *' concurrency: group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.ref }} @@ -22,21 +24,18 @@ env: jobs: get_docs_changes: + name: docs changes uses: ./.github/workflows/get_docs_changes.yml - if: ${{ !github.event.pull_request.head.repo.fork }} + if: ${{ !github.event.pull_request.head.repo.fork || contains(github.event.pull_request.labels.*.name, 'ci from fork')}} run_loader: - name: Tests Synapse loader + name: test needs: get_docs_changes if: needs.get_docs_changes.outputs.changes_outside_docs == 
'true' - strategy: - fail-fast: false - matrix: - os: ["ubuntu-latest"] defaults: run: shell: bash - runs-on: ${{ matrix.os }} + runs-on: "ubuntu-latest" steps: @@ -73,22 +72,11 @@ jobs: run: pwd && echo "$DLT_SECRETS_TOML" > tests/.dlt/secrets.toml - run: | - poetry run pytest tests/load - if: runner.os != 'Windows' - name: Run tests Linux/MAC + poetry run pytest tests/load -m "essential" + name: Run essential tests Linux + if: ${{ ! (contains(github.event.pull_request.labels.*.name, 'ci full') || github.event_name == 'schedule')}} + - run: | poetry run pytest tests/load - if: runner.os == 'Windows' - name: Run tests Windows - shell: cmd - - matrix_job_required_check: - name: Synapse loader tests - needs: run_loader - runs-on: ubuntu-latest - if: always() - steps: - - name: Check matrix job results - if: contains(needs.*.result, 'failure') || contains(needs.*.result, 'cancelled') - run: | - echo "One or more matrix job tests failed or were cancelled. You may need to re-run them." && exit 1 + name: Run all tests Linux + if: ${{ contains(github.event.pull_request.labels.*.name, 'ci full') || github.event_name == 'schedule'}} diff --git a/.github/workflows/test_destinations.yml b/.github/workflows/test_destinations.yml index a635e2865c..a23f5f3f4d 100644 --- a/.github/workflows/test_destinations.yml +++ b/.github/workflows/test_destinations.yml @@ -1,5 +1,5 @@ -name: test redshift, postgres and filesystem buckets +name: dest | redshift, postgres and fs on: pull_request: @@ -7,6 +7,8 @@ on: - master - devel workflow_dispatch: + schedule: + - cron: '0 2 * * *' concurrency: group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.ref }} @@ -30,23 +32,21 @@ env: jobs: get_docs_changes: + name: docs changes uses: ./.github/workflows/get_docs_changes.yml # Tests that require credentials do not run in forks - if: ${{ !github.event.pull_request.head.repo.fork }} + if: ${{ !github.event.pull_request.head.repo.fork || 
contains(github.event.pull_request.labels.*.name, 'ci from fork')}} run_loader: - name: test destinations redshift, postgres and filesystem + name: test needs: get_docs_changes if: needs.get_docs_changes.outputs.changes_outside_docs == 'true' strategy: fail-fast: false - matrix: - os: ["ubuntu-latest"] - # os: ["ubuntu-latest", "macos-latest", "windows-latest"] defaults: run: shell: bash - runs-on: ${{ matrix.os }} + runs-on: "ubuntu-latest" steps: @@ -81,22 +81,11 @@ jobs: run: pwd && echo "$DLT_SECRETS_TOML" > tests/.dlt/secrets.toml - run: | - poetry run pytest tests/load - if: runner.os != 'Windows' - name: Run tests Linux/MAC + poetry run pytest tests/load -m "essential" + name: Run essential tests Linux + if: ${{ ! (contains(github.event.pull_request.labels.*.name, 'ci full') || github.event_name == 'schedule')}} + - run: | poetry run pytest tests/load - if: runner.os == 'Windows' - name: Run tests Windows - shell: cmd - - matrix_job_required_check: - name: Redshift, PostgreSQL and DuckDB tests - needs: run_loader - runs-on: ubuntu-latest - if: always() - steps: - - name: Check matrix job results - if: contains(needs.*.result, 'failure') || contains(needs.*.result, 'cancelled') - run: | - echo "One or more matrix job tests failed or were cancelled. You may need to re-run them." 
&& exit 1 + name: Run all tests Linux + if: ${{ contains(github.event.pull_request.labels.*.name, 'ci full') || github.event_name == 'schedule'}} diff --git a/.github/workflows/test_doc_snippets.yml b/.github/workflows/test_doc_snippets.yml index bafbf4bbee..877ec0e530 100644 --- a/.github/workflows/test_doc_snippets.yml +++ b/.github/workflows/test_doc_snippets.yml @@ -1,5 +1,5 @@ -name: docs snippet lint and test +name: docs | snippets & examples on: pull_request: @@ -23,11 +23,14 @@ env: RUNTIME__SLACK_INCOMING_HOOK: ${{ secrets.RUNTIME__SLACK_INCOMING_HOOK }} # detect if the workflow is executed in a repo fork IS_FORK: ${{ github.event.pull_request.head.repo.fork }} + jobs: run_lint: - name: Runs linter and tests on docs snippets + name: lint and test runs-on: ubuntu-latest + # Do not run on forks, unless allowed, secrets are used here + if: ${{ !github.event.pull_request.head.repo.fork || contains(github.event.pull_request.labels.*.name, 'ci from fork')}} steps: diff --git a/.github/workflows/test_local_destinations.yml b/.github/workflows/test_local_destinations.yml index 11377095d0..653b4dbd75 100644 --- a/.github/workflows/test_local_destinations.yml +++ b/.github/workflows/test_local_destinations.yml @@ -1,7 +1,7 @@ # Tests destinations that can run without credentials. # i.e. 
local postgres, duckdb, filesystem (with local fs/memory bucket) -name: test local destinations +name: dest | postgres, duckdb and fs on: pull_request: @@ -25,10 +25,11 @@ env: jobs: get_docs_changes: + name: docs changes uses: ./.github/workflows/get_docs_changes.yml run_loader: - name: test destinations postgres, duckdb and filesystem + name: test needs: get_docs_changes if: needs.get_docs_changes.outputs.changes_outside_docs == 'true' strategy: @@ -86,10 +87,11 @@ jobs: - name: Install dependencies run: poetry install --no-interaction -E postgres -E duckdb -E parquet -E filesystem -E cli -E weaviate --with sentry-sdk --with pipeline - + - name: create secrets.toml run: pwd && echo "$DLT_SECRETS_TOML" > tests/.dlt/secrets.toml + # always run full suite, also on branches - run: poetry run pytest tests/load && poetry run pytest tests/cli name: Run tests Linux env: diff --git a/pytest.ini b/pytest.ini index 0f9f6ab0d8..f3a85576ac 100644 --- a/pytest.ini +++ b/pytest.ini @@ -7,4 +7,6 @@ log_cli= 1 log_cli_level= INFO python_files = test_*.py *_test.py *snippets.py *snippet.pytest python_functions = *_test test_* *_snippet -filterwarnings= ignore::DeprecationWarning \ No newline at end of file +filterwarnings= ignore::DeprecationWarning +markers = + essential: marks all essential tests \ No newline at end of file diff --git a/tests/load/athena_iceberg/test_athena_iceberg.py b/tests/load/athena_iceberg/test_athena_iceberg.py index 6804b98427..0b8ca9c6ff 100644 --- a/tests/load/athena_iceberg/test_athena_iceberg.py +++ b/tests/load/athena_iceberg/test_athena_iceberg.py @@ -15,6 +15,8 @@ from tests.utils import skip_if_not_active from dlt.destinations.exceptions import DatabaseTerminalException +# mark all tests as essential, do not remove +pytestmark = pytest.mark.essential skip_if_not_active("athena") diff --git a/tests/load/bigquery/test_bigquery_client.py b/tests/load/bigquery/test_bigquery_client.py index a97b612ad0..b16790b07d 100644 --- 
a/tests/load/bigquery/test_bigquery_client.py +++ b/tests/load/bigquery/test_bigquery_client.py @@ -32,6 +32,9 @@ cm_yield_client_with_storage, ) +# mark all tests as essential, do not remove +pytestmark = pytest.mark.essential + @pytest.fixture(scope="module") def client() -> Iterator[BigQueryClient]: diff --git a/tests/load/bigquery/test_bigquery_table_builder.py b/tests/load/bigquery/test_bigquery_table_builder.py index fd58a6e033..dee7934216 100644 --- a/tests/load/bigquery/test_bigquery_table_builder.py +++ b/tests/load/bigquery/test_bigquery_table_builder.py @@ -33,6 +33,9 @@ ) from tests.load.utils import TABLE_UPDATE, sequence_generator, empty_schema +# mark all tests as essential, do not remove +pytestmark = pytest.mark.essential + def test_configuration() -> None: os.environ["MYBG__CREDENTIALS__CLIENT_EMAIL"] = "1234" diff --git a/tests/load/databricks/test_databricks_configuration.py b/tests/load/databricks/test_databricks_configuration.py index 8d30d05e42..cc353f5894 100644 --- a/tests/load/databricks/test_databricks_configuration.py +++ b/tests/load/databricks/test_databricks_configuration.py @@ -8,6 +8,9 @@ from dlt.common.configuration import resolve_configuration from tests.utils import preserve_environ +# mark all tests as essential, do not remove +pytestmark = pytest.mark.essential + def test_databricks_credentials_to_connector_params(): os.environ["CREDENTIALS__SERVER_HOSTNAME"] = "my-databricks.example.com" diff --git a/tests/load/duckdb/test_duckdb_client.py b/tests/load/duckdb/test_duckdb_client.py index 3deed7a77d..6cfb77d613 100644 --- a/tests/load/duckdb/test_duckdb_client.py +++ b/tests/load/duckdb/test_duckdb_client.py @@ -17,6 +17,9 @@ from tests.load.pipeline.utils import drop_pipeline, assert_table from tests.utils import patch_home_dir, autouse_test_storage, preserve_environ, TEST_STORAGE_ROOT +# mark all tests as essential, do not remove +pytestmark = pytest.mark.essential + @pytest.fixture(autouse=True) def 
delete_default_duckdb_credentials() -> Iterator[None]: diff --git a/tests/load/duckdb/test_duckdb_table_builder.py b/tests/load/duckdb/test_duckdb_table_builder.py index 542b18993c..d5e8ab59be 100644 --- a/tests/load/duckdb/test_duckdb_table_builder.py +++ b/tests/load/duckdb/test_duckdb_table_builder.py @@ -10,6 +10,9 @@ from tests.load.utils import TABLE_UPDATE, empty_schema +# mark all tests as essential, do not remove +pytestmark = pytest.mark.essential + @pytest.fixture def client(empty_schema: Schema) -> DuckDbClient: diff --git a/tests/load/duckdb/test_motherduck_client.py b/tests/load/duckdb/test_motherduck_client.py index ba60e0de6d..c1444eb87e 100644 --- a/tests/load/duckdb/test_motherduck_client.py +++ b/tests/load/duckdb/test_motherduck_client.py @@ -10,6 +10,9 @@ from tests.utils import patch_home_dir, preserve_environ, skip_if_not_active +# mark all tests as essential, do not remove +pytestmark = pytest.mark.essential + skip_if_not_active("motherduck") diff --git a/tests/load/filesystem/test_aws_credentials.py b/tests/load/filesystem/test_aws_credentials.py index 62c2e3cd85..1a41144744 100644 --- a/tests/load/filesystem/test_aws_credentials.py +++ b/tests/load/filesystem/test_aws_credentials.py @@ -10,6 +10,9 @@ from tests.load.utils import ALL_FILESYSTEM_DRIVERS from tests.utils import preserve_environ, autouse_test_storage +# mark all tests as essential, do not remove +pytestmark = pytest.mark.essential + if "s3" not in ALL_FILESYSTEM_DRIVERS: pytest.skip("s3 filesystem driver not configured", allow_module_level=True) diff --git a/tests/load/filesystem/test_azure_credentials.py b/tests/load/filesystem/test_azure_credentials.py index 093cd6dd19..467ba55a4f 100644 --- a/tests/load/filesystem/test_azure_credentials.py +++ b/tests/load/filesystem/test_azure_credentials.py @@ -11,6 +11,9 @@ from tests.common.configuration.utils import environment from tests.utils import preserve_environ, autouse_test_storage +# mark all tests as essential, do not remove 
+pytestmark = pytest.mark.essential + if "az" not in ALL_FILESYSTEM_DRIVERS: pytest.skip("az filesystem driver not configured", allow_module_level=True) diff --git a/tests/load/filesystem/test_filesystem_client.py b/tests/load/filesystem/test_filesystem_client.py index 9948e26882..5d6dbe33ef 100644 --- a/tests/load/filesystem/test_filesystem_client.py +++ b/tests/load/filesystem/test_filesystem_client.py @@ -15,6 +15,9 @@ from tests.utils import clean_test_storage, init_test_logging from tests.utils import preserve_environ, autouse_test_storage +# mark all tests as essential, do not remove +pytestmark = pytest.mark.essential + @pytest.fixture(autouse=True) def storage() -> FileStorage: diff --git a/tests/load/filesystem/test_filesystem_common.py b/tests/load/filesystem/test_filesystem_common.py index 4c94766097..193ff0921b 100644 --- a/tests/load/filesystem/test_filesystem_common.py +++ b/tests/load/filesystem/test_filesystem_common.py @@ -20,6 +20,10 @@ from tests.common.configuration.utils import environment +# mark all tests as essential, do not remove +pytestmark = pytest.mark.essential + + @with_config(spec=FilesystemConfiguration, sections=("destination", "filesystem")) def get_config(config: FilesystemConfiguration = None) -> FilesystemConfiguration: return config @@ -52,7 +56,7 @@ def check_file_exists(): def check_file_changed(): details = filesystem.info(file_url) assert details["size"] == 11 - assert (MTIME_DISPATCH[config.protocol](details) - now).seconds < 120 + assert (MTIME_DISPATCH[config.protocol](details) - now).seconds < 200 bucket_url = os.environ["DESTINATION__FILESYSTEM__BUCKET_URL"] config = get_config() diff --git a/tests/load/mssql/test_mssql_credentials.py b/tests/load/mssql/test_mssql_credentials.py index 0e38791f22..7d49196531 100644 --- a/tests/load/mssql/test_mssql_credentials.py +++ b/tests/load/mssql/test_mssql_credentials.py @@ -6,6 +6,9 @@ from dlt.destinations.impl.mssql.configuration import MsSqlCredentials +# mark all tests as 
essential, do not remove +pytestmark = pytest.mark.essential + def test_mssql_credentials_defaults() -> None: creds = MsSqlCredentials() diff --git a/tests/load/mssql/test_mssql_table_builder.py b/tests/load/mssql/test_mssql_table_builder.py index 1b4a77a2ab..f7a87c14ee 100644 --- a/tests/load/mssql/test_mssql_table_builder.py +++ b/tests/load/mssql/test_mssql_table_builder.py @@ -11,6 +11,9 @@ from tests.load.utils import TABLE_UPDATE, empty_schema +# mark all tests as essential, do not remove +pytestmark = pytest.mark.essential + @pytest.fixture def client(empty_schema: Schema) -> MsSqlClient: diff --git a/tests/load/pipeline/test_arrow_loading.py b/tests/load/pipeline/test_arrow_loading.py index 59cd90c535..6ec09c2ccd 100644 --- a/tests/load/pipeline/test_arrow_loading.py +++ b/tests/load/pipeline/test_arrow_loading.py @@ -17,6 +17,9 @@ from tests.utils import preserve_environ from tests.cases import arrow_table_all_data_types, TArrowFormat +# mark all tests as essential, do not remove +pytestmark = pytest.mark.essential + @pytest.mark.parametrize( "destination_config", diff --git a/tests/load/pipeline/test_athena.py b/tests/load/pipeline/test_athena.py index 9c17be318f..845f9b8a27 100644 --- a/tests/load/pipeline/test_athena.py +++ b/tests/load/pipeline/test_athena.py @@ -11,6 +11,9 @@ from tests.load.pipeline.utils import destinations_configs, DestinationTestConfiguration +# mark all tests as essential, do not remove +pytestmark = pytest.mark.essential + @pytest.mark.parametrize( "destination_config", diff --git a/tests/load/pipeline/test_bigquery.py b/tests/load/pipeline/test_bigquery.py index 711d45fb1f..68533a5d43 100644 --- a/tests/load/pipeline/test_bigquery.py +++ b/tests/load/pipeline/test_bigquery.py @@ -6,6 +6,9 @@ from tests.load.pipeline.utils import destinations_configs, DestinationTestConfiguration from tests.load.utils import delete_dataset +# mark all tests as essential, do not remove +pytestmark = pytest.mark.essential + 
@pytest.mark.parametrize( "destination_config", diff --git a/tests/load/pipeline/test_merge_disposition.py b/tests/load/pipeline/test_merge_disposition.py index 19ee9a34c8..fc6b82d1f4 100644 --- a/tests/load/pipeline/test_merge_disposition.py +++ b/tests/load/pipeline/test_merge_disposition.py @@ -26,6 +26,7 @@ # ACTIVE_DESTINATIONS += ["motherduck"] +@pytest.mark.essential @pytest.mark.parametrize( "destination_config", destinations_configs(default_sql_configs=True), ids=lambda x: x.name ) @@ -153,6 +154,7 @@ def load_issues(): return load_issues +@pytest.mark.essential @pytest.mark.parametrize( "destination_config", destinations_configs(default_sql_configs=True), ids=lambda x: x.name ) diff --git a/tests/load/pipeline/test_pipelines.py b/tests/load/pipeline/test_pipelines.py index 017bef2c01..ea03fa91f1 100644 --- a/tests/load/pipeline/test_pipelines.py +++ b/tests/load/pipeline/test_pipelines.py @@ -43,6 +43,9 @@ ) from tests.load.pipeline.utils import destinations_configs, DestinationTestConfiguration +# mark all tests as essential, do not remove +pytestmark = pytest.mark.essential + @pytest.mark.parametrize( "destination_config", diff --git a/tests/load/pipeline/test_redshift.py b/tests/load/pipeline/test_redshift.py index 44234ec64b..40e13f1a4c 100644 --- a/tests/load/pipeline/test_redshift.py +++ b/tests/load/pipeline/test_redshift.py @@ -8,6 +8,9 @@ from tests.cases import table_update_and_row, assert_all_data_types_row from tests.pipeline.utils import assert_load_info +# mark all tests as essential, do not remove +pytestmark = pytest.mark.essential + @pytest.mark.parametrize( "destination_config", diff --git a/tests/load/pipeline/test_replace_disposition.py b/tests/load/pipeline/test_replace_disposition.py index a69d4440dc..6efde6e019 100644 --- a/tests/load/pipeline/test_replace_disposition.py +++ b/tests/load/pipeline/test_replace_disposition.py @@ -16,6 +16,7 @@ ) +@pytest.mark.essential @pytest.mark.parametrize( "destination_config", 
destinations_configs( diff --git a/tests/load/pipeline/test_restore_state.py b/tests/load/pipeline/test_restore_state.py index e50654adcc..d421819121 100644 --- a/tests/load/pipeline/test_restore_state.py +++ b/tests/load/pipeline/test_restore_state.py @@ -39,6 +39,7 @@ def duckdb_pipeline_location() -> None: del os.environ["DESTINATION__DUCKDB__CREDENTIALS"] +@pytest.mark.essential @pytest.mark.parametrize( "destination_config", destinations_configs( diff --git a/tests/load/postgres/test_postgres_client.py b/tests/load/postgres/test_postgres_client.py index 896e449b28..a0fbd85b5b 100644 --- a/tests/load/postgres/test_postgres_client.py +++ b/tests/load/postgres/test_postgres_client.py @@ -15,6 +15,9 @@ from tests.load.utils import expect_load_file, prepare_table, yield_client_with_storage from tests.common.configuration.utils import environment +# mark all tests as essential, do not remove +pytestmark = pytest.mark.essential + @pytest.fixture def file_storage() -> FileStorage: diff --git a/tests/load/postgres/test_postgres_table_builder.py b/tests/load/postgres/test_postgres_table_builder.py index 0ab1343a3b..9362b44e18 100644 --- a/tests/load/postgres/test_postgres_table_builder.py +++ b/tests/load/postgres/test_postgres_table_builder.py @@ -13,6 +13,9 @@ from tests.load.utils import TABLE_UPDATE, empty_schema +# mark all tests as essential, do not remove +pytestmark = pytest.mark.essential + @pytest.fixture def client(empty_schema: Schema) -> PostgresClient: diff --git a/tests/load/qdrant/test_pipeline.py b/tests/load/qdrant/test_pipeline.py index 4c1361dcca..fcc8fcbd71 100644 --- a/tests/load/qdrant/test_pipeline.py +++ b/tests/load/qdrant/test_pipeline.py @@ -11,6 +11,9 @@ from tests.load.qdrant.utils import drop_active_pipeline_data, assert_collection from tests.load.utils import sequence_generator +# mark all tests as essential, do not remove +pytestmark = pytest.mark.essential + @pytest.fixture(autouse=True) def drop_qdrant_data() -> Iterator[None]: diff 
--git a/tests/load/redshift/test_redshift_client.py b/tests/load/redshift/test_redshift_client.py index f5efc16a47..03bb57c3b4 100644 --- a/tests/load/redshift/test_redshift_client.py +++ b/tests/load/redshift/test_redshift_client.py @@ -19,6 +19,9 @@ from tests.utils import TEST_STORAGE_ROOT, autouse_test_storage, skipifpypy from tests.load.utils import expect_load_file, prepare_table, yield_client_with_storage +# mark all tests as essential, do not remove +pytestmark = pytest.mark.essential + @pytest.fixture def file_storage() -> FileStorage: diff --git a/tests/load/redshift/test_redshift_table_builder.py b/tests/load/redshift/test_redshift_table_builder.py index bc132c7818..2427bc7cfe 100644 --- a/tests/load/redshift/test_redshift_table_builder.py +++ b/tests/load/redshift/test_redshift_table_builder.py @@ -14,6 +14,9 @@ from tests.load.utils import TABLE_UPDATE, empty_schema +# mark all tests as essential, do not remove +pytestmark = pytest.mark.essential + @pytest.fixture def client(empty_schema: Schema) -> RedshiftClient: diff --git a/tests/load/snowflake/test_snowflake_configuration.py b/tests/load/snowflake/test_snowflake_configuration.py index d0ca4de41b..2add5c0017 100644 --- a/tests/load/snowflake/test_snowflake_configuration.py +++ b/tests/load/snowflake/test_snowflake_configuration.py @@ -16,6 +16,9 @@ from tests.common.configuration.utils import environment +# mark all tests as essential, do not remove +pytestmark = pytest.mark.essential + def test_connection_string_with_all_params() -> None: url = "snowflake://user1:pass1@host1/db1?warehouse=warehouse1&role=role1&private_key=cGs%3D&private_key_passphrase=paphr" diff --git a/tests/load/snowflake/test_snowflake_table_builder.py b/tests/load/snowflake/test_snowflake_table_builder.py index 5d7108803e..bdbe888fb5 100644 --- a/tests/load/snowflake/test_snowflake_table_builder.py +++ b/tests/load/snowflake/test_snowflake_table_builder.py @@ -14,6 +14,9 @@ from tests.load.utils import TABLE_UPDATE, 
empty_schema +# mark all tests as essential, do not remove +pytestmark = pytest.mark.essential + @pytest.fixture def snowflake_client(empty_schema: Schema) -> SnowflakeClient: diff --git a/tests/load/synapse/test_synapse_configuration.py b/tests/load/synapse/test_synapse_configuration.py index 4055cbab38..f366d87d09 100644 --- a/tests/load/synapse/test_synapse_configuration.py +++ b/tests/load/synapse/test_synapse_configuration.py @@ -8,6 +8,9 @@ SynapseCredentials, ) +# mark all tests as essential, do not remove +pytestmark = pytest.mark.essential + def test_synapse_configuration() -> None: # By default, unique indexes should not be created. diff --git a/tests/load/synapse/test_synapse_table_builder.py b/tests/load/synapse/test_synapse_table_builder.py index 8575835820..9ee2ebe202 100644 --- a/tests/load/synapse/test_synapse_table_builder.py +++ b/tests/load/synapse/test_synapse_table_builder.py @@ -19,6 +19,9 @@ TABLE_INDEX_TYPE_TO_SYNAPSE_ATTR, ) +# mark all tests as essential, do not remove +pytestmark = pytest.mark.essential + @pytest.fixture def client(empty_schema: Schema) -> SynapseClient: diff --git a/tests/load/synapse/test_synapse_table_indexing.py b/tests/load/synapse/test_synapse_table_indexing.py index df90933de4..71f419cbca 100644 --- a/tests/load/synapse/test_synapse_table_indexing.py +++ b/tests/load/synapse/test_synapse_table_indexing.py @@ -17,6 +17,8 @@ ) # this import ensures all test data gets removed from tests.load.synapse.utils import get_storage_table_index_type +# mark all tests as essential, do not remove +pytestmark = pytest.mark.essential TABLE_INDEX_TYPE_COLUMN_SCHEMA_PARAM_GRID = [ ("heap", None), diff --git a/tests/load/test_insert_job_client.py b/tests/load/test_insert_job_client.py index e353ec34eb..c8ca284796 100644 --- a/tests/load/test_insert_job_client.py +++ b/tests/load/test_insert_job_client.py @@ -26,7 +26,7 @@ def file_storage() -> FileStorage: def client(request) -> Iterator[InsertValuesJobClient]: yield from 
yield_client_with_storage(request.param.destination) # type: ignore[misc] - +@pytest.mark.essential @pytest.mark.parametrize( "client", destinations_configs(default_sql_configs=True, subset=DEFAULT_SUBSET), diff --git a/tests/load/test_job_client.py b/tests/load/test_job_client.py index 2e23086f81..a5dfb5d829 100644 --- a/tests/load/test_job_client.py +++ b/tests/load/test_job_client.py @@ -156,6 +156,7 @@ def test_get_update_basic_schema(client: SqlJobClientBase) -> None: assert this_schema == newest_schema +@pytest.mark.essential @pytest.mark.parametrize( "client", destinations_configs(default_sql_configs=True), indirect=True, ids=lambda x: x.name ) diff --git a/tests/load/test_sql_client.py b/tests/load/test_sql_client.py index d82925a7d3..da162621b1 100644 --- a/tests/load/test_sql_client.py +++ b/tests/load/test_sql_client.py @@ -139,6 +139,7 @@ def test_malformed_execute_parameters(client: SqlJobClientBase) -> None: assert client.sql_client.is_dbapi_exception(term_ex.value.dbapi_exception) +@pytest.mark.essential @pytest.mark.parametrize( "client", destinations_configs(default_sql_configs=True), indirect=True, ids=lambda x: x.name ) @@ -186,6 +187,7 @@ def test_execute_sql(client: SqlJobClientBase) -> None: assert len(rows) == 0 +@pytest.mark.essential @pytest.mark.parametrize( "client", destinations_configs(default_sql_configs=True), indirect=True, ids=lambda x: x.name ) @@ -204,6 +206,7 @@ def test_execute_ddl(client: SqlJobClientBase) -> None: assert rows[0][0] == Decimal("1.0") +@pytest.mark.essential @pytest.mark.parametrize( "client", destinations_configs(default_sql_configs=True), indirect=True, ids=lambda x: x.name ) @@ -246,6 +249,7 @@ def test_execute_query(client: SqlJobClientBase) -> None: assert len(rows) == 0 +@pytest.mark.essential @pytest.mark.parametrize( "client", destinations_configs(default_sql_configs=True), indirect=True, ids=lambda x: x.name ) @@ -297,6 +301,7 @@ def test_execute_df(client: SqlJobClientBase) -> None: assert df_3 is None 
+@pytest.mark.essential @pytest.mark.parametrize( "client", destinations_configs(default_sql_configs=True), indirect=True, ids=lambda x: x.name ) diff --git a/tests/load/weaviate/test_naming.py b/tests/load/weaviate/test_naming.py index 290879cb67..613582e9f8 100644 --- a/tests/load/weaviate/test_naming.py +++ b/tests/load/weaviate/test_naming.py @@ -5,6 +5,9 @@ from tests.common.utils import load_yml_case +# mark all tests as essential, do not remove +pytestmark = pytest.mark.essential + @dlt.source def small(): diff --git a/tests/load/weaviate/test_pipeline.py b/tests/load/weaviate/test_pipeline.py index dc23644940..25adc29c89 100644 --- a/tests/load/weaviate/test_pipeline.py +++ b/tests/load/weaviate/test_pipeline.py @@ -16,6 +16,9 @@ from .utils import assert_class, drop_active_pipeline_data from tests.load.utils import sequence_generator +# mark all tests as essential, do not remove +pytestmark = pytest.mark.essential + @pytest.fixture(autouse=True) def drop_weaviate_schema() -> Iterator[None]: diff --git a/tests/load/weaviate/test_weaviate_client.py b/tests/load/weaviate/test_weaviate_client.py index 3f966c2330..11d3f13db9 100644 --- a/tests/load/weaviate/test_weaviate_client.py +++ b/tests/load/weaviate/test_weaviate_client.py @@ -26,6 +26,9 @@ from .utils import drop_active_pipeline_data +# mark all tests as essential, do not remove +pytestmark = pytest.mark.essential + @pytest.fixture(autouse=True) def drop_weaviate_schema() -> Iterator[None]: