From 7e9231966ebf6f300bb9ecdb913b01040c9b9a0d Mon Sep 17 00:00:00 2001 From: Chetan Kini Date: Tue, 6 Dec 2022 14:56:45 -0500 Subject: [PATCH] [MAINTENANCE] Rename `GE` to `GX` across codebase (GREAT-1352) (#6494) --- SLACK_GUIDELINES.md | 4 +- azure-pipelines-dev.yml | 22 ++-- azure-pipelines.yml | 4 +- azure/user-install-matrix.yml | 4 +- .../README.md | 38 +++--- docker/Dockerfile | 12 +- ...t-data_context-data_context-DataContext.md | 2 +- docs/contributing/style_guides/docs_style.md | 2 +- .../how_to_use_great_expectations_in_flyte.md | 4 +- ...with_google_cloud_platform_and_bigquery.md | 8 +- .../cloud/gcs/pandas.md | 2 +- ...configure_an_inferredassetdataconnector.md | 2 +- .../how_to_use_custom_expectations.md | 4 +- .../how_to_use_the_great_expectations_cli.md | 4 +- .../how_to_write_a_how_to_guide.md | 2 +- docs/guides/miscellaneous/migration_guide.md | 50 ++++---- docs/integrations/integration_datahub.md | 14 +-- docs/reference/anonymous_usage_statistics.md | 2 +- docs_rtd/_static/style.css | 2 +- docs_rtd/contributing/miscellaneous.rst | 10 +- docs_rtd/contributing/style_guide.rst | 4 +- ...eate_renderers_for_custom_expectations.rst | 2 +- ...figure_a_self_managed_spark_datasource.rst | 2 +- ...onfigure_an_inferredassetdataconnector.rst | 2 +- ...o_configure_sorting_in_data_connectors.rst | 10 +- .../how_to_create_custom_expectations.rst | 4 +- ...request_using_an_active_data_connector.rst | 2 +- .../how_to_guides/migrating_versions.rst | 8 +- .../how_to_write_a_how_to_guide.rst | 6 +- .../how_to_create_expectations.rst | 4 +- .../initialize_a_data_context.rst | 8 +- .../initialize_a_data_context.rst | 6 +- .../tutorials/how_to_create_expectations.rst | 4 +- .../workflows_patterns/deployment_airflow.rst | 4 +- .../reference/core_concepts/data_docs.rst | 6 +- .../expectations/expectations.rst | 6 +- .../spare_parts/data_context_reference.rst | 8 +- .../spare_parts/profiling_reference.rst | 6 +- examples/integrations/airflow/README.md | 2 +- great_expectations/checkpoint/actions.py | 2 +- great_expectations/checkpoint/util.py | 2 +- great_expectations/cli/batch_request.py | 4 +- great_expectations/cli/checkpoint.py | 4 +- great_expectations/cli/cli.py | 4 +- great_expectations/cli/cli_messages.py | 2 +- great_expectations/cli/datasource.py | 2 +- great_expectations/cli/init.py | 4 +- great_expectations/cli/project.py | 10 +- great_expectations/cli/suite.py | 2 +- great_expectations/cli/toolkit.py | 24 ++-- great_expectations/cli/v012/checkpoint.py | 2 +- great_expectations/cli/v012/cli_messages.py | 2 +- great_expectations/cli/v012/datasource.py | 8 +- great_expectations/cli/v012/init.py | 2 +- great_expectations/cli/v012/project.py | 6 +- great_expectations/cli/v012/suite.py | 2 +- great_expectations/cli/v012/toolkit.py | 18 +-- .../core/evaluation_parameters.py | 8 +- .../core/usage_statistics/anonymizers/base.py | 12 +- .../anonymizers/expectation_anonymizer.py | 2 +- .../anonymizers/profiler_anonymizer.py | 2 +- .../usage_statistics/execution_environment.py | 12 +- .../usage_statistics/package_dependencies.py | 38 +++--- .../core/usage_statistics/usage_statistics.py | 6 +- great_expectations/core/yaml_handler.py | 2 +- .../config_validator/yaml_config_validator.py | 2 +- .../data_context/abstract_data_context.py | 30 ++--- .../data_context/base_data_context.py | 16 +-- .../data_context/cloud_data_context.py | 20 ++-- .../data_context/data_context/data_context.py | 40 +++---- .../data_context/explorer_data_context.py | 2 +- .../data_context/file_data_context.py | 2 +- 
.../data_context/data_context_variables.py | 2 +- .../data_context/store/checkpoint_store.py | 4 +- .../data_context/store/data_context_store.py | 2 +- .../data_context/store/datasource_store.py | 4 +- .../data_context/store/expectations_store.py | 4 +- .../store/gx_cloud_store_backend.py | 28 ++--- .../data_context/store/html_site_store.py | 2 +- .../store/inline_store_backend.py | 2 +- .../data_context/store/json_site_store.py | 6 +- .../data_context/store/profiler_store.py | 4 +- .../data_context/store/store.py | 4 +- .../data_context/store/validations_store.py | 4 +- great_expectations/data_context/templates.py | 2 +- great_expectations/data_context/types/base.py | 22 ++-- great_expectations/data_context/util.py | 2 +- .../dataset/sqlalchemy_dataset.py | 112 +++++++++--------- great_expectations/dataset/util.py | 4 +- .../batch_kwargs_generator.py | 2 +- .../table_batch_kwargs_generator.py | 12 +- .../data_connector/data_connector.py | 2 +- .../inferred_asset_sql_data_connector.py | 4 +- .../datasource/pandas_datasource.py | 2 +- .../datasource/sparkdf_datasource.py | 2 +- .../datasource/sqlalchemy_datasource.py | 8 +- .../sparkdf_execution_engine.py | 2 +- .../sqlalchemy_data_sampler.py | 8 +- .../sqlalchemy_data_splitter.py | 34 +++--- .../execution_engine/sqlalchemy_batch_data.py | 50 ++++---- .../execution_engine/sqlalchemy_dialect.py | 12 +- .../sqlalchemy_execution_engine.py | 48 ++++---- ...ct_column_kl_divergence_to_be_less_than.py | 3 - ...ct_column_quantile_values_to_be_between.py | 3 - .../expect_column_values_to_be_of_type.py | 4 +- .../expectations/expectation.py | 14 +-- .../column_quantile_values.py | 16 +-- .../column_standard_deviation.py | 6 +- .../metrics/map_metric_provider.py | 6 +- .../metrics/table_metrics/table_head.py | 4 +- .../expectations/metrics/util.py | 4 +- great_expectations/jupyter_ux/__init__.py | 2 +- .../profile/user_configurable_profiler.py | 40 +++---- .../render/renderer/site_builder.py | 2 +- great_expectations/util.py | 6 +- reqs/requirements-dev-lite.txt | 2 +- scripts/build_api_docs.py | 4 +- tests/cli/conftest.py | 2 +- tests/cli/test_checkpoint.py | 10 +- tests/cli/test_datasource_new_pandas_paths.py | 2 +- tests/cli/test_datasource_pandas.py | 8 +- tests/cli/test_datasource_snowflake.py | 2 +- tests/cli/test_datasource_sqlite.py | 2 +- tests/cli/test_docs.py | 8 +- tests/cli/test_init.py | 2 +- tests/cli/test_init_pandas.py | 12 +- tests/cli/test_init_sqlite.py | 12 +- .../upgrade_helpers/test_upgrade_helper.py | 30 ++--- tests/cli/v012/test_checkpoint.py | 8 +- tests/cli/v012/test_datasource_pandas.py | 2 +- tests/cli/v012/test_datasource_sqlite.py | 2 +- tests/cli/v012/test_init.py | 8 +- tests/cli/v012/test_init_pandas.py | 12 +- tests/cli/v012/test_init_sqlite.py | 12 +- tests/cli/v012/test_suite.py | 4 +- tests/cli/v012/test_suite_pre_v013.py | 4 +- .../test_upgrade_helper_pre_v013.py | 16 +-- tests/conftest.py | 2 +- tests/core/test_evaluation_parameters.py | 2 +- .../test_execution_environment.py | 12 +- .../test_package_dependencies.py | 18 +-- .../test_expectation_suite_crud.py | 2 +- ...data_connector_sparkdf_execution_engine.py | 2 +- .../great_expectations/great_expectations.yml | 2 +- .../uncommitted/config_variables.yml | 2 +- .../plugins/my_custom_non_core_ge_class.py | 2 +- .../great_expectations.yml | 2 +- .../test_datasource_store_cloud_backend.py | 2 +- .../store/test_gx_cloud_store_backend.py | 2 +- .../data_context/store/test_store_backends.py | 6 +- tests/data_context/test_data_context.py | 22 ++-- 
.../test_data_context_data_docs_api.py | 14 +-- .../test_data_context_test_yaml_config.py | 2 +- ...ta_context_test_yaml_config_usage_stats.py | 6 +- tests/data_context/test_data_context_utils.py | 2 +- .../test_data_context_variables.py | 4 +- .../datasource/test_datasource_anonymizer.py | 2 +- tests/datasource/test_pandas_datasource.py | 2 +- tests/datasource/test_sparkdf_datasource.py | 2 +- ...st_sqlalchemy_execution_engine_sampling.py | 80 ++++++------- .../test_sqlalchemy_batch_data.py | 4 +- .../test_sqlalchemy_dialect.py | 14 +-- .../test_sqlalchemy_execution_engine.py | 6 +- ...das_execution_engine_with_gcp_installed.py | 2 +- .../simple_build_data_docs.py | 10 +- tests/integration/db/bigquery.py | 2 +- .../how_to_configure_credentials.py | 2 +- .../getting-started/getting_started.py | 10 +- .../gcp_deployment/ge_checkpoint_bigquery.py | 4 +- .../gcp_deployment/ge_checkpoint_gcs.py | 4 +- .../great_expectations/great_expectations.yml | 2 +- tests/integration/test_script_runner.py | 2 +- .../test_render_BulletListContentBlock.py | 2 +- .../great_expectations/great_expectations.yml | 2 +- .../great_expectations/great_expectations.yml | 2 +- .../great_expectations/great_expectations.yml | 2 +- .../great_expectations/great_expectations.yml | 2 +- .../great_expectations/great_expectations.yml | 2 +- .../great_expectations/great_expectations.yml | 2 +- ..._expectations_basic_with_bad_notebooks.yml | 2 +- ..._expectations_custom_local_site_config.yml | 2 +- .../great_expectations_custom_notebooks.yml | 2 +- ...expectations_custom_notebooks_defaults.yml | 2 +- .../great_expectations_site_builder.yml | 2 +- .../great_expectations_v013_bad_notebooks.yml | 2 +- ...eat_expectations_v013_custom_notebooks.yml | 2 +- .../great_expectations_v013_site_builder.yml | 2 +- ...figuredAssetFileSystemExample_Pandas.ipynb | 2 +- ...nfiguredAssetFileSystemExample_Spark.ipynb | 2 +- .../uncommitted/config_variables.yml | 2 +- .../UpgradeHelperV11_basic_upgrade_log.json | 8 +- ...rV11_basic_upgrade_with_exception_log.json | 8 +- ...adeHelperV11_manual_steps_upgrade_log.json | 8 +- .../uncommitted/config_variables.yml | 2 +- .../upgrade_helper/great_expectations_v2.yml | 2 +- .../checkpoints/titanic_checkpoint_0.yml | 2 +- .../checkpoints/titanic_checkpoint_1.yml | 4 +- .../uncommitted/config_variables.yml | 2 +- .../uncommitted/config_variables.yml | 2 +- .../checkpoints/titanic_checkpoint_0.yml | 2 +- .../checkpoints/titanic_checkpoint_1.yml | 4 +- .../uncommitted/config_variables.yml | 2 +- ...configuration_without_checkpoint_store.yml | 2 +- ...ic_project_upgrade_expected_stdout.fixture | 4 +- ...oject_upgrade_expected_v012_stdout.fixture | 4 +- ...ade_with_exception_expected_stdout.fixture | 2 +- ...ith_exception_expected_v012_stdout.fixture | 2 +- ..._with_manual_steps_expected_stdout.fixture | 2 +- ..._manual_steps_expected_v012_stdout.fixture | 2 +- ...oject_upgrade_expected_v012_stdout.fixture | 2 +- ...rade_with_manual_steps_checkpoints.fixture | 2 +- ...lidation_operators_expected_stdout.fixture | 2 +- ...thout_manual_steps_expected_stdout.fixture | 2 +- tests/test_great_expectations.py | 2 +- tests/validator/test_validator.py | 4 +- 215 files changed, 799 insertions(+), 817 deletions(-) diff --git a/SLACK_GUIDELINES.md b/SLACK_GUIDELINES.md index 2950eff44288..08877083c691 100644 --- a/SLACK_GUIDELINES.md +++ b/SLACK_GUIDELINES.md @@ -4,7 +4,7 @@ We cannot stress enough that we want this to be a safe, comfortable and inclusive environment. 
Please read our [code of conduct](https://github.com/great-expectations/great_expectations/blob/develop/CODE_OF_CONDUCT.md) if you need more information on this guideline. ## Keep timezones in mind and be respectful of peoples’ time. -People on Slack are distributed and might be in a very different time zone from you, so don't use @channel @here (this is reserved for admins anyways). Before you @-mention someone, think about what timezone they are in and if you are likely to disturb them. You can check someone's timezone in their profile. As of today, the core GE team is based solely in the United States but the community is world wide. +People on Slack are distributed and might be in a very different time zone from you, so don't use @channel @here (this is reserved for admins anyways). Before you @-mention someone, think about what timezone they are in and if you are likely to disturb them. You can check someone's timezone in their profile. As of today, the core GX team is based solely in the United States but the community is world wide. If you post in off hours be patient, Someone will get back to you once the sun comes up. @@ -13,7 +13,7 @@ If you post in off hours be patient, Someone will get back to you once the sun c - Do your best to try and solve the problem first as your efforts will help us more easily answer the question. - [Read "How to write a good question in Slack"](https://github.com/great-expectations/great_expectations/discussions/4951) - Head over to our [Documentation](https://docs.greatexpectations.io/en/latest/) -- Checkout [GitHub Discussions](https://github.com/great-expectations/great_expectations/discussions) this is where we want most of our problem solving, discussion, updates, etc to go because it helps keep a more visible record for GE users. +- Checkout [GitHub Discussions](https://github.com/great-expectations/great_expectations/discussions) this is where we want most of our problem solving, discussion, updates, etc to go because it helps keep a more visible record for GX users. #### Asking your question in Slack diff --git a/azure-pipelines-dev.yml b/azure-pipelines-dev.yml index 087dfbd36319..ab171fd11065 100644 --- a/azure-pipelines-dev.yml +++ b/azure-pipelines-dev.yml @@ -72,7 +72,7 @@ stages: tests/integration/fixtures/** tests/test_sets/** - [GEChanged] + [GXChanged] great_expectations/**/*.py pyproject.toml setup.cfg @@ -89,7 +89,7 @@ stages: jobs: - job: lint - condition: eq(stageDependencies.scope_check.changes.outputs['CheckChanges.GEChanged'], true) + condition: eq(stageDependencies.scope_check.changes.outputs['CheckChanges.GXChanged'], true) steps: - task: UsePythonVersion@0 inputs: @@ -156,10 +156,10 @@ stages: - script: | pip install . - displayName: 'Install GE and required dependencies (i.e. not sqlalchemy)' + displayName: 'Install GX and required dependencies (i.e. not sqlalchemy)' - script: | - python -c "import great_expectations as gx; print('Successfully imported GE Version:', gx.__version__)" + python -c "import great_expectations as gx; print('Successfully imported GX Version:', gx.__version__)" displayName: 'Import Great Expectations' - stage: required @@ -170,7 +170,7 @@ stages: jobs: # Runs pytest without any additional flags - job: minimal - condition: eq(stageDependencies.scope_check.changes.outputs['CheckChanges.GEChanged'], true) + condition: eq(stageDependencies.scope_check.changes.outputs['CheckChanges.GXChanged'], true) strategy: # This matrix is intended to split up our sizeable test suite into two distinct components. 
# By splitting up slow tests from the remainder of the suite, we can parallelize test runs @@ -248,7 +248,7 @@ stages: # Runs pytest with Spark and Postgres enabled - job: comprehensive - condition: eq(stageDependencies.scope_check.changes.outputs['CheckChanges.GEChanged'], true) + condition: eq(stageDependencies.scope_check.changes.outputs['CheckChanges.GXChanged'], true) strategy: # This matrix is intended to split up our sizeable test suite into two distinct components. # By splitting up slow tests from the remainder of the suite, we can parallelize test runs @@ -323,7 +323,7 @@ stages: jobs: - job: test_usage_stats_messages - condition: eq(stageDependencies.scope_check.changes.outputs['CheckChanges.GEChanged'], true) + condition: eq(stageDependencies.scope_check.changes.outputs['CheckChanges.GXChanged'], true) variables: python.version: '3.8' @@ -359,7 +359,7 @@ stages: jobs: - job: mysql - condition: eq(stageDependencies.scope_check.changes.outputs['CheckChanges.GEChanged'], true) + condition: eq(stageDependencies.scope_check.changes.outputs['CheckChanges.GXChanged'], true) services: mysql: mysql @@ -416,7 +416,7 @@ stages: GE_USAGE_STATISTICS_URL: ${{ variables.GE_USAGE_STATISTICS_URL }} - job: mssql - condition: eq(stageDependencies.scope_check.changes.outputs['CheckChanges.GEChanged'], true) + condition: eq(stageDependencies.scope_check.changes.outputs['CheckChanges.GXChanged'], true) services: mssql: mssql @@ -463,7 +463,7 @@ stages: GE_USAGE_STATISTICS_URL: ${{ variables.GE_USAGE_STATISTICS_URL }} - job: trino - condition: eq(stageDependencies.scope_check.changes.outputs['CheckChanges.GEChanged'], true) + condition: eq(stageDependencies.scope_check.changes.outputs['CheckChanges.GXChanged'], true) services: trino: trino @@ -522,7 +522,7 @@ stages: jobs: - job: test_cli - condition: eq(stageDependencies.scope_check.changes.outputs['CheckChanges.GEChanged'], true) + condition: eq(stageDependencies.scope_check.changes.outputs['CheckChanges.GXChanged'], true) services: postgres: postgres diff --git a/azure-pipelines.yml b/azure-pipelines.yml index 1a741845a91a..3262e3449702 100644 --- a/azure-pipelines.yml +++ b/azure-pipelines.yml @@ -466,10 +466,10 @@ stages: - script: | pip install . - displayName: 'Install GE and required dependencies (i.e. not sqlalchemy)' + displayName: 'Install GX and required dependencies (i.e. 
not sqlalchemy)' - script: | - python -c "import great_expectations as gx; print('Successfully imported GE Version:', gx.__version__)" + python -c "import great_expectations as gx; print('Successfully imported GX Version:', gx.__version__)" displayName: 'Import Great Expectations' - stage: db_integration diff --git a/azure/user-install-matrix.yml b/azure/user-install-matrix.yml index 49b722f5cf9e..0d8869003989 100644 --- a/azure/user-install-matrix.yml +++ b/azure/user-install-matrix.yml @@ -20,7 +20,7 @@ jobs: - script: | great_expectations --version great_expectations -y init --no-usage-stats - python -c "import great_expectations as gx; print('Successfully imported GE Version:', gx.__version__)" + python -c "import great_expectations as gx; print('Successfully imported GX Version:', gx.__version__)" displayName: 'Confirm installation' - job: @@ -47,5 +47,5 @@ jobs: source activate ge_dev great_expectations --version great_expectations -y init --no-usage-stats - python -c "import great_expectations as gx; print('Successfully imported GE Version:', gx.__version__)" + python -c "import great_expectations as gx; print('Successfully imported GX Version:', gx.__version__)" displayName: 'Confirm installation' diff --git a/contrib/capitalone_dataprofiler_expectations/README.md b/contrib/capitalone_dataprofiler_expectations/README.md index 36618d420a54..f600dfa2e0b2 100644 --- a/contrib/capitalone_dataprofiler_expectations/README.md +++ b/contrib/capitalone_dataprofiler_expectations/README.md @@ -38,7 +38,7 @@ If you have suggestions or find a bug, [please open an issue](https://github.com If you want to install the ml dependencies without generating reports use `DataProfiler[ml]` -If the ML requirements are too strict (say, you don't want to install tensorflow), you can install a slimmer package with `DataProfiler[reports]`. The slimmer package disables the default sensitive data detection / entity recognition (labler) +If the ML requirements are too strict (say, you don't want to install tensorflow), you can install a slimmer package with `DataProfiler[reports]`. The slimmer package disables the default sensitive data detection / entity recognition (labler) Install from pypi: `pip install DataProfiler` @@ -47,7 +47,7 @@ Install from pypi: `pip install DataProfiler` # What is a Data Profile? -In the case of this library, a data profile is a dictionary containing statistics and predictions about the underlying dataset. There are "global statistics" or `global_stats`, which contain dataset level data and there are "column/row level statistics" or `data_stats` (each column is a new key-value entry). +In the case of this library, a data profile is a dictionary containing statistics and predictions about the underlying dataset. There are "global statistics" or `global_stats`, which contain dataset level data and there are "column/row level statistics" or `data_stats` (each column is a new key-value entry). 
The format for a structured profile is below: @@ -57,7 +57,7 @@ The format for a structured profile is below: "column_count": int, "row_count": int, "row_has_null_ratio": float, - "row_is_null_ratio": float, + "row_is_null_ratio": float, "unique_row_ratio": float, "duplicate_row_count": int, "file_type": string, @@ -84,11 +84,11 @@ The format for a structured profile is below: "null_types_index": { string: list[int] }, - "data_type_representation": dict[string, float], + "data_type_representation": dict[string, float], "min": [null, float, str], "max": [null, float, str], "mode": float, - "median": float, + "median": float, "median_absolute_deviation": float, "sum": float, "mean": float, @@ -98,7 +98,7 @@ The format for a structured profile is below: "kurtosis": float, "num_zeros": int, "num_negatives": int, - "histogram": { + "histogram": { "bin_counts": list[int], "bin_edges": list[float], }, @@ -106,7 +106,7 @@ The format for a structured profile is below: int: float }, "vocab": list[char], - "avg_predictions": dict[string, float], + "avg_predictions": dict[string, float], "data_label_representation": dict[string, float], "categories": list[str], "unique_count": int, @@ -122,7 +122,7 @@ The format for a structured profile is below: 'std': float, 'sample_size': int, 'margin_of_error': float, - 'confidence_level': float + 'confidence_level': float }, "times": dict[string, float], "format": string @@ -180,7 +180,7 @@ The format for an unstructured profile is below: * `duplicate_row_count` - the number of rows that occur more than once in the input dataset * `file_type` - the format of the file containing the input dataset (ex: .csv) * `encoding` - the encoding of the file containing the input dataset (ex: UTF-8) -* `correlation_matrix` - matrix of shape `column_count` x `column_count` containing the correlation coefficients between each column in the dataset +* `correlation_matrix` - matrix of shape `column_count` x `column_count` containing the correlation coefficients between each column in the dataset * `chi2_matrix` - matrix of shape `column_count` x `column_count` containing the chi-square statistics between each column in the dataset * `profile_schema` - a description of the format of the input dataset labeling each column and its index in the dataset * `string` - the label of the column in question and its index in the profile schema @@ -289,7 +289,7 @@ The format for an unstructured profile is below: * BAN (bank account number, 10-18 digits) * CREDIT_CARD * EMAIL_ADDRESS -* UUID +* UUID * HASH_OR_KEY (md5, sha1, sha256, random hash, etc.) * IPV4 * IPV6 @@ -328,7 +328,7 @@ Along with other attributtes the `Data class` enables data to be accessed via a ```python # Load a csv file, return a CSVData object -csv_data = Data('your_file.csv') +csv_data = Data('your_file.csv') # Print the first 10 rows of the csv file print(csv_data.data.head(10)) @@ -346,10 +346,10 @@ print(parquet_data.data.head(10)) json_data = Data('https://github.com/capitalone/DataProfiler/blob/main/dataprofiler/tests/data/json/iris-utf-8.json') ``` -If the file type is not automatically identified (rare), you can specify them +If the file type is not automatically identified (rare), you can specify them specifically, see section [Specifying a Filetype or Delimiter](#specifying-a-filetype-or-delimiter). -### Profile a File +### Profile a File Example uses a CSV file for example, but CSV, JSON, Avro, Parquet or Text should also work. 
@@ -358,7 +358,7 @@ import json from dataprofiler import Data, Profiler # Load file (CSV should be automatically identified) -data = Data("your_file.csv") +data = Data("your_file.csv") # Profile the dataset profile = Profiler(data) @@ -395,7 +395,7 @@ Note that if the data you update the profile with contains integer indices that ### Merging Profiles -If you have two files with the same schema (but different data), it is possible to merge the two profiles together via an addition operator. +If you have two files with the same schema (but different data), it is possible to merge the two profiles together via an addition operator. This also enables profiles to be determined in a distributed manner. @@ -422,8 +422,8 @@ Note that if merged profiles had overlapping integer indices, when null rows are ### Profiler Differences For finding the change between profiles with the same schema we can utilize the -profile's `diff` function. The diff will provide overall file and sampling -differences as well as detailed differences of the data's statistics. For +profile's `diff` function. The diff will provide overall file and sampling +differences as well as detailed differences of the data's statistics. For example, numerical columns have a t-test applied to evaluate similarity. More information is described in the Profiler section of the [Github Pages]( https://capitalone.github.io/DataProfiler/). @@ -463,7 +463,7 @@ print(json.dumps(report["data_stats"][0], indent=4)) ``` ### Unstructured profiler -In addition to the structured profiler, DataProfiler provides unstructured profiling for the TextData object or string. The unstructured profiler also works with list[string], pd.Series(string) or pd.DataFrame(string) given profiler_type option specified as `unstructured`. Below is an example of the unstructured profiler with a text file. +In addition to the structured profiler, DataProfiler provides unstructured profiling for the TextData object or string. The unstructured profiler also works with list[string], pd.Series(string) or pd.DataFrame(string) given profiler_type option specified as `unstructured`. Below is an example of the unstructured profiler with a text file. ```python import dataprofiler as dp import json @@ -500,4 +500,4 @@ Authors: Anh Truong, Austin Walters, Jeremy Goodsitt The AAAI-21 Workshop on Knowledge Discovery from Unstructured Data in Financial Services ``` -GE Integration Author: Taylor Turner ([taylorfturner](https://github.com/taylorfturner)) +GX Integration Author: Taylor Turner ([taylorfturner](https://github.com/taylorfturner)) diff --git a/docker/Dockerfile b/docker/Dockerfile index 0ea3ad52d331..7b2bc4863e7a 100644 --- a/docker/Dockerfile +++ b/docker/Dockerfile @@ -2,19 +2,19 @@ ARG PYTHON_DOCKER_TAG FROM python:${PYTHON_DOCKER_TAG} -ARG GE_EXTRA_DEPS="spark,sqlalchemy,redshift,s3,gcp,snowflake" +ARG GX_EXTRA_DEPS="spark,sqlalchemy,redshift,s3,gcp,snowflake" ENV PYTHONIOENCODING utf-8 ENV LANG C.UTF-8 ENV HOME /root ENV PATH /usr/local/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:${HOME}/.local/bin -# Path where the root of the GE project will be expected -ENV GE_HOME /usr/app/great_expectations +# Path where the root of the GX project will be expected +ENV GX_HOME /usr/app/great_expectations LABEL maintainer="great-expectations" LABEL org.opencontainers.image.title="Great Expectations" LABEL org.opencontainers.image.description="Great Expectations. Always know what to expect from your data." 
-LABEL org.opencontainers.image.version=${GE_VERSION} +LABEL org.opencontainers.image.version=${GX_VERSION} LABEL org.opencontainers.image.created=${CREATED} LABEL org.opencontainers.image.url="https://github.com/great-expectations/great_expectations" LABEL org.opencontainers.image.documentation="https://github.com/great-expectations/great_expectations" @@ -29,10 +29,10 @@ COPY . /tmp/great_expectations_install RUN mkdir -p /usr/app ${HOME} && \ cd /tmp/great_expectations_install && \ - pip install .[${GE_EXTRA_DEPS}] && \ + pip install .[${GX_EXTRA_DEPS}] && \ rm -rf /tmp/great_expectations_install -WORKDIR ${GE_HOME} +WORKDIR ${GX_HOME} ENTRYPOINT ["great_expectations"] CMD ["--help"] diff --git a/docs/api_docs/classes/great_expectations-data_context-data_context-data_context-DataContext.md b/docs/api_docs/classes/great_expectations-data_context-data_context-data_context-DataContext.md index 5f6d3292dddb..66f081d462b6 100644 --- a/docs/api_docs/classes/great_expectations-data_context-data_context-data_context-DataContext.md +++ b/docs/api_docs/classes/great_expectations-data_context-data_context-data_context-DataContext.md @@ -11,7 +11,7 @@ deployment, with configurations and methods for all supporting components. The DataContext is configured via a yml file stored in a directory called great_expectations; this configuration file as well as managed Expectation Suites should be stored in version control. There are other ways to create a -Data Context that may be better suited for your particular deployment e.g. ephemerally or backed by GE Cloud +Data Context that may be better suited for your particular deployment e.g. ephemerally or backed by GX Cloud (coming soon). Please refer to our documentation for more details. You can Validate data or generate Expectations using Execution Engines including: diff --git a/docs/contributing/style_guides/docs_style.md b/docs/contributing/style_guides/docs_style.md index 56c167e2ac92..75c32a508f9a 100644 --- a/docs/contributing/style_guides/docs_style.md +++ b/docs/contributing/style_guides/docs_style.md @@ -30,7 +30,7 @@ This style guide will be enforced for all incoming PRs. However, certain legacy ::: -* The **project name “Great Expectations” is always spaced and capitalized.** Good: “Great Expectations”. Bad: “great_expectations”, “great expectations”, “GE.” +* The **project name “Great Expectations” is always spaced and capitalized.** Good: “Great Expectations”. Bad: “great_expectations”, “great expectations”, “GX.” * **We refer to ourselves in the first person plural.** Good: “we”, “our”. Bad: “I”. This helps us avoid awkward passive sentences. Occasionally, we refer to ourselves as “the Great Expectations team” (or community) for clarity. 
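For reference, a minimal sketch of loading the file-backed Data Context that the docstring above describes — illustrative only, not part of this patch; it assumes an existing `great_expectations/` project directory, and the path and listing call are placeholders:

```python
# Minimal sketch (illustrative, not part of this patch): load a file-backed
# Data Context from an existing great_expectations/ project directory and
# list its configured Datasources. The path below is a placeholder.
from great_expectations.data_context import DataContext

context = DataContext(context_root_dir="great_expectations")
print(context.list_datasources())
```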
diff --git a/docs/deployment_patterns/how_to_use_great_expectations_in_flyte.md b/docs/deployment_patterns/how_to_use_great_expectations_in_flyte.md index d45619580a05..0542dafd8505 100644 --- a/docs/deployment_patterns/how_to_use_great_expectations_in_flyte.md +++ b/docs/deployment_patterns/how_to_use_great_expectations_in_flyte.md @@ -99,7 +99,7 @@ def file_task( @workflow def file_wf( - dataset: CSVFile = "https://raw.githubusercontent.com/superconductive/ge_tutorials/main/data/yellow_tripdata_sample_2019-01.csv", + dataset: CSVFile = "https://raw.githubusercontent.com/great-expectations/gx_tutorials/main/data/yellow_tripdata_sample_2019-01.csv", ) -> int: return file_task(dataset=dataset) @@ -156,7 +156,7 @@ def to_df(dataset: str) -> pd.DataFrame: def schema_wf() -> int: return schema_task( dataframe=to_df( - dataset="https://raw.githubusercontent.com/superconductive/ge_tutorials/main/data/yellow_tripdata_sample_2019-01.csv" + dataset="https://raw.githubusercontent.com/great-expectations/gx_tutorials/main/data/yellow_tripdata_sample_2019-01.csv" ) ) diff --git a/docs/deployment_patterns/how_to_use_great_expectations_with_google_cloud_platform_and_bigquery.md b/docs/deployment_patterns/how_to_use_great_expectations_with_google_cloud_platform_and_bigquery.md index 93df1a49331f..418b00f0c1d2 100644 --- a/docs/deployment_patterns/how_to_use_great_expectations_with_google_cloud_platform_and_bigquery.md +++ b/docs/deployment_patterns/how_to_use_great_expectations_with_google_cloud_platform_and_bigquery.md @@ -7,7 +7,7 @@ import TabItem from '@theme/TabItem'; import Congratulations from '../guides/connecting_to_your_data/components/congratulations.md' import TechnicalTag from '@site/docs/term_tags/_tag.mdx'; -This guide will help you integrate Great Expectations (GE) with [Google Cloud Platform](https://cloud.google.com/gcp) (GCP) using our recommended workflow. +This guide will help you integrate Great Expectations (GX) with [Google Cloud Platform](https://cloud.google.com/gcp) (GCP) using our recommended workflow. @@ -26,7 +26,7 @@ We recommend that you use Great Expectations in GCP by using the following servi - [Google App Engine](https://cloud.google.com/appengine) (GAE) for hosting and controlling access to . We also recommend that you deploy Great Expectations to GCP in two steps: -1. [Developing a local configuration for GE that uses GCP services to connect to your data, store Great Expectations metadata, and run a Checkpoint.](#part-1-local-configuration-of-great-expectations-that-connects-to-google-cloud-platform) +1. [Developing a local configuration for GX that uses GCP services to connect to your data, store Great Expectations metadata, and run a Checkpoint.](#part-1-local-configuration-of-great-expectations-that-connects-to-google-cloud-platform) 2. [Migrating the local configuration to Cloud Composer so that the workflow can be orchestrated automatically on GCP.](#part-2-migrating-our-local-configuration-to-cloud-composer) The following diagram shows the recommended components for a Great Expectations deployment in GCP: @@ -327,7 +327,7 @@ Now you are ready to migrate the local configuration to Cloud Composer. ## Part 2: Migrating our Local Configuration to Cloud Composer -We will now take the local GE configuration from [Part 1](#part-1-local-configuration-of-great-expectations-that-connects-to-google-cloud-platform) and migrate it to a Cloud Composer environment so that we can automate the workflow. 
+We will now take the local GX configuration from [Part 1](#part-1-local-configuration-of-great-expectations-that-connects-to-google-cloud-platform) and migrate it to a Cloud Composer environment so that we can automate the workflow. There are a number of ways that Great Expectations can be run in Cloud Composer or Airflow. @@ -469,6 +469,6 @@ There are many ways to iterate and improve this initial version, which used a `b Also, the following scripts and configurations can be found here: -- Local GE configuration used in this guide can be found in the [`great-expectations` GIT repository](https://github.com/great-expectations/great_expectations/tree/develop/tests/integration/fixtures/gcp_deployment/). +- Local GX configuration used in this guide can be found in the [`great-expectations` GIT repository](https://github.com/great-expectations/great_expectations/tree/develop/tests/integration/fixtures/gcp_deployment/). - [Script to test BigQuery configuration](https://github.com/great-expectations/great_expectations/blob/develop/tests/integration/docusaurus/deployment_patterns/gcp_deployment_patterns_file_bigquery_yaml_configs.py). - [Script to test GCS configuration](https://github.com/great-expectations/great_expectations/blob/develop/tests/integration/docusaurus/deployment_patterns/gcp_deployment_patterns_file_gcs_yaml_configs.py). diff --git a/docs/guides/connecting_to_your_data/cloud/gcs/pandas.md b/docs/guides/connecting_to_your_data/cloud/gcs/pandas.md index ba71d4fcf9b5..aca3a9f3f8da 100644 --- a/docs/guides/connecting_to_your_data/cloud/gcs/pandas.md +++ b/docs/guides/connecting_to_your_data/cloud/gcs/pandas.md @@ -61,7 +61,7 @@ It is also important to note that GCS `DataConnectors` support various methods o - This argument should contain the actual JSON data from your credentials file in the form of a string. - This method utilizes `google.oauth2.service_account.Credentials.from_service_account_info` under the hood. -Please note that if you use the `filename` or `info` options, you must supply these options to any GE objects that interact with GCS (i.e. `PandasExecutionEngine`). +Please note that if you use the `filename` or `info` options, you must supply these options to any GX objects that interact with GCS (i.e. `PandasExecutionEngine`). The `gcs_options` dictionary is also responsible for storing any `**kwargs` you wish to pass to the GCS `storage.Client()` connection object (i.e. `project`) For more details regarding storing credentials for use with Great Expectations see: [How to configure credentials](../../../setup/configuring_data_contexts/how_to_configure_credentials.md) diff --git a/docs/guides/connecting_to_your_data/how_to_configure_an_inferredassetdataconnector.md b/docs/guides/connecting_to_your_data/how_to_configure_an_inferredassetdataconnector.md index b66c212ae48f..5f9975233dcc 100644 --- a/docs/guides/connecting_to_your_data/how_to_configure_an_inferredassetdataconnector.md +++ b/docs/guides/connecting_to_your_data/how_to_configure_an_inferredassetdataconnector.md @@ -123,7 +123,7 @@ Imagine you have the following files in `my_directory/`: /yellow_tripdata_2019-03.csv ``` -We can imagine two approaches to loading the data into GE. +We can imagine two approaches to loading the data into GX. The simplest approach would be to consider each file to be its own Data Asset. 
In that case, the configuration would look like the following: diff --git a/docs/guides/expectations/creating_custom_expectations/how_to_use_custom_expectations.md b/docs/guides/expectations/creating_custom_expectations/how_to_use_custom_expectations.md index 868694d037f3..efd00b45b2b3 100644 --- a/docs/guides/expectations/creating_custom_expectations/how_to_use_custom_expectations.md +++ b/docs/guides/expectations/creating_custom_expectations/how_to_use_custom_expectations.md @@ -80,7 +80,7 @@ To do this, execute the following from your command line: great_expectations checkpoint script ``` -This will create a script in your GE directory at `great_expectations/uncommitted/run_my_checkpoint_name.py`. +This will create a script in your GX directory at `great_expectations/uncommitted/run_my_checkpoint_name.py`. That script can be edited that script to include the Custom Expectation import(s) you need: ```python @@ -163,7 +163,7 @@ To do this, execute the following from your command line: great_expectations checkpoint script ``` -This will create a script in your GE directory at `great_expectations/uncommitted/run_my_checkpoint_name.py`. +This will create a script in your GX directory at `great_expectations/uncommitted/run_my_checkpoint_name.py`. That script can be edited that script to include the Custom Expectation import(s) you need: ```python diff --git a/docs/guides/miscellaneous/how_to_use_the_great_expectations_cli.md b/docs/guides/miscellaneous/how_to_use_the_great_expectations_cli.md index 34bfd71e548b..4cca25b3820e 100644 --- a/docs/guides/miscellaneous/how_to_use_the_great_expectations_cli.md +++ b/docs/guides/miscellaneous/how_to_use_the_great_expectations_cli.md @@ -69,10 +69,10 @@ Commands: new Add a new Datasource to the data context. ``` -An existing data context can be provided with the variable `GE_HOME`, for example: +An existing data context can be provided with the variable `GX_HOME`, for example: ```bash -export GE_HOME="${HOME}/ge_dir_1/great_expectations" +export GX_HOME="${HOME}/ge_dir_1/great_expectations" great_expectations datasource list ``` diff --git a/docs/guides/miscellaneous/how_to_write_a_how_to_guide.md b/docs/guides/miscellaneous/how_to_write_a_how_to_guide.md index 4eb520043e62..22d0a37a379c 100644 --- a/docs/guides/miscellaneous/how_to_write_a_how_to_guide.md +++ b/docs/guides/miscellaneous/how_to_write_a_how_to_guide.md @@ -184,7 +184,7 @@ Most guides are code-heavy. When writing a guide that could go either way, pleas #### Using tabs to differentiate guides for different APIs. -During the process of writing documentation for Great Expectations 0.13, there arose a need to differentiate between documentation for GE up to 0.12.x, and GE 0.13 and beyond. +During the process of writing documentation for Great Expectations 0.13, there arose a need to differentiate between documentation for GX up to 0.12.x, and GX 0.13 and beyond. The use of content-tabs allows for both documentation to co-exist in the same how-to-doc. diff --git a/docs/guides/miscellaneous/migration_guide.md b/docs/guides/miscellaneous/migration_guide.md index 4c38ce10f5b8..82277c9dc9ec 100644 --- a/docs/guides/miscellaneous/migration_guide.md +++ b/docs/guides/miscellaneous/migration_guide.md @@ -21,7 +21,7 @@ The Batch Request (V3) API was introduced as part of the 0.13 major release of G ## Migrating to the Batch Request (V3) API -As of version 0.14.0, the V3 API is the preferred method of interacting with GE. 
We highly recommend that you migrate to working with the V3 API as soon as possible. Please make sure you're using the latest version of GE before beginning your migration! +As of version 0.14.0, the V3 API is the preferred method of interacting with GX. We highly recommend that you migrate to working with the V3 API as soon as possible. Please make sure you're using the latest version of GX before beginning your migration! The migration involves two parts: first, using an automated CLI tool to upgrade the config file and Data Stores, and second, manually upgrading Datasources and Checkpoints. To begin the migration from the V2 to the V3 API, please do the following: @@ -155,14 +155,14 @@ Now you are ready to manually migrate Datasources and Checkpoints to be compatib ### Manually migrate Datasources from V2 to V3 -The first manual step needed is to convert the V2-style Datasource to a V3-style one. The following documentation -contains examples for data read-in using `pandas`, `spark`, and a database, using `postgresql` as an example. +The first manual step needed is to convert the V2-style Datasource to a V3-style one. The following documentation +contains examples for data read-in using `pandas`, `spark`, and a database, using `postgresql` as an example. :::tip -The configurations for `pandas`, `spark` and `postgresql` shown in this guide are available as part of the `great-expectations` repository. +The configurations for `pandas`, `spark` and `postgresql` shown in this guide are available as part of the `great-expectations` repository. Please feel free to use the complete-and-working configurations found [here](https://github.com/great-expectations/great_expectations/tree/develop/tests/test_fixtures/configuration_for_testing_v2_v3_migration) -to help with your migration. +to help with your migration. ::: @@ -195,7 +195,7 @@ The V3-style Datasource has: #### V3-Style Datasource ```yaml file=../../../tests/test_fixtures/configuration_for_testing_v2_v3_migration/pandas/v3/great_expectations/great_expectations.yml#L16-L31 ``` - +
More details on base_directory The base_directory is set to ../../../data/ according to the example Pandas configuration which can be found in the great_expectations repository. @@ -270,9 +270,9 @@ One exception to the datatype-agnostic Datasource in the V3 API is the Sim
-Migrating Datasource configurations that contain connections to databases involve additional parameters like credentials that are specific to each configuration. The how-to-guides for Great Expectations contain numerous examples of V3 configurations that can be used for these various situations. +Migrating Datasource configurations that contain connections to databases involve additional parameters like credentials that are specific to each configuration. The how-to-guides for Great Expectations contain numerous examples of V3 configurations that can be used for these various situations. -Please check out the following docs for examples of V3-style Datasource configurations that will suit your needs: +Please check out the following docs for examples of V3-style Datasource configurations that will suit your needs: - [How to connect to a Athena database](../connecting_to_your_data/database/athena.md) - [How to connect to a BigQuery database](../connecting_to_your_data/database/bigquery.md) @@ -288,11 +288,11 @@ Please check out the following docs for examples of V3-style Datasource configur :::tip -Before doing the migration, we recommend that you create a backup of your V2 Checkpoints. Checkpoints are typically stored as `.yml` configuration files in the `checkpoints/` directory of your `great_expectations/` folder. We recommend that you make a backup copy of these files or the directory. +Before doing the migration, we recommend that you create a backup of your V2 Checkpoints. Checkpoints are typically stored as `.yml` configuration files in the `checkpoints/` directory of your `great_expectations/` folder. We recommend that you make a backup copy of these files or the directory. ::: -In Great Expectations version 0.13.7, we introduced an improved Checkpoints feature, which allowed Checkpoints to utilize features the V3 API. As a result, Checkpoints are now able to [filter and sort batches from configured datasources](../connecting_to_your_data/how_to_get_one_or_more_batches_of_data_from_a_configured_datasource.md), [introspect and partition tables as batches](../connecting_to_your_data/how_to_configure_a_dataconnector_to_introspect_and_partition_tables_in_sql.md), with multi-batch Expectations soon to come. As part of these design improvements, Validation Operators (originally located in the `great_expectations.yml` file) were combined into Checkpoint configurations. +In Great Expectations version 0.13.7, we introduced an improved Checkpoints feature, which allowed Checkpoints to utilize features the V3 API. As a result, Checkpoints are now able to [filter and sort batches from configured datasources](../connecting_to_your_data/how_to_get_one_or_more_batches_of_data_from_a_configured_datasource.md), [introspect and partition tables as batches](../connecting_to_your_data/how_to_configure_a_dataconnector_to_introspect_and_partition_tables_in_sql.md), with multi-batch Expectations soon to come. As part of these design improvements, Validation Operators (originally located in the `great_expectations.yml` file) were combined into Checkpoint configurations. This means that, although Validation Operators were run directly from the DataContext in V2, they are now run by Checkpoints in V3 as part of `action_list` items. This change offers a convenient abstraction for running Validations and ensures that all actions associated with running validations are included in one place, rather than split up between the `great_expectations.yml` file and Checkpoint configuration. 
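To illustrate the point above — that the work Validation Operators did in V2 is now carried by a Checkpoint's `action_list` — here is a minimal V3-style Checkpoint sketch. It is not part of this patch, and the datasource, data connector, asset, and suite names are placeholders for an existing project:

```python
# Minimal sketch (not part of this patch) of a V3-style Checkpoint whose
# action_list plays the role Validation Operators played in V2.
# All names below are placeholders.
import great_expectations as gx

context = gx.get_context()
context.add_checkpoint(
    name="my_checkpoint",
    config_version=1.0,
    class_name="Checkpoint",
    validations=[
        {
            "batch_request": {
                "datasource_name": "my_datasource",
                "data_connector_name": "default_inferred_data_connector_name",
                "data_asset_name": "yellow_tripdata_sample_2019-01.csv",
            },
            "expectation_suite_name": "my_suite",
        }
    ],
    action_list=[
        {"name": "store_validation_result",
         "action": {"class_name": "StoreValidationResultAction"}},
        {"name": "store_evaluation_params",
         "action": {"class_name": "StoreEvaluationParametersAction"}},
        {"name": "update_data_docs",
         "action": {"class_name": "UpdateDataDocsAction"}},
    ],
)
# Running the Checkpoint triggers each configured action on the results.
result = context.run_checkpoint(checkpoint_name="my_checkpoint")
```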
@@ -322,10 +322,10 @@ The example V3-style Checkpoint contains: - A `action_list`, which contain a list of actions associated with the Validation Results (e.g., saving them for a later review, sending notifications in case of failures, etc.). These were known as Validation Operators in V2-style Checkpoints. :::note Migrating ExpectationSuites - - `ExpectationSuites` that were created in the V2-API will work in the V3-API **without** needing to be modified. However, `ExpectationSuites` also contain `metadata` describing the `batch` that was used to create the original `ExpectationSuite` object (under the `citations` field). For a suite that was created in V2, this metadata will contain `batch_kwargs`, and V3 suites will contain a `batch_request`. - - If you choose to do so, the `citation` metadata can be migrated using the same pattern for migrating `batch_kwargs` to `batch_request` described below. + + `ExpectationSuites` that were created in the V2-API will work in the V3-API **without** needing to be modified. However, `ExpectationSuites` also contain `metadata` describing the `batch` that was used to create the original `ExpectationSuite` object (under the `citations` field). For a suite that was created in V2, this metadata will contain `batch_kwargs`, and V3 suites will contain a `batch_request`. + + If you choose to do so, the `citation` metadata can be migrated using the same pattern for migrating `batch_kwargs` to `batch_request` described below. ::: @@ -402,10 +402,10 @@ The example V3-style Checkpoint contains: - A `action_list`, which contain a list of actions associated with the Validation Results (e.g., saving them for a later review, sending notifications in case of failures, etc.). These were known as Validation Operators in V2-style Checkpoints. :::note Migrating ExpectationSuites - - `ExpectationSuites` that were created in the V2-API will work in the V3-API **without** needing to be modified. However, `ExpectationSuites` also contain `metadata` describing the `batch` that was used to create the original `ExpectationSuite` object (under the `citations` field). For a suite that was created in V2, this metadata will contain `batch_kwargs`, and V3 suites will contain a `batch_request`. - - If you choose to do so, the `citation` metadata can be migrated using the same pattern for migrating `batch_kwargs` to `batch_request` described below. + + `ExpectationSuites` that were created in the V2-API will work in the V3-API **without** needing to be modified. However, `ExpectationSuites` also contain `metadata` describing the `batch` that was used to create the original `ExpectationSuite` object (under the `citations` field). For a suite that was created in V2, this metadata will contain `batch_kwargs`, and V3 suites will contain a `batch_request`. + + If you choose to do so, the `citation` metadata can be migrated using the same pattern for migrating `batch_kwargs` to `batch_request` described below. ::: @@ -481,10 +481,10 @@ The example V3-style Checkpoint contains: - A `action_list`, which contain a list of actions associated with the Validation Results (e.g., saving them for a later review, sending notifications in case of failures, etc.). These were known as Validation Operators in V2-style Checkpoints. :::note Migrating ExpectationSuites - - `ExpectationSuites` that were created in the V2-API will work in the V3-API **without** needing to be modified. 
However, `ExpectationSuites` also contain `metadata` describing the `batch` that was used to create the original `ExpectationSuite` object (under the `citations` field). For a suite that was created in V2, this metadata will contain `batch_kwargs`, and V3 suites will contain a `batch_request`. - - If you choose to do so, the `citation` metadata can be migrated using the same pattern for migrating `batch_kwargs` to `batch_request` described below. + + `ExpectationSuites` that were created in the V2-API will work in the V3-API **without** needing to be modified. However, `ExpectationSuites` also contain `metadata` describing the `batch` that was used to create the original `ExpectationSuite` object (under the `citations` field). For a suite that was created in V2, this metadata will contain `batch_kwargs`, and V3 suites will contain a `batch_request`. + + If you choose to do so, the `citation` metadata can be migrated using the same pattern for migrating `batch_kwargs` to `batch_request` described below. ::: @@ -582,7 +582,7 @@ Using the new API: } ), match_type="success" - ) + ) ``` @@ -807,7 +807,7 @@ Follow these steps to upgrade your existing Great Expectations project: - The command will display this message when done: `Your config file appears valid!`. -- Rename your Expectation Suites to make them compatible with the new naming. Save this Python code snippet in a file called `update_project.py`, then run it using the command: `python update_project.py PATH_TO_GE_CONFIG_DIRECTORY`: +- Rename your Expectation Suites to make them compatible with the new naming. Save this Python code snippet in a file called `update_project.py`, then run it using the command: `python update_project.py PATH_TO_GX_CONFIG_DIRECTORY`: ```python # !/usr/bin/env python3 @@ -1028,7 +1028,7 @@ If you run into any issues, please ask for help on [Slack](https://greatexpectat ### Upgrading to 0.7.x -In version 0.7, GE introduced several new features, and significantly changed the way DataContext objects work: +In version 0.7, GX introduced several new features, and significantly changed the way DataContext objects work: - A object manages access to expectation suites and other configuration in addition to data assets. - It provides a flexible but opinionated structure for creating and storing configuration and expectations in version control. diff --git a/docs/integrations/integration_datahub.md b/docs/integrations/integration_datahub.md index 1047202373a2..dbbe94f167b9 100644 --- a/docs/integrations/integration_datahub.md +++ b/docs/integrations/integration_datahub.md @@ -25,9 +25,9 @@ There is a custom Action named `DataHubValidationAction` which allows you to vie `DataHubValidationAction` pushes Expectations metadata to DataHub. This includes -- **Expectation Details**: Details of assertions (i.e. Expectation) set on a Dataset (Table). Expectation set on a dataset in GE aligns with `AssertionInfo` aspect in DataHub. `AssertionInfo` captures the dataset and dataset fields on which assertion is applied, along with its scope, type and parameters. +- **Expectation Details**: Details of assertions (i.e. Expectation) set on a Dataset (Table). Expectation set on a dataset in GX aligns with `AssertionInfo` aspect in DataHub. `AssertionInfo` captures the dataset and dataset fields on which assertion is applied, along with its scope, type and parameters. - **Expectation Results**: Evaluation results for an assertion tracked over time. 
-Validation Result for an Expectation in GE align with `AssertionRunEvent` aspect in DataHub. `AssertionRunEvent` captures the time at which Validation was run, Batch(subset) of dataset on which it was run, the success status along with other result fields. +Validation Result for an Expectation in GX align with `AssertionRunEvent` aspect in DataHub. `AssertionRunEvent` captures the time at which Validation was run, Batch(subset) of dataset on which it was run, the success status along with other result fields. ### Dev loops unlocked by integration @@ -49,7 +49,7 @@ Stand up and take a breath ::: #### 1. Ingest the metadata from source data platform into DataHub -For example, if you have GE Checkpoint that runs Expectations on a BigQuery dataset, then first +For example, if you have GX Checkpoint that runs Expectations on a BigQuery dataset, then first ingest the respective dataset into DataHub using [BigQuery](https://datahubproject.io/docs/generated/ingestion/sources/bigquery#module-bigquery) metadata ingestion source recipe. ```bash @@ -57,8 +57,8 @@ datahub ingest -c recipe.yaml ``` You should be able to see the dataset in DataHub UI. -#### 2. Update GE Checkpoint Configurations -Add `DataHubValidationAction` in `action_list` of your Great Expectations Checkpoint. For more details on setting action_list, see [the configuration section of the GE Actions reference entry](https://docs.greatexpectations.io/docs/terms/action#configuration) +#### 2. Update GX Checkpoint Configurations +Add `DataHubValidationAction` in `action_list` of your Great Expectations Checkpoint. For more details on setting action_list, see [the configuration section of the GX Actions reference entry](https://docs.greatexpectations.io/docs/terms/action#configuration) ```yml action_list: - name: datahub_action @@ -71,7 +71,7 @@ action_list: **Configuration options:** - `server_url` (required): URL of DataHub GMS endpoint - `env` (optional, defaults to "PROD"): Environment to use in namespace when constructing dataset URNs. -- `platform_instance_map` (optional): Platform instance mapping to use when constructing dataset URNs. Maps the GE 'data source' name to a platform instance on DataHub. e.g. `platform_instance_map: { "datasource_name": "warehouse" }` +- `platform_instance_map` (optional): Platform instance mapping to use when constructing dataset URNs. Maps the GX 'data source' name to a platform instance on DataHub. e.g. `platform_instance_map: { "datasource_name": "warehouse" }` - `graceful_exceptions` (defaults to true): If set to true, most runtime errors in the lineage backend will be suppressed and will not cause the overall Checkpoint to fail. Note that configuration issues will still throw exceptions. - `token` (optional): Bearer token used for authentication. - `timeout_sec` (optional): Per-HTTP request timeout. @@ -80,7 +80,7 @@ action_list: - `extra_headers` (optional): Extra headers which will be added to the datahub request. - `parse_table_names_from_sql` (defaults to false): The integration can use an SQL parser to try to parse the datasets being asserted. This parsing is disabled by default, but can be enabled by setting `parse_table_names_from_sql: True`. The parser is based on the [`sqllineage`](https://pypi.org/project/sqllineage/) package. -#### 3. Run the GE checkpoint +#### 3. 
Run the GX checkpoint ```bash great_expectations checkpoint run my_checkpoint #replace my_checkpoint with your checkpoint name diff --git a/docs/reference/anonymous_usage_statistics.md b/docs/reference/anonymous_usage_statistics.md index 51e40688bf65..1dcf2bad507d 100644 --- a/docs/reference/anonymous_usage_statistics.md +++ b/docs/reference/anonymous_usage_statistics.md @@ -2,7 +2,7 @@ title: Usage Statistics --- -To help us improve the tool, by default we track event data when certain Data Context-enabled commands are run. Our [blog post from April 2020](https://greatexpectations.io/blog/anonymous-usage-statistics/) explains a little bit more about what we want to capture with usage statistics and why! The usage statistics include things like the OS and python version, and which GE features are used. You can see the exact schemas for all of our messages [here](https://github.com/great-expectations/great_expectations/blob/develop/great_expectations/core/usage_statistics/schemas.py). +To help us improve the tool, by default we track event data when certain Data Context-enabled commands are run. Our [blog post from April 2020](https://greatexpectations.io/blog/anonymous-usage-statistics/) explains a little bit more about what we want to capture with usage statistics and why! The usage statistics include things like the OS and python version, and which GX features are used. You can see the exact schemas for all of our messages [here](https://github.com/great-expectations/great_expectations/blob/develop/great_expectations/core/usage_statistics/schemas.py). While we hope you'll leave them on, you can easily disable usage statistics for a Data Context by adding the following to your data context configuration: diff --git a/docs_rtd/_static/style.css b/docs_rtd/_static/style.css index b1241f3df358..efa33bdb4beb 100644 --- a/docs_rtd/_static/style.css +++ b/docs_rtd/_static/style.css @@ -130,7 +130,7 @@ a { top: 72px; } -/*GE HEADER STYLES*/ +/*GX HEADER STYLES*/ .ge_header { background-color: #fff; diff --git a/docs_rtd/contributing/miscellaneous.rst b/docs_rtd/contributing/miscellaneous.rst index 4e87fb11c640..9e13c93ba204 100644 --- a/docs_rtd/contributing/miscellaneous.rst +++ b/docs_rtd/contributing/miscellaneous.rst @@ -35,7 +35,7 @@ We are not asking you to assign copyright to us, but to give us the right to dis Release checklist ----------------------------------------- -GE core team members use this checklist to ship releases. +GX core team members use this checklist to ship releases. 1. If this is a major release (incrementing either the first or second version number) the manual acceptance testing must be completed. @@ -68,17 +68,17 @@ GE core team members use this checklist to ship releases. * The deploy step will automatically create a draft for the release. * Generally, we use the name of the tag (Ex: "0.11.2") as the release title. - + 14. Notify kyle@superconductive.com about any community-contributed PRs that should be celebrated. -15. Socialize the release on GE slack by copying the changelog with an optional nice personal message (thank people if you can) +15. Socialize the release on GX slack by copying the changelog with an optional nice personal message (thank people if you can) 16. Review and merge the automatically-generated PR for `conda-forge/great-expectations-feedstock `__, updating requirements as necessary and verifying the build status. - * To list requirements changed since a previous version of GE, you can run ``git diff <>..<> -- requirements.txt``. 
If there are differences, update the requirements section of ``recipe/meta.yaml``. This is an important step as this is not done automatically when the PR is generated. + * To list requirements changed since a previous version of GX, you can run ``git diff <>..<> -- requirements.txt``. If there are differences, update the requirements section of ``recipe/meta.yaml``. This is an important step as this is not done automatically when the PR is generated. * In most cases, the PR will change the SHA and version number. Check the commits for other changes, they may be maintenance changes from the Conda dev team which are OK to merge. * Review all items on the conda-forge CI system and PR checklist. The active conda-forge community frequently updates build processes and testing, and may help discover issues not observed in our pypi deployment process. * If you need to re-run a failing build and don't have appropriate permissions or you don't have permissions to merge please refer to the Account Permissions Overview on the superconductive `internal wikiwiki `__ for who to ask. Other conda-forge community partners are extremely responsive and may be able to help resolve issues quickly. -17. Check for open issues in the `GE conda-forge repository `__. If there are open issues that do not have a corresponding issue in the main `GE repo `__, please create an issue in the GE repo with a link to the corresponding conda issue (e.g. issue `#2021 `__). This allows us to internally triage and track the issue. +17. Check for open issues in the `GX conda-forge repository `__. If there are open issues that do not have a corresponding issue in the main `GX repo `__, please create an issue in the GX repo with a link to the corresponding conda issue (e.g. issue `#2021 `__). This allows us to internally triage and track the issue. 18. Celebrate! You have successfully released a new version of Great Expectations!! Beta Release Notes diff --git a/docs_rtd/contributing/style_guide.rst b/docs_rtd/contributing/style_guide.rst index bd4db355c709..cbb89090edc9 100644 --- a/docs_rtd/contributing/style_guide.rst +++ b/docs_rtd/contributing/style_guide.rst @@ -116,8 +116,8 @@ Within the table of contents, each section has specific role to play. 
Broadly sp * Abbreviated link to a class: ``:py:class:`~great_expectations.data_context.data_context.BaseDataContext``` :py:class:`~great_expectations.data_context.data_context.BaseDataContext` * Link to a method in a class: ``:py:meth:`great_expectations.data_context.data_context.BaseDataContext.validate_config``` :py:meth:`great_expectations.data_context.data_context.BaseDataContext.validate_config` * Abbreviated link to a method in a class: ``:py:meth:`~great_expectations.data_context.data_context.BaseDataContext.validate_config``` :py:meth:`~great_expectations.data_context.data_context.BaseDataContext.validate_config` - * Link to an attribute in a class: ``:py:attr:`great_expectations.data_context.data_context.BaseDataContext.GE_DIR``` :py:attr:`great_expectations.data_context.data_context.BaseDataContext.GE_DIR` - * Abbreviated link to an attribute in a class: ``:py:attr:`~great_expectations.data_context.data_context.BaseDataContext.GE_DIR``` :py:attr:`~great_expectations.data_context.data_context.BaseDataContext.GE_DIR` + * Link to an attribute in a class: ``:py:attr:`great_expectations.data_context.data_context.BaseDataContext.GX_DIR``` :py:attr:`great_expectations.data_context.data_context.BaseDataContext.GX_DIR` + * Abbreviated link to an attribute in a class: ``:py:attr:`~great_expectations.data_context.data_context.BaseDataContext.GX_DIR``` :py:attr:`~great_expectations.data_context.data_context.BaseDataContext.GX_DIR` * Link to a function in a module: ``:py:attr:`great_expectations.jupyter_ux.display_column_evrs_as_section``` :py:attr:`great_expectations.jupyter_ux.display_column_evrs_as_section` * Abbreviated to a function in a module: ``:py:attr:`~great_expectations.jupyter_ux.display_column_evrs_as_section``` :py:attr:`~great_expectations.jupyter_ux.display_column_evrs_as_section` diff --git a/docs_rtd/guides/how_to_guides/configuring_data_docs/how_to_create_renderers_for_custom_expectations.rst b/docs_rtd/guides/how_to_guides/configuring_data_docs/how_to_create_renderers_for_custom_expectations.rst index baa83ca65567..d05a7ad71e7d 100644 --- a/docs_rtd/guides/how_to_guides/configuring_data_docs/how_to_create_renderers_for_custom_expectations.rst +++ b/docs_rtd/guides/how_to_guides/configuring_data_docs/how_to_create_renderers_for_custom_expectations.rst @@ -19,7 +19,7 @@ This guide will help you implement renderers for your custom Expectations, allow - Configured an :ref:`Expectations Suite ` containing your custom Expectation. - Generated one Validation Result (from running a :ref:`Checkpoint ` or :ref:`Validation Operator `) containing your custom Expectation -See also this `complete custom expectation with renderer example `_. +See also this `complete custom expectation with renderer example `_. Steps ----- diff --git a/docs_rtd/guides/how_to_guides/configuring_datasources/how_to_configure_a_self_managed_spark_datasource.rst b/docs_rtd/guides/how_to_guides/configuring_datasources/how_to_configure_a_self_managed_spark_datasource.rst index 6a6f6a4d8dd3..1db6dca13143 100644 --- a/docs_rtd/guides/how_to_guides/configuring_datasources/how_to_configure_a_self_managed_spark_datasource.rst +++ b/docs_rtd/guides/how_to_guides/configuring_datasources/how_to_configure_a_self_managed_spark_datasource.rst @@ -109,7 +109,7 @@ To enable running Great Expectations against dataframe created by Spark SQL quer #. **Fill values:** * **query_name** - Name by which you want to reference the datasource. For next points we will use `my_first_query` name. 
You will use this name to select datasource when creating expectations. - * **spark_sql_query** - Spark SQL Query that will create DataFrame against which GE validations will be run. For next points we will use `select * from mydb.mytable` query. + * **spark_sql_query** - Spark SQL Query that will create DataFrame against which GX validations will be run. For next points we will use `select * from mydb.mytable` query. Now, when creating new expectation suite, query `main` will be available in the list of datasources. diff --git a/docs_rtd/guides/how_to_guides/configuring_datasources/how_to_configure_an_inferredassetdataconnector.rst b/docs_rtd/guides/how_to_guides/configuring_datasources/how_to_configure_an_inferredassetdataconnector.rst index a95898e805dc..94806dcec737 100644 --- a/docs_rtd/guides/how_to_guides/configuring_datasources/how_to_configure_an_inferredassetdataconnector.rst +++ b/docs_rtd/guides/how_to_guides/configuring_datasources/how_to_configure_an_inferredassetdataconnector.rst @@ -59,7 +59,7 @@ Imagine you have the following files in ``my_directory/``: my_directory/alpha-2020-01-02.csv my_directory/alpha-2020-01-03.csv -We can imagine 2 approaches to loading the data into GE. +We can imagine 2 approaches to loading the data into GX. The simplest approach would be to consider each file to be its own DataAsset. In that case, the configuration would look like the following: diff --git a/docs_rtd/guides/how_to_guides/configuring_datasources/how_to_configure_sorting_in_data_connectors.rst b/docs_rtd/guides/how_to_guides/configuring_datasources/how_to_configure_sorting_in_data_connectors.rst index 7735f33cace2..e11be699de9b 100644 --- a/docs_rtd/guides/how_to_guides/configuring_datasources/how_to_configure_sorting_in_data_connectors.rst +++ b/docs_rtd/guides/how_to_guides/configuring_datasources/how_to_configure_sorting_in_data_connectors.rst @@ -209,7 +209,7 @@ If you have the following ``numeric_example/`` directory in your filesystem, and numeric_example/test_555.csv **Note** : In our example, the ``base_directory`` is set to ``../``. If we are running this Notebook in the same folder as Great Expectations home directory (ie ``great_expectations/``), -GE will begin looking for the files in the parent directory. +GX will begin looking for the files in the parent directory. 1. **Load or create a DataContext** @@ -372,7 +372,7 @@ If you have the following ``datetime_example/`` directory in your filesystem, an datetime_example/test_20210102.csv **Note** : In our example, the ``base_directory`` is set to ``../``. If we are running this Notebook in the same folder as Great Expectations home directory (ie ``great_expectations/``), -GE will begin looking for the files in the parent directory. +GX will begin looking for the files in the parent directory. 1. **Load or create a DataContext** @@ -536,7 +536,7 @@ If you have the following ``elements/`` directory in your filesystem, and you wa elements_example/test_C.csv **Note** : In our example, the ``base_directory`` is set to ``../``. If we are running this Notebook in the same folder as Great Expectations home directory (ie ``great_expectations/``), -GE will begin looking for the files in the parent directory. +GX will begin looking for the files in the parent directory. 1. **Load or create a DataContext** @@ -708,7 +708,7 @@ files as batches within the ``my_data_asset`` DataAsset, sorting them by 1) Date multiple_sorters_example/test_EEE_555_20210103.csv **Note** : In our example, the ``base_directory`` is set to ``../``. 
If we are running this Notebook in the same folder as Great Expectations home directory (ie ``great_expectations/``), -GE will begin looking for the files in the parent directory. +GX will begin looking for the files in the parent directory. 1. **Load or create a DataContext** @@ -883,7 +883,7 @@ and we only wanted to consider the reports **on or after 2000**, and in **ascend year_reports/report_2020.csv **Note** : In our example, the ``base_directory`` is set to ``../``. If we are running this Notebook in the same folder as Great Expectations home directory (ie ``great_expectations/``), -GE will begin looking for the files in the parent directory. +GX will begin looking for the files in the parent directory. 1. **Load or create a DataContext** diff --git a/docs_rtd/guides/how_to_guides/creating_and_editing_expectations/how_to_create_custom_expectations.rst b/docs_rtd/guides/how_to_guides/creating_and_editing_expectations/how_to_create_custom_expectations.rst index d1097755cfa6..471d87197795 100644 --- a/docs_rtd/guides/how_to_guides/creating_and_editing_expectations/how_to_create_custom_expectations.rst +++ b/docs_rtd/guides/how_to_guides/creating_and_editing_expectations/how_to_create_custom_expectations.rst @@ -435,7 +435,7 @@ Beginning in version 0.13, we have introduced a new API focused on enabling Modu This guide will walk you through the process of creating your own Modular Expectations in 6 simple steps! - See also this `complete example `_. + See also this `complete example `_. .. admonition:: Prerequisites: This how-to guide assumes you have already: @@ -454,7 +454,7 @@ Beginning in version 0.13, we have introduced a new API focused on enabling Modu Once you’ve decided on an Expectation to implement, think of the different aggregations, mappings, or metadata you’ll need to validate your data within the Expectation - each of these will be a separate metric that must be implemented prior to validating your Expectation. - Fortunately, many Metrics have already been implemented for pre-existing Expectations, so it is possible you will find that the Metric you’d like to implement already exists within the GE framework and can be readily deployed. + Fortunately, many Metrics have already been implemented for pre-existing Expectations, so it is possible you will find that the Metric you’d like to implement already exists within the GX framework and can be readily deployed. #. **Implement your Metric** diff --git a/docs_rtd/guides/how_to_guides/creating_batches/how_to_create_a_batch_request_using_an_active_data_connector.rst b/docs_rtd/guides/how_to_guides/creating_batches/how_to_create_a_batch_request_using_an_active_data_connector.rst index 82fdfc8b149d..18f0d7f3bb90 100644 --- a/docs_rtd/guides/how_to_guides/creating_batches/how_to_create_a_batch_request_using_an_active_data_connector.rst +++ b/docs_rtd/guides/how_to_guides/creating_batches/how_to_create_a_batch_request_using_an_active_data_connector.rst @@ -24,7 +24,7 @@ Steps If you have the following ``reports`` directory in your filesystem, and you want to treat ``*.csv`` files as batches within the ``reports`` DataAsset: -**Note** : In our example, the ``base_directory`` is set to ``../``. If we are running this Notebook in the same folder as Great Expectations home directory (ie ``great_expectations/``), GE will begin looking for the files in the parent directory. +**Note** : In our example, the ``base_directory`` is set to ``../``. 
If we are running this Notebook in the same folder as Great Expectations home directory (ie ``great_expectations/``), GX will begin looking for the files in the parent directory. .. code-block:: bash diff --git a/docs_rtd/guides/how_to_guides/migrating_versions.rst b/docs_rtd/guides/how_to_guides/migrating_versions.rst index e25c9d891aa4..e1016f194154 100644 --- a/docs_rtd/guides/how_to_guides/migrating_versions.rst +++ b/docs_rtd/guides/how_to_guides/migrating_versions.rst @@ -322,7 +322,7 @@ How to use the project ``check-config`` command * The command will display this message when done: ``Your config file appears valid!``. - * Rename your Expectation Suites to make them compatible with the new naming. Save this Python code snippet in a file called ``update_project.py``, then run it using the command: ``python update_project.py PATH_TO_GE_CONFIG_DIRECTORY``: + * Rename your Expectation Suites to make them compatible with the new naming. Save this Python code snippet in a file called ``update_project.py``, then run it using the command: ``python update_project.py PATH_TO_GX_CONFIG_DIRECTORY``: .. code-block:: python @@ -540,7 +540,7 @@ How to use the project ``check-config`` command How to upgrade to 0.7.x *********************** - In version 0.7, GE introduced several new features, and significantly changed the way DataContext objects work: + In version 0.7, GX introduced several new features, and significantly changed the way DataContext objects work: - A :ref:`data_context` object manages access to expectation suites and other configuration in addition to data assets. It provides a flexible but opinionated structure for creating and storing configuration and expectations in version @@ -853,7 +853,7 @@ How to use the project ``check-config`` command * The command will display this message when done: ``Your config file appears valid!``. - * Rename your Expectation Suites to make them compatible with the new naming. Save this Python code snippet in a file called ``update_project.py``, then run it using the command: ``python update_project.py PATH_TO_GE_CONFIG_DIRECTORY``: + * Rename your Expectation Suites to make them compatible with the new naming. Save this Python code snippet in a file called ``update_project.py``, then run it using the command: ``python update_project.py PATH_TO_GX_CONFIG_DIRECTORY``: .. code-block:: python @@ -1071,7 +1071,7 @@ How to use the project ``check-config`` command How to upgrade to 0.7.x *********************** - In version 0.7, GE introduced several new features, and significantly changed the way DataContext objects work: + In version 0.7, GX introduced several new features, and significantly changed the way DataContext objects work: - A :ref:`data_context` object manages access to expectation suites and other configuration in addition to data assets. It provides a flexible but opinionated structure for creating and storing configuration and expectations in version diff --git a/docs_rtd/guides/how_to_guides/miscellaneous/how_to_write_a_how_to_guide.rst b/docs_rtd/guides/how_to_guides/miscellaneous/how_to_write_a_how_to_guide.rst index c1e5e282935e..c73d9400a850 100644 --- a/docs_rtd/guides/how_to_guides/miscellaneous/how_to_write_a_how_to_guide.rst +++ b/docs_rtd/guides/how_to_guides/miscellaneous/how_to_write_a_how_to_guide.rst @@ -17,7 +17,7 @@ Steps - Hint: it's often most efficient to run through the technical steps in a notebook or at the command line before starting to write. 
Then you can simply copy-paste content into code blocks, and write headers and text to connect them. - After you finish writing, you should definitely follow your own steps from start to finish at least once, to make sure there aren’t any gaps. - + #. If needed, add content to Additional Notes and/or Additional Resources. These sections supplement the article with information that would be distracting to include in Steps. It’s fine for them to be empty. #. Enable comments for your How-to guide, by following :ref:`these instructions `. #. Scan your article to make sure it follows the :ref:`Style guide `. If you’re not familiar with the Style Guide, that’s okay: your PR reviewer will also check for style and let you know if we find any issues. @@ -96,7 +96,7 @@ Indentation, bolding, and code blocks - Indent content within steps. - Any time the user needs to do something, it should be in a code block. - - Please follow this convention even if the text in the code block is somewhat redundant against the text of the step. + - Please follow this convention even if the text in the code block is somewhat redundant against the text of the step. - Clear, sequential code blocks are easy for the eye to follow. They encourage a health copy-and-modify development pattern. - All of these styles are modeled in the :ref:`How-to guide template file `. If you use that template as your guide, you'll be off to a very good start. @@ -112,7 +112,7 @@ Indentation, bolding, and code blocks Using tabs to differentiate guides for different APIs ##################################################### -During the process of writing documentation for Great Expectations 0.13, there rose a need to differentiate between documentation for GE up to 0.12.x, and GE 0.13 and beyond. +During the process of writing documentation for Great Expectations 0.13, there rose a need to differentiate between documentation for GX up to 0.12.x, and GX 0.13 and beyond. The use of ``content-tabs`` allows for both documentation to co-exist in the same how-to-doc. diff --git a/docs_rtd/guides/how_to_guides/spare_parts/how_to_create_expectations.rst b/docs_rtd/guides/how_to_guides/spare_parts/how_to_create_expectations.rst index 1d1ca25bfdd4..e50740013d13 100644 --- a/docs_rtd/guides/how_to_guides/spare_parts/how_to_create_expectations.rst +++ b/docs_rtd/guides/how_to_guides/spare_parts/how_to_create_expectations.rst @@ -102,9 +102,9 @@ If you do not use the :ref:`CLI `, create a new notebook in the``g import great_expectations.jupyter_ux from great_expectations.data_context.types.resource_identifiers import ValidationResultIdentifier - # Data Context is a GE object that represents your project. + # Data Context is a GX object that represents your project. # Your project's great_expectations.yml contains all the config - # options for the project's GE Data Context. + # options for the project's GX Data Context. context = gx.data_context.DataContext() # Create a new empty Expectation Suite diff --git a/docs_rtd/guides/tutorials/getting_started/initialize_a_data_context.rst b/docs_rtd/guides/tutorials/getting_started/initialize_a_data_context.rst index 6efcf8077e6a..cf006256ba4e 100644 --- a/docs_rtd/guides/tutorials/getting_started/initialize_a_data_context.rst +++ b/docs_rtd/guides/tutorials/getting_started/initialize_a_data_context.rst @@ -33,13 +33,13 @@ Set up your machine for the tutorial For this tutorial, we will use a simplified version of the NYC taxi ride data. 
-Clone the `ge_tutorials `_ repository to download the data and directories with the final versions of the tutorial, which you can use for reference: +Clone the `ge_tutorials `_ repository to download the data and directories with the final versions of the tutorial, which you can use for reference: .. code-block:: bash - git clone https://github.com/superconductive/ge_tutorials + git clone https://github.com/great-expectations/gx_tutorials cd ge_tutorials - + What you find in the ge_tutorials repository --------------------------------------------- @@ -100,7 +100,7 @@ You should see this: |-- documentation |-- validations - OK to proceed? [Y/n]: + OK to proceed? [Y/n]: **Let's pause there for a moment and take a look under the hood.** diff --git a/docs_rtd/guides/tutorials/getting_started_v3_api/initialize_a_data_context.rst b/docs_rtd/guides/tutorials/getting_started_v3_api/initialize_a_data_context.rst index b82cc29f6c1f..ca6fa71a6efd 100644 --- a/docs_rtd/guides/tutorials/getting_started_v3_api/initialize_a_data_context.rst +++ b/docs_rtd/guides/tutorials/getting_started_v3_api/initialize_a_data_context.rst @@ -33,13 +33,13 @@ Set up your machine for the tutorial For this tutorial, we will use a simplified version of the NYC taxi ride data. -Clone the `ge_tutorials `_ repository to download the data and directories with the final versions of the tutorial, which you can use for reference: +Clone the `ge_tutorials `_ repository to download the data and directories with the final versions of the tutorial, which you can use for reference: .. code-block:: bash - git clone https://github.com/superconductive/ge_tutorials + git clone https://github.com/great-expectations/gx_tutorials cd ge_tutorials - + What you find in the ge_tutorials repository --------------------------------------------- diff --git a/docs_rtd/guides/tutorials/how_to_create_expectations.rst b/docs_rtd/guides/tutorials/how_to_create_expectations.rst index e9831a20fb2d..ce57e1544be7 100644 --- a/docs_rtd/guides/tutorials/how_to_create_expectations.rst +++ b/docs_rtd/guides/tutorials/how_to_create_expectations.rst @@ -99,9 +99,9 @@ If you do not use the :ref:`CLI `, create a new notebook in the``g import great_expectations.jupyter_ux from great_expectations.data_context.types.resource_identifiers import ValidationResultIdentifier - # Data Context is a GE object that represents your project. + # Data Context is a GX object that represents your project. # Your project's great_expectations.yml contains all the config - # options for the project's GE Data Context. + # options for the project's GX Data Context. context = gx.data_context.DataContext() # Create a new empty Expectation Suite diff --git a/docs_rtd/guides/workflows_patterns/deployment_airflow.rst b/docs_rtd/guides/workflows_patterns/deployment_airflow.rst index 0cf9e9c606bc..4bfe3af270b9 100644 --- a/docs_rtd/guides/workflows_patterns/deployment_airflow.rst +++ b/docs_rtd/guides/workflows_patterns/deployment_airflow.rst @@ -20,7 +20,7 @@ There are three supported methods for running :ref:`validation`_ +- `Great Expectations Pipeline Tutorial `_ In the first link and the diagram below, you can see a common pattern of using validation tasks to ensure that the data flowing from one task to the next is correct, and alert the team if it is not. Another common pattern is to branch and change your DAG based on a validation (e.g. send data for more cleaning before moving to the next task, store it for a postmortem, etc.). 
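As a minimal sketch of the validation-task pattern described above (an illustration rather than part of this patch: it assumes Airflow 2.x, and the DAG id, schedule, ingest/publish commands, and Checkpoint name ``my_checkpoint`` are placeholders), a Checkpoint run can be wrapped in a ``BashOperator`` between pipeline tasks:

.. code-block:: python

    from datetime import datetime

    from airflow import DAG
    from airflow.operators.bash import BashOperator

    with DAG(
        dag_id="ge_validation_example",  # placeholder DAG id
        start_date=datetime(2022, 1, 1),
        schedule_interval="@daily",
        catchup=False,
    ) as dag:
        ingest = BashOperator(task_id="ingest", bash_command="echo 'ingest data'")

        # Fails this task (and alerts the team) if the Checkpoint's Expectations fail.
        validate = BashOperator(
            task_id="validate",
            bash_command="great_expectations checkpoint run my_checkpoint",
        )

        publish = BashOperator(task_id="publish", bash_command="echo 'publish data'")

        ingest >> validate >> publish
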
@@ -144,7 +144,7 @@ Please see this how-to guide for :ref:`How to run a Checkpoint in Airflow `_ showing Great Expectations implemented in an airflow pipeline. +- `Great Expectations Pipeline Tutorial `_ showing Great Expectations implemented in an airflow pipeline. Comments -------- diff --git a/docs_rtd/reference/core_concepts/data_docs.rst b/docs_rtd/reference/core_concepts/data_docs.rst index 7d693111380c..015dae349522 100644 --- a/docs_rtd/reference/core_concepts/data_docs.rst +++ b/docs_rtd/reference/core_concepts/data_docs.rst @@ -5,14 +5,14 @@ Data Docs ######### Data Docs compile Great Expectations objects such as Expectations and -Validations into structured, formatted documents. In these documents, they +Validations into structured, formatted documents. In these documents, they attempt to capture the key characteristics of a dataset. One example of Data Docs is HTML documentation, which takes expectation suites and validation results and produces clear, functional, and self-updating documentation of expected and observed data characteristics. Together with profiling, it can help to rapidly create a clearer picture of your data, and keep your entire team on the same page as data evolves. -For example, the default BasicDatasetProfiler in GE will produce validation_results which compile to a page for each +For example, the default BasicDatasetProfiler in GX will produce validation_results which compile to a page for each table or DataFrame including an overview section: .. image:: /images/movie_db_profiling_screenshot_2.jpg @@ -64,5 +64,3 @@ The HTML documentation generated by Great Expectations Data Docs is fully custom of these pages or create your own, see :ref:`customizing_data_docs`. See the :ref:`reference__core_concepts__data_docs` for more information. - - diff --git a/docs_rtd/reference/core_concepts/expectations/expectations.rst b/docs_rtd/reference/core_concepts/expectations/expectations.rst index 0a40f0282bfc..8f021570daff 100644 --- a/docs_rtd/reference/core_concepts/expectations/expectations.rst +++ b/docs_rtd/reference/core_concepts/expectations/expectations.rst @@ -68,12 +68,12 @@ Methods for creating and editing Expectations ********************************************* Generating expectations is one of the most important parts of using Great Expectations effectively, and there are -a variety of methods for generating and encoding expectations. When expectations are encoded in the GE format, they +a variety of methods for generating and encoding expectations. When expectations are encoded in the GX format, they become shareable and persistent sources of truth about how data was expected to behave-and how it actually did. There are several paths to generating expectations: -1. Automated inspection of datasets. Currently, the profiler mechanism in GE produces expectation suites that can be +1. Automated inspection of datasets. Currently, the profiler mechanism in GX produces expectation suites that can be used for validation. In some cases, the goal is :ref:`profiling` your data, and in other cases automated inspection can produce expectations that will be used in validating future batches of data. @@ -81,7 +81,7 @@ There are several paths to generating expectations: expectations. Interviewing experts and encoding their tacit knowledge of common distributions, values, or failure conditions can be can excellent way to generate expectations. -3. Exploratory Analysis. Using GE in an exploratory analysis workflow (e.g. within Jupyter notebooks) is an important \ +3. 
Exploratory Analysis. Using GX in an exploratory analysis workflow (e.g. within Jupyter notebooks) is an important \ way to develop experience with both raw and derived datasets and generate useful and testable expectations about characteristics that may be important for the data's eventual purpose, whether reporting or feeding another downstream model or data system. diff --git a/docs_rtd/reference/spare_parts/data_context_reference.rst b/docs_rtd/reference/spare_parts/data_context_reference.rst index b36942562817..b79722fdcedb 100644 --- a/docs_rtd/reference/spare_parts/data_context_reference.rst +++ b/docs_rtd/reference/spare_parts/data_context_reference.rst @@ -185,7 +185,7 @@ providing the bucket/prefix combination: evaluation_parameter_store: class_name: EvaluationParameterStore -GE uses `boto3 `_ to access AWS, so credentials +GX uses `boto3 `_ to access AWS, so credentials simply need to be available in any standard place searched by that library. You may also specify keyword arguments for boto3 to use in the `boto3_options key` of the store_backend configuration. @@ -225,7 +225,7 @@ useful for configuring database access. Variable substitution enables: 1) keeping secrets out of source control & 2) environment-based configuration changes such as staging vs prod. -When GE encounters substitution syntax (like ``my_key: ${my_value}`` or +When GX encounters substitution syntax (like ``my_key: ${my_value}`` or ``my_key: $my_value``) in the great_expectations.yml config file it will attempt to replace the value of ``my_key`` with the value from an environment variable ``my_value`` or a corresponding key read from the file specified using ``config_variables_file_path``, which is located in uncommitted/config_variables.yml by default. This is an example of a config_variables.yml file: @@ -312,7 +312,7 @@ new directory or use this template: # secrets out of source control & 2) environment-based configuration changes # such as staging vs prod. # - # When GE encounters substitution syntax (like `my_key: ${my_value}` or + # When GX encounters substitution syntax (like `my_key: ${my_value}` or # `my_key: $my_value`) in the config file it will attempt to replace the value # of `my_key` with the value from an environment variable `my_value` or a # corresponding key read from the file specified using @@ -398,7 +398,7 @@ new directory or use this template: Usage Statistics ################# -To help us improve the tool, by default we track event data when certain Data Context-enabled commands are run. Our `blog post from April 2020 `_ explains a little bit more about what we want to capture with usage statistics and why! The usage statistics include things like the OS and python version, and which GE features are used. You can see the exact +To help us improve the tool, by default we track event data when certain Data Context-enabled commands are run. Our `blog post from April 2020 `_ explains a little bit more about what we want to capture with usage statistics and why! The usage statistics include things like the OS and python version, and which GX features are used. You can see the exact schemas for all of our messages `here `_. 
While we hope you'll leave them on, you can easily disable usage statistics for a Data Context by adding the diff --git a/docs_rtd/reference/spare_parts/profiling_reference.rst b/docs_rtd/reference/spare_parts/profiling_reference.rst index c5fbd6ec6b79..5c5e2d510f11 100644 --- a/docs_rtd/reference/spare_parts/profiling_reference.rst +++ b/docs_rtd/reference/spare_parts/profiling_reference.rst @@ -15,7 +15,7 @@ Expectations and Profiling In order to characterize a data asset, Profiling uses an Expectation Suite. Unlike the Expectations that are typically used for data validation, these expectations do not necessarily apply any constraints; they can simply -identify statistics or other data characteristics that should be evaluated and made available in GE. For example, when +identify statistics or other data characteristics that should be evaluated and made available in GX. For example, when the ``BasicDatasetProfiler`` encounters a numeric column, it will add an ``expect_column_mean_to_be_between`` expectation but choose the min_value and max_value to both be None: essentially only saying that it expects a mean to exist. @@ -32,7 +32,7 @@ to exist. } To "profile" a datasource, therefore, the :class:`~great_expectations.profile.basic_dataset_profiler.\ -BasicDatasetProfiler` included in GE will generate a large number of very loosely-specified expectations. Effectively +BasicDatasetProfiler` included in GX will generate a large number of very loosely-specified expectations. Effectively it is asserting that the given statistic is relevant for evaluating batches of that data asset, but it is not yet sure what the statistic's value should be. @@ -76,7 +76,7 @@ build and launch Data Docs based on your data. Run From Command Line ======================= -The GE command-line interface can profile a datasource: +The GX command-line interface can profile a datasource: .. code-block:: bash diff --git a/examples/integrations/airflow/README.md b/examples/integrations/airflow/README.md index 42702ffb1d23..cf1a68eea66c 100644 --- a/examples/integrations/airflow/README.md +++ b/examples/integrations/airflow/README.md @@ -1 +1 @@ -This example shows how Great Expectations can protect an Airflow data pipeline from bad data and code bugs: [GE Airflow Example](https://github.com/superconductive/ge_tutorials) +This example shows how Great Expectations can protect an Airflow data pipeline from bad data and code bugs: [GX Airflow Example](https://github.com/great-expectations/gx_tutorials) diff --git a/great_expectations/checkpoint/actions.py b/great_expectations/checkpoint/actions.py index 7faf829a1950..e1aa16d2ac4c 100644 --- a/great_expectations/checkpoint/actions.py +++ b/great_expectations/checkpoint/actions.py @@ -1134,7 +1134,7 @@ def _run( if not self.data_context.ge_cloud_mode: return Exception( - "CloudNotificationActions can only be used in GE Cloud Mode." + "CloudNotificationActions can only be used in GX Cloud Mode." ) if not isinstance(validation_result_suite_identifier, GXCloudIdentifier): raise TypeError( diff --git a/great_expectations/checkpoint/util.py b/great_expectations/checkpoint/util.py index 3d163d3c49a4..ded698ac01bc 100644 --- a/great_expectations/checkpoint/util.py +++ b/great_expectations/checkpoint/util.py @@ -486,7 +486,7 @@ def validate_validation_dict(validation_dict: dict) -> None: def send_cloud_notification(url: str, headers: dict): """ - Post a CloudNotificationAction to GE Cloud Backend for processing. + Post a CloudNotificationAction to GX Cloud Backend for processing. 
""" session = requests.Session() diff --git a/great_expectations/cli/batch_request.py b/great_expectations/cli/batch_request.py index ba8fb388f77c..615e4f0a54a4 100644 --- a/great_expectations/cli/batch_request.py +++ b/great_expectations/cli/batch_request.py @@ -24,7 +24,7 @@ SimpleSqlalchemyDatasource, ) from great_expectations.execution_engine import SqlAlchemyExecutionEngine -from great_expectations.execution_engine.sqlalchemy_dialect import GESqlDialect +from great_expectations.execution_engine.sqlalchemy_dialect import GXSqlDialect from great_expectations.util import filter_properties_dict logger = logging.getLogger(__name__) @@ -384,7 +384,7 @@ def _get_data_asset_name_for_simple_sqlalchemy_datasource( ) if ( - datasource.execution_engine.engine.dialect.name.lower() == GESqlDialect.BIGQUERY + datasource.execution_engine.engine.dialect.name.lower() == GXSqlDialect.BIGQUERY and parse_bigquery_url is not None ): # bigquery table needs to contain the project id if it differs from the credentials project diff --git a/great_expectations/cli/checkpoint.py b/great_expectations/cli/checkpoint.py index 02a7935e73c6..7af80e26346d 100644 --- a/great_expectations/cli/checkpoint.py +++ b/great_expectations/cli/checkpoint.py @@ -165,7 +165,7 @@ def _verify_checkpoint_does_not_exist( def _get_notebook_path(context: DataContext, notebook_name: str) -> str: return os.path.abspath( os.path.join( - context.root_directory, context.GE_EDIT_NOTEBOOK_DIR, notebook_name + context.root_directory, context.GX_EDIT_NOTEBOOK_DIR, notebook_name ) ) @@ -326,7 +326,7 @@ def checkpoint_script(ctx: click.Context, checkpoint: str) -> None: script_name: str = f"run_{checkpoint}.py" script_path: str = os.path.join( - context.root_directory, context.GE_UNCOMMITTED_DIR, script_name + context.root_directory, context.GX_UNCOMMITTED_DIR, script_name ) if os.path.isfile(script_path): diff --git a/great_expectations/cli/cli.py b/great_expectations/cli/cli.py index 24dce4dac2c1..65d7013ec5c8 100644 --- a/great_expectations/cli/cli.py +++ b/great_expectations/cli/cli.py @@ -11,7 +11,7 @@ from great_expectations.cli.cli_logging import _set_up_logger from great_expectations.cli.pretty_printing import cli_message from great_expectations.data_context.types.base import ( - FIRST_GE_CONFIG_VERSION_WITH_CHECKPOINT_STORE, + FIRST_GX_CONFIG_VERSION_WITH_CHECKPOINT_STORE, ) try: @@ -180,7 +180,7 @@ def cli( ge_config_version: float = ( ctx.obj.get_data_context_from_config_file().get_config().config_version ) - if ge_config_version >= FIRST_GE_CONFIG_VERSION_WITH_CHECKPOINT_STORE: + if ge_config_version >= FIRST_GX_CONFIG_VERSION_WITH_CHECKPOINT_STORE: raise ge_exceptions.InvalidDataContextConfigError( f"Using the legacy v2 (Batch Kwargs) API with a recent config version ({ge_config_version}) is illegal." ) diff --git a/great_expectations/cli/cli_messages.py b/great_expectations/cli/cli_messages.py index 969d51b81478..48070d36dc7c 100644 --- a/great_expectations/cli/cli_messages.py +++ b/great_expectations/cli/cli_messages.py @@ -56,7 +56,7 @@ - Run `great_expectations suite --help` to create, edit, list, profile Expectation Suites. - Run `great_expectations docs --help` to build and manage Data Docs sites. - Edit your configuration in {DataContext.GE_YML} to: + Edit your configuration in {DataContext.GX_YML} to: - Move Stores to the cloud - Add Slack notifications, PagerDuty alerts, etc. 
- Customize your Data Docs diff --git a/great_expectations/cli/datasource.py b/great_expectations/cli/datasource.py index 4f4f3d3da282..95893d644ea0 100644 --- a/great_expectations/cli/datasource.py +++ b/great_expectations/cli/datasource.py @@ -259,7 +259,7 @@ def create_notebook(self, context: DataContext) -> str: renderer = self.get_notebook_renderer(context) notebook_path = os.path.join( context.root_directory, - context.GE_UNCOMMITTED_DIR, + context.GX_UNCOMMITTED_DIR, "datasource_new.ipynb", ) renderer.render_to_disk(notebook_path) diff --git a/great_expectations/cli/init.py b/great_expectations/cli/init.py index 27864edb9756..4b5041da9d89 100644 --- a/great_expectations/cli/init.py +++ b/great_expectations/cli/init.py @@ -62,7 +62,7 @@ def init(ctx: click.Context, usage_stats: bool) -> None: if DataContext.does_config_exist_on_disk(ge_dir): message = ( - f"""Warning. An existing `{DataContext.GE_YML}` was found here: {ge_dir}.""" + f"""Warning. An existing `{DataContext.GX_YML}` was found here: {ge_dir}.""" ) warnings.warn(message) try: @@ -119,4 +119,4 @@ def init(ctx: click.Context, usage_stats: bool) -> None: def _get_full_path_to_ge_dir(target_directory: str) -> str: - return os.path.abspath(os.path.join(target_directory, DataContext.GE_DIR)) + return os.path.abspath(os.path.join(target_directory, DataContext.GX_DIR)) diff --git a/great_expectations/cli/project.py b/great_expectations/cli/project.py index dce3f0f6d985..ebc467712e2d 100644 --- a/great_expectations/cli/project.py +++ b/great_expectations/cli/project.py @@ -12,7 +12,7 @@ from great_expectations.cli.upgrade_helpers import GE_UPGRADE_HELPER_VERSION_MAP from great_expectations.core.usage_statistics.events import UsageStatsEvents from great_expectations.core.usage_statistics.util import send_usage_message -from great_expectations.data_context.types.base import CURRENT_GE_CONFIG_VERSION +from great_expectations.data_context.types.base import CURRENT_GX_CONFIG_VERSION @click.group() @@ -73,17 +73,17 @@ def do_config_check(target_directory: str) -> Tuple[bool, str, Optional[DataCont try: context = DataContext(context_root_dir=target_directory) ge_config_version: int = context.get_config().config_version - if int(ge_config_version) < CURRENT_GE_CONFIG_VERSION: + if int(ge_config_version) < CURRENT_GX_CONFIG_VERSION: is_config_ok = False upgrade_message = f"""The config_version of your great_expectations.yml -- {float(ge_config_version)} -- is outdated. Please consult the V3 API migration guide https://docs.greatexpectations.io/docs/guides/miscellaneous/migration_guide#migrating-to-the-batch-request-v3-api and -upgrade your Great Expectations configuration to version {float(CURRENT_GE_CONFIG_VERSION)} in order to take advantage of the latest capabilities. +upgrade your Great Expectations configuration to version {float(CURRENT_GX_CONFIG_VERSION)} in order to take advantage of the latest capabilities. """ context = None - elif int(ge_config_version) > CURRENT_GE_CONFIG_VERSION: + elif int(ge_config_version) > CURRENT_GX_CONFIG_VERSION: raise ge_exceptions.UnsupportedConfigVersionError( f"""Invalid config version ({ge_config_version}).\n The maximum valid version is \ -{CURRENT_GE_CONFIG_VERSION}. +{CURRENT_GX_CONFIG_VERSION}. 
""" ) else: diff --git a/great_expectations/cli/suite.py b/great_expectations/cli/suite.py index de056b4e3f61..b0f0f0ede224 100644 --- a/great_expectations/cli/suite.py +++ b/great_expectations/cli/suite.py @@ -978,6 +978,6 @@ def suite_list(ctx: click.Context) -> None: def _get_notebook_path(context: DataContext, notebook_name: str) -> str: return os.path.abspath( os.path.join( - context.root_directory, context.GE_EDIT_NOTEBOOK_DIR, notebook_name + context.root_directory, context.GX_EDIT_NOTEBOOK_DIR, notebook_name ) ) diff --git a/great_expectations/cli/toolkit.py b/great_expectations/cli/toolkit.py index c8be87ae5840..446706d0c9c0 100644 --- a/great_expectations/cli/toolkit.py +++ b/great_expectations/cli/toolkit.py @@ -21,7 +21,7 @@ from great_expectations.core.usage_statistics.events import UsageStatsEvents from great_expectations.core.usage_statistics.util import send_usage_message from great_expectations.data_context.data_context import DataContext -from great_expectations.data_context.types.base import CURRENT_GE_CONFIG_VERSION +from great_expectations.data_context.types.base import CURRENT_GX_CONFIG_VERSION from great_expectations.data_context.types.resource_identifiers import ( ExpectationSuiteIdentifier, ) @@ -397,17 +397,17 @@ def load_data_context_with_error_handling( ge_config_version = context.get_config().config_version if from_cli_upgrade_command: - if ge_config_version < CURRENT_GE_CONFIG_VERSION: + if ge_config_version < CURRENT_GX_CONFIG_VERSION: context = upgrade_project_one_or_multiple_versions_increment( directory=directory, context=context, ge_config_version=ge_config_version, from_cli_upgrade_command=from_cli_upgrade_command, ) - elif ge_config_version > CURRENT_GE_CONFIG_VERSION: + elif ge_config_version > CURRENT_GX_CONFIG_VERSION: raise ge_exceptions.UnsupportedConfigVersionError( f"""Invalid config version ({ge_config_version}).\n The maximum valid version is \ -{CURRENT_GE_CONFIG_VERSION}. +{CURRENT_GX_CONFIG_VERSION}. """ ) else: @@ -460,7 +460,7 @@ def upgrade_project_strictly_multiple_versions_increment( else None ) context: Optional[DataContext] - if upgrade_helper_class and int(ge_config_version) < CURRENT_GE_CONFIG_VERSION: + if upgrade_helper_class and int(ge_config_version) < CURRENT_GX_CONFIG_VERSION: upgrade_project( context_root_dir=directory, ge_config_version=ge_config_version, @@ -492,13 +492,13 @@ def upgrade_project( message = ( f"\nYour project appears to have an out-of-date config version ({ge_config_version}) - " f"the version " - f"number must be at least {CURRENT_GE_CONFIG_VERSION}." + f"number must be at least {CURRENT_GX_CONFIG_VERSION}." ) else: message = ( f"\nYour project appears to have an out-of-date config version ({ge_config_version}) - " f"the version " - f"number must be at least {CURRENT_GE_CONFIG_VERSION}.\nIn order to proceed, " + f"number must be at least {CURRENT_GX_CONFIG_VERSION}.\nIn order to proceed, " f"your project must be upgraded." 
) @@ -523,7 +523,7 @@ def upgrade_project( cli_message(string=SECTION_SEPARATOR) # use loop in case multiple upgrades need to take place - while int(ge_config_version) < CURRENT_GE_CONFIG_VERSION: + while int(ge_config_version) < CURRENT_GX_CONFIG_VERSION: ( increment_version, exception_occurred, @@ -550,7 +550,7 @@ def upgrade_project( https://docs.greatexpectations.io/docs/guides/miscellaneous/migration_guide#migrating-to-the-batch-request-v3-api """ - if int(ge_config_version) < CURRENT_GE_CONFIG_VERSION: + if int(ge_config_version) < CURRENT_GX_CONFIG_VERSION: cli_message(string=upgrade_incomplete_message) else: cli_message(upgrade_success_message) @@ -589,7 +589,7 @@ def upgrade_project_one_or_multiple_versions_increment( upgrade_successful: bool = False - if (CURRENT_GE_CONFIG_VERSION - int(ge_config_version)) == 1: + if (CURRENT_GX_CONFIG_VERSION - int(ge_config_version)) == 1: ( increment_version, exception_occurred, @@ -738,9 +738,9 @@ def upgrade_project_up_to_one_version_increment( if not upgrade_helper_class: return False, False - # set version temporarily to CURRENT_GE_CONFIG_VERSION to get functional DataContext + # set version temporarily to CURRENT_GX_CONFIG_VERSION to get functional DataContext DataContext.set_ge_config_version( - config_version=CURRENT_GE_CONFIG_VERSION, + config_version=CURRENT_GX_CONFIG_VERSION, context_root_dir=context_root_dir, ) diff --git a/great_expectations/cli/v012/checkpoint.py b/great_expectations/cli/v012/checkpoint.py index 12f7e61c7117..8915f7b40d4c 100644 --- a/great_expectations/cli/v012/checkpoint.py +++ b/great_expectations/cli/v012/checkpoint.py @@ -316,7 +316,7 @@ def checkpoint_script(checkpoint, directory) -> None: script_name = f"run_{checkpoint}.py" script_path = os.path.join( - context.root_directory, context.GE_UNCOMMITTED_DIR, script_name + context.root_directory, context.GX_UNCOMMITTED_DIR, script_name ) if os.path.isfile(script_path): diff --git a/great_expectations/cli/v012/cli_messages.py b/great_expectations/cli/v012/cli_messages.py index 66bd7f497175..ee9612a9a9f6 100644 --- a/great_expectations/cli/v012/cli_messages.py +++ b/great_expectations/cli/v012/cli_messages.py @@ -65,7 +65,7 @@ - running `great_expectations datasource new` or - by editing the {} file """.format( - DataContext.GE_YML + DataContext.GX_YML ) SETUP_SUCCESS = "\nCongratulations! Great Expectations is now set up." diff --git a/great_expectations/cli/v012/datasource.py b/great_expectations/cli/v012/datasource.py index f94f7ce2b612..a77d0981e24f 100644 --- a/great_expectations/cli/v012/datasource.py +++ b/great_expectations/cli/v012/datasource.py @@ -43,7 +43,7 @@ BatchKwargsError, DatasourceInitializationError, ) -from great_expectations.execution_engine.sqlalchemy_dialect import GESqlDialect +from great_expectations.execution_engine.sqlalchemy_dialect import GXSqlDialect from great_expectations.validator.validator import BridgeValidator logger = logging.getLogger(__name__) @@ -498,7 +498,7 @@ def _add_sqlalchemy_datasource(context, prompt_for_datasource_name=True): # with the datasource's name as the variable name. # The value of the datasource's "credentials" key in the config file (great_expectations.yml) will # be ${datasource name}. - # GE will replace the ${datasource name} with the value from the credentials file in runtime. + # GX will replace the ${datasource name} with the value from the credentials file in runtime. 
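# Illustrative example with placeholder names (not from this patch): for a datasource
# named "my_postgres_db", great_expectations.yml would contain
# `credentials: ${my_postgres_db}`, and the actual connection details would be stored
# under the `my_postgres_db` key in the config variables file
# (uncommitted/config_variables.yml by default).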
while True: cli_message(msg_db_config.format(datasource_name)) @@ -606,7 +606,7 @@ def _add_sqlalchemy_datasource(context, prompt_for_datasource_name=True): """.format( datasource_name, - DataContext.GE_YML, + DataContext.GX_YML, context.get_config()["config_variables_file_path"], rtd_url_ge_version, selected_database.value.lower(), @@ -1368,7 +1368,7 @@ def _get_batch_kwargs_for_sqlalchemy_datasource( temp_table_kwargs = {} datasource = context.get_datasource(datasource_name) - if datasource.engine.dialect.name.lower() == GESqlDialect.BIGQUERY: + if datasource.engine.dialect.name.lower() == GXSqlDialect.BIGQUERY: # bigquery table needs to contain the project id if it differs from the credentials project if len(data_asset_name.split(".")) < 3: project_id, _, _, _, _, _ = parse_bigquery_url(datasource.engine.url) diff --git a/great_expectations/cli/v012/init.py b/great_expectations/cli/v012/init.py index aadfa7bdc6d6..5145584e4617 100644 --- a/great_expectations/cli/v012/init.py +++ b/great_expectations/cli/v012/init.py @@ -219,4 +219,4 @@ def _slack_setup(context): def _get_full_path_to_ge_dir(target_directory): - return os.path.abspath(os.path.join(target_directory, DataContext.GE_DIR)) + return os.path.abspath(os.path.join(target_directory, DataContext.GX_DIR)) diff --git a/great_expectations/cli/v012/project.py b/great_expectations/cli/v012/project.py index 87e019f07b7c..ac2553472788 100644 --- a/great_expectations/cli/v012/project.py +++ b/great_expectations/cli/v012/project.py @@ -8,7 +8,7 @@ from great_expectations.cli.v012.toolkit import load_data_context_with_error_handling from great_expectations.cli.v012.util import cli_message from great_expectations.core.usage_statistics.util import send_usage_message -from great_expectations.data_context.types.base import CURRENT_GE_CONFIG_VERSION +from great_expectations.data_context.types.base import CURRENT_GX_CONFIG_VERSION @click.group() @@ -68,10 +68,10 @@ def do_config_check(target_directory): try: context = DataContext(context_root_dir=target_directory) ge_config_version: int = context.get_config().config_version - if int(ge_config_version) < CURRENT_GE_CONFIG_VERSION: + if int(ge_config_version) < CURRENT_GX_CONFIG_VERSION: upgrade_message: str = f"""The config_version of your great_expectations.yml -- {float(ge_config_version)} -- is outdated. Please consult the V3 API migration guide https://docs.greatexpectations.io/en/latest/guides/how_to_guides/migrating_versions.html and -upgrade your Great Expectations configuration to version {float(CURRENT_GE_CONFIG_VERSION)} in order to take advantage of the latest capabilities. +upgrade your Great Expectations configuration to version {float(CURRENT_GX_CONFIG_VERSION)} in order to take advantage of the latest capabilities. 
""" return ( False, diff --git a/great_expectations/cli/v012/suite.py b/great_expectations/cli/v012/suite.py index c3c05a584b2d..a6aeb2e1812a 100644 --- a/great_expectations/cli/v012/suite.py +++ b/great_expectations/cli/v012/suite.py @@ -553,6 +553,6 @@ def suite_list(directory): def _get_notebook_path(context, notebook_name): return os.path.abspath( os.path.join( - context.root_directory, context.GE_EDIT_NOTEBOOK_DIR, notebook_name + context.root_directory, context.GX_EDIT_NOTEBOOK_DIR, notebook_name ) ) diff --git a/great_expectations/cli/v012/toolkit.py b/great_expectations/cli/v012/toolkit.py index 81b4f03e53f0..11d4e8f8507a 100644 --- a/great_expectations/cli/v012/toolkit.py +++ b/great_expectations/cli/v012/toolkit.py @@ -22,7 +22,7 @@ from great_expectations.core.usage_statistics.util import send_usage_message from great_expectations.data_asset import DataAsset from great_expectations.data_context.data_context import DataContext -from great_expectations.data_context.types.base import CURRENT_GE_CONFIG_VERSION +from great_expectations.data_context.types.base import CURRENT_GX_CONFIG_VERSION from great_expectations.data_context.types.resource_identifiers import ( ExpectationSuiteIdentifier, RunIdentifier, @@ -427,7 +427,7 @@ def load_data_context_with_error_handling( ge_config_version: int = context.get_config().config_version if ( from_cli_upgrade_command - and int(ge_config_version) < CURRENT_GE_CONFIG_VERSION + and int(ge_config_version) < CURRENT_GX_CONFIG_VERSION ): directory = directory or context.root_directory ( @@ -452,7 +452,7 @@ def load_data_context_with_error_handling( if ge_config_version else None ) - if upgrade_helper_class and ge_config_version < CURRENT_GE_CONFIG_VERSION: + if upgrade_helper_class and ge_config_version < CURRENT_GX_CONFIG_VERSION: upgrade_project( context_root_dir=directory, ge_config_version=ge_config_version, @@ -485,13 +485,13 @@ def upgrade_project( message = ( f"\nYour project appears to have an out-of-date config version ({ge_config_version}) - " f"the version " - f"number must be at least {CURRENT_GE_CONFIG_VERSION}." + f"number must be at least {CURRENT_GX_CONFIG_VERSION}." ) else: message = ( f"\nYour project appears to have an out-of-date config version ({ge_config_version}) - " f"the version " - f"number must be at least {CURRENT_GE_CONFIG_VERSION}.\nIn order to proceed, " + f"number must be at least {CURRENT_GX_CONFIG_VERSION}.\nIn order to proceed, " f"your project must be upgraded." 
) @@ -506,7 +506,7 @@ def upgrade_project( cli_message(SECTION_SEPARATOR) # use loop in case multiple upgrades need to take place - while ge_config_version < CURRENT_GE_CONFIG_VERSION: + while ge_config_version < CURRENT_GX_CONFIG_VERSION: ( increment_version, exception_occurred, @@ -532,7 +532,7 @@ def upgrade_project( https://docs.greatexpectations.io/en/latest/how_to_guides/migrating_versions.html """ - if ge_config_version < CURRENT_GE_CONFIG_VERSION: + if ge_config_version < CURRENT_GX_CONFIG_VERSION: cli_message(upgrade_incomplete_message) else: cli_message(upgrade_success_message) @@ -549,9 +549,9 @@ def upgrade_project_up_to_one_version_increment( if not upgrade_helper_class: return False, False target_ge_config_version = int(ge_config_version) + 1 - # set version temporarily to CURRENT_GE_CONFIG_VERSION to get functional DataContext + # set version temporarily to CURRENT_GX_CONFIG_VERSION to get functional DataContext DataContext.set_ge_config_version( - config_version=CURRENT_GE_CONFIG_VERSION, + config_version=CURRENT_GX_CONFIG_VERSION, context_root_dir=context_root_dir, ) upgrade_helper = upgrade_helper_class(context_root_dir=context_root_dir) diff --git a/great_expectations/core/evaluation_parameters.py b/great_expectations/core/evaluation_parameters.py index 6747070c595c..db736dacc875 100644 --- a/great_expectations/core/evaluation_parameters.py +++ b/great_expectations/core/evaluation_parameters.py @@ -250,15 +250,15 @@ def build_evaluation_parameters( def find_evaluation_parameter_dependencies(parameter_expression): - """Parse a parameter expression to identify dependencies including GE URNs. + """Parse a parameter expression to identify dependencies including GX URNs. Args: parameter_expression: the parameter to parse Returns: a dictionary including: - - "urns": set of strings that are valid GE URN objects - - "other": set of non-GE URN strings that are required to evaluate the parameter expression + - "urns": set of strings that are valid GX URN objects + - "other": set of non-GX URN strings that are required to evaluate the parameter expression """ expr = EvaluationParameterParser() @@ -328,7 +328,7 @@ def parse_evaluation_parameter( # noqa: C901 - complexity 19 Valid variables must begin with an alphabetic character and may contain alphanumeric characters plus '_' and '$', EXCEPT if they begin with the string "urn:great_expectations" in which case they may also include additional - characters to support inclusion of GE URLs (see :ref:`evaluation_parameters` for more information). + characters to support inclusion of GX URLs (see :ref:`evaluation_parameters` for more information). 
""" if evaluation_parameters is None: evaluation_parameters = {} diff --git a/great_expectations/core/usage_statistics/anonymizers/base.py b/great_expectations/core/usage_statistics/anonymizers/base.py index 28d50351f858..3c2971452603 100644 --- a/great_expectations/core/usage_statistics/anonymizers/base.py +++ b/great_expectations/core/usage_statistics/anonymizers/base.py @@ -14,9 +14,9 @@ class BaseAnonymizer(ABC): # Any class that starts with this __module__ is considered a "core" object - CORE_GE_OBJECT_MODULE_PREFIX = "great_expectations" + CORE_GX_OBJECT_MODULE_PREFIX = "great_expectations" - CORE_GE_EXPECTATION_TYPES = aggregate_all_core_expectation_types() + CORE_GX_EXPECTATION_TYPES = aggregate_all_core_expectation_types() def __init__(self, salt: Optional[str] = None) -> None: if salt is not None and not isinstance(salt, str): @@ -44,9 +44,9 @@ def get_parent_class( object_class: Optional[type] = None, object_config: Optional[dict] = None, ) -> Optional[str]: - """Check if the parent class is a subclass of any core GE class. + """Check if the parent class is a subclass of any core GX class. - These anonymizers define and provide an optional list of core GE classes_to_check. + These anonymizers define and provide an optional list of core GX classes_to_check. If not provided, the object's inheritance hierarchy is traversed. Args: @@ -170,7 +170,7 @@ def _anonymize_object_info( else: # Chetan - 20220311 - If we can't identify the class in question, we iterate through the parents. - # While GE rarely utilizes multiple inheritance when defining core objects (as of v0.14.10), + # While GX rarely utilizes multiple inheritance when defining core objects (as of v0.14.10), # it is important to recognize that this is possibility. # # In the presence of multiple valid parents, we generate a comma-delimited list. @@ -211,4 +211,4 @@ def _anonymize_object_info( @staticmethod def _is_core_great_expectations_class(class_name: str) -> bool: - return class_name.startswith(BaseAnonymizer.CORE_GE_OBJECT_MODULE_PREFIX) + return class_name.startswith(BaseAnonymizer.CORE_GX_OBJECT_MODULE_PREFIX) diff --git a/great_expectations/core/usage_statistics/anonymizers/expectation_anonymizer.py b/great_expectations/core/usage_statistics/anonymizers/expectation_anonymizer.py index 005cdc57c6e4..793fd76cfbef 100644 --- a/great_expectations/core/usage_statistics/anonymizers/expectation_anonymizer.py +++ b/great_expectations/core/usage_statistics/anonymizers/expectation_anonymizer.py @@ -56,7 +56,7 @@ def _anonymize_expectation( expectation_type (Optional[str]): The string name of the Expectation. info_dict (dict): A dictionary to update within this function. """ - if expectation_type in self.CORE_GE_EXPECTATION_TYPES: + if expectation_type in self.CORE_GX_EXPECTATION_TYPES: info_dict["expectation_type"] = expectation_type else: info_dict["anonymized_expectation_type"] = self._anonymize_string( diff --git a/great_expectations/core/usage_statistics/anonymizers/profiler_anonymizer.py b/great_expectations/core/usage_statistics/anonymizers/profiler_anonymizer.py index cb5e671d52db..e02fb983e3e8 100644 --- a/great_expectations/core/usage_statistics/anonymizers/profiler_anonymizer.py +++ b/great_expectations/core/usage_statistics/anonymizers/profiler_anonymizer.py @@ -230,7 +230,7 @@ def _anonymize_expectation( expectation_type (Optional[str]): The string name of the Expectation. info_dict (dict): A dictionary to update within this function. 
""" - if expectation_type in self.CORE_GE_EXPECTATION_TYPES: + if expectation_type in self.CORE_GX_EXPECTATION_TYPES: info_dict["expectation_type"] = expectation_type else: info_dict["anonymized_expectation_type"] = self._anonymize_string( diff --git a/great_expectations/core/usage_statistics/execution_environment.py b/great_expectations/core/usage_statistics/execution_environment.py index 0ec7011ab0d2..f778a929dfa1 100644 --- a/great_expectations/core/usage_statistics/execution_environment.py +++ b/great_expectations/core/usage_statistics/execution_environment.py @@ -5,7 +5,7 @@ access to features of new package versions. Typical usage example: - ge_execution_environment = GEExecutionEnvironment() + ge_execution_environment = GXExecutionEnvironment() dependencies: List[PackageInfo] = ge_execution_environment.dependencies """ @@ -18,7 +18,7 @@ from marshmallow import Schema, fields from packaging import version -from great_expectations.core.usage_statistics.package_dependencies import GEDependencies +from great_expectations.core.usage_statistics.package_dependencies import GXDependencies if sys.version_info < (3, 8): # Note: importlib_metadata is included in the python standard library as importlib @@ -49,8 +49,8 @@ class PackageInfoSchema(Schema): version = fields.Str(required=False, allow_none=True) -class GEExecutionEnvironment: - """The list of GE dependencies with version and install information. +class GXExecutionEnvironment: + """The list of GX dependencies with version and install information. This does not return any dependencies that are not specified directly in either requirements.txt or any requirements-dev*.txt files. @@ -59,7 +59,7 @@ class GEExecutionEnvironment: """ def __init__(self) -> None: - self._ge_dependencies = GEDependencies() + self._ge_dependencies = GXDependencies() self._all_installed_packages: List[str] = [] self._get_all_installed_packages() @@ -69,7 +69,7 @@ def __init__(self) -> None: @property def dependencies(self) -> List[PackageInfo]: - """The list of GE dependencies with version and install information. + """The list of GX dependencies with version and install information. This does not return any dependencies that are not specified directly in either requirements.txt or any requirements-dev*.txt files. diff --git a/great_expectations/core/usage_statistics/package_dependencies.py b/great_expectations/core/usage_statistics/package_dependencies.py index 1fb4b6b18071..58366d8efcb8 100644 --- a/great_expectations/core/usage_statistics/package_dependencies.py +++ b/great_expectations/core/usage_statistics/package_dependencies.py @@ -1,15 +1,15 @@ -"""Provide GE package dependencies. +"""Provide GX package dependencies. -This module contains static lists of GE dependencies, along with a utility for +This module contains static lists of GX dependencies, along with a utility for checking and updating these static lists. Typical usage example: - ge_dependencies = GEDependencies() + ge_dependencies = GXDependencies() print(ge_dependencies.get_required_dependency_names()) print(ge_dependencies.get_dev_dependency_names()) To verify lists are accurate, you can run this file or execute main() from - within a cloned GE repository. This will check the existing requirements + within a cloned GX repository. This will check the existing requirements files against the static lists returned via the methods above in the usage example and raise exceptions if there are discrepancies. 
""" @@ -18,7 +18,7 @@ from typing import Dict, List, Set -class GEDependencies: +class GXDependencies: """Store and provide dependencies when requested. Also acts as a utility to check stored dependencies match our @@ -28,7 +28,7 @@ class GEDependencies: """ """This list should be kept in sync with our requirements.txt file.""" - GE_REQUIRED_DEPENDENCIES: List[str] = sorted( + GX_REQUIRED_DEPENDENCIES: List[str] = sorted( [ "altair", "Click", @@ -63,7 +63,7 @@ class GEDependencies: ) """This list should be kept in sync with our requirements-dev*.txt files.""" - ALL_GE_DEV_DEPENDENCIES: List[str] = sorted( + ALL_GX_DEV_DEPENDENCIES: List[str] = sorted( [ "PyMySQL", "azure-identity", @@ -119,7 +119,7 @@ class GEDependencies: ] ) - GE_DEV_DEPENDENCIES_EXCLUDED_FROM_TRACKING: List[str] = [ + GX_DEV_DEPENDENCIES_EXCLUDED_FROM_TRACKING: List[str] = [ # requirements-dev-contrib.txt: "black", "flake8", @@ -206,8 +206,8 @@ class GEDependencies: "zipcodes", ] - GE_DEV_DEPENDENCIES: Set[str] = set(ALL_GE_DEV_DEPENDENCIES) - set( - GE_DEV_DEPENDENCIES_EXCLUDED_FROM_TRACKING + GX_DEV_DEPENDENCIES: Set[str] = set(ALL_GX_DEV_DEPENDENCIES) - set( + GX_DEV_DEPENDENCIES_EXCLUDED_FROM_TRACKING ) DEV_REQUIREMENTS_PREFIX = "requirements-dev" @@ -231,12 +231,12 @@ def _init_requirements_paths(self) -> Dict[str, pathlib.Path]: return req_dict def get_required_dependency_names(self) -> List[str]: - """Sorted list of required GE dependencies""" - return self.GE_REQUIRED_DEPENDENCIES + """Sorted list of required GX dependencies""" + return self.GX_REQUIRED_DEPENDENCIES def get_dev_dependency_names(self) -> Set[str]: - """Set of dev GE dependencies""" - return self.GE_DEV_DEPENDENCIES + """Set of dev GX dependencies""" + return self.GX_DEV_DEPENDENCIES def get_required_dependency_names_from_requirements_file(self) -> List[str]: """Get unique names of required dependencies. 
@@ -310,7 +310,7 @@ def _get_dependency_names(self, dependencies: List[str]) -> List[str]: def main() -> None: """Run this module to generate a list of packages from requirements files to update our static lists""" - ge_dependencies = GEDependencies() + ge_dependencies = GXDependencies() print("\n\nRequired Dependencies:\n\n") print(ge_dependencies.get_required_dependency_names_from_requirements_file()) print("\n\nDev Dependencies:\n\n") @@ -318,14 +318,14 @@ def main() -> None: assert ( ge_dependencies.get_required_dependency_names() == ge_dependencies.get_required_dependency_names_from_requirements_file() - ), "Mismatch between required dependencies in requirements files and in GEDependencies" + ), "Mismatch between required dependencies in requirements files and in GXDependencies" assert ge_dependencies.get_dev_dependency_names() == set( ge_dependencies.get_dev_dependency_names_from_requirements_file() ) - set( - GEDependencies.GE_DEV_DEPENDENCIES_EXCLUDED_FROM_TRACKING - ), "Mismatch between dev dependencies in requirements files and in GEDependencies" + GXDependencies.GX_DEV_DEPENDENCIES_EXCLUDED_FROM_TRACKING + ), "Mismatch between dev dependencies in requirements files and in GXDependencies" print( - "\n\nRequired and Dev dependencies in requirements files match those in GEDependencies" + "\n\nRequired and Dev dependencies in requirements files match those in GXDependencies" ) diff --git a/great_expectations/core/usage_statistics/usage_statistics.py b/great_expectations/core/usage_statistics/usage_statistics.py index 35da443fdadf..f541a9be15cd 100644 --- a/great_expectations/core/usage_statistics/usage_statistics.py +++ b/great_expectations/core/usage_statistics/usage_statistics.py @@ -27,7 +27,7 @@ ) from great_expectations.core.usage_statistics.events import UsageStatsEvents from great_expectations.core.usage_statistics.execution_environment import ( - GEExecutionEnvironment, + GXExecutionEnvironment, PackageInfo, PackageInfoSchema, ) @@ -160,8 +160,8 @@ def build_init_payload(self) -> dict: @staticmethod def _get_serialized_dependencies() -> List[dict]: - """Get the serialized dependencies from the GEExecutionEnvironment.""" - ge_execution_environment = GEExecutionEnvironment() + """Get the serialized dependencies from the GXExecutionEnvironment.""" + ge_execution_environment = GXExecutionEnvironment() dependencies: List[PackageInfo] = ge_execution_environment.dependencies schema = PackageInfoSchema() diff --git a/great_expectations/core/yaml_handler.py b/great_expectations/core/yaml_handler.py index 24c35a1a952f..078bda6c3320 100644 --- a/great_expectations/core/yaml_handler.py +++ b/great_expectations/core/yaml_handler.py @@ -33,7 +33,7 @@ class YAMLHandler: def __init__(self) -> None: self._handler = YAML(typ="safe") - # TODO: ensure this does not break all usage of ruamel in GE codebase. + # TODO: ensure this does not break all usage of ruamel in GX codebase. 
self._handler.indent(mapping=2, sequence=4, offset=2) self._handler.default_flow_style = False diff --git a/great_expectations/data_context/config_validator/yaml_config_validator.py b/great_expectations/data_context/config_validator/yaml_config_validator.py index f51d0b18b43e..000775f63d62 100644 --- a/great_expectations/data_context/config_validator/yaml_config_validator.py +++ b/great_expectations/data_context/config_validator/yaml_config_validator.py @@ -283,7 +283,7 @@ def test_yaml_config( # noqa: C901 - complexity 17 usage_stats_event_payload.get("parent_class") is None and class_name in self.ALL_TEST_YAML_CONFIG_SUPPORTED_TYPES ): - # add parent_class if it doesn't exist and class_name is one of our supported core GE types + # add parent_class if it doesn't exist and class_name is one of our supported core GX types usage_stats_event_payload["parent_class"] = class_name send_usage_message_from_handler( event=usage_stats_event_name, diff --git a/great_expectations/data_context/data_context/abstract_data_context.py b/great_expectations/data_context/data_context/abstract_data_context.py index bd986720cc1f..1388993de507 100644 --- a/great_expectations/data_context/data_context/abstract_data_context.py +++ b/great_expectations/data_context/data_context/abstract_data_context.py @@ -69,7 +69,7 @@ from great_expectations.data_context.store.validations_store import ValidationsStore from great_expectations.data_context.templates import CONFIG_VARIABLES_TEMPLATE from great_expectations.data_context.types.base import ( - CURRENT_GE_CONFIG_VERSION, + CURRENT_GX_CONFIG_VERSION, AnonymizedUsageStatisticsConfig, CheckpointConfig, ConcurrencyConfig, @@ -431,7 +431,7 @@ def checkpoint_store_name(self) -> Optional[str]: f"with no `checkpoints` directory.\n " f"Please create the following directory: {checkpoint_store_directory}.\n " f"To use the new 'Checkpoint Store' feature, please update your configuration " - f"to the new version number {float(CURRENT_GE_CONFIG_VERSION)}.\n " + f"to the new version number {float(CURRENT_GX_CONFIG_VERSION)}.\n " f"Visit {AbstractDataContext.MIGRATION_WEBSITE} " f"to learn more about the upgrade process." ) @@ -441,7 +441,7 @@ def checkpoint_store_name(self) -> Optional[str]: f"with no `checkpoints` directory.\n " f"Please create a `checkpoints` directory in your Great Expectations directory." f"To use the new 'Checkpoint Store' feature, please update your configuration " - f"to the new version number {float(CURRENT_GE_CONFIG_VERSION)}.\n " + f"to the new version number {float(CURRENT_GX_CONFIG_VERSION)}.\n " f"Visit {AbstractDataContext.MIGRATION_WEBSITE} " f"to learn more about the upgrade process." ) @@ -464,7 +464,7 @@ def checkpoint_store(self) -> CheckpointStore: logger.warning( f"Checkpoint store named '{checkpoint_store_name}' is not a configured store, " f"so will try to use default Checkpoint store.\n Please update your configuration " - f"to the new version number {float(CURRENT_GE_CONFIG_VERSION)} in order to use the new " + f"to the new version number {float(CURRENT_GX_CONFIG_VERSION)} in order to use the new " f"'Checkpoint Store' feature.\n Visit {AbstractDataContext.MIGRATION_WEBSITE} " f"to learn more about the upgrade process." 
) @@ -497,7 +497,7 @@ def profiler_store_name(self) -> Optional[str]: f"with no `profilers` directory.\n " f"Please create the following directory: {checkpoint_store_directory}\n" f"To use the new 'Profiler Store' feature, please update your configuration " - f"to the new version number {float(CURRENT_GE_CONFIG_VERSION)}.\n " + f"to the new version number {float(CURRENT_GX_CONFIG_VERSION)}.\n " f"Visit {AbstractDataContext.MIGRATION_WEBSITE} to learn more about the " f"upgrade process." ) @@ -508,7 +508,7 @@ def profiler_store_name(self) -> Optional[str]: f"Please create a `profilers` directory in your Great Expectations project " f"directory.\n " f"To use the new 'Profiler Store' feature, please update your configuration " - f"to the new version number {float(CURRENT_GE_CONFIG_VERSION)}.\n " + f"to the new version number {float(CURRENT_GX_CONFIG_VERSION)}.\n " f"Visit {AbstractDataContext.MIGRATION_WEBSITE} to learn more about the " f"upgrade process." ) @@ -527,7 +527,7 @@ def profiler_store(self) -> ProfilerStore: logger.warning( f"Profiler store named '{profiler_store_name}' is not a configured store, so will try to use " f"default Profiler store.\n Please update your configuration to the new version number " - f"{float(CURRENT_GE_CONFIG_VERSION)} in order to use the new 'Profiler Store' feature.\n " + f"{float(CURRENT_GX_CONFIG_VERSION)} in order to use the new 'Profiler Store' feature.\n " f"Visit {AbstractDataContext.MIGRATION_WEBSITE} to learn more about the upgrade process." ) built_store: Optional[Store] = self._build_store_from_config( @@ -615,7 +615,7 @@ def add_datasource( name: the name for the new datasource to add initialize: if False, add the datasource to the config, but do not initialize it, for example if a user needs to debug database connectivity. - save_changes (bool): should GE save the Datasource config? + save_changes (bool): should GX save the Datasource config? kwargs (keyword arguments): the configuration for the new datasource Returns: @@ -686,9 +686,9 @@ def get_config_with_variables_substituted( ) -> DataContextConfig: """ Substitute vars in config of form ${var} or $(var) with values found in the following places, - in order of precedence: ge_cloud_config (for Data Contexts in GE Cloud mode), runtime_environment, + in order of precedence: ge_cloud_config (for Data Contexts in GX Cloud mode), runtime_environment, environment variables, config_variables, or ge_cloud_config_variable_defaults (allows certain variables to - be optional in GE Cloud mode). + be optional in GX Cloud mode). """ if not config: config = self._project_config @@ -1731,12 +1731,12 @@ def get_expectation_suite( include_rendered_content: Optional[bool] = None, ge_cloud_id: Optional[str] = None, ) -> ExpectationSuite: - """Get an Expectation Suite by name or GE Cloud ID + """Get an Expectation Suite by name or GX Cloud ID Args: expectation_suite_name (str): The name of the Expectation Suite include_rendered_content (bool): Whether or not to re-populate rendered_content for each ExpectationConfiguration. - ge_cloud_id (str): The GE Cloud ID for the Expectation Suite. + ge_cloud_id (str): The GX Cloud ID for the Expectation Suite. Returns: An existing ExpectationSuite @@ -1837,7 +1837,7 @@ def run_profiler_with_dynamic_arguments( batch_list: Explicit list of Batch objects to supply data at runtime batch_request: Explicit batch_request used to supply data at runtime name: Identifier used to retrieve the profiler from a store. 
- ge_cloud_id: Identifier used to retrieve the profiler from a store (GE Cloud specific). + ge_cloud_id: Identifier used to retrieve the profiler from a store (GX Cloud specific). variables: Attribute name/value pairs (overrides) rules: Key-value pairs of name/configuration-dictionary (overrides) @@ -1875,7 +1875,7 @@ def run_profiler_on_data( batch_list: Explicit list of Batch objects to supply data at runtime. batch_request: Explicit batch_request used to supply data at runtime. name: Identifier used to retrieve the profiler from a store. - ge_cloud_id: Identifier used to retrieve the profiler from a store (GE Cloud specific). + ge_cloud_id: Identifier used to retrieve the profiler from a store (GX Cloud specific). Returns: Set of rule evaluation results in the form of an RuleBasedProfilerResult @@ -2942,7 +2942,7 @@ def _init_datasources(self) -> None: self._cached_datasources[datasource_name] = datasource except ge_exceptions.DatasourceInitializationError as e: logger.warning(f"Cannot initialize datasource {datasource_name}: {e}") - # this error will happen if our configuration contains datasources that GE can no longer connect to. + # this error will happen if our configuration contains datasources that GX can no longer connect to. # this is ok, as long as we don't use it to retrieve a batch. If we try to do that, the error will be # caught at the context.get_batch() step. So we just pass here. pass diff --git a/great_expectations/data_context/data_context/base_data_context.py b/great_expectations/data_context/data_context/base_data_context.py index 35f902f8067c..a0bd8c721fae 100644 --- a/great_expectations/data_context/data_context/base_data_context.py +++ b/great_expectations/data_context/data_context/base_data_context.py @@ -150,17 +150,17 @@ class BaseDataContext(EphemeralDataContext, ConfigPeer): """ UNCOMMITTED_DIRECTORIES = ["data_docs", "validations"] - GE_UNCOMMITTED_DIR = "uncommitted" + GX_UNCOMMITTED_DIR = "uncommitted" BASE_DIRECTORIES = [ DataContextConfigDefaults.CHECKPOINTS_BASE_DIRECTORY.value, DataContextConfigDefaults.EXPECTATIONS_BASE_DIRECTORY.value, DataContextConfigDefaults.PLUGINS_BASE_DIRECTORY.value, DataContextConfigDefaults.PROFILERS_BASE_DIRECTORY.value, - GE_UNCOMMITTED_DIR, + GX_UNCOMMITTED_DIR, ] - GE_DIR = "great_expectations" - GE_YML = "great_expectations.yml" # TODO: migrate this to FileDataContext. Still needed by DataContext - GE_EDIT_NOTEBOOK_DIR = GE_UNCOMMITTED_DIR + GX_DIR = "great_expectations" + GX_YML = "great_expectations.yml" # TODO: migrate this to FileDataContext. Still needed by DataContext + GX_EDIT_NOTEBOOK_DIR = GX_UNCOMMITTED_DIR DOLLAR_SIGN_ESCAPE_STRING = r"\$" @usage_statistics_enabled_method( @@ -328,9 +328,9 @@ def add_datasource( Args: name (str): Name of Datasource - initialize (bool): Should GE add and initialize the Datasource? If true then current + initialize (bool): Should GX add and initialize the Datasource? If true then current method will return initialized Datasource - save_changes (Optional[bool]): should GE save the Datasource config? + save_changes (Optional[bool]): should GX save the Datasource config? **kwargs Optional[dict]: Additional kwargs that define Datasource initialization kwargs Returns: @@ -371,7 +371,7 @@ def get_expectation_suite( expectation_suite_name (str): The name of the Expectation Suite include_rendered_content (bool): Whether or not to re-populate rendered_content for each ExpectationConfiguration. - ge_cloud_id (str): The GE Cloud ID for the Expectation Suite. 
+ ge_cloud_id (str): The GX Cloud ID for the Expectation Suite. Returns: An existing ExpectationSuite diff --git a/great_expectations/data_context/data_context/cloud_data_context.py b/great_expectations/data_context/data_context/cloud_data_context.py index 85ffbba94f47..202e4bf88ab7 100644 --- a/great_expectations/data_context/data_context/cloud_data_context.py +++ b/great_expectations/data_context/data_context/cloud_data_context.py @@ -170,9 +170,9 @@ def retrieve_data_context_config_from_ge_cloud( """ Utilizes the GeCloudConfig instantiated in the constructor to create a request to the Cloud API. Given proper authorization, the request retrieves a data context config that is pre-populated with - GE objects specific to the user's Cloud environment (datasources, data connectors, etc). + GX objects specific to the user's Cloud environment (datasources, data connectors, etc). - Please note that substitution for ${VAR} variables is performed in GE Cloud before being sent + Please note that substitution for ${VAR} variables is performed in GX Cloud before being sent over the wire. :return: the configuration object retrieved from the Cloud API @@ -191,7 +191,7 @@ def retrieve_data_context_config_from_ge_cloud( response = requests.get(ge_cloud_url, headers=headers) if response.status_code != 200: raise ge_exceptions.GXCloudError( - f"Bad request made to GE Cloud; {response.text}" + f"Bad request made to GX Cloud; {response.text}" ) config = response.json() return DataContextConfig(**config) @@ -222,7 +222,7 @@ def get_ge_cloud_config( GeCloudConfig Raises: - GeCloudError if a GE Cloud variable is missing + GeCloudError if a GX Cloud variable is missing """ ge_cloud_config_dict = cls._get_ge_cloud_config_dict( ge_cloud_base_url=ge_cloud_base_url, @@ -324,7 +324,7 @@ def _init_datasource_store(self) -> None: def list_expectation_suite_names(self) -> List[str]: """ Lists the available expectation suite names. If in ge_cloud_mode, a list of - GE Cloud ids is returned instead. + GX Cloud ids is returned instead. """ return [suite_key.resource_name for suite_key in self.list_expectation_suites()] # type: ignore[union-attr] @@ -366,9 +366,9 @@ def get_config_with_variables_substituted( ) -> DataContextConfig: """ Substitute vars in config of form ${var} or $(var) with values found in the following places, - in order of precedence: ge_cloud_config (for Data Contexts in GE Cloud mode), runtime_environment, + in order of precedence: ge_cloud_config (for Data Contexts in GX Cloud mode), runtime_environment, environment variables, config_variables, or ge_cloud_config_variable_defaults (allows certain variables to - be optional in GE Cloud mode). + be optional in GX Cloud mode). """ if not config: config = self.config @@ -479,12 +479,12 @@ def get_expectation_suite( include_rendered_content: Optional[bool] = None, ge_cloud_id: Optional[str] = None, ) -> ExpectationSuite: - """Get an Expectation Suite by name or GE Cloud ID + """Get an Expectation Suite by name or GX Cloud ID Args: expectation_suite_name (str): The name of the Expectation Suite include_rendered_content (bool): Whether or not to re-populate rendered_content for each ExpectationConfiguration. - ge_cloud_id (str): The GE Cloud ID for the Expectation Suite. + ge_cloud_id (str): The GX Cloud ID for the Expectation Suite. 
Returns: An existing ExpectationSuite @@ -571,7 +571,7 @@ def _validate_suite_unique_constaints_before_save( if ge_cloud_id: if self.expectations_store.has_key(key): # noqa: W601 raise ge_exceptions.DataContextError( - f"expectation_suite with GE Cloud ID {ge_cloud_id} already exists. " + f"expectation_suite with GX Cloud ID {ge_cloud_id} already exists. " f"If you would like to overwrite this expectation_suite, set overwrite_existing=True." ) diff --git a/great_expectations/data_context/data_context/data_context.py b/great_expectations/data_context/data_context/data_context.py index 3bb13001edb5..c33e041d440e 100644 --- a/great_expectations/data_context/data_context/data_context.py +++ b/great_expectations/data_context/data_context/data_context.py @@ -23,7 +23,7 @@ PROJECT_TEMPLATE_USAGE_STATISTICS_ENABLED, ) from great_expectations.data_context.types.base import ( - CURRENT_GE_CONFIG_VERSION, + CURRENT_GX_CONFIG_VERSION, MINIMUM_SUPPORTED_CONFIG_VERSION, AnonymizedUsageStatisticsConfig, DataContextConfig, @@ -50,7 +50,7 @@ class DataContext(BaseDataContext): The DataContext is configured via a yml file stored in a directory called great_expectations; this configuration file as well as managed Expectation Suites should be stored in version control. There are other ways to create a - Data Context that may be better suited for your particular deployment e.g. ephemerally or backed by GE Cloud + Data Context that may be better suited for your particular deployment e.g. ephemerally or backed by GX Cloud (coming soon). Please refer to our documentation for more details. You can Validate data or generate Expectations using Execution Engines including: @@ -111,18 +111,18 @@ def create( "to initialize a new DataContext" ) - ge_dir = os.path.join(project_root_dir, cls.GE_DIR) # type: ignore[arg-type] + ge_dir = os.path.join(project_root_dir, cls.GX_DIR) # type: ignore[arg-type] os.makedirs(ge_dir, exist_ok=True) cls.scaffold_directories(ge_dir) - if os.path.isfile(os.path.join(ge_dir, cls.GE_YML)): - message = f"""Warning. An existing `{cls.GE_YML}` was found here: {ge_dir}. + if os.path.isfile(os.path.join(ge_dir, cls.GX_YML)): + message = f"""Warning. An existing `{cls.GX_YML}` was found here: {ge_dir}. - No action was taken.""" warnings.warn(message) else: cls.write_project_template_to_disk(ge_dir, usage_statistics_enabled) - uncommitted_dir = os.path.join(ge_dir, cls.GE_UNCOMMITTED_DIR) + uncommitted_dir = os.path.join(ge_dir, cls.GX_UNCOMMITTED_DIR) if os.path.isfile(os.path.join(uncommitted_dir, "config_variables.yml")): message = """Warning. An existing `config_variables.yml` was found here: {}. 
- No action was taken.""".format( @@ -137,7 +137,7 @@ def create( @classmethod def all_uncommitted_directories_exist(cls, ge_dir: str) -> bool: """Check if all uncommitted directories exist.""" - uncommitted_dir = os.path.join(ge_dir, cls.GE_UNCOMMITTED_DIR) + uncommitted_dir = os.path.join(ge_dir, cls.GX_UNCOMMITTED_DIR) for directory in cls.UNCOMMITTED_DIRECTORIES: if not os.path.isdir(os.path.join(uncommitted_dir, directory)): return False @@ -147,7 +147,7 @@ def all_uncommitted_directories_exist(cls, ge_dir: str) -> bool: @classmethod def config_variables_yml_exist(cls, ge_dir: str) -> bool: """Check if all config_variables.yml exists.""" - path_to_yml = os.path.join(ge_dir, cls.GE_YML) + path_to_yml = os.path.join(ge_dir, cls.GX_YML) # TODO this is so brittle and gross with open(path_to_yml) as f: @@ -167,7 +167,7 @@ def write_config_variables_template_to_disk(cls, uncommitted_dir: str) -> None: def write_project_template_to_disk( cls, ge_dir: str, usage_statistics_enabled: bool = True ) -> None: - file_path = os.path.join(ge_dir, cls.GE_YML) + file_path = os.path.join(ge_dir, cls.GX_YML) with open(file_path, "w") as template: if usage_statistics_enabled: template.write(PROJECT_TEMPLATE_USAGE_STATISTICS_ENABLED) @@ -176,7 +176,7 @@ def write_project_template_to_disk( @classmethod def scaffold_directories(cls, base_dir: str) -> None: - """Safely create GE directories for a new project.""" + """Safely create GX directories for a new project.""" os.makedirs(base_dir, exist_ok=True) with open(os.path.join(base_dir, ".gitignore"), "w") as f: f.write("uncommitted/") @@ -204,7 +204,7 @@ def scaffold_directories(cls, base_dir: str) -> None: else: os.makedirs(os.path.join(base_dir, directory), exist_ok=True) - uncommitted_dir = os.path.join(base_dir, cls.GE_UNCOMMITTED_DIR) + uncommitted_dir = os.path.join(base_dir, cls.GX_UNCOMMITTED_DIR) for new_directory in cls.UNCOMMITTED_DIRECTORIES: new_directory_path = os.path.join(uncommitted_dir, new_directory) @@ -266,7 +266,7 @@ def _save_project_config(self) -> None: """ logger.debug("Starting DataContext._save_project_config") - config_filepath = os.path.join(self.root_directory, self.GE_YML) # type: ignore[arg-type] + config_filepath = os.path.join(self.root_directory, self.GX_YML) # type: ignore[arg-type] try: with open(config_filepath, "w") as outfile: @@ -385,7 +385,7 @@ def _load_project_config(self): The file may contain ${SOME_VARIABLE} variables - see self.project_config_with_variables_substituted for how these are substituted. - For Data Contexts in GE Cloud mode, a user-specific template is retrieved from the Cloud API + For Data Contexts in GX Cloud mode, a user-specific template is retrieved from the Cloud API - see CloudDataContext.retrieve_data_context_config_from_ge_cloud for more details. 
:return: the configuration object read from the file or template @@ -398,7 +398,7 @@ def _load_project_config(self): ) return config - path_to_yml = os.path.join(self._context_root_directory, self.GE_YML) + path_to_yml = os.path.join(self._context_root_directory, self.GX_YML) try: with open(path_to_yml) as data: config_commented_map_from_yaml = yaml.load(data) @@ -468,7 +468,7 @@ def delete_datasource(self, name: str) -> None: # type: ignore[override] def find_context_root_dir(cls) -> str: result = None yml_path = None - ge_home_environment = os.getenv("GE_HOME") + ge_home_environment = os.getenv("GX_HOME") if ge_home_environment: ge_home_environment = os.path.expanduser(ge_home_environment) if os.path.isdir(ge_home_environment) and os.path.isfile( @@ -519,10 +519,10 @@ def set_ge_config_version( config_version, MINIMUM_SUPPORTED_CONFIG_VERSION ), ) - elif config_version > CURRENT_GE_CONFIG_VERSION: + elif config_version > CURRENT_GX_CONFIG_VERSION: raise ge_exceptions.UnsupportedConfigVersionError( "Invalid config version ({}).\n The maximum valid version is {}.".format( - config_version, CURRENT_GE_CONFIG_VERSION + config_version, CURRENT_GX_CONFIG_VERSION ), ) @@ -553,10 +553,10 @@ def find_context_yml_file( f"Searching for config file {search_start_dir} ({i} layer deep)" ) - potential_ge_dir = os.path.join(search_start_dir, cls.GE_DIR) + potential_ge_dir = os.path.join(search_start_dir, cls.GX_DIR) if os.path.isdir(potential_ge_dir): - potential_yml = os.path.join(potential_ge_dir, cls.GE_YML) + potential_yml = os.path.join(potential_ge_dir, cls.GX_YML) if os.path.isfile(potential_yml): yml_path = potential_yml logger.debug(f"Found config file at {str(yml_path)}") @@ -569,7 +569,7 @@ def find_context_yml_file( @classmethod def does_config_exist_on_disk(cls, context_root_dir: str) -> bool: """Return True if the great_expectations.yml exists on disk.""" - return os.path.isfile(os.path.join(context_root_dir, cls.GE_YML)) + return os.path.isfile(os.path.join(context_root_dir, cls.GX_YML)) @classmethod def is_project_initialized(cls, ge_dir: str) -> bool: diff --git a/great_expectations/data_context/data_context/explorer_data_context.py b/great_expectations/data_context/data_context/explorer_data_context.py index fecf4c8aeffd..159695bbc94f 100644 --- a/great_expectations/data_context/data_context/explorer_data_context.py +++ b/great_expectations/data_context/data_context/explorer_data_context.py @@ -13,7 +13,7 @@ class ExplorerDataContext(DataContext): def __init__(self, context_root_dir=None, expectation_explorer=True) -> None: """ - expectation_explorer: If True, load the expectation explorer manager, which will modify GE return objects \ + expectation_explorer: If True, load the expectation explorer manager, which will modify GX return objects \ to include ipython notebook widgets. """ diff --git a/great_expectations/data_context/data_context/file_data_context.py b/great_expectations/data_context/data_context/file_data_context.py index 3f3d72ff5b13..398886f971c2 100644 --- a/great_expectations/data_context/data_context/file_data_context.py +++ b/great_expectations/data_context/data_context/file_data_context.py @@ -27,7 +27,7 @@ class FileDataContext(AbstractDataContext): class will exist only for backwards-compatibility reasons. 
""" - GE_YML = "great_expectations.yml" + GX_YML = "great_expectations.yml" def __init__( self, diff --git a/great_expectations/data_context/data_context_variables.py b/great_expectations/data_context/data_context_variables.py index e29a45f508ae..246521ee681b 100644 --- a/great_expectations/data_context/data_context_variables.py +++ b/great_expectations/data_context/data_context_variables.py @@ -389,7 +389,7 @@ def _init_store(self) -> DataContextStore: def get_key(self) -> GXCloudIdentifier: """ - Generates a GE Cloud-specific key for use with Stores. See parent "DataContextVariables.get_key" for more details. + Generates a GX Cloud-specific key for use with Stores. See parent "DataContextVariables.get_key" for more details. """ from great_expectations.data_context.cloud_constants import GXCloudRESTResource diff --git a/great_expectations/data_context/store/checkpoint_store.py b/great_expectations/data_context/store/checkpoint_store.py index 4b10a6237bb3..1fa478600120 100644 --- a/great_expectations/data_context/store/checkpoint_store.py +++ b/great_expectations/data_context/store/checkpoint_store.py @@ -39,8 +39,8 @@ class CheckpointStore(ConfigurationStore): def ge_cloud_response_json_to_object_dict(self, response_json: Dict) -> Dict: """ - This method takes full json response from GE cloud and outputs a dict appropriate for - deserialization into a GE object + This method takes full json response from GX cloud and outputs a dict appropriate for + deserialization into a GX object """ ge_cloud_checkpoint_id = response_json["data"]["id"] checkpoint_config_dict = response_json["data"]["attributes"][ diff --git a/great_expectations/data_context/store/data_context_store.py b/great_expectations/data_context/store/data_context_store.py index 222cd78c9783..3d8c05961c1b 100644 --- a/great_expectations/data_context/store/data_context_store.py +++ b/great_expectations/data_context/store/data_context_store.py @@ -32,7 +32,7 @@ def serialize(self, value: DataContextConfig) -> Union[dict, str]: """ Please see `ConfigurationStore.serialize` for more information. - Note that GE Cloud utilizes a subset of the config; as such, an explicit + Note that GX Cloud utilizes a subset of the config; as such, an explicit step to remove unnecessary keys is a required part of the serialization process. 
Args: diff --git a/great_expectations/data_context/store/datasource_store.py b/great_expectations/data_context/store/datasource_store.py index 2ac6dfa1e8d6..e76f2274af13 100644 --- a/great_expectations/data_context/store/datasource_store.py +++ b/great_expectations/data_context/store/datasource_store.py @@ -90,8 +90,8 @@ def deserialize(self, value: Union[dict, DatasourceConfig]) -> DatasourceConfig: def ge_cloud_response_json_to_object_dict(self, response_json: dict) -> dict: """ - This method takes full json response from GE cloud and outputs a dict appropriate for - deserialization into a GE object + This method takes full json response from GX cloud and outputs a dict appropriate for + deserialization into a GX object """ datasource_ge_cloud_id: str = response_json["data"]["id"] datasource_config_dict: dict = response_json["data"]["attributes"][ diff --git a/great_expectations/data_context/store/expectations_store.py b/great_expectations/data_context/store/expectations_store.py index fba1772b6b65..4b74e6e525b7 100644 --- a/great_expectations/data_context/store/expectations_store.py +++ b/great_expectations/data_context/store/expectations_store.py @@ -163,8 +163,8 @@ def __init__( def ge_cloud_response_json_to_object_dict(self, response_json: Dict) -> Dict: """ - This method takes full json response from GE cloud and outputs a dict appropriate for - deserialization into a GE object + This method takes full json response from GX cloud and outputs a dict appropriate for + deserialization into a GX object """ ge_cloud_expectation_suite_id = response_json["data"]["id"] expectation_suite_dict = response_json["data"]["attributes"]["suite"] diff --git a/great_expectations/data_context/store/gx_cloud_store_backend.py b/great_expectations/data_context/store/gx_cloud_store_backend.py index 4a5cc46bda9e..a815bfbdf186 100644 --- a/great_expectations/data_context/store/gx_cloud_store_backend.py +++ b/great_expectations/data_context/store/gx_cloud_store_backend.py @@ -222,21 +222,21 @@ def _get(self, key: Tuple[str, ...]) -> ResponsePayload: # type: ignore[overrid return cast(ResponsePayload, response.json()) except json.JSONDecodeError as jsonError: logger.debug( - "Failed to parse GE Cloud Response into JSON", + "Failed to parse GX Cloud Response into JSON", str(response.text), str(jsonError), ) raise StoreBackendError( - f"Unable to get object in GE Cloud Store Backend: {jsonError}" + f"Unable to get object in GX Cloud Store Backend: {jsonError}" ) except requests.HTTPError as http_err: raise StoreBackendError( - f"Unable to get object in GE Cloud Store Backend: {get_user_friendly_error_message(http_err)}" + f"Unable to get object in GX Cloud Store Backend: {get_user_friendly_error_message(http_err)}" ) except requests.Timeout as timeout_exc: logger.exception(timeout_exc) raise StoreBackendError( - "Unable to get object in GE Cloud Store Backend: This is likely a transient error. Please try again." + "Unable to get object in GX Cloud Store Backend: This is likely a transient error. Please try again." 
) def _move(self) -> None: # type: ignore[override] @@ -284,17 +284,17 @@ def _update(self, ge_cloud_id: str, value: Any) -> bool: except requests.HTTPError as http_exc: raise StoreBackendError( - f"Unable to update object in GE Cloud Store Backend: {get_user_friendly_error_message(http_exc)}" + f"Unable to update object in GX Cloud Store Backend: {get_user_friendly_error_message(http_exc)}" ) except requests.Timeout as timeout_exc: logger.exception(timeout_exc) raise StoreBackendError( - "Unable to update object in GE Cloud Store Backend: This is likely a transient error. Please try again." + "Unable to update object in GX Cloud Store Backend: This is likely a transient error. Please try again." ) except Exception as e: logger.debug(str(e)) raise StoreBackendError( - f"Unable to update object in GE Cloud Store Backend: {e}" + f"Unable to update object in GX Cloud Store Backend: {e}" ) @property @@ -369,17 +369,17 @@ def _set( # type: ignore[override] ) except requests.HTTPError as http_exc: raise StoreBackendError( - f"Unable to set object in GE Cloud Store Backend: {get_user_friendly_error_message(http_exc)}" + f"Unable to set object in GX Cloud Store Backend: {get_user_friendly_error_message(http_exc)}" ) except requests.Timeout as timeout_exc: logger.exception(timeout_exc) raise StoreBackendError( - "Unable to set object in GE Cloud Store Backend: This is likely a transient error. Please try again." + "Unable to set object in GX Cloud Store Backend: This is likely a transient error. Please try again." ) except Exception as e: logger.debug(str(e)) raise StoreBackendError( - f"Unable to set object in GE Cloud Store Backend: {e}" + f"Unable to set object in GX Cloud Store Backend: {e}" ) @property @@ -437,7 +437,7 @@ def list_keys(self, prefix: Tuple = ()) -> List[Tuple[GXCloudRESTResource, str, except Exception as e: logger.debug(str(e)) raise StoreBackendError( - f"Unable to list keys in GE Cloud Store Backend: {e}" + f"Unable to list keys in GX Cloud Store Backend: {e}" ) def get_url_for_key( # type: ignore[override] @@ -482,18 +482,18 @@ def remove_key(self, key): except requests.HTTPError as http_exc: # TODO: GG 20220819 should we raise an error here instead of returning False logger.warning( - f"Unable to delete object in GE Cloud Store Backend: {get_user_friendly_error_message(http_exc)}" + f"Unable to delete object in GX Cloud Store Backend: {get_user_friendly_error_message(http_exc)}" ) return False except requests.Timeout as timeout_exc: logger.exception(timeout_exc) raise StoreBackendError( - "Unable to delete object in GE Cloud Store Backend: This is likely a transient error. Please try again." + "Unable to delete object in GX Cloud Store Backend: This is likely a transient error. Please try again." 
) except Exception as e: logger.debug(str(e)) raise StoreBackendError( - f"Unable to delete object in GE Cloud Store Backend: {e}" + f"Unable to delete object in GX Cloud Store Backend: {e}" ) def _has_key(self, key: Tuple[str, ...]) -> bool: diff --git a/great_expectations/data_context/store/html_site_store.py b/great_expectations/data_context/store/html_site_store.py index 72abbb1862bb..8eeee7b73300 100644 --- a/great_expectations/data_context/store/html_site_store.py +++ b/great_expectations/data_context/store/html_site_store.py @@ -421,7 +421,7 @@ def copy_static_assets(self, static_assets_source_dir=None): content_type, content_encoding = guess_type(item, strict=False) if content_type is None: - # Use GE-known content-type if possible + # Use GX-known content-type if possible if source_name.endswith(".otf"): content_type = "font/opentype" else: diff --git a/great_expectations/data_context/store/inline_store_backend.py b/great_expectations/data_context/store/inline_store_backend.py index eaf94c8e99ad..dd07d2c5dcb3 100644 --- a/great_expectations/data_context/store/inline_store_backend.py +++ b/great_expectations/data_context/store/inline_store_backend.py @@ -188,7 +188,7 @@ def _has_key(self, key: Tuple[str, ...]) -> bool: def _save_changes(self) -> None: context = self._data_context - config_filepath = pathlib.Path(context.root_directory) / context.GE_YML # type: ignore[arg-type] + config_filepath = pathlib.Path(context.root_directory) / context.GX_YML # type: ignore[arg-type] try: with open(config_filepath, "w") as outfile: diff --git a/great_expectations/data_context/store/json_site_store.py b/great_expectations/data_context/store/json_site_store.py index 2fae0db3a93d..f97afe47a847 100644 --- a/great_expectations/data_context/store/json_site_store.py +++ b/great_expectations/data_context/store/json_site_store.py @@ -12,7 +12,7 @@ class JsonSiteStore(Store): """ - A JsonSiteStore manages the JSON artifacts of our renderers, which allows us to render them into final views in HTML by GE Cloud. + A JsonSiteStore manages the JSON artifacts of our renderers, which allows us to render them into final views in HTML by GX Cloud. 
""" @@ -50,8 +50,8 @@ def __init__( def ge_cloud_response_json_to_object_dict(self, response_json: Dict) -> Dict: """ - This method takes full json response from GE cloud and outputs a dict appropriate for - deserialization into a GE object + This method takes full json response from GX cloud and outputs a dict appropriate for + deserialization into a GX object """ ge_cloud_json_site_id = response_json["data"]["id"] json_site_dict = response_json["data"]["attributes"]["rendered_data_doc"] diff --git a/great_expectations/data_context/store/profiler_store.py b/great_expectations/data_context/store/profiler_store.py index 473cbf74e60a..3dcdebbafac3 100644 --- a/great_expectations/data_context/store/profiler_store.py +++ b/great_expectations/data_context/store/profiler_store.py @@ -64,8 +64,8 @@ def serialization_self_check(self, pretty_print: bool) -> None: def ge_cloud_response_json_to_object_dict(self, response_json: dict) -> dict: """ - This method takes full json response from GE cloud and outputs a dict appropriate for - deserialization into a GE object + This method takes full json response from GX cloud and outputs a dict appropriate for + deserialization into a GX object """ ge_cloud_profiler_id = response_json["data"]["id"] profiler_config_dict = response_json["data"]["attributes"]["profiler"] diff --git a/great_expectations/data_context/store/store.py b/great_expectations/data_context/store/store.py index eba6364d4dc5..7fee4200339e 100644 --- a/great_expectations/data_context/store/store.py +++ b/great_expectations/data_context/store/store.py @@ -67,8 +67,8 @@ def __init__( def ge_cloud_response_json_to_object_dict(self, response_json: Dict) -> Dict: """ - This method takes full json response from GE cloud and outputs a dict appropriate for - deserialization into a GE object + This method takes full json response from GX cloud and outputs a dict appropriate for + deserialization into a GX object """ return response_json diff --git a/great_expectations/data_context/store/validations_store.py b/great_expectations/data_context/store/validations_store.py index 59cf1e78c4d1..c103af3a3d72 100644 --- a/great_expectations/data_context/store/validations_store.py +++ b/great_expectations/data_context/store/validations_store.py @@ -153,8 +153,8 @@ def __init__( def ge_cloud_response_json_to_object_dict(self, response_json: Dict) -> Dict: """ - This method takes full json response from GE cloud and outputs a dict appropriate for - deserialization into a GE object + This method takes full json response from GX cloud and outputs a dict appropriate for + deserialization into a GX object """ ge_cloud_suite_validation_result_id = response_json["data"]["id"] suite_validation_result_dict = response_json["data"]["attributes"]["result"] diff --git a/great_expectations/data_context/templates.py b/great_expectations/data_context/templates.py index e7cccd95de72..9b988b88fcd1 100644 --- a/great_expectations/data_context/templates.py +++ b/great_expectations/data_context/templates.py @@ -54,7 +54,7 @@ def dump(self, data, stream=None, **kw): # secrets out of source control & 2) environment-based configuration changes # such as staging vs prod. 
# -# When GE encounters substitution syntax (like `my_key: ${my_value}` or +# When GX encounters substitution syntax (like `my_key: ${my_value}` or # `my_key: $my_value`) in the great_expectations.yml file, it will attempt # to replace the value of `my_key` with the value from an environment # variable `my_value` or a corresponding key read from this config file, diff --git a/great_expectations/data_context/types/base.py b/great_expectations/data_context/types/base.py index df13dac828a4..0bd10261b70e 100644 --- a/great_expectations/data_context/types/base.py +++ b/great_expectations/data_context/types/base.py @@ -50,8 +50,8 @@ logger = logging.getLogger(__name__) logger.setLevel(logging.INFO) -CURRENT_GE_CONFIG_VERSION = 3 -FIRST_GE_CONFIG_VERSION_WITH_CHECKPOINT_STORE = 3 +CURRENT_GX_CONFIG_VERSION = 3 +FIRST_GX_CONFIG_VERSION_WITH_CHECKPOINT_STORE = 3 CURRENT_CHECKPOINT_CONFIG_VERSION = 1 MINIMUM_SUPPORTED_CONFIG_VERSION = 2 DEFAULT_USAGE_STATISTICS_URL = ( @@ -1206,7 +1206,7 @@ def validate_schema(self, data, **kwargs): if "generators" in data: raise ge_exceptions.InvalidConfigError( 'Your current configuration uses the "generators" key in a datasource, but in version 0.10 of ' - 'GE that key is renamed to "batch_kwargs_generators". Please update your configuration to continue.' + 'GX that key is renamed to "batch_kwargs_generators". Please update your configuration to continue.' ) # If a class_name begins with the dollar sign ("$"), then it is assumed to be a variable name to be substituted. if data["class_name"][0] == "$": @@ -1672,15 +1672,15 @@ def validate_schema(self, data, **kwargs) -> None: ), ) - if data["config_version"] > CURRENT_GE_CONFIG_VERSION: + if data["config_version"] > CURRENT_GX_CONFIG_VERSION: raise ge_exceptions.InvalidDataContextConfigError( "You appear to have an invalid config version ({}).\n The maximum valid version is {}.".format( - data["config_version"], CURRENT_GE_CONFIG_VERSION + data["config_version"], CURRENT_GX_CONFIG_VERSION ), validation_error=ValidationError(message="config version too high"), ) - if data["config_version"] < CURRENT_GE_CONFIG_VERSION and ( + if data["config_version"] < CURRENT_GX_CONFIG_VERSION and ( "checkpoint_store_name" in data or any( [ @@ -1691,17 +1691,17 @@ def validate_schema(self, data, **kwargs) -> None: ): raise ge_exceptions.InvalidDataContextConfigError( "You appear to be using a Checkpoint store with an invalid config version ({}).\n Your data context with this older configuration version specifies a Checkpoint store, which is a new feature. Please update your configuration to the new version number {} before adding a Checkpoint store.\n Visit https://docs.greatexpectations.io/docs/guides/miscellaneous/migration_guide#migrating-to-the-batch-request-v3-api to learn more about the upgrade process.".format( - data["config_version"], float(CURRENT_GE_CONFIG_VERSION) + data["config_version"], float(CURRENT_GX_CONFIG_VERSION) ), validation_error=ValidationError( message="You appear to be using a Checkpoint store with an invalid config version ({}).\n Your data context with this older configuration version specifies a Checkpoint store, which is a new feature. 
Please update your configuration to the new version number {} before adding a Checkpoint store.\n Visit https://docs.greatexpectations.io/docs/guides/miscellaneous/migration_guide#migrating-to-the-batch-request-v3-api to learn more about the upgrade process.".format( - data["config_version"], float(CURRENT_GE_CONFIG_VERSION) + data["config_version"], float(CURRENT_GX_CONFIG_VERSION) ) ), ) if ( - data["config_version"] >= FIRST_GE_CONFIG_VERSION_WITH_CHECKPOINT_STORE + data["config_version"] >= FIRST_GX_CONFIG_VERSION_WITH_CHECKPOINT_STORE and "validation_operators" in data and data["validation_operators"] is not None ): @@ -1710,14 +1710,14 @@ def validate_schema(self, data, **kwargs) -> None: ({data["config_version"]}).\n Your data context with this configuration version uses validation_operators, which \ are being deprecated. Please consult the V3 API migration guide \ https://docs.greatexpectations.io/docs/guides/miscellaneous/migration_guide#migrating-to-the-batch-request-v3-api and \ -update your configuration to be compatible with the version number {CURRENT_GE_CONFIG_VERSION}.\n (This message \ +update your configuration to be compatible with the version number {CURRENT_GX_CONFIG_VERSION}.\n (This message \ will appear repeatedly until your configuration is updated.) """ ) class DataContextConfigDefaults(enum.Enum): - DEFAULT_CONFIG_VERSION = CURRENT_GE_CONFIG_VERSION + DEFAULT_CONFIG_VERSION = CURRENT_GX_CONFIG_VERSION DEFAULT_EXPECTATIONS_STORE_NAME = "expectations_store" EXPECTATIONS_BASE_DIRECTORY = "expectations" DEFAULT_EXPECTATIONS_STORE_BASE_DIRECTORY_RELATIVE_NAME = ( diff --git a/great_expectations/data_context/util.py b/great_expectations/data_context/util.py index 47e7db46f7b1..ced22deb7c66 100644 --- a/great_expectations/data_context/util.py +++ b/great_expectations/data_context/util.py @@ -26,7 +26,7 @@ # TODO: Rename config to constructor_kwargs and config_defaults -> constructor_kwarg_default # TODO: Improve error messages in this method. Since so much of our workflow is config-driven, this will be a *super* important part of DX. 
def instantiate_class_from_config(config, runtime_environment, config_defaults=None): - """Build a GE class from configuration dictionaries.""" + """Build a GX class from configuration dictionaries.""" if config_defaults is None: config_defaults = {} diff --git a/great_expectations/dataset/sqlalchemy_dataset.py b/great_expectations/dataset/sqlalchemy_dataset.py index 2e5e8839e618..7e0900a78228 100644 --- a/great_expectations/dataset/sqlalchemy_dataset.py +++ b/great_expectations/dataset/sqlalchemy_dataset.py @@ -23,7 +23,7 @@ check_sql_engine_dialect, get_approximate_percentile_disc_sql, ) -from great_expectations.execution_engine.sqlalchemy_dialect import GESqlDialect +from great_expectations.execution_engine.sqlalchemy_dialect import GXSqlDialect from great_expectations.util import ( generate_temporary_table_name, get_pyathena_potential_type, @@ -283,7 +283,7 @@ def inner_wrapper( ) count_query: Select - if self.sql_engine_dialect.name.lower() == GESqlDialect.MSSQL: + if self.sql_engine_dialect.name.lower() == GXSqlDialect.MSSQL: count_query = self._get_count_query_mssql( expected_condition=expected_condition, ignore_values_condition=ignore_values_condition, @@ -317,7 +317,7 @@ def inner_wrapper( count_results["unexpected_count"] = int(count_results["unexpected_count"]) # limit doesn't compile properly for oracle so we will append rownum to query string later - if self.engine.dialect.name.lower() == GESqlDialect.ORACLE: + if self.engine.dialect.name.lower() == GXSqlDialect.ORACLE: raw_query = ( sa.select([sa.column(column)]) .select_from(self._table) @@ -758,11 +758,11 @@ def __init__( if custom_sql and not table_name: # NOTE: Eugene 2020-01-31: @James, this is a not a proper fix, but without it the "public" schema # was used for a temp table and raising an error - if engine.dialect.name.lower() != GESqlDialect.TRINO: + if engine.dialect.name.lower() != GXSqlDialect.TRINO: schema = None table_name = generate_temporary_table_name() # mssql expects all temporary table names to have a prefix '#' - if engine.dialect.name.lower() == GESqlDialect.MSSQL: + if engine.dialect.name.lower() == GXSqlDialect.MSSQL: table_name = f"#{table_name}" self.generated_table_name = table_name else: @@ -783,11 +783,11 @@ def __init__( # Currently we do no error handling if the engine doesn't work out of the box. 
raise err - if self.engine.dialect.name.lower() == GESqlDialect.BIGQUERY: + if self.engine.dialect.name.lower() == GXSqlDialect.BIGQUERY: # In BigQuery the table name is already qualified with its schema name self._table = sa.Table(table_name, sa.MetaData(), schema=None) temp_table_schema_name = None - if self.engine.dialect.name.lower() == GESqlDialect.TRINO: + if self.engine.dialect.name.lower() == GXSqlDialect.TRINO: self._table = sa.Table(table_name, sa.MetaData(), schema=schema) temp_table_schema_name = schema else: @@ -806,43 +806,43 @@ def __init__( dialect_name: str = self.engine.dialect.name.lower() if dialect_name in [ - GESqlDialect.POSTGRESQL, - GESqlDialect.MYSQL, - GESqlDialect.SQLITE, - GESqlDialect.ORACLE, - GESqlDialect.MSSQL, - GESqlDialect.HIVE, + GXSqlDialect.POSTGRESQL, + GXSqlDialect.MYSQL, + GXSqlDialect.SQLITE, + GXSqlDialect.ORACLE, + GXSqlDialect.MSSQL, + GXSqlDialect.HIVE, ]: # These are the officially included and supported dialects by sqlalchemy self.dialect = import_library_module( module_name=f"sqlalchemy.dialects.{self.engine.dialect.name}" ) - elif dialect_name == GESqlDialect.SNOWFLAKE: + elif dialect_name == GXSqlDialect.SNOWFLAKE: self.dialect = import_library_module( module_name="snowflake.sqlalchemy.snowdialect" ) - elif self.engine.dialect.name.lower() == GESqlDialect.DREMIO: + elif self.engine.dialect.name.lower() == GXSqlDialect.DREMIO: # WARNING: Dremio Support is experimental, functionality is not fully under test self.dialect = import_library_module( module_name="sqlalchemy_dremio.pyodbc.dialect" ) - elif dialect_name == GESqlDialect.REDSHIFT: + elif dialect_name == GXSqlDialect.REDSHIFT: self.dialect = import_library_module( module_name="sqlalchemy_redshift.dialect" ) - elif dialect_name == GESqlDialect.BIGQUERY: + elif dialect_name == GXSqlDialect.BIGQUERY: self.dialect = import_library_module(module_name=_BIGQUERY_MODULE_NAME) - elif dialect_name == GESqlDialect.AWSATHENA: + elif dialect_name == GXSqlDialect.AWSATHENA: self.dialect = import_library_module( module_name="pyathena.sqlalchemy_athena" ) - elif dialect_name == GESqlDialect.TERADATASQL: + elif dialect_name == GXSqlDialect.TERADATASQL: # WARNING: Teradata Support is experimental, functionality is not fully under test self.dialect = import_library_module( module_name="teradatasqlalchemy.dialect" ) - elif dialect_name == GESqlDialect.TRINO: + elif dialect_name == GXSqlDialect.TRINO: # WARNING: Trino Support is experimental, functionality is not fully under test self.dialect = import_library_module(module_name="trino.sqlalchemy.dialect") elif dialect_name == "vertica": @@ -854,9 +854,9 @@ def __init__( self.dialect = None if engine and engine.dialect.name.lower() in [ - GESqlDialect.SQLITE, - GESqlDialect.MSSQL, - GESqlDialect.SNOWFLAKE, + GXSqlDialect.SQLITE, + GXSqlDialect.MSSQL, + GXSqlDialect.SNOWFLAKE, ]: # sqlite/mssql/snowflake temp tables only persist within a connection so override the engine self.engine = engine.connect() @@ -872,7 +872,7 @@ def __init__( if ( custom_sql is not None - and self.engine.dialect.name.lower() == GESqlDialect.BIGQUERY + and self.engine.dialect.name.lower() == GXSqlDialect.BIGQUERY ): if ( self.generated_table_name is not None @@ -889,11 +889,11 @@ def __init__( ) if self.generated_table_name is not None: - if self.engine.dialect.name.lower() == GESqlDialect.BIGQUERY: + if self.engine.dialect.name.lower() == GXSqlDialect.BIGQUERY: logger.warning(f"Created permanent table {table_name}") - if self.engine.dialect.name.lower() == GESqlDialect.TRINO: + if 
self.engine.dialect.name.lower() == GXSqlDialect.TRINO: logger.warning(f"Created permanent view {schema}.{table_name}") - if self.engine.dialect.name.lower() == GESqlDialect.AWSATHENA: + if self.engine.dialect.name.lower() == GXSqlDialect.AWSATHENA: logger.warning(f"Created permanent table default.{table_name}") try: @@ -949,25 +949,25 @@ def head(self, n=5): head_sql_str = "select * from " if ( self._table.schema - and self.engine.dialect.name.lower() != GESqlDialect.BIGQUERY + and self.engine.dialect.name.lower() != GXSqlDialect.BIGQUERY ): head_sql_str += f"{self._table.schema}.{self._table.name}" - elif self.engine.dialect.name.lower() == GESqlDialect.BIGQUERY: + elif self.engine.dialect.name.lower() == GXSqlDialect.BIGQUERY: head_sql_str += f"`{self._table.name}`" else: head_sql_str += self._table.name head_sql_str += f" limit {n:d}" # Limit is unknown in mssql! Use top instead! - if self.engine.dialect.name.lower() == GESqlDialect.MSSQL: + if self.engine.dialect.name.lower() == GXSqlDialect.MSSQL: head_sql_str = f"select top({n}) * from {self._table.name}" # Limit doesn't work in oracle either - if self.engine.dialect.name.lower() == GESqlDialect.ORACLE: + if self.engine.dialect.name.lower() == GXSqlDialect.ORACLE: head_sql_str = f"select * from {self._table.name} WHERE ROWNUM <= {n}" # Limit is unknown in teradatasql! Use sample instead! - if self.engine.dialect.name.lower() == GESqlDialect.TERADATASQL: + if self.engine.dialect.name.lower() == GXSqlDialect.TERADATASQL: head_sql_str = f"select * from {self._table.name} sample {n}" df = pd.read_sql(head_sql_str, con=self.engine) @@ -1011,7 +1011,7 @@ def get_column_nonnull_count(self, column): # first part of OR(IN (NULL)) gives error in teradata sa.column(column).in_(ignore_values) if self.engine.dialect.name.lower() - != GESqlDialect.TERADATASQL + != GXSqlDialect.TERADATASQL else False, # Below is necessary b/c sa.in_() uses `==` but None != None # But we only consider this if None is actually in the list of ignore values @@ -1112,8 +1112,8 @@ def get_column_unique_count(self, column): def get_column_median(self, column): # AWS Athena and presto have an special function that can be used to retrieve the median if ( - self.sql_engine_dialect.name.lower() == GESqlDialect.AWSATHENA - or self.sql_engine_dialect.name.lower() == GESqlDialect.TRINO + self.sql_engine_dialect.name.lower() == GXSqlDialect.AWSATHENA + or self.sql_engine_dialect.name.lower() == GXSqlDialect.TRINO ): element_values = self.engine.execute( f"SELECT approx_percentile({column}, 0.5) FROM {self._table}" @@ -1153,21 +1153,21 @@ def get_column_median(self, column): def get_column_quantiles( self, column: str, quantiles: Iterable, allow_relative_error: bool = False ) -> list: - if self.sql_engine_dialect.name.lower() == GESqlDialect.MSSQL: + if self.sql_engine_dialect.name.lower() == GXSqlDialect.MSSQL: return self._get_column_quantiles_mssql(column=column, quantiles=quantiles) - elif self.sql_engine_dialect.name.lower() == GESqlDialect.AWSATHENA: + elif self.sql_engine_dialect.name.lower() == GXSqlDialect.AWSATHENA: return self._get_column_quantiles_awsathena( column=column, quantiles=quantiles ) - elif self.sql_engine_dialect.name.lower() == GESqlDialect.TRINO: + elif self.sql_engine_dialect.name.lower() == GXSqlDialect.TRINO: return self._get_column_quantiles_trino(column=column, quantiles=quantiles) - elif self.sql_engine_dialect.name.lower() == GESqlDialect.BIGQUERY: + elif self.sql_engine_dialect.name.lower() == GXSqlDialect.BIGQUERY: return 
self._get_column_quantiles_bigquery( column=column, quantiles=quantiles ) - elif self.sql_engine_dialect.name.lower() == GESqlDialect.MYSQL: + elif self.sql_engine_dialect.name.lower() == GXSqlDialect.MYSQL: return self._get_column_quantiles_mysql(column=column, quantiles=quantiles) - elif self.sql_engine_dialect.name.lower() == GESqlDialect.SNOWFLAKE: + elif self.sql_engine_dialect.name.lower() == GXSqlDialect.SNOWFLAKE: # NOTE: 20201216 - JPC - snowflake has a representation/precision limitation # in its percentile_disc implementation that causes an error when we do # not round. It is unclear to me *how* the call to round affects the behavior -- @@ -1181,7 +1181,7 @@ def get_column_quantiles( quantiles=quantiles, allow_relative_error=allow_relative_error, ) - elif self.sql_engine_dialect.name.lower() == GESqlDialect.SQLITE: + elif self.sql_engine_dialect.name.lower() == GXSqlDialect.SQLITE: return self._get_column_quantiles_sqlite( column=column, quantiles=quantiles, @@ -1416,7 +1416,7 @@ def _get_column_quantiles_generic_sqlalchemy( ) def get_column_stdev(self, column): - if self.sql_engine_dialect.name.lower() == GESqlDialect.MSSQL: + if self.sql_engine_dialect.name.lower() == GXSqlDialect.MSSQL: # Note: "stdev_samp" is not a recognized built-in function name (but "stdev" does exist for "mssql"). # This function is used to compute statistical standard deviation from sample data (per the reference in # https://sqlserverrider.wordpress.com/2013/03/06/standard-deviation-functions-stdev-and-stdevp-sql-server). @@ -1668,25 +1668,25 @@ def create_temporary_table(self, table_name, custom_sql, schema_name=None) -> No # handle cases where dialect.name.lower() returns a byte string (e.g. databricks) if isinstance(engine_dialect, bytes): engine_dialect = str(engine_dialect, "utf-8") - if engine_dialect == GESqlDialect.BIGQUERY: + if engine_dialect == GXSqlDialect.BIGQUERY: stmt = f"CREATE OR REPLACE VIEW `{table_name}` AS {custom_sql}" - elif engine_dialect == GESqlDialect.TRINO: + elif engine_dialect == GXSqlDialect.TRINO: stmt = f"CREATE OR REPLACE VIEW {schema_name}.{table_name} AS {custom_sql}" elif engine_dialect == "databricks": stmt = f"CREATE OR REPLACE TEMPORARY VIEW `{table_name}` AS {custom_sql}" - elif engine_dialect == GESqlDialect.DREMIO: + elif engine_dialect == GXSqlDialect.DREMIO: stmt = f"CREATE OR REPLACE VDS {table_name} AS {custom_sql}" - elif engine_dialect == GESqlDialect.SNOWFLAKE: + elif engine_dialect == GXSqlDialect.SNOWFLAKE: table_type = "TEMPORARY" if self.generated_table_name else "TRANSIENT" logger.info(f"Creating temporary table {table_name}") if schema_name is not None: table_name = f"{schema_name}.{table_name}" stmt = f"CREATE OR REPLACE {table_type} TABLE {table_name} AS {custom_sql}" - elif self.sql_engine_dialect.name == GESqlDialect.MYSQL: + elif self.sql_engine_dialect.name == GXSqlDialect.MYSQL: # Note: We can keep the "MySQL" clause separate for clarity, even though it is the same as the generic case. stmt = f"CREATE TEMPORARY TABLE {table_name} AS {custom_sql}" - elif self.sql_engine_dialect.name == GESqlDialect.MSSQL: + elif self.sql_engine_dialect.name == GXSqlDialect.MSSQL: # Insert "into #{table_name}" in the custom sql query right before the "from" clause # Split is case sensitive so detect case. # Note: transforming custom_sql to uppercase/lowercase has uninteded consequences (i.e., changing column names), so this is not an option! 
@@ -1698,9 +1698,9 @@ def create_temporary_table(self, table_name, custom_sql, schema_name=None) -> No stmt = ( f"{custom_sqlmod[0]}into {{table_name}} from{custom_sqlmod[1]}" ).format(table_name=table_name) - elif engine_dialect == GESqlDialect.AWSATHENA: + elif engine_dialect == GXSqlDialect.AWSATHENA: stmt = f"CREATE TABLE {table_name} AS {custom_sql}" - elif engine_dialect == GESqlDialect.ORACLE: + elif engine_dialect == GXSqlDialect.ORACLE: # oracle 18c introduced PRIVATE temp tables which are transient objects stmt_1 = "CREATE PRIVATE TEMPORARY TABLE {table_name} ON COMMIT PRESERVE DEFINITION AS {custom_sql}".format( table_name=table_name, custom_sql=custom_sql @@ -1710,11 +1710,11 @@ def create_temporary_table(self, table_name, custom_sql, schema_name=None) -> No stmt_2 = "CREATE GLOBAL TEMPORARY TABLE {table_name} ON COMMIT PRESERVE ROWS AS {custom_sql}".format( table_name=table_name, custom_sql=custom_sql ) - elif engine_dialect == GESqlDialect.TERADATASQL: + elif engine_dialect == GXSqlDialect.TERADATASQL: stmt = 'CREATE VOLATILE TABLE "{table_name}" AS ({custom_sql}) WITH DATA NO PRIMARY INDEX ON COMMIT PRESERVE ROWS'.format( table_name=table_name, custom_sql=custom_sql ) - elif self.sql_engine_dialect.name.lower() == GESqlDialect.HIVE: + elif self.sql_engine_dialect.name.lower() == GXSqlDialect.HIVE: stmt = "CREATE TEMPORARY TABLE {schema_name}.{table_name} AS {custom_sql}".format( schema_name=schema_name if schema_name is not None else "default", table_name=table_name, @@ -1730,7 +1730,7 @@ def create_temporary_table(self, table_name, custom_sql, schema_name=None) -> No else: stmt = f'CREATE TEMPORARY TABLE "{table_name}" AS {custom_sql}' - if engine_dialect == GESqlDialect.ORACLE: + if engine_dialect == GXSqlDialect.ORACLE: try: self.engine.execute(stmt_1) except DatabaseError: @@ -1741,7 +1741,7 @@ def create_temporary_table(self, table_name, custom_sql, schema_name=None) -> No def column_reflection_fallback(self): """If we can't reflect the table, use a query to at least get column names.""" col_info_dict_list: List[Dict] - if self.sql_engine_dialect.name.lower() == GESqlDialect.MSSQL: + if self.sql_engine_dialect.name.lower() == GXSqlDialect.MSSQL: type_module = self._get_dialect_type_module() # Get column names and types from the database # StackOverflow to the rescue: https://stackoverflow.com/a/38634368 @@ -2306,7 +2306,7 @@ def expect_column_values_to_be_unique( # This is a special case that needs to be handled for mysql, where you cannot refer to a temp_table # more than once in the same query. So instead of passing dup_query as-is, a second temp_table is created with # just the column we will be performing the expectation on, and the query is performed against it. 
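[Editorial sketch, not part of the patch.] The comment above describes the MySQL limitation this branch works around: a TEMPORARY table cannot be referenced more than once in a single statement (MySQL's "Can't reopen table" error), so the duplicate check is issued against a second temporary table that holds only the column under test. A minimal, hypothetical illustration of that pattern (table and column names invented, engine URL is a placeholder):

    import sqlalchemy as sa

    # Hypothetical MySQL engine; in the codebase the engine already exists on self.engine.
    engine = sa.create_engine("mysql+pymysql://user:password@localhost/db")

    with engine.connect() as conn:
        # Referencing `ge_temp` twice in one statement (e.g. an IN-subquery on itself)
        # would fail on MySQL, so copy the target column into a second temp table first.
        conn.execute(sa.text(
            "CREATE TEMPORARY TABLE ge_temp_2 AS SELECT tmp.col FROM ge_temp tmp"
        ))
        duplicates = conn.execute(sa.text(
            "SELECT col FROM ge_temp_2 GROUP BY col HAVING COUNT(col) > 1"
        )).fetchall()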
- if self.sql_engine_dialect.name.lower() == GESqlDialect.MYSQL: + if self.sql_engine_dialect.name.lower() == GXSqlDialect.MYSQL: temp_table_name = generate_temporary_table_name() temp_table_stmt = "CREATE TEMPORARY TABLE {new_temp_table} AS SELECT tmp.{column_name} FROM {source_table} tmp".format( new_temp_table=temp_table_name, diff --git a/great_expectations/dataset/util.py b/great_expectations/dataset/util.py index 3f1b031b43cc..8ba890343f7e 100644 --- a/great_expectations/dataset/util.py +++ b/great_expectations/dataset/util.py @@ -209,7 +209,7 @@ def build_continuous_partition_object( """Convenience method for building a partition object on continuous data from a dataset and column Args: - dataset (GE Dataset): the dataset for which to compute the partition + dataset (GX Dataset): the dataset for which to compute the partition column (string): The name of the column for which to construct the estimate. bins (string): One of 'uniform' (for uniformly spaced bins), 'ntile' (for percentile-spaced bins), or 'auto' (for automatically spaced bins) @@ -252,7 +252,7 @@ def build_categorical_partition_object(dataset, column, sort="value"): """Convenience method for building a partition object on categorical data from a dataset and column Args: - dataset (GE Dataset): the dataset for which to compute the partition + dataset (GX Dataset): the dataset for which to compute the partition column (string): The name of the column for which to construct the estimate. sort (string): must be one of "value", "count", or "none". - if "value" then values in the resulting partition object will be sorted lexigraphically diff --git a/great_expectations/datasource/batch_kwargs_generator/batch_kwargs_generator.py b/great_expectations/datasource/batch_kwargs_generator/batch_kwargs_generator.py index 44ebf2bb006a..c80a30c62762 100644 --- a/great_expectations/datasource/batch_kwargs_generator/batch_kwargs_generator.py +++ b/great_expectations/datasource/batch_kwargs_generator/batch_kwargs_generator.py @@ -25,7 +25,7 @@ class BatchKwargsGenerator: A Batch is the primary unit of validation in the Great Expectations DataContext. Batches include metadata that identifies how they were constructed--the same “batch_kwargs” assembled by the batch kwargs generator, While not every datasource will enable re-fetching a - specific batch of data, GE can store snapshots of batches or store metadata from an + specific batch of data, GX can store snapshots of batches or store metadata from an external data version control system. 
Example Generator Configurations follow:: diff --git a/great_expectations/datasource/batch_kwargs_generator/table_batch_kwargs_generator.py b/great_expectations/datasource/batch_kwargs_generator/table_batch_kwargs_generator.py index 0c008947b929..02c168aeb751 100644 --- a/great_expectations/datasource/batch_kwargs_generator/table_batch_kwargs_generator.py +++ b/great_expectations/datasource/batch_kwargs_generator/table_batch_kwargs_generator.py @@ -9,7 +9,7 @@ ) from great_expectations.datasource.types import SqlAlchemyDatasourceTableBatchKwargs from great_expectations.exceptions import BatchKwargsError, GreatExpectationsError -from great_expectations.execution_engine.sqlalchemy_dialect import GESqlDialect +from great_expectations.execution_engine.sqlalchemy_dialect import GXSqlDialect logger = logging.getLogger(__name__) @@ -150,7 +150,7 @@ def _get_iterator( # noqa: C901 - 19 split_data_asset_name = data_asset_name.split(".") if len(split_data_asset_name) == 2: schema_name = split_data_asset_name[0] - if self.engine.dialect.name.lower() == GESqlDialect.BIGQUERY: + if self.engine.dialect.name.lower() == GXSqlDialect.BIGQUERY: table_name = data_asset_name else: table_name = split_data_asset_name[1] @@ -160,14 +160,14 @@ def _get_iterator( # noqa: C901 - 19 elif ( len(split_data_asset_name) == 3 - and self.engine.dialect.name.lower() == GESqlDialect.BIGQUERY + and self.engine.dialect.name.lower() == GXSqlDialect.BIGQUERY ): project_id = split_data_asset_name[0] # noqa: F841 schema_name = split_data_asset_name[1] table_name = data_asset_name else: shape = "[SCHEMA.]TABLE" - if self.engine.dialect.name.lower() == GESqlDialect.BIGQUERY: + if self.engine.dialect.name.lower() == GXSqlDialect.BIGQUERY: shape = f"[PROJECT_ID.]{shape}" raise ValueError( @@ -226,7 +226,7 @@ def get_available_data_asset_names(self): if schema_name in known_information_schemas: continue - if self.engine.dialect.name.lower() == GESqlDialect.BIGQUERY: + if self.engine.dialect.name.lower() == GXSqlDialect.BIGQUERY: tables.extend( [ (table_name, "table") @@ -238,7 +238,7 @@ def get_available_data_asset_names(self): ) else: # set default_schema_name - if self.engine.dialect.name.lower() == GESqlDialect.SQLITE: + if self.engine.dialect.name.lower() == GXSqlDialect.SQLITE: # Workaround for compatibility with sqlalchemy < 1.4.0 and is described in issue #2641 default_schema_name = None else: diff --git a/great_expectations/datasource/data_connector/data_connector.py b/great_expectations/datasource/data_connector/data_connector.py index 8c648548ef7b..bc3ee3069758 100644 --- a/great_expectations/datasource/data_connector/data_connector.py +++ b/great_expectations/datasource/data_connector/data_connector.py @@ -35,7 +35,7 @@ class DataConnector: A Batch is the primary unit of validation in the Great Expectations DataContext. Batches include metadata that identifies how they were constructed--the same “batch_spec” assembled by the data connector, While not every Datasource will enable re-fetching a - specific batch of data, GE can store snapshots of batches or store metadata from an + specific batch of data, GX can store snapshots of batches or store metadata from an external data version control system. 
""" diff --git a/great_expectations/datasource/data_connector/inferred_asset_sql_data_connector.py b/great_expectations/datasource/data_connector/inferred_asset_sql_data_connector.py index adbc7b98e074..230365c05486 100644 --- a/great_expectations/datasource/data_connector/inferred_asset_sql_data_connector.py +++ b/great_expectations/datasource/data_connector/inferred_asset_sql_data_connector.py @@ -4,7 +4,7 @@ ConfiguredAssetSqlDataConnector, ) from great_expectations.execution_engine import ExecutionEngine -from great_expectations.execution_engine.sqlalchemy_dialect import GESqlDialect +from great_expectations.execution_engine.sqlalchemy_dialect import GXSqlDialect from great_expectations.util import deep_filter_properties_iterable try: @@ -251,7 +251,7 @@ def _introspect_db( # noqa: C901 - 16 # The following code fetches the names of external schemas and tables from a special table # 'svv_external_tables'. try: - if engine.dialect.name.lower() == GESqlDialect.REDSHIFT: + if engine.dialect.name.lower() == GXSqlDialect.REDSHIFT: # noinspection SqlDialectInspection,SqlNoDataSourceInspection result = engine.execute( "select schemaname, tablename from svv_external_tables" diff --git a/great_expectations/datasource/pandas_datasource.py b/great_expectations/datasource/pandas_datasource.py index 00b40e4b4c0a..5d3392f80c0c 100644 --- a/great_expectations/datasource/pandas_datasource.py +++ b/great_expectations/datasource/pandas_datasource.py @@ -210,7 +210,7 @@ def get_batch(self, batch_kwargs, batch_parameters=None): elif "s3" in batch_kwargs: # deprecated-v0.13.0 warnings.warn( - "Direct GE Support for the s3 BatchKwarg is deprecated as of v0.13.0 and will be removed in v0.16. " + "Direct GX Support for the s3 BatchKwarg is deprecated as of v0.13.0 and will be removed in v0.16. " "Please use a path including the s3a:// protocol instead.", DeprecationWarning, ) diff --git a/great_expectations/datasource/sparkdf_datasource.py b/great_expectations/datasource/sparkdf_datasource.py index 3791d432d773..c02c52baf59e 100644 --- a/great_expectations/datasource/sparkdf_datasource.py +++ b/great_expectations/datasource/sparkdf_datasource.py @@ -198,7 +198,7 @@ def get_batch(self, batch_kwargs, batch_parameters=None): if "s3" in batch_kwargs: # deprecated-v0.13.0 warnings.warn( - "Direct GE Support for the s3 BatchKwarg is deprecated as of v0.13.0 and will be removed in v0.16. " + "Direct GX Support for the s3 BatchKwarg is deprecated as of v0.13.0 and will be removed in v0.16. 
" "Please use a path including the s3a:// protocol instead.", DeprecationWarning, ) diff --git a/great_expectations/datasource/sqlalchemy_datasource.py b/great_expectations/datasource/sqlalchemy_datasource.py index c6cf593664d2..16641d93903b 100644 --- a/great_expectations/datasource/sqlalchemy_datasource.py +++ b/great_expectations/datasource/sqlalchemy_datasource.py @@ -14,7 +14,7 @@ DatasourceInitializationError, DatasourceKeyPairAuthBadPassphraseError, ) -from great_expectations.execution_engine.sqlalchemy_dialect import GESqlDialect +from great_expectations.execution_engine.sqlalchemy_dialect import GXSqlDialect from great_expectations.types import ClassConfig from great_expectations.types.configurations import classConfigSchema from great_expectations.util import get_sqlalchemy_url, import_make_url @@ -432,13 +432,13 @@ def get_batch(self, batch_kwargs, batch_parameters=None): ) # In BigQuery the table name is already qualified with its schema name - if self.engine.dialect.name.lower() == GESqlDialect.BIGQUERY: + if self.engine.dialect.name.lower() == GXSqlDialect.BIGQUERY: schema = None else: schema = batch_kwargs.get("schema") # limit doesn't compile properly for oracle so we will append rownum to query string later - if self.engine.dialect.name.lower() == GESqlDialect.ORACLE: + if self.engine.dialect.name.lower() == GXSqlDialect.ORACLE: raw_query = sqlalchemy.select([sqlalchemy.text("*")]).select_from( sqlalchemy.schema.Table( table, sqlalchemy.MetaData(), schema=schema @@ -461,7 +461,7 @@ def get_batch(self, batch_kwargs, batch_parameters=None): ) ) # use rownum instead of limit in oracle - if self.engine.dialect.name.lower() == GESqlDialect.ORACLE: + if self.engine.dialect.name.lower() == GXSqlDialect.ORACLE: query += "\nWHERE ROWNUM <= %d" % limit batch_reference = SqlAlchemyBatchReference( engine=self.engine, diff --git a/great_expectations/execution_engine/sparkdf_execution_engine.py b/great_expectations/execution_engine/sparkdf_execution_engine.py index 4c629e93c680..041f138dd3f1 100644 --- a/great_expectations/execution_engine/sparkdf_execution_engine.py +++ b/great_expectations/execution_engine/sparkdf_execution_engine.py @@ -313,7 +313,7 @@ def get_batch_data_and_markers( raise ge_exceptions.ExecutionEngineError( """ Spark schema was not properly serialized. - Please run the .jsonValue() method on the schema object before loading into GE. + Please run the .jsonValue() method on the schema object before loading into GX. schema: your_schema.jsonValue() """ ) diff --git a/great_expectations/execution_engine/split_and_sample/sqlalchemy_data_sampler.py b/great_expectations/execution_engine/split_and_sample/sqlalchemy_data_sampler.py index c2073985a8a9..a5583862ed41 100644 --- a/great_expectations/execution_engine/split_and_sample/sqlalchemy_data_sampler.py +++ b/great_expectations/execution_engine/split_and_sample/sqlalchemy_data_sampler.py @@ -5,7 +5,7 @@ from great_expectations.execution_engine.split_and_sample.data_sampler import ( DataSampler, ) -from great_expectations.execution_engine.sqlalchemy_dialect import GESqlDialect +from great_expectations.execution_engine.sqlalchemy_dialect import GXSqlDialect try: import sqlalchemy as sa @@ -51,7 +51,7 @@ def sample_using_limit( # Split clause should be permissive of all values if not supplied. 
if where_clause is None: - if execution_engine.dialect_name == GESqlDialect.SQLITE: + if execution_engine.dialect_name == GXSqlDialect.SQLITE: where_clause = sa.text("1 = 1") else: where_clause = sa.true() @@ -61,7 +61,7 @@ def sample_using_limit( # SQLalchemy's semantics for LIMIT are different than normal WHERE clauses, # so the business logic for building the query needs to be different. dialect_name: str = execution_engine.dialect_name - if dialect_name == GESqlDialect.ORACLE: + if dialect_name == GXSqlDialect.ORACLE: # TODO: AJB 20220429 WARNING THIS oracle dialect METHOD IS NOT COVERED BY TESTS # limit doesn't compile properly for oracle so we will append rownum to query string later raw_query: Selectable = ( @@ -79,7 +79,7 @@ def sample_using_limit( ) query += "\nAND ROWNUM <= %d" % batch_spec["sampling_kwargs"]["n"] return query - elif dialect_name == GESqlDialect.MSSQL: + elif dialect_name == GXSqlDialect.MSSQL: # Note that this code path exists because the limit parameter is not getting rendered # successfully in the resulting mssql query. selectable_query: Selectable = ( diff --git a/great_expectations/execution_engine/split_and_sample/sqlalchemy_data_splitter.py b/great_expectations/execution_engine/split_and_sample/sqlalchemy_data_splitter.py index d2a3bb97af26..00ecc1676ccf 100644 --- a/great_expectations/execution_engine/split_and_sample/sqlalchemy_data_splitter.py +++ b/great_expectations/execution_engine/split_and_sample/sqlalchemy_data_splitter.py @@ -24,7 +24,7 @@ DatePart, SplitterMethod, ) -from great_expectations.execution_engine.sqlalchemy_dialect import GESqlDialect +from great_expectations.execution_engine.sqlalchemy_dialect import GXSqlDialect try: import sqlalchemy as sa @@ -212,7 +212,7 @@ def split_on_converted_datetime( date_format_string: str = "%Y-%m-%d", ) -> bool: """Convert the values in the named column to the given date_format, and split on that""" - if self._dialect == GESqlDialect.SQLITE: + if self._dialect == GXSqlDialect.SQLITE: return ( sa.func.strftime( date_format_string, @@ -232,7 +232,7 @@ def split_on_divided_integer( batch_identifiers: dict, ) -> bool: """Divide the values in the named column by `divisor`, and split on that""" - if self._dialect == GESqlDialect.SQLITE: + if self._dialect == GXSqlDialect.SQLITE: return ( sa.cast( (sa.cast(sa.column(column_name), sa.Integer) / divisor), sa.Integer @@ -240,7 +240,7 @@ def split_on_divided_integer( == batch_identifiers[column_name] ) - if self._dialect == GESqlDialect.MYSQL: + if self._dialect == GXSqlDialect.MYSQL: return ( sa.cast( sa.func.truncate( @@ -251,7 +251,7 @@ def split_on_divided_integer( == batch_identifiers[column_name] ) - if self._dialect == GESqlDialect.MSSQL: + if self._dialect == GXSqlDialect.MSSQL: return ( sa.cast( sa.func.round( @@ -262,7 +262,7 @@ def split_on_divided_integer( == batch_identifiers[column_name] ) - if self._dialect == GESqlDialect.AWSATHENA: + if self._dialect == GXSqlDialect.AWSATHENA: return ( sa.cast( sa.func.truncate( @@ -291,8 +291,8 @@ def split_on_mod_integer( ) -> bool: """Divide the values in the named column by `mod`, and split on that""" if self._dialect in [ - GESqlDialect.SQLITE, - GESqlDialect.MSSQL, + GXSqlDialect.SQLITE, + GXSqlDialect.MSSQL, ]: return ( sa.cast(sa.column(column_name), sa.Integer) % mod @@ -325,7 +325,7 @@ def split_on_hashed_column( batch_identifiers: dict, ) -> bool: """Split on the hashed value of the named column""" - if self._dialect == GESqlDialect.SQLITE: + if self._dialect == GXSqlDialect.SQLITE: return ( 
sa.func.md5(sa.cast(sa.column(column_name), sa.VARCHAR), hash_digits) == batch_identifiers[column_name] @@ -511,7 +511,7 @@ def get_split_query_for_data_for_batch_identifiers_for_split_on_date_parts( # NOTE: 6/29/2022 Certain SQLAlchemy-compliant backends (e.g., Amazon Redshift, SQLite) allow only binary operators for "CONCAT". """ - if self._dialect == GESqlDialect.SQLITE: + if self._dialect == GXSqlDialect.SQLITE: concat_date_parts = sa.cast( sa.func.extract(date_parts[0].value, sa.column(column_name)), sa.String, @@ -784,7 +784,7 @@ def get_split_query_for_data_for_batch_identifiers_for_split_on_divided_integer( divisor: int, ) -> Selectable: """Divide the values in the named column by `divisor`, and split on that""" - if self._dialect == GESqlDialect.SQLITE: + if self._dialect == GXSqlDialect.SQLITE: return sa.select( [ sa.func.distinct( @@ -796,7 +796,7 @@ def get_split_query_for_data_for_batch_identifiers_for_split_on_divided_integer( ] ).select_from(sa.text(table_name)) - if self._dialect == GESqlDialect.MYSQL: + if self._dialect == GXSqlDialect.MYSQL: return sa.select( [ sa.func.distinct( @@ -811,7 +811,7 @@ def get_split_query_for_data_for_batch_identifiers_for_split_on_divided_integer( ] ).select_from(sa.text(table_name)) - if self._dialect == GESqlDialect.MSSQL: + if self._dialect == GXSqlDialect.MSSQL: return sa.select( [ sa.func.distinct( @@ -827,7 +827,7 @@ def get_split_query_for_data_for_batch_identifiers_for_split_on_divided_integer( ] ).select_from(sa.text(table_name)) - if self._dialect == GESqlDialect.AWSATHENA: + if self._dialect == GXSqlDialect.AWSATHENA: return sa.select( [ sa.func.distinct( @@ -862,8 +862,8 @@ def get_split_query_for_data_for_batch_identifiers_for_split_on_mod_integer( ) -> Selectable: """Divide the values in the named column by `mod`, and split on that""" if self._dialect in [ - GESqlDialect.SQLITE, - GESqlDialect.MSSQL, + GXSqlDialect.SQLITE, + GXSqlDialect.MSSQL, ]: return sa.select( [sa.func.distinct(sa.cast(sa.column(column_name), sa.Integer) % mod)] @@ -896,7 +896,7 @@ def get_split_query_for_data_for_batch_identifiers_for_split_on_hashed_column( hash_digits: int, ) -> Selectable: """Note: this method is experimental. 
It does not work with all SQL dialects.""" - if self._dialect == GESqlDialect.SQLITE: + if self._dialect == GXSqlDialect.SQLITE: return sa.select( [ sa.func.distinct( diff --git a/great_expectations/execution_engine/sqlalchemy_batch_data.py b/great_expectations/execution_engine/sqlalchemy_batch_data.py index a6b19b3471c6..4b33fd151a4b 100644 --- a/great_expectations/execution_engine/sqlalchemy_batch_data.py +++ b/great_expectations/execution_engine/sqlalchemy_batch_data.py @@ -2,7 +2,7 @@ from typing import Optional from great_expectations.core.batch import BatchData -from great_expectations.execution_engine.sqlalchemy_dialect import GESqlDialect +from great_expectations.execution_engine.sqlalchemy_dialect import GXSqlDialect from great_expectations.util import generate_temporary_table_name try: @@ -112,9 +112,9 @@ def __init__( dialect_name: str = engine.dialect.name.lower() try: - dialect = GESqlDialect(dialect_name) + dialect = GXSqlDialect(dialect_name) except ValueError: - dialect = GESqlDialect.OTHER + dialect = GXSqlDialect.OTHER self._dialect = dialect @@ -122,7 +122,7 @@ def __init__( # Suggestion: pull this block out as its own _function if use_quoted_name: table_name = quoted_name(table_name, quote=True) - if dialect == GESqlDialect.BIGQUERY: + if dialect == GXSqlDialect.BIGQUERY: if schema_name is not None: logger.warning( "schema_name should not be used when passing a table_name for biquery. Instead, include the schema name in the table_name string." @@ -142,11 +142,11 @@ def __init__( elif create_temp_table: generated_table_name = generate_temporary_table_name() # mssql expects all temporary table names to have a prefix '#' - if dialect == GESqlDialect.MSSQL: + if dialect == GXSqlDialect.MSSQL: generated_table_name = f"#{generated_table_name}" if selectable is not None: - if dialect in [GESqlDialect.ORACLE, GESqlDialect.MSSQL] and isinstance( + if dialect in [GXSqlDialect.ORACLE, GXSqlDialect.MSSQL] and isinstance( selectable, str ): # oracle, mssql query could already be passed as a string @@ -175,7 +175,7 @@ def __init__( self._selectable = selectable.alias(self._record_set_name) @property - def dialect(self) -> GESqlDialect: + def dialect(self) -> GXSqlDialect: return self._dialect @property @@ -211,17 +211,17 @@ def _create_temporary_table( # noqa: C901 - 18 :param query: """ - dialect: GESqlDialect = self.dialect + dialect: GXSqlDialect = self.dialect # dialects that support temp schemas if temp_table_schema_name is not None and dialect in [ - GESqlDialect.BIGQUERY, - GESqlDialect.SNOWFLAKE, - GESqlDialect.VERTICA, + GXSqlDialect.BIGQUERY, + GXSqlDialect.SNOWFLAKE, + GXSqlDialect.VERTICA, ]: temp_table_name = f"{temp_table_schema_name}.{temp_table_name}" - if dialect == GESqlDialect.BIGQUERY: + if dialect == GXSqlDialect.BIGQUERY: # BigQuery Table is created using with an expiration of 24 hours using Google's Data Definition Language # https://stackoverflow.com/questions/20673986/how-to-create-temporary-table-in-google-bigquery stmt = f"""CREATE OR REPLACE TABLE `{temp_table_name}` @@ -230,15 +230,15 @@ def _create_temporary_table( # noqa: C901 - 18 CURRENT_TIMESTAMP(), INTERVAL 24 HOUR) ) AS {query}""" - elif dialect == GESqlDialect.DREMIO: + elif dialect == GXSqlDialect.DREMIO: stmt = f"CREATE OR REPLACE VDS {temp_table_name} AS {query}" - elif dialect == GESqlDialect.SNOWFLAKE: + elif dialect == GXSqlDialect.SNOWFLAKE: stmt = f"CREATE OR REPLACE TEMPORARY TABLE {temp_table_name} AS {query}" - elif dialect == GESqlDialect.MYSQL: + elif dialect == GXSqlDialect.MYSQL: 
stmt = f"CREATE TEMPORARY TABLE {temp_table_name} AS {query}" - elif dialect == GESqlDialect.HIVE: + elif dialect == GXSqlDialect.HIVE: stmt = f"CREATE TEMPORARY TABLE `{temp_table_name}` AS {query}" - elif dialect == GESqlDialect.MSSQL: + elif dialect == GXSqlDialect.MSSQL: # Insert "into #{temp_table_name}" in the custom sql query right before the "from" clause # Split is case sensitive so detect case. # Note: transforming query to uppercase/lowercase has unintended consequences (i.e., @@ -257,17 +257,17 @@ def _create_temporary_table( # noqa: C901 - 18 ) # TODO: logger.warning is emitted in situations where a permanent TABLE is created in _create_temporary_table() # Similar message may be needed in the future for Trino backend. - elif dialect == GESqlDialect.TRINO: + elif dialect == GXSqlDialect.TRINO: logger.warning( - f"GE has created permanent view {temp_table_name} as part of processing SqlAlchemyBatchData, which usually creates a TEMP TABLE." + f"GX has created permanent view {temp_table_name} as part of processing SqlAlchemyBatchData, which usually creates a TEMP TABLE." ) stmt = f"CREATE TABLE {temp_table_name} AS {query}" - elif dialect == GESqlDialect.AWSATHENA: + elif dialect == GXSqlDialect.AWSATHENA: logger.warning( - f"GE has created permanent TABLE {temp_table_name} as part of processing SqlAlchemyBatchData, which usually creates a TEMP TABLE." + f"GX has created permanent TABLE {temp_table_name} as part of processing SqlAlchemyBatchData, which usually creates a TEMP TABLE." ) stmt = f"CREATE TABLE {temp_table_name} AS {query}" - elif dialect == GESqlDialect.ORACLE: + elif dialect == GXSqlDialect.ORACLE: # oracle 18c introduced PRIVATE temp tables which are transient objects stmt_1 = "CREATE PRIVATE TEMPORARY TABLE {temp_table_name} ON COMMIT PRESERVE DEFINITION AS {query}".format( temp_table_name=temp_table_name, query=query @@ -278,15 +278,15 @@ def _create_temporary_table( # noqa: C901 - 18 temp_table_name=temp_table_name, query=query ) # Please note that Teradata is currently experimental (as of 0.13.43) - elif dialect == GESqlDialect.TERADATASQL: + elif dialect == GXSqlDialect.TERADATASQL: stmt = 'CREATE VOLATILE TABLE "{temp_table_name}" AS ({query}) WITH DATA NO PRIMARY INDEX ON COMMIT PRESERVE ROWS'.format( temp_table_name=temp_table_name, query=query ) - elif dialect == GESqlDialect.VERTICA: + elif dialect == GXSqlDialect.VERTICA: stmt = f"CREATE TEMPORARY TABLE {temp_table_name} ON COMMIT PRESERVE ROWS AS {query}" else: stmt = f'CREATE TEMPORARY TABLE "{temp_table_name}" AS {query}' - if dialect == GESqlDialect.ORACLE: + if dialect == GXSqlDialect.ORACLE: try: self._engine.execute(stmt_1) except DatabaseError: diff --git a/great_expectations/execution_engine/sqlalchemy_dialect.py b/great_expectations/execution_engine/sqlalchemy_dialect.py index 28bc4f763ed5..b22cf7303384 100644 --- a/great_expectations/execution_engine/sqlalchemy_dialect.py +++ b/great_expectations/execution_engine/sqlalchemy_dialect.py @@ -4,7 +4,7 @@ from typing import Any, List, Union -class GESqlDialect(Enum): +class GXSqlDialect(Enum): """Contains sql dialects that have some level of support in Great Expectations. Also contains an unsupported attribute if the dialect is not in the list. 
""" @@ -25,7 +25,7 @@ class GESqlDialect(Enum): VERTICA = "vertica" OTHER = "other" - def __eq__(self, other: Union[str, bytes, GESqlDialect]): # type: ignore[override] # supertype uses `object` + def __eq__(self, other: Union[str, bytes, GXSqlDialect]): # type: ignore[override] # supertype uses `object` if isinstance(other, str): return self.value.lower() == other.lower() # Comparison against byte string, e.g. `b"hive"` should be treated as unicode @@ -33,7 +33,7 @@ def __eq__(self, other: Union[str, bytes, GESqlDialect]): # type: ignore[overri return self.value.lower() == other.lower().decode("utf-8") return self.value.lower() == other.value.lower() - def __hash__(self: GESqlDialect): + def __hash__(self: GXSqlDialect): return hash(self.value) @classmethod @@ -50,10 +50,10 @@ def get_all_dialect_names(cls) -> List[str]: return [ dialect_name.value for dialect_name in cls - if dialect_name != GESqlDialect.OTHER + if dialect_name != GXSqlDialect.OTHER ] @classmethod - def get_all_dialects(cls) -> List[GESqlDialect]: + def get_all_dialects(cls) -> List[GXSqlDialect]: """Get all dialects.""" - return [dialect for dialect in cls if dialect != GESqlDialect.OTHER] + return [dialect for dialect in cls if dialect != GXSqlDialect.OTHER] diff --git a/great_expectations/execution_engine/sqlalchemy_execution_engine.py b/great_expectations/execution_engine/sqlalchemy_execution_engine.py index 063a9c681bd7..817ebfc2918f 100644 --- a/great_expectations/execution_engine/sqlalchemy_execution_engine.py +++ b/great_expectations/execution_engine/sqlalchemy_execution_engine.py @@ -69,7 +69,7 @@ from great_expectations.execution_engine.sqlalchemy_batch_data import ( SqlAlchemyBatchData, ) -from great_expectations.execution_engine.sqlalchemy_dialect import GESqlDialect +from great_expectations.execution_engine.sqlalchemy_dialect import GXSqlDialect from great_expectations.expectations.row_conditions import ( RowCondition, RowConditionParserType, @@ -135,7 +135,7 @@ if sa: sa.dialects.registry.register( - GESqlDialect.DREMIO, "sqlalchemy_dremio.pyodbc", "dialect" + GXSqlDialect.DREMIO, "sqlalchemy_dremio.pyodbc", "dialect" ) except ImportError: sqlalchemy_dremio = None @@ -147,7 +147,7 @@ # Sometimes "snowflake-sqlalchemy" fails to self-register in certain environments, so we do it explicitly. # (see https://stackoverflow.com/questions/53284762/nosuchmoduleerror-cant-load-plugin-sqlalchemy-dialectssnowflake) sa.dialects.registry.register( - GESqlDialect.SNOWFLAKE, "snowflake.sqlalchemy", "dialect" + GXSqlDialect.SNOWFLAKE, "snowflake.sqlalchemy", "dialect" ) except (ImportError, KeyError, AttributeError): snowflake = None @@ -157,7 +157,7 @@ import sqlalchemy_bigquery as sqla_bigquery sa.dialects.registry.register( - GESqlDialect.BIGQUERY, _BIGQUERY_MODULE_NAME, "dialect" + GXSqlDialect.BIGQUERY, _BIGQUERY_MODULE_NAME, "dialect" ) bigquery_types_tuple = None except ImportError: @@ -175,7 +175,7 @@ # Sometimes "pybigquery.sqlalchemy_bigquery" fails to self-register in Azure (our CI/CD pipeline) in certain cases, so we do it explicitly. # (see https://stackoverflow.com/questions/53284762/nosuchmoduleerror-cant-load-plugin-sqlalchemy-dialectssnowflake) sa.dialects.registry.register( - GESqlDialect.BIGQUERY, _BIGQUERY_MODULE_NAME, "dialect" + GXSqlDialect.BIGQUERY, _BIGQUERY_MODULE_NAME, "dialect" ) try: getattr(sqla_bigquery, "INTEGER") @@ -331,42 +331,42 @@ def __init__( # noqa: C901 - 17 # these are two backends where temp_table_creation is not supported we set the default value to False. 
if self.dialect_name in [ - GESqlDialect.TRINO, - GESqlDialect.AWSATHENA, # WKS 202201 - AWS Athena currently doesn't support temp_tables. + GXSqlDialect.TRINO, + GXSqlDialect.AWSATHENA, # WKS 202201 - AWS Athena currently doesn't support temp_tables. ]: self._create_temp_table = False # Get the dialect **for purposes of identifying types** if self.dialect_name in [ - GESqlDialect.POSTGRESQL, - GESqlDialect.MYSQL, - GESqlDialect.SQLITE, - GESqlDialect.ORACLE, - GESqlDialect.MSSQL, + GXSqlDialect.POSTGRESQL, + GXSqlDialect.MYSQL, + GXSqlDialect.SQLITE, + GXSqlDialect.ORACLE, + GXSqlDialect.MSSQL, ]: # These are the officially included and supported dialects by sqlalchemy self.dialect_module = import_library_module( module_name=f"sqlalchemy.dialects.{self.engine.dialect.name}" ) - elif self.dialect_name == GESqlDialect.SNOWFLAKE: + elif self.dialect_name == GXSqlDialect.SNOWFLAKE: self.dialect_module = import_library_module( module_name="snowflake.sqlalchemy.snowdialect" ) - elif self.dialect_name == GESqlDialect.DREMIO: + elif self.dialect_name == GXSqlDialect.DREMIO: # WARNING: Dremio Support is experimental, functionality is not fully under test self.dialect_module = import_library_module( module_name="sqlalchemy_dremio.pyodbc" ) - elif self.dialect_name == GESqlDialect.REDSHIFT: + elif self.dialect_name == GXSqlDialect.REDSHIFT: self.dialect_module = import_library_module( module_name="sqlalchemy_redshift.dialect" ) - elif self.dialect_name == GESqlDialect.BIGQUERY: + elif self.dialect_name == GXSqlDialect.BIGQUERY: self.dialect_module = import_library_module( module_name=_BIGQUERY_MODULE_NAME ) - elif self.dialect_name == GESqlDialect.TERADATASQL: + elif self.dialect_name == GXSqlDialect.TERADATASQL: # WARNING: Teradata Support is experimental, functionality is not fully under test self.dialect_module = import_library_module( module_name="teradatasqlalchemy.dialect" @@ -380,16 +380,16 @@ def __init__( # noqa: C901 - 17 # Connection can be handled separately. 
self._engine_backup = None if self.engine and self.dialect_name in [ - GESqlDialect.SQLITE, - GESqlDialect.MSSQL, - GESqlDialect.SNOWFLAKE, - GESqlDialect.MYSQL, + GXSqlDialect.SQLITE, + GXSqlDialect.MSSQL, + GXSqlDialect.SNOWFLAKE, + GXSqlDialect.MYSQL, ]: self._engine_backup = self.engine # sqlite/mssql temp tables only persist within a connection so override the engine self.engine = self.engine.connect() if ( - self._engine_backup.dialect.name.lower() == GESqlDialect.SQLITE + self._engine_backup.dialect.name.lower() == GXSqlDialect.SQLITE and not isinstance(self._engine_backup, sa.engine.base.Connection) ): raw_connection = self._engine_backup.raw_connection() @@ -636,7 +636,7 @@ def get_domain_records( # noqa: C901 - 24 filter_condition = filter_conditions[0] assert ( filter_condition.condition_type == RowConditionParserType.GE - ), "filter_condition must be of type GE for SqlAlchemyExecutionEngine" + ), "filter_condition must be of type GX for SqlAlchemyExecutionEngine" selectable = ( sa.select([sa.text("*")]) @@ -1140,7 +1140,7 @@ def _build_selectable_from_batch_spec( ) else: - if self.dialect_name == GESqlDialect.SQLITE: + if self.dialect_name == GXSqlDialect.SQLITE: split_clause = sa.text("1 = 1") else: split_clause = sa.true() diff --git a/great_expectations/expectations/core/expect_column_kl_divergence_to_be_less_than.py b/great_expectations/expectations/core/expect_column_kl_divergence_to_be_less_than.py index 189d0a32c32a..14e65abe5fff 100644 --- a/great_expectations/expectations/core/expect_column_kl_divergence_to_be_less_than.py +++ b/great_expectations/expectations/core/expect_column_kl_divergence_to_be_less_than.py @@ -1129,9 +1129,6 @@ def _prescriptive_summary( runtime_configuration: Optional[dict] = None, **kwargs, ): - """ - Rendering function that is utilized by GE Cloud Front-end - """ ( header_template_str, header_params_with_json_schema, diff --git a/great_expectations/expectations/core/expect_column_quantile_values_to_be_between.py b/great_expectations/expectations/core/expect_column_quantile_values_to_be_between.py index aacb0e718e34..cea67033ec99 100644 --- a/great_expectations/expectations/core/expect_column_quantile_values_to_be_between.py +++ b/great_expectations/expectations/core/expect_column_quantile_values_to_be_between.py @@ -382,9 +382,6 @@ def _prescriptive_summary( runtime_configuration: Optional[dict] = None, **kwargs, ): - """ - Rendering function that is utilized by GE Cloud Front-end - """ ( header_template_str, header_params_with_json_schema, diff --git a/great_expectations/expectations/core/expect_column_values_to_be_of_type.py b/great_expectations/expectations/core/expect_column_values_to_be_of_type.py index b64fa934b6ea..179ec4c7741b 100644 --- a/great_expectations/expectations/core/expect_column_values_to_be_of_type.py +++ b/great_expectations/expectations/core/expect_column_values_to_be_of_type.py @@ -17,7 +17,7 @@ SparkDFExecutionEngine, SqlAlchemyExecutionEngine, ) -from great_expectations.execution_engine.sqlalchemy_dialect import GESqlDialect +from great_expectations.execution_engine.sqlalchemy_dialect import GXSqlDialect from great_expectations.expectations.expectation import ( ColumnMapExpectation, render_evaluation_parameter_string, @@ -374,7 +374,7 @@ def _validate_sqlalchemy(self, actual_column_type, expected_type, execution_engi if ( expected_type.lower() == "geography" and execution_engine.engine.dialect.name.lower() - == GESqlDialect.BIGQUERY + == GXSqlDialect.BIGQUERY and not BIGQUERY_GEO_SUPPORT ): logger.warning( diff --git 
a/great_expectations/expectations/expectation.py b/great_expectations/expectations/expectation.py index c8dfcbdde8ef..998ac6233283 100644 --- a/great_expectations/expectations/expectation.py +++ b/great_expectations/expectations/expectation.py @@ -162,7 +162,7 @@ def inner_func( rendered_string_template.append(rendered_content) else: raise GreatExpectationsError( - f"""GE was not able to render the value of evaluation parameters. + f"""GX was not able to render the value of evaluation parameters. Expectation {render_func} had evaluation parameters set, but they were not passed in.""" ) return rendered_string_template @@ -295,7 +295,7 @@ def _atomic_prescriptive_failed( **kwargs: dict, ) -> RenderedAtomicContent: """ - Default rendering function that is utilized by GE Cloud Front-end if an implemented atomic renderer fails + Default rendering function that is utilized by GX Cloud Front-end if an implemented atomic renderer fails """ template_str = "Rendering failed for Expectation: " @@ -406,9 +406,6 @@ def _prescriptive_summary( runtime_configuration: Optional[dict] = None, **kwargs: dict, ): - """ - Rendering function that is utilized by GE Cloud Front-end - """ ( template_str, params_with_json_schema, @@ -843,10 +840,6 @@ def _atomic_diagnostic_failed( result: Optional[ExpectationValidationResult] = None, **kwargs: dict, ) -> RenderedAtomicContent: - """ - Rendering function that is utilized by GE Cloud Front-end - """ - expectation_type: str expectation_kwargs: dict if configuration: @@ -904,9 +897,6 @@ def _atomic_diagnostic_observed_value( runtime_configuration: Optional[dict] = None, **kwargs: dict, ) -> RenderedAtomicContent: - """ - Rendering function that is utilized by GE Cloud Front-end - """ observed_value: str = cls._get_observed_value_from_evr(result=result) value_obj = renderedAtomicValueSchema.load( { diff --git a/great_expectations/expectations/metrics/column_aggregate_metrics/column_quantile_values.py b/great_expectations/expectations/metrics/column_aggregate_metrics/column_quantile_values.py index 3a9165b2523e..24a9886f7912 100644 --- a/great_expectations/expectations/metrics/column_aggregate_metrics/column_quantile_values.py +++ b/great_expectations/expectations/metrics/column_aggregate_metrics/column_quantile_values.py @@ -13,7 +13,7 @@ SparkDFExecutionEngine, SqlAlchemyExecutionEngine, ) -from great_expectations.execution_engine.sqlalchemy_dialect import GESqlDialect +from great_expectations.execution_engine.sqlalchemy_dialect import GXSqlDialect from great_expectations.execution_engine.util import get_approximate_percentile_disc_sql from great_expectations.expectations.metrics.column_aggregate_metric_provider import ( ColumnAggregateMetricProvider, @@ -104,35 +104,35 @@ def _sqlalchemy( quantiles = metric_value_kwargs["quantiles"] allow_relative_error = metric_value_kwargs.get("allow_relative_error", False) table_row_count = metrics.get("table.row_count") - if dialect.name.lower() == GESqlDialect.MSSQL: + if dialect.name.lower() == GXSqlDialect.MSSQL: return _get_column_quantiles_mssql( column=column, quantiles=quantiles, selectable=selectable, sqlalchemy_engine=sqlalchemy_engine, ) - elif dialect.name.lower() == GESqlDialect.BIGQUERY: + elif dialect.name.lower() == GXSqlDialect.BIGQUERY: return _get_column_quantiles_bigquery( column=column, quantiles=quantiles, selectable=selectable, sqlalchemy_engine=sqlalchemy_engine, ) - elif dialect.name.lower() == GESqlDialect.MYSQL: + elif dialect.name.lower() == GXSqlDialect.MYSQL: return _get_column_quantiles_mysql( 
column=column, quantiles=quantiles, selectable=selectable, sqlalchemy_engine=sqlalchemy_engine, ) - elif dialect.name.lower() == GESqlDialect.TRINO: + elif dialect.name.lower() == GXSqlDialect.TRINO: return _get_column_quantiles_trino( column=column, quantiles=quantiles, selectable=selectable, sqlalchemy_engine=sqlalchemy_engine, ) - elif dialect.name.lower() == GESqlDialect.SNOWFLAKE: + elif dialect.name.lower() == GXSqlDialect.SNOWFLAKE: # NOTE: 20201216 - JPC - snowflake has a representation/precision limitation # in its percentile_disc implementation that causes an error when we do # not round. It is unclear to me *how* the call to round affects the behavior -- @@ -149,7 +149,7 @@ def _sqlalchemy( selectable=selectable, sqlalchemy_engine=sqlalchemy_engine, ) - elif dialect.name.lower() == GESqlDialect.SQLITE: + elif dialect.name.lower() == GXSqlDialect.SQLITE: return _get_column_quantiles_sqlite( column=column, quantiles=quantiles, @@ -157,7 +157,7 @@ def _sqlalchemy( sqlalchemy_engine=sqlalchemy_engine, table_row_count=table_row_count, ) - elif dialect.name.lower() == GESqlDialect.AWSATHENA: + elif dialect.name.lower() == GXSqlDialect.AWSATHENA: return _get_column_quantiles_athena( column=column, quantiles=quantiles, diff --git a/great_expectations/expectations/metrics/column_aggregate_metrics/column_standard_deviation.py b/great_expectations/expectations/metrics/column_aggregate_metrics/column_standard_deviation.py index c6f1054760f4..eb824a5875b3 100644 --- a/great_expectations/expectations/metrics/column_aggregate_metrics/column_standard_deviation.py +++ b/great_expectations/expectations/metrics/column_aggregate_metrics/column_standard_deviation.py @@ -8,7 +8,7 @@ SparkDFExecutionEngine, SqlAlchemyExecutionEngine, ) -from great_expectations.execution_engine.sqlalchemy_dialect import GESqlDialect +from great_expectations.execution_engine.sqlalchemy_dialect import GXSqlDialect from great_expectations.expectations.metrics.column_aggregate_metric_provider import ( ColumnAggregateMetricProvider, column_aggregate_partial, @@ -41,9 +41,9 @@ def _pandas(cls, column, **kwargs): @column_aggregate_partial(engine=SqlAlchemyExecutionEngine) def _sqlalchemy(cls, column, _dialect, _metrics, **kwargs): """SqlAlchemy Standard Deviation implementation""" - if _dialect.name.lower() == GESqlDialect.MSSQL: + if _dialect.name.lower() == GXSqlDialect.MSSQL: standard_deviation = sa.func.stdev(column) - elif _dialect.name.lower() == GESqlDialect.SQLITE: + elif _dialect.name.lower() == GXSqlDialect.SQLITE: mean = _metrics["column.mean"] nonnull_row_count = _metrics["column_values.null.unexpected_count"] standard_deviation = sa.func.sqrt( diff --git a/great_expectations/expectations/metrics/map_metric_provider.py b/great_expectations/expectations/metrics/map_metric_provider.py index 704e11513409..f5f1dc2c69cc 100644 --- a/great_expectations/expectations/metrics/map_metric_provider.py +++ b/great_expectations/expectations/metrics/map_metric_provider.py @@ -19,7 +19,7 @@ MetricFunctionTypes, MetricPartialFunctionTypes, ) -from great_expectations.execution_engine.sqlalchemy_dialect import GESqlDialect +from great_expectations.execution_engine.sqlalchemy_dialect import GXSqlDialect from great_expectations.execution_engine.sqlalchemy_execution_engine import ( OperationalError, ) @@ -1999,7 +1999,7 @@ def _sqlalchemy_map_condition_unexpected_count_value( count_selectable = count_selectable.select_from(selectable) try: - if execution_engine.engine.dialect.name.lower() == GESqlDialect.MSSQL: + if 
execution_engine.engine.dialect.name.lower() == GXSqlDialect.MSSQL: temp_table_name: str = generate_temporary_table_name( default_table_name_prefix="#ge_temp_" ) @@ -2102,7 +2102,7 @@ def _sqlalchemy_column_map_condition_values( query = query.limit(result_format["partial_unexpected_count"]) elif ( result_format["result_format"] == "COMPLETE" - and execution_engine.engine.dialect.name.lower() == GESqlDialect.BIGQUERY + and execution_engine.engine.dialect.name.lower() == GXSqlDialect.BIGQUERY ): logger.warning( "BigQuery imposes a limit of 10000 parameters on individual queries; " diff --git a/great_expectations/expectations/metrics/table_metrics/table_head.py b/great_expectations/expectations/metrics/table_metrics/table_head.py index 301986a7229d..a9b470ad8c03 100644 --- a/great_expectations/expectations/metrics/table_metrics/table_head.py +++ b/great_expectations/expectations/metrics/table_metrics/table_head.py @@ -8,7 +8,7 @@ SparkDFExecutionEngine, SqlAlchemyExecutionEngine, ) -from great_expectations.execution_engine.sqlalchemy_dialect import GESqlDialect +from great_expectations.execution_engine.sqlalchemy_dialect import GXSqlDialect from great_expectations.expectations.metrics.import_manager import sa from great_expectations.expectations.metrics.metric_provider import metric_value from great_expectations.expectations.metrics.table_metric_provider import ( @@ -120,7 +120,7 @@ def _sqlalchemy( dialect=execution_engine.engine.dialect, compile_kwargs={"literal_binds": True}, ) - elif execution_engine.engine.dialect.name.lower() == GESqlDialect.MSSQL: + elif execution_engine.engine.dialect.name.lower() == GXSqlDialect.MSSQL: # limit doesn't compile properly for mssql sql = str( stmt.compile( diff --git a/great_expectations/expectations/metrics/util.py b/great_expectations/expectations/metrics/util.py index 171239c327f1..3378a22d860c 100644 --- a/great_expectations/expectations/metrics/util.py +++ b/great_expectations/expectations/metrics/util.py @@ -12,7 +12,7 @@ ExecutionEngine, SqlAlchemyExecutionEngine, ) -from great_expectations.execution_engine.sqlalchemy_dialect import GESqlDialect +from great_expectations.execution_engine.sqlalchemy_dialect import GXSqlDialect from great_expectations.execution_engine.util import check_sql_engine_dialect from great_expectations.util import get_sqlalchemy_inspector @@ -599,7 +599,7 @@ def column_reflection_fallback( query: TextClause = selectable else: # noinspection PyUnresolvedReferences - if dialect.name.lower() == GESqlDialect.REDSHIFT: + if dialect.name.lower() == GXSqlDialect.REDSHIFT: # Redshift needs temp tables to be declared as text query: Select = ( sa.select([sa.text("*")]).select_from(sa.text(selectable)).limit(1) diff --git a/great_expectations/jupyter_ux/__init__.py b/great_expectations/jupyter_ux/__init__.py index 24ab42552afd..4a32b517ccfd 100644 --- a/great_expectations/jupyter_ux/__init__.py +++ b/great_expectations/jupyter_ux/__init__.py @@ -137,7 +137,7 @@ def set_data_source(context, data_source_type=None): def setup_notebook_logging(logger=None, log_level=logging.INFO): - """Set up the provided logger for the GE default logging configuration. + """Set up the provided logger for the GX default logging configuration. 
Args: logger - the logger to configure diff --git a/great_expectations/profile/user_configurable_profiler.py b/great_expectations/profile/user_configurable_profiler.py index d964000e8f6b..1e2d22389237 100644 --- a/great_expectations/profile/user_configurable_profiler.py +++ b/great_expectations/profile/user_configurable_profiler.py @@ -79,7 +79,7 @@ def __init__( instantiated with or without a config. The config may contain a semantic_types dict or not. Once a profiler is instantiated, if config items change, a new profiler will be needed. - Write an entry on how to use the profiler for the GE docs site + Write an entry on how to use the profiler for the GX docs site Args: profile_dataset: A Great Expectations Dataset or Validator object excluded_expectations: A list of expectations to not include in the suite @@ -478,7 +478,7 @@ def _add_column_type_to_column_info(self, profile_dataset, column_name): """ Adds the data type of a column to the column_info dictionary on self Args: - profile_dataset: A GE dataset + profile_dataset: A GX dataset column_name: The name of the column for which to retrieve the data type Returns: @@ -513,7 +513,7 @@ def _get_column_type(profile_dataset, column) -> str: during the init of the profiler do not persist. Args: - profile_dataset: A GE dataset + profile_dataset: A GX dataset column: The column for which to get the data type Returns: @@ -582,7 +582,7 @@ def _add_column_cardinality_to_column_info(self, profile_dataset, column_name): """ Adds the cardinality of a column to the column_info dictionary on self Args: - profile_dataset: A GE Dataset + profile_dataset: A GX Dataset column_name: The name of the column for which to add cardinality Returns: @@ -620,7 +620,7 @@ def _get_column_cardinality(profile_dataset, column) -> OrderedProfilerCardinali Determines the cardinality of a column using the get_basic_column_cardinality method from OrderedProfilerCardinality Args: - profile_dataset: A GE Dataset + profile_dataset: A GX Dataset column: The column for which to get cardinality Returns: @@ -698,7 +698,7 @@ def _build_column_description_metadata(self, profile_dataset): """ Adds column description metadata to the suite on a Dataset object Args: - profile_dataset: A GE Dataset + profile_dataset: A GX Dataset Returns: An expectation suite with column description metadata @@ -794,11 +794,11 @@ def _build_expectations_value_set(self, profile_dataset, column): """ Adds a value_set expectation for a given column Args: - profile_dataset: A GE Dataset + profile_dataset: A GX Dataset column: The column for which to add an expectation Returns: - The GE Dataset + The GX Dataset """ if "expect_column_values_to_be_in_set" not in self.excluded_expectations: value_set = profile_dataset.expect_column_distinct_values_to_be_in_set( @@ -823,11 +823,11 @@ def _build_expectations_numeric(self, profile_dataset, column): # noqa: C901 - """ Adds a set of numeric expectations for a given column Args: - profile_dataset: A GE Dataset + profile_dataset: A GX Dataset column: The column for which to add expectations Returns: - The GE Dataset + The GX Dataset """ # min @@ -1007,11 +1007,11 @@ def _build_expectations_primary_or_compound_key(self, profile_dataset, column_li """ Adds a uniqueness expectation for a given column or set of columns Args: - profile_dataset: A GE Dataset + profile_dataset: A GX Dataset column_list: A list containing one or more columns for which to add a uniqueness expectation Returns: - The GE Dataset + The GX Dataset """ # uniqueness if ( @@ -1037,11 +1037,11 @@ 
def _build_expectations_string(self, profile_dataset, column): With the 0.12 API there isn't a quick way to introspect for value_lengths - if we did that, we could build a potentially useful value_lengths expectation here. Args: - profile_dataset: A GE Dataset + profile_dataset: A GX Dataset column: The column for which to add the expectation Returns: - The GE Dataset + The GX Dataset """ if ( @@ -1057,11 +1057,11 @@ def _build_expectations_datetime(self, profile_dataset, column): """ Adds `expect_column_values_to_be_between` for a given column Args: - profile_dataset: A GE Dataset + profile_dataset: A GX Dataset column: The column for which to add the expectation Returns: - The GE Dataset + The GX Dataset """ if "expect_column_values_to_be_between" not in self.excluded_expectations: @@ -1124,11 +1124,11 @@ def _build_expectations_for_all_column_types(self, profile_dataset, column) -> N - `expect_column_proportion_of_unique_values_to_be_between` - `expect_column_values_to_be_in_type_list` Args: - profile_dataset: A GE Dataset + profile_dataset: A GX Dataset column: The column for which to add the expectations Returns: - The GE Dataset + The GX Dataset """ if "expect_column_values_to_not_be_null" not in self.excluded_expectations: not_null_result = profile_dataset.expect_column_values_to_not_be_null( @@ -1213,10 +1213,10 @@ def _build_expectations_table(self, profile_dataset) -> None: """ Adds two table level expectations to the dataset Args: - profile_dataset: A GE Dataset + profile_dataset: A GX Dataset Returns: - The GE Dataset + The GX Dataset """ if ( diff --git a/great_expectations/render/renderer/site_builder.py b/great_expectations/render/renderer/site_builder.py index deb9c9e255a3..32977b6b3e74 100644 --- a/great_expectations/render/renderer/site_builder.py +++ b/great_expectations/render/renderer/site_builder.py @@ -304,7 +304,7 @@ def build(self, resource_identifiers=None, build_index: bool = True): for site_section_builder in self.site_section_builders.values(): site_section_builder.build(resource_identifiers=resource_identifiers) - # GE Cloud supports JSON Site Data Docs + # GX Cloud supports JSON Site Data Docs # Skip static assets, indexing if self.ge_cloud_mode: return diff --git a/great_expectations/util.py b/great_expectations/util.py index 40575f25c925..09e42b507a31 100644 --- a/great_expectations/util.py +++ b/great_expectations/util.py @@ -984,7 +984,7 @@ def validate( from great_expectations.dataset import Dataset, PandasDataset if data_asset_class is None: - # Guess the GE data_asset_type based on the type of the data_asset + # Guess the GX data_asset_type based on the type of the data_asset if isinstance(data_asset, pd.DataFrame): data_asset_class = PandasDataset # Add other data_asset_type conditions here as needed @@ -1722,7 +1722,7 @@ def get_context( ge_cloud_base_url (str): url for ge_cloud endpoint. ge_cloud_access_token (str): access_token for ge_cloud account. ge_cloud_organization_id (str): org_id for ge_cloud account. - ge_cloud_mode (bool): bool flag to specify whether to run GE in cloud mode (default is None). + ge_cloud_mode (bool): bool flag to specify whether to run GX in cloud mode (default is None). Returns: DataContext. 
Either a DataContext, BaseDataContext, or CloudDataContext depending on environment and/or @@ -1756,7 +1756,7 @@ def get_context( if ge_cloud_mode and not config_available: raise GXCloudConfigurationError( - "GE Cloud Mode enabled, but missing env vars: GE_CLOUD_ORGANIZATION_ID, GE_CLOUD_ACCESS_TOKEN" + "GX Cloud Mode enabled, but missing env vars: GE_CLOUD_ORGANIZATION_ID, GE_CLOUD_ACCESS_TOKEN" ) # Second, check for which type of local diff --git a/reqs/requirements-dev-lite.txt b/reqs/requirements-dev-lite.txt index d76c581c247b..a787b42554d0 100644 --- a/reqs/requirements-dev-lite.txt +++ b/reqs/requirements-dev-lite.txt @@ -12,5 +12,5 @@ pytest-mock>=3.8.2 pytest-timeout>=2.1.0 requirements-parser>=0.2.0 s3fs>=0.5.1 -snapshottest==0.6.0 # GE Cloud atomic renderer tests +snapshottest==0.6.0 # GX Cloud atomic renderer tests sqlalchemy>=1.3.18,<2.0.0 diff --git a/scripts/build_api_docs.py b/scripts/build_api_docs.py index e566da5e8471..56664e2bdc8c 100644 --- a/scripts/build_api_docs.py +++ b/scripts/build_api_docs.py @@ -93,10 +93,10 @@ def _escape_markdown_special_characters(string_to_escape: str) -> str: def _reformat_url_to_docusaurus_path(url: str) -> str: - """Removes the site portion of a GE url. + """Removes the site portion of a GX url. Args: - url: the url including the GE site that should be converted to a Docusaurus absolute link. + url: the url including the GX site that should be converted to a Docusaurus absolute link. """ return re.sub(r"^https://docs\.greatexpectations\.io", "", url).strip() diff --git a/tests/cli/conftest.py b/tests/cli/conftest.py index 5ef9d4e58470..e1f180d46554 100644 --- a/tests/cli/conftest.py +++ b/tests/cli/conftest.py @@ -37,7 +37,7 @@ def empty_context_with_checkpoint_v1_stats_enabled( @pytest.fixture def v10_project_directory(tmp_path_factory): """ - GE 0.10.x project for testing upgrade helper + GX 0.10.x project for testing upgrade helper """ project_path = str(tmp_path_factory.mktemp("v10_project")) context_root_dir = os.path.join(project_path, "great_expectations") diff --git a/tests/cli/test_checkpoint.py b/tests/cli/test_checkpoint.py index 2643d1c33ccd..34e245e103fa 100644 --- a/tests/cli/test_checkpoint.py +++ b/tests/cli/test_checkpoint.py @@ -3455,7 +3455,7 @@ def test_checkpoint_script_raises_error_if_python_file_exists( assert context.list_checkpoints() == ["my_v1_checkpoint"] script_path: str = os.path.join( - context.root_directory, context.GE_UNCOMMITTED_DIR, "run_my_v1_checkpoint.py" + context.root_directory, context.GX_UNCOMMITTED_DIR, "run_my_v1_checkpoint.py" ) with open(script_path, "w") as f: f.write("script here") @@ -3563,7 +3563,7 @@ def test_checkpoint_script_happy_path_generates_script_pandas( ), ] expected_script: str = os.path.join( - context.root_directory, context.GE_UNCOMMITTED_DIR, "run_my_v1_checkpoint.py" + context.root_directory, context.GX_UNCOMMITTED_DIR, "run_my_v1_checkpoint.py" ) assert os.path.isfile(expected_script) @@ -3662,7 +3662,7 @@ def test_checkpoint_script_happy_path_executable_successful_validation_pandas( script_path: str = os.path.abspath( os.path.join( context.root_directory, - context.GE_UNCOMMITTED_DIR, + context.GX_UNCOMMITTED_DIR, "run_my_fancy_checkpoint.py", ) ) @@ -3787,7 +3787,7 @@ def test_checkpoint_script_happy_path_executable_failed_validation_pandas( script_path: str = os.path.abspath( os.path.join( context.root_directory, - context.GE_UNCOMMITTED_DIR, + context.GX_UNCOMMITTED_DIR, "run_my_fancy_checkpoint.py", ) ) @@ -3910,7 +3910,7 @@ def 
test_checkpoint_script_happy_path_executable_failed_validation_due_to_bad_da script_path: str = os.path.abspath( os.path.join( context.root_directory, - context.GE_UNCOMMITTED_DIR, + context.GX_UNCOMMITTED_DIR, "run_my_fancy_checkpoint.py", ) ) diff --git a/tests/cli/test_datasource_new_pandas_paths.py b/tests/cli/test_datasource_new_pandas_paths.py index f04ff45560d8..4766231a767c 100644 --- a/tests/cli/test_datasource_new_pandas_paths.py +++ b/tests/cli/test_datasource_new_pandas_paths.py @@ -29,7 +29,7 @@ def _run_notebook(context: DataContext) -> None: - uncommitted_dir = os.path.join(context.root_directory, context.GE_UNCOMMITTED_DIR) + uncommitted_dir = os.path.join(context.root_directory, context.GX_UNCOMMITTED_DIR) expected_notebook = os.path.join(uncommitted_dir, "datasource_new.ipynb") with open(expected_notebook) as f: nb = nbformat.read(f, as_version=4) diff --git a/tests/cli/test_datasource_pandas.py b/tests/cli/test_datasource_pandas.py index a44fa61eab1c..cd3d178c98af 100644 --- a/tests/cli/test_datasource_pandas.py +++ b/tests/cli/test_datasource_pandas.py @@ -143,7 +143,7 @@ def test_cli_datasource_new( assert result.exit_code == 0 - uncommitted_dir = os.path.join(root_dir, context.GE_UNCOMMITTED_DIR) + uncommitted_dir = os.path.join(root_dir, context.GX_UNCOMMITTED_DIR) expected_notebook = os.path.join(uncommitted_dir, "datasource_new.ipynb") assert os.path.isfile(expected_notebook) mock_subprocess.assert_called_once_with(["jupyter", "notebook", expected_notebook]) @@ -260,7 +260,7 @@ def test_cli_datasource_new_no_jupyter_writes_notebook( assert result.exit_code == 0 - uncommitted_dir = os.path.join(root_dir, context.GE_UNCOMMITTED_DIR) + uncommitted_dir = os.path.join(root_dir, context.GX_UNCOMMITTED_DIR) expected_notebook = os.path.join(uncommitted_dir, "datasource_new.ipynb") assert os.path.isfile(expected_notebook) assert mock_subprocess.call_count == 0 @@ -324,7 +324,7 @@ def test_cli_datasource_new_with_name_param( assert result.exit_code == 0 - uncommitted_dir = os.path.join(root_dir, context.GE_UNCOMMITTED_DIR) + uncommitted_dir = os.path.join(root_dir, context.GX_UNCOMMITTED_DIR) expected_notebook = os.path.join(uncommitted_dir, "datasource_new.ipynb") assert os.path.isfile(expected_notebook) mock_subprocess.assert_called_once_with(["jupyter", "notebook", expected_notebook]) @@ -406,7 +406,7 @@ def test_cli_datasource_new_from_misc_directory( assert result.exit_code == 0 - uncommitted_dir = os.path.join(root_dir, context.GE_UNCOMMITTED_DIR) + uncommitted_dir = os.path.join(root_dir, context.GX_UNCOMMITTED_DIR) expected_notebook = os.path.join(uncommitted_dir, "datasource_new.ipynb") assert os.path.isfile(expected_notebook) mock_subprocess.assert_called_once_with(["jupyter", "notebook", expected_notebook]) diff --git a/tests/cli/test_datasource_snowflake.py b/tests/cli/test_datasource_snowflake.py index d54c04c4a6a4..ce7c9b432d24 100644 --- a/tests/cli/test_datasource_snowflake.py +++ b/tests/cli/test_datasource_snowflake.py @@ -35,7 +35,7 @@ # assert "What data would you like Great Expectations to connect to?" in stdout # assert "Which database backend are you using?" 
in stdout # -# uncommitted_dir = os.path.join(root_dir, context.GE_UNCOMMITTED_DIR) +# uncommitted_dir = os.path.join(root_dir, context.GX_UNCOMMITTED_DIR) # expected_notebook = os.path.join(uncommitted_dir, "datasource_new.ipynb") # assert os.path.isfile(expected_notebook) # mock_subprocess.assert_called_once_with(["jupyter", "notebook", expected_notebook]) diff --git a/tests/cli/test_datasource_sqlite.py b/tests/cli/test_datasource_sqlite.py index 8198edadd7f0..2a5c21298866 100644 --- a/tests/cli/test_datasource_sqlite.py +++ b/tests/cli/test_datasource_sqlite.py @@ -246,7 +246,7 @@ def test_cli_datasource_new_connection_string( assert result.exit_code == 0 - uncommitted_dir = os.path.join(root_dir, context.GE_UNCOMMITTED_DIR) + uncommitted_dir = os.path.join(root_dir, context.GX_UNCOMMITTED_DIR) expected_notebook = os.path.join(uncommitted_dir, "datasource_new.ipynb") assert os.path.isfile(expected_notebook) diff --git a/tests/cli/test_docs.py b/tests/cli/test_docs.py index 896dda04170c..2e6a71b2872e 100644 --- a/tests/cli/test_docs.py +++ b/tests/cli/test_docs.py @@ -195,7 +195,7 @@ def test_docs_build_happy_paths_build_site_on_single_site_context( in obs_urls[0]["site_url"] ) site_dir = os.path.join( - root_dir, context.GE_UNCOMMITTED_DIR, "data_docs", "local_site" + root_dir, context.GX_UNCOMMITTED_DIR, "data_docs", "local_site" ) assert os.path.isdir(site_dir) # Note the fixture has no expectations or validations - only check the index @@ -418,7 +418,7 @@ def test_docs_build_happy_paths_build_site_on_multiple_site_context( for expected_site_name in expected_built_site_names: assert expected_site_name in stdout site_dir = os.path.join( - root_dir, context.GE_UNCOMMITTED_DIR, "data_docs", expected_site_name + root_dir, context.GX_UNCOMMITTED_DIR, "data_docs", expected_site_name ) assert os.path.isdir(site_dir) # Note the fixture has no expectations or validations - only check the index @@ -507,7 +507,7 @@ def context_with_site_built(titanic_data_context_stats_enabled_config_version_3) assert len(obs_urls) == 1 expected_index_path = os.path.join( context.root_directory, - context.GE_UNCOMMITTED_DIR, + context.GX_UNCOMMITTED_DIR, "data_docs", "local_site", "index.html", @@ -656,7 +656,7 @@ def test_docs_clean_happy_paths_clean_expected_sites( ] expected_index_path = os.path.join( context.root_directory, - context.GE_UNCOMMITTED_DIR, + context.GX_UNCOMMITTED_DIR, "data_docs", "local_site", "index.html", diff --git a/tests/cli/test_init.py b/tests/cli/test_init.py index 8629c5322c37..136b8912ab65 100644 --- a/tests/cli/test_init.py +++ b/tests/cli/test_init.py @@ -212,7 +212,7 @@ def test_cli_init_on_existing_project_with_no_uncommitted_dirs_answering_no_then in stdout ) - context = DataContext(os.path.join(root_dir, DataContext.GE_DIR)) + context = DataContext(os.path.join(root_dir, DataContext.GX_DIR)) uncommitted_dir = os.path.join(context.root_directory, "uncommitted") shutil.rmtree(uncommitted_dir) assert not os.path.isdir(uncommitted_dir) diff --git a/tests/cli/test_init_pandas.py b/tests/cli/test_init_pandas.py index 421f80537df1..a30234020b9a 100644 --- a/tests/cli/test_init_pandas.py +++ b/tests/cli/test_init_pandas.py @@ -181,7 +181,7 @@ def test_init_on_existing_project_with_no_datasources_should_continue_init_flow_ initialized_project, ): project_dir = initialized_project - ge_dir = os.path.join(project_dir, DataContext.GE_DIR) + ge_dir = os.path.join(project_dir, DataContext.GX_DIR) # mangle the project to remove all traces of a suite and validations 
_remove_all_datasources(ge_dir) @@ -234,7 +234,7 @@ def test_init_on_existing_project_with_no_datasources_should_continue_init_flow_ ) assert "Great Expectations is now set up." in stdout - config = _load_config_file(os.path.join(ge_dir, DataContext.GE_YML)) + config = _load_config_file(os.path.join(ge_dir, DataContext.GX_YML)) assert "data__dir" in config["datasources"].keys() context = DataContext(ge_dir) @@ -248,7 +248,7 @@ def test_init_on_existing_project_with_no_datasources_should_continue_init_flow_ def _remove_all_datasources(ge_dir): - config_path = os.path.join(ge_dir, DataContext.GE_YML) + config_path = os.path.join(ge_dir, DataContext.GX_YML) config = _load_config_file(config_path) config["datasources"] = {} @@ -297,7 +297,7 @@ def initialized_project(mock_webbrowser, monkeypatch, tmp_path_factory): in mock_webbrowser.call_args[0][0] ) - context = DataContext(os.path.join(project_dir, DataContext.GE_DIR)) + context = DataContext(os.path.join(project_dir, DataContext.GX_DIR)) assert isinstance(context, DataContext) assert len(context.list_datasources()) == 1 return project_dir @@ -313,7 +313,7 @@ def test_init_on_existing_project_with_multiple_datasources_exist_do_nothing( mock_webbrowser, caplog, monkeypatch, initialized_project, filesystem_csv_2 ): project_dir = initialized_project - ge_dir = os.path.join(project_dir, DataContext.GE_DIR) + ge_dir = os.path.join(project_dir, DataContext.GX_DIR) context = DataContext(ge_dir) context.add_datasource( @@ -446,7 +446,7 @@ def test_init_on_existing_project_with_datasource_with_no_suite_create_one( initialized_project, ): project_dir = initialized_project - ge_dir = os.path.join(project_dir, DataContext.GE_DIR) + ge_dir = os.path.join(project_dir, DataContext.GX_DIR) uncommitted_dir = os.path.join(ge_dir, "uncommitted") data_folder_path = os.path.join(project_dir, "data") diff --git a/tests/cli/test_init_sqlite.py b/tests/cli/test_init_sqlite.py index 4f5d05c060c0..eca2e24c485e 100644 --- a/tests/cli/test_init_sqlite.py +++ b/tests/cli/test_init_sqlite.py @@ -311,7 +311,7 @@ def test_init_on_existing_project_with_no_datasources_should_continue_init_flow_ sa, ): project_dir = initialized_sqlite_project - ge_dir = os.path.join(project_dir, DataContext.GE_DIR) + ge_dir = os.path.join(project_dir, DataContext.GX_DIR) _remove_all_datasources(ge_dir) os.remove(os.path.join(ge_dir, "expectations", "warning.json")) @@ -368,7 +368,7 @@ def test_init_on_existing_project_with_no_datasources_should_continue_init_flow_ assert "Great Expectations connected to your database" in stdout assert "This looks like an existing project that" not in stdout - config = _load_config_file(os.path.join(ge_dir, DataContext.GE_YML)) + config = _load_config_file(os.path.join(ge_dir, DataContext.GX_YML)) assert "sqlite" in config["datasources"].keys() context = DataContext(ge_dir) @@ -391,7 +391,7 @@ def test_init_on_existing_project_with_no_datasources_should_continue_init_flow_ def _remove_all_datasources(ge_dir): - config_path = os.path.join(ge_dir, DataContext.GE_YML) + config_path = os.path.join(ge_dir, DataContext.GX_YML) config = _load_config_file(config_path) config["datasources"] = {} @@ -452,7 +452,7 @@ def initialized_sqlite_project( assert_no_logging_messages_or_tracebacks(caplog, result) - context = DataContext(os.path.join(project_dir, DataContext.GE_DIR)) + context = DataContext(os.path.join(project_dir, DataContext.GX_DIR)) assert isinstance(context, DataContext) assert len(context.list_datasources()) == 1 assert context.list_datasources() == [ @@ 
-485,7 +485,7 @@ def test_init_on_existing_project_with_multiple_datasources_exist_do_nothing( empty_sqlite_db, ): project_dir = initialized_sqlite_project - ge_dir = os.path.join(project_dir, DataContext.GE_DIR) + ge_dir = os.path.join(project_dir, DataContext.GX_DIR) context = DataContext(ge_dir) datasource_name = "wow_a_datasource" @@ -616,7 +616,7 @@ def test_init_on_existing_project_with_datasource_with_no_suite_create_one( mock_webbrowser, caplog, monkeypatch, initialized_sqlite_project, sa ): project_dir = initialized_sqlite_project - ge_dir = os.path.join(project_dir, DataContext.GE_DIR) + ge_dir = os.path.join(project_dir, DataContext.GX_DIR) uncommitted_dir = os.path.join(ge_dir, "uncommitted") # mangle the setup to remove all traces of any suite diff --git a/tests/cli/upgrade_helpers/test_upgrade_helper.py b/tests/cli/upgrade_helpers/test_upgrade_helper.py index 7b27cf6a6b18..0217ec3db619 100644 --- a/tests/cli/upgrade_helpers/test_upgrade_helper.py +++ b/tests/cli/upgrade_helpers/test_upgrade_helper.py @@ -22,7 +22,7 @@ @pytest.fixture def v20_project_directory_with_v30_configuration_and_v20_checkpoints(tmp_path_factory): """ - GE config_version: 3 project for testing upgrade helper + GX config_version: 3 project for testing upgrade helper """ project_path = str(tmp_path_factory.mktemp("v30_project")) context_root_dir = os.path.join(project_path, "great_expectations") @@ -46,7 +46,7 @@ def v20_project_directory_with_v30_configuration_and_v20_checkpoints(tmp_path_fa @pytest.fixture def v20_project_directory_with_v30_configuration_and_no_checkpoints(tmp_path_factory): """ - GE config_version: 3 project for testing upgrade helper + GX config_version: 3 project for testing upgrade helper """ project_path = str(tmp_path_factory.mktemp("v30_project")) context_root_dir = os.path.join(project_path, "great_expectations") @@ -208,7 +208,7 @@ def test_basic_project_upgrade(v10_project_directory, caplog): ) as f: expected_stdout: str = f.read().strip() expected_stdout = expected_stdout.replace( - "GE_PROJECT_DIR", v10_project_directory + "GX_PROJECT_DIR", v10_project_directory ) assert stdout == expected_stdout @@ -269,7 +269,7 @@ def test_basic_project_upgrade(v10_project_directory, caplog): expected_upgrade_log_dict: dict = json.load(f) expected_upgrade_log_str: str = json.dumps(expected_upgrade_log_dict) expected_upgrade_log_str = expected_upgrade_log_str.replace( - "GE_PROJECT_DIR", v10_project_directory + "GX_PROJECT_DIR", v10_project_directory ) expected_upgrade_log_dict: dict = json.loads(expected_upgrade_log_str) @@ -314,7 +314,7 @@ def test_project_upgrade_with_manual_steps( ) as f: expected_stdout: str = f.read().strip() expected_stdout = expected_stdout.replace( - "GE_PROJECT_DIR", v10_project_directory + "GX_PROJECT_DIR", v10_project_directory ) assert stdout == expected_stdout @@ -382,7 +382,7 @@ def test_project_upgrade_with_manual_steps( expected_upgrade_log_dict: dict = json.load(f) expected_upgrade_log_str: str = json.dumps(expected_upgrade_log_dict) expected_upgrade_log_str = expected_upgrade_log_str.replace( - "GE_PROJECT_DIR", v10_project_directory + "GX_PROJECT_DIR", v10_project_directory ) expected_upgrade_log_dict = json.loads(expected_upgrade_log_str) @@ -425,7 +425,7 @@ def test_project_upgrade_with_exception(v10_project_directory, caplog): ) as f: expected_stdout: str = f.read().strip() expected_stdout = expected_stdout.replace( - "GE_PROJECT_DIR", v10_project_directory + "GX_PROJECT_DIR", v10_project_directory ) assert stdout == expected_stdout @@ -485,10 +485,10 
@@ def test_project_upgrade_with_exception(v10_project_directory, caplog): expected_upgrade_log_dict: dict = json.load(f) expected_upgrade_log_str: str = json.dumps(expected_upgrade_log_dict) expected_upgrade_log_str = expected_upgrade_log_str.replace( - "GE_PROJECT_DIR", v10_project_directory + "GX_PROJECT_DIR", v10_project_directory ) expected_upgrade_log_str = expected_upgrade_log_str.replace( - "GE_PATH", os.path.split(great_expectations.__file__)[0] + "GX_PATH", os.path.split(great_expectations.__file__)[0] ) expected_upgrade_log_dict = json.loads(expected_upgrade_log_str) @@ -522,7 +522,7 @@ def test_v2_to_v3_project_upgrade_with_all_manual_steps_checkpoints_datasources_ ) as f: expected_stdout: str = f.read().strip() expected_stdout = expected_stdout.replace( - "GE_PROJECT_DIR", v20_project_directory + "GX_PROJECT_DIR", v20_project_directory ) assert stdout == expected_stdout @@ -590,7 +590,7 @@ def test_v2_to_v3_project_upgrade_with_all_manual_steps_checkpoints_datasources_ expected_upgrade_log_dict: dict = json.load(f) expected_upgrade_log_str: str = json.dumps(expected_upgrade_log_dict) expected_upgrade_log_str = expected_upgrade_log_str.replace( - "GE_PROJECT_DIR", v20_project_directory + "GX_PROJECT_DIR", v20_project_directory ) expected_upgrade_log_dict = json.loads(expected_upgrade_log_str) @@ -629,7 +629,7 @@ def test_v2_to_v3_project_upgrade_with_manual_steps_checkpoints( ) as f: expected_stdout: str = f.read().strip() expected_stdout = expected_stdout.replace( - "GE_PROJECT_DIR", + "GX_PROJECT_DIR", v20_project_directory_with_v30_configuration_and_v20_checkpoints, ) assert stdout == expected_stdout @@ -703,7 +703,7 @@ def test_v2_to_v3_project_upgrade_with_manual_steps_checkpoints( expected_upgrade_log_dict: dict = json.load(f) expected_upgrade_log_str: str = json.dumps(expected_upgrade_log_dict) expected_upgrade_log_str = expected_upgrade_log_str.replace( - "GE_PROJECT_DIR", + "GX_PROJECT_DIR", v20_project_directory_with_v30_configuration_and_v20_checkpoints, ) expected_upgrade_log_dict = json.loads(expected_upgrade_log_str) @@ -743,7 +743,7 @@ def test_v2_to_v3_project_upgrade_without_manual_steps( ) as f: expected_stdout: str = f.read().strip() expected_stdout = expected_stdout.replace( - "GE_PROJECT_DIR", + "GX_PROJECT_DIR", v20_project_directory_with_v30_configuration_and_no_checkpoints, ) assert stdout == expected_stdout @@ -811,7 +811,7 @@ def test_v2_to_v3_project_upgrade_without_manual_steps( expected_upgrade_log_dict: dict = json.load(f) expected_upgrade_log_str: str = json.dumps(expected_upgrade_log_dict) expected_upgrade_log_str = expected_upgrade_log_str.replace( - "GE_PROJECT_DIR", + "GX_PROJECT_DIR", v20_project_directory_with_v30_configuration_and_no_checkpoints, ) expected_upgrade_log_dict = json.loads(expected_upgrade_log_str) diff --git a/tests/cli/v012/test_checkpoint.py b/tests/cli/v012/test_checkpoint.py index d0617b08c4a6..7e2889eadb57 100644 --- a/tests/cli/v012/test_checkpoint.py +++ b/tests/cli/v012/test_checkpoint.py @@ -907,7 +907,7 @@ def test_checkpoint_script_raises_error_if_python_file_exists_with_ge_config_v2( root_dir = context.root_directory assert context.list_checkpoints() == ["my_checkpoint"] script_path = os.path.join( - root_dir, context.GE_UNCOMMITTED_DIR, "run_my_checkpoint.py" + root_dir, context.GX_UNCOMMITTED_DIR, "run_my_checkpoint.py" ) with open(script_path, "w") as f: f.write("script here") @@ -998,7 +998,7 @@ def test_checkpoint_script_happy_path_generates_script_with_ge_config_v2( ), ] expected_script = os.path.join( - 
root_dir, context.GE_UNCOMMITTED_DIR, "run_my_checkpoint.py" + root_dir, context.GX_UNCOMMITTED_DIR, "run_my_checkpoint.py" ) assert os.path.isfile(expected_script) @@ -1041,7 +1041,7 @@ def test_checkpoint_script_happy_path_executable_successful_validation_with_ge_c ) script_path = os.path.abspath( - os.path.join(root_dir, context.GE_UNCOMMITTED_DIR, "run_my_checkpoint.py") + os.path.join(root_dir, context.GX_UNCOMMITTED_DIR, "run_my_checkpoint.py") ) assert os.path.isfile(script_path) @@ -1099,7 +1099,7 @@ def test_checkpoint_script_happy_path_executable_failed_validation_with_ge_confi ) script_path = os.path.abspath( - os.path.join(root_dir, context.GE_UNCOMMITTED_DIR, "run_my_checkpoint.py") + os.path.join(root_dir, context.GX_UNCOMMITTED_DIR, "run_my_checkpoint.py") ) assert os.path.isfile(script_path) diff --git a/tests/cli/v012/test_datasource_pandas.py b/tests/cli/v012/test_datasource_pandas.py index 38e03863c36a..c42c125549df 100644 --- a/tests/cli/v012/test_datasource_pandas.py +++ b/tests/cli/v012/test_datasource_pandas.py @@ -105,7 +105,7 @@ def test_cli_datasource_new(caplog, empty_data_context, filesystem_csv_2): assert result.exit_code == 0 - config_path = os.path.join(project_root_dir, DataContext.GE_YML) + config_path = os.path.join(project_root_dir, DataContext.GX_YML) config = yaml.load(open(config_path)) datasources = config["datasources"] assert "mynewsource" in datasources.keys() diff --git a/tests/cli/v012/test_datasource_sqlite.py b/tests/cli/v012/test_datasource_sqlite.py index 3b1024a267cc..bc12cb4518b4 100644 --- a/tests/cli/v012/test_datasource_sqlite.py +++ b/tests/cli/v012/test_datasource_sqlite.py @@ -184,7 +184,7 @@ def test_cli_datasource_new_connection_string( assert result.exit_code == 0 - config_path = os.path.join(project_root_dir, DataContext.GE_YML) + config_path = os.path.join(project_root_dir, DataContext.GX_YML) config = yaml.load(open(config_path)) datasources = config["datasources"] assert "mynewsource" in datasources.keys() diff --git a/tests/cli/v012/test_init.py b/tests/cli/v012/test_init.py index 08cc1a5c5b6b..2c90f36676b2 100644 --- a/tests/cli/v012/test_init.py +++ b/tests/cli/v012/test_init.py @@ -62,7 +62,7 @@ def test_cli_init_on_existing_project_with_no_uncommitted_dirs_answering_yes_to_ assert "Great Expectations is now set up." 
in stdout - context = DataContext(os.path.join(root_dir, DataContext.GE_DIR)) + context = DataContext(os.path.join(root_dir, DataContext.GX_DIR)) uncommitted_dir = os.path.join(context.root_directory, "uncommitted") shutil.rmtree(uncommitted_dir) assert not os.path.isdir(uncommitted_dir) @@ -197,9 +197,9 @@ def test_cli_init_connection_string_non_working_db_connection_instructs_user_and assert result.exit_code == 1 - ge_dir = os.path.join(root_dir, DataContext.GE_DIR) + ge_dir = os.path.join(root_dir, DataContext.GX_DIR) assert os.path.isdir(ge_dir) - config_path = os.path.join(ge_dir, DataContext.GE_YML) + config_path = os.path.join(ge_dir, DataContext.GX_YML) assert os.path.isfile(config_path) config = yaml.load(open(config_path)) @@ -216,7 +216,7 @@ def test_cli_init_connection_string_non_working_db_connection_instructs_user_and } config_path = os.path.join( - ge_dir, DataContext.GE_UNCOMMITTED_DIR, "config_variables.yml" + ge_dir, DataContext.GX_UNCOMMITTED_DIR, "config_variables.yml" ) config = yaml.load(open(config_path)) assert config["my_db"] == { diff --git a/tests/cli/v012/test_init_pandas.py b/tests/cli/v012/test_init_pandas.py index bdd6d7bccd69..49d2894e025d 100644 --- a/tests/cli/v012/test_init_pandas.py +++ b/tests/cli/v012/test_init_pandas.py @@ -184,7 +184,7 @@ def test_init_on_existing_project_with_no_datasources_should_continue_init_flow_ initialized_project, ): project_dir = initialized_project - ge_dir = os.path.join(project_dir, DataContext.GE_DIR) + ge_dir = os.path.join(project_dir, DataContext.GX_DIR) # mangle the project to remove all traces of a suite and validations _remove_all_datasources(ge_dir) @@ -236,7 +236,7 @@ def test_init_on_existing_project_with_no_datasources_should_continue_init_flow_ ) assert "Great Expectations is now set up." 
in stdout - config = _load_config_file(os.path.join(ge_dir, DataContext.GE_YML)) + config = _load_config_file(os.path.join(ge_dir, DataContext.GX_YML)) assert "data__dir" in config["datasources"].keys() context = DataContext(ge_dir) @@ -250,7 +250,7 @@ def test_init_on_existing_project_with_no_datasources_should_continue_init_flow_ def _remove_all_datasources(ge_dir): - config_path = os.path.join(ge_dir, DataContext.GE_YML) + config_path = os.path.join(ge_dir, DataContext.GX_YML) config = _load_config_file(config_path) config["datasources"] = {} @@ -298,7 +298,7 @@ def initialized_project(mock_webbrowser, tmp_path_factory): in mock_webbrowser.call_args[0][0] ) - context = DataContext(os.path.join(project_dir, DataContext.GE_DIR)) + context = DataContext(os.path.join(project_dir, DataContext.GX_DIR)) assert isinstance(context, DataContext) assert len(context.list_datasources()) == 1 return project_dir @@ -313,7 +313,7 @@ def test_init_on_existing_project_with_multiple_datasources_exist_do_nothing( mock_webbrowser, caplog, initialized_project, filesystem_csv_2 ): project_dir = initialized_project - ge_dir = os.path.join(project_dir, DataContext.GE_DIR) + ge_dir = os.path.join(project_dir, DataContext.GX_DIR) context = DataContext(ge_dir) context.add_datasource( @@ -439,7 +439,7 @@ def test_init_on_existing_project_with_datasource_with_no_suite_create_one( initialized_project, ): project_dir = initialized_project - ge_dir = os.path.join(project_dir, DataContext.GE_DIR) + ge_dir = os.path.join(project_dir, DataContext.GX_DIR) uncommitted_dir = os.path.join(ge_dir, "uncommitted") data_folder_path = os.path.join(project_dir, "data") diff --git a/tests/cli/v012/test_init_sqlite.py b/tests/cli/v012/test_init_sqlite.py index 15e7dd5d5e9f..fc6d099390e1 100644 --- a/tests/cli/v012/test_init_sqlite.py +++ b/tests/cli/v012/test_init_sqlite.py @@ -309,7 +309,7 @@ def test_init_on_existing_project_with_no_datasources_should_continue_init_flow_ mock_webbrowser, caplog, initialized_sqlite_project, titanic_sqlite_db_file, sa ): project_dir = initialized_sqlite_project - ge_dir = os.path.join(project_dir, DataContext.GE_DIR) + ge_dir = os.path.join(project_dir, DataContext.GX_DIR) _remove_all_datasources(ge_dir) os.remove(os.path.join(ge_dir, "expectations", "warning.json")) @@ -367,7 +367,7 @@ def test_init_on_existing_project_with_no_datasources_should_continue_init_flow_ assert "Great Expectations connected to your database" in stdout assert "This looks like an existing project that" not in stdout - config = _load_config_file(os.path.join(ge_dir, DataContext.GE_YML)) + config = _load_config_file(os.path.join(ge_dir, DataContext.GX_YML)) assert "sqlite" in config["datasources"].keys() context = DataContext(ge_dir) @@ -390,7 +390,7 @@ def test_init_on_existing_project_with_no_datasources_should_continue_init_flow_ def _remove_all_datasources(ge_dir): - config_path = os.path.join(ge_dir, DataContext.GE_YML) + config_path = os.path.join(ge_dir, DataContext.GX_YML) config = _load_config_file(config_path) config["datasources"] = {} @@ -454,7 +454,7 @@ def initialized_sqlite_project( assert_no_logging_messages_or_tracebacks(caplog, result) - context = DataContext(os.path.join(project_dir, DataContext.GE_DIR)) + context = DataContext(os.path.join(project_dir, DataContext.GX_DIR)) assert isinstance(context, DataContext) assert len(context.list_datasources()) == 1 assert context.list_datasources() == [ @@ -485,7 +485,7 @@ def test_init_on_existing_project_with_multiple_datasources_exist_do_nothing( empty_sqlite_db, 
): project_dir = initialized_sqlite_project - ge_dir = os.path.join(project_dir, DataContext.GE_DIR) + ge_dir = os.path.join(project_dir, DataContext.GX_DIR) context = DataContext(ge_dir) datasource_name = "wow_a_datasource" @@ -608,7 +608,7 @@ def test_init_on_existing_project_with_datasource_with_no_suite_create_one( mock_webbrowser, caplog, initialized_sqlite_project, sa ): project_dir = initialized_sqlite_project - ge_dir = os.path.join(project_dir, DataContext.GE_DIR) + ge_dir = os.path.join(project_dir, DataContext.GX_DIR) uncommitted_dir = os.path.join(ge_dir, "uncommitted") # mangle the setup to remove all traces of any suite diff --git a/tests/cli/v012/test_suite.py b/tests/cli/v012/test_suite.py index cad0e4152536..968cf321e092 100644 --- a/tests/cli/v012/test_suite.py +++ b/tests/cli/v012/test_suite.py @@ -1642,7 +1642,7 @@ def test_suite_scaffold_creates_notebook_and_opens_jupyter( root_dir = context.root_directory suite_name = "foop" expected_notebook_path = os.path.join( - root_dir, context.GE_EDIT_NOTEBOOK_DIR, f"scaffold_{suite_name}.ipynb" + root_dir, context.GX_EDIT_NOTEBOOK_DIR, f"scaffold_{suite_name}.ipynb" ) assert not os.path.isfile(expected_notebook_path) @@ -1702,7 +1702,7 @@ def test_suite_scaffold_creates_notebook_with_no_jupyter_flag( root_dir = context.root_directory suite_name = "foop" expected_notebook_path = os.path.join( - root_dir, context.GE_EDIT_NOTEBOOK_DIR, f"scaffold_{suite_name}.ipynb" + root_dir, context.GX_EDIT_NOTEBOOK_DIR, f"scaffold_{suite_name}.ipynb" ) assert not os.path.isfile(expected_notebook_path) diff --git a/tests/cli/v012/test_suite_pre_v013.py b/tests/cli/v012/test_suite_pre_v013.py index e29eb8583248..a229b2348ea3 100644 --- a/tests/cli/v012/test_suite_pre_v013.py +++ b/tests/cli/v012/test_suite_pre_v013.py @@ -1573,7 +1573,7 @@ def test_suite_scaffold_creates_notebook_and_opens_jupyter( root_dir = context.root_directory suite_name = "foop" expected_notebook_path = os.path.join( - root_dir, context.GE_EDIT_NOTEBOOK_DIR, f"scaffold_{suite_name}.ipynb" + root_dir, context.GX_EDIT_NOTEBOOK_DIR, f"scaffold_{suite_name}.ipynb" ) assert not os.path.isfile(expected_notebook_path) @@ -1633,7 +1633,7 @@ def test_suite_scaffold_creates_notebook_with_no_jupyter_flag( root_dir = context.root_directory suite_name = "foop" expected_notebook_path = os.path.join( - root_dir, context.GE_EDIT_NOTEBOOK_DIR, f"scaffold_{suite_name}.ipynb" + root_dir, context.GX_EDIT_NOTEBOOK_DIR, f"scaffold_{suite_name}.ipynb" ) assert not os.path.isfile(expected_notebook_path) diff --git a/tests/cli/v012/upgrade_helpers/test_upgrade_helper_pre_v013.py b/tests/cli/v012/upgrade_helpers/test_upgrade_helper_pre_v013.py index eb3233f57d9b..b927d566908d 100644 --- a/tests/cli/v012/upgrade_helpers/test_upgrade_helper_pre_v013.py +++ b/tests/cli/v012/upgrade_helpers/test_upgrade_helper_pre_v013.py @@ -146,7 +146,7 @@ def test_basic_project_upgrade(v10_project_directory, caplog): ) as f: expected_stdout = f.read().strip() expected_stdout = expected_stdout.replace( - "GE_PROJECT_DIR", v10_project_directory + "GX_PROJECT_DIR", v10_project_directory ) assert stdout == expected_stdout @@ -207,7 +207,7 @@ def test_basic_project_upgrade(v10_project_directory, caplog): expected_upgrade_log_dict = json.load(f) expected_upgrade_log_str = json.dumps(expected_upgrade_log_dict) expected_upgrade_log_str = expected_upgrade_log_str.replace( - "GE_PROJECT_DIR", v10_project_directory + "GX_PROJECT_DIR", v10_project_directory ) expected_upgrade_log_dict = json.loads(expected_upgrade_log_str) @@ 
-252,7 +252,7 @@ def test_project_upgrade_with_manual_steps( ) as f: expected_stdout = f.read().strip() expected_stdout = expected_stdout.replace( - "GE_PROJECT_DIR", v10_project_directory + "GX_PROJECT_DIR", v10_project_directory ) assert stdout == expected_stdout @@ -320,7 +320,7 @@ def test_project_upgrade_with_manual_steps( expected_upgrade_log_dict = json.load(f) expected_upgrade_log_str = json.dumps(expected_upgrade_log_dict) expected_upgrade_log_str = expected_upgrade_log_str.replace( - "GE_PROJECT_DIR", v10_project_directory + "GX_PROJECT_DIR", v10_project_directory ) expected_upgrade_log_dict = json.loads(expected_upgrade_log_str) @@ -363,7 +363,7 @@ def test_project_upgrade_with_exception(v10_project_directory, caplog): ) as f: expected_stdout = f.read().strip() expected_stdout = expected_stdout.replace( - "GE_PROJECT_DIR", v10_project_directory + "GX_PROJECT_DIR", v10_project_directory ) assert stdout == expected_stdout @@ -423,7 +423,7 @@ def test_project_upgrade_with_exception(v10_project_directory, caplog): expected_upgrade_log_dict = json.load(f) expected_upgrade_log_str = json.dumps(expected_upgrade_log_dict) expected_upgrade_log_str = expected_upgrade_log_str.replace( - "GE_PROJECT_DIR", v10_project_directory + "GX_PROJECT_DIR", v10_project_directory ) expected_upgrade_log_str = expected_upgrade_log_str.replace( "GE_PATH", os.path.split(great_expectations.__file__)[0] @@ -460,7 +460,7 @@ def test_v2_to_v3_project_upgrade(v20_project_directory, caplog): ) as f: expected_stdout = f.read().strip() expected_stdout = expected_stdout.replace( - "GE_PROJECT_DIR", v20_project_directory + "GX_PROJECT_DIR", v20_project_directory ) assert stdout == expected_stdout @@ -528,7 +528,7 @@ def test_v2_to_v3_project_upgrade(v20_project_directory, caplog): expected_upgrade_log_dict = json.load(f) expected_upgrade_log_str = json.dumps(expected_upgrade_log_dict) expected_upgrade_log_str = expected_upgrade_log_str.replace( - "GE_PROJECT_DIR", v20_project_directory + "GX_PROJECT_DIR", v20_project_directory ) expected_upgrade_log_dict = json.loads(expected_upgrade_log_str) diff --git a/tests/conftest.py b/tests/conftest.py index 9aa5c634f43b..43b449a3f4b6 100644 --- a/tests/conftest.py +++ b/tests/conftest.py @@ -1753,7 +1753,7 @@ def site_builder_data_context_v013_with_html_store_titanic_random( @pytest.fixture def v20_project_directory(tmp_path_factory): """ - GE config_version: 2 project for testing upgrade helper + GX config_version: 2 project for testing upgrade helper """ project_path = str(tmp_path_factory.mktemp("v20_project")) context_root_dir = os.path.join(project_path, "great_expectations") diff --git a/tests/core/test_evaluation_parameters.py b/tests/core/test_evaluation_parameters.py index 066616796294..35415e403d17 100644 --- a/tests/core/test_evaluation_parameters.py +++ b/tests/core/test_evaluation_parameters.py @@ -85,7 +85,7 @@ def test_parse_evaluation_parameter(): == 9 ) - # Non GE URN syntax fails + # Non GX URN syntax fails with pytest.raises(EvaluationParameterError) as err: parse_evaluation_parameter("urn:ieee:not_ge * 10", {"urn:ieee:not_ge": 1}) assert "Parse Failure" in str(err.value) diff --git a/tests/core/usage_statistics/test_execution_environment.py b/tests/core/usage_statistics/test_execution_environment.py index 3aa49d3a3d8b..1915044f456d 100644 --- a/tests/core/usage_statistics/test_execution_environment.py +++ b/tests/core/usage_statistics/test_execution_environment.py @@ -6,7 +6,7 @@ from packaging import version from 
great_expectations.core.usage_statistics.execution_environment import ( - GEExecutionEnvironment, + GXExecutionEnvironment, InstallEnvironment, PackageInfo, ) @@ -24,16 +24,16 @@ "input_version", ["8.8.8", "0.14.12+14.g8f54aa902.dirty", "0.1.0.post0"] ) @mock.patch( - "great_expectations.core.usage_statistics.execution_environment.GEExecutionEnvironment._get_all_installed_packages", + "great_expectations.core.usage_statistics.execution_environment.GXExecutionEnvironment._get_all_installed_packages", return_value=True, ) @mock.patch(METADATA_VERSION_PATCH, return_value=True) @mock.patch( - "great_expectations.core.usage_statistics.package_dependencies.GEDependencies.get_dev_dependency_names", + "great_expectations.core.usage_statistics.package_dependencies.GXDependencies.get_dev_dependency_names", return_value=True, ) @mock.patch( - "great_expectations.core.usage_statistics.package_dependencies.GEDependencies.get_required_dependency_names", + "great_expectations.core.usage_statistics.package_dependencies.GXDependencies.get_required_dependency_names", return_value=True, ) def test_get_installed_packages( @@ -43,7 +43,7 @@ def test_get_installed_packages( get_all_installed_packages, input_version, ): - """Test that we are able to retrieve installed and not installed packages in the GE execution environment.""" + """Test that we are able to retrieve installed and not installed packages in the GX execution environment.""" get_required_dependency_names.return_value = [ "req-package-1", @@ -65,7 +65,7 @@ def test_get_installed_packages( "dev-package-2", ] - ge_execution_environment = GEExecutionEnvironment() + ge_execution_environment = GXExecutionEnvironment() expected_dependencies: List[PackageInfo] = [ PackageInfo( package_name="req-package-1", diff --git a/tests/core/usage_statistics/test_package_dependencies.py b/tests/core/usage_statistics/test_package_dependencies.py index 1b14e5a9ff5e..1321147dfc28 100644 --- a/tests/core/usage_statistics/test_package_dependencies.py +++ b/tests/core/usage_statistics/test_package_dependencies.py @@ -2,7 +2,7 @@ import pytest -from great_expectations.core.usage_statistics.package_dependencies import GEDependencies +from great_expectations.core.usage_statistics.package_dependencies import GXDependencies def test__get_dependency_names(): @@ -47,18 +47,18 @@ def test__get_dependency_names(): "1", "-", ] - ge_dependencies = GEDependencies() + ge_dependencies = GXDependencies() observed_dependencies = ge_dependencies._get_dependency_names(mock_dependencies) assert observed_dependencies == expected_dependendencies @pytest.mark.integration def test_required_dependency_names_match_requirements_file(): - """If there is a mismatch, GEDependencies should change to match our requirements.txt file. + """If there is a mismatch, GXDependencies should change to match our requirements.txt file. - See GEDependencies for a utility to check for a mismatch. + See GXDependencies for a utility to check for a mismatch. """ - ge_dependencies = GEDependencies() + ge_dependencies = GXDependencies() assert ( ge_dependencies.get_required_dependency_names() == ge_dependencies.get_required_dependency_names_from_requirements_file() @@ -67,11 +67,11 @@ def test_required_dependency_names_match_requirements_file(): @pytest.mark.integration def test_dev_dependency_names_match_requirements_file(): - """If there is a mismatch, GEDependencies should change to match our requirements-dev*.txt files. 
+ """If there is a mismatch, GXDependencies should change to match our requirements-dev*.txt files. - See GEDependencies for a utility to check for a mismatch. + See GXDependencies for a utility to check for a mismatch. """ - ge_dependencies = GEDependencies() + ge_dependencies = GXDependencies() assert ge_dependencies.get_dev_dependency_names() == set( ge_dependencies.get_dev_dependency_names_from_requirements_file() - ) - set(GEDependencies.GE_DEV_DEPENDENCIES_EXCLUDED_FROM_TRACKING) + ) - set(GXDependencies.GX_DEV_DEPENDENCIES_EXCLUDED_FROM_TRACKING) diff --git a/tests/data_context/cloud_data_context/test_expectation_suite_crud.py b/tests/data_context/cloud_data_context/test_expectation_suite_crud.py index c9766c88ea54..ddd12259e634 100644 --- a/tests/data_context/cloud_data_context/test_expectation_suite_crud.py +++ b/tests/data_context/cloud_data_context/test_expectation_suite_crud.py @@ -501,6 +501,6 @@ def test_save_expectation_suite_no_overwrite_id_collision_raises_error( resource_name=suite_name, ) ) - assert f"expectation_suite with GE Cloud ID {suite_id} already exists" in str( + assert f"expectation_suite with GX Cloud ID {suite_id} already exists" in str( e.value ) diff --git a/tests/data_context/datasource/test_data_context_datasource_runtime_data_connector_sparkdf_execution_engine.py b/tests/data_context/datasource/test_data_context_datasource_runtime_data_connector_sparkdf_execution_engine.py index 3199ad3cedc3..db276477358c 100644 --- a/tests/data_context/datasource/test_data_context_datasource_runtime_data_connector_sparkdf_execution_engine.py +++ b/tests/data_context/datasource/test_data_context_datasource_runtime_data_connector_sparkdf_execution_engine.py @@ -649,7 +649,7 @@ def test_file_path_get_batch_successful_specification_spark_directory_batch_spec This tests the same behavior as the previous test, test_get_batch_successful_specification_spark_directory, but the batch_spec_passthrough is in the Datasource configuration, found in the data_context_with_datasource_spark_engine_batch_spec_passthrough - fixture. This is why the `batch_spec_passthrough` parameters are commented out, but GE is still able to read in the 3 CSV files + fixture. This is why the `batch_spec_passthrough` parameters are commented out, but GX is still able to read in the 3 CSV files as a single SparkDF with 30,000 lines. """ diff --git a/tests/data_context/fixtures/contexts/incomplete_uncommitted/great_expectations/great_expectations.yml b/tests/data_context/fixtures/contexts/incomplete_uncommitted/great_expectations/great_expectations.yml index 3fd0ed2c6c13..bd3d04460f47 100644 --- a/tests/data_context/fixtures/contexts/incomplete_uncommitted/great_expectations/great_expectations.yml +++ b/tests/data_context/fixtures/contexts/incomplete_uncommitted/great_expectations/great_expectations.yml @@ -25,7 +25,7 @@ datasources: # secrets out of source control & 2) environment-based configuration changes # such as staging vs prod. 
# -# When GE encounters substitution syntax (like `my_key: ${my_value}` or +# When GX encounters substitution syntax (like `my_key: ${my_value}` or # `my_key: $my_value`) in the config file it will attempt to replace the value # of `my_key` with the value from an environment variable `my_value` or a # corresponding key read from the file specified using diff --git a/tests/data_context/fixtures/contexts/incomplete_uncommitted/great_expectations/uncommitted/config_variables.yml b/tests/data_context/fixtures/contexts/incomplete_uncommitted/great_expectations/uncommitted/config_variables.yml index 9ad0729723b3..7947199bf164 100644 --- a/tests/data_context/fixtures/contexts/incomplete_uncommitted/great_expectations/uncommitted/config_variables.yml +++ b/tests/data_context/fixtures/contexts/incomplete_uncommitted/great_expectations/uncommitted/config_variables.yml @@ -3,7 +3,7 @@ ge_comment_preservation_key: 1 # secrets out of source control & 2) environment-based configuration changes # such as staging vs prod. # -# When GE encounters substitution syntax (like `my_key: ${my_value}` or +# When GX encounters substitution syntax (like `my_key: ${my_value}` or # `my_key: $my_value`) in the config file it will attempt to replace the value # of `my_key` with the value from an environment variable `my_value` or a # corresponding key read from the file specified using diff --git a/tests/data_context/fixtures/plugins/my_custom_non_core_ge_class.py b/tests/data_context/fixtures/plugins/my_custom_non_core_ge_class.py index 09915fd85288..a3eadc9e9c17 100644 --- a/tests/data_context/fixtures/plugins/my_custom_non_core_ge_class.py +++ b/tests/data_context/fixtures/plugins/my_custom_non_core_ge_class.py @@ -1,7 +1,7 @@ class MyCustomNonCoreGeClass: """ This class is used only for testing. - E.g. ensuring appropriate usage stats messaging when using plugin functionality when the custom class is not a core GE class type. + E.g. ensuring appropriate usage stats messaging when using plugin functionality when the custom class is not a core GX class type. """ def self_check(self, pretty_print): diff --git a/tests/data_context/fixtures/version_2-0_but_no_version_defined/great_expectations.yml b/tests/data_context/fixtures/version_2-0_but_no_version_defined/great_expectations.yml index ae92355a5627..61dab18dd8ad 100644 --- a/tests/data_context/fixtures/version_2-0_but_no_version_defined/great_expectations.yml +++ b/tests/data_context/fixtures/version_2-0_but_no_version_defined/great_expectations.yml @@ -20,7 +20,7 @@ datasources: {} # secrets out of source control & 2) environment-based configuration changes # such as staging vs prod. 
# -# When GE encounters substitution syntax (like `my_key: ${my_value}` or +# When GX encounters substitution syntax (like `my_key: ${my_value}` or # `my_key: $my_value`) in the great_expectations.yml file, it will attempt # to replace the value of `my_key` with the value from an environment # variable `my_value` or a corresponding key read from this config file, diff --git a/tests/data_context/store/test_datasource_store_cloud_backend.py b/tests/data_context/store/test_datasource_store_cloud_backend.py index c211a9500520..467adfc1ffe6 100644 --- a/tests/data_context/store/test_datasource_store_cloud_backend.py +++ b/tests/data_context/store/test_datasource_store_cloud_backend.py @@ -227,7 +227,7 @@ def test_datasource_http_error_handling( resource_type=GXCloudRESTResource.DATASOURCE, ge_cloud_id=id ) with pytest.raises( - StoreBackendError, match=r"Unable to \w+ object in GE Cloud Store Backend: .*" + StoreBackendError, match=r"Unable to \w+ object in GX Cloud Store Backend: .*" ) as exc_info: backend_method = getattr(datasource_store_ge_cloud_backend, method) diff --git a/tests/data_context/store/test_gx_cloud_store_backend.py b/tests/data_context/store/test_gx_cloud_store_backend.py index f6a1c1d6c52d..bfd630d0f359 100644 --- a/tests/data_context/store/test_gx_cloud_store_backend.py +++ b/tests/data_context/store/test_gx_cloud_store_backend.py @@ -1,7 +1,7 @@ """ Test GXCloudStoreBackend behavior and adherence to StoreBackend contract. -Since GXCloudStoreBackend relies on GE Cloud, we mock requests and assert the right calls +Since GXCloudStoreBackend relies on GX Cloud, we mock requests and assert the right calls are made from the Store API (set, get, list, and remove_key). Note that although ge_cloud_access_token is provided (and is a valid UUID), no external diff --git a/tests/data_context/store/test_store_backends.py b/tests/data_context/store/test_store_backends.py index b96892034fe3..095e19288a06 100644 --- a/tests/data_context/store/test_store_backends.py +++ b/tests/data_context/store/test_store_backends.py @@ -1455,10 +1455,10 @@ def test_InlineStoreBackend(empty_data_context: DataContext) -> None: @pytest.mark.integration def test_InlineStoreBackend_with_mocked_fs(empty_data_context: DataContext) -> None: path_to_great_expectations_yml: str = os.path.join( - empty_data_context.root_directory, empty_data_context.GE_YML + empty_data_context.root_directory, empty_data_context.GX_YML ) - # 1. Set simple string config value and confirm it persists in the GE.yml + # 1. Set simple string config value and confirm it persists in the GX.yml inline_store_backend: InlineStoreBackend = InlineStoreBackend( data_context=empty_data_context, @@ -1481,7 +1481,7 @@ def test_InlineStoreBackend_with_mocked_fs(empty_data_context: DataContext) -> N assert config_commented_map_from_yaml["config_version"] == new_config_version - # 2. Set nested dictionary config value and confirm it persists in the GE.yml + # 2. 
Set nested dictionary config value and confirm it persists in the GX.yml inline_store_backend = InlineStoreBackend( data_context=empty_data_context, diff --git a/tests/data_context/test_data_context.py b/tests/data_context/test_data_context.py index acaa518ef4ef..25cbbceeb483 100644 --- a/tests/data_context/test_data_context.py +++ b/tests/data_context/test_data_context.py @@ -933,7 +933,7 @@ def test_load_data_context_from_environment_variables(tmp_path, monkeypatch): ), str(os.path.join(context_path, "great_expectations.yml")), ) - monkeypatch.setenv("GE_HOME", context_path) + monkeypatch.setenv("GX_HOME", context_path) assert DataContext.find_context_root_dir() == context_path @@ -1041,7 +1041,7 @@ def empty_context(tmp_path_factory): DataContext.create(project_path) ge_dir = os.path.join(project_path, "great_expectations") assert os.path.isdir(ge_dir) - assert os.path.isfile(os.path.join(ge_dir, DataContext.GE_YML)) + assert os.path.isfile(os.path.join(ge_dir, DataContext.GX_YML)) context = DataContext(ge_dir) assert isinstance(context, DataContext) return context @@ -1057,7 +1057,7 @@ def test_data_context_does_ge_yml_exist_returns_false_when_it_does_not_exist( ): ge_dir = empty_context.root_directory # mangle project - safe_remove(os.path.join(ge_dir, empty_context.GE_YML)) + safe_remove(os.path.join(ge_dir, empty_context.GX_YML)) assert DataContext.does_config_exist_on_disk(ge_dir) == False @@ -1080,7 +1080,7 @@ def test_data_context_does_project_have_a_datasource_in_config_file_returns_fals empty_context, ): ge_dir = empty_context.root_directory - safe_remove(os.path.join(ge_dir, empty_context.GE_YML)) + safe_remove(os.path.join(ge_dir, empty_context.GX_YML)) assert DataContext.does_project_have_a_datasource_in_config_file(ge_dir) == False @@ -1096,7 +1096,7 @@ def test_data_context_does_project_have_a_datasource_in_config_file_returns_fals empty_context, ): ge_dir = empty_context.root_directory - with open(os.path.join(ge_dir, DataContext.GE_YML), "w") as yml: + with open(os.path.join(ge_dir, DataContext.GX_YML), "w") as yml: yml.write("this file: is not a valid ge config") assert DataContext.does_project_have_a_datasource_in_config_file(ge_dir) == False @@ -1136,7 +1136,7 @@ def test_data_context_is_project_initialized_returns_false_when_config_yml_is_mi ): ge_dir = empty_context.root_directory # mangle project - safe_remove(os.path.join(ge_dir, empty_context.GE_YML)) + safe_remove(os.path.join(ge_dir, empty_context.GX_YML)) assert DataContext.is_project_initialized(ge_dir) == False @@ -1146,7 +1146,7 @@ def test_data_context_is_project_initialized_returns_false_when_uncommitted_dir_ ): ge_dir = empty_context.root_directory # mangle project - shutil.rmtree(os.path.join(ge_dir, empty_context.GE_UNCOMMITTED_DIR)) + shutil.rmtree(os.path.join(ge_dir, empty_context.GX_UNCOMMITTED_DIR)) assert DataContext.is_project_initialized(ge_dir) == False @@ -1156,7 +1156,7 @@ def test_data_context_is_project_initialized_returns_false_when_uncommitted_data ): ge_dir = empty_context.root_directory # mangle project - shutil.rmtree(os.path.join(ge_dir, empty_context.GE_UNCOMMITTED_DIR, "data_docs")) + shutil.rmtree(os.path.join(ge_dir, empty_context.GX_UNCOMMITTED_DIR, "data_docs")) assert DataContext.is_project_initialized(ge_dir) == False @@ -1166,7 +1166,7 @@ def test_data_context_is_project_initialized_returns_false_when_uncommitted_vali ): ge_dir = empty_context.root_directory # mangle project - shutil.rmtree(os.path.join(ge_dir, empty_context.GE_UNCOMMITTED_DIR, "validations")) + 
shutil.rmtree(os.path.join(ge_dir, empty_context.GX_UNCOMMITTED_DIR, "validations")) assert DataContext.is_project_initialized(ge_dir) == False @@ -1177,7 +1177,7 @@ def test_data_context_is_project_initialized_returns_false_when_config_variable_ ge_dir = empty_context.root_directory # mangle project safe_remove( - os.path.join(ge_dir, empty_context.GE_UNCOMMITTED_DIR, "config_variables.yml") + os.path.join(ge_dir, empty_context.GX_UNCOMMITTED_DIR, "config_variables.yml") ) assert DataContext.is_project_initialized(ge_dir) == False @@ -1323,7 +1323,7 @@ def test_data_context_create_builds_base_directories(tmp_path_factory): "checkpoints", "uncommitted", ]: - base_dir = os.path.join(project_path, context.GE_DIR, directory) + base_dir = os.path.join(project_path, context.GX_DIR, directory) assert os.path.isdir(base_dir) diff --git a/tests/data_context/test_data_context_data_docs_api.py b/tests/data_context/test_data_context_data_docs_api.py index fff220a3768b..f7ff78932b06 100644 --- a/tests/data_context/test_data_context_data_docs_api.py +++ b/tests/data_context/test_data_context_data_docs_api.py @@ -85,7 +85,7 @@ def context_with_multiple_built_sites(empty_data_context): assert os.path.isfile( os.path.join( context.root_directory, - context.GE_UNCOMMITTED_DIR, + context.GX_UNCOMMITTED_DIR, "data_docs", site, "index.html", @@ -186,7 +186,7 @@ def test_clean_data_docs_on_context_with_multiple_sites_with_no_site_name_cleans assert not os.path.isfile( os.path.join( context.root_directory, - context.GE_UNCOMMITTED_DIR, + context.GX_UNCOMMITTED_DIR, "data_docs", site, "index.html", @@ -200,7 +200,7 @@ def test_clean_data_docs_on_context_with_multiple_sites_with_existing_site_name_ context = context_with_multiple_built_sites assert context.clean_data_docs(site_name="another_local_site") is True data_docs_dir = os.path.join( - context.root_directory, context.GE_UNCOMMITTED_DIR, "data_docs" + context.root_directory, context.GX_UNCOMMITTED_DIR, "data_docs" ) assert not os.path.isfile( os.path.join(data_docs_dir, "another_local_site", "index.html") @@ -225,7 +225,7 @@ def test_existing_local_data_docs_urls_returns_url_on_project_with_no_datasource """ empty_directory = str(tmp_path_factory.mktemp("another_empty_project")) DataContext.create(empty_directory) - context = DataContext(os.path.join(empty_directory, DataContext.GE_DIR)) + context = DataContext(os.path.join(empty_directory, DataContext.GX_DIR)) obs = context.get_docs_sites_urls(only_if_exists=False) assert len(obs) == 1 @@ -239,7 +239,7 @@ def test_existing_local_data_docs_urls_returns_single_url_from_customized_local_ ): empty_directory = str(tmp_path_factory.mktemp("yo_yo")) DataContext.create(empty_directory) - ge_dir = os.path.join(empty_directory, DataContext.GE_DIR) + ge_dir = os.path.join(empty_directory, DataContext.GX_DIR) context = DataContext(ge_dir) context._project_config["data_docs_sites"] = { @@ -272,7 +272,7 @@ def test_existing_local_data_docs_urls_returns_multiple_urls_from_customized_loc ): empty_directory = str(tmp_path_factory.mktemp("yo_yo_ma")) DataContext.create(empty_directory) - ge_dir = os.path.join(empty_directory, DataContext.GE_DIR) + ge_dir = os.path.join(empty_directory, DataContext.GX_DIR) context = DataContext(ge_dir) context._project_config["data_docs_sites"] = { @@ -321,7 +321,7 @@ def test_build_data_docs_skipping_index_does_not_build_index( # TODO What's the latest and greatest way to use configs rather than my hackery? 
empty_directory = str(tmp_path_factory.mktemp("empty")) DataContext.create(empty_directory) - ge_dir = os.path.join(empty_directory, DataContext.GE_DIR) + ge_dir = os.path.join(empty_directory, DataContext.GX_DIR) context = DataContext(ge_dir) config = context.get_config() config.data_docs_sites = { diff --git a/tests/data_context/test_data_context_test_yaml_config.py b/tests/data_context/test_data_context_test_yaml_config.py index 03433083dafc..cf60d7964132 100644 --- a/tests/data_context/test_data_context_test_yaml_config.py +++ b/tests/data_context/test_data_context_test_yaml_config.py @@ -1196,7 +1196,7 @@ def test_golden_path_runtime_data_connector_pandas_datasource_configuration( """ Tests output of test_yaml_config() for a Datacontext configured with a Datasource with RuntimeDataConnector. Even though the test directory contains multiple files that can be read-in - by GE, the RuntimeDataConnector will output 0 data_assets, and return a "note" to the user. + by GX, the RuntimeDataConnector will output 0 data_assets, and return a "note" to the user. This is because the RuntimeDataConnector is not aware of data_assets until they are passed in through the RuntimeBatchRequest. diff --git a/tests/data_context/test_data_context_test_yaml_config_usage_stats.py b/tests/data_context/test_data_context_test_yaml_config_usage_stats.py index 731f838db28f..c2584618cdd2 100644 --- a/tests/data_context/test_data_context_test_yaml_config_usage_stats.py +++ b/tests/data_context/test_data_context_test_yaml_config_usage_stats.py @@ -75,7 +75,7 @@ def test_test_yaml_config_usage_stats_custom_type( ): """ What does this test and why? - We should be able to discern the GE parent class for a custom type and construct + We should be able to discern the GX parent class for a custom type and construct a useful usage stats event message. """ data_context: DataContext = empty_data_context_stats_enabled @@ -199,7 +199,7 @@ def test_test_yaml_config_usage_stats_custom_type_not_ge_subclass( ): """ What does this test and why? - We should be able to discern the GE parent class for a custom type and construct + We should be able to discern the GX parent class for a custom type and construct a useful usage stats event message. """ data_context: DataContext = empty_data_context_stats_enabled @@ -235,7 +235,7 @@ def test_test_yaml_config_usage_stats_simple_sqlalchemy_datasource_subclass( ): """ What does this test and why? - We should be able to discern the GE parent class for a custom type and construct + We should be able to discern the GX parent class for a custom type and construct a useful usage stats event message. This should be true for SimpleSqlalchemyDatasources. 
""" diff --git a/tests/data_context/test_data_context_utils.py b/tests/data_context/test_data_context_utils.py index 2adcfdbd79f4..0437a3573b80 100644 --- a/tests/data_context/test_data_context_utils.py +++ b/tests/data_context/test_data_context_utils.py @@ -335,7 +335,7 @@ def test_sanitize_config_masks_cloud_store_backend_access_tokens( ]["ge_cloud_credentials"]["access_token"] == ge_cloud_access_token ) - # expect that the GE Cloud token has been obscured + # expect that the GX Cloud token has been obscured assert ( store_config["store_backend"]["ge_cloud_credentials"]["access_token"] != ge_cloud_access_token diff --git a/tests/data_context/test_data_context_variables.py b/tests/data_context/test_data_context_variables.py index 54719f84c532..622a6a7276af 100644 --- a/tests/data_context/test_data_context_variables.py +++ b/tests/data_context/test_data_context_variables.py @@ -446,7 +446,7 @@ def test_data_context_variables_save_config( ephemeral_data_context_variables: EphemeralDataContextVariables, file_data_context_variables: FileDataContextVariables, cloud_data_context_variables: CloudDataContextVariables, - # The below GE Cloud variables were used to instantiate the above CloudDataContextVariables + # The below GX Cloud variables were used to instantiate the above CloudDataContextVariables ge_cloud_base_url: str, ge_cloud_organization_id: str, ge_cloud_access_token: str, @@ -568,7 +568,7 @@ def test_file_data_context_variables_e2e( # Review great_expectations.yml where values were written and confirm changes config_filepath = pathlib.Path(file_data_context.root_directory).joinpath( - file_data_context.GE_YML + file_data_context.GX_YML ) with open(config_filepath) as f: diff --git a/tests/datasource/test_datasource_anonymizer.py b/tests/datasource/test_datasource_anonymizer.py index 172782b6e652..87be0f1cc473 100644 --- a/tests/datasource/test_datasource_anonymizer.py +++ b/tests/datasource/test_datasource_anonymizer.py @@ -147,7 +147,7 @@ def test_anonymize_datasource_info_v2_api_custom_subclass( ): """ What does this test and why? - We should be able to discern the GE parent class for a custom type and construct + We should be able to discern the GX parent class for a custom type and construct a useful usage stats event message. Custom v2 API Datasources should continue to be supported. """ diff --git a/tests/datasource/test_pandas_datasource.py b/tests/datasource/test_pandas_datasource.py index 9a7dbd164eda..be8fba826374 100644 --- a/tests/datasource/test_pandas_datasource.py +++ b/tests/datasource/test_pandas_datasource.py @@ -327,7 +327,7 @@ def test_s3_pandas_source_read_parquet( data_context_parameterized_expectation_suite.create_expectation_suite( expectation_suite_name="test_parquet" ) - with pytest.deprecated_call(): # "Direct GE Support for the s3 BatchKwarg will be removed in v0.16. + with pytest.deprecated_call(): # "Direct GX Support for the s3 BatchKwarg will be removed in v0.16. batch = data_context_parameterized_expectation_suite.get_batch( data_context_parameterized_expectation_suite.build_batch_kwargs( "parquet_source", diff --git a/tests/datasource/test_sparkdf_datasource.py b/tests/datasource/test_sparkdf_datasource.py index 017b4f7d81c0..72400a491f28 100644 --- a/tests/datasource/test_sparkdf_datasource.py +++ b/tests/datasource/test_sparkdf_datasource.py @@ -150,7 +150,7 @@ def test_spark_kwargs_are_passed_through( ): """ Ensure that an external SparkSession is not stopped when the spark_config matches - the one specified in the GE Context. 
+ the one specified in the GX Context. """ if "SparkDFDataset" not in test_backends: pytest.skip("No spark backend selected.") diff --git a/tests/execution_engine/split_and_sample/test_sqlalchemy_execution_engine_sampling.py b/tests/execution_engine/split_and_sample/test_sqlalchemy_execution_engine_sampling.py index 4f3a1c88c762..f23946a00cb6 100644 --- a/tests/execution_engine/split_and_sample/test_sqlalchemy_execution_engine_sampling.py +++ b/tests/execution_engine/split_and_sample/test_sqlalchemy_execution_engine_sampling.py @@ -16,7 +16,7 @@ from great_expectations.execution_engine.sqlalchemy_batch_data import ( SqlAlchemyBatchData, ) -from great_expectations.execution_engine.sqlalchemy_dialect import GESqlDialect +from great_expectations.execution_engine.sqlalchemy_dialect import GXSqlDialect from great_expectations.self_check.util import build_sa_engine from great_expectations.util import import_library_module @@ -76,22 +76,22 @@ def clean_query_for_comparison(query_string: str) -> str: @pytest.fixture def dialect_name_to_sql_statement(): - def _dialect_name_to_sql_statement(dialect_name: GESqlDialect) -> str: + def _dialect_name_to_sql_statement(dialect_name: GXSqlDialect) -> str: dialect_name_to_sql_statement: dict = { - GESqlDialect.POSTGRESQL: "SELECT * FROM TEST_SCHEMA_NAME.TEST_TABLE WHERE TRUE LIMIT 10", - GESqlDialect.MYSQL: "SELECT * FROM TEST_SCHEMA_NAME.TEST_TABLE WHERE TRUE = 1 LIMIT 10", - GESqlDialect.ORACLE: "SELECT * FROM test_schema_name.test_table WHERE 1 = 1 AND ROWNUM <= 10", - GESqlDialect.MSSQL: "SELECT TOP 10 * FROM TEST_SCHEMA_NAME.TEST_TABLE WHERE 1 = 1", - GESqlDialect.SQLITE: "SELECT * FROM TEST_SCHEMA_NAME.TEST_TABLE WHERE 1 = 1 LIMIT 10 OFFSET 0", - GESqlDialect.BIGQUERY: "SELECT * FROM `TEST_SCHEMA_NAME`.`TEST_TABLE` WHERE TRUE LIMIT 10", - GESqlDialect.SNOWFLAKE: "SELECT * FROM TEST_SCHEMA_NAME.TEST_TABLE WHERE TRUE LIMIT 10", - GESqlDialect.REDSHIFT: "SELECT * FROM TEST_SCHEMA_NAME.TEST_TABLE WHERE TRUE LIMIT 10", - GESqlDialect.AWSATHENA: 'SELECT * FROM "TEST_SCHEMA_NAME"."TEST_TABLE" WHERE TRUE LIMIT 10', - GESqlDialect.DREMIO: 'SELECT * FROM "TEST_SCHEMA_NAME"."TEST_TABLE" WHERE 1 = 1 LIMIT 10', - GESqlDialect.TERADATASQL: "SELECT TOP 10 * FROM TEST_SCHEMA_NAME.TEST_TABLE WHERE 1 = 1", - GESqlDialect.TRINO: "SELECT * FROM TEST_SCHEMA_NAME.TEST_TABLE WHERE TRUE LIMIT 10", - GESqlDialect.HIVE: "SELECT * FROM `TEST_SCHEMA_NAME`.`TEST_TABLE` WHERE TRUE LIMIT 10", - GESqlDialect.VERTICA: "SELECT * FROM TEST_SCHEMA_NAME.TEST_TABLE WHERE TRUE LIMIT 10", + GXSqlDialect.POSTGRESQL: "SELECT * FROM TEST_SCHEMA_NAME.TEST_TABLE WHERE TRUE LIMIT 10", + GXSqlDialect.MYSQL: "SELECT * FROM TEST_SCHEMA_NAME.TEST_TABLE WHERE TRUE = 1 LIMIT 10", + GXSqlDialect.ORACLE: "SELECT * FROM test_schema_name.test_table WHERE 1 = 1 AND ROWNUM <= 10", + GXSqlDialect.MSSQL: "SELECT TOP 10 * FROM TEST_SCHEMA_NAME.TEST_TABLE WHERE 1 = 1", + GXSqlDialect.SQLITE: "SELECT * FROM TEST_SCHEMA_NAME.TEST_TABLE WHERE 1 = 1 LIMIT 10 OFFSET 0", + GXSqlDialect.BIGQUERY: "SELECT * FROM `TEST_SCHEMA_NAME`.`TEST_TABLE` WHERE TRUE LIMIT 10", + GXSqlDialect.SNOWFLAKE: "SELECT * FROM TEST_SCHEMA_NAME.TEST_TABLE WHERE TRUE LIMIT 10", + GXSqlDialect.REDSHIFT: "SELECT * FROM TEST_SCHEMA_NAME.TEST_TABLE WHERE TRUE LIMIT 10", + GXSqlDialect.AWSATHENA: 'SELECT * FROM "TEST_SCHEMA_NAME"."TEST_TABLE" WHERE TRUE LIMIT 10', + GXSqlDialect.DREMIO: 'SELECT * FROM "TEST_SCHEMA_NAME"."TEST_TABLE" WHERE 1 = 1 LIMIT 10', + GXSqlDialect.TERADATASQL: "SELECT TOP 10 * FROM TEST_SCHEMA_NAME.TEST_TABLE WHERE 1 = 1", + 
GXSqlDialect.TRINO: "SELECT * FROM TEST_SCHEMA_NAME.TEST_TABLE WHERE TRUE LIMIT 10", + GXSqlDialect.HIVE: "SELECT * FROM `TEST_SCHEMA_NAME`.`TEST_TABLE` WHERE TRUE LIMIT 10", + GXSqlDialect.VERTICA: "SELECT * FROM TEST_SCHEMA_NAME.TEST_TABLE WHERE TRUE LIMIT 10", } return dialect_name_to_sql_statement[dialect_name] @@ -104,11 +104,11 @@ def _dialect_name_to_sql_statement(dialect_name: GESqlDialect) -> str: pytest.param( dialect_name, id=dialect_name.value, marks=pytest.mark.external_sqldialect ) - for dialect_name in GESqlDialect.get_all_dialects() + for dialect_name in GXSqlDialect.get_all_dialects() ], ) def test_sample_using_limit_builds_correct_query_where_clause_none( - dialect_name: GESqlDialect, dialect_name_to_sql_statement, sa + dialect_name: GXSqlDialect, dialect_name_to_sql_statement, sa ): """What does this test and why? @@ -118,34 +118,34 @@ def test_sample_using_limit_builds_correct_query_where_clause_none( # 1. Setup class MockSqlAlchemyExecutionEngine: - def __init__(self, dialect_name: GESqlDialect): + def __init__(self, dialect_name: GXSqlDialect): self._dialect_name = dialect_name self._connection_string = self.dialect_name_to_connection_string( dialect_name ) DIALECT_TO_CONNECTION_STRING_STUB: dict = { - GESqlDialect.POSTGRESQL: "postgresql://", - GESqlDialect.MYSQL: "mysql+pymysql://", - GESqlDialect.ORACLE: "oracle+cx_oracle://", - GESqlDialect.MSSQL: "mssql+pyodbc://", - GESqlDialect.SQLITE: "sqlite:///", - GESqlDialect.BIGQUERY: "bigquery://", - GESqlDialect.SNOWFLAKE: "snowflake://", - GESqlDialect.REDSHIFT: "redshift+psycopg2://", - GESqlDialect.AWSATHENA: f"awsathena+rest://@athena.us-east-1.amazonaws.com/some_test_db?s3_staging_dir=s3://some-s3-path/", - GESqlDialect.DREMIO: "dremio://", - GESqlDialect.TERADATASQL: "teradatasql://", - GESqlDialect.TRINO: "trino://", - GESqlDialect.HIVE: "hive://", - GESqlDialect.VERTICA: "vertica+vertica_python://", + GXSqlDialect.POSTGRESQL: "postgresql://", + GXSqlDialect.MYSQL: "mysql+pymysql://", + GXSqlDialect.ORACLE: "oracle+cx_oracle://", + GXSqlDialect.MSSQL: "mssql+pyodbc://", + GXSqlDialect.SQLITE: "sqlite:///", + GXSqlDialect.BIGQUERY: "bigquery://", + GXSqlDialect.SNOWFLAKE: "snowflake://", + GXSqlDialect.REDSHIFT: "redshift+psycopg2://", + GXSqlDialect.AWSATHENA: f"awsathena+rest://@athena.us-east-1.amazonaws.com/some_test_db?s3_staging_dir=s3://some-s3-path/", + GXSqlDialect.DREMIO: "dremio://", + GXSqlDialect.TERADATASQL: "teradatasql://", + GXSqlDialect.TRINO: "trino://", + GXSqlDialect.HIVE: "hive://", + GXSqlDialect.VERTICA: "vertica+vertica_python://", } @property def dialect_name(self) -> str: return self._dialect_name.value - def dialect_name_to_connection_string(self, dialect_name: GESqlDialect) -> str: + def dialect_name_to_connection_string(self, dialect_name: GXSqlDialect) -> str: return self.DIALECT_TO_CONNECTION_STRING_STUB.get(dialect_name) _BIGQUERY_MODULE_NAME = "sqlalchemy_bigquery" @@ -154,18 +154,18 @@ def dialect_name_to_connection_string(self, dialect_name: GESqlDialect) -> str: def dialect(self) -> sa.engine.Dialect: # TODO: AJB 20220512 move this dialect retrieval to a separate class from the SqlAlchemyExecutionEngine # and then use it here. 
- dialect_name: GESqlDialect = self._dialect_name - if dialect_name == GESqlDialect.ORACLE: + dialect_name: GXSqlDialect = self._dialect_name + if dialect_name == GXSqlDialect.ORACLE: # noinspection PyUnresolvedReferences return import_library_module( module_name="sqlalchemy.dialects.oracle" ).dialect() - elif dialect_name == GESqlDialect.SNOWFLAKE: + elif dialect_name == GXSqlDialect.SNOWFLAKE: # noinspection PyUnresolvedReferences return import_library_module( module_name="snowflake.sqlalchemy.snowdialect" ).dialect() - elif dialect_name == GESqlDialect.DREMIO: + elif dialect_name == GXSqlDialect.DREMIO: # WARNING: Dremio Support is experimental, functionality is not fully under test # noinspection PyUnresolvedReferences return import_library_module( @@ -177,12 +177,12 @@ def dialect(self) -> sa.engine.Dialect: # return import_library_module( # module_name="sqlalchemy_redshift.dialect" # ).RedshiftDialect - elif dialect_name == GESqlDialect.BIGQUERY: + elif dialect_name == GXSqlDialect.BIGQUERY: # noinspection PyUnresolvedReferences return import_library_module( module_name=self._BIGQUERY_MODULE_NAME ).dialect() - elif dialect_name == GESqlDialect.TERADATASQL: + elif dialect_name == GXSqlDialect.TERADATASQL: # WARNING: Teradata Support is experimental, functionality is not fully under test # noinspection PyUnresolvedReferences return import_library_module( diff --git a/tests/execution_engine/test_sqlalchemy_batch_data.py b/tests/execution_engine/test_sqlalchemy_batch_data.py index 95ad03f656b5..db17285fcd16 100644 --- a/tests/execution_engine/test_sqlalchemy_batch_data.py +++ b/tests/execution_engine/test_sqlalchemy_batch_data.py @@ -4,7 +4,7 @@ from great_expectations.core.batch_spec import SqlAlchemyDatasourceBatchSpec from great_expectations.execution_engine import SqlAlchemyExecutionEngine -from great_expectations.execution_engine.sqlalchemy_dialect import GESqlDialect +from great_expectations.execution_engine.sqlalchemy_dialect import GXSqlDialect try: sqlalchemy = pytest.importorskip("sqlalchemy") @@ -150,7 +150,7 @@ def test_instantiation_with_unknown_dialect(sqlite_view_engine): table_name="test_table", ) - assert batch_data.dialect == GESqlDialect.OTHER + assert batch_data.dialect == GXSqlDialect.OTHER @pytest.mark.unit diff --git a/tests/execution_engine/test_sqlalchemy_dialect.py b/tests/execution_engine/test_sqlalchemy_dialect.py index 6c05ddbbf952..8c9892021775 100644 --- a/tests/execution_engine/test_sqlalchemy_dialect.py +++ b/tests/execution_engine/test_sqlalchemy_dialect.py @@ -1,33 +1,33 @@ import pytest -from great_expectations.execution_engine.sqlalchemy_dialect import GESqlDialect +from great_expectations.execution_engine.sqlalchemy_dialect import GXSqlDialect @pytest.mark.unit def test_dialect_instantiation_with_string(): - assert GESqlDialect("hive") == GESqlDialect.HIVE + assert GXSqlDialect("hive") == GXSqlDialect.HIVE @pytest.mark.unit def test_dialect_instantiation_with_byte_string(): - assert GESqlDialect(b"hive") == GESqlDialect.HIVE + assert GXSqlDialect(b"hive") == GXSqlDialect.HIVE @pytest.mark.unit def test_string_equivalence(): - assert GESqlDialect.HIVE == "hive" + assert GXSqlDialect.HIVE == "hive" @pytest.mark.unit def test_byte_string_equivalence(): - assert GESqlDialect.HIVE == b"hive" + assert GXSqlDialect.HIVE == b"hive" @pytest.mark.unit def test_get_all_dialect_names_no_other_dialects(): - assert GESqlDialect.OTHER.value not in GESqlDialect.get_all_dialect_names() + assert GXSqlDialect.OTHER.value not in GXSqlDialect.get_all_dialect_names() 
@pytest.mark.unit def test_get_all_dialects_no_other_dialects(): - assert GESqlDialect.OTHER not in GESqlDialect.get_all_dialects() + assert GXSqlDialect.OTHER not in GXSqlDialect.get_all_dialects() diff --git a/tests/execution_engine/test_sqlalchemy_execution_engine.py b/tests/execution_engine/test_sqlalchemy_execution_engine.py index 8f22d60444a2..ab1d90a32e6f 100644 --- a/tests/execution_engine/test_sqlalchemy_execution_engine.py +++ b/tests/execution_engine/test_sqlalchemy_execution_engine.py @@ -15,7 +15,7 @@ from great_expectations.execution_engine.sqlalchemy_batch_data import ( SqlAlchemyBatchData, ) -from great_expectations.execution_engine.sqlalchemy_dialect import GESqlDialect +from great_expectations.execution_engine.sqlalchemy_dialect import GXSqlDialect from great_expectations.execution_engine.sqlalchemy_execution_engine import ( SqlAlchemyExecutionEngine, ) @@ -90,7 +90,7 @@ def test_instantiation_via_url_and_retrieve_data_with_other_dialect(sa): assert my_execution_engine.credentials is None assert my_execution_engine.url[-36:] == "test_cases_for_sql_data_connector.db" - # 2. Change dialect to one not listed in GESqlDialect + # 2. Change dialect to one not listed in GXSqlDialect my_execution_engine.engine.dialect.name = "other_dialect" # 3. Get data @@ -105,7 +105,7 @@ def test_instantiation_via_url_and_retrieve_data_with_other_dialect(sa): # 4. Assert dialect and data are as expected - assert batch_data.dialect == GESqlDialect.OTHER + assert batch_data.dialect == GXSqlDialect.OTHER my_execution_engine.load_batch_data("__", batch_data) validator = Validator(my_execution_engine) diff --git a/tests/integration/common_workflows/pandas_execution_engine_with_gcp_installed.py b/tests/integration/common_workflows/pandas_execution_engine_with_gcp_installed.py index cfbc92462acd..92d62e15d44a 100644 --- a/tests/integration/common_workflows/pandas_execution_engine_with_gcp_installed.py +++ b/tests/integration/common_workflows/pandas_execution_engine_with_gcp_installed.py @@ -25,7 +25,7 @@ def temp_gc_creds() -> None: # What does this test and why? -# A common initial use of GE is locally, with the PandasExecutionEngine +# A common initial use of GX is locally, with the PandasExecutionEngine # A user of GCP could also have the GOOGLE_APPLICATION_CREDENTIALS set # This workflow was broken for a short time by PR # 3679 and then reverted with PR # 3689 and fixed with PR #3694 # The following test ensures that this simple workflow still works diff --git a/tests/integration/common_workflows/simple_build_data_docs.py b/tests/integration/common_workflows/simple_build_data_docs.py index 3adec487e600..7bed3a49a380 100644 --- a/tests/integration/common_workflows/simple_build_data_docs.py +++ b/tests/integration/common_workflows/simple_build_data_docs.py @@ -14,17 +14,17 @@ As indicated in issue #3772, calling `context.build_data_docs()` raised an unexpected exception when Great Expectations was installed in a non-filesystem location (i.e. it failed when -GE was installed inside a zip file -which is a location allowed by PEP 273-). +GX was installed inside a zip file -which is a location allowed by PEP 273-). -Therefore, this test is intended to be run after installing GE inside a zip file and +Therefore, this test is intended to be run after installing GX inside a zip file and then setting the appropriate PYTHONPATH env variable. If desired, this test can also be -run after installing GE in a normal filesystem location (i.e. a directory). 
+run after installing GX in a normal filesystem location (i.e. a directory). This test is OK if it finishes without raising an exception. To make it easier to debug this test, it prints: -* The location of the GE library: to verify that we are testing the library that we want -* The version of the GE library: idem +* The location of the GX library: to verify that we are testing the library that we want +* The version of the GX library: idem * data_docs url: If everything works, this will be a url (e.g. starting with file://...) diff --git a/tests/integration/db/bigquery.py b/tests/integration/db/bigquery.py index 47cf17229fce..195dbde7bd9e 100644 --- a/tests/integration/db/bigquery.py +++ b/tests/integration/db/bigquery.py @@ -13,7 +13,7 @@ """ What does this test and why? -This integration test is part of the deprecation of the bigquery_temp_table parameter. As of GE 0.15.3, +This integration test is part of the deprecation of the bigquery_temp_table parameter. As of GX 0.15.3, tables that are created as the result of a query are created as permanent tables with an expiration of 24 hours, with more information to be found: diff --git a/tests/integration/docusaurus/setup/configuring_data_contexts/how_to_configure_credentials.py b/tests/integration/docusaurus/setup/configuring_data_contexts/how_to_configure_credentials.py index 277c0bf2e6a2..e82657c30dd3 100644 --- a/tests/integration/docusaurus/setup/configuring_data_contexts/how_to_configure_credentials.py +++ b/tests/integration/docusaurus/setup/configuring_data_contexts/how_to_configure_credentials.py @@ -72,7 +72,7 @@ # get context and set config variables in config_variables.yml context = gx.get_context() context_config_variables_relative_file_path = os.path.join( - context.GE_UNCOMMITTED_DIR, "config_variables.yml" + context.GX_UNCOMMITTED_DIR, "config_variables.yml" ) assert ( yaml.safe_load(config_variables_file_path)["config_variables_file_path"] diff --git a/tests/integration/docusaurus/tutorials/getting-started/getting_started.py b/tests/integration/docusaurus/tutorials/getting-started/getting_started.py index 09318a3d9d04..d4ab7e78c49f 100644 --- a/tests/integration/docusaurus/tutorials/getting-started/getting_started.py +++ b/tests/integration/docusaurus/tutorials/getting-started/getting_started.py @@ -43,7 +43,7 @@ """ # -# Note : this override is for internal GE purposes, and is intended to helps us better understand how the +# Note : this override is for internal GX purposes, and is intended to helps us better understand how the # Getting Started Guide is being used. It can be ignored by users. datasource_yaml = datasource_yaml.replace( "getting_started_datasource", GETTING_STARTED_DATASOURCE_NAME @@ -60,7 +60,7 @@ limit=1000, ) -# Note : this override is for internal GE purposes, and is intended to helps us better understand how the +# Note : this override is for internal GX purposes, and is intended to helps us better understand how the # Getting Started Guide is being used. It can be ignored by users. batch_request = BatchRequest( datasource_name=GETTING_STARTED_DATASOURCE_NAME, @@ -71,7 +71,7 @@ expectation_suite_name = "getting_started_expectation_suite_taxi.demo" -# Note : this override is for internal GE purposes, and is intended to helps us better understand how the +# Note : this override is for internal GX purposes, and is intended to helps us better understand how the # Getting Started Guide is being used. 
It can be ignored by users expectation_suite_name = GETTING_STARTED_EXPECTATION_SUITE_NAME @@ -137,7 +137,7 @@ index: -1 expectation_suite_name: getting_started_expectation_suite_taxi.demo """ -# Note : these overrides are for internal GE purposes, and are intended to helps us better understand how the +# Note : these overrides are for internal GX purposes, and are intended to helps us better understand how the # Getting Started Guide is being used. It can be ignored by users my_checkpoint_config = my_checkpoint_config.replace( "getting_started_checkpoint", GETTING_STARTED_CHECKPOINT_NAME @@ -180,7 +180,7 @@ expectation_suite_name: getting_started_expectation_suite_taxi.demo """ # -# Note : this override is for internal GE purposes, and is intended to helps us better understand how the +# Note : this override is for internal GX purposes, and is intended to helps us better understand how the # Getting Started Guide is being used. It can be ignored by users yaml_config = yaml_config.replace( "getting_started_checkpoint", GETTING_STARTED_CHECKPOINT_NAME diff --git a/tests/integration/fixtures/gcp_deployment/ge_checkpoint_bigquery.py b/tests/integration/fixtures/gcp_deployment/ge_checkpoint_bigquery.py index fa4db2087658..0b47780f3e92 100644 --- a/tests/integration/fixtures/gcp_deployment/ge_checkpoint_bigquery.py +++ b/tests/integration/fixtures/gcp_deployment/ge_checkpoint_bigquery.py @@ -11,9 +11,9 @@ } dag = DAG( - "GE_checkpoint_run", + "GX_checkpoint_run", default_args=default_args, - description="running GE checkpoint", + description="running GX checkpoint", schedule_interval=None, dagrun_timeout=timedelta(minutes=5), ) diff --git a/tests/integration/fixtures/gcp_deployment/ge_checkpoint_gcs.py b/tests/integration/fixtures/gcp_deployment/ge_checkpoint_gcs.py index f520026ae0f7..01b9ac479dc2 100644 --- a/tests/integration/fixtures/gcp_deployment/ge_checkpoint_gcs.py +++ b/tests/integration/fixtures/gcp_deployment/ge_checkpoint_gcs.py @@ -11,9 +11,9 @@ } dag = DAG( - "GE_checkpoint_run", + "GX_checkpoint_run", default_args=default_args, - description="running GE checkpoint", + description="running GX checkpoint", schedule_interval=None, dagrun_timeout=timedelta(minutes=5), ) diff --git a/tests/integration/fixtures/yellow_tripdata_pandas_fixture/great_expectations/great_expectations.yml b/tests/integration/fixtures/yellow_tripdata_pandas_fixture/great_expectations/great_expectations.yml index 0326a2812ae4..9208b923c7b9 100644 --- a/tests/integration/fixtures/yellow_tripdata_pandas_fixture/great_expectations/great_expectations.yml +++ b/tests/integration/fixtures/yellow_tripdata_pandas_fixture/great_expectations/great_expectations.yml @@ -41,7 +41,7 @@ datasources: # secrets out of source control & 2) environment-based configuration changes # such as staging vs prod. 
# -# When GE encounters substitution syntax (like `my_key: ${my_value}` or +# When GX encounters substitution syntax (like `my_key: ${my_value}` or # `my_key: $my_value`) in the great_expectations.yml file, it will attempt # to replace the value of `my_key` with the value from an environment # variable `my_value` or a corresponding key read from this config file, diff --git a/tests/integration/test_script_runner.py b/tests/integration/test_script_runner.py index 3688a75a02a7..af24c00393ed 100644 --- a/tests/integration/test_script_runner.py +++ b/tests/integration/test_script_runner.py @@ -1845,7 +1845,7 @@ def _execute_integration_test( try: base_dir = file_relative_path(__file__, "../../") os.chdir(base_dir) - # Ensure GE is installed in our environment + # Ensure GX is installed in our environment installed_packages = [pkg.key for pkg in pkg_resources.working_set] if "great-expectations" not in installed_packages: execute_shell_command("pip install .") diff --git a/tests/render/test_render_BulletListContentBlock.py b/tests/render/test_render_BulletListContentBlock.py index cb7c807def98..bc8a4e9a31a7 100644 --- a/tests/render/test_render_BulletListContentBlock.py +++ b/tests/render/test_render_BulletListContentBlock.py @@ -71,7 +71,7 @@ def test_all_expectations_using_test_definitions(): # The 5 Expectations noted below are implemented or updated after v0.13.0 and are incompatible with this test fixture due to # having incomplete render methods. # - # As this behavior is implemented, the `UNSUPPORTED_EXPECTATIONS` list will be updated to reflect GE's current capabilities. + # As this behavior is implemented, the `UNSUPPORTED_EXPECTATIONS` list will be updated to reflect GX's current capabilities. dir_path = os.path.dirname(os.path.abspath(__file__)) pattern = os.path.join( diff --git a/tests/test_fixtures/configuration_for_testing_v2_v3_migration/pandas/v2/great_expectations/great_expectations.yml b/tests/test_fixtures/configuration_for_testing_v2_v3_migration/pandas/v2/great_expectations/great_expectations.yml index dad114aa8a1a..3d1aaa42a404 100644 --- a/tests/test_fixtures/configuration_for_testing_v2_v3_migration/pandas/v2/great_expectations/great_expectations.yml +++ b/tests/test_fixtures/configuration_for_testing_v2_v3_migration/pandas/v2/great_expectations/great_expectations.yml @@ -29,7 +29,7 @@ datasources: # secrets out of source control & 2) environment-based configuration changes # such as staging vs prod. # -# When GE encounters substitution syntax (like `my_key: ${my_value}` or +# When GX encounters substitution syntax (like `my_key: ${my_value}` or # `my_key: $my_value`) in the great_expectations.yml file, it will attempt # to replace the value of `my_key` with the value from an environment # variable `my_value` or a corresponding key read from this config file, diff --git a/tests/test_fixtures/configuration_for_testing_v2_v3_migration/pandas/v3/great_expectations/great_expectations.yml b/tests/test_fixtures/configuration_for_testing_v2_v3_migration/pandas/v3/great_expectations/great_expectations.yml index d7cd100f15d4..e19280796c85 100644 --- a/tests/test_fixtures/configuration_for_testing_v2_v3_migration/pandas/v3/great_expectations/great_expectations.yml +++ b/tests/test_fixtures/configuration_for_testing_v2_v3_migration/pandas/v3/great_expectations/great_expectations.yml @@ -39,7 +39,7 @@ datasources: # secrets out of source control & 2) environment-based configuration changes # such as staging vs prod. 
# -# When GE encounters substitution syntax (like `my_key: ${my_value}` or +# When GX encounters substitution syntax (like `my_key: ${my_value}` or # `my_key: $my_value`) in the great_expectations.yml file, it will attempt # to replace the value of `my_key` with the value from an environment # variable `my_value` or a corresponding key read from this config file, diff --git a/tests/test_fixtures/configuration_for_testing_v2_v3_migration/postgresql/v2/great_expectations/great_expectations.yml b/tests/test_fixtures/configuration_for_testing_v2_v3_migration/postgresql/v2/great_expectations/great_expectations.yml index 2797715ef91f..9854f72c3f8f 100644 --- a/tests/test_fixtures/configuration_for_testing_v2_v3_migration/postgresql/v2/great_expectations/great_expectations.yml +++ b/tests/test_fixtures/configuration_for_testing_v2_v3_migration/postgresql/v2/great_expectations/great_expectations.yml @@ -26,7 +26,7 @@ datasources: # secrets out of source control & 2) environment-based configuration changes # such as staging vs prod. # -# When GE encounters substitution syntax (like `my_key: ${my_value}` or +# When GX encounters substitution syntax (like `my_key: ${my_value}` or # `my_key: $my_value`) in the great_expectations.yml file, it will attempt # to replace the value of `my_key` with the value from an environment # variable `my_value` or a corresponding key read from this config file, diff --git a/tests/test_fixtures/configuration_for_testing_v2_v3_migration/postgresql/v3/great_expectations/great_expectations.yml b/tests/test_fixtures/configuration_for_testing_v2_v3_migration/postgresql/v3/great_expectations/great_expectations.yml index 494c4a99f1d5..36052eee371a 100644 --- a/tests/test_fixtures/configuration_for_testing_v2_v3_migration/postgresql/v3/great_expectations/great_expectations.yml +++ b/tests/test_fixtures/configuration_for_testing_v2_v3_migration/postgresql/v3/great_expectations/great_expectations.yml @@ -36,7 +36,7 @@ datasources: # secrets out of source control & 2) environment-based configuration changes # such as staging vs prod. # -# When GE encounters substitution syntax (like `my_key: ${my_value}` or +# When GX encounters substitution syntax (like `my_key: ${my_value}` or # `my_key: $my_value`) in the great_expectations.yml file, it will attempt # to replace the value of `my_key` with the value from an environment # variable `my_value` or a corresponding key read from this config file, diff --git a/tests/test_fixtures/configuration_for_testing_v2_v3_migration/spark/v2/great_expectations/great_expectations.yml b/tests/test_fixtures/configuration_for_testing_v2_v3_migration/spark/v2/great_expectations/great_expectations.yml index 20f594f420f0..07c772ecf0f9 100644 --- a/tests/test_fixtures/configuration_for_testing_v2_v3_migration/spark/v2/great_expectations/great_expectations.yml +++ b/tests/test_fixtures/configuration_for_testing_v2_v3_migration/spark/v2/great_expectations/great_expectations.yml @@ -30,7 +30,7 @@ datasources: # secrets out of source control & 2) environment-based configuration changes # such as staging vs prod. 
# -# When GE encounters substitution syntax (like `my_key: ${my_value}` or +# When GX encounters substitution syntax (like `my_key: ${my_value}` or # `my_key: $my_value`) in the great_expectations.yml file, it will attempt # to replace the value of `my_key` with the value from an environment # variable `my_value` or a corresponding key read from this config file, diff --git a/tests/test_fixtures/configuration_for_testing_v2_v3_migration/spark/v3/great_expectations/great_expectations.yml b/tests/test_fixtures/configuration_for_testing_v2_v3_migration/spark/v3/great_expectations/great_expectations.yml index fbad62e7f58e..de1ffa8ac586 100644 --- a/tests/test_fixtures/configuration_for_testing_v2_v3_migration/spark/v3/great_expectations/great_expectations.yml +++ b/tests/test_fixtures/configuration_for_testing_v2_v3_migration/spark/v3/great_expectations/great_expectations.yml @@ -42,7 +42,7 @@ datasources: # secrets out of source control & 2) environment-based configuration changes # such as staging vs prod. # -# When GE encounters substitution syntax (like `my_key: ${my_value}` or +# When GX encounters substitution syntax (like `my_key: ${my_value}` or # `my_key: $my_value`) in the great_expectations.yml file, it will attempt # to replace the value of `my_key` with the value from an environment # variable `my_value` or a corresponding key read from this config file, diff --git a/tests/test_fixtures/great_expectations_basic_with_bad_notebooks.yml b/tests/test_fixtures/great_expectations_basic_with_bad_notebooks.yml index f56a42eb6ee3..f36199bda726 100644 --- a/tests/test_fixtures/great_expectations_basic_with_bad_notebooks.yml +++ b/tests/test_fixtures/great_expectations_basic_with_bad_notebooks.yml @@ -74,7 +74,7 @@ validation_operators: target_store_name: evaluation_parameter_store notebooks: - # Notebooks are generated by GE to provide interactive environments + # Notebooks are generated by GX to provide interactive environments # to develop suites. Here you can customize the resulting notebooks. suite_edit: class_name: SuiteEditNotebookRenderer diff --git a/tests/test_fixtures/great_expectations_custom_local_site_config.yml b/tests/test_fixtures/great_expectations_custom_local_site_config.yml index 816d413a472c..98df71628f33 100644 --- a/tests/test_fixtures/great_expectations_custom_local_site_config.yml +++ b/tests/test_fixtures/great_expectations_custom_local_site_config.yml @@ -66,7 +66,7 @@ stores: data_docs_sites: local_site: # site name - # “local_site” renders documentation for all the datasources in the project from GE artifacts in the local repo. + # “local_site” renders documentation for all the datasources in the project from GX artifacts in the local repo. # The site includes expectation suites and profiling and validation results from uncommitted directory. # Local site provides the convenience of visualizing all the entities stored in JSON files as HTML. diff --git a/tests/test_fixtures/great_expectations_custom_notebooks.yml b/tests/test_fixtures/great_expectations_custom_notebooks.yml index 4da7d4fb2d13..83a33d5666e5 100644 --- a/tests/test_fixtures/great_expectations_custom_notebooks.yml +++ b/tests/test_fixtures/great_expectations_custom_notebooks.yml @@ -74,7 +74,7 @@ validation_operators: target_store_name: evaluation_parameter_store notebooks: -# Notebooks are generated by GE to provide interactive environments +# Notebooks are generated by GX to provide interactive environments # to develop suites. Here you can customize the resulting notebooks. 
suite_edit: class_name: SuiteEditNotebookRenderer diff --git a/tests/test_fixtures/great_expectations_custom_notebooks_defaults.yml b/tests/test_fixtures/great_expectations_custom_notebooks_defaults.yml index 2ee2ad8a0036..f862aff33e0b 100644 --- a/tests/test_fixtures/great_expectations_custom_notebooks_defaults.yml +++ b/tests/test_fixtures/great_expectations_custom_notebooks_defaults.yml @@ -74,7 +74,7 @@ validation_operators: target_store_name: evaluation_parameter_store notebooks: -# Notebooks are generated by GE to provide interactive environments +# Notebooks are generated by GX to provide interactive environments # to develop suites. Here you can customize the resulting notebooks. suite_edit: class_name: SuiteEditNotebookRenderer diff --git a/tests/test_fixtures/great_expectations_site_builder.yml b/tests/test_fixtures/great_expectations_site_builder.yml index 34092b651343..becfcdf7e1ab 100644 --- a/tests/test_fixtures/great_expectations_site_builder.yml +++ b/tests/test_fixtures/great_expectations_site_builder.yml @@ -42,7 +42,7 @@ stores: data_docs_sites: local_site: # site name - # “local_site” renders documentation for all the datasources in the project from GE artifacts in the local repo. + # “local_site” renders documentation for all the datasources in the project from GX artifacts in the local repo. # The site includes expectation suites and profiling and validation results from uncommitted directory. # Local site provides the convenience of visualizing all the entities stored in JSON files as HTML. diff --git a/tests/test_fixtures/great_expectations_v013_bad_notebooks.yml b/tests/test_fixtures/great_expectations_v013_bad_notebooks.yml index 0794723ebbb9..4b9e56125ae2 100644 --- a/tests/test_fixtures/great_expectations_v013_bad_notebooks.yml +++ b/tests/test_fixtures/great_expectations_v013_bad_notebooks.yml @@ -87,7 +87,7 @@ stores: class_name: ValidationsStore notebooks: -# Notebooks are generated by GE to provide interactive environments +# Notebooks are generated by GX to provide interactive environments # to develop suites. Here you can customize the resulting notebooks. suite_edit: class_name: SuiteEditNotebookRenderer diff --git a/tests/test_fixtures/great_expectations_v013_custom_notebooks.yml b/tests/test_fixtures/great_expectations_v013_custom_notebooks.yml index f393f10199df..877d7b585dfd 100644 --- a/tests/test_fixtures/great_expectations_v013_custom_notebooks.yml +++ b/tests/test_fixtures/great_expectations_v013_custom_notebooks.yml @@ -87,7 +87,7 @@ stores: class_name: ValidationsStore notebooks: -# Notebooks are generated by GE to provide interactive environments +# Notebooks are generated by GX to provide interactive environments # to develop suites. Here you can customize the resulting notebooks. suite_edit: class_name: SuiteEditNotebookRenderer diff --git a/tests/test_fixtures/great_expectations_v013_site_builder.yml b/tests/test_fixtures/great_expectations_v013_site_builder.yml index 318698998144..4e1cb550306b 100644 --- a/tests/test_fixtures/great_expectations_v013_site_builder.yml +++ b/tests/test_fixtures/great_expectations_v013_site_builder.yml @@ -49,7 +49,7 @@ stores: data_docs_sites: local_site: # site name - # “local_site” renders documentation for all the datasources in the project from GE artifacts in the local repo. + # “local_site” renders documentation for all the datasources in the project from GX artifacts in the local repo. # The site includes expectation suites and profiling and validation results from uncommitted directory. 
# Local site provides the convenience of visualizing all the entities stored in JSON files as HTML. diff --git a/tests/test_fixtures/rule_based_profiler/example_notebooks/MultiBatchExample_ConfiguredAssetFileSystemExample_Pandas.ipynb b/tests/test_fixtures/rule_based_profiler/example_notebooks/MultiBatchExample_ConfiguredAssetFileSystemExample_Pandas.ipynb index 0b876633f1fa..75b9369e29d6 100644 --- a/tests/test_fixtures/rule_based_profiler/example_notebooks/MultiBatchExample_ConfiguredAssetFileSystemExample_Pandas.ipynb +++ b/tests/test_fixtures/rule_based_profiler/example_notebooks/MultiBatchExample_ConfiguredAssetFileSystemExample_Pandas.ipynb @@ -107,7 +107,7 @@ "\t\tyellow_trip_data_feb (1 of 1): ['yellow_tripdata_sample_2020-02.csv']\n", "\t\tyellow_trip_data_jan (1 of 1): ['yellow_tripdata_sample_2020-01.csv']\n", " ```\n", - " * **Note**: in this case we actually don't need a `group_name` defined if we are just saying our pattern was `yellow_tripdata_sample_2020-(01)\\\\.csv`. However, GE currently doesn't allow a `pattern` to be defined without `group_name` also defined. So in our case, we set regex defined in `pattern` to capture the `month`." + " * **Note**: in this case we actually don't need a `group_name` defined if we are just saying our pattern was `yellow_tripdata_sample_2020-(01)\\\\.csv`. However, GX currently doesn't allow a `pattern` to be defined without `group_name` also defined. So in our case, we set regex defined in `pattern` to capture the `month`." ] }, { diff --git a/tests/test_fixtures/rule_based_profiler/example_notebooks/MultiBatchExample_ConfiguredAssetFileSystemExample_Spark.ipynb b/tests/test_fixtures/rule_based_profiler/example_notebooks/MultiBatchExample_ConfiguredAssetFileSystemExample_Spark.ipynb index e57fd24eaaff..ece1f81570a9 100644 --- a/tests/test_fixtures/rule_based_profiler/example_notebooks/MultiBatchExample_ConfiguredAssetFileSystemExample_Spark.ipynb +++ b/tests/test_fixtures/rule_based_profiler/example_notebooks/MultiBatchExample_ConfiguredAssetFileSystemExample_Spark.ipynb @@ -133,7 +133,7 @@ "\t\tyellow_trip_data_feb (1 of 1): ['yellow_tripdata_sample_2020-02.csv']\n", "\t\tyellow_trip_data_jan (1 of 1): ['yellow_tripdata_sample_2020-01.csv']\n", " ```\n", - " * **Note**: in this case we actually don't need a `group_name` defined if we are just saying our pattern was `yellow_tripdata_sample_2020-(01)\\\\.csv`. However, GE currently doesn't allow a `pattern` to be defined without `group_name` also defined. So in our case, we set regex defined in `pattern` to capture the `month`." + " * **Note**: in this case we actually don't need a `group_name` defined if we are just saying our pattern was `yellow_tripdata_sample_2020-(01)\\\\.csv`. However, GX currently doesn't allow a `pattern` to be defined without `group_name` also defined. So in our case, we set regex defined in `pattern` to capture the `month`." ] }, { diff --git a/tests/test_fixtures/rule_based_profiler/example_notebooks/great_expectations/uncommitted/config_variables.yml b/tests/test_fixtures/rule_based_profiler/example_notebooks/great_expectations/uncommitted/config_variables.yml index ef73a3fd2752..988fa86fcc81 100644 --- a/tests/test_fixtures/rule_based_profiler/example_notebooks/great_expectations/uncommitted/config_variables.yml +++ b/tests/test_fixtures/rule_based_profiler/example_notebooks/great_expectations/uncommitted/config_variables.yml @@ -3,7 +3,7 @@ # secrets out of source control & 2) environment-based configuration changes # such as staging vs prod. 
# -# When GE encounters substitution syntax (like `my_key: ${my_value}` or +# When GX encounters substitution syntax (like `my_key: ${my_value}` or # `my_key: $my_value`) in the great_expectations.yml file, it will attempt # to replace the value of `my_key` with the value from an environment # variable `my_value` or a corresponding key read from this config file, diff --git a/tests/test_fixtures/upgrade_helper/UpgradeHelperV11_basic_upgrade_log.json b/tests/test_fixtures/upgrade_helper/UpgradeHelperV11_basic_upgrade_log.json index 0160587da44d..74a5f41f5445 100644 --- a/tests/test_fixtures/upgrade_helper/UpgradeHelperV11_basic_upgrade_log.json +++ b/tests/test_fixtures/upgrade_helper/UpgradeHelperV11_basic_upgrade_log.json @@ -15,8 +15,8 @@ "validations_store": { "validations_updated": [ { - "src": "file://GE_PROJECT_DIR/uncommitted/validations/diabetic_data/warning/20200430T191246.763896Z/c3b4c5df224fef4b1a056a0f3b93aba5.json", - "dest": "file://GE_PROJECT_DIR/uncommitted/validations/diabetic_data/warning/20200430T191246.763896Z/20200430T191246.763896Z/c3b4c5df224fef4b1a056a0f3b93aba5.json" + "src": "file://GX_PROJECT_DIR/uncommitted/validations/diabetic_data/warning/20200430T191246.763896Z/c3b4c5df224fef4b1a056a0f3b93aba5.json", + "dest": "file://GX_PROJECT_DIR/uncommitted/validations/diabetic_data/warning/20200430T191246.763896Z/20200430T191246.763896Z/c3b4c5df224fef4b1a056a0f3b93aba5.json" } ], "exceptions": false @@ -26,8 +26,8 @@ "local_site": { "validation_result_pages_updated": [ { - "src": "file://GE_PROJECT_DIR/uncommitted/data_docs/local_site/validations/diabetic_data/warning/20200430T191246.763896Z/c3b4c5df224fef4b1a056a0f3b93aba5.html", - "dest": "file://GE_PROJECT_DIR/uncommitted/data_docs/local_site/validations/diabetic_data/warning/20200430T191246.763896Z/20200430T191246.763896Z/c3b4c5df224fef4b1a056a0f3b93aba5.html" + "src": "file://GX_PROJECT_DIR/uncommitted/data_docs/local_site/validations/diabetic_data/warning/20200430T191246.763896Z/c3b4c5df224fef4b1a056a0f3b93aba5.html", + "dest": "file://GX_PROJECT_DIR/uncommitted/data_docs/local_site/validations/diabetic_data/warning/20200430T191246.763896Z/20200430T191246.763896Z/c3b4c5df224fef4b1a056a0f3b93aba5.html" } ], "exceptions": false diff --git a/tests/test_fixtures/upgrade_helper/UpgradeHelperV11_basic_upgrade_with_exception_log.json b/tests/test_fixtures/upgrade_helper/UpgradeHelperV11_basic_upgrade_with_exception_log.json index 715ded8c8431..afa1aaf45959 100644 --- a/tests/test_fixtures/upgrade_helper/UpgradeHelperV11_basic_upgrade_with_exception_log.json +++ b/tests/test_fixtures/upgrade_helper/UpgradeHelperV11_basic_upgrade_with_exception_log.json @@ -22,8 +22,8 @@ "validations_store": { "validations_updated": [ { - "src": "file://GE_PROJECT_DIR/uncommitted/validations/diabetic_data/warning/20200430T191246.763896Z/c3b4c5df224fef4b1a056a0f3b93aba5.json", - "dest": "file://GE_PROJECT_DIR/uncommitted/validations/diabetic_data/warning/20200430T191246.763896Z/20200430T191246.763896Z/c3b4c5df224fef4b1a056a0f3b93aba5.json" + "src": "file://GX_PROJECT_DIR/uncommitted/validations/diabetic_data/warning/20200430T191246.763896Z/c3b4c5df224fef4b1a056a0f3b93aba5.json", + "dest": "file://GX_PROJECT_DIR/uncommitted/validations/diabetic_data/warning/20200430T191246.763896Z/20200430T191246.763896Z/c3b4c5df224fef4b1a056a0f3b93aba5.json" } ], "exceptions": false @@ -33,8 +33,8 @@ "local_site": { "validation_result_pages_updated": [ { - "src": 
"file://GE_PROJECT_DIR/uncommitted/data_docs/local_site/validations/diabetic_data/warning/20200430T191246.763896Z/c3b4c5df224fef4b1a056a0f3b93aba5.html", - "dest": "file://GE_PROJECT_DIR/uncommitted/data_docs/local_site/validations/diabetic_data/warning/20200430T191246.763896Z/20200430T191246.763896Z/c3b4c5df224fef4b1a056a0f3b93aba5.html" + "src": "file://GX_PROJECT_DIR/uncommitted/data_docs/local_site/validations/diabetic_data/warning/20200430T191246.763896Z/c3b4c5df224fef4b1a056a0f3b93aba5.html", + "dest": "file://GX_PROJECT_DIR/uncommitted/data_docs/local_site/validations/diabetic_data/warning/20200430T191246.763896Z/20200430T191246.763896Z/c3b4c5df224fef4b1a056a0f3b93aba5.html" } ], "exceptions": false diff --git a/tests/test_fixtures/upgrade_helper/UpgradeHelperV11_manual_steps_upgrade_log.json b/tests/test_fixtures/upgrade_helper/UpgradeHelperV11_manual_steps_upgrade_log.json index 627d23acb2ac..08940f90e90c 100644 --- a/tests/test_fixtures/upgrade_helper/UpgradeHelperV11_manual_steps_upgrade_log.json +++ b/tests/test_fixtures/upgrade_helper/UpgradeHelperV11_manual_steps_upgrade_log.json @@ -35,8 +35,8 @@ "validations_store": { "validations_updated": [ { - "src": "file://GE_PROJECT_DIR/uncommitted/validations/diabetic_data/warning/20200430T191246.763896Z/c3b4c5df224fef4b1a056a0f3b93aba5.json", - "dest": "file://GE_PROJECT_DIR/uncommitted/validations/diabetic_data/warning/20200430T191246.763896Z/20200430T191246.763896Z/c3b4c5df224fef4b1a056a0f3b93aba5.json" + "src": "file://GX_PROJECT_DIR/uncommitted/validations/diabetic_data/warning/20200430T191246.763896Z/c3b4c5df224fef4b1a056a0f3b93aba5.json", + "dest": "file://GX_PROJECT_DIR/uncommitted/validations/diabetic_data/warning/20200430T191246.763896Z/20200430T191246.763896Z/c3b4c5df224fef4b1a056a0f3b93aba5.json" } ], "exceptions": false @@ -46,8 +46,8 @@ "local_site": { "validation_result_pages_updated": [ { - "src": "file://GE_PROJECT_DIR/uncommitted/data_docs/local_site/validations/diabetic_data/warning/20200430T191246.763896Z/c3b4c5df224fef4b1a056a0f3b93aba5.html", - "dest": "file://GE_PROJECT_DIR/uncommitted/data_docs/local_site/validations/diabetic_data/warning/20200430T191246.763896Z/20200430T191246.763896Z/c3b4c5df224fef4b1a056a0f3b93aba5.html" + "src": "file://GX_PROJECT_DIR/uncommitted/data_docs/local_site/validations/diabetic_data/warning/20200430T191246.763896Z/c3b4c5df224fef4b1a056a0f3b93aba5.html", + "dest": "file://GX_PROJECT_DIR/uncommitted/data_docs/local_site/validations/diabetic_data/warning/20200430T191246.763896Z/20200430T191246.763896Z/c3b4c5df224fef4b1a056a0f3b93aba5.html" } ], "exceptions": false diff --git a/tests/test_fixtures/upgrade_helper/great_expectations_v10_project/uncommitted/config_variables.yml b/tests/test_fixtures/upgrade_helper/great_expectations_v10_project/uncommitted/config_variables.yml index c3d3ef949d2f..e7e3f76f1093 100644 --- a/tests/test_fixtures/upgrade_helper/great_expectations_v10_project/uncommitted/config_variables.yml +++ b/tests/test_fixtures/upgrade_helper/great_expectations_v10_project/uncommitted/config_variables.yml @@ -2,7 +2,7 @@ # secrets out of source control & 2) environment-based configuration changes # such as staging vs prod. 
# -# When GE encounters substitution syntax (like `my_key: ${my_value}` or +# When GX encounters substitution syntax (like `my_key: ${my_value}` or # `my_key: $my_value`) in the config file, it will attempt to replace the value # of `my_key` with the value from an environment variable `my_value` or a # corresponding key read from the file specified using diff --git a/tests/test_fixtures/upgrade_helper/great_expectations_v2.yml b/tests/test_fixtures/upgrade_helper/great_expectations_v2.yml index 23d0a0c3672d..264b85f9b698 100644 --- a/tests/test_fixtures/upgrade_helper/great_expectations_v2.yml +++ b/tests/test_fixtures/upgrade_helper/great_expectations_v2.yml @@ -27,7 +27,7 @@ datasources: # secrets out of source control & 2) environment-based configuration changes # such as staging vs prod. # -# When GE encounters substitution syntax (like `my_key: ${my_value}` or +# When GX encounters substitution syntax (like `my_key: ${my_value}` or # `my_key: $my_value`) in the great_expectations.yml file, it will attempt # to replace the value of `my_key` with the value from an environment # variable `my_value` or a corresponding key read from this config file, diff --git a/tests/test_fixtures/upgrade_helper/great_expectations_v20_project/checkpoints/titanic_checkpoint_0.yml b/tests/test_fixtures/upgrade_helper/great_expectations_v20_project/checkpoints/titanic_checkpoint_0.yml index ec9498a89da1..d59efb19c5d5 100644 --- a/tests/test_fixtures/upgrade_helper/great_expectations_v20_project/checkpoints/titanic_checkpoint_0.yml +++ b/tests/test_fixtures/upgrade_helper/great_expectations_v20_project/checkpoints/titanic_checkpoint_0.yml @@ -14,7 +14,7 @@ validation_operator_name: action_list_operator # Great Expectations in your pipelines easy! batches: - batch_kwargs: - path: GE_PROJECT_DIR/data/Titanic.csv + path: GX_PROJECT_DIR/data/Titanic.csv datasource: Downloads__dir data_asset_name: Titanic expectation_suite_names: # one or more suites may validate against a single batch diff --git a/tests/test_fixtures/upgrade_helper/great_expectations_v20_project/checkpoints/titanic_checkpoint_1.yml b/tests/test_fixtures/upgrade_helper/great_expectations_v20_project/checkpoints/titanic_checkpoint_1.yml index 572bcb243a49..8736c933342b 100644 --- a/tests/test_fixtures/upgrade_helper/great_expectations_v20_project/checkpoints/titanic_checkpoint_1.yml +++ b/tests/test_fixtures/upgrade_helper/great_expectations_v20_project/checkpoints/titanic_checkpoint_1.yml @@ -17,13 +17,13 @@ validation_operator_name: action_list_operator # Great Expectations in your pipelines easy! 
batches: - batch_kwargs: - path: GE_PROJECT_DIR/data/Titanic.csv + path: GX_PROJECT_DIR/data/Titanic.csv datasource: Downloads__dir data_asset_name: Titanic expectation_suite_names: # one or more suites may validate against a single batch - Titanic.warning - batch_kwargs: - path: GE_PROJECT_DIR/data/Titanic_copy_0.csv + path: GX_PROJECT_DIR/data/Titanic_copy_0.csv datasource: Downloads__dir data_asset_name: Titanic_copy_0 expectation_suite_names: # one or more suites may validate against a single batch diff --git a/tests/test_fixtures/upgrade_helper/great_expectations_v20_project/uncommitted/config_variables.yml b/tests/test_fixtures/upgrade_helper/great_expectations_v20_project/uncommitted/config_variables.yml index c3d3ef949d2f..e7e3f76f1093 100644 --- a/tests/test_fixtures/upgrade_helper/great_expectations_v20_project/uncommitted/config_variables.yml +++ b/tests/test_fixtures/upgrade_helper/great_expectations_v20_project/uncommitted/config_variables.yml @@ -2,7 +2,7 @@ # secrets out of source control & 2) environment-based configuration changes # such as staging vs prod. # -# When GE encounters substitution syntax (like `my_key: ${my_value}` or +# When GX encounters substitution syntax (like `my_key: ${my_value}` or # `my_key: $my_value`) in the config file, it will attempt to replace the value # of `my_key` with the value from an environment variable `my_value` or a # corresponding key read from the file specified using diff --git a/tests/test_fixtures/upgrade_helper/great_expectations_v20_project_with_v30_configuration_and_no_checkpoints/uncommitted/config_variables.yml b/tests/test_fixtures/upgrade_helper/great_expectations_v20_project_with_v30_configuration_and_no_checkpoints/uncommitted/config_variables.yml index c3d3ef949d2f..e7e3f76f1093 100644 --- a/tests/test_fixtures/upgrade_helper/great_expectations_v20_project_with_v30_configuration_and_no_checkpoints/uncommitted/config_variables.yml +++ b/tests/test_fixtures/upgrade_helper/great_expectations_v20_project_with_v30_configuration_and_no_checkpoints/uncommitted/config_variables.yml @@ -2,7 +2,7 @@ # secrets out of source control & 2) environment-based configuration changes # such as staging vs prod. # -# When GE encounters substitution syntax (like `my_key: ${my_value}` or +# When GX encounters substitution syntax (like `my_key: ${my_value}` or # `my_key: $my_value`) in the config file, it will attempt to replace the value # of `my_key` with the value from an environment variable `my_value` or a # corresponding key read from the file specified using diff --git a/tests/test_fixtures/upgrade_helper/great_expectations_v20_project_with_v30_configuration_and_v20_checkpoints/checkpoints/titanic_checkpoint_0.yml b/tests/test_fixtures/upgrade_helper/great_expectations_v20_project_with_v30_configuration_and_v20_checkpoints/checkpoints/titanic_checkpoint_0.yml index ec9498a89da1..d59efb19c5d5 100644 --- a/tests/test_fixtures/upgrade_helper/great_expectations_v20_project_with_v30_configuration_and_v20_checkpoints/checkpoints/titanic_checkpoint_0.yml +++ b/tests/test_fixtures/upgrade_helper/great_expectations_v20_project_with_v30_configuration_and_v20_checkpoints/checkpoints/titanic_checkpoint_0.yml @@ -14,7 +14,7 @@ validation_operator_name: action_list_operator # Great Expectations in your pipelines easy! 
batches: - batch_kwargs: - path: GE_PROJECT_DIR/data/Titanic.csv + path: GX_PROJECT_DIR/data/Titanic.csv datasource: Downloads__dir data_asset_name: Titanic expectation_suite_names: # one or more suites may validate against a single batch diff --git a/tests/test_fixtures/upgrade_helper/great_expectations_v20_project_with_v30_configuration_and_v20_checkpoints/checkpoints/titanic_checkpoint_1.yml b/tests/test_fixtures/upgrade_helper/great_expectations_v20_project_with_v30_configuration_and_v20_checkpoints/checkpoints/titanic_checkpoint_1.yml index 572bcb243a49..8736c933342b 100644 --- a/tests/test_fixtures/upgrade_helper/great_expectations_v20_project_with_v30_configuration_and_v20_checkpoints/checkpoints/titanic_checkpoint_1.yml +++ b/tests/test_fixtures/upgrade_helper/great_expectations_v20_project_with_v30_configuration_and_v20_checkpoints/checkpoints/titanic_checkpoint_1.yml @@ -17,13 +17,13 @@ validation_operator_name: action_list_operator # Great Expectations in your pipelines easy! batches: - batch_kwargs: - path: GE_PROJECT_DIR/data/Titanic.csv + path: GX_PROJECT_DIR/data/Titanic.csv datasource: Downloads__dir data_asset_name: Titanic expectation_suite_names: # one or more suites may validate against a single batch - Titanic.warning - batch_kwargs: - path: GE_PROJECT_DIR/data/Titanic_copy_0.csv + path: GX_PROJECT_DIR/data/Titanic_copy_0.csv datasource: Downloads__dir data_asset_name: Titanic_copy_0 expectation_suite_names: # one or more suites may validate against a single batch diff --git a/tests/test_fixtures/upgrade_helper/great_expectations_v20_project_with_v30_configuration_and_v20_checkpoints/uncommitted/config_variables.yml b/tests/test_fixtures/upgrade_helper/great_expectations_v20_project_with_v30_configuration_and_v20_checkpoints/uncommitted/config_variables.yml index c3d3ef949d2f..e7e3f76f1093 100644 --- a/tests/test_fixtures/upgrade_helper/great_expectations_v20_project_with_v30_configuration_and_v20_checkpoints/uncommitted/config_variables.yml +++ b/tests/test_fixtures/upgrade_helper/great_expectations_v20_project_with_v30_configuration_and_v20_checkpoints/uncommitted/config_variables.yml @@ -2,7 +2,7 @@ # secrets out of source control & 2) environment-based configuration changes # such as staging vs prod. # -# When GE encounters substitution syntax (like `my_key: ${my_value}` or +# When GX encounters substitution syntax (like `my_key: ${my_value}` or # `my_key: $my_value`) in the config file, it will attempt to replace the value # of `my_key` with the value from an environment variable `my_value` or a # corresponding key read from the file specified using diff --git a/tests/test_fixtures/upgrade_helper/great_expectations_v2_with_v3_configuration_without_checkpoint_store.yml b/tests/test_fixtures/upgrade_helper/great_expectations_v2_with_v3_configuration_without_checkpoint_store.yml index f5278c8891f1..57e11cecf797 100644 --- a/tests/test_fixtures/upgrade_helper/great_expectations_v2_with_v3_configuration_without_checkpoint_store.yml +++ b/tests/test_fixtures/upgrade_helper/great_expectations_v2_with_v3_configuration_without_checkpoint_store.yml @@ -51,7 +51,7 @@ datasources: # secrets out of source control & 2) environment-based configuration changes # such as staging vs prod. 
 #
-# When GE encounters substitution syntax (like `my_key: ${my_value}` or
+# When GX encounters substitution syntax (like `my_key: ${my_value}` or
 # `my_key: $my_value`) in the great_expectations.yml file, it will attempt
 # to replace the value of `my_key` with the value from an environment
 # variable `my_value` or a corresponding key read from this config file,
diff --git a/tests/test_fixtures/upgrade_helper/test_basic_project_upgrade_expected_stdout.fixture b/tests/test_fixtures/upgrade_helper/test_basic_project_upgrade_expected_stdout.fixture
index 331b616ce121..8b3d9c42f199 100644
--- a/tests/test_fixtures/upgrade_helper/test_basic_project_upgrade_expected_stdout.fixture
+++ b/tests/test_fixtures/upgrade_helper/test_basic_project_upgrade_expected_stdout.fixture
@@ -55,7 +55,7 @@ The config_version of your great_expectations.yml has been automatically increme
 A log detailing the upgrade can be found here:
-    - GE_PROJECT_DIR/uncommitted/logs/project_upgrades/UpgradeHelperV11_20190926T134241.000000Z.json
+    - GX_PROJECT_DIR/uncommitted/logs/project_upgrades/UpgradeHelperV11_20190926T134241.000000Z.json
 ++====================================++
 || UpgradeHelperV13: Upgrade Overview ||
 ++====================================++
@@ -103,7 +103,7 @@ The Upgrade Helper has performed the automated upgrade steps as part of upgradin
 A log detailing the upgrade can be found here:
-    - GE_PROJECT_DIR/uncommitted/logs/project_upgrades/UpgradeHelperV13_20190926T134241.000000Z.json
+    - GX_PROJECT_DIR/uncommitted/logs/project_upgrades/UpgradeHelperV13_20190926T134241.000000Z.json
 ================================================================================
diff --git a/tests/test_fixtures/upgrade_helper/test_basic_project_upgrade_expected_v012_stdout.fixture b/tests/test_fixtures/upgrade_helper/test_basic_project_upgrade_expected_v012_stdout.fixture
index f671d290e7ee..c6d3ee92a945 100644
--- a/tests/test_fixtures/upgrade_helper/test_basic_project_upgrade_expected_v012_stdout.fixture
+++ b/tests/test_fixtures/upgrade_helper/test_basic_project_upgrade_expected_v012_stdout.fixture
@@ -59,7 +59,7 @@ The config_version of your great_expectations.yml has been automatically increme
 A log detailing the upgrade can be found here:
-    - GE_PROJECT_DIR/uncommitted/logs/project_upgrades/UpgradeHelperV11_20190926T134241.000000Z.json
+    - GX_PROJECT_DIR/uncommitted/logs/project_upgrades/UpgradeHelperV11_20190926T134241.000000Z.json
 ++=====================================================++
 || UpgradeHelperV13: Upgrade Overview (V2-API Version) ||
 ++=====================================================++
@@ -108,7 +108,7 @@ The config_version of your great_expectations.yml has been automatically increme
 A log detailing the upgrade can be found here:
-    - GE_PROJECT_DIR/uncommitted/logs/project_upgrades/UpgradeHelperV13_20190926T134241.000000Z.json
+    - GX_PROJECT_DIR/uncommitted/logs/project_upgrades/UpgradeHelperV13_20190926T134241.000000Z.json
 ================================================================================
diff --git a/tests/test_fixtures/upgrade_helper/test_project_upgrade_with_exception_expected_stdout.fixture b/tests/test_fixtures/upgrade_helper/test_project_upgrade_with_exception_expected_stdout.fixture
index dd3987cf5845..8f94b16da345 100644
--- a/tests/test_fixtures/upgrade_helper/test_project_upgrade_with_exception_expected_stdout.fixture
+++ b/tests/test_fixtures/upgrade_helper/test_project_upgrade_with_exception_expected_stdout.fixture
@@ -56,7 +56,7 @@ as detailed in the 0.11.x migration guide.
 The upgrade log can be found here:
-    - GE_PROJECT_DIR/uncommitted/logs/project_upgrades/UpgradeHelperV11_20190926T134241.000000Z.json
+    - GX_PROJECT_DIR/uncommitted/logs/project_upgrades/UpgradeHelperV11_20190926T134241.000000Z.json
 ================================================================================
diff --git a/tests/test_fixtures/upgrade_helper/test_project_upgrade_with_exception_expected_v012_stdout.fixture b/tests/test_fixtures/upgrade_helper/test_project_upgrade_with_exception_expected_v012_stdout.fixture
index d68c706e9326..e8d1f44b53a0 100644
--- a/tests/test_fixtures/upgrade_helper/test_project_upgrade_with_exception_expected_v012_stdout.fixture
+++ b/tests/test_fixtures/upgrade_helper/test_project_upgrade_with_exception_expected_v012_stdout.fixture
@@ -60,7 +60,7 @@ as detailed in the 0.11.x migration guide.
 The upgrade log can be found here:
-    - GE_PROJECT_DIR/uncommitted/logs/project_upgrades/UpgradeHelperV11_20190926T134241.000000Z.json
+    - GX_PROJECT_DIR/uncommitted/logs/project_upgrades/UpgradeHelperV11_20190926T134241.000000Z.json
 ================================================================================
diff --git a/tests/test_fixtures/upgrade_helper/test_project_upgrade_with_manual_steps_expected_stdout.fixture b/tests/test_fixtures/upgrade_helper/test_project_upgrade_with_manual_steps_expected_stdout.fixture
index ad4c7f12fd11..b13bda9b42b2 100644
--- a/tests/test_fixtures/upgrade_helper/test_project_upgrade_with_manual_steps_expected_stdout.fixture
+++ b/tests/test_fixtures/upgrade_helper/test_project_upgrade_with_manual_steps_expected_stdout.fixture
@@ -58,7 +58,7 @@ Upgrading project...
 The Upgrade Helper has completed the automated upgrade steps.
 A log detailing the upgrade can be found here:
-    - GE_PROJECT_DIR/uncommitted/logs/project_upgrades/UpgradeHelperV11_20190926T134241.000000Z.json
+    - GX_PROJECT_DIR/uncommitted/logs/project_upgrades/UpgradeHelperV11_20190926T134241.000000Z.json
 ================================================================================
diff --git a/tests/test_fixtures/upgrade_helper/test_project_upgrade_with_manual_steps_expected_v012_stdout.fixture b/tests/test_fixtures/upgrade_helper/test_project_upgrade_with_manual_steps_expected_v012_stdout.fixture
index a4ba2416bdb1..f4ee1fcde7bc 100644
--- a/tests/test_fixtures/upgrade_helper/test_project_upgrade_with_manual_steps_expected_v012_stdout.fixture
+++ b/tests/test_fixtures/upgrade_helper/test_project_upgrade_with_manual_steps_expected_v012_stdout.fixture
@@ -62,7 +62,7 @@ Upgrading project...
 The Upgrade Helper has completed the automated upgrade steps.
 A log detailing the upgrade can be found here:
-    - GE_PROJECT_DIR/uncommitted/logs/project_upgrades/UpgradeHelperV11_20190926T134241.000000Z.json
+    - GX_PROJECT_DIR/uncommitted/logs/project_upgrades/UpgradeHelperV11_20190926T134241.000000Z.json
 ================================================================================
diff --git a/tests/test_fixtures/upgrade_helper/test_v2_to_v3_project_upgrade_expected_v012_stdout.fixture b/tests/test_fixtures/upgrade_helper/test_v2_to_v3_project_upgrade_expected_v012_stdout.fixture
index 82b281b8cf11..8345c06a8a5f 100644
--- a/tests/test_fixtures/upgrade_helper/test_v2_to_v3_project_upgrade_expected_v012_stdout.fixture
+++ b/tests/test_fixtures/upgrade_helper/test_v2_to_v3_project_upgrade_expected_v012_stdout.fixture
@@ -51,5 +51,5 @@ The config_version of your great_expectations.yml has been automatically increme
 A log detailing the upgrade can be found here:
-    - GE_PROJECT_DIR/uncommitted/logs/project_upgrades/UpgradeHelperV13_20210119T132639.000000Z.json
+    - GX_PROJECT_DIR/uncommitted/logs/project_upgrades/UpgradeHelperV13_20210119T132639.000000Z.json
 Your project is up-to-date - no further upgrade is necessary.
diff --git a/tests/test_fixtures/upgrade_helper/test_v2_to_v3_project_upgrade_with_manual_steps_checkpoints.fixture b/tests/test_fixtures/upgrade_helper/test_v2_to_v3_project_upgrade_with_manual_steps_checkpoints.fixture
index 624c9ba7691b..88b59d626c61 100644
--- a/tests/test_fixtures/upgrade_helper/test_v2_to_v3_project_upgrade_with_manual_steps_checkpoints.fixture
+++ b/tests/test_fixtures/upgrade_helper/test_v2_to_v3_project_upgrade_with_manual_steps_checkpoints.fixture
@@ -48,5 +48,5 @@ The Upgrade Helper has performed the automated upgrade steps as part of upgradin
 A log detailing the upgrade can be found here:
-    - GE_PROJECT_DIR/uncommitted/logs/project_upgrades/UpgradeHelperV13_20210119T132639.000000Z.json
+    - GX_PROJECT_DIR/uncommitted/logs/project_upgrades/UpgradeHelperV13_20210119T132639.000000Z.json
 Your project requires manual upgrade steps in order to be up-to-date.
diff --git a/tests/test_fixtures/upgrade_helper/test_v2_to_v3_project_upgrade_with_manual_steps_checkpoints_datasources_validation_operators_expected_stdout.fixture b/tests/test_fixtures/upgrade_helper/test_v2_to_v3_project_upgrade_with_manual_steps_checkpoints_datasources_validation_operators_expected_stdout.fixture
index 20353d05b468..29ee1f3cdec8 100644
--- a/tests/test_fixtures/upgrade_helper/test_v2_to_v3_project_upgrade_with_manual_steps_checkpoints_datasources_validation_operators_expected_stdout.fixture
+++ b/tests/test_fixtures/upgrade_helper/test_v2_to_v3_project_upgrade_with_manual_steps_checkpoints_datasources_validation_operators_expected_stdout.fixture
@@ -55,5 +55,5 @@ The Upgrade Helper has performed the automated upgrade steps as part of upgradin
 A log detailing the upgrade can be found here:
-    - GE_PROJECT_DIR/uncommitted/logs/project_upgrades/UpgradeHelperV13_20210119T132639.000000Z.json
+    - GX_PROJECT_DIR/uncommitted/logs/project_upgrades/UpgradeHelperV13_20210119T132639.000000Z.json
 Your project requires manual upgrade steps in order to be up-to-date.
diff --git a/tests/test_fixtures/upgrade_helper/test_v2_to_v3_project_upgrade_without_manual_steps_expected_stdout.fixture b/tests/test_fixtures/upgrade_helper/test_v2_to_v3_project_upgrade_without_manual_steps_expected_stdout.fixture
index 986861da5777..e8654c304405 100644
--- a/tests/test_fixtures/upgrade_helper/test_v2_to_v3_project_upgrade_without_manual_steps_expected_stdout.fixture
+++ b/tests/test_fixtures/upgrade_helper/test_v2_to_v3_project_upgrade_without_manual_steps_expected_stdout.fixture
@@ -12,5 +12,5 @@ Your project was successfully upgraded to be compatible with Great Expectations
 A log detailing the upgrade can be found here:
-    - GE_PROJECT_DIR/uncommitted/logs/project_upgrades/UpgradeHelperV13_20210119T132639.000000Z.json
+    - GX_PROJECT_DIR/uncommitted/logs/project_upgrades/UpgradeHelperV13_20210119T132639.000000Z.json
 Your project is up-to-date - no further upgrade is necessary.
diff --git a/tests/test_great_expectations.py b/tests/test_great_expectations.py
index bca21c7af548..f30c2136f660 100644
--- a/tests/test_great_expectations.py
+++ b/tests/test_great_expectations.py
@@ -463,7 +463,7 @@ def test_read_json(self):
     @pytest.mark.skipif(
         not is_library_loadable(library_name="openpyxl"),
-        reason="GE uses pandas to read excel files, which requires openpyxl",
+        reason="GX uses pandas to read excel files, which requires openpyxl",
     )
     def test_read_excel(self):
         script_path = os.path.dirname(os.path.realpath(__file__))
diff --git a/tests/validator/test_validator.py b/tests/validator/test_validator.py
index 1f5de244c2a8..ed8f5f8e9794 100644
--- a/tests/validator/test_validator.py
+++ b/tests/validator/test_validator.py
@@ -330,8 +330,8 @@ def test_ge_cloud_validator_updates_self_suite_with_ge_cloud_ids_on_save(
     """
     This checks that Validator in ge_cloud_mode properly updates underlying Expectation Suite on save.
     The multi_batch_taxi_validator_ge_cloud_mode fixture has a suite with a single expectation.
-    :param mock_context_get_suite: Under normal circumstances, this would be ExpectationSuite object returned from GE Cloud
-    :param mock_context_save_suite: Under normal circumstances, this would trigger post or patch to GE Cloud
+    :param mock_context_get_suite: Under normal circumstances, this would be ExpectationSuite object returned from GX Cloud
+    :param mock_context_save_suite: Under normal circumstances, this would trigger post or patch to GX Cloud
     """
     context: DataContext = empty_data_context_stats_enabled
     mock_suite = ExpectationSuite(