diff --git a/plugins/core-plugin/src/main/java/com/google/cloud/teleport/plugin/model/ImageSpecParameter.java b/plugins/core-plugin/src/main/java/com/google/cloud/teleport/plugin/model/ImageSpecParameter.java index 979496c892..f1125b33ac 100644 --- a/plugins/core-plugin/src/main/java/com/google/cloud/teleport/plugin/model/ImageSpecParameter.java +++ b/plugins/core-plugin/src/main/java/com/google/cloud/teleport/plugin/model/ImageSpecParameter.java @@ -652,7 +652,7 @@ protected void processDescriptions( this.setHelpText(helpText); if (example != null && !example.isEmpty()) { - this.setHelpText(this.getHelpText() + " (Example: " + example + ")"); + this.setHelpText(this.getHelpText() + " For example, `" + example + "`"); } } } diff --git a/plugins/core-plugin/src/main/resources/README-template.md b/plugins/core-plugin/src/main/resources/README-template.md index a3b01c3af5..2c80e28a2d 100644 --- a/plugins/core-plugin/src/main/resources/README-template.md +++ b/plugins/core-plugin/src/main/resources/README-template.md @@ -21,12 +21,12 @@ on [Metadata Annotations](https://github.com/GoogleCloudPlatform/DataflowTemplat ### Required parameters -<#list spec.metadata.parameters as parameter><#if !parameter.optional!false>* **${parameter.name}** : ${parameter.helpText?ensure_ends_with(".")} +<#list spec.metadata.parameters as parameter><#if !parameter.optional!false>* **${parameter.name}**: ${parameter.helpText?ensure_ends_with(".")} ### Optional parameters -<#list spec.metadata.parameters as parameter><#if parameter.optional!false>* **${parameter.name}** : ${parameter.helpText?ensure_ends_with(".")} +<#list spec.metadata.parameters as parameter><#if parameter.optional!false>* **${parameter.name}**: ${parameter.helpText?ensure_ends_with(".")} diff --git a/python/README_Yaml_Template.md b/python/README_Yaml_Template.md index 650ff8b3c0..4d2b7d07bc 100644 --- a/python/README_Yaml_Template.md +++ b/python/README_Yaml_Template.md @@ -25,9 +25,9 @@ on [Metadata Annotations](https://github.com/GoogleCloudPlatform/DataflowTemplat ### Optional parameters -* **yaml_pipeline** : A yaml description of the pipeline to run. -* **yaml_pipeline_file** : A file in Cloud Storage containing a yaml description of the pipeline to run. -* **jinja_variables** : A json dict of variables used when invoking the jinja preprocessor on the provided yaml pipeline. +* **yaml_pipeline**: A yaml description of the pipeline to run. +* **yaml_pipeline_file**: A file in Cloud Storage containing a yaml description of the pipeline to run. +* **jinja_variables**: A json dict of variables used when invoking the jinja preprocessor on the provided yaml pipeline. diff --git a/v1/README_Bulk_Compress_GCS_Files.md b/v1/README_Bulk_Compress_GCS_Files.md index 8a7aec4625..d1bdce7bd1 100644 --- a/v1/README_Bulk_Compress_GCS_Files.md +++ b/v1/README_Bulk_Compress_GCS_Files.md @@ -27,14 +27,14 @@ on [Metadata Annotations](https://github.com/GoogleCloudPlatform/DataflowTemplat ### Required parameters -* **inputFilePattern** : The Cloud Storage location of the files you'd like to process. (Example: gs://your-bucket/your-files/*.txt). -* **outputDirectory** : The path and filename prefix for writing output files. Must end with a slash. DateTime formatting is used to parse directory path for date & time formatters. (Example: gs://your-bucket/your-path). -* **outputFailureFile** : The error log output file to use for write failures that occur during compression. The contents will be one line for each file which failed compression. 
Note that this parameter will allow the pipeline to continue processing in the event of a failure. (Example: gs://your-bucket/compressed/failed.csv). -* **compression** : The compression algorithm used to compress the matched files. Valid algorithms: BZIP2, DEFLATE, GZIP. +* **inputFilePattern**: The Cloud Storage location of the files you'd like to process. For example, `gs://your-bucket/your-files/*.txt`. +* **outputDirectory**: The path and filename prefix for writing output files. Must end with a slash. DateTime formatting is used to parse directory path for date & time formatters. For example, `gs://your-bucket/your-path`. +* **outputFailureFile**: The error log output file to use for write failures that occur during compression. The contents will be one line for each file which failed compression. Note that this parameter will allow the pipeline to continue processing in the event of a failure. For example, `gs://your-bucket/compressed/failed.csv`. +* **compression**: The compression algorithm used to compress the matched files. Valid algorithms: BZIP2, DEFLATE, GZIP. ### Optional parameters -* **outputFilenameSuffix** : Output filename suffix of the files to write. Defaults to .bzip2, .deflate or .gz depending on the compression algorithm. +* **outputFilenameSuffix**: Output filename suffix of the files to write. Defaults to .bzip2, .deflate or .gz depending on the compression algorithm. @@ -211,9 +211,9 @@ resource "google_dataflow_job" "bulk_compress_gcs_files" { region = var.region temp_gcs_location = "gs://bucket-name-here/temp" parameters = { - inputFilePattern = "gs://your-bucket/your-files/*.txt" - outputDirectory = "gs://your-bucket/your-path" - outputFailureFile = "gs://your-bucket/compressed/failed.csv" + inputFilePattern = "" + outputDirectory = "" + outputFailureFile = "" compression = "" # outputFilenameSuffix = "" } diff --git a/v1/README_Bulk_Decompress_GCS_Files.md b/v1/README_Bulk_Decompress_GCS_Files.md index b885462bdf..b645a00324 100644 --- a/v1/README_Bulk_Decompress_GCS_Files.md +++ b/v1/README_Bulk_Decompress_GCS_Files.md @@ -26,9 +26,9 @@ on [Metadata Annotations](https://github.com/GoogleCloudPlatform/DataflowTemplat ### Required parameters -* **inputFilePattern** : The Cloud Storage location of the files you'd like to process. (Example: gs://your-bucket/your-files/*.gz). -* **outputDirectory** : The path and filename prefix for writing output files. Must end with a slash. DateTime formatting is used to parse directory path for date & time formatters. (Example: gs://your-bucket/decompressed/). -* **outputFailureFile** : The output file to write failures to during the decompression process. If there are no failures, the file will still be created but will be empty. The contents will be one line for each file which failed decompression in CSV format (Filename, Error). Note that this parameter will allow the pipeline to continue processing in the event of a failure. (Example: gs://your-bucket/decompressed/failed.csv). +* **inputFilePattern**: The Cloud Storage location of the files you'd like to process. For example, `gs://your-bucket/your-files/*.gz`. +* **outputDirectory**: The path and filename prefix for writing output files. Must end with a slash. DateTime formatting is used to parse directory path for date & time formatters. For example, `gs://your-bucket/decompressed/`. +* **outputFailureFile**: The output file to write failures to during the decompression process. If there are no failures, the file will still be created but will be empty. 
The contents will be one line for each file which failed decompression in CSV format (Filename, Error). Note that this parameter will allow the pipeline to continue processing in the event of a failure. For example, `gs://your-bucket/decompressed/failed.csv`. ### Optional parameters @@ -202,9 +202,9 @@ resource "google_dataflow_job" "bulk_decompress_gcs_files" { region = var.region temp_gcs_location = "gs://bucket-name-here/temp" parameters = { - inputFilePattern = "gs://your-bucket/your-files/*.gz" - outputDirectory = "gs://your-bucket/decompressed/" - outputFailureFile = "gs://your-bucket/decompressed/failed.csv" + inputFilePattern = "" + outputDirectory = "" + outputFailureFile = "" } } ``` diff --git a/v1/README_Cassandra_To_Cloud_Bigtable.md b/v1/README_Cassandra_To_Cloud_Bigtable.md index f52429a981..24f333bea8 100644 --- a/v1/README_Cassandra_To_Cloud_Bigtable.md +++ b/v1/README_Cassandra_To_Cloud_Bigtable.md @@ -23,21 +23,19 @@ on [Metadata Annotations](https://github.com/GoogleCloudPlatform/DataflowTemplat ### Required parameters -* **cassandraHosts** : The hosts of the Apache Cassandra nodes in a comma-separated list. -* **cassandraKeyspace** : The Apache Cassandra keyspace where the table is located. -* **cassandraTable** : The Apache Cassandra table to copy. -* **bigtableProjectId** : The Google Cloud project ID associated with the Bigtable instance. -* **bigtableInstanceId** : The ID of the Bigtable instance that the Apache Cassandra table is copied to. -* **bigtableTableId** : The name of the Bigtable table that the Apache Cassandra table is copied to. +* **cassandraHosts**: The hosts of the Apache Cassandra nodes in a comma-separated list. +* **cassandraKeyspace**: The Apache Cassandra keyspace where the table is located. +* **cassandraTable**: The Apache Cassandra table to copy. +* **bigtableProjectId**: The Google Cloud project ID associated with the Bigtable instance. +* **bigtableInstanceId**: The ID of the Bigtable instance that the Apache Cassandra table is copied to. +* **bigtableTableId**: The name of the Bigtable table that the Apache Cassandra table is copied to. ### Optional parameters -* **cassandraPort** : The TCP port to use to reach Apache Cassandra on the nodes. The default value is 9042. -* **defaultColumnFamily** : The name of the column family of the Bigtable table. The default value is default. -* **rowKeySeparator** : The separator used to build row-keys. The default value is '#'. -* **splitLargeRows** : The flag for enabling splitting of large rows into multiple MutateRows requests. Note that when a large row is split between multiple API calls, the updates to the row are not atomic. . -* **writetimeCassandraColumnSchema** : GCS path to schema to copy Cassandra writetimes to Bigtable. The command to generate this schema is ```cqlsh -e "select json * from system_schema.columns where keyspace_name='$CASSANDRA_KEYSPACE' and table_name='$CASSANDRA_TABLE'`" > column_schema.json```. Set $WRITETIME_CASSANDRA_COLUMN_SCHEMA to a GCS path, e.g. `gs://$BUCKET_NAME/column_schema.json`. Then upload the schema to GCS: `gcloud storage cp column_schema.json $WRITETIME_CASSANDRA_COLUMN_SCHEMA`. Requires Cassandra version 2.2 onwards for JSON support. -* **setZeroTimestamp** : The flag for setting Bigtable cell timestamp to 0 if Cassandra writetime is not present. The default behavior for when this flag is not set is to set the Bigtable cell timestamp as the template replication time, i.e. now. 
+* **cassandraPort**: The TCP port to use to reach Apache Cassandra on the nodes. The default value is `9042`. +* **defaultColumnFamily**: The name of the column family of the Bigtable table. The default value is `default`. +* **rowKeySeparator**: The separator used to build row-keys. The default value is `#`. +* **splitLargeRows**: The flag for enabling splitting of large rows into multiple MutateRows requests. Note that when a large row is split between multiple API calls, the updates to the row are not atomic. . diff --git a/v1/README_Cloud_BigQuery_to_Cloud_Datastore.md b/v1/README_Cloud_BigQuery_to_Cloud_Datastore.md index 93517b8105..f98873f127 100644 --- a/v1/README_Cloud_BigQuery_to_Cloud_Datastore.md +++ b/v1/README_Cloud_BigQuery_to_Cloud_Datastore.md @@ -15,17 +15,17 @@ on [Metadata Annotations](https://github.com/GoogleCloudPlatform/DataflowTemplat ### Required parameters -* **readQuery** : A BigQuery SQL query that extracts data from the source. For example, select * from dataset1.sample_table. -* **datastoreWriteProjectId** : The ID of the Google Cloud project to write the Datastore entities to. -* **errorWritePath** : The error log output file to use for write failures that occur during processing. (Example: gs://your-bucket/errors/). +* **readQuery**: A BigQuery SQL query that extracts data from the source. For example, `select * from dataset1.sample_table`. +* **datastoreWriteProjectId**: The ID of the Google Cloud project to write the Datastore entities to. +* **errorWritePath**: The error log output file to use for write failures that occur during processing. For example, `gs://your-bucket/errors/`. ### Optional parameters -* **readIdColumn** : Name of the BigQuery column storing the unique identifier of the row. -* **invalidOutputPath** : Cloud Storage path where to write BigQuery rows that cannot be converted to target entities. (Example: gs://your-bucket/your-path). -* **datastoreWriteEntityKind** : Datastore kind under which entities will be written in the output Google Cloud project. -* **datastoreWriteNamespace** : Datastore namespace under which entities will be written in the output Google Cloud project. -* **datastoreHintNumWorkers** : Hint for the expected number of workers in the Datastore ramp-up throttling step. Default is `500`. +* **readIdColumn**: Name of the BigQuery column storing the unique identifier of the row. +* **invalidOutputPath**: Cloud Storage path where to write BigQuery rows that cannot be converted to target entities. For example, `gs://your-bucket/your-path`. +* **datastoreWriteEntityKind**: Datastore kind under which entities will be written in the output Google Cloud project. +* **datastoreWriteNamespace**: Datastore namespace under which entities will be written in the output Google Cloud project. +* **datastoreHintNumWorkers**: Hint for the expected number of workers in the Datastore ramp-up throttling step. Defaults to `500`. 
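Every `(Example: ...)` to "For example, `...`" change in the READMEs throughout this PR follows from the `processDescriptions` change in `ImageSpecParameter.java` above. A minimal, runnable sketch of the new help-text assembly (the class and method here are illustrative stand-ins, not the real plugin code):

```java
/** Minimal sketch of the help-text formatting change; mirrors the ImageSpecParameter diff above. */
public class HelpTextFormatSketch {

  static String withExample(String helpText, String example) {
    if (example == null || example.isEmpty()) {
      return helpText;
    }
    // Before this PR: helpText + " (Example: " + example + ")"
    // After this PR: the example becomes a sentence with the value in backticks.
    return helpText + " For example, `" + example + "`";
  }

  public static void main(String[] args) {
    System.out.println(
        withExample(
            "The Cloud Storage location of the files you'd like to process.",
            "gs://your-bucket/your-files/*.txt"));
    // Prints the help text followed by: For example, `gs://your-bucket/your-files/*.txt`
  }
}
```

The trailing period on the rendered bullets comes from the `?ensure_ends_with(".")` call in `README-template.md`, not from this concatenation.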
@@ -213,9 +213,9 @@ resource "google_dataflow_job" "cloud_bigquery_to_cloud_datastore" { parameters = { readQuery = "" datastoreWriteProjectId = "" - errorWritePath = "gs://your-bucket/errors/" + errorWritePath = "" # readIdColumn = "" - # invalidOutputPath = "gs://your-bucket/your-path" + # invalidOutputPath = "" # datastoreWriteEntityKind = "" # datastoreWriteNamespace = "" # datastoreHintNumWorkers = "500" diff --git a/v1/README_Cloud_BigQuery_to_GCS_TensorFlow_Records.md b/v1/README_Cloud_BigQuery_to_GCS_TensorFlow_Records.md index 784bcaac9e..c13a6dafe9 100644 --- a/v1/README_Cloud_BigQuery_to_GCS_TensorFlow_Records.md +++ b/v1/README_Cloud_BigQuery_to_GCS_TensorFlow_Records.md @@ -22,17 +22,17 @@ on [Metadata Annotations](https://github.com/GoogleCloudPlatform/DataflowTemplat ### Required parameters -* **readQuery** : A BigQuery SQL query that extracts data from the source. For example, select * from dataset1.sample_table. -* **outputDirectory** : The top-level Cloud Storage path prefix to use when writing the training, testing, and validation TFRecord files. Subdirectories for resulting training, testing, and validation TFRecord files are automatically generated from `outputDirectory`. For example, `gs://mybucket/output/train` (Example: gs://mybucket/output). +* **readQuery**: A BigQuery SQL query that extracts data from the source. For example, `select * from dataset1.sample_table`. +* **outputDirectory**: The top-level Cloud Storage path prefix to use when writing the training, testing, and validation TFRecord files. Subdirectories for resulting training, testing, and validation TFRecord files are automatically generated from `outputDirectory`. For example, `gs://mybucket/output`. ### Optional parameters -* **readIdColumn** : Name of the BigQuery column storing the unique identifier of the row. -* **invalidOutputPath** : Cloud Storage path where to write BigQuery rows that cannot be converted to target entities. (Example: gs://your-bucket/your-path). -* **outputSuffix** : The file suffix for the training, testing, and validation TFRecord files that are written. The default value is `.tfrecord`. -* **trainingPercentage** : The percentage of query data allocated to training TFRecord files. The default value is 1, or 100%. -* **testingPercentage** : The percentage of query data allocated to testing TFRecord files. The default value is 0, or 0%. -* **validationPercentage** : The percentage of query data allocated to validation TFRecord files. The default value is 0, or 0%. +* **readIdColumn**: Name of the BigQuery column storing the unique identifier of the row. +* **invalidOutputPath**: Cloud Storage path where to write BigQuery rows that cannot be converted to target entities. For example, `gs://your-bucket/your-path`. +* **outputSuffix**: The file suffix for the training, testing, and validation TFRecord files that are written. The default value is `.tfrecord`. +* **trainingPercentage**: The percentage of query data allocated to training TFRecord files. The default value is `1`, or `100%`. +* **testingPercentage**: The percentage of query data allocated to testing TFRecord files. The default value is `0`, or `0%`. +* **validationPercentage**: The percentage of query data allocated to validation TFRecord files. The default value is `0`, or `0%`. 
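For the `README_Cloud_BigQuery_to_GCS_TensorFlow_Records.md` parameters above, one way to picture how `trainingPercentage`, `testingPercentage`, and `validationPercentage` interact is a per-record weighted assignment. This is only a sketch of the documented contract, assuming the three values sum to 1; it is not the template's actual partitioning code:

```java
import java.util.Random;

/** Illustrative three-way split driven by the documented percentage parameters. */
public class SplitSketch {
  enum Split { TRAIN, TEST, VALIDATION }

  // Documented defaults: trainingPercentage=1, testingPercentage=0, validationPercentage=0.
  static Split assign(double training, double testing, double validation, Random rng) {
    double r = rng.nextDouble(); // uniform in [0, 1)
    if (r < training) {
      return Split.TRAIN;
    } else if (r < training + testing) {
      return Split.TEST;
    }
    return Split.VALIDATION;
  }

  public static void main(String[] args) {
    Random rng = new Random(42);
    for (int i = 0; i < 5; i++) {
      // With the defaults, every record lands in TRAIN.
      System.out.println(assign(1.0, 0.0, 0.0, rng));
    }
  }
}
```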
@@ -219,9 +219,9 @@ resource "google_dataflow_job" "cloud_bigquery_to_gcs_tensorflow_records" { temp_gcs_location = "gs://bucket-name-here/temp" parameters = { readQuery = "" - outputDirectory = "gs://mybucket/output" + outputDirectory = "" # readIdColumn = "" - # invalidOutputPath = "gs://your-bucket/your-path" + # invalidOutputPath = "" # outputSuffix = ".tfrecord" # trainingPercentage = "1.0" # testingPercentage = "0.0" diff --git a/v1/README_Cloud_Bigtable_to_GCS_Avro.md b/v1/README_Cloud_Bigtable_to_GCS_Avro.md index 09d93b37b2..05043a9619 100644 --- a/v1/README_Cloud_Bigtable_to_GCS_Avro.md +++ b/v1/README_Cloud_Bigtable_to_GCS_Avro.md @@ -18,15 +18,15 @@ on [Metadata Annotations](https://github.com/GoogleCloudPlatform/DataflowTemplat ### Required parameters -* **bigtableProjectId** : The ID of the Google Cloud project that contains the Bigtable instance that you want to read data from. -* **bigtableInstanceId** : The ID of the Bigtable instance that contains the table. -* **bigtableTableId** : The ID of the Bigtable table to export. -* **outputDirectory** : The Cloud Storage path where data is written. (Example: gs://mybucket/somefolder). -* **filenamePrefix** : The prefix of the Avro filename. For example, `output-`. Defaults to: part. +* **bigtableProjectId**: The ID of the Google Cloud project that contains the Bigtable instance that you want to read data from. +* **bigtableInstanceId**: The ID of the Bigtable instance that contains the table. +* **bigtableTableId**: The ID of the Bigtable table to export. +* **outputDirectory**: The Cloud Storage path where data is written. For example, `gs://mybucket/somefolder`. +* **filenamePrefix**: The prefix of the Avro filename. For example, `output-`. Defaults to: part. ### Optional parameters -* **bigtableAppProfileId** : The ID of the Bigtable application profile to use for the export. If you don't specify an app profile, Bigtable uses the instance's default app profile: https://cloud.google.com/bigtable/docs/app-profiles#default-app-profile. +* **bigtableAppProfileId**: The ID of the Bigtable application profile to use for the export. If you don't specify an app profile, Bigtable uses the instance's default app profile: https://cloud.google.com/bigtable/docs/app-profiles#default-app-profile. @@ -209,7 +209,7 @@ resource "google_dataflow_job" "cloud_bigtable_to_gcs_avro" { bigtableProjectId = "" bigtableInstanceId = "" bigtableTableId = "" - outputDirectory = "gs://mybucket/somefolder" + outputDirectory = "" filenamePrefix = "part" # bigtableAppProfileId = "default" } diff --git a/v1/README_Cloud_Bigtable_to_GCS_Json.md b/v1/README_Cloud_Bigtable_to_GCS_Json.md index ec793ebf20..f9e132fbfc 100644 --- a/v1/README_Cloud_Bigtable_to_GCS_Json.md +++ b/v1/README_Cloud_Bigtable_to_GCS_Json.md @@ -17,17 +17,17 @@ on [Metadata Annotations](https://github.com/GoogleCloudPlatform/DataflowTemplat ### Required parameters -* **bigtableProjectId** : The ID for the Google Cloud project that contains the Bigtable instance that you want to read data from. -* **bigtableInstanceId** : The ID of the Bigtable instance that contains the table. -* **bigtableTableId** : The ID of the Bigtable table to read from. -* **outputDirectory** : The Cloud Storage path where the output JSON files are stored. (Example: gs://your-bucket/your-path/). +* **bigtableProjectId**: The ID for the Google Cloud project that contains the Bigtable instance that you want to read data from. +* **bigtableInstanceId**: The ID of the Bigtable instance that contains the table. 
+* **bigtableTableId**: The ID of the Bigtable table to read from. +* **filenamePrefix**: The prefix of the JSON file name. For example, `table1-`. If no value is provided, defaults to `part`. ### Optional parameters -* **filenamePrefix** : The prefix of the JSON file name. For example, "table1-". If no value is provided, defaults to `part`. -* **userOption** : Possible values are `FLATTEN` or `NONE`. `FLATTEN` flattens the row to the single level. `NONE` stores the whole row as a JSON string. Defaults to `NONE`. -* **columnsAliases** : A comma-separated list of columns that are required for the Vertex AI Vector Search index. The columns `id` and `embedding` are required for Vertex AI Vector Search. You can use the notation `fromfamily:fromcolumn;to`. For example, if the columns are `rowkey` and `cf:my_embedding`, where `rowkey` has a different name than the embedding column, specify `cf:my_embedding;embedding` and, `rowkey;id`. Only use this option when the value for `userOption` is `FLATTEN`. -* **bigtableAppProfileId** : The ID of the Bigtable application profile to use for the export. If you don't specify an app profile, Bigtable uses the instance's default app profile: https://cloud.google.com/bigtable/docs/app-profiles#default-app-profile. +* **outputDirectory**: The Cloud Storage path where the output JSON files are stored. For example, `gs://your-bucket/your-path/`. +* **userOption**: Possible values are `FLATTEN` or `NONE`. `FLATTEN` flattens the row to the single level. `NONE` stores the whole row as a JSON string. Defaults to `NONE`. +* **columnsAliases**: A comma-separated list of columns that are required for the Vertex AI Vector Search index. The columns `id` and `embedding` are required for Vertex AI Vector Search. You can use the notation `fromfamily:fromcolumn;to`. For example, if the columns are `rowkey` and `cf:my_embedding`, where `rowkey` has a different name than the embedding column, specify `cf:my_embedding;embedding` and, `rowkey;id`. Only use this option when the value for `userOption` is `FLATTEN`. +* **bigtableAppProfileId**: The ID of the Bigtable application profile to use for the export. If you don't specify an app profile, Bigtable uses the instance's default app profile: https://cloud.google.com/bigtable/docs/app-profiles#default-app-profile. @@ -216,8 +216,8 @@ resource "google_dataflow_job" "cloud_bigtable_to_gcs_json" { bigtableProjectId = "" bigtableInstanceId = "" bigtableTableId = "" - outputDirectory = "gs://your-bucket/your-path/" - # filenamePrefix = "part" + filenamePrefix = "part" + # outputDirectory = "" # userOption = "NONE" # columnsAliases = "" # bigtableAppProfileId = "default" diff --git a/v1/README_Cloud_Bigtable_to_GCS_Parquet.md b/v1/README_Cloud_Bigtable_to_GCS_Parquet.md index cd005cd87d..b813769be2 100644 --- a/v1/README_Cloud_Bigtable_to_GCS_Parquet.md +++ b/v1/README_Cloud_Bigtable_to_GCS_Parquet.md @@ -18,16 +18,16 @@ on [Metadata Annotations](https://github.com/GoogleCloudPlatform/DataflowTemplat ### Required parameters -* **bigtableProjectId** : The ID of the Google Cloud project that contains the Cloud Bigtable instance that you want to read data from. -* **bigtableInstanceId** : The ID of the Cloud Bigtable instance that contains the table. -* **bigtableTableId** : The ID of the Cloud Bigtable table to export. -* **outputDirectory** : The path and filename prefix for writing output files. Must end with a slash. DateTime formatting is used to parse the directory path for date and time formatters. 
For example: gs://your-bucket/your-path. -* **filenamePrefix** : The prefix of the Parquet file name. For example, "table1-". Defaults to: part. +* **bigtableProjectId**: The ID of the Google Cloud project that contains the Cloud Bigtable instance that you want to read data from. +* **bigtableInstanceId**: The ID of the Cloud Bigtable instance that contains the table. +* **bigtableTableId**: The ID of the Cloud Bigtable table to export. +* **outputDirectory**: The path and filename prefix for writing output files. Must end with a slash. DateTime formatting is used to parse the directory path for date and time formatters. For example: `gs://your-bucket/your-path`. +* **filenamePrefix**: The prefix of the Parquet file name. For example, `table1-`. Defaults to: `part`. ### Optional parameters -* **numShards** : The maximum number of output shards produced when writing. A higher number of shards means higher throughput for writing to Cloud Storage, but potentially higher data aggregation cost across shards when processing output Cloud Storage files. The default value is decided by Dataflow. -* **bigtableAppProfileId** : The ID of the Bigtable application profile to use for the export. If you don't specify an app profile, Bigtable uses the instance's default app profile: https://cloud.google.com/bigtable/docs/app-profiles#default-app-profile. +* **numShards**: The maximum number of output shards produced when writing. A higher number of shards means higher throughput for writing to Cloud Storage, but potentially higher data aggregation cost across shards when processing output Cloud Storage files. The default value is decided by Dataflow. +* **bigtableAppProfileId**: The ID of the Bigtable application profile to use for the export. If you don't specify an app profile, Bigtable uses the instance's default app profile: https://cloud.google.com/bigtable/docs/app-profiles#default-app-profile. diff --git a/v1/README_Cloud_Bigtable_to_GCS_SequenceFile.md b/v1/README_Cloud_Bigtable_to_GCS_SequenceFile.md index 47c4c7b4ea..86e5072e87 100644 --- a/v1/README_Cloud_Bigtable_to_GCS_SequenceFile.md +++ b/v1/README_Cloud_Bigtable_to_GCS_SequenceFile.md @@ -19,19 +19,19 @@ on [Metadata Annotations](https://github.com/GoogleCloudPlatform/DataflowTemplat ### Required parameters -* **bigtableProject** : The ID of the Google Cloud project that contains the Bigtable instance that you want to read data from. -* **bigtableInstanceId** : The ID of the Bigtable instance that contains the table. -* **bigtableTableId** : The ID of the Bigtable table to export. -* **destinationPath** : The Cloud Storage path where data is written. (Example: gs://your-bucket/your-path/). -* **filenamePrefix** : The prefix of the SequenceFile filename. (Example: output-). +* **bigtableProject**: The ID of the Google Cloud project that contains the Bigtable instance that you want to read data from. +* **bigtableInstanceId**: The ID of the Bigtable instance that contains the table. +* **bigtableTableId**: The ID of the Bigtable table to export. +* **destinationPath**: The Cloud Storage path where data is written. For example, `gs://your-bucket/your-path/`. +* **filenamePrefix**: The prefix of the SequenceFile filename. For example, `output-`. ### Optional parameters -* **bigtableAppProfileId** : The ID of the Bigtable application profile to use for the export. If you don't specify an app profile, Bigtable uses the instance's default app profile: https://cloud.google.com/bigtable/docs/app-profiles#default-app-profile. 
-* **bigtableStartRow** : The row where to start the export from, defaults to the first row. -* **bigtableStopRow** : The row where to stop the export, defaults to the last row. -* **bigtableMaxVersions** : Maximum number of cell versions. Defaults to: 2147483647. -* **bigtableFilter** : Filter string. See: http://hbase.apache.org/book.html#thrift. Defaults to empty. +* **bigtableAppProfileId**: The ID of the Bigtable application profile to use for the export. If you don't specify an app profile, Bigtable uses the instance's default app profile: https://cloud.google.com/bigtable/docs/app-profiles#default-app-profile. +* **bigtableStartRow**: The row where to start the export from, defaults to the first row. +* **bigtableStopRow**: The row where to stop the export, defaults to the last row. +* **bigtableMaxVersions**: Maximum number of cell versions. Defaults to: 2147483647. +* **bigtableFilter**: Filter string. See: http://hbase.apache.org/book.html#thrift. Defaults to empty. @@ -226,8 +226,8 @@ resource "google_dataflow_job" "cloud_bigtable_to_gcs_sequencefile" { bigtableProject = "" bigtableInstanceId = "" bigtableTableId = "" - destinationPath = "gs://your-bucket/your-path/" - filenamePrefix = "output-" + destinationPath = "" + filenamePrefix = "" # bigtableAppProfileId = "" # bigtableStartRow = "" # bigtableStopRow = "" diff --git a/v1/README_Cloud_Bigtable_to_Vector_Embeddings.md b/v1/README_Cloud_Bigtable_to_Vector_Embeddings.md index dc31ca96bc..adc8e75d0e 100644 --- a/v1/README_Cloud_Bigtable_to_Vector_Embeddings.md +++ b/v1/README_Cloud_Bigtable_to_Vector_Embeddings.md @@ -18,24 +18,24 @@ on [Metadata Annotations](https://github.com/GoogleCloudPlatform/DataflowTemplat ### Required parameters -* **bigtableProjectId** : The ID for the Google Cloud project that contains the Bigtable instance that you want to read data from. -* **bigtableInstanceId** : The ID of the Bigtable instance that contains the table. -* **bigtableTableId** : The ID of the Bigtable table to read from. -* **outputDirectory** : The Cloud Storage path where the output JSON files are stored. (Example: gs://your-bucket/your-path/). -* **idColumn** : The fully qualified column name where the ID is stored. In the format cf:col or _key. -* **embeddingColumn** : The fully qualified column name where the embeddings are stored. In the format cf:col or _key. +* **bigtableProjectId**: The ID for the Google Cloud project that contains the Bigtable instance that you want to read data from. +* **bigtableInstanceId**: The ID of the Bigtable instance that contains the table. +* **bigtableTableId**: The ID of the Bigtable table to read from. +* **filenamePrefix**: The prefix of the JSON filename. For example: `table1-`. If no value is provided, defaults to `part`. +* **idColumn**: The fully qualified column name where the ID is stored. In the format `cf:col` or `_key`. +* **embeddingColumn**: The fully qualified column name where the embeddings are stored. In the format `cf:col` or `_key`. ### Optional parameters -* **filenamePrefix** : The prefix of the JSON filename. For example: "table1-". If no value is provided, defaults to "part". -* **crowdingTagColumn** : The fully qualified column name where the crowding tag is stored. In the format cf:col or _key. -* **embeddingByteSize** : The byte size of each entry in the embeddings array. For float, use the value 4. For double, use the value 8. Defaults to 4. 
-* **allowRestrictsMappings** : The comma-separated, fully qualified column names for the columns to use as the allow restricts, with their aliases. In the format cf:col->alias. -* **denyRestrictsMappings** : The comma-separated, fully qualified column names for the columns to use as the deny restricts, with their aliases. In the format cf:col->alias. -* **intNumericRestrictsMappings** : The comma-separated, fully qualified column names of the columns to use as integer numeric_restricts, with their aliases. In the format cf:col->alias. -* **floatNumericRestrictsMappings** : The comma-separated, fully qualified column names of the columns to use as float (4 bytes) numeric_restricts, with their aliases. In the format cf:col->alias. -* **doubleNumericRestrictsMappings** : The comma-separated, fully qualified column names of the columns to use as double (8 bytes) numeric_restricts, with their aliases. In the format cf:col->alias. -* **bigtableAppProfileId** : The ID of the Cloud Bigtable app profile to be used for the export. Defaults to: default. +* **outputDirectory**: The Cloud Storage path where the output JSON files are stored. For example, `gs://your-bucket/your-path/`. +* **crowdingTagColumn**: The fully qualified column name where the crowding tag is stored. In the format `cf:col` or `_key`. +* **embeddingByteSize**: The byte size of each entry in the embeddings array. For float, use the value `4`. For double, use the value `8`. Defaults to `4`. +* **allowRestrictsMappings**: The comma-separated, fully qualified column names for the columns to use as the allow restricts, with their aliases. In the format `cf:col->alias`. +* **denyRestrictsMappings**: The comma-separated, fully qualified column names for the columns to use as the deny restricts, with their aliases. In the format `cf:col->alias`. +* **intNumericRestrictsMappings**: The comma-separated, fully qualified column names of the columns to use as integer numeric_restricts, with their aliases. In the format `cf:col->alias`. +* **floatNumericRestrictsMappings**: The comma-separated, fully qualified column names of the columns to use as float (4 bytes) numeric_restricts, with their aliases. In the format `cf:col->alias`. +* **doubleNumericRestrictsMappings**: The comma-separated, fully qualified column names of the columns to use as double (8 bytes) numeric_restricts, with their aliases. In the format `cf:col->alias`. +* **bigtableAppProfileId**: The ID of the Cloud Bigtable app profile to be used for the export. Defaults to: default. @@ -248,7 +248,7 @@ resource "google_dataflow_job" "cloud_bigtable_to_vector_embeddings" { outputDirectory = "gs://your-bucket/your-path/" idColumn = "" embeddingColumn = "" - # filenamePrefix = "part" + # outputDirectory = "" # crowdingTagColumn = "" # embeddingByteSize = "4" # allowRestrictsMappings = "" diff --git a/v1/README_Cloud_PubSub_to_Avro.md b/v1/README_Cloud_PubSub_to_Avro.md index 1ba243aa6b..feb93d6ef7 100644 --- a/v1/README_Cloud_PubSub_to_Avro.md +++ b/v1/README_Cloud_PubSub_to_Avro.md @@ -18,20 +18,20 @@ on [Metadata Annotations](https://github.com/GoogleCloudPlatform/DataflowTemplat ### Required parameters -* **inputTopic** : The Pub/Sub topic to subscribe to for message consumption. The topic name must be in the format projects//topics/. -* **outputDirectory** : The output directory where output Avro files are archived. Must contain / at the end. For example: gs://example-bucket/example-directory/. -* **avroTempDirectory** : The directory for temporary Avro files. 
Must contain / at the end. For example: gs://example-bucket/example-directory/. +* **inputTopic**: The Pub/Sub topic to subscribe to for message consumption. The topic name must be in the format `projects//topics/`. +* **outputDirectory**: The output directory where output Avro files are archived. Must contain `/` at the end. For example: `gs://example-bucket/example-directory/`. +* **avroTempDirectory**: The directory for temporary Avro files. Must contain `/` at the end. For example: `gs://example-bucket/example-directory/`. ### Optional parameters -* **outputFilenamePrefix** : The output filename prefix for the Avro files. Defaults to: output. -* **outputFilenameSuffix** : The output filename suffix for the Avro files. Defaults to empty. -* **outputShardTemplate** : The shard template defines the dynamic portion of each windowed file. By default, the pipeline uses a single shard for output to the file system within each window. Therefore, all data outputs into a single file per window. The `outputShardTemplate` defaults `to W-P-SS-of-NN`, where `W` is the window date range, `P` is the pane info, `S` is the shard number, and `N` is the number of shards. In case of a single file, the `SS-of-NN` portion of the `outputShardTemplate` is `00-of-01`. -* **yearPattern** : Pattern for formatting the year. Must be one or more of `y` or `Y`. Case makes no difference in the year. Optionally, wrap the pattern with characters that aren't alphanumeric or the directory ('/') character. Defaults to `YYYY`. -* **monthPattern** : Pattern for formatting the month. Must be one or more of the `M` character. Optionally, wrap the pattern with characters that aren't alphanumeric or the directory ('/') character. Defaults to `MM`. -* **dayPattern** : Pattern for formatting the day. Must be one or more of `d` for day of month or `D` for day of year. Optionally, wrap the pattern with characters that aren't alphanumeric or the directory ('/') character. Defaults to `dd`. -* **hourPattern** : Pattern for formatting the hour. Must be one or more of the `H` character. Optionally, wrap the pattern with characters that aren't alphanumeric or the directory ('/') character. Defaults to `HH`. -* **minutePattern** : Pattern for formatting the minute. Must be one or more of the `m` character. Optionally, wrap the pattern with characters that aren't alphanumeric or the directory ('/') character. Defaults to `mm`. +* **outputFilenamePrefix**: The output filename prefix for the Avro files. Defaults to: output. +* **outputFilenameSuffix**: The output filename suffix for the Avro files. Defaults to empty. +* **outputShardTemplate**: The shard template defines the dynamic portion of each windowed file. By default, the pipeline uses a single shard for output to the file system within each window. Therefore, all data outputs into a single file per window. The `outputShardTemplate` defaults `to W-P-SS-of-NN`, where `W` is the window date range, `P` is the pane info, `S` is the shard number, and `N` is the number of shards. In case of a single file, the `SS-of-NN` portion of the `outputShardTemplate` is `00-of-01`. +* **yearPattern**: Pattern for formatting the year. Must be one or more of `y` or `Y`. Case makes no difference in the year. Optionally, wrap the pattern with characters that aren't alphanumeric or the directory (`/`) character. Defaults to `YYYY`. +* **monthPattern**: Pattern for formatting the month. Must be one or more of the `M` character. 
Optionally, wrap the pattern with characters that aren't alphanumeric or the directory (`/`) character. Defaults to `MM`. +* **dayPattern**: Pattern for formatting the day. Must be one or more of `d` for day of month or `D` for day of year. Optionally, wrap the pattern with characters that aren't alphanumeric or the directory (`/`) character. Defaults to `dd`. +* **hourPattern**: Pattern for formatting the hour. Must be one or more of the `H` character. Optionally, wrap the pattern with characters that aren't alphanumeric or the directory (`/`) character. Defaults to `HH`. +* **minutePattern**: Pattern for formatting the minute. Must be one or more of the `m` character. Optionally, wrap the pattern with characters that aren't alphanumeric or the directory (`/`) character. Defaults to `mm`. diff --git a/v1/README_Cloud_PubSub_to_Cloud_PubSub.md b/v1/README_Cloud_PubSub_to_Cloud_PubSub.md index 6466412598..6158092673 100644 --- a/v1/README_Cloud_PubSub_to_Cloud_PubSub.md +++ b/v1/README_Cloud_PubSub_to_Cloud_PubSub.md @@ -21,13 +21,13 @@ on [Metadata Annotations](https://github.com/GoogleCloudPlatform/DataflowTemplat ### Required parameters -* **inputSubscription** : The Pub/Sub subscription to read the input from. (Example: projects/your-project-id/subscriptions/your-subscription-name). -* **outputTopic** : The Pub/Sub topic to write the output to. (Example: projects/your-project-id/topics/your-topic-name). +* **inputSubscription**: The Pub/Sub subscription to read the input from. For example, `projects/your-project-id/subscriptions/your-subscription-name`. +* **outputTopic**: The Pub/Sub topic to write the output to. For example, `projects/your-project-id/topics/your-topic-name`. ### Optional parameters -* **filterKey** : The attribute key to use to filter events. No filters are applied if `filterKey` is not specified. -* **filterValue** : The attribute value to use to filter events when a `filterKey` is provided. By default, a null `filterValue` is used. +* **filterKey**: The attribute key to use to filter events. No filters are applied if `filterKey` is not specified. +* **filterValue**: The attribute value to use to filter events when a `filterKey` is provided. By default, a null `filterValue` is used. @@ -201,8 +201,8 @@ resource "google_dataflow_job" "cloud_pubsub_to_cloud_pubsub" { region = var.region temp_gcs_location = "gs://bucket-name-here/temp" parameters = { - inputSubscription = "projects/your-project-id/subscriptions/your-subscription-name" - outputTopic = "projects/your-project-id/topics/your-topic-name" + inputSubscription = "" + outputTopic = "" # filterKey = "" # filterValue = "" } diff --git a/v1/README_Cloud_PubSub_to_Datadog.md b/v1/README_Cloud_PubSub_to_Datadog.md index 5fc7099e00..4ac0c6597f 100644 --- a/v1/README_Cloud_PubSub_to_Datadog.md +++ b/v1/README_Cloud_PubSub_to_Datadog.md @@ -4,9 +4,7 @@ Pub/Sub to Datadog template The Pub/Sub to Datadog template is a streaming pipeline that reads messages from a Pub/Sub subscription and writes the message payload to Datadog by using a Datadog endpoint. The most common use case for this template is to export log -files to Datadog. For more information check out Datadog's -log collection process. +files to Datadog. Before writing to Datadog, you can apply a JavaScript user-defined function to the message payload. 
Any messages that experience processing failures are @@ -33,22 +31,22 @@ on [Metadata Annotations](https://github.com/GoogleCloudPlatform/DataflowTemplat ### Required parameters -* **inputSubscription** : The Pub/Sub subscription to read the input from. (Example: projects/your-project-id/subscriptions/your-subscription-name). -* **url** : The Datadog Logs API URL. This URL must be routable from the VPC that the pipeline runs in. See Send logs (https://docs.datadoghq.com/api/latest/logs/#send-logs) in the Datadog documentation for more information. (Example: https://http-intake.logs.datadoghq.com). -* **outputDeadletterTopic** : The Pub/Sub topic to forward undeliverable messages to. For example, projects//topics/. +* **inputSubscription**: The Pub/Sub subscription to read the input from. For example, `projects/your-project-id/subscriptions/your-subscription-name`. +* **url**: The Datadog Logs API URL. This URL must be routable from the VPC that the pipeline runs in. See Send logs (https://docs.datadoghq.com/api/latest/logs/#send-logs) in the Datadog documentation for more information. For example, `https://http-intake.logs.datadoghq.com`. +* **outputDeadletterTopic**: The Pub/Sub topic to forward undeliverable messages to. For example, `projects//topics/`. ### Optional parameters -* **apiKey** : The Datadog API key. You must provide this value if the `apiKeySource` is set to `PLAINTEXT` or `KMS`. For more information, see API and Application Keys (https://docs.datadoghq.com/account_management/api-app-keys/) in the Datadog documentation. -* **batchCount** : The batch size for sending multiple events to Datadog. The default is `1` (no batching). -* **parallelism** : The maximum number of parallel requests. The default is `1` (no parallelism). -* **includePubsubMessage** : Whether to include the full Pub/Sub message in the payload. The default is `true` (all elements, including the data element, are included in the payload). -* **apiKeyKMSEncryptionKey** : The Cloud KMS key to use to decrypt the API Key. You must provide this parameter if the `apiKeySource` is set to `KMS`. If the Cloud KMS key is provided, you must pass in an encrypted API Key. (Example: projects/your-project-id/locations/global/keyRings/your-keyring/cryptoKeys/your-key-name). -* **apiKeySecretId** : The Secret Manager secret ID for the API Key. You must provide this parameter if the `apiKeySource` is set to `SECRET_MANAGER`. (Example: projects/your-project-id/secrets/your-secret/versions/your-secret-version). -* **apiKeySource** : The source of the API key. The following values are supported: `PLAINTEXT`, `KMS`, and `SECRET_MANAGER`. You must provide this parameter if you're using Secret Manager. If `apiKeySource` is set to `KMS`, you must also provide `apiKeyKMSEncryptionKey` and encrypted `API Key`. If `apiKeySource` is set to `SECRET_MANAGER`, you must also provide `apiKeySecretId`. If `apiKeySource` is set to `PLAINTEXT`, you must also provide `apiKey`. -* **javascriptTextTransformGcsPath** : The Cloud Storage URI of the .js file that defines the JavaScript user-defined function (UDF) to use. For example, `gs://my-bucket/my-udfs/my_file.js`. -* **javascriptTextTransformFunctionName** : The name of the JavaScript user-defined function (UDF) to use. For example, if your JavaScript function code is `myTransform(inJson) { /*...do stuff...*/ }`, then the function name is `myTransform`. For sample JavaScript UDFs, see UDF Examples (https://github.com/GoogleCloudPlatform/DataflowTemplates#udf-examples). 
-* **javascriptTextTransformReloadIntervalMinutes** : Define the interval that workers may check for JavaScript UDF changes to reload the files. Defaults to: 0. +* **apiKey**: The Datadog API key. You must provide this value if the `apiKeySource` is set to `PLAINTEXT` or `KMS`. For more information, see API and Application Keys (https://docs.datadoghq.com/account_management/api-app-keys/) in the Datadog documentation. +* **batchCount**: The batch size for sending multiple events to Datadog. The default is `1` (no batching). +* **parallelism**: The maximum number of parallel requests. The default is `1` (no parallelism). +* **includePubsubMessage**: Whether to include the full Pub/Sub message in the payload. The default is `true` (all elements, including the data element, are included in the payload). +* **apiKeyKMSEncryptionKey**: The Cloud KMS key to use to decrypt the API Key. You must provide this parameter if the `apiKeySource` is set to `KMS`. If the Cloud KMS key is provided, you must pass in an encrypted API Key. For example, `projects/your-project-id/locations/global/keyRings/your-keyring/cryptoKeys/your-key-name`. +* **apiKeySecretId**: The Secret Manager secret ID for the API Key. You must provide this parameter if the `apiKeySource` is set to `SECRET_MANAGER`. For example, `projects/your-project-id/secrets/your-secret/versions/your-secret-version`. +* **apiKeySource**: The source of the API key. The following values are supported: `PLAINTEXT`, `KMS`, and `SECRET_MANAGER`. You must provide this parameter if you're using Secret Manager. If `apiKeySource` is set to `KMS`, you must also provide `apiKeyKMSEncryptionKey` and encrypted `API Key`. If `apiKeySource` is set to `SECRET_MANAGER`, you must also provide `apiKeySecretId`. If `apiKeySource` is set to `PLAINTEXT`, you must also provide `apiKey`. +* **javascriptTextTransformGcsPath**: The Cloud Storage URI of the .js file that defines the JavaScript user-defined function (UDF) to use. For example, `gs://my-bucket/my-udfs/my_file.js`. +* **javascriptTextTransformFunctionName**: The name of the JavaScript user-defined function (UDF) to use. For example, if your JavaScript function code is `myTransform(inJson) { /*...do stuff...*/ }`, then the function name is `myTransform`. For sample JavaScript UDFs, see UDF Examples (https://github.com/GoogleCloudPlatform/DataflowTemplates#udf-examples). +* **javascriptTextTransformReloadIntervalMinutes**: Define the interval that workers may check for JavaScript UDF changes to reload the files. Defaults to: 0. 
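The `apiKeySource` bullets above define a precedence contract between `apiKey`, `apiKeyKMSEncryptionKey`, and `apiKeySecretId`. A hedged sketch of that contract as documented (the parameter names are taken from the README; the class itself is illustrative, not part of the template):

```java
/** Sketch of the documented apiKeySource contract for the Pub/Sub to Datadog template. */
public class ApiKeySourceCheck {

  static void validate(String apiKeySource, String apiKey, String kmsKey, String secretId) {
    // Mirrors the bullets above; the unset/default case is intentionally not covered here.
    switch (apiKeySource) {
      case "PLAINTEXT":
        require(apiKey != null, "apiKey is required when apiKeySource=PLAINTEXT");
        break;
      case "KMS":
        require(kmsKey != null, "apiKeyKMSEncryptionKey is required when apiKeySource=KMS");
        require(apiKey != null, "an encrypted apiKey is required when apiKeySource=KMS");
        break;
      case "SECRET_MANAGER":
        require(secretId != null, "apiKeySecretId is required when apiKeySource=SECRET_MANAGER");
        break;
      default:
        throw new IllegalArgumentException("Unsupported apiKeySource: " + apiKeySource);
    }
  }

  private static void require(boolean condition, String message) {
    if (!condition) {
      throw new IllegalArgumentException(message);
    }
  }

  public static void main(String[] args) {
    validate("SECRET_MANAGER", null, null,
        "projects/your-project-id/secrets/your-secret/versions/your-secret-version");
    System.out.println("SECRET_MANAGER configuration looks consistent");
  }
}
```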
## User-Defined functions (UDFs) @@ -259,15 +257,15 @@ resource "google_dataflow_job" "cloud_pubsub_to_datadog" { region = var.region temp_gcs_location = "gs://bucket-name-here/temp" parameters = { - inputSubscription = "projects/your-project-id/subscriptions/your-subscription-name" - url = "https://http-intake.logs.datadoghq.com" + inputSubscription = "" + url = "" outputDeadletterTopic = "" # apiKey = "" # batchCount = "" # parallelism = "" # includePubsubMessage = "true" - # apiKeyKMSEncryptionKey = "projects/your-project-id/locations/global/keyRings/your-keyring/cryptoKeys/your-key-name" - # apiKeySecretId = "projects/your-project-id/secrets/your-secret/versions/your-secret-version" + # apiKeyKMSEncryptionKey = "" + # apiKeySecretId = "" # apiKeySource = "" # javascriptTextTransformGcsPath = "" # javascriptTextTransformFunctionName = "" diff --git a/v1/README_Cloud_PubSub_to_GCS_Text.md b/v1/README_Cloud_PubSub_to_GCS_Text.md index 967c31dd2a..dab723395f 100644 --- a/v1/README_Cloud_PubSub_to_GCS_Text.md +++ b/v1/README_Cloud_PubSub_to_GCS_Text.md @@ -19,20 +19,20 @@ on [Metadata Annotations](https://github.com/GoogleCloudPlatform/DataflowTemplat ### Required parameters -* **outputDirectory** : The path and filename prefix for writing output files. For example, `gs://bucket-name/path/`. This value must end in a slash. -* **outputFilenamePrefix** : The prefix to place on each windowed file. For example, `output-`. Defaults to: output. +* **outputDirectory**: The path and filename prefix for writing output files. For example, `gs://bucket-name/path/`. This value must end in a slash. +* **outputFilenamePrefix**: The prefix to place on each windowed file. For example, `output-`. Defaults to: output. ### Optional parameters -* **inputTopic** : The Pub/Sub topic to read the input from. The topic name should be in the format `projects//topics/`. -* **userTempLocation** : The user provided directory to output temporary files to. Must end with a slash. -* **outputFilenameSuffix** : The suffix to place on each windowed file. Typically a file extension such as `.txt` or `.csv`. Defaults to empty. -* **outputShardTemplate** : The shard template defines the dynamic portion of each windowed file. By default, the pipeline uses a single shard for output to the file system within each window. Therefore, all data outputs into a single file per window. The `outputShardTemplate` defaults `to W-P-SS-of-NN`, where `W` is the window date range, `P` is the pane info, `S` is the shard number, and `N` is the number of shards. In case of a single file, the `SS-of-NN` portion of the `outputShardTemplate` is `00-of-01`. -* **yearPattern** : Pattern for formatting the year. Must be one or more of `y` or `Y`. Case makes no difference in the year. Optionally, wrap the pattern with characters that aren't alphanumeric or the directory ('/') character. Defaults to `YYYY`. -* **monthPattern** : Pattern for formatting the month. Must be one or more of the `M` character. Optionally, wrap the pattern with characters that aren't alphanumeric or the directory ('/') character. Defaults to `MM`. -* **dayPattern** : Pattern for formatting the day. Must be one or more of `d` for day of month or `D` for day of year. Optionally, wrap the pattern with characters that aren't alphanumeric or the directory ('/') character. Defaults to `dd`. -* **hourPattern** : Pattern for formatting the hour. Must be one or more of the `H` character. 
Optionally, wrap the pattern with characters that aren't alphanumeric or the directory ('/') character. Defaults to `HH`. -* **minutePattern** : Pattern for formatting the minute. Must be one or more of the `m` character. Optionally, wrap the pattern with characters that aren't alphanumeric or the directory ('/') character. Defaults to `mm`. +* **inputTopic**: The Pub/Sub topic to read the input from. The topic name should be in the format `projects//topics/`. +* **userTempLocation**: The user provided directory to output temporary files to. Must end with a slash. +* **outputFilenameSuffix**: The suffix to place on each windowed file. Typically a file extension such as `.txt` or `.csv`. Defaults to empty. +* **outputShardTemplate**: The shard template defines the dynamic portion of each windowed file. By default, the pipeline uses a single shard for output to the file system within each window. Therefore, all data outputs into a single file per window. The `outputShardTemplate` defaults `to W-P-SS-of-NN`, where `W` is the window date range, `P` is the pane info, `S` is the shard number, and `N` is the number of shards. In case of a single file, the `SS-of-NN` portion of the `outputShardTemplate` is `00-of-01`. +* **yearPattern**: Pattern for formatting the year. Must be one or more of `y` or `Y`. Case makes no difference in the year. Optionally, wrap the pattern with characters that aren't alphanumeric or the directory (`/`) character. Defaults to `YYYY`. +* **monthPattern**: Pattern for formatting the month. Must be one or more of the `M` character. Optionally, wrap the pattern with characters that aren't alphanumeric or the directory (`/`) character. Defaults to `MM`. +* **dayPattern**: Pattern for formatting the day. Must be one or more of `d` for day of month or `D` for day of year. Optionally, wrap the pattern with characters that aren't alphanumeric or the directory (`/`) character. Defaults to `dd`. +* **hourPattern**: Pattern for formatting the hour. Must be one or more of the `H` character. Optionally, wrap the pattern with characters that aren't alphanumeric or the directory (`/`) character. Defaults to `HH`. +* **minutePattern**: Pattern for formatting the minute. Must be one or more of the `m` character. Optionally, wrap the pattern with characters that aren't alphanumeric or the directory (`/`) character. Defaults to `mm`. diff --git a/v1/README_Cloud_PubSub_to_Splunk.md b/v1/README_Cloud_PubSub_to_Splunk.md index 45567991cf..c3bceeb015 100644 --- a/v1/README_Cloud_PubSub_to_Splunk.md +++ b/v1/README_Cloud_PubSub_to_Splunk.md @@ -33,26 +33,26 @@ on [Metadata Annotations](https://github.com/GoogleCloudPlatform/DataflowTemplat ### Required parameters -* **inputSubscription** : The Pub/Sub subscription to read the input from. (Example: projects/your-project-id/subscriptions/your-subscription-name). -* **url** : The Splunk HEC URL. The URL must be routable from the VPC that the pipeline runs in. (Example: https://splunk-hec-host:8088). -* **outputDeadletterTopic** : The Pub/Sub topic to forward undeliverable messages to. For example, projects//topics/. +* **inputSubscription**: The Pub/Sub subscription to read the input from. For example, `projects/your-project-id/subscriptions/your-subscription-name`. +* **url**: The Splunk HEC URL. The URL must be routable from the VPC that the pipeline runs in. For example, `https://splunk-hec-host:8088`. +* **outputDeadletterTopic**: The Pub/Sub topic to forward undeliverable messages to. For example, `projects//topics/`. 
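For the `README_Cloud_PubSub_to_GCS_Text.md` patterns above, the defaults `YYYY`, `MM`, `dd`, `HH`, and `mm` expand into windowed output subdirectories. A rough `java.time` illustration under the assumption of calendar-year formatting; the template's own formatter may differ in details:

```java
import java.time.ZoneOffset;
import java.time.ZonedDateTime;
import java.time.format.DateTimeFormatter;

/** Rough illustration of how the default date patterns shape a windowed output path. */
public class WindowedPathSketch {
  public static void main(String[] args) {
    ZonedDateTime windowEnd = ZonedDateTime.of(2024, 5, 31, 14, 5, 0, 0, ZoneOffset.UTC);
    // Documented defaults: yearPattern=YYYY, monthPattern=MM, dayPattern=dd,
    // hourPattern=HH, minutePattern=mm. Calendar-year "yyyy" is used here for simplicity.
    String dir =
        windowEnd.format(DateTimeFormatter.ofPattern("'gs://bucket-name/path/'yyyy/MM/dd/HH/mm/"));
    System.out.println(dir); // gs://bucket-name/path/2024/05/31/14/05/
  }
}
```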
### Optional parameters -* **token** : The Splunk HEC authentication token. Must be provided if the `tokenSource` parameter is set to `PLAINTEXT` or `KMS`. -* **batchCount** : The batch size for sending multiple events to Splunk. Defaults to 1 (no batching). -* **disableCertificateValidation** : Disable SSL certificate validation. Default false (validation enabled). If true, the certificates are not validated (all certificates are trusted) and `rootCaCertificatePath` parameter is ignored. -* **parallelism** : The maximum number of parallel requests. Defaults to 1 (no parallelism). -* **includePubsubMessage** : Include the full Pub/Sub message in the payload. Default false (only the data element is included in the payload). -* **tokenKMSEncryptionKey** : The Cloud KMS key to use to decrypt the HEC token string. This parameter must be provided when tokenSource is set to KMS. If the Cloud KMS key is provided, the HEC token string `must` be passed in encrypted. (Example: projects/your-project-id/locations/global/keyRings/your-keyring/cryptoKeys/your-key-name). -* **tokenSecretId** : The Secret Manager secret ID for the token. This parameter must provided when the tokenSource is set to SECRET_MANAGER. (Example: projects/your-project-id/secrets/your-secret/versions/your-secret-version). -* **tokenSource** : The source of the token. The following values are allowed: `PLAINTEXT`, `KMS`, and `SECRET_MANAGER`. You must provide this parameter when Secret Manager is used. If `tokenSource` is set to `KMS`, `tokenKMSEncryptionKey`, and encrypted, then `token` must be provided. If `tokenSource` is set to `SECRET_MANAGER`, then `tokenSecretId` must be provided. If `tokenSource` is set to `PLAINTEXT`, then `token` must be provided. -* **rootCaCertificatePath** : The full URL to the root CA certificate in Cloud Storage. The certificate provided in Cloud Storage must be DER-encoded and can be supplied in binary or printable (Base64) encoding. If the certificate is provided in Base64 encoding, it must be bounded at the beginning by -----BEGIN CERTIFICATE-----, and must be bounded at the end by -----END CERTIFICATE-----. If this parameter is provided, this private CA certificate file is fetched and added to the Dataflow worker's trust store in order to verify the Splunk HEC endpoint's SSL certificate. If this parameter is not provided, the default trust store is used. (Example: gs://mybucket/mycerts/privateCA.crt). -* **enableBatchLogs** : Specifies whether logs should be enabled for batches written to Splunk. Default: `true`. -* **enableGzipHttpCompression** : Specifies whether HTTP requests sent to Splunk HEC should be compressed (gzip content encoded). Default: `true`. -* **javascriptTextTransformGcsPath** : The Cloud Storage URI of the .js file that defines the JavaScript user-defined function (UDF) to use. For example, `gs://my-bucket/my-udfs/my_file.js`. -* **javascriptTextTransformFunctionName** : The name of the JavaScript user-defined function (UDF) to use. For example, if your JavaScript function code is `myTransform(inJson) { /*...do stuff...*/ }`, then the function name is `myTransform`. For sample JavaScript UDFs, see UDF Examples (https://github.com/GoogleCloudPlatform/DataflowTemplates#udf-examples). -* **javascriptTextTransformReloadIntervalMinutes** : Define the interval that workers may check for JavaScript UDF changes to reload the files. Defaults to: 0. +* **token**: The Splunk HEC authentication token. Must be provided if the `tokenSource` parameter is set to `PLAINTEXT` or `KMS`. 
+* **batchCount**: The batch size for sending multiple events to Splunk. Defaults to `1` (no batching).
+* **disableCertificateValidation**: Disable SSL certificate validation. Default `false` (validation enabled). If `true`, the certificates are not validated (all certificates are trusted) and the `rootCaCertificatePath` parameter is ignored.
+* **parallelism**: The maximum number of parallel requests. Defaults to `1` (no parallelism).
+* **includePubsubMessage**: Include the full Pub/Sub message in the payload. Default `false` (only the data element is included in the payload).
+* **tokenKMSEncryptionKey**: The Cloud KMS key to use to decrypt the HEC token string. This parameter must be provided when `tokenSource` is set to `KMS`. If the Cloud KMS key is provided, the HEC token string must be passed in encrypted. For example, `projects/your-project-id/locations/global/keyRings/your-keyring/cryptoKeys/your-key-name`.
+* **tokenSecretId**: The Secret Manager secret ID for the token. This parameter must be provided when `tokenSource` is set to `SECRET_MANAGER`. For example, `projects/your-project-id/secrets/your-secret/versions/your-secret-version`.
+* **tokenSource**: The source of the token. The following values are allowed: `PLAINTEXT`, `KMS`, and `SECRET_MANAGER`. You must provide this parameter when Secret Manager is used. If `tokenSource` is set to `KMS`, then `tokenKMSEncryptionKey` and an encrypted `token` must be provided. If `tokenSource` is set to `SECRET_MANAGER`, then `tokenSecretId` must be provided. If `tokenSource` is set to `PLAINTEXT`, then `token` must be provided.
+* **rootCaCertificatePath**: The full URL to the root CA certificate in Cloud Storage. The certificate provided in Cloud Storage must be DER-encoded and can be supplied in binary or printable (Base64) encoding. If the certificate is provided in Base64 encoding, it must be bounded at the beginning by `-----BEGIN CERTIFICATE-----`, and must be bounded at the end by `-----END CERTIFICATE-----`. If this parameter is provided, this private CA certificate file is fetched and added to the Dataflow worker's trust store in order to verify the Splunk HEC endpoint's SSL certificate. If this parameter is not provided, the default trust store is used. For example, `gs://mybucket/mycerts/privateCA.crt`.
+* **enableBatchLogs**: Specifies whether logs should be enabled for batches written to Splunk. Default: `true`.
+* **enableGzipHttpCompression**: Specifies whether HTTP requests sent to Splunk HEC should be compressed (gzip content encoded). Default: `true`.
+* **javascriptTextTransformGcsPath**: The Cloud Storage URI of the .js file that defines the JavaScript user-defined function (UDF) to use. For example, `gs://my-bucket/my-udfs/my_file.js`.
+* **javascriptTextTransformFunctionName**: The name of the JavaScript user-defined function (UDF) to use. For example, if your JavaScript function code is `myTransform(inJson) { /*...do stuff...*/ }`, then the function name is `myTransform`. For sample JavaScript UDFs, see UDF Examples (https://github.com/GoogleCloudPlatform/DataflowTemplates#udf-examples).
+* **javascriptTextTransformReloadIntervalMinutes**: Defines the interval that workers may check for JavaScript UDF changes to reload the files. Defaults to: 0.
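Because this change also blanks the example values in the Terraform sample further down in this README, a minimal sketch of a filled-in `google_dataflow_job` block may help tie the reworded help text back to concrete values. It is illustrative only: the placeholder values come from the parameter descriptions above, while the `template_gcs_path` and the dead-letter topic name are assumptions, not taken from this README.

```
# Illustrative sketch only. Placeholder values are copied from the parameter help text above;
# the template_gcs_path and the dead-letter topic name are assumed, adjust them for your project.
resource "google_dataflow_job" "pubsub_to_splunk_example" {
  name              = "pubsub-to-splunk-example"
  template_gcs_path = "gs://dataflow-templates-REGION/latest/Cloud_PubSub_to_Splunk" # assumed location
  region            = "us-central1"
  temp_gcs_location = "gs://bucket-name-here/temp"
  parameters = {
    inputSubscription     = "projects/your-project-id/subscriptions/your-subscription-name"
    url                   = "https://splunk-hec-host:8088"
    outputDeadletterTopic = "projects/your-project-id/topics/your-dead-letter-topic" # hypothetical topic
    # When the HEC token lives in Secret Manager, tokenSource and tokenSecretId go together.
    tokenSource   = "SECRET_MANAGER"
    tokenSecretId = "projects/your-project-id/secrets/your-secret/versions/your-secret-version"
  }
}
```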
## User-Defined functions (UDFs) @@ -275,18 +275,18 @@ resource "google_dataflow_job" "cloud_pubsub_to_splunk" { region = var.region temp_gcs_location = "gs://bucket-name-here/temp" parameters = { - inputSubscription = "projects/your-project-id/subscriptions/your-subscription-name" - url = "https://splunk-hec-host:8088" + inputSubscription = "" + url = "" outputDeadletterTopic = "" # token = "" # batchCount = "" # disableCertificateValidation = "" # parallelism = "" # includePubsubMessage = "" - # tokenKMSEncryptionKey = "projects/your-project-id/locations/global/keyRings/your-keyring/cryptoKeys/your-key-name" - # tokenSecretId = "projects/your-project-id/secrets/your-secret/versions/your-secret-version" + # tokenKMSEncryptionKey = "" + # tokenSecretId = "" # tokenSource = "" - # rootCaCertificatePath = "gs://mybucket/mycerts/privateCA.crt" + # rootCaCertificatePath = "" # enableBatchLogs = "true" # enableGzipHttpCompression = "true" # javascriptTextTransformGcsPath = "" diff --git a/v1/README_Cloud_Spanner_to_GCS_Avro.md b/v1/README_Cloud_Spanner_to_GCS_Avro.md index eddd41c13e..65995ab6fd 100644 --- a/v1/README_Cloud_Spanner_to_GCS_Avro.md +++ b/v1/README_Cloud_Spanner_to_GCS_Avro.md @@ -33,21 +33,21 @@ on [Metadata Annotations](https://github.com/GoogleCloudPlatform/DataflowTemplat ### Required parameters -* **instanceId** : The instance ID of the Spanner database that you want to export. -* **databaseId** : The database ID of the Spanner database that you want to export. -* **outputDir** : The Cloud Storage path to export Avro files to. The export job creates a new directory under this path that contains the exported files. (Example: gs://your-bucket/your-path). +* **instanceId**: The instance ID of the Spanner database that you want to export. +* **databaseId**: The database ID of the Spanner database that you want to export. +* **outputDir**: The Cloud Storage path to export Avro files to. The export job creates a new directory under this path that contains the exported files. For example, `gs://your-bucket/your-path`. ### Optional parameters -* **avroTempDirectory** : The Cloud Storage path where temporary Avro files are written. -* **spannerHost** : The Cloud Spanner endpoint to call in the template. Only used for testing. (Example: https://batch-spanner.googleapis.com). Defaults to: https://batch-spanner.googleapis.com. -* **snapshotTime** : The timestamp that corresponds to the version of the Spanner database that you want to read. The timestamp must be specified by using RFC 3339 UTC `Zulu` format. The timestamp must be in the past, and maximum timestamp staleness applies. (Example: 1990-12-31T23:59:60Z). Defaults to empty. -* **spannerProjectId** : The ID of the Google Cloud project that contains the Spanner database that you want to read data from. -* **shouldExportTimestampAsLogicalType** : If true, timestamps are exported as a `long` type with `timestamp-micros` logical type. By default, this parameter is set to `false` and timestamps are exported as ISO-8601 strings at nanosecond precision. -* **tableNames** : A comma-separated list of tables specifying the subset of the Spanner database to export. If you set this parameter, you must either include all of the related tables (parent tables and foreign key referenced tables) or set the `shouldExportRelatedTables` parameter to `true`.If the table is in named schema, please use fully qualified name. For example: `sch1.foo` in which `sch1` is the schema name and `foo` is the table name. Defaults to empty. 
-* **shouldExportRelatedTables** : Whether to include related tables. This parameter is used in conjunction with the `tableNames` parameter. Defaults to: false.
-* **spannerPriority** : The request priority for Spanner calls. Possible values are `HIGH`, `MEDIUM`, and `LOW`. The default value is `MEDIUM`.
-* **dataBoostEnabled** : Set to `true` to use the compute resources of Spanner Data Boost to run the job with near-zero impact on Spanner OLTP workflows. When set to `true`, you also need the `spanner.databases.useDataBoost` IAM permission. For more information, see the Data Boost overview (https://cloud.google.com/spanner/docs/databoost/databoost-overview). Defaults to: false.
+* **avroTempDirectory**: The Cloud Storage path where temporary Avro files are written.
+* **spannerHost**: The Cloud Spanner endpoint to call in the template. Only used for testing. For example, `https://batch-spanner.googleapis.com`. Defaults to: https://batch-spanner.googleapis.com.
+* **snapshotTime**: The timestamp that corresponds to the version of the Spanner database that you want to read. The timestamp must be specified by using RFC 3339 UTC `Zulu` format. The timestamp must be in the past, and maximum timestamp staleness applies. For example, `1990-12-31T23:59:60Z`. Defaults to empty.
+* **spannerProjectId**: The ID of the Google Cloud project that contains the Spanner database that you want to read data from.
+* **shouldExportTimestampAsLogicalType**: If `true`, timestamps are exported as a `long` type with `timestamp-micros` logical type. By default, this parameter is set to `false` and timestamps are exported as ISO-8601 strings at nanosecond precision.
+* **tableNames**: A comma-separated list of tables specifying the subset of the Spanner database to export. If you set this parameter, you must either include all of the related tables (parent tables and foreign key referenced tables) or set the `shouldExportRelatedTables` parameter to `true`. If the table is in a named schema, use the fully qualified name. For example: `sch1.foo` in which `sch1` is the schema name and `foo` is the table name. Defaults to empty.
+* **shouldExportRelatedTables**: Whether to include related tables. This parameter is used in conjunction with the `tableNames` parameter. Defaults to: false.
+* **spannerPriority**: The request priority for Spanner calls. Possible values are `HIGH`, `MEDIUM`, and `LOW`. The default value is `MEDIUM`.
+* **dataBoostEnabled**: Set to `true` to use the compute resources of Spanner Data Boost to run the job with near-zero impact on Spanner OLTP workflows. When set to `true`, you also need the `spanner.databases.useDataBoost` IAM permission. For more information, see the Data Boost overview (https://cloud.google.com/spanner/docs/databoost/databoost-overview). Defaults to: false.
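For readers applying the reworded help text above, here is a minimal sketch of a filled-in `google_dataflow_job` block for this export template. It is illustrative only: the instance, database, and output path values are placeholders consistent with the parameter descriptions, and the `template_gcs_path` is an assumption rather than something stated in this README.

```
# Illustrative sketch only. Values are placeholders; the template_gcs_path is assumed.
resource "google_dataflow_job" "spanner_to_gcs_avro_example" {
  name              = "spanner-to-gcs-avro-example"
  template_gcs_path = "gs://dataflow-templates-REGION/latest/Cloud_Spanner_to_GCS_Avro" # assumed location
  region            = "us-central1"
  temp_gcs_location = "gs://bucket-name-here/temp"
  parameters = {
    instanceId = "your-instance-id"
    databaseId = "your-database-id"
    outputDir  = "gs://your-bucket/your-path"
    # Optional: run the export on Data Boost compute to avoid impacting OLTP traffic.
    dataBoostEnabled = "true"
  }
}
```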
@@ -247,10 +247,10 @@ resource "google_dataflow_job" "cloud_spanner_to_gcs_avro" { parameters = { instanceId = "" databaseId = "" - outputDir = "gs://your-bucket/your-path" + outputDir = "" # avroTempDirectory = "" # spannerHost = "https://batch-spanner.googleapis.com" - # snapshotTime = "1990-12-31T23:59:60Z" + # snapshotTime = "" # spannerProjectId = "" # shouldExportTimestampAsLogicalType = "false" # tableNames = "" diff --git a/v1/README_Cloud_Spanner_vectors_to_Cloud_Storage.md b/v1/README_Cloud_Spanner_vectors_to_Cloud_Storage.md index 4a1cb4512a..a19a3106a6 100644 --- a/v1/README_Cloud_Spanner_vectors_to_Cloud_Storage.md +++ b/v1/README_Cloud_Spanner_vectors_to_Cloud_Storage.md @@ -25,20 +25,20 @@ on [Metadata Annotations](https://github.com/GoogleCloudPlatform/DataflowTemplat ### Required parameters -* **spannerProjectId** : The project ID of the Spanner instance. -* **spannerInstanceId** : The ID of the Spanner instance to export the vector embeddings from. -* **spannerDatabaseId** : The ID of the Spanner database to export the vector embeddings from. -* **spannerTable** : The Spanner table to read from. -* **spannerColumnsToExport** : A comma-separated list of required columns for the Vertex AI Vector Search index. The ID and embedding columns are required by Vector Search. If your column names don't match the Vertex AI Vector Search index input structure, create column mappings by using aliases. If the column names don't match the format expected by Vertex AI, use the notation from:to. For example, if you have columns named id and my_embedding, specify id, my_embedding:embedding. -* **gcsOutputFolder** : The Cloud Storage folder to write output files to. The path must end with a slash. (Example: gs://your-bucket/folder1/). -* **gcsOutputFilePrefix** : The filename prefix for writing output files. (Example: vector-embeddings). +* **spannerProjectId**: The project ID of the Spanner instance. +* **spannerInstanceId**: The ID of the Spanner instance to export the vector embeddings from. +* **spannerDatabaseId**: The ID of the Spanner database to export the vector embeddings from. +* **spannerTable**: The Spanner table to read from. +* **spannerColumnsToExport**: A comma-separated list of required columns for the Vertex AI Vector Search index. The ID and embedding columns are required by Vector Search. If your column names don't match the Vertex AI Vector Search index input structure, create column mappings by using aliases. If the column names don't match the format expected by Vertex AI, use the notation from:to. For example, if you have columns named id and my_embedding, specify id, my_embedding:embedding. +* **gcsOutputFolder**: The Cloud Storage folder to write output files to. The path must end with a slash. For example, `gs://your-bucket/folder1/`. +* **gcsOutputFilePrefix**: The filename prefix for writing output files. For example, `vector-embeddings`. ### Optional parameters -* **spannerHost** : The Spanner endpoint to call in the template. The default value is https://batch-spanner.googleapis.com. (Example: https://batch-spanner.googleapis.com). -* **spannerVersionTime** : If set, specifies the time when the database version must be taken. The value is a string in the RFC-3339 date format in Unix epoch time. For example: 1990-12-31T23:59:60Z. The timestamp must be in the past, and maximum timestamp staleness (https://cloud.google.com/spanner/docs/timestamp-bounds#maximum_timestamp_staleness) applies. 
If not set, a strong bound (https://cloud.google.com/spanner/docs/timestamp-bounds#strong) is used to read the latest data. Defaults to empty. (Example: 1990-12-31T23:59:60Z). -* **spannerDataBoostEnabled** : When set to true, the template uses Spanner on-demand compute. The export job runs on independent compute resources that don't impact current Spanner workloads. Using this option incurs additional charges in Spanner. For more information, see Spanner Data Boost overview (https://cloud.google.com/spanner/docs/databoost/databoost-overview). Defaults to: false. -* **spannerPriority** : The request priority for Spanner calls. The allowed values are HIGH, MEDIUM, and LOW. The default value is MEDIUM. +* **spannerHost**: The Spanner endpoint to call in the template. The default value is https://batch-spanner.googleapis.com. For example, `https://batch-spanner.googleapis.com`. +* **spannerVersionTime**: If set, specifies the time when the database version must be taken. The value is a string in the RFC-3339 date format in Unix epoch time. For example: `1990-12-31T23:59:60Z`. The timestamp must be in the past, and maximum timestamp staleness (https://cloud.google.com/spanner/docs/timestamp-bounds#maximum_timestamp_staleness) applies. If not set, a strong bound (https://cloud.google.com/spanner/docs/timestamp-bounds#strong) is used to read the latest data. Defaults to `empty`. For example, `1990-12-31T23:59:60Z`. +* **spannerDataBoostEnabled**: When set to `true`, the template uses Spanner on-demand compute. The export job runs on independent compute resources that don't impact current Spanner workloads. Using this option incurs additional charges in Spanner. For more information, see Spanner Data Boost overview (https://cloud.google.com/spanner/docs/databoost/databoost-overview). Defaults to: `false`. +* **spannerPriority**: The request priority for Spanner calls. The allowed values are `HIGH`, `MEDIUM`, and `LOW`. The default value is `MEDIUM`. @@ -238,10 +238,10 @@ resource "google_dataflow_job" "cloud_spanner_vectors_to_cloud_storage" { spannerDatabaseId = "" spannerTable = "" spannerColumnsToExport = "" - gcsOutputFolder = "gs://your-bucket/folder1/" - gcsOutputFilePrefix = "vector-embeddings" + gcsOutputFolder = "" + gcsOutputFilePrefix = "" # spannerHost = "https://batch-spanner.googleapis.com" - # spannerVersionTime = "1990-12-31T23:59:60Z" + # spannerVersionTime = "" # spannerDataBoostEnabled = "false" # spannerPriority = "" } diff --git a/v1/README_Datastore_to_Datastore_Delete.md b/v1/README_Datastore_to_Datastore_Delete.md index a57e414abb..5225c01a6d 100644 --- a/v1/README_Datastore_to_Datastore_Delete.md +++ b/v1/README_Datastore_to_Datastore_Delete.md @@ -18,16 +18,16 @@ on [Metadata Annotations](https://github.com/GoogleCloudPlatform/DataflowTemplat ### Required parameters -* **datastoreReadGqlQuery** : A GQL (https://cloud.google.com/datastore/docs/reference/gql_reference) query that specifies which entities to grab. For example, `SELECT * FROM MyKind`. -* **datastoreReadProjectId** : The ID of the Google Cloud project that contains the Datastore instance that you want to read data from. -* **datastoreDeleteProjectId** : Google Cloud Project Id of where to delete the datastore entities. +* **datastoreReadGqlQuery**: A GQL (https://cloud.google.com/datastore/docs/reference/gql_reference) query that specifies which entities to grab. For example, `SELECT * FROM MyKind`. 
+* **datastoreReadProjectId**: The ID of the Google Cloud project that contains the Datastore instance that you want to read data from. +* **datastoreDeleteProjectId**: Google Cloud Project Id of where to delete the datastore entities. ### Optional parameters -* **datastoreReadNamespace** : The namespace of the requested entities. To use the default namespace, leave this parameter blank. -* **javascriptTextTransformGcsPath** : The Cloud Storage URI of the .js file that defines the JavaScript user-defined function (UDF) to use. For example, `gs://my-bucket/my-udfs/my_file.js`. -* **javascriptTextTransformFunctionName** : The name of the JavaScript user-defined function (UDF) to use. For example, if your JavaScript function code is `myTransform(inJson) { /*...do stuff...*/ }`, then the function name is `myTransform`. For sample JavaScript UDFs, see UDF Examples (https://github.com/GoogleCloudPlatform/DataflowTemplates#udf-examples). -* **datastoreHintNumWorkers** : Hint for the expected number of workers in the Datastore ramp-up throttling step. Defaults to: 500. +* **datastoreReadNamespace**: The namespace of the requested entities. To use the default namespace, leave this parameter blank. +* **javascriptTextTransformGcsPath**: The Cloud Storage URI of the .js file that defines the JavaScript user-defined function (UDF) to use. For example, `gs://my-bucket/my-udfs/my_file.js`. +* **javascriptTextTransformFunctionName**: The name of the JavaScript user-defined function (UDF) to use. For example, if your JavaScript function code is `myTransform(inJson) { /*...do stuff...*/ }`, then the function name is `myTransform`. For sample JavaScript UDFs, see UDF Examples (https://github.com/GoogleCloudPlatform/DataflowTemplates#udf-examples). +* **datastoreHintNumWorkers**: Hint for the expected number of workers in the Datastore ramp-up throttling step. Defaults to: 500. ## User-Defined functions (UDFs) diff --git a/v1/README_Datastore_to_GCS_Text.md b/v1/README_Datastore_to_GCS_Text.md index 4a513cf37a..cd7eebdf74 100644 --- a/v1/README_Datastore_to_GCS_Text.md +++ b/v1/README_Datastore_to_GCS_Text.md @@ -19,15 +19,15 @@ on [Metadata Annotations](https://github.com/GoogleCloudPlatform/DataflowTemplat ### Required parameters -* **datastoreReadGqlQuery** : A GQL (https://cloud.google.com/datastore/docs/reference/gql_reference) query that specifies which entities to grab. For example, `SELECT * FROM MyKind`. -* **datastoreReadProjectId** : The ID of the Google Cloud project that contains the Datastore instance that you want to read data from. -* **textWritePrefix** : The Cloud Storage path prefix that specifies where the data is written. (Example: gs://mybucket/somefolder/). +* **datastoreReadGqlQuery**: A GQL (https://cloud.google.com/datastore/docs/reference/gql_reference) query that specifies which entities to grab. For example, `SELECT * FROM MyKind`. +* **datastoreReadProjectId**: The ID of the Google Cloud project that contains the Datastore instance that you want to read data from. +* **textWritePrefix**: The Cloud Storage path prefix that specifies where the data is written. For example, `gs://mybucket/somefolder/`. ### Optional parameters -* **datastoreReadNamespace** : The namespace of the requested entities. To use the default namespace, leave this parameter blank. -* **javascriptTextTransformGcsPath** : The Cloud Storage URI of the .js file that defines the JavaScript user-defined function (UDF) to use. For example, `gs://my-bucket/my-udfs/my_file.js`. 
-* **javascriptTextTransformFunctionName** : The name of the JavaScript user-defined function (UDF) to use. For example, if your JavaScript function code is `myTransform(inJson) { /*...do stuff...*/ }`, then the function name is `myTransform`. For sample JavaScript UDFs, see UDF Examples (https://github.com/GoogleCloudPlatform/DataflowTemplates#udf-examples). +* **datastoreReadNamespace**: The namespace of the requested entities. To use the default namespace, leave this parameter blank. +* **javascriptTextTransformGcsPath**: The Cloud Storage URI of the .js file that defines the JavaScript user-defined function (UDF) to use. For example, `gs://my-bucket/my-udfs/my_file.js`. +* **javascriptTextTransformFunctionName**: The name of the JavaScript user-defined function (UDF) to use. For example, if your JavaScript function code is `myTransform(inJson) { /*...do stuff...*/ }`, then the function name is `myTransform`. For sample JavaScript UDFs, see UDF Examples (https://github.com/GoogleCloudPlatform/DataflowTemplates#udf-examples). ## User-Defined functions (UDFs) @@ -219,7 +219,7 @@ resource "google_dataflow_job" "datastore_to_gcs_text" { parameters = { datastoreReadGqlQuery = "" datastoreReadProjectId = "" - textWritePrefix = "gs://mybucket/somefolder/" + textWritePrefix = "" # datastoreReadNamespace = "" # javascriptTextTransformGcsPath = "" # javascriptTextTransformFunctionName = "" diff --git a/v1/README_Firestore_to_Firestore_Delete.md b/v1/README_Firestore_to_Firestore_Delete.md index eb15ffd886..5801238a6b 100644 --- a/v1/README_Firestore_to_Firestore_Delete.md +++ b/v1/README_Firestore_to_Firestore_Delete.md @@ -18,16 +18,16 @@ on [Metadata Annotations](https://github.com/GoogleCloudPlatform/DataflowTemplat ### Required parameters -* **firestoreReadGqlQuery** : A GQL (https://cloud.google.com/datastore/docs/reference/gql_reference) query that specifies which entities to grab. For example, `SELECT * FROM MyKind`. -* **firestoreReadProjectId** : The ID of the Google Cloud project that contains the Firestore instance that you want to read data from. -* **firestoreDeleteProjectId** : Google Cloud Project Id of where to delete the firestore entities. +* **firestoreReadGqlQuery**: A GQL (https://cloud.google.com/datastore/docs/reference/gql_reference) query that specifies which entities to grab. For example, `SELECT * FROM MyKind`. +* **firestoreReadProjectId**: The ID of the Google Cloud project that contains the Firestore instance that you want to read data from. +* **firestoreDeleteProjectId**: Google Cloud Project Id of where to delete the firestore entities. ### Optional parameters -* **firestoreReadNamespace** : The namespace of the requested entities. To use the default namespace, leave this parameter blank. -* **firestoreHintNumWorkers** : Hint for the expected number of workers in the Firestore ramp-up throttling step. Defaults to: 500. -* **javascriptTextTransformGcsPath** : The Cloud Storage URI of the .js file that defines the JavaScript user-defined function (UDF) to use. For example, `gs://my-bucket/my-udfs/my_file.js`. -* **javascriptTextTransformFunctionName** : The name of the JavaScript user-defined function (UDF) to use. For example, if your JavaScript function code is `myTransform(inJson) { /*...do stuff...*/ }`, then the function name is `myTransform`. For sample JavaScript UDFs, see UDF Examples (https://github.com/GoogleCloudPlatform/DataflowTemplates#udf-examples). +* **firestoreReadNamespace**: The namespace of the requested entities. 
To use the default namespace, leave this parameter blank. +* **firestoreHintNumWorkers**: Hint for the expected number of workers in the Firestore ramp-up throttling step. Defaults to: 500. +* **javascriptTextTransformGcsPath**: The Cloud Storage URI of the .js file that defines the JavaScript user-defined function (UDF) to use. For example, `gs://my-bucket/my-udfs/my_file.js`. +* **javascriptTextTransformFunctionName**: The name of the JavaScript user-defined function (UDF) to use. For example, if your JavaScript function code is `myTransform(inJson) { /*...do stuff...*/ }`, then the function name is `myTransform`. For sample JavaScript UDFs, see UDF Examples (https://github.com/GoogleCloudPlatform/DataflowTemplates#udf-examples). ## User-Defined functions (UDFs) diff --git a/v1/README_Firestore_to_GCS_Text.md b/v1/README_Firestore_to_GCS_Text.md index 569bbec1bf..3110a1a190 100644 --- a/v1/README_Firestore_to_GCS_Text.md +++ b/v1/README_Firestore_to_GCS_Text.md @@ -19,15 +19,15 @@ on [Metadata Annotations](https://github.com/GoogleCloudPlatform/DataflowTemplat ### Required parameters -* **firestoreReadGqlQuery** : A GQL (https://cloud.google.com/datastore/docs/reference/gql_reference) query that specifies which entities to grab. For example, `SELECT * FROM MyKind`. -* **firestoreReadProjectId** : The ID of the Google Cloud project that contains the Firestore instance that you want to read data from. -* **textWritePrefix** : The Cloud Storage path prefix that specifies where the data is written. (Example: gs://mybucket/somefolder/). +* **firestoreReadGqlQuery**: A GQL (https://cloud.google.com/datastore/docs/reference/gql_reference) query that specifies which entities to grab. For example, `SELECT * FROM MyKind`. +* **firestoreReadProjectId**: The ID of the Google Cloud project that contains the Firestore instance that you want to read data from. +* **textWritePrefix**: The Cloud Storage path prefix that specifies where the data is written. For example, `gs://mybucket/somefolder/`. ### Optional parameters -* **firestoreReadNamespace** : The namespace of the requested entities. To use the default namespace, leave this parameter blank. -* **javascriptTextTransformGcsPath** : The Cloud Storage URI of the .js file that defines the JavaScript user-defined function (UDF) to use. For example, `gs://my-bucket/my-udfs/my_file.js`. -* **javascriptTextTransformFunctionName** : The name of the JavaScript user-defined function (UDF) to use. For example, if your JavaScript function code is `myTransform(inJson) { /*...do stuff...*/ }`, then the function name is `myTransform`. For sample JavaScript UDFs, see UDF Examples (https://github.com/GoogleCloudPlatform/DataflowTemplates#udf-examples). +* **firestoreReadNamespace**: The namespace of the requested entities. To use the default namespace, leave this parameter blank. +* **javascriptTextTransformGcsPath**: The Cloud Storage URI of the .js file that defines the JavaScript user-defined function (UDF) to use. For example, `gs://my-bucket/my-udfs/my_file.js`. +* **javascriptTextTransformFunctionName**: The name of the JavaScript user-defined function (UDF) to use. For example, if your JavaScript function code is `myTransform(inJson) { /*...do stuff...*/ }`, then the function name is `myTransform`. For sample JavaScript UDFs, see UDF Examples (https://github.com/GoogleCloudPlatform/DataflowTemplates#udf-examples). 
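To make the updated Firestore-to-Cloud Storage parameter help above concrete, a minimal Terraform sketch follows. It is illustrative only: the GQL query, project ID, and output prefix are placeholders taken from the parameter descriptions, and the `template_gcs_path` is an assumption, not taken from this README.

```
# Illustrative sketch only. Placeholder values come from the parameter help text above;
# the template_gcs_path is assumed.
resource "google_dataflow_job" "firestore_to_gcs_text_example" {
  name              = "firestore-to-gcs-text-example"
  template_gcs_path = "gs://dataflow-templates-REGION/latest/Firestore_to_GCS_Text" # assumed location
  region            = "us-central1"
  temp_gcs_location = "gs://bucket-name-here/temp"
  parameters = {
    firestoreReadGqlQuery  = "SELECT * FROM MyKind"
    firestoreReadProjectId = "your-project-id"
    textWritePrefix        = "gs://mybucket/somefolder/"
  }
}
```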
## User-Defined functions (UDFs) @@ -219,7 +219,7 @@ resource "google_dataflow_job" "firestore_to_gcs_text" { parameters = { firestoreReadGqlQuery = "" firestoreReadProjectId = "" - textWritePrefix = "gs://mybucket/somefolder/" + textWritePrefix = "" # firestoreReadNamespace = "" # javascriptTextTransformGcsPath = "" # javascriptTextTransformFunctionName = "" diff --git a/v1/README_GCS_Avro_to_Cloud_Bigtable.md b/v1/README_GCS_Avro_to_Cloud_Bigtable.md index 3999f51f8d..e50f50f4b9 100644 --- a/v1/README_GCS_Avro_to_Cloud_Bigtable.md +++ b/v1/README_GCS_Avro_to_Cloud_Bigtable.md @@ -18,14 +18,14 @@ on [Metadata Annotations](https://github.com/GoogleCloudPlatform/DataflowTemplat ### Required parameters -* **bigtableProjectId** : The ID of the Google Cloud project that contains the Bigtable instance that you want to write data to. -* **bigtableInstanceId** : The ID of the Bigtable instance that contains the table. -* **bigtableTableId** : The ID of the Bigtable table to import. -* **inputFilePattern** : The Cloud Storage path pattern where data is located. (Example: gs:////*). +* **bigtableProjectId**: The ID of the Google Cloud project that contains the Bigtable instance that you want to write data to. +* **bigtableInstanceId**: The ID of the Bigtable instance that contains the table. +* **bigtableTableId**: The ID of the Bigtable table to import. +* **inputFilePattern**: The Cloud Storage path pattern where data is located. For example, `gs:///FOLDER/PREFIX*`. ### Optional parameters -* **splitLargeRows** : The flag for enabling splitting of large rows into multiple MutateRows requests. Note that when a large row is split between multiple API calls, the updates to the row are not atomic. . +* **splitLargeRows**: The flag for enabling splitting of large rows into multiple MutateRows requests. Note that when a large row is split between multiple API calls, the updates to the row are not atomic. @@ -205,7 +205,7 @@ resource "google_dataflow_job" "gcs_avro_to_cloud_bigtable" { bigtableProjectId = "" bigtableInstanceId = "" bigtableTableId = "" - inputFilePattern = "gs:////*" + inputFilePattern = "" # splitLargeRows = "" } } diff --git a/v1/README_GCS_Avro_to_Cloud_Spanner.md b/v1/README_GCS_Avro_to_Cloud_Spanner.md index 6b57828c39..ee4cf0ccaf 100644 --- a/v1/README_GCS_Avro_to_Cloud_Spanner.md +++ b/v1/README_GCS_Avro_to_Cloud_Spanner.md @@ -18,21 +18,21 @@ on [Metadata Annotations](https://github.com/GoogleCloudPlatform/DataflowTemplat ### Required parameters -* **instanceId** : The instance ID of the Spanner database. -* **databaseId** : The database ID of the Spanner database. -* **inputDir** : The Cloud Storage path where the Avro files are imported from. +* **instanceId**: The instance ID of the Spanner database. +* **databaseId**: The database ID of the Spanner database. +* **inputDir**: The Cloud Storage path where the Avro files are imported from. ### Optional parameters -* **spannerHost** : The Cloud Spanner endpoint to call in the template. Only used for testing. (Example: https://batch-spanner.googleapis.com). Defaults to: https://batch-spanner.googleapis.com. -* **waitForIndexes** : If `true`, the pipeline waits for indexes to be created. If `false`, the job might complete while indexes are still being created in the background. The default value is `false`. -* **waitForForeignKeys** : If `true`, the pipeline waits for foreign keys to be created. If `false`, the job might complete while foreign keys are still being created in the background. The default value is `false`. 
-* **waitForChangeStreams** : If `true`, the pipeline waits for change streams to be created. If `false`, the job might complete while change streams are still being created in the background. The default value is `true`. -* **waitForSequences** : By default, the import pipeline is blocked on sequence creation. If `false`, the import pipeline might complete with sequences still being created in the background. -* **earlyIndexCreateFlag** : Specifies whether early index creation is enabled. If the template runs a large number of DDL statements, it's more efficient to create indexes before loading data. Therefore, the default behavior is to create the indexes first when the number of DDL statements exceeds a threshold. To disable this feature, set `earlyIndexCreateFlag` to `false`. The default value is `true`. -* **spannerProjectId** : The ID of the Google Cloud project that contains the Spanner database. If not set, the default Google Cloud project is used. -* **ddlCreationTimeoutInMinutes** : The timeout in minutes for DDL statements performed by the template. The default value is 30 minutes. -* **spannerPriority** : The request priority for Spanner calls. Possible values are `HIGH`, `MEDIUM`, and `LOW`. The default value is `MEDIUM`. +* **spannerHost**: The Cloud Spanner endpoint to call in the template. Only used for testing. For example, `https://batch-spanner.googleapis.com`. Defaults to: https://batch-spanner.googleapis.com. +* **waitForIndexes**: If `true`, the pipeline waits for indexes to be created. If `false`, the job might complete while indexes are still being created in the background. The default value is `false`. +* **waitForForeignKeys**: If `true`, the pipeline waits for foreign keys to be created. If `false`, the job might complete while foreign keys are still being created in the background. The default value is `false`. +* **waitForChangeStreams**: If `true`, the pipeline waits for change streams to be created. If `false`, the job might complete while change streams are still being created in the background. The default value is `true`. +* **waitForSequences**: By default, the import pipeline is blocked on sequence creation. If `false`, the import pipeline might complete with sequences still being created in the background. +* **earlyIndexCreateFlag**: Specifies whether early index creation is enabled. If the template runs a large number of DDL statements, it's more efficient to create indexes before loading data. Therefore, the default behavior is to create the indexes first when the number of DDL statements exceeds a threshold. To disable this feature, set `earlyIndexCreateFlag` to `false`. The default value is `true`. +* **spannerProjectId**: The ID of the Google Cloud project that contains the Spanner database. If not set, the default Google Cloud project is used. +* **ddlCreationTimeoutInMinutes**: The timeout in minutes for DDL statements performed by the template. The default value is 30 minutes. +* **spannerPriority**: The request priority for Spanner calls. Possible values are `HIGH`, `MEDIUM`, and `LOW`. The default value is `MEDIUM`. diff --git a/v1/README_GCS_CSV_to_BigQuery.md b/v1/README_GCS_CSV_to_BigQuery.md index 2d742cff3d..392774e6a6 100644 --- a/v1/README_GCS_CSV_to_BigQuery.md +++ b/v1/README_GCS_CSV_to_BigQuery.md @@ -16,18 +16,18 @@ on [Metadata Annotations](https://github.com/GoogleCloudPlatform/DataflowTemplat ### Required parameters -* **inputFilePattern** : The Cloud Storage path to the CSV file that contains the text to process. 
(Example: gs://your-bucket/path/*.csv). -* **schemaJSONPath** : The Cloud Storage path to the JSON file that defines your BigQuery schema. -* **outputTable** : The name of the BigQuery table that stores your processed data. If you reuse an existing BigQuery table, the data is appended to the destination table. -* **bigQueryLoadingTemporaryDirectory** : The temporary directory to use during the BigQuery loading process. (Example: gs://your-bucket/your-files/temp_dir). -* **badRecordsOutputTable** : The name of the BigQuery table to use to store the rejected data when processing the CSV files. If you reuse an existing BigQuery table, the data is appended to the destination table. The schema of this table must match the error table schema (https://cloud.google.com/dataflow/docs/guides/templates/provided/cloud-storage-csv-to-bigquery#GcsCSVToBigQueryBadRecordsSchema). -* **delimiter** : The column delimiter that the CSV file uses. (Example: ,). -* **csvFormat** : The CSV format according to Apache Commons CSV format. Defaults to: Default. +* **inputFilePattern**: The Cloud Storage path to the CSV file that contains the text to process. For example, `gs://your-bucket/path/*.csv`. +* **schemaJSONPath**: The Cloud Storage path to the JSON file that defines your BigQuery schema. +* **outputTable**: The name of the BigQuery table that stores your processed data. If you reuse an existing BigQuery table, the data is appended to the destination table. +* **bigQueryLoadingTemporaryDirectory**: The temporary directory to use during the BigQuery loading process. For example, `gs://your-bucket/your-files/temp_dir`. +* **badRecordsOutputTable**: The name of the BigQuery table to use to store the rejected data when processing the CSV files. If you reuse an existing BigQuery table, the data is appended to the destination table. The schema of this table must match the error table schema (https://cloud.google.com/dataflow/docs/guides/templates/provided/cloud-storage-csv-to-bigquery#GcsCSVToBigQueryBadRecordsSchema). +* **delimiter**: The column delimiter that the CSV file uses. For example, `,`. +* **csvFormat**: The CSV format according to Apache Commons CSV format. Defaults to: `Default`. ### Optional parameters -* **containsHeaders** : Whether headers are included in the CSV file. Defaults to: false. -* **csvFileEncoding** : The CSV file character encoding format. Allowed Values are US-ASCII, ISO-8859-1, UTF-8, and UTF-16. Defaults to: UTF-8. +* **containsHeaders**: Whether headers are included in the CSV file. Defaults to: `false`. +* **csvFileEncoding**: The CSV file character encoding format. Allowed Values are `US-ASCII`, `ISO-8859-1`, `UTF-8`, and `UTF-16`. Defaults to: UTF-8. 
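Since the CSV-to-BigQuery parameter list above now carries the inline examples that used to live in the Terraform block, a minimal filled-in sketch may still be useful. It is illustrative only: the file pattern, temporary directory, delimiter, and format values come from the parameter descriptions, while the schema path and the BigQuery table names are hypothetical, as is the `template_gcs_path`.

```
# Illustrative sketch only. The schema path, table names, and template_gcs_path are hypothetical.
resource "google_dataflow_job" "gcs_csv_to_bigquery_example" {
  name              = "gcs-csv-to-bigquery-example"
  template_gcs_path = "gs://dataflow-templates-REGION/latest/GCS_CSV_to_BigQuery" # assumed location
  region            = "us-central1"
  temp_gcs_location = "gs://bucket-name-here/temp"
  parameters = {
    inputFilePattern                  = "gs://your-bucket/path/*.csv"
    schemaJSONPath                    = "gs://your-bucket/path/schema.json"        # hypothetical schema file
    outputTable                       = "your-project-id:your_dataset.your_table"  # hypothetical table
    bigQueryLoadingTemporaryDirectory = "gs://your-bucket/your-files/temp_dir"
    badRecordsOutputTable             = "your-project-id:your_dataset.bad_records" # hypothetical table
    delimiter                         = ","
    csvFormat                         = "Default"
    containsHeaders                   = "false"
  }
}
```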
@@ -216,12 +216,12 @@ resource "google_dataflow_job" "gcs_csv_to_bigquery" { region = var.region temp_gcs_location = "gs://bucket-name-here/temp" parameters = { - inputFilePattern = "gs://your-bucket/path/*.csv" + inputFilePattern = "" schemaJSONPath = "" outputTable = "" - bigQueryLoadingTemporaryDirectory = "gs://your-bucket/your-files/temp_dir" + bigQueryLoadingTemporaryDirectory = "" badRecordsOutputTable = "" - delimiter = "," + delimiter = "" csvFormat = "" # containsHeaders = "false" # csvFileEncoding = "UTF-8" diff --git a/v1/README_GCS_Parquet_to_Cloud_Bigtable.md b/v1/README_GCS_Parquet_to_Cloud_Bigtable.md index 8ea1677b77..012aefa96d 100644 --- a/v1/README_GCS_Parquet_to_Cloud_Bigtable.md +++ b/v1/README_GCS_Parquet_to_Cloud_Bigtable.md @@ -18,14 +18,14 @@ on [Metadata Annotations](https://github.com/GoogleCloudPlatform/DataflowTemplat ### Required parameters -* **bigtableProjectId** : The Google Cloud project ID associated with the Bigtable instance. -* **bigtableInstanceId** : The ID of the Cloud Bigtable instance that contains the table. -* **bigtableTableId** : The ID of the Bigtable table to import. -* **inputFilePattern** : The Cloud Storage path with the files that contain the data. (Example: gs://your-bucket/your-files/*.parquet). +* **bigtableProjectId**: The Google Cloud project ID associated with the Bigtable instance. +* **bigtableInstanceId**: The ID of the Cloud Bigtable instance that contains the table. +* **bigtableTableId**: The ID of the Bigtable table to import. +* **inputFilePattern**: The Cloud Storage path with the files that contain the data. For example, `gs://your-bucket/your-files/*.parquet`. ### Optional parameters -* **splitLargeRows** : The flag for enabling splitting of large rows into multiple MutateRows requests. Note that when a large row is split between multiple API calls, the updates to the row are not atomic. . +* **splitLargeRows**: The flag for enabling splitting of large rows into multiple MutateRows requests. Note that when a large row is split between multiple API calls, the updates to the row are not atomic. @@ -205,7 +205,7 @@ resource "google_dataflow_job" "gcs_parquet_to_cloud_bigtable" { bigtableProjectId = "" bigtableInstanceId = "" bigtableTableId = "" - inputFilePattern = "gs://your-bucket/your-files/*.parquet" + inputFilePattern = "" # splitLargeRows = "" } } diff --git a/v1/README_GCS_SequenceFile_to_Cloud_Bigtable.md b/v1/README_GCS_SequenceFile_to_Cloud_Bigtable.md index 85e1464716..883d48356d 100644 --- a/v1/README_GCS_SequenceFile_to_Cloud_Bigtable.md +++ b/v1/README_GCS_SequenceFile_to_Cloud_Bigtable.md @@ -18,15 +18,15 @@ on [Metadata Annotations](https://github.com/GoogleCloudPlatform/DataflowTemplat ### Required parameters -* **bigtableProject** : The ID of the Google Cloud project that contains the Bigtable instance that you want to write data to. -* **bigtableInstanceId** : The ID of the Bigtable instance that contains the table. -* **bigtableTableId** : The ID of the Bigtable table to import. -* **sourcePattern** : The Cloud Storage path pattern to the location of the data. (Example: gs://your-bucket/your-path/prefix*). +* **bigtableProject**: The ID of the Google Cloud project that contains the Bigtable instance that you want to write data to. +* **bigtableInstanceId**: The ID of the Bigtable instance that contains the table. +* **bigtableTableId**: The ID of the Bigtable table to import. +* **sourcePattern**: The Cloud Storage path pattern to the location of the data. 
For example, `gs://your-bucket/your-path/prefix*`.
### Optional parameters
-* **bigtableAppProfileId** : The ID of the Bigtable application profile to use for the import. If you don't specify an application profile, Bigtable uses the instance's default application profile (https://cloud.google.com/bigtable/docs/app-profiles#default-app-profile).
-* **mutationThrottleLatencyMs** : Optional Set mutation latency throttling (enables the feature). Value in milliseconds. Defaults to: 0.
+* **bigtableAppProfileId**: The ID of the Bigtable application profile to use for the import. If you don't specify an application profile, Bigtable uses the instance's default application profile (https://cloud.google.com/bigtable/docs/app-profiles#default-app-profile).
+* **mutationThrottleLatencyMs**: Optional. Sets mutation latency throttling (enables the feature). Value in milliseconds. Defaults to: 0.
@@ -209,7 +209,7 @@ resource "google_dataflow_job" "gcs_sequencefile_to_cloud_bigtable" {
bigtableProject = ""
bigtableInstanceId = ""
bigtableTableId = ""
- sourcePattern = "gs://your-bucket/your-path/prefix*"
+ sourcePattern = ""
# bigtableAppProfileId = ""
# mutationThrottleLatencyMs = "0"
}
diff --git a/v1/README_GCS_Text_to_BigQuery.md b/v1/README_GCS_Text_to_BigQuery.md
index 11feb39ca2..49815457aa 100644
--- a/v1/README_GCS_Text_to_BigQuery.md
+++ b/v1/README_GCS_Text_to_BigQuery.md
@@ -19,8 +19,8 @@ on [Metadata Annotations](https://github.com/GoogleCloudPlatform/DataflowTemplat
### Required parameters
-* **inputFilePattern** : Path of the file pattern glob to read from. (Example: gs://your-bucket/path/*.csv).
-* **JSONPath** : JSON file with BigQuery Schema description. JSON Example: {
+* **inputFilePattern**: Path of the file pattern glob to read from. For example, `gs://your-bucket/path/*.csv`.
+* **JSONPath**: JSON file with BigQuery Schema description. JSON Example: {
"BigQuery Schema": [
{
"name": "location",
@@ -44,13 +44,13 @@ on [Metadata Annotations](https://github.com/GoogleCloudPlatform/DataflowTemplat
}
]
}.
-* **outputTable** : BigQuery table location to write the output to. The table's schema must match the input objects.
-* **bigQueryLoadingTemporaryDirectory** : Temporary directory for BigQuery loading process (Example: gs://your-bucket/your-files/temp_dir).
+* **outputTable**: BigQuery table location to write the output to. The table's schema must match the input objects.
+* **bigQueryLoadingTemporaryDirectory**: Temporary directory for BigQuery loading process. For example, `gs://your-bucket/your-files/temp_dir`.
### Optional parameters
-* **javascriptTextTransformGcsPath** : The Cloud Storage URI of the .js file that defines the JavaScript user-defined function (UDF) to use. For example, `gs://my-bucket/my-udfs/my_file.js`.
-* **javascriptTextTransformFunctionName** : The name of the JavaScript user-defined function (UDF) to use. For example, if your JavaScript function code is `myTransform(inJson) { /*...do stuff...*/ }`, then the function name is `myTransform`. For sample JavaScript UDFs, see UDF Examples (https://github.com/GoogleCloudPlatform/DataflowTemplates#udf-examples).
+* **javascriptTextTransformGcsPath**: The Cloud Storage URI of the .js file that defines the JavaScript user-defined function (UDF) to use. For example, `gs://my-bucket/my-udfs/my_file.js`.
+* **javascriptTextTransformFunctionName**: The name of the JavaScript user-defined function (UDF) to use.
For example, if your JavaScript function code is `myTransform(inJson) { /*...do stuff...*/ }`, then the function name is `myTransform`. For sample JavaScript UDFs, see UDF Examples (https://github.com/GoogleCloudPlatform/DataflowTemplates#udf-examples). ## User-Defined functions (UDFs) @@ -240,10 +240,10 @@ resource "google_dataflow_job" "gcs_text_to_bigquery" { region = var.region temp_gcs_location = "gs://bucket-name-here/temp" parameters = { - inputFilePattern = "gs://your-bucket/path/*.csv" + inputFilePattern = "" JSONPath = "" outputTable = "" - bigQueryLoadingTemporaryDirectory = "gs://your-bucket/your-files/temp_dir" + bigQueryLoadingTemporaryDirectory = "" # javascriptTextTransformGcsPath = "" # javascriptTextTransformFunctionName = "" } diff --git a/v1/README_GCS_Text_to_Cloud_PubSub.md b/v1/README_GCS_Text_to_Cloud_PubSub.md index 76626c5237..a284548e9d 100644 --- a/v1/README_GCS_Text_to_Cloud_PubSub.md +++ b/v1/README_GCS_Text_to_Cloud_PubSub.md @@ -24,8 +24,8 @@ on [Metadata Annotations](https://github.com/GoogleCloudPlatform/DataflowTemplat ### Required parameters -* **inputFilePattern** : The input file pattern to read from. (Example: gs://bucket-name/files/*.json). -* **outputTopic** : The Pub/Sub input topic to write to. The name must be in the format `projects//topics/`. (Example: projects/your-project-id/topics/your-topic-name). +* **inputFilePattern**: The input file pattern to read from. For example, `gs://bucket-name/files/*.json`. +* **outputTopic**: The Pub/Sub input topic to write to. The name must be in the format `projects//topics/`. For example, `projects/your-project-id/topics/your-topic-name`. ### Optional parameters @@ -196,8 +196,8 @@ resource "google_dataflow_job" "gcs_text_to_cloud_pubsub" { region = var.region temp_gcs_location = "gs://bucket-name-here/temp" parameters = { - inputFilePattern = "gs://bucket-name/files/*.json" - outputTopic = "projects/your-project-id/topics/your-topic-name" + inputFilePattern = "" + outputTopic = "" } } ``` diff --git a/v1/README_GCS_Text_to_Cloud_Spanner.md b/v1/README_GCS_Text_to_Cloud_Spanner.md index de286dae99..01ce3a7800 100644 --- a/v1/README_GCS_Text_to_Cloud_Spanner.md +++ b/v1/README_GCS_Text_to_Cloud_Spanner.md @@ -17,24 +17,24 @@ on [Metadata Annotations](https://github.com/GoogleCloudPlatform/DataflowTemplat ### Required parameters -* **instanceId** : The instance ID of the Spanner database. -* **databaseId** : The database ID of the Spanner database. -* **importManifest** : The path in Cloud Storage to use when importing manifest files. (Example: gs://your-bucket/your-folder/your-manifest.json). +* **instanceId**: The instance ID of the Spanner database. +* **databaseId**: The database ID of the Spanner database. +* **importManifest**: The path in Cloud Storage to use when importing manifest files. For example, `gs://your-bucket/your-folder/your-manifest.json`. ### Optional parameters -* **spannerHost** : The Cloud Spanner endpoint to call in the template. Only used for testing. (Example: https://batch-spanner.googleapis.com). Defaults to: https://batch-spanner.googleapis.com. -* **columnDelimiter** : The column delimiter that the source file uses. The default value is ','. (Example: ,). -* **fieldQualifier** : The character that must surround any value in the source file that contains the columnDelimiter. The default value is ". 
-* **trailingDelimiter** : Specifies whether the lines in the source files have trailing delimiters, that is, whether the `columnDelimiter` character appears at the end of each line, after the last column value). The default value is `true`. -* **escape** : The escape character the source file uses. By default, this parameter is not set and the template does not use the escape character. -* **nullString** : The string that represents a `NULL` value. By default, this parameter is not set and the template does not use the null string. -* **dateFormat** : The format used to parse date columns. By default, the pipeline tries to parse the date columns as `yyyy-M-d[' 00:00:00']`, for example, as 2019-01-31 or 2019-1-1 00:00:00. If your date format is different, specify the format using the java.time.format.DateTimeFormatter (https://docs.oracle.com/en/java/javase/11/docs/api/java.base/java/time/format/DateTimeFormatter.html) patterns. -* **timestampFormat** : The format used to parse timestamp columns. If the timestamp is a long integer, then it is parsed as Unix epoch time. Otherwise, it is parsed as a string using the java.time.format.DateTimeFormatter.ISO_INSTANT (https://docs.oracle.com/en/java/javase/11/docs/api/java.base/java/time/format/DateTimeFormatter.html#ISO_INSTANT) format. For other cases, specify your own pattern string, for example, using `MMM dd yyyy HH:mm:ss.SSSVV` for timestamps in the form of `"Jan 21 1998 01:02:03.456+08:00"`. -* **spannerProjectId** : The ID of the Google Cloud project that contains the Spanner database. If not set, the project ID of the default Google Cloud project is used. -* **spannerPriority** : The request priority for Spanner calls. Possible values are HIGH, MEDIUM, and LOW. The default value is MEDIUM. -* **handleNewLine** : If `true`, the input data can contain newline characters. Otherwise, newline characters cause an error. The default value is `false`. Enabling newline handling can reduce performance. -* **invalidOutputPath** : The Cloud Storage path to use when writing rows that cannot be imported. (Example: gs://your-bucket/your-path). Defaults to empty. +* **spannerHost**: The Cloud Spanner endpoint to call in the template. Only used for testing. For example, `https://batch-spanner.googleapis.com`. Defaults to: https://batch-spanner.googleapis.com. +* **columnDelimiter**: The column delimiter that the source file uses. The default value is `,`. For example, `,`. +* **fieldQualifier**: The character that must surround any value in the source file that contains the columnDelimiter. The default value is double quotes. +* **trailingDelimiter**: Specifies whether the lines in the source files have trailing delimiters, that is, whether the `columnDelimiter` character appears at the end of each line, after the last column value. The default value is `true`. +* **escape**: The escape character the source file uses. By default, this parameter is not set and the template does not use the escape character. +* **nullString**: The string that represents a `NULL` value. By default, this parameter is not set and the template does not use the null string. +* **dateFormat**: The format used to parse date columns. By default, the pipeline tries to parse the date columns as `yyyy-M-d[' 00:00:00']`, for example, as `2019-01-31` or `2019-1-1 00:00:00`. If your date format is different, specify the format using the java.time.format.DateTimeFormatter (https://docs.oracle.com/en/java/javase/11/docs/api/java.base/java/time/format/DateTimeFormatter.html) patterns. 
+* **timestampFormat**: The format used to parse timestamp columns. If the timestamp is a long integer, then it is parsed as Unix epoch time. Otherwise, it is parsed as a string using the java.time.format.DateTimeFormatter.ISO_INSTANT (https://docs.oracle.com/en/java/javase/11/docs/api/java.base/java/time/format/DateTimeFormatter.html#ISO_INSTANT) format. For other cases, specify your own pattern string, for example, using `MMM dd yyyy HH:mm:ss.SSSVV` for timestamps in the form of `Jan 21 1998 01:02:03.456+08:00`. +* **spannerProjectId**: The ID of the Google Cloud project that contains the Spanner database. If not set, the project ID of the default Google Cloud project is used. +* **spannerPriority**: The request priority for Spanner calls. Possible values are `HIGH`, `MEDIUM`, and `LOW`. The default value is `MEDIUM`. +* **handleNewLine**: If `true`, the input data can contain newline characters. Otherwise, newline characters cause an error. The default value is `false`. Enabling newline handling can reduce performance. +* **invalidOutputPath**: The Cloud Storage path to use when writing rows that cannot be imported. For example, `gs://your-bucket/your-path`. Defaults to empty. @@ -243,7 +243,7 @@ resource "google_dataflow_job" "gcs_text_to_cloud_spanner" { parameters = { instanceId = "" databaseId = "" - importManifest = "gs://your-bucket/your-folder/your-manifest.json" + importManifest = "" # spannerHost = "https://batch-spanner.googleapis.com" # columnDelimiter = "," # fieldQualifier = """ @@ -255,7 +255,7 @@ resource "google_dataflow_job" "gcs_text_to_cloud_spanner" { # spannerProjectId = "" # spannerPriority = "" # handleNewLine = "false" - # invalidOutputPath = "gs://your-bucket/your-path" + # invalidOutputPath = "" } } ``` diff --git a/v1/README_GCS_Text_to_Datastore.md b/v1/README_GCS_Text_to_Datastore.md index a7ac8e2d9f..179d9c9570 100644 --- a/v1/README_GCS_Text_to_Datastore.md +++ b/v1/README_GCS_Text_to_Datastore.md @@ -20,15 +20,15 @@ on [Metadata Annotations](https://github.com/GoogleCloudPlatform/DataflowTemplat ### Required parameters -* **textReadPattern** : A Cloud Storage path pattern that specifies the location of your text data files. For example, `gs://mybucket/somepath/*.json`. -* **datastoreWriteProjectId** : The ID of the Google Cloud project to write the Datastore entities to. -* **errorWritePath** : The error log output file to use for write failures that occur during processing. (Example: gs://your-bucket/errors/). +* **textReadPattern**: A Cloud Storage path pattern that specifies the location of your text data files. For example, `gs://mybucket/somepath/*.json`. +* **datastoreWriteProjectId**: The ID of the Google Cloud project to write the Datastore entities to. +* **errorWritePath**: The error log output file to use for write failures that occur during processing. For example, `gs://your-bucket/errors/`. ### Optional parameters -* **javascriptTextTransformGcsPath** : The Cloud Storage URI of the .js file that defines the JavaScript user-defined function (UDF) to use. For example, `gs://my-bucket/my-udfs/my_file.js`. -* **javascriptTextTransformFunctionName** : The name of the JavaScript user-defined function (UDF) to use. For example, if your JavaScript function code is `myTransform(inJson) { /*...do stuff...*/ }`, then the function name is `myTransform`. For sample JavaScript UDFs, see UDF Examples (https://github.com/GoogleCloudPlatform/DataflowTemplates#udf-examples). 
-* **datastoreHintNumWorkers** : Hint for the expected number of workers in the Datastore ramp-up throttling step. Default is `500`. +* **javascriptTextTransformGcsPath**: The Cloud Storage URI of the .js file that defines the JavaScript user-defined function (UDF) to use. For example, `gs://my-bucket/my-udfs/my_file.js`. +* **javascriptTextTransformFunctionName**: The name of the JavaScript user-defined function (UDF) to use. For example, if your JavaScript function code is `myTransform(inJson) { /*...do stuff...*/ }`, then the function name is `myTransform`. For sample JavaScript UDFs, see UDF Examples (https://github.com/GoogleCloudPlatform/DataflowTemplates#udf-examples). +* **datastoreHintNumWorkers**: Hint for the expected number of workers in the Datastore ramp-up throttling step. Defaults to `500`. ## User-Defined functions (UDFs) @@ -220,7 +220,7 @@ resource "google_dataflow_job" "gcs_text_to_datastore" { parameters = { textReadPattern = "" datastoreWriteProjectId = "" - errorWritePath = "gs://your-bucket/errors/" + errorWritePath = "" # javascriptTextTransformGcsPath = "" # javascriptTextTransformFunctionName = "" # datastoreHintNumWorkers = "500" diff --git a/v1/README_GCS_Text_to_Firestore.md b/v1/README_GCS_Text_to_Firestore.md index f0e1f4a4bd..522ffd0498 100644 --- a/v1/README_GCS_Text_to_Firestore.md +++ b/v1/README_GCS_Text_to_Firestore.md @@ -20,15 +20,15 @@ on [Metadata Annotations](https://github.com/GoogleCloudPlatform/DataflowTemplat ### Required parameters -* **textReadPattern** : A Cloud Storage path pattern that specifies the location of your text data files. For example, `gs://mybucket/somepath/*.json`. -* **firestoreWriteProjectId** : The ID of the Google Cloud project to write the Firestore entities to. -* **errorWritePath** : The error log output file to use for write failures that occur during processing. (Example: gs://your-bucket/errors/). +* **textReadPattern**: A Cloud Storage path pattern that specifies the location of your text data files. For example, `gs://mybucket/somepath/*.json`. +* **firestoreWriteProjectId**: The ID of the Google Cloud project to write the Firestore entities to. +* **errorWritePath**: The error log output file to use for write failures that occur during processing. For example, `gs://your-bucket/errors/`. ### Optional parameters -* **javascriptTextTransformGcsPath** : The Cloud Storage URI of the .js file that defines the JavaScript user-defined function (UDF) to use. For example, `gs://my-bucket/my-udfs/my_file.js`. -* **javascriptTextTransformFunctionName** : The name of the JavaScript user-defined function (UDF) to use. For example, if your JavaScript function code is `myTransform(inJson) { /*...do stuff...*/ }`, then the function name is `myTransform`. For sample JavaScript UDFs, see UDF Examples (https://github.com/GoogleCloudPlatform/DataflowTemplates#udf-examples). -* **firestoreHintNumWorkers** : Hint for the expected number of workers in the Firestore ramp-up throttling step. Default is 500. +* **javascriptTextTransformGcsPath**: The Cloud Storage URI of the .js file that defines the JavaScript user-defined function (UDF) to use. For example, `gs://my-bucket/my-udfs/my_file.js`. +* **javascriptTextTransformFunctionName**: The name of the JavaScript user-defined function (UDF) to use. For example, if your JavaScript function code is `myTransform(inJson) { /*...do stuff...*/ }`, then the function name is `myTransform`. 
For sample JavaScript UDFs, see UDF Examples (https://github.com/GoogleCloudPlatform/DataflowTemplates#udf-examples). +* **firestoreHintNumWorkers**: Hint for the expected number of workers in the Firestore ramp-up throttling step. The default value is `500`. ## User-Defined functions (UDFs) @@ -220,7 +220,7 @@ resource "google_dataflow_job" "gcs_text_to_firestore" { parameters = { textReadPattern = "" firestoreWriteProjectId = "" - errorWritePath = "gs://your-bucket/errors/" + errorWritePath = "" # javascriptTextTransformGcsPath = "" # javascriptTextTransformFunctionName = "" # firestoreHintNumWorkers = "" diff --git a/v1/README_Jdbc_to_BigQuery.md b/v1/README_Jdbc_to_BigQuery.md index 435929e589..6ebdb7b27c 100644 --- a/v1/README_Jdbc_to_BigQuery.md +++ b/v1/README_Jdbc_to_BigQuery.md @@ -25,22 +25,22 @@ on [Metadata Annotations](https://github.com/GoogleCloudPlatform/DataflowTemplat ### Required parameters -* **driverJars** : Comma separate Cloud Storage paths for JDBC drivers. (Example: gs://your-bucket/driver_jar1.jar,gs://your-bucket/driver_jar2.jar). -* **driverClassName** : JDBC driver class name to use. (Example: com.mysql.jdbc.Driver). -* **connectionURL** : Url connection string to connect to the JDBC source. (Example: jdbc:mysql://some-host:3306/sampledb). -* **query** : Query to be executed on the source to extract the data. If a Cloud Storage path is given (gs://...), the query will be fetched from that file. (Example: select * from sampledb.sample_table). -* **outputTable** : BigQuery table location to write the output to. The table's schema must match the input objects. -* **bigQueryLoadingTemporaryDirectory** : Temporary directory for BigQuery loading process (Example: gs://your-bucket/your-files/temp_dir). +* **driverJars**: Comma separate Cloud Storage paths for JDBC drivers. For example, `gs://your-bucket/driver_jar1.jar,gs://your-bucket/driver_jar2.jar`. +* **driverClassName**: JDBC driver class name to use. For example, `com.mysql.jdbc.Driver`. +* **connectionURL**: Url connection string to connect to the JDBC source. For example, `jdbc:mysql://some-host:3306/sampledb`. +* **query**: Query to be executed on the source to extract the data. If a Cloud Storage path is given (gs://...), the query will be fetched from that file. For example, `select * from sampledb.sample_table`. +* **outputTable**: BigQuery table location to write the output to. The table's schema must match the input objects. +* **bigQueryLoadingTemporaryDirectory**: Temporary directory for BigQuery loading process For example, `gs://your-bucket/your-files/temp_dir`. ### Optional parameters -* **connectionProperties** : Properties string to use for the JDBC connection. Format of the string must be [propertyName=property;]*. (Example: unicode=true;characterEncoding=UTF-8). -* **username** : User name to be used for the JDBC connection. User name can be passed in as plaintext or as a base64 encoded string encrypted by Google Cloud KMS. -* **password** : Password to be used for the JDBC connection. Password can be passed in as plaintext or as a base64 encoded string encrypted by Google Cloud KMS. -* **KMSEncryptionKey** : If this parameter is provided, password, user name and connection string should all be passed in encrypted. Encrypt parameters using the KMS API encrypt endpoint. See: https://cloud.google.com/kms/docs/reference/rest/v1/projects.locations.keyRings.cryptoKeys/encrypt (Example: projects/your-project/locations/global/keyRings/your-keyring/cryptoKeys/your-key). 
-* **useColumnAlias** : If enabled (set to true) the pipeline will consider column alias ("AS") instead of the column name to map the rows to BigQuery. Defaults to false.
-* **disabledAlgorithms** : Comma-separated list of algorithms to disable. If this value is set to none, no algorithm is disabled. Use this parameter with caution, because the algorithms disabled by default might have vulnerabilities or performance issues. (Example: SSLv3, RC4).
-* **extraFilesToStage** : Comma-separated Cloud Storage paths or Secret Manager secrets for files to stage in the worker. These files are saved in the /extra_files directory in each worker. (Example: gs:///file.txt,projects//secrets//versions/).
+* **connectionProperties**: Properties string to use for the JDBC connection. Format of the string must be `[propertyName=property;]*`. For example, `unicode=true;characterEncoding=UTF-8`.
+* **username**: User name to be used for the JDBC connection. The user name can be passed in as plaintext or as a base64-encoded string encrypted by Google Cloud KMS.
+* **password**: Password to be used for the JDBC connection. The password can be passed in as plaintext or as a base64-encoded string encrypted by Google Cloud KMS.
+* **KMSEncryptionKey**: If this parameter is provided, the password, user name, and connection string must all be passed in encrypted. Encrypt parameters using the KMS API encrypt endpoint. See https://cloud.google.com/kms/docs/reference/rest/v1/projects.locations.keyRings.cryptoKeys/encrypt. For example, `projects/your-project/locations/global/keyRings/your-keyring/cryptoKeys/your-key`.
+* **useColumnAlias**: If set to `true`, the pipeline uses the column alias (`AS`) instead of the column name to map the rows to BigQuery. Defaults to `false`.
+* **disabledAlgorithms**: Comma-separated list of algorithms to disable. If this value is set to `none`, no algorithm is disabled. Use this parameter with caution, because the algorithms disabled by default might have vulnerabilities or performance issues. For example, `SSLv3, RC4`.
+* **extraFilesToStage**: Comma-separated Cloud Storage paths or Secret Manager secrets for files to stage in the worker. These files are saved in the `/extra_files` directory in each worker. For example, `gs:///file.txt,projects//secrets//versions/`.
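Editor's aside on the `username`/`password`/`KMSEncryptionKey` contract documented above (values passed as base64-encoded Cloud KMS ciphertext): the sketch below is one hedged way to produce such a value with the `google-cloud-kms` Java client. The class name, key path, and credential value are placeholders, not part of the template or of this change.

```java
import com.google.cloud.kms.v1.EncryptResponse;
import com.google.cloud.kms.v1.KeyManagementServiceClient;
import com.google.protobuf.ByteString;
import java.util.Base64;

/** Illustrative only: KMS-encrypt a JDBC credential and base64-encode the ciphertext. */
public class EncryptJdbcParameter {
  public static void main(String[] args) throws Exception {
    // Placeholder resource name; substitute your own project, key ring, and key.
    // This is the same value you would pass as the KMSEncryptionKey template parameter.
    String kmsKey =
        "projects/your-project/locations/global/keyRings/your-keyring/cryptoKeys/your-key";
    String plaintext = "my-jdbc-password"; // placeholder credential

    try (KeyManagementServiceClient client = KeyManagementServiceClient.create()) {
      // Encrypt with the KMS encrypt endpoint, then base64-encode the ciphertext.
      EncryptResponse response = client.encrypt(kmsKey, ByteString.copyFromUtf8(plaintext));
      String encoded = Base64.getEncoder().encodeToString(response.getCiphertext().toByteArray());
      // Pass this value as the encrypted password (or username/connectionURL) parameter.
      System.out.println(encoded);
    }
  }
}
```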
@@ -241,19 +241,19 @@ resource "google_dataflow_job" "jdbc_to_bigquery" { region = var.region temp_gcs_location = "gs://bucket-name-here/temp" parameters = { - driverJars = "gs://your-bucket/driver_jar1.jar,gs://your-bucket/driver_jar2.jar" - driverClassName = "com.mysql.jdbc.Driver" - connectionURL = "jdbc:mysql://some-host:3306/sampledb" - query = "select * from sampledb.sample_table" + driverJars = "" + driverClassName = "" + connectionURL = "" + query = "" outputTable = "" - bigQueryLoadingTemporaryDirectory = "gs://your-bucket/your-files/temp_dir" - # connectionProperties = "unicode=true;characterEncoding=UTF-8" + bigQueryLoadingTemporaryDirectory = "" + # connectionProperties = "" # username = "" # password = "" - # KMSEncryptionKey = "projects/your-project/locations/global/keyRings/your-keyring/cryptoKeys/your-key" + # KMSEncryptionKey = "" # useColumnAlias = "false" - # disabledAlgorithms = "SSLv3, RC4" - # extraFilesToStage = "gs:///file.txt,projects//secrets//versions/" + # disabledAlgorithms = "" + # extraFilesToStage = "" } } ``` diff --git a/v1/README_PubSub_Subscription_to_BigQuery.md b/v1/README_PubSub_Subscription_to_BigQuery.md index 4b3607e31b..abbe209540 100644 --- a/v1/README_PubSub_Subscription_to_BigQuery.md +++ b/v1/README_PubSub_Subscription_to_BigQuery.md @@ -20,15 +20,15 @@ on [Metadata Annotations](https://github.com/GoogleCloudPlatform/DataflowTemplat ### Required parameters -* **outputTableSpec** : The BigQuery output table location, in the format `:.`. -* **inputSubscription** : The Pub/Sub input subscription to read from, in the format `projects//subscriptions/`. +* **outputTableSpec**: The BigQuery output table location, in the format `:.`. +* **inputSubscription**: The Pub/Sub input subscription to read from, in the format `projects//subscriptions/`. ### Optional parameters -* **outputDeadletterTable** : The BigQuery table to use for messages that fail to reach the output table, in the format of `:.`. If the table doesn't exist, it is created during pipeline execution. If not specified, `OUTPUT_TABLE_SPEC_error_records` is used. -* **javascriptTextTransformGcsPath** : The Cloud Storage URI of the .js file that defines the JavaScript user-defined function (UDF) to use. For example, `gs://my-bucket/my-udfs/my_file.js`. -* **javascriptTextTransformFunctionName** : The name of the JavaScript user-defined function (UDF) to use. For example, if your JavaScript function code is `myTransform(inJson) { /*...do stuff...*/ }`, then the function name is `myTransform`. For sample JavaScript UDFs, see UDF Examples (https://github.com/GoogleCloudPlatform/DataflowTemplates#udf-examples). -* **javascriptTextTransformReloadIntervalMinutes** : Define the interval that workers may check for JavaScript UDF changes to reload the files. Defaults to: 0. +* **outputDeadletterTable**: The BigQuery table to use for messages that fail to reach the output table, in the format of `:.`. If the table doesn't exist, it is created during pipeline execution. If not specified, `OUTPUT_TABLE_SPEC_error_records` is used. +* **javascriptTextTransformGcsPath**: The Cloud Storage URI of the .js file that defines the JavaScript user-defined function (UDF) to use. For example, `gs://my-bucket/my-udfs/my_file.js`. +* **javascriptTextTransformFunctionName**: The name of the JavaScript user-defined function (UDF) to use. For example, if your JavaScript function code is `myTransform(inJson) { /*...do stuff...*/ }`, then the function name is `myTransform`. 
For sample JavaScript UDFs, see UDF Examples (https://github.com/GoogleCloudPlatform/DataflowTemplates#udf-examples). +* **javascriptTextTransformReloadIntervalMinutes**: Define the interval that workers may check for JavaScript UDF changes to reload the files. Defaults to: 0. ## User-Defined functions (UDFs) diff --git a/v1/README_PubSub_to_BigQuery.md b/v1/README_PubSub_to_BigQuery.md index fdab41b3af..bf88206103 100644 --- a/v1/README_PubSub_to_BigQuery.md +++ b/v1/README_PubSub_to_BigQuery.md @@ -20,15 +20,15 @@ on [Metadata Annotations](https://github.com/GoogleCloudPlatform/DataflowTemplat ### Required parameters -* **outputTableSpec** : The BigQuery output table location, in the format `:.`. -* **inputTopic** : The Pub/Sub topic to read the input from. +* **outputTableSpec**: The BigQuery output table location, in the format `:.`. +* **inputTopic**: The Pub/Sub topic to read the input from. ### Optional parameters -* **outputDeadletterTable** : The BigQuery table to use for messages that fail to reach the output table, in the format of `:.`. If the table doesn't exist, it is created during pipeline execution. If not specified, `OUTPUT_TABLE_SPEC_error_records` is used. -* **javascriptTextTransformGcsPath** : The Cloud Storage URI of the .js file that defines the JavaScript user-defined function (UDF) to use. For example, `gs://my-bucket/my-udfs/my_file.js`. -* **javascriptTextTransformFunctionName** : The name of the JavaScript user-defined function (UDF) to use. For example, if your JavaScript function code is `myTransform(inJson) { /*...do stuff...*/ }`, then the function name is `myTransform`. For sample JavaScript UDFs, see UDF Examples (https://github.com/GoogleCloudPlatform/DataflowTemplates#udf-examples). -* **javascriptTextTransformReloadIntervalMinutes** : Define the interval that workers may check for JavaScript UDF changes to reload the files. Defaults to: 0. +* **outputDeadletterTable**: The BigQuery table to use for messages that fail to reach the output table, in the format of `:.`. If the table doesn't exist, it is created during pipeline execution. If not specified, `OUTPUT_TABLE_SPEC_error_records` is used. +* **javascriptTextTransformGcsPath**: The Cloud Storage URI of the .js file that defines the JavaScript user-defined function (UDF) to use. For example, `gs://my-bucket/my-udfs/my_file.js`. +* **javascriptTextTransformFunctionName**: The name of the JavaScript user-defined function (UDF) to use. For example, if your JavaScript function code is `myTransform(inJson) { /*...do stuff...*/ }`, then the function name is `myTransform`. For sample JavaScript UDFs, see UDF Examples (https://github.com/GoogleCloudPlatform/DataflowTemplates#udf-examples). +* **javascriptTextTransformReloadIntervalMinutes**: Define the interval that workers may check for JavaScript UDF changes to reload the files. Defaults to: 0. ## User-Defined functions (UDFs) diff --git a/v1/README_Spanner_to_GCS_Text.md b/v1/README_Spanner_to_GCS_Text.md index 65e637306d..93ab53cfe8 100644 --- a/v1/README_Spanner_to_GCS_Text.md +++ b/v1/README_Spanner_to_GCS_Text.md @@ -18,19 +18,19 @@ on [Metadata Annotations](https://github.com/GoogleCloudPlatform/DataflowTemplat ### Required parameters -* **spannerTable** : The Spanner table to read the data from. -* **spannerProjectId** : The ID of the Google Cloud project that contains the Spanner database to read data from. -* **spannerInstanceId** : The instance ID of the requested table. -* **spannerDatabaseId** : The database ID of the requested table. 
-* **textWritePrefix** : The Cloud Storage path prefix that specifies where the data is written. (Example: gs://mybucket/somefolder/). +* **spannerTable**: The Spanner table to read the data from. +* **spannerProjectId**: The ID of the Google Cloud project that contains the Spanner database to read data from. +* **spannerInstanceId**: The instance ID of the requested table. +* **spannerDatabaseId**: The database ID of the requested table. +* **textWritePrefix**: The Cloud Storage path prefix that specifies where the data is written. For example, `gs://mybucket/somefolder/`. ### Optional parameters -* **csvTempDirectory** : The Cloud Storage path where temporary CSV files are written. (Example: gs://your-bucket/your-path). -* **spannerPriority** : The request priority (https://cloud.google.com/spanner/docs/reference/rest/v1/RequestOptions) for Spanner calls. Possible values are `HIGH`, `MEDIUM`, `LOW`. The default value is `MEDIUM`. -* **spannerHost** : The Cloud Spanner endpoint to call in the template. Only used for testing. (Example: https://batch-spanner.googleapis.com). Defaults to: https://batch-spanner.googleapis.com. -* **spannerSnapshotTime** : The timestamp that corresponds to the version of the Spanner database that you want to read from. The timestamp must be specified in the RFC 3339 (https://tools.ietf.org/html/rfc3339) UTC "Zulu" format. The timestamp must be in the past and maximum timestamp staleness (https://cloud.google.com/spanner/docs/timestamp-bounds#maximum_timestamp_staleness) applies. (Example: 1990-12-31T23:59:60Z). Defaults to empty. -* **dataBoostEnabled** : Set to `true` to use the compute resources of Spanner Data Boost to run the job with near-zero impact on Spanner OLTP workflows. When true, requires the `spanner.databases.useDataBoost` Identity and Access Management (IAM) permission. For more information, see Data Boost overview (https://cloud.google.com/spanner/docs/databoost/databoost-overview). Defaults to: false. +* **csvTempDirectory**: The Cloud Storage path where temporary CSV files are written. For example, `gs://your-bucket/your-path`. +* **spannerPriority**: The request priority (https://cloud.google.com/spanner/docs/reference/rest/v1/RequestOptions) for Spanner calls. Possible values are `HIGH`, `MEDIUM`, `LOW`. The default value is `MEDIUM`. +* **spannerHost**: The Cloud Spanner endpoint to call in the template. Only used for testing. For example, `https://batch-spanner.googleapis.com`. Defaults to: https://batch-spanner.googleapis.com. +* **spannerSnapshotTime**: The timestamp that corresponds to the version of the Spanner database that you want to read from. The timestamp must be specified in the RFC 3339 (https://tools.ietf.org/html/rfc3339) UTC Zulu Time format. The timestamp must be in the past and maximum timestamp staleness (https://cloud.google.com/spanner/docs/timestamp-bounds#maximum_timestamp_staleness) applies. For example, `1990-12-31T23:59:60Z`. Defaults to empty. +* **dataBoostEnabled**: Set to `true` to use the compute resources of Spanner Data Boost to run the job with near-zero impact on Spanner OLTP workflows. When true, requires the `spanner.databases.useDataBoost` Identity and Access Management (IAM) permission. For more information, see Data Boost overview (https://cloud.google.com/spanner/docs/databoost/databoost-overview). Defaults to: false. 
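Editor's aside on `spannerSnapshotTime` above: the value must be an RFC 3339 UTC "Zulu" timestamp in the past, subject to Spanner's maximum timestamp staleness. A minimal `java.time` sketch that produces a value in that form (the ten-minute offset is arbitrary):

```java
import java.time.Instant;
import java.time.temporal.ChronoUnit;

/** Illustrative only: build an RFC 3339 UTC "Zulu" timestamp for spannerSnapshotTime. */
public class SnapshotTimeExample {
  public static void main(String[] args) {
    // A point slightly in the past, truncated to whole seconds, e.g. 2024-05-01T12:34:56Z.
    Instant snapshot = Instant.now().minus(10, ChronoUnit.MINUTES).truncatedTo(ChronoUnit.SECONDS);
    // Instant.toString() emits the ISO-8601/RFC 3339 "Zulu" form expected by the template.
    System.out.println(snapshot);
  }
}
```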
@@ -226,11 +226,11 @@ resource "google_dataflow_job" "spanner_to_gcs_text" { spannerProjectId = "" spannerInstanceId = "" spannerDatabaseId = "" - textWritePrefix = "gs://mybucket/somefolder/" - # csvTempDirectory = "gs://your-bucket/your-path" + textWritePrefix = "" + # csvTempDirectory = "" # spannerPriority = "" # spannerHost = "https://batch-spanner.googleapis.com" - # spannerSnapshotTime = "1990-12-31T23:59:60Z" + # spannerSnapshotTime = "" # dataBoostEnabled = "false" } } diff --git a/v1/README_Stream_DLP_GCS_Text_to_BigQuery.md b/v1/README_Stream_DLP_GCS_Text_to_BigQuery.md index 7f8079200a..41f81a6f12 100644 --- a/v1/README_Stream_DLP_GCS_Text_to_BigQuery.md +++ b/v1/README_Stream_DLP_GCS_Text_to_BigQuery.md @@ -34,15 +34,15 @@ on [Metadata Annotations](https://github.com/GoogleCloudPlatform/DataflowTemplat ### Required parameters -* **inputFilePattern** : The CSV files to read input data records from. Wildcards are also accepted. (Example: gs://mybucket/my_csv_filename.csv or gs://mybucket/file-*.csv). -* **deidentifyTemplateName** : The Sensitive Data Protection de-identification template to use for API requests, specified with the pattern projects//deidentifyTemplates/. (Example: projects/your-project-id/locations/global/deidentifyTemplates/generated_template_id). -* **datasetName** : The BigQuery dataset to use when sending tokenized results. The dataset must exist prior to execution. -* **dlpProjectId** : The ID for the Google Cloud project that owns the DLP API resource. This project can be the same project that owns the Sensitive Data Protection templates, or it can be a separate project. +* **inputFilePattern**: The CSV files to read input data records from. Wildcards are also accepted. For example, `gs://mybucket/my_csv_filename.csv or gs://mybucket/file-*.csv`. +* **deidentifyTemplateName**: The Sensitive Data Protection de-identification template to use for API requests, specified with the pattern `projects//deidentifyTemplates/`. For example, `projects/your-project-id/locations/global/deidentifyTemplates/generated_template_id`. +* **datasetName**: The BigQuery dataset to use when sending tokenized results. The dataset must exist prior to execution. +* **dlpProjectId**: The ID for the Google Cloud project that owns the DLP API resource. This project can be the same project that owns the Sensitive Data Protection templates, or it can be a separate project. ### Optional parameters -* **inspectTemplateName** : The Sensitive Data Protection inspection template to use for API requests, specified with the pattern projects//identifyTemplates/. (Example: projects/your-project-id/locations/global/inspectTemplates/generated_template_id). -* **batchSize** : The chunking or batch size to use for sending data to inspect and detokenize. For a CSV file, the value of `batchSize` is the number of rows in a batch. Determine the batch size based on the size of the records and the sizing of the file. The DLP API has a payload size limit of 524 KB per API call. +* **inspectTemplateName**: The Sensitive Data Protection inspection template to use for API requests, specified with the pattern `projects//identifyTemplates/`. For example, `projects/your-project-id/locations/global/inspectTemplates/generated_template_id`. +* **batchSize**: The chunking or batch size to use for sending data to inspect and detokenize. For a CSV file, the value of `batchSize` is the number of rows in a batch. Determine the batch size based on the size of the records and the sizing of the file. 
The DLP API has a payload size limit of 524 KB per API call. @@ -222,11 +222,11 @@ resource "google_dataflow_job" "stream_dlp_gcs_text_to_bigquery" { region = var.region temp_gcs_location = "gs://bucket-name-here/temp" parameters = { - inputFilePattern = "gs://mybucket/my_csv_filename.csv or gs://mybucket/file-*.csv" - deidentifyTemplateName = "projects/your-project-id/locations/global/deidentifyTemplates/generated_template_id" + inputFilePattern = "" + deidentifyTemplateName = "" datasetName = "" dlpProjectId = "" - # inspectTemplateName = "projects/your-project-id/locations/global/inspectTemplates/generated_template_id" + # inspectTemplateName = "" # batchSize = "" } } diff --git a/v1/README_Stream_GCS_Text_to_BigQuery.md b/v1/README_Stream_GCS_Text_to_BigQuery.md index edaa98aea2..5abd7dd324 100644 --- a/v1/README_Stream_GCS_Text_to_BigQuery.md +++ b/v1/README_Stream_GCS_Text_to_BigQuery.md @@ -30,8 +30,8 @@ on [Metadata Annotations](https://github.com/GoogleCloudPlatform/DataflowTemplat ### Required parameters -* **inputFilePattern** : Path of the file pattern glob to read from. (Example: gs://your-bucket/path/*.csv). -* **JSONPath** : JSON file with BigQuery Schema description. JSON Example: { +* **inputFilePattern**: Path of the file pattern glob to read from. For example, `gs://your-bucket/path/*.csv`. +* **JSONPath**: JSON file with BigQuery Schema description. JSON Example: { "BigQuery Schema": [ { "name": "location", @@ -55,15 +55,15 @@ on [Metadata Annotations](https://github.com/GoogleCloudPlatform/DataflowTemplat } ] }. -* **outputTable** : BigQuery table location to write the output to. The table's schema must match the input objects. -* **bigQueryLoadingTemporaryDirectory** : Temporary directory for BigQuery loading process (Example: gs://your-bucket/your-files/temp_dir). +* **outputTable**: BigQuery table location to write the output to. The table's schema must match the input objects. +* **bigQueryLoadingTemporaryDirectory**: Temporary directory for BigQuery loading process For example, `gs://your-bucket/your-files/temp_dir`. ### Optional parameters -* **outputDeadletterTable** : BigQuery table for failed messages. Messages failed to reach the output table for different reasons (e.g., mismatched schema, malformed json) are written to this table. If it doesn't exist, it will be created during pipeline execution. If not specified, "outputTableSpec_error_records" is used instead. (Example: your-project-id:your-dataset.your-table-name). -* **javascriptTextTransformGcsPath** : The Cloud Storage URI of the .js file that defines the JavaScript user-defined function (UDF) to use. For example, `gs://my-bucket/my-udfs/my_file.js`. -* **javascriptTextTransformFunctionName** : The name of the JavaScript user-defined function (UDF) to use. For example, if your JavaScript function code is `myTransform(inJson) { /*...do stuff...*/ }`, then the function name is `myTransform`. For sample JavaScript UDFs, see UDF Examples (https://github.com/GoogleCloudPlatform/DataflowTemplates#udf-examples). -* **javascriptTextTransformReloadIntervalMinutes** : Define the interval that workers may check for JavaScript UDF changes to reload the files. Defaults to: 0. +* **outputDeadletterTable**: BigQuery table for failed messages. Messages failed to reach the output table for different reasons (e.g., mismatched schema, malformed json) are written to this table. If it doesn't exist, it will be created during pipeline execution. If not specified, "outputTableSpec_error_records" is used instead. 
For example, `your-project-id:your-dataset.your-table-name`. +* **javascriptTextTransformGcsPath**: The Cloud Storage URI of the .js file that defines the JavaScript user-defined function (UDF) to use. For example, `gs://my-bucket/my-udfs/my_file.js`. +* **javascriptTextTransformFunctionName**: The name of the JavaScript user-defined function (UDF) to use. For example, if your JavaScript function code is `myTransform(inJson) { /*...do stuff...*/ }`, then the function name is `myTransform`. For sample JavaScript UDFs, see UDF Examples (https://github.com/GoogleCloudPlatform/DataflowTemplates#udf-examples). +* **javascriptTextTransformReloadIntervalMinutes**: Define the interval that workers may check for JavaScript UDF changes to reload the files. Defaults to: 0. ## User-Defined functions (UDFs) @@ -259,11 +259,11 @@ resource "google_dataflow_job" "stream_gcs_text_to_bigquery" { region = var.region temp_gcs_location = "gs://bucket-name-here/temp" parameters = { - inputFilePattern = "gs://your-bucket/path/*.csv" + inputFilePattern = "" JSONPath = "" outputTable = "" - bigQueryLoadingTemporaryDirectory = "gs://your-bucket/your-files/temp_dir" - # outputDeadletterTable = "your-project-id:your-dataset.your-table-name" + bigQueryLoadingTemporaryDirectory = "" + # outputDeadletterTable = "" # javascriptTextTransformGcsPath = "" # javascriptTextTransformFunctionName = "" # javascriptTextTransformReloadIntervalMinutes = "0" diff --git a/v1/README_Stream_GCS_Text_to_Cloud_PubSub.md b/v1/README_Stream_GCS_Text_to_Cloud_PubSub.md index bb93150e7b..0786cd1296 100644 --- a/v1/README_Stream_GCS_Text_to_Cloud_PubSub.md +++ b/v1/README_Stream_GCS_Text_to_Cloud_PubSub.md @@ -32,8 +32,8 @@ on [Metadata Annotations](https://github.com/GoogleCloudPlatform/DataflowTemplat ### Required parameters -* **inputFilePattern** : The input file pattern to read from. (Example: gs://bucket-name/files/*.json). -* **outputTopic** : The Pub/Sub input topic to write to. The name must be in the format `projects//topics/`. (Example: projects/your-project-id/topics/your-topic-name). +* **inputFilePattern**: The input file pattern to read from. For example, `gs://bucket-name/files/*.json`. +* **outputTopic**: The Pub/Sub input topic to write to. The name must be in the format `projects//topics/`. For example, `projects/your-project-id/topics/your-topic-name`. ### Optional parameters @@ -204,8 +204,8 @@ resource "google_dataflow_job" "stream_gcs_text_to_cloud_pubsub" { region = var.region temp_gcs_location = "gs://bucket-name-here/temp" parameters = { - inputFilePattern = "gs://bucket-name/files/*.json" - outputTopic = "projects/your-project-id/topics/your-topic-name" + inputFilePattern = "" + outputTopic = "" } } ``` diff --git a/v1/README_Word_Count.md b/v1/README_Word_Count.md index 0763b723f1..2e7ec5e8c1 100644 --- a/v1/README_Word_Count.md +++ b/v1/README_Word_Count.md @@ -14,8 +14,8 @@ on [Metadata Annotations](https://github.com/GoogleCloudPlatform/DataflowTemplat ### Required parameters -* **inputFile** : The input file pattern Dataflow reads from. Use the example file (gs://dataflow-samples/shakespeare/kinglear.txt) or enter the path to your own using the same format: gs://your-bucket/your-file.txt. -* **output** : Path and filename prefix for writing output files. Ex: gs://your-bucket/counts. +* **inputFile**: The input file pattern Dataflow reads from. Use the example file (gs://dataflow-samples/shakespeare/kinglear.txt) or enter the path to your own using the same format: gs://your-bucket/your-file.txt. 
+* **output**: Path and filename prefix for writing output files. Ex: gs://your-bucket/counts. ### Optional parameters diff --git a/v1/src/main/java/com/google/cloud/teleport/bigtable/AvroToBigtable.java b/v1/src/main/java/com/google/cloud/teleport/bigtable/AvroToBigtable.java index 77aa88891e..0d89795f3b 100644 --- a/v1/src/main/java/com/google/cloud/teleport/bigtable/AvroToBigtable.java +++ b/v1/src/main/java/com/google/cloud/teleport/bigtable/AvroToBigtable.java @@ -114,7 +114,7 @@ public interface Options extends PipelineOptions { groupName = "Source", description = "Input Cloud Storage File(s)", helpText = "The Cloud Storage path pattern where data is located.", - example = "gs:////*") + example = "gs:///FOLDER/PREFIX*") ValueProvider getInputFilePattern(); @SuppressWarnings("unused") @@ -125,7 +125,7 @@ public interface Options extends PipelineOptions { optional = true, description = "If true, large rows will be split into multiple MutateRows requests", helpText = - "The flag for enabling splitting of large rows into multiple MutateRows requests. Note that when a large row is split between multiple API calls, the updates to the row are not atomic. ") + "The flag for enabling splitting of large rows into multiple MutateRows requests. Note that when a large row is split between multiple API calls, the updates to the row are not atomic.") ValueProvider getSplitLargeRows(); void setSplitLargeRows(ValueProvider splitLargeRows); diff --git a/v1/src/main/java/com/google/cloud/teleport/bigtable/BigtableToJson.java b/v1/src/main/java/com/google/cloud/teleport/bigtable/BigtableToJson.java index 56e6ecb163..e3fb5ffcdb 100644 --- a/v1/src/main/java/com/google/cloud/teleport/bigtable/BigtableToJson.java +++ b/v1/src/main/java/com/google/cloud/teleport/bigtable/BigtableToJson.java @@ -128,7 +128,7 @@ public interface Options extends PipelineOptions { optional = true, description = "JSON file prefix", helpText = - "The prefix of the JSON file name. For example, \"table1-\". If no value is provided, defaults to `part`.") + "The prefix of the JSON file name. For example, `table1-`. If no value is provided, defaults to `part`.") @Default.String("part") ValueProvider getFilenamePrefix(); diff --git a/v1/src/main/java/com/google/cloud/teleport/bigtable/BigtableToParquet.java b/v1/src/main/java/com/google/cloud/teleport/bigtable/BigtableToParquet.java index 4e3c5e7798..cc0855032e 100644 --- a/v1/src/main/java/com/google/cloud/teleport/bigtable/BigtableToParquet.java +++ b/v1/src/main/java/com/google/cloud/teleport/bigtable/BigtableToParquet.java @@ -111,7 +111,7 @@ public interface Options extends PipelineOptions { groupName = "Target", description = "Output file directory in Cloud Storage", helpText = - "The path and filename prefix for writing output files. Must end with a slash. DateTime formatting is used to parse the directory path for date and time formatters. For example: gs://your-bucket/your-path.") + "The path and filename prefix for writing output files. Must end with a slash. DateTime formatting is used to parse the directory path for date and time formatters. For example: `gs://your-bucket/your-path`.") ValueProvider getOutputDirectory(); @SuppressWarnings("unused") @@ -122,7 +122,7 @@ public interface Options extends PipelineOptions { groupName = "Target", description = "Parquet file prefix", helpText = - "The prefix of the Parquet file name. For example, \"table1-\". Defaults to: part.") + "The prefix of the Parquet file name. For example, `table1-`. 
Defaults to: `part`.") @Default.String("part") ValueProvider getFilenamePrefix(); diff --git a/v1/src/main/java/com/google/cloud/teleport/bigtable/BigtableToVectorEmbeddings.java b/v1/src/main/java/com/google/cloud/teleport/bigtable/BigtableToVectorEmbeddings.java index 953d62f341..4f51154910 100644 --- a/v1/src/main/java/com/google/cloud/teleport/bigtable/BigtableToVectorEmbeddings.java +++ b/v1/src/main/java/com/google/cloud/teleport/bigtable/BigtableToVectorEmbeddings.java @@ -134,7 +134,7 @@ public interface Options extends PipelineOptions { optional = true, description = "JSON file prefix", helpText = - "The prefix of the JSON filename. For example: \"table1-\". If no value is provided, defaults to \"part\".") + "The prefix of the JSON filename. For example: `table1-`. If no value is provided, defaults to `part`.") @Default.String("part") ValueProvider getFilenamePrefix(); @@ -145,7 +145,7 @@ public interface Options extends PipelineOptions { order = 6, description = "ID column", helpText = - "The fully qualified column name where the ID is stored. In the format cf:col or _key.") + "The fully qualified column name where the ID is stored. In the format `cf:col` or `_key`.") ValueProvider getIdColumn(); @SuppressWarnings("unused") @@ -155,7 +155,7 @@ public interface Options extends PipelineOptions { order = 7, description = "Embedding column", helpText = - "The fully qualified column name where the embeddings are stored. In the format cf:col or _key.") + "The fully qualified column name where the embeddings are stored. In the format `cf:col` or `_key`.") ValueProvider getEmbeddingColumn(); @SuppressWarnings("unused") @@ -166,7 +166,7 @@ public interface Options extends PipelineOptions { optional = true, description = "Crowding tag column", helpText = - "The fully qualified column name where the crowding tag is stored. In the format cf:col or _key.") + "The fully qualified column name where the crowding tag is stored. In the format `cf:col` or `_key`.") ValueProvider getCrowdingTagColumn(); @SuppressWarnings("unused") @@ -177,7 +177,7 @@ public interface Options extends PipelineOptions { optional = true, description = "The byte size of the embeddings array. Can be 4 or 8.", helpText = - "The byte size of each entry in the embeddings array. For float, use the value 4. For double, use the value 8. Defaults to 4.") + "The byte size of each entry in the embeddings array. For float, use the value `4`. For double, use the value `8`. Defaults to `4`.") @Default.Integer(4) ValueProvider getEmbeddingByteSize(); @@ -189,7 +189,7 @@ public interface Options extends PipelineOptions { optional = true, description = "Allow restricts mappings", helpText = - "The comma-separated, fully qualified column names for the columns to use as the allow restricts, with their aliases. In the format cf:col->alias.") + "The comma-separated, fully qualified column names for the columns to use as the allow restricts, with their aliases. In the format `cf:col->alias`.") ValueProvider getAllowRestrictsMappings(); @SuppressWarnings("unused") @@ -200,7 +200,7 @@ public interface Options extends PipelineOptions { optional = true, description = "Deny restricts mappings", helpText = - "The comma-separated, fully qualified column names for the columns to use as the deny restricts, with their aliases. In the format cf:col->alias.") + "The comma-separated, fully qualified column names for the columns to use as the deny restricts, with their aliases. 
In the format `cf:col->alias`.") ValueProvider getDenyRestrictsMappings(); @SuppressWarnings("unused") @@ -211,7 +211,7 @@ public interface Options extends PipelineOptions { optional = true, description = "Integer numeric restricts mappings", helpText = - "The comma-separated, fully qualified column names of the columns to use as integer numeric_restricts, with their aliases. In the format cf:col->alias.") + "The comma-separated, fully qualified column names of the columns to use as integer numeric_restricts, with their aliases. In the format `cf:col->alias`.") ValueProvider getIntNumericRestrictsMappings(); @SuppressWarnings("unused") @@ -222,7 +222,7 @@ public interface Options extends PipelineOptions { optional = true, description = "Float numeric restricts mappings", helpText = - "The comma-separated, fully qualified column names of the columns to use as float (4 bytes) numeric_restricts, with their aliases. In the format cf:col->alias.") + "The comma-separated, fully qualified column names of the columns to use as float (4 bytes) numeric_restricts, with their aliases. In the format `cf:col->alias`.") ValueProvider getFloatNumericRestrictsMappings(); @SuppressWarnings("unused") @@ -233,7 +233,7 @@ public interface Options extends PipelineOptions { optional = true, description = "Double numeric restricts mappings", helpText = - "The comma-separated, fully qualified column names of the columns to use as double (8 bytes) numeric_restricts, with their aliases. In the format cf:col->alias.") + "The comma-separated, fully qualified column names of the columns to use as double (8 bytes) numeric_restricts, with their aliases. In the format `cf:col->alias`.") ValueProvider getDoubleNumericRestrictsMappings(); @SuppressWarnings("unused") diff --git a/v1/src/main/java/com/google/cloud/teleport/bigtable/CassandraToBigtable.java b/v1/src/main/java/com/google/cloud/teleport/bigtable/CassandraToBigtable.java index 71d99346d6..b60ca3cf90 100644 --- a/v1/src/main/java/com/google/cloud/teleport/bigtable/CassandraToBigtable.java +++ b/v1/src/main/java/com/google/cloud/teleport/bigtable/CassandraToBigtable.java @@ -87,7 +87,7 @@ public interface Options extends PipelineOptions { optional = true, description = "Cassandra Port", helpText = - "The TCP port to use to reach Apache Cassandra on the nodes. The default value is 9042.") + "The TCP port to use to reach Apache Cassandra on the nodes. The default value is `9042`.") @Default.Integer(9042) ValueProvider getCassandraPort(); @@ -155,7 +155,7 @@ public interface Options extends PipelineOptions { regexes = {"[-_.a-zA-Z0-9]+"}, description = "The Default Bigtable Column Family", helpText = - "The name of the column family of the Bigtable table. The default value is default.") + "The name of the column family of the Bigtable table. The default value is `default`.") @Default.String("default") ValueProvider getDefaultColumnFamily(); @@ -167,7 +167,7 @@ public interface Options extends PipelineOptions { groupName = "Target", optional = true, description = "The Row Key Separator", - helpText = "The separator used to build row-keys. The default value is '#'.") + helpText = "The separator used to build row-keys. 
The default value is `#`.") @Default.String("#") ValueProvider getRowKeySeparator(); diff --git a/v1/src/main/java/com/google/cloud/teleport/bigtable/ParquetToBigtable.java b/v1/src/main/java/com/google/cloud/teleport/bigtable/ParquetToBigtable.java index ddd71186ac..90408e38fb 100644 --- a/v1/src/main/java/com/google/cloud/teleport/bigtable/ParquetToBigtable.java +++ b/v1/src/main/java/com/google/cloud/teleport/bigtable/ParquetToBigtable.java @@ -129,7 +129,7 @@ public interface Options extends PipelineOptions { optional = true, description = "If true, large rows will be split into multiple MutateRows requests", helpText = - "The flag for enabling splitting of large rows into multiple MutateRows requests. Note that when a large row is split between multiple API calls, the updates to the row are not atomic. ") + "The flag for enabling splitting of large rows into multiple MutateRows requests. Note that when a large row is split between multiple API calls, the updates to the row are not atomic.") ValueProvider getSplitLargeRows(); void setSplitLargeRows(ValueProvider splitLargeRows); diff --git a/v1/src/main/java/com/google/cloud/teleport/options/WindowedFilenamePolicyOptions.java b/v1/src/main/java/com/google/cloud/teleport/options/WindowedFilenamePolicyOptions.java index 73354f402c..b548399cca 100644 --- a/v1/src/main/java/com/google/cloud/teleport/options/WindowedFilenamePolicyOptions.java +++ b/v1/src/main/java/com/google/cloud/teleport/options/WindowedFilenamePolicyOptions.java @@ -72,7 +72,7 @@ public interface WindowedFilenamePolicyOptions extends PipelineOptions { helpText = "Pattern for formatting the year. Must be one or more of `y` or `Y`. Case makes no" + " difference in the year. Optionally, wrap the pattern with characters that" - + " aren't alphanumeric or the directory ('/') character. Defaults to `YYYY`.") + + " aren't alphanumeric or the directory (`/`) character. Defaults to `YYYY`.") ValueProvider getYearPattern(); void setYearPattern(ValueProvider yearPattern); @@ -85,7 +85,7 @@ public interface WindowedFilenamePolicyOptions extends PipelineOptions { helpText = "Pattern for formatting the month. Must be one or more of the `M` character. " + "Optionally, wrap the pattern with characters that aren't alphanumeric or the " - + "directory ('/') character. Defaults to `MM`.") + + "directory (`/`) character. Defaults to `MM`.") ValueProvider getMonthPattern(); void setMonthPattern(ValueProvider monthPattern); @@ -98,7 +98,7 @@ public interface WindowedFilenamePolicyOptions extends PipelineOptions { helpText = "Pattern for formatting the day. Must be one or more of `d` for day of month or `D` for" + " day of year. Optionally," - + " wrap the pattern with characters that aren't alphanumeric or the directory ('/')" + + " wrap the pattern with characters that aren't alphanumeric or the directory (`/`)" + " character. Defaults to `dd`.") ValueProvider getDayPattern(); @@ -112,7 +112,7 @@ public interface WindowedFilenamePolicyOptions extends PipelineOptions { helpText = "Pattern for formatting the hour. Must be one or more of the `H` character. Optionally," + " wrap the pattern with characters that aren't alphanumeric or the directory" - + " ('/') character. Defaults to `HH`.") + + " (`/`) character. Defaults to `HH`.") ValueProvider getHourPattern(); void setHourPattern(ValueProvider hourPattern); @@ -125,7 +125,7 @@ public interface WindowedFilenamePolicyOptions extends PipelineOptions { helpText = "Pattern for formatting the minute. Must be one or more of the `m` character. 
Optionally," + " wrap the pattern with characters that aren't alphanumeric or the directory" - + " ('/') character. Defaults to `mm`.") + + " (`/`) character. Defaults to `mm`.") ValueProvider getMinutePattern(); void setMinutePattern(ValueProvider minutePattern); diff --git a/v1/src/main/java/com/google/cloud/teleport/spanner/ExportPipeline.java b/v1/src/main/java/com/google/cloud/teleport/spanner/ExportPipeline.java index 215664eb5d..393d0673d6 100644 --- a/v1/src/main/java/com/google/cloud/teleport/spanner/ExportPipeline.java +++ b/v1/src/main/java/com/google/cloud/teleport/spanner/ExportPipeline.java @@ -172,7 +172,7 @@ public interface ExportPipelineOptions extends PipelineOptions { optional = true, description = "Export Timestamps as Timestamp-micros type", helpText = - "If true, timestamps are exported as a `long` type with `timestamp-micros` logical type. By default, this parameter is set to `false` and timestamps are exported as ISO-8601 strings at nanosecond precision.") + "If `true`, timestamps are exported as a `long` type with `timestamp-micros` logical type. By default, this parameter is set to `false` and timestamps are exported as ISO-8601 strings at nanosecond precision.") @Default.Boolean(false) ValueProvider getShouldExportTimestampAsLogicalType(); diff --git a/v1/src/main/java/com/google/cloud/teleport/spanner/TextImportPipeline.java b/v1/src/main/java/com/google/cloud/teleport/spanner/TextImportPipeline.java index b5c9618160..42bb5f6952 100644 --- a/v1/src/main/java/com/google/cloud/teleport/spanner/TextImportPipeline.java +++ b/v1/src/main/java/com/google/cloud/teleport/spanner/TextImportPipeline.java @@ -152,7 +152,7 @@ public interface Options extends PipelineOptions { groupName = "Source", optional = true, description = "Column delimiter of the data files", - helpText = "The column delimiter that the source file uses. The default value is ','.", + helpText = "The column delimiter that the source file uses. The default value is `,`.", example = ",") @Default.Character(',') ValueProvider getColumnDelimiter(); @@ -166,7 +166,7 @@ public interface Options extends PipelineOptions { description = "Field qualifier used by the source file", helpText = "The character that must surround any value in the source file that " - + "contains the columnDelimiter. The default value is \".") + + "contains the columnDelimiter. The default value is double quotes.") @Default.Character('"') ValueProvider getFieldQualifier(); @@ -179,7 +179,7 @@ public interface Options extends PipelineOptions { description = "If true, the lines has trailing delimiters", helpText = "Specifies whether the lines in the source files have trailing delimiters, that is, whether the " - + "`columnDelimiter` character appears at the end of each line, after the last column value). " + + "`columnDelimiter` character appears at the end of each line, after the last column value. " + "The default value is `true`.") @Default.Boolean(true) ValueProvider getTrailingDelimiter(); @@ -218,7 +218,7 @@ public interface Options extends PipelineOptions { description = "Date format", helpText = "The format used to parse date columns. By default, the pipeline tries to parse the date columns " - + "as `yyyy-M-d[' 00:00:00']`, for example, as 2019-01-31 or 2019-1-1 00:00:00. If your date format " + + "as `yyyy-M-d[' 00:00:00']`, for example, as `2019-01-31` or `2019-1-1 00:00:00`. 
If your date format " + "is different, specify the format using the java.time.format.DateTimeFormatter " + "(https://docs.oracle.com/en/java/javase/11/docs/api/java.base/java/time/format/DateTimeFormatter.html) patterns.") ValueProvider getDateFormat(); @@ -232,11 +232,10 @@ public interface Options extends PipelineOptions { description = "Timestamp format", helpText = "The format used to parse timestamp columns. If the timestamp is a long integer, then it is parsed " - + "as Unix epoch time. Otherwise, it is parsed as a string using the " - + "java.time.format.DateTimeFormatter.ISO_INSTANT " + + "as Unix epoch time. Otherwise, it is parsed as a string using the java.time.format.DateTimeFormatter.ISO_INSTANT " + "(https://docs.oracle.com/en/java/javase/11/docs/api/java.base/java/time/format/DateTimeFormatter.html#ISO_INSTANT) format. " + "For other cases, specify your own pattern string, for example, using `MMM dd yyyy HH:mm:ss.SSSVV` " - + "for timestamps in the form of `\"Jan 21 1998 01:02:03.456+08:00\"`.") + + "for timestamps in the form of `Jan 21 1998 01:02:03.456+08:00`.") ValueProvider getTimestampFormat(); void setTimestampFormat(ValueProvider value); @@ -271,7 +270,7 @@ public interface Options extends PipelineOptions { description = "Priority for Spanner RPC invocations", helpText = "The request priority for Spanner calls. Possible values " - + "are HIGH, MEDIUM, and LOW. The default value is MEDIUM.") + + "are `HIGH`, `MEDIUM`, and `LOW`. The default value is `MEDIUM`.") ValueProvider getSpannerPriority(); void setSpannerPriority(ValueProvider value); diff --git a/v1/src/main/java/com/google/cloud/teleport/templates/BigQueryToTFRecord.java b/v1/src/main/java/com/google/cloud/teleport/templates/BigQueryToTFRecord.java index fd6cde8cbe..0d6d4a3317 100644 --- a/v1/src/main/java/com/google/cloud/teleport/templates/BigQueryToTFRecord.java +++ b/v1/src/main/java/com/google/cloud/teleport/templates/BigQueryToTFRecord.java @@ -333,7 +333,7 @@ public interface Options extends BigQueryReadOptions { groupName = "Target", description = "Output Cloud Storage directory.", helpText = - "The top-level Cloud Storage path prefix to use when writing the training, testing, and validation TFRecord files. Subdirectories for resulting training, testing, and validation TFRecord files are automatically generated from `outputDirectory`. For example, `gs://mybucket/output/train`", + "The top-level Cloud Storage path prefix to use when writing the training, testing, and validation TFRecord files. Subdirectories for resulting training, testing, and validation TFRecord files are automatically generated from `outputDirectory`.", example = "gs://mybucket/output") ValueProvider getOutputDirectory(); @@ -357,7 +357,7 @@ public interface Options extends BigQueryReadOptions { optional = true, description = "Percentage of data to be in the training set ", helpText = - "The percentage of query data allocated to training TFRecord files. The default value is 1, or 100%.") + "The percentage of query data allocated to training TFRecord files. The default value is `1`, or `100%`.") @Default.Float(1) ValueProvider getTrainingPercentage(); @@ -368,7 +368,7 @@ public interface Options extends BigQueryReadOptions { optional = true, description = "Percentage of data to be in the testing set ", helpText = - "The percentage of query data allocated to testing TFRecord files. The default value is 0, or 0%.") + "The percentage of query data allocated to testing TFRecord files. 
The default value is `0`, or `0%`.") @Default.Float(0) ValueProvider getTestingPercentage(); @@ -379,7 +379,7 @@ public interface Options extends BigQueryReadOptions { optional = true, description = "Percentage of data to be in the validation set ", helpText = - "The percentage of query data allocated to validation TFRecord files. The default value is 0, or 0%.") + "The percentage of query data allocated to validation TFRecord files. The default value is `0`, or `0%`.") @Default.Float(0) ValueProvider getValidationPercentage(); diff --git a/v1/src/main/java/com/google/cloud/teleport/templates/DLPTextToBigQueryStreaming.java b/v1/src/main/java/com/google/cloud/teleport/templates/DLPTextToBigQueryStreaming.java index a9e6d61c05..f1190b991f 100644 --- a/v1/src/main/java/com/google/cloud/teleport/templates/DLPTextToBigQueryStreaming.java +++ b/v1/src/main/java/com/google/cloud/teleport/templates/DLPTextToBigQueryStreaming.java @@ -304,7 +304,7 @@ public interface TokenizePipelineOptions extends DataflowPipelineOptions { }, description = "Cloud DLP deidentify template name", helpText = - "The Sensitive Data Protection de-identification template to use for API requests, specified with the pattern projects//deidentifyTemplates/.", + "The Sensitive Data Protection de-identification template to use for API requests, specified with the pattern `projects//deidentifyTemplates/`.", example = "projects/your-project-id/locations/global/deidentifyTemplates/generated_template_id") @Required @@ -322,7 +322,7 @@ public interface TokenizePipelineOptions extends DataflowPipelineOptions { description = "Cloud DLP inspect template name", helpText = "The Sensitive Data Protection inspection template to use for API requests, specified" - + " with the pattern projects//identifyTemplates/.", + + " with the pattern `projects//identifyTemplates/`.", example = "projects/your-project-id/locations/global/inspectTemplates/generated_template_id") ValueProvider getInspectTemplateName(); diff --git a/v1/src/main/java/com/google/cloud/teleport/templates/PubsubToAvro.java b/v1/src/main/java/com/google/cloud/teleport/templates/PubsubToAvro.java index 2e710bad8d..b1e755707f 100644 --- a/v1/src/main/java/com/google/cloud/teleport/templates/PubsubToAvro.java +++ b/v1/src/main/java/com/google/cloud/teleport/templates/PubsubToAvro.java @@ -97,10 +97,8 @@ public interface Options order = 1, groupName = "Source", description = "Pub/Sub input subscription", - helpText = - "Pub/Sub subscription to read the input from, in the format of" - + " 'projects/your-project-id/subscriptions/your-subscription-name'", - example = "projects/your-project-id/subscriptions/your-subscription-name") + helpText = "The Pub/Sub subscription to read the input from.", + example = "projects//subscriptions/") ValueProvider getInputSubscription(); void setInputSubscription(ValueProvider value); @@ -110,7 +108,7 @@ public interface Options groupName = "Source", description = "Pub/Sub input topic", helpText = - "The Pub/Sub topic to subscribe to for message consumption. The topic name must be in the format projects//topics/.") + "The Pub/Sub topic to subscribe to for message consumption. The topic name must be in the format `projects//topics/`.") ValueProvider getInputTopic(); void setInputTopic(ValueProvider value); @@ -128,7 +126,7 @@ public interface Options groupName = "Target", description = "Output file directory in Cloud Storage", helpText = - "The output directory where output Avro files are archived. Must contain / at the end. 
For example: gs://example-bucket/example-directory/") + "The output directory where output Avro files are archived. Must contain `/` at the end. For example: `gs://example-bucket/example-directory/`") @Required ValueProvider getOutputDirectory(); @@ -161,7 +159,7 @@ public interface Options order = 7, description = "Temporary Avro write directory", helpText = - "The directory for temporary Avro files. Must contain / at the end. For example: gs://example-bucket/example-directory/.") + "The directory for temporary Avro files. Must contain `/` at the end. For example: `gs://example-bucket/example-directory/`.") @Required ValueProvider getAvroTempDirectory(); diff --git a/v1/src/main/java/com/google/cloud/teleport/templates/SpannerVectorEmbeddingExport.java b/v1/src/main/java/com/google/cloud/teleport/templates/SpannerVectorEmbeddingExport.java index 1c6fed5028..be84cf59f5 100644 --- a/v1/src/main/java/com/google/cloud/teleport/templates/SpannerVectorEmbeddingExport.java +++ b/v1/src/main/java/com/google/cloud/teleport/templates/SpannerVectorEmbeddingExport.java @@ -177,7 +177,7 @@ public interface SpannerToVectorEmbeddingJsonOptions extends PipelineOptions { }, description = "Timestamp to read stale data from a version in the past.", helpText = - "If set, specifies the time when the database version must be taken. The value is a string in the RFC-3339 date format in Unix epoch time. For example: 1990-12-31T23:59:60Z. The timestamp must be in the past, and maximum timestamp staleness (https://cloud.google.com/spanner/docs/timestamp-bounds#maximum_timestamp_staleness) applies. If not set, a strong bound (https://cloud.google.com/spanner/docs/timestamp-bounds#strong) is used to read the latest data. Defaults to empty.", + "If set, specifies the time when the database version must be taken. The value is a string in the RFC-3339 date format in Unix epoch time. For example: `1990-12-31T23:59:60Z`. The timestamp must be in the past, and maximum timestamp staleness (https://cloud.google.com/spanner/docs/timestamp-bounds#maximum_timestamp_staleness) applies. If not set, a strong bound (https://cloud.google.com/spanner/docs/timestamp-bounds#strong) is used to read the latest data. Defaults to `empty`.", example = "1990-12-31T23:59:60Z") @Default.String(value = "") ValueProvider getSpannerVersionTime(); @@ -190,7 +190,7 @@ public interface SpannerToVectorEmbeddingJsonOptions extends PipelineOptions { optional = true, description = "Use independent compute resource (Spanner DataBoost).", helpText = - "When set to true, the template uses Spanner on-demand compute. The export job runs on independent compute resources that don't impact current Spanner workloads. Using this option incurs additional charges in Spanner. For more information, see Spanner Data Boost overview (https://cloud.google.com/spanner/docs/databoost/databoost-overview). Defaults to: false.") + "When set to `true`, the template uses Spanner on-demand compute. The export job runs on independent compute resources that don't impact current Spanner workloads. Using this option incurs additional charges in Spanner. For more information, see Spanner Data Boost overview (https://cloud.google.com/spanner/docs/databoost/databoost-overview). 
Defaults to: `false`.") @Default.Boolean(false) ValueProvider getSpannerDataBoostEnabled(); @@ -207,7 +207,7 @@ public interface SpannerToVectorEmbeddingJsonOptions extends PipelineOptions { optional = true, description = "Priority for Spanner RPC invocations", helpText = - "The request priority for Spanner calls. The allowed values are HIGH, MEDIUM, and LOW. The default value is MEDIUM.") + "The request priority for Spanner calls. The allowed values are `HIGH`, `MEDIUM`, and `LOW`. The default value is `MEDIUM`.") ValueProvider getSpannerPriority(); void setSpannerPriority(ValueProvider value); diff --git a/v1/src/main/java/com/google/cloud/teleport/templates/common/BigQueryConverters.java b/v1/src/main/java/com/google/cloud/teleport/templates/common/BigQueryConverters.java index 6ed1017412..ed0ebbf91b 100644 --- a/v1/src/main/java/com/google/cloud/teleport/templates/common/BigQueryConverters.java +++ b/v1/src/main/java/com/google/cloud/teleport/templates/common/BigQueryConverters.java @@ -85,7 +85,7 @@ public interface BigQueryReadOptions extends PipelineOptions { order = 1, description = "Input SQL query", helpText = - "A BigQuery SQL query that extracts data from the source. For example, select * from dataset1.sample_table.") + "A BigQuery SQL query that extracts data from the source. For example, `select * from dataset1.sample_table`.") ValueProvider getReadQuery(); void setReadQuery(ValueProvider value); diff --git a/v1/src/main/java/com/google/cloud/teleport/templates/common/CsvConverters.java b/v1/src/main/java/com/google/cloud/teleport/templates/common/CsvConverters.java index 04739b3f60..cdc001d154 100644 --- a/v1/src/main/java/com/google/cloud/teleport/templates/common/CsvConverters.java +++ b/v1/src/main/java/com/google/cloud/teleport/templates/common/CsvConverters.java @@ -75,7 +75,7 @@ public interface CsvPipelineOptions extends PipelineOptions { order = 1, optional = true, description = "Whether input CSV files contain a header record.", - helpText = "Whether headers are included in the CSV file. Defaults to: false.") + helpText = "Whether headers are included in the CSV file. Defaults to: `false`.") @Default.Boolean(false) ValueProvider getContainsHeaders(); @@ -93,7 +93,7 @@ public interface CsvPipelineOptions extends PipelineOptions { @TemplateParameter.Text( order = 3, description = "CSV Format to use for parsing records.", - helpText = "The CSV format according to Apache Commons CSV format. Defaults to: Default.") + helpText = "The CSV format according to Apache Commons CSV format. Defaults to: `Default`.") ValueProvider getCsvFormat(); void setCsvFormat(ValueProvider csvFormat); @@ -104,7 +104,7 @@ public interface CsvPipelineOptions extends PipelineOptions { regexes = {"^(US-ASCII|ISO-8859-1|UTF-8|UTF-16)$"}, description = "CSV file encoding", helpText = - "The CSV file character encoding format. Allowed Values are US-ASCII, ISO-8859-1, UTF-8, and UTF-16.") + "The CSV file character encoding format. 
Allowed Values are `US-ASCII`, `ISO-8859-1`, `UTF-8`, and `UTF-16`.") @Default.String("UTF-8") ValueProvider getCsvFileEncoding(); diff --git a/v1/src/main/java/com/google/cloud/teleport/templates/common/DatastoreConverters.java b/v1/src/main/java/com/google/cloud/teleport/templates/common/DatastoreConverters.java index 1b7777fe63..9ac8ac7104 100644 --- a/v1/src/main/java/com/google/cloud/teleport/templates/common/DatastoreConverters.java +++ b/v1/src/main/java/com/google/cloud/teleport/templates/common/DatastoreConverters.java @@ -221,7 +221,7 @@ public interface DatastoreWriteOptions extends PipelineOptions { optional = true, description = "Expected number of workers", helpText = - "Hint for the expected number of workers in the Datastore ramp-up throttling step. Default is `500`.") + "Hint for the expected number of workers in the Datastore ramp-up throttling step. Defaults to `500`.") @Default.Integer(500) @Hidden @Deprecated @@ -268,7 +268,7 @@ public interface DatastoreWriteOptions extends PipelineOptions { description = "Expected number of workers", helpText = "Hint for the expected number of workers in the Firestore ramp-up throttling step." - + " Default is 500.") + + " The default value is `500`.") // @Default can not be used here as it will make it use Firestore on a Datastore template. ValueProvider getFirestoreHintNumWorkers(); diff --git a/v1/src/main/java/com/google/cloud/teleport/templates/common/PubsubConverters.java b/v1/src/main/java/com/google/cloud/teleport/templates/common/PubsubConverters.java index a41ca848b8..92ae2610ad 100644 --- a/v1/src/main/java/com/google/cloud/teleport/templates/common/PubsubConverters.java +++ b/v1/src/main/java/com/google/cloud/teleport/templates/common/PubsubConverters.java @@ -58,7 +58,7 @@ public interface PubsubWriteDeadletterTopicOptions extends PipelineOptions { order = 1, description = "Output deadletter Pub/Sub topic", helpText = - "The Pub/Sub topic to forward undeliverable messages to. For example, projects//topics/.") + "The Pub/Sub topic to forward undeliverable messages to. For example, `projects//topics/`.") @Validation.Required ValueProvider getOutputDeadletterTopic(); diff --git a/v1/src/main/java/com/google/cloud/teleport/templates/common/SpannerConverters.java b/v1/src/main/java/com/google/cloud/teleport/templates/common/SpannerConverters.java index e219b4d0cf..a353b323b3 100644 --- a/v1/src/main/java/com/google/cloud/teleport/templates/common/SpannerConverters.java +++ b/v1/src/main/java/com/google/cloud/teleport/templates/common/SpannerConverters.java @@ -152,7 +152,7 @@ public interface SpannerReadOptions extends PipelineOptions { description = "Snapshot time", helpText = "The timestamp that corresponds to the version of the Spanner database that you want to read from." - + " The timestamp must be specified in the RFC 3339 (https://tools.ietf.org/html/rfc3339) UTC \"Zulu\" format." + + " The timestamp must be specified in the RFC 3339 (https://tools.ietf.org/html/rfc3339) UTC Zulu Time format." 
+ " The timestamp must be in the past and" + " maximum timestamp staleness (https://cloud.google.com/spanner/docs/timestamp-bounds#maximum_timestamp_staleness) applies.", example = "1990-12-31T23:59:60Z") diff --git a/v1/src/main/java/com/google/cloud/teleport/templates/common/SplunkConverters.java b/v1/src/main/java/com/google/cloud/teleport/templates/common/SplunkConverters.java index 46e1a2eb27..1f3e4bcc13 100644 --- a/v1/src/main/java/com/google/cloud/teleport/templates/common/SplunkConverters.java +++ b/v1/src/main/java/com/google/cloud/teleport/templates/common/SplunkConverters.java @@ -117,7 +117,7 @@ public interface SplunkOptions extends PipelineOptions { optional = true, description = "Batch size for sending multiple events to Splunk HEC.", helpText = - "The batch size for sending multiple events to Splunk. Defaults to 1 (no batching).") + "The batch size for sending multiple events to Splunk. Defaults to `1` (no batching).") ValueProvider getBatchCount(); void setBatchCount(ValueProvider batchCount); @@ -127,7 +127,7 @@ public interface SplunkOptions extends PipelineOptions { optional = true, description = "Disable SSL certificate validation.", helpText = - "Disable SSL certificate validation. Default false (validation enabled). If true, the certificates are not validated (all certificates are trusted) and `rootCaCertificatePath` parameter is ignored.") + "Disable SSL certificate validation. Default `false` (validation enabled). If `true`, the certificates are not validated (all certificates are trusted) and `rootCaCertificatePath` parameter is ignored.") ValueProvider getDisableCertificateValidation(); void setDisableCertificateValidation(ValueProvider disableCertificateValidation); @@ -136,7 +136,7 @@ public interface SplunkOptions extends PipelineOptions { order = 5, optional = true, description = "Maximum number of parallel requests.", - helpText = "The maximum number of parallel requests. Defaults to 1 (no parallelism).") + helpText = "The maximum number of parallel requests. Defaults to `1` (no parallelism).") ValueProvider getParallelism(); void setParallelism(ValueProvider parallelism); @@ -146,7 +146,7 @@ public interface SplunkOptions extends PipelineOptions { optional = true, description = "Include full Pub/Sub message in the payload.", helpText = - "Include the full Pub/Sub message in the payload. Default false (only the data element is included in the payload).") + "Include the full Pub/Sub message in the payload. Default `false` (only the data element is included in the payload).") ValueProvider getIncludePubsubMessage(); void setIncludePubsubMessage(ValueProvider includePubsubMessage); @@ -156,7 +156,7 @@ public interface SplunkOptions extends PipelineOptions { optional = true, description = "Google Cloud KMS encryption key for the token", helpText = - "The Cloud KMS key to use to decrypt the HEC token string. This parameter must be provided when tokenSource is set to KMS. If the Cloud KMS key is provided, the HEC token string `must` be passed in encrypted.", + "The Cloud KMS key to use to decrypt the HEC token string. This parameter must be provided when tokenSource is set to KMS. 
If the Cloud KMS key is provided, the HEC token string must be passed in encrypted.", example = "projects/your-project-id/locations/global/keyRings/your-keyring/cryptoKeys/your-key-name") ValueProvider getTokenKMSEncryptionKey(); @@ -171,7 +171,7 @@ public interface SplunkOptions extends PipelineOptions { }, description = "Google Cloud Secret Manager ID.", helpText = - "The Secret Manager secret ID for the token. This parameter must provided when the tokenSource is set to SECRET_MANAGER.", + "The Secret Manager secret ID for the token. This parameter must provided when the tokenSource is set to `SECRET_MANAGER`.", example = "projects/your-project-id/secrets/your-secret/versions/your-secret-version") ValueProvider getTokenSecretId(); diff --git a/v1/src/main/java/com/google/cloud/teleport/templates/common/TextConverters.java b/v1/src/main/java/com/google/cloud/teleport/templates/common/TextConverters.java index 16ece3faf6..7c7de0188c 100644 --- a/v1/src/main/java/com/google/cloud/teleport/templates/common/TextConverters.java +++ b/v1/src/main/java/com/google/cloud/teleport/templates/common/TextConverters.java @@ -61,7 +61,7 @@ public interface FilesystemWindowedWriteOptions extends PipelineOptions { description = "Output file directory in Cloud Storage", helpText = "The path and filename prefix for writing output files. Must end with a slash. DateTime formatting is used to parse directory path for date & time formatters.", - example = "gs://your-bucket/your-path") + example = "gs://your-bucket/your-path/") @Validation.Required ValueProvider getOutputDirectory(); diff --git a/v2/astradb-to-bigquery/README_AstraDB_To_BigQuery.md b/v2/astradb-to-bigquery/README_AstraDB_To_BigQuery.md index 35f187cc37..a4573d470e 100644 --- a/v2/astradb-to-bigquery/README_AstraDB_To_BigQuery.md +++ b/v2/astradb-to-bigquery/README_AstraDB_To_BigQuery.md @@ -28,17 +28,17 @@ on [Metadata Annotations](https://github.com/GoogleCloudPlatform/DataflowTemplat ### Required parameters -* **astraToken** : The token value or secret resource ID. (Example: AstraCS:abcdefghij). -* **astraDatabaseId** : The database unique identifier (UUID). (Example: cf7af129-d33a-498f-ad06-d97a6ee6eb7). -* **astraKeyspace** : The name of the Cassandra keyspace inside of the Astra database. -* **astraTable** : The name of the table inside of the Cassandra database. (Example: my_table). +* **astraToken**: The token value or secret resource ID. For example, `AstraCS:abcdefghij`. +* **astraDatabaseId**: The database unique identifier (UUID). For example, `cf7af129-d33a-498f-ad06-d97a6ee6eb7`. +* **astraKeyspace**: The name of the Cassandra keyspace inside of the Astra database. +* **astraTable**: The name of the table inside of the Cassandra database. For example, `my_table`. ### Optional parameters -* **astraQuery** : The query to use to filter rows instead of reading the whole table. -* **astraDatabaseRegion** : If not provided, a default is chosen, which is useful with multi-region databases. -* **minTokenRangesCount** : The minimal number of splits to use to distribute the query. -* **outputTableSpec** : The BigQuery table location to write the output to. Use the format `:.`. The table's schema must match the input objects. +* **astraQuery**: The query to use to filter rows instead of reading the whole table. +* **astraDatabaseRegion**: If not provided, a default is chosen, which is useful with multi-region databases. +* **minTokenRangesCount**: The minimal number of splits to use to distribute the query. 
+* **outputTableSpec**: The BigQuery table location to write the output to. Use the format `:.`. The table's schema must match the input objects.
@@ -221,10 +221,10 @@ resource "google_dataflow_flex_template_job" "astradb_to_bigquery" {
   name   = "astradb-to-bigquery"
   region = var.region
   parameters = {
-    astraToken      = "AstraCS:abcdefghij"
-    astraDatabaseId = "cf7af129-d33a-498f-ad06-d97a6ee6eb7"
+    astraToken      = ""
+    astraDatabaseId = ""
     astraKeyspace   = ""
-    astraTable      = "my_table"
+    astraTable      = ""
    # astraQuery = ""
    # astraDatabaseRegion = ""
    # minTokenRangesCount = ""
diff --git a/v2/azure-eventhub-to-pubsub/README_Azure_Eventhub_to_PubSub.md b/v2/azure-eventhub-to-pubsub/README_Azure_Eventhub_to_PubSub.md
index 856d174b99..9ca0f45ab1 100644
--- a/v2/azure-eventhub-to-pubsub/README_Azure_Eventhub_to_PubSub.md
+++ b/v2/azure-eventhub-to-pubsub/README_Azure_Eventhub_to_PubSub.md
@@ -16,10 +16,10 @@ on [Metadata Annotations](https://github.com/GoogleCloudPlatform/DataflowTemplat

 ### Required parameters

-* **brokerServer** : Server IP or DNS for Azure Eventhub Endpoint (Example: mynamespace.servicebus.windows.net:9093).
-* **inputTopic** : Azure Eventhub topic(s) to read the input from (Example: topic).
-* **outputTopic** : The name of the topic to which data should published, in the format of 'projects/your-project-id/topics/your-topic-name' (Example: projects/your-project-id/topics/your-topic-name).
-* **secret** : Secret Version, it can be a number like 1,2 or 3 or can be 'latest' (Example: projects/{project}/secrets/{secret}/versions/{secret_version}).
+* **brokerServer**: The server IP or DNS for the Azure Eventhub endpoint. For example, `mynamespace.servicebus.windows.net:9093`.
+* **inputTopic**: The Azure Eventhub topic(s) to read the input from. For example, `topic`.
+* **outputTopic**: The name of the topic to publish data to, in the format 'projects/your-project-id/topics/your-topic-name'. For example, `projects/your-project-id/topics/your-topic-name`.
+* **secret**: The secret version. It can be a number, such as 1, 2, or 3, or 'latest'. For example, `projects/{project}/secrets/{secret}/versions/{secret_version}`.

 ### Optional parameters

@@ -193,10 +193,10 @@ resource "google_dataflow_flex_template_job" "azure_eventhub_to_pubsub" {
   name   = "azure-eventhub-to-pubsub"
   region = var.region
   parameters = {
-    brokerServer = "mynamespace.servicebus.windows.net:9093"
-    inputTopic   = "topic"
-    outputTopic  = "projects/your-project-id/topics/your-topic-name"
-    secret       = "projects/{project}/secrets/{secret}/versions/{secret_version}"
+    brokerServer = ""
+    inputTopic   = ""
+    outputTopic  = ""
+    secret       = ""
   }
 }
 ```
diff --git a/v2/bigquery-to-bigtable/README_BigQuery_to_Bigtable.md b/v2/bigquery-to-bigtable/README_BigQuery_to_Bigtable.md
index 3219141278..e36b79584a 100644
--- a/v2/bigquery-to-bigtable/README_BigQuery_to_Bigtable.md
+++ b/v2/bigquery-to-bigtable/README_BigQuery_to_Bigtable.md
@@ -16,27 +16,26 @@ on [Metadata Annotations](https://github.com/GoogleCloudPlatform/DataflowTemplat

 ### Required parameters

-* **readIdColumn** : The name of the BigQuery column storing the unique identifier of the row.
-* **bigtableWriteInstanceId** : The ID of the Bigtable instance that contains the table.
-* **bigtableWriteTableId** : The ID of the Bigtable table to write to.
-* **bigtableWriteColumnFamily** : The name of the column family of the Bigtable table to write data into.
+* **readIdColumn**: The name of the BigQuery column storing the unique identifier of the row.
+* **bigtableWriteInstanceId**: The ID of the Bigtable instance that contains the table. +* **bigtableWriteTableId**: The ID of the Bigtable table to write to. +* **bigtableWriteColumnFamily**: The name of the column family of the Bigtable table to write data into. ### Optional parameters -* **inputTableSpec** : The BigQuery table to read from. Format: `projectId:datasetId.tablename`. If you specify `inputTableSpec`, the template reads the data directly from BigQuery storage by using the BigQuery Storage Read API (https://cloud.google.com/bigquery/docs/reference/storage). For information about limitations in the Storage Read API, see https://cloud.google.com/bigquery/docs/reference/storage#limitations. You must specify either `inputTableSpec` or `query`. If you set both parameters, the template uses the `query` parameter. (Example: bigquery-project:dataset.input_table). -* **outputDeadletterTable** : The BigQuery table for messages that failed to reach the output table, in the format :.. If a table doesn't exist, is is created during pipeline execution. If not specified, `_error_records` is used. (Example: your-project-id:your-dataset.your-table-name). -* **query** : The SQL query to use to read data from BigQuery. If the BigQuery dataset is in a different project than the Dataflow job, specify the full dataset name in the SQL query, for example: ... By default, the `query` parameter uses GoogleSQL (https://cloud.google.com/bigquery/docs/introduction-sql), unless `useLegacySql` is `true`. You must specify either `inputTableSpec` or `query`. If you set both parameters, the template uses the `query` parameter. (Example: select * from sampledb.sample_table). -* **useLegacySql** : Set to true to use legacy SQL. This parameter only applies when using the `query` parameter. Defaults to: false. -* **queryLocation** : Needed when reading from an authorized view without underlying table's permission. (Example: US). -* **queryTempDataset** : With this option, you can set an existing dataset to create the temporary table to store the results of the query. (Example: temp_dataset). -* **bigtableRpcAttemptTimeoutMs** : The timeout for each Bigtable RPC attempt in milliseconds. -* **bigtableRpcTimeoutMs** : The total timeout for a Bigtable RPC operation in milliseconds. -* **bigtableAdditionalRetryCodes** : The additional retry codes. (Example: RESOURCE_EXHAUSTED,DEADLINE_EXCEEDED). -* **bigtableWriteAppProfile** : The ID of the Bigtable application profile to use for the export. If you do not specify an app profile, Bigtable uses the default app profile (https://cloud.google.com/bigtable/docs/app-profiles#default-app-profile) of the instance. -* **bigtableWriteProjectId** : The ID of the Google Cloud project that contains the Bigtable instanceto write data to. -* **bigtableBulkWriteLatencyTargetMs** : The latency target of Bigtable in milliseconds for latency-based throttling. -* **bigtableBulkWriteMaxRowKeyCount** : The maximum number of row keys in a Bigtable batch write operation. -* **bigtableBulkWriteMaxRequestSizeBytes** : The maximum bytes to include per Bigtable batch write operation. +* **inputTableSpec**: The BigQuery table to read from. If you specify `inputTableSpec`, the template reads the data directly from BigQuery storage by using the BigQuery Storage Read API (https://cloud.google.com/bigquery/docs/reference/storage). For information about limitations in the Storage Read API, see https://cloud.google.com/bigquery/docs/reference/storage#limitations. 
You must specify either `inputTableSpec` or `query`. If you set both parameters, the template uses the `query` parameter. For example, `:.`. +* **outputDeadletterTable**: The BigQuery table for messages that failed to reach the output table. If a table doesn't exist, it is created during pipeline execution. If not specified, `_error_records` is used. For example, `:.`. +* **query**: The SQL query to use to read data from BigQuery. If the BigQuery dataset is in a different project than the Dataflow job, specify the full dataset name in the SQL query, for example: ... By default, the `query` parameter uses GoogleSQL (https://cloud.google.com/bigquery/docs/introduction-sql), unless `useLegacySql` is `true`. You must specify either `inputTableSpec` or `query`. If you set both parameters, the template uses the `query` parameter. For example, `select * from sampledb.sample_table`. +* **useLegacySql**: Set to `true` to use legacy SQL. This parameter only applies when using the `query` parameter. Defaults to `false`. +* **queryLocation**: Needed when reading from an authorized view without underlying table's permission. For example, `US`. +* **bigtableRpcAttemptTimeoutMs**: The timeout for each Bigtable RPC attempt in milliseconds. +* **bigtableRpcTimeoutMs**: The total timeout for a Bigtable RPC operation in milliseconds. +* **bigtableAdditionalRetryCodes**: The additional retry codes. For example, `RESOURCE_EXHAUSTED,DEADLINE_EXCEEDED`. +* **bigtableWriteAppProfile**: The ID of the Bigtable application profile to use for the export. If you do not specify an app profile, Bigtable uses the default app profile (https://cloud.google.com/bigtable/docs/app-profiles#default-app-profile) of the instance. +* **bigtableWriteProjectId**: The ID of the Google Cloud project that contains the Bigtable instanceto write data to. +* **bigtableBulkWriteLatencyTargetMs**: The latency target of Bigtable in milliseconds for latency-based throttling. +* **bigtableBulkWriteMaxRowKeyCount**: The maximum number of row keys in a Bigtable batch write operation. +* **bigtableBulkWriteMaxRequestSizeBytes**: The maximum bytes to include per Bigtable batch write operation. @@ -253,15 +252,14 @@ resource "google_dataflow_flex_template_job" "bigquery_to_bigtable" { bigtableWriteInstanceId = "" bigtableWriteTableId = "" bigtableWriteColumnFamily = "" - # inputTableSpec = "bigquery-project:dataset.input_table" - # outputDeadletterTable = "your-project-id:your-dataset.your-table-name" - # query = "select * from sampledb.sample_table" + # inputTableSpec = "" + # outputDeadletterTable = "" + # query = "" # useLegacySql = "false" - # queryLocation = "US" - # queryTempDataset = "temp_dataset" + # queryLocation = "" # bigtableRpcAttemptTimeoutMs = "" # bigtableRpcTimeoutMs = "" - # bigtableAdditionalRetryCodes = "RESOURCE_EXHAUSTED,DEADLINE_EXCEEDED" + # bigtableAdditionalRetryCodes = "" # bigtableWriteAppProfile = "default" # bigtableWriteProjectId = "" # bigtableBulkWriteLatencyTargetMs = "" diff --git a/v2/bigquery-to-parquet/README_BigQuery_to_Parquet.md b/v2/bigquery-to-parquet/README_BigQuery_to_Parquet.md index c2772d6da5..d74487573a 100644 --- a/v2/bigquery-to-parquet/README_BigQuery_to_Parquet.md +++ b/v2/bigquery-to-parquet/README_BigQuery_to_Parquet.md @@ -20,14 +20,14 @@ on [Metadata Annotations](https://github.com/GoogleCloudPlatform/DataflowTemplat ### Required parameters -* **tableRef** : The BigQuery input table location. (Example: your-project:your-dataset.your-table-name). 
-* **bucket** : The Cloud Storage folder to write the Parquet files to. (Example: gs://your-bucket/export/). +* **tableRef**: The BigQuery input table location. For example, `your-project:your-dataset.your-table-name`. +* **bucket**: The Cloud Storage folder to write the Parquet files to. For example, `gs://your-bucket/export/`. ### Optional parameters -* **numShards** : The number of output file shards. The default value is 1. -* **fields** : A comma-separated list of fields to select from the input BigQuery table. -* **rowRestriction** : Read only rows which match the specified filter, which must be a SQL expression compatible with Google standard SQL (https://cloud.google.com/bigquery/docs/reference/standard-sql). If no value is specified, then all rows are returned. +* **numShards**: The number of output file shards. The default value is `1`. +* **fields**: A comma-separated list of fields to select from the input BigQuery table. +* **rowRestriction**: Read only rows which match the specified filter, which must be a SQL expression compatible with Google standard SQL (https://cloud.google.com/bigquery/docs/reference/standard-sql). If no value is specified, then all rows are returned. @@ -201,8 +201,8 @@ resource "google_dataflow_flex_template_job" "bigquery_to_parquet" { name = "bigquery-to-parquet" region = var.region parameters = { - tableRef = "your-project:your-dataset.your-table-name" - bucket = "gs://your-bucket/export/" + tableRef = "" + bucket = "" # numShards = "0" # fields = "" # rowRestriction = "" diff --git a/v2/bigquery-to-parquet/src/main/java/com/google/cloud/teleport/v2/templates/BigQueryToParquet.java b/v2/bigquery-to-parquet/src/main/java/com/google/cloud/teleport/v2/templates/BigQueryToParquet.java index 2a9b75340e..fa3f96947d 100644 --- a/v2/bigquery-to-parquet/src/main/java/com/google/cloud/teleport/v2/templates/BigQueryToParquet.java +++ b/v2/bigquery-to-parquet/src/main/java/com/google/cloud/teleport/v2/templates/BigQueryToParquet.java @@ -169,7 +169,7 @@ public interface BigQueryToParquetOptions extends PipelineOptions { order = 3, optional = true, description = "Maximum output shards", - helpText = "The number of output file shards. The default value is 1.") + helpText = "The number of output file shards. The default value is `1`.") @Default.Integer(0) Integer getNumShards(); diff --git a/v2/bigtable-changestreams-to-hbase/README_Bigtable_Change_Streams_to_HBase.md b/v2/bigtable-changestreams-to-hbase/README_Bigtable_Change_Streams_to_HBase.md index 56d82d8fcc..6c7523c3f6 100644 --- a/v2/bigtable-changestreams-to-hbase/README_Bigtable_Change_Streams_to_HBase.md +++ b/v2/bigtable-changestreams-to-hbase/README_Bigtable_Change_Streams_to_HBase.md @@ -13,33 +13,33 @@ on [Metadata Annotations](https://github.com/GoogleCloudPlatform/DataflowTemplat ### Required parameters -* **hbaseRootDir** : Hbase root directory, corresponds to hbase.rootdir. -* **hbaseZookeeperQuorumHost** : Zookeeper quorum host, corresponds to hbase.zookeeper.quorum host. -* **bigtableChangeStreamAppProfile** : The Bigtable application profile ID. The application profile must use single-cluster routing and allow single-row transactions. -* **bigtableReadInstanceId** : The source Bigtable instance ID. -* **bigtableReadTableId** : The source Bigtable table ID. +* **hbaseRootDir**: Hbase root directory, corresponds to hbase.rootdir. +* **hbaseZookeeperQuorumHost**: Zookeeper quorum host, corresponds to hbase.zookeeper.quorum host. 
+* **bigtableChangeStreamAppProfile**: The Bigtable application profile ID. The application profile must use single-cluster routing and allow single-row transactions. +* **bigtableReadInstanceId**: The source Bigtable instance ID. +* **bigtableReadTableId**: The source Bigtable table ID. ### Optional parameters -* **bidirectionalReplicationEnabled** : Whether bidirectional replication between hbase and bigtable is enabled, adds additional logic to filter out hbase-replicated mutations. Defaults to: false. -* **cbtQualifier** : Bidirectional replication source CBT qualifier. Defaults to: BIDIRECTIONAL_REPL_SOURCE_CBT. -* **dryRunEnabled** : When dry run is enabled, pipeline will not write to Hbase. Defaults to: false. -* **filterGCMutations** : Filters out garbage collection Delete mutations from CBT. Defaults to: false. -* **hbaseQualifier** : Bidirectional replication source Hbase qualifier. Defaults to: BIDIRECTIONAL_REPL_SOURCE_HBASE. -* **hbaseZookeeperQuorumPort** : Zookeeper quorum port, corresponds to hbase.zookeeper.quorum port. Defaults to: 2181. -* **bigtableChangeStreamMetadataInstanceId** : The Bigtable change streams metadata instance ID. Defaults to empty. -* **bigtableChangeStreamMetadataTableTableId** : The ID of the Bigtable change streams connector metadata table. If not provided, a Bigtable change streams connector metadata table is automatically created during pipeline execution. Defaults to empty. -* **bigtableChangeStreamCharset** : The Bigtable change streams charset name. Defaults to: UTF-8. -* **bigtableChangeStreamStartTimestamp** : The starting timestamp (https://tools.ietf.org/html/rfc3339), inclusive, to use for reading change streams. For example, `2022-05-05T07:59:59Z`. Defaults to the timestamp of the pipeline start time. -* **bigtableChangeStreamIgnoreColumnFamilies** : A comma-separated list of column family name changes to ignore. Defaults to empty. -* **bigtableChangeStreamIgnoreColumns** : A comma-separated list of column name changes to ignore. Defaults to empty. -* **bigtableChangeStreamName** : A unique name for the client pipeline. Lets you resume processing from the point at which a previously running pipeline stopped. Defaults to an automatically generated name. See the Dataflow job logs for the value used. -* **bigtableChangeStreamResume** : When set to `true`, a new pipeline resumes processing from the point at which a previously running pipeline with the same `bigtableChangeStreamName` value stopped. If the pipeline with the given `bigtableChangeStreamName` value has never run, a new pipeline doesn't start. When set to `false`, a new pipeline starts. If a pipeline with the same `bigtableChangeStreamName` value has already run for the given source, a new pipeline doesn't start. Defaults to `false`. -* **bigtableReadProjectId** : The Bigtable project ID. The default is the project for the Dataflow job. -* **bigtableReadAppProfile** : Bigtable App Profile to use for reads. The default for this parameter is the Bigtable instance's default app profile. -* **bigtableRpcAttemptTimeoutMs** : The timeout for each Bigtable RPC attempt in milliseconds. -* **bigtableRpcTimeoutMs** : The total timeout for a Bigtable RPC operation in milliseconds. -* **bigtableAdditionalRetryCodes** : The additional retry codes. (Example: RESOURCE_EXHAUSTED,DEADLINE_EXCEEDED). +* **bidirectionalReplicationEnabled**: Whether bidirectional replication between hbase and bigtable is enabled, adds additional logic to filter out hbase-replicated mutations. Defaults to: false. 
+* **cbtQualifier**: Bidirectional replication source CBT qualifier. Defaults to: BIDIRECTIONAL_REPL_SOURCE_CBT. +* **dryRunEnabled**: When dry run is enabled, pipeline will not write to Hbase. Defaults to: false. +* **filterGCMutations**: Filters out garbage collection Delete mutations from CBT. Defaults to: false. +* **hbaseQualifier**: Bidirectional replication source Hbase qualifier. Defaults to: BIDIRECTIONAL_REPL_SOURCE_HBASE. +* **hbaseZookeeperQuorumPort**: Zookeeper quorum port, corresponds to hbase.zookeeper.quorum port. Defaults to: 2181. +* **bigtableChangeStreamMetadataInstanceId**: The Bigtable change streams metadata instance ID. Defaults to empty. +* **bigtableChangeStreamMetadataTableTableId**: The ID of the Bigtable change streams connector metadata table. If not provided, a Bigtable change streams connector metadata table is automatically created during pipeline execution. Defaults to empty. +* **bigtableChangeStreamCharset**: The Bigtable change streams charset name. Defaults to: UTF-8. +* **bigtableChangeStreamStartTimestamp**: The starting timestamp (https://tools.ietf.org/html/rfc3339), inclusive, to use for reading change streams. For example, `2022-05-05T07:59:59Z`. Defaults to the timestamp of the pipeline start time. +* **bigtableChangeStreamIgnoreColumnFamilies**: A comma-separated list of column family name changes to ignore. Defaults to empty. +* **bigtableChangeStreamIgnoreColumns**: A comma-separated list of column name changes to ignore. Defaults to empty. +* **bigtableChangeStreamName**: A unique name for the client pipeline. Lets you resume processing from the point at which a previously running pipeline stopped. Defaults to an automatically generated name. See the Dataflow job logs for the value used. +* **bigtableChangeStreamResume**: When set to `true`, a new pipeline resumes processing from the point at which a previously running pipeline with the same `bigtableChangeStreamName` value stopped. If the pipeline with the given `bigtableChangeStreamName` value has never run, a new pipeline doesn't start. When set to `false`, a new pipeline starts. If a pipeline with the same `bigtableChangeStreamName` value has already run for the given source, a new pipeline doesn't start. Defaults to `false`. +* **bigtableReadProjectId**: The Bigtable project ID. The default is the project for the Dataflow job. +* **bigtableReadAppProfile**: Bigtable App Profile to use for reads. The default for this parameter is the Bigtable instance's default app profile. +* **bigtableRpcAttemptTimeoutMs**: The timeout for each Bigtable RPC attempt in milliseconds. +* **bigtableRpcTimeoutMs**: The total timeout for a Bigtable RPC operation in milliseconds. +* **bigtableAdditionalRetryCodes**: The additional retry codes. For example, `RESOURCE_EXHAUSTED,DEADLINE_EXCEEDED`. 
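The resume behavior described above is easier to see with concrete values. The following Terraform sketch follows the same `google_dataflow_flex_template_job` shape used by the job examples in these READMEs; the container spec path, resource name, and every parameter value are illustrative placeholders rather than values taken from this change:

```terraform
# Minimal sketch (assumed values): a new pipeline run that resumes from the
# point where a previous pipeline with the same bigtableChangeStreamName stopped.
resource "google_dataflow_flex_template_job" "bigtable_change_streams_to_hbase_resume" {
  provider = google-beta
  name     = "bigtable-change-streams-to-hbase"
  region   = var.region
  # Assumed template location, not taken from this change.
  container_spec_gcs_path = "gs://dataflow-templates-${var.region}/latest/flex/Bigtable_Change_Streams_to_HBase"
  parameters = {
    hbaseRootDir                   = "hdfs://hbase-master:8020/hbase"
    hbaseZookeeperQuorumHost       = "zookeeper-host"
    bigtableChangeStreamAppProfile = "single-cluster-profile"
    bigtableReadInstanceId         = "source-instance"
    bigtableReadTableId            = "source-table"
    # Resuming requires the same name as the earlier pipeline and resume set to true.
    bigtableChangeStreamName   = "hbase-replication-pipeline"
    bigtableChangeStreamResume = "true"
  }
}
```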
@@ -293,7 +293,7 @@ resource "google_dataflow_flex_template_job" "bigtable_change_streams_to_hbase" # bigtableReadAppProfile = "default" # bigtableRpcAttemptTimeoutMs = "" # bigtableRpcTimeoutMs = "" - # bigtableAdditionalRetryCodes = "RESOURCE_EXHAUSTED,DEADLINE_EXCEEDED" + # bigtableAdditionalRetryCodes = "" } } ``` diff --git a/v2/cdc-parent/cdc-agg/README_Cdc_To_BigQuery_Template.md b/v2/cdc-parent/cdc-agg/README_Cdc_To_BigQuery_Template.md index c9a974b092..b1a9a9fad0 100644 --- a/v2/cdc-parent/cdc-agg/README_Cdc_To_BigQuery_Template.md +++ b/v2/cdc-parent/cdc-agg/README_Cdc_To_BigQuery_Template.md @@ -16,19 +16,19 @@ on [Metadata Annotations](https://github.com/GoogleCloudPlatform/DataflowTemplat ### Required parameters -* **inputSubscriptions** : The comma-separated list of Pub/Sub input subscriptions to read from, in the format `,, ...`. -* **changeLogDataset** : The BigQuery dataset to store the staging tables in, in the format . -* **replicaDataset** : The location of the BigQuery dataset to store the replica tables in, in the format . +* **inputSubscriptions**: The comma-separated list of Pub/Sub input subscriptions to read from, in the format `,, ...`. +* **changeLogDataset**: The BigQuery dataset to store the staging tables in, in the format . +* **replicaDataset**: The location of the BigQuery dataset to store the replica tables in, in the format . ### Optional parameters -* **inputTopics** : Comma-separated list of PubSub topics to where CDC data is being pushed. -* **updateFrequencySecs** : The interval at which the pipeline updates the BigQuery table replicating the MySQL database. -* **useSingleTopic** : Set this to true if you have configured your Debezium connector to publish all table updates to a single topic. Defaults to: false. -* **useStorageWriteApi** : If true, the pipeline uses the BigQuery Storage Write API (https://cloud.google.com/bigquery/docs/write-api). The default value is `false`. For more information, see Using the Storage Write API (https://beam.apache.org/documentation/io/built-in/google-bigquery/#storage-write-api). -* **useStorageWriteApiAtLeastOnce** : When using the Storage Write API, specifies the write semantics. To use at-least once semantics (https://beam.apache.org/documentation/io/built-in/google-bigquery/#at-least-once-semantics), set this parameter to `true`. To use exactly-once semantics, set the parameter to `false`. This parameter applies only when `useStorageWriteApi` is `true`. The default value is `false`. -* **numStorageWriteApiStreams** : When using the Storage Write API, specifies the number of write streams. If `useStorageWriteApi` is `true` and `useStorageWriteApiAtLeastOnce` is `false`, then you must set this parameter. Defaults to: 0. -* **storageWriteApiTriggeringFrequencySec** : When using the Storage Write API, specifies the triggering frequency, in seconds. If `useStorageWriteApi` is `true` and `useStorageWriteApiAtLeastOnce` is `false`, then you must set this parameter. +* **inputTopics**: Comma-separated list of PubSub topics to where CDC data is being pushed. +* **updateFrequencySecs**: The interval at which the pipeline updates the BigQuery table replicating the MySQL database. +* **useSingleTopic**: Set this to `true` if you configure your Debezium connector to publish all table updates to a single topic. Defaults to: false. +* **useStorageWriteApi**: If true, the pipeline uses the BigQuery Storage Write API (https://cloud.google.com/bigquery/docs/write-api). The default value is `false`. 
For more information, see Using the Storage Write API (https://beam.apache.org/documentation/io/built-in/google-bigquery/#storage-write-api). +* **useStorageWriteApiAtLeastOnce**: When using the Storage Write API, specifies the write semantics. To use at-least once semantics (https://beam.apache.org/documentation/io/built-in/google-bigquery/#at-least-once-semantics), set this parameter to `true`. To use exactly-once semantics, set the parameter to `false`. This parameter applies only when `useStorageWriteApi` is `true`. The default value is `false`. +* **numStorageWriteApiStreams**: When using the Storage Write API, specifies the number of write streams. If `useStorageWriteApi` is `true` and `useStorageWriteApiAtLeastOnce` is `false`, then you must set this parameter. Defaults to: 0. +* **storageWriteApiTriggeringFrequencySec**: When using the Storage Write API, specifies the triggering frequency, in seconds. If `useStorageWriteApi` is `true` and `useStorageWriteApiAtLeastOnce` is `false`, then you must set this parameter. diff --git a/v2/cdc-parent/cdc-change-applier/README_Cdc_To_BigQuery_Template.md b/v2/cdc-parent/cdc-change-applier/README_Cdc_To_BigQuery_Template.md index f95ec2f098..859ef3c389 100644 --- a/v2/cdc-parent/cdc-change-applier/README_Cdc_To_BigQuery_Template.md +++ b/v2/cdc-parent/cdc-change-applier/README_Cdc_To_BigQuery_Template.md @@ -16,19 +16,19 @@ on [Metadata Annotations](https://github.com/GoogleCloudPlatform/DataflowTemplat ### Required parameters -* **inputSubscriptions** : The comma-separated list of Pub/Sub input subscriptions to read from, in the format `,, ...`. -* **changeLogDataset** : The BigQuery dataset to store the staging tables in, in the format . -* **replicaDataset** : The location of the BigQuery dataset to store the replica tables in, in the format . +* **inputSubscriptions**: The comma-separated list of Pub/Sub input subscriptions to read from, in the format `,, ...`. +* **changeLogDataset**: The BigQuery dataset to store the staging tables in, in the format . +* **replicaDataset**: The location of the BigQuery dataset to store the replica tables in, in the format . ### Optional parameters -* **inputTopics** : Comma-separated list of PubSub topics to where CDC data is being pushed. -* **updateFrequencySecs** : The interval at which the pipeline updates the BigQuery table replicating the MySQL database. -* **useSingleTopic** : Set this to true if you have configured your Debezium connector to publish all table updates to a single topic. Defaults to: false. -* **useStorageWriteApi** : If true, the pipeline uses the BigQuery Storage Write API (https://cloud.google.com/bigquery/docs/write-api). The default value is `false`. For more information, see Using the Storage Write API (https://beam.apache.org/documentation/io/built-in/google-bigquery/#storage-write-api). -* **useStorageWriteApiAtLeastOnce** : When using the Storage Write API, specifies the write semantics. To use at-least once semantics (https://beam.apache.org/documentation/io/built-in/google-bigquery/#at-least-once-semantics), set this parameter to `true`. To use exactly-once semantics, set the parameter to `false`. This parameter applies only when `useStorageWriteApi` is `true`. The default value is `false`. -* **numStorageWriteApiStreams** : When using the Storage Write API, specifies the number of write streams. If `useStorageWriteApi` is `true` and `useStorageWriteApiAtLeastOnce` is `false`, then you must set this parameter. Defaults to: 0. 
-* **storageWriteApiTriggeringFrequencySec** : When using the Storage Write API, specifies the triggering frequency, in seconds. If `useStorageWriteApi` is `true` and `useStorageWriteApiAtLeastOnce` is `false`, then you must set this parameter. +* **inputTopics**: Comma-separated list of PubSub topics to where CDC data is being pushed. +* **updateFrequencySecs**: The interval at which the pipeline updates the BigQuery table replicating the MySQL database. +* **useSingleTopic**: Set this to `true` if you configure your Debezium connector to publish all table updates to a single topic. Defaults to: false. +* **useStorageWriteApi**: If true, the pipeline uses the BigQuery Storage Write API (https://cloud.google.com/bigquery/docs/write-api). The default value is `false`. For more information, see Using the Storage Write API (https://beam.apache.org/documentation/io/built-in/google-bigquery/#storage-write-api). +* **useStorageWriteApiAtLeastOnce**: When using the Storage Write API, specifies the write semantics. To use at-least once semantics (https://beam.apache.org/documentation/io/built-in/google-bigquery/#at-least-once-semantics), set this parameter to `true`. To use exactly-once semantics, set the parameter to `false`. This parameter applies only when `useStorageWriteApi` is `true`. The default value is `false`. +* **numStorageWriteApiStreams**: When using the Storage Write API, specifies the number of write streams. If `useStorageWriteApi` is `true` and `useStorageWriteApiAtLeastOnce` is `false`, then you must set this parameter. Defaults to: 0. +* **storageWriteApiTriggeringFrequencySec**: When using the Storage Write API, specifies the triggering frequency, in seconds. If `useStorageWriteApi` is `true` and `useStorageWriteApiAtLeastOnce` is `false`, then you must set this parameter. 
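To make the Storage Write API constraint above concrete, here is a minimal Terraform sketch in the same style as the job examples elsewhere in these READMEs; the container spec path, resource name, and all parameter values are assumptions for illustration only:

```terraform
# Sketch (assumed values): exactly-once Storage Write API writes. When
# useStorageWriteApi is "true" and useStorageWriteApiAtLeastOnce is "false",
# both numStorageWriteApiStreams and storageWriteApiTriggeringFrequencySec
# must be set explicitly.
resource "google_dataflow_flex_template_job" "cdc_to_bigquery_exactly_once" {
  provider = google-beta
  name     = "cdc-to-bigquery"
  region   = var.region
  # Assumed template location, not taken from this change.
  container_spec_gcs_path = "gs://dataflow-templates-${var.region}/latest/flex/Cdc_To_BigQuery_Template"
  parameters = {
    inputSubscriptions                    = "cdc-sub-1,cdc-sub-2"
    changeLogDataset                      = "cdc_staging"
    replicaDataset                        = "cdc_replica"
    useStorageWriteApi                    = "true"
    useStorageWriteApiAtLeastOnce         = "false"
    numStorageWriteApiStreams             = "3"
    storageWriteApiTriggeringFrequencySec = "10"
  }
}
```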
diff --git a/v2/cdc-parent/cdc-change-applier/src/main/java/com/google/cloud/dataflow/cdc/applier/CdcToBigQueryChangeApplierPipeline.java b/v2/cdc-parent/cdc-change-applier/src/main/java/com/google/cloud/dataflow/cdc/applier/CdcToBigQueryChangeApplierPipeline.java index 46776037d7..662c1b4696 100644 --- a/v2/cdc-parent/cdc-change-applier/src/main/java/com/google/cloud/dataflow/cdc/applier/CdcToBigQueryChangeApplierPipeline.java +++ b/v2/cdc-parent/cdc-change-applier/src/main/java/com/google/cloud/dataflow/cdc/applier/CdcToBigQueryChangeApplierPipeline.java @@ -152,7 +152,7 @@ public interface CdcApplierOptions extends PipelineOptions, BigQueryStorageApiSt optional = true, description = "Whether to use a single topic for all MySQL table changes.", helpText = - "Set this to true if you have configured your Debezium connector to publish all table" + "Set this to `true` if you configure your Debezium connector to publish all table" + " updates to a single topic") @Default.Boolean(false) Boolean getUseSingleTopic(); diff --git a/v2/common/src/main/java/com/google/cloud/teleport/v2/auto/blocks/WriteToPubSub.java b/v2/common/src/main/java/com/google/cloud/teleport/v2/auto/blocks/WriteToPubSub.java index f6d64add79..0025fa36ad 100644 --- a/v2/common/src/main/java/com/google/cloud/teleport/v2/auto/blocks/WriteToPubSub.java +++ b/v2/common/src/main/java/com/google/cloud/teleport/v2/auto/blocks/WriteToPubSub.java @@ -32,9 +32,8 @@ public interface WriteToPubSubOptions extends PipelineOptions { order = 8, groupName = "Target", description = "Output Pub/Sub topic", - helpText = - "The name of the topic to which data should published, in the format of 'projects/your-project-id/topics/your-topic-name'", - example = "projects/your-project-id/topics/your-topic-name") + helpText = "The name of the topic to publish data to.", + example = "projects//topics/") @Validation.Required String getOutputTopic(); diff --git a/v2/common/src/main/java/com/google/cloud/teleport/v2/options/CommonTemplateOptions.java b/v2/common/src/main/java/com/google/cloud/teleport/v2/options/CommonTemplateOptions.java index 88601095a6..606be47993 100644 --- a/v2/common/src/main/java/com/google/cloud/teleport/v2/options/CommonTemplateOptions.java +++ b/v2/common/src/main/java/com/google/cloud/teleport/v2/options/CommonTemplateOptions.java @@ -26,7 +26,7 @@ public interface CommonTemplateOptions extends PipelineOptions { optional = true, description = "Disabled algorithms to override jdk.tls.disabledAlgorithms", helpText = - "Comma separated algorithms to disable. If this value is set to none, no algorithm is " + "Comma separated algorithms to disable. If this value is set to `none`, no algorithm is " + "disabled. Use this parameter with caution, because the algorithms disabled " + "by default might have vulnerabilities or performance issues.", example = "SSLv3, RC4") @@ -45,7 +45,7 @@ public interface CommonTemplateOptions extends PipelineOptions { "Comma separated Cloud Storage paths or Secret Manager secrets for files to stage in the worker. 
" + "These files are saved in the /extra_files directory in each worker.", example = - "gs:///file.txt,projects//secrets//versions/") + "gs:///file.txt,projects//secrets//versions/") String getExtraFilesToStage(); void setExtraFilesToStage(String extraFilesToStage); diff --git a/v2/common/src/main/java/com/google/cloud/teleport/v2/options/WindowedFilenamePolicyOptions.java b/v2/common/src/main/java/com/google/cloud/teleport/v2/options/WindowedFilenamePolicyOptions.java index 26f579671d..a0033cb0c8 100644 --- a/v2/common/src/main/java/com/google/cloud/teleport/v2/options/WindowedFilenamePolicyOptions.java +++ b/v2/common/src/main/java/com/google/cloud/teleport/v2/options/WindowedFilenamePolicyOptions.java @@ -61,8 +61,8 @@ public interface WindowedFilenamePolicyOptions extends PipelineOptions { "The window duration is the interval in which data is written to the output directory. " + "Configure the duration based on the pipeline's throughput. For example, a higher " + "throughput might require smaller window sizes so that the data fits into memory. " - + "Defaults to 5m (5 minutes), with a minimum of 1s (1 second). Allowed formats are: [int]s (for seconds, example: 5s), " - + "[int]m (for minutes, example: 12m), [int]h (for hours, example: 2h).", + + "Defaults to `5m` (5 minutes), with a minimum of `1s` (1 second). Allowed formats are: `[int]s` (for seconds, example: `5s`), " + + "`[int]m` (for minutes, example: `12m`), `[int]h` (for hours, example: `2h`).", example = "5m") @Default.String("5m") String getWindowDuration(); @@ -75,9 +75,9 @@ public interface WindowedFilenamePolicyOptions extends PipelineOptions { regexes = {"^[^A-Za-z0-9/](y+|Y+)[^A-Za-z0-9/]$"}, description = "Custom Year Pattern to use for the output directory", helpText = - "Pattern for formatting the year. Must be one or more of 'y' or 'Y'. Case makes no" + "Pattern for formatting the year. Must be one or more of `y` or `Y`. Case makes no" + " difference in the year. The pattern can be optionally wrapped by characters that" - + " aren't either alphanumeric or the directory ('/') character. Defaults to 'YYYY'") + + " aren't either alphanumeric or the directory (`/`) character. Defaults to `YYYY`") @Default.String("YYYY") String getYearPattern(); @@ -89,9 +89,9 @@ public interface WindowedFilenamePolicyOptions extends PipelineOptions { regexes = {"^[^A-Za-z0-9/](M+)[^A-Za-z0-9/]$"}, description = "Custom Month Pattern to use for the output directory", helpText = - "Pattern for formatting the month. Must be one or more of the 'M' character. The " + "Pattern for formatting the month. Must be one or more of the `M` character. The " + "pattern can be optionally wrapped by characters that aren't alphanumeric or the " - + "directory ('/') character. Defaults to 'MM'") + + "directory (`/`) character. Defaults to `MM`") @Default.String("MM") String getMonthPattern(); @@ -103,10 +103,10 @@ public interface WindowedFilenamePolicyOptions extends PipelineOptions { regexes = {"^[^A-Za-z0-9/](d+|D+)[^A-Za-z0-9/]$"}, description = "Custom Day Pattern to use for the output directory", helpText = - "Pattern for formatting the day. Must be one or more of 'd' for day of month or 'D' for" + "Pattern for formatting the day. Must be one or more of `d` for day of month or `D` for" + " day of year. Case makes no difference in the year. The pattern can be optionally" - + " wrapped by characters that aren't either alphanumeric or the directory ('/')" - + " character. 
Defaults to 'dd'") + + " wrapped by characters that aren't either alphanumeric or the directory (`/`)" + + " character. Defaults to `dd`") @Default.String("dd") String getDayPattern(); @@ -118,9 +118,9 @@ public interface WindowedFilenamePolicyOptions extends PipelineOptions { regexes = {"^[^A-Za-z0-9/](H+)[^A-Za-z0-9/]$"}, description = "Custom Hour Pattern to use for the output directory", helpText = - "Pattern for formatting the hour. Must be one or more of the 'H' character. The pattern" + "Pattern for formatting the hour. Must be one or more of the `H` character. The pattern" + " can be optionally wrapped by characters that aren't alphanumeric or the directory" - + " ('/') character. Defaults to 'HH'") + + " (`/`) character. Defaults to `HH`") @Default.String("HH") String getHourPattern(); @@ -132,9 +132,9 @@ public interface WindowedFilenamePolicyOptions extends PipelineOptions { regexes = {"^[^A-Za-z0-9/](m+)[^A-Za-z0-9/]$"}, description = "Custom Minute Pattern to use for the output directory", helpText = - "Pattern for formatting the minute. Must be one or more of the 'm' character. The pattern" + "Pattern for formatting the minute. Must be one or more of the `m` character. The pattern" + " can be optionally wrapped by characters that aren't alphanumeric or the directory" - + " ('/') character. Defaults to 'mm'") + + " (`/`) character. Defaults to `mm`") @Default.String("mm") String getMinutePattern(); diff --git a/v2/common/src/main/java/com/google/cloud/teleport/v2/transforms/BigQueryConverters.java b/v2/common/src/main/java/com/google/cloud/teleport/v2/transforms/BigQueryConverters.java index 116071d69f..6ee96fbcc6 100644 --- a/v2/common/src/main/java/com/google/cloud/teleport/v2/transforms/BigQueryConverters.java +++ b/v2/common/src/main/java/com/google/cloud/teleport/v2/transforms/BigQueryConverters.java @@ -168,12 +168,12 @@ public interface BigQueryReadOptions extends PipelineOptions { optional = true, description = "BigQuery source table", helpText = - "The BigQuery table to read from. Format: `projectId:datasetId.tablename`. If you specify `inputTableSpec`, the template reads the data directly from BigQuery storage by using the" + "The BigQuery table to read from. If you specify `inputTableSpec`, the template reads the data directly from BigQuery storage by using the" + " BigQuery Storage Read API (https://cloud.google.com/bigquery/docs/reference/storage)." + " For information about limitations in the Storage Read API, see" + " https://cloud.google.com/bigquery/docs/reference/storage#limitations." + " You must specify either `inputTableSpec` or `query`. If you set both parameters, the template uses the `query` parameter.", - example = "bigquery-project:dataset.input_table") + example = ":.") String getInputTableSpec(); void setInputTableSpec(String inputTableSpec); @@ -183,11 +183,10 @@ public interface BigQueryReadOptions extends PipelineOptions { optional = true, description = "The dead-letter table name to output failed messages to BigQuery", helpText = - "The BigQuery table for messages that failed to reach the output" - + " table, in the format :.." - + " If a table doesn't exist, is is created during pipeline execution. If" + "The BigQuery table for messages that failed to reach the output table." + + " If a table doesn't exist, it is created during pipeline execution. 
If" + " not specified, `_error_records` is used.", - example = "your-project-id:your-dataset.your-table-name") + example = ":.") String getOutputDeadletterTable(); void setOutputDeadletterTable(String outputDeadletterTable); @@ -212,8 +211,8 @@ public interface BigQueryReadOptions extends PipelineOptions { optional = true, description = "Set to true to use legacy SQL", helpText = - "Set to true to use legacy SQL. This parameter only applies when using" - + " the `query` parameter. Defaults to: false.") + "Set to `true` to use legacy SQL. This parameter only applies when using" + + " the `query` parameter. Defaults to `false`.") @Default.Boolean(false) Boolean getUseLegacySql(); diff --git a/v2/common/src/main/java/com/google/cloud/teleport/v2/transforms/CsvConverters.java b/v2/common/src/main/java/com/google/cloud/teleport/v2/transforms/CsvConverters.java index 7440101d41..f117f86e9f 100644 --- a/v2/common/src/main/java/com/google/cloud/teleport/v2/transforms/CsvConverters.java +++ b/v2/common/src/main/java/com/google/cloud/teleport/v2/transforms/CsvConverters.java @@ -184,7 +184,7 @@ public interface CsvPipelineOptions extends PipelineOptions { order = 1, description = "The input filepattern to read from.", helpText = - "The Cloud Storage file pattern to search for CSV files. Example: gs://mybucket/test-*.csv.") + "The Cloud Storage file pattern to search for CSV files. For example, `gs://mybucket/test-*.csv`.") String getInputFileSpec(); void setInputFileSpec(String inputFileSpec); @@ -214,8 +214,7 @@ public interface CsvPipelineOptions extends PipelineOptions { order = 4, optional = true, description = "Column delimiter of the data files.", - helpText = - "The column delimiter of the input text files. Default: use delimiter provided in csvFormat", + helpText = "The column delimiter of the input text files. Default: `,`", example = ",") @Default.InstanceFactory(DelimiterFactory.class) String getDelimiter(); @@ -227,7 +226,7 @@ public interface CsvPipelineOptions extends PipelineOptions { optional = true, description = "CSV Format to use for parsing records.", helpText = - "CSV format specification to use for parsing records. Default is: Default. See https://commons.apache.org/proper/commons-csv/apidocs/org/apache/commons/csv/CSVFormat.html for more details. Must match format names exactly found at: " + "CSV format specification to use for parsing records. Default is: `Default`. See https://commons.apache.org/proper/commons-csv/apidocs/org/apache/commons/csv/CSVFormat.html for more details. Must match format names exactly found at: " + "https://commons.apache.org/proper/commons-csv/apidocs/org/apache/commons/csv/CSVFormat.Predefined.html") @Default.String("Default") String getCsvFormat(); @@ -238,7 +237,7 @@ public interface CsvPipelineOptions extends PipelineOptions { order = 6, optional = true, description = "Path to JSON schema", - helpText = "The path to the JSON schema. Defaults to: null.", + helpText = "The path to the JSON schema. Defaults to `null`.", example = "gs://path/to/schema") String getJsonSchemaPath(); @@ -249,7 +248,7 @@ public interface CsvPipelineOptions extends PipelineOptions { optional = true, description = "Set to true if number of files is in the tens of thousands", helpText = - "Set to true if number of files is in the tens of thousands. Defaults to: false.") + "Set to true if number of files is in the tens of thousands. 
Defaults to `false`.") @Default.Boolean(false) Boolean getLargeNumFiles(); @@ -261,7 +260,7 @@ public interface CsvPipelineOptions extends PipelineOptions { regexes = {"^(US-ASCII|ISO-8859-1|UTF-8|UTF-16)$"}, description = "CSV file encoding", helpText = - "The CSV file character encoding format. Allowed Values are US-ASCII, ISO-8859-1, UTF-8, and UTF-16.") + "The CSV file character encoding format. Allowed values are `US-ASCII`, `ISO-8859-1`, `UTF-8`, and `UTF-16`.") @Default.String("UTF-8") String getCsvFileEncoding(); @@ -272,8 +271,8 @@ public interface CsvPipelineOptions extends PipelineOptions { optional = true, description = "Log detailed CSV conversion errors", helpText = - "Set to true to enable detailed error logging when CSV parsing fails. Note that this may expose sensitive data in the logs (e.g., if the CSV file contains passwords)." - + " Default: false.") + "Set to `true` to enable detailed error logging when CSV parsing fails. Note that this may expose sensitive data in the logs (e.g., if the CSV file contains passwords)." + + " Default: `false`.") @Default.Boolean(false) Boolean getLogDetailedCsvConversionErrors(); diff --git a/v2/common/src/main/java/com/google/cloud/teleport/v2/transforms/JavascriptTextTransformer.java b/v2/common/src/main/java/com/google/cloud/teleport/v2/transforms/JavascriptTextTransformer.java index ba8a48d1ca..ed05a4843f 100644 --- a/v2/common/src/main/java/com/google/cloud/teleport/v2/transforms/JavascriptTextTransformer.java +++ b/v2/common/src/main/java/com/google/cloud/teleport/v2/transforms/JavascriptTextTransformer.java @@ -109,8 +109,8 @@ public interface JavascriptTextTransformerOptions extends PipelineOptions { + "is greater than 0, Dataflow periodically checks the UDF file in " + "Cloud Storage, and reloads the UDF if the file is modified. " + "This parameter allows you to update the UDF while the pipeline is running, " - + "without needing to restart the job. If the value is 0, UDF reloading is " - + "disabled. The default value is 0.") + + "without needing to restart the job. If the value is `0`, UDF reloading is " + + "disabled. The default value is `0`.") @Default.Integer(0) Integer getJavascriptTextTransformReloadIntervalMinutes(); diff --git a/v2/dataplex/README_Dataplex_BigQuery_to_GCS.md b/v2/dataplex/README_Dataplex_BigQuery_to_GCS.md index 717c745133..aa5016d07e 100644 --- a/v2/dataplex/README_Dataplex_BigQuery_to_GCS.md +++ b/v2/dataplex/README_Dataplex_BigQuery_to_GCS.md @@ -14,21 +14,21 @@ on [Metadata Annotations](https://github.com/GoogleCloudPlatform/DataflowTemplat ### Required parameters -* **sourceBigQueryDataset** : Dataplex asset name for the BigQuery dataset to tier data from. Format: projects//locations//lakes//zones//assets/ (Dataplex asset name) or projects//datasets/ (BigQuery dataset ID). -* **destinationStorageBucketAssetName** : Dataplex asset name for the Cloud Storage bucket to tier data to. Format: projects//locations//lakes//zones//assets/. -* **maxParallelBigQueryMetadataRequests** : The maximum number of parallel requests that will be sent to BigQuery when loading table/partition metadata. Defaults to: 5. +* **sourceBigQueryDataset**: Dataplex asset name for the BigQuery dataset to tier data from. Format: projects//locations//lakes//zones//assets/ (Dataplex asset name) or projects//datasets/ (BigQuery dataset ID). +* **destinationStorageBucketAssetName**: Dataplex asset name for the Cloud Storage bucket to tier data to. Format: projects//locations//lakes//zones//assets/. 
+* **maxParallelBigQueryMetadataRequests**: The maximum number of parallel requests that will be sent to BigQuery when loading table/partition metadata. Defaults to: 5. ### Optional parameters -* **tables** : A comma-separated list of BigQuery tables to tier. If none specified, all tables will be tiered. Tables should be specified by their name only (no project/dataset prefix). Case-sensitive!. -* **exportDataModifiedBeforeDateTime** : Move data older than this date (and optional time). For partitioned tables, move partitions last modified before this date/time. For non-partitioned tables, move if the table was last modified before this date/time. If not specified, move all tables / partitions. The date/time is parsed in the default time zone by default, but optional suffixes Z and +HH:mm are supported. Format: YYYY-MM-DD or YYYY-MM-DDTHH:mm:ss or YYYY-MM-DDTHH:mm:ss+03:00. Relative date/time (https://en.wikipedia.org/wiki/ISO_8601#Durations) is also supported. Format: -PnDTnHnMn.nS (must start with -P meaning time in the past). -* **fileFormat** : Output file format in Cloud Storage. Format: PARQUET or AVRO. Defaults to: PARQUET. -* **fileCompression** : Output file compression. Format: UNCOMPRESSED, SNAPPY, GZIP, or BZIP2. BZIP2 not supported for PARQUET files. Defaults to: SNAPPY. -* **partitionIdRegExp** : Process partitions with partition ID matching this regexp only. Default: process all. -* **writeDisposition** : Specifies the action that occurs if a destination file already exists. Format: OVERWRITE, FAIL, SKIP. If SKIP, only files that don't exist in the destination directory will be processed. If FAIL and at least one file already exists, no data will be processed and an error will be produced. Defaults to: SKIP. -* **enforceSamePartitionKey** : Whether to enforce the same partition key. Due to a BigQuery limitation, it's not possible to have a partitioned external table with the partition key (in the file path) to have the same name as one of the columns in the file. If this param is true (the default), the partition key of the target file will be set to the original partition column name and the column in the file will be renamed. If false, it's the partition key that will be renamed. -* **deleteSourceData** : Whether to delete source data from BigQuery after a successful export. Format: true or false. Defaults to: false. -* **updateDataplexMetadata** : Whether to update Dataplex metadata for the newly created entities. Only supported for Cloud Storage destination. If enabled, the pipeline will automatically copy the schema from source to the destination Dataplex entities, and the automated Dataplex Discovery won't run for them. Use this flag in cases where you have managed schema at the source. Defaults to: false. +* **tables**: A comma-separated list of BigQuery tables to tier. If none specified, all tables will be tiered. Tables should be specified by their name only (no project/dataset prefix). Case-sensitive!. +* **exportDataModifiedBeforeDateTime**: Move data older than this date (and optional time). For partitioned tables, move partitions last modified before this date/time. For non-partitioned tables, move if the table was last modified before this date/time. If not specified, move all tables / partitions. The date/time is parsed in the default time zone by default, but optional suffixes Z and +HH:mm are supported. Format: YYYY-MM-DD or YYYY-MM-DDTHH:mm:ss or YYYY-MM-DDTHH:mm:ss+03:00. 
Relative date/time (https://en.wikipedia.org/wiki/ISO_8601#Durations) is also supported. Format: -PnDTnHnMn.nS (must start with -P meaning time in the past). +* **fileFormat**: Output file format in Cloud Storage. Format: PARQUET or AVRO. Defaults to: PARQUET. +* **fileCompression**: Output file compression. Format: UNCOMPRESSED, SNAPPY, GZIP, or BZIP2. BZIP2 not supported for PARQUET files. Defaults to: SNAPPY. +* **partitionIdRegExp**: Process partitions with partition ID matching this regexp only. Default: process all. +* **writeDisposition**: Specifies the action that occurs if a destination file already exists. Format: OVERWRITE, FAIL, SKIP. If SKIP, only files that don't exist in the destination directory will be processed. If FAIL and at least one file already exists, no data will be processed and an error will be produced. Defaults to: SKIP. +* **enforceSamePartitionKey**: Whether to enforce the same partition key. Due to a BigQuery limitation, it's not possible to have a partitioned external table with the partition key (in the file path) to have the same name as one of the columns in the file. If this param is true (the default), the partition key of the target file will be set to the original partition column name and the column in the file will be renamed. If false, it's the partition key that will be renamed. +* **deleteSourceData**: Whether to delete source data from BigQuery after a successful export. Format: true or false. Defaults to: false. +* **updateDataplexMetadata**: Whether to update Dataplex metadata for the newly created entities. Only supported for Cloud Storage destination. If enabled, the pipeline will automatically copy the schema from source to the destination Dataplex entities, and the automated Dataplex Discovery won't run for them. Use this flag in cases where you have managed schema at the source. Defaults to: false. diff --git a/v2/dataplex/README_Dataplex_File_Format_Conversion.md b/v2/dataplex/README_Dataplex_File_Format_Conversion.md index 2a80b0474e..64ee27083d 100644 --- a/v2/dataplex/README_Dataplex_File_Format_Conversion.md +++ b/v2/dataplex/README_Dataplex_File_Format_Conversion.md @@ -14,15 +14,15 @@ on [Metadata Annotations](https://github.com/GoogleCloudPlatform/DataflowTemplat ### Required parameters -* **inputAssetOrEntitiesList** : Dataplex asset or Dataplex entities that contain the input files. Format: projects//locations//lakes//zones//assets/ OR projects//locations//lakes//zones//entities/,projects//locations//lakes//zones//entities/... . -* **outputFileFormat** : Output file format in Cloud Storage. Format: PARQUET or AVRO. -* **outputAsset** : Name of the Dataplex asset that contains Cloud Storage bucket where output files will be put into. Format: projects//locations//lakes//zones//assets/. +* **inputAssetOrEntitiesList**: Dataplex asset or Dataplex entities that contain the input files. Format: projects//locations//lakes//zones//assets/ OR projects//locations//lakes//zones//entities/,projects//locations//lakes//zones//entities/... . +* **outputFileFormat**: Output file format in Cloud Storage. Format: PARQUET or AVRO. +* **outputAsset**: Name of the Dataplex asset that contains Cloud Storage bucket where output files will be put into. Format: projects//locations//lakes//zones//assets/. ### Optional parameters -* **outputFileCompression** : Output file compression. Format: UNCOMPRESSED, SNAPPY, GZIP, or BZIP2. BZIP2 not supported for PARQUET files. Defaults to: SNAPPY. 
-* **writeDisposition** : Specifies the action that occurs if a destination file already exists. Format: OVERWRITE, FAIL, SKIP. If SKIP, only files that don't exist in the destination directory will be processed. If FAIL and at least one file already exists, no data will be processed and an error will be produced. Defaults to: SKIP. -* **updateDataplexMetadata** : Whether to update Dataplex metadata for the newly created entities. Only supported for Cloud Storage destination. If enabled, the pipeline will automatically copy the schema from source to the destination Dataplex entities, and the automated Dataplex Discovery won't run for them. Use this flag in cases where you have managed schema at the source. Defaults to: false. +* **outputFileCompression**: Output file compression. Format: UNCOMPRESSED, SNAPPY, GZIP, or BZIP2. BZIP2 not supported for PARQUET files. Defaults to: SNAPPY. +* **writeDisposition**: Specifies the action that occurs if a destination file already exists. Format: OVERWRITE, FAIL, SKIP. If SKIP, only files that don't exist in the destination directory will be processed. If FAIL and at least one file already exists, no data will be processed and an error will be produced. Defaults to: SKIP. +* **updateDataplexMetadata**: Whether to update Dataplex metadata for the newly created entities. Only supported for Cloud Storage destination. If enabled, the pipeline will automatically copy the schema from source to the destination Dataplex entities, and the automated Dataplex Discovery won't run for them. Use this flag in cases where you have managed schema at the source. Defaults to: false. diff --git a/v2/dataplex/README_Dataplex_JDBC_Ingestion.md b/v2/dataplex/README_Dataplex_JDBC_Ingestion.md index fe4f05d583..128cc6ec88 100644 --- a/v2/dataplex/README_Dataplex_JDBC_Ingestion.md +++ b/v2/dataplex/README_Dataplex_JDBC_Ingestion.md @@ -22,28 +22,28 @@ on [Metadata Annotations](https://github.com/GoogleCloudPlatform/DataflowTemplat ### Required parameters -* **connectionURL** : Url connection string to connect to the JDBC source. Connection string can be passed in as plaintext or as a base64 encoded string encrypted by Google Cloud KMS. (Example: jdbc:mysql://some-host:3306/sampledb). -* **driverClassName** : JDBC driver class name to use. (Example: com.mysql.jdbc.Driver). -* **driverJars** : Comma separated Cloud Storage paths for JDBC drivers. (Example: gs://your-bucket/driver_jar1.jar,gs://your-bucket/driver_jar2.jar). -* **query** : Query to be executed on the source to extract the data. (Example: select * from sampledb.sample_table). -* **outputTable** : BigQuery table location or Cloud Storage top folder name to write the output to. If it's a BigQuery table location, the table’s schema must match the source query schema and should in the format of some-project-id:somedataset.sometable. If it's a Cloud Storage top folder, just provide the top folder name. -* **outputAsset** : Dataplex output asset ID to which the results are stored to. Should be in the format of projects/your-project/locations//lakes//zones//assets/. +* **connectionURL**: Url connection string to connect to the JDBC source. Connection string can be passed in as plaintext or as a base64 encoded string encrypted by Google Cloud KMS. For example, `jdbc:mysql://some-host:3306/sampledb`. +* **driverClassName**: JDBC driver class name to use. For example, `com.mysql.jdbc.Driver`. +* **driverJars**: Comma separated Cloud Storage paths for JDBC drivers. 
For example, `gs://your-bucket/driver_jar1.jar,gs://your-bucket/driver_jar2.jar`. +* **query**: Query to be executed on the source to extract the data. For example, `select * from sampledb.sample_table`. +* **outputTable**: BigQuery table location or Cloud Storage top folder name to write the output to. If it's a BigQuery table location, the table’s schema must match the source query schema and should in the format of some-project-id:somedataset.sometable. If it's a Cloud Storage top folder, just provide the top folder name. +* **outputAsset**: Dataplex output asset ID to which the results are stored to. Should be in the format of projects/your-project/locations//lakes//zones//assets/. ### Optional parameters -* **connectionProperties** : Properties string to use for the JDBC connection. Format of the string must be [propertyName=property;]*. (Example: unicode=true;characterEncoding=UTF-8). -* **username** : User name to be used for the JDBC connection. User name can be passed in as plaintext or as a base64 encoded string encrypted by Google Cloud KMS. -* **password** : Password to be used for the JDBC connection. Password can be passed in as plaintext or as a base64 encoded string encrypted by Google Cloud KMS. -* **KMSEncryptionKey** : If this parameter is provided, password, user name and connection string should all be passed in encrypted. Encrypt parameters using the KMS API encrypt endpoint. See: https://cloud.google.com/kms/docs/reference/rest/v1/projects.locations.keyRings.cryptoKeys/encrypt (Example: projects/your-project/locations/global/keyRings/your-keyring/cryptoKeys/your-key). -* **partitioningScheme** : The partition scheme when writing the file. Format: DAILY or MONTHLY or HOURLY. Defaults to: DAILY. -* **paritionColumn** : The partition column on which the partition is based. The column type must be of timestamp/date format. -* **writeDisposition** : Strategy to employ if the target file/table exists. If the table exists - should it overwrite/append or fail the load. Format: WRITE_APPEND or WRITE_TRUNCATE or WRITE_EMPTY. Only supported for writing to BigQuery. Defaults to: WRITE_EMPTY. -* **fileFormat** : Output file format in Cloud Storage. Format: PARQUET or AVRO. Defaults to: PARQUET. -* **useColumnAlias** : If enabled (set to true) the pipeline will consider column alias ("AS") instead of the column name to map the rows to BigQuery. Defaults to false. -* **fetchSize** : It should ONLY be used if the default value throws memory errors. If not set, using Beam's default fetch size. -* **updateDataplexMetadata** : Whether to update Dataplex metadata for the newly created entities. Only supported for Cloud Storage destination. If enabled, the pipeline will automatically copy the schema from source to the destination Dataplex entities, and the automated Dataplex Discovery won't run for them. Use this flag in cases where you have managed schema at the source. Defaults to: false. -* **useStorageWriteApi** : If `true`, the pipeline uses the BigQuery Storage Write API (https://cloud.google.com/bigquery/docs/write-api). The default value is `false`. For more information, see Using the Storage Write API (https://beam.apache.org/documentation/io/built-in/google-bigquery/#storage-write-api). -* **useStorageWriteApiAtLeastOnce** : When using the Storage Write API, specifies the write semantics. To use at-least-once semantics (https://beam.apache.org/documentation/io/built-in/google-bigquery/#at-least-once-semantics), set this parameter to `true`. 
To use exactly-once semantics, set the parameter to `false`. This parameter applies only when `useStorageWriteApi` is `true`. The default value is `false`. +* **connectionProperties**: Properties string to use for the JDBC connection. Format of the string must be [propertyName=property;]*. For example, `unicode=true;characterEncoding=UTF-8`. +* **username**: User name to be used for the JDBC connection. User name can be passed in as plaintext or as a base64 encoded string encrypted by Google Cloud KMS. +* **password**: Password to be used for the JDBC connection. Password can be passed in as plaintext or as a base64 encoded string encrypted by Google Cloud KMS. +* **KMSEncryptionKey**: If this parameter is provided, password, user name and connection string should all be passed in encrypted. Encrypt parameters using the KMS API encrypt endpoint. See: https://cloud.google.com/kms/docs/reference/rest/v1/projects.locations.keyRings.cryptoKeys/encrypt For example, `projects/your-project/locations/global/keyRings/your-keyring/cryptoKeys/your-key`. +* **partitioningScheme**: The partition scheme when writing the file. Format: DAILY or MONTHLY or HOURLY. Defaults to: DAILY. +* **paritionColumn**: The partition column on which the partition is based. The column type must be of timestamp/date format. +* **writeDisposition**: Strategy to employ if the target file/table exists. If the table exists - should it overwrite/append or fail the load. Format: WRITE_APPEND or WRITE_TRUNCATE or WRITE_EMPTY. Only supported for writing to BigQuery. Defaults to: WRITE_EMPTY. +* **fileFormat**: Output file format in Cloud Storage. Format: PARQUET or AVRO. Defaults to: PARQUET. +* **useColumnAlias**: If enabled (set to true) the pipeline will consider column alias ("AS") instead of the column name to map the rows to BigQuery. Defaults to false. +* **fetchSize**: It should ONLY be used if the default value throws memory errors. If not set, using Beam's default fetch size. +* **updateDataplexMetadata**: Whether to update Dataplex metadata for the newly created entities. Only supported for Cloud Storage destination. If enabled, the pipeline will automatically copy the schema from source to the destination Dataplex entities, and the automated Dataplex Discovery won't run for them. Use this flag in cases where you have managed schema at the source. Defaults to: false. +* **useStorageWriteApi**: If `true`, the pipeline uses the BigQuery Storage Write API (https://cloud.google.com/bigquery/docs/write-api). The default value is `false`. For more information, see Using the Storage Write API (https://beam.apache.org/documentation/io/built-in/google-bigquery/#storage-write-api). +* **useStorageWriteApiAtLeastOnce**: When using the Storage Write API, specifies the write semantics. To use at-least-once semantics (https://beam.apache.org/documentation/io/built-in/google-bigquery/#at-least-once-semantics), set this parameter to `true`. To use exactly-once semantics, set the parameter to `false`. This parameter applies only when `useStorageWriteApi` is `true`. The default value is `false`. 
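For the encrypted variants of `connectionURL`, `username`, and `password` described above, the value passed to the template is expected to be encrypted with Cloud KMS and then base64-encoded, with the key path supplied in `KMSEncryptionKey`. The following is a minimal, hypothetical sketch (not part of the template) of producing such a value with the google-cloud-kms Java client; the key path and plaintext are placeholders that mirror the examples above.

```java
import com.google.cloud.kms.v1.CryptoKeyName;
import com.google.cloud.kms.v1.EncryptResponse;
import com.google.cloud.kms.v1.KeyManagementServiceClient;
import com.google.protobuf.ByteString;
import java.util.Base64;

/** Hypothetical helper: encrypts a JDBC parameter with Cloud KMS and base64-encodes it. */
public final class EncryptJdbcParameter {

  public static void main(String[] args) throws Exception {
    // Placeholder key path; it must match the template's KMSEncryptionKey parameter.
    CryptoKeyName keyName =
        CryptoKeyName.of("your-project", "global", "your-keyring", "your-key");
    String plaintext = "jdbc:mysql://some-host:3306/sampledb";

    try (KeyManagementServiceClient client = KeyManagementServiceClient.create()) {
      // Call the KMS encrypt endpoint on the plaintext connection string.
      EncryptResponse response = client.encrypt(keyName, ByteString.copyFromUtf8(plaintext));
      // Base64-encode the ciphertext so it can be passed as a template parameter.
      String encoded =
          Base64.getEncoder().encodeToString(response.getCiphertext().toByteArray());
      System.out.println(encoded);
    }
  }
}
```

The resulting string would then be passed as `connectionURL` (or `username`/`password`), alongside the same key path in `KMSEncryptionKey`.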
@@ -259,16 +259,16 @@ resource "google_dataflow_flex_template_job" "dataplex_jdbc_ingestion" { name = "dataplex-jdbc-ingestion" region = var.region parameters = { - connectionURL = "jdbc:mysql://some-host:3306/sampledb" - driverClassName = "com.mysql.jdbc.Driver" - driverJars = "gs://your-bucket/driver_jar1.jar,gs://your-bucket/driver_jar2.jar" - query = "select * from sampledb.sample_table" + connectionURL = "" + driverClassName = "" + driverJars = "" + query = "" outputTable = "" outputAsset = "" - # connectionProperties = "unicode=true;characterEncoding=UTF-8" + # connectionProperties = "" # username = "" # password = "" - # KMSEncryptionKey = "projects/your-project/locations/global/keyRings/your-keyring/cryptoKeys/your-key" + # KMSEncryptionKey = "" # partitioningScheme = "DAILY" # paritionColumn = "" # writeDisposition = "WRITE_EMPTY" diff --git a/v2/datastream-to-bigquery/README_Cloud_Datastream_to_BigQuery.md b/v2/datastream-to-bigquery/README_Cloud_Datastream_to_BigQuery.md index 99d1d246cf..09ba8ec724 100644 --- a/v2/datastream-to-bigquery/README_Cloud_Datastream_to_BigQuery.md +++ b/v2/datastream-to-bigquery/README_Cloud_Datastream_to_BigQuery.md @@ -31,39 +31,39 @@ on [Metadata Annotations](https://github.com/GoogleCloudPlatform/DataflowTemplat ### Required parameters -* **inputFilePattern** : The file location for Datastream file output in Cloud Storage, in the format: gs:////. -* **inputFileFormat** : The format of the output files produced by Datastream. Value can be 'avro' or 'json'. Defaults to: avro. -* **gcsPubSubSubscription** : The Pub/Sub subscription used by Cloud Storage to notify Dataflow of new files available for processing, in the format: projects//subscriptions/. -* **outputStagingDatasetTemplate** : The name of the dataset that contains staging tables. This parameter supports templates, for example {_metadata_dataset}_log or my_dataset_log. Normally, this parameter is a dataset name. Defaults to: {_metadata_dataset}. -* **outputDatasetTemplate** : The name of the dataset that contains the replica tables. This parameter supports templates, for example {_metadata_dataset} or my_dataset. Normally, this parameter is a dataset name. Defaults to: {_metadata_dataset}. -* **deadLetterQueueDirectory** : The path that Dataflow uses to write the dead-letter queue output. This path must not be in the same path as the Datastream file output. Defaults to empty. +* **inputFilePattern**: The file location for Datastream file output in Cloud Storage, in the format `gs:////`. +* **inputFileFormat**: The format of the output files produced by Datastream. Allowed values are `avro` and `json`. Defaults to `avro`. +* **gcsPubSubSubscription**: The Pub/Sub subscription used by Cloud Storage to notify Dataflow of new files available for processing, in the format: `projects//subscriptions/`. +* **outputStagingDatasetTemplate**: The name of the dataset that contains staging tables. This parameter supports templates, for example `{_metadata_dataset}_log` or `my_dataset_log`. Normally, this parameter is a dataset name. Defaults to `{_metadata_dataset}`. +* **outputDatasetTemplate**: The name of the dataset that contains the replica tables. This parameter supports templates, for example `{_metadata_dataset}` or `my_dataset`. Normally, this parameter is a dataset name. Defaults to `{_metadata_dataset}`. +* **deadLetterQueueDirectory**: The path that Dataflow uses to write the dead-letter queue output. This path must not be in the same path as the Datastream file output. Defaults to `empty`. 
### Optional parameters -* **streamName** : The name or the template for the stream to poll for schema information. Defaults to: {_metadata_stream}. The default value is usually enough. -* **rfcStartDateTime** : The starting DateTime to use to fetch data from Cloud Storage (https://tools.ietf.org/html/rfc3339). Defaults to: 1970-01-01T00:00:00.00Z. -* **fileReadConcurrency** : The number of concurrent DataStream files to read. Default is 10. -* **outputProjectId** : The ID of the Google Cloud project that contains the BigQuery datasets to output data into. The default for this parameter is the project where the Dataflow pipeline is running. -* **outputStagingTableNameTemplate** : The template to use to name the staging tables. For example, {_metadata_table}). Defaults to: {_metadata_table}_log. -* **outputTableNameTemplate** : The template to use for the name of the replica tables, for example {_metadata_table}. Defaults to: {_metadata_table}. -* **ignoreFields** : Comma-separated fields to ignore in BigQuery. Defaults to: _metadata_stream,_metadata_schema,_metadata_table,_metadata_source,_metadata_tx_id,_metadata_dlq_reconsumed,_metadata_primary_keys,_metadata_error,_metadata_retry_count. (Example: _metadata_stream,_metadata_schema). -* **mergeFrequencyMinutes** : The number of minutes between merges for a given table. Defaults to: 5. -* **dlqRetryMinutes** : The number of minutes between DLQ Retries. Defaults to: 10. -* **dataStreamRootUrl** : The Datastream API root URL. Defaults to: https://datastream.googleapis.com/. -* **applyMerge** : Whether to disable MERGE queries for the job. Defaults to: true. -* **mergeConcurrency** : The number of concurrent BigQuery MERGE queries. Only effective when applyMerge is set to true. Defaults to: 30. -* **partitionRetentionDays** : The number of days to use for partition retention when running BigQuery merges. Defaults to: 1. -* **useStorageWriteApiAtLeastOnce** : This parameter takes effect only if "Use BigQuery Storage Write API" is enabled. If true, at-least-once semantics are used for the Storage Write API. Otherwise, exactly-once semantics are used. Defaults to: false. -* **javascriptTextTransformGcsPath** : The Cloud Storage URI of the .js file that defines the JavaScript user-defined function (UDF) to use. (Example: gs://my-bucket/my-udfs/my_file.js). -* **javascriptTextTransformFunctionName** : The name of the JavaScript user-defined function (UDF) to use. For example, if your JavaScript function code is `myTransform(inJson) { /*...do stuff...*/ }`, then the function name is `myTransform`. For sample JavaScript UDFs, see UDF Examples (https://github.com/GoogleCloudPlatform/DataflowTemplates#udf-examples). -* **javascriptTextTransformReloadIntervalMinutes** : Specifies how frequently to reload the UDF, in minutes. If the value is greater than 0, Dataflow periodically checks the UDF file in Cloud Storage, and reloads the UDF if the file is modified. This parameter allows you to update the UDF while the pipeline is running, without needing to restart the job. If the value is 0, UDF reloading is disabled. The default value is 0. -* **pythonTextTransformGcsPath** : The Cloud Storage path pattern for the Python code containing your user-defined functions. (Example: gs://your-bucket/your-transforms/*.py). -* **pythonRuntimeVersion** : The runtime version to use for this Python UDF. -* **pythonTextTransformFunctionName** : The name of the function to call from your JavaScript file. Use only letters, digits, and underscores. 
(Example: transform_udf1). -* **runtimeRetries** : The number of times a runtime will be retried before failing. Defaults to: 5. -* **useStorageWriteApi** : If true, the pipeline uses the BigQuery Storage Write API (https://cloud.google.com/bigquery/docs/write-api). The default value is `false`. For more information, see Using the Storage Write API (https://beam.apache.org/documentation/io/built-in/google-bigquery/#storage-write-api). -* **numStorageWriteApiStreams** : When using the Storage Write API, specifies the number of write streams. If `useStorageWriteApi` is `true` and `useStorageWriteApiAtLeastOnce` is `false`, then you must set this parameter. Defaults to: 0. -* **storageWriteApiTriggeringFrequencySec** : When using the Storage Write API, specifies the triggering frequency, in seconds. If `useStorageWriteApi` is `true` and `useStorageWriteApiAtLeastOnce` is `false`, then you must set this parameter. +* **streamName**: The name or the template for the stream to poll for schema information. Defaults to: {_metadata_stream}. The default value is usually enough. +* **rfcStartDateTime**: The starting DateTime to use to fetch data from Cloud Storage (https://tools.ietf.org/html/rfc3339). Defaults to: `1970-01-01T00:00:00.00Z`. +* **fileReadConcurrency**: The number of concurrent DataStream files to read. Default is `10`. +* **outputProjectId**: The ID of the Google Cloud project that contains the BigQuery datasets to output data into. The default for this parameter is the project where the Dataflow pipeline is running. +* **outputStagingTableNameTemplate**: The template to use to name the staging tables. For example, `{_metadata_table}`. Defaults to `{_metadata_table}_log`. +* **outputTableNameTemplate**: The template to use for the name of the replica tables, for example `{_metadata_table}`. Defaults to `{_metadata_table}`. +* **ignoreFields**: Comma-separated fields to ignore in BigQuery. Defaults to: `_metadata_stream,_metadata_schema,_metadata_table,_metadata_source,_metadata_tx_id,_metadata_dlq_reconsumed,_metadata_primary_keys,_metadata_error,_metadata_retry_count`. For example, `_metadata_stream,_metadata_schema`. +* **mergeFrequencyMinutes**: The number of minutes between merges for a given table. Defaults to `5`. +* **dlqRetryMinutes**: The number of minutes between DLQ Retries. Defaults to `10`. +* **dataStreamRootUrl**: The Datastream API root URL. Defaults to: https://datastream.googleapis.com/. +* **applyMerge**: Whether to disable MERGE queries for the job. Defaults to `true`. +* **mergeConcurrency**: The number of concurrent BigQuery MERGE queries. Only effective when applyMerge is set to true. Defaults to `30`. +* **partitionRetentionDays**: The number of days to use for partition retention when running BigQuery merges. Defaults to `1`. +* **useStorageWriteApiAtLeastOnce**: This parameter takes effect only if `Use BigQuery Storage Write API` is enabled. If `true`, at-least-once semantics are used for the Storage Write API. Otherwise, exactly-once semantics are used. Defaults to `false`. +* **javascriptTextTransformGcsPath**: The Cloud Storage URI of the .js file that defines the JavaScript user-defined function (UDF) to use. For example, `gs://my-bucket/my-udfs/my_file.js`. +* **javascriptTextTransformFunctionName**: The name of the JavaScript user-defined function (UDF) to use. For example, if your JavaScript function code is `myTransform(inJson) { /*...do stuff...*/ }`, then the function name is `myTransform`. 
For sample JavaScript UDFs, see UDF Examples (https://github.com/GoogleCloudPlatform/DataflowTemplates#udf-examples). +* **javascriptTextTransformReloadIntervalMinutes**: Specifies how frequently to reload the UDF, in minutes. If the value is greater than 0, Dataflow periodically checks the UDF file in Cloud Storage, and reloads the UDF if the file is modified. This parameter allows you to update the UDF while the pipeline is running, without needing to restart the job. If the value is `0`, UDF reloading is disabled. The default value is `0`. +* **pythonTextTransformGcsPath**: The Cloud Storage path pattern for the Python code containing your user-defined functions. For example, `gs://your-bucket/your-transforms/*.py`. +* **pythonRuntimeVersion**: The runtime version to use for this Python UDF. +* **pythonTextTransformFunctionName**: The name of the function to call from your JavaScript file. Use only letters, digits, and underscores. For example, `transform_udf1`. +* **runtimeRetries**: The number of times a runtime will be retried before failing. Defaults to: 5. +* **useStorageWriteApi**: If true, the pipeline uses the BigQuery Storage Write API (https://cloud.google.com/bigquery/docs/write-api). The default value is `false`. For more information, see Using the Storage Write API (https://beam.apache.org/documentation/io/built-in/google-bigquery/#storage-write-api). +* **numStorageWriteApiStreams**: When using the Storage Write API, specifies the number of write streams. If `useStorageWriteApi` is `true` and `useStorageWriteApiAtLeastOnce` is `false`, then you must set this parameter. Defaults to: 0. +* **storageWriteApiTriggeringFrequencySec**: When using the Storage Write API, specifies the triggering frequency, in seconds. If `useStorageWriteApi` is `true` and `useStorageWriteApiAtLeastOnce` is `false`, then you must set this parameter. 
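The Storage Write API options above are interdependent: `numStorageWriteApiStreams` and `storageWriteApiTriggeringFrequencySec` only need explicit values when `useStorageWriteApi` is `true` and `useStorageWriteApiAtLeastOnce` is `false`. The hypothetical pre-flight check below (not part of the template) simply restates that constraint in code:

```java
/** Hypothetical pre-flight check for the Storage Write API parameter combination. */
final class StorageWriteApiOptionsCheck {

  static void validate(
      boolean useStorageWriteApi,
      boolean useStorageWriteApiAtLeastOnce,
      Integer numStorageWriteApiStreams,
      Integer storageWriteApiTriggeringFrequencySec) {
    // Exactly-once Storage Write API mode needs an explicit stream count and trigger frequency.
    if (useStorageWriteApi && !useStorageWriteApiAtLeastOnce) {
      if (numStorageWriteApiStreams == null || numStorageWriteApiStreams <= 0) {
        throw new IllegalArgumentException(
            "numStorageWriteApiStreams must be set when useStorageWriteApi=true and "
                + "useStorageWriteApiAtLeastOnce=false");
      }
      if (storageWriteApiTriggeringFrequencySec == null
          || storageWriteApiTriggeringFrequencySec <= 0) {
        throw new IllegalArgumentException(
            "storageWriteApiTriggeringFrequencySec must be set when useStorageWriteApi=true and "
                + "useStorageWriteApiAtLeastOnce=false");
      }
    }
  }

  public static void main(String[] args) {
    // Example: exactly-once mode with 4 streams and a 10-second triggering frequency.
    validate(true, false, 4, 10);
  }
}
```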
## User-Defined functions (UDFs) @@ -334,7 +334,7 @@ resource "google_dataflow_flex_template_job" "cloud_datastream_to_bigquery" { # outputProjectId = "" # outputStagingTableNameTemplate = "{_metadata_table}_log" # outputTableNameTemplate = "{_metadata_table}" - # ignoreFields = "_metadata_stream,_metadata_schema" + # ignoreFields = "_metadata_stream,_metadata_schema,_metadata_table,_metadata_source,_metadata_tx_id,_metadata_dlq_reconsumed,_metadata_primary_keys,_metadata_error,_metadata_retry_count" # mergeFrequencyMinutes = "5" # dlqRetryMinutes = "10" # dataStreamRootUrl = "https://datastream.googleapis.com/" @@ -342,12 +342,12 @@ resource "google_dataflow_flex_template_job" "cloud_datastream_to_bigquery" { # mergeConcurrency = "30" # partitionRetentionDays = "1" # useStorageWriteApiAtLeastOnce = "false" - # javascriptTextTransformGcsPath = "gs://my-bucket/my-udfs/my_file.js" + # javascriptTextTransformGcsPath = "" # javascriptTextTransformFunctionName = "" # javascriptTextTransformReloadIntervalMinutes = "0" - # pythonTextTransformGcsPath = "gs://your-bucket/your-transforms/*.py" + # pythonTextTransformGcsPath = "" # pythonRuntimeVersion = "" - # pythonTextTransformFunctionName = "transform_udf1" + # pythonTextTransformFunctionName = "" # runtimeRetries = "5" # useStorageWriteApi = "false" # numStorageWriteApiStreams = "0" diff --git a/v2/datastream-to-bigquery/src/main/java/com/google/cloud/teleport/v2/templates/DataStreamToBigQuery.java b/v2/datastream-to-bigquery/src/main/java/com/google/cloud/teleport/v2/templates/DataStreamToBigQuery.java index b55fb26204..5b29c3acff 100644 --- a/v2/datastream-to-bigquery/src/main/java/com/google/cloud/teleport/v2/templates/DataStreamToBigQuery.java +++ b/v2/datastream-to-bigquery/src/main/java/com/google/cloud/teleport/v2/templates/DataStreamToBigQuery.java @@ -149,7 +149,7 @@ public interface Options groupName = "Source", description = "File location for Datastream file output in Cloud Storage.", helpText = - "The file location for Datastream file output in Cloud Storage, in the format: gs:////.") + "The file location for Datastream file output in Cloud Storage, in the format `gs:////`.") String getInputFilePattern(); void setInputFilePattern(String value); @@ -159,7 +159,7 @@ public interface Options enumOptions = {@TemplateEnumOption("avro"), @TemplateEnumOption("json")}, description = "Datastream output file format (avro/json).", helpText = - "The format of the output files produced by Datastream. Value can be 'avro' or 'json'. Defaults to: avro.") + "The format of the output files produced by Datastream. Allowed values are `avro` and `json`. Defaults to `avro`.") @Default.String("avro") String getInputFileFormat(); @@ -169,7 +169,7 @@ public interface Options order = 3, description = "The Pub/Sub subscription on the Cloud Storage bucket.", helpText = - "The Pub/Sub subscription used by Cloud Storage to notify Dataflow of new files available for processing, in the format: projects//subscriptions/.") + "The Pub/Sub subscription used by Cloud Storage to notify Dataflow of new files available for processing, in the format: `projects//subscriptions/`.") String getGcsPubSubSubscription(); void setGcsPubSubSubscription(String value); @@ -191,7 +191,7 @@ public interface Options "The starting DateTime used to fetch from Cloud Storage " + "(https://tools.ietf.org/html/rfc3339).", helpText = - "The starting DateTime to use to fetch data from Cloud Storage (https://tools.ietf.org/html/rfc3339). 
Defaults to: 1970-01-01T00:00:00.00Z.") + "The starting DateTime to use to fetch data from Cloud Storage (https://tools.ietf.org/html/rfc3339). Defaults to: `1970-01-01T00:00:00.00Z`.") @Default.String("1970-01-01T00:00:00.00Z") String getRfcStartDateTime(); @@ -201,7 +201,7 @@ public interface Options order = 6, optional = true, description = "File read concurrency", - helpText = "The number of concurrent DataStream files to read. Default is 10.") + helpText = "The number of concurrent DataStream files to read. Default is `10`.") @Default.Integer(10) Integer getFileReadConcurrency(); @@ -223,7 +223,7 @@ public interface Options groupName = "Target", description = "Name or template for the dataset to contain staging tables.", helpText = - "The name of the dataset that contains staging tables. This parameter supports templates, for example {_metadata_dataset}_log or my_dataset_log. Normally, this parameter is a dataset name. Defaults to: {_metadata_dataset}.") + "The name of the dataset that contains staging tables. This parameter supports templates, for example `{_metadata_dataset}_log` or `my_dataset_log`. Normally, this parameter is a dataset name. Defaults to `{_metadata_dataset}`.") @Default.String("{_metadata_dataset}") String getOutputStagingDatasetTemplate(); @@ -235,7 +235,7 @@ public interface Options groupName = "Target", description = "Template for the name of staging tables.", helpText = - "The template to use to name the staging tables. For example, {_metadata_table}). Defaults to: {_metadata_table}_log.") + "The template to use to name the staging tables. For example, `{_metadata_table}`. Defaults to `{_metadata_table}_log`.") @Default.String("{_metadata_table}_log") String getOutputStagingTableNameTemplate(); @@ -246,7 +246,7 @@ public interface Options groupName = "Target", description = "Template for the dataset to contain replica tables.", helpText = - "The name of the dataset that contains the replica tables. This parameter supports templates, for example {_metadata_dataset} or my_dataset. Normally, this parameter is a dataset name. Defaults to: {_metadata_dataset}.") + "The name of the dataset that contains the replica tables. This parameter supports templates, for example `{_metadata_dataset}` or `my_dataset`. Normally, this parameter is a dataset name. Defaults to `{_metadata_dataset}`.") @Default.String("{_metadata_dataset}") String getOutputDatasetTemplate(); @@ -258,7 +258,7 @@ public interface Options optional = true, description = "Template for the name of replica tables.", helpText = - "The template to use for the name of the replica tables, for example {_metadata_table}. Defaults to: {_metadata_table}.") + "The template to use for the name of the replica tables, for example `{_metadata_table}`. Defaults to `{_metadata_table}`.") @Default.String("{_metadata_table}") String getOutputTableNameTemplate(); @@ -269,7 +269,7 @@ public interface Options optional = true, description = "Fields to be ignored", helpText = - "Comma-separated fields to ignore in BigQuery. Defaults to: _metadata_stream,_metadata_schema,_metadata_table,_metadata_source,_metadata_tx_id,_metadata_dlq_reconsumed,_metadata_primary_keys,_metadata_error,_metadata_retry_count.", + "Comma-separated fields to ignore in BigQuery. 
Defaults to: `_metadata_stream,_metadata_schema,_metadata_table,_metadata_source,_metadata_tx_id,_metadata_dlq_reconsumed,_metadata_primary_keys,_metadata_error,_metadata_retry_count`.", example = "_metadata_stream,_metadata_schema") @Default.String( "_metadata_stream,_metadata_schema,_metadata_table,_metadata_source," @@ -283,7 +283,7 @@ public interface Options order = 13, optional = true, description = "The number of minutes between merges for a given table", - helpText = "The number of minutes between merges for a given table. Defaults to: 5.") + helpText = "The number of minutes between merges for a given table. Defaults to `5`.") @Default.Integer(5) Integer getMergeFrequencyMinutes(); @@ -293,7 +293,7 @@ public interface Options order = 14, description = "Dead letter queue directory.", helpText = - "The path that Dataflow uses to write the dead-letter queue output. This path must not be in the same path as the Datastream file output. Defaults to empty.") + "The path that Dataflow uses to write the dead-letter queue output. This path must not be in the same path as the Datastream file output. Defaults to `empty`.") @Default.String("") String getDeadLetterQueueDirectory(); @@ -303,7 +303,7 @@ public interface Options order = 15, optional = true, description = "The number of minutes between DLQ Retries.", - helpText = "The number of minutes between DLQ Retries. Defaults to: 10.") + helpText = "The number of minutes between DLQ Retries. Defaults to `10`.") @Default.Integer(10) Integer getDlqRetryMinutes(); @@ -323,7 +323,7 @@ public interface Options order = 17, optional = true, description = "A switch to disable MERGE queries for the job.", - helpText = "Whether to disable MERGE queries for the job. Defaults to: true.") + helpText = "Whether to disable MERGE queries for the job. Defaults to `true`.") @Default.Boolean(true) Boolean getApplyMerge(); @@ -336,7 +336,7 @@ public interface Options parentTriggerValues = {"true"}, description = "Concurrent queries for merge.", helpText = - "The number of concurrent BigQuery MERGE queries. Only effective when applyMerge is set to true. Defaults to: 30.") + "The number of concurrent BigQuery MERGE queries. Only effective when applyMerge is set to true. Defaults to `30`.") @Default.Integer(MergeConfiguration.DEFAULT_MERGE_CONCURRENCY) Integer getMergeConcurrency(); @@ -347,7 +347,7 @@ public interface Options optional = true, description = "Partition retention days.", helpText = - "The number of days to use for partition retention when running BigQuery merges. Defaults to: 1.") + "The number of days to use for partition retention when running BigQuery merges. Defaults to `1`.") @Default.Integer(MergeConfiguration.DEFAULT_PARTITION_RETENTION_DAYS) Integer getPartitionRetentionDays(); @@ -360,7 +360,7 @@ public interface Options parentTriggerValues = {"true"}, description = "Use at at-least-once semantics in BigQuery Storage Write API", helpText = - "This parameter takes effect only if \"Use BigQuery Storage Write API\" is enabled. If true, at-least-once semantics are used for the Storage Write API. Otherwise, exactly-once semantics are used. Defaults to: false.", + "This parameter takes effect only if `Use BigQuery Storage Write API` is enabled. If `true`, at-least-once semantics are used for the Storage Write API. Otherwise, exactly-once semantics are used. 
Defaults to `false`.", hiddenUi = true) @Default.Boolean(false) @Override diff --git a/v2/datastream-to-spanner/README_Cloud_Datastream_to_Spanner.md b/v2/datastream-to-spanner/README_Cloud_Datastream_to_Spanner.md index 1f68678427..1843ce7a61 100644 --- a/v2/datastream-to-spanner/README_Cloud_Datastream_to_Spanner.md +++ b/v2/datastream-to-spanner/README_Cloud_Datastream_to_Spanner.md @@ -42,41 +42,37 @@ on [Metadata Annotations](https://github.com/GoogleCloudPlatform/DataflowTemplat ### Required parameters -* **instanceId** : The Spanner instance where the changes are replicated. -* **databaseId** : The Spanner database where the changes are replicated. +* **instanceId**: The Spanner instance where the changes are replicated. +* **databaseId**: The Spanner database where the changes are replicated. +* **streamName**: The name or template for the stream to poll for schema information and source type. ### Optional parameters -* **inputFilePattern** : The Cloud Storage file location that contains the Datastream files to replicate. Typically, this is the root path for a stream. Support for this feature has been disabled. -* **inputFileFormat** : The format of the output file produced by Datastream. For example `avro,json`. Default, `avro`. -* **sessionFilePath** : Session file path in Cloud Storage that contains mapping information from HarbourBridge. -* **projectId** : The Spanner project ID. -* **spannerHost** : The Cloud Spanner endpoint to call in the template. (Example: https://batch-spanner.googleapis.com). Defaults to: https://batch-spanner.googleapis.com. -* **gcsPubSubSubscription** : The Pub/Sub subscription being used in a Cloud Storage notification policy. The name should be in the format of projects//subscriptions/. -* **streamName** : The name or template for the stream to poll for schema information and source type. -* **shadowTablePrefix** : The prefix used to name shadow tables. Default: `shadow_`. -* **shouldCreateShadowTables** : This flag indicates whether shadow tables must be created in Cloud Spanner database. Defaults to: true. -* **rfcStartDateTime** : The starting DateTime used to fetch from Cloud Storage (https://tools.ietf.org/html/rfc3339). Defaults to: 1970-01-01T00:00:00.00Z. -* **fileReadConcurrency** : The number of concurrent DataStream files to read. Defaults to: 30. -* **deadLetterQueueDirectory** : The file path used when storing the error queue output. The default file path is a directory under the Dataflow job's temp location. -* **dlqRetryMinutes** : The number of minutes between dead letter queue retries. Defaults to 10. -* **dlqMaxRetryCount** : The max number of times temporary errors can be retried through DLQ. Defaults to 500. -* **dataStreamRootUrl** : Datastream API Root URL. Defaults to: https://datastream.googleapis.com/. -* **datastreamSourceType** : This is the type of source database that Datastream connects to. Example - mysql/oracle. Need to be set when testing without an actual running Datastream. -* **roundJsonDecimals** : This flag if set, rounds the decimal values in json columns to a number that can be stored without loss of precision. Defaults to: false. -* **runMode** : This is the run mode type, whether regular or with retryDLQ. Defaults to: regular. -* **transformationContextFilePath** : Transformation context file path in cloud storage used to populate data used in transformations performed during migrations Eg: The shard id to db name to identify the db from which a row was migrated. 
-* **directoryWatchDurationInMinutes** : The Duration for which the pipeline should keep polling a directory in GCS. Datastreamoutput files are arranged in a directory structure which depicts the timestamp of the event grouped by minutes. This parameter should be approximately equal tomaximum delay which could occur between event occurring in source database and the same event being written to GCS by Datastream. 99.9 percentile = 10 minutes. Defaults to: 10. -* **spannerPriority** : The request priority for Cloud Spanner calls. The value must be one of: [HIGH,MEDIUM,LOW]. Defaults to HIGH. -* **dlqGcsPubSubSubscription** : The Pub/Sub subscription being used in a Cloud Storage notification policy for DLQ retry directory when running in regular mode. The name should be in the format of projects//subscriptions/. When set, the deadLetterQueueDirectory and dlqRetryMinutes are ignored. -* **transformationJarPath** : Custom jar location in Cloud Storage that contains the custom transformation logic for processing records in forward migration. Defaults to empty. -* **transformationClassName** : Fully qualified class name having the custom transformation logic. It is a mandatory field in case transformationJarPath is specified. Defaults to empty. -* **transformationCustomParameters** : String containing any custom parameters to be passed to the custom transformation class. Defaults to empty. -* **filteredEventsDirectory** : This is the file path to store the events filtered via custom transformation. Default is a directory under the Dataflow job's temp location. The default value is enough under most conditions. -* **shardingContextFilePath** : Sharding context file path in cloud storage is used to populate the shard id in spanner database for each source shard.It is of the format Map>. -* **tableOverrides** : These are the table name overrides from source to spanner. They are written in thefollowing format: [{SourceTableName1, SpannerTableName1}, {SourceTableName2, SpannerTableName2}]This example shows mapping Singers table to Vocalists and Albums table to Records. (Example: [{Singers, Vocalists}, {Albums, Records}]). Defaults to empty. -* **columnOverrides** : These are the column name overrides from source to spanner. They are written in thefollowing format: [{SourceTableName1.SourceColumnName1, SourceTableName1.SpannerColumnName1}, {SourceTableName2.SourceColumnName1, SourceTableName2.SpannerColumnName1}]Note that the SourceTableName should remain the same in both the source and spanner pair. To override table names, use tableOverrides.The example shows mapping SingerName to TalentName and AlbumName to RecordName in Singers and Albums table respectively. (Example: [{Singers.SingerName, Singers.TalentName}, {Albums.AlbumName, Albums.RecordName}]). Defaults to empty. -* **schemaOverridesFilePath** : A file which specifies the table and the column name overrides from source to spanner. Defaults to empty. +* **inputFilePattern**: The Cloud Storage file location that contains the Datastream files to replicate. Typically, this is the root path for a stream. Support for this feature has been disabled. +* **inputFileFormat**: The format of the output file produced by Datastream. For example `avro,json`. Defaults to `avro`. +* **sessionFilePath**: Session file path in Cloud Storage that contains mapping information from HarbourBridge. +* **projectId**: The Spanner project ID. +* **spannerHost**: The Cloud Spanner endpoint to call in the template. For example, `https://batch-spanner.googleapis.com`. 
Defaults to: https://batch-spanner.googleapis.com.
+* **gcsPubSubSubscription**: The Pub/Sub subscription being used in a Cloud Storage notification policy. For the name, use the format `projects//subscriptions/`.
+* **shadowTablePrefix**: The prefix used to name shadow tables. Default: `shadow_`.
+* **shouldCreateShadowTables**: This flag indicates whether shadow tables must be created in the Cloud Spanner database. Defaults to: true.
+* **rfcStartDateTime**: The starting DateTime used to fetch from Cloud Storage (https://tools.ietf.org/html/rfc3339). Defaults to: 1970-01-01T00:00:00.00Z.
+* **fileReadConcurrency**: The number of concurrent DataStream files to read. Defaults to: 30.
+* **deadLetterQueueDirectory**: The file path used when storing the error queue output. The default file path is a directory under the Dataflow job's temp location.
+* **dlqRetryMinutes**: The number of minutes between dead letter queue retries. Defaults to `10`.
+* **dlqMaxRetryCount**: The max number of times temporary errors can be retried through DLQ. Defaults to `500`.
+* **dataStreamRootUrl**: Datastream API Root URL. Defaults to: https://datastream.googleapis.com/.
+* **datastreamSourceType**: The type of source database that Datastream connects to. For example, `mysql` or `oracle`. Needs to be set when testing without an actual running Datastream.
+* **roundJsonDecimals**: If set, this flag rounds the decimal values in json columns to a number that can be stored without loss of precision. Defaults to: false.
+* **runMode**: This is the run mode type, whether regular or with retryDLQ. Defaults to: regular.
+* **transformationContextFilePath**: Transformation context file path in Cloud Storage used to populate data used in transformations performed during migrations. For example, the shard ID to database name mapping used to identify the database from which a row was migrated.
+* **directoryWatchDurationInMinutes**: The duration for which the pipeline should keep polling a directory in GCS. Datastream output files are arranged in a directory structure which depicts the timestamp of the event grouped by minutes. This parameter should be approximately equal to the maximum delay that could occur between an event occurring in the source database and the same event being written to GCS by Datastream. 99.9 percentile = 10 minutes. Defaults to: 10.
+* **spannerPriority**: The request priority for Cloud Spanner calls. The value must be one of: [`HIGH`,`MEDIUM`,`LOW`]. Defaults to `HIGH`.
+* **dlqGcsPubSubSubscription**: The Pub/Sub subscription being used in a Cloud Storage notification policy for DLQ retry directory when running in regular mode. For the name, use the format `projects//subscriptions/`. When set, the deadLetterQueueDirectory and dlqRetryMinutes are ignored.
+* **transformationJarPath**: Custom JAR file location in Cloud Storage for the file that contains the custom transformation logic for processing records in forward migration. Defaults to empty.
+* **transformationClassName**: Fully qualified class name having the custom transformation logic. It is mandatory when transformationJarPath is specified (see the sketch after this list). Defaults to empty.
+* **transformationCustomParameters**: String containing any custom parameters to be passed to the custom transformation class. Defaults to empty.
+* **filteredEventsDirectory**: This is the file path to store the events filtered via custom transformation. Default is a directory under the Dataflow job's temp location. The default value is enough under most conditions.
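As noted above, `transformationClassName` is mandatory whenever `transformationJarPath` is set: the JAR provides the code, and the fully qualified class name identifies which class to instantiate from it. The sketch below is a rough, hypothetical illustration of that reflective loading pattern, assuming the JAR referenced by `transformationJarPath` has already been staged from Cloud Storage to a local path; it is not the template's actual loader.

```java
import java.net.URL;
import java.net.URLClassLoader;
import java.nio.file.Path;

/** Hypothetical illustration of loading a custom transformation class from a staged JAR. */
final class CustomTransformLoader {

  static Object loadTransform(Path localJar, String className) throws Exception {
    // Build a class loader over the staged JAR (assumed to have been copied from the
    // gs:// location in transformationJarPath). The loader is intentionally kept open
    // for as long as the loaded transform is in use.
    URLClassLoader loader =
        new URLClassLoader(
            new URL[] {localJar.toUri().toURL()},
            Thread.currentThread().getContextClassLoader());
    // transformationClassName must be the fully qualified name of a class in the JAR.
    Class<?> clazz = Class.forName(className, true, loader);
    return clazz.getDeclaredConstructor().newInstance();
  }

  public static void main(String[] args) throws Exception {
    // Placeholder JAR path and class name, for illustration only.
    Object transform =
        loadTransform(Path.of("/tmp/custom-transformation.jar"), "com.example.MyTransform");
    System.out.println("Loaded: " + transform.getClass().getName());
  }
}
```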
@@ -333,6 +329,7 @@ resource "google_dataflow_flex_template_job" "cloud_datastream_to_spanner" { parameters = { instanceId = "" databaseId = "" + streamName = "" # inputFilePattern = "" # inputFileFormat = "avro" # sessionFilePath = "" diff --git a/v2/datastream-to-spanner/src/main/java/com/google/cloud/teleport/v2/templates/DataStreamToSpanner.java b/v2/datastream-to-spanner/src/main/java/com/google/cloud/teleport/v2/templates/DataStreamToSpanner.java index 47ffe44837..d16fe44497 100644 --- a/v2/datastream-to-spanner/src/main/java/com/google/cloud/teleport/v2/templates/DataStreamToSpanner.java +++ b/v2/datastream-to-spanner/src/main/java/com/google/cloud/teleport/v2/templates/DataStreamToSpanner.java @@ -168,7 +168,7 @@ public interface Options optional = true, description = "Datastream output file format (avro/json).", helpText = - "The format of the output file produced by Datastream. For example `avro,json`. Default, `avro`.") + "The format of the output file produced by Datastream. For example `avro,json`. Defaults to `avro`.") @Default.String("avro") String getInputFileFormat(); @@ -230,9 +230,8 @@ public interface Options optional = true, description = "The Pub/Sub subscription being used in a Cloud Storage notification policy.", helpText = - "The Pub/Sub subscription being used in a Cloud Storage notification policy. The name" - + " should be in the format of" - + " projects//subscriptions/.") + "The Pub/Sub subscription being used in a Cloud Storage notification policy. For the name," + + " use the format `projects//subscriptions/`.") String getGcsPubSubSubscription(); void setGcsPubSubSubscription(String value); @@ -309,7 +308,7 @@ public interface Options order = 15, optional = true, description = "Dead letter queue retry minutes", - helpText = "The number of minutes between dead letter queue retries. Defaults to 10.") + helpText = "The number of minutes between dead letter queue retries. Defaults to `10`.") @Default.Integer(10) Integer getDlqRetryMinutes(); @@ -320,7 +319,7 @@ public interface Options optional = true, description = "Dead letter queue maximum retry count", helpText = - "The max number of times temporary errors can be retried through DLQ. Defaults to 500.") + "The max number of times temporary errors can be retried through DLQ. Defaults to `500`.") @Default.Integer(500) Integer getDlqMaxRetryCount(); @@ -412,7 +411,7 @@ public interface Options description = "Priority for Spanner RPC invocations", helpText = "The request priority for Cloud Spanner calls. The value must be one of:" - + " [HIGH,MEDIUM,LOW]. Defaults to HIGH") + + " [`HIGH`,`MEDIUM`,`LOW`]. Defaults to `HIGH`.") @Default.Enum("HIGH") RpcPriority getSpannerPriority(); @@ -426,8 +425,8 @@ public interface Options + " retry directory when running in regular mode.", helpText = "The Pub/Sub subscription being used in a Cloud Storage notification policy for DLQ" - + " retry directory when running in regular mode. The name should be in the format" - + " of projects//subscriptions/. When set, the" + + " retry directory when running in regular mode. For the name, use the format" + + " `projects//subscriptions/`. 
When set, the" + " deadLetterQueueDirectory and dlqRetryMinutes are ignored.") String getDlqGcsPubSubSubscription(); @@ -438,7 +437,7 @@ public interface Options optional = true, description = "Custom jar location in Cloud Storage", helpText = - "Custom jar location in Cloud Storage that contains the custom transformation logic for processing records" + "Custom JAR file location in Cloud Storage for the file that contains the custom transformation logic for processing records" + " in forward migration.") @Default.String("") String getTransformationJarPath(); diff --git a/v2/datastream-to-sql/README_Cloud_Datastream_to_SQL.md b/v2/datastream-to-sql/README_Cloud_Datastream_to_SQL.md index d8ad3a9ab2..97fb5274a9 100644 --- a/v2/datastream-to-sql/README_Cloud_Datastream_to_SQL.md +++ b/v2/datastream-to-sql/README_Cloud_Datastream_to_SQL.md @@ -37,23 +37,23 @@ on [Metadata Annotations](https://github.com/GoogleCloudPlatform/DataflowTemplat ### Required parameters -* **inputFilePattern** : The file location for the Datastream files in Cloud Storage to replicate. This file location is typically the root path for the stream. -* **databaseHost** : The SQL host to connect on. -* **databaseUser** : The SQL user with all required permissions to write to all tables in replication. -* **databasePassword** : The password for the SQL user. +* **inputFilePattern**: The file location for the Datastream files in Cloud Storage to replicate. This file location is typically the root path for the stream. +* **databaseHost**: The SQL host to connect on. +* **databaseUser**: The SQL user with all required permissions to write to all tables in replication. +* **databasePassword**: The password for the SQL user. ### Optional parameters -* **gcsPubSubSubscription** : The Pub/Sub subscription with Datastream file notifications. For example, `projects//subscriptions/`. -* **inputFileFormat** : The format of the output file produced by Datastream. For example, `avro` or `json`. Defaults to `avro`. -* **streamName** : The name or template for the stream to poll for schema information. The default value is `{_metadata_stream}`. -* **rfcStartDateTime** : The starting DateTime used to fetch from Cloud Storage (https://tools.ietf.org/html/rfc3339). Defaults to: 1970-01-01T00:00:00.00Z. -* **dataStreamRootUrl** : Datastream API Root URL. Defaults to: https://datastream.googleapis.com/. -* **databaseType** : The database type to write to (for example, Postgres). Defaults to: postgres. -* **databasePort** : The SQL database port to connect to. The default value is `5432`. -* **databaseName** : The name of the SQL database to connect to. The default value is `postgres`. -* **schemaMap** : A map of key/values used to dictate schema name changes (ie. old_name:new_name,CaseError:case_error). Defaults to empty. -* **customConnectionString** : Optional connection string which will be used instead of the default database string. +* **gcsPubSubSubscription**: The Pub/Sub subscription with Datastream file notifications. For example, `projects//subscriptions/`. +* **inputFileFormat**: The format of the output file produced by Datastream. For example, `avro` or `json`. Defaults to `avro`. +* **streamName**: The name or template for the stream to poll for schema information. The default value is `{_metadata_stream}`. +* **rfcStartDateTime**: The starting DateTime used to fetch from Cloud Storage (https://tools.ietf.org/html/rfc3339). Defaults to: 1970-01-01T00:00:00.00Z. +* **dataStreamRootUrl**: Datastream API Root URL. 
Defaults to: https://datastream.googleapis.com/. +* **databaseType**: The database type to write to (for example, Postgres). Defaults to: postgres. +* **databasePort**: The SQL database port to connect to. The default value is `5432`. +* **databaseName**: The name of the SQL database to connect to. The default value is `postgres`. +* **schemaMap**: A map of key/values used to dictate schema name changes (ie. old_name:new_name,CaseError:case_error). Defaults to empty. +* **customConnectionString**: Optional connection string which will be used instead of the default database string. diff --git a/v2/elasticsearch-common/src/main/java/com/google/cloud/teleport/v2/elasticsearch/options/ElasticsearchWriteOptions.java b/v2/elasticsearch-common/src/main/java/com/google/cloud/teleport/v2/elasticsearch/options/ElasticsearchWriteOptions.java index 046a3a072c..3354554621 100644 --- a/v2/elasticsearch-common/src/main/java/com/google/cloud/teleport/v2/elasticsearch/options/ElasticsearchWriteOptions.java +++ b/v2/elasticsearch-common/src/main/java/com/google/cloud/teleport/v2/elasticsearch/options/ElasticsearchWriteOptions.java @@ -30,7 +30,7 @@ public interface ElasticsearchWriteOptions extends PipelineOptions { groupName = "Target", description = "Elasticsearch URL or CloudID if using Elastic Cloud", helpText = - "The Elasticsearch URL in the format https://hostname:[port]. If using Elastic Cloud, specify the CloudID.", + "The Elasticsearch URL in the format `https://hostname:[port]`. If using Elastic Cloud, specify the CloudID.", example = "https://elasticsearch-host:9200") @Validation.Required String getConnectionUrl(); @@ -51,7 +51,7 @@ public interface ElasticsearchWriteOptions extends PipelineOptions { optional = true, description = "Username for Elasticsearch endpoint", helpText = - "The Elasticsearch username to authenticate with. If specified, the value of 'apiKey' is ignored") + "The Elasticsearch username to authenticate with. If specified, the value of `apiKey` is ignored") String getElasticsearchUsername(); void setElasticsearchUsername(String elasticsearchUsername); @@ -61,7 +61,7 @@ public interface ElasticsearchWriteOptions extends PipelineOptions { optional = true, description = "Password for Elasticsearch endpoint", helpText = - "The Elasticsearch password to authenticate with. If specified, the value of 'apiKey' is ignored.") + "The Elasticsearch password to authenticate with. If specified, the value of `apiKey` is ignored.") String getElasticsearchPassword(); void setElasticsearchPassword(String elasticsearchPassword); @@ -71,7 +71,7 @@ public interface ElasticsearchWriteOptions extends PipelineOptions { optional = false, regexes = {"[a-zA-Z0-9._-]+"}, description = "Elasticsearch index", - helpText = "The Elasticsearch index that the requests are issued to, such as `my-index.`", + helpText = "The Elasticsearch index that the requests are issued to.", example = "my-index") String getIndex(); @@ -81,7 +81,7 @@ public interface ElasticsearchWriteOptions extends PipelineOptions { order = 6, optional = true, description = "Batch Size", - helpText = "The batch size in number of documents. Defaults to: 1000.") + helpText = "The batch size in number of documents. Defaults to `1000`.") @Default.Long(1000) Long getBatchSize(); @@ -91,7 +91,7 @@ public interface ElasticsearchWriteOptions extends PipelineOptions { order = 7, optional = true, description = "Batch Size in Bytes", - helpText = "The batch size in number of bytes. 
Defaults to: 5242880 (5mb).") + helpText = "The batch size in number of bytes. Defaults to `5242880` (5mb).") @Default.Long(5242880) Long getBatchSizeBytes(); @@ -102,7 +102,7 @@ public interface ElasticsearchWriteOptions extends PipelineOptions { optional = true, description = "Max retry attempts.", helpText = - "The maximum number of retry attempts. Must be greater than zero. Defaults to: no retries.") + "The maximum number of retry attempts. Must be greater than zero. Defaults to `no retries`.") Integer getMaxRetryAttempts(); void setMaxRetryAttempts(Integer maxRetryAttempts); @@ -112,7 +112,7 @@ public interface ElasticsearchWriteOptions extends PipelineOptions { optional = true, description = "Max retry duration.", helpText = - "The maximum retry duration in milliseconds. Must be greater than zero. Defaults to: no retries.") + "The maximum retry duration in milliseconds. Must be greater than zero. Defaults to `no retries`.") Long getMaxRetryDuration(); void setMaxRetryDuration(Long maxRetryDuration); @@ -122,7 +122,7 @@ public interface ElasticsearchWriteOptions extends PipelineOptions { optional = true, description = "Document property to specify _index metadata", helpText = - "The property in the document being indexed whose value specifies `_index` metadata to include with the document in bulk requests. Takes precedence over an `_index` UDF. Defaults to: none.") + "The property in the document being indexed whose value specifies `_index` metadata to include with the document in bulk requests. Takes precedence over an `_index` UDF. Defaults to `none`.") String getPropertyAsIndex(); void setPropertyAsIndex(String propertyAsIndex); @@ -132,7 +132,7 @@ public interface ElasticsearchWriteOptions extends PipelineOptions { optional = true, description = "Cloud Storage path to JavaScript UDF source for _index metadata", helpText = - "The Cloud Storage path to the JavaScript UDF source for a function that specifies `_index` metadata to include with the document in bulk requests. Defaults to: none.") + "The Cloud Storage path to the JavaScript UDF source for a function that specifies `_index` metadata to include with the document in bulk requests. Defaults to `none`.") String getJavaScriptIndexFnGcsPath(); void setJavaScriptIndexFnGcsPath(String javaScriptTextTransformGcsPath); @@ -142,7 +142,7 @@ public interface ElasticsearchWriteOptions extends PipelineOptions { optional = true, description = "UDF JavaScript Function Name for _index metadata", helpText = - "The name of the UDF JavaScript function that specifies `_index` metadata to include with the document in bulk requests. Defaults to: none.") + "The name of the UDF JavaScript function that specifies `_index` metadata to include with the document in bulk requests. Defaults to `none`.") String getJavaScriptIndexFnName(); void setJavaScriptIndexFnName(String javaScriptTextTransformFunctionName); @@ -152,7 +152,7 @@ public interface ElasticsearchWriteOptions extends PipelineOptions { optional = true, description = "Document property to specify _id metadata", helpText = - "A property in the document being indexed whose value specifies `_id` metadata to include with the document in bulk requests. Takes precedence over an `_id` UDF. Defaults to: none.") + "A property in the document being indexed whose value specifies `_id` metadata to include with the document in bulk requests. Takes precedence over an `_id` UDF. 
Defaults to `none`.") String getPropertyAsId(); void setPropertyAsId(String propertyAsId); @@ -162,7 +162,7 @@ public interface ElasticsearchWriteOptions extends PipelineOptions { optional = true, description = "Cloud Storage path to JavaScript UDF source for _id metadata", helpText = - "The Cloud Storage path to the JavaScript UDF source for the function that specifies `_id` metadata to include with the document in bulk requests. Defaults to: none.") + "The Cloud Storage path to the JavaScript UDF source for the function that specifies `_id` metadata to include with the document in bulk requests. Defaults to `none`.") String getJavaScriptIdFnGcsPath(); void setJavaScriptIdFnGcsPath(String javaScriptTextTransformGcsPath); @@ -172,7 +172,7 @@ public interface ElasticsearchWriteOptions extends PipelineOptions { optional = true, description = "UDF JavaScript Function Name for _id metadata", helpText = - "The name of the UDF JavaScript function that specifies the `_id` metadata to include with the document in bulk requests. Defaults to: none.") + "The name of the UDF JavaScript function that specifies the `_id` metadata to include with the document in bulk requests. Defaults to `none`.") String getJavaScriptIdFnName(); void setJavaScriptIdFnName(String javaScriptTextTransformFunctionName); @@ -182,7 +182,7 @@ public interface ElasticsearchWriteOptions extends PipelineOptions { optional = true, description = "Cloud Storage path to JavaScript UDF source for _type metadata", helpText = - "The Cloud Storage path to the JavaScript UDF source for a function that specifies `_type` metadata to include with documents in bulk requests. Default: none.") + "The Cloud Storage path to the JavaScript UDF source for a function that specifies `_type` metadata to include with documents in bulk requests. Defaults to `none`.") String getJavaScriptTypeFnGcsPath(); void setJavaScriptTypeFnGcsPath(String javaScriptTextTransformGcsPath); @@ -192,7 +192,7 @@ public interface ElasticsearchWriteOptions extends PipelineOptions { optional = true, description = "UDF JavaScript Function Name for _type metadata", helpText = - "The name of the UDF JavaScript function that specifies the `_type` metadata to include with the document in bulk requests. Defaults to: none.") + "The name of the UDF JavaScript function that specifies the `_type` metadata to include with the document in bulk requests. Defaults to `none`.") String getJavaScriptTypeFnName(); void setJavaScriptTypeFnName(String javaScriptTextTransformFunctionName); @@ -202,7 +202,7 @@ public interface ElasticsearchWriteOptions extends PipelineOptions { optional = true, description = "Cloud Storage path to JavaScript UDF source for isDelete function", helpText = - "The Cloud Storage path to the JavaScript UDF source for the function that determines whether to delete the document instead of inserting or updating it. The function returns a string value of `true` or `false`. Defaults to: none.") + "The Cloud Storage path to the JavaScript UDF source for the function that determines whether to delete the document instead of inserting or updating it. The function returns a string value of `true` or `false`. 
Defaults to `none`.") String getJavaScriptIsDeleteFnGcsPath(); void setJavaScriptIsDeleteFnGcsPath(String javaScriptTextTransformGcsPath); @@ -212,7 +212,7 @@ public interface ElasticsearchWriteOptions extends PipelineOptions { optional = true, description = "UDF JavaScript Function Name for isDelete", helpText = - "The name of the UDF JavaScript function that determines whether to delete the document instead of inserting or updating it. The function returns a string value of `true` or `false`. Defaults to: none.") + "The name of the UDF JavaScript function that determines whether to delete the document instead of inserting or updating it. The function returns a string value of `true` or `false`. Defaults to `none`.") String getJavaScriptIsDeleteFnName(); void setJavaScriptIsDeleteFnName(String javaScriptTextTransformFunctionName); @@ -222,7 +222,7 @@ public interface ElasticsearchWriteOptions extends PipelineOptions { optional = true, description = "Use partial updates", helpText = - "Whether to use partial updates (update rather than create or index, allowing partial documents) with Elasticsearch requests. Defaults to: false.") + "Whether to use partial updates (update rather than create or index, allowing partial documents) with Elasticsearch requests. Defaults to `false`.") @Default.Boolean(false) Boolean getUsePartialUpdate(); @@ -234,7 +234,7 @@ public interface ElasticsearchWriteOptions extends PipelineOptions { optional = true, description = "Build insert method", helpText = - "Whether to use `INDEX` (index, allows upserts) or `CREATE` (create, errors on duplicate _id) with Elasticsearch bulk requests. Defaults to: CREATE.") + "Whether to use `INDEX` (index, allows upserts) or `CREATE` (create, errors on duplicate _id) with Elasticsearch bulk requests. Defaults to `CREATE`.") @Default.Enum("CREATE") BulkInsertMethodOptions getBulkInsertMethod(); @@ -245,7 +245,7 @@ public interface ElasticsearchWriteOptions extends PipelineOptions { optional = true, description = "Trust self-signed certificate", helpText = - "Whether to trust self-signed certificate or not. An Elasticsearch instance installed might have a self-signed certificate, Enable this to True to by-pass the validation on SSL certificate. (default is False)") + "Whether to trust self-signed certificate or not. An Elasticsearch instance installed might have a self-signed certificate, Enable this to true to by-pass the validation on SSL certificate. (Defaults to: `false`)") @Default.Boolean(false) Boolean getTrustSelfSignedCerts(); @@ -256,8 +256,8 @@ public interface ElasticsearchWriteOptions extends PipelineOptions { optional = true, description = "Disable SSL certificate validation.", helpText = - "If 'true', trust the self-signed SSL certificate. An Elasticsearch instance might have a " - + "self-signed certificate. To bypass validation for the certificate, set this parameter to 'true'. Default: false.") + "If `true`, trust the self-signed SSL certificate. An Elasticsearch instance might have a " + + "self-signed certificate. To bypass validation for the certificate, set this parameter to `true`. Defaults to `false`.") @Default.Boolean(false) Boolean getDisableCertificateValidation(); @@ -270,11 +270,11 @@ public interface ElasticsearchWriteOptions extends PipelineOptions { parentTriggerValues = {"KMS"}, description = "Google Cloud KMS encryption key for the API key", helpText = - "The Cloud KMS key to decrypt the API key. This parameter must be " - + "provided if the apiKeySource is set to KMS. 
If this parameter is provided, apiKey " - + "string should be passed in encrypted. Encrypt parameters using the KMS API encrypt " - + "endpoint. The Key should be in the format " - + "projects/{gcp_project}/locations/{key_region}/keyRings/{key_ring}/cryptoKeys/{kms_key_name}. " + "The Cloud KMS key to decrypt the API key. This parameter is required " + + "if the `apiKeySource` is set to `KMS`. If this parameter is provided, pass in an encrypted `apiKey` string." + + " Encrypt parameters using the KMS API encrypt " + + "endpoint. For the key, use the format " + + "`projects//locations//keyRings//cryptoKeys/`. " + "See: https://cloud.google.com/kms/docs/reference/rest/v1/projects.locations.keyRings.cryptoKeys/encrypt ", example = "projects/your-project-id/locations/global/keyRings/your-keyring/cryptoKeys/your-key-name") @@ -290,7 +290,7 @@ public interface ElasticsearchWriteOptions extends PipelineOptions { regexes = {"^projects\\/[^\\n\\r\\/]+\\/secrets\\/[^\\n\\r\\/]+\\/versions\\/[^\\n\\r\\/]+$"}, description = "Google Cloud Secret Manager ID.", helpText = - "Secret Manager secret ID for the apiKey. This parameter should be provided if the apiKeySource is set to SECRET_MANAGER. Should be in the format projects/{project}/secrets/{secret}/versions/{secret_version}.", + "The Secret Manager secret ID for the apiKey. If the `apiKeySource` is set to `SECRET_MANAGER`, provide this parameter. Use the format `projects//secrets//versions/`.", example = "projects/your-project-id/secrets/your-secret/versions/your-secret-version") String getApiKeySecretId(); @@ -306,11 +306,11 @@ public interface ElasticsearchWriteOptions extends PipelineOptions { }, description = "Source of the API key passed. One of PLAINTEXT, KMS or SECRET_MANAGER.", helpText = - "Source of the API key. One of PLAINTEXT, KMS or SECRET_MANAGER. This parameter " - + "must be provided if secret manager or KMS is used. If apiKeySource is set to KMS, " - + "apiKeyKMSEncryptionKey and encrypted apiKey must be provided. If apiKeySource is set to " - + "SECRET_MANAGER, apiKeySecretId must be provided. If apiKeySource is set to PLAINTEXT, " - + "apiKey must be provided.") + "The source of the API key. Allowed values are `PLAINTEXT`, `KMS`, and `SECRET_MANAGER`. This parameter " + + "is required when you use Secret Manager or KMS. If `apiKeySource` is set to `KMS`, " + + "`apiKeyKMSEncryptionKey` and an encrypted `apiKey` must be provided. If `apiKeySource` is set to " + + "`SECRET_MANAGER`, `apiKeySecretId` must be provided. If `apiKeySource` is set to `PLAINTEXT`, " + + "`apiKey` must be provided.") @Default.String("PLAINTEXT") String getApiKeySource(); diff --git a/v2/file-format-conversion/README_File_Format_Conversion.md b/v2/file-format-conversion/README_File_Format_Conversion.md index 97ad5a9114..30cb70305d 100644 --- a/v2/file-format-conversion/README_File_Format_Conversion.md +++ b/v2/file-format-conversion/README_File_Format_Conversion.md @@ -23,24 +23,24 @@ on [Metadata Annotations](https://github.com/GoogleCloudPlatform/DataflowTemplat ### Required parameters -* **inputFileFormat** : File format of the input files. Needs to be either avro, parquet or csv. -* **outputFileFormat** : File format of the output files. Needs to be either avro or parquet. -* **inputFileSpec** : The Cloud Storage file pattern to search for CSV files. Example: gs://mybucket/test-*.csv. -* **outputBucket** : Cloud storage directory for writing output files. This value must end in a slash. (Example: gs://your-bucket/path/).
-* **schema** : Cloud storage path to the avro schema file. (Example: gs://your-bucket/your-path/schema.avsc). +* **inputFileFormat**: File format of the input files. Needs to be either avro, parquet or csv. +* **outputFileFormat**: File format of the output files. Needs to be either avro or parquet. +* **inputFileSpec**: The Cloud Storage file pattern to search for CSV files. For example, `gs://mybucket/test-*.csv`. +* **outputBucket**: Cloud storage directory for writing output files. This value must end in a slash. For example, `gs://your-bucket/path/`. +* **schema**: Cloud storage path to the avro schema file. For example, `gs://your-bucket/your-path/schema.avsc`. ### Optional parameters -* **containsHeaders** : Input CSV files contain a header record (true/false). Only required if reading CSV files. Defaults to: false. -* **deadletterTable** : Messages failed to reach the target for all kind of reasons (e.g., mismatched schema, malformed json) are written to this table. (Example: your-project:your-dataset.your-table-name). -* **delimiter** : The column delimiter of the input text files. Default: use delimiter provided in csvFormat (Example: ,). -* **csvFormat** : CSV format specification to use for parsing records. Default is: Default. See https://commons.apache.org/proper/commons-csv/apidocs/org/apache/commons/csv/CSVFormat.html for more details. Must match format names exactly found at: https://commons.apache.org/proper/commons-csv/apidocs/org/apache/commons/csv/CSVFormat.Predefined.html. -* **jsonSchemaPath** : The path to the JSON schema. Defaults to: null. (Example: gs://path/to/schema). -* **largeNumFiles** : Set to true if number of files is in the tens of thousands. Defaults to: false. -* **csvFileEncoding** : The CSV file character encoding format. Allowed Values are US-ASCII, ISO-8859-1, UTF-8, and UTF-16. Defaults to: UTF-8. -* **logDetailedCsvConversionErrors** : Set to true to enable detailed error logging when CSV parsing fails. Note that this may expose sensitive data in the logs (e.g., if the CSV file contains passwords). Default: false. -* **numShards** : The maximum number of output shards produced when writing. A higher number of shards means higher throughput for writing to Cloud Storage, but potentially higher data aggregation cost across shards when processing output Cloud Storage files. Default value is decided by Dataflow. -* **outputFilePrefix** : The prefix of the files to write to. Defaults to: output. +* **containsHeaders**: Input CSV files contain a header record (true/false). Only required if reading CSV files. Defaults to: false. +* **deadletterTable**: Messages failed to reach the target for all kind of reasons (e.g., mismatched schema, malformed json) are written to this table. For example, `your-project:your-dataset.your-table-name`. +* **delimiter**: The column delimiter of the input text files. Default: `,` For example, `,`. +* **csvFormat**: CSV format specification to use for parsing records. Default is: `Default`. See https://commons.apache.org/proper/commons-csv/apidocs/org/apache/commons/csv/CSVFormat.html for more details. Must match format names exactly found at: https://commons.apache.org/proper/commons-csv/apidocs/org/apache/commons/csv/CSVFormat.Predefined.html. +* **jsonSchemaPath**: The path to the JSON schema. Defaults to `null`. For example, `gs://path/to/schema`. +* **largeNumFiles**: Set to true if number of files is in the tens of thousands. Defaults to `false`. +* **csvFileEncoding**: The CSV file character encoding format. 
Allowed values are `US-ASCII`, `ISO-8859-1`, `UTF-8`, and `UTF-16`. Defaults to: UTF-8. +* **logDetailedCsvConversionErrors**: Set to `true` to enable detailed error logging when CSV parsing fails. Note that this may expose sensitive data in the logs (e.g., if the CSV file contains passwords). Default: `false`. +* **numShards**: The maximum number of output shards produced when writing. A higher number of shards means higher throughput for writing to Cloud Storage, but potentially higher data aggregation cost across shards when processing output Cloud Storage files. Default value is decided by Dataflow. +* **outputFilePrefix**: The prefix of the files to write to. Defaults to: output. @@ -247,13 +247,13 @@ resource "google_dataflow_flex_template_job" "file_format_conversion" { inputFileFormat = "" outputFileFormat = "" inputFileSpec = "" - outputBucket = "gs://your-bucket/path/" - schema = "gs://your-bucket/your-path/schema.avsc" + outputBucket = "" + schema = "" # containsHeaders = "false" - # deadletterTable = "your-project:your-dataset.your-table-name" - # delimiter = "," + # deadletterTable = "" + # delimiter = "" # csvFormat = "Default" - # jsonSchemaPath = "gs://path/to/schema" + # jsonSchemaPath = "" # largeNumFiles = "false" # csvFileEncoding = "UTF-8" # logDetailedCsvConversionErrors = "false" diff --git a/v2/gcs-to-sourcedb/README_GCS_to_Sourcedb.md b/v2/gcs-to-sourcedb/README_GCS_to_Sourcedb.md index f9abc7a5ad..350ea25037 100644 --- a/v2/gcs-to-sourcedb/README_GCS_to_Sourcedb.md +++ b/v2/gcs-to-sourcedb/README_GCS_to_Sourcedb.md @@ -17,27 +17,27 @@ on [Metadata Annotations](https://github.com/GoogleCloudPlatform/DataflowTemplat ### Required parameters -* **sourceShardsFilePath** : Source shard details file path in Cloud Storage that contains connection profile of source shards. -* **sessionFilePath** : Session file path in Cloud Storage that contains mapping information from HarbourBridge. -* **GCSInputDirectoryPath** : Path from where to read the change stream files. -* **spannerProjectId** : This is the name of the Cloud Spanner project. -* **metadataInstance** : This is the instance to store the shard progress of the files processed. -* **metadataDatabase** : This is the database to store the shard progress of the files processed.. -* **runIdentifier** : The identifier to distinguish between different runs of reverse replication flows. +* **sourceShardsFilePath**: Source shard details file path in Cloud Storage that contains connection profile of source shards. +* **sessionFilePath**: Session file path in Cloud Storage that contains mapping information from HarbourBridge. +* **GCSInputDirectoryPath**: Path from where to read the change stream files. +* **spannerProjectId**: This is the name of the Cloud Spanner project. +* **metadataInstance**: This is the instance to store the shard progress of the files processed. +* **metadataDatabase**: This is the database to store the shard progress of the files processed.. +* **runIdentifier**: The identifier to distinguish between different runs of reverse replication flows. ### Optional parameters -* **sourceType** : This is the type of source database. Currently only mysql is supported. Defaults to: mysql. -* **sourceDbTimezoneOffset** : This is the timezone offset from UTC for the source database. Example value: +10:00. Defaults to: +00:00. -* **timerIntervalInMilliSec** : Controls the time between successive polls to buffer and processing of the resultant records. Defaults to: 1. 
-* **startTimestamp** : Start time of file for all shards. If not provided, the value is taken from spanner_to_gcs_metadata. If provided, this takes precedence. To be given when running in regular run mode. -* **windowDuration** : The window duration/size in which data is written to Cloud Storage. Allowed formats are: Ns (for seconds, example: 5s), Nm (for minutes, example: 12m), Nh (for hours, example: 2h). If not provided, the value is taken from spanner_to_gcs_metadata. If provided, this takes precedence. To be given when running in regular run mode. (Example: 5m). -* **runMode** : Regular writes to source db, reprocess does processing the specific shards marked as REPROCESS, resumeFailed does reprocess of all shards in error state, resumeSuccess continues processing shards in successful state, resumeAll continues processing all shards irrespective of state. Defaults to: regular. -* **metadataTableSuffix** : Suffix appended to the spanner_to_gcs_metadata and shard_file_create_progress metadata tables.Useful when doing multiple runs.Only alpha numeric and underscores are allowed. Defaults to empty. -* **transformationJarPath** : Custom jar location in Cloud Storage that contains the custom transformation logic for processing records in reverse replication. Defaults to empty. -* **transformationClassName** : Fully qualified class name having the custom transformation logic. It is a mandatory field in case transformationJarPath is specified. Defaults to empty. -* **transformationCustomParameters** : String containing any custom parameters to be passed to the custom transformation class. Defaults to empty. -* **writeFilteredEventsToGcs** : This is a flag which if set to true will write filtered events from custom transformation to GCS. Defaults to: false. +* **sourceType**: This is the type of source database. Currently only mysql is supported. Defaults to: mysql. +* **sourceDbTimezoneOffset**: This is the timezone offset from UTC for the source database. Example value: +10:00. Defaults to: +00:00. +* **timerIntervalInMilliSec**: Controls the time between successive polls to buffer and processing of the resultant records. Defaults to: 1. +* **startTimestamp**: Start time of file for all shards. If not provided, the value is taken from spanner_to_gcs_metadata. If provided, this takes precedence. To be given when running in regular run mode. +* **windowDuration**: The window duration/size in which data is written to Cloud Storage. Allowed formats are: Ns (for seconds, example: 5s), Nm (for minutes, example: 12m), Nh (for hours, example: 2h). If not provided, the value is taken from spanner_to_gcs_metadata. If provided, this takes precedence. To be given when running in regular run mode. For example, `5m`. +* **runMode**: Regular writes to source db, reprocess does processing the specific shards marked as REPROCESS, resumeFailed does reprocess of all shards in error state, resumeSuccess continues processing shards in successful state, resumeAll continues processing all shards irrespective of state. Defaults to: regular. +* **metadataTableSuffix**: Suffix appended to the spanner_to_gcs_metadata and shard_file_create_progress metadata tables.Useful when doing multiple runs.Only alpha numeric and underscores are allowed. Defaults to empty. +* **transformationJarPath**: Custom JAR file location in Cloud Storage for the file that contains the custom transformation logic for processing records in reverse replication. Defaults to empty. 
+* **transformationClassName**: Fully qualified class name for the class that contains the custom transformation logic. When `transformationJarPath` is specified, this field is required. Defaults to empty. +* **transformationCustomParameters**: The string that contains any custom parameters to pass to the custom transformation class. Defaults to empty. +* **writeFilteredEventsToGcs**: When set to `true`, writes filtered events from custom transformation to Cloud Storage. Defaults to: false. @@ -261,7 +261,7 @@ resource "google_dataflow_flex_template_job" "gcs_to_sourcedb" { # sourceDbTimezoneOffset = "+00:00" # timerIntervalInMilliSec = "1" # startTimestamp = "" - # windowDuration = "5m" + # windowDuration = "" # runMode = "regular" # metadataTableSuffix = "" # transformationJarPath = "" diff --git a/v2/gcs-to-sourcedb/src/main/java/com/google/cloud/teleport/v2/templates/GCSToSourceDb.java b/v2/gcs-to-sourcedb/src/main/java/com/google/cloud/teleport/v2/templates/GCSToSourceDb.java index 6066e57cb1..51395e5061 100644 --- a/v2/gcs-to-sourcedb/src/main/java/com/google/cloud/teleport/v2/templates/GCSToSourceDb.java +++ b/v2/gcs-to-sourcedb/src/main/java/com/google/cloud/teleport/v2/templates/GCSToSourceDb.java @@ -125,7 +125,7 @@ public interface Options extends PipelineOptions, StreamingOptions { order = 5, optional = true, description = - "Duration in mili seconds between calls to stateful timer processing.Defaults to 1" + "Duration in milliseconds between calls to stateful timer processing. Defaults to `1`" + " millisecond. ", helpText = "Controls the time between successive polls to buffer and processing of the resultant" @@ -215,7 +215,7 @@ public interface Options extends PipelineOptions, StreamingOptions { }, description = "This type of run mode. Supported values are" - + " regular/reprocess/resumeSucess/resumeFailed/resumeAll. Defaults to regular. All" + + " regular/reprocess/resumeSucess/resumeFailed/resumeAll. Defaults to `regular`. All" + " run modes should have the same run identifier.", helpText = "Regular writes to source db, reprocess does processing the specific shards marked as" @@ -255,7 +255,7 @@ public interface Options extends PipelineOptions, StreamingOptions { optional = true, description = "Custom transformation jar location in Cloud Storage", helpText = - "Custom jar location in Cloud Storage that contains the custom transformation logic for processing records" + "Custom JAR file location in Cloud Storage for the file that contains the custom transformation logic for processing records" + " in reverse replication.") @Default.String("") String getTransformationJarPath(); @@ -267,8 +267,8 @@ public interface Options extends PipelineOptions, StreamingOptions { optional = true, description = "Custom class name for transformation", helpText = - "Fully qualified class name having the custom transformation logic. It is a" - + " mandatory field in case transformationJarPath is specified") + "Fully qualified class name for the class that contains the custom transformation logic.
When" + + " `transformationJarPath` is specified, this field is required.") @Default.String("") String getTransformationClassName(); @@ -279,7 +279,7 @@ public interface Options extends PipelineOptions, StreamingOptions { optional = true, description = "Custom parameters for transformation", helpText = - "String containing any custom parameters to be passed to the custom transformation class.") + "The string that contains any custom parameters to pass to the custom transformation class.") @Default.String("") String getTransformationCustomParameters(); @@ -290,7 +290,7 @@ public interface Options extends PipelineOptions, StreamingOptions { optional = true, description = "Write filtered events to GCS", helpText = - "This is a flag which if set to true will write filtered events from custom transformation to GCS.") + "When set to `true`, writes filtered events from custom transformation to Cloud Storage.") @Default.Boolean(false) Boolean getWriteFilteredEventsToGcs(); diff --git a/v2/google-ads-to-googlecloud/README_Google_Ads_to_BigQuery.md b/v2/google-ads-to-googlecloud/README_Google_Ads_to_BigQuery.md index 6315ac03b0..256dcb4c99 100644 --- a/v2/google-ads-to-googlecloud/README_Google_Ads_to_BigQuery.md +++ b/v2/google-ads-to-googlecloud/README_Google_Ads_to_BigQuery.md @@ -17,21 +17,21 @@ on [Metadata Annotations](https://github.com/GoogleCloudPlatform/DataflowTemplat ### Required parameters -* **customerIds** : A list of Google Ads account IDs to use to execute the query. (Example: 12345,67890). -* **query** : The query to use to get the data. See Google Ads Query Language. For example: `SELECT campaign.id, campaign.name FROM campaign`. (Example: SELECT campaign.id, campaign.name FROM campaign). -* **qpsPerWorker** : The rate of query requests per second (QPS) to submit to Google Ads. Divide the desired per pipeline QPS by the maximum number of workers. Avoid exceeding per-account or developer token limits. See Rate Limits (https://developers.google.com/google-ads/api/docs/best-practices/rate-limits). -* **googleAdsClientId** : The OAuth 2.0 client ID that identifies the application. See Create a client ID and client secret (https://developers.google.com/google-ads/api/docs/oauth/cloud-project#create_a_client_id_and_client_secret). -* **googleAdsClientSecret** : The OAuth 2.0 client secret that corresponds to the specified client ID. See Create a client ID and client secret (https://developers.google.com/google-ads/api/docs/oauth/cloud-project#create_a_client_id_and_client_secret). -* **googleAdsRefreshToken** : The OAuth 2.0 refresh token to use to connect to the Google Ads API. See 2-Step Verification (https://developers.google.com/google-ads/api/docs/oauth/2sv). -* **googleAdsDeveloperToken** : The Google Ads developer token to use to connect to the Google Ads API. See Obtain a developer token (https://developers.google.com/google-ads/api/docs/get-started/dev-token). -* **outputTableSpec** : The BigQuery output table location to write the output to. For example, `:.`.Depending on the `createDisposition` specified, the output table might be created automatically using the user provided Avro schema. +* **customerIds**: A list of Google Ads account IDs to use to execute the query. For example, `12345,67890`. +* **query**: The query to use to get the data. See Google Ads Query Language (https://developers.google.com/google-ads/api/docs/query/overview). For example, `SELECT campaign.id, campaign.name FROM campaign`. 
+* **qpsPerWorker**: The rate of query requests per second (QPS) to submit to Google Ads. Divide the desired per pipeline QPS by the maximum number of workers. Avoid exceeding per-account or developer token limits. See Rate Limits (https://developers.google.com/google-ads/api/docs/best-practices/rate-limits). +* **googleAdsClientId**: The OAuth 2.0 client ID that identifies the application. See Create a client ID and client secret (https://developers.google.com/google-ads/api/docs/oauth/cloud-project#create_a_client_id_and_client_secret). +* **googleAdsClientSecret**: The OAuth 2.0 client secret that corresponds to the specified client ID. See Create a client ID and client secret (https://developers.google.com/google-ads/api/docs/oauth/cloud-project#create_a_client_id_and_client_secret). +* **googleAdsRefreshToken**: The OAuth 2.0 refresh token to use to connect to the Google Ads API. See 2-Step Verification (https://developers.google.com/google-ads/api/docs/oauth/2sv). +* **googleAdsDeveloperToken**: The Google Ads developer token to use to connect to the Google Ads API. See Obtain a developer token (https://developers.google.com/google-ads/api/docs/get-started/dev-token). +* **outputTableSpec**: The BigQuery output table location to write the output to. For example, `:.`.Depending on the `createDisposition` specified, the output table might be created automatically using the user provided Avro schema. ### Optional parameters -* **loginCustomerId** : A Google Ads manager account ID to use to access the account IDs. (Example: 12345). -* **bigQueryTableSchemaPath** : The Cloud Storage path to the BigQuery schema JSON file. If this value is not set, then the schema is inferred from the Proto schema. (Example: gs://MyBucket/bq_schema.json). -* **writeDisposition** : The BigQuery WriteDisposition (https://cloud.google.com/bigquery/docs/reference/rest/v2/Job#jobconfigurationload) value. For example, `WRITE_APPEND`, `WRITE_EMPTY`, or `WRITE_TRUNCATE`. Defaults to `WRITE_APPEND`. -* **createDisposition** : The BigQuery CreateDisposition (https://cloud.google.com/bigquery/docs/reference/rest/v2/Job#jobconfigurationload). For example, `CREATE_IF_NEEDED` and `CREATE_NEVER`. Defaults to `CREATE_IF_NEEDED`. +* **loginCustomerId**: A Google Ads manager account ID to use to access the account IDs. For example, `12345`. +* **bigQueryTableSchemaPath**: The Cloud Storage path to the BigQuery schema JSON file. If this value is not set, then the schema is inferred from the Proto schema. For example, `gs://MyBucket/bq_schema.json`. +* **writeDisposition**: The BigQuery WriteDisposition (https://cloud.google.com/bigquery/docs/reference/rest/v2/Job#jobconfigurationload) value. For example, `WRITE_APPEND`, `WRITE_EMPTY`, or `WRITE_TRUNCATE`. Defaults to `WRITE_APPEND`. +* **createDisposition**: The BigQuery CreateDisposition (https://cloud.google.com/bigquery/docs/reference/rest/v2/Job#jobconfigurationload). For example, `CREATE_IF_NEEDED` and `CREATE_NEVER`. Defaults to `CREATE_IF_NEEDED`. 
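As a quick orientation for how the flags documented above reach the template, here is a minimal, hypothetical Java sketch (not part of this change). It assumes the `GoogleAdsToBigQueryOptions` interface edited in `GoogleAdsToBigQuery.java` below is importable as a nested interface of that class; only `getQuery()` is taken from this diff, and the rest is illustrative.

```java
import com.google.cloud.teleport.v2.templates.GoogleAdsToBigQuery.GoogleAdsToBigQueryOptions; // assumed nesting
import org.apache.beam.sdk.Pipeline;
import org.apache.beam.sdk.options.PipelineOptionsFactory;

// Illustrative sketch only: shows how the --query flag documented above
// surfaces through the options interface touched by this change.
public final class GoogleAdsQuerySketch {
  public static void main(String[] args) {
    // e.g. --query="SELECT campaign.id, campaign.name FROM campaign"
    GoogleAdsToBigQueryOptions options =
        PipelineOptionsFactory.fromArgs(args).withValidation().as(GoogleAdsToBigQueryOptions.class);

    // The GAQL text is handed to the template's Google Ads read step as-is.
    String gaqlQuery = options.getQuery();
    System.out.println("Running GAQL query: " + gaqlQuery);

    Pipeline pipeline = Pipeline.create(options);
    // ... read from Google Ads with gaqlQuery, then write to outputTableSpec ...
    pipeline.run();
  }
}
```

Launching the template with `--customerIds` and `--query` as documented above would populate the corresponding getters at pipeline construction time.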
@@ -226,16 +226,16 @@ resource "google_dataflow_flex_template_job" "google_ads_to_bigquery" { name = "google-ads-to-bigquery" region = var.region parameters = { - customerIds = "12345,67890" - query = "SELECT campaign.id, campaign.name FROM campaign" + customerIds = "" + query = "" qpsPerWorker = "" googleAdsClientId = "" googleAdsClientSecret = "" googleAdsRefreshToken = "" googleAdsDeveloperToken = "" outputTableSpec = "" - # loginCustomerId = "12345" - # bigQueryTableSchemaPath = "gs://MyBucket/bq_schema.json" + # loginCustomerId = "" + # bigQueryTableSchemaPath = "" # writeDisposition = "WRITE_APPEND" # createDisposition = "CREATE_IF_NEEDED" } diff --git a/v2/google-ads-to-googlecloud/src/main/java/com/google/cloud/teleport/v2/templates/GoogleAdsToBigQuery.java b/v2/google-ads-to-googlecloud/src/main/java/com/google/cloud/teleport/v2/templates/GoogleAdsToBigQuery.java index f6ece31eab..8f49e27329 100644 --- a/v2/google-ads-to-googlecloud/src/main/java/com/google/cloud/teleport/v2/templates/GoogleAdsToBigQuery.java +++ b/v2/google-ads-to-googlecloud/src/main/java/com/google/cloud/teleport/v2/templates/GoogleAdsToBigQuery.java @@ -100,7 +100,7 @@ public interface GoogleAdsToBigQueryOptions extends WriteOptions, GoogleAdsOptio order = 3, description = "Google Ads Query Language query", helpText = - "The query to use to get the data. See Google Ads Query Language. For example: `SELECT campaign.id, campaign.name FROM campaign`.", + "The query to use to get the data. See Google Ads Query Language (https://developers.google.com/google-ads/api/docs/query/overview).", example = "SELECT campaign.id, campaign.name FROM campaign") @Validation.Required String getQuery(); diff --git a/v2/googlecloud-to-elasticsearch/README_BigQuery_to_Elasticsearch.md b/v2/googlecloud-to-elasticsearch/README_BigQuery_to_Elasticsearch.md index 316da150d6..0a3d17ecf3 100644 --- a/v2/googlecloud-to-elasticsearch/README_BigQuery_to_Elasticsearch.md +++ b/v2/googlecloud-to-elasticsearch/README_BigQuery_to_Elasticsearch.md @@ -18,44 +18,43 @@ on [Metadata Annotations](https://github.com/GoogleCloudPlatform/DataflowTemplat ### Required parameters -* **connectionUrl** : The Elasticsearch URL in the format https://hostname:[port]. If using Elastic Cloud, specify the CloudID. (Example: https://elasticsearch-host:9200). -* **apiKey** : The Base64-encoded API key to use for authentication. -* **index** : The Elasticsearch index that the requests are issued to, such as `my-index.` (Example: my-index). +* **connectionUrl**: The Elasticsearch URL in the format `https://hostname:[port]`. If using Elastic Cloud, specify the CloudID. For example, `https://elasticsearch-host:9200`. +* **apiKey**: The Base64-encoded API key to use for authentication. +* **index**: The Elasticsearch index that the requests are issued to. For example, `my-index`. ### Optional parameters -* **inputTableSpec** : The BigQuery table to read from. Format: `projectId:datasetId.tablename`. If you specify `inputTableSpec`, the template reads the data directly from BigQuery storage by using the BigQuery Storage Read API (https://cloud.google.com/bigquery/docs/reference/storage). For information about limitations in the Storage Read API, see https://cloud.google.com/bigquery/docs/reference/storage#limitations. You must specify either `inputTableSpec` or `query`. If you set both parameters, the template uses the `query` parameter. (Example: bigquery-project:dataset.input_table). 
-* **outputDeadletterTable** : The BigQuery table for messages that failed to reach the output table, in the format :.. If a table doesn't exist, is is created during pipeline execution. If not specified, `_error_records` is used. (Example: your-project-id:your-dataset.your-table-name). -* **query** : The SQL query to use to read data from BigQuery. If the BigQuery dataset is in a different project than the Dataflow job, specify the full dataset name in the SQL query, for example: ... By default, the `query` parameter uses GoogleSQL (https://cloud.google.com/bigquery/docs/introduction-sql), unless `useLegacySql` is `true`. You must specify either `inputTableSpec` or `query`. If you set both parameters, the template uses the `query` parameter. (Example: select * from sampledb.sample_table). -* **useLegacySql** : Set to true to use legacy SQL. This parameter only applies when using the `query` parameter. Defaults to: false. -* **queryLocation** : Needed when reading from an authorized view without underlying table's permission. (Example: US). -* **queryTempDataset** : With this option, you can set an existing dataset to create the temporary table to store the results of the query. (Example: temp_dataset). -* **elasticsearchUsername** : The Elasticsearch username to authenticate with. If specified, the value of 'apiKey' is ignored. -* **elasticsearchPassword** : The Elasticsearch password to authenticate with. If specified, the value of 'apiKey' is ignored. -* **batchSize** : The batch size in number of documents. Defaults to: 1000. -* **batchSizeBytes** : The batch size in number of bytes. Defaults to: 5242880 (5mb). -* **maxRetryAttempts** : The maximum number of retry attempts. Must be greater than zero. Defaults to: no retries. -* **maxRetryDuration** : The maximum retry duration in milliseconds. Must be greater than zero. Defaults to: no retries. -* **propertyAsIndex** : The property in the document being indexed whose value specifies `_index` metadata to include with the document in bulk requests. Takes precedence over an `_index` UDF. Defaults to: none. -* **javaScriptIndexFnGcsPath** : The Cloud Storage path to the JavaScript UDF source for a function that specifies `_index` metadata to include with the document in bulk requests. Defaults to: none. -* **javaScriptIndexFnName** : The name of the UDF JavaScript function that specifies `_index` metadata to include with the document in bulk requests. Defaults to: none. -* **propertyAsId** : A property in the document being indexed whose value specifies `_id` metadata to include with the document in bulk requests. Takes precedence over an `_id` UDF. Defaults to: none. -* **javaScriptIdFnGcsPath** : The Cloud Storage path to the JavaScript UDF source for the function that specifies `_id` metadata to include with the document in bulk requests. Defaults to: none. -* **javaScriptIdFnName** : The name of the UDF JavaScript function that specifies the `_id` metadata to include with the document in bulk requests. Defaults to: none. -* **javaScriptTypeFnGcsPath** : The Cloud Storage path to the JavaScript UDF source for a function that specifies `_type` metadata to include with documents in bulk requests. Default: none. -* **javaScriptTypeFnName** : The name of the UDF JavaScript function that specifies the `_type` metadata to include with the document in bulk requests. Defaults to: none. 
-* **javaScriptIsDeleteFnGcsPath** : The Cloud Storage path to the JavaScript UDF source for the function that determines whether to delete the document instead of inserting or updating it. The function returns a string value of `true` or `false`. Defaults to: none. -* **javaScriptIsDeleteFnName** : The name of the UDF JavaScript function that determines whether to delete the document instead of inserting or updating it. The function returns a string value of `true` or `false`. Defaults to: none. -* **usePartialUpdate** : Whether to use partial updates (update rather than create or index, allowing partial documents) with Elasticsearch requests. Defaults to: false. -* **bulkInsertMethod** : Whether to use `INDEX` (index, allows upserts) or `CREATE` (create, errors on duplicate _id) with Elasticsearch bulk requests. Defaults to: CREATE. -* **trustSelfSignedCerts** : Whether to trust self-signed certificate or not. An Elasticsearch instance installed might have a self-signed certificate, Enable this to True to by-pass the validation on SSL certificate. (default is False). -* **disableCertificateValidation** : If 'true', trust the self-signed SSL certificate. An Elasticsearch instance might have a self-signed certificate. To bypass validation for the certificate, set this parameter to 'true'. Default: false. -* **apiKeyKMSEncryptionKey** : The Cloud KMS key to decrypt the API key. This parameter must be provided if the apiKeySource is set to KMS. If this parameter is provided, apiKey string should be passed in encrypted. Encrypt parameters using the KMS API encrypt endpoint. The Key should be in the format projects/{gcp_project}/locations/{key_region}/keyRings/{key_ring}/cryptoKeys/{kms_key_name}. See: https://cloud.google.com/kms/docs/reference/rest/v1/projects.locations.keyRings.cryptoKeys/encrypt (Example: projects/your-project-id/locations/global/keyRings/your-keyring/cryptoKeys/your-key-name). -* **apiKeySecretId** : Secret Manager secret ID for the apiKey. This parameter should be provided if the apiKeySource is set to SECRET_MANAGER. Should be in the format projects/{project}/secrets/{secret}/versions/{secret_version}. (Example: projects/your-project-id/secrets/your-secret/versions/your-secret-version). -* **apiKeySource** : Source of the API key. One of PLAINTEXT, KMS or SECRET_MANAGER. This parameter must be provided if secret manager or KMS is used. If apiKeySource is set to KMS, apiKeyKMSEncryptionKey and encrypted apiKey must be provided. If apiKeySource is set to SECRET_MANAGER, apiKeySecretId must be provided. If apiKeySource is set to PLAINTEXT, apiKey must be provided. Defaults to: PLAINTEXT. -* **socketTimeout** : If set, overwrites the default max retry timeout and default socket timeout (30000ms) in the Elastic RestClient. -* **javascriptTextTransformGcsPath** : The Cloud Storage URI of the .js file that defines the JavaScript user-defined function (UDF) to use. (Example: gs://my-bucket/my-udfs/my_file.js). -* **javascriptTextTransformFunctionName** : The name of the JavaScript user-defined function (UDF) to use. For example, if your JavaScript function code is `myTransform(inJson) { /*...do stuff...*/ }`, then the function name is `myTransform`. For sample JavaScript UDFs, see UDF Examples (https://github.com/GoogleCloudPlatform/DataflowTemplates#udf-examples). +* **inputTableSpec**: The BigQuery table to read from. 
If you specify `inputTableSpec`, the template reads the data directly from BigQuery storage by using the BigQuery Storage Read API (https://cloud.google.com/bigquery/docs/reference/storage). For information about limitations in the Storage Read API, see https://cloud.google.com/bigquery/docs/reference/storage#limitations. You must specify either `inputTableSpec` or `query`. If you set both parameters, the template uses the `query` parameter. For example, `:.`. +* **outputDeadletterTable**: The BigQuery table for messages that failed to reach the output table. If a table doesn't exist, it is created during pipeline execution. If not specified, `_error_records` is used. For example, `:.`. +* **query**: The SQL query to use to read data from BigQuery. If the BigQuery dataset is in a different project than the Dataflow job, specify the full dataset name in the SQL query, for example: ... By default, the `query` parameter uses GoogleSQL (https://cloud.google.com/bigquery/docs/introduction-sql), unless `useLegacySql` is `true`. You must specify either `inputTableSpec` or `query`. If you set both parameters, the template uses the `query` parameter. For example, `select * from sampledb.sample_table`. +* **useLegacySql**: Set to `true` to use legacy SQL. This parameter only applies when using the `query` parameter. Defaults to `false`. +* **queryLocation**: Needed when reading from an authorized view without underlying table's permission. For example, `US`. +* **elasticsearchUsername**: The Elasticsearch username to authenticate with. If specified, the value of `apiKey` is ignored. +* **elasticsearchPassword**: The Elasticsearch password to authenticate with. If specified, the value of `apiKey` is ignored. +* **batchSize**: The batch size in number of documents. Defaults to `1000`. +* **batchSizeBytes**: The batch size in number of bytes. Defaults to `5242880` (5mb). +* **maxRetryAttempts**: The maximum number of retry attempts. Must be greater than zero. Defaults to `no retries`. +* **maxRetryDuration**: The maximum retry duration in milliseconds. Must be greater than zero. Defaults to `no retries`. +* **propertyAsIndex**: The property in the document being indexed whose value specifies `_index` metadata to include with the document in bulk requests. Takes precedence over an `_index` UDF. Defaults to `none`. +* **javaScriptIndexFnGcsPath**: The Cloud Storage path to the JavaScript UDF source for a function that specifies `_index` metadata to include with the document in bulk requests. Defaults to `none`. +* **javaScriptIndexFnName**: The name of the UDF JavaScript function that specifies `_index` metadata to include with the document in bulk requests. Defaults to `none`. +* **propertyAsId**: A property in the document being indexed whose value specifies `_id` metadata to include with the document in bulk requests. Takes precedence over an `_id` UDF. Defaults to `none`. +* **javaScriptIdFnGcsPath**: The Cloud Storage path to the JavaScript UDF source for the function that specifies `_id` metadata to include with the document in bulk requests. Defaults to `none`. +* **javaScriptIdFnName**: The name of the UDF JavaScript function that specifies the `_id` metadata to include with the document in bulk requests. Defaults to `none`. +* **javaScriptTypeFnGcsPath**: The Cloud Storage path to the JavaScript UDF source for a function that specifies `_type` metadata to include with documents in bulk requests. Defaults to `none`. 
+* **javaScriptTypeFnName**: The name of the UDF JavaScript function that specifies the `_type` metadata to include with the document in bulk requests. Defaults to `none`. +* **javaScriptIsDeleteFnGcsPath**: The Cloud Storage path to the JavaScript UDF source for the function that determines whether to delete the document instead of inserting or updating it. The function returns a string value of `true` or `false`. Defaults to `none`. +* **javaScriptIsDeleteFnName**: The name of the UDF JavaScript function that determines whether to delete the document instead of inserting or updating it. The function returns a string value of `true` or `false`. Defaults to `none`. +* **usePartialUpdate**: Whether to use partial updates (update rather than create or index, allowing partial documents) with Elasticsearch requests. Defaults to `false`. +* **bulkInsertMethod**: Whether to use `INDEX` (index, allows upserts) or `CREATE` (create, errors on duplicate _id) with Elasticsearch bulk requests. Defaults to `CREATE`. +* **trustSelfSignedCerts**: Whether to trust self-signed certificate or not. An Elasticsearch instance installed might have a self-signed certificate, Enable this to true to by-pass the validation on SSL certificate. (Defaults to: `false`). +* **disableCertificateValidation**: If `true`, trust the self-signed SSL certificate. An Elasticsearch instance might have a self-signed certificate. To bypass validation for the certificate, set this parameter to `true`. Defaults to `false`. +* **apiKeyKMSEncryptionKey**: The Cloud KMS key to decrypt the API key. This parameter is required if the `apiKeySource` is set to `KMS`. If this parameter is provided, pass in an encrypted `apiKey` string. Encrypt parameters using the KMS API encrypt endpoint. For the key, use the format `projects//locations//keyRings//cryptoKeys/`. See: https://cloud.google.com/kms/docs/reference/rest/v1/projects.locations.keyRings.cryptoKeys/encrypt For example, `projects/your-project-id/locations/global/keyRings/your-keyring/cryptoKeys/your-key-name`. +* **apiKeySecretId**: The Secret Manager secret ID for the apiKey. If the `apiKeySource` is set to `SECRET_MANAGER`, provide this parameter. Use the format `projects//secrets//versions/`. For example, `projects/your-project-id/secrets/your-secret/versions/your-secret-version`. +* **apiKeySource**: The source of the API key. Allowed values are `PLAINTEXT`, `KMS`, and `SECRET_MANAGER`. This parameter is required when you use Secret Manager or KMS. If `apiKeySource` is set to `KMS`, `apiKeyKMSEncryptionKey` and an encrypted `apiKey` must be provided. If `apiKeySource` is set to `SECRET_MANAGER`, `apiKeySecretId` must be provided. If `apiKeySource` is set to `PLAINTEXT`, `apiKey` must be provided. Defaults to: PLAINTEXT. +* **socketTimeout**: If set, overwrites the default max retry timeout and default socket timeout (30000ms) in the Elastic RestClient. +* **javascriptTextTransformGcsPath**: The Cloud Storage URI of the .js file that defines the JavaScript user-defined function (UDF) to use. For example, `gs://my-bucket/my-udfs/my_file.js`. +* **javascriptTextTransformFunctionName**: The name of the JavaScript user-defined function (UDF) to use. For example, if your JavaScript function code is `myTransform(inJson) { /*...do stuff...*/ }`, then the function name is `myTransform`. For sample JavaScript UDFs, see UDF Examples (https://github.com/GoogleCloudPlatform/DataflowTemplates#udf-examples).
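Because the `apiKeySource`, `apiKeyKMSEncryptionKey`, and `apiKeySecretId` parameters above interact, the following minimal Java sketch shows how a pipeline might read them through `ElasticsearchWriteOptions` (the interface, its package path, and the getters come from the diff in this change; the branching itself is illustrative, not the template's actual wiring).

```java
import com.google.cloud.teleport.v2.elasticsearch.options.ElasticsearchWriteOptions;
import org.apache.beam.sdk.options.PipelineOptionsFactory;

// Illustrative sketch: mirrors the help text above, not the template's code.
public final class ElasticsearchOptionsSketch {
  public static void main(String[] args) {
    ElasticsearchWriteOptions options =
        PipelineOptionsFactory.fromArgs(args).withValidation().as(ElasticsearchWriteOptions.class);

    // Required parameters: --connectionUrl, --apiKey, --index.
    String url = options.getConnectionUrl();
    String index = options.getIndex();

    // Optional; defaults to 1000 documents per bulk request.
    long batchSize = options.getBatchSize();

    // Per the help text, the companion parameter depends on apiKeySource.
    switch (options.getApiKeySource()) {
      case "KMS":
        // Requires apiKeyKMSEncryptionKey plus an encrypted apiKey.
        break;
      case "SECRET_MANAGER":
        // Requires apiKeySecretId (a Secret Manager version resource name).
        break;
      default:
        // PLAINTEXT (the default): apiKey is used as-is.
        break;
    }
    System.out.printf("Writing to %s, index %s, batch size %d%n", url, index, batchSize);
  }
}
```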
## User-Defined functions (UDFs) @@ -284,7 +283,7 @@ mvn clean package -PtemplatesRun \ -Dregion="$REGION" \ -DjobName="bigquery-to-elasticsearch-job" \ -DtemplateName="BigQuery_to_Elasticsearch" \ --Dparameters="inputTableSpec=$INPUT_TABLE_SPEC,outputDeadletterTable=$OUTPUT_DEADLETTER_TABLE,query=$QUERY,useLegacySql=$USE_LEGACY_SQL,queryLocation=$QUERY_LOCATION,queryTempDataset=$QUERY_TEMP_DATASET,connectionUrl=$CONNECTION_URL,apiKey=$API_KEY,elasticsearchUsername=$ELASTICSEARCH_USERNAME,elasticsearchPassword=$ELASTICSEARCH_PASSWORD,index=$INDEX,batchSize=$BATCH_SIZE,batchSizeBytes=$BATCH_SIZE_BYTES,maxRetryAttempts=$MAX_RETRY_ATTEMPTS,maxRetryDuration=$MAX_RETRY_DURATION,propertyAsIndex=$PROPERTY_AS_INDEX,javaScriptIndexFnGcsPath=$JAVA_SCRIPT_INDEX_FN_GCS_PATH,javaScriptIndexFnName=$JAVA_SCRIPT_INDEX_FN_NAME,propertyAsId=$PROPERTY_AS_ID,javaScriptIdFnGcsPath=$JAVA_SCRIPT_ID_FN_GCS_PATH,javaScriptIdFnName=$JAVA_SCRIPT_ID_FN_NAME,javaScriptTypeFnGcsPath=$JAVA_SCRIPT_TYPE_FN_GCS_PATH,javaScriptTypeFnName=$JAVA_SCRIPT_TYPE_FN_NAME,javaScriptIsDeleteFnGcsPath=$JAVA_SCRIPT_IS_DELETE_FN_GCS_PATH,javaScriptIsDeleteFnName=$JAVA_SCRIPT_IS_DELETE_FN_NAME,usePartialUpdate=$USE_PARTIAL_UPDATE,bulkInsertMethod=$BULK_INSERT_METHOD,trustSelfSignedCerts=$TRUST_SELF_SIGNED_CERTS,disableCertificateValidation=$DISABLE_CERTIFICATE_VALIDATION,apiKeyKMSEncryptionKey=$API_KEY_KMSENCRYPTION_KEY,apiKeySecretId=$API_KEY_SECRET_ID,apiKeySource=$API_KEY_SOURCE,socketTimeout=$SOCKET_TIMEOUT,javascriptTextTransformGcsPath=$JAVASCRIPT_TEXT_TRANSFORM_GCS_PATH,javascriptTextTransformFunctionName=$JAVASCRIPT_TEXT_TRANSFORM_FUNCTION_NAME" \ +-Dparameters="inputTableSpec=$INPUT_TABLE_SPEC,outputDeadletterTable=$OUTPUT_DEADLETTER_TABLE,query=$QUERY,useLegacySql=$USE_LEGACY_SQL,queryLocation=$QUERY_LOCATION,connectionUrl=$CONNECTION_URL,apiKey=$API_KEY,elasticsearchUsername=$ELASTICSEARCH_USERNAME,elasticsearchPassword=$ELASTICSEARCH_PASSWORD,index=$INDEX,batchSize=$BATCH_SIZE,batchSizeBytes=$BATCH_SIZE_BYTES,maxRetryAttempts=$MAX_RETRY_ATTEMPTS,maxRetryDuration=$MAX_RETRY_DURATION,propertyAsIndex=$PROPERTY_AS_INDEX,javaScriptIndexFnGcsPath=$JAVA_SCRIPT_INDEX_FN_GCS_PATH,javaScriptIndexFnName=$JAVA_SCRIPT_INDEX_FN_NAME,propertyAsId=$PROPERTY_AS_ID,javaScriptIdFnGcsPath=$JAVA_SCRIPT_ID_FN_GCS_PATH,javaScriptIdFnName=$JAVA_SCRIPT_ID_FN_NAME,javaScriptTypeFnGcsPath=$JAVA_SCRIPT_TYPE_FN_GCS_PATH,javaScriptTypeFnName=$JAVA_SCRIPT_TYPE_FN_NAME,javaScriptIsDeleteFnGcsPath=$JAVA_SCRIPT_IS_DELETE_FN_GCS_PATH,javaScriptIsDeleteFnName=$JAVA_SCRIPT_IS_DELETE_FN_NAME,usePartialUpdate=$USE_PARTIAL_UPDATE,bulkInsertMethod=$BULK_INSERT_METHOD,trustSelfSignedCerts=$TRUST_SELF_SIGNED_CERTS,disableCertificateValidation=$DISABLE_CERTIFICATE_VALIDATION,apiKeyKMSEncryptionKey=$API_KEY_KMSENCRYPTION_KEY,apiKeySecretId=$API_KEY_SECRET_ID,apiKeySource=$API_KEY_SOURCE,socketTimeout=$SOCKET_TIMEOUT,javascriptTextTransformGcsPath=$JAVASCRIPT_TEXT_TRANSFORM_GCS_PATH,javascriptTextTransformFunctionName=$JAVASCRIPT_TEXT_TRANSFORM_FUNCTION_NAME" \ -f v2/googlecloud-to-elasticsearch ``` @@ -329,15 +328,14 @@ resource "google_dataflow_flex_template_job" "bigquery_to_elasticsearch" { name = "bigquery-to-elasticsearch" region = var.region parameters = { - connectionUrl = "https://elasticsearch-host:9200" + connectionUrl = "" apiKey = "" - index = "my-index" - # inputTableSpec = "bigquery-project:dataset.input_table" - # outputDeadletterTable = "your-project-id:your-dataset.your-table-name" - # query = "select * from sampledb.sample_table" + index = "" + # 
inputTableSpec = "" + # outputDeadletterTable = "" + # query = "" # useLegacySql = "false" - # queryLocation = "US" - # queryTempDataset = "temp_dataset" + # queryLocation = "" # elasticsearchUsername = "" # elasticsearchPassword = "" # batchSize = "1000" @@ -358,11 +356,11 @@ resource "google_dataflow_flex_template_job" "bigquery_to_elasticsearch" { # bulkInsertMethod = "CREATE" # trustSelfSignedCerts = "false" # disableCertificateValidation = "false" - # apiKeyKMSEncryptionKey = "projects/your-project-id/locations/global/keyRings/your-keyring/cryptoKeys/your-key-name" - # apiKeySecretId = "projects/your-project-id/secrets/your-secret/versions/your-secret-version" + # apiKeyKMSEncryptionKey = "" + # apiKeySecretId = "" # apiKeySource = "PLAINTEXT" # socketTimeout = "" - # javascriptTextTransformGcsPath = "gs://my-bucket/my-udfs/my_file.js" + # javascriptTextTransformGcsPath = "" # javascriptTextTransformFunctionName = "" } } diff --git a/v2/googlecloud-to-elasticsearch/README_BigQuery_to_Elasticsearch_Xlang.md b/v2/googlecloud-to-elasticsearch/README_BigQuery_to_Elasticsearch_Xlang.md index 6a468c9e53..7f3f5ab9ab 100644 --- a/v2/googlecloud-to-elasticsearch/README_BigQuery_to_Elasticsearch_Xlang.md +++ b/v2/googlecloud-to-elasticsearch/README_BigQuery_to_Elasticsearch_Xlang.md @@ -18,44 +18,43 @@ on [Metadata Annotations](https://github.com/GoogleCloudPlatform/DataflowTemplat ### Required parameters -* **connectionUrl** : The Elasticsearch URL in the format https://hostname:[port]. If using Elastic Cloud, specify the CloudID. (Example: https://elasticsearch-host:9200). -* **apiKey** : The Base64-encoded API key to use for authentication. -* **index** : The Elasticsearch index that the requests are issued to, such as `my-index.` (Example: my-index). +* **connectionUrl**: The Elasticsearch URL in the format `https://hostname:[port]`. If using Elastic Cloud, specify the CloudID. For example, `https://elasticsearch-host:9200`. +* **apiKey**: The Base64-encoded API key to use for authentication. +* **index**: The Elasticsearch index that the requests are issued to. For example, `my-index`. ### Optional parameters -* **inputTableSpec** : The BigQuery table to read from. Format: `projectId:datasetId.tablename`. If you specify `inputTableSpec`, the template reads the data directly from BigQuery storage by using the BigQuery Storage Read API (https://cloud.google.com/bigquery/docs/reference/storage). For information about limitations in the Storage Read API, see https://cloud.google.com/bigquery/docs/reference/storage#limitations. You must specify either `inputTableSpec` or `query`. If you set both parameters, the template uses the `query` parameter. (Example: bigquery-project:dataset.input_table). -* **outputDeadletterTable** : The BigQuery table for messages that failed to reach the output table, in the format :.. If a table doesn't exist, is is created during pipeline execution. If not specified, `_error_records` is used. (Example: your-project-id:your-dataset.your-table-name). -* **query** : The SQL query to use to read data from BigQuery. If the BigQuery dataset is in a different project than the Dataflow job, specify the full dataset name in the SQL query, for example: ... By default, the `query` parameter uses GoogleSQL (https://cloud.google.com/bigquery/docs/introduction-sql), unless `useLegacySql` is `true`. You must specify either `inputTableSpec` or `query`. If you set both parameters, the template uses the `query` parameter. (Example: select * from sampledb.sample_table). 
-* **useLegacySql** : Set to true to use legacy SQL. This parameter only applies when using the `query` parameter. Defaults to: false. -* **queryLocation** : Needed when reading from an authorized view without underlying table's permission. (Example: US). -* **queryTempDataset** : With this option, you can set an existing dataset to create the temporary table to store the results of the query. (Example: temp_dataset). -* **elasticsearchUsername** : The Elasticsearch username to authenticate with. If specified, the value of 'apiKey' is ignored. -* **elasticsearchPassword** : The Elasticsearch password to authenticate with. If specified, the value of 'apiKey' is ignored. -* **batchSize** : The batch size in number of documents. Defaults to: 1000. -* **batchSizeBytes** : The batch size in number of bytes. Defaults to: 5242880 (5mb). -* **maxRetryAttempts** : The maximum number of retry attempts. Must be greater than zero. Defaults to: no retries. -* **maxRetryDuration** : The maximum retry duration in milliseconds. Must be greater than zero. Defaults to: no retries. -* **propertyAsIndex** : The property in the document being indexed whose value specifies `_index` metadata to include with the document in bulk requests. Takes precedence over an `_index` UDF. Defaults to: none. -* **javaScriptIndexFnGcsPath** : The Cloud Storage path to the JavaScript UDF source for a function that specifies `_index` metadata to include with the document in bulk requests. Defaults to: none. -* **javaScriptIndexFnName** : The name of the UDF JavaScript function that specifies `_index` metadata to include with the document in bulk requests. Defaults to: none. -* **propertyAsId** : A property in the document being indexed whose value specifies `_id` metadata to include with the document in bulk requests. Takes precedence over an `_id` UDF. Defaults to: none. -* **javaScriptIdFnGcsPath** : The Cloud Storage path to the JavaScript UDF source for the function that specifies `_id` metadata to include with the document in bulk requests. Defaults to: none. -* **javaScriptIdFnName** : The name of the UDF JavaScript function that specifies the `_id` metadata to include with the document in bulk requests. Defaults to: none. -* **javaScriptTypeFnGcsPath** : The Cloud Storage path to the JavaScript UDF source for a function that specifies `_type` metadata to include with documents in bulk requests. Default: none. -* **javaScriptTypeFnName** : The name of the UDF JavaScript function that specifies the `_type` metadata to include with the document in bulk requests. Defaults to: none. -* **javaScriptIsDeleteFnGcsPath** : The Cloud Storage path to the JavaScript UDF source for the function that determines whether to delete the document instead of inserting or updating it. The function returns a string value of `true` or `false`. Defaults to: none. -* **javaScriptIsDeleteFnName** : The name of the UDF JavaScript function that determines whether to delete the document instead of inserting or updating it. The function returns a string value of `true` or `false`. Defaults to: none. -* **usePartialUpdate** : Whether to use partial updates (update rather than create or index, allowing partial documents) with Elasticsearch requests. Defaults to: false. -* **bulkInsertMethod** : Whether to use `INDEX` (index, allows upserts) or `CREATE` (create, errors on duplicate _id) with Elasticsearch bulk requests. Defaults to: CREATE. -* **trustSelfSignedCerts** : Whether to trust self-signed certificate or not. 
An Elasticsearch instance installed might have a self-signed certificate, Enable this to True to by-pass the validation on SSL certificate. (default is False). -* **disableCertificateValidation** : If 'true', trust the self-signed SSL certificate. An Elasticsearch instance might have a self-signed certificate. To bypass validation for the certificate, set this parameter to 'true'. Default: false. -* **apiKeyKMSEncryptionKey** : The Cloud KMS key to decrypt the API key. This parameter must be provided if the apiKeySource is set to KMS. If this parameter is provided, apiKey string should be passed in encrypted. Encrypt parameters using the KMS API encrypt endpoint. The Key should be in the format projects/{gcp_project}/locations/{key_region}/keyRings/{key_ring}/cryptoKeys/{kms_key_name}. See: https://cloud.google.com/kms/docs/reference/rest/v1/projects.locations.keyRings.cryptoKeys/encrypt (Example: projects/your-project-id/locations/global/keyRings/your-keyring/cryptoKeys/your-key-name). -* **apiKeySecretId** : Secret Manager secret ID for the apiKey. This parameter should be provided if the apiKeySource is set to SECRET_MANAGER. Should be in the format projects/{project}/secrets/{secret}/versions/{secret_version}. (Example: projects/your-project-id/secrets/your-secret/versions/your-secret-version). -* **apiKeySource** : Source of the API key. One of PLAINTEXT, KMS or SECRET_MANAGER. This parameter must be provided if secret manager or KMS is used. If apiKeySource is set to KMS, apiKeyKMSEncryptionKey and encrypted apiKey must be provided. If apiKeySource is set to SECRET_MANAGER, apiKeySecretId must be provided. If apiKeySource is set to PLAINTEXT, apiKey must be provided. Defaults to: PLAINTEXT. -* **socketTimeout** : If set, overwrites the default max retry timeout and default socket timeout (30000ms) in the Elastic RestClient. -* **pythonExternalTextTransformGcsPath** : The Cloud Storage path pattern for the Python code containing your user-defined functions. (Example: gs://your-bucket/your-function.py). -* **pythonExternalTextTransformFunctionName** : The name of the function to call from your Python file. Use only letters, digits, and underscores. (Example: 'transform' or 'transform_udf1'). +* **inputTableSpec**: The BigQuery table to read from. If you specify `inputTableSpec`, the template reads the data directly from BigQuery storage by using the BigQuery Storage Read API (https://cloud.google.com/bigquery/docs/reference/storage). For information about limitations in the Storage Read API, see https://cloud.google.com/bigquery/docs/reference/storage#limitations. You must specify either `inputTableSpec` or `query`. If you set both parameters, the template uses the `query` parameter. For example, `:.`. +* **outputDeadletterTable**: The BigQuery table for messages that failed to reach the output table. If a table doesn't exist, it is created during pipeline execution. If not specified, `_error_records` is used. For example, `:.`. +* **query**: The SQL query to use to read data from BigQuery. If the BigQuery dataset is in a different project than the Dataflow job, specify the full dataset name in the SQL query, for example: ... By default, the `query` parameter uses GoogleSQL (https://cloud.google.com/bigquery/docs/introduction-sql), unless `useLegacySql` is `true`. You must specify either `inputTableSpec` or `query`. If you set both parameters, the template uses the `query` parameter. For example, `select * from sampledb.sample_table`. 
+* **useLegacySql**: Set to `true` to use legacy SQL. This parameter only applies when using the `query` parameter. Defaults to `false`. +* **queryLocation**: Needed when reading from an authorized view without underlying table's permission. For example, `US`. +* **elasticsearchUsername**: The Elasticsearch username to authenticate with. If specified, the value of `apiKey` is ignored. +* **elasticsearchPassword**: The Elasticsearch password to authenticate with. If specified, the value of `apiKey` is ignored. +* **batchSize**: The batch size in number of documents. Defaults to `1000`. +* **batchSizeBytes**: The batch size in number of bytes. Defaults to `5242880` (5mb). +* **maxRetryAttempts**: The maximum number of retry attempts. Must be greater than zero. Defaults to `no retries`. +* **maxRetryDuration**: The maximum retry duration in milliseconds. Must be greater than zero. Defaults to `no retries`. +* **propertyAsIndex**: The property in the document being indexed whose value specifies `_index` metadata to include with the document in bulk requests. Takes precedence over an `_index` UDF. Defaults to `none`. +* **javaScriptIndexFnGcsPath**: The Cloud Storage path to the JavaScript UDF source for a function that specifies `_index` metadata to include with the document in bulk requests. Defaults to `none`. +* **javaScriptIndexFnName**: The name of the UDF JavaScript function that specifies `_index` metadata to include with the document in bulk requests. Defaults to `none`. +* **propertyAsId**: A property in the document being indexed whose value specifies `_id` metadata to include with the document in bulk requests. Takes precedence over an `_id` UDF. Defaults to `none`. +* **javaScriptIdFnGcsPath**: The Cloud Storage path to the JavaScript UDF source for the function that specifies `_id` metadata to include with the document in bulk requests. Defaults to `none`. +* **javaScriptIdFnName**: The name of the UDF JavaScript function that specifies the `_id` metadata to include with the document in bulk requests. Defaults to `none`. +* **javaScriptTypeFnGcsPath**: The Cloud Storage path to the JavaScript UDF source for a function that specifies `_type` metadata to include with documents in bulk requests. Defaults to `none`. +* **javaScriptTypeFnName**: The name of the UDF JavaScript function that specifies the `_type` metadata to include with the document in bulk requests. Defaults to `none`. +* **javaScriptIsDeleteFnGcsPath**: The Cloud Storage path to the JavaScript UDF source for the function that determines whether to delete the document instead of inserting or updating it. The function returns a string value of `true` or `false`. Defaults to `none`. +* **javaScriptIsDeleteFnName**: The name of the UDF JavaScript function that determines whether to delete the document instead of inserting or updating it. The function returns a string value of `true` or `false`. Defaults to `none`. +* **usePartialUpdate**: Whether to use partial updates (update rather than create or index, allowing partial documents) with Elasticsearch requests. Defaults to `false`. +* **bulkInsertMethod**: Whether to use `INDEX` (index, allows upserts) or `CREATE` (create, errors on duplicate _id) with Elasticsearch bulk requests. Defaults to `CREATE`. +* **trustSelfSignedCerts**: Whether to trust self-signed certificate or not. An Elasticsearch instance installed might have a self-signed certificate, Enable this to true to by-pass the validation on SSL certificate. (Defaults to: `false`). 
+* **disableCertificateValidation**: If `true`, trust the self-signed SSL certificate. An Elasticsearch instance might have a self-signed certificate. To bypass validation for the certificate, set this parameter to `true`. Defaults to `false`.
+* **apiKeyKMSEncryptionKey**: The Cloud KMS key to decrypt the API key. This parameter is required if the `apiKeySource` is set to `KMS`. If this parameter is provided, pass in an encrypted `apiKey` string. Encrypt parameters using the KMS API encrypt endpoint. For the key, use the format `projects//locations//keyRings//cryptoKeys/`. See https://cloud.google.com/kms/docs/reference/rest/v1/projects.locations.keyRings.cryptoKeys/encrypt for details. For example, `projects/your-project-id/locations/global/keyRings/your-keyring/cryptoKeys/your-key-name`.
+* **apiKeySecretId**: The Secret Manager secret ID for the `apiKey`. If the `apiKeySource` is set to `SECRET_MANAGER`, provide this parameter. Use the format `projects//secrets//versions/`. For example, `projects/your-project-id/secrets/your-secret/versions/your-secret-version`.
+* **apiKeySource**: The source of the API key. Allowed values are `PLAINTEXT`, `KMS`, or `SECRET_MANAGER`. This parameter is required when you use Secret Manager or KMS. If `apiKeySource` is set to `KMS`, `apiKeyKMSEncryptionKey` and an encrypted `apiKey` must be provided. If `apiKeySource` is set to `SECRET_MANAGER`, `apiKeySecretId` must be provided. If `apiKeySource` is set to `PLAINTEXT`, `apiKey` must be provided. Defaults to `PLAINTEXT`.
+* **socketTimeout**: If set, overwrites the default max retry timeout and default socket timeout (30000ms) in the Elastic RestClient.
+* **pythonExternalTextTransformGcsPath**: The Cloud Storage path pattern for the Python code containing your user-defined functions. For example, `gs://your-bucket/your-function.py`.
+* **pythonExternalTextTransformFunctionName**: The name of the function to call from your Python file. Use only letters, digits, and underscores. For example, `transform` or `transform_udf1` (see the sketch after this list).
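A minimal sketch of a Python UDF for the `pythonExternalTextTransformGcsPath` and `pythonExternalTextTransformFunctionName` parameters above. It assumes each element reaches the function as a JSON string and that the function returns the transformed record as a JSON string; the `processed` field is purely illustrative, not part of the template's contract.

```python
import json


def transform(element):
    """Hypothetical UDF: parse one JSON record, adjust it, and return it as JSON."""
    record = json.loads(element)
    record["processed"] = True  # illustrative field only
    return json.dumps(record)
```

With this file uploaded to Cloud Storage, `pythonExternalTextTransformFunctionName` would be set to `transform`.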
@@ -274,7 +273,7 @@ mvn clean package -PtemplatesRun \ -Dregion="$REGION" \ -DjobName="bigquery-to-elasticsearch-xlang-job" \ -DtemplateName="BigQuery_to_Elasticsearch_Xlang" \ --Dparameters="inputTableSpec=$INPUT_TABLE_SPEC,outputDeadletterTable=$OUTPUT_DEADLETTER_TABLE,query=$QUERY,useLegacySql=$USE_LEGACY_SQL,queryLocation=$QUERY_LOCATION,queryTempDataset=$QUERY_TEMP_DATASET,connectionUrl=$CONNECTION_URL,apiKey=$API_KEY,elasticsearchUsername=$ELASTICSEARCH_USERNAME,elasticsearchPassword=$ELASTICSEARCH_PASSWORD,index=$INDEX,batchSize=$BATCH_SIZE,batchSizeBytes=$BATCH_SIZE_BYTES,maxRetryAttempts=$MAX_RETRY_ATTEMPTS,maxRetryDuration=$MAX_RETRY_DURATION,propertyAsIndex=$PROPERTY_AS_INDEX,javaScriptIndexFnGcsPath=$JAVA_SCRIPT_INDEX_FN_GCS_PATH,javaScriptIndexFnName=$JAVA_SCRIPT_INDEX_FN_NAME,propertyAsId=$PROPERTY_AS_ID,javaScriptIdFnGcsPath=$JAVA_SCRIPT_ID_FN_GCS_PATH,javaScriptIdFnName=$JAVA_SCRIPT_ID_FN_NAME,javaScriptTypeFnGcsPath=$JAVA_SCRIPT_TYPE_FN_GCS_PATH,javaScriptTypeFnName=$JAVA_SCRIPT_TYPE_FN_NAME,javaScriptIsDeleteFnGcsPath=$JAVA_SCRIPT_IS_DELETE_FN_GCS_PATH,javaScriptIsDeleteFnName=$JAVA_SCRIPT_IS_DELETE_FN_NAME,usePartialUpdate=$USE_PARTIAL_UPDATE,bulkInsertMethod=$BULK_INSERT_METHOD,trustSelfSignedCerts=$TRUST_SELF_SIGNED_CERTS,disableCertificateValidation=$DISABLE_CERTIFICATE_VALIDATION,apiKeyKMSEncryptionKey=$API_KEY_KMSENCRYPTION_KEY,apiKeySecretId=$API_KEY_SECRET_ID,apiKeySource=$API_KEY_SOURCE,socketTimeout=$SOCKET_TIMEOUT,pythonExternalTextTransformGcsPath=$PYTHON_EXTERNAL_TEXT_TRANSFORM_GCS_PATH,pythonExternalTextTransformFunctionName=$PYTHON_EXTERNAL_TEXT_TRANSFORM_FUNCTION_NAME" \ +-Dparameters="inputTableSpec=$INPUT_TABLE_SPEC,outputDeadletterTable=$OUTPUT_DEADLETTER_TABLE,query=$QUERY,useLegacySql=$USE_LEGACY_SQL,queryLocation=$QUERY_LOCATION,connectionUrl=$CONNECTION_URL,apiKey=$API_KEY,elasticsearchUsername=$ELASTICSEARCH_USERNAME,elasticsearchPassword=$ELASTICSEARCH_PASSWORD,index=$INDEX,batchSize=$BATCH_SIZE,batchSizeBytes=$BATCH_SIZE_BYTES,maxRetryAttempts=$MAX_RETRY_ATTEMPTS,maxRetryDuration=$MAX_RETRY_DURATION,propertyAsIndex=$PROPERTY_AS_INDEX,javaScriptIndexFnGcsPath=$JAVA_SCRIPT_INDEX_FN_GCS_PATH,javaScriptIndexFnName=$JAVA_SCRIPT_INDEX_FN_NAME,propertyAsId=$PROPERTY_AS_ID,javaScriptIdFnGcsPath=$JAVA_SCRIPT_ID_FN_GCS_PATH,javaScriptIdFnName=$JAVA_SCRIPT_ID_FN_NAME,javaScriptTypeFnGcsPath=$JAVA_SCRIPT_TYPE_FN_GCS_PATH,javaScriptTypeFnName=$JAVA_SCRIPT_TYPE_FN_NAME,javaScriptIsDeleteFnGcsPath=$JAVA_SCRIPT_IS_DELETE_FN_GCS_PATH,javaScriptIsDeleteFnName=$JAVA_SCRIPT_IS_DELETE_FN_NAME,usePartialUpdate=$USE_PARTIAL_UPDATE,bulkInsertMethod=$BULK_INSERT_METHOD,trustSelfSignedCerts=$TRUST_SELF_SIGNED_CERTS,disableCertificateValidation=$DISABLE_CERTIFICATE_VALIDATION,apiKeyKMSEncryptionKey=$API_KEY_KMSENCRYPTION_KEY,apiKeySecretId=$API_KEY_SECRET_ID,apiKeySource=$API_KEY_SOURCE,socketTimeout=$SOCKET_TIMEOUT,pythonExternalTextTransformGcsPath=$PYTHON_EXTERNAL_TEXT_TRANSFORM_GCS_PATH,pythonExternalTextTransformFunctionName=$PYTHON_EXTERNAL_TEXT_TRANSFORM_FUNCTION_NAME" \ -f v2/googlecloud-to-elasticsearch ``` @@ -319,15 +318,14 @@ resource "google_dataflow_flex_template_job" "bigquery_to_elasticsearch_xlang" { name = "bigquery-to-elasticsearch-xlang" region = var.region parameters = { - connectionUrl = "https://elasticsearch-host:9200" + connectionUrl = "" apiKey = "" - index = "my-index" - # inputTableSpec = "bigquery-project:dataset.input_table" - # outputDeadletterTable = "your-project-id:your-dataset.your-table-name" - # query = "select * from 
sampledb.sample_table" + index = "" + # inputTableSpec = "" + # outputDeadletterTable = "" + # query = "" # useLegacySql = "false" - # queryLocation = "US" - # queryTempDataset = "temp_dataset" + # queryLocation = "" # elasticsearchUsername = "" # elasticsearchPassword = "" # batchSize = "1000" @@ -348,12 +346,12 @@ resource "google_dataflow_flex_template_job" "bigquery_to_elasticsearch_xlang" { # bulkInsertMethod = "CREATE" # trustSelfSignedCerts = "false" # disableCertificateValidation = "false" - # apiKeyKMSEncryptionKey = "projects/your-project-id/locations/global/keyRings/your-keyring/cryptoKeys/your-key-name" - # apiKeySecretId = "projects/your-project-id/secrets/your-secret/versions/your-secret-version" + # apiKeyKMSEncryptionKey = "" + # apiKeySecretId = "" # apiKeySource = "PLAINTEXT" # socketTimeout = "" - # pythonExternalTextTransformGcsPath = "gs://your-bucket/your-function.py" - # pythonExternalTextTransformFunctionName = "'transform' or 'transform_udf1'" + # pythonExternalTextTransformGcsPath = "" + # pythonExternalTextTransformFunctionName = "" } } ``` diff --git a/v2/googlecloud-to-elasticsearch/README_GCS_to_Elasticsearch.md b/v2/googlecloud-to-elasticsearch/README_GCS_to_Elasticsearch.md index 3126a8d58a..66c19b9071 100644 --- a/v2/googlecloud-to-elasticsearch/README_GCS_to_Elasticsearch.md +++ b/v2/googlecloud-to-elasticsearch/README_GCS_to_Elasticsearch.md @@ -27,48 +27,48 @@ on [Metadata Annotations](https://github.com/GoogleCloudPlatform/DataflowTemplat ### Required parameters -* **deadletterTable** : The BigQuery dead-letter table to send failed inserts to. (Example: your-project:your-dataset.your-table-name). -* **inputFileSpec** : The Cloud Storage file pattern to search for CSV files. Example: gs://mybucket/test-*.csv. -* **connectionUrl** : The Elasticsearch URL in the format https://hostname:[port]. If using Elastic Cloud, specify the CloudID. (Example: https://elasticsearch-host:9200). -* **apiKey** : The Base64-encoded API key to use for authentication. -* **index** : The Elasticsearch index that the requests are issued to, such as `my-index.` (Example: my-index). +* **deadletterTable**: The BigQuery dead-letter table to send failed inserts to. For example, `your-project:your-dataset.your-table-name`. +* **inputFileSpec**: The Cloud Storage file pattern to search for CSV files. For example, `gs://mybucket/test-*.csv`. +* **connectionUrl**: The Elasticsearch URL in the format `https://hostname:[port]`. If using Elastic Cloud, specify the CloudID. For example, `https://elasticsearch-host:9200`. +* **apiKey**: The Base64-encoded API key to use for authentication. +* **index**: The Elasticsearch index that the requests are issued to. For example, `my-index`. ### Optional parameters -* **inputFormat** : Input file format. Default is: CSV. -* **containsHeaders** : Input CSV files contain a header record (true/false). Only required if reading CSV files. Defaults to: false. -* **delimiter** : The column delimiter of the input text files. Default: use delimiter provided in csvFormat (Example: ,). -* **csvFormat** : CSV format specification to use for parsing records. Default is: Default. See https://commons.apache.org/proper/commons-csv/apidocs/org/apache/commons/csv/CSVFormat.html for more details. Must match format names exactly found at: https://commons.apache.org/proper/commons-csv/apidocs/org/apache/commons/csv/CSVFormat.Predefined.html. -* **jsonSchemaPath** : The path to the JSON schema. Defaults to: null. (Example: gs://path/to/schema). 
-* **largeNumFiles** : Set to true if number of files is in the tens of thousands. Defaults to: false. -* **csvFileEncoding** : The CSV file character encoding format. Allowed Values are US-ASCII, ISO-8859-1, UTF-8, and UTF-16. Defaults to: UTF-8. -* **logDetailedCsvConversionErrors** : Set to true to enable detailed error logging when CSV parsing fails. Note that this may expose sensitive data in the logs (e.g., if the CSV file contains passwords). Default: false. -* **elasticsearchUsername** : The Elasticsearch username to authenticate with. If specified, the value of 'apiKey' is ignored. -* **elasticsearchPassword** : The Elasticsearch password to authenticate with. If specified, the value of 'apiKey' is ignored. -* **batchSize** : The batch size in number of documents. Defaults to: 1000. -* **batchSizeBytes** : The batch size in number of bytes. Defaults to: 5242880 (5mb). -* **maxRetryAttempts** : The maximum number of retry attempts. Must be greater than zero. Defaults to: no retries. -* **maxRetryDuration** : The maximum retry duration in milliseconds. Must be greater than zero. Defaults to: no retries. -* **propertyAsIndex** : The property in the document being indexed whose value specifies `_index` metadata to include with the document in bulk requests. Takes precedence over an `_index` UDF. Defaults to: none. -* **javaScriptIndexFnGcsPath** : The Cloud Storage path to the JavaScript UDF source for a function that specifies `_index` metadata to include with the document in bulk requests. Defaults to: none. -* **javaScriptIndexFnName** : The name of the UDF JavaScript function that specifies `_index` metadata to include with the document in bulk requests. Defaults to: none. -* **propertyAsId** : A property in the document being indexed whose value specifies `_id` metadata to include with the document in bulk requests. Takes precedence over an `_id` UDF. Defaults to: none. -* **javaScriptIdFnGcsPath** : The Cloud Storage path to the JavaScript UDF source for the function that specifies `_id` metadata to include with the document in bulk requests. Defaults to: none. -* **javaScriptIdFnName** : The name of the UDF JavaScript function that specifies the `_id` metadata to include with the document in bulk requests. Defaults to: none. -* **javaScriptTypeFnGcsPath** : The Cloud Storage path to the JavaScript UDF source for a function that specifies `_type` metadata to include with documents in bulk requests. Default: none. -* **javaScriptTypeFnName** : The name of the UDF JavaScript function that specifies the `_type` metadata to include with the document in bulk requests. Defaults to: none. -* **javaScriptIsDeleteFnGcsPath** : The Cloud Storage path to the JavaScript UDF source for the function that determines whether to delete the document instead of inserting or updating it. The function returns a string value of `true` or `false`. Defaults to: none. -* **javaScriptIsDeleteFnName** : The name of the UDF JavaScript function that determines whether to delete the document instead of inserting or updating it. The function returns a string value of `true` or `false`. Defaults to: none. -* **usePartialUpdate** : Whether to use partial updates (update rather than create or index, allowing partial documents) with Elasticsearch requests. Defaults to: false. -* **bulkInsertMethod** : Whether to use `INDEX` (index, allows upserts) or `CREATE` (create, errors on duplicate _id) with Elasticsearch bulk requests. Defaults to: CREATE. 
-* **trustSelfSignedCerts** : Whether to trust self-signed certificate or not. An Elasticsearch instance installed might have a self-signed certificate, Enable this to True to by-pass the validation on SSL certificate. (default is False). -* **disableCertificateValidation** : If 'true', trust the self-signed SSL certificate. An Elasticsearch instance might have a self-signed certificate. To bypass validation for the certificate, set this parameter to 'true'. Default: false. -* **apiKeyKMSEncryptionKey** : The Cloud KMS key to decrypt the API key. This parameter must be provided if the apiKeySource is set to KMS. If this parameter is provided, apiKey string should be passed in encrypted. Encrypt parameters using the KMS API encrypt endpoint. The Key should be in the format projects/{gcp_project}/locations/{key_region}/keyRings/{key_ring}/cryptoKeys/{kms_key_name}. See: https://cloud.google.com/kms/docs/reference/rest/v1/projects.locations.keyRings.cryptoKeys/encrypt (Example: projects/your-project-id/locations/global/keyRings/your-keyring/cryptoKeys/your-key-name). -* **apiKeySecretId** : Secret Manager secret ID for the apiKey. This parameter should be provided if the apiKeySource is set to SECRET_MANAGER. Should be in the format projects/{project}/secrets/{secret}/versions/{secret_version}. (Example: projects/your-project-id/secrets/your-secret/versions/your-secret-version). -* **apiKeySource** : Source of the API key. One of PLAINTEXT, KMS or SECRET_MANAGER. This parameter must be provided if secret manager or KMS is used. If apiKeySource is set to KMS, apiKeyKMSEncryptionKey and encrypted apiKey must be provided. If apiKeySource is set to SECRET_MANAGER, apiKeySecretId must be provided. If apiKeySource is set to PLAINTEXT, apiKey must be provided. Defaults to: PLAINTEXT. -* **socketTimeout** : If set, overwrites the default max retry timeout and default socket timeout (30000ms) in the Elastic RestClient. -* **javascriptTextTransformGcsPath** : The Cloud Storage URI of the .js file that defines the JavaScript user-defined function (UDF) to use. (Example: gs://my-bucket/my-udfs/my_file.js). -* **javascriptTextTransformFunctionName** : The name of the JavaScript user-defined function (UDF) to use. For example, if your JavaScript function code is `myTransform(inJson) { /*...do stuff...*/ }`, then the function name is `myTransform`. For sample JavaScript UDFs, see UDF Examples (https://github.com/GoogleCloudPlatform/DataflowTemplates#udf-examples). +* **inputFormat**: The input file format. Defaults to `CSV`. +* **containsHeaders**: Input CSV files contain a header record (true/false). Only required if reading CSV files. Defaults to: false. +* **delimiter**: The column delimiter of the input text files. Default: `,` For example, `,`. +* **csvFormat**: CSV format specification to use for parsing records. Default is: `Default`. See https://commons.apache.org/proper/commons-csv/apidocs/org/apache/commons/csv/CSVFormat.html for more details. Must match format names exactly found at: https://commons.apache.org/proper/commons-csv/apidocs/org/apache/commons/csv/CSVFormat.Predefined.html. +* **jsonSchemaPath**: The path to the JSON schema. Defaults to `null`. For example, `gs://path/to/schema`. +* **largeNumFiles**: Set to true if number of files is in the tens of thousands. Defaults to `false`. +* **csvFileEncoding**: The CSV file character encoding format. Allowed values are `US-ASCII`, `ISO-8859-1`, `UTF-8`, and `UTF-16`. Defaults to: UTF-8. 
+* **logDetailedCsvConversionErrors**: Set to `true` to enable detailed error logging when CSV parsing fails. Note that this may expose sensitive data in the logs (e.g., if the CSV file contains passwords). Default: `false`. +* **elasticsearchUsername**: The Elasticsearch username to authenticate with. If specified, the value of `apiKey` is ignored. +* **elasticsearchPassword**: The Elasticsearch password to authenticate with. If specified, the value of `apiKey` is ignored. +* **batchSize**: The batch size in number of documents. Defaults to `1000`. +* **batchSizeBytes**: The batch size in number of bytes. Defaults to `5242880` (5mb). +* **maxRetryAttempts**: The maximum number of retry attempts. Must be greater than zero. Defaults to `no retries`. +* **maxRetryDuration**: The maximum retry duration in milliseconds. Must be greater than zero. Defaults to `no retries`. +* **propertyAsIndex**: The property in the document being indexed whose value specifies `_index` metadata to include with the document in bulk requests. Takes precedence over an `_index` UDF. Defaults to `none`. +* **javaScriptIndexFnGcsPath**: The Cloud Storage path to the JavaScript UDF source for a function that specifies `_index` metadata to include with the document in bulk requests. Defaults to `none`. +* **javaScriptIndexFnName**: The name of the UDF JavaScript function that specifies `_index` metadata to include with the document in bulk requests. Defaults to `none`. +* **propertyAsId**: A property in the document being indexed whose value specifies `_id` metadata to include with the document in bulk requests. Takes precedence over an `_id` UDF. Defaults to `none`. +* **javaScriptIdFnGcsPath**: The Cloud Storage path to the JavaScript UDF source for the function that specifies `_id` metadata to include with the document in bulk requests. Defaults to `none`. +* **javaScriptIdFnName**: The name of the UDF JavaScript function that specifies the `_id` metadata to include with the document in bulk requests. Defaults to `none`. +* **javaScriptTypeFnGcsPath**: The Cloud Storage path to the JavaScript UDF source for a function that specifies `_type` metadata to include with documents in bulk requests. Defaults to `none`. +* **javaScriptTypeFnName**: The name of the UDF JavaScript function that specifies the `_type` metadata to include with the document in bulk requests. Defaults to `none`. +* **javaScriptIsDeleteFnGcsPath**: The Cloud Storage path to the JavaScript UDF source for the function that determines whether to delete the document instead of inserting or updating it. The function returns a string value of `true` or `false`. Defaults to `none`. +* **javaScriptIsDeleteFnName**: The name of the UDF JavaScript function that determines whether to delete the document instead of inserting or updating it. The function returns a string value of `true` or `false`. Defaults to `none`. +* **usePartialUpdate**: Whether to use partial updates (update rather than create or index, allowing partial documents) with Elasticsearch requests. Defaults to `false`. +* **bulkInsertMethod**: Whether to use `INDEX` (index, allows upserts) or `CREATE` (create, errors on duplicate _id) with Elasticsearch bulk requests. Defaults to `CREATE`. +* **trustSelfSignedCerts**: Whether to trust self-signed certificate or not. An Elasticsearch instance installed might have a self-signed certificate, Enable this to true to by-pass the validation on SSL certificate. (Defaults to: `false`). 
+* **disableCertificateValidation**: If `true`, trust the self-signed SSL certificate. An Elasticsearch instance might have a self-signed certificate. To bypass validation for the certificate, set this parameter to `true`. Defaults to `false`.
+* **apiKeyKMSEncryptionKey**: The Cloud KMS key to decrypt the API key. This parameter is required if the `apiKeySource` is set to `KMS`. If this parameter is provided, pass in an encrypted `apiKey` string. Encrypt parameters using the KMS API encrypt endpoint (see the sketch after this list). For the key, use the format `projects//locations//keyRings//cryptoKeys/`. See https://cloud.google.com/kms/docs/reference/rest/v1/projects.locations.keyRings.cryptoKeys/encrypt for details. For example, `projects/your-project-id/locations/global/keyRings/your-keyring/cryptoKeys/your-key-name`.
+* **apiKeySecretId**: The Secret Manager secret ID for the `apiKey`. If the `apiKeySource` is set to `SECRET_MANAGER`, provide this parameter. Use the format `projects//secrets//versions/`. For example, `projects/your-project-id/secrets/your-secret/versions/your-secret-version`.
+* **apiKeySource**: The source of the API key. Allowed values are `PLAINTEXT`, `KMS`, or `SECRET_MANAGER`. This parameter is required when you use Secret Manager or KMS. If `apiKeySource` is set to `KMS`, `apiKeyKMSEncryptionKey` and an encrypted `apiKey` must be provided. If `apiKeySource` is set to `SECRET_MANAGER`, `apiKeySecretId` must be provided. If `apiKeySource` is set to `PLAINTEXT`, `apiKey` must be provided. Defaults to `PLAINTEXT`.
+* **socketTimeout**: If set, overwrites the default max retry timeout and default socket timeout (30000ms) in the Elastic RestClient.
+* **javascriptTextTransformGcsPath**: The Cloud Storage URI of the .js file that defines the JavaScript user-defined function (UDF) to use. For example, `gs://my-bucket/my-udfs/my_file.js`.
+* **javascriptTextTransformFunctionName**: The name of the JavaScript user-defined function (UDF) to use. For example, if your JavaScript function code is `myTransform(inJson) { /*...do stuff...*/ }`, then the function name is `myTransform`. For sample JavaScript UDFs, see UDF Examples (https://github.com/GoogleCloudPlatform/DataflowTemplates#udf-examples).
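For the `apiKeySource=KMS` path described in the list above, a hedged sketch of encrypting the API key with the google-cloud-kms client library follows. The helper name `encrypt_api_key` is hypothetical, and the assumption that the encrypted `apiKey` is supplied base64-encoded (matching what the KMS REST `encrypt` endpoint returns) is an assumption here rather than something the template documentation states.

```python
import base64

from google.cloud import kms


def encrypt_api_key(kms_key_name: str, api_key: str) -> str:
    """Encrypt an Elasticsearch API key with Cloud KMS; return base64 ciphertext."""
    client = kms.KeyManagementServiceClient()
    response = client.encrypt(
        request={"name": kms_key_name, "plaintext": api_key.encode("utf-8")}
    )
    return base64.b64encode(response.ciphertext).decode("utf-8")
```

The resulting string would then be passed as `apiKey`, together with `apiKeySource=KMS` and `apiKeyKMSEncryptionKey` set to the same key resource name.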
## User-Defined functions (UDFs) @@ -354,16 +354,16 @@ resource "google_dataflow_flex_template_job" "gcs_to_elasticsearch" { name = "gcs-to-elasticsearch" region = var.region parameters = { - deadletterTable = "your-project:your-dataset.your-table-name" + deadletterTable = "" inputFileSpec = "" - connectionUrl = "https://elasticsearch-host:9200" + connectionUrl = "" apiKey = "" - index = "my-index" + index = "" # inputFormat = "csv" # containsHeaders = "false" - # delimiter = "," + # delimiter = "" # csvFormat = "Default" - # jsonSchemaPath = "gs://path/to/schema" + # jsonSchemaPath = "" # largeNumFiles = "false" # csvFileEncoding = "UTF-8" # logDetailedCsvConversionErrors = "false" @@ -387,11 +387,11 @@ resource "google_dataflow_flex_template_job" "gcs_to_elasticsearch" { # bulkInsertMethod = "CREATE" # trustSelfSignedCerts = "false" # disableCertificateValidation = "false" - # apiKeyKMSEncryptionKey = "projects/your-project-id/locations/global/keyRings/your-keyring/cryptoKeys/your-key-name" - # apiKeySecretId = "projects/your-project-id/secrets/your-secret/versions/your-secret-version" + # apiKeyKMSEncryptionKey = "" + # apiKeySecretId = "" # apiKeySource = "PLAINTEXT" # socketTimeout = "" - # javascriptTextTransformGcsPath = "gs://my-bucket/my-udfs/my_file.js" + # javascriptTextTransformGcsPath = "" # javascriptTextTransformFunctionName = "" } } diff --git a/v2/googlecloud-to-elasticsearch/README_GCS_to_Elasticsearch_Xlang.md b/v2/googlecloud-to-elasticsearch/README_GCS_to_Elasticsearch_Xlang.md index 901332ec72..295f694286 100644 --- a/v2/googlecloud-to-elasticsearch/README_GCS_to_Elasticsearch_Xlang.md +++ b/v2/googlecloud-to-elasticsearch/README_GCS_to_Elasticsearch_Xlang.md @@ -27,48 +27,48 @@ on [Metadata Annotations](https://github.com/GoogleCloudPlatform/DataflowTemplat ### Required parameters -* **deadletterTable** : The BigQuery dead-letter table to send failed inserts to. (Example: your-project:your-dataset.your-table-name). -* **inputFileSpec** : The Cloud Storage file pattern to search for CSV files. Example: gs://mybucket/test-*.csv. -* **connectionUrl** : The Elasticsearch URL in the format https://hostname:[port]. If using Elastic Cloud, specify the CloudID. (Example: https://elasticsearch-host:9200). -* **apiKey** : The Base64-encoded API key to use for authentication. -* **index** : The Elasticsearch index that the requests are issued to, such as `my-index.` (Example: my-index). +* **deadletterTable**: The BigQuery dead-letter table to send failed inserts to. For example, `your-project:your-dataset.your-table-name`. +* **inputFileSpec**: The Cloud Storage file pattern to search for CSV files. For example, `gs://mybucket/test-*.csv`. +* **connectionUrl**: The Elasticsearch URL in the format `https://hostname:[port]`. If using Elastic Cloud, specify the CloudID. For example, `https://elasticsearch-host:9200`. +* **apiKey**: The Base64-encoded API key to use for authentication. +* **index**: The Elasticsearch index that the requests are issued to. For example, `my-index`. ### Optional parameters -* **inputFormat** : Input file format. Default is: CSV. -* **containsHeaders** : Input CSV files contain a header record (true/false). Only required if reading CSV files. Defaults to: false. -* **delimiter** : The column delimiter of the input text files. Default: use delimiter provided in csvFormat (Example: ,). -* **csvFormat** : CSV format specification to use for parsing records. Default is: Default. 
See https://commons.apache.org/proper/commons-csv/apidocs/org/apache/commons/csv/CSVFormat.html for more details. Must match format names exactly found at: https://commons.apache.org/proper/commons-csv/apidocs/org/apache/commons/csv/CSVFormat.Predefined.html. -* **jsonSchemaPath** : The path to the JSON schema. Defaults to: null. (Example: gs://path/to/schema). -* **largeNumFiles** : Set to true if number of files is in the tens of thousands. Defaults to: false. -* **csvFileEncoding** : The CSV file character encoding format. Allowed Values are US-ASCII, ISO-8859-1, UTF-8, and UTF-16. Defaults to: UTF-8. -* **logDetailedCsvConversionErrors** : Set to true to enable detailed error logging when CSV parsing fails. Note that this may expose sensitive data in the logs (e.g., if the CSV file contains passwords). Default: false. -* **elasticsearchUsername** : The Elasticsearch username to authenticate with. If specified, the value of 'apiKey' is ignored. -* **elasticsearchPassword** : The Elasticsearch password to authenticate with. If specified, the value of 'apiKey' is ignored. -* **batchSize** : The batch size in number of documents. Defaults to: 1000. -* **batchSizeBytes** : The batch size in number of bytes. Defaults to: 5242880 (5mb). -* **maxRetryAttempts** : The maximum number of retry attempts. Must be greater than zero. Defaults to: no retries. -* **maxRetryDuration** : The maximum retry duration in milliseconds. Must be greater than zero. Defaults to: no retries. -* **propertyAsIndex** : The property in the document being indexed whose value specifies `_index` metadata to include with the document in bulk requests. Takes precedence over an `_index` UDF. Defaults to: none. -* **javaScriptIndexFnGcsPath** : The Cloud Storage path to the JavaScript UDF source for a function that specifies `_index` metadata to include with the document in bulk requests. Defaults to: none. -* **javaScriptIndexFnName** : The name of the UDF JavaScript function that specifies `_index` metadata to include with the document in bulk requests. Defaults to: none. -* **propertyAsId** : A property in the document being indexed whose value specifies `_id` metadata to include with the document in bulk requests. Takes precedence over an `_id` UDF. Defaults to: none. -* **javaScriptIdFnGcsPath** : The Cloud Storage path to the JavaScript UDF source for the function that specifies `_id` metadata to include with the document in bulk requests. Defaults to: none. -* **javaScriptIdFnName** : The name of the UDF JavaScript function that specifies the `_id` metadata to include with the document in bulk requests. Defaults to: none. -* **javaScriptTypeFnGcsPath** : The Cloud Storage path to the JavaScript UDF source for a function that specifies `_type` metadata to include with documents in bulk requests. Default: none. -* **javaScriptTypeFnName** : The name of the UDF JavaScript function that specifies the `_type` metadata to include with the document in bulk requests. Defaults to: none. -* **javaScriptIsDeleteFnGcsPath** : The Cloud Storage path to the JavaScript UDF source for the function that determines whether to delete the document instead of inserting or updating it. The function returns a string value of `true` or `false`. Defaults to: none. -* **javaScriptIsDeleteFnName** : The name of the UDF JavaScript function that determines whether to delete the document instead of inserting or updating it. The function returns a string value of `true` or `false`. Defaults to: none. 
-* **usePartialUpdate** : Whether to use partial updates (update rather than create or index, allowing partial documents) with Elasticsearch requests. Defaults to: false. -* **bulkInsertMethod** : Whether to use `INDEX` (index, allows upserts) or `CREATE` (create, errors on duplicate _id) with Elasticsearch bulk requests. Defaults to: CREATE. -* **trustSelfSignedCerts** : Whether to trust self-signed certificate or not. An Elasticsearch instance installed might have a self-signed certificate, Enable this to True to by-pass the validation on SSL certificate. (default is False). -* **disableCertificateValidation** : If 'true', trust the self-signed SSL certificate. An Elasticsearch instance might have a self-signed certificate. To bypass validation for the certificate, set this parameter to 'true'. Default: false. -* **apiKeyKMSEncryptionKey** : The Cloud KMS key to decrypt the API key. This parameter must be provided if the apiKeySource is set to KMS. If this parameter is provided, apiKey string should be passed in encrypted. Encrypt parameters using the KMS API encrypt endpoint. The Key should be in the format projects/{gcp_project}/locations/{key_region}/keyRings/{key_ring}/cryptoKeys/{kms_key_name}. See: https://cloud.google.com/kms/docs/reference/rest/v1/projects.locations.keyRings.cryptoKeys/encrypt (Example: projects/your-project-id/locations/global/keyRings/your-keyring/cryptoKeys/your-key-name). -* **apiKeySecretId** : Secret Manager secret ID for the apiKey. This parameter should be provided if the apiKeySource is set to SECRET_MANAGER. Should be in the format projects/{project}/secrets/{secret}/versions/{secret_version}. (Example: projects/your-project-id/secrets/your-secret/versions/your-secret-version). -* **apiKeySource** : Source of the API key. One of PLAINTEXT, KMS or SECRET_MANAGER. This parameter must be provided if secret manager or KMS is used. If apiKeySource is set to KMS, apiKeyKMSEncryptionKey and encrypted apiKey must be provided. If apiKeySource is set to SECRET_MANAGER, apiKeySecretId must be provided. If apiKeySource is set to PLAINTEXT, apiKey must be provided. Defaults to: PLAINTEXT. -* **socketTimeout** : If set, overwrites the default max retry timeout and default socket timeout (30000ms) in the Elastic RestClient. -* **pythonExternalTextTransformGcsPath** : The Cloud Storage path pattern for the Python code containing your user-defined functions. (Example: gs://your-bucket/your-function.py). -* **pythonExternalTextTransformFunctionName** : The name of the function to call from your Python file. Use only letters, digits, and underscores. (Example: 'transform' or 'transform_udf1'). +* **inputFormat**: The input file format. Defaults to `CSV`. +* **containsHeaders**: Input CSV files contain a header record (true/false). Only required if reading CSV files. Defaults to: false. +* **delimiter**: The column delimiter of the input text files. Default: `,` For example, `,`. +* **csvFormat**: CSV format specification to use for parsing records. Default is: `Default`. See https://commons.apache.org/proper/commons-csv/apidocs/org/apache/commons/csv/CSVFormat.html for more details. Must match format names exactly found at: https://commons.apache.org/proper/commons-csv/apidocs/org/apache/commons/csv/CSVFormat.Predefined.html. +* **jsonSchemaPath**: The path to the JSON schema. Defaults to `null`. For example, `gs://path/to/schema`. +* **largeNumFiles**: Set to true if number of files is in the tens of thousands. Defaults to `false`. 
+* **csvFileEncoding**: The CSV file character encoding format. Allowed values are `US-ASCII`, `ISO-8859-1`, `UTF-8`, and `UTF-16`. Defaults to: UTF-8. +* **logDetailedCsvConversionErrors**: Set to `true` to enable detailed error logging when CSV parsing fails. Note that this may expose sensitive data in the logs (e.g., if the CSV file contains passwords). Default: `false`. +* **elasticsearchUsername**: The Elasticsearch username to authenticate with. If specified, the value of `apiKey` is ignored. +* **elasticsearchPassword**: The Elasticsearch password to authenticate with. If specified, the value of `apiKey` is ignored. +* **batchSize**: The batch size in number of documents. Defaults to `1000`. +* **batchSizeBytes**: The batch size in number of bytes. Defaults to `5242880` (5mb). +* **maxRetryAttempts**: The maximum number of retry attempts. Must be greater than zero. Defaults to `no retries`. +* **maxRetryDuration**: The maximum retry duration in milliseconds. Must be greater than zero. Defaults to `no retries`. +* **propertyAsIndex**: The property in the document being indexed whose value specifies `_index` metadata to include with the document in bulk requests. Takes precedence over an `_index` UDF. Defaults to `none`. +* **javaScriptIndexFnGcsPath**: The Cloud Storage path to the JavaScript UDF source for a function that specifies `_index` metadata to include with the document in bulk requests. Defaults to `none`. +* **javaScriptIndexFnName**: The name of the UDF JavaScript function that specifies `_index` metadata to include with the document in bulk requests. Defaults to `none`. +* **propertyAsId**: A property in the document being indexed whose value specifies `_id` metadata to include with the document in bulk requests. Takes precedence over an `_id` UDF. Defaults to `none`. +* **javaScriptIdFnGcsPath**: The Cloud Storage path to the JavaScript UDF source for the function that specifies `_id` metadata to include with the document in bulk requests. Defaults to `none`. +* **javaScriptIdFnName**: The name of the UDF JavaScript function that specifies the `_id` metadata to include with the document in bulk requests. Defaults to `none`. +* **javaScriptTypeFnGcsPath**: The Cloud Storage path to the JavaScript UDF source for a function that specifies `_type` metadata to include with documents in bulk requests. Defaults to `none`. +* **javaScriptTypeFnName**: The name of the UDF JavaScript function that specifies the `_type` metadata to include with the document in bulk requests. Defaults to `none`. +* **javaScriptIsDeleteFnGcsPath**: The Cloud Storage path to the JavaScript UDF source for the function that determines whether to delete the document instead of inserting or updating it. The function returns a string value of `true` or `false`. Defaults to `none`. +* **javaScriptIsDeleteFnName**: The name of the UDF JavaScript function that determines whether to delete the document instead of inserting or updating it. The function returns a string value of `true` or `false`. Defaults to `none`. +* **usePartialUpdate**: Whether to use partial updates (update rather than create or index, allowing partial documents) with Elasticsearch requests. Defaults to `false`. +* **bulkInsertMethod**: Whether to use `INDEX` (index, allows upserts) or `CREATE` (create, errors on duplicate _id) with Elasticsearch bulk requests. Defaults to `CREATE`. +* **trustSelfSignedCerts**: Whether to trust self-signed certificate or not. 
An installed Elasticsearch instance might have a self-signed certificate; set this parameter to `true` to bypass SSL certificate validation. (Defaults to: `false`).
+* **disableCertificateValidation**: If `true`, trust the self-signed SSL certificate. An Elasticsearch instance might have a self-signed certificate. To bypass validation for the certificate, set this parameter to `true`. Defaults to `false`.
+* **apiKeyKMSEncryptionKey**: The Cloud KMS key to decrypt the API key. This parameter is required if the `apiKeySource` is set to `KMS`. If this parameter is provided, pass in an encrypted `apiKey` string. Encrypt parameters using the KMS API encrypt endpoint. For the key, use the format `projects//locations//keyRings//cryptoKeys/`. See https://cloud.google.com/kms/docs/reference/rest/v1/projects.locations.keyRings.cryptoKeys/encrypt for details. For example, `projects/your-project-id/locations/global/keyRings/your-keyring/cryptoKeys/your-key-name`.
+* **apiKeySecretId**: The Secret Manager secret ID for the `apiKey`. If the `apiKeySource` is set to `SECRET_MANAGER`, provide this parameter. Use the format `projects//secrets//versions/`. For example, `projects/your-project-id/secrets/your-secret/versions/your-secret-version`.
+* **apiKeySource**: The source of the API key. Allowed values are `PLAINTEXT`, `KMS`, or `SECRET_MANAGER`. This parameter is required when you use Secret Manager or KMS. If `apiKeySource` is set to `KMS`, `apiKeyKMSEncryptionKey` and an encrypted `apiKey` must be provided. If `apiKeySource` is set to `SECRET_MANAGER`, `apiKeySecretId` must be provided. If `apiKeySource` is set to `PLAINTEXT`, `apiKey` must be provided. Defaults to `PLAINTEXT`.
+* **socketTimeout**: If set, overwrites the default max retry timeout and default socket timeout (30000ms) in the Elastic RestClient.
+* **pythonExternalTextTransformGcsPath**: The Cloud Storage path pattern for the Python code containing your user-defined functions. For example, `gs://your-bucket/your-function.py`.
+* **pythonExternalTextTransformFunctionName**: The name of the function to call from your Python file. Use only letters, digits, and underscores. For example, `transform` or `transform_udf1` (see the sketch after this list).
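For this CSV-oriented cross-language template, a minimal Python UDF sketch is shown below. It assumes each element reaches the function as one delimited text line and that the function returns the (possibly modified) line; the whitespace trimming is only illustrative.

```python
import csv
import io


def transform_udf1(line):
    """Hypothetical UDF: trim whitespace from every field of one CSV line."""
    fields = next(csv.reader(io.StringIO(line)))
    cleaned = [field.strip() for field in fields]
    out = io.StringIO()
    csv.writer(out).writerow(cleaned)
    return out.getvalue().rstrip("\r\n")
```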
@@ -344,16 +344,16 @@ resource "google_dataflow_flex_template_job" "gcs_to_elasticsearch_xlang" { name = "gcs-to-elasticsearch-xlang" region = var.region parameters = { - deadletterTable = "your-project:your-dataset.your-table-name" + deadletterTable = "" inputFileSpec = "" - connectionUrl = "https://elasticsearch-host:9200" + connectionUrl = "" apiKey = "" - index = "my-index" + index = "" # inputFormat = "csv" # containsHeaders = "false" - # delimiter = "," + # delimiter = "" # csvFormat = "Default" - # jsonSchemaPath = "gs://path/to/schema" + # jsonSchemaPath = "" # largeNumFiles = "false" # csvFileEncoding = "UTF-8" # logDetailedCsvConversionErrors = "false" @@ -377,12 +377,12 @@ resource "google_dataflow_flex_template_job" "gcs_to_elasticsearch_xlang" { # bulkInsertMethod = "CREATE" # trustSelfSignedCerts = "false" # disableCertificateValidation = "false" - # apiKeyKMSEncryptionKey = "projects/your-project-id/locations/global/keyRings/your-keyring/cryptoKeys/your-key-name" - # apiKeySecretId = "projects/your-project-id/secrets/your-secret/versions/your-secret-version" + # apiKeyKMSEncryptionKey = "" + # apiKeySecretId = "" # apiKeySource = "PLAINTEXT" # socketTimeout = "" - # pythonExternalTextTransformGcsPath = "gs://your-bucket/your-function.py" - # pythonExternalTextTransformFunctionName = "'transform' or 'transform_udf1'" + # pythonExternalTextTransformGcsPath = "" + # pythonExternalTextTransformFunctionName = "" } } ``` diff --git a/v2/googlecloud-to-elasticsearch/README_PubSub_to_Elasticsearch_Flex.md b/v2/googlecloud-to-elasticsearch/README_PubSub_to_Elasticsearch_Flex.md index 0d1686bf5e..e0186245a7 100644 --- a/v2/googlecloud-to-elasticsearch/README_PubSub_to_Elasticsearch_Flex.md +++ b/v2/googlecloud-to-elasticsearch/README_PubSub_to_Elasticsearch_Flex.md @@ -29,43 +29,43 @@ on [Metadata Annotations](https://github.com/GoogleCloudPlatform/DataflowTemplat ### Required parameters -* **inputSubscription** : Pub/Sub subscription to consume the input from. Name should be in the format of 'projects/your-project-id/subscriptions/your-subscription-name' (Example: projects/your-project-id/subscriptions/your-subscription-name). -* **errorOutputTopic** : Pub/Sub output topic for publishing failed records in the format of 'projects/your-project-id/topics/your-topic-name'. -* **connectionUrl** : The Elasticsearch URL in the format https://hostname:[port]. If using Elastic Cloud, specify the CloudID. (Example: https://elasticsearch-host:9200). -* **apiKey** : The Base64-encoded API key to use for authentication. +* **inputSubscription**: Pub/Sub subscription to consume the input from. For example, `projects//subscriptions/`. +* **errorOutputTopic**: The Pub/Sub output topic for publishing failed records, in the format of `projects//topics/`. +* **connectionUrl**: The Elasticsearch URL in the format `https://hostname:[port]`. If using Elastic Cloud, specify the CloudID. For example, `https://elasticsearch-host:9200`. +* **apiKey**: The Base64-encoded API key to use for authentication. ### Optional parameters -* **dataset** : The type of logs sent using Pub/Sub, for which we have an out-of-the-box dashboard. Known log types values are audit, vpcflow and firewall. Default 'pubsub'. -* **namespace** : An arbitrary grouping, such as an environment (dev, prod, or qa), a team, or a strategic business unit. Default: 'default'. -* **elasticsearchTemplateVersion** : Dataflow Template Version Identifier, usually defined by Google Cloud. Defaults to: 1.0.0. 
-* **javascriptTextTransformGcsPath** : The Cloud Storage URI of the .js file that defines the JavaScript user-defined function (UDF) to use. (Example: gs://my-bucket/my-udfs/my_file.js). -* **javascriptTextTransformFunctionName** : The name of the JavaScript user-defined function (UDF) to use. For example, if your JavaScript function code is `myTransform(inJson) { /*...do stuff...*/ }`, then the function name is `myTransform`. For sample JavaScript UDFs, see UDF Examples (https://github.com/GoogleCloudPlatform/DataflowTemplates#udf-examples). -* **javascriptTextTransformReloadIntervalMinutes** : Specifies how frequently to reload the UDF, in minutes. If the value is greater than 0, Dataflow periodically checks the UDF file in Cloud Storage, and reloads the UDF if the file is modified. This parameter allows you to update the UDF while the pipeline is running, without needing to restart the job. If the value is 0, UDF reloading is disabled. The default value is 0. -* **elasticsearchUsername** : The Elasticsearch username to authenticate with. If specified, the value of 'apiKey' is ignored. -* **elasticsearchPassword** : The Elasticsearch password to authenticate with. If specified, the value of 'apiKey' is ignored. -* **batchSize** : The batch size in number of documents. Defaults to: 1000. -* **batchSizeBytes** : The batch size in number of bytes. Defaults to: 5242880 (5mb). -* **maxRetryAttempts** : The maximum number of retry attempts. Must be greater than zero. Defaults to: no retries. -* **maxRetryDuration** : The maximum retry duration in milliseconds. Must be greater than zero. Defaults to: no retries. -* **propertyAsIndex** : The property in the document being indexed whose value specifies `_index` metadata to include with the document in bulk requests. Takes precedence over an `_index` UDF. Defaults to: none. -* **javaScriptIndexFnGcsPath** : The Cloud Storage path to the JavaScript UDF source for a function that specifies `_index` metadata to include with the document in bulk requests. Defaults to: none. -* **javaScriptIndexFnName** : The name of the UDF JavaScript function that specifies `_index` metadata to include with the document in bulk requests. Defaults to: none. -* **propertyAsId** : A property in the document being indexed whose value specifies `_id` metadata to include with the document in bulk requests. Takes precedence over an `_id` UDF. Defaults to: none. -* **javaScriptIdFnGcsPath** : The Cloud Storage path to the JavaScript UDF source for the function that specifies `_id` metadata to include with the document in bulk requests. Defaults to: none. -* **javaScriptIdFnName** : The name of the UDF JavaScript function that specifies the `_id` metadata to include with the document in bulk requests. Defaults to: none. -* **javaScriptTypeFnGcsPath** : The Cloud Storage path to the JavaScript UDF source for a function that specifies `_type` metadata to include with documents in bulk requests. Default: none. -* **javaScriptTypeFnName** : The name of the UDF JavaScript function that specifies the `_type` metadata to include with the document in bulk requests. Defaults to: none. -* **javaScriptIsDeleteFnGcsPath** : The Cloud Storage path to the JavaScript UDF source for the function that determines whether to delete the document instead of inserting or updating it. The function returns a string value of `true` or `false`. Defaults to: none. 
-* **javaScriptIsDeleteFnName** : The name of the UDF JavaScript function that determines whether to delete the document instead of inserting or updating it. The function returns a string value of `true` or `false`. Defaults to: none. -* **usePartialUpdate** : Whether to use partial updates (update rather than create or index, allowing partial documents) with Elasticsearch requests. Defaults to: false. -* **bulkInsertMethod** : Whether to use `INDEX` (index, allows upserts) or `CREATE` (create, errors on duplicate _id) with Elasticsearch bulk requests. Defaults to: CREATE. -* **trustSelfSignedCerts** : Whether to trust self-signed certificate or not. An Elasticsearch instance installed might have a self-signed certificate, Enable this to True to by-pass the validation on SSL certificate. (default is False). -* **disableCertificateValidation** : If 'true', trust the self-signed SSL certificate. An Elasticsearch instance might have a self-signed certificate. To bypass validation for the certificate, set this parameter to 'true'. Default: false. -* **apiKeyKMSEncryptionKey** : The Cloud KMS key to decrypt the API key. This parameter must be provided if the apiKeySource is set to KMS. If this parameter is provided, apiKey string should be passed in encrypted. Encrypt parameters using the KMS API encrypt endpoint. The Key should be in the format projects/{gcp_project}/locations/{key_region}/keyRings/{key_ring}/cryptoKeys/{kms_key_name}. See: https://cloud.google.com/kms/docs/reference/rest/v1/projects.locations.keyRings.cryptoKeys/encrypt (Example: projects/your-project-id/locations/global/keyRings/your-keyring/cryptoKeys/your-key-name). -* **apiKeySecretId** : Secret Manager secret ID for the apiKey. This parameter should be provided if the apiKeySource is set to SECRET_MANAGER. Should be in the format projects/{project}/secrets/{secret}/versions/{secret_version}. (Example: projects/your-project-id/secrets/your-secret/versions/your-secret-version). -* **apiKeySource** : Source of the API key. One of PLAINTEXT, KMS or SECRET_MANAGER. This parameter must be provided if secret manager or KMS is used. If apiKeySource is set to KMS, apiKeyKMSEncryptionKey and encrypted apiKey must be provided. If apiKeySource is set to SECRET_MANAGER, apiKeySecretId must be provided. If apiKeySource is set to PLAINTEXT, apiKey must be provided. Defaults to: PLAINTEXT. -* **socketTimeout** : If set, overwrites the default max retry timeout and default socket timeout (30000ms) in the Elastic RestClient. +* **dataset**: The type of logs sent using Pub/Sub, for which we have an out-of-the-box dashboard. Known log types values are `audit`, `vpcflow`, and `firewall`. Defaults to: `pubsub`. +* **namespace**: An arbitrary grouping, such as an environment (dev, prod, or qa), a team, or a strategic business unit. Defaults to: `default`. +* **elasticsearchTemplateVersion**: Dataflow Template Version Identifier, usually defined by Google Cloud. Defaults to: 1.0.0. +* **javascriptTextTransformGcsPath**: The Cloud Storage URI of the .js file that defines the JavaScript user-defined function (UDF) to use. For example, `gs://my-bucket/my-udfs/my_file.js`. +* **javascriptTextTransformFunctionName**: The name of the JavaScript user-defined function (UDF) to use. For example, if your JavaScript function code is `myTransform(inJson) { /*...do stuff...*/ }`, then the function name is `myTransform`. For sample JavaScript UDFs, see UDF Examples (https://github.com/GoogleCloudPlatform/DataflowTemplates#udf-examples). 
+* **javascriptTextTransformReloadIntervalMinutes**: Specifies how frequently to reload the UDF, in minutes. If the value is greater than 0, Dataflow periodically checks the UDF file in Cloud Storage, and reloads the UDF if the file is modified. This parameter allows you to update the UDF while the pipeline is running, without needing to restart the job. If the value is `0`, UDF reloading is disabled. The default value is `0`. +* **elasticsearchUsername**: The Elasticsearch username to authenticate with. If specified, the value of `apiKey` is ignored. +* **elasticsearchPassword**: The Elasticsearch password to authenticate with. If specified, the value of `apiKey` is ignored. +* **batchSize**: The batch size in number of documents. Defaults to `1000`. +* **batchSizeBytes**: The batch size in number of bytes. Defaults to `5242880` (5mb). +* **maxRetryAttempts**: The maximum number of retry attempts. Must be greater than zero. Defaults to `no retries`. +* **maxRetryDuration**: The maximum retry duration in milliseconds. Must be greater than zero. Defaults to `no retries`. +* **propertyAsIndex**: The property in the document being indexed whose value specifies `_index` metadata to include with the document in bulk requests. Takes precedence over an `_index` UDF. Defaults to `none`. +* **javaScriptIndexFnGcsPath**: The Cloud Storage path to the JavaScript UDF source for a function that specifies `_index` metadata to include with the document in bulk requests. Defaults to `none`. +* **javaScriptIndexFnName**: The name of the UDF JavaScript function that specifies `_index` metadata to include with the document in bulk requests. Defaults to `none`. +* **propertyAsId**: A property in the document being indexed whose value specifies `_id` metadata to include with the document in bulk requests. Takes precedence over an `_id` UDF. Defaults to `none`. +* **javaScriptIdFnGcsPath**: The Cloud Storage path to the JavaScript UDF source for the function that specifies `_id` metadata to include with the document in bulk requests. Defaults to `none`. +* **javaScriptIdFnName**: The name of the UDF JavaScript function that specifies the `_id` metadata to include with the document in bulk requests. Defaults to `none`. +* **javaScriptTypeFnGcsPath**: The Cloud Storage path to the JavaScript UDF source for a function that specifies `_type` metadata to include with documents in bulk requests. Defaults to `none`. +* **javaScriptTypeFnName**: The name of the UDF JavaScript function that specifies the `_type` metadata to include with the document in bulk requests. Defaults to `none`. +* **javaScriptIsDeleteFnGcsPath**: The Cloud Storage path to the JavaScript UDF source for the function that determines whether to delete the document instead of inserting or updating it. The function returns a string value of `true` or `false`. Defaults to `none`. +* **javaScriptIsDeleteFnName**: The name of the UDF JavaScript function that determines whether to delete the document instead of inserting or updating it. The function returns a string value of `true` or `false`. Defaults to `none`. +* **usePartialUpdate**: Whether to use partial updates (update rather than create or index, allowing partial documents) with Elasticsearch requests. Defaults to `false`. +* **bulkInsertMethod**: Whether to use `INDEX` (index, allows upserts) or `CREATE` (create, errors on duplicate _id) with Elasticsearch bulk requests. Defaults to `CREATE`. +* **trustSelfSignedCerts**: Whether to trust self-signed certificate or not. 
An Elasticsearch instance might have a self-signed certificate. To bypass SSL certificate validation, set this parameter to `true`. Defaults to `false`.
+* **disableCertificateValidation**: If `true`, trust the self-signed SSL certificate. An Elasticsearch instance might have a self-signed certificate. To bypass validation for the certificate, set this parameter to `true`. Defaults to `false`.
+* **apiKeyKMSEncryptionKey**: The Cloud KMS key to decrypt the API key. This parameter is required if the `apiKeySource` is set to `KMS`. If this parameter is provided, pass in an encrypted `apiKey` string. Encrypt parameters using the KMS API encrypt endpoint. For the key, use the format `projects//locations//keyRings//cryptoKeys/`. See: https://cloud.google.com/kms/docs/reference/rest/v1/projects.locations.keyRings.cryptoKeys/encrypt. For example, `projects/your-project-id/locations/global/keyRings/your-keyring/cryptoKeys/your-key-name`.
+* **apiKeySecretId**: The Secret Manager secret ID for the `apiKey`. If the `apiKeySource` is set to `SECRET_MANAGER`, provide this parameter. Use the format `projects//secrets//versions/`. For example, `projects/your-project-id/secrets/your-secret/versions/your-secret-version`.
+* **apiKeySource**: The source of the API key. Allowed values are `PLAINTEXT`, `KMS`, and `SECRET_MANAGER`. This parameter is required when you use Secret Manager or KMS. If `apiKeySource` is set to `KMS`, `apiKeyKMSEncryptionKey` and an encrypted `apiKey` must be provided. If `apiKeySource` is set to `SECRET_MANAGER`, `apiKeySecretId` must be provided. If `apiKeySource` is set to `PLAINTEXT`, `apiKey` must be provided. Defaults to `PLAINTEXT`.
+* **socketTimeout**: If set, overwrites the default max retry timeout and default socket timeout (30000ms) in the Elastic RestClient.
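The optional parameters above are generated from the `@TemplateParameter` metadata annotations on the template's options interface. As a rough illustration only (the interface name, order value, and import path are assumptions, not this template's actual source), an optional string parameter with a default is declared like this:

```java
import com.google.cloud.teleport.metadata.TemplateParameter;
import org.apache.beam.sdk.options.Default;
import org.apache.beam.sdk.options.PipelineOptions;

// Sketch only: a hypothetical options interface showing how `description`,
// `helpText`, and a default value correspond to one "Optional parameters"
// bullet in the generated README.
public interface ExampleElasticsearchOptions extends PipelineOptions {

  @TemplateParameter.Text(
      order = 1,
      optional = true,
      description = "Username for Elasticsearch endpoint",
      helpText =
          "The Elasticsearch username to authenticate with. If specified, the value of `apiKey` is ignored.")
  @Default.String("")
  String getElasticsearchUsername();

  void setElasticsearchUsername(String value);
}
```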
## User-Defined functions (UDFs) @@ -336,14 +336,14 @@ resource "google_dataflow_flex_template_job" "pubsub_to_elasticsearch_flex" { name = "pubsub-to-elasticsearch-flex" region = var.region parameters = { - inputSubscription = "projects/your-project-id/subscriptions/your-subscription-name" + inputSubscription = "" errorOutputTopic = "" - connectionUrl = "https://elasticsearch-host:9200" + connectionUrl = "" apiKey = "" # dataset = "PUBSUB" # namespace = "default" # elasticsearchTemplateVersion = "1.0.0" - # javascriptTextTransformGcsPath = "gs://my-bucket/my-udfs/my_file.js" + # javascriptTextTransformGcsPath = "" # javascriptTextTransformFunctionName = "" # javascriptTextTransformReloadIntervalMinutes = "0" # elasticsearchUsername = "" @@ -366,8 +366,8 @@ resource "google_dataflow_flex_template_job" "pubsub_to_elasticsearch_flex" { # bulkInsertMethod = "CREATE" # trustSelfSignedCerts = "false" # disableCertificateValidation = "false" - # apiKeyKMSEncryptionKey = "projects/your-project-id/locations/global/keyRings/your-keyring/cryptoKeys/your-key-name" - # apiKeySecretId = "projects/your-project-id/secrets/your-secret/versions/your-secret-version" + # apiKeyKMSEncryptionKey = "" + # apiKeySecretId = "" # apiKeySource = "PLAINTEXT" # socketTimeout = "" } diff --git a/v2/googlecloud-to-elasticsearch/README_PubSub_to_Elasticsearch_Xlang.md b/v2/googlecloud-to-elasticsearch/README_PubSub_to_Elasticsearch_Xlang.md index 2be0f0271b..13d30d899b 100644 --- a/v2/googlecloud-to-elasticsearch/README_PubSub_to_Elasticsearch_Xlang.md +++ b/v2/googlecloud-to-elasticsearch/README_PubSub_to_Elasticsearch_Xlang.md @@ -30,42 +30,42 @@ on [Metadata Annotations](https://github.com/GoogleCloudPlatform/DataflowTemplat ### Required parameters -* **inputSubscription** : Pub/Sub subscription to consume the input from. Name should be in the format of 'projects/your-project-id/subscriptions/your-subscription-name' (Example: projects/your-project-id/subscriptions/your-subscription-name). -* **errorOutputTopic** : Pub/Sub output topic for publishing failed records in the format of 'projects/your-project-id/topics/your-topic-name'. -* **connectionUrl** : The Elasticsearch URL in the format https://hostname:[port]. If using Elastic Cloud, specify the CloudID. (Example: https://elasticsearch-host:9200). -* **apiKey** : The Base64-encoded API key to use for authentication. +* **inputSubscription**: Pub/Sub subscription to consume the input from. For example, `projects//subscriptions/`. +* **errorOutputTopic**: The Pub/Sub output topic for publishing failed records, in the format of `projects//topics/`. +* **connectionUrl**: The Elasticsearch URL in the format `https://hostname:[port]`. If using Elastic Cloud, specify the CloudID. For example, `https://elasticsearch-host:9200`. +* **apiKey**: The Base64-encoded API key to use for authentication. ### Optional parameters -* **dataset** : The type of logs sent using Pub/Sub, for which we have an out-of-the-box dashboard. Known log types values are audit, vpcflow and firewall. Default 'pubsub'. -* **namespace** : An arbitrary grouping, such as an environment (dev, prod, or qa), a team, or a strategic business unit. Default: 'default'. -* **elasticsearchTemplateVersion** : Dataflow Template Version Identifier, usually defined by Google Cloud. Defaults to: 1.0.0. -* **pythonExternalTextTransformGcsPath** : The Cloud Storage path pattern for the Python code containing your user-defined functions. (Example: gs://your-bucket/your-function.py). 
-* **pythonExternalTextTransformFunctionName** : The name of the function to call from your Python file. Use only letters, digits, and underscores. (Example: 'transform' or 'transform_udf1'). -* **elasticsearchUsername** : The Elasticsearch username to authenticate with. If specified, the value of 'apiKey' is ignored. -* **elasticsearchPassword** : The Elasticsearch password to authenticate with. If specified, the value of 'apiKey' is ignored. -* **batchSize** : The batch size in number of documents. Defaults to: 1000. -* **batchSizeBytes** : The batch size in number of bytes. Defaults to: 5242880 (5mb). -* **maxRetryAttempts** : The maximum number of retry attempts. Must be greater than zero. Defaults to: no retries. -* **maxRetryDuration** : The maximum retry duration in milliseconds. Must be greater than zero. Defaults to: no retries. -* **propertyAsIndex** : The property in the document being indexed whose value specifies `_index` metadata to include with the document in bulk requests. Takes precedence over an `_index` UDF. Defaults to: none. -* **javaScriptIndexFnGcsPath** : The Cloud Storage path to the JavaScript UDF source for a function that specifies `_index` metadata to include with the document in bulk requests. Defaults to: none. -* **javaScriptIndexFnName** : The name of the UDF JavaScript function that specifies `_index` metadata to include with the document in bulk requests. Defaults to: none. -* **propertyAsId** : A property in the document being indexed whose value specifies `_id` metadata to include with the document in bulk requests. Takes precedence over an `_id` UDF. Defaults to: none. -* **javaScriptIdFnGcsPath** : The Cloud Storage path to the JavaScript UDF source for the function that specifies `_id` metadata to include with the document in bulk requests. Defaults to: none. -* **javaScriptIdFnName** : The name of the UDF JavaScript function that specifies the `_id` metadata to include with the document in bulk requests. Defaults to: none. -* **javaScriptTypeFnGcsPath** : The Cloud Storage path to the JavaScript UDF source for a function that specifies `_type` metadata to include with documents in bulk requests. Default: none. -* **javaScriptTypeFnName** : The name of the UDF JavaScript function that specifies the `_type` metadata to include with the document in bulk requests. Defaults to: none. -* **javaScriptIsDeleteFnGcsPath** : The Cloud Storage path to the JavaScript UDF source for the function that determines whether to delete the document instead of inserting or updating it. The function returns a string value of `true` or `false`. Defaults to: none. -* **javaScriptIsDeleteFnName** : The name of the UDF JavaScript function that determines whether to delete the document instead of inserting or updating it. The function returns a string value of `true` or `false`. Defaults to: none. -* **usePartialUpdate** : Whether to use partial updates (update rather than create or index, allowing partial documents) with Elasticsearch requests. Defaults to: false. -* **bulkInsertMethod** : Whether to use `INDEX` (index, allows upserts) or `CREATE` (create, errors on duplicate _id) with Elasticsearch bulk requests. Defaults to: CREATE. -* **trustSelfSignedCerts** : Whether to trust self-signed certificate or not. An Elasticsearch instance installed might have a self-signed certificate, Enable this to True to by-pass the validation on SSL certificate. (default is False). -* **disableCertificateValidation** : If 'true', trust the self-signed SSL certificate. 
An Elasticsearch instance might have a self-signed certificate. To bypass validation for the certificate, set this parameter to 'true'. Default: false. -* **apiKeyKMSEncryptionKey** : The Cloud KMS key to decrypt the API key. This parameter must be provided if the apiKeySource is set to KMS. If this parameter is provided, apiKey string should be passed in encrypted. Encrypt parameters using the KMS API encrypt endpoint. The Key should be in the format projects/{gcp_project}/locations/{key_region}/keyRings/{key_ring}/cryptoKeys/{kms_key_name}. See: https://cloud.google.com/kms/docs/reference/rest/v1/projects.locations.keyRings.cryptoKeys/encrypt (Example: projects/your-project-id/locations/global/keyRings/your-keyring/cryptoKeys/your-key-name). -* **apiKeySecretId** : Secret Manager secret ID for the apiKey. This parameter should be provided if the apiKeySource is set to SECRET_MANAGER. Should be in the format projects/{project}/secrets/{secret}/versions/{secret_version}. (Example: projects/your-project-id/secrets/your-secret/versions/your-secret-version). -* **apiKeySource** : Source of the API key. One of PLAINTEXT, KMS or SECRET_MANAGER. This parameter must be provided if secret manager or KMS is used. If apiKeySource is set to KMS, apiKeyKMSEncryptionKey and encrypted apiKey must be provided. If apiKeySource is set to SECRET_MANAGER, apiKeySecretId must be provided. If apiKeySource is set to PLAINTEXT, apiKey must be provided. Defaults to: PLAINTEXT. -* **socketTimeout** : If set, overwrites the default max retry timeout and default socket timeout (30000ms) in the Elastic RestClient. +* **dataset**: The type of logs sent using Pub/Sub, for which we have an out-of-the-box dashboard. Known log types values are `audit`, `vpcflow`, and `firewall`. Defaults to: `pubsub`. +* **namespace**: An arbitrary grouping, such as an environment (dev, prod, or qa), a team, or a strategic business unit. Defaults to: `default`. +* **elasticsearchTemplateVersion**: Dataflow Template Version Identifier, usually defined by Google Cloud. Defaults to: 1.0.0. +* **pythonExternalTextTransformGcsPath**: The Cloud Storage path pattern for the Python code containing your user-defined functions. For example, `gs://your-bucket/your-function.py`. +* **pythonExternalTextTransformFunctionName**: The name of the function to call from your Python file. Use only letters, digits, and underscores. For example, `'transform' or 'transform_udf1'`. +* **elasticsearchUsername**: The Elasticsearch username to authenticate with. If specified, the value of `apiKey` is ignored. +* **elasticsearchPassword**: The Elasticsearch password to authenticate with. If specified, the value of `apiKey` is ignored. +* **batchSize**: The batch size in number of documents. Defaults to `1000`. +* **batchSizeBytes**: The batch size in number of bytes. Defaults to `5242880` (5mb). +* **maxRetryAttempts**: The maximum number of retry attempts. Must be greater than zero. Defaults to `no retries`. +* **maxRetryDuration**: The maximum retry duration in milliseconds. Must be greater than zero. Defaults to `no retries`. +* **propertyAsIndex**: The property in the document being indexed whose value specifies `_index` metadata to include with the document in bulk requests. Takes precedence over an `_index` UDF. Defaults to `none`. +* **javaScriptIndexFnGcsPath**: The Cloud Storage path to the JavaScript UDF source for a function that specifies `_index` metadata to include with the document in bulk requests. Defaults to `none`. 
+* **javaScriptIndexFnName**: The name of the UDF JavaScript function that specifies `_index` metadata to include with the document in bulk requests. Defaults to `none`.
+* **propertyAsId**: A property in the document being indexed whose value specifies `_id` metadata to include with the document in bulk requests. Takes precedence over an `_id` UDF. Defaults to `none`.
+* **javaScriptIdFnGcsPath**: The Cloud Storage path to the JavaScript UDF source for the function that specifies `_id` metadata to include with the document in bulk requests. Defaults to `none`.
+* **javaScriptIdFnName**: The name of the UDF JavaScript function that specifies the `_id` metadata to include with the document in bulk requests. Defaults to `none`.
+* **javaScriptTypeFnGcsPath**: The Cloud Storage path to the JavaScript UDF source for a function that specifies `_type` metadata to include with documents in bulk requests. Defaults to `none`.
+* **javaScriptTypeFnName**: The name of the UDF JavaScript function that specifies the `_type` metadata to include with the document in bulk requests. Defaults to `none`.
+* **javaScriptIsDeleteFnGcsPath**: The Cloud Storage path to the JavaScript UDF source for the function that determines whether to delete the document instead of inserting or updating it. The function returns a string value of `true` or `false`. Defaults to `none`.
+* **javaScriptIsDeleteFnName**: The name of the UDF JavaScript function that determines whether to delete the document instead of inserting or updating it. The function returns a string value of `true` or `false`. Defaults to `none`.
+* **usePartialUpdate**: Whether to use partial updates (update rather than create or index, allowing partial documents) with Elasticsearch requests. Defaults to `false`.
+* **bulkInsertMethod**: Whether to use `INDEX` (index, allows upserts) or `CREATE` (create, errors on duplicate _id) with Elasticsearch bulk requests. Defaults to `CREATE`.
+* **trustSelfSignedCerts**: Whether to trust the self-signed certificate. An Elasticsearch instance might have a self-signed certificate. To bypass SSL certificate validation, set this parameter to `true`. Defaults to `false`.
+* **disableCertificateValidation**: If `true`, trust the self-signed SSL certificate. An Elasticsearch instance might have a self-signed certificate. To bypass validation for the certificate, set this parameter to `true`. Defaults to `false`.
+* **apiKeyKMSEncryptionKey**: The Cloud KMS key to decrypt the API key. This parameter is required if the `apiKeySource` is set to `KMS`. If this parameter is provided, pass in an encrypted `apiKey` string. Encrypt parameters using the KMS API encrypt endpoint. For the key, use the format `projects//locations//keyRings//cryptoKeys/`. See: https://cloud.google.com/kms/docs/reference/rest/v1/projects.locations.keyRings.cryptoKeys/encrypt. For example, `projects/your-project-id/locations/global/keyRings/your-keyring/cryptoKeys/your-key-name`.
+* **apiKeySecretId**: The Secret Manager secret ID for the `apiKey`. If the `apiKeySource` is set to `SECRET_MANAGER`, provide this parameter. Use the format `projects//secrets//versions/`. For example, `projects/your-project-id/secrets/your-secret/versions/your-secret-version`.
+* **apiKeySource**: The source of the API key. Allowed values are `PLAINTEXT`, `KMS`, and `SECRET_MANAGER`. This parameter is required when you use Secret Manager or KMS. If `apiKeySource` is set to `KMS`, `apiKeyKMSEncryptionKey` and an encrypted `apiKey` must be provided.
If `apiKeySource` is set to `SECRET_MANAGER`, `apiKeySecretId` must be provided. If `apiKeySource` is set to `PLAINTEXT`, `apiKey` must be provided. Defaults to: PLAINTEXT. +* **socketTimeout**: If set, overwrites the default max retry timeout and default socket timeout (30000ms) in the Elastic RestClient. @@ -323,15 +323,15 @@ resource "google_dataflow_flex_template_job" "pubsub_to_elasticsearch_xlang" { name = "pubsub-to-elasticsearch-xlang" region = var.region parameters = { - inputSubscription = "projects/your-project-id/subscriptions/your-subscription-name" + inputSubscription = "" errorOutputTopic = "" - connectionUrl = "https://elasticsearch-host:9200" + connectionUrl = "" apiKey = "" # dataset = "PUBSUB" # namespace = "default" # elasticsearchTemplateVersion = "1.0.0" - # pythonExternalTextTransformGcsPath = "gs://your-bucket/your-function.py" - # pythonExternalTextTransformFunctionName = "'transform' or 'transform_udf1'" + # pythonExternalTextTransformGcsPath = "" + # pythonExternalTextTransformFunctionName = "" # elasticsearchUsername = "" # elasticsearchPassword = "" # batchSize = "1000" @@ -352,8 +352,8 @@ resource "google_dataflow_flex_template_job" "pubsub_to_elasticsearch_xlang" { # bulkInsertMethod = "CREATE" # trustSelfSignedCerts = "false" # disableCertificateValidation = "false" - # apiKeyKMSEncryptionKey = "projects/your-project-id/locations/global/keyRings/your-keyring/cryptoKeys/your-key-name" - # apiKeySecretId = "projects/your-project-id/secrets/your-secret/versions/your-secret-version" + # apiKeyKMSEncryptionKey = "" + # apiKeySecretId = "" # apiKeySource = "PLAINTEXT" # socketTimeout = "" } diff --git a/v2/googlecloud-to-elasticsearch/src/main/java/com/google/cloud/teleport/v2/elasticsearch/options/GCSToElasticsearchOptions.java b/v2/googlecloud-to-elasticsearch/src/main/java/com/google/cloud/teleport/v2/elasticsearch/options/GCSToElasticsearchOptions.java index b5d1e728db..db0d822d10 100644 --- a/v2/googlecloud-to-elasticsearch/src/main/java/com/google/cloud/teleport/v2/elasticsearch/options/GCSToElasticsearchOptions.java +++ b/v2/googlecloud-to-elasticsearch/src/main/java/com/google/cloud/teleport/v2/elasticsearch/options/GCSToElasticsearchOptions.java @@ -45,7 +45,7 @@ public interface GCSToElasticsearchOptions optional = true, regexes = {"[a-zA-Z0-9._-]+"}, description = "Input file format", - helpText = "Input file format. Default is: CSV") + helpText = "The input file format. Defaults to `CSV`.") @Default.String("csv") String getInputFormat(); diff --git a/v2/googlecloud-to-elasticsearch/src/main/java/com/google/cloud/teleport/v2/elasticsearch/options/PubSubToElasticsearchOptions.java b/v2/googlecloud-to-elasticsearch/src/main/java/com/google/cloud/teleport/v2/elasticsearch/options/PubSubToElasticsearchOptions.java index bc74f753af..df6fc13ef6 100644 --- a/v2/googlecloud-to-elasticsearch/src/main/java/com/google/cloud/teleport/v2/elasticsearch/options/PubSubToElasticsearchOptions.java +++ b/v2/googlecloud-to-elasticsearch/src/main/java/com/google/cloud/teleport/v2/elasticsearch/options/PubSubToElasticsearchOptions.java @@ -38,9 +38,8 @@ public interface PubSubToElasticsearchOptions order = 1, groupName = "Source", description = "Pub/Sub input subscription", - helpText = - "Pub/Sub subscription to consume the input from. 
Name should be in the format of 'projects/your-project-id/subscriptions/your-subscription-name'", - example = "projects/your-project-id/subscriptions/your-subscription-name") + helpText = "Pub/Sub subscription to consume the input from.", + example = "projects//subscriptions/") @Validation.Required String getInputSubscription(); @@ -52,7 +51,7 @@ public interface PubSubToElasticsearchOptions description = "Dataset, the type of logs that are sent to Pub/Sub", helpText = "The type of logs sent using Pub/Sub, for which we have an out-of-the-box dashboard. Known " - + "log types values are audit, vpcflow and firewall. Default 'pubsub'") + + "log types values are `audit`, `vpcflow`, and `firewall`. Defaults to: `pubsub`.") @Default.Enum("PUBSUB") Dataset getDataset(); @@ -63,7 +62,7 @@ public interface PubSubToElasticsearchOptions optional = true, description = "The namespace for dataset.", helpText = - "An arbitrary grouping, such as an environment (dev, prod, or qa), a team, or a strategic business unit. Default: 'default'") + "An arbitrary grouping, such as an environment (dev, prod, or qa), a team, or a strategic business unit. Defaults to: `default`.") @Default.String("default") String getNamespace(); @@ -73,7 +72,7 @@ public interface PubSubToElasticsearchOptions order = 4, description = "Output deadletter Pub/Sub topic", helpText = - "Pub/Sub output topic for publishing failed records in the format of 'projects/your-project-id/topics/your-topic-name'.") + "The Pub/Sub output topic for publishing failed records, in the format of `projects//topics/`.") @Validation.Required String getErrorOutputTopic(); diff --git a/v2/googlecloud-to-googlecloud/README_Bigtable_Change_Streams_to_BigQuery.md b/v2/googlecloud-to-googlecloud/README_Bigtable_Change_Streams_to_BigQuery.md index be97deef86..eda5430c38 100644 --- a/v2/googlecloud-to-googlecloud/README_Bigtable_Change_Streams_to_BigQuery.md +++ b/v2/googlecloud-to-googlecloud/README_Bigtable_Change_Streams_to_BigQuery.md @@ -17,31 +17,31 @@ on [Metadata Annotations](https://github.com/GoogleCloudPlatform/DataflowTemplat ### Required parameters -* **bigQueryDataset** : The dataset name of the destination BigQuery table. -* **bigtableChangeStreamAppProfile** : The Bigtable application profile ID. The application profile must use single-cluster routing and allow single-row transactions. -* **bigtableReadInstanceId** : The source Bigtable instance ID. -* **bigtableReadTableId** : The source Bigtable table ID. +* **bigQueryDataset**: The dataset name of the destination BigQuery table. +* **bigtableChangeStreamAppProfile**: The Bigtable application profile ID. The application profile must use single-cluster routing and allow single-row transactions. +* **bigtableReadInstanceId**: The source Bigtable instance ID. +* **bigtableReadTableId**: The source Bigtable table ID. ### Optional parameters -* **writeRowkeyAsBytes** : Whether to write rowkeys as BigQuery `BYTES`. When set to `true`, row keys are written to the `BYTES` column. Otherwise, rowkeys are written to the `STRING` column. Defaults to `false`. -* **writeValuesAsBytes** : When set true values are written to BYTES column, otherwise to STRING column. Defaults to false. -* **writeNumericTimestamps** : Whether to write the Bigtable timestamp as BigQuery `INT64`. When set to true, values are written to the `INT64` column. Otherwise, values are written to the `TIMESTAMP` column. Columns affected: `timestamp`, `timestamp_from`, and `timestamp_to`. Defaults to `false`. 
When set to `true`, the time is measured in microseconds since the Unix epoch (January 1, 1970 at UTC). -* **bigQueryProjectId** : The BigQuery dataset project ID. The default is the project for the Dataflow job. -* **bigQueryChangelogTableName** : Destination BigQuery table name. If not specified, the value `bigtableReadTableId + "_changelog"` is used. Defaults to empty. -* **bigQueryChangelogTablePartitionGranularity** : Specifies a granularity for partitioning the changelog table. When set, the table is partitioned. Use one of the following supported values: `HOUR`, `DAY`, `MONTH`, or `YEAR`. By default, the table isn't partitioned. -* **bigQueryChangelogTablePartitionExpirationMs** : Sets the changelog table partition expiration time, in milliseconds. When set to true, partitions older than the specified number of milliseconds are deleted. By default, no expiration is set. -* **bigQueryChangelogTableFieldsToIgnore** : A comma-separated list of the changelog columns that, when specified, aren't created and populated. Use one of the following supported values: `is_gc`, `source_instance`, `source_cluster`, `source_table`, `tiebreaker`, or `big_query_commit_timestamp`. By default, all columns are populated. -* **dlqDirectory** : The directory to use for the dead-letter queue. Records that fail to be processed are stored in this directory. The default is a directory under the Dataflow job's temp location. In most cases, you can use the default path. -* **bigtableChangeStreamMetadataInstanceId** : The Bigtable change streams metadata instance ID. Defaults to empty. -* **bigtableChangeStreamMetadataTableTableId** : The ID of the Bigtable change streams connector metadata table. If not provided, a Bigtable change streams connector metadata table is automatically created during pipeline execution. Defaults to empty. -* **bigtableChangeStreamCharset** : The Bigtable change streams charset name. Defaults to: UTF-8. -* **bigtableChangeStreamStartTimestamp** : The starting timestamp (https://tools.ietf.org/html/rfc3339), inclusive, to use for reading change streams. For example, `2022-05-05T07:59:59Z`. Defaults to the timestamp of the pipeline start time. -* **bigtableChangeStreamIgnoreColumnFamilies** : A comma-separated list of column family name changes to ignore. Defaults to empty. -* **bigtableChangeStreamIgnoreColumns** : A comma-separated list of column name changes to ignore. Defaults to empty. -* **bigtableChangeStreamName** : A unique name for the client pipeline. Lets you resume processing from the point at which a previously running pipeline stopped. Defaults to an automatically generated name. See the Dataflow job logs for the value used. -* **bigtableChangeStreamResume** : When set to `true`, a new pipeline resumes processing from the point at which a previously running pipeline with the same `bigtableChangeStreamName` value stopped. If the pipeline with the given `bigtableChangeStreamName` value has never run, a new pipeline doesn't start. When set to `false`, a new pipeline starts. If a pipeline with the same `bigtableChangeStreamName` value has already run for the given source, a new pipeline doesn't start. Defaults to `false`. -* **bigtableReadProjectId** : The Bigtable project ID. The default is the project for the Dataflow job. +* **writeRowkeyAsBytes**: Whether to write rowkeys as BigQuery `BYTES`. When set to `true`, row keys are written to the `BYTES` column. Otherwise, rowkeys are written to the `STRING` column. Defaults to `false`. 
+* **writeValuesAsBytes**: When set to `true`, values are written to a column of type `BYTES`, otherwise to a column of type `STRING`. Defaults to `false`.
+* **writeNumericTimestamps**: Whether to write the Bigtable timestamp as BigQuery `INT64`. When set to `true`, values are written to the `INT64` column. Otherwise, values are written to the `TIMESTAMP` column. Columns affected: `timestamp`, `timestamp_from`, and `timestamp_to`. Defaults to `false`. When set to `true`, the time is measured in microseconds since the Unix epoch (January 1, 1970 at UTC).
+* **bigQueryProjectId**: The BigQuery dataset project ID. The default is the project for the Dataflow job.
+* **bigQueryChangelogTableName**: Destination BigQuery table name. If not specified, the value `bigtableReadTableId + "_changelog"` is used. Defaults to empty.
+* **bigQueryChangelogTablePartitionGranularity**: Specifies a granularity for partitioning the changelog table. When set, the table is partitioned. Use one of the following supported values: `HOUR`, `DAY`, `MONTH`, or `YEAR`. By default, the table isn't partitioned.
+* **bigQueryChangelogTablePartitionExpirationMs**: Sets the changelog table partition expiration time, in milliseconds. When set, partitions older than the specified number of milliseconds are deleted. By default, no expiration is set.
+* **bigQueryChangelogTableFieldsToIgnore**: A comma-separated list of the changelog columns that, when specified, aren't created and populated. Use one of the following supported values: `is_gc`, `source_instance`, `source_cluster`, `source_table`, `tiebreaker`, or `big_query_commit_timestamp`. By default, all columns are populated.
+* **dlqDirectory**: The directory to use for the dead-letter queue. Records that fail to be processed are stored in this directory. The default is a directory under the Dataflow job's temp location. In most cases, you can use the default path.
+* **bigtableChangeStreamMetadataInstanceId**: The Bigtable change streams metadata instance ID. Defaults to empty.
+* **bigtableChangeStreamMetadataTableTableId**: The ID of the Bigtable change streams connector metadata table. If not provided, a Bigtable change streams connector metadata table is automatically created during pipeline execution. Defaults to empty.
+* **bigtableChangeStreamCharset**: The Bigtable change streams charset name. Defaults to: UTF-8.
+* **bigtableChangeStreamStartTimestamp**: The starting timestamp (https://tools.ietf.org/html/rfc3339), inclusive, to use for reading change streams. For example, `2022-05-05T07:59:59Z`. Defaults to the timestamp of the pipeline start time.
+* **bigtableChangeStreamIgnoreColumnFamilies**: A comma-separated list of column family name changes to ignore. Defaults to empty.
+* **bigtableChangeStreamIgnoreColumns**: A comma-separated list of column name changes to ignore. Defaults to empty.
+* **bigtableChangeStreamName**: A unique name for the client pipeline. Lets you resume processing from the point at which a previously running pipeline stopped. Defaults to an automatically generated name. See the Dataflow job logs for the value used.
+* **bigtableChangeStreamResume**: When set to `true`, a new pipeline resumes processing from the point at which a previously running pipeline with the same `bigtableChangeStreamName` value stopped. If the pipeline with the given `bigtableChangeStreamName` value has never run, a new pipeline doesn't start. When set to `false`, a new pipeline starts.
If a pipeline with the same `bigtableChangeStreamName` value has already run for the given source, a new pipeline doesn't start. Defaults to `false`. +* **bigtableReadProjectId**: The Bigtable project ID. The default is the project for the Dataflow job. diff --git a/v2/googlecloud-to-googlecloud/README_Bigtable_Change_Streams_to_Google_Cloud_Storage.md b/v2/googlecloud-to-googlecloud/README_Bigtable_Change_Streams_to_Google_Cloud_Storage.md index 31a7b125d2..67ab96c4c3 100644 --- a/v2/googlecloud-to-googlecloud/README_Bigtable_Change_Streams_to_Google_Cloud_Storage.md +++ b/v2/googlecloud-to-googlecloud/README_Bigtable_Change_Streams_to_Google_Cloud_Storage.md @@ -14,32 +14,32 @@ on [Metadata Annotations](https://github.com/GoogleCloudPlatform/DataflowTemplat ### Required parameters -* **gcsOutputDirectory** : The path and filename prefix for writing output files. Must end with a slash. DateTime formatting is used to parse directory path for date & time formatters. (Example: gs://your-bucket/your-path). -* **bigtableChangeStreamAppProfile** : The Bigtable application profile ID. The application profile must use single-cluster routing and allow single-row transactions. -* **bigtableReadInstanceId** : The source Bigtable instance ID. -* **bigtableReadTableId** : The source Bigtable table ID. +* **gcsOutputDirectory**: The path and filename prefix for writing output files. Must end with a slash. DateTime formatting is used to parse directory path for date & time formatters. For example, `gs://your-bucket/your-path`. +* **bigtableChangeStreamAppProfile**: The Bigtable application profile ID. The application profile must use single-cluster routing and allow single-row transactions. +* **bigtableReadInstanceId**: The source Bigtable instance ID. +* **bigtableReadTableId**: The source Bigtable table ID. ### Optional parameters -* **outputFileFormat** : The format of the output Cloud Storage file. Allowed formats are TEXT, AVRO. Defaults to AVRO. -* **windowDuration** : The window duration/size in which data will be written to Cloud Storage. Allowed formats are: Ns (for seconds, example: 5s), Nm (for minutes, example: 12m), Nh (for hours, example: 2h). (Example: 1h). Defaults to: 1h. -* **bigtableMetadataTableTableId** : Table ID used for creating the metadata table. -* **schemaOutputFormat** : Schema chosen for outputting data to GCS. CHANGELOG_ENTRY support TEXT and AVRO output formats, BIGTABLE_ROW only supports AVRO output. Defaults to: CHANGELOG_ENTRY. -* **outputFilenamePrefix** : The prefix to place on each windowed file. Defaults to "changelog-" (Example: changelog-). -* **outputBatchSize** : Batching mutations reduces overhead and cost. Depending on the size of values written to Cloud Bigtable the batch size might need to be adjusted lower to avoid memory pressures on the worker fleet. Defaults to 10000. -* **outputShardsCount** : The maximum number of output shards produced when writing to Cloud Storage. A higher number of shards means higher throughput for writing to Cloud Storage, but potentially higher data aggregation cost across shards when processing output Cloud Storage files. Defaults to: 20. -* **useBase64Rowkeys** : Only supported for the TEXT output file format. When set to true, rowkeys will be written as Base64-encoded strings. Otherwise bigtableChangeStreamCharset charset will be used to decode binary values into String rowkeysDefaults to false. -* **useBase64ColumnQualifiers** : Only supported for the TEXT output file format. 
When set to true, column qualifiers will be written as Base64-encoded strings. Otherwise bigtableChangeStreamCharset charset will be used to decode binary values into String column qualifiersDefaults to false.
-* **useBase64Values** : Only supported for the TEXT output file format. When set to true, values will be written as Base64-encoded strings. Otherwise bigtableChangeStreamCharset charset will be used to decode binary values into String valuesDefaults to false.
-* **bigtableChangeStreamMetadataInstanceId** : The Bigtable change streams metadata instance ID. Defaults to empty.
-* **bigtableChangeStreamMetadataTableTableId** : The ID of the Bigtable change streams connector metadata table. If not provided, a Bigtable change streams connector metadata table is automatically created during pipeline execution. Defaults to empty.
-* **bigtableChangeStreamCharset** : The Bigtable change streams charset name. Defaults to: UTF-8.
-* **bigtableChangeStreamStartTimestamp** : The starting timestamp (https://tools.ietf.org/html/rfc3339), inclusive, to use for reading change streams. For example, `2022-05-05T07:59:59Z`. Defaults to the timestamp of the pipeline start time.
-* **bigtableChangeStreamIgnoreColumnFamilies** : A comma-separated list of column family name changes to ignore. Defaults to empty.
-* **bigtableChangeStreamIgnoreColumns** : A comma-separated list of column name changes to ignore. Defaults to empty.
-* **bigtableChangeStreamName** : A unique name for the client pipeline. Lets you resume processing from the point at which a previously running pipeline stopped. Defaults to an automatically generated name. See the Dataflow job logs for the value used.
-* **bigtableChangeStreamResume** : When set to `true`, a new pipeline resumes processing from the point at which a previously running pipeline with the same `bigtableChangeStreamName` value stopped. If the pipeline with the given `bigtableChangeStreamName` value has never run, a new pipeline doesn't start. When set to `false`, a new pipeline starts. If a pipeline with the same `bigtableChangeStreamName` value has already run for the given source, a new pipeline doesn't start. Defaults to `false`.
-* **bigtableReadProjectId** : The Bigtable project ID. The default is the project for the Dataflow job.
+* **outputFileFormat**: The format of the output Cloud Storage file. Allowed formats are TEXT, AVRO. Defaults to AVRO.
+* **windowDuration**: The window duration/size in which data will be written to Cloud Storage. Allowed formats are: Ns (for seconds, example: 5s), Nm (for minutes, example: 12m), Nh (for hours, example: 2h). For example, `1h`. Defaults to: 1h.
+* **bigtableMetadataTableTableId**: Table ID used for creating the metadata table.
+* **schemaOutputFormat**: Schema chosen for outputting data to GCS. CHANGELOG_ENTRY supports TEXT and AVRO output formats; BIGTABLE_ROW supports only AVRO output. Defaults to: CHANGELOG_ENTRY.
+* **outputFilenamePrefix**: The prefix to place on each windowed file. Defaults to "changelog-". For example, `changelog-`.
+* **outputBatchSize**: Batching mutations reduces overhead and cost. Depending on the size of values written to Cloud Bigtable, the batch size might need to be adjusted lower to avoid memory pressure on the worker fleet. Defaults to 10000.
+* **outputShardsCount**: The maximum number of output shards produced when writing to Cloud Storage.
A higher number of shards means higher throughput for writing to Cloud Storage, but potentially higher data aggregation cost across shards when processing output Cloud Storage files. Defaults to: 20.
+* **useBase64Rowkeys**: Only supported for the TEXT output file format. When set to `true`, rowkeys are written as Base64-encoded strings. Otherwise, the `bigtableChangeStreamCharset` charset is used to decode binary values into string rowkeys. Defaults to `false`.
+* **useBase64ColumnQualifiers**: Only supported for the TEXT output file format. When set to `true`, column qualifiers are written as Base64-encoded strings. Otherwise, the `bigtableChangeStreamCharset` charset is used to decode binary values into string column qualifiers. Defaults to `false`.
+* **useBase64Values**: Only supported for the TEXT output file format. When set to `true`, values are written as Base64-encoded strings. Otherwise, the `bigtableChangeStreamCharset` charset is used to decode binary values into string values. Defaults to `false`.
+* **bigtableChangeStreamMetadataInstanceId**: The Bigtable change streams metadata instance ID. Defaults to empty.
+* **bigtableChangeStreamMetadataTableTableId**: The ID of the Bigtable change streams connector metadata table. If not provided, a Bigtable change streams connector metadata table is automatically created during pipeline execution. Defaults to empty.
+* **bigtableChangeStreamCharset**: The Bigtable change streams charset name. Defaults to: UTF-8.
+* **bigtableChangeStreamStartTimestamp**: The starting timestamp (https://tools.ietf.org/html/rfc3339), inclusive, to use for reading change streams. For example, `2022-05-05T07:59:59Z`. Defaults to the timestamp of the pipeline start time.
+* **bigtableChangeStreamIgnoreColumnFamilies**: A comma-separated list of column family name changes to ignore. Defaults to empty.
+* **bigtableChangeStreamIgnoreColumns**: A comma-separated list of column name changes to ignore. Defaults to empty.
+* **bigtableChangeStreamName**: A unique name for the client pipeline. Lets you resume processing from the point at which a previously running pipeline stopped. Defaults to an automatically generated name. See the Dataflow job logs for the value used.
+* **bigtableChangeStreamResume**: When set to `true`, a new pipeline resumes processing from the point at which a previously running pipeline with the same `bigtableChangeStreamName` value stopped. If the pipeline with the given `bigtableChangeStreamName` value has never run, a new pipeline doesn't start. When set to `false`, a new pipeline starts. If a pipeline with the same `bigtableChangeStreamName` value has already run for the given source, a new pipeline doesn't start. Defaults to `false`.
+* **bigtableReadProjectId**: The Bigtable project ID. The default is the project for the Dataflow job.
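For the windowed-write options above (`windowDuration`, `outputShardsCount`, `gcsOutputDirectory`, and `outputFilenamePrefix`), the following is a minimal Beam sketch, not the template's actual implementation, of how such settings typically map onto windowed `TextIO` writes:

```java
import org.apache.beam.sdk.Pipeline;
import org.apache.beam.sdk.io.TextIO;
import org.apache.beam.sdk.options.PipelineOptionsFactory;
import org.apache.beam.sdk.transforms.Create;
import org.apache.beam.sdk.transforms.windowing.FixedWindows;
import org.apache.beam.sdk.transforms.windowing.Window;
import org.joda.time.Duration;

// Sketch only: illustrates the roles of windowDuration (1h windows),
// outputShardsCount (20 shards per window), and the output path prefix
// (gcsOutputDirectory + outputFilenamePrefix), using the documented defaults and examples.
public class WindowedChangelogWriteSketch {
  public static void main(String[] args) {
    Pipeline pipeline = Pipeline.create(PipelineOptionsFactory.fromArgs(args).create());
    pipeline
        .apply(Create.of("example change record"))
        .apply(Window.<String>into(FixedWindows.of(Duration.standardHours(1)))) // windowDuration = 1h
        .apply(
            TextIO.write()
                .withWindowedWrites()
                .withNumShards(20) // outputShardsCount = 20
                .to("gs://your-bucket/your-path/changelog-")); // prefix for each windowed file
    pipeline.run();
  }
}
```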
@@ -267,7 +267,7 @@ resource "google_dataflow_flex_template_job" "bigtable_change_streams_to_google_ name = "bigtable-change-streams-to-google-cloud-storage" region = var.region parameters = { - gcsOutputDirectory = "gs://your-bucket/your-path" + gcsOutputDirectory = "" bigtableChangeStreamAppProfile = "" bigtableReadInstanceId = "" bigtableReadTableId = "" diff --git a/v2/googlecloud-to-googlecloud/README_Bigtable_Change_Streams_to_PubSub.md b/v2/googlecloud-to-googlecloud/README_Bigtable_Change_Streams_to_PubSub.md index 07aa3daef2..552c088a28 100644 --- a/v2/googlecloud-to-googlecloud/README_Bigtable_Change_Streams_to_PubSub.md +++ b/v2/googlecloud-to-googlecloud/README_Bigtable_Change_Streams_to_PubSub.md @@ -17,33 +17,33 @@ on [Metadata Annotations](https://github.com/GoogleCloudPlatform/DataflowTemplat ### Required parameters -* **pubSubTopic** : The name of the destination Pub/Sub topic. -* **bigtableChangeStreamAppProfile** : The Bigtable application profile ID. The application profile must use single-cluster routing and allow single-row transactions. -* **bigtableReadInstanceId** : The source Bigtable instance ID. -* **bigtableReadTableId** : The source Bigtable table ID. +* **pubSubTopic**: The name of the destination Pub/Sub topic. +* **bigtableChangeStreamAppProfile**: The Bigtable application profile ID. The application profile must use single-cluster routing and allow single-row transactions. +* **bigtableReadInstanceId**: The source Bigtable instance ID. +* **bigtableReadTableId**: The source Bigtable table ID. ### Optional parameters -* **messageEncoding** : The encoding of the messages to be published to the Pub/Sub topic. When the schema of the destination topic is configured, the message encoding is determined by the topic settings. The following values are supported: `BINARY` and `JSON`. Defaults to `JSON`. -* **messageFormat** : The encoding of the messages to publish to the Pub/Sub topic. When the schema of the destination topic is configured, the message encoding is determined by the topic settings. The following values are supported: `AVRO`, `PROTOCOL_BUFFERS`, and `JSON`. The default value is `JSON`. When the `JSON` format is used, the rowKey, column, and value fields of the message are strings, the contents of which are determined by the pipeline options `useBase64Rowkeys`, `useBase64ColumnQualifiers`, `useBase64Values`, and `bigtableChangeStreamCharset`. -* **stripValues** : When set to true, the SET_CELL mutations are returned without new values set. Defaults to false. This parameter is useful when you don't need a new value to be present, also known as cache invalidation, or when values are extremely large and exceed Pub/Sub message size limits. -* **dlqDirectory** : The directory for the dead-letter queue. Records that fail to be processed are stored in this directory. Defaults to a directory under the Dataflow job temp location. In most cases, you can use the default path. -* **dlqRetryMinutes** : The number of minutes between dead-letter queue retries. Defaults to `10`. -* **dlqMaxRetries** : The dead letter maximum retries. Defaults to `5`. -* **useBase64Rowkeys** : Used with JSON message encoding. When set to `true`, the `rowKey` field is a Base64-encoded string. Otherwise, the `rowKey` is produced by using `bigtableChangeStreamCharset` to decode bytes into a string. Defaults to `false`. -* **pubSubProjectId** : The Bigtable project ID. The default is the project of the Dataflow job. -* **useBase64ColumnQualifiers** : Used with JSON message encoding. 
When set to `true`, the `column` field is a Base64-encoded string. Otherwise, the column is produced by using `bigtableChangeStreamCharset` to decode bytes into a string. Defaults to `false`. -* **useBase64Values** : Used with JSON message encoding. When set to `true`, the value field is a Base64-encoded string. Otherwise, the value isproduced by using `bigtableChangeStreamCharset` to decode bytes into a string. Defaults to `false`. -* **disableDlqRetries** : Whether or not to disable retries for the DLQ. Defaults to: false. -* **bigtableChangeStreamMetadataInstanceId** : The Bigtable change streams metadata instance ID. Defaults to empty. -* **bigtableChangeStreamMetadataTableTableId** : The ID of the Bigtable change streams connector metadata table. If not provided, a Bigtable change streams connector metadata table is automatically created during pipeline execution. Defaults to empty. -* **bigtableChangeStreamCharset** : The Bigtable change streams charset name. Defaults to: UTF-8. -* **bigtableChangeStreamStartTimestamp** : The starting timestamp (https://tools.ietf.org/html/rfc3339), inclusive, to use for reading change streams. For example, `2022-05-05T07:59:59Z`. Defaults to the timestamp of the pipeline start time. -* **bigtableChangeStreamIgnoreColumnFamilies** : A comma-separated list of column family name changes to ignore. Defaults to empty. -* **bigtableChangeStreamIgnoreColumns** : A comma-separated list of column name changes to ignore. Defaults to empty. -* **bigtableChangeStreamName** : A unique name for the client pipeline. Lets you resume processing from the point at which a previously running pipeline stopped. Defaults to an automatically generated name. See the Dataflow job logs for the value used. -* **bigtableChangeStreamResume** : When set to `true`, a new pipeline resumes processing from the point at which a previously running pipeline with the same `bigtableChangeStreamName` value stopped. If the pipeline with the given `bigtableChangeStreamName` value has never run, a new pipeline doesn't start. When set to `false`, a new pipeline starts. If a pipeline with the same `bigtableChangeStreamName` value has already run for the given source, a new pipeline doesn't start. Defaults to `false`. -* **bigtableReadProjectId** : The Bigtable project ID. The default is the project for the Dataflow job. +* **messageEncoding**: The encoding of the messages to be published to the Pub/Sub topic. When the schema of the destination topic is configured, the message encoding is determined by the topic settings. The following values are supported: `BINARY` and `JSON`. Defaults to `JSON`. +* **messageFormat**: The encoding of the messages to publish to the Pub/Sub topic. When the schema of the destination topic is configured, the message encoding is determined by the topic settings. The following values are supported: `AVRO`, `PROTOCOL_BUFFERS`, and `JSON`. The default value is `JSON`. When the `JSON` format is used, the rowKey, column, and value fields of the message are strings, the contents of which are determined by the pipeline options `useBase64Rowkeys`, `useBase64ColumnQualifiers`, `useBase64Values`, and `bigtableChangeStreamCharset`. +* **stripValues**: When set to `true`, the `SET_CELL` mutations are returned without new values set. Defaults to `false`. This parameter is useful when you don't need a new value to be present, also known as cache invalidation, or when values are extremely large and exceed Pub/Sub message size limits. 
+* **dlqDirectory**: The directory for the dead-letter queue. Records that fail to be processed are stored in this directory. Defaults to a directory under the Dataflow job temp location. In most cases, you can use the default path.
+* **dlqRetryMinutes**: The number of minutes between dead-letter queue retries. Defaults to `10`.
+* **dlqMaxRetries**: The dead letter maximum retries. Defaults to `5`.
+* **useBase64Rowkeys**: Used with JSON message encoding. When set to `true`, the `rowKey` field is a Base64-encoded string. Otherwise, the `rowKey` is produced by using `bigtableChangeStreamCharset` to decode bytes into a string. Defaults to `false`.
+* **pubSubProjectId**: The Bigtable project ID. The default is the project of the Dataflow job.
+* **useBase64ColumnQualifiers**: Used with JSON message encoding. When set to `true`, the `column` field is a Base64-encoded string. Otherwise, the column is produced by using `bigtableChangeStreamCharset` to decode bytes into a string. Defaults to `false`.
+* **useBase64Values**: Used with JSON message encoding. When set to `true`, the `value` field is a Base64-encoded string. Otherwise, the value is produced by using `bigtableChangeStreamCharset` to decode bytes into a string. Defaults to `false`.
+* **disableDlqRetries**: Whether or not to disable retries for the DLQ. Defaults to `false`.
+* **bigtableChangeStreamMetadataInstanceId**: The Bigtable change streams metadata instance ID. Defaults to empty.
+* **bigtableChangeStreamMetadataTableTableId**: The ID of the Bigtable change streams connector metadata table. If not provided, a Bigtable change streams connector metadata table is automatically created during pipeline execution. Defaults to empty.
+* **bigtableChangeStreamCharset**: The Bigtable change streams charset name. Defaults to: UTF-8.
+* **bigtableChangeStreamStartTimestamp**: The starting timestamp (https://tools.ietf.org/html/rfc3339), inclusive, to use for reading change streams. For example, `2022-05-05T07:59:59Z`. Defaults to the timestamp of the pipeline start time.
+* **bigtableChangeStreamIgnoreColumnFamilies**: A comma-separated list of column family name changes to ignore. Defaults to empty.
+* **bigtableChangeStreamIgnoreColumns**: A comma-separated list of column name changes to ignore. Defaults to empty.
+* **bigtableChangeStreamName**: A unique name for the client pipeline. Lets you resume processing from the point at which a previously running pipeline stopped. Defaults to an automatically generated name. See the Dataflow job logs for the value used.
+* **bigtableChangeStreamResume**: When set to `true`, a new pipeline resumes processing from the point at which a previously running pipeline with the same `bigtableChangeStreamName` value stopped. If the pipeline with the given `bigtableChangeStreamName` value has never run, a new pipeline doesn't start. When set to `false`, a new pipeline starts. If a pipeline with the same `bigtableChangeStreamName` value has already run for the given source, a new pipeline doesn't start. Defaults to `false`.
+* **bigtableReadProjectId**: The Bigtable project ID. The default is the project for the Dataflow job.
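Because the `useBase64Rowkeys`, `useBase64ColumnQualifiers`, and `useBase64Values` options above change how the JSON fields are encoded, a subscriber has to decode them accordingly. A minimal consumer-side sketch (an assumption about subscriber code, not part of the template):

```java
import java.nio.charset.StandardCharsets;
import java.util.Base64;

// Sketch only: decoding the rowKey field of a JSON message published by the
// template. With useBase64Rowkeys=true the field is Base64; with the default
// (false) it is already a string decoded with bigtableChangeStreamCharset (UTF-8 by default).
public class RowKeyDecodingSketch {

  static byte[] rowKeyBytes(String rowKeyField, boolean useBase64Rowkeys) {
    return useBase64Rowkeys
        ? Base64.getDecoder().decode(rowKeyField)
        : rowKeyField.getBytes(StandardCharsets.UTF_8);
  }

  public static void main(String[] args) {
    // "cm93LTE=" is Base64 for "row-1"; prints 5 (bytes).
    System.out.println(rowKeyBytes("cm93LTE=", true).length);
  }
}
```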
diff --git a/v2/googlecloud-to-googlecloud/README_Bigtable_Change_Streams_to_Vector_Search.md b/v2/googlecloud-to-googlecloud/README_Bigtable_Change_Streams_to_Vector_Search.md index 64340f0238..6ff43b025b 100644 --- a/v2/googlecloud-to-googlecloud/README_Bigtable_Change_Streams_to_Vector_Search.md +++ b/v2/googlecloud-to-googlecloud/README_Bigtable_Change_Streams_to_Vector_Search.md @@ -17,36 +17,36 @@ on [Metadata Annotations](https://github.com/GoogleCloudPlatform/DataflowTemplat ### Required parameters -* **embeddingColumn** : The fully qualified column name where the embeddings are stored. In the format cf:col. -* **embeddingByteSize** : The byte size of each entry in the embeddings array. Use 4 for Float, and 8 for Double. Defaults to: 4. -* **vectorSearchIndex** : The Vector Search Index where changes will be streamed, in the format 'projects/{projectID}/locations/{region}/indexes/{indexID}' (no leading or trailing spaces) (Example: projects/123/locations/us-east1/indexes/456). -* **bigtableChangeStreamAppProfile** : The Bigtable application profile ID. The application profile must use single-cluster routing and allow single-row transactions. -* **bigtableReadInstanceId** : The source Bigtable instance ID. -* **bigtableReadTableId** : The source Bigtable table ID. +* **embeddingColumn**: The fully qualified column name where the embeddings are stored. In the format cf:col. +* **embeddingByteSize**: The byte size of each entry in the embeddings array. Use 4 for Float, and 8 for Double. Defaults to: 4. +* **vectorSearchIndex**: The Vector Search Index where changes will be streamed, in the format 'projects/{projectID}/locations/{region}/indexes/{indexID}' (no leading or trailing spaces) For example, `projects/123/locations/us-east1/indexes/456`. +* **bigtableChangeStreamAppProfile**: The Bigtable application profile ID. The application profile must use single-cluster routing and allow single-row transactions. +* **bigtableReadInstanceId**: The source Bigtable instance ID. +* **bigtableReadTableId**: The source Bigtable table ID. ### Optional parameters -* **bigtableMetadataTableTableId** : Table ID used for creating the metadata table. -* **crowdingTagColumn** : The fully qualified column name where the crowding tag is stored. In the format cf:col. -* **allowRestrictsMappings** : The comma separated fully qualified column names of the columns that should be used as the `allow` restricts, with their alias. In the format cf:col->alias. -* **denyRestrictsMappings** : The comma separated fully qualified column names of the columns that should be used as the `deny` restricts, with their alias. In the format cf:col->alias. -* **intNumericRestrictsMappings** : The comma separated fully qualified column names of the columns that should be used as integer `numeric_restricts`, with their alias. In the format cf:col->alias. -* **floatNumericRestrictsMappings** : The comma separated fully qualified column names of the columns that should be used as float (4 bytes) `numeric_restricts`, with their alias. In the format cf:col->alias. -* **doubleNumericRestrictsMappings** : The comma separated fully qualified column names of the columns that should be used as double (8 bytes) `numeric_restricts`, with their alias. In the format cf:col->alias. -* **upsertMaxBatchSize** : The maximum number of upserts to buffer before upserting the batch to the Vector Search Index. Batches will be sent when there are either upsertBatchSize records ready, or any record has been waiting upsertBatchDelay time has passed. 
(Example: 10). Defaults to: 10. -* **upsertMaxBufferDuration** : The maximum delay before a batch of upserts is sent to Vector Search.Batches will be sent when there are either upsertBatchSize records ready, or any record has been waiting upsertBatchDelay time has passed. Allowed formats are: Ns (for seconds, example: 5s), Nm (for minutes, example: 12m), Nh (for hours, example: 2h). (Example: 10s). Defaults to: 10s. -* **deleteMaxBatchSize** : The maximum number of deletes to buffer before deleting the batch from the Vector Search Index. Batches will be sent when there are either deleteBatchSize records ready, or any record has been waiting deleteBatchDelay time has passed. (Example: 10). Defaults to: 10. -* **deleteMaxBufferDuration** : The maximum delay before a batch of deletes is sent to Vector Search.Batches will be sent when there are either deleteBatchSize records ready, or any record has been waiting deleteBatchDelay time has passed. Allowed formats are: Ns (for seconds, example: 5s), Nm (for minutes, example: 12m), Nh (for hours, example: 2h). (Example: 10s). Defaults to: 10s. -* **dlqDirectory** : The path to store any unprocessed records with the reason they failed to be processed. Default is a directory under the Dataflow job's temp location. The default value is enough under most conditions. -* **bigtableChangeStreamMetadataInstanceId** : The Bigtable change streams metadata instance ID. Defaults to empty. -* **bigtableChangeStreamMetadataTableTableId** : The ID of the Bigtable change streams connector metadata table. If not provided, a Bigtable change streams connector metadata table is automatically created during pipeline execution. Defaults to empty. -* **bigtableChangeStreamCharset** : The Bigtable change streams charset name. Defaults to: UTF-8. -* **bigtableChangeStreamStartTimestamp** : The starting timestamp (https://tools.ietf.org/html/rfc3339), inclusive, to use for reading change streams. For example, `2022-05-05T07:59:59Z`. Defaults to the timestamp of the pipeline start time. -* **bigtableChangeStreamIgnoreColumnFamilies** : A comma-separated list of column family name changes to ignore. Defaults to empty. -* **bigtableChangeStreamIgnoreColumns** : A comma-separated list of column name changes to ignore. Defaults to empty. -* **bigtableChangeStreamName** : A unique name for the client pipeline. Lets you resume processing from the point at which a previously running pipeline stopped. Defaults to an automatically generated name. See the Dataflow job logs for the value used. -* **bigtableChangeStreamResume** : When set to `true`, a new pipeline resumes processing from the point at which a previously running pipeline with the same `bigtableChangeStreamName` value stopped. If the pipeline with the given `bigtableChangeStreamName` value has never run, a new pipeline doesn't start. When set to `false`, a new pipeline starts. If a pipeline with the same `bigtableChangeStreamName` value has already run for the given source, a new pipeline doesn't start. Defaults to `false`. -* **bigtableReadProjectId** : The Bigtable project ID. The default is the project for the Dataflow job. +* **bigtableMetadataTableTableId**: Table ID used for creating the metadata table. +* **crowdingTagColumn**: The fully qualified column name where the crowding tag is stored. In the format cf:col. +* **allowRestrictsMappings**: The comma separated fully qualified column names of the columns that should be used as the `allow` restricts, with their alias. In the format cf:col->alias. 
+* **denyRestrictsMappings**: The comma separated fully qualified column names of the columns that should be used as the `deny` restricts, with their alias. In the format cf:col->alias. +* **intNumericRestrictsMappings**: The comma separated fully qualified column names of the columns that should be used as integer `numeric_restricts`, with their alias. In the format cf:col->alias. +* **floatNumericRestrictsMappings**: The comma separated fully qualified column names of the columns that should be used as float (4 bytes) `numeric_restricts`, with their alias. In the format cf:col->alias. +* **doubleNumericRestrictsMappings**: The comma separated fully qualified column names of the columns that should be used as double (8 bytes) `numeric_restricts`, with their alias. In the format cf:col->alias. +* **upsertMaxBatchSize**: The maximum number of upserts to buffer before upserting the batch to the Vector Search Index. Batches will be sent when there are either upsertBatchSize records ready, or any record has been waiting for upsertBatchDelay time. For example, `10`. Defaults to: 10. +* **upsertMaxBufferDuration**: The maximum delay before a batch of upserts is sent to Vector Search. Batches will be sent when there are either upsertBatchSize records ready, or any record has been waiting for upsertBatchDelay time. Allowed formats are: Ns (for seconds, example: 5s), Nm (for minutes, example: 12m), Nh (for hours, example: 2h). For example, `10s`. Defaults to: 10s. +* **deleteMaxBatchSize**: The maximum number of deletes to buffer before deleting the batch from the Vector Search Index. Batches will be sent when there are either deleteBatchSize records ready, or any record has been waiting for deleteBatchDelay time. For example, `10`. Defaults to: 10. +* **deleteMaxBufferDuration**: The maximum delay before a batch of deletes is sent to Vector Search. Batches will be sent when there are either deleteBatchSize records ready, or any record has been waiting for deleteBatchDelay time. Allowed formats are: Ns (for seconds, example: 5s), Nm (for minutes, example: 12m), Nh (for hours, example: 2h). For example, `10s`. Defaults to: 10s. +* **dlqDirectory**: The path to store any unprocessed records with the reason they failed to be processed. Default is a directory under the Dataflow job's temp location. The default value is enough under most conditions. +* **bigtableChangeStreamMetadataInstanceId**: The Bigtable change streams metadata instance ID. Defaults to empty. +* **bigtableChangeStreamMetadataTableTableId**: The ID of the Bigtable change streams connector metadata table. If not provided, a Bigtable change streams connector metadata table is automatically created during pipeline execution. Defaults to empty. +* **bigtableChangeStreamCharset**: The Bigtable change streams charset name. Defaults to: UTF-8. +* **bigtableChangeStreamStartTimestamp**: The starting timestamp (https://tools.ietf.org/html/rfc3339), inclusive, to use for reading change streams. For example, `2022-05-05T07:59:59Z`. Defaults to the timestamp of the pipeline start time. +* **bigtableChangeStreamIgnoreColumnFamilies**: A comma-separated list of column family name changes to ignore. Defaults to empty. +* **bigtableChangeStreamIgnoreColumns**: A comma-separated list of column name changes to ignore. Defaults to empty. +* **bigtableChangeStreamName**: A unique name for the client pipeline. Lets you resume processing from the point at which a previously running pipeline stopped.
Defaults to an automatically generated name. See the Dataflow job logs for the value used. +* **bigtableChangeStreamResume**: When set to `true`, a new pipeline resumes processing from the point at which a previously running pipeline with the same `bigtableChangeStreamName` value stopped. If the pipeline with the given `bigtableChangeStreamName` value has never run, a new pipeline doesn't start. When set to `false`, a new pipeline starts. If a pipeline with the same `bigtableChangeStreamName` value has already run for the given source, a new pipeline doesn't start. Defaults to `false`. +* **bigtableReadProjectId**: The Bigtable project ID. The default is the project for the Dataflow job. @@ -288,7 +288,7 @@ resource "google_dataflow_flex_template_job" "bigtable_change_streams_to_vector_ parameters = { embeddingColumn = "" embeddingByteSize = "4" - vectorSearchIndex = "projects/123/locations/us-east1/indexes/456" + vectorSearchIndex = "" bigtableChangeStreamAppProfile = "" bigtableReadInstanceId = "" bigtableReadTableId = "" diff --git a/v2/googlecloud-to-googlecloud/README_Cloud_PubSub_to_Avro_Flex.md b/v2/googlecloud-to-googlecloud/README_Cloud_PubSub_to_Avro_Flex.md index 6cd8460fea..2ce705b0e7 100644 --- a/v2/googlecloud-to-googlecloud/README_Cloud_PubSub_to_Avro_Flex.md +++ b/v2/googlecloud-to-googlecloud/README_Cloud_PubSub_to_Avro_Flex.md @@ -18,23 +18,23 @@ on [Metadata Annotations](https://github.com/GoogleCloudPlatform/DataflowTemplat ### Required parameters -* **outputDirectory** : The path and filename prefix for writing output files. Must end with a slash. DateTime formatting is used to parse directory path for date & time formatters. -* **avroTempDirectory** : Directory for temporary Avro files. +* **outputDirectory**: The path and filename prefix for writing output files. Must end with a slash. DateTime formatting is used to parse directory path for date & time formatters. +* **avroTempDirectory**: Directory for temporary Avro files. ### Optional parameters -* **inputSubscription** : Pub/Sub subscription to read the input from, in the format of 'projects/your-project-id/subscriptions/your-subscription-name' (Example: projects/your-project-id/subscriptions/your-subscription-name). -* **inputTopic** : Pub/Sub topic to read the input from, in the format of 'projects/your-project-id/topics/your-topic-name'. -* **outputFilenamePrefix** : The prefix to place on each windowed file. Defaults to: output. -* **outputFilenameSuffix** : The suffix to place on each windowed file. Typically a file extension such as .txt or .csv. Defaults to empty. -* **outputShardTemplate** : The shard template defines the dynamic portion of each windowed file. By default, the pipeline uses a single shard for output to the file system within each window. This means that all data outputs into a single file per window. The `outputShardTemplate` defaults to `W-P-SS-of-NN` where `W` is the window date range, `P` is the pane info, `S` is the shard number, and `N` is the number of shards. In case of a single file, the `SS-of-NN` portion of the `outputShardTemplate` is `00-of-01`. -* **numShards** : The maximum number of output shards produced when writing. A higher number of shards means higher throughput for writing to Cloud Storage, but potentially higher data aggregation cost across shards when processing output Cloud Storage files. Defaults to: 0. -* **windowDuration** : The window duration is the interval in which data is written to the output directory. 
Configure the duration based on the pipeline's throughput. For example, a higher throughput might require smaller window sizes so that the data fits into memory. Defaults to 5m (5 minutes), with a minimum of 1s (1 second). Allowed formats are: [int]s (for seconds, example: 5s), [int]m (for minutes, example: 12m), [int]h (for hours, example: 2h). (Example: 5m). -* **yearPattern** : Pattern for formatting the year. Must be one or more of 'y' or 'Y'. Case makes no difference in the year. The pattern can be optionally wrapped by characters that aren't either alphanumeric or the directory ('/') character. Defaults to 'YYYY'. -* **monthPattern** : Pattern for formatting the month. Must be one or more of the 'M' character. The pattern can be optionally wrapped by characters that aren't alphanumeric or the directory ('/') character. Defaults to 'MM'. -* **dayPattern** : Pattern for formatting the day. Must be one or more of 'd' for day of month or 'D' for day of year. Case makes no difference in the year. The pattern can be optionally wrapped by characters that aren't either alphanumeric or the directory ('/') character. Defaults to 'dd'. -* **hourPattern** : Pattern for formatting the hour. Must be one or more of the 'H' character. The pattern can be optionally wrapped by characters that aren't alphanumeric or the directory ('/') character. Defaults to 'HH'. -* **minutePattern** : Pattern for formatting the minute. Must be one or more of the 'm' character. The pattern can be optionally wrapped by characters that aren't alphanumeric or the directory ('/') character. Defaults to 'mm'. +* **inputSubscription**: Pub/Sub subscription to read the input from, in the format of 'projects/your-project-id/subscriptions/your-subscription-name'. For example, `projects/your-project-id/subscriptions/your-subscription-name`. +* **inputTopic**: Pub/Sub topic to read the input from, in the format of 'projects/your-project-id/topics/your-topic-name'. +* **outputFilenamePrefix**: The prefix to place on each windowed file. Defaults to: output. +* **outputFilenameSuffix**: The suffix to place on each windowed file. Typically a file extension such as .txt or .csv. Defaults to empty. +* **outputShardTemplate**: The shard template defines the dynamic portion of each windowed file. By default, the pipeline uses a single shard for output to the file system within each window. This means that all data outputs into a single file per window. The `outputShardTemplate` defaults to `W-P-SS-of-NN` where `W` is the window date range, `P` is the pane info, `S` is the shard number, and `N` is the number of shards. In case of a single file, the `SS-of-NN` portion of the `outputShardTemplate` is `00-of-01`. +* **numShards**: The maximum number of output shards produced when writing. A higher number of shards means higher throughput for writing to Cloud Storage, but potentially higher data aggregation cost across shards when processing output Cloud Storage files. Defaults to: 0. +* **windowDuration**: The window duration is the interval in which data is written to the output directory. Configure the duration based on the pipeline's throughput. For example, a higher throughput might require smaller window sizes so that the data fits into memory. Defaults to `5m` (5 minutes), with a minimum of `1s` (1 second). Allowed formats are: `[int]s` (for seconds, example: `5s`), `[int]m` (for minutes, example: `12m`), `[int]h` (for hours, example: `2h`). For example, `5m`. +* **yearPattern**: Pattern for formatting the year.
Must be one or more of `y` or `Y`. Case makes no difference in the year. The pattern can be optionally wrapped by characters that aren't either alphanumeric or the directory (`/`) character. Defaults to `YYYY`. +* **monthPattern**: Pattern for formatting the month. Must be one or more of the `M` character. The pattern can be optionally wrapped by characters that aren't alphanumeric or the directory (`/`) character. Defaults to `MM`. +* **dayPattern**: Pattern for formatting the day. Must be one or more of `d` for day of month or `D` for day of year. Case makes no difference in the year. The pattern can be optionally wrapped by characters that aren't either alphanumeric or the directory (`/`) character. Defaults to `dd`. +* **hourPattern**: Pattern for formatting the hour. Must be one or more of the `H` character. The pattern can be optionally wrapped by characters that aren't alphanumeric or the directory (`/`) character. Defaults to `HH`. +* **minutePattern**: Pattern for formatting the minute. Must be one or more of the `m` character. The pattern can be optionally wrapped by characters that aren't alphanumeric or the directory (`/`) character. Defaults to `mm`. @@ -237,7 +237,7 @@ resource "google_dataflow_flex_template_job" "cloud_pubsub_to_avro_flex" { parameters = { outputDirectory = "" avroTempDirectory = "" - # inputSubscription = "projects/your-project-id/subscriptions/your-subscription-name" + # inputSubscription = "" # inputTopic = "" # outputFilenamePrefix = "output" # outputFilenameSuffix = "" diff --git a/v2/googlecloud-to-googlecloud/README_Cloud_PubSub_to_GCS_Text_Flex.md b/v2/googlecloud-to-googlecloud/README_Cloud_PubSub_to_GCS_Text_Flex.md index f7384d273f..0407ba3b81 100644 --- a/v2/googlecloud-to-googlecloud/README_Cloud_PubSub_to_GCS_Text_Flex.md +++ b/v2/googlecloud-to-googlecloud/README_Cloud_PubSub_to_GCS_Text_Flex.md @@ -20,23 +20,23 @@ on [Metadata Annotations](https://github.com/GoogleCloudPlatform/DataflowTemplat ### Required parameters -* **outputDirectory** : The path and filename prefix to write write output files to. This value must end in a slash. (Example: gs://your-bucket/your-path). +* **outputDirectory**: The path and filename prefix to write output files to. This value must end in a slash. For example, `gs://your-bucket/your-path/`. ### Optional parameters -* **inputTopic** : The Pub/Sub topic to read the input from. The topic name should be in the format `projects//topics/`. If this parameter is provided don't use `inputSubscription`. (Example: projects/your-project-id/topics/your-topic-name). -* **inputSubscription** : The Pub/Sub subscription to read the input from. The subscription name uses the format `projects//subscription/`. If this parameter is provided, don't use `inputTopic`. (Example: projects/your-project-id/subscriptions/your-subscription-name). -* **userTempLocation** : The user provided directory to output temporary files to. Must end with a slash. -* **outputFilenamePrefix** : The prefix to place on each windowed file. (Example: output-). Defaults to: output. -* **outputFilenameSuffix** : The suffix to place on each windowed file, typically a file extension such as `.txt` or `.csv`. (Example: .txt). Defaults to empty. -* **outputShardTemplate** : The shard template defines the dynamic portion of each windowed file. By default, the pipeline uses a single shard for output to the file system within each window. This means that all data outputs into a single file per window.
The `outputShardTemplate` defaults to `W-P-SS-of-NN` where `W` is the window date range, `P` is the pane info, `S` is the shard number, and `N` is the number of shards. In case of a single file, the `SS-of-NN` portion of the `outputShardTemplate` is `00-of-01`. -* **numShards** : The maximum number of output shards produced when writing. A higher number of shards means higher throughput for writing to Cloud Storage, but potentially higher data aggregation cost across shards when processing output Cloud Storage files. Defaults to: 0. -* **windowDuration** : The window duration is the interval in which data is written to the output directory. Configure the duration based on the pipeline's throughput. For example, a higher throughput might require smaller window sizes so that the data fits into memory. Defaults to 5m (5 minutes), with a minimum of 1s (1 second). Allowed formats are: [int]s (for seconds, example: 5s), [int]m (for minutes, example: 12m), [int]h (for hours, example: 2h). (Example: 5m). -* **yearPattern** : Pattern for formatting the year. Must be one or more of 'y' or 'Y'. Case makes no difference in the year. The pattern can be optionally wrapped by characters that aren't either alphanumeric or the directory ('/') character. Defaults to 'YYYY'. -* **monthPattern** : Pattern for formatting the month. Must be one or more of the 'M' character. The pattern can be optionally wrapped by characters that aren't alphanumeric or the directory ('/') character. Defaults to 'MM'. -* **dayPattern** : Pattern for formatting the day. Must be one or more of 'd' for day of month or 'D' for day of year. Case makes no difference in the year. The pattern can be optionally wrapped by characters that aren't either alphanumeric or the directory ('/') character. Defaults to 'dd'. -* **hourPattern** : Pattern for formatting the hour. Must be one or more of the 'H' character. The pattern can be optionally wrapped by characters that aren't alphanumeric or the directory ('/') character. Defaults to 'HH'. -* **minutePattern** : Pattern for formatting the minute. Must be one or more of the 'm' character. The pattern can be optionally wrapped by characters that aren't alphanumeric or the directory ('/') character. Defaults to 'mm'. +* **inputTopic**: The Pub/Sub topic to read the input from. If this parameter is provided, don't use `inputSubscription`. For example, `projects//topics/`. +* **inputSubscription**: The Pub/Sub subscription to read the input from. If this parameter is provided, don't use `inputTopic`. For example, `projects//subscription/`. +* **userTempLocation**: The user-provided directory to output temporary files to. Must end with a slash. +* **outputFilenamePrefix**: The prefix to place on each windowed file. For example, `output-`. Defaults to: output. +* **outputFilenameSuffix**: The suffix to place on each windowed file, typically a file extension such as `.txt` or `.csv`. For example, `.txt`. Defaults to empty. +* **outputShardTemplate**: The shard template defines the dynamic portion of each windowed file. By default, the pipeline uses a single shard for output to the file system within each window. This means that all data outputs into a single file per window. The `outputShardTemplate` defaults to `W-P-SS-of-NN` where `W` is the window date range, `P` is the pane info, `S` is the shard number, and `N` is the number of shards. In case of a single file, the `SS-of-NN` portion of the `outputShardTemplate` is `00-of-01`.
+* **numShards**: The maximum number of output shards produced when writing. A higher number of shards means higher throughput for writing to Cloud Storage, but potentially higher data aggregation cost across shards when processing output Cloud Storage files. Defaults to: 0. +* **windowDuration**: The window duration is the interval in which data is written to the output directory. Configure the duration based on the pipeline's throughput. For example, a higher throughput might require smaller window sizes so that the data fits into memory. Defaults to `5m` (5 minutes), with a minimum of `1s` (1 second). Allowed formats are: `[int]s` (for seconds, example: `5s`), `[int]m` (for minutes, example: `12m`), `[int]h` (for hours, example: `2h`). For example, `5m`. +* **yearPattern**: Pattern for formatting the year. Must be one or more of `y` or `Y`. Case makes no difference in the year. The pattern can be optionally wrapped by characters that aren't either alphanumeric or the directory (`/`) character. Defaults to `YYYY`. +* **monthPattern**: Pattern for formatting the month. Must be one or more of the `M` character. The pattern can be optionally wrapped by characters that aren't alphanumeric or the directory (`/`) character. Defaults to `MM`. +* **dayPattern**: Pattern for formatting the day. Must be one or more of `d` for day of month or `D` for day of year. Case makes no difference in the year. The pattern can be optionally wrapped by characters that aren't either alphanumeric or the directory (`/`) character. Defaults to `dd`. +* **hourPattern**: Pattern for formatting the hour. Must be one or more of the `H` character. The pattern can be optionally wrapped by characters that aren't alphanumeric or the directory (`/`) character. Defaults to `HH`. +* **minutePattern**: Pattern for formatting the minute. Must be one or more of the `m` character. The pattern can be optionally wrapped by characters that aren't alphanumeric or the directory (`/`) character. Defaults to `mm`. @@ -237,12 +237,12 @@ resource "google_dataflow_flex_template_job" "cloud_pubsub_to_gcs_text_flex" { name = "cloud-pubsub-to-gcs-text-flex" region = var.region parameters = { - outputDirectory = "gs://your-bucket/your-path" - # inputTopic = "projects/your-project-id/topics/your-topic-name" - # inputSubscription = "projects/your-project-id/subscriptions/your-subscription-name" + outputDirectory = "" + # inputTopic = "" + # inputSubscription = "" # userTempLocation = "" - # outputFilenamePrefix = "output-" - # outputFilenameSuffix = ".txt" + # outputFilenamePrefix = "output" + # outputFilenameSuffix = "" # outputShardTemplate = "W-P-SS-of-NN" # numShards = "0" # windowDuration = "5m" diff --git a/v2/googlecloud-to-googlecloud/README_Cloud_Spanner_to_BigQuery_Flex.md b/v2/googlecloud-to-googlecloud/README_Cloud_Spanner_to_BigQuery_Flex.md index 5ddbabacbb..fe09631ae9 100644 --- a/v2/googlecloud-to-googlecloud/README_Cloud_Spanner_to_BigQuery_Flex.md +++ b/v2/googlecloud-to-googlecloud/README_Cloud_Spanner_to_BigQuery_Flex.md @@ -17,21 +17,21 @@ on [Metadata Annotations](https://github.com/GoogleCloudPlatform/DataflowTemplat ### Required parameters -* **spannerInstanceId** : The instance ID of the Spanner database to read from. -* **spannerDatabaseId** : The database ID of the Spanner database to export. -* **outputTableSpec** : The BigQuery output table location to write the output to. 
For example, `:.`.Depending on the `createDisposition` specified, the output table might be created automatically using the user provided Avro schema. +* **spannerInstanceId**: The instance ID of the Spanner database to read from. +* **spannerDatabaseId**: The database ID of the Spanner database to export. +* **outputTableSpec**: The BigQuery output table location to write the output to. For example, `:.`. Depending on the `createDisposition` specified, the output table might be created automatically using the user-provided Avro schema. ### Optional parameters -* **spannerProjectId** : The ID of the project that the Spanner database resides in. The default value for this parameter is the project where the Dataflow pipeline is running. -* **spannerTableId** : The table name of the Spanner database to export. Ignored if sqlQuery is set. -* **spannerRpcPriority** : The request priority (https://cloud.google.com/spanner/docs/reference/rest/v1/RequestOptions) for Spanner calls. Possible values are `HIGH`, `MEDIUM`, and `LOW`. The default value is `HIGH`. -* **sqlQuery** : The SQL query to use to read data from the Spanner database. Required if spannerTableId is empty. -* **bigQuerySchemaPath** : The Cloud Storage path (gs://) to the JSON file that defines your BigQuery schema. (Example: gs://your-bucket/your-schema.json). -* **writeDisposition** : The BigQuery WriteDisposition (https://cloud.google.com/bigquery/docs/reference/rest/v2/Job#jobconfigurationload) value. For example, `WRITE_APPEND`, `WRITE_EMPTY`, or `WRITE_TRUNCATE`. Defaults to `WRITE_APPEND`. -* **createDisposition** : The BigQuery CreateDisposition (https://cloud.google.com/bigquery/docs/reference/rest/v2/Job#jobconfigurationload). For example, `CREATE_IF_NEEDED` and `CREATE_NEVER`. Defaults to `CREATE_IF_NEEDED`. -* **useStorageWriteApi** : If `true`, the pipeline uses the BigQuery Storage Write API (https://cloud.google.com/bigquery/docs/write-api). The default value is `false`. For more information, see Using the Storage Write API (https://beam.apache.org/documentation/io/built-in/google-bigquery/#storage-write-api). -* **useStorageWriteApiAtLeastOnce** : When using the Storage Write API, specifies the write semantics. To use at-least-once semantics (https://beam.apache.org/documentation/io/built-in/google-bigquery/#at-least-once-semantics), set this parameter to `true`. To use exactly-once semantics, set the parameter to `false`. This parameter applies only when `useStorageWriteApi` is `true`. The default value is `false`. +* **spannerProjectId**: The ID of the project that the Spanner database resides in. The default value for this parameter is the project where the Dataflow pipeline is running. +* **spannerTableId**: The table name of the Spanner database to export. Ignored if sqlQuery is set. +* **spannerRpcPriority**: The request priority (https://cloud.google.com/spanner/docs/reference/rest/v1/RequestOptions) for Spanner calls. Possible values are `HIGH`, `MEDIUM`, and `LOW`. The default value is `HIGH`. +* **sqlQuery**: The SQL query to use to read data from the Spanner database. Required if spannerTableId is empty. +* **bigQuerySchemaPath**: The Cloud Storage path (gs://) to the JSON file that defines your BigQuery schema. For example, `gs://your-bucket/your-schema.json`. +* **writeDisposition**: The BigQuery WriteDisposition (https://cloud.google.com/bigquery/docs/reference/rest/v2/Job#jobconfigurationload) value. For example, `WRITE_APPEND`, `WRITE_EMPTY`, or `WRITE_TRUNCATE`. Defaults to `WRITE_APPEND`.
+* **createDisposition**: The BigQuery CreateDisposition (https://cloud.google.com/bigquery/docs/reference/rest/v2/Job#jobconfigurationload). For example, `CREATE_IF_NEEDED` and `CREATE_NEVER`. Defaults to `CREATE_IF_NEEDED`. +* **useStorageWriteApi**: If `true`, the pipeline uses the BigQuery Storage Write API (https://cloud.google.com/bigquery/docs/write-api). The default value is `false`. For more information, see Using the Storage Write API (https://beam.apache.org/documentation/io/built-in/google-bigquery/#storage-write-api). +* **useStorageWriteApiAtLeastOnce**: When using the Storage Write API, specifies the write semantics. To use at-least-once semantics (https://beam.apache.org/documentation/io/built-in/google-bigquery/#at-least-once-semantics), set this parameter to `true`. To use exactly-once semantics, set the parameter to `false`. This parameter applies only when `useStorageWriteApi` is `true`. The default value is `false`. @@ -233,7 +233,7 @@ resource "google_dataflow_flex_template_job" "cloud_spanner_to_bigquery_flex" { # spannerTableId = "" # spannerRpcPriority = "" # sqlQuery = "" - # bigQuerySchemaPath = "gs://your-bucket/your-schema.json" + # bigQuerySchemaPath = "" # writeDisposition = "WRITE_APPEND" # createDisposition = "CREATE_IF_NEEDED" # useStorageWriteApi = "false" diff --git a/v2/googlecloud-to-googlecloud/README_Firestore_to_BigQuery_Flex.md b/v2/googlecloud-to-googlecloud/README_Firestore_to_BigQuery_Flex.md index 9da3ecd774..1e90e0564f 100644 --- a/v2/googlecloud-to-googlecloud/README_Firestore_to_BigQuery_Flex.md +++ b/v2/googlecloud-to-googlecloud/README_Firestore_to_BigQuery_Flex.md @@ -13,21 +13,21 @@ on [Metadata Annotations](https://github.com/GoogleCloudPlatform/DataflowTemplat ### Required parameters -* **outputTableSpec** : BigQuery table location to write the output to. The name should be in the format `:.`. The table's schema must match input objects. -* **bigQueryLoadingTemporaryDirectory** : Temporary directory for BigQuery loading process (Example: gs://your-bucket/your-files/temp_dir). -* **firestoreReadGqlQuery** : Specifies which Firestore entities to read. Ex: ‘SELECT * FROM MyKind’. -* **firestoreReadProjectId** : The Google Cloud project ID of the Firestore instance to read from. +* **outputTableSpec**: BigQuery table location to write the output to. The name should be in the format `:.`. The table's schema must match input objects. +* **bigQueryLoadingTemporaryDirectory**: Temporary directory for BigQuery loading process. For example, `gs://your-bucket/your-files/temp_dir`. +* **firestoreReadGqlQuery**: Specifies which Firestore entities to read. For example, `SELECT * FROM MyKind`. +* **firestoreReadProjectId**: The Google Cloud project ID of the Firestore instance to read from. ### Optional parameters -* **bigQuerySchemaPath** : The Cloud Storage path for the BigQuery JSON schema. If `createDisposition` is not set, or set to CREATE_IF_NEEDED, this parameter must be specified. (Example: gs://your-bucket/your-schema.json). -* **firestoreReadNamespace** : Namespace of requested Firestore entities. Leave blank to use default namespace. -* **javascriptTextTransformGcsPath** : The Cloud Storage URI of the .js file that defines the JavaScript user-defined function (UDF) to use. (Example: gs://my-bucket/my-udfs/my_file.js). -* **javascriptTextTransformFunctionName** : The name of the JavaScript user-defined function (UDF) to use.
For example, if your JavaScript function code is `myTransform(inJson) { /*...do stuff...*/ }`, then the function name is `myTransform`. For sample JavaScript UDFs, see UDF Examples (https://github.com/GoogleCloudPlatform/DataflowTemplates#udf-examples). -* **useStorageWriteApi** : If `true`, the pipeline uses the BigQuery Storage Write API (https://cloud.google.com/bigquery/docs/write-api). The default value is `false`. For more information, see Using the Storage Write API (https://beam.apache.org/documentation/io/built-in/google-bigquery/#storage-write-api). -* **useStorageWriteApiAtLeastOnce** : When using the Storage Write API, specifies the write semantics. To use at-least-once semantics (https://beam.apache.org/documentation/io/built-in/google-bigquery/#at-least-once-semantics), set this parameter to `true`. To use exactly-once semantics, set the parameter to `false`. This parameter applies only when `useStorageWriteApi` is `true`. The default value is `false`. -* **writeDisposition** : The BigQuery WriteDisposition (https://cloud.google.com/bigquery/docs/reference/rest/v2/Job#jobconfigurationload) value. For example, `WRITE_APPEND`, `WRITE_EMPTY`, or `WRITE_TRUNCATE`. Defaults to `WRITE_APPEND`. -* **createDisposition** : The BigQuery CreateDisposition (https://cloud.google.com/bigquery/docs/reference/rest/v2/Job#jobconfigurationload). For example, `CREATE_IF_NEEDED` and `CREATE_NEVER`. Defaults to `CREATE_IF_NEEDED`. +* **bigQuerySchemaPath**: The Cloud Storage path for the BigQuery JSON schema. If `createDisposition` is not set, or set to CREATE_IF_NEEDED, this parameter must be specified. For example, `gs://your-bucket/your-schema.json`. +* **firestoreReadNamespace**: Namespace of requested Firestore entities. Leave blank to use default namespace. +* **javascriptTextTransformGcsPath**: The Cloud Storage URI of the .js file that defines the JavaScript user-defined function (UDF) to use. For example, `gs://my-bucket/my-udfs/my_file.js`. +* **javascriptTextTransformFunctionName**: The name of the JavaScript user-defined function (UDF) to use. For example, if your JavaScript function code is `myTransform(inJson) { /*...do stuff...*/ }`, then the function name is `myTransform`. For sample JavaScript UDFs, see UDF Examples (https://github.com/GoogleCloudPlatform/DataflowTemplates#udf-examples). +* **useStorageWriteApi**: If `true`, the pipeline uses the BigQuery Storage Write API (https://cloud.google.com/bigquery/docs/write-api). The default value is `false`. For more information, see Using the Storage Write API (https://beam.apache.org/documentation/io/built-in/google-bigquery/#storage-write-api). +* **useStorageWriteApiAtLeastOnce**: When using the Storage Write API, specifies the write semantics. To use at-least-once semantics (https://beam.apache.org/documentation/io/built-in/google-bigquery/#at-least-once-semantics), set this parameter to `true`. To use exactly-once semantics, set the parameter to `false`. This parameter applies only when `useStorageWriteApi` is `true`. The default value is `false`. +* **writeDisposition**: The BigQuery WriteDisposition (https://cloud.google.com/bigquery/docs/reference/rest/v2/Job#jobconfigurationload) value. For example, `WRITE_APPEND`, `WRITE_EMPTY`, or `WRITE_TRUNCATE`. Defaults to `WRITE_APPEND`. +* **createDisposition**: The BigQuery CreateDisposition (https://cloud.google.com/bigquery/docs/reference/rest/v2/Job#jobconfigurationload). For example, `CREATE_IF_NEEDED` and `CREATE_NEVER`. Defaults to `CREATE_IF_NEEDED`. 
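As a quick reference for the Firestore_to_BigQuery_Flex parameters just listed, here is a minimal, hypothetical Terraform sketch that wires up the required fields plus the JavaScript UDF options. The project, bucket, dataset, and function names are placeholders, and the `container_spec_gcs_path` is assumed to follow the usual `gs://dataflow-templates-REGION/latest/flex/...` layout rather than being copied from this repository; it mirrors the shape of the generated `google_dataflow_flex_template_job` examples shown elsewhere in these READMEs.

```
resource "google_dataflow_flex_template_job" "firestore_to_bigquery_flex" {
  provider = google-beta
  name     = "firestore-to-bigquery-flex"
  region   = var.region
  # Assumed template location; adjust to the version you actually deploy.
  container_spec_gcs_path = "gs://dataflow-templates-${var.region}/latest/flex/Firestore_to_BigQuery_Flex"
  parameters = {
    outputTableSpec                     = "my-project:my_dataset.my_table"  # hypothetical table
    bigQueryLoadingTemporaryDirectory   = "gs://my-bucket/temp"             # hypothetical bucket
    firestoreReadGqlQuery               = "SELECT * FROM MyKind"
    firestoreReadProjectId              = "my-project"                      # hypothetical project
    javascriptTextTransformGcsPath      = "gs://my-bucket/udfs/my_udf.js"   # hypothetical UDF path
    javascriptTextTransformFunctionName = "myTransform"
  }
}
```

The optional Storage Write API and disposition flags from the list above can be added to `parameters` in the same way.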
## User-Defined functions (UDFs) @@ -233,12 +233,12 @@ resource "google_dataflow_flex_template_job" "firestore_to_bigquery_flex" { region = var.region parameters = { outputTableSpec = "" - bigQueryLoadingTemporaryDirectory = "gs://your-bucket/your-files/temp_dir" + bigQueryLoadingTemporaryDirectory = "" firestoreReadGqlQuery = "" firestoreReadProjectId = "" - # bigQuerySchemaPath = "gs://your-bucket/your-schema.json" + # bigQuerySchemaPath = "" # firestoreReadNamespace = "" - # javascriptTextTransformGcsPath = "gs://my-bucket/my-udfs/my_file.js" + # javascriptTextTransformGcsPath = "" # javascriptTextTransformFunctionName = "" # useStorageWriteApi = "false" # useStorageWriteApiAtLeastOnce = "false" diff --git a/v2/googlecloud-to-googlecloud/README_Firestore_to_BigQuery_Xlang.md b/v2/googlecloud-to-googlecloud/README_Firestore_to_BigQuery_Xlang.md index 9b8ade772d..7c409d7f54 100644 --- a/v2/googlecloud-to-googlecloud/README_Firestore_to_BigQuery_Xlang.md +++ b/v2/googlecloud-to-googlecloud/README_Firestore_to_BigQuery_Xlang.md @@ -13,21 +13,21 @@ on [Metadata Annotations](https://github.com/GoogleCloudPlatform/DataflowTemplat ### Required parameters -* **outputTableSpec** : BigQuery table location to write the output to. The name should be in the format `:.`. The table's schema must match input objects. -* **bigQueryLoadingTemporaryDirectory** : Temporary directory for BigQuery loading process (Example: gs://your-bucket/your-files/temp_dir). -* **firestoreReadGqlQuery** : Specifies which Firestore entities to read. Ex: ‘SELECT * FROM MyKind’. -* **firestoreReadProjectId** : The Google Cloud project ID of the Firestore instance to read from. +* **outputTableSpec**: BigQuery table location to write the output to. The name should be in the format `:.`. The table's schema must match input objects. +* **bigQueryLoadingTemporaryDirectory**: Temporary directory for BigQuery loading process. For example, `gs://your-bucket/your-files/temp_dir`. +* **firestoreReadGqlQuery**: Specifies which Firestore entities to read. For example, `SELECT * FROM MyKind`. +* **firestoreReadProjectId**: The Google Cloud project ID of the Firestore instance to read from. ### Optional parameters -* **bigQuerySchemaPath** : The Cloud Storage path for the BigQuery JSON schema. If `createDisposition` is not set, or set to CREATE_IF_NEEDED, this parameter must be specified. (Example: gs://your-bucket/your-schema.json). -* **firestoreReadNamespace** : Namespace of requested Firestore entities. Leave blank to use default namespace. -* **pythonExternalTextTransformGcsPath** : The Cloud Storage path pattern for the Python code containing your user-defined functions. (Example: gs://your-bucket/your-function.py). -* **pythonExternalTextTransformFunctionName** : The name of the function to call from your Python file. Use only letters, digits, and underscores. (Example: 'transform' or 'transform_udf1'). -* **useStorageWriteApi** : If `true`, the pipeline uses the BigQuery Storage Write API (https://cloud.google.com/bigquery/docs/write-api). The default value is `false`. For more information, see Using the Storage Write API (https://beam.apache.org/documentation/io/built-in/google-bigquery/#storage-write-api). -* **useStorageWriteApiAtLeastOnce** : When using the Storage Write API, specifies the write semantics. To use at-least-once semantics (https://beam.apache.org/documentation/io/built-in/google-bigquery/#at-least-once-semantics), set this parameter to `true`. To use exactly-once semantics, set the parameter to `false`.
This parameter applies only when `useStorageWriteApi` is `true`. The default value is `false`. -* **writeDisposition** : The BigQuery WriteDisposition (https://cloud.google.com/bigquery/docs/reference/rest/v2/Job#jobconfigurationload) value. For example, `WRITE_APPEND`, `WRITE_EMPTY`, or `WRITE_TRUNCATE`. Defaults to `WRITE_APPEND`. -* **createDisposition** : The BigQuery CreateDisposition (https://cloud.google.com/bigquery/docs/reference/rest/v2/Job#jobconfigurationload). For example, `CREATE_IF_NEEDED` and `CREATE_NEVER`. Defaults to `CREATE_IF_NEEDED`. +* **bigQuerySchemaPath**: The Cloud Storage path for the BigQuery JSON schema. If `createDisposition` is not set, or set to CREATE_IF_NEEDED, this parameter must be specified. For example, `gs://your-bucket/your-schema.json`. +* **firestoreReadNamespace**: Namespace of requested Firestore entities. Leave blank to use default namespace. +* **pythonExternalTextTransformGcsPath**: The Cloud Storage path pattern for the Python code containing your user-defined functions. For example, `gs://your-bucket/your-function.py`. +* **pythonExternalTextTransformFunctionName**: The name of the function to call from your Python file. Use only letters, digits, and underscores. For example, `transform` or `transform_udf1`. +* **useStorageWriteApi**: If `true`, the pipeline uses the BigQuery Storage Write API (https://cloud.google.com/bigquery/docs/write-api). The default value is `false`. For more information, see Using the Storage Write API (https://beam.apache.org/documentation/io/built-in/google-bigquery/#storage-write-api). +* **useStorageWriteApiAtLeastOnce**: When using the Storage Write API, specifies the write semantics. To use at-least-once semantics (https://beam.apache.org/documentation/io/built-in/google-bigquery/#at-least-once-semantics), set this parameter to `true`. To use exactly-once semantics, set the parameter to `false`. This parameter applies only when `useStorageWriteApi` is `true`. The default value is `false`. +* **writeDisposition**: The BigQuery WriteDisposition (https://cloud.google.com/bigquery/docs/reference/rest/v2/Job#jobconfigurationload) value. For example, `WRITE_APPEND`, `WRITE_EMPTY`, or `WRITE_TRUNCATE`. Defaults to `WRITE_APPEND`. +* **createDisposition**: The BigQuery CreateDisposition (https://cloud.google.com/bigquery/docs/reference/rest/v2/Job#jobconfigurationload). For example, `CREATE_IF_NEEDED` and `CREATE_NEVER`. Defaults to `CREATE_IF_NEEDED`.
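For the Xlang variant documented above, the Python UDF parameters slot into the same Terraform shape. This is a minimal sketch under the same assumptions as the earlier example (placeholder project, bucket, dataset, and function names, and an assumed `container_spec_gcs_path` layout), not a copy of the generated example.

```
resource "google_dataflow_flex_template_job" "firestore_to_bigquery_xlang" {
  provider = google-beta
  name     = "firestore-to-bigquery-xlang"
  region   = var.region
  # Assumed template location; adjust to the version you actually deploy.
  container_spec_gcs_path = "gs://dataflow-templates-${var.region}/latest/flex/Firestore_to_BigQuery_Xlang"
  parameters = {
    outputTableSpec                          = "my-project:my_dataset.my_table"  # hypothetical table
    bigQueryLoadingTemporaryDirectory        = "gs://my-bucket/temp"             # hypothetical bucket
    firestoreReadGqlQuery                    = "SELECT * FROM MyKind"
    firestoreReadProjectId                   = "my-project"                      # hypothetical project
    pythonExternalTextTransformGcsPath       = "gs://my-bucket/udfs/my_udf.py"   # hypothetical UDF path
    pythonExternalTextTransformFunctionName  = "transform"
  }
}
```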
@@ -223,13 +223,13 @@ resource "google_dataflow_flex_template_job" "firestore_to_bigquery_xlang" { region = var.region parameters = { outputTableSpec = "" - bigQueryLoadingTemporaryDirectory = "gs://your-bucket/your-files/temp_dir" + bigQueryLoadingTemporaryDirectory = "" firestoreReadGqlQuery = "" firestoreReadProjectId = "" - # bigQuerySchemaPath = "gs://your-bucket/your-schema.json" + # bigQuerySchemaPath = "" # firestoreReadNamespace = "" - # pythonExternalTextTransformGcsPath = "gs://your-bucket/your-function.py" - # pythonExternalTextTransformFunctionName = "'transform' or 'transform_udf1'" + # pythonExternalTextTransformGcsPath = "" + # pythonExternalTextTransformFunctionName = "" # useStorageWriteApi = "false" # useStorageWriteApiAtLeastOnce = "false" # writeDisposition = "WRITE_APPEND" # createDisposition = "CREATE_IF_NEEDED" diff --git a/v2/googlecloud-to-googlecloud/README_GCS_Text_to_BigQuery_Flex.md b/v2/googlecloud-to-googlecloud/README_GCS_Text_to_BigQuery_Flex.md index 0e550f728a..67537d2a1e 100644 --- a/v2/googlecloud-to-googlecloud/README_GCS_Text_to_BigQuery_Flex.md +++ b/v2/googlecloud-to-googlecloud/README_GCS_Text_to_BigQuery_Flex.md @@ -19,17 +19,17 @@ on [Metadata Annotations](https://github.com/GoogleCloudPlatform/DataflowTemplat ### Required parameters -* **inputFilePattern** : The gs:// path to the text in Cloud Storage you'd like to process. (Example: gs://your-bucket/your-file.txt). -* **JSONPath** : The gs:// path to the JSON file that defines your BigQuery schema, stored in Cloud Storage. (Example: gs://your-bucket/your-schema.json). -* **outputTable** : The location of the BigQuery table to use to store the processed data. If you reuse an existing table, it is overwritten. (Example: :.). -* **javascriptTextTransformGcsPath** : The Cloud Storage URI of the `.js` file that defines the JavaScript user-defined function (UDF) you want to use. (Example: gs://your-bucket/your-transforms/*.js). -* **javascriptTextTransformFunctionName** : The name of the JavaScript user-defined function (UDF) that you want to use. For example, if your JavaScript function code is `myTransform(inJson) { /*...do stuff...*/ }`, then the function name is `myTransform`. For sample JavaScript UDFs, see UDF Examples (https://github.com/GoogleCloudPlatform/DataflowTemplates#udf-examples) (Example: transform_udf1). -* **bigQueryLoadingTemporaryDirectory** : Temporary directory for BigQuery loading process. (Example: gs://your-bucket/your-files/temp-dir). +* **inputFilePattern**: The gs:// path to the text in Cloud Storage you'd like to process. For example, `gs://your-bucket/your-file.txt`. +* **JSONPath**: The gs:// path to the JSON file that defines your BigQuery schema, stored in Cloud Storage. For example, `gs://your-bucket/your-schema.json`. +* **outputTable**: The location of the BigQuery table to use to store the processed data. If you reuse an existing table, it is overwritten. For example, `:.`. +* **javascriptTextTransformGcsPath**: The Cloud Storage URI of the `.js` file that defines the JavaScript user-defined function (UDF) you want to use. For example, `gs://your-bucket/your-transforms/*.js`. +* **javascriptTextTransformFunctionName**: The name of the JavaScript user-defined function (UDF) that you want to use. For example, if your JavaScript function code is `myTransform(inJson) { /*...do stuff...*/ }`, then the function name is `myTransform`. For sample JavaScript UDFs, see UDF Examples (https://github.com/GoogleCloudPlatform/DataflowTemplates#udf-examples). For example, `transform_udf1`.
+* **bigQueryLoadingTemporaryDirectory**: Temporary directory for BigQuery loading process. For example, `gs://your-bucket/your-files/temp-dir`. ### Optional parameters -* **useStorageWriteApi** : If `true`, the pipeline uses the BigQuery Storage Write API (https://cloud.google.com/bigquery/docs/write-api). The default value is `false`. For more information, see Using the Storage Write API (https://beam.apache.org/documentation/io/built-in/google-bigquery/#storage-write-api). -* **useStorageWriteApiAtLeastOnce** : When using the Storage Write API, specifies the write semantics. To use at-least-once semantics (https://beam.apache.org/documentation/io/built-in/google-bigquery/#at-least-once-semantics), set this parameter to `true`. To use exactly-once semantics, set the parameter to `false`. This parameter applies only when `useStorageWriteApi` is `true`. The default value is `false`. +* **useStorageWriteApi**: If `true`, the pipeline uses the BigQuery Storage Write API (https://cloud.google.com/bigquery/docs/write-api). The default value is `false`. For more information, see Using the Storage Write API (https://beam.apache.org/documentation/io/built-in/google-bigquery/#storage-write-api). +* **useStorageWriteApiAtLeastOnce**: When using the Storage Write API, specifies the write semantics. To use at-least-once semantics (https://beam.apache.org/documentation/io/built-in/google-bigquery/#at-least-once-semantics), set this parameter to `true`. To use exactly-once semantics, set the parameter to `false`. This parameter applies only when `useStorageWriteApi` is `true`. The default value is `false`. ## User-Defined functions (UDFs) @@ -222,12 +222,12 @@ resource "google_dataflow_flex_template_job" "gcs_text_to_bigquery_flex" { name = "gcs-text-to-bigquery-flex" region = var.region parameters = { - inputFilePattern = "gs://your-bucket/your-file.txt" - JSONPath = "gs://your-bucket/your-schema.json" - outputTable = ":." - javascriptTextTransformGcsPath = "gs://your-bucket/your-transforms/*.js" - javascriptTextTransformFunctionName = "transform_udf1" - bigQueryLoadingTemporaryDirectory = "gs://your-bucket/your-files/temp-dir" + inputFilePattern = "" + JSONPath = "" + outputTable = "" + javascriptTextTransformGcsPath = "" + javascriptTextTransformFunctionName = "" + bigQueryLoadingTemporaryDirectory = "" # useStorageWriteApi = "false" # useStorageWriteApiAtLeastOnce = "false" } diff --git a/v2/googlecloud-to-googlecloud/README_GCS_Text_to_BigQuery_Xlang.md b/v2/googlecloud-to-googlecloud/README_GCS_Text_to_BigQuery_Xlang.md index bc4dc83fbb..d5eb89cf5b 100644 --- a/v2/googlecloud-to-googlecloud/README_GCS_Text_to_BigQuery_Xlang.md +++ b/v2/googlecloud-to-googlecloud/README_GCS_Text_to_BigQuery_Xlang.md @@ -19,17 +19,17 @@ on [Metadata Annotations](https://github.com/GoogleCloudPlatform/DataflowTemplat ### Required parameters -* **inputFilePattern** : The gs:// path to the text in Cloud Storage you'd like to process. (Example: gs://your-bucket/your-file.txt). -* **JSONPath** : The gs:// path to the JSON file that defines your BigQuery schema, stored in Cloud Storage. (Example: gs://your-bucket/your-schema.json). -* **outputTable** : The location of the BigQuery table to use to store the processed data. If you reuse an existing table, it is overwritten. (Example: :.). -* **bigQueryLoadingTemporaryDirectory** : Temporary directory for BigQuery loading process. (Example: gs://your-bucket/your-files/temp-dir). +* **inputFilePattern**: The gs:// path to the text in Cloud Storage you'd like to process. 
For example, `gs://your-bucket/your-file.txt`. +* **JSONPath**: The gs:// path to the JSON file that defines your BigQuery schema, stored in Cloud Storage. For example, `gs://your-bucket/your-schema.json`. +* **outputTable**: The location of the BigQuery table to use to store the processed data. If you reuse an existing table, it is overwritten. For example, `:.`. +* **bigQueryLoadingTemporaryDirectory**: Temporary directory for BigQuery loading process. For example, `gs://your-bucket/your-files/temp-dir`. ### Optional parameters -* **useStorageWriteApi** : If `true`, the pipeline uses the BigQuery Storage Write API (https://cloud.google.com/bigquery/docs/write-api). The default value is `false`. For more information, see Using the Storage Write API (https://beam.apache.org/documentation/io/built-in/google-bigquery/#storage-write-api). -* **useStorageWriteApiAtLeastOnce** : When using the Storage Write API, specifies the write semantics. To use at-least-once semantics (https://beam.apache.org/documentation/io/built-in/google-bigquery/#at-least-once-semantics), set this parameter to `true`. To use exactly-once semantics, set the parameter to `false`. This parameter applies only when `useStorageWriteApi` is `true`. The default value is `false`. -* **pythonExternalTextTransformGcsPath** : The Cloud Storage path pattern for the Python code containing your user-defined functions. (Example: gs://your-bucket/your-function.py). -* **pythonExternalTextTransformFunctionName** : The name of the function to call from your Python file. Use only letters, digits, and underscores. (Example: 'transform' or 'transform_udf1'). +* **useStorageWriteApi**: If `true`, the pipeline uses the BigQuery Storage Write API (https://cloud.google.com/bigquery/docs/write-api). The default value is `false`. For more information, see Using the Storage Write API (https://beam.apache.org/documentation/io/built-in/google-bigquery/#storage-write-api). +* **useStorageWriteApiAtLeastOnce**: When using the Storage Write API, specifies the write semantics. To use at-least-once semantics (https://beam.apache.org/documentation/io/built-in/google-bigquery/#at-least-once-semantics), set this parameter to `true`. To use exactly-once semantics, set the parameter to `false`. This parameter applies only when `useStorageWriteApi` is `true`. The default value is `false`. +* **pythonExternalTextTransformGcsPath**: The Cloud Storage path pattern for the Python code containing your user-defined functions. For example, `gs://your-bucket/your-function.py`. +* **pythonExternalTextTransformFunctionName**: The name of the function to call from your Python file. Use only letters, digits, and underscores. For example, `transform` or `transform_udf1`. @@ -212,14 +212,14 @@ resource "google_dataflow_flex_template_job" "gcs_text_to_bigquery_xlang" { name = "gcs-text-to-bigquery-xlang" region = var.region parameters = { - inputFilePattern = "gs://your-bucket/your-file.txt" - JSONPath = "gs://your-bucket/your-schema.json" - outputTable = ":."
- bigQueryLoadingTemporaryDirectory = "gs://your-bucket/your-files/temp-dir" + inputFilePattern = "" + JSONPath = "" + outputTable = "" + bigQueryLoadingTemporaryDirectory = "" # useStorageWriteApi = "false" # useStorageWriteApiAtLeastOnce = "false" - # pythonExternalTextTransformGcsPath = "gs://your-bucket/your-function.py" - # pythonExternalTextTransformFunctionName = "'transform' or 'transform_udf1'" + # pythonExternalTextTransformGcsPath = "" + # pythonExternalTextTransformFunctionName = "" } } ``` diff --git a/v2/googlecloud-to-googlecloud/README_PubSub_to_BigQuery_Auto.md b/v2/googlecloud-to-googlecloud/README_PubSub_to_BigQuery_Auto.md index 239d289dbd..952ab809b5 100644 --- a/v2/googlecloud-to-googlecloud/README_PubSub_to_BigQuery_Auto.md +++ b/v2/googlecloud-to-googlecloud/README_PubSub_to_BigQuery_Auto.md @@ -15,22 +15,22 @@ on [Metadata Annotations](https://github.com/GoogleCloudPlatform/DataflowTemplat ### Required parameters -* **inputSubscription** : Pub/Sub subscription to read the input from, in the format of 'projects/your-project-id/subscriptions/your-subscription-name'. -* **outputTableSpec** : BigQuery table location to write the output to. The table's schema must match the input JSON objects. +* **inputSubscription**: Pub/Sub subscription to read the input from, in the format of 'projects/your-project-id/subscriptions/your-subscription-name'. +* **outputTableSpec**: BigQuery table location to write the output to. The table's schema must match the input JSON objects. ### Optional parameters -* **bigQuerySchemaPath** : sample text. -* **javascriptTextTransformGcsPath** : The Cloud Storage URI of the .js file that defines the JavaScript user-defined function (UDF) to use. (Example: gs://my-bucket/my-udfs/my_file.js). -* **javascriptTextTransformFunctionName** : The name of the JavaScript user-defined function (UDF) to use. For example, if your JavaScript function code is `myTransform(inJson) { /*...do stuff...*/ }`, then the function name is `myTransform`. For sample JavaScript UDFs, see UDF Examples (https://github.com/GoogleCloudPlatform/DataflowTemplates#udf-examples). -* **javascriptTextTransformReloadIntervalMinutes** : Specifies how frequently to reload the UDF, in minutes. If the value is greater than 0, Dataflow periodically checks the UDF file in Cloud Storage, and reloads the UDF if the file is modified. This parameter allows you to update the UDF while the pipeline is running, without needing to restart the job. If the value is 0, UDF reloading is disabled. The default value is 0. -* **outputDeadletterTable** : Messages failed to reach the output table for all kind of reasons (e.g., mismatched schema, malformed json) are written to this table. It should be in the format of "your-project-id:your-dataset.your-table-name". If it doesn't exist, it will be created during pipeline execution. If not specified, "{outputTableSpec}_error_records" is used instead. -* **writeDisposition** : The BigQuery WriteDisposition (https://cloud.google.com/bigquery/docs/reference/rest/v2/Job#jobconfigurationload) value. For example, `WRITE_APPEND`, `WRITE_EMPTY`, or `WRITE_TRUNCATE`. Defaults to `WRITE_APPEND`. -* **createDisposition** : The BigQuery CreateDisposition (https://cloud.google.com/bigquery/docs/reference/rest/v2/Job#jobconfigurationload). For example, `CREATE_IF_NEEDED` and `CREATE_NEVER`. Defaults to `CREATE_IF_NEEDED`. -* **useStorageWriteApi** : If true, the pipeline uses the BigQuery Storage Write API (https://cloud.google.com/bigquery/docs/write-api). 
The default value is `false`. For more information, see Using the Storage Write API (https://beam.apache.org/documentation/io/built-in/google-bigquery/#storage-write-api). -* **useStorageWriteApiAtLeastOnce** : When using the Storage Write API, specifies the write semantics. To use at-least once semantics (https://beam.apache.org/documentation/io/built-in/google-bigquery/#at-least-once-semantics), set this parameter to `true`. To use exactly-once semantics, set the parameter to `false`. This parameter applies only when `useStorageWriteApi` is `true`. The default value is `false`. -* **numStorageWriteApiStreams** : When using the Storage Write API, specifies the number of write streams. If `useStorageWriteApi` is `true` and `useStorageWriteApiAtLeastOnce` is `false`, then you must set this parameter. Defaults to: 0. -* **storageWriteApiTriggeringFrequencySec** : When using the Storage Write API, specifies the triggering frequency, in seconds. If `useStorageWriteApi` is `true` and `useStorageWriteApiAtLeastOnce` is `false`, then you must set this parameter. +* **bigQuerySchemaPath**: sample text. +* **javascriptTextTransformGcsPath**: The Cloud Storage URI of the .js file that defines the JavaScript user-defined function (UDF) to use. For example, `gs://my-bucket/my-udfs/my_file.js`. +* **javascriptTextTransformFunctionName**: The name of the JavaScript user-defined function (UDF) to use. For example, if your JavaScript function code is `myTransform(inJson) { /*...do stuff...*/ }`, then the function name is `myTransform`. For sample JavaScript UDFs, see UDF Examples (https://github.com/GoogleCloudPlatform/DataflowTemplates#udf-examples). +* **javascriptTextTransformReloadIntervalMinutes**: Specifies how frequently to reload the UDF, in minutes. If the value is greater than 0, Dataflow periodically checks the UDF file in Cloud Storage, and reloads the UDF if the file is modified. This parameter allows you to update the UDF while the pipeline is running, without needing to restart the job. If the value is `0`, UDF reloading is disabled. The default value is `0`. +* **outputDeadletterTable**: Messages that fail to reach the output table for any reason (for example, a mismatched schema or malformed JSON) are written to this table. It should be in the format of `your-project-id:your-dataset.your-table-name`. If it doesn't exist, it is created during pipeline execution. If not specified, `{outputTableSpec}_error_records` is used instead. +* **writeDisposition**: The BigQuery WriteDisposition (https://cloud.google.com/bigquery/docs/reference/rest/v2/Job#jobconfigurationload) value. For example, `WRITE_APPEND`, `WRITE_EMPTY`, or `WRITE_TRUNCATE`. Defaults to `WRITE_APPEND`. +* **createDisposition**: The BigQuery CreateDisposition (https://cloud.google.com/bigquery/docs/reference/rest/v2/Job#jobconfigurationload). For example, `CREATE_IF_NEEDED` and `CREATE_NEVER`. Defaults to `CREATE_IF_NEEDED`. +* **useStorageWriteApi**: If true, the pipeline uses the BigQuery Storage Write API (https://cloud.google.com/bigquery/docs/write-api). The default value is `false`. For more information, see Using the Storage Write API (https://beam.apache.org/documentation/io/built-in/google-bigquery/#storage-write-api). +* **useStorageWriteApiAtLeastOnce**: When using the Storage Write API, specifies the write semantics. To use at-least-once semantics (https://beam.apache.org/documentation/io/built-in/google-bigquery/#at-least-once-semantics), set this parameter to `true`.
To use exactly-once semantics, set the parameter to `false`. This parameter applies only when `useStorageWriteApi` is `true`. The default value is `false`. +* **numStorageWriteApiStreams**: When using the Storage Write API, specifies the number of write streams. If `useStorageWriteApi` is `true` and `useStorageWriteApiAtLeastOnce` is `false`, then you must set this parameter. Defaults to: 0. +* **storageWriteApiTriggeringFrequencySec**: When using the Storage Write API, specifies the triggering frequency, in seconds. If `useStorageWriteApi` is `true` and `useStorageWriteApiAtLeastOnce` is `false`, then you must set this parameter. ## User-Defined functions (UDFs) @@ -241,7 +241,7 @@ resource "google_dataflow_flex_template_job" "pubsub_to_bigquery_auto" { inputSubscription = "" outputTableSpec = "" # bigQuerySchemaPath = "" - # javascriptTextTransformGcsPath = "gs://my-bucket/my-udfs/my_file.js" + # javascriptTextTransformGcsPath = "" # javascriptTextTransformFunctionName = "" # javascriptTextTransformReloadIntervalMinutes = "0" # outputDeadletterTable = "" diff --git a/v2/googlecloud-to-googlecloud/README_PubSub_to_BigQuery_Flex.md b/v2/googlecloud-to-googlecloud/README_PubSub_to_BigQuery_Flex.md index 6fed071961..f8ce04277d 100644 --- a/v2/googlecloud-to-googlecloud/README_PubSub_to_BigQuery_Flex.md +++ b/v2/googlecloud-to-googlecloud/README_PubSub_to_BigQuery_Flex.md @@ -20,20 +20,20 @@ on [Metadata Annotations](https://github.com/GoogleCloudPlatform/DataflowTemplat ### Required parameters -* **outputTableSpec** : The BigQuery table to write to, formatted as `"PROJECT_ID:DATASET_NAME.TABLE_NAME"`. +* **outputTableSpec**: The BigQuery table to write to, formatted as `PROJECT_ID:DATASET_NAME.TABLE_NAME`. ### Optional parameters -* **inputTopic** : The Pub/Sub topic to read from, formatted as `"projects//topics/"`. -* **inputSubscription** : The Pub/Sub subscription to read from, formatted as `"projects//subscriptions/"`. -* **outputDeadletterTable** : The BigQuery table to use for messages that failed to reach the output table, formatted as `"PROJECT_ID:DATASET_NAME.TABLE_NAME"`. If the table doesn't exist, it is created when the pipeline runs. If this parameter is not specified, the value `"OUTPUT_TABLE_SPEC_error_records"` is used instead. -* **useStorageWriteApiAtLeastOnce** : When using the Storage Write API, specifies the write semantics. To use at-least-once semantics (https://beam.apache.org/documentation/io/built-in/google-bigquery/#at-least-once-semantics), set this parameter to true. To use exactly-once semantics, set the parameter to `false`. This parameter applies only when `useStorageWriteApi` is `true`. The default value is `false`. -* **useStorageWriteApi** : If true, the pipeline uses the BigQuery Storage Write API (https://cloud.google.com/bigquery/docs/write-api). The default value is `false`. For more information, see Using the Storage Write API (https://beam.apache.org/documentation/io/built-in/google-bigquery/#storage-write-api). -* **numStorageWriteApiStreams** : When using the Storage Write API, specifies the number of write streams. If `useStorageWriteApi` is `true` and `useStorageWriteApiAtLeastOnce` is `false`, then you must set this parameter. Defaults to: 0. -* **storageWriteApiTriggeringFrequencySec** : When using the Storage Write API, specifies the triggering frequency, in seconds. If `useStorageWriteApi` is `true` and `useStorageWriteApiAtLeastOnce` is `false`, then you must set this parameter. 
-* **javascriptTextTransformGcsPath** : The Cloud Storage URI of the .js file that defines the JavaScript user-defined function (UDF) to use. (Example: gs://my-bucket/my-udfs/my_file.js). -* **javascriptTextTransformFunctionName** : The name of the JavaScript user-defined function (UDF) to use. For example, if your JavaScript function code is `myTransform(inJson) { /*...do stuff...*/ }`, then the function name is `myTransform`. For sample JavaScript UDFs, see UDF Examples (https://github.com/GoogleCloudPlatform/DataflowTemplates#udf-examples). -* **javascriptTextTransformReloadIntervalMinutes** : Specifies how frequently to reload the UDF, in minutes. If the value is greater than 0, Dataflow periodically checks the UDF file in Cloud Storage, and reloads the UDF if the file is modified. This parameter allows you to update the UDF while the pipeline is running, without needing to restart the job. If the value is 0, UDF reloading is disabled. The default value is 0. +* **inputTopic**: The Pub/Sub topic to read from, formatted as `projects//topics/`. +* **inputSubscription**: The Pub/Sub subscription to read from, formatted as `projects//subscriptions/`. +* **outputDeadletterTable**: The BigQuery table to use for messages that failed to reach the output table, formatted as `PROJECT_ID:DATASET_NAME.TABLE_NAME`. If the table doesn't exist, it is created when the pipeline runs. If this parameter is not specified, the value `OUTPUT_TABLE_SPEC_error_records` is used instead. +* **useStorageWriteApiAtLeastOnce**: When using the Storage Write API, specifies the write semantics. To use at-least-once semantics (https://beam.apache.org/documentation/io/built-in/google-bigquery/#at-least-once-semantics), set this parameter to true. To use exactly-once semantics, set the parameter to `false`. This parameter applies only when `useStorageWriteApi` is `true`. The default value is `false`. +* **useStorageWriteApi**: If true, the pipeline uses the BigQuery Storage Write API (https://cloud.google.com/bigquery/docs/write-api). The default value is `false`. For more information, see Using the Storage Write API (https://beam.apache.org/documentation/io/built-in/google-bigquery/#storage-write-api). +* **numStorageWriteApiStreams**: When using the Storage Write API, specifies the number of write streams. If `useStorageWriteApi` is `true` and `useStorageWriteApiAtLeastOnce` is `false`, then you must set this parameter. Defaults to: 0. +* **storageWriteApiTriggeringFrequencySec**: When using the Storage Write API, specifies the triggering frequency, in seconds. If `useStorageWriteApi` is `true` and `useStorageWriteApiAtLeastOnce` is `false`, then you must set this parameter. +* **javascriptTextTransformGcsPath**: The Cloud Storage URI of the .js file that defines the JavaScript user-defined function (UDF) to use. For example, `gs://my-bucket/my-udfs/my_file.js`. +* **javascriptTextTransformFunctionName**: The name of the JavaScript user-defined function (UDF) to use. For example, if your JavaScript function code is `myTransform(inJson) { /*...do stuff...*/ }`, then the function name is `myTransform`. For sample JavaScript UDFs, see UDF Examples (https://github.com/GoogleCloudPlatform/DataflowTemplates#udf-examples). +* **javascriptTextTransformReloadIntervalMinutes**: Specifies how frequently to reload the UDF, in minutes. If the value is greater than 0, Dataflow periodically checks the UDF file in Cloud Storage, and reloads the UDF if the file is modified. 
This parameter allows you to update the UDF while the pipeline is running, without needing to restart the job. If the value is `0`, UDF reloading is disabled. The default value is `0`. ## User-Defined functions (UDFs) @@ -243,7 +243,7 @@ resource "google_dataflow_flex_template_job" "pubsub_to_bigquery_flex" { # useStorageWriteApi = "false" # numStorageWriteApiStreams = "0" # storageWriteApiTriggeringFrequencySec = "" - # javascriptTextTransformGcsPath = "gs://my-bucket/my-udfs/my_file.js" + # javascriptTextTransformGcsPath = "" # javascriptTextTransformFunctionName = "" # javascriptTextTransformReloadIntervalMinutes = "0" } diff --git a/v2/googlecloud-to-googlecloud/README_PubSub_to_BigQuery_Xlang.md b/v2/googlecloud-to-googlecloud/README_PubSub_to_BigQuery_Xlang.md index 7cdc228797..02f874fd90 100644 --- a/v2/googlecloud-to-googlecloud/README_PubSub_to_BigQuery_Xlang.md +++ b/v2/googlecloud-to-googlecloud/README_PubSub_to_BigQuery_Xlang.md @@ -20,19 +20,19 @@ on [Metadata Annotations](https://github.com/GoogleCloudPlatform/DataflowTemplat ### Required parameters -* **outputTableSpec** : The BigQuery table to write to, formatted as `"PROJECT_ID:DATASET_NAME.TABLE_NAME"`. +* **outputTableSpec**: The BigQuery table to write to, formatted as `PROJECT_ID:DATASET_NAME.TABLE_NAME`. ### Optional parameters -* **inputTopic** : The Pub/Sub topic to read from, formatted as `"projects//topics/"`. -* **inputSubscription** : The Pub/Sub subscription to read from, formatted as `"projects//subscriptions/"`. -* **outputDeadletterTable** : The BigQuery table to use for messages that failed to reach the output table, formatted as `"PROJECT_ID:DATASET_NAME.TABLE_NAME"`. If the table doesn't exist, it is created when the pipeline runs. If this parameter is not specified, the value `"OUTPUT_TABLE_SPEC_error_records"` is used instead. -* **useStorageWriteApiAtLeastOnce** : When using the Storage Write API, specifies the write semantics. To use at-least-once semantics (https://beam.apache.org/documentation/io/built-in/google-bigquery/#at-least-once-semantics), set this parameter to true. To use exactly-once semantics, set the parameter to `false`. This parameter applies only when `useStorageWriteApi` is `true`. The default value is `false`. -* **useStorageWriteApi** : If true, the pipeline uses the BigQuery Storage Write API (https://cloud.google.com/bigquery/docs/write-api). The default value is `false`. For more information, see Using the Storage Write API (https://beam.apache.org/documentation/io/built-in/google-bigquery/#storage-write-api). -* **numStorageWriteApiStreams** : When using the Storage Write API, specifies the number of write streams. If `useStorageWriteApi` is `true` and `useStorageWriteApiAtLeastOnce` is `false`, then you must set this parameter. Defaults to: 0. -* **storageWriteApiTriggeringFrequencySec** : When using the Storage Write API, specifies the triggering frequency, in seconds. If `useStorageWriteApi` is `true` and `useStorageWriteApiAtLeastOnce` is `false`, then you must set this parameter. -* **pythonExternalTextTransformGcsPath** : The Cloud Storage path pattern for the Python code containing your user-defined functions. (Example: gs://your-bucket/your-function.py). -* **pythonExternalTextTransformFunctionName** : The name of the function to call from your Python file. Use only letters, digits, and underscores. (Example: 'transform' or 'transform_udf1'). +* **inputTopic**: The Pub/Sub topic to read from, formatted as `projects//topics/`. 
+* **inputSubscription**: The Pub/Sub subscription to read from, formatted as `projects//subscriptions/`. +* **outputDeadletterTable**: The BigQuery table to use for messages that failed to reach the output table, formatted as `PROJECT_ID:DATASET_NAME.TABLE_NAME`. If the table doesn't exist, it is created when the pipeline runs. If this parameter is not specified, the value `OUTPUT_TABLE_SPEC_error_records` is used instead. +* **useStorageWriteApiAtLeastOnce**: When using the Storage Write API, specifies the write semantics. To use at-least-once semantics (https://beam.apache.org/documentation/io/built-in/google-bigquery/#at-least-once-semantics), set this parameter to true. To use exactly-once semantics, set the parameter to `false`. This parameter applies only when `useStorageWriteApi` is `true`. The default value is `false`. +* **useStorageWriteApi**: If true, the pipeline uses the BigQuery Storage Write API (https://cloud.google.com/bigquery/docs/write-api). The default value is `false`. For more information, see Using the Storage Write API (https://beam.apache.org/documentation/io/built-in/google-bigquery/#storage-write-api). +* **numStorageWriteApiStreams**: When using the Storage Write API, specifies the number of write streams. If `useStorageWriteApi` is `true` and `useStorageWriteApiAtLeastOnce` is `false`, then you must set this parameter. Defaults to: 0. +* **storageWriteApiTriggeringFrequencySec**: When using the Storage Write API, specifies the triggering frequency, in seconds. If `useStorageWriteApi` is `true` and `useStorageWriteApiAtLeastOnce` is `false`, then you must set this parameter. +* **pythonExternalTextTransformGcsPath**: The Cloud Storage path pattern for the Python code containing your user-defined functions. For example, `gs://your-bucket/your-function.py`. +* **pythonExternalTextTransformFunctionName**: The name of the function to call from your Python file. Use only letters, digits, and underscores. For example, `'transform' or 'transform_udf1'`. @@ -229,8 +229,8 @@ resource "google_dataflow_flex_template_job" "pubsub_to_bigquery_xlang" { # useStorageWriteApi = "false" # numStorageWriteApiStreams = "0" # storageWriteApiTriggeringFrequencySec = "" - # pythonExternalTextTransformGcsPath = "gs://your-bucket/your-function.py" - # pythonExternalTextTransformFunctionName = "'transform' or 'transform_udf1'" + # pythonExternalTextTransformGcsPath = "" + # pythonExternalTextTransformFunctionName = "" } } ``` diff --git a/v2/googlecloud-to-googlecloud/README_Pubsub_to_Jdbc.md b/v2/googlecloud-to-googlecloud/README_Pubsub_to_Jdbc.md index a25c488f12..eed46b8c56 100644 --- a/v2/googlecloud-to-googlecloud/README_Pubsub_to_Jdbc.md +++ b/v2/googlecloud-to-googlecloud/README_Pubsub_to_Jdbc.md @@ -18,21 +18,21 @@ on [Metadata Annotations](https://github.com/GoogleCloudPlatform/DataflowTemplat ### Required parameters -* **inputSubscription** : The Pub/Sub input subscription to read from, in the format of 'projects//subscriptions/' (Example: projects/your-project-id/subscriptions/your-subscription-name). -* **driverClassName** : The JDBC driver class name. (Example: com.mysql.jdbc.Driver). -* **connectionUrl** : The JDBC connection URL string. You can pass in this value as a string that's encrypted with a Cloud KMS key and then Base64-encoded. Remove whitespace characters from the Base64-encoded string. (Example: jdbc:mysql://some-host:3306/sampledb). -* **driverJars** : Comma separated Cloud Storage paths for JDBC drivers. 
(Example: gs://your-bucket/driver_jar1.jar,gs://your-bucket/driver_jar2.jar). -* **statement** : The statement to run against the database. The statement must specify the column names of the table in any order. Only the values of the specified column names are read from the JSON and added to the statement. (Example: INSERT INTO tableName (column1, column2) VALUES (?,?)). -* **outputDeadletterTopic** : The Pub/Sub topic to forward undeliverable messages to. (Example: projects//topics/). +* **inputSubscription**: The Pub/Sub input subscription to read from. For example, `projects//subscriptions/`. +* **driverClassName**: The JDBC driver class name. For example, `com.mysql.jdbc.Driver`. +* **connectionUrl**: The JDBC connection URL string. You can pass in this value as a string that's encrypted with a Cloud KMS key and then Base64-encoded. Remove whitespace characters from the Base64-encoded string. For example, `jdbc:mysql://some-host:3306/sampledb`. +* **driverJars**: Comma separated Cloud Storage paths for JDBC drivers. For example, `gs://your-bucket/driver_jar1.jar,gs://your-bucket/driver_jar2.jar`. +* **statement**: The statement to run against the database. The statement must specify the column names of the table in any order. Only the values of the specified column names are read from the JSON and added to the statement. For example, `INSERT INTO tableName (column1, column2) VALUES (?,?)`. +* **outputDeadletterTopic**: The Pub/Sub topic to forward undeliverable messages to. For example, `projects//topics/`. ### Optional parameters -* **username** : The username to use for the JDBC connection. You can pass in this value as a string that's encrypted with a Cloud KMS key and then Base64-encoded. -* **password** : The password to use for the JDBC connection. You can pass in this value as a string that's encrypted with a Cloud KMS key and then Base64-encoded. -* **connectionProperties** : The properties string to use for the JDBC connection. The string must use the format `[propertyName=property;]*`. (Example: unicode=true;characterEncoding=UTF-8). -* **KMSEncryptionKey** : The Cloud KMS Encryption Key to use to decrypt the username, password, and connection string. If a Cloud KMS key is passed in, the username, password, and connection string must all be passed in encrypted. (Example: projects/{gcp_project}/locations/{key_region}/keyRings/{key_ring}/cryptoKeys/{kms_key_name}). -* **disabledAlgorithms** : Comma separated algorithms to disable. If this value is set to none, no algorithm is disabled. Use this parameter with caution, because the algorithms disabled by default might have vulnerabilities or performance issues. (Example: SSLv3, RC4). -* **extraFilesToStage** : Comma separated Cloud Storage paths or Secret Manager secrets for files to stage in the worker. These files are saved in the /extra_files directory in each worker. (Example: gs:///file.txt,projects//secrets//versions/). +* **username**: The username to use for the JDBC connection. You can pass in this value as a string that's encrypted with a Cloud KMS key and then Base64-encoded. +* **password**: The password to use for the JDBC connection. You can pass in this value as a string that's encrypted with a Cloud KMS key and then Base64-encoded. +* **connectionProperties**: The properties string to use for the JDBC connection. The string must use the format `[propertyName=property;]*`. For example, `unicode=true;characterEncoding=UTF-8`. 
+* **KMSEncryptionKey**: The Cloud KMS Encryption Key to use to decrypt the username, password, and connection string. If a Cloud KMS key is passed in, the username, password, and connection string must all be passed in encrypted. For example, `projects/{gcp_project}/locations/{key_region}/keyRings/{key_ring}/cryptoKeys/{kms_key_name}`. +* **disabledAlgorithms**: Comma separated algorithms to disable. If this value is set to `none`, no algorithm is disabled. Use this parameter with caution, because the algorithms disabled by default might have vulnerabilities or performance issues. For example, `SSLv3, RC4`. +* **extraFilesToStage**: Comma separated Cloud Storage paths or Secret Manager secrets for files to stage in the worker. These files are saved in the /extra_files directory in each worker. For example, `gs:///file.txt,projects//secrets//versions/`. @@ -227,18 +227,18 @@ resource "google_dataflow_flex_template_job" "pubsub_to_jdbc" { name = "pubsub-to-jdbc" region = var.region parameters = { - inputSubscription = "projects/your-project-id/subscriptions/your-subscription-name" - driverClassName = "com.mysql.jdbc.Driver" - connectionUrl = "jdbc:mysql://some-host:3306/sampledb" - driverJars = "gs://your-bucket/driver_jar1.jar,gs://your-bucket/driver_jar2.jar" - statement = "INSERT INTO tableName (column1, column2) VALUES (?,?)" - outputDeadletterTopic = "projects//topics/" + inputSubscription = "" + driverClassName = "" + connectionUrl = "" + driverJars = "" + statement = "" + outputDeadletterTopic = "" # username = "" # password = "" - # connectionProperties = "unicode=true;characterEncoding=UTF-8" - # KMSEncryptionKey = "projects/{gcp_project}/locations/{key_region}/keyRings/{key_ring}/cryptoKeys/{kms_key_name}" - # disabledAlgorithms = "SSLv3, RC4" - # extraFilesToStage = "gs:///file.txt,projects//secrets//versions/" + # connectionProperties = "" + # KMSEncryptionKey = "" + # disabledAlgorithms = "" + # extraFilesToStage = "" } } ``` diff --git a/v2/googlecloud-to-googlecloud/README_Spanner_Change_Streams_to_BigQuery.md b/v2/googlecloud-to-googlecloud/README_Spanner_Change_Streams_to_BigQuery.md index 01427afc3e..02702d0bcc 100644 --- a/v2/googlecloud-to-googlecloud/README_Spanner_Change_Streams_to_BigQuery.md +++ b/v2/googlecloud-to-googlecloud/README_Spanner_Change_Streams_to_BigQuery.md @@ -98,32 +98,32 @@ on [Metadata Annotations](https://github.com/GoogleCloudPlatform/DataflowTemplat ### Required parameters -* **spannerInstanceId** : The Spanner instance to read change streams from. -* **spannerDatabase** : The Spanner database to read change streams from. -* **spannerMetadataInstanceId** : The Spanner instance to use for the change streams connector metadata table. -* **spannerMetadataDatabase** : The Spanner database to use for the change streams connector metadata table. -* **spannerChangeStreamName** : The name of the Spanner change stream to read from. -* **bigQueryDataset** : The BigQuery dataset for change streams output. +* **spannerInstanceId**: The Spanner instance to read change streams from. +* **spannerDatabase**: The Spanner database to read change streams from. +* **spannerMetadataInstanceId**: The Spanner instance to use for the change streams connector metadata table. +* **spannerMetadataDatabase**: The Spanner database to use for the change streams connector metadata table. +* **spannerChangeStreamName**: The name of the Spanner change stream to read from. +* **bigQueryDataset**: The BigQuery dataset for change streams output. 
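
To make the `statement` contract described above more concrete, the following is a minimal, hypothetical Java sketch, not code from the Pubsub_to_Jdbc template itself, of how a parameterized `INSERT` like the documented example could be populated from the fields of a JSON payload. The connection URL, credentials, hard-coded column bindings, and the use of `org.json` are illustrative assumptions.

```java
import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.PreparedStatement;
import org.json.JSONObject;

public class JsonToJdbcSketch {
  public static void main(String[] args) throws Exception {
    // Illustrative values; the template receives these through its connectionUrl and statement parameters.
    String connectionUrl = "jdbc:mysql://some-host:3306/sampledb";
    String statement = "INSERT INTO tableName (column1, column2) VALUES (?,?)";

    // A JSON message whose keys match the column names listed in the statement.
    // Keys not named in the statement (such as "ignored") are not read.
    JSONObject message =
        new JSONObject("{\"column1\": \"abc\", \"column2\": 42, \"ignored\": true}");

    try (Connection conn = DriverManager.getConnection(connectionUrl, "user", "password");
        PreparedStatement ps = conn.prepareStatement(statement)) {
      // Bind only the values of the columns that the statement names, in the order they appear.
      ps.setObject(1, message.get("column1"));
      ps.setObject(2, message.get("column2"));
      ps.executeUpdate();
    }
  }
}
```

In the real pipeline, the driver class and JAR locations come from the `driverClassName` and `driverJars` parameters, and the connection string and credentials may be supplied Base64-encoded and encrypted with the `KMSEncryptionKey` described above.
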
### Optional parameters -* **spannerProjectId** : The project to read change streams from. This value is also the project where the change streams connector metadata table is created. The default value for this parameter is the project where the Dataflow pipeline is running. -* **spannerDatabaseRole** : The Spanner database role to use when running the template. This parameter is required only when the IAM principal who is running the template is a fine-grained access control user. The database role must have the SELECT privilege on the change stream and the EXECUTE privilege on the change stream's read function. For more information, see Fine-grained access control for change streams (https://cloud.google.com/spanner/docs/fgac-change-streams). -* **spannerMetadataTableName** : The Spanner change streams connector metadata table name to use. If not provided, a Spanner change streams connector metadata table is automatically created during the pipeline flow. You must provide this parameter when updating an existing pipeline. Otherwise, don't provide this parameter. -* **rpcPriority** : The request priority for Spanner calls. The value must be one of the following values: `HIGH`, `MEDIUM`, or `LOW`. The default value is `HIGH`. -* **spannerHost** : The Cloud Spanner endpoint to call in the template. Only used for testing. (Example: https://batch-spanner.googleapis.com). -* **startTimestamp** : The starting DateTime (https://datatracker.ietf.org/doc/html/rfc3339), inclusive, to use for reading change streams. Ex-2021-10-12T07:20:50.52Z. Defaults to the timestamp when the pipeline starts, that is, the current time. -* **endTimestamp** : The ending DateTime (https://datatracker.ietf.org/doc/html/rfc3339), inclusive, to use for reading change streams.Ex-2021-10-12T07:20:50.52Z. Defaults to an infinite time in the future. -* **bigQueryProjectId** : The BigQuery project. The default value is the project for the Dataflow job. -* **bigQueryChangelogTableNameTemplate** : The template for the name of the BigQuery table that contains the changelog. Defaults to: {_metadata_spanner_table_name}_changelog. -* **deadLetterQueueDirectory** : The path to store any unprocessed records. The default path is a directory under the Dataflow job's temp location. The default value is usually sufficient. -* **dlqRetryMinutes** : The number of minutes between dead-letter queue retries. The default value is 10. -* **ignoreFields** : A comma-separated list of fields (case sensitive) to ignore. These fields might be fields of watched tables, or metadata fields added by the pipeline. Ignored fields aren't inserted into BigQuery. When you ignore the _metadata_spanner_table_name field, the bigQueryChangelogTableNameTemplate parameter is also ignored. Defaults to empty. -* **disableDlqRetries** : Whether or not to disable retries for the DLQ. Defaults to: false. -* **useStorageWriteApi** : If true, the pipeline uses the BigQuery Storage Write API (https://cloud.google.com/bigquery/docs/write-api). The default value is `false`. For more information, see Using the Storage Write API (https://beam.apache.org/documentation/io/built-in/google-bigquery/#storage-write-api). -* **useStorageWriteApiAtLeastOnce** : When using the Storage Write API, specifies the write semantics. To use at-least once semantics (https://beam.apache.org/documentation/io/built-in/google-bigquery/#at-least-once-semantics), set this parameter to `true`. To use exactly-once semantics, set the parameter to `false`. 
This parameter applies only when `useStorageWriteApi` is `true`. The default value is `false`. -* **numStorageWriteApiStreams** : When using the Storage Write API, specifies the number of write streams. If `useStorageWriteApi` is `true` and `useStorageWriteApiAtLeastOnce` is `false`, then you must set this parameter. Defaults to: 0. -* **storageWriteApiTriggeringFrequencySec** : When using the Storage Write API, specifies the triggering frequency, in seconds. If `useStorageWriteApi` is `true` and `useStorageWriteApiAtLeastOnce` is `false`, then you must set this parameter. +* **spannerProjectId**: The project to read change streams from. This value is also the project where the change streams connector metadata table is created. The default value for this parameter is the project where the Dataflow pipeline is running. +* **spannerDatabaseRole**: The Spanner database role to use when running the template. This parameter is required only when the IAM principal who is running the template is a fine-grained access control user. The database role must have the `SELECT` privilege on the change stream and the `EXECUTE` privilege on the change stream's read function. For more information, see Fine-grained access control for change streams (https://cloud.google.com/spanner/docs/fgac-change-streams). +* **spannerMetadataTableName**: The Spanner change streams connector metadata table name to use. If not provided, a Spanner change streams connector metadata table is automatically created during the pipeline flow. You must provide this parameter when updating an existing pipeline. Otherwise, don't provide this parameter. +* **rpcPriority**: The request priority for Spanner calls. The value must be one of the following values: `HIGH`, `MEDIUM`, or `LOW`. The default value is `HIGH`. +* **spannerHost**: The Cloud Spanner endpoint to call in the template. Only used for testing. For example, `https://batch-spanner.googleapis.com`. +* **startTimestamp**: The starting DateTime (https://datatracker.ietf.org/doc/html/rfc3339), inclusive, to use for reading change streams. For example, `2021-10-12T07:20:50.52Z`. Defaults to the timestamp when the pipeline starts, that is, the current time. +* **endTimestamp**: The ending DateTime (https://datatracker.ietf.org/doc/html/rfc3339), inclusive, to use for reading change streams. For example, `2021-10-12T07:20:50.52Z`. Defaults to an infinite time in the future. +* **bigQueryProjectId**: The BigQuery project. The default value is the project for the Dataflow job. +* **bigQueryChangelogTableNameTemplate**: The template for the name of the BigQuery table that contains the changelog. Defaults to: `{_metadata_spanner_table_name}_changelog`. +* **deadLetterQueueDirectory**: The path to store any unprocessed records. The default path is a directory under the Dataflow job's temp location. The default value is usually sufficient. +* **dlqRetryMinutes**: The number of minutes between dead-letter queue retries. The default value is `10`. +* **ignoreFields**: A comma-separated list of fields (case sensitive) to ignore. These fields might be fields of watched tables, or metadata fields added by the pipeline. Ignored fields aren't inserted into BigQuery. When you ignore the `_metadata_spanner_table_name` field, the `bigQueryChangelogTableNameTemplate` parameter is also ignored. Defaults to empty. +* **disableDlqRetries**: Whether or not to disable retries for the DLQ. Defaults to: false.
+* **useStorageWriteApi**: If true, the pipeline uses the BigQuery Storage Write API (https://cloud.google.com/bigquery/docs/write-api). The default value is `false`. For more information, see Using the Storage Write API (https://beam.apache.org/documentation/io/built-in/google-bigquery/#storage-write-api). +* **useStorageWriteApiAtLeastOnce**: When using the Storage Write API, specifies the write semantics. To use at-least once semantics (https://beam.apache.org/documentation/io/built-in/google-bigquery/#at-least-once-semantics), set this parameter to `true`. To use exactly-once semantics, set the parameter to `false`. This parameter applies only when `useStorageWriteApi` is `true`. The default value is `false`. +* **numStorageWriteApiStreams**: When using the Storage Write API, specifies the number of write streams. If `useStorageWriteApi` is `true` and `useStorageWriteApiAtLeastOnce` is `false`, then you must set this parameter. Defaults to: 0. +* **storageWriteApiTriggeringFrequencySec**: When using the Storage Write API, specifies the triggering frequency, in seconds. If `useStorageWriteApi` is `true` and `useStorageWriteApiAtLeastOnce` is `false`, then you must set this parameter. @@ -361,7 +361,7 @@ resource "google_dataflow_flex_template_job" "spanner_change_streams_to_bigquery # spannerDatabaseRole = "" # spannerMetadataTableName = "" # rpcPriority = "HIGH" - # spannerHost = "https://batch-spanner.googleapis.com" + # spannerHost = "" # startTimestamp = "" # endTimestamp = "" # bigQueryProjectId = "" diff --git a/v2/googlecloud-to-googlecloud/README_Spanner_Change_Streams_to_Google_Cloud_Storage.md b/v2/googlecloud-to-googlecloud/README_Spanner_Change_Streams_to_Google_Cloud_Storage.md index 88c2cc987c..f5ac99b0ea 100644 --- a/v2/googlecloud-to-googlecloud/README_Spanner_Change_Streams_to_Google_Cloud_Storage.md +++ b/v2/googlecloud-to-googlecloud/README_Spanner_Change_Streams_to_Google_Cloud_Storage.md @@ -42,26 +42,26 @@ on [Metadata Annotations](https://github.com/GoogleCloudPlatform/DataflowTemplat ### Required parameters -* **spannerInstanceId** : The Spanner instance ID to read change streams data from. -* **spannerDatabase** : The Spanner database to read change streams data from. -* **spannerMetadataInstanceId** : The Spanner instance ID to use for the change streams connector metadata table. -* **spannerMetadataDatabase** : The Spanner database to use for the change streams connector metadata table. -* **spannerChangeStreamName** : The name of the Spanner change stream to read from. -* **gcsOutputDirectory** : The path and filename prefix for writing output files. Must end with a slash. DateTime formatting is used to parse directory path for date & time formatters. (Example: gs://your-bucket/your-path). +* **spannerInstanceId**: The Spanner instance ID to read change streams data from. +* **spannerDatabase**: The Spanner database to read change streams data from. +* **spannerMetadataInstanceId**: The Spanner instance ID to use for the change streams connector metadata table. +* **spannerMetadataDatabase**: The Spanner database to use for the change streams connector metadata table. +* **spannerChangeStreamName**: The name of the Spanner change stream to read from. +* **gcsOutputDirectory**: The path and filename prefix for writing output files. Must end with a slash. DateTime formatting is used to parse directory path for date & time formatters. For example, `gs://your-bucket/your-path`. 
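
The Storage Write API options above interact in a way that is easy to misconfigure, so here is a small, hypothetical Java sketch, not part of these templates, of the validation rule the descriptions imply: when `useStorageWriteApi` is `true` and `useStorageWriteApiAtLeastOnce` is `false`, both `numStorageWriteApiStreams` and `storageWriteApiTriggeringFrequencySec` must be set.

```java
/** Illustrative check of the documented Storage Write API parameter constraint. */
public final class StorageWriteApiOptionsCheck {

  /** Throws if exactly-once Storage Write API mode is requested without its required settings. */
  public static void validate(
      boolean useStorageWriteApi,
      boolean useStorageWriteApiAtLeastOnce,
      Integer numStorageWriteApiStreams,
      Integer storageWriteApiTriggeringFrequencySec) {
    if (useStorageWriteApi && !useStorageWriteApiAtLeastOnce) {
      if (numStorageWriteApiStreams == null || numStorageWriteApiStreams <= 0) {
        throw new IllegalArgumentException(
            "numStorageWriteApiStreams must be set when useStorageWriteApi=true and "
                + "useStorageWriteApiAtLeastOnce=false");
      }
      if (storageWriteApiTriggeringFrequencySec == null
          || storageWriteApiTriggeringFrequencySec <= 0) {
        throw new IllegalArgumentException(
            "storageWriteApiTriggeringFrequencySec must be set when useStorageWriteApi=true and "
                + "useStorageWriteApiAtLeastOnce=false");
      }
    }
  }

  public static void main(String[] args) {
    // Exactly-once mode with both required settings supplied passes the check.
    validate(true, false, 2, 10);
  }
}
```
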
### Optional parameters -* **spannerProjectId** : The ID of the Google Cloud project that contains the Spanner database to read change streams from. This project is also where the change streams connector metadata table is created. The default for this parameter is the project where the Dataflow pipeline is running. -* **spannerDatabaseRole** : The Spanner database role to use when running the template. This parameter is required only when the IAM principal who is running the template is a fine-grained access control user. The database role must have the `SELECT` privilege on the change stream and the `EXECUTE` privilege on the change stream's read function. For more information, see Fine-grained access control for change streams (https://cloud.google.com/spanner/docs/fgac-change-streams). -* **spannerMetadataTableName** : The Spanner change streams connector metadata table name to use. If not provided, a Spanner change streams metadata table is automatically created during pipeline execution. You must provide a value for this parameter when updating an existing pipeline. Otherwise, don't use this parameter. -* **startTimestamp** : The starting DateTime, inclusive, to use for reading change streams, in the format Ex-2021-10-12T07:20:50.52Z. Defaults to the timestamp when the pipeline starts, that is, the current time. -* **endTimestamp** : The ending DateTime, inclusive, to use for reading change streams. For example, Ex-2021-10-12T07:20:50.52Z. Defaults to an infinite time in the future. -* **spannerHost** : The Cloud Spanner endpoint to call in the template. Only used for testing. (Example: https://spanner.googleapis.com). Defaults to: https://spanner.googleapis.com. -* **outputFileFormat** : The format of the output Cloud Storage file. Allowed formats are TEXT and AVRO. Defaults to AVRO. -* **windowDuration** : The window duration is the interval in which data is written to the output directory. Configure the duration based on the pipeline's throughput. For example, a higher throughput might require smaller window sizes so that the data fits into memory. Defaults to 5m (five minutes), with a minimum of 1s (one second). Allowed formats are: [int]s (for seconds, example: 5s), [int]m (for minutes, example: 12m), [int]h (for hours, example: 2h). (Example: 5m). -* **rpcPriority** : The request priority for Spanner calls. The value must be HIGH, MEDIUM, or LOW. Defaults to HIGH. -* **outputFilenamePrefix** : The prefix to place on each windowed file. (Example: output-). Defaults to: output. -* **numShards** : The maximum number of output shards produced when writing. A higher number of shards means higher throughput for writing to Cloud Storage, but potentially higher data aggregation cost across shards when processing output Cloud Storage files. Defaults to: 20. +* **spannerProjectId**: The ID of the Google Cloud project that contains the Spanner database to read change streams from. This project is also where the change streams connector metadata table is created. The default for this parameter is the project where the Dataflow pipeline is running. +* **spannerDatabaseRole**: The Spanner database role to use when running the template. This parameter is required only when the IAM principal who is running the template is a fine-grained access control user. The database role must have the `SELECT` privilege on the change stream and the `EXECUTE` privilege on the change stream's read function. 
For more information, see Fine-grained access control for change streams (https://cloud.google.com/spanner/docs/fgac-change-streams). +* **spannerMetadataTableName**: The Spanner change streams connector metadata table name to use. If not provided, a Spanner change streams metadata table is automatically created during pipeline execution. You must provide a value for this parameter when updating an existing pipeline. Otherwise, don't use this parameter. +* **startTimestamp**: The starting DateTime, inclusive, to use for reading change streams, in the format `Ex-2021-10-12T07:20:50.52Z`. Defaults to the timestamp when the pipeline starts, that is, the current time. +* **endTimestamp**: The ending DateTime, inclusive, to use for reading change streams. For example, `Ex-2021-10-12T07:20:50.52Z`. Defaults to an infinite time in the future. +* **spannerHost**: The Cloud Spanner endpoint to call in the template. Only used for testing. For example, `https://spanner.googleapis.com`. Defaults to: https://spanner.googleapis.com. +* **outputFileFormat**: The format of the output Cloud Storage file. Allowed formats are `TEXT` and `AVRO`. Defaults to `AVRO`. +* **windowDuration**: The window duration is the interval in which data is written to the output directory. Configure the duration based on the pipeline's throughput. For example, a higher throughput might require smaller window sizes so that the data fits into memory. Defaults to 5m (five minutes), with a minimum of 1s (one second). Allowed formats are: [int]s (for seconds, example: 5s), [int]m (for minutes, example: 12m), [int]h (for hours, example: 2h). For example, `5m`. +* **rpcPriority**: The request priority for Spanner calls. The value must be `HIGH`, `MEDIUM`, or `LOW`. Defaults to `HIGH`. +* **outputFilenamePrefix**: The prefix to place on each windowed file. For example, `output-`. Defaults to: output. +* **numShards**: The maximum number of output shards produced when writing. A higher number of shards means higher throughput for writing to Cloud Storage, but potentially higher data aggregation cost across shards when processing output Cloud Storage files. Defaults to: 20. @@ -276,7 +276,7 @@ resource "google_dataflow_flex_template_job" "spanner_change_streams_to_google_c spannerMetadataInstanceId = "" spannerMetadataDatabase = "" spannerChangeStreamName = "" - gcsOutputDirectory = "gs://your-bucket/your-path" + gcsOutputDirectory = "" # spannerProjectId = "" # spannerDatabaseRole = "" # spannerMetadataTableName = "" @@ -286,7 +286,7 @@ resource "google_dataflow_flex_template_job" "spanner_change_streams_to_google_c # outputFileFormat = "AVRO" # windowDuration = "5m" # rpcPriority = "HIGH" - # outputFilenamePrefix = "output-" + # outputFilenamePrefix = "output" # numShards = "20" } } diff --git a/v2/googlecloud-to-googlecloud/README_Spanner_Change_Streams_to_PubSub.md b/v2/googlecloud-to-googlecloud/README_Spanner_Change_Streams_to_PubSub.md index cc4063bae8..5b313986e0 100644 --- a/v2/googlecloud-to-googlecloud/README_Spanner_Change_Streams_to_PubSub.md +++ b/v2/googlecloud-to-googlecloud/README_Spanner_Change_Streams_to_PubSub.md @@ -33,27 +33,27 @@ on [Metadata Annotations](https://github.com/GoogleCloudPlatform/DataflowTemplat ### Required parameters -* **spannerInstanceId** : The Spanner instance to read change streams from. -* **spannerDatabase** : The Spanner database to read change streams from. -* **spannerMetadataInstanceId** : The Spanner instance to use for the change streams connector metadata table. 
-* **spannerMetadataDatabase** : The Spanner database to use for the change streams connector metadata table. -* **spannerChangeStreamName** : The name of the Spanner change stream to read from. -* **pubsubTopic** : The Pub/Sub topic for change streams output. +* **spannerInstanceId**: The Spanner instance to read change streams from. +* **spannerDatabase**: The Spanner database to read change streams from. +* **spannerMetadataInstanceId**: The Spanner instance to use for the change streams connector metadata table. +* **spannerMetadataDatabase**: The Spanner database to use for the change streams connector metadata table. +* **spannerChangeStreamName**: The name of the Spanner change stream to read from. +* **pubsubTopic**: The Pub/Sub topic for change streams output. ### Optional parameters -* **spannerProjectId** : The project to read change streams from. This project is also where the change streams connector metadata table is created. The default for this parameter is the project where the Dataflow pipeline is running. -* **spannerDatabaseRole** : The Spanner database role to use when running the template. This parameter is required only when the IAM principal who is running the template is a fine-grained access control user. The database role must have the `SELECT` privilege on the change stream and the `EXECUTE` privilege on the change stream's read function. For more information, see Fine-grained access control for change streams (https://cloud.google.com/spanner/docs/fgac-change-streams). -* **spannerMetadataTableName** : The Spanner change streams connector metadata table name to use. If not provided, Spanner automatically creates the streams connector metadata table during the pipeline flow change. You must provide this parameter when updating an existing pipeline. Don't use this parameter for other cases. -* **startTimestamp** : The starting DateTime (https://tools.ietf.org/html/rfc3339), inclusive, to use for reading change streams. For example, ex- 2021-10-12T07:20:50.52Z. Defaults to the timestamp when the pipeline starts, that is, the current time. -* **endTimestamp** : The ending DateTime (https://tools.ietf.org/html/rfc3339), inclusive, to use for reading change streams. For example, ex- 2021-10-12T07:20:50.52Z. Defaults to an infinite time in the future. -* **spannerHost** : The Cloud Spanner endpoint to call in the template. Only used for testing. (Example: https://spanner.googleapis.com). Defaults to: https://spanner.googleapis.com. -* **outputDataFormat** : The format of the output. Output is wrapped in many PubsubMessages and sent to a Pub/Sub topic. Allowed formats are JSON and AVRO. Default is JSON. -* **pubsubAPI** : The Pub/Sub API used to implement the pipeline. Allowed APIs are `pubsubio` and `native_client`. For a small number of queries per second (QPS), `native_client` has less latency. For a large number of QPS, `pubsubio` provides better and more stable performance. The default is `pubsubio`. -* **pubsubProjectId** : Project of Pub/Sub topic. The default for this parameter is the project where the Dataflow pipeline is running. -* **rpcPriority** : The request priority for Spanner calls. Allowed values are HIGH, MEDIUM, and LOW. Defaults to: HIGH). -* **includeSpannerSource** : Whether or not to include the spanner database id and instance id to read the change stream from in the output message data. Defaults to: false. -* **outputMessageMetadata** : The string value for the custom field outputMessageMetadata in output pub/sub message. 
Defaults to empty and the field outputMessageMetadata is only populated if this value is non-empty. Please escape any special characters when entering the value here(ie: double quotes). +* **spannerProjectId**: The project to read change streams from. This project is also where the change streams connector metadata table is created. The default for this parameter is the project where the Dataflow pipeline is running. +* **spannerDatabaseRole**: The Spanner database role to use when running the template. This parameter is required only when the IAM principal who is running the template is a fine-grained access control user. The database role must have the `SELECT` privilege on the change stream and the `EXECUTE` privilege on the change stream's read function. For more information, see Fine-grained access control for change streams (https://cloud.google.com/spanner/docs/fgac-change-streams). +* **spannerMetadataTableName**: The Spanner change streams connector metadata table name to use. If not provided, Spanner automatically creates the streams connector metadata table during the pipeline flow. You must provide this parameter when updating an existing pipeline. Don't use this parameter for other cases. +* **startTimestamp**: The starting DateTime (https://tools.ietf.org/html/rfc3339), inclusive, to use for reading change streams. For example, `2021-10-12T07:20:50.52Z`. Defaults to the timestamp when the pipeline starts, that is, the current time. +* **endTimestamp**: The ending DateTime (https://tools.ietf.org/html/rfc3339), inclusive, to use for reading change streams. For example, `2021-10-12T07:20:50.52Z`. Defaults to an infinite time in the future. +* **spannerHost**: The Cloud Spanner endpoint to call in the template. Only used for testing. For example, `https://spanner.googleapis.com`. Defaults to: https://spanner.googleapis.com. +* **outputDataFormat**: The format of the output. Output is wrapped in many PubsubMessages and sent to a Pub/Sub topic. Allowed formats are `JSON` and `AVRO`. The default is `JSON`. +* **pubsubAPI**: The Pub/Sub API used to implement the pipeline. Allowed APIs are `pubsubio` and `native_client`. For a small number of queries per second (QPS), `native_client` has less latency. For a large number of QPS, `pubsubio` provides better and more stable performance. The default is `pubsubio`. +* **pubsubProjectId**: The project of the Pub/Sub topic. The default for this parameter is the project where the Dataflow pipeline is running. +* **rpcPriority**: The request priority for Spanner calls. Allowed values are `HIGH`, `MEDIUM`, and `LOW`. Defaults to: `HIGH`. +* **includeSpannerSource**: Whether or not to include the Spanner database ID and instance ID that the change stream is read from in the output message data. Defaults to: false. +* **outputMessageMetadata**: The string value for the custom field `outputMessageMetadata` in the output Pub/Sub message. Defaults to empty, and the field `outputMessageMetadata` is only populated if this value is non-empty. Escape any special characters when entering the value (for example, double quotes).
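
As a quick sanity check on the `startTimestamp` and `endTimestamp` format referenced above (RFC 3339), the following hypothetical Java snippet, which is not part of the templates, shows that a value such as `2021-10-12T07:20:50.52Z` parses as a standard UTC instant; the end value chosen here is an arbitrary assumption.

```java
import java.time.Instant;

public class Rfc3339TimestampCheck {
  public static void main(String[] args) {
    // The example value from the parameter descriptions above.
    Instant start = Instant.parse("2021-10-12T07:20:50.52Z");

    // An endTimestamp later than startTimestamp bounds the change stream read window.
    Instant end = Instant.parse("2021-10-13T00:00:00Z");

    System.out.println("start=" + start + ", end=" + end + ", valid=" + end.isAfter(start));
  }
}
```
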
diff --git a/v2/googlecloud-to-googlecloud/README_Stream_DLP_GCS_Text_to_BigQuery_Flex.md b/v2/googlecloud-to-googlecloud/README_Stream_DLP_GCS_Text_to_BigQuery_Flex.md index 901afa480b..5c4b4355db 100644 --- a/v2/googlecloud-to-googlecloud/README_Stream_DLP_GCS_Text_to_BigQuery_Flex.md +++ b/v2/googlecloud-to-googlecloud/README_Stream_DLP_GCS_Text_to_BigQuery_Flex.md @@ -34,19 +34,19 @@ on [Metadata Annotations](https://github.com/GoogleCloudPlatform/DataflowTemplat ### Required parameters -* **inputFilePattern** : The Cloud Storage location of the files you'd like to process. (Example: gs://your-bucket/your-files/*.csv). -* **deidentifyTemplateName** : Cloud DLP template to deidentify contents. Must be created here: https://console.cloud.google.com/security/dlp/create/template. (Example: projects/your-project-id/locations/global/deidentifyTemplates/generated_template_id). -* **datasetName** : BigQuery Dataset to be used. Dataset must exist prior to execution. Ex. pii_dataset. -* **dlpProjectId** : Cloud DLP project ID to be used for data masking/tokenization. Ex. your-dlp-project. +* **inputFilePattern**: The Cloud Storage location of the files you'd like to process. For example, `gs://your-bucket/your-files/*.csv`. +* **deidentifyTemplateName**: Cloud DLP template to deidentify contents. Must be created here: https://console.cloud.google.com/security/dlp/create/template. For example, `projects/your-project-id/locations/global/deidentifyTemplates/generated_template_id`. +* **datasetName**: BigQuery Dataset to be used. Dataset must exist prior to execution. Ex. pii_dataset. +* **dlpProjectId**: Cloud DLP project ID to be used for data masking/tokenization. Ex. your-dlp-project. ### Optional parameters -* **inspectTemplateName** : Cloud DLP template to inspect contents. (Example: projects/your-project-id/locations/global/inspectTemplates/generated_template_id). -* **batchSize** : Batch size contents (number of rows) to optimize DLP API call. Total size of the rows must not exceed 512 KB and total cell count must not exceed 50,000. Default batch size is set to 100. Ex. 1000. -* **useStorageWriteApi** : If true, the pipeline uses the BigQuery Storage Write API (https://cloud.google.com/bigquery/docs/write-api). The default value is `false`. For more information, see Using the Storage Write API (https://beam.apache.org/documentation/io/built-in/google-bigquery/#storage-write-api). -* **useStorageWriteApiAtLeastOnce** : When using the Storage Write API, specifies the write semantics. To use at-least once semantics (https://beam.apache.org/documentation/io/built-in/google-bigquery/#at-least-once-semantics), set this parameter to `true`. To use exactly-once semantics, set the parameter to `false`. This parameter applies only when `useStorageWriteApi` is `true`. The default value is `false`. -* **numStorageWriteApiStreams** : When using the Storage Write API, specifies the number of write streams. If `useStorageWriteApi` is `true` and `useStorageWriteApiAtLeastOnce` is `false`, then you must set this parameter. Defaults to: 0. -* **storageWriteApiTriggeringFrequencySec** : When using the Storage Write API, specifies the triggering frequency, in seconds. If `useStorageWriteApi` is `true` and `useStorageWriteApiAtLeastOnce` is `false`, then you must set this parameter. +* **inspectTemplateName**: Cloud DLP template to inspect contents. For example, `projects/your-project-id/locations/global/inspectTemplates/generated_template_id`. 
+* **batchSize**: Batch size contents (number of rows) to optimize DLP API call. Total size of the rows must not exceed 512 KB and total cell count must not exceed 50,000. Default batch size is set to 100. Ex. 1000. +* **useStorageWriteApi**: If true, the pipeline uses the BigQuery Storage Write API (https://cloud.google.com/bigquery/docs/write-api). The default value is `false`. For more information, see Using the Storage Write API (https://beam.apache.org/documentation/io/built-in/google-bigquery/#storage-write-api). +* **useStorageWriteApiAtLeastOnce**: When using the Storage Write API, specifies the write semantics. To use at-least once semantics (https://beam.apache.org/documentation/io/built-in/google-bigquery/#at-least-once-semantics), set this parameter to `true`. To use exactly-once semantics, set the parameter to `false`. This parameter applies only when `useStorageWriteApi` is `true`. The default value is `false`. +* **numStorageWriteApiStreams**: When using the Storage Write API, specifies the number of write streams. If `useStorageWriteApi` is `true` and `useStorageWriteApiAtLeastOnce` is `false`, then you must set this parameter. Defaults to: 0. +* **storageWriteApiTriggeringFrequencySec**: When using the Storage Write API, specifies the triggering frequency, in seconds. If `useStorageWriteApi` is `true` and `useStorageWriteApiAtLeastOnce` is `false`, then you must set this parameter. @@ -235,11 +235,11 @@ resource "google_dataflow_flex_template_job" "stream_dlp_gcs_text_to_bigquery_fl name = "stream-dlp-gcs-text-to-bigquery-flex" region = var.region parameters = { - inputFilePattern = "gs://your-bucket/your-files/*.csv" - deidentifyTemplateName = "projects/your-project-id/locations/global/deidentifyTemplates/generated_template_id" + inputFilePattern = "" + deidentifyTemplateName = "" datasetName = "" dlpProjectId = "" - # inspectTemplateName = "projects/your-project-id/locations/global/inspectTemplates/generated_template_id" + # inspectTemplateName = "" # batchSize = "100" # useStorageWriteApi = "false" # useStorageWriteApiAtLeastOnce = "false" diff --git a/v2/googlecloud-to-googlecloud/README_Stream_GCS_Text_to_BigQuery_Flex.md b/v2/googlecloud-to-googlecloud/README_Stream_GCS_Text_to_BigQuery_Flex.md index c6243bb30f..46db964d2e 100644 --- a/v2/googlecloud-to-googlecloud/README_Stream_GCS_Text_to_BigQuery_Flex.md +++ b/v2/googlecloud-to-googlecloud/README_Stream_GCS_Text_to_BigQuery_Flex.md @@ -30,22 +30,22 @@ on [Metadata Annotations](https://github.com/GoogleCloudPlatform/DataflowTemplat ### Required parameters -* **inputFilePattern** : The gs:// path to the text in Cloud Storage you'd like to process. (Example: gs://your-bucket/your-file.txt). -* **JSONPath** : The gs:// path to the JSON file that defines your BigQuery schema, stored in Cloud Storage. (Example: gs://your-bucket/your-schema.json). -* **outputTable** : The location of the BigQuery table to use to store the processed data. If you reuse an existing table, it is overwritten. (Example: :.). -* **javascriptTextTransformGcsPath** : The Cloud Storage URI of the `.js` file that defines the JavaScript user-defined function (UDF) you want to use. (Example: gs://your-bucket/your-transforms/*.js). -* **javascriptTextTransformFunctionName** : The name of the JavaScript user-defined function (UDF) that you want to use. For example, if your JavaScript function code is `myTransform(inJson) { /*...do stuff...*/ }`, then the function name is `myTransform`. 
For sample JavaScript UDFs, see UDF Examples (https://github.com/GoogleCloudPlatform/DataflowTemplates#udf-examples) (Example: transform_udf1). -* **bigQueryLoadingTemporaryDirectory** : Temporary directory for BigQuery loading process. (Example: gs://your-bucket/your-files/temp-dir). +* **inputFilePattern**: The gs:// path to the text in Cloud Storage you'd like to process. For example, `gs://your-bucket/your-file.txt`. +* **JSONPath**: The gs:// path to the JSON file that defines your BigQuery schema, stored in Cloud Storage. For example, `gs://your-bucket/your-schema.json`. +* **outputTable**: The location of the BigQuery table to use to store the processed data. If you reuse an existing table, it is overwritten. For example, `:.`. +* **javascriptTextTransformGcsPath**: The Cloud Storage URI of the `.js` file that defines the JavaScript user-defined function (UDF) you want to use. For example, `gs://your-bucket/your-transforms/*.js`. +* **javascriptTextTransformFunctionName**: The name of the JavaScript user-defined function (UDF) that you want to use. For example, if your JavaScript function code is `myTransform(inJson) { /*...do stuff...*/ }`, then the function name is `myTransform`. For sample JavaScript UDFs, see UDF Examples (https://github.com/GoogleCloudPlatform/DataflowTemplates#udf-examples) For example, `transform_udf1`. +* **bigQueryLoadingTemporaryDirectory**: Temporary directory for BigQuery loading process. For example, `gs://your-bucket/your-files/temp-dir`. ### Optional parameters -* **outputDeadletterTable** : Table for messages that failed to reach the output table. If a table doesn't exist, it is created during pipeline execution. If not specified, `_error_records` is used. (Example: :.). -* **useStorageWriteApiAtLeastOnce** : This parameter takes effect only if "Use BigQuery Storage Write API" is enabled. If enabled the at-least-once semantics will be used for Storage Write API, otherwise exactly-once semantics will be used. Defaults to: false. -* **useStorageWriteApi** : If true, the pipeline uses the BigQuery Storage Write API (https://cloud.google.com/bigquery/docs/write-api). The default value is `false`. For more information, see Using the Storage Write API (https://beam.apache.org/documentation/io/built-in/google-bigquery/#storage-write-api). -* **numStorageWriteApiStreams** : When using the Storage Write API, specifies the number of write streams. If `useStorageWriteApi` is `true` and `useStorageWriteApiAtLeastOnce` is `false`, then you must set this parameter. Defaults to: 0. -* **storageWriteApiTriggeringFrequencySec** : When using the Storage Write API, specifies the triggering frequency, in seconds. If `useStorageWriteApi` is `true` and `useStorageWriteApiAtLeastOnce` is `false`, then you must set this parameter. -* **pythonExternalTextTransformGcsPath** : The Cloud Storage path pattern for the Python code containing your user-defined functions. (Example: gs://your-bucket/your-function.py). -* **javascriptTextTransformReloadIntervalMinutes** : Specifies how frequently to reload the UDF, in minutes. If the value is greater than 0, Dataflow periodically checks the UDF file in Cloud Storage, and reloads the UDF if the file is modified. This parameter allows you to update the UDF while the pipeline is running, without needing to restart the job. If the value is 0, UDF reloading is disabled. The default value is 0. +* **outputDeadletterTable**: Table for messages that failed to reach the output table. 
If a table doesn't exist, it is created during pipeline execution. If not specified, `_error_records` is used. For example, `:.`. +* **useStorageWriteApiAtLeastOnce**: This parameter takes effect only if `Use BigQuery Storage Write API` is enabled. If enabled the at-least-once semantics will be used for Storage Write API, otherwise exactly-once semantics will be used. Defaults to: false. +* **useStorageWriteApi**: If true, the pipeline uses the BigQuery Storage Write API (https://cloud.google.com/bigquery/docs/write-api). The default value is `false`. For more information, see Using the Storage Write API (https://beam.apache.org/documentation/io/built-in/google-bigquery/#storage-write-api). +* **numStorageWriteApiStreams**: When using the Storage Write API, specifies the number of write streams. If `useStorageWriteApi` is `true` and `useStorageWriteApiAtLeastOnce` is `false`, then you must set this parameter. Defaults to: 0. +* **storageWriteApiTriggeringFrequencySec**: When using the Storage Write API, specifies the triggering frequency, in seconds. If `useStorageWriteApi` is `true` and `useStorageWriteApiAtLeastOnce` is `false`, then you must set this parameter. +* **pythonExternalTextTransformGcsPath**: The Cloud Storage path pattern for the Python code containing your user-defined functions. For example, `gs://your-bucket/your-function.py`. +* **javascriptTextTransformReloadIntervalMinutes**: Specifies how frequently to reload the UDF, in minutes. If the value is greater than 0, Dataflow periodically checks the UDF file in Cloud Storage, and reloads the UDF if the file is modified. This parameter allows you to update the UDF while the pipeline is running, without needing to restart the job. If the value is `0`, UDF reloading is disabled. The default value is `0`. ## User-Defined functions (UDFs) @@ -253,18 +253,18 @@ resource "google_dataflow_flex_template_job" "stream_gcs_text_to_bigquery_flex" name = "stream-gcs-text-to-bigquery-flex" region = var.region parameters = { - inputFilePattern = "gs://your-bucket/your-file.txt" - JSONPath = "gs://your-bucket/your-schema.json" - outputTable = ":." - javascriptTextTransformGcsPath = "gs://your-bucket/your-transforms/*.js" - javascriptTextTransformFunctionName = "transform_udf1" - bigQueryLoadingTemporaryDirectory = "gs://your-bucket/your-files/temp-dir" - # outputDeadletterTable = ":." + inputFilePattern = "" + JSONPath = "" + outputTable = "" + javascriptTextTransformGcsPath = "" + javascriptTextTransformFunctionName = "" + bigQueryLoadingTemporaryDirectory = "" + # outputDeadletterTable = "" # useStorageWriteApiAtLeastOnce = "false" # useStorageWriteApi = "false" # numStorageWriteApiStreams = "0" # storageWriteApiTriggeringFrequencySec = "" - # pythonExternalTextTransformGcsPath = "gs://your-bucket/your-function.py" + # pythonExternalTextTransformGcsPath = "" # javascriptTextTransformReloadIntervalMinutes = "0" } } diff --git a/v2/googlecloud-to-googlecloud/README_Stream_GCS_Text_to_BigQuery_Xlang.md b/v2/googlecloud-to-googlecloud/README_Stream_GCS_Text_to_BigQuery_Xlang.md index 33d45a8d29..c5649655b4 100644 --- a/v2/googlecloud-to-googlecloud/README_Stream_GCS_Text_to_BigQuery_Xlang.md +++ b/v2/googlecloud-to-googlecloud/README_Stream_GCS_Text_to_BigQuery_Xlang.md @@ -30,20 +30,20 @@ on [Metadata Annotations](https://github.com/GoogleCloudPlatform/DataflowTemplat ### Required parameters -* **inputFilePattern** : The gs:// path to the text in Cloud Storage you'd like to process. (Example: gs://your-bucket/your-file.txt). 
-* **JSONPath** : The gs:// path to the JSON file that defines your BigQuery schema, stored in Cloud Storage. (Example: gs://your-bucket/your-schema.json). -* **outputTable** : The location of the BigQuery table to use to store the processed data. If you reuse an existing table, it is overwritten. (Example: :.). -* **bigQueryLoadingTemporaryDirectory** : Temporary directory for BigQuery loading process. (Example: gs://your-bucket/your-files/temp-dir). +* **inputFilePattern**: The gs:// path to the text in Cloud Storage you'd like to process. For example, `gs://your-bucket/your-file.txt`. +* **JSONPath**: The gs:// path to the JSON file that defines your BigQuery schema, stored in Cloud Storage. For example, `gs://your-bucket/your-schema.json`. +* **outputTable**: The location of the BigQuery table to use to store the processed data. If you reuse an existing table, it is overwritten. For example, `:.`. +* **bigQueryLoadingTemporaryDirectory**: Temporary directory for BigQuery loading process. For example, `gs://your-bucket/your-files/temp-dir`. ### Optional parameters -* **outputDeadletterTable** : Table for messages that failed to reach the output table. If a table doesn't exist, it is created during pipeline execution. If not specified, `_error_records` is used. (Example: :.). -* **useStorageWriteApiAtLeastOnce** : This parameter takes effect only if "Use BigQuery Storage Write API" is enabled. If enabled the at-least-once semantics will be used for Storage Write API, otherwise exactly-once semantics will be used. Defaults to: false. -* **useStorageWriteApi** : If true, the pipeline uses the BigQuery Storage Write API (https://cloud.google.com/bigquery/docs/write-api). The default value is `false`. For more information, see Using the Storage Write API (https://beam.apache.org/documentation/io/built-in/google-bigquery/#storage-write-api). -* **numStorageWriteApiStreams** : When using the Storage Write API, specifies the number of write streams. If `useStorageWriteApi` is `true` and `useStorageWriteApiAtLeastOnce` is `false`, then you must set this parameter. Defaults to: 0. -* **storageWriteApiTriggeringFrequencySec** : When using the Storage Write API, specifies the triggering frequency, in seconds. If `useStorageWriteApi` is `true` and `useStorageWriteApiAtLeastOnce` is `false`, then you must set this parameter. -* **pythonExternalTextTransformGcsPath** : The Cloud Storage path pattern for the Python code containing your user-defined functions. (Example: gs://your-bucket/your-function.py). -* **pythonExternalTextTransformFunctionName** : The name of the function to call from your Python file. Use only letters, digits, and underscores. (Example: 'transform' or 'transform_udf1'). +* **outputDeadletterTable**: Table for messages that failed to reach the output table. If a table doesn't exist, it is created during pipeline execution. If not specified, `_error_records` is used. For example, `:.`. +* **useStorageWriteApiAtLeastOnce**: This parameter takes effect only if `Use BigQuery Storage Write API` is enabled. If enabled the at-least-once semantics will be used for Storage Write API, otherwise exactly-once semantics will be used. Defaults to: false. +* **useStorageWriteApi**: If true, the pipeline uses the BigQuery Storage Write API (https://cloud.google.com/bigquery/docs/write-api). The default value is `false`. For more information, see Using the Storage Write API (https://beam.apache.org/documentation/io/built-in/google-bigquery/#storage-write-api). 
+* **numStorageWriteApiStreams**: When using the Storage Write API, specifies the number of write streams. If `useStorageWriteApi` is `true` and `useStorageWriteApiAtLeastOnce` is `false`, then you must set this parameter. Defaults to: 0. +* **storageWriteApiTriggeringFrequencySec**: When using the Storage Write API, specifies the triggering frequency, in seconds. If `useStorageWriteApi` is `true` and `useStorageWriteApiAtLeastOnce` is `false`, then you must set this parameter. +* **pythonExternalTextTransformGcsPath**: The Cloud Storage path pattern for the Python code containing your user-defined functions. For example, `gs://your-bucket/your-function.py`. +* **pythonExternalTextTransformFunctionName**: The name of the function to call from your Python file. Use only letters, digits, and underscores. For example, `'transform' or 'transform_udf1'`. @@ -235,17 +235,17 @@ resource "google_dataflow_flex_template_job" "stream_gcs_text_to_bigquery_xlang" name = "stream-gcs-text-to-bigquery-xlang" region = var.region parameters = { - inputFilePattern = "gs://your-bucket/your-file.txt" - JSONPath = "gs://your-bucket/your-schema.json" - outputTable = ":." - bigQueryLoadingTemporaryDirectory = "gs://your-bucket/your-files/temp-dir" - # outputDeadletterTable = ":." + inputFilePattern = "" + JSONPath = "" + outputTable = "" + bigQueryLoadingTemporaryDirectory = "" + # outputDeadletterTable = "" # useStorageWriteApiAtLeastOnce = "false" # useStorageWriteApi = "false" # numStorageWriteApiStreams = "0" # storageWriteApiTriggeringFrequencySec = "" - # pythonExternalTextTransformGcsPath = "gs://your-bucket/your-function.py" - # pythonExternalTextTransformFunctionName = "'transform' or 'transform_udf1'" + # pythonExternalTextTransformGcsPath = "" + # pythonExternalTextTransformFunctionName = "" } } ``` diff --git a/v2/googlecloud-to-googlecloud/src/main/java/com/google/cloud/teleport/v2/options/BigtableChangeStreamToBigQueryOptions.java b/v2/googlecloud-to-googlecloud/src/main/java/com/google/cloud/teleport/v2/options/BigtableChangeStreamToBigQueryOptions.java index 81717246b1..39a7593438 100644 --- a/v2/googlecloud-to-googlecloud/src/main/java/com/google/cloud/teleport/v2/options/BigtableChangeStreamToBigQueryOptions.java +++ b/v2/googlecloud-to-googlecloud/src/main/java/com/google/cloud/teleport/v2/options/BigtableChangeStreamToBigQueryOptions.java @@ -55,8 +55,8 @@ public interface BigtableChangeStreamToBigQueryOptions optional = true, description = "Write values as BigQuery BYTES", helpText = - "When set true values are written to BYTES column, otherwise to STRING column. " - + "Defaults to false.") + "When set to `true`, values are written to a column of type BYTES, otherwise to a column of type STRING . " + + "Defaults to: `false`.") @Default.Boolean(false) Boolean getWriteValuesAsBytes(); @@ -67,7 +67,7 @@ public interface BigtableChangeStreamToBigQueryOptions optional = true, description = "Write Bigtable timestamp as BigQuery INT", helpText = - "Whether to write the Bigtable timestamp as BigQuery `INT64`. When set to true, values are written to the `INT64` column." + "Whether to write the Bigtable timestamp as BigQuery INT64. When set to `true`, values are written to the INT64 column." + " Otherwise, values are written to the `TIMESTAMP` column. Columns affected: `timestamp`, `timestamp_from`, " + "and `timestamp_to`. Defaults to `false`. 
When set to `true`, the time is measured in microseconds " + "since the Unix epoch (January 1, 1970 at UTC).") @@ -117,7 +117,7 @@ public interface BigtableChangeStreamToBigQueryOptions optional = true, description = "Sets partition expiration time in milliseconds", helpText = - "Sets the changelog table partition expiration time, in milliseconds. When set to true, " + "Sets the changelog table partition expiration time, in milliseconds. When set to `true`, " + "partitions older than the specified number of milliseconds are deleted. " + "By default, no expiration is set.") Long getBigQueryChangelogTablePartitionExpirationMs(); diff --git a/v2/googlecloud-to-googlecloud/src/main/java/com/google/cloud/teleport/v2/options/BigtableChangeStreamsToPubSubOptions.java b/v2/googlecloud-to-googlecloud/src/main/java/com/google/cloud/teleport/v2/options/BigtableChangeStreamsToPubSubOptions.java index d4e7b519d1..14d30ac153 100644 --- a/v2/googlecloud-to-googlecloud/src/main/java/com/google/cloud/teleport/v2/options/BigtableChangeStreamsToPubSubOptions.java +++ b/v2/googlecloud-to-googlecloud/src/main/java/com/google/cloud/teleport/v2/options/BigtableChangeStreamsToPubSubOptions.java @@ -80,7 +80,7 @@ public interface BigtableChangeStreamsToPubSubOptions optional = true, description = "Strip values for SetCell mutation", helpText = - "When set to true, the SET_CELL mutations are returned without new values set. Defaults to false. This parameter is useful when you don't need a new value to be present, also known as cache invalidation, or when values are extremely large and exceed Pub/Sub message size limits.") + "When set to `true`, the `SET_CELL` mutations are returned without new values set. Defaults to `false`. This parameter is useful when you don't need a new value to be present, also known as cache invalidation, or when values are extremely large and exceed Pub/Sub message size limits.") @Default.Boolean(false) Boolean getStripValues(); diff --git a/v2/googlecloud-to-googlecloud/src/main/java/com/google/cloud/teleport/v2/options/PubsubToJdbcOptions.java b/v2/googlecloud-to-googlecloud/src/main/java/com/google/cloud/teleport/v2/options/PubsubToJdbcOptions.java index a813f01beb..d5fda6508e 100644 --- a/v2/googlecloud-to-googlecloud/src/main/java/com/google/cloud/teleport/v2/options/PubsubToJdbcOptions.java +++ b/v2/googlecloud-to-googlecloud/src/main/java/com/google/cloud/teleport/v2/options/PubsubToJdbcOptions.java @@ -28,9 +28,8 @@ public interface PubsubToJdbcOptions extends CommonTemplateOptions { order = 1, groupName = "Source", description = "Pub/Sub input subscription", - helpText = - "The Pub/Sub input subscription to read from, in the format of 'projects//subscriptions/'", - example = "projects/your-project-id/subscriptions/your-subscription-name") + helpText = "The Pub/Sub input subscription to read from.", + example = "projects//subscriptions/") @Validation.Required String getInputSubscription(); diff --git a/v2/googlecloud-to-googlecloud/src/main/java/com/google/cloud/teleport/v2/options/SpannerChangeStreamsToBigQueryOptions.java b/v2/googlecloud-to-googlecloud/src/main/java/com/google/cloud/teleport/v2/options/SpannerChangeStreamsToBigQueryOptions.java index 5aa77f880a..d92075fb06 100644 --- a/v2/googlecloud-to-googlecloud/src/main/java/com/google/cloud/teleport/v2/options/SpannerChangeStreamsToBigQueryOptions.java +++ b/v2/googlecloud-to-googlecloud/src/main/java/com/google/cloud/teleport/v2/options/SpannerChangeStreamsToBigQueryOptions.java @@ -68,8 +68,8 @@ public interface 
SpannerChangeStreamsToBigQueryOptions description = "Spanner database role", helpText = "The Spanner database role to use when running the template. This parameter is required only when the IAM principal who is running the template is a" - + " fine-grained access control user. The database role must have the SELECT privilege on the change stream" - + " and the EXECUTE privilege on the change stream's read function. For more information, see" + + " fine-grained access control user. The database role must have the `SELECT` privilege on the change stream" + + " and the `EXECUTE` privilege on the change stream's read function. For more information, see" + " Fine-grained access control for change streams (https://cloud.google.com/spanner/docs/fgac-change-streams).") String getSpannerDatabaseRole(); @@ -217,7 +217,7 @@ public interface SpannerChangeStreamsToBigQueryOptions optional = true, description = "Dead letter queue retry minutes", helpText = - "The number of minutes between dead-letter queue retries. The default value is 10.") + "The number of minutes between dead-letter queue retries. The default value is `10`.") @Default.Integer(10) Integer getDlqRetryMinutes(); diff --git a/v2/googlecloud-to-googlecloud/src/main/java/com/google/cloud/teleport/v2/options/SpannerChangeStreamsToGcsOptions.java b/v2/googlecloud-to-googlecloud/src/main/java/com/google/cloud/teleport/v2/options/SpannerChangeStreamsToGcsOptions.java index 2667ade3db..11adfdd2bf 100644 --- a/v2/googlecloud-to-googlecloud/src/main/java/com/google/cloud/teleport/v2/options/SpannerChangeStreamsToGcsOptions.java +++ b/v2/googlecloud-to-googlecloud/src/main/java/com/google/cloud/teleport/v2/options/SpannerChangeStreamsToGcsOptions.java @@ -119,7 +119,7 @@ public interface SpannerChangeStreamsToGcsOptions optional = true, description = "The timestamp to read change streams from", helpText = - "The starting DateTime, inclusive, to use for reading change streams, in the format Ex-2021-10-12T07:20:50.52Z. Defaults to the timestamp when the pipeline starts, that is, the current time.") + "The starting DateTime, inclusive, to use for reading change streams, in the format `Ex-2021-10-12T07:20:50.52Z`. Defaults to the timestamp when the pipeline starts, that is, the current time.") @Default.String("") String getStartTimestamp(); @@ -130,7 +130,7 @@ public interface SpannerChangeStreamsToGcsOptions optional = true, description = "The timestamp to read change streams to", helpText = - "The ending DateTime, inclusive, to use for reading change streams. For example, Ex-2021-10-12T07:20:50.52Z. Defaults to an infinite time in the future.") + "The ending DateTime, inclusive, to use for reading change streams. For example, `Ex-2021-10-12T07:20:50.52Z`. Defaults to an infinite time in the future.") @Default.String("") String getEndTimestamp(); @@ -153,7 +153,7 @@ public interface SpannerChangeStreamsToGcsOptions optional = true, description = "Output file format", helpText = - "The format of the output Cloud Storage file. Allowed formats are TEXT and AVRO. Defaults to AVRO.") + "The format of the output Cloud Storage file. Allowed formats are `TEXT` and `AVRO`. Defaults to `AVRO`.") @Default.Enum("AVRO") FileFormat getOutputFileFormat(); @@ -181,7 +181,7 @@ public interface SpannerChangeStreamsToGcsOptions optional = true, description = "Priority for Spanner RPC invocations", helpText = - "The request priority for Spanner calls. The value must be HIGH, MEDIUM, or LOW. Defaults to HIGH.") + "The request priority for Spanner calls. 
The value must be `HIGH`, `MEDIUM`, or `LOW`. Defaults to `HIGH`.") @Default.Enum("HIGH") RpcPriority getRpcPriority(); diff --git a/v2/googlecloud-to-googlecloud/src/main/java/com/google/cloud/teleport/v2/templates/PubSubToBigQuery.java b/v2/googlecloud-to-googlecloud/src/main/java/com/google/cloud/teleport/v2/templates/PubSubToBigQuery.java index a88f14f363..7e798c674f 100644 --- a/v2/googlecloud-to-googlecloud/src/main/java/com/google/cloud/teleport/v2/templates/PubSubToBigQuery.java +++ b/v2/googlecloud-to-googlecloud/src/main/java/com/google/cloud/teleport/v2/templates/PubSubToBigQuery.java @@ -184,7 +184,7 @@ public interface Options groupName = "Target", description = "BigQuery output table", helpText = - "The BigQuery table to write to, formatted as `\"PROJECT_ID:DATASET_NAME.TABLE_NAME\"`.") + "The BigQuery table to write to, formatted as `PROJECT_ID:DATASET_NAME.TABLE_NAME`.") String getOutputTableSpec(); void setOutputTableSpec(String value); @@ -195,7 +195,7 @@ public interface Options optional = true, description = "Input Pub/Sub topic", helpText = - "The Pub/Sub topic to read from, formatted as `\"projects//topics/\"`.") + "The Pub/Sub topic to read from, formatted as `projects//topics/`.") String getInputTopic(); void setInputTopic(String value); @@ -207,7 +207,7 @@ public interface Options description = "Pub/Sub input subscription", helpText = "The Pub/Sub subscription to read from, " - + "formatted as `\"projects//subscriptions/\"`.") + + "formatted as `projects//subscriptions/`.") String getInputSubscription(); void setInputSubscription(String value); @@ -219,10 +219,10 @@ public interface Options "Table for messages failed to reach the output table (i.e., Deadletter table)", helpText = "The BigQuery table to use for messages that failed to reach the output table, " - + "formatted as `\"PROJECT_ID:DATASET_NAME.TABLE_NAME\"`. If the table " + + "formatted as `PROJECT_ID:DATASET_NAME.TABLE_NAME`. If the table " + "doesn't exist, it is created when the pipeline runs. " + "If this parameter is not specified, " - + "the value `\"OUTPUT_TABLE_SPEC_error_records\"` is used instead.") + + "the value `OUTPUT_TABLE_SPEC_error_records` is used instead.") String getOutputDeadletterTable(); void setOutputDeadletterTable(String value); diff --git a/v2/googlecloud-to-googlecloud/src/main/java/com/google/cloud/teleport/v2/templates/TextToBigQueryStreaming.java b/v2/googlecloud-to-googlecloud/src/main/java/com/google/cloud/teleport/v2/templates/TextToBigQueryStreaming.java index 3fbb79d6a0..e034993bac 100644 --- a/v2/googlecloud-to-googlecloud/src/main/java/com/google/cloud/teleport/v2/templates/TextToBigQueryStreaming.java +++ b/v2/googlecloud-to-googlecloud/src/main/java/com/google/cloud/teleport/v2/templates/TextToBigQueryStreaming.java @@ -495,7 +495,7 @@ public interface TextToBigQueryStreamingOptions parentTriggerValues = {"true"}, description = "Use at at-least-once semantics in BigQuery Storage Write API", helpText = - "This parameter takes effect only if \"Use BigQuery Storage Write API\" is enabled. If" + "This parameter takes effect only if `Use BigQuery Storage Write API` is enabled. 
If" + " enabled the at-least-once semantics will be used for Storage Write API, otherwise" + " exactly-once semantics will be used.", hiddenUi = true) diff --git a/v2/googlecloud-to-googlecloud/src/main/java/com/google/cloud/teleport/v2/templates/pubsubtotext/PubsubToText.java b/v2/googlecloud-to-googlecloud/src/main/java/com/google/cloud/teleport/v2/templates/pubsubtotext/PubsubToText.java index 0b66afd011..6144929fd8 100644 --- a/v2/googlecloud-to-googlecloud/src/main/java/com/google/cloud/teleport/v2/templates/pubsubtotext/PubsubToText.java +++ b/v2/googlecloud-to-googlecloud/src/main/java/com/google/cloud/teleport/v2/templates/pubsubtotext/PubsubToText.java @@ -81,10 +81,9 @@ public interface Options optional = true, description = "Pub/Sub input topic", helpText = - "The Pub/Sub topic to read the input from. The topic name should be in the format " - + "`projects//topics/`. If this parameter is provided " + "The Pub/Sub topic to read the input from. If this parameter is provided " + "don't use `inputSubscription`.", - example = "projects/your-project-id/topics/your-topic-name") + example = "projects//topics/") String getInputTopic(); void setInputTopic(String value); @@ -95,10 +94,9 @@ public interface Options optional = true, description = "Pub/Sub input subscription", helpText = - "The Pub/Sub subscription to read the input from. The subscription name uses the format " - + "`projects//subscription/`. If this parameter is " + "The Pub/Sub subscription to read the input from. If this parameter is " + "provided, don't use `inputTopic`.", - example = "projects/your-project-id/subscriptions/your-subscription-name") + example = "projects//subscription/") String getInputSubscription(); void setInputSubscription(String value); @@ -110,7 +108,7 @@ public interface Options helpText = "The path and filename prefix to write write output files to. " + "This value must end in a slash.", - example = "gs://your-bucket/your-path") + example = "gs://your-bucket/your-path/") @Required String getOutputDirectory(); diff --git a/v2/googlecloud-to-mongodb/README_BigQuery_to_MongoDB.md b/v2/googlecloud-to-mongodb/README_BigQuery_to_MongoDB.md index 6ff2d00015..c57cc31c07 100644 --- a/v2/googlecloud-to-mongodb/README_BigQuery_to_MongoDB.md +++ b/v2/googlecloud-to-mongodb/README_BigQuery_to_MongoDB.md @@ -18,10 +18,10 @@ on [Metadata Annotations](https://github.com/GoogleCloudPlatform/DataflowTemplat ### Required parameters -* **mongoDbUri** : The MongoDB connection URI in the format mongodb+srv://:@. -* **database** : Database in MongoDB to store the collection. (Example: my-db). -* **collection** : The name of the collection in the MongoDB database. (Example: my-collection). -* **inputTableSpec** : The BigQuery table to read from. (Example: bigquery-project:dataset.input_table). +* **mongoDbUri**: The MongoDB connection URI in the format `mongodb+srv://:@`. +* **database**: Database in MongoDB to store the collection. For example, `my-db`. +* **collection**: The name of the collection in the MongoDB database. For example, `my-collection`. +* **inputTableSpec**: The BigQuery table to read from. For example, `bigquery-project:dataset.input_table`. 
### Optional parameters @@ -196,9 +196,9 @@ resource "google_dataflow_flex_template_job" "bigquery_to_mongodb" { region = var.region parameters = { mongoDbUri = "" - database = "my-db" - collection = "my-collection" - inputTableSpec = "bigquery-project:dataset.input_table" + database = "" + collection = "" + inputTableSpec = "" } } ``` diff --git a/v2/googlecloud-to-mongodb/src/main/java/com/google/cloud/teleport/v2/mongodb/options/BigQueryToMongoDbOptions.java b/v2/googlecloud-to-mongodb/src/main/java/com/google/cloud/teleport/v2/mongodb/options/BigQueryToMongoDbOptions.java index 54fe4eddfa..d85130e296 100644 --- a/v2/googlecloud-to-mongodb/src/main/java/com/google/cloud/teleport/v2/mongodb/options/BigQueryToMongoDbOptions.java +++ b/v2/googlecloud-to-mongodb/src/main/java/com/google/cloud/teleport/v2/mongodb/options/BigQueryToMongoDbOptions.java @@ -31,7 +31,7 @@ public interface MongoDbOptions extends PipelineOptions, DataflowPipelineOptions order = 1, groupName = "Target", description = "MongoDB Connection URI", - helpText = "The MongoDB connection URI in the format mongodb+srv://:@.") + helpText = "The MongoDB connection URI in the format `mongodb+srv://:@`.") String getMongoDbUri(); void setMongoDbUri(String getMongoDbUri); diff --git a/v2/googlecloud-to-neo4j/README_Google_Cloud_to_Neo4j.md b/v2/googlecloud-to-neo4j/README_Google_Cloud_to_Neo4j.md index 34a618fd00..031c47d4ff 100644 --- a/v2/googlecloud-to-neo4j/README_Google_Cloud_to_Neo4j.md +++ b/v2/googlecloud-to-neo4j/README_Google_Cloud_to_Neo4j.md @@ -20,17 +20,17 @@ on [Metadata Annotations](https://github.com/GoogleCloudPlatform/DataflowTemplat ### Required parameters -* **jobSpecUri** : The path to the job specification file, which contains the configuration for source and target metadata. +* **jobSpecUri**: The path to the job specification file, which contains the configuration for source and target metadata. ### Optional parameters -* **neo4jConnectionUri** : The path to the Neo4j connection metadata JSON file. -* **neo4jConnectionSecretId** : The secret ID for the Neo4j connection metadata. This is an alternative to the GCS path option. -* **optionsJson** : Options JSON. Use runtime tokens. (Example: {token1:value1,token2:value2}). Defaults to empty. -* **readQuery** : Override SQL query. Defaults to empty. -* **inputFilePattern** : Override text file pattern (Example: gs://your-bucket/path/*.json). Defaults to empty. -* **disabledAlgorithms** : Comma separated algorithms to disable. If this value is set to none, no algorithm is disabled. Use this parameter with caution, because the algorithms disabled by default might have vulnerabilities or performance issues. (Example: SSLv3, RC4). -* **extraFilesToStage** : Comma separated Cloud Storage paths or Secret Manager secrets for files to stage in the worker. These files are saved in the /extra_files directory in each worker. (Example: gs:///file.txt,projects//secrets//versions/). +* **neo4jConnectionUri**: The path to the Neo4j connection metadata JSON file. +* **neo4jConnectionSecretId**: The secret ID for the Neo4j connection metadata. This is an alternative to the GCS path option. +* **optionsJson**: Options JSON. Use runtime tokens. For example, `{token1:value1,token2:value2}`. Defaults to empty. +* **readQuery**: Override SQL query. Defaults to empty. +* **inputFilePattern**: Override text file pattern For example, `gs://your-bucket/path/*.json`. Defaults to empty. +* **disabledAlgorithms**: Comma separated algorithms to disable. 
If this value is set to `none`, no algorithm is disabled. Use this parameter with caution, because the algorithms disabled by default might have vulnerabilities or performance issues. For example, `SSLv3, RC4`. +* **extraFilesToStage**: Comma separated Cloud Storage paths or Secret Manager secrets for files to stage in the worker. These files are saved in the /extra_files directory in each worker. For example, `gs:///file.txt,projects//secrets//versions/`. @@ -216,11 +216,11 @@ resource "google_dataflow_flex_template_job" "google_cloud_to_neo4j" { jobSpecUri = "" # neo4jConnectionUri = "" # neo4jConnectionSecretId = "" - # optionsJson = "{token1:value1,token2:value2}" + # optionsJson = "" # readQuery = "" - # inputFilePattern = "gs://your-bucket/path/*.json" - # disabledAlgorithms = "SSLv3, RC4" - # extraFilesToStage = "gs:///file.txt,projects//secrets//versions/" + # inputFilePattern = "" + # disabledAlgorithms = "" + # extraFilesToStage = "" } } ``` diff --git a/v2/googlecloud-to-splunk/README_GCS_To_Splunk.md b/v2/googlecloud-to-splunk/README_GCS_To_Splunk.md index 368074a3e7..6b59be32f6 100644 --- a/v2/googlecloud-to-splunk/README_GCS_To_Splunk.md +++ b/v2/googlecloud-to-splunk/README_GCS_To_Splunk.md @@ -18,32 +18,32 @@ on [Metadata Annotations](https://github.com/GoogleCloudPlatform/DataflowTemplat ### Required parameters -* **invalidOutputPath** : Cloud Storage path where to write objects that could not be converted to Splunk objects or pushed to Splunk. (Example: gs://your-bucket/your-path). -* **inputFileSpec** : The Cloud Storage file pattern to search for CSV files. Example: gs://mybucket/test-*.csv. -* **deadletterTable** : Messages failed to reach the target for all kind of reasons (e.g., mismatched schema, malformed json) are written to this table. (Example: your-project:your-dataset.your-table-name). -* **url** : Splunk Http Event Collector (HEC) url. This should be routable from the VPC in which the pipeline runs. (Example: https://splunk-hec-host:8088). -* **tokenSource** : Source of the token. One of PLAINTEXT, KMS or SECRET_MANAGER. If tokenSource is set to KMS, tokenKMSEncryptionKey and encrypted token must be provided. If tokenSource is set to SECRET_MANAGER, tokenSecretId must be provided. If tokenSource is set to PLAINTEXT, token must be provided. +* **invalidOutputPath**: Cloud Storage path where to write objects that could not be converted to Splunk objects or pushed to Splunk. For example, `gs://your-bucket/your-path`. +* **inputFileSpec**: The Cloud Storage file pattern to search for CSV files. For example, `gs://mybucket/test-*.csv`. +* **deadletterTable**: Messages failed to reach the target for all kind of reasons (e.g., mismatched schema, malformed json) are written to this table. For example, `your-project:your-dataset.your-table-name`. +* **url**: Splunk Http Event Collector (HEC) url. This should be routable from the VPC in which the pipeline runs. For example, `https://splunk-hec-host:8088`. +* **tokenSource**: Source of the token. One of PLAINTEXT, KMS or SECRET_MANAGER. If tokenSource is set to KMS, tokenKMSEncryptionKey and encrypted token must be provided. If tokenSource is set to SECRET_MANAGER, tokenSecretId must be provided. If tokenSource is set to PLAINTEXT, token must be provided. ### Optional parameters -* **containsHeaders** : Input CSV files contain a header record (true/false). Only required if reading CSV files. Defaults to: false. -* **delimiter** : The column delimiter of the input text files. 
Default: use delimiter provided in csvFormat (Example: ,). -* **csvFormat** : CSV format specification to use for parsing records. Default is: Default. See https://commons.apache.org/proper/commons-csv/apidocs/org/apache/commons/csv/CSVFormat.html for more details. Must match format names exactly found at: https://commons.apache.org/proper/commons-csv/apidocs/org/apache/commons/csv/CSVFormat.Predefined.html. -* **jsonSchemaPath** : The path to the JSON schema. Defaults to: null. (Example: gs://path/to/schema). -* **largeNumFiles** : Set to true if number of files is in the tens of thousands. Defaults to: false. -* **csvFileEncoding** : The CSV file character encoding format. Allowed Values are US-ASCII, ISO-8859-1, UTF-8, and UTF-16. Defaults to: UTF-8. -* **logDetailedCsvConversionErrors** : Set to true to enable detailed error logging when CSV parsing fails. Note that this may expose sensitive data in the logs (e.g., if the CSV file contains passwords). Default: false. -* **token** : Splunk Http Event Collector (HEC) authentication token. Must be provided if the tokenSource is set to PLAINTEXT or KMS. -* **batchCount** : Batch size for sending multiple events to Splunk HEC. Default 1 (no batching). -* **disableCertificateValidation** : Disable SSL certificate validation (true/false). Default false (validation enabled). If true, the certificates are not validated (all certificates are trusted) and `rootCaCertificatePath` parameter is ignored. -* **parallelism** : Maximum number of parallel requests. Default: 1 (no parallelism). -* **tokenKMSEncryptionKey** : The Cloud KMS key to decrypt the HEC token string. This parameter must be provided if the tokenSource is set to KMS. If this parameter is provided, token string should be passed in encrypted. Encrypt parameters using the KMS API encrypt endpoint. The Key should be in the format projects/{gcp_project}/locations/{key_region}/keyRings/{key_ring}/cryptoKeys/{kms_key_name}. See: https://cloud.google.com/kms/docs/reference/rest/v1/projects.locations.keyRings.cryptoKeys/encrypt (Example: projects/your-project-id/locations/global/keyRings/your-keyring/cryptoKeys/your-key-name). -* **tokenSecretId** : Secret Manager secret ID for the token. This parameter should be provided if the tokenSource is set to SECRET_MANAGER. Should be in the format projects/{project}/secrets/{secret}/versions/{secret_version}. (Example: projects/your-project-id/secrets/your-secret/versions/your-secret-version). -* **rootCaCertificatePath** : The full URL to root CA certificate in Cloud Storage. The certificate provided in Cloud Storage must be DER-encoded and may be supplied in binary or printable (Base64) encoding. If the certificate is provided in Base64 encoding, it must be bounded at the beginning by -----BEGIN CERTIFICATE-----, and must be bounded at the end by -----END CERTIFICATE-----. If this parameter is provided, this private CA certificate file will be fetched and added to Dataflow worker's trust store in order to verify Splunk HEC endpoint's SSL certificate which is signed by that private CA. If this parameter is not provided, the default trust store is used. (Example: gs://mybucket/mycerts/privateCA.crt). -* **enableBatchLogs** : Parameter which specifies if logs should be enabled for batches written to Splunk. Defaults to: true. -* **enableGzipHttpCompression** : Parameter which specifies if HTTP requests sent to Splunk HEC should be GZIP encoded. Defaults to: true. 
-* **javascriptTextTransformGcsPath** : The Cloud Storage URI of the .js file that defines the JavaScript user-defined function (UDF) to use. (Example: gs://my-bucket/my-udfs/my_file.js). -* **javascriptTextTransformFunctionName** : The name of the JavaScript user-defined function (UDF) to use. For example, if your JavaScript function code is `myTransform(inJson) { /*...do stuff...*/ }`, then the function name is `myTransform`. For sample JavaScript UDFs, see UDF Examples (https://github.com/GoogleCloudPlatform/DataflowTemplates#udf-examples). +* **containsHeaders**: Input CSV files contain a header record (true/false). Only required if reading CSV files. Defaults to: false. +* **delimiter**: The column delimiter of the input text files. Default: `,` For example, `,`. +* **csvFormat**: CSV format specification to use for parsing records. Default is: `Default`. See https://commons.apache.org/proper/commons-csv/apidocs/org/apache/commons/csv/CSVFormat.html for more details. Must match format names exactly found at: https://commons.apache.org/proper/commons-csv/apidocs/org/apache/commons/csv/CSVFormat.Predefined.html. +* **jsonSchemaPath**: The path to the JSON schema. Defaults to `null`. For example, `gs://path/to/schema`. +* **largeNumFiles**: Set to true if number of files is in the tens of thousands. Defaults to `false`. +* **csvFileEncoding**: The CSV file character encoding format. Allowed values are `US-ASCII`, `ISO-8859-1`, `UTF-8`, and `UTF-16`. Defaults to: UTF-8. +* **logDetailedCsvConversionErrors**: Set to `true` to enable detailed error logging when CSV parsing fails. Note that this may expose sensitive data in the logs (e.g., if the CSV file contains passwords). Default: `false`. +* **token**: Splunk Http Event Collector (HEC) authentication token. Must be provided if the tokenSource is set to PLAINTEXT or KMS. +* **batchCount**: Batch size for sending multiple events to Splunk HEC. Default 1 (no batching). +* **disableCertificateValidation**: Disable SSL certificate validation (true/false). Default false (validation enabled). If true, the certificates are not validated (all certificates are trusted) and `rootCaCertificatePath` parameter is ignored. +* **parallelism**: Maximum number of parallel requests. Default: 1 (no parallelism). +* **tokenKMSEncryptionKey**: The Cloud KMS key to decrypt the HEC token string. This parameter must be provided if the tokenSource is set to KMS. If this parameter is provided, token string should be passed in encrypted. Encrypt parameters using the KMS API encrypt endpoint. The Key should be in the format projects/{gcp_project}/locations/{key_region}/keyRings/{key_ring}/cryptoKeys/{kms_key_name}. See: https://cloud.google.com/kms/docs/reference/rest/v1/projects.locations.keyRings.cryptoKeys/encrypt For example, `projects/your-project-id/locations/global/keyRings/your-keyring/cryptoKeys/your-key-name`. +* **tokenSecretId**: Secret Manager secret ID for the token. This parameter should be provided if the tokenSource is set to SECRET_MANAGER. Should be in the format projects/{project}/secrets/{secret}/versions/{secret_version}. For example, `projects/your-project-id/secrets/your-secret/versions/your-secret-version`. +* **rootCaCertificatePath**: The full URL to root CA certificate in Cloud Storage. The certificate provided in Cloud Storage must be DER-encoded and may be supplied in binary or printable (Base64) encoding. 
If the certificate is provided in Base64 encoding, it must be bounded at the beginning by -----BEGIN CERTIFICATE-----, and must be bounded at the end by -----END CERTIFICATE-----. If this parameter is provided, this private CA certificate file will be fetched and added to Dataflow worker's trust store in order to verify Splunk HEC endpoint's SSL certificate which is signed by that private CA. If this parameter is not provided, the default trust store is used. For example, `gs://mybucket/mycerts/privateCA.crt`. +* **enableBatchLogs**: Parameter which specifies if logs should be enabled for batches written to Splunk. Defaults to: true. +* **enableGzipHttpCompression**: Parameter which specifies if HTTP requests sent to Splunk HEC should be GZIP encoded. Defaults to: true. +* **javascriptTextTransformGcsPath**: The Cloud Storage URI of the .js file that defines the JavaScript user-defined function (UDF) to use. For example, `gs://my-bucket/my-udfs/my_file.js`. +* **javascriptTextTransformFunctionName**: The name of the JavaScript user-defined function (UDF) to use. For example, if your JavaScript function code is `myTransform(inJson) { /*...do stuff...*/ }`, then the function name is `myTransform`. For sample JavaScript UDFs, see UDF Examples (https://github.com/GoogleCloudPlatform/DataflowTemplates#udf-examples). ## User-Defined functions (UDFs) @@ -281,15 +281,15 @@ resource "google_dataflow_flex_template_job" "gcs_to_splunk" { name = "gcs-to-splunk" region = var.region parameters = { - invalidOutputPath = "gs://your-bucket/your-path" + invalidOutputPath = "" inputFileSpec = "" - deadletterTable = "your-project:your-dataset.your-table-name" - url = "https://splunk-hec-host:8088" + deadletterTable = "" + url = "" tokenSource = "" # containsHeaders = "false" - # delimiter = "," + # delimiter = "" # csvFormat = "Default" - # jsonSchemaPath = "gs://path/to/schema" + # jsonSchemaPath = "" # largeNumFiles = "false" # csvFileEncoding = "UTF-8" # logDetailedCsvConversionErrors = "false" @@ -297,12 +297,12 @@ resource "google_dataflow_flex_template_job" "gcs_to_splunk" { # batchCount = "" # disableCertificateValidation = "" # parallelism = "" - # tokenKMSEncryptionKey = "projects/your-project-id/locations/global/keyRings/your-keyring/cryptoKeys/your-key-name" - # tokenSecretId = "projects/your-project-id/secrets/your-secret/versions/your-secret-version" - # rootCaCertificatePath = "gs://mybucket/mycerts/privateCA.crt" + # tokenKMSEncryptionKey = "" + # tokenSecretId = "" + # rootCaCertificatePath = "" # enableBatchLogs = "true" # enableGzipHttpCompression = "true" - # javascriptTextTransformGcsPath = "gs://my-bucket/my-udfs/my_file.js" + # javascriptTextTransformGcsPath = "" # javascriptTextTransformFunctionName = "" } } diff --git a/v2/googlecloud-to-splunk/README_GCS_To_Splunk_Xlang.md b/v2/googlecloud-to-splunk/README_GCS_To_Splunk_Xlang.md index 6810a88c9d..062d49e1b2 100644 --- a/v2/googlecloud-to-splunk/README_GCS_To_Splunk_Xlang.md +++ b/v2/googlecloud-to-splunk/README_GCS_To_Splunk_Xlang.md @@ -18,32 +18,32 @@ on [Metadata Annotations](https://github.com/GoogleCloudPlatform/DataflowTemplat ### Required parameters -* **invalidOutputPath** : Cloud Storage path where to write objects that could not be converted to Splunk objects or pushed to Splunk. (Example: gs://your-bucket/your-path). -* **inputFileSpec** : The Cloud Storage file pattern to search for CSV files. Example: gs://mybucket/test-*.csv. 
-* **deadletterTable** : Messages failed to reach the target for all kind of reasons (e.g., mismatched schema, malformed json) are written to this table. (Example: your-project:your-dataset.your-table-name). -* **url** : Splunk Http Event Collector (HEC) url. This should be routable from the VPC in which the pipeline runs. (Example: https://splunk-hec-host:8088). -* **tokenSource** : Source of the token. One of PLAINTEXT, KMS or SECRET_MANAGER. If tokenSource is set to KMS, tokenKMSEncryptionKey and encrypted token must be provided. If tokenSource is set to SECRET_MANAGER, tokenSecretId must be provided. If tokenSource is set to PLAINTEXT, token must be provided. +* **invalidOutputPath**: Cloud Storage path where to write objects that could not be converted to Splunk objects or pushed to Splunk. For example, `gs://your-bucket/your-path`. +* **inputFileSpec**: The Cloud Storage file pattern to search for CSV files. For example, `gs://mybucket/test-*.csv`. +* **deadletterTable**: Messages failed to reach the target for all kind of reasons (e.g., mismatched schema, malformed json) are written to this table. For example, `your-project:your-dataset.your-table-name`. +* **url**: Splunk Http Event Collector (HEC) url. This should be routable from the VPC in which the pipeline runs. For example, `https://splunk-hec-host:8088`. +* **tokenSource**: Source of the token. One of PLAINTEXT, KMS or SECRET_MANAGER. If tokenSource is set to KMS, tokenKMSEncryptionKey and encrypted token must be provided. If tokenSource is set to SECRET_MANAGER, tokenSecretId must be provided. If tokenSource is set to PLAINTEXT, token must be provided. ### Optional parameters -* **containsHeaders** : Input CSV files contain a header record (true/false). Only required if reading CSV files. Defaults to: false. -* **delimiter** : The column delimiter of the input text files. Default: use delimiter provided in csvFormat (Example: ,). -* **csvFormat** : CSV format specification to use for parsing records. Default is: Default. See https://commons.apache.org/proper/commons-csv/apidocs/org/apache/commons/csv/CSVFormat.html for more details. Must match format names exactly found at: https://commons.apache.org/proper/commons-csv/apidocs/org/apache/commons/csv/CSVFormat.Predefined.html. -* **jsonSchemaPath** : The path to the JSON schema. Defaults to: null. (Example: gs://path/to/schema). -* **largeNumFiles** : Set to true if number of files is in the tens of thousands. Defaults to: false. -* **csvFileEncoding** : The CSV file character encoding format. Allowed Values are US-ASCII, ISO-8859-1, UTF-8, and UTF-16. Defaults to: UTF-8. -* **logDetailedCsvConversionErrors** : Set to true to enable detailed error logging when CSV parsing fails. Note that this may expose sensitive data in the logs (e.g., if the CSV file contains passwords). Default: false. -* **token** : Splunk Http Event Collector (HEC) authentication token. Must be provided if the tokenSource is set to PLAINTEXT or KMS. -* **batchCount** : Batch size for sending multiple events to Splunk HEC. Default 1 (no batching). -* **disableCertificateValidation** : Disable SSL certificate validation (true/false). Default false (validation enabled). If true, the certificates are not validated (all certificates are trusted) and `rootCaCertificatePath` parameter is ignored. -* **parallelism** : Maximum number of parallel requests. Default: 1 (no parallelism). -* **tokenKMSEncryptionKey** : The Cloud KMS key to decrypt the HEC token string. 
This parameter must be provided if the tokenSource is set to KMS. If this parameter is provided, token string should be passed in encrypted. Encrypt parameters using the KMS API encrypt endpoint. The Key should be in the format projects/{gcp_project}/locations/{key_region}/keyRings/{key_ring}/cryptoKeys/{kms_key_name}. See: https://cloud.google.com/kms/docs/reference/rest/v1/projects.locations.keyRings.cryptoKeys/encrypt (Example: projects/your-project-id/locations/global/keyRings/your-keyring/cryptoKeys/your-key-name). -* **tokenSecretId** : Secret Manager secret ID for the token. This parameter should be provided if the tokenSource is set to SECRET_MANAGER. Should be in the format projects/{project}/secrets/{secret}/versions/{secret_version}. (Example: projects/your-project-id/secrets/your-secret/versions/your-secret-version). -* **rootCaCertificatePath** : The full URL to root CA certificate in Cloud Storage. The certificate provided in Cloud Storage must be DER-encoded and may be supplied in binary or printable (Base64) encoding. If the certificate is provided in Base64 encoding, it must be bounded at the beginning by -----BEGIN CERTIFICATE-----, and must be bounded at the end by -----END CERTIFICATE-----. If this parameter is provided, this private CA certificate file will be fetched and added to Dataflow worker's trust store in order to verify Splunk HEC endpoint's SSL certificate which is signed by that private CA. If this parameter is not provided, the default trust store is used. (Example: gs://mybucket/mycerts/privateCA.crt). -* **enableBatchLogs** : Parameter which specifies if logs should be enabled for batches written to Splunk. Defaults to: true. -* **enableGzipHttpCompression** : Parameter which specifies if HTTP requests sent to Splunk HEC should be GZIP encoded. Defaults to: true. -* **pythonExternalTextTransformGcsPath** : The Cloud Storage path pattern for the Python code containing your user-defined functions. (Example: gs://your-bucket/your-function.py). -* **pythonExternalTextTransformFunctionName** : The name of the function to call from your Python file. Use only letters, digits, and underscores. (Example: 'transform' or 'transform_udf1'). +* **containsHeaders**: Input CSV files contain a header record (true/false). Only required if reading CSV files. Defaults to: false. +* **delimiter**: The column delimiter of the input text files. Default: `,` For example, `,`. +* **csvFormat**: CSV format specification to use for parsing records. Default is: `Default`. See https://commons.apache.org/proper/commons-csv/apidocs/org/apache/commons/csv/CSVFormat.html for more details. Must match format names exactly found at: https://commons.apache.org/proper/commons-csv/apidocs/org/apache/commons/csv/CSVFormat.Predefined.html. +* **jsonSchemaPath**: The path to the JSON schema. Defaults to `null`. For example, `gs://path/to/schema`. +* **largeNumFiles**: Set to true if number of files is in the tens of thousands. Defaults to `false`. +* **csvFileEncoding**: The CSV file character encoding format. Allowed values are `US-ASCII`, `ISO-8859-1`, `UTF-8`, and `UTF-16`. Defaults to: UTF-8. +* **logDetailedCsvConversionErrors**: Set to `true` to enable detailed error logging when CSV parsing fails. Note that this may expose sensitive data in the logs (e.g., if the CSV file contains passwords). Default: `false`. +* **token**: Splunk Http Event Collector (HEC) authentication token. Must be provided if the tokenSource is set to PLAINTEXT or KMS. 
+* **batchCount**: Batch size for sending multiple events to Splunk HEC. Default 1 (no batching).
+* **disableCertificateValidation**: Disable SSL certificate validation (true/false). Default false (validation enabled). If true, the certificates are not validated (all certificates are trusted) and `rootCaCertificatePath` parameter is ignored.
+* **parallelism**: Maximum number of parallel requests. Default: 1 (no parallelism).
+* **tokenKMSEncryptionKey**: The Cloud KMS key to decrypt the HEC token string. This parameter must be provided if the tokenSource is set to KMS. If this parameter is provided, token string should be passed in encrypted. Encrypt parameters using the KMS API encrypt endpoint. The Key should be in the format projects/{gcp_project}/locations/{key_region}/keyRings/{key_ring}/cryptoKeys/{kms_key_name}. See: https://cloud.google.com/kms/docs/reference/rest/v1/projects.locations.keyRings.cryptoKeys/encrypt For example, `projects/your-project-id/locations/global/keyRings/your-keyring/cryptoKeys/your-key-name`.
+* **tokenSecretId**: Secret Manager secret ID for the token. This parameter should be provided if the tokenSource is set to SECRET_MANAGER. Should be in the format projects/{project}/secrets/{secret}/versions/{secret_version}. For example, `projects/your-project-id/secrets/your-secret/versions/your-secret-version`.
+* **rootCaCertificatePath**: The full URL to root CA certificate in Cloud Storage. The certificate provided in Cloud Storage must be DER-encoded and may be supplied in binary or printable (Base64) encoding. If the certificate is provided in Base64 encoding, it must be bounded at the beginning by -----BEGIN CERTIFICATE-----, and must be bounded at the end by -----END CERTIFICATE-----. If this parameter is provided, this private CA certificate file will be fetched and added to Dataflow worker's trust store in order to verify Splunk HEC endpoint's SSL certificate which is signed by that private CA. If this parameter is not provided, the default trust store is used. For example, `gs://mybucket/mycerts/privateCA.crt`.
+* **enableBatchLogs**: Parameter which specifies if logs should be enabled for batches written to Splunk. Defaults to: true.
+* **enableGzipHttpCompression**: Parameter which specifies if HTTP requests sent to Splunk HEC should be GZIP encoded. Defaults to: true.
+* **pythonExternalTextTransformGcsPath**: The Cloud Storage path pattern for the Python code containing your user-defined functions. For example, `gs://your-bucket/your-function.py`.
+* **pythonExternalTextTransformFunctionName**: The name of the function to call from your Python file. Use only letters, digits, and underscores. For example, `transform` or `transform_udf1`.
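The `pythonExternalTextTransformGcsPath` and `pythonExternalTextTransformFunctionName` parameters above point at a Python file and a function inside it. As a minimal, hypothetical sketch (not taken from this repository), assuming the template hands each element to the function as a JSON string and expects a JSON string back, such a file might look like this:

```python
# Hypothetical contents of a file staged at pythonExternalTextTransformGcsPath,
# e.g. gs://your-bucket/your-function.py. Field names are illustrative only.
import json


def transform_udf1(value):
    """Parses one element as JSON, tags it, and returns it as a JSON string."""
    record = json.loads(value)
    record["processed"] = True  # illustrative enrichment only
    return json.dumps(record)
```

With a file like this, `pythonExternalTextTransformFunctionName` would be set to `transform_udf1`.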
@@ -271,15 +271,15 @@ resource "google_dataflow_flex_template_job" "gcs_to_splunk_xlang" { name = "gcs-to-splunk-xlang" region = var.region parameters = { - invalidOutputPath = "gs://your-bucket/your-path" + invalidOutputPath = "" inputFileSpec = "" - deadletterTable = "your-project:your-dataset.your-table-name" - url = "https://splunk-hec-host:8088" + deadletterTable = "" + url = "" tokenSource = "" # containsHeaders = "false" - # delimiter = "," + # delimiter = "" # csvFormat = "Default" - # jsonSchemaPath = "gs://path/to/schema" + # jsonSchemaPath = "" # largeNumFiles = "false" # csvFileEncoding = "UTF-8" # logDetailedCsvConversionErrors = "false" @@ -287,13 +287,13 @@ resource "google_dataflow_flex_template_job" "gcs_to_splunk_xlang" { # batchCount = "" # disableCertificateValidation = "" # parallelism = "" - # tokenKMSEncryptionKey = "projects/your-project-id/locations/global/keyRings/your-keyring/cryptoKeys/your-key-name" - # tokenSecretId = "projects/your-project-id/secrets/your-secret/versions/your-secret-version" - # rootCaCertificatePath = "gs://mybucket/mycerts/privateCA.crt" + # tokenKMSEncryptionKey = "" + # tokenSecretId = "" + # rootCaCertificatePath = "" # enableBatchLogs = "true" # enableGzipHttpCompression = "true" - # pythonExternalTextTransformGcsPath = "gs://your-bucket/your-function.py" - # pythonExternalTextTransformFunctionName = "'transform' or 'transform_udf1'" + # pythonExternalTextTransformGcsPath = "" + # pythonExternalTextTransformFunctionName = "" } } ``` diff --git a/v2/jdbc-to-googlecloud/README_Jdbc_to_BigQuery_Flex.md b/v2/jdbc-to-googlecloud/README_Jdbc_to_BigQuery_Flex.md index 34c940e11f..7e930355b3 100644 --- a/v2/jdbc-to-googlecloud/README_Jdbc_to_BigQuery_Flex.md +++ b/v2/jdbc-to-googlecloud/README_Jdbc_to_BigQuery_Flex.md @@ -26,36 +26,33 @@ on [Metadata Annotations](https://github.com/GoogleCloudPlatform/DataflowTemplat ### Required parameters -* **driverJars** : The comma-separated list of driver JAR files. (Example: gs://your-bucket/driver_jar1.jar,gs://your-bucket/driver_jar2.jar). -* **driverClassName** : The JDBC driver class name. (Example: com.mysql.jdbc.Driver). -* **connectionURL** : The JDBC connection URL string. For example, `jdbc:mysql://some-host:3306/sampledb`. You can pass in this value as a string that's encrypted with a Cloud KMS key and then Base64-encoded. Remove whitespace characters from the Base64-encoded string. Note the difference between an Oracle non-RAC database connection string (`jdbc:oracle:thin:@some-host::`) and an Oracle RAC database connection string (`jdbc:oracle:thin:@//some-host[:]/`). (Example: jdbc:mysql://some-host:3306/sampledb). -* **outputTable** : The BigQuery output table location. (Example: :.). -* **bigQueryLoadingTemporaryDirectory** : The temporary directory for the BigQuery loading process. (Example: gs://your-bucket/your-files/temp_dir). +* **driverJars**: The comma-separated list of driver JAR files. For example, `gs://your-bucket/driver_jar1.jar,gs://your-bucket/driver_jar2.jar`. +* **driverClassName**: The JDBC driver class name. For example, `com.mysql.jdbc.Driver`. +* **connectionURL**: The JDBC connection URL string. For example, `jdbc:mysql://some-host:3306/sampledb`. You can pass in this value as a string that's encrypted with a Cloud KMS key and then Base64-encoded. Remove whitespace characters from the Base64-encoded string. 
Note the difference between an Oracle non-RAC database connection string (`jdbc:oracle:thin:@some-host::`) and an Oracle RAC database connection string (`jdbc:oracle:thin:@//some-host[:]/`). For example, `jdbc:mysql://some-host:3306/sampledb`. +* **outputTable**: The BigQuery output table location. For example, `:.`. +* **bigQueryLoadingTemporaryDirectory**: The temporary directory for the BigQuery loading process. For example, `gs://your-bucket/your-files/temp_dir`. ### Optional parameters -* **connectionProperties** : The properties string to use for the JDBC connection. The format of the string must be `[propertyName=property;]*`.For more information, see Configuration Properties (https://dev.mysql.com/doc/connector-j/en/connector-j-reference-configuration-properties.html) in the MySQL documentation. (Example: unicode=true;characterEncoding=UTF-8). -* **username** : The username to use for the JDBC connection. Can be passed in as a string that's encrypted with a Cloud KMS key, or can be a Secret Manager secret in the form projects/{project}/secrets/{secret}/versions/{secret_version}. -* **password** : The password to use for the JDBC connection. Can be passed in as a string that's encrypted with a Cloud KMS key, or can be a Secret Manager secret in the form projects/{project}/secrets/{secret}/versions/{secret_version}. -* **query** : The query to run on the source to extract the data. Note that some JDBC SQL and BigQuery types, although sharing the same name, have some differences. Some important SQL -> BigQuery type mappings to keep in mind are: -DATETIME --> TIMESTAMP - -Type casting may be required if your schemas do not match. This parameter can be set to a gs:// path pointing to a file in Cloud Storage to load the query from. The file encoding should be UTF-8. (Example: select * from sampledb.sample_table). -* **KMSEncryptionKey** : The Cloud KMS encryption key to use to decrypt the username, password, and connection string. If you pass in a Cloud KMS key, you must also encrypt the username, password, and connection string. (Example: projects/your-project/locations/global/keyRings/your-keyring/cryptoKeys/your-key). -* **useColumnAlias** : If set to `true`, the pipeline uses the column alias (`AS`) instead of the column name to map the rows to BigQuery. Defaults to `false`. -* **isTruncate** : If set to `true`, the pipeline truncates before loading data into BigQuery. Defaults to `false`, which causes the pipeline to append data. -* **partitionColumn** : If this parameter is provided with the name of the `table` defined as an optional parameter, JdbcIO reads the table in parallel by executing multiple instances of the query on the same table (subquery) using ranges. Currently, only supports `Long` partition columns. -* **table** : The table to read from when using partitions. This parameter also accepts a subquery in parentheses. (Example: (select id, name from Person) as subq). -* **numPartitions** : The number of partitions. With the lower and upper bound, this value forms partition strides for generated `WHERE` clause expressions that are used to split the partition column evenly. When the input is less than `1`, the number is set to `1`. -* **lowerBound** : The lower bound to use in the partition scheme. If not provided, this value is automatically inferred by Apache Beam for the supported types. -* **upperBound** : The upper bound to use in the partition scheme. If not provided, this value is automatically inferred by Apache Beam for the supported types. 
-* **fetchSize** : The number of rows to be fetched from database at a time. Not used for partitioned reads. Defaults to: 50000. -* **createDisposition** : The BigQuery CreateDisposition to use. For example, `CREATE_IF_NEEDED` or `CREATE_NEVER`. Defaults to: CREATE_NEVER. -* **bigQuerySchemaPath** : The Cloud Storage path for the BigQuery JSON schema. If `createDisposition` is set to CREATE_IF_NEEDED, this parameter must be specified. (Example: gs://your-bucket/your-schema.json). -* **disabledAlgorithms** : Comma separated algorithms to disable. If this value is set to none, no algorithm is disabled. Use this parameter with caution, because the algorithms disabled by default might have vulnerabilities or performance issues. (Example: SSLv3, RC4). -* **extraFilesToStage** : Comma separated Cloud Storage paths or Secret Manager secrets for files to stage in the worker. These files are saved in the /extra_files directory in each worker. (Example: gs:///file.txt,projects//secrets//versions/). -* **useStorageWriteApi** : If `true`, the pipeline uses the BigQuery Storage Write API (https://cloud.google.com/bigquery/docs/write-api). The default value is `false`. For more information, see Using the Storage Write API (https://beam.apache.org/documentation/io/built-in/google-bigquery/#storage-write-api). -* **useStorageWriteApiAtLeastOnce** : When using the Storage Write API, specifies the write semantics. To use at-least-once semantics (https://beam.apache.org/documentation/io/built-in/google-bigquery/#at-least-once-semantics), set this parameter to `true`. To use exactly-once semantics, set the parameter to `false`. This parameter applies only when `useStorageWriteApi` is `true`. The default value is `false`. +* **connectionProperties**: The properties string to use for the JDBC connection. The format of the string must be `[propertyName=property;]*`.For more information, see Configuration Properties (https://dev.mysql.com/doc/connector-j/8.1/en/connector-j-reference-configuration-properties.html) in the MySQL documentation. For example, `unicode=true;characterEncoding=UTF-8`. +* **username**: The username to use for the JDBC connection. You can pass in this value as a string that's encrypted with a Cloud KMS key and then Base64-encoded. Remove whitespace characters from the Base64-encoded string. +* **password**: The password to use for the JDBC connection. You can pass in this value as a string that's encrypted with a Cloud KMS key and then Base64-encoded. Remove whitespace characters from the Base64-encoded string. +* **query**: The query to run on the source to extract the data. Note that some JDBC SQL and BigQuery types, although sharing the same name, have some differences. Some important SQL -> BigQuery type mappings to keep in mind are `DATETIME --> TIMESTAMP`. Type casting may be required if your schemas do not match. For example, `select * from sampledb.sample_table`. +* **KMSEncryptionKey**: The Cloud KMS encryption key to use to decrypt the username, password, and connection string. If you pass in a Cloud KMS key, you must also encrypt the username, password, and connection string. For example, `projects/your-project/locations/global/keyRings/your-keyring/cryptoKeys/your-key`. +* **useColumnAlias**: If set to `true`, the pipeline uses the column alias (`AS`) instead of the column name to map the rows to BigQuery. Defaults to `false`. +* **isTruncate**: If set to `true`, the pipeline truncates before loading data into BigQuery. 
Defaults to `false`, which causes the pipeline to append data. +* **partitionColumn**: If this parameter is provided with the name of the `table` defined as an optional parameter, JdbcIO reads the table in parallel by executing multiple instances of the query on the same table (subquery) using ranges. Currently, only supports `Long` partition columns. +* **table**: The table to read from when using partitions. This parameter also accepts a subquery in parentheses. For example, `(select id, name from Person) as subq`. +* **numPartitions**: The number of partitions. With the lower and upper bound, this value forms partition strides for generated `WHERE` clause expressions that are used to split the partition column evenly. When the input is less than `1`, the number is set to `1`. +* **lowerBound**: The lower bound to use in the partition scheme. If not provided, this value is automatically inferred by Apache Beam for the supported types. +* **upperBound**: The upper bound to use in the partition scheme. If not provided, this value is automatically inferred by Apache Beam for the supported types. +* **fetchSize**: The number of rows to be fetched from database at a time. Not used for partitioned reads. Defaults to: 50000. +* **createDisposition**: The BigQuery CreateDisposition to use. For example, `CREATE_IF_NEEDED` or `CREATE_NEVER`. Defaults to: CREATE_NEVER. +* **bigQuerySchemaPath**: The Cloud Storage path for the BigQuery JSON schema. If `createDisposition` is set to `CREATE_IF_NEEDED`, this parameter must be specified. For example, `gs://your-bucket/your-schema.json`. +* **disabledAlgorithms**: Comma separated algorithms to disable. If this value is set to `none`, no algorithm is disabled. Use this parameter with caution, because the algorithms disabled by default might have vulnerabilities or performance issues. For example, `SSLv3, RC4`. +* **extraFilesToStage**: Comma separated Cloud Storage paths or Secret Manager secrets for files to stage in the worker. These files are saved in the /extra_files directory in each worker. For example, `gs:///file.txt,projects//secrets//versions/`. +* **useStorageWriteApi**: If `true`, the pipeline uses the BigQuery Storage Write API (https://cloud.google.com/bigquery/docs/write-api). The default value is `false`. For more information, see Using the Storage Write API (https://beam.apache.org/documentation/io/built-in/google-bigquery/#storage-write-api). +* **useStorageWriteApiAtLeastOnce**: When using the Storage Write API, specifies the write semantics. To use at-least-once semantics (https://beam.apache.org/documentation/io/built-in/google-bigquery/#at-least-once-semantics), set this parameter to `true`. To use exactly-once semantics, set the parameter to `false`. This parameter applies only when `useStorageWriteApi` is `true`. The default value is `false`. @@ -286,28 +283,28 @@ resource "google_dataflow_flex_template_job" "jdbc_to_bigquery_flex" { name = "jdbc-to-bigquery-flex" region = var.region parameters = { - driverJars = "gs://your-bucket/driver_jar1.jar,gs://your-bucket/driver_jar2.jar" - driverClassName = "com.mysql.jdbc.Driver" - connectionURL = "jdbc:mysql://some-host:3306/sampledb" - outputTable = ":." 
- bigQueryLoadingTemporaryDirectory = "gs://your-bucket/your-files/temp_dir" - # connectionProperties = "unicode=true;characterEncoding=UTF-8" + driverJars = "" + driverClassName = "" + connectionURL = "" + outputTable = "" + bigQueryLoadingTemporaryDirectory = "" + # connectionProperties = "" # username = "" # password = "" - # query = "select * from sampledb.sample_table" - # KMSEncryptionKey = "projects/your-project/locations/global/keyRings/your-keyring/cryptoKeys/your-key" + # query = "" + # KMSEncryptionKey = "" # useColumnAlias = "false" # isTruncate = "false" # partitionColumn = "" - # table = "(select id, name from Person) as subq" + # table = "" # numPartitions = "" # lowerBound = "" # upperBound = "" # fetchSize = "50000" # createDisposition = "CREATE_NEVER" - # bigQuerySchemaPath = "gs://your-bucket/your-schema.json" - # disabledAlgorithms = "SSLv3, RC4" - # extraFilesToStage = "gs:///file.txt,projects//secrets//versions/" + # bigQuerySchemaPath = "" + # disabledAlgorithms = "" + # extraFilesToStage = "" # useStorageWriteApi = "false" # useStorageWriteApiAtLeastOnce = "false" } diff --git a/v2/jdbc-to-googlecloud/README_Jdbc_to_PubSub.md b/v2/jdbc-to-googlecloud/README_Jdbc_to_PubSub.md index 933422b961..2fba2e3cde 100644 --- a/v2/jdbc-to-googlecloud/README_Jdbc_to_PubSub.md +++ b/v2/jdbc-to-googlecloud/README_Jdbc_to_PubSub.md @@ -18,20 +18,20 @@ on [Metadata Annotations](https://github.com/GoogleCloudPlatform/DataflowTemplat ### Required parameters -* **driverClassName** : The JDBC driver class name. (Example: com.mysql.jdbc.Driver). -* **connectionUrl** : The JDBC connection URL string. You can pass in this value as a string that's encrypted with a Cloud KMS key and then Base64-encoded. For example: 'echo -n "jdbc:mysql://some-host:3306/sampledb" | gcloud kms encrypt --location= --keyring= --key= --plaintext-file=- --ciphertext-file=- | base64' (Example: jdbc:mysql://some-host:3306/sampledb). -* **driverJars** : Comma-separated Cloud Storage paths for JDBC drivers. (Example: gs://your-bucket/driver_jar1.jar,gs://your-bucket/driver_jar2.jar). -* **query** : The query to run on the source to extract the data. (Example: select * from sampledb.sample_table). -* **outputTopic** : The Pub/Sub topic to publish to, in the format projects//topics/. (Example: projects/your-project-id/topics/your-topic-name). +* **driverClassName**: The JDBC driver class name. For example, `com.mysql.jdbc.Driver`. +* **connectionUrl**: The JDBC connection URL string. You can pass in this value as a string that's encrypted with a Cloud KMS key and then Base64-encoded. For example: 'echo -n "jdbc:mysql://some-host:3306/sampledb" | gcloud kms encrypt --location= --keyring= --key= --plaintext-file=- --ciphertext-file=- | base64' For example, `jdbc:mysql://some-host:3306/sampledb`. +* **driverJars**: Comma-separated Cloud Storage paths for JDBC drivers. For example, `gs://your-bucket/driver_jar1.jar,gs://your-bucket/driver_jar2.jar`. +* **query**: The query to run on the source to extract the data. For example, `select * from sampledb.sample_table`. +* **outputTopic**: The Pub/Sub topic to publish to. For example, `projects//topics/`. ### Optional parameters -* **username** : The username to use for the JDBC connection. You can pass in this value as a string that's encrypted with a Cloud KMS key and then Base64-encoded. For example, `echo -n 'some_username' | glcloud kms encrypt --location=my_location --keyring=mykeyring --key=mykey --plaintext-file=- --ciphertext-file=- | base64`. 
-* **password** : The password to use for the JDBC connection. You can pass in this value as a string that's encrypted with a Cloud KMS key and then Base64-encoded. For example, `echo -n 'some_password' | glcloud kms encrypt --location=my_location --keyring=mykeyring --key=mykey --plaintext-file=- --ciphertext-file=- | base64`. -* **connectionProperties** : The properties string to use for the JDBC connection. The format of the string must be `[propertyName=property;]*`. (Example: unicode=true;characterEncoding=UTF-8). -* **KMSEncryptionKey** : The Cloud KMS Encryption Key to use to decrypt the username, password, and connection string. If a Cloud KMS key is passed in, the username, password, and connection string must all be passed in encrypted and base64 encoded. (Example: projects/your-project/locations/global/keyRings/your-keyring/cryptoKeys/your-key). -* **disabledAlgorithms** : Comma separated algorithms to disable. If this value is set to none, no algorithm is disabled. Use this parameter with caution, because the algorithms disabled by default might have vulnerabilities or performance issues. (Example: SSLv3, RC4). -* **extraFilesToStage** : Comma separated Cloud Storage paths or Secret Manager secrets for files to stage in the worker. These files are saved in the /extra_files directory in each worker. (Example: gs:///file.txt,projects//secrets//versions/). +* **username**: The username to use for the JDBC connection. You can pass in this value as a string that's encrypted with a Cloud KMS key and then Base64-encoded. For example, `echo -n 'some_username' | glcloud kms encrypt --location=my_location --keyring=mykeyring --key=mykey --plaintext-file=- --ciphertext-file=- | base64`. +* **password**: The password to use for the JDBC connection. You can pass in this value as a string that's encrypted with a Cloud KMS key and then Base64-encoded. For example, `echo -n 'some_password' | glcloud kms encrypt --location=my_location --keyring=mykeyring --key=mykey --plaintext-file=- --ciphertext-file=- | base64`. +* **connectionProperties**: The properties string to use for the JDBC connection. The format of the string must be `[propertyName=property;]*`. For example, `unicode=true;characterEncoding=UTF-8`. +* **KMSEncryptionKey**: The Cloud KMS Encryption Key to use to decrypt the username, password, and connection string. If a Cloud KMS key is passed in, the username, password, and connection string must all be passed in encrypted and base64 encoded. For example, `projects/your-project/locations/global/keyRings/your-keyring/cryptoKeys/your-key`. +* **disabledAlgorithms**: Comma separated algorithms to disable. If this value is set to `none`, no algorithm is disabled. Use this parameter with caution, because the algorithms disabled by default might have vulnerabilities or performance issues. For example, `SSLv3, RC4`. +* **extraFilesToStage**: Comma separated Cloud Storage paths or Secret Manager secrets for files to stage in the worker. These files are saved in the /extra_files directory in each worker. For example, `gs:///file.txt,projects//secrets//versions/`. 
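The `connectionUrl`, `username`, and `password` parameters above all accept values that have been encrypted with a Cloud KMS key and then Base64-encoded, with `KMSEncryptionKey` identifying the key used for decryption at runtime. The block below is a minimal sketch of that preparation step, not part of this change; the key ring, key, and sample credential values are placeholders.

```sh
# Sketch only: encrypt JDBC credentials with Cloud KMS and Base64-encode them
# before passing them as template parameters. Key ring, key, and sample values
# are placeholders.
encrypt_param() {
  echo -n "$1" \
    | gcloud kms encrypt \
        --location=global \
        --keyring=your-keyring \
        --key=your-key \
        --plaintext-file=- \
        --ciphertext-file=- \
    | base64 | tr -d '\n'   # strip whitespace, as the parameter docs require
}

CONNECTION_URL=$(encrypt_param "jdbc:mysql://some-host:3306/sampledb")
USERNAME=$(encrypt_param "some_username")
PASSWORD=$(encrypt_param "some_password")
# When launching the template, also set KMSEncryptionKey to the same key, e.g.
# projects/your-project/locations/global/keyRings/your-keyring/cryptoKeys/your-key,
# so the pipeline can decrypt these values.
```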
@@ -223,17 +223,17 @@ resource "google_dataflow_flex_template_job" "jdbc_to_pubsub" { name = "jdbc-to-pubsub" region = var.region parameters = { - driverClassName = "com.mysql.jdbc.Driver" - connectionUrl = "jdbc:mysql://some-host:3306/sampledb" - driverJars = "gs://your-bucket/driver_jar1.jar,gs://your-bucket/driver_jar2.jar" - query = "select * from sampledb.sample_table" - outputTopic = "projects/your-project-id/topics/your-topic-name" + driverClassName = "" + connectionUrl = "" + driverJars = "" + query = "" + outputTopic = "" # username = "" # password = "" - # connectionProperties = "unicode=true;characterEncoding=UTF-8" - # KMSEncryptionKey = "projects/your-project/locations/global/keyRings/your-keyring/cryptoKeys/your-key" - # disabledAlgorithms = "SSLv3, RC4" - # extraFilesToStage = "gs:///file.txt,projects//secrets//versions/" + # connectionProperties = "" + # KMSEncryptionKey = "" + # disabledAlgorithms = "" + # extraFilesToStage = "" } } ``` diff --git a/v2/jdbc-to-googlecloud/README_Jdbc_to_PubSub_Auto.md b/v2/jdbc-to-googlecloud/README_Jdbc_to_PubSub_Auto.md index ed9e24018d..959bd0103e 100644 --- a/v2/jdbc-to-googlecloud/README_Jdbc_to_PubSub_Auto.md +++ b/v2/jdbc-to-googlecloud/README_Jdbc_to_PubSub_Auto.md @@ -15,23 +15,23 @@ on [Metadata Annotations](https://github.com/GoogleCloudPlatform/DataflowTemplat ### Required parameters -* **driverClassName** : JDBC driver class name to use. (Example: com.mysql.jdbc.Driver). -* **connectionUrl** : Url connection string to connect to the JDBC source. Connection string can be passed in as plaintext or as a base64 encoded string encrypted by Google Cloud KMS. (Example: jdbc:mysql://some-host:3306/sampledb). -* **driverJars** : Comma separate Cloud Storage paths for JDBC drivers. (Example: gs://your-bucket/driver_jar1.jar,gs://your-bucket/driver_jar2.jar). -* **query** : Query to be executed on the source to extract the data. (Example: select * from sampledb.sample_table). -* **outputTopic** : The name of the topic to which data should published, in the format of 'projects/your-project-id/topics/your-topic-name' (Example: projects/your-project-id/topics/your-topic-name). +* **driverClassName**: JDBC driver class name to use. For example, `com.mysql.jdbc.Driver`. +* **connectionUrl**: Url connection string to connect to the JDBC source. Connection string can be passed in as plaintext or as a base64 encoded string encrypted by Google Cloud KMS. For example, `jdbc:mysql://some-host:3306/sampledb`. +* **driverJars**: Comma separate Cloud Storage paths for JDBC drivers. For example, `gs://your-bucket/driver_jar1.jar,gs://your-bucket/driver_jar2.jar`. +* **query**: Query to be executed on the source to extract the data. For example, `select * from sampledb.sample_table`. +* **outputTopic**: The name of the topic to publish data to. For example, `projects//topics/`. ### Optional parameters -* **username** : User name to be used for the JDBC connection. User name can be passed in as plaintext or as a base64 encoded string encrypted by Google Cloud KMS. -* **password** : Password to be used for the JDBC connection. Password can be passed in as plaintext or as a base64 encoded string encrypted by Google Cloud KMS. -* **connectionProperties** : Properties string to use for the JDBC connection. Format of the string must be [propertyName=property;]*. (Example: unicode=true;characterEncoding=UTF-8). -* **KMSEncryptionKey** : If this parameter is provided, password, user name and connection string should all be passed in encrypted. 
Encrypt parameters using the KMS API encrypt endpoint. See: https://cloud.google.com/kms/docs/reference/rest/v1/projects.locations.keyRings.cryptoKeys/encrypt (Example: projects/your-project/locations/global/keyRings/your-keyring/cryptoKeys/your-key). -* **partitionColumn** : If this parameter is provided (along with `table`), JdbcIO reads the table in parallel by executing multiple instances of the query on the same table (subquery) using ranges. Currently, only Long partition columns are supported. -* **table** : Table to read from using partitions. This parameter also accepts a subquery in parentheses. (Example: (select id, name from Person) as subq). -* **numPartitions** : The number of partitions. This, along with the lower and upper bound, form partitions strides for generated WHERE clause expressions used to split the partition column evenly. When the input is less than 1, the number is set to 1. -* **lowerBound** : Lower bound used in the partition scheme. If not provided, it is automatically inferred by Beam (for the supported types). -* **upperBound** : Upper bound used in partition scheme. If not provided, it is automatically inferred by Beam (for the supported types). +* **username**: User name to be used for the JDBC connection. User name can be passed in as plaintext or as a base64 encoded string encrypted by Google Cloud KMS. +* **password**: Password to be used for the JDBC connection. Password can be passed in as plaintext or as a base64 encoded string encrypted by Google Cloud KMS. +* **connectionProperties**: Properties string to use for the JDBC connection. Format of the string must be [propertyName=property;]*. For example, `unicode=true;characterEncoding=UTF-8`. +* **KMSEncryptionKey**: If this parameter is provided, password, user name and connection string should all be passed in encrypted. Encrypt parameters using the KMS API encrypt endpoint. See: https://cloud.google.com/kms/docs/reference/rest/v1/projects.locations.keyRings.cryptoKeys/encrypt For example, `projects/your-project/locations/global/keyRings/your-keyring/cryptoKeys/your-key`. +* **partitionColumn**: If this parameter is provided (along with `table`), JdbcIO reads the table in parallel by executing multiple instances of the query on the same table (subquery) using ranges. Currently, only Long partition columns are supported. +* **table**: Table to read from using partitions. This parameter also accepts a subquery in parentheses. For example, `(select id, name from Person) as subq`. +* **numPartitions**: The number of partitions. This, along with the lower and upper bound, form partitions strides for generated WHERE clause expressions used to split the partition column evenly. When the input is less than 1, the number is set to 1. +* **lowerBound**: Lower bound used in the partition scheme. If not provided, it is automatically inferred by Beam (for the supported types). +* **upperBound**: Upper bound used in partition scheme. If not provided, it is automatically inferred by Beam (for the supported types). 
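The partitioned-read parameters above (`partitionColumn`, `table`, `numPartitions`, and the optional bounds) are easiest to read in the context of a launch command. The following is an untested sketch with placeholder values; `$TEMPLATE_SPEC_GCSPATH` is assumed to point at a staged spec for this template, following the convention used elsewhere in these READMEs.

```sh
# Sketch only: launch Jdbc_to_PubSub_Auto with a partitioned read over the "id"
# column. All values are placeholders; lowerBound and upperBound are omitted so
# that Apache Beam infers them.
gcloud dataflow flex-template run "jdbc-to-pubsub-auto-partitioned" \
  --project "$PROJECT" \
  --region "$REGION" \
  --template-file-gcs-location "$TEMPLATE_SPEC_GCSPATH" \
  --parameters "driverClassName=com.mysql.jdbc.Driver" \
  --parameters "connectionUrl=jdbc:mysql://some-host:3306/sampledb" \
  --parameters "driverJars=gs://your-bucket/mysql-connector-java.jar" \
  --parameters "query=select * from Person" \
  --parameters "outputTopic=projects/your-project-id/topics/your-topic-name" \
  --parameters "partitionColumn=id" \
  --parameters "table=Person" \
  --parameters "numPartitions=4"
```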
@@ -232,17 +232,17 @@ resource "google_dataflow_flex_template_job" "jdbc_to_pubsub_auto" { name = "jdbc-to-pubsub-auto" region = var.region parameters = { - driverClassName = "com.mysql.jdbc.Driver" - connectionUrl = "jdbc:mysql://some-host:3306/sampledb" - driverJars = "gs://your-bucket/driver_jar1.jar,gs://your-bucket/driver_jar2.jar" - query = "select * from sampledb.sample_table" - outputTopic = "projects/your-project-id/topics/your-topic-name" + driverClassName = "" + connectionUrl = "" + driverJars = "" + query = "" + outputTopic = "" # username = "" # password = "" - # connectionProperties = "unicode=true;characterEncoding=UTF-8" - # KMSEncryptionKey = "projects/your-project/locations/global/keyRings/your-keyring/cryptoKeys/your-key" + # connectionProperties = "" + # KMSEncryptionKey = "" # partitionColumn = "" - # table = "(select id, name from Person) as subq" + # table = "
" # numPartitions = "" # lowerBound = "" # upperBound = "" diff --git a/v2/jdbc-to-googlecloud/src/main/java/com/google/cloud/teleport/v2/options/JdbcToBigQueryOptions.java b/v2/jdbc-to-googlecloud/src/main/java/com/google/cloud/teleport/v2/options/JdbcToBigQueryOptions.java index 5fd75bb9f1..bb767126ae 100644 --- a/v2/jdbc-to-googlecloud/src/main/java/com/google/cloud/teleport/v2/options/JdbcToBigQueryOptions.java +++ b/v2/jdbc-to-googlecloud/src/main/java/com/google/cloud/teleport/v2/options/JdbcToBigQueryOptions.java @@ -103,11 +103,8 @@ public interface JdbcToBigQueryOptions description = "JDBC source SQL query", helpText = "The query to run on the source to extract the data. Note that some JDBC SQL and BigQuery types, although sharing the same name, have some differences. " - + "Some important SQL -> BigQuery type mappings to keep in mind are:\n" - + "DATETIME --> TIMESTAMP\n" - + "\nType casting may be required if your schemas do not match. " - + "This parameter can be set to a gs:// path pointing to a file in Cloud Storage to load the query from. " - + "The file encoding should be UTF-8.", + + "Some important SQL -> BigQuery type mappings to keep in mind are `DATETIME --> TIMESTAMP`." + + " Type casting may be required if your schemas do not match.", example = "select * from sampledb.sample_table") String getQuery(); @@ -251,7 +248,7 @@ public interface JdbcToBigQueryOptions optional = true, description = "Cloud Storage path to BigQuery JSON schema", helpText = - "The Cloud Storage path for the BigQuery JSON schema. If `createDisposition` is set to CREATE_IF_NEEDED, this parameter must be specified.", + "The Cloud Storage path for the BigQuery JSON schema. If `createDisposition` is set to `CREATE_IF_NEEDED`, this parameter must be specified.", example = "gs://your-bucket/your-schema.json") String getBigQuerySchemaPath(); diff --git a/v2/jdbc-to-googlecloud/src/main/java/com/google/cloud/teleport/v2/options/JdbcToPubsubOptions.java b/v2/jdbc-to-googlecloud/src/main/java/com/google/cloud/teleport/v2/options/JdbcToPubsubOptions.java index bdd3c21d96..64a6b97406 100644 --- a/v2/jdbc-to-googlecloud/src/main/java/com/google/cloud/teleport/v2/options/JdbcToPubsubOptions.java +++ b/v2/jdbc-to-googlecloud/src/main/java/com/google/cloud/teleport/v2/options/JdbcToPubsubOptions.java @@ -109,9 +109,8 @@ public interface JdbcToPubsubOptions extends CommonTemplateOptions { order = 8, groupName = "Target", description = "Output Pub/Sub topic", - helpText = - "The Pub/Sub topic to publish to, in the format projects//topics/.", - example = "projects/your-project-id/topics/your-topic-name") + helpText = "The Pub/Sub topic to publish to.", + example = "projects//topics/") @Validation.Required String getOutputTopic(); diff --git a/v2/jms-to-pubsub/README_Jms_to_PubSub.md b/v2/jms-to-pubsub/README_Jms_to_PubSub.md index 62410db107..813d400df4 100644 --- a/v2/jms-to-pubsub/README_Jms_to_PubSub.md +++ b/v2/jms-to-pubsub/README_Jms_to_PubSub.md @@ -17,15 +17,15 @@ on [Metadata Annotations](https://github.com/GoogleCloudPlatform/DataflowTemplat ### Required parameters -* **inputName** : The name of the JMS topic or queue that data is read from. (Example: queue). -* **inputType** : The JMS destination type to read data from. Can be a queue or a topic. (Example: queue). -* **outputTopic** : The name of the Pub/Sub topic to publish data to, in the format `projects//topics/`. (Example: projects/your-project-id/topics/your-topic-name). -* **username** : The username to use for authentication on the JMS server. 
(Example: sampleusername). -* **password** : The password associated with the provided username. (Example: samplepassword). +* **inputName**: The name of the JMS topic or queue that data is read from. For example, `queue`. +* **inputType**: The JMS destination type to read data from. Can be a queue or a topic. For example, `queue`. +* **outputTopic**: The name of the Pub/Sub topic to publish data to. For example, `projects//topics/`. +* **username**: The username to use for authentication on the JMS server. For example, `sampleusername`. +* **password**: The password associated with the provided username. For example, `samplepassword`. ### Optional parameters -* **jmsServer** : The JMS (ActiveMQ) Server IP. (Example: tcp://10.0.0.1:61616). +* **jmsServer**: The JMS (ActiveMQ) Server IP. For example, `tcp://10.0.0.1:61616`. @@ -202,12 +202,12 @@ resource "google_dataflow_flex_template_job" "jms_to_pubsub" { name = "jms-to-pubsub" region = var.region parameters = { - inputName = "queue" - inputType = "queue" - outputTopic = "projects/your-project-id/topics/your-topic-name" - username = "sampleusername" - password = "samplepassword" - # jmsServer = "tcp://10.0.0.1:61616" + inputName = "" + inputType = "" + outputTopic = "" + username = "" + password = "" + # jmsServer = "" } } ``` diff --git a/v2/jms-to-pubsub/src/main/java/com/google/cloud/teleport/v2/templates/JmsToPubsub.java b/v2/jms-to-pubsub/src/main/java/com/google/cloud/teleport/v2/templates/JmsToPubsub.java index 0c83a8292f..2c416dc9fd 100644 --- a/v2/jms-to-pubsub/src/main/java/com/google/cloud/teleport/v2/templates/JmsToPubsub.java +++ b/v2/jms-to-pubsub/src/main/java/com/google/cloud/teleport/v2/templates/JmsToPubsub.java @@ -178,9 +178,8 @@ public interface JmsToPubsubOptions extends PipelineOptions { order = 4, groupName = "Target", description = "Output Pub/Sub topic", - helpText = - "The name of the Pub/Sub topic to publish data to, in the format `projects//topics/`.", - example = "projects/your-project-id/topics/your-topic-name") + helpText = "The name of the Pub/Sub topic to publish data to.", + example = "projects//topics/") @Validation.Required String getOutputTopic(); diff --git a/v2/kafka-common/src/main/java/com/google/cloud/teleport/v2/kafka/options/KafkaReadOptions.java b/v2/kafka-common/src/main/java/com/google/cloud/teleport/v2/kafka/options/KafkaReadOptions.java index 6d42248293..91717c2bf2 100644 --- a/v2/kafka-common/src/main/java/com/google/cloud/teleport/v2/kafka/options/KafkaReadOptions.java +++ b/v2/kafka-common/src/main/java/com/google/cloud/teleport/v2/kafka/options/KafkaReadOptions.java @@ -102,18 +102,11 @@ final class Offset { }, description = "Kafka Source Authentication Mode", helpText = - ("The mode of authentication to use with the Kafka cluster. " - + "Use " - + KafkaAuthenticationMethod.NONE - + " for no authentication, " - + KafkaAuthenticationMethod.SASL_PLAIN - + " for SASL/PLAIN username and password, " - + KafkaAuthenticationMethod.TLS - + "for certificate-based authentication. " - + KafkaAuthenticationMethod.APPLICATION_DEFAULT_CREDENTIALS - + " should be used only for Google Cloud Apache Kafka for BigQuery cluster since " - + "This allow you to authenticate with Google Cloud Apache Kafka for BigQuery using application default credentials")) - @Default.String(KafkaAuthenticationMethod.APPLICATION_DEFAULT_CREDENTIALS) + "The mode of authentication to use with the Kafka cluster. 
" + + "Use `KafkaAuthenticationMethod.NONE` for no authentication, `KafkaAuthenticationMethod.SASL_PLAIN` for SASL/PLAIN username and password, " + + "and `KafkaAuthenticationMethod.TLS` for certificate-based authentication. `KafkaAuthenticationMethod.APPLICATION_DEFAULT_CREDENTIALS` " + + "should be used only for Google Cloud Apache Kafka for BigQuery cluster, it allows to authenticate using application default credentials.") + @Default.String(KafkaAuthenticationMethod.SASL_PLAIN) String getKafkaReadAuthenticationMode(); void setKafkaReadAuthenticationMode(String value); @@ -127,7 +120,7 @@ final class Offset { description = "Secret Version ID For Kafka SASL/PLAIN Username", helpText = "The Google Cloud Secret Manager secret ID that contains the Kafka username " - + "to use with SASL_PLAIN authentication.", + + "to use with `SASL_PLAIN` authentication.", example = "projects//secrets//versions/") @Default.String("") String getKafkaReadUsernameSecretId(); @@ -142,7 +135,7 @@ final class Offset { optional = true, description = "Secret Version ID For Kafka SASL/PLAIN Password", helpText = - "The Google Cloud Secret Manager secret ID that contains the Kafka password to use with SASL_PLAIN authentication.", + "The Google Cloud Secret Manager secret ID that contains the Kafka password to use with `SASL_PLAIN` authentication.", example = "projects//secrets//versions/") @Default.String("") String getKafkaReadPasswordSecretId(); diff --git a/v2/kafka-common/src/main/java/com/google/cloud/teleport/v2/kafka/options/SchemaRegistryOptions.java b/v2/kafka-common/src/main/java/com/google/cloud/teleport/v2/kafka/options/SchemaRegistryOptions.java index 36c93b5f28..932c3420c7 100644 --- a/v2/kafka-common/src/main/java/com/google/cloud/teleport/v2/kafka/options/SchemaRegistryOptions.java +++ b/v2/kafka-common/src/main/java/com/google/cloud/teleport/v2/kafka/options/SchemaRegistryOptions.java @@ -34,7 +34,7 @@ public interface SchemaRegistryOptions extends PipelineOptions { }, description = "Kafka Message Format", helpText = - "The format of the Kafka messages to read. The supported values are AVRO_CONFLUENT_WIRE_FORMAT (Confluent Schema Registry encoded Avro), AVRO_BINARY_ENCODING (Plain binary Avro), and JSON.") + "The format of the Kafka messages to read. The supported values are `AVRO_CONFLUENT_WIRE_FORMAT` (Confluent Schema Registry encoded Avro), `AVRO_BINARY_ENCODING` (Plain binary Avro), and `JSON`.") @Default.String(MessageFormatConstants.AVRO_CONFLUENT_WIRE_FORMAT) String getMessageFormat(); @@ -53,9 +53,9 @@ public interface SchemaRegistryOptions extends PipelineOptions { description = "Schema Source", optional = true, helpText = - "The Kafka schema format. Can be provided as SINGLE_SCHEMA_FILE or SCHEMA_REGISTRY. " - + "If SINGLE_SCHEMA_FILE is specified, all messages should have the schema mentioned in the avro schema file. " - + "If SCHEMA_REGISTRY is specified, the messages can have either a single schema or multiple schemas.") + "The Kafka schema format. Can be provided as `SINGLE_SCHEMA_FILE` or `SCHEMA_REGISTRY`. " + + "If `SINGLE_SCHEMA_FILE` is specified, use the schema mentioned in the avro schema file for all messages. 
" + + "If `SCHEMA_REGISTRY` is specified, the messages can have either a single schema or multiple schemas.") @Default.String(SchemaFormat.SINGLE_SCHEMA_FILE) String getSchemaFormat(); diff --git a/v2/kafka-to-bigquery/README_Kafka_to_BigQuery.md b/v2/kafka-to-bigquery/README_Kafka_to_BigQuery.md index 3bd8ec7c75..c5ab89ace9 100644 --- a/v2/kafka-to-bigquery/README_Kafka_to_BigQuery.md +++ b/v2/kafka-to-bigquery/README_Kafka_to_BigQuery.md @@ -21,26 +21,26 @@ on [Metadata Annotations](https://github.com/GoogleCloudPlatform/DataflowTemplat ### Required parameters -* **outputTableSpec** : The BigQuery output table location to write the output to. For example, `:.`.Depending on the `createDisposition` specified, the output table might be created automatically using the user provided Avro schema. +* **outputTableSpec**: The BigQuery output table location to write the output to. For example, `:.`.Depending on the `createDisposition` specified, the output table might be created automatically using the user provided Avro schema. ### Optional parameters -* **readBootstrapServers** : Kafka Bootstrap Server list, separated by commas. (Example: localhost:9092,127.0.0.1:9093). -* **bootstrapServers** : The host address of the running Apache Kafka broker servers in a comma-separated list. Each host address must be in the format `35.70.252.199:9092`. (Example: localhost:9092,127.0.0.1:9093). -* **kafkaReadTopics** : Kafka topic(s) to read input from. (Example: topic1,topic2). -* **inputTopics** : The Apache Kafka input topics to read from in a comma-separated list. (Example: topic1,topic2). -* **outputDeadletterTable** : BigQuery table for failed messages. Messages failed to reach the output table for different reasons (e.g., mismatched schema, malformed json) are written to this table. If it doesn't exist, it will be created during pipeline execution. If not specified, "outputTableSpec_error_records" is used instead. (Example: your-project-id:your-dataset.your-table-name). -* **messageFormat** : The message format. Can be AVRO or JSON. Defaults to: JSON. -* **avroSchemaPath** : Cloud Storage path to Avro schema file. For example, gs://MyBucket/file.avsc. -* **useStorageWriteApiAtLeastOnce** : This parameter takes effect only if "Use BigQuery Storage Write API" is enabled. If enabled the at-least-once semantics will be used for Storage Write API, otherwise exactly-once semantics will be used. Defaults to: false. -* **javascriptTextTransformGcsPath** : The Cloud Storage URI of the .js file that defines the JavaScript user-defined function (UDF) to use. (Example: gs://my-bucket/my-udfs/my_file.js). -* **javascriptTextTransformFunctionName** : The name of the JavaScript user-defined function (UDF) to use. For example, if your JavaScript function code is `myTransform(inJson) { /*...do stuff...*/ }`, then the function name is `myTransform`. For sample JavaScript UDFs, see UDF Examples (https://github.com/GoogleCloudPlatform/DataflowTemplates#udf-examples). -* **javascriptTextTransformReloadIntervalMinutes** : Specifies how frequently to reload the UDF, in minutes. If the value is greater than 0, Dataflow periodically checks the UDF file in Cloud Storage, and reloads the UDF if the file is modified. This parameter allows you to update the UDF while the pipeline is running, without needing to restart the job. If the value is 0, UDF reloading is disabled. The default value is 0. 
-* **writeDisposition** : The BigQuery WriteDisposition (https://cloud.google.com/bigquery/docs/reference/rest/v2/Job#jobconfigurationload) value. For example, `WRITE_APPEND`, `WRITE_EMPTY`, or `WRITE_TRUNCATE`. Defaults to `WRITE_APPEND`. -* **createDisposition** : The BigQuery CreateDisposition (https://cloud.google.com/bigquery/docs/reference/rest/v2/Job#jobconfigurationload). For example, `CREATE_IF_NEEDED` and `CREATE_NEVER`. Defaults to `CREATE_IF_NEEDED`. -* **useStorageWriteApi** : If true, the pipeline uses the BigQuery Storage Write API (https://cloud.google.com/bigquery/docs/write-api). The default value is `false`. For more information, see Using the Storage Write API (https://beam.apache.org/documentation/io/built-in/google-bigquery/#storage-write-api). -* **numStorageWriteApiStreams** : When using the Storage Write API, specifies the number of write streams. If `useStorageWriteApi` is `true` and `useStorageWriteApiAtLeastOnce` is `false`, then you must set this parameter. Defaults to: 0. -* **storageWriteApiTriggeringFrequencySec** : When using the Storage Write API, specifies the triggering frequency, in seconds. If `useStorageWriteApi` is `true` and `useStorageWriteApiAtLeastOnce` is `false`, then you must set this parameter. +* **readBootstrapServers**: Kafka Bootstrap Server list, separated by commas. For example, `localhost:9092,127.0.0.1:9093`. +* **bootstrapServers**: The host address of the running Apache Kafka broker servers in a comma-separated list. Each host address must be in the format `35.70.252.199:9092`. For example, `localhost:9092,127.0.0.1:9093`. +* **kafkaReadTopics**: Kafka topic(s) to read input from. For example, `topic1,topic2`. +* **inputTopics**: The Apache Kafka input topics to read from in a comma-separated list. For example, `topic1,topic2`. +* **outputDeadletterTable**: BigQuery table for failed messages. Messages failed to reach the output table for different reasons (e.g., mismatched schema, malformed json) are written to this table. If it doesn't exist, it will be created during pipeline execution. If not specified, "outputTableSpec_error_records" is used instead. For example, `your-project-id:your-dataset.your-table-name`. +* **messageFormat**: The message format. Can be AVRO or JSON. Defaults to: JSON. +* **avroSchemaPath**: Cloud Storage path to Avro schema file. For example, gs://MyBucket/file.avsc. +* **useStorageWriteApiAtLeastOnce**: This parameter takes effect only if "Use BigQuery Storage Write API" is enabled. If enabled the at-least-once semantics will be used for Storage Write API, otherwise exactly-once semantics will be used. Defaults to: false. +* **javascriptTextTransformGcsPath**: The Cloud Storage URI of the .js file that defines the JavaScript user-defined function (UDF) to use. For example, `gs://my-bucket/my-udfs/my_file.js`. +* **javascriptTextTransformFunctionName**: The name of the JavaScript user-defined function (UDF) to use. For example, if your JavaScript function code is `myTransform(inJson) { /*...do stuff...*/ }`, then the function name is `myTransform`. For sample JavaScript UDFs, see UDF Examples (https://github.com/GoogleCloudPlatform/DataflowTemplates#udf-examples). +* **javascriptTextTransformReloadIntervalMinutes**: Specifies how frequently to reload the UDF, in minutes. If the value is greater than 0, Dataflow periodically checks the UDF file in Cloud Storage, and reloads the UDF if the file is modified. 
This parameter allows you to update the UDF while the pipeline is running, without needing to restart the job. If the value is `0`, UDF reloading is disabled. The default value is `0`. +* **writeDisposition**: The BigQuery WriteDisposition (https://cloud.google.com/bigquery/docs/reference/rest/v2/Job#jobconfigurationload) value. For example, `WRITE_APPEND`, `WRITE_EMPTY`, or `WRITE_TRUNCATE`. Defaults to `WRITE_APPEND`. +* **createDisposition**: The BigQuery CreateDisposition (https://cloud.google.com/bigquery/docs/reference/rest/v2/Job#jobconfigurationload). For example, `CREATE_IF_NEEDED` and `CREATE_NEVER`. Defaults to `CREATE_IF_NEEDED`. +* **useStorageWriteApi**: If true, the pipeline uses the BigQuery Storage Write API (https://cloud.google.com/bigquery/docs/write-api). The default value is `false`. For more information, see Using the Storage Write API (https://beam.apache.org/documentation/io/built-in/google-bigquery/#storage-write-api). +* **numStorageWriteApiStreams**: When using the Storage Write API, specifies the number of write streams. If `useStorageWriteApi` is `true` and `useStorageWriteApiAtLeastOnce` is `false`, then you must set this parameter. Defaults to: 0. +* **storageWriteApiTriggeringFrequencySec**: When using the Storage Write API, specifies the triggering frequency, in seconds. If `useStorageWriteApi` is `true` and `useStorageWriteApiAtLeastOnce` is `false`, then you must set this parameter. ## User-Defined functions (UDFs) @@ -261,15 +261,15 @@ resource "google_dataflow_flex_template_job" "kafka_to_bigquery" { region = var.region parameters = { outputTableSpec = "" - # readBootstrapServers = "localhost:9092,127.0.0.1:9093" - # bootstrapServers = "localhost:9092,127.0.0.1:9093" - # kafkaReadTopics = "topic1,topic2" - # inputTopics = "topic1,topic2" - # outputDeadletterTable = "your-project-id:your-dataset.your-table-name" + # readBootstrapServers = "" + # bootstrapServers = "" + # kafkaReadTopics = "" + # inputTopics = "" + # outputDeadletterTable = "" # messageFormat = "JSON" # avroSchemaPath = "" # useStorageWriteApiAtLeastOnce = "false" - # javascriptTextTransformGcsPath = "gs://my-bucket/my-udfs/my_file.js" + # javascriptTextTransformGcsPath = "" # javascriptTextTransformFunctionName = "" # javascriptTextTransformReloadIntervalMinutes = "0" # writeDisposition = "WRITE_APPEND" diff --git a/v2/kafka-to-bigquery/README_Kafka_to_BigQuery_Flex.md b/v2/kafka-to-bigquery/README_Kafka_to_BigQuery_Flex.md index 7400bca7b7..4b44e3fc99 100644 --- a/v2/kafka-to-bigquery/README_Kafka_to_BigQuery_Flex.md +++ b/v2/kafka-to-bigquery/README_Kafka_to_BigQuery_Flex.md @@ -23,64 +23,51 @@ on [Metadata Annotations](https://github.com/GoogleCloudPlatform/DataflowTemplat ### Required parameters -* **readBootstrapServerAndTopic** : Kafka Topic to read the input from. -* **writeMode** : Write Mode: write records to one table or multiple tables (based on schema). The DYNAMIC_TABLE_NAMES mode is supported only for AVRO_CONFLUENT_WIRE_FORMAT Source Message Format and SCHEMA_REGISTRY Schema Source. The target table name will be auto-generated based on the Avro schema name of each message, it could either be a single schema (creating a single table) or multiple schemas (creating multiple tables). The SINGLE_TABLE_NAME mode writes to a single table (single schema) specified by the user. Defaults to SINGLE_TABLE_NAME. -* **kafkaReadAuthenticationMode** : The mode of authentication to use with the Kafka cluster. 
Use NONE for no authentication, SASL_PLAIN for SASL/PLAIN username and password, TLSfor certificate-based authentication. APPLICATION_DEFAULT_CREDENTIALS should be used only for Google Cloud Apache Kafka for BigQuery cluster since This allow you to authenticate with Google Cloud Apache Kafka for BigQuery using application default credentials. -* **messageFormat** : The format of the Kafka messages to read. The supported values are AVRO_CONFLUENT_WIRE_FORMAT (Confluent Schema Registry encoded Avro), AVRO_BINARY_ENCODING (Plain binary Avro), and JSON. Defaults to: AVRO_CONFLUENT_WIRE_FORMAT. -* **useBigQueryDLQ** : If true, failed messages will be written to BigQuery with extra error information. Defaults to: false. +* **readBootstrapServerAndTopic**: Kafka Topic to read the input from. +* **writeMode**: Write records to one table or multiple tables (based on schema). The `DYNAMIC_TABLE_NAMES` mode is supported only for `AVRO_CONFLUENT_WIRE_FORMAT` Source Message Format and `SCHEMA_REGISTRY` Schema Source. The target table name is auto-generated based on the Avro schema name of each message, it could either be a single schema (creating a single table) or multiple schemas (creating multiple tables). The `SINGLE_TABLE_NAME` mode writes to a single table (single schema) specified by the user. Defaults to `SINGLE_TABLE_NAME`. +* **kafkaReadAuthenticationMode**: The mode of authentication to use with the Kafka cluster. Use `NONE` for no authentication, `SASL_PLAIN` for SASL/PLAIN username and password, and `TLS` for certificate-based authentication. Apache Kafka for BigQuery only supports the `SASL_PLAIN` authentication mode. Defaults to: SASL_PLAIN. +* **messageFormat**: The format of the Kafka messages to read. The supported values are `AVRO_CONFLUENT_WIRE_FORMAT` (Confluent Schema Registry encoded Avro), `AVRO_BINARY_ENCODING` (Plain binary Avro), and `JSON`. Defaults to: AVRO_CONFLUENT_WIRE_FORMAT. +* **useBigQueryDLQ**: If true, failed messages will be written to BigQuery with extra error information. Defaults to: false. ### Optional parameters -* **outputTableSpec** : BigQuery table location to write the output to. The name should be in the format `:.`. The table's schema must match input objects. -* **persistKafkaKey** : If true, the pipeline will persist the Kafka message key in the BigQuery table, in a `_key` field of type `BYTES`. Default is false (Key is ignored). -* **outputProject** : BigQuery output project in wehich the dataset resides. Tables will be created dynamically in the dataset. Defaults to empty. -* **outputDataset** : BigQuery output dataset to write the output to. Tables will be created dynamically in the dataset. If the tables are created beforehand, the table names should follow the specified naming convention. The name should be `bqTableNamePrefix + Avro Schema FullName` , each word will be separated by a hyphen '-'. Defaults to empty. -* **bqTableNamePrefix** : Naming prefix to be used while creating BigQuery output tables. Only applicable when using schema registry. Defaults to empty. -* **createDisposition** : BigQuery CreateDisposition. For example, CREATE_IF_NEEDED, CREATE_NEVER. Defaults to: CREATE_IF_NEEDED. -* **writeDisposition** : BigQuery WriteDisposition. For example, WRITE_APPEND, WRITE_EMPTY or WRITE_TRUNCATE. Defaults to: WRITE_APPEND. -* **useAutoSharding** : If true, the pipeline uses auto-sharding when writng to BigQueryThe default value is `true`. -* **numStorageWriteApiStreams** : Specifies the number of write streams, this parameter must be set. 
Default is 0. -* **storageWriteApiTriggeringFrequencySec** : Specifies the triggering frequency in seconds, this parameter must be set. Default is 5 seconds. -* **useStorageWriteApiAtLeastOnce** : This parameter takes effect only if "Use BigQuery Storage Write API" is enabled. If enabled the at-least-once semantics will be used for Storage Write API, otherwise exactly-once semantics will be used. Defaults to: false. -* **enableCommitOffsets** : Commit offsets of processed messages to Kafka. If enabled, this will minimize the gaps or duplicate processing of messages when restarting the pipeline. Requires specifying the Consumer Group ID. Defaults to: false. -* **consumerGroupId** : The unique identifier for the consumer group that this pipeline belongs to. Required if Commit Offsets to Kafka is enabled. Defaults to empty. -* **kafkaReadOffset** : The starting point for reading messages when no committed offsets exist. The earliest starts from the beginning, the latest from the newest message. Defaults to: latest. -* **kafkaReadUsernameSecretId** : The Google Cloud Secret Manager secret ID that contains the Kafka username to use with SASL_PLAIN authentication. (Example: projects//secrets//versions/). Defaults to empty. -* **kafkaReadPasswordSecretId** : The Google Cloud Secret Manager secret ID that contains the Kafka password to use with SASL_PLAIN authentication. (Example: projects//secrets//versions/). Defaults to empty. -* **kafkaReadKeystoreLocation** : The Google Cloud Storage path to the Java KeyStore (JKS) file that contains the TLS certificate and private key to use when authenticating with the Kafka cluster. (Example: gs://your-bucket/keystore.jks). -* **kafkaReadTruststoreLocation** : The Google Cloud Storage path to the Java TrustStore (JKS) file that contains the trusted certificates to use to verify the identity of the Kafka broker. -* **kafkaReadTruststorePasswordSecretId** : The Google Cloud Secret Manager secret ID that contains the password to use to access the Java TrustStore (JKS) file for Kafka TLS authentication (Example: projects//secrets//versions/). -* **kafkaReadKeystorePasswordSecretId** : The Google Cloud Secret Manager secret ID that contains the password to use to access the Java KeyStore (JKS) file for Kafka TLS authentication. (Example: projects//secrets//versions/). -* **kafkaReadKeyPasswordSecretId** : The Google Cloud Secret Manager secret ID that contains the password to use to access the private key within the Java KeyStore (JKS) file for Kafka TLS authentication. (Example: projects//secrets//versions/). -* **schemaFormat** : The Kafka schema format. Can be provided as SINGLE_SCHEMA_FILE or SCHEMA_REGISTRY. If SINGLE_SCHEMA_FILE is specified, all messages should have the schema mentioned in the avro schema file. If SCHEMA_REGISTRY is specified, the messages can have either a single schema or multiple schemas. Defaults to: SINGLE_SCHEMA_FILE. -* **confluentAvroSchemaPath** : The Google Cloud Storage path to the single Avro schema file used to decode all of the messages in a topic. Defaults to empty. -* **schemaRegistryConnectionUrl** : The URL for the Confluent Schema Registry instance used to manage Avro schemas for message decoding. Defaults to empty. -* **binaryAvroSchemaPath** : The Google Cloud Storage path to the Avro schema file used to decode binary-encoded Avro messages. Defaults to empty. -* **schemaRegistryAuthenticationMode** : Schema Registry authentication mode. Can be NONE, TLS or OAUTH. Defaults to: NONE. 
-* **schemaRegistryTruststoreLocation** : Location of the SSL certificate where the trust store for authentication to Schema Registry are stored. (Example: /your-bucket/truststore.jks). -* **schemaRegistryTruststorePasswordSecretId** : SecretId in secret manager where the password to access secret in truststore is stored. (Example: projects/your-project-number/secrets/your-secret-name/versions/your-secret-version). -* **schemaRegistryKeystoreLocation** : Keystore location that contains the SSL certificate and private key. (Example: /your-bucket/keystore.jks). -* **schemaRegistryKeystorePasswordSecretId** : SecretId in secret manager where the password to access the keystore file (Example: projects/your-project-number/secrets/your-secret-name/versions/your-secret-version). -* **schemaRegistryKeyPasswordSecretId** : SecretId of password required to access the client's private key stored within the keystore (Example: projects/your-project-number/secrets/your-secret-name/versions/your-secret-version). -* **schemaRegistryOauthClientId** : Client ID used to authenticate the Schema Registry client in OAUTH mode. Required for AVRO_CONFLUENT_WIRE_FORMAT message format. -* **schemaRegistryOauthClientSecretId** : The Google Cloud Secret Manager secret ID that contains the Client Secret to use to authenticate the Schema Registry client in OAUTH mode. Required for AVRO_CONFLUENT_WIRE_FORMAT message format. (Example: projects//secrets//versions/). -* **schemaRegistryOauthScope** : The access token scope used to authenticate the Schema Registry client in OAUTH mode. This field is optional, as the request can be made without a scope parameter passed. (Example: openid). -* **schemaRegistryOauthTokenEndpointUrl** : The HTTP(S)-based URL for the OAuth/OIDC identity provider used to authenticate the Schema Registry client in OAUTH mode. Required for AVRO_CONFLUENT_WIRE_FORMAT message format. -* **outputDeadletterTable** : Fully Qualified BigQuery table name for failed messages. Messages failed to reach the output table for different reasons (e.g., mismatched schema, malformed json) are written to this table.The table will be created by the template. (Example: your-project-id:your-dataset.your-table-name). -* **javascriptTextTransformGcsPath** : The Cloud Storage URI of the .js file that defines the JavaScript user-defined function (UDF) to use. (Example: gs://my-bucket/my-udfs/my_file.js). -* **javascriptTextTransformFunctionName** : The name of the JavaScript user-defined function (UDF) to use. For example, if your JavaScript function code is `myTransform(inJson) { /*...do stuff...*/ }`, then the function name is `myTransform`. For sample JavaScript UDFs, see UDF Examples (https://github.com/GoogleCloudPlatform/DataflowTemplates#udf-examples). -* **javascriptTextTransformReloadIntervalMinutes** : Specifies how frequently to reload the UDF, in minutes. If the value is greater than 0, Dataflow periodically checks the UDF file in Cloud Storage, and reloads the UDF if the file is modified. This parameter allows you to update the UDF while the pipeline is running, without needing to restart the job. If the value is 0, UDF reloading is disabled. The default value is 0. - - -## User-Defined functions (UDFs) - -The Kafka to BigQuery Template supports User-Defined functions (UDFs). -UDFs allow you to customize functionality by providing a JavaScript function -without having to maintain or build the entire template code. 
-
-Check [Create user-defined functions for Dataflow templates](https://cloud.google.com/dataflow/docs/guides/templates/create-template-udf)
-and [Using UDFs](https://github.com/GoogleCloudPlatform/DataflowTemplates#using-udfs)
-for more information about how to create and test those functions.
+* **outputTableSpec**: BigQuery table location to write the output to. The name should be in the format `:.`. The table's schema must match input objects.
+* **persistKafkaKey**: If true, the pipeline will persist the Kafka message key in the BigQuery table, in a `_key` field of type `BYTES`. Default is `false` (Key is ignored).
+* **outputProject**: BigQuery output project in which the dataset resides. Tables will be created dynamically in the dataset. Defaults to empty.
+* **outputDataset**: BigQuery output dataset to write the output to. Tables will be created dynamically in the dataset. If the tables are created beforehand, the table names should follow the specified naming convention. The name should be `bqTableNamePrefix + Avro Schema FullName`, with each word separated by a hyphen `-`. Defaults to empty.
+* **bqTableNamePrefix**: Naming prefix to be used while creating BigQuery output tables. Only applicable when using schema registry. Defaults to empty.
+* **createDisposition**: BigQuery CreateDisposition. For example: `CREATE_IF_NEEDED`, `CREATE_NEVER`. Defaults to: CREATE_IF_NEEDED.
+* **writeDisposition**: BigQuery WriteDisposition. For example: `WRITE_APPEND`, `WRITE_EMPTY` or `WRITE_TRUNCATE`. Defaults to: WRITE_APPEND.
+* **useAutoSharding**: If true, the pipeline uses auto-sharding when writing to BigQuery. The default value is `true`.
+* **numStorageWriteApiStreams**: Specifies the number of write streams; this parameter must be set. Default is `0`.
+* **storageWriteApiTriggeringFrequencySec**: Specifies the triggering frequency in seconds; this parameter must be set. Default is 5 seconds.
+* **useStorageWriteApiAtLeastOnce**: This parameter takes effect only if "Use BigQuery Storage Write API" is enabled. If enabled, at-least-once semantics will be used for the Storage Write API; otherwise exactly-once semantics will be used. Defaults to: false.
+* **enableCommitOffsets**: Commit offsets of processed messages to Kafka. If enabled, this will minimize the gaps or duplicate processing of messages when restarting the pipeline. Requires specifying the Consumer Group ID. Defaults to: false.
+* **consumerGroupId**: The unique identifier for the consumer group that this pipeline belongs to. Required if Commit Offsets to Kafka is enabled. Defaults to empty.
+* **kafkaReadOffset**: The starting point for reading messages when no committed offsets exist. The earliest starts from the beginning, the latest from the newest message. Defaults to: latest.
+* **kafkaReadUsernameSecretId**: The Google Cloud Secret Manager secret ID that contains the Kafka username to use with `SASL_PLAIN` authentication. For example, `projects//secrets//versions/`. Defaults to empty.
+* **kafkaReadPasswordSecretId**: The Google Cloud Secret Manager secret ID that contains the Kafka password to use with `SASL_PLAIN` authentication. For example, `projects//secrets//versions/`. Defaults to empty.
+* **kafkaReadKeystoreLocation**: The Google Cloud Storage path to the Java KeyStore (JKS) file that contains the TLS certificate and private key to use when authenticating with the Kafka cluster. For example, `gs://your-bucket/keystore.jks`.
+* **kafkaReadTruststoreLocation**: The Google Cloud Storage path to the Java TrustStore (JKS) file that contains the trusted certificates to use to verify the identity of the Kafka broker.
+* **kafkaReadTruststorePasswordSecretId**: The Google Cloud Secret Manager secret ID that contains the password to use to access the Java TrustStore (JKS) file for Kafka TLS authentication. For example, `projects//secrets//versions/`.
+* **kafkaReadKeystorePasswordSecretId**: The Google Cloud Secret Manager secret ID that contains the password to use to access the Java KeyStore (JKS) file for Kafka TLS authentication. For example, `projects//secrets//versions/`.
+* **kafkaReadKeyPasswordSecretId**: The Google Cloud Secret Manager secret ID that contains the password to use to access the private key within the Java KeyStore (JKS) file for Kafka TLS authentication. For example, `projects//secrets//versions/`.
+* **schemaFormat**: The Kafka schema format. Can be provided as `SINGLE_SCHEMA_FILE` or `SCHEMA_REGISTRY`. If `SINGLE_SCHEMA_FILE` is specified, use the schema mentioned in the avro schema file for all messages. If `SCHEMA_REGISTRY` is specified, the messages can have either a single schema or multiple schemas. Defaults to: SINGLE_SCHEMA_FILE.
+* **confluentAvroSchemaPath**: The Google Cloud Storage path to the single Avro schema file used to decode all of the messages in a topic. Defaults to empty.
+* **schemaRegistryConnectionUrl**: The URL for the Confluent Schema Registry instance used to manage Avro schemas for message decoding. Defaults to empty.
+* **binaryAvroSchemaPath**: The Google Cloud Storage path to the Avro schema file used to decode binary-encoded Avro messages. Defaults to empty.
+* **schemaRegistryAuthenticationMode**: Schema Registry authentication mode. Can be NONE, TLS or OAUTH. Defaults to: NONE.
+* **schemaRegistryTruststoreLocation**: Location of the SSL certificate where the trust store for authentication to Schema Registry is stored. For example, `/your-bucket/truststore.jks`.
+* **schemaRegistryTruststorePasswordSecretId**: SecretId in secret manager where the password to access the truststore is stored. For example, `projects/your-project-number/secrets/your-secret-name/versions/your-secret-version`.
+* **schemaRegistryKeystoreLocation**: Keystore location that contains the SSL certificate and private key. For example, `/your-bucket/keystore.jks`.
+* **schemaRegistryKeystorePasswordSecretId**: SecretId in secret manager where the password to access the keystore file is stored. For example, `projects/your-project-number/secrets/your-secret-name/versions/your-secret-version`.
+* **schemaRegistryKeyPasswordSecretId**: SecretId of the password required to access the client's private key stored within the keystore. For example, `projects/your-project-number/secrets/your-secret-name/versions/your-secret-version`.
+* **schemaRegistryOauthClientId**: Client ID used to authenticate the Schema Registry client in OAUTH mode. Required for AVRO_CONFLUENT_WIRE_FORMAT message format.
+* **schemaRegistryOauthClientSecretId**: The Google Cloud Secret Manager secret ID that contains the Client Secret to use to authenticate the Schema Registry client in OAUTH mode. Required for AVRO_CONFLUENT_WIRE_FORMAT message format. For example, `projects//secrets//versions/`.
+* **schemaRegistryOauthScope**: The access token scope used to authenticate the Schema Registry client in OAUTH mode. This field is optional, as the request can be made without a scope parameter passed. For example, `openid`.
+* **schemaRegistryOauthTokenEndpointUrl**: The HTTP(S)-based URL for the OAuth/OIDC identity provider used to authenticate the Schema Registry client in OAUTH mode. Required for AVRO_CONFLUENT_WIRE_FORMAT message format. +* **outputDeadletterTable**: Fully Qualified BigQuery table name for failed messages. Messages failed to reach the output table for different reasons (e.g., mismatched schema, malformed json) are written to this table.The table will be created by the template. For example, `your-project-id:your-dataset.your-table-name`. + ## Getting Started @@ -160,7 +147,7 @@ export TEMPLATE_SPEC_GCSPATH="gs://$BUCKET_NAME/templates/flex/Kafka_to_BigQuery ### Required export READ_BOOTSTRAP_SERVER_AND_TOPIC= export WRITE_MODE=SINGLE_TABLE_NAME -export KAFKA_READ_AUTHENTICATION_MODE=APPLICATION_DEFAULT_CREDENTIALS +export KAFKA_READ_AUTHENTICATION_MODE=SASL_PLAIN export MESSAGE_FORMAT=AVRO_CONFLUENT_WIRE_FORMAT export USE_BIG_QUERY_DLQ=false @@ -201,9 +188,6 @@ export SCHEMA_REGISTRY_OAUTH_CLIENT_SECRET_ID= export SCHEMA_REGISTRY_OAUTH_TOKEN_ENDPOINT_URL= export OUTPUT_DEADLETTER_TABLE= -export JAVASCRIPT_TEXT_TRANSFORM_GCS_PATH= -export JAVASCRIPT_TEXT_TRANSFORM_FUNCTION_NAME= -export JAVASCRIPT_TEXT_TRANSFORM_RELOAD_INTERVAL_MINUTES=0 gcloud dataflow flex-template run "kafka-to-bigquery-flex-job" \ --project "$PROJECT" \ @@ -249,10 +233,7 @@ gcloud dataflow flex-template run "kafka-to-bigquery-flex-job" \ --parameters "schemaRegistryOauthScope=$SCHEMA_REGISTRY_OAUTH_SCOPE" \ --parameters "schemaRegistryOauthTokenEndpointUrl=$SCHEMA_REGISTRY_OAUTH_TOKEN_ENDPOINT_URL" \ --parameters "outputDeadletterTable=$OUTPUT_DEADLETTER_TABLE" \ - --parameters "useBigQueryDLQ=$USE_BIG_QUERY_DLQ" \ - --parameters "javascriptTextTransformGcsPath=$JAVASCRIPT_TEXT_TRANSFORM_GCS_PATH" \ - --parameters "javascriptTextTransformFunctionName=$JAVASCRIPT_TEXT_TRANSFORM_FUNCTION_NAME" \ - --parameters "javascriptTextTransformReloadIntervalMinutes=$JAVASCRIPT_TEXT_TRANSFORM_RELOAD_INTERVAL_MINUTES" + --parameters "useBigQueryDLQ=$USE_BIG_QUERY_DLQ" ``` For more information about the command, please check: @@ -273,7 +254,7 @@ export REGION=us-central1 ### Required export READ_BOOTSTRAP_SERVER_AND_TOPIC= export WRITE_MODE=SINGLE_TABLE_NAME -export KAFKA_READ_AUTHENTICATION_MODE=APPLICATION_DEFAULT_CREDENTIALS +export KAFKA_READ_AUTHENTICATION_MODE=SASL_PLAIN export MESSAGE_FORMAT=AVRO_CONFLUENT_WIRE_FORMAT export USE_BIG_QUERY_DLQ=false @@ -314,9 +295,6 @@ export SCHEMA_REGISTRY_OAUTH_CLIENT_SECRET_ID= export SCHEMA_REGISTRY_OAUTH_TOKEN_ENDPOINT_URL= export OUTPUT_DEADLETTER_TABLE= -export JAVASCRIPT_TEXT_TRANSFORM_GCS_PATH= -export JAVASCRIPT_TEXT_TRANSFORM_FUNCTION_NAME= -export JAVASCRIPT_TEXT_TRANSFORM_RELOAD_INTERVAL_MINUTES=0 mvn clean package -PtemplatesRun \ -DskipTests \ @@ -325,7 +303,7 @@ mvn clean package -PtemplatesRun \ -Dregion="$REGION" \ -DjobName="kafka-to-bigquery-flex-job" \ -DtemplateName="Kafka_to_BigQuery_Flex" \ 
--Dparameters="readBootstrapServerAndTopic=$READ_BOOTSTRAP_SERVER_AND_TOPIC,outputTableSpec=$OUTPUT_TABLE_SPEC,persistKafkaKey=$PERSIST_KAFKA_KEY,writeMode=$WRITE_MODE,outputProject=$OUTPUT_PROJECT,outputDataset=$OUTPUT_DATASET,bqTableNamePrefix=$BQ_TABLE_NAME_PREFIX,createDisposition=$CREATE_DISPOSITION,writeDisposition=$WRITE_DISPOSITION,useAutoSharding=$USE_AUTO_SHARDING,numStorageWriteApiStreams=$NUM_STORAGE_WRITE_API_STREAMS,storageWriteApiTriggeringFrequencySec=$STORAGE_WRITE_API_TRIGGERING_FREQUENCY_SEC,useStorageWriteApiAtLeastOnce=$USE_STORAGE_WRITE_API_AT_LEAST_ONCE,enableCommitOffsets=$ENABLE_COMMIT_OFFSETS,consumerGroupId=$CONSUMER_GROUP_ID,kafkaReadOffset=$KAFKA_READ_OFFSET,kafkaReadAuthenticationMode=$KAFKA_READ_AUTHENTICATION_MODE,kafkaReadUsernameSecretId=$KAFKA_READ_USERNAME_SECRET_ID,kafkaReadPasswordSecretId=$KAFKA_READ_PASSWORD_SECRET_ID,kafkaReadKeystoreLocation=$KAFKA_READ_KEYSTORE_LOCATION,kafkaReadTruststoreLocation=$KAFKA_READ_TRUSTSTORE_LOCATION,kafkaReadTruststorePasswordSecretId=$KAFKA_READ_TRUSTSTORE_PASSWORD_SECRET_ID,kafkaReadKeystorePasswordSecretId=$KAFKA_READ_KEYSTORE_PASSWORD_SECRET_ID,kafkaReadKeyPasswordSecretId=$KAFKA_READ_KEY_PASSWORD_SECRET_ID,messageFormat=$MESSAGE_FORMAT,schemaFormat=$SCHEMA_FORMAT,confluentAvroSchemaPath=$CONFLUENT_AVRO_SCHEMA_PATH,schemaRegistryConnectionUrl=$SCHEMA_REGISTRY_CONNECTION_URL,binaryAvroSchemaPath=$BINARY_AVRO_SCHEMA_PATH,schemaRegistryAuthenticationMode=$SCHEMA_REGISTRY_AUTHENTICATION_MODE,schemaRegistryTruststoreLocation=$SCHEMA_REGISTRY_TRUSTSTORE_LOCATION,schemaRegistryTruststorePasswordSecretId=$SCHEMA_REGISTRY_TRUSTSTORE_PASSWORD_SECRET_ID,schemaRegistryKeystoreLocation=$SCHEMA_REGISTRY_KEYSTORE_LOCATION,schemaRegistryKeystorePasswordSecretId=$SCHEMA_REGISTRY_KEYSTORE_PASSWORD_SECRET_ID,schemaRegistryKeyPasswordSecretId=$SCHEMA_REGISTRY_KEY_PASSWORD_SECRET_ID,schemaRegistryOauthClientId=$SCHEMA_REGISTRY_OAUTH_CLIENT_ID,schemaRegistryOauthClientSecretId=$SCHEMA_REGISTRY_OAUTH_CLIENT_SECRET_ID,schemaRegistryOauthScope=$SCHEMA_REGISTRY_OAUTH_SCOPE,schemaRegistryOauthTokenEndpointUrl=$SCHEMA_REGISTRY_OAUTH_TOKEN_ENDPOINT_URL,outputDeadletterTable=$OUTPUT_DEADLETTER_TABLE,useBigQueryDLQ=$USE_BIG_QUERY_DLQ,javascriptTextTransformGcsPath=$JAVASCRIPT_TEXT_TRANSFORM_GCS_PATH,javascriptTextTransformFunctionName=$JAVASCRIPT_TEXT_TRANSFORM_FUNCTION_NAME,javascriptTextTransformReloadIntervalMinutes=$JAVASCRIPT_TEXT_TRANSFORM_RELOAD_INTERVAL_MINUTES" \ 
+-Dparameters="readBootstrapServerAndTopic=$READ_BOOTSTRAP_SERVER_AND_TOPIC,outputTableSpec=$OUTPUT_TABLE_SPEC,persistKafkaKey=$PERSIST_KAFKA_KEY,writeMode=$WRITE_MODE,outputProject=$OUTPUT_PROJECT,outputDataset=$OUTPUT_DATASET,bqTableNamePrefix=$BQ_TABLE_NAME_PREFIX,createDisposition=$CREATE_DISPOSITION,writeDisposition=$WRITE_DISPOSITION,useAutoSharding=$USE_AUTO_SHARDING,numStorageWriteApiStreams=$NUM_STORAGE_WRITE_API_STREAMS,storageWriteApiTriggeringFrequencySec=$STORAGE_WRITE_API_TRIGGERING_FREQUENCY_SEC,useStorageWriteApiAtLeastOnce=$USE_STORAGE_WRITE_API_AT_LEAST_ONCE,enableCommitOffsets=$ENABLE_COMMIT_OFFSETS,consumerGroupId=$CONSUMER_GROUP_ID,kafkaReadOffset=$KAFKA_READ_OFFSET,kafkaReadAuthenticationMode=$KAFKA_READ_AUTHENTICATION_MODE,kafkaReadUsernameSecretId=$KAFKA_READ_USERNAME_SECRET_ID,kafkaReadPasswordSecretId=$KAFKA_READ_PASSWORD_SECRET_ID,kafkaReadKeystoreLocation=$KAFKA_READ_KEYSTORE_LOCATION,kafkaReadTruststoreLocation=$KAFKA_READ_TRUSTSTORE_LOCATION,kafkaReadTruststorePasswordSecretId=$KAFKA_READ_TRUSTSTORE_PASSWORD_SECRET_ID,kafkaReadKeystorePasswordSecretId=$KAFKA_READ_KEYSTORE_PASSWORD_SECRET_ID,kafkaReadKeyPasswordSecretId=$KAFKA_READ_KEY_PASSWORD_SECRET_ID,messageFormat=$MESSAGE_FORMAT,schemaFormat=$SCHEMA_FORMAT,confluentAvroSchemaPath=$CONFLUENT_AVRO_SCHEMA_PATH,schemaRegistryConnectionUrl=$SCHEMA_REGISTRY_CONNECTION_URL,binaryAvroSchemaPath=$BINARY_AVRO_SCHEMA_PATH,schemaRegistryAuthenticationMode=$SCHEMA_REGISTRY_AUTHENTICATION_MODE,schemaRegistryTruststoreLocation=$SCHEMA_REGISTRY_TRUSTSTORE_LOCATION,schemaRegistryTruststorePasswordSecretId=$SCHEMA_REGISTRY_TRUSTSTORE_PASSWORD_SECRET_ID,schemaRegistryKeystoreLocation=$SCHEMA_REGISTRY_KEYSTORE_LOCATION,schemaRegistryKeystorePasswordSecretId=$SCHEMA_REGISTRY_KEYSTORE_PASSWORD_SECRET_ID,schemaRegistryKeyPasswordSecretId=$SCHEMA_REGISTRY_KEY_PASSWORD_SECRET_ID,schemaRegistryOauthClientId=$SCHEMA_REGISTRY_OAUTH_CLIENT_ID,schemaRegistryOauthClientSecretId=$SCHEMA_REGISTRY_OAUTH_CLIENT_SECRET_ID,schemaRegistryOauthScope=$SCHEMA_REGISTRY_OAUTH_SCOPE,schemaRegistryOauthTokenEndpointUrl=$SCHEMA_REGISTRY_OAUTH_TOKEN_ENDPOINT_URL,outputDeadletterTable=$OUTPUT_DEADLETTER_TABLE,useBigQueryDLQ=$USE_BIG_QUERY_DLQ" \ -f v2/kafka-to-bigquery ``` @@ -372,7 +350,7 @@ resource "google_dataflow_flex_template_job" "kafka_to_bigquery_flex" { parameters = { readBootstrapServerAndTopic = "" writeMode = "SINGLE_TABLE_NAME" - kafkaReadAuthenticationMode = "APPLICATION_DEFAULT_CREDENTIALS" + kafkaReadAuthenticationMode = "SASL_PLAIN" messageFormat = "AVRO_CONFLUENT_WIRE_FORMAT" useBigQueryDLQ = "false" # outputTableSpec = "" @@ -389,31 +367,28 @@ resource "google_dataflow_flex_template_job" "kafka_to_bigquery_flex" { # enableCommitOffsets = "false" # consumerGroupId = "" # kafkaReadOffset = "latest" - # kafkaReadUsernameSecretId = "projects//secrets//versions/" - # kafkaReadPasswordSecretId = "projects//secrets//versions/" - # kafkaReadKeystoreLocation = "gs://your-bucket/keystore.jks" + # kafkaReadUsernameSecretId = "" + # kafkaReadPasswordSecretId = "" + # kafkaReadKeystoreLocation = "" # kafkaReadTruststoreLocation = "" - # kafkaReadTruststorePasswordSecretId = "projects//secrets//versions/" - # kafkaReadKeystorePasswordSecretId = "projects//secrets//versions/" - # kafkaReadKeyPasswordSecretId = "projects//secrets//versions/" + # kafkaReadTruststorePasswordSecretId = "" + # kafkaReadKeystorePasswordSecretId = "" + # kafkaReadKeyPasswordSecretId = "" # schemaFormat = "SINGLE_SCHEMA_FILE" # confluentAvroSchemaPath = "" # 
schemaRegistryConnectionUrl = "" # binaryAvroSchemaPath = "" # schemaRegistryAuthenticationMode = "NONE" - # schemaRegistryTruststoreLocation = "/your-bucket/truststore.jks" - # schemaRegistryTruststorePasswordSecretId = "projects/your-project-number/secrets/your-secret-name/versions/your-secret-version" - # schemaRegistryKeystoreLocation = "/your-bucket/keystore.jks" - # schemaRegistryKeystorePasswordSecretId = "projects/your-project-number/secrets/your-secret-name/versions/your-secret-version" - # schemaRegistryKeyPasswordSecretId = "projects/your-project-number/secrets/your-secret-name/versions/your-secret-version" + # schemaRegistryTruststoreLocation = "" + # schemaRegistryTruststorePasswordSecretId = "" + # schemaRegistryKeystoreLocation = "" + # schemaRegistryKeystorePasswordSecretId = "" + # schemaRegistryKeyPasswordSecretId = "" # schemaRegistryOauthClientId = "" - # schemaRegistryOauthClientSecretId = "projects//secrets//versions/" - # schemaRegistryOauthScope = "openid" + # schemaRegistryOauthClientSecretId = "" + # schemaRegistryOauthScope = "" # schemaRegistryOauthTokenEndpointUrl = "" - # outputDeadletterTable = "your-project-id:your-dataset.your-table-name" - # javascriptTextTransformGcsPath = "gs://my-bucket/my-udfs/my_file.js" - # javascriptTextTransformFunctionName = "" - # javascriptTextTransformReloadIntervalMinutes = "0" + # outputDeadletterTable = "" } } ``` diff --git a/v2/kafka-to-bigquery/src/main/java/com/google/cloud/teleport/v2/options/KafkaToBigQueryFlexOptions.java b/v2/kafka-to-bigquery/src/main/java/com/google/cloud/teleport/v2/options/KafkaToBigQueryFlexOptions.java index 4ad4a0bf4f..5d06d494db 100644 --- a/v2/kafka-to-bigquery/src/main/java/com/google/cloud/teleport/v2/options/KafkaToBigQueryFlexOptions.java +++ b/v2/kafka-to-bigquery/src/main/java/com/google/cloud/teleport/v2/options/KafkaToBigQueryFlexOptions.java @@ -52,7 +52,7 @@ public interface KafkaToBigQueryFlexOptions optional = true, description = "Persist the Kafka Message Key to the BigQuery table", helpText = - "If true, the pipeline will persist the Kafka message key in the BigQuery table, in a `_key` field of type `BYTES`. Default is false (Key is ignored).") + "If true, the pipeline will persist the Kafka message key in the BigQuery table, in a `_key` field of type `BYTES`. Default is `false` (Key is ignored).") @Default.Boolean(false) Boolean getPersistKafkaKey(); @@ -69,12 +69,12 @@ public interface KafkaToBigQueryFlexOptions optional = false, description = "Table Name Strategy", helpText = - "Write Mode: write records to one table or multiple tables (based on schema)." - + " The DYNAMIC_TABLE_NAMES mode is supported only for AVRO_CONFLUENT_WIRE_FORMAT Source Message Format" - + " and SCHEMA_REGISTRY Schema Source. The target table name will be auto-generated based on the Avro" + "Write records to one table or multiple tables (based on schema)." + + " The `DYNAMIC_TABLE_NAMES` mode is supported only for `AVRO_CONFLUENT_WIRE_FORMAT` Source Message Format" + + " and `SCHEMA_REGISTRY` Schema Source. The target table name is auto-generated based on the Avro" + " schema name of each message, it could either be a single schema (creating a single table) or" - + " multiple schemas (creating multiple tables). The SINGLE_TABLE_NAME mode writes to a single" - + " table (single schema) specified by the user. Defaults to SINGLE_TABLE_NAME.") + + " multiple schemas (creating multiple tables). The `SINGLE_TABLE_NAME` mode writes to a single" + + " table (single schema) specified by the user. 
Defaults to `SINGLE_TABLE_NAME`.") @Default.String("SINGLE_TABLE_NAME") String getWriteMode(); @@ -119,7 +119,7 @@ public interface KafkaToBigQueryFlexOptions "BigQuery output dataset to write the output to. Tables will be created dynamically in the dataset." + " If the tables are created beforehand, the table names should follow the specified naming convention." + " The name should be `bqTableNamePrefix + Avro Schema FullName` ," - + " each word will be separated by a hyphen '-'.") + + " each word will be separated by a hyphen `-`.") @Default.String("") String getOutputDataset(); @@ -149,7 +149,7 @@ public interface KafkaToBigQueryFlexOptions optional = true, description = "Write Disposition to use for BigQuery", helpText = - "BigQuery WriteDisposition. For example, WRITE_APPEND, WRITE_EMPTY or WRITE_TRUNCATE.", + "BigQuery WriteDisposition. For example: `WRITE_APPEND`, `WRITE_EMPTY` or `WRITE_TRUNCATE`.", hiddenUi = true) @Default.String("WRITE_APPEND") String getWriteDisposition(); @@ -165,7 +165,7 @@ public interface KafkaToBigQueryFlexOptions }, optional = true, description = "Create Disposition to use for BigQuery", - helpText = "BigQuery CreateDisposition. For example, CREATE_IF_NEEDED, CREATE_NEVER.", + helpText = "BigQuery CreateDisposition. For example: `CREATE_IF_NEEDED`, `CREATE_NEVER`.", hiddenUi = true) @Default.String("CREATE_IF_NEEDED") String getCreateDisposition(); @@ -204,7 +204,8 @@ public interface KafkaToBigQueryFlexOptions groupName = "Destination", optional = true, description = "Number of streams for BigQuery Storage Write API", - helpText = "Specifies the number of write streams, this parameter must be set. Default is 0.") + helpText = + "Specifies the number of write streams, this parameter must be set. Default is `0`.") @Override @Default.Integer(0) Integer getNumStorageWriteApiStreams(); diff --git a/v2/kafka-to-gcs/README_Kafka_to_GCS.md b/v2/kafka-to-gcs/README_Kafka_to_GCS.md index c6dd004458..fdec9d1f6f 100644 --- a/v2/kafka-to-gcs/README_Kafka_to_GCS.md +++ b/v2/kafka-to-gcs/README_Kafka_to_GCS.md @@ -14,16 +14,16 @@ on [Metadata Annotations](https://github.com/GoogleCloudPlatform/DataflowTemplat ### Required parameters -* **bootstrapServers** : Kafka Bootstrap Server list, separated by commas. (Example: localhost:9092,127.0.0.1:9093). -* **inputTopics** : Kafka topic(s) to read the input from. (Example: topic1,topic2). -* **outputFileFormat** : The file format of the desired output files. Can be TEXT, AVRO or PARQUET. Defaults to TEXT. -* **outputDirectory** : The path and filename prefix for writing output files. Must end with a slash. (Example: gs://your-bucket/your-path). -* **numShards** : The maximum number of output shards produced when writing. Default number is runner-dependent. +* **bootstrapServers**: Kafka Bootstrap Server list, separated by commas. For example, `localhost:9092,127.0.0.1:9093`. +* **inputTopics**: Kafka topic(s) to read the input from. For example, `topic1,topic2`. +* **outputFileFormat**: The file format of the desired output files. Can be TEXT, AVRO or PARQUET. Defaults to TEXT. +* **outputDirectory**: The path and filename prefix for writing output files. Must end with a slash. For example, `gs://your-bucket/your-path`. +* **numShards**: The maximum number of output shards produced when writing. Default number is runner-dependent. ### Optional parameters -* **windowDuration** : The window duration/size in which data will be written to Cloud Storage. 
Allowed formats are: Ns (for seconds, example: 5s), Nm (for minutes, example: 12m), Nh (for hours, example: 2h). (Example: 5m). Defaults to: 5m. -* **outputFilenamePrefix** : The prefix to place on each windowed file. (Example: output-). Defaults to: output. +* **windowDuration**: The window duration/size in which data will be written to Cloud Storage. Allowed formats are: Ns (for seconds, example: 5s), Nm (for minutes, example: 12m), Nh (for hours, example: 2h). For example, `5m`. Defaults to: 5m. +* **outputFilenamePrefix**: The prefix to place on each windowed file. For example, `output-`. Defaults to: output. @@ -203,13 +203,13 @@ resource "google_dataflow_flex_template_job" "kafka_to_gcs" { name = "kafka-to-gcs" region = var.region parameters = { - bootstrapServers = "localhost:9092,127.0.0.1:9093" - inputTopics = "topic1,topic2" + bootstrapServers = "" + inputTopics = "" outputFileFormat = "TEXT" - outputDirectory = "gs://your-bucket/your-path" + outputDirectory = "" numShards = "0" # windowDuration = "5m" - # outputFilenamePrefix = "output-" + # outputFilenamePrefix = "output" } } ``` diff --git a/v2/kafka-to-gcs/README_Kafka_to_Gcs_Flex.md b/v2/kafka-to-gcs/README_Kafka_to_Gcs_Flex.md index 7e612c0cc6..873e1251e1 100644 --- a/v2/kafka-to-gcs/README_Kafka_to_Gcs_Flex.md +++ b/v2/kafka-to-gcs/README_Kafka_to_Gcs_Flex.md @@ -14,42 +14,42 @@ on [Metadata Annotations](https://github.com/GoogleCloudPlatform/DataflowTemplat ### Required parameters -* **readBootstrapServerAndTopic** : Kafka Topic to read the input from. -* **outputDirectory** : The path and filename prefix for writing output files. Must end with a slash. (Example: gs://your-bucket/your-path/). -* **kafkaReadAuthenticationMode** : The mode of authentication to use with the Kafka cluster. Use NONE for no authentication, SASL_PLAIN for SASL/PLAIN username and password, TLSfor certificate-based authentication. APPLICATION_DEFAULT_CREDENTIALS should be used only for Google Cloud Apache Kafka for BigQuery cluster since This allow you to authenticate with Google Cloud Apache Kafka for BigQuery using application default credentials. -* **messageFormat** : The format of the Kafka messages to read. The supported values are AVRO_CONFLUENT_WIRE_FORMAT (Confluent Schema Registry encoded Avro), AVRO_BINARY_ENCODING (Plain binary Avro), and JSON. Defaults to: AVRO_CONFLUENT_WIRE_FORMAT. -* **useBigQueryDLQ** : If true, failed messages will be written to BigQuery with extra error information. Defaults to: false. +* **readBootstrapServerAndTopic**: Kafka Topic to read the input from. +* **outputDirectory**: The path and filename prefix for writing output files. Must end with a slash. For example, `gs://your-bucket/your-path/`. +* **kafkaReadAuthenticationMode**: The mode of authentication to use with the Kafka cluster. Use `NONE` for no authentication, `SASL_PLAIN` for SASL/PLAIN username and password, and `TLS` for certificate-based authentication. Apache Kafka for BigQuery only supports the `SASL_PLAIN` authentication mode. Defaults to: SASL_PLAIN. +* **messageFormat**: The format of the Kafka messages to read. The supported values are `AVRO_CONFLUENT_WIRE_FORMAT` (Confluent Schema Registry encoded Avro), `AVRO_BINARY_ENCODING` (Plain binary Avro), and `JSON`. Defaults to: AVRO_CONFLUENT_WIRE_FORMAT. +* **useBigQueryDLQ**: If true, failed messages will be written to BigQuery with extra error information. Defaults to: false. 
### Optional parameters -* **windowDuration** : The window duration/size in which data will be written to Cloud Storage. Allowed formats are: Ns (for seconds, example: 5s), Nm (for minutes, example: 12m), Nh (for hours, example: 2h). (Example: 5m). Defaults to: 5m. -* **outputFilenamePrefix** : The prefix to place on each windowed file. (Example: output-). Defaults to: output. -* **numShards** : The maximum number of output shards produced when writing. A higher number of shards means higher throughput for writing to Cloud Storage, but potentially higher data aggregation cost across shards when processing output Cloud Storage files. Default value is decided by Dataflow. -* **enableCommitOffsets** : Commit offsets of processed messages to Kafka. If enabled, this will minimize the gaps or duplicate processing of messages when restarting the pipeline. Requires specifying the Consumer Group ID. Defaults to: false. -* **consumerGroupId** : The unique identifier for the consumer group that this pipeline belongs to. Required if Commit Offsets to Kafka is enabled. Defaults to empty. -* **kafkaReadOffset** : The starting point for reading messages when no committed offsets exist. The earliest starts from the beginning, the latest from the newest message. Defaults to: latest. -* **kafkaReadUsernameSecretId** : The Google Cloud Secret Manager secret ID that contains the Kafka username to use with SASL_PLAIN authentication. (Example: projects//secrets//versions/). Defaults to empty. -* **kafkaReadPasswordSecretId** : The Google Cloud Secret Manager secret ID that contains the Kafka password to use with SASL_PLAIN authentication. (Example: projects//secrets//versions/). Defaults to empty. -* **kafkaReadKeystoreLocation** : The Google Cloud Storage path to the Java KeyStore (JKS) file that contains the TLS certificate and private key to use when authenticating with the Kafka cluster. (Example: gs://your-bucket/keystore.jks). -* **kafkaReadTruststoreLocation** : The Google Cloud Storage path to the Java TrustStore (JKS) file that contains the trusted certificates to use to verify the identity of the Kafka broker. -* **kafkaReadTruststorePasswordSecretId** : The Google Cloud Secret Manager secret ID that contains the password to use to access the Java TrustStore (JKS) file for Kafka TLS authentication (Example: projects//secrets//versions/). -* **kafkaReadKeystorePasswordSecretId** : The Google Cloud Secret Manager secret ID that contains the password to use to access the Java KeyStore (JKS) file for Kafka TLS authentication. (Example: projects//secrets//versions/). -* **kafkaReadKeyPasswordSecretId** : The Google Cloud Secret Manager secret ID that contains the password to use to access the private key within the Java KeyStore (JKS) file for Kafka TLS authentication. (Example: projects//secrets//versions/). -* **schemaFormat** : The Kafka schema format. Can be provided as SINGLE_SCHEMA_FILE or SCHEMA_REGISTRY. If SINGLE_SCHEMA_FILE is specified, all messages should have the schema mentioned in the avro schema file. If SCHEMA_REGISTRY is specified, the messages can have either a single schema or multiple schemas. Defaults to: SINGLE_SCHEMA_FILE. -* **confluentAvroSchemaPath** : The Google Cloud Storage path to the single Avro schema file used to decode all of the messages in a topic. Defaults to empty. -* **schemaRegistryConnectionUrl** : The URL for the Confluent Schema Registry instance used to manage Avro schemas for message decoding. Defaults to empty. 
-* **binaryAvroSchemaPath** : The Google Cloud Storage path to the Avro schema file used to decode binary-encoded Avro messages. Defaults to empty. -* **schemaRegistryAuthenticationMode** : Schema Registry authentication mode. Can be NONE, TLS or OAUTH. Defaults to: NONE. -* **schemaRegistryTruststoreLocation** : Location of the SSL certificate where the trust store for authentication to Schema Registry are stored. (Example: /your-bucket/truststore.jks). -* **schemaRegistryTruststorePasswordSecretId** : SecretId in secret manager where the password to access secret in truststore is stored. (Example: projects/your-project-number/secrets/your-secret-name/versions/your-secret-version). -* **schemaRegistryKeystoreLocation** : Keystore location that contains the SSL certificate and private key. (Example: /your-bucket/keystore.jks). -* **schemaRegistryKeystorePasswordSecretId** : SecretId in secret manager where the password to access the keystore file (Example: projects/your-project-number/secrets/your-secret-name/versions/your-secret-version). -* **schemaRegistryKeyPasswordSecretId** : SecretId of password required to access the client's private key stored within the keystore (Example: projects/your-project-number/secrets/your-secret-name/versions/your-secret-version). -* **schemaRegistryOauthClientId** : Client ID used to authenticate the Schema Registry client in OAUTH mode. Required for AVRO_CONFLUENT_WIRE_FORMAT message format. -* **schemaRegistryOauthClientSecretId** : The Google Cloud Secret Manager secret ID that contains the Client Secret to use to authenticate the Schema Registry client in OAUTH mode. Required for AVRO_CONFLUENT_WIRE_FORMAT message format. (Example: projects//secrets//versions/). -* **schemaRegistryOauthScope** : The access token scope used to authenticate the Schema Registry client in OAUTH mode. This field is optional, as the request can be made without a scope parameter passed. (Example: openid). -* **schemaRegistryOauthTokenEndpointUrl** : The HTTP(S)-based URL for the OAuth/OIDC identity provider used to authenticate the Schema Registry client in OAUTH mode. Required for AVRO_CONFLUENT_WIRE_FORMAT message format. -* **outputDeadletterTable** : Fully Qualified BigQuery table name for failed messages. Messages failed to reach the output table for different reasons (e.g., mismatched schema, malformed json) are written to this table.The table will be created by the template. (Example: your-project-id:your-dataset.your-table-name). +* **windowDuration**: The window duration/size in which data will be written to Cloud Storage. Allowed formats are: Ns (for seconds, example: 5s), Nm (for minutes, example: 12m), Nh (for hours, example: 2h). For example, `5m`. Defaults to: 5m. +* **outputFilenamePrefix**: The prefix to place on each windowed file. For example, `output-`. Defaults to: output. +* **numShards**: The maximum number of output shards produced when writing. A higher number of shards means higher throughput for writing to Cloud Storage, but potentially higher data aggregation cost across shards when processing output Cloud Storage files. Default value is decided by Dataflow. +* **enableCommitOffsets**: Commit offsets of processed messages to Kafka. If enabled, this will minimize the gaps or duplicate processing of messages when restarting the pipeline. Requires specifying the Consumer Group ID. Defaults to: false. +* **consumerGroupId**: The unique identifier for the consumer group that this pipeline belongs to. Required if Commit Offsets to Kafka is enabled. 
Defaults to empty. +* **kafkaReadOffset**: The starting point for reading messages when no committed offsets exist. The earliest starts from the beginning, the latest from the newest message. Defaults to: latest. +* **kafkaReadUsernameSecretId**: The Google Cloud Secret Manager secret ID that contains the Kafka username to use with `SASL_PLAIN` authentication. For example, `projects//secrets//versions/`. Defaults to empty. +* **kafkaReadPasswordSecretId**: The Google Cloud Secret Manager secret ID that contains the Kafka password to use with `SASL_PLAIN` authentication. For example, `projects//secrets//versions/`. Defaults to empty. +* **kafkaReadKeystoreLocation**: The Google Cloud Storage path to the Java KeyStore (JKS) file that contains the TLS certificate and private key to use when authenticating with the Kafka cluster. For example, `gs://your-bucket/keystore.jks`. +* **kafkaReadTruststoreLocation**: The Google Cloud Storage path to the Java TrustStore (JKS) file that contains the trusted certificates to use to verify the identity of the Kafka broker. +* **kafkaReadTruststorePasswordSecretId**: The Google Cloud Secret Manager secret ID that contains the password to use to access the Java TrustStore (JKS) file for Kafka TLS authentication For example, `projects//secrets//versions/`. +* **kafkaReadKeystorePasswordSecretId**: The Google Cloud Secret Manager secret ID that contains the password to use to access the Java KeyStore (JKS) file for Kafka TLS authentication. For example, `projects//secrets//versions/`. +* **kafkaReadKeyPasswordSecretId**: The Google Cloud Secret Manager secret ID that contains the password to use to access the private key within the Java KeyStore (JKS) file for Kafka TLS authentication. For example, `projects//secrets//versions/`. +* **schemaFormat**: The Kafka schema format. Can be provided as `SINGLE_SCHEMA_FILE` or `SCHEMA_REGISTRY`. If `SINGLE_SCHEMA_FILE` is specified, use the schema mentioned in the avro schema file for all messages. If `SCHEMA_REGISTRY` is specified, the messages can have either a single schema or multiple schemas. Defaults to: SINGLE_SCHEMA_FILE. +* **confluentAvroSchemaPath**: The Google Cloud Storage path to the single Avro schema file used to decode all of the messages in a topic. Defaults to empty. +* **schemaRegistryConnectionUrl**: The URL for the Confluent Schema Registry instance used to manage Avro schemas for message decoding. Defaults to empty. +* **binaryAvroSchemaPath**: The Google Cloud Storage path to the Avro schema file used to decode binary-encoded Avro messages. Defaults to empty. +* **schemaRegistryAuthenticationMode**: Schema Registry authentication mode. Can be NONE, TLS or OAUTH. Defaults to: NONE. +* **schemaRegistryTruststoreLocation**: Location of the SSL certificate where the trust store for authentication to Schema Registry are stored. For example, `/your-bucket/truststore.jks`. +* **schemaRegistryTruststorePasswordSecretId**: SecretId in secret manager where the password to access secret in truststore is stored. For example, `projects/your-project-number/secrets/your-secret-name/versions/your-secret-version`. +* **schemaRegistryKeystoreLocation**: Keystore location that contains the SSL certificate and private key. For example, `/your-bucket/keystore.jks`. +* **schemaRegistryKeystorePasswordSecretId**: SecretId in secret manager where the password to access the keystore file For example, `projects/your-project-number/secrets/your-secret-name/versions/your-secret-version`. 
+* **schemaRegistryKeyPasswordSecretId**: SecretId of password required to access the client's private key stored within the keystore For example, `projects/your-project-number/secrets/your-secret-name/versions/your-secret-version`. +* **schemaRegistryOauthClientId**: Client ID used to authenticate the Schema Registry client in OAUTH mode. Required for AVRO_CONFLUENT_WIRE_FORMAT message format. +* **schemaRegistryOauthClientSecretId**: The Google Cloud Secret Manager secret ID that contains the Client Secret to use to authenticate the Schema Registry client in OAUTH mode. Required for AVRO_CONFLUENT_WIRE_FORMAT message format. For example, `projects//secrets//versions/`. +* **schemaRegistryOauthScope**: The access token scope used to authenticate the Schema Registry client in OAUTH mode. This field is optional, as the request can be made without a scope parameter passed. For example, `openid`. +* **schemaRegistryOauthTokenEndpointUrl**: The HTTP(S)-based URL for the OAuth/OIDC identity provider used to authenticate the Schema Registry client in OAUTH mode. Required for AVRO_CONFLUENT_WIRE_FORMAT message format. +* **outputDeadletterTable**: Fully Qualified BigQuery table name for failed messages. Messages failed to reach the output table for different reasons (e.g., mismatched schema, malformed json) are written to this table.The table will be created by the template. For example, `your-project-id:your-dataset.your-table-name`. @@ -130,7 +130,7 @@ export TEMPLATE_SPEC_GCSPATH="gs://$BUCKET_NAME/templates/flex/Kafka_to_Gcs_Flex ### Required export READ_BOOTSTRAP_SERVER_AND_TOPIC= export OUTPUT_DIRECTORY= -export KAFKA_READ_AUTHENTICATION_MODE=APPLICATION_DEFAULT_CREDENTIALS +export KAFKA_READ_AUTHENTICATION_MODE=SASL_PLAIN export MESSAGE_FORMAT=AVRO_CONFLUENT_WIRE_FORMAT export USE_BIG_QUERY_DLQ=false @@ -221,7 +221,7 @@ export REGION=us-central1 ### Required export READ_BOOTSTRAP_SERVER_AND_TOPIC= export OUTPUT_DIRECTORY= -export KAFKA_READ_AUTHENTICATION_MODE=APPLICATION_DEFAULT_CREDENTIALS +export KAFKA_READ_AUTHENTICATION_MODE=SASL_PLAIN export MESSAGE_FORMAT=AVRO_CONFLUENT_WIRE_FORMAT export USE_BIG_QUERY_DLQ=false @@ -308,38 +308,38 @@ resource "google_dataflow_flex_template_job" "kafka_to_gcs_flex" { region = var.region parameters = { readBootstrapServerAndTopic = "" - outputDirectory = "gs://your-bucket/your-path/" - kafkaReadAuthenticationMode = "APPLICATION_DEFAULT_CREDENTIALS" + outputDirectory = "" + kafkaReadAuthenticationMode = "SASL_PLAIN" messageFormat = "AVRO_CONFLUENT_WIRE_FORMAT" useBigQueryDLQ = "false" # windowDuration = "5m" - # outputFilenamePrefix = "output-" + # outputFilenamePrefix = "output" # numShards = "0" # enableCommitOffsets = "false" # consumerGroupId = "" # kafkaReadOffset = "latest" - # kafkaReadUsernameSecretId = "projects//secrets//versions/" - # kafkaReadPasswordSecretId = "projects//secrets//versions/" - # kafkaReadKeystoreLocation = "gs://your-bucket/keystore.jks" + # kafkaReadUsernameSecretId = "" + # kafkaReadPasswordSecretId = "" + # kafkaReadKeystoreLocation = "" # kafkaReadTruststoreLocation = "" - # kafkaReadTruststorePasswordSecretId = "projects//secrets//versions/" - # kafkaReadKeystorePasswordSecretId = "projects//secrets//versions/" - # kafkaReadKeyPasswordSecretId = "projects//secrets//versions/" + # kafkaReadTruststorePasswordSecretId = "" + # kafkaReadKeystorePasswordSecretId = "" + # kafkaReadKeyPasswordSecretId = "" # schemaFormat = "SINGLE_SCHEMA_FILE" # confluentAvroSchemaPath = "" # schemaRegistryConnectionUrl = "" # 
binaryAvroSchemaPath = "" # schemaRegistryAuthenticationMode = "NONE" - # schemaRegistryTruststoreLocation = "/your-bucket/truststore.jks" - # schemaRegistryTruststorePasswordSecretId = "projects/your-project-number/secrets/your-secret-name/versions/your-secret-version" - # schemaRegistryKeystoreLocation = "/your-bucket/keystore.jks" - # schemaRegistryKeystorePasswordSecretId = "projects/your-project-number/secrets/your-secret-name/versions/your-secret-version" - # schemaRegistryKeyPasswordSecretId = "projects/your-project-number/secrets/your-secret-name/versions/your-secret-version" + # schemaRegistryTruststoreLocation = "" + # schemaRegistryTruststorePasswordSecretId = "" + # schemaRegistryKeystoreLocation = "" + # schemaRegistryKeystorePasswordSecretId = "" + # schemaRegistryKeyPasswordSecretId = "" # schemaRegistryOauthClientId = "" - # schemaRegistryOauthClientSecretId = "projects//secrets//versions/" - # schemaRegistryOauthScope = "openid" + # schemaRegistryOauthClientSecretId = "" + # schemaRegistryOauthScope = "" # schemaRegistryOauthTokenEndpointUrl = "" - # outputDeadletterTable = "your-project-id:your-dataset.your-table-name" + # outputDeadletterTable = "" } } ``` diff --git a/v2/kafka-to-kafka/README_Kafka_to_Kafka.md b/v2/kafka-to-kafka/README_Kafka_to_Kafka.md index 7439b745e8..f149feef43 100644 --- a/v2/kafka-to-kafka/README_Kafka_to_Kafka.md +++ b/v2/kafka-to-kafka/README_Kafka_to_Kafka.md @@ -13,30 +13,30 @@ on [Metadata Annotations](https://github.com/GoogleCloudPlatform/DataflowTemplat ### Required parameters -* **readBootstrapServerAndTopic** : Kafka Bootstrap server and topic to read the input from. (Example: localhost:9092;topic1,topic2). -* **kafkaReadAuthenticationMode** : The mode of authentication to use with the Kafka cluster. Use NONE for no authentication, SASL_PLAIN for SASL/PLAIN username and password, TLSfor certificate-based authentication. APPLICATION_DEFAULT_CREDENTIALS should be used only for Google Cloud Apache Kafka for BigQuery cluster since This allow you to authenticate with Google Cloud Apache Kafka for BigQuery using application default credentials. -* **writeBootstrapServerAndTopic** : Kafka topic to write the output to. -* **kafkaWriteAuthenticationMethod** : The mode of authentication to use with the Kafka cluster. Use NONE for no authentication, SASL_PLAIN for SASL/PLAIN username and password, and TLS for certificate-based authentication. Defaults to: APPLICATION_DEFAULT_CREDENTIALS. +* **readBootstrapServerAndTopic**: Kafka Bootstrap server and topic to read the input from. For example, `localhost:9092;topic1,topic2`. +* **kafkaReadAuthenticationMode**: The mode of authentication to use with the Kafka cluster. Use `NONE` for no authentication, `SASL_PLAIN` for SASL/PLAIN username and password, and `TLS` for certificate-based authentication. Apache Kafka for BigQuery only supports the `SASL_PLAIN` authentication mode. Defaults to: SASL_PLAIN. +* **writeBootstrapServerAndTopic**: Kafka topic to write the output to. +* **kafkaWriteAuthenticationMethod**: The mode of authentication to use with the Kafka cluster. Use NONE for no authentication, SASL_PLAIN for SASL/PLAIN username and password, and TLS for certificate-based authentication. Defaults to: APPLICATION_DEFAULT_CREDENTIALS. ### Optional parameters -* **enableCommitOffsets** : Commit offsets of processed messages to Kafka. If enabled, this will minimize the gaps or duplicate processing of messages when restarting the pipeline. Requires specifying the Consumer Group ID. Defaults to: false. 
-* **consumerGroupId** : The unique identifier for the consumer group that this pipeline belongs to. Required if Commit Offsets to Kafka is enabled. Defaults to empty. -* **kafkaReadOffset** : The starting point for reading messages when no committed offsets exist. The earliest starts from the beginning, the latest from the newest message. Defaults to: latest. -* **kafkaReadUsernameSecretId** : The Google Cloud Secret Manager secret ID that contains the Kafka username to use with SASL_PLAIN authentication. (Example: projects//secrets//versions/). Defaults to empty. -* **kafkaReadPasswordSecretId** : The Google Cloud Secret Manager secret ID that contains the Kafka password to use with SASL_PLAIN authentication. (Example: projects//secrets//versions/). Defaults to empty. -* **kafkaReadKeystoreLocation** : The Google Cloud Storage path to the Java KeyStore (JKS) file that contains the TLS certificate and private key to use when authenticating with the Kafka cluster. (Example: gs://your-bucket/keystore.jks). -* **kafkaReadTruststoreLocation** : The Google Cloud Storage path to the Java TrustStore (JKS) file that contains the trusted certificates to use to verify the identity of the Kafka broker. -* **kafkaReadTruststorePasswordSecretId** : The Google Cloud Secret Manager secret ID that contains the password to use to access the Java TrustStore (JKS) file for Kafka TLS authentication (Example: projects//secrets//versions/). -* **kafkaReadKeystorePasswordSecretId** : The Google Cloud Secret Manager secret ID that contains the password to use to access the Java KeyStore (JKS) file for Kafka TLS authentication. (Example: projects//secrets//versions/). -* **kafkaReadKeyPasswordSecretId** : The Google Cloud Secret Manager secret ID that contains the password to use to access the private key within the Java KeyStore (JKS) file for Kafka TLS authentication. (Example: projects//secrets//versions/). -* **kafkaWriteUsernameSecretId** : The Google Cloud Secret Manager secret ID that contains the Kafka username for SASL_PLAIN authentication with the destination Kafka cluster. (Example: projects//secrets//versions/). Defaults to empty. -* **kafkaWritePasswordSecretId** : The Google Cloud Secret Manager secret ID that contains the Kafka password to use for SASL_PLAIN authentication with the destination Kafka cluster. (Example: projects//secrets//versions/). Defaults to empty. -* **kafkaWriteKeystoreLocation** : The Google Cloud Storage path to the Java KeyStore (JKS) file that contains the TLS certificate and private key for authenticating with the destination Kafka cluster. (Example: gs:///.jks). -* **kafkaWriteTruststoreLocation** : The Google Cloud Storage path to the Java TrustStore (JKS) file that contains the trusted certificates to use to verify the identity of the destination Kafka broker. -* **kafkaWriteTruststorePasswordSecretId** : The Google Cloud Secret Manager secret ID that contains the password to use to access the Java TrustStore (JKS) file for TLS authentication with the destination Kafka cluster. (Example: projects//secrets//versions/). -* **kafkaWriteKeystorePasswordSecretId** : The Google Cloud Secret Manager secret ID that contains the password to access the Java KeyStore (JKS) file to use for TLS authentication with the destination Kafka cluster. (Example: projects//secrets//versions/). 
-* **kafkaWriteKeyPasswordSecretId** : The Google Cloud Secret Manager secret ID that contains the password to use to access the private key within the Java KeyStore (JKS) file for TLS authentication with the destination Kafka cluster. (Example: projects//secrets//versions/). +* **enableCommitOffsets**: Commit offsets of processed messages to Kafka. If enabled, this will minimize the gaps or duplicate processing of messages when restarting the pipeline. Requires specifying the Consumer Group ID. Defaults to: false. +* **consumerGroupId**: The unique identifier for the consumer group that this pipeline belongs to. Required if Commit Offsets to Kafka is enabled. Defaults to empty. +* **kafkaReadOffset**: The starting point for reading messages when no committed offsets exist. The earliest starts from the beginning, the latest from the newest message. Defaults to: latest. +* **kafkaReadUsernameSecretId**: The Google Cloud Secret Manager secret ID that contains the Kafka username to use with `SASL_PLAIN` authentication. For example, `projects//secrets//versions/`. Defaults to empty. +* **kafkaReadPasswordSecretId**: The Google Cloud Secret Manager secret ID that contains the Kafka password to use with `SASL_PLAIN` authentication. For example, `projects//secrets//versions/`. Defaults to empty. +* **kafkaReadKeystoreLocation**: The Google Cloud Storage path to the Java KeyStore (JKS) file that contains the TLS certificate and private key to use when authenticating with the Kafka cluster. For example, `gs://your-bucket/keystore.jks`. +* **kafkaReadTruststoreLocation**: The Google Cloud Storage path to the Java TrustStore (JKS) file that contains the trusted certificates to use to verify the identity of the Kafka broker. +* **kafkaReadTruststorePasswordSecretId**: The Google Cloud Secret Manager secret ID that contains the password to use to access the Java TrustStore (JKS) file for Kafka TLS authentication For example, `projects//secrets//versions/`. +* **kafkaReadKeystorePasswordSecretId**: The Google Cloud Secret Manager secret ID that contains the password to use to access the Java KeyStore (JKS) file for Kafka TLS authentication. For example, `projects//secrets//versions/`. +* **kafkaReadKeyPasswordSecretId**: The Google Cloud Secret Manager secret ID that contains the password to use to access the private key within the Java KeyStore (JKS) file for Kafka TLS authentication. For example, `projects//secrets//versions/`. +* **kafkaWriteUsernameSecretId**: The Google Cloud Secret Manager secret ID that contains the Kafka username for SASL_PLAIN authentication with the destination Kafka cluster. For example, `projects//secrets//versions/`. Defaults to empty. +* **kafkaWritePasswordSecretId**: The Google Cloud Secret Manager secret ID that contains the Kafka password to use for SASL_PLAIN authentication with the destination Kafka cluster. For example, `projects//secrets//versions/`. Defaults to empty. +* **kafkaWriteKeystoreLocation**: The Google Cloud Storage path to the Java KeyStore (JKS) file that contains the TLS certificate and private key for authenticating with the destination Kafka cluster. For example, `gs:///.jks`. +* **kafkaWriteTruststoreLocation**: The Google Cloud Storage path to the Java TrustStore (JKS) file that contains the trusted certificates to use to verify the identity of the destination Kafka broker. 
+* **kafkaWriteTruststorePasswordSecretId**: The Google Cloud Secret Manager secret ID that contains the password to use to access the Java TrustStore (JKS) file for TLS authentication with the destination Kafka cluster. For example, `projects//secrets//versions/`. +* **kafkaWriteKeystorePasswordSecretId**: The Google Cloud Secret Manager secret ID that contains the password to access the Java KeyStore (JKS) file to use for TLS authentication with the destination Kafka cluster. For example, `projects//secrets//versions/`. +* **kafkaWriteKeyPasswordSecretId**: The Google Cloud Secret Manager secret ID that contains the password to use to access the private key within the Java KeyStore (JKS) file for TLS authentication with the destination Kafka cluster. For example, `projects//secrets//versions/`. @@ -265,20 +265,20 @@ resource "google_dataflow_flex_template_job" "kafka_to_kafka" { # enableCommitOffsets = "false" # consumerGroupId = "" # kafkaReadOffset = "latest" - # kafkaReadUsernameSecretId = "projects//secrets//versions/" - # kafkaReadPasswordSecretId = "projects//secrets//versions/" - # kafkaReadKeystoreLocation = "gs://your-bucket/keystore.jks" + # kafkaReadUsernameSecretId = "" + # kafkaReadPasswordSecretId = "" + # kafkaReadKeystoreLocation = "" # kafkaReadTruststoreLocation = "" - # kafkaReadTruststorePasswordSecretId = "projects//secrets//versions/" - # kafkaReadKeystorePasswordSecretId = "projects//secrets//versions/" - # kafkaReadKeyPasswordSecretId = "projects//secrets//versions/" - # kafkaWriteUsernameSecretId = "projects//secrets//versions/" - # kafkaWritePasswordSecretId = "projects//secrets//versions/" - # kafkaWriteKeystoreLocation = "gs:///.jks" + # kafkaReadTruststorePasswordSecretId = "" + # kafkaReadKeystorePasswordSecretId = "" + # kafkaReadKeyPasswordSecretId = "" + # kafkaWriteUsernameSecretId = "" + # kafkaWritePasswordSecretId = "" + # kafkaWriteKeystoreLocation = "" # kafkaWriteTruststoreLocation = "" - # kafkaWriteTruststorePasswordSecretId = "projects//secrets//versions/" - # kafkaWriteKeystorePasswordSecretId = "projects//secrets//versions/" - # kafkaWriteKeyPasswordSecretId = "projects//secrets//versions/" + # kafkaWriteTruststorePasswordSecretId = "" + # kafkaWriteKeystorePasswordSecretId = "" + # kafkaWriteKeyPasswordSecretId = "" } } ``` diff --git a/v2/kafka-to-pubsub/src/main/java/com/google/cloud/teleport/v2/options/KafkaToPubsubOptions.java b/v2/kafka-to-pubsub/src/main/java/com/google/cloud/teleport/v2/options/KafkaToPubsubOptions.java index 71602eca71..d2ce1897af 100644 --- a/v2/kafka-to-pubsub/src/main/java/com/google/cloud/teleport/v2/options/KafkaToPubsubOptions.java +++ b/v2/kafka-to-pubsub/src/main/java/com/google/cloud/teleport/v2/options/KafkaToPubsubOptions.java @@ -57,9 +57,8 @@ public interface KafkaToPubsubOptions order = 3, groupName = "Target", description = "Output Pub/Sub topic", - helpText = - "The name of the topic to which data should published, in the format of 'projects/your-project-id/topics/your-topic-name'", - example = "projects/your-project-id/topics/your-topic-name") + helpText = "The name of the topic to publish data to.", + example = "projects//topics/") @Validation.Required String getOutputTopic(); diff --git a/v2/kinesis-to-pubsub/README_Kinesis_To_Pubsub.md b/v2/kinesis-to-pubsub/README_Kinesis_To_Pubsub.md index dc8cf7a4c6..6857f688ea 100644 --- a/v2/kinesis-to-pubsub/README_Kinesis_To_Pubsub.md +++ b/v2/kinesis-to-pubsub/README_Kinesis_To_Pubsub.md @@ -13,15 +13,15 @@ on [Metadata 
Annotations](https://github.com/GoogleCloudPlatform/DataflowTemplat ### Required parameters -* **secretId1** : First Secret ID containing aws key id. -* **secretId2** : Second Secret ID containing aws key id. -* **awsRegion** : AWS Region. -* **kinesisDataStream** : Name of the Kinesis Data stream to read from. Enter the full name of the Kinesis Data stream. -* **outputPubsubTopic** : The name of the topic to which data should published, in the format of 'projects/your-project-id/topics/your-topic-name' (Example: projects/your-project-id/topics/your-topic-name). +* **secretId1**: First Secret ID containing aws key id. +* **secretId2**: Second Secret ID containing aws key id. +* **awsRegion**: AWS Region. +* **kinesisDataStream**: Name of the Kinesis Data stream to read from. Enter the full name of the Kinesis Data stream. +* **outputPubsubTopic**: The name of the topic to which data should published, in the format of 'projects/your-project-id/topics/your-topic-name' For example, `projects/your-project-id/topics/your-topic-name`. ### Optional parameters -* **awsDataFormat** : Data format of input. +* **awsDataFormat**: Data format of input. @@ -202,7 +202,7 @@ resource "google_dataflow_flex_template_job" "kinesis_to_pubsub" { secretId2 = "" awsRegion = "" kinesisDataStream = "" - outputPubsubTopic = "projects/your-project-id/topics/your-topic-name" + outputPubsubTopic = "" # awsDataFormat = "" } } diff --git a/v2/mongodb-to-googlecloud/README_MongoDB_to_BigQuery.md b/v2/mongodb-to-googlecloud/README_MongoDB_to_BigQuery.md index 25c58a869d..038f2a77f8 100644 --- a/v2/mongodb-to-googlecloud/README_MongoDB_to_BigQuery.md +++ b/v2/mongodb-to-googlecloud/README_MongoDB_to_BigQuery.md @@ -18,21 +18,21 @@ on [Metadata Annotations](https://github.com/GoogleCloudPlatform/DataflowTemplat ### Required parameters -* **mongoDbUri** : The MongoDB connection URI in the format `mongodb+srv://:@.`. -* **database** : Database in MongoDB to read the collection from. (Example: my-db). -* **collection** : Name of the collection inside MongoDB database. (Example: my-collection). -* **userOption** : `FLATTEN`, `JSON`, or `NONE`. `FLATTEN` flattens the documents to the single level. `JSON` stores document in BigQuery JSON format. `NONE` stores the whole document as a JSON-formatted STRING. Defaults to: NONE. -* **outputTableSpec** : The BigQuery table to write to. For example, `bigquery-project:dataset.output_table`. +* **mongoDbUri**: The MongoDB connection URI in the format `mongodb+srv://:@.`. +* **database**: Database in MongoDB to read the collection from. For example, `my-db`. +* **collection**: Name of the collection inside MongoDB database. For example, `my-collection`. +* **userOption**: `FLATTEN`, `JSON`, or `NONE`. `FLATTEN` flattens the documents to the single level. `JSON` stores document in BigQuery JSON format. `NONE` stores the whole document as a JSON-formatted STRING. Defaults to: NONE. +* **outputTableSpec**: The BigQuery table to write to. For example, `bigquery-project:dataset.output_table`. ### Optional parameters -* **KMSEncryptionKey** : Cloud KMS Encryption Key to decrypt the mongodb uri connection string. If Cloud KMS key is passed in, the mongodb uri connection string must all be passed in encrypted. (Example: projects/your-project/locations/global/keyRings/your-keyring/cryptoKeys/your-key). -* **filter** : Bson filter in json format. (Example: { "val": { $gt: 0, $lt: 9 }}). 
-* **useStorageWriteApi** : If `true`, the pipeline uses the BigQuery Storage Write API (https://cloud.google.com/bigquery/docs/write-api). The default value is `false`. For more information, see Using the Storage Write API (https://beam.apache.org/documentation/io/built-in/google-bigquery/#storage-write-api). -* **useStorageWriteApiAtLeastOnce** : When using the Storage Write API, specifies the write semantics. To use at-least-once semantics (https://beam.apache.org/documentation/io/built-in/google-bigquery/#at-least-once-semantics), set this parameter to `true`. To use exactly-once semantics, set the parameter to `false`. This parameter applies only when `useStorageWriteApi` is `true`. The default value is `false`. -* **bigQuerySchemaPath** : The Cloud Storage path for the BigQuery JSON schema. (Example: gs://your-bucket/your-schema.json). -* **javascriptDocumentTransformGcsPath** : The Cloud Storage URI of the `.js` file that defines the JavaScript user-defined function (UDF) to use. (Example: gs://your-bucket/your-transforms/*.js). -* **javascriptDocumentTransformFunctionName** : The name of the JavaScript user-defined function (UDF) to use. For example, if your JavaScript function code is `myTransform(inJson) { /*...do stuff...*/ }`, then the function name is myTransform. For sample JavaScript UDFs, see UDF Examples (https://github.com/GoogleCloudPlatform/DataflowTemplates#udf-examples). (Example: transform). +* **KMSEncryptionKey**: Cloud KMS Encryption Key to decrypt the mongodb uri connection string. If Cloud KMS key is passed in, the mongodb uri connection string must all be passed in encrypted. For example, `projects/your-project/locations/global/keyRings/your-keyring/cryptoKeys/your-key`. +* **filter**: Bson filter in json format. For example, `{ "val": { $gt: 0, $lt: 9 }}`. +* **useStorageWriteApi**: If `true`, the pipeline uses the BigQuery Storage Write API (https://cloud.google.com/bigquery/docs/write-api). The default value is `false`. For more information, see Using the Storage Write API (https://beam.apache.org/documentation/io/built-in/google-bigquery/#storage-write-api). +* **useStorageWriteApiAtLeastOnce**: When using the Storage Write API, specifies the write semantics. To use at-least-once semantics (https://beam.apache.org/documentation/io/built-in/google-bigquery/#at-least-once-semantics), set this parameter to `true`. To use exactly-once semantics, set the parameter to `false`. This parameter applies only when `useStorageWriteApi` is `true`. The default value is `false`. +* **bigQuerySchemaPath**: The Cloud Storage path for the BigQuery JSON schema. For example, `gs://your-bucket/your-schema.json`. +* **javascriptDocumentTransformGcsPath**: The Cloud Storage URI of the `.js` file that defines the JavaScript user-defined function (UDF) to use. For example, `gs://your-bucket/your-transforms/*.js`. +* **javascriptDocumentTransformFunctionName**: The name of the JavaScript user-defined function (UDF) to use. For example, if your JavaScript function code is `myTransform(inJson) { /*...do stuff...*/ }`, then the function name is myTransform. For sample JavaScript UDFs, see UDF Examples (https://github.com/GoogleCloudPlatform/DataflowTemplates#udf-examples). For example, `transform`. 
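
The two `javascriptDocumentTransform*` parameters work together: the `.js` file is staged in Cloud Storage, and the function name must match a top-level function defined in that file. Below is a minimal sketch of staging such a UDF; the bucket path and the trivial pass-through function body are illustrative placeholders, with `transform` matching the example function name given above.

```
#!/usr/bin/env bash
# Sketch only: stage a JavaScript document-transform UDF for the template.
# BUCKET_NAME and the UDF body are illustrative placeholders.

cat > transform.js <<'EOF'
// Receives one document as a JSON string and must return a JSON string.
function transform(inJson) {
  var doc = JSON.parse(inJson);
  doc.processed = true;   // illustrative change; a real UDF would reshape the document
  return JSON.stringify(doc);
}
EOF

gsutil cp transform.js "gs://$BUCKET_NAME/udfs/transform.js"

# Passed to the template as:
#   javascriptDocumentTransformGcsPath=gs://$BUCKET_NAME/udfs/transform.js
#   javascriptDocumentTransformFunctionName=transform
```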
@@ -228,17 +228,17 @@ resource "google_dataflow_flex_template_job" "mongodb_to_bigquery" { region = var.region parameters = { mongoDbUri = "" - database = "my-db" - collection = "my-collection" + database = "" + collection = "" userOption = "NONE" outputTableSpec = "" - # KMSEncryptionKey = "projects/your-project/locations/global/keyRings/your-keyring/cryptoKeys/your-key" - # filter = "{ "val": { $gt: 0, $lt: 9 }}" + # KMSEncryptionKey = "" + # filter = "" # useStorageWriteApi = "false" # useStorageWriteApiAtLeastOnce = "false" - # bigQuerySchemaPath = "gs://your-bucket/your-schema.json" - # javascriptDocumentTransformGcsPath = "gs://your-bucket/your-transforms/*.js" - # javascriptDocumentTransformFunctionName = "transform" + # bigQuerySchemaPath = "" + # javascriptDocumentTransformGcsPath = "" + # javascriptDocumentTransformFunctionName = "" } } ``` diff --git a/v2/mongodb-to-googlecloud/README_MongoDB_to_BigQuery_CDC.md b/v2/mongodb-to-googlecloud/README_MongoDB_to_BigQuery_CDC.md index 5dbf51e129..172a7a8769 100644 --- a/v2/mongodb-to-googlecloud/README_MongoDB_to_BigQuery_CDC.md +++ b/v2/mongodb-to-googlecloud/README_MongoDB_to_BigQuery_CDC.md @@ -19,24 +19,24 @@ on [Metadata Annotations](https://github.com/GoogleCloudPlatform/DataflowTemplat ### Required parameters -* **mongoDbUri** : The MongoDB connection URI in the format `mongodb+srv://:@.`. -* **database** : Database in MongoDB to read the collection from. (Example: my-db). -* **collection** : Name of the collection inside MongoDB database. (Example: my-collection). -* **userOption** : `FLATTEN`, `JSON`, or `NONE`. `FLATTEN` flattens the documents to the single level. `JSON` stores document in BigQuery JSON format. `NONE` stores the whole document as a JSON-formatted STRING. Defaults to: NONE. -* **inputTopic** : The Pub/Sub input topic to read from, in the format of projects//topics/. -* **outputTableSpec** : The BigQuery table to write to. For example, `bigquery-project:dataset.output_table`. +* **mongoDbUri**: The MongoDB connection URI in the format `mongodb+srv://:@.`. +* **database**: Database in MongoDB to read the collection from. For example, `my-db`. +* **collection**: Name of the collection inside MongoDB database. For example, `my-collection`. +* **userOption**: `FLATTEN`, `JSON`, or `NONE`. `FLATTEN` flattens the documents to the single level. `JSON` stores document in BigQuery JSON format. `NONE` stores the whole document as a JSON-formatted STRING. Defaults to: NONE. +* **inputTopic**: The Pub/Sub input topic to read from, in the format of `projects//topics/`. +* **outputTableSpec**: The BigQuery table to write to. For example, `bigquery-project:dataset.output_table`. ### Optional parameters -* **useStorageWriteApiAtLeastOnce** : When using the Storage Write API, specifies the write semantics. To use at-least-once semantics (https://beam.apache.org/documentation/io/built-in/google-bigquery/#at-least-once-semantics), set this parameter to `true`. To use exactly- once semantics, set the parameter to `false`. This parameter applies only when `useStorageWriteApi` is `true`. The default value is `false`. -* **KMSEncryptionKey** : Cloud KMS Encryption Key to decrypt the mongodb uri connection string. If Cloud KMS key is passed in, the mongodb uri connection string must all be passed in encrypted. (Example: projects/your-project/locations/global/keyRings/your-keyring/cryptoKeys/your-key). -* **filter** : Bson filter in json format. (Example: { "val": { $gt: 0, $lt: 9 }}). 
-* **useStorageWriteApi** : If true, the pipeline uses the BigQuery Storage Write API (https://cloud.google.com/bigquery/docs/write-api). The default value is `false`. For more information, see Using the Storage Write API (https://beam.apache.org/documentation/io/built-in/google-bigquery/#storage-write-api). -* **numStorageWriteApiStreams** : When using the Storage Write API, specifies the number of write streams. If `useStorageWriteApi` is `true` and `useStorageWriteApiAtLeastOnce` is `false`, then you must set this parameter. Defaults to: 0. -* **storageWriteApiTriggeringFrequencySec** : When using the Storage Write API, specifies the triggering frequency, in seconds. If `useStorageWriteApi` is `true` and `useStorageWriteApiAtLeastOnce` is `false`, then you must set this parameter. -* **bigQuerySchemaPath** : The Cloud Storage path for the BigQuery JSON schema. (Example: gs://your-bucket/your-schema.json). -* **javascriptDocumentTransformGcsPath** : The Cloud Storage URI of the `.js` file that defines the JavaScript user-defined function (UDF) to use. (Example: gs://your-bucket/your-transforms/*.js). -* **javascriptDocumentTransformFunctionName** : The name of the JavaScript user-defined function (UDF) to use. For example, if your JavaScript function code is `myTransform(inJson) { /*...do stuff...*/ }`, then the function name is myTransform. For sample JavaScript UDFs, see UDF Examples (https://github.com/GoogleCloudPlatform/DataflowTemplates#udf-examples). (Example: transform). +* **useStorageWriteApiAtLeastOnce**: When using the Storage Write API, specifies the write semantics. To use at-least-once semantics (https://beam.apache.org/documentation/io/built-in/google-bigquery/#at-least-once-semantics), set this parameter to `true`. To use exactly- once semantics, set the parameter to `false`. This parameter applies only when `useStorageWriteApi` is `true`. The default value is `false`. +* **KMSEncryptionKey**: Cloud KMS Encryption Key to decrypt the mongodb uri connection string. If Cloud KMS key is passed in, the mongodb uri connection string must all be passed in encrypted. For example, `projects/your-project/locations/global/keyRings/your-keyring/cryptoKeys/your-key`. +* **filter**: Bson filter in json format. For example, `{ "val": { $gt: 0, $lt: 9 }}`. +* **useStorageWriteApi**: If true, the pipeline uses the BigQuery Storage Write API (https://cloud.google.com/bigquery/docs/write-api). The default value is `false`. For more information, see Using the Storage Write API (https://beam.apache.org/documentation/io/built-in/google-bigquery/#storage-write-api). +* **numStorageWriteApiStreams**: When using the Storage Write API, specifies the number of write streams. If `useStorageWriteApi` is `true` and `useStorageWriteApiAtLeastOnce` is `false`, then you must set this parameter. Defaults to: 0. +* **storageWriteApiTriggeringFrequencySec**: When using the Storage Write API, specifies the triggering frequency, in seconds. If `useStorageWriteApi` is `true` and `useStorageWriteApiAtLeastOnce` is `false`, then you must set this parameter. +* **bigQuerySchemaPath**: The Cloud Storage path for the BigQuery JSON schema. For example, `gs://your-bucket/your-schema.json`. +* **javascriptDocumentTransformGcsPath**: The Cloud Storage URI of the `.js` file that defines the JavaScript user-defined function (UDF) to use. For example, `gs://your-bucket/your-transforms/*.js`. +* **javascriptDocumentTransformFunctionName**: The name of the JavaScript user-defined function (UDF) to use. 
For example, if your JavaScript function code is `myTransform(inJson) { /*...do stuff...*/ }`, then the function name is myTransform. For sample JavaScript UDFs, see UDF Examples (https://github.com/GoogleCloudPlatform/DataflowTemplates#udf-examples). For example, `transform`. @@ -241,20 +241,20 @@ resource "google_dataflow_flex_template_job" "mongodb_to_bigquery_cdc" { region = var.region parameters = { mongoDbUri = "" - database = "my-db" - collection = "my-collection" + database = "" + collection = "" userOption = "NONE" inputTopic = "" outputTableSpec = "" # useStorageWriteApiAtLeastOnce = "false" - # KMSEncryptionKey = "projects/your-project/locations/global/keyRings/your-keyring/cryptoKeys/your-key" - # filter = "{ "val": { $gt: 0, $lt: 9 }}" + # KMSEncryptionKey = "" + # filter = "" # useStorageWriteApi = "false" # numStorageWriteApiStreams = "0" # storageWriteApiTriggeringFrequencySec = "" - # bigQuerySchemaPath = "gs://your-bucket/your-schema.json" - # javascriptDocumentTransformGcsPath = "gs://your-bucket/your-transforms/*.js" - # javascriptDocumentTransformFunctionName = "transform" + # bigQuerySchemaPath = "" + # javascriptDocumentTransformGcsPath = "" + # javascriptDocumentTransformFunctionName = "" } } ``` diff --git a/v2/mongodb-to-googlecloud/src/main/java/com/google/cloud/teleport/v2/mongodb/options/MongoDbToBigQueryOptions.java b/v2/mongodb-to-googlecloud/src/main/java/com/google/cloud/teleport/v2/mongodb/options/MongoDbToBigQueryOptions.java index 7730361a81..c0b7d01bef 100644 --- a/v2/mongodb-to-googlecloud/src/main/java/com/google/cloud/teleport/v2/mongodb/options/MongoDbToBigQueryOptions.java +++ b/v2/mongodb-to-googlecloud/src/main/java/com/google/cloud/teleport/v2/mongodb/options/MongoDbToBigQueryOptions.java @@ -105,7 +105,7 @@ public interface PubSubOptions extends PipelineOptions, DataflowPipelineOptions groupName = "Source", description = "Pub/Sub input topic", helpText = - "The Pub/Sub input topic to read from, in the format of projects//topics/.") + "The Pub/Sub input topic to read from, in the format of `projects//topics/`.") String getInputTopic(); void setInputTopic(String inputTopic); diff --git a/v2/mqtt-to-pubsub/README_Mqtt_to_PubSub.md b/v2/mqtt-to-pubsub/README_Mqtt_to_PubSub.md index 7f6ff8fd2e..41137b1187 100644 --- a/v2/mqtt-to-pubsub/README_Mqtt_to_PubSub.md +++ b/v2/mqtt-to-pubsub/README_Mqtt_to_PubSub.md @@ -19,14 +19,14 @@ on [Metadata Annotations](https://github.com/GoogleCloudPlatform/DataflowTemplat ### Required parameters -* **inputTopic** : The name of the MQTT topic that data is read from. (Example: topic). -* **outputTopic** : The name of the output Pub/Sub topic that data is written to. (Example: projects/your-project-id/topics/your-topic-name). -* **username** : The username to use for authentication on the MQTT server. (Example: sampleusername). -* **password** : The password associated with the provided username. (Example: samplepassword). +* **inputTopic**: The name of the MQTT topic that data is read from. For example, `topic`. +* **outputTopic**: The name of the output Pub/Sub topic that data is written to. For example, `projects/your-project-id/topics/your-topic-name`. +* **username**: The username to use for authentication on the MQTT server. For example, `sampleusername`. +* **password**: The password associated with the provided username. For example, `samplepassword`. ### Optional parameters -* **brokerServer** : The MQTT broker server IP or host. (Example: tcp://host:1883). 
+* **brokerServer**: The MQTT broker server IP or host. For example, `tcp://host:1883`. @@ -200,11 +200,11 @@ resource "google_dataflow_flex_template_job" "mqtt_to_pubsub" { name = "mqtt-to-pubsub" region = var.region parameters = { - inputTopic = "topic" - outputTopic = "projects/your-project-id/topics/your-topic-name" - username = "sampleusername" - password = "samplepassword" - # brokerServer = "tcp://host:1883" + inputTopic = "" + outputTopic = "" + username = "" + password = "" + # brokerServer = "" } } ``` diff --git a/v2/mysql-to-googlecloud/README_Jdbc_to_PubSub.md b/v2/mysql-to-googlecloud/README_Jdbc_to_PubSub.md index 67ea295e9b..0a81250a78 100644 --- a/v2/mysql-to-googlecloud/README_Jdbc_to_PubSub.md +++ b/v2/mysql-to-googlecloud/README_Jdbc_to_PubSub.md @@ -18,20 +18,20 @@ on [Metadata Annotations](https://github.com/GoogleCloudPlatform/DataflowTemplat ### Required parameters -* **driverClassName** : The JDBC driver class name. (Example: com.mysql.jdbc.Driver). -* **connectionUrl** : The JDBC connection URL string. You can pass in this value as a string that's encrypted with a Cloud KMS key and then Base64-encoded. For example: 'echo -n "jdbc:mysql://some-host:3306/sampledb" | gcloud kms encrypt --location= --keyring= --key= --plaintext-file=- --ciphertext-file=- | base64' (Example: jdbc:mysql://some-host:3306/sampledb). -* **driverJars** : Comma-separated Cloud Storage paths for JDBC drivers. (Example: gs://your-bucket/driver_jar1.jar,gs://your-bucket/driver_jar2.jar). -* **query** : The query to run on the source to extract the data. (Example: select * from sampledb.sample_table). -* **outputTopic** : The Pub/Sub topic to publish to, in the format projects//topics/. (Example: projects/your-project-id/topics/your-topic-name). +* **driverClassName**: The JDBC driver class name. For example, `com.mysql.jdbc.Driver`. +* **connectionUrl**: The JDBC connection URL string. You can pass in this value as a string that's encrypted with a Cloud KMS key and then Base64-encoded. For example: 'echo -n "jdbc:mysql://some-host:3306/sampledb" | gcloud kms encrypt --location= --keyring= --key= --plaintext-file=- --ciphertext-file=- | base64' For example, `jdbc:mysql://some-host:3306/sampledb`. +* **driverJars**: Comma-separated Cloud Storage paths for JDBC drivers. For example, `gs://your-bucket/driver_jar1.jar,gs://your-bucket/driver_jar2.jar`. +* **query**: The query to run on the source to extract the data. For example, `select * from sampledb.sample_table`. +* **outputTopic**: The Pub/Sub topic to publish to. For example, `projects//topics/`. ### Optional parameters -* **username** : The username to use for the JDBC connection. You can pass in this value as a string that's encrypted with a Cloud KMS key and then Base64-encoded. For example, `echo -n 'some_username' | glcloud kms encrypt --location=my_location --keyring=mykeyring --key=mykey --plaintext-file=- --ciphertext-file=- | base64`. -* **password** : The password to use for the JDBC connection. You can pass in this value as a string that's encrypted with a Cloud KMS key and then Base64-encoded. For example, `echo -n 'some_password' | glcloud kms encrypt --location=my_location --keyring=mykeyring --key=mykey --plaintext-file=- --ciphertext-file=- | base64`. -* **connectionProperties** : The properties string to use for the JDBC connection. The format of the string must be `[propertyName=property;]*`. (Example: unicode=true;characterEncoding=UTF-8). 
-* **KMSEncryptionKey** : The Cloud KMS Encryption Key to use to decrypt the username, password, and connection string. If a Cloud KMS key is passed in, the username, password, and connection string must all be passed in encrypted and base64 encoded. (Example: projects/your-project/locations/global/keyRings/your-keyring/cryptoKeys/your-key).
-* **disabledAlgorithms** : Comma separated algorithms to disable. If this value is set to none, no algorithm is disabled. Use this parameter with caution, because the algorithms disabled by default might have vulnerabilities or performance issues. (Example: SSLv3, RC4).
-* **extraFilesToStage** : Comma separated Cloud Storage paths or Secret Manager secrets for files to stage in the worker. These files are saved in the /extra_files directory in each worker. (Example: gs:///file.txt,projects//secrets//versions/).
+* **username**: The username to use for the JDBC connection. You can pass in this value as a string that's encrypted with a Cloud KMS key and then Base64-encoded. For example, `echo -n 'some_username' | gcloud kms encrypt --location=my_location --keyring=mykeyring --key=mykey --plaintext-file=- --ciphertext-file=- | base64`.
+* **password**: The password to use for the JDBC connection. You can pass in this value as a string that's encrypted with a Cloud KMS key and then Base64-encoded. For example, `echo -n 'some_password' | gcloud kms encrypt --location=my_location --keyring=mykeyring --key=mykey --plaintext-file=- --ciphertext-file=- | base64`.
+* **connectionProperties**: The properties string to use for the JDBC connection. The format of the string must be `[propertyName=property;]*`. For example, `unicode=true;characterEncoding=UTF-8`.
+* **KMSEncryptionKey**: The Cloud KMS Encryption Key to use to decrypt the username, password, and connection string. If a Cloud KMS key is passed in, the username, password, and connection string must all be passed in encrypted and base64 encoded. For example, `projects/your-project/locations/global/keyRings/your-keyring/cryptoKeys/your-key`.
+* **disabledAlgorithms**: Comma separated algorithms to disable. If this value is set to `none`, no algorithm is disabled. Use this parameter with caution, because the algorithms disabled by default might have vulnerabilities or performance issues. For example, `SSLv3, RC4`.
+* **extraFilesToStage**: Comma separated Cloud Storage paths or Secret Manager secrets for files to stage in the worker. These files are saved in the /extra_files directory in each worker. For example, `gs:///file.txt,projects//secrets//versions/`.
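The `connectionUrl`, `username`, and `password` help text above all describe the same Cloud KMS plus Base64 flow. A minimal sketch of that flow, assuming placeholder project, key ring, and key names rather than values taken from this change:

```shell
# Placeholder KMS resources: substitute your own project, key ring, and key.
PROJECT="your-project"
KEYRING="your-keyring"
KEY="your-key"

# Encrypt the JDBC connection URL with Cloud KMS, then Base64-encode the ciphertext.
# base64 -w 0 (GNU coreutils) avoids line wrapping; the encoded value should contain
# no whitespace when passed to the template.
CONNECTION_URL_ENC=$(echo -n "jdbc:mysql://some-host:3306/sampledb" \
  | gcloud kms encrypt \
      --project="${PROJECT}" \
      --location=global \
      --keyring="${KEYRING}" \
      --key="${KEY}" \
      --plaintext-file=- \
      --ciphertext-file=- \
  | base64 -w 0)

# Pass the encoded value as connectionUrl, and the same key as KMSEncryptionKey so
# the pipeline can decrypt it at runtime.
echo "KMSEncryptionKey=projects/${PROJECT}/locations/global/keyRings/${KEYRING}/cryptoKeys/${KEY}"
```

The same pipeline applies to `username` and `password`; only the plaintext piped into `gcloud kms encrypt` changes.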
@@ -223,17 +223,17 @@ resource "google_dataflow_flex_template_job" "jdbc_to_pubsub" { name = "jdbc-to-pubsub" region = var.region parameters = { - driverClassName = "com.mysql.jdbc.Driver" - connectionUrl = "jdbc:mysql://some-host:3306/sampledb" - driverJars = "gs://your-bucket/driver_jar1.jar,gs://your-bucket/driver_jar2.jar" - query = "select * from sampledb.sample_table" - outputTopic = "projects/your-project-id/topics/your-topic-name" + driverClassName = "" + connectionUrl = "" + driverJars = "" + query = "" + outputTopic = "" # username = "" # password = "" - # connectionProperties = "unicode=true;characterEncoding=UTF-8" - # KMSEncryptionKey = "projects/your-project/locations/global/keyRings/your-keyring/cryptoKeys/your-key" - # disabledAlgorithms = "SSLv3, RC4" - # extraFilesToStage = "gs:///file.txt,projects//secrets//versions/" + # connectionProperties = "" + # KMSEncryptionKey = "" + # disabledAlgorithms = "" + # extraFilesToStage = "" } } ``` diff --git a/v2/mysql-to-googlecloud/README_Jdbc_to_PubSub_Auto.md b/v2/mysql-to-googlecloud/README_Jdbc_to_PubSub_Auto.md index ccdb7a6f23..de73de30df 100644 --- a/v2/mysql-to-googlecloud/README_Jdbc_to_PubSub_Auto.md +++ b/v2/mysql-to-googlecloud/README_Jdbc_to_PubSub_Auto.md @@ -15,23 +15,23 @@ on [Metadata Annotations](https://github.com/GoogleCloudPlatform/DataflowTemplat ### Required parameters -* **driverClassName** : JDBC driver class name to use. (Example: com.mysql.jdbc.Driver). -* **connectionUrl** : Url connection string to connect to the JDBC source. Connection string can be passed in as plaintext or as a base64 encoded string encrypted by Google Cloud KMS. (Example: jdbc:mysql://some-host:3306/sampledb). -* **driverJars** : Comma separate Cloud Storage paths for JDBC drivers. (Example: gs://your-bucket/driver_jar1.jar,gs://your-bucket/driver_jar2.jar). -* **query** : Query to be executed on the source to extract the data. (Example: select * from sampledb.sample_table). -* **outputTopic** : The name of the topic to which data should published, in the format of 'projects/your-project-id/topics/your-topic-name' (Example: projects/your-project-id/topics/your-topic-name). +* **driverClassName**: JDBC driver class name to use. For example, `com.mysql.jdbc.Driver`. +* **connectionUrl**: Url connection string to connect to the JDBC source. Connection string can be passed in as plaintext or as a base64 encoded string encrypted by Google Cloud KMS. For example, `jdbc:mysql://some-host:3306/sampledb`. +* **driverJars**: Comma separate Cloud Storage paths for JDBC drivers. For example, `gs://your-bucket/driver_jar1.jar,gs://your-bucket/driver_jar2.jar`. +* **query**: Query to be executed on the source to extract the data. For example, `select * from sampledb.sample_table`. +* **outputTopic**: The name of the topic to publish data to. For example, `projects//topics/`. ### Optional parameters -* **username** : User name to be used for the JDBC connection. User name can be passed in as plaintext or as a base64 encoded string encrypted by Google Cloud KMS. -* **password** : Password to be used for the JDBC connection. Password can be passed in as plaintext or as a base64 encoded string encrypted by Google Cloud KMS. -* **connectionProperties** : Properties string to use for the JDBC connection. Format of the string must be [propertyName=property;]*. (Example: unicode=true;characterEncoding=UTF-8). -* **KMSEncryptionKey** : If this parameter is provided, password, user name and connection string should all be passed in encrypted. 
Encrypt parameters using the KMS API encrypt endpoint. See: https://cloud.google.com/kms/docs/reference/rest/v1/projects.locations.keyRings.cryptoKeys/encrypt (Example: projects/your-project/locations/global/keyRings/your-keyring/cryptoKeys/your-key). -* **partitionColumn** : If this parameter is provided (along with `table`), JdbcIO reads the table in parallel by executing multiple instances of the query on the same table (subquery) using ranges. Currently, only Long partition columns are supported. -* **table** : Table to read from using partitions. This parameter also accepts a subquery in parentheses. (Example: (select id, name from Person) as subq). -* **numPartitions** : The number of partitions. This, along with the lower and upper bound, form partitions strides for generated WHERE clause expressions used to split the partition column evenly. When the input is less than 1, the number is set to 1. -* **lowerBound** : Lower bound used in the partition scheme. If not provided, it is automatically inferred by Beam (for the supported types). -* **upperBound** : Upper bound used in partition scheme. If not provided, it is automatically inferred by Beam (for the supported types). +* **username**: User name to be used for the JDBC connection. User name can be passed in as plaintext or as a base64 encoded string encrypted by Google Cloud KMS. +* **password**: Password to be used for the JDBC connection. Password can be passed in as plaintext or as a base64 encoded string encrypted by Google Cloud KMS. +* **connectionProperties**: Properties string to use for the JDBC connection. Format of the string must be [propertyName=property;]*. For example, `unicode=true;characterEncoding=UTF-8`. +* **KMSEncryptionKey**: If this parameter is provided, password, user name and connection string should all be passed in encrypted. Encrypt parameters using the KMS API encrypt endpoint. See: https://cloud.google.com/kms/docs/reference/rest/v1/projects.locations.keyRings.cryptoKeys/encrypt For example, `projects/your-project/locations/global/keyRings/your-keyring/cryptoKeys/your-key`. +* **partitionColumn**: If this parameter is provided (along with `table`), JdbcIO reads the table in parallel by executing multiple instances of the query on the same table (subquery) using ranges. Currently, only Long partition columns are supported. +* **table**: Table to read from using partitions. This parameter also accepts a subquery in parentheses. For example, `(select id, name from Person) as subq`. +* **numPartitions**: The number of partitions. This, along with the lower and upper bound, form partitions strides for generated WHERE clause expressions used to split the partition column evenly. When the input is less than 1, the number is set to 1. +* **lowerBound**: Lower bound used in the partition scheme. If not provided, it is automatically inferred by Beam (for the supported types). +* **upperBound**: Upper bound used in partition scheme. If not provided, it is automatically inferred by Beam (for the supported types). 
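The partitioned-read parameters above (`table`, `partitionColumn`, `numPartitions`, `lowerBound`, `upperBound`) are easiest to see together in a launch command. A rough sketch using `gcloud dataflow flex-template run`, where the project, bucket, template spec path, and bound values are placeholders rather than values from this change:

```shell
gcloud dataflow flex-template run "jdbc-to-pubsub-auto-$(date +%Y%m%d-%H%M%S)" \
  --project=your-project \
  --region=us-central1 \
  --template-file-gcs-location=gs://your-bucket/templates/flex/Jdbc_to_PubSub_Auto \
  --parameters=driverClassName=com.mysql.jdbc.Driver \
  --parameters=driverJars=gs://your-bucket/mysql-connector-java.jar \
  --parameters=connectionUrl=jdbc:mysql://some-host:3306/sampledb \
  --parameters=query="select * from sampledb.sample_table" \
  --parameters=outputTopic=projects/your-project/topics/your-topic \
  --parameters=table=sampledb.sample_table \
  --parameters=partitionColumn=id \
  --parameters=numPartitions=10 \
  --parameters=lowerBound=0 \
  --parameters=upperBound=1000000
```

With `table` and `partitionColumn` set, JdbcIO splits the `id` range between `lowerBound` and `upperBound` into `numPartitions` strides and reads them in parallel, as the descriptions above explain.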
@@ -232,17 +232,17 @@ resource "google_dataflow_flex_template_job" "jdbc_to_pubsub_auto" { name = "jdbc-to-pubsub-auto" region = var.region parameters = { - driverClassName = "com.mysql.jdbc.Driver" - connectionUrl = "jdbc:mysql://some-host:3306/sampledb" - driverJars = "gs://your-bucket/driver_jar1.jar,gs://your-bucket/driver_jar2.jar" - query = "select * from sampledb.sample_table" - outputTopic = "projects/your-project-id/topics/your-topic-name" + driverClassName = "" + connectionUrl = "" + driverJars = "" + query = "" + outputTopic = "" # username = "" # password = "" - # connectionProperties = "unicode=true;characterEncoding=UTF-8" - # KMSEncryptionKey = "projects/your-project/locations/global/keyRings/your-keyring/cryptoKeys/your-key" + # connectionProperties = "" + # KMSEncryptionKey = "" # partitionColumn = "" - # table = "(select id, name from Person) as subq" + # table = "
" # numPartitions = "" # lowerBound = "" # upperBound = "" diff --git a/v2/mysql-to-googlecloud/README_MySQL_to_BigQuery.md b/v2/mysql-to-googlecloud/README_MySQL_to_BigQuery.md index 55f077b121..093f1c08fd 100644 --- a/v2/mysql-to-googlecloud/README_MySQL_to_BigQuery.md +++ b/v2/mysql-to-googlecloud/README_MySQL_to_BigQuery.md @@ -23,34 +23,31 @@ on [Metadata Annotations](https://github.com/GoogleCloudPlatform/DataflowTemplat ### Required parameters -* **connectionURL** : The JDBC connection URL string. For example, `jdbc:mysql://some-host:3306/sampledb`. Can be passed in as a string that's Base64-encoded and then encrypted with a Cloud KMS key. (Example: jdbc:mysql://some-host:3306/sampledb). -* **outputTable** : The BigQuery output table location. (Example: :.). -* **bigQueryLoadingTemporaryDirectory** : The temporary directory for the BigQuery loading process. (Example: gs://your-bucket/your-files/temp_dir). +* **connectionURL**: The JDBC connection URL string. For example, `jdbc:mysql://some-host:3306/sampledb`. Can be passed in as a string that's Base64-encoded and then encrypted with a Cloud KMS key. For example, `jdbc:mysql://some-host:3306/sampledb`. +* **outputTable**: The BigQuery output table location. For example, `:.`. +* **bigQueryLoadingTemporaryDirectory**: The temporary directory for the BigQuery loading process. For example, `gs://your-bucket/your-files/temp_dir`. ### Optional parameters -* **connectionProperties** : The properties string to use for the JDBC connection. The format of the string must be `[propertyName=property;]*`.For more information, see Configuration Properties (https://dev.mysql.com/doc/connector-j/en/connector-j-reference-configuration-properties.html) in the MySQL documentation. (Example: unicode=true;characterEncoding=UTF-8). -* **username** : The username to use for the JDBC connection. Can be passed in as a string that's encrypted with a Cloud KMS key, or can be a Secret Manager secret in the form projects/{project}/secrets/{secret}/versions/{secret_version}. -* **password** : The password to use for the JDBC connection. Can be passed in as a string that's encrypted with a Cloud KMS key, or can be a Secret Manager secret in the form projects/{project}/secrets/{secret}/versions/{secret_version}. -* **query** : The query to run on the source to extract the data. Note that some JDBC SQL and BigQuery types, although sharing the same name, have some differences. Some important SQL -> BigQuery type mappings to keep in mind are: -DATETIME --> TIMESTAMP - -Type casting may be required if your schemas do not match. This parameter can be set to a gs:// path pointing to a file in Cloud Storage to load the query from. The file encoding should be UTF-8. (Example: select * from sampledb.sample_table). -* **KMSEncryptionKey** : The Cloud KMS encryption key to use to decrypt the username, password, and connection string. If you pass in a Cloud KMS key, you must also encrypt the username, password, and connection string. (Example: projects/your-project/locations/global/keyRings/your-keyring/cryptoKeys/your-key). -* **useColumnAlias** : If set to `true`, the pipeline uses the column alias (`AS`) instead of the column name to map the rows to BigQuery. Defaults to `false`. -* **isTruncate** : If set to `true`, the pipeline truncates before loading data into BigQuery. Defaults to `false`, which causes the pipeline to append data. 
-* **partitionColumn** : If this parameter is provided with the name of the `table` defined as an optional parameter, JdbcIO reads the table in parallel by executing multiple instances of the query on the same table (subquery) using ranges. Currently, only supports `Long` partition columns. -* **table** : The table to read from when using partitions. This parameter also accepts a subquery in parentheses. (Example: (select id, name from Person) as subq). -* **numPartitions** : The number of partitions. With the lower and upper bound, this value forms partition strides for generated `WHERE` clause expressions that are used to split the partition column evenly. When the input is less than `1`, the number is set to `1`. -* **lowerBound** : The lower bound to use in the partition scheme. If not provided, this value is automatically inferred by Apache Beam for the supported types. -* **upperBound** : The upper bound to use in the partition scheme. If not provided, this value is automatically inferred by Apache Beam for the supported types. -* **fetchSize** : The number of rows to be fetched from database at a time. Not used for partitioned reads. Defaults to: 50000. -* **createDisposition** : The BigQuery CreateDisposition to use. For example, `CREATE_IF_NEEDED` or `CREATE_NEVER`. Defaults to: CREATE_NEVER. -* **bigQuerySchemaPath** : The Cloud Storage path for the BigQuery JSON schema. If `createDisposition` is set to CREATE_IF_NEEDED, this parameter must be specified. (Example: gs://your-bucket/your-schema.json). -* **disabledAlgorithms** : Comma separated algorithms to disable. If this value is set to none, no algorithm is disabled. Use this parameter with caution, because the algorithms disabled by default might have vulnerabilities or performance issues. (Example: SSLv3, RC4). -* **extraFilesToStage** : Comma separated Cloud Storage paths or Secret Manager secrets for files to stage in the worker. These files are saved in the /extra_files directory in each worker. (Example: gs:///file.txt,projects//secrets//versions/). -* **useStorageWriteApi** : If `true`, the pipeline uses the BigQuery Storage Write API (https://cloud.google.com/bigquery/docs/write-api). The default value is `false`. For more information, see Using the Storage Write API (https://beam.apache.org/documentation/io/built-in/google-bigquery/#storage-write-api). -* **useStorageWriteApiAtLeastOnce** : When using the Storage Write API, specifies the write semantics. To use at-least-once semantics (https://beam.apache.org/documentation/io/built-in/google-bigquery/#at-least-once-semantics), set this parameter to `true`. To use exactly-once semantics, set the parameter to `false`. This parameter applies only when `useStorageWriteApi` is `true`. The default value is `false`. +* **connectionProperties**: The properties string to use for the JDBC connection. The format of the string must be `[propertyName=property;]*`.For more information, see Configuration Properties (https://dev.mysql.com/doc/connector-j/8.1/en/connector-j-reference-configuration-properties.html) in the MySQL documentation. For example, `unicode=true;characterEncoding=UTF-8`. +* **username**: The username to use for the JDBC connection. You can pass in this value as a string that's encrypted with a Cloud KMS key and then Base64-encoded. Remove whitespace characters from the Base64-encoded string. +* **password**: The password to use for the JDBC connection. You can pass in this value as a string that's encrypted with a Cloud KMS key and then Base64-encoded. 
Remove whitespace characters from the Base64-encoded string. +* **query**: The query to run on the source to extract the data. Note that some JDBC SQL and BigQuery types, although sharing the same name, have some differences. Some important SQL -> BigQuery type mappings to keep in mind are `DATETIME --> TIMESTAMP`. Type casting may be required if your schemas do not match. For example, `select * from sampledb.sample_table`. +* **KMSEncryptionKey**: The Cloud KMS encryption key to use to decrypt the username, password, and connection string. If you pass in a Cloud KMS key, you must also encrypt the username, password, and connection string. For example, `projects/your-project/locations/global/keyRings/your-keyring/cryptoKeys/your-key`. +* **useColumnAlias**: If set to `true`, the pipeline uses the column alias (`AS`) instead of the column name to map the rows to BigQuery. Defaults to `false`. +* **isTruncate**: If set to `true`, the pipeline truncates before loading data into BigQuery. Defaults to `false`, which causes the pipeline to append data. +* **partitionColumn**: If this parameter is provided with the name of the `table` defined as an optional parameter, JdbcIO reads the table in parallel by executing multiple instances of the query on the same table (subquery) using ranges. Currently, only supports `Long` partition columns. +* **table**: The table to read from when using partitions. This parameter also accepts a subquery in parentheses. For example, `(select id, name from Person) as subq`. +* **numPartitions**: The number of partitions. With the lower and upper bound, this value forms partition strides for generated `WHERE` clause expressions that are used to split the partition column evenly. When the input is less than `1`, the number is set to `1`. +* **lowerBound**: The lower bound to use in the partition scheme. If not provided, this value is automatically inferred by Apache Beam for the supported types. +* **upperBound**: The upper bound to use in the partition scheme. If not provided, this value is automatically inferred by Apache Beam for the supported types. +* **fetchSize**: The number of rows to be fetched from database at a time. Not used for partitioned reads. Defaults to: 50000. +* **createDisposition**: The BigQuery CreateDisposition to use. For example, `CREATE_IF_NEEDED` or `CREATE_NEVER`. Defaults to: CREATE_NEVER. +* **bigQuerySchemaPath**: The Cloud Storage path for the BigQuery JSON schema. If `createDisposition` is set to `CREATE_IF_NEEDED`, this parameter must be specified. For example, `gs://your-bucket/your-schema.json`. +* **disabledAlgorithms**: Comma separated algorithms to disable. If this value is set to `none`, no algorithm is disabled. Use this parameter with caution, because the algorithms disabled by default might have vulnerabilities or performance issues. For example, `SSLv3, RC4`. +* **extraFilesToStage**: Comma separated Cloud Storage paths or Secret Manager secrets for files to stage in the worker. These files are saved in the /extra_files directory in each worker. For example, `gs:///file.txt,projects//secrets//versions/`. +* **useStorageWriteApi**: If `true`, the pipeline uses the BigQuery Storage Write API (https://cloud.google.com/bigquery/docs/write-api). The default value is `false`. For more information, see Using the Storage Write API (https://beam.apache.org/documentation/io/built-in/google-bigquery/#storage-write-api). +* **useStorageWriteApiAtLeastOnce**: When using the Storage Write API, specifies the write semantics. 
To use at-least-once semantics (https://beam.apache.org/documentation/io/built-in/google-bigquery/#at-least-once-semantics), set this parameter to `true`. To use exactly-once semantics, set the parameter to `false`. This parameter applies only when `useStorageWriteApi` is `true`. The default value is `false`. @@ -275,26 +272,26 @@ resource "google_dataflow_flex_template_job" "mysql_to_bigquery" { name = "mysql-to-bigquery" region = var.region parameters = { - connectionURL = "jdbc:mysql://some-host:3306/sampledb" - outputTable = ":." - bigQueryLoadingTemporaryDirectory = "gs://your-bucket/your-files/temp_dir" - # connectionProperties = "unicode=true;characterEncoding=UTF-8" + connectionURL = "" + outputTable = "" + bigQueryLoadingTemporaryDirectory = "" + # connectionProperties = "" # username = "" # password = "" - # query = "select * from sampledb.sample_table" - # KMSEncryptionKey = "projects/your-project/locations/global/keyRings/your-keyring/cryptoKeys/your-key" + # query = "" + # KMSEncryptionKey = "" # useColumnAlias = "false" # isTruncate = "false" # partitionColumn = "" - # table = "(select id, name from Person) as subq" + # table = "
" # numPartitions = "" # lowerBound = "" # upperBound = "" # fetchSize = "50000" # createDisposition = "CREATE_NEVER" - # bigQuerySchemaPath = "gs://your-bucket/your-schema.json" - # disabledAlgorithms = "SSLv3, RC4" - # extraFilesToStage = "gs:///file.txt,projects//secrets//versions/" + # bigQuerySchemaPath = "" + # disabledAlgorithms = "" + # extraFilesToStage = "" # useStorageWriteApi = "false" # useStorageWriteApiAtLeastOnce = "false" } diff --git a/v2/oracle-to-googlecloud/README_Jdbc_to_PubSub.md b/v2/oracle-to-googlecloud/README_Jdbc_to_PubSub.md index e7f38605fa..f00889584a 100644 --- a/v2/oracle-to-googlecloud/README_Jdbc_to_PubSub.md +++ b/v2/oracle-to-googlecloud/README_Jdbc_to_PubSub.md @@ -18,20 +18,20 @@ on [Metadata Annotations](https://github.com/GoogleCloudPlatform/DataflowTemplat ### Required parameters -* **driverClassName** : The JDBC driver class name. (Example: com.mysql.jdbc.Driver). -* **connectionUrl** : The JDBC connection URL string. You can pass in this value as a string that's encrypted with a Cloud KMS key and then Base64-encoded. For example: 'echo -n "jdbc:mysql://some-host:3306/sampledb" | gcloud kms encrypt --location= --keyring= --key= --plaintext-file=- --ciphertext-file=- | base64' (Example: jdbc:mysql://some-host:3306/sampledb). -* **driverJars** : Comma-separated Cloud Storage paths for JDBC drivers. (Example: gs://your-bucket/driver_jar1.jar,gs://your-bucket/driver_jar2.jar). -* **query** : The query to run on the source to extract the data. (Example: select * from sampledb.sample_table). -* **outputTopic** : The Pub/Sub topic to publish to, in the format projects//topics/. (Example: projects/your-project-id/topics/your-topic-name). +* **driverClassName**: The JDBC driver class name. For example, `com.mysql.jdbc.Driver`. +* **connectionUrl**: The JDBC connection URL string. You can pass in this value as a string that's encrypted with a Cloud KMS key and then Base64-encoded. For example: 'echo -n "jdbc:mysql://some-host:3306/sampledb" | gcloud kms encrypt --location= --keyring= --key= --plaintext-file=- --ciphertext-file=- | base64' For example, `jdbc:mysql://some-host:3306/sampledb`. +* **driverJars**: Comma-separated Cloud Storage paths for JDBC drivers. For example, `gs://your-bucket/driver_jar1.jar,gs://your-bucket/driver_jar2.jar`. +* **query**: The query to run on the source to extract the data. For example, `select * from sampledb.sample_table`. +* **outputTopic**: The Pub/Sub topic to publish to. For example, `projects//topics/`. ### Optional parameters -* **username** : The username to use for the JDBC connection. You can pass in this value as a string that's encrypted with a Cloud KMS key and then Base64-encoded. For example, `echo -n 'some_username' | glcloud kms encrypt --location=my_location --keyring=mykeyring --key=mykey --plaintext-file=- --ciphertext-file=- | base64`. -* **password** : The password to use for the JDBC connection. You can pass in this value as a string that's encrypted with a Cloud KMS key and then Base64-encoded. For example, `echo -n 'some_password' | glcloud kms encrypt --location=my_location --keyring=mykeyring --key=mykey --plaintext-file=- --ciphertext-file=- | base64`. -* **connectionProperties** : The properties string to use for the JDBC connection. The format of the string must be `[propertyName=property;]*`. (Example: unicode=true;characterEncoding=UTF-8). -* **KMSEncryptionKey** : The Cloud KMS Encryption Key to use to decrypt the username, password, and connection string. 
If a Cloud KMS key is passed in, the username, password, and connection string must all be passed in encrypted and base64 encoded. (Example: projects/your-project/locations/global/keyRings/your-keyring/cryptoKeys/your-key).
-* **disabledAlgorithms** : Comma separated algorithms to disable. If this value is set to none, no algorithm is disabled. Use this parameter with caution, because the algorithms disabled by default might have vulnerabilities or performance issues. (Example: SSLv3, RC4).
-* **extraFilesToStage** : Comma separated Cloud Storage paths or Secret Manager secrets for files to stage in the worker. These files are saved in the /extra_files directory in each worker. (Example: gs:///file.txt,projects//secrets//versions/).
+* **username**: The username to use for the JDBC connection. You can pass in this value as a string that's encrypted with a Cloud KMS key and then Base64-encoded. For example, `echo -n 'some_username' | gcloud kms encrypt --location=my_location --keyring=mykeyring --key=mykey --plaintext-file=- --ciphertext-file=- | base64`.
+* **password**: The password to use for the JDBC connection. You can pass in this value as a string that's encrypted with a Cloud KMS key and then Base64-encoded. For example, `echo -n 'some_password' | gcloud kms encrypt --location=my_location --keyring=mykeyring --key=mykey --plaintext-file=- --ciphertext-file=- | base64`.
+* **connectionProperties**: The properties string to use for the JDBC connection. The format of the string must be `[propertyName=property;]*`. For example, `unicode=true;characterEncoding=UTF-8`.
+* **KMSEncryptionKey**: The Cloud KMS Encryption Key to use to decrypt the username, password, and connection string. If a Cloud KMS key is passed in, the username, password, and connection string must all be passed in encrypted and base64 encoded. For example, `projects/your-project/locations/global/keyRings/your-keyring/cryptoKeys/your-key`.
+* **disabledAlgorithms**: Comma separated algorithms to disable. If this value is set to `none`, no algorithm is disabled. Use this parameter with caution, because the algorithms disabled by default might have vulnerabilities or performance issues. For example, `SSLv3, RC4`.
+* **extraFilesToStage**: Comma separated Cloud Storage paths or Secret Manager secrets for files to stage in the worker. These files are saved in the /extra_files directory in each worker. For example, `gs:///file.txt,projects//secrets//versions/`.
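`extraFilesToStage` accepts both Cloud Storage paths and Secret Manager secret versions. A short sketch of the Secret Manager route, using a hypothetical secret name and local file:

```shell
# Create a secret and upload the file to stage, for example a truststore the driver needs.
gcloud secrets create jdbc-extra-file --replication-policy=automatic
gcloud secrets versions add jdbc-extra-file --data-file=./client-truststore.jks

# Both entries below are saved to the /extra_files directory on each worker.
EXTRA_FILES="gs://your-bucket/file.txt,projects/your-project/secrets/jdbc-extra-file/versions/1"
```

The comma-separated value in `EXTRA_FILES` is what gets passed as the `extraFilesToStage` parameter.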
@@ -223,17 +223,17 @@ resource "google_dataflow_flex_template_job" "jdbc_to_pubsub" { name = "jdbc-to-pubsub" region = var.region parameters = { - driverClassName = "com.mysql.jdbc.Driver" - connectionUrl = "jdbc:mysql://some-host:3306/sampledb" - driverJars = "gs://your-bucket/driver_jar1.jar,gs://your-bucket/driver_jar2.jar" - query = "select * from sampledb.sample_table" - outputTopic = "projects/your-project-id/topics/your-topic-name" + driverClassName = "" + connectionUrl = "" + driverJars = "" + query = "" + outputTopic = "" # username = "" # password = "" - # connectionProperties = "unicode=true;characterEncoding=UTF-8" - # KMSEncryptionKey = "projects/your-project/locations/global/keyRings/your-keyring/cryptoKeys/your-key" - # disabledAlgorithms = "SSLv3, RC4" - # extraFilesToStage = "gs:///file.txt,projects//secrets//versions/" + # connectionProperties = "" + # KMSEncryptionKey = "" + # disabledAlgorithms = "" + # extraFilesToStage = "" } } ``` diff --git a/v2/oracle-to-googlecloud/README_Jdbc_to_PubSub_Auto.md b/v2/oracle-to-googlecloud/README_Jdbc_to_PubSub_Auto.md index 4d77c043e0..58e16a9142 100644 --- a/v2/oracle-to-googlecloud/README_Jdbc_to_PubSub_Auto.md +++ b/v2/oracle-to-googlecloud/README_Jdbc_to_PubSub_Auto.md @@ -15,23 +15,23 @@ on [Metadata Annotations](https://github.com/GoogleCloudPlatform/DataflowTemplat ### Required parameters -* **driverClassName** : JDBC driver class name to use. (Example: com.mysql.jdbc.Driver). -* **connectionUrl** : Url connection string to connect to the JDBC source. Connection string can be passed in as plaintext or as a base64 encoded string encrypted by Google Cloud KMS. (Example: jdbc:mysql://some-host:3306/sampledb). -* **driverJars** : Comma separate Cloud Storage paths for JDBC drivers. (Example: gs://your-bucket/driver_jar1.jar,gs://your-bucket/driver_jar2.jar). -* **query** : Query to be executed on the source to extract the data. (Example: select * from sampledb.sample_table). -* **outputTopic** : The name of the topic to which data should published, in the format of 'projects/your-project-id/topics/your-topic-name' (Example: projects/your-project-id/topics/your-topic-name). +* **driverClassName**: JDBC driver class name to use. For example, `com.mysql.jdbc.Driver`. +* **connectionUrl**: Url connection string to connect to the JDBC source. Connection string can be passed in as plaintext or as a base64 encoded string encrypted by Google Cloud KMS. For example, `jdbc:mysql://some-host:3306/sampledb`. +* **driverJars**: Comma separate Cloud Storage paths for JDBC drivers. For example, `gs://your-bucket/driver_jar1.jar,gs://your-bucket/driver_jar2.jar`. +* **query**: Query to be executed on the source to extract the data. For example, `select * from sampledb.sample_table`. +* **outputTopic**: The name of the topic to publish data to. For example, `projects//topics/`. ### Optional parameters -* **username** : User name to be used for the JDBC connection. User name can be passed in as plaintext or as a base64 encoded string encrypted by Google Cloud KMS. -* **password** : Password to be used for the JDBC connection. Password can be passed in as plaintext or as a base64 encoded string encrypted by Google Cloud KMS. -* **connectionProperties** : Properties string to use for the JDBC connection. Format of the string must be [propertyName=property;]*. (Example: unicode=true;characterEncoding=UTF-8). -* **KMSEncryptionKey** : If this parameter is provided, password, user name and connection string should all be passed in encrypted. 
Encrypt parameters using the KMS API encrypt endpoint. See: https://cloud.google.com/kms/docs/reference/rest/v1/projects.locations.keyRings.cryptoKeys/encrypt (Example: projects/your-project/locations/global/keyRings/your-keyring/cryptoKeys/your-key). -* **partitionColumn** : If this parameter is provided (along with `table`), JdbcIO reads the table in parallel by executing multiple instances of the query on the same table (subquery) using ranges. Currently, only Long partition columns are supported. -* **table** : Table to read from using partitions. This parameter also accepts a subquery in parentheses. (Example: (select id, name from Person) as subq). -* **numPartitions** : The number of partitions. This, along with the lower and upper bound, form partitions strides for generated WHERE clause expressions used to split the partition column evenly. When the input is less than 1, the number is set to 1. -* **lowerBound** : Lower bound used in the partition scheme. If not provided, it is automatically inferred by Beam (for the supported types). -* **upperBound** : Upper bound used in partition scheme. If not provided, it is automatically inferred by Beam (for the supported types). +* **username**: User name to be used for the JDBC connection. User name can be passed in as plaintext or as a base64 encoded string encrypted by Google Cloud KMS. +* **password**: Password to be used for the JDBC connection. Password can be passed in as plaintext or as a base64 encoded string encrypted by Google Cloud KMS. +* **connectionProperties**: Properties string to use for the JDBC connection. Format of the string must be [propertyName=property;]*. For example, `unicode=true;characterEncoding=UTF-8`. +* **KMSEncryptionKey**: If this parameter is provided, password, user name and connection string should all be passed in encrypted. Encrypt parameters using the KMS API encrypt endpoint. See: https://cloud.google.com/kms/docs/reference/rest/v1/projects.locations.keyRings.cryptoKeys/encrypt For example, `projects/your-project/locations/global/keyRings/your-keyring/cryptoKeys/your-key`. +* **partitionColumn**: If this parameter is provided (along with `table`), JdbcIO reads the table in parallel by executing multiple instances of the query on the same table (subquery) using ranges. Currently, only Long partition columns are supported. +* **table**: Table to read from using partitions. This parameter also accepts a subquery in parentheses. For example, `(select id, name from Person) as subq`. +* **numPartitions**: The number of partitions. This, along with the lower and upper bound, form partitions strides for generated WHERE clause expressions used to split the partition column evenly. When the input is less than 1, the number is set to 1. +* **lowerBound**: Lower bound used in the partition scheme. If not provided, it is automatically inferred by Beam (for the supported types). +* **upperBound**: Upper bound used in partition scheme. If not provided, it is automatically inferred by Beam (for the supported types). 
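The `KMSEncryptionKey` bullet above points at the KMS REST `encrypt` endpoint as an alternative to the `gcloud` CLI. A sketch of calling that endpoint directly with `curl`; the Oracle connection string and key names are placeholders:

```shell
# The REST API expects the plaintext to be Base64-encoded inside the JSON body.
PLAINTEXT=$(echo -n "jdbc:oracle:thin:@//some-host:1521/your-service" | base64 -w 0)

curl -s -X POST \
  -H "Authorization: Bearer $(gcloud auth print-access-token)" \
  -H "Content-Type: application/json" \
  -d "{\"plaintext\": \"${PLAINTEXT}\"}" \
  "https://cloudkms.googleapis.com/v1/projects/your-project/locations/global/keyRings/your-keyring/cryptoKeys/your-key:encrypt"

# The "ciphertext" field in the JSON response is already Base64-encoded, which is the
# form these parameters expect when KMSEncryptionKey is also set on the job.
```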
@@ -232,17 +232,17 @@ resource "google_dataflow_flex_template_job" "jdbc_to_pubsub_auto" { name = "jdbc-to-pubsub-auto" region = var.region parameters = { - driverClassName = "com.mysql.jdbc.Driver" - connectionUrl = "jdbc:mysql://some-host:3306/sampledb" - driverJars = "gs://your-bucket/driver_jar1.jar,gs://your-bucket/driver_jar2.jar" - query = "select * from sampledb.sample_table" - outputTopic = "projects/your-project-id/topics/your-topic-name" + driverClassName = "" + connectionUrl = "" + driverJars = "" + query = "" + outputTopic = "" # username = "" # password = "" - # connectionProperties = "unicode=true;characterEncoding=UTF-8" - # KMSEncryptionKey = "projects/your-project/locations/global/keyRings/your-keyring/cryptoKeys/your-key" + # connectionProperties = "" + # KMSEncryptionKey = "" # partitionColumn = "" - # table = "(select id, name from Person) as subq" + # table = "
" # numPartitions = "" # lowerBound = "" # upperBound = "" diff --git a/v2/oracle-to-googlecloud/README_Oracle_to_BigQuery.md b/v2/oracle-to-googlecloud/README_Oracle_to_BigQuery.md index ed7c592aea..9dfc367ec9 100644 --- a/v2/oracle-to-googlecloud/README_Oracle_to_BigQuery.md +++ b/v2/oracle-to-googlecloud/README_Oracle_to_BigQuery.md @@ -23,34 +23,31 @@ on [Metadata Annotations](https://github.com/GoogleCloudPlatform/DataflowTemplat ### Required parameters -* **connectionURL** : The JDBC connection URL string. Can be passed in as a string that's Base64-encoded and then encrypted with a Cloud KMS key. Note the difference between an Oracle non-RAC database connection string (`jdbc:oracle:thin:@some-host::`) and an Oracle RAC database connection string (`jdbc:oracle:thin:@//some-host[:]/`). (Example: jdbc:oracle:thin:@some-host::). -* **outputTable** : The BigQuery output table location. (Example: :.). -* **bigQueryLoadingTemporaryDirectory** : The temporary directory for the BigQuery loading process. (Example: gs://your-bucket/your-files/temp_dir). +* **connectionURL**: The JDBC connection URL string. Can be passed in as a string that's Base64-encoded and then encrypted with a Cloud KMS key. Note the difference between an Oracle non-RAC database connection string (`jdbc:oracle:thin:@some-host::`) and an Oracle RAC database connection string (`jdbc:oracle:thin:@//some-host[:]/`). For example, `jdbc:oracle:thin:@some-host::`. +* **outputTable**: The BigQuery output table location. For example, `:.`. +* **bigQueryLoadingTemporaryDirectory**: The temporary directory for the BigQuery loading process. For example, `gs://your-bucket/your-files/temp_dir`. ### Optional parameters -* **connectionProperties** : The properties string to use for the JDBC connection. The format of the string must be `[propertyName=property;]*`.For more information, see Configuration Properties (https://dev.mysql.com/doc/connector-j/en/connector-j-reference-configuration-properties.html) in the MySQL documentation. (Example: unicode=true;characterEncoding=UTF-8). -* **username** : The username to use for the JDBC connection. Can be passed in as a string that's encrypted with a Cloud KMS key, or can be a Secret Manager secret in the form projects/{project}/secrets/{secret}/versions/{secret_version}. -* **password** : The password to use for the JDBC connection. Can be passed in as a string that's encrypted with a Cloud KMS key, or can be a Secret Manager secret in the form projects/{project}/secrets/{secret}/versions/{secret_version}. -* **query** : The query to run on the source to extract the data. Note that some JDBC SQL and BigQuery types, although sharing the same name, have some differences. Some important SQL -> BigQuery type mappings to keep in mind are: -DATETIME --> TIMESTAMP - -Type casting may be required if your schemas do not match. This parameter can be set to a gs:// path pointing to a file in Cloud Storage to load the query from. The file encoding should be UTF-8. (Example: select * from sampledb.sample_table). -* **KMSEncryptionKey** : The Cloud KMS encryption key to use to decrypt the username, password, and connection string. If you pass in a Cloud KMS key, you must also encrypt the username, password, and connection string. (Example: projects/your-project/locations/global/keyRings/your-keyring/cryptoKeys/your-key). -* **useColumnAlias** : If set to `true`, the pipeline uses the column alias (`AS`) instead of the column name to map the rows to BigQuery. Defaults to `false`. 
-* **isTruncate** : If set to `true`, the pipeline truncates before loading data into BigQuery. Defaults to `false`, which causes the pipeline to append data. -* **partitionColumn** : If this parameter is provided with the name of the `table` defined as an optional parameter, JdbcIO reads the table in parallel by executing multiple instances of the query on the same table (subquery) using ranges. Currently, only supports `Long` partition columns. -* **table** : The table to read from when using partitions. This parameter also accepts a subquery in parentheses. (Example: (select id, name from Person) as subq). -* **numPartitions** : The number of partitions. With the lower and upper bound, this value forms partition strides for generated `WHERE` clause expressions that are used to split the partition column evenly. When the input is less than `1`, the number is set to `1`. -* **lowerBound** : The lower bound to use in the partition scheme. If not provided, this value is automatically inferred by Apache Beam for the supported types. -* **upperBound** : The upper bound to use in the partition scheme. If not provided, this value is automatically inferred by Apache Beam for the supported types. -* **fetchSize** : The number of rows to be fetched from database at a time. Not used for partitioned reads. Defaults to: 50000. -* **createDisposition** : The BigQuery CreateDisposition to use. For example, `CREATE_IF_NEEDED` or `CREATE_NEVER`. Defaults to: CREATE_NEVER. -* **bigQuerySchemaPath** : The Cloud Storage path for the BigQuery JSON schema. If `createDisposition` is set to CREATE_IF_NEEDED, this parameter must be specified. (Example: gs://your-bucket/your-schema.json). -* **disabledAlgorithms** : Comma separated algorithms to disable. If this value is set to none, no algorithm is disabled. Use this parameter with caution, because the algorithms disabled by default might have vulnerabilities or performance issues. (Example: SSLv3, RC4). -* **extraFilesToStage** : Comma separated Cloud Storage paths or Secret Manager secrets for files to stage in the worker. These files are saved in the /extra_files directory in each worker. (Example: gs:///file.txt,projects//secrets//versions/). -* **useStorageWriteApi** : If `true`, the pipeline uses the BigQuery Storage Write API (https://cloud.google.com/bigquery/docs/write-api). The default value is `false`. For more information, see Using the Storage Write API (https://beam.apache.org/documentation/io/built-in/google-bigquery/#storage-write-api). -* **useStorageWriteApiAtLeastOnce** : When using the Storage Write API, specifies the write semantics. To use at-least-once semantics (https://beam.apache.org/documentation/io/built-in/google-bigquery/#at-least-once-semantics), set this parameter to `true`. To use exactly-once semantics, set the parameter to `false`. This parameter applies only when `useStorageWriteApi` is `true`. The default value is `false`. +* **connectionProperties**: The properties string to use for the JDBC connection. The format of the string must be `[propertyName=property;]*`.For more information, see Configuration Properties (https://dev.mysql.com/doc/connector-j/8.1/en/connector-j-reference-configuration-properties.html) in the MySQL documentation. For example, `unicode=true;characterEncoding=UTF-8`. +* **username**: The username to use for the JDBC connection. You can pass in this value as a string that's encrypted with a Cloud KMS key and then Base64-encoded. Remove whitespace characters from the Base64-encoded string. 
+* **password**: The password to use for the JDBC connection. You can pass in this value as a string that's encrypted with a Cloud KMS key and then Base64-encoded. Remove whitespace characters from the Base64-encoded string. +* **query**: The query to run on the source to extract the data. Note that some JDBC SQL and BigQuery types, although sharing the same name, have some differences. Some important SQL -> BigQuery type mappings to keep in mind are `DATETIME --> TIMESTAMP`. Type casting may be required if your schemas do not match. For example, `select * from sampledb.sample_table`. +* **KMSEncryptionKey**: The Cloud KMS encryption key to use to decrypt the username, password, and connection string. If you pass in a Cloud KMS key, you must also encrypt the username, password, and connection string. For example, `projects/your-project/locations/global/keyRings/your-keyring/cryptoKeys/your-key`. +* **useColumnAlias**: If set to `true`, the pipeline uses the column alias (`AS`) instead of the column name to map the rows to BigQuery. Defaults to `false`. +* **isTruncate**: If set to `true`, the pipeline truncates before loading data into BigQuery. Defaults to `false`, which causes the pipeline to append data. +* **partitionColumn**: If this parameter is provided with the name of the `table` defined as an optional parameter, JdbcIO reads the table in parallel by executing multiple instances of the query on the same table (subquery) using ranges. Currently, only supports `Long` partition columns. +* **table**: The table to read from when using partitions. This parameter also accepts a subquery in parentheses. For example, `(select id, name from Person) as subq`. +* **numPartitions**: The number of partitions. With the lower and upper bound, this value forms partition strides for generated `WHERE` clause expressions that are used to split the partition column evenly. When the input is less than `1`, the number is set to `1`. +* **lowerBound**: The lower bound to use in the partition scheme. If not provided, this value is automatically inferred by Apache Beam for the supported types. +* **upperBound**: The upper bound to use in the partition scheme. If not provided, this value is automatically inferred by Apache Beam for the supported types. +* **fetchSize**: The number of rows to be fetched from database at a time. Not used for partitioned reads. Defaults to: 50000. +* **createDisposition**: The BigQuery CreateDisposition to use. For example, `CREATE_IF_NEEDED` or `CREATE_NEVER`. Defaults to: CREATE_NEVER. +* **bigQuerySchemaPath**: The Cloud Storage path for the BigQuery JSON schema. If `createDisposition` is set to `CREATE_IF_NEEDED`, this parameter must be specified. For example, `gs://your-bucket/your-schema.json`. +* **disabledAlgorithms**: Comma separated algorithms to disable. If this value is set to `none`, no algorithm is disabled. Use this parameter with caution, because the algorithms disabled by default might have vulnerabilities or performance issues. For example, `SSLv3, RC4`. +* **extraFilesToStage**: Comma separated Cloud Storage paths or Secret Manager secrets for files to stage in the worker. These files are saved in the /extra_files directory in each worker. For example, `gs:///file.txt,projects//secrets//versions/`. +* **useStorageWriteApi**: If `true`, the pipeline uses the BigQuery Storage Write API (https://cloud.google.com/bigquery/docs/write-api). The default value is `false`. 
For more information, see Using the Storage Write API (https://beam.apache.org/documentation/io/built-in/google-bigquery/#storage-write-api). +* **useStorageWriteApiAtLeastOnce**: When using the Storage Write API, specifies the write semantics. To use at-least-once semantics (https://beam.apache.org/documentation/io/built-in/google-bigquery/#at-least-once-semantics), set this parameter to `true`. To use exactly-once semantics, set the parameter to `false`. This parameter applies only when `useStorageWriteApi` is `true`. The default value is `false`. @@ -275,26 +272,26 @@ resource "google_dataflow_flex_template_job" "oracle_to_bigquery" { name = "oracle-to-bigquery" region = var.region parameters = { - connectionURL = "jdbc:oracle:thin:@some-host::" - outputTable = ":." - bigQueryLoadingTemporaryDirectory = "gs://your-bucket/your-files/temp_dir" - # connectionProperties = "unicode=true;characterEncoding=UTF-8" + connectionURL = "" + outputTable = "" + bigQueryLoadingTemporaryDirectory = "" + # connectionProperties = "" # username = "" # password = "" - # query = "select * from sampledb.sample_table" - # KMSEncryptionKey = "projects/your-project/locations/global/keyRings/your-keyring/cryptoKeys/your-key" + # query = "" + # KMSEncryptionKey = "" # useColumnAlias = "false" # isTruncate = "false" # partitionColumn = "" - # table = "(select id, name from Person) as subq" + # table = "
" # numPartitions = "" # lowerBound = "" # upperBound = "" # fetchSize = "50000" # createDisposition = "CREATE_NEVER" - # bigQuerySchemaPath = "gs://your-bucket/your-schema.json" - # disabledAlgorithms = "SSLv3, RC4" - # extraFilesToStage = "gs:///file.txt,projects//secrets//versions/" + # bigQuerySchemaPath = "" + # disabledAlgorithms = "" + # extraFilesToStage = "" # useStorageWriteApi = "false" # useStorageWriteApiAtLeastOnce = "false" } diff --git a/v2/postgresql-to-googlecloud/README_Jdbc_to_PubSub.md b/v2/postgresql-to-googlecloud/README_Jdbc_to_PubSub.md index 7f93601630..395fcba04a 100644 --- a/v2/postgresql-to-googlecloud/README_Jdbc_to_PubSub.md +++ b/v2/postgresql-to-googlecloud/README_Jdbc_to_PubSub.md @@ -18,20 +18,20 @@ on [Metadata Annotations](https://github.com/GoogleCloudPlatform/DataflowTemplat ### Required parameters -* **driverClassName** : The JDBC driver class name. (Example: com.mysql.jdbc.Driver). -* **connectionUrl** : The JDBC connection URL string. You can pass in this value as a string that's encrypted with a Cloud KMS key and then Base64-encoded. For example: 'echo -n "jdbc:mysql://some-host:3306/sampledb" | gcloud kms encrypt --location= --keyring= --key= --plaintext-file=- --ciphertext-file=- | base64' (Example: jdbc:mysql://some-host:3306/sampledb). -* **driverJars** : Comma-separated Cloud Storage paths for JDBC drivers. (Example: gs://your-bucket/driver_jar1.jar,gs://your-bucket/driver_jar2.jar). -* **query** : The query to run on the source to extract the data. (Example: select * from sampledb.sample_table). -* **outputTopic** : The Pub/Sub topic to publish to, in the format projects//topics/. (Example: projects/your-project-id/topics/your-topic-name). +* **driverClassName**: The JDBC driver class name. For example, `com.mysql.jdbc.Driver`. +* **connectionUrl**: The JDBC connection URL string. You can pass in this value as a string that's encrypted with a Cloud KMS key and then Base64-encoded. For example: 'echo -n "jdbc:mysql://some-host:3306/sampledb" | gcloud kms encrypt --location= --keyring= --key= --plaintext-file=- --ciphertext-file=- | base64' For example, `jdbc:mysql://some-host:3306/sampledb`. +* **driverJars**: Comma-separated Cloud Storage paths for JDBC drivers. For example, `gs://your-bucket/driver_jar1.jar,gs://your-bucket/driver_jar2.jar`. +* **query**: The query to run on the source to extract the data. For example, `select * from sampledb.sample_table`. +* **outputTopic**: The Pub/Sub topic to publish to. For example, `projects//topics/`. ### Optional parameters -* **username** : The username to use for the JDBC connection. You can pass in this value as a string that's encrypted with a Cloud KMS key and then Base64-encoded. For example, `echo -n 'some_username' | glcloud kms encrypt --location=my_location --keyring=mykeyring --key=mykey --plaintext-file=- --ciphertext-file=- | base64`. -* **password** : The password to use for the JDBC connection. You can pass in this value as a string that's encrypted with a Cloud KMS key and then Base64-encoded. For example, `echo -n 'some_password' | glcloud kms encrypt --location=my_location --keyring=mykeyring --key=mykey --plaintext-file=- --ciphertext-file=- | base64`. -* **connectionProperties** : The properties string to use for the JDBC connection. The format of the string must be `[propertyName=property;]*`. (Example: unicode=true;characterEncoding=UTF-8). -* **KMSEncryptionKey** : The Cloud KMS Encryption Key to use to decrypt the username, password, and connection string. 
If a Cloud KMS key is passed in, the username, password, and connection string must all be passed in encrypted and base64 encoded. (Example: projects/your-project/locations/global/keyRings/your-keyring/cryptoKeys/your-key).
-* **disabledAlgorithms** : Comma separated algorithms to disable. If this value is set to none, no algorithm is disabled. Use this parameter with caution, because the algorithms disabled by default might have vulnerabilities or performance issues. (Example: SSLv3, RC4).
-* **extraFilesToStage** : Comma separated Cloud Storage paths or Secret Manager secrets for files to stage in the worker. These files are saved in the /extra_files directory in each worker. (Example: gs:///file.txt,projects//secrets//versions/).
+* **username**: The username to use for the JDBC connection. You can pass in this value as a string that's encrypted with a Cloud KMS key and then Base64-encoded. For example, `echo -n 'some_username' | gcloud kms encrypt --location=my_location --keyring=mykeyring --key=mykey --plaintext-file=- --ciphertext-file=- | base64`.
+* **password**: The password to use for the JDBC connection. You can pass in this value as a string that's encrypted with a Cloud KMS key and then Base64-encoded. For example, `echo -n 'some_password' | gcloud kms encrypt --location=my_location --keyring=mykeyring --key=mykey --plaintext-file=- --ciphertext-file=- | base64`.
+* **connectionProperties**: The properties string to use for the JDBC connection. The format of the string must be `[propertyName=property;]*`. For example, `unicode=true;characterEncoding=UTF-8`.
+* **KMSEncryptionKey**: The Cloud KMS Encryption Key to use to decrypt the username, password, and connection string. If a Cloud KMS key is passed in, the username, password, and connection string must all be passed in encrypted and base64 encoded. For example, `projects/your-project/locations/global/keyRings/your-keyring/cryptoKeys/your-key`.
+* **disabledAlgorithms**: Comma separated algorithms to disable. If this value is set to `none`, no algorithm is disabled. Use this parameter with caution, because the algorithms disabled by default might have vulnerabilities or performance issues. For example, `SSLv3, RC4`.
+* **extraFilesToStage**: Comma separated Cloud Storage paths or Secret Manager secrets for files to stage in the worker. These files are saved in the /extra_files directory in each worker. For example, `gs:///file.txt,projects//secrets//versions/`.
@@ -223,17 +223,17 @@ resource "google_dataflow_flex_template_job" "jdbc_to_pubsub" { name = "jdbc-to-pubsub" region = var.region parameters = { - driverClassName = "com.mysql.jdbc.Driver" - connectionUrl = "jdbc:mysql://some-host:3306/sampledb" - driverJars = "gs://your-bucket/driver_jar1.jar,gs://your-bucket/driver_jar2.jar" - query = "select * from sampledb.sample_table" - outputTopic = "projects/your-project-id/topics/your-topic-name" + driverClassName = "" + connectionUrl = "" + driverJars = "" + query = "" + outputTopic = "" # username = "" # password = "" - # connectionProperties = "unicode=true;characterEncoding=UTF-8" - # KMSEncryptionKey = "projects/your-project/locations/global/keyRings/your-keyring/cryptoKeys/your-key" - # disabledAlgorithms = "SSLv3, RC4" - # extraFilesToStage = "gs:///file.txt,projects//secrets//versions/" + # connectionProperties = "" + # KMSEncryptionKey = "" + # disabledAlgorithms = "" + # extraFilesToStage = "" } } ``` diff --git a/v2/postgresql-to-googlecloud/README_Jdbc_to_PubSub_Auto.md b/v2/postgresql-to-googlecloud/README_Jdbc_to_PubSub_Auto.md index 66f3d6cf7e..412fd6d0cb 100644 --- a/v2/postgresql-to-googlecloud/README_Jdbc_to_PubSub_Auto.md +++ b/v2/postgresql-to-googlecloud/README_Jdbc_to_PubSub_Auto.md @@ -15,23 +15,23 @@ on [Metadata Annotations](https://github.com/GoogleCloudPlatform/DataflowTemplat ### Required parameters -* **driverClassName** : JDBC driver class name to use. (Example: com.mysql.jdbc.Driver). -* **connectionUrl** : Url connection string to connect to the JDBC source. Connection string can be passed in as plaintext or as a base64 encoded string encrypted by Google Cloud KMS. (Example: jdbc:mysql://some-host:3306/sampledb). -* **driverJars** : Comma separate Cloud Storage paths for JDBC drivers. (Example: gs://your-bucket/driver_jar1.jar,gs://your-bucket/driver_jar2.jar). -* **query** : Query to be executed on the source to extract the data. (Example: select * from sampledb.sample_table). -* **outputTopic** : The name of the topic to which data should published, in the format of 'projects/your-project-id/topics/your-topic-name' (Example: projects/your-project-id/topics/your-topic-name). +* **driverClassName**: JDBC driver class name to use. For example, `com.mysql.jdbc.Driver`. +* **connectionUrl**: Url connection string to connect to the JDBC source. Connection string can be passed in as plaintext or as a base64 encoded string encrypted by Google Cloud KMS. For example, `jdbc:mysql://some-host:3306/sampledb`. +* **driverJars**: Comma separate Cloud Storage paths for JDBC drivers. For example, `gs://your-bucket/driver_jar1.jar,gs://your-bucket/driver_jar2.jar`. +* **query**: Query to be executed on the source to extract the data. For example, `select * from sampledb.sample_table`. +* **outputTopic**: The name of the topic to publish data to. For example, `projects//topics/`. ### Optional parameters -* **username** : User name to be used for the JDBC connection. User name can be passed in as plaintext or as a base64 encoded string encrypted by Google Cloud KMS. -* **password** : Password to be used for the JDBC connection. Password can be passed in as plaintext or as a base64 encoded string encrypted by Google Cloud KMS. -* **connectionProperties** : Properties string to use for the JDBC connection. Format of the string must be [propertyName=property;]*. (Example: unicode=true;characterEncoding=UTF-8). 
-* **KMSEncryptionKey** : If this parameter is provided, password, user name and connection string should all be passed in encrypted. Encrypt parameters using the KMS API encrypt endpoint. See: https://cloud.google.com/kms/docs/reference/rest/v1/projects.locations.keyRings.cryptoKeys/encrypt (Example: projects/your-project/locations/global/keyRings/your-keyring/cryptoKeys/your-key). -* **partitionColumn** : If this parameter is provided (along with `table`), JdbcIO reads the table in parallel by executing multiple instances of the query on the same table (subquery) using ranges. Currently, only Long partition columns are supported. -* **table** : Table to read from using partitions. This parameter also accepts a subquery in parentheses. (Example: (select id, name from Person) as subq). -* **numPartitions** : The number of partitions. This, along with the lower and upper bound, form partitions strides for generated WHERE clause expressions used to split the partition column evenly. When the input is less than 1, the number is set to 1. -* **lowerBound** : Lower bound used in the partition scheme. If not provided, it is automatically inferred by Beam (for the supported types). -* **upperBound** : Upper bound used in partition scheme. If not provided, it is automatically inferred by Beam (for the supported types). +* **username**: User name to be used for the JDBC connection. User name can be passed in as plaintext or as a base64 encoded string encrypted by Google Cloud KMS. +* **password**: Password to be used for the JDBC connection. Password can be passed in as plaintext or as a base64 encoded string encrypted by Google Cloud KMS. +* **connectionProperties**: Properties string to use for the JDBC connection. Format of the string must be `[propertyName=property;]*`. For example, `unicode=true;characterEncoding=UTF-8`. +* **KMSEncryptionKey**: If this parameter is provided, password, user name and connection string should all be passed in encrypted. Encrypt parameters using the KMS API encrypt endpoint (https://cloud.google.com/kms/docs/reference/rest/v1/projects.locations.keyRings.cryptoKeys/encrypt). For example, `projects/your-project/locations/global/keyRings/your-keyring/cryptoKeys/your-key`. +* **partitionColumn**: If this parameter is provided (along with `table`), JdbcIO reads the table in parallel by executing multiple instances of the query on the same table (subquery) using ranges. Currently, only Long partition columns are supported. +* **table**: Table to read from using partitions. This parameter also accepts a subquery in parentheses. For example, `(select id, name from Person) as subq`. +* **numPartitions**: The number of partitions. This, along with the lower and upper bound, forms partition strides for generated WHERE clause expressions used to split the partition column evenly. When the input is less than 1, the number is set to 1. +* **lowerBound**: Lower bound used in the partition scheme. If not provided, it is automatically inferred by Beam (for the supported types). +* **upperBound**: Upper bound used in partition scheme. If not provided, it is automatically inferred by Beam (for the supported types).
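The partitioned-read parameters above (`table`, `partitionColumn`, `numPartitions`, `lowerBound`, `upperBound`) are easiest to see together in a launch command. The following is only a sketch, not the canonical invocation from this README: the template spec path, project, host, and bound values are placeholders, and it assumes the repeated `--parameters` style used by the generated gcloud examples in these READMEs.

```sh
# Hypothetical partitioned read over the Long column `id`, split into ten
# ranges between lowerBound and upperBound. Paths and names are placeholders.
gcloud dataflow flex-template run "jdbc-to-pubsub-auto-partitioned" \
  --region=us-central1 \
  --template-file-gcs-location=gs://your-staging-bucket/templates/flex/Jdbc_to_PubSub_Auto \
  --parameters driverClassName=org.postgresql.Driver \
  --parameters connectionUrl=jdbc:postgresql://some-host:5432/sampledb \
  --parameters driverJars=gs://your-bucket/postgresql.jar \
  --parameters query="select * from person" \
  --parameters outputTopic=projects/your-project-id/topics/your-topic-name \
  --parameters table=person \
  --parameters partitionColumn=id \
  --parameters numPartitions=10 \
  --parameters lowerBound=0 \
  --parameters upperBound=100000
```

With these values, JdbcIO issues roughly ten range queries over `id`, each covering an even stride of the interval between `lowerBound` and `upperBound`, instead of one monolithic scan.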
@@ -232,17 +232,17 @@ resource "google_dataflow_flex_template_job" "jdbc_to_pubsub_auto" { name = "jdbc-to-pubsub-auto" region = var.region parameters = { - driverClassName = "com.mysql.jdbc.Driver" - connectionUrl = "jdbc:mysql://some-host:3306/sampledb" - driverJars = "gs://your-bucket/driver_jar1.jar,gs://your-bucket/driver_jar2.jar" - query = "select * from sampledb.sample_table" - outputTopic = "projects/your-project-id/topics/your-topic-name" + driverClassName = "" + connectionUrl = "" + driverJars = "" + query = "" + outputTopic = "" # username = "" # password = "" - # connectionProperties = "unicode=true;characterEncoding=UTF-8" - # KMSEncryptionKey = "projects/your-project/locations/global/keyRings/your-keyring/cryptoKeys/your-key" + # connectionProperties = "" + # KMSEncryptionKey = "" # partitionColumn = "" - # table = "(select id, name from Person) as subq" + # table = "
" # numPartitions = "" # lowerBound = "" # upperBound = "" diff --git a/v2/postgresql-to-googlecloud/README_PostgreSQL_to_BigQuery.md b/v2/postgresql-to-googlecloud/README_PostgreSQL_to_BigQuery.md index 3c69cc5bcc..927b4255fd 100644 --- a/v2/postgresql-to-googlecloud/README_PostgreSQL_to_BigQuery.md +++ b/v2/postgresql-to-googlecloud/README_PostgreSQL_to_BigQuery.md @@ -23,34 +23,31 @@ on [Metadata Annotations](https://github.com/GoogleCloudPlatform/DataflowTemplat ### Required parameters -* **connectionURL** : The JDBC connection URL string. Can be passed in as a string that's Base64-encoded and then encrypted with a Cloud KMS key. (Example: jdbc:postgresql://some-host:5432/sampledb). -* **outputTable** : The BigQuery output table location. (Example: :.). -* **bigQueryLoadingTemporaryDirectory** : The temporary directory for the BigQuery loading process. (Example: gs://your-bucket/your-files/temp_dir). +* **connectionURL**: The JDBC connection URL string. Can be passed in as a string that's Base64-encoded and then encrypted with a Cloud KMS key. For example, `jdbc:postgresql://some-host:5432/sampledb`. +* **outputTable**: The BigQuery output table location. For example, `:.`. +* **bigQueryLoadingTemporaryDirectory**: The temporary directory for the BigQuery loading process. For example, `gs://your-bucket/your-files/temp_dir`. ### Optional parameters -* **connectionProperties** : The properties string to use for the JDBC connection. The format of the string must be `[propertyName=property;]*`.For more information, see Configuration Properties (https://dev.mysql.com/doc/connector-j/en/connector-j-reference-configuration-properties.html) in the MySQL documentation. (Example: unicode=true;characterEncoding=UTF-8). -* **username** : The username to use for the JDBC connection. Can be passed in as a string that's encrypted with a Cloud KMS key, or can be a Secret Manager secret in the form projects/{project}/secrets/{secret}/versions/{secret_version}. -* **password** : The password to use for the JDBC connection. Can be passed in as a string that's encrypted with a Cloud KMS key, or can be a Secret Manager secret in the form projects/{project}/secrets/{secret}/versions/{secret_version}. -* **query** : The query to run on the source to extract the data. Note that some JDBC SQL and BigQuery types, although sharing the same name, have some differences. Some important SQL -> BigQuery type mappings to keep in mind are: -DATETIME --> TIMESTAMP - -Type casting may be required if your schemas do not match. This parameter can be set to a gs:// path pointing to a file in Cloud Storage to load the query from. The file encoding should be UTF-8. (Example: select * from sampledb.sample_table). -* **KMSEncryptionKey** : The Cloud KMS encryption key to use to decrypt the username, password, and connection string. If you pass in a Cloud KMS key, you must also encrypt the username, password, and connection string. (Example: projects/your-project/locations/global/keyRings/your-keyring/cryptoKeys/your-key). -* **useColumnAlias** : If set to `true`, the pipeline uses the column alias (`AS`) instead of the column name to map the rows to BigQuery. Defaults to `false`. -* **isTruncate** : If set to `true`, the pipeline truncates before loading data into BigQuery. Defaults to `false`, which causes the pipeline to append data. 
-* **partitionColumn** : If this parameter is provided with the name of the `table` defined as an optional parameter, JdbcIO reads the table in parallel by executing multiple instances of the query on the same table (subquery) using ranges. Currently, only supports `Long` partition columns. -* **table** : The table to read from when using partitions. This parameter also accepts a subquery in parentheses. (Example: (select id, name from Person) as subq). -* **numPartitions** : The number of partitions. With the lower and upper bound, this value forms partition strides for generated `WHERE` clause expressions that are used to split the partition column evenly. When the input is less than `1`, the number is set to `1`. -* **lowerBound** : The lower bound to use in the partition scheme. If not provided, this value is automatically inferred by Apache Beam for the supported types. -* **upperBound** : The upper bound to use in the partition scheme. If not provided, this value is automatically inferred by Apache Beam for the supported types. -* **fetchSize** : The number of rows to be fetched from database at a time. Not used for partitioned reads. Defaults to: 50000. -* **createDisposition** : The BigQuery CreateDisposition to use. For example, `CREATE_IF_NEEDED` or `CREATE_NEVER`. Defaults to: CREATE_NEVER. -* **bigQuerySchemaPath** : The Cloud Storage path for the BigQuery JSON schema. If `createDisposition` is set to CREATE_IF_NEEDED, this parameter must be specified. (Example: gs://your-bucket/your-schema.json). -* **disabledAlgorithms** : Comma separated algorithms to disable. If this value is set to none, no algorithm is disabled. Use this parameter with caution, because the algorithms disabled by default might have vulnerabilities or performance issues. (Example: SSLv3, RC4). -* **extraFilesToStage** : Comma separated Cloud Storage paths or Secret Manager secrets for files to stage in the worker. These files are saved in the /extra_files directory in each worker. (Example: gs:///file.txt,projects//secrets//versions/). -* **useStorageWriteApi** : If `true`, the pipeline uses the BigQuery Storage Write API (https://cloud.google.com/bigquery/docs/write-api). The default value is `false`. For more information, see Using the Storage Write API (https://beam.apache.org/documentation/io/built-in/google-bigquery/#storage-write-api). -* **useStorageWriteApiAtLeastOnce** : When using the Storage Write API, specifies the write semantics. To use at-least-once semantics (https://beam.apache.org/documentation/io/built-in/google-bigquery/#at-least-once-semantics), set this parameter to `true`. To use exactly-once semantics, set the parameter to `false`. This parameter applies only when `useStorageWriteApi` is `true`. The default value is `false`. +* **connectionProperties**: The properties string to use for the JDBC connection. The format of the string must be `[propertyName=property;]*`.For more information, see Configuration Properties (https://dev.mysql.com/doc/connector-j/8.1/en/connector-j-reference-configuration-properties.html) in the MySQL documentation. For example, `unicode=true;characterEncoding=UTF-8`. +* **username**: The username to use for the JDBC connection. You can pass in this value as a string that's encrypted with a Cloud KMS key and then Base64-encoded. Remove whitespace characters from the Base64-encoded string. +* **password**: The password to use for the JDBC connection. You can pass in this value as a string that's encrypted with a Cloud KMS key and then Base64-encoded. 
Remove whitespace characters from the Base64-encoded string. +* **query**: The query to run on the source to extract the data. Note that some JDBC SQL and BigQuery types, although sharing the same name, have some differences. Some important SQL -> BigQuery type mappings to keep in mind are `DATETIME --> TIMESTAMP`. Type casting may be required if your schemas do not match. For example, `select * from sampledb.sample_table`. +* **KMSEncryptionKey**: The Cloud KMS encryption key to use to decrypt the username, password, and connection string. If you pass in a Cloud KMS key, you must also encrypt the username, password, and connection string. For example, `projects/your-project/locations/global/keyRings/your-keyring/cryptoKeys/your-key`. +* **useColumnAlias**: If set to `true`, the pipeline uses the column alias (`AS`) instead of the column name to map the rows to BigQuery. Defaults to `false`. +* **isTruncate**: If set to `true`, the pipeline truncates before loading data into BigQuery. Defaults to `false`, which causes the pipeline to append data. +* **partitionColumn**: If this parameter is provided with the name of the `table` defined as an optional parameter, JdbcIO reads the table in parallel by executing multiple instances of the query on the same table (subquery) using ranges. Currently, only supports `Long` partition columns. +* **table**: The table to read from when using partitions. This parameter also accepts a subquery in parentheses. For example, `(select id, name from Person) as subq`. +* **numPartitions**: The number of partitions. With the lower and upper bound, this value forms partition strides for generated `WHERE` clause expressions that are used to split the partition column evenly. When the input is less than `1`, the number is set to `1`. +* **lowerBound**: The lower bound to use in the partition scheme. If not provided, this value is automatically inferred by Apache Beam for the supported types. +* **upperBound**: The upper bound to use in the partition scheme. If not provided, this value is automatically inferred by Apache Beam for the supported types. +* **fetchSize**: The number of rows to be fetched from database at a time. Not used for partitioned reads. Defaults to: 50000. +* **createDisposition**: The BigQuery CreateDisposition to use. For example, `CREATE_IF_NEEDED` or `CREATE_NEVER`. Defaults to: CREATE_NEVER. +* **bigQuerySchemaPath**: The Cloud Storage path for the BigQuery JSON schema. If `createDisposition` is set to `CREATE_IF_NEEDED`, this parameter must be specified. For example, `gs://your-bucket/your-schema.json`. +* **disabledAlgorithms**: Comma separated algorithms to disable. If this value is set to `none`, no algorithm is disabled. Use this parameter with caution, because the algorithms disabled by default might have vulnerabilities or performance issues. For example, `SSLv3, RC4`. +* **extraFilesToStage**: Comma separated Cloud Storage paths or Secret Manager secrets for files to stage in the worker. These files are saved in the /extra_files directory in each worker. For example, `gs:///file.txt,projects//secrets//versions/`. +* **useStorageWriteApi**: If `true`, the pipeline uses the BigQuery Storage Write API (https://cloud.google.com/bigquery/docs/write-api). The default value is `false`. For more information, see Using the Storage Write API (https://beam.apache.org/documentation/io/built-in/google-bigquery/#storage-write-api). +* **useStorageWriteApiAtLeastOnce**: When using the Storage Write API, specifies the write semantics. 
To use at-least-once semantics (https://beam.apache.org/documentation/io/built-in/google-bigquery/#at-least-once-semantics), set this parameter to `true`. To use exactly-once semantics, set the parameter to `false`. This parameter applies only when `useStorageWriteApi` is `true`. The default value is `false`. @@ -275,26 +272,26 @@ resource "google_dataflow_flex_template_job" "postgresql_to_bigquery" { name = "postgresql-to-bigquery" region = var.region parameters = { - connectionURL = "jdbc:postgresql://some-host:5432/sampledb" - outputTable = ":." - bigQueryLoadingTemporaryDirectory = "gs://your-bucket/your-files/temp_dir" - # connectionProperties = "unicode=true;characterEncoding=UTF-8" + connectionURL = "" + outputTable = "" + bigQueryLoadingTemporaryDirectory = "" + # connectionProperties = "" # username = "" # password = "" - # query = "select * from sampledb.sample_table" - # KMSEncryptionKey = "projects/your-project/locations/global/keyRings/your-keyring/cryptoKeys/your-key" + # query = "" + # KMSEncryptionKey = "" # useColumnAlias = "false" # isTruncate = "false" # partitionColumn = "" - # table = "(select id, name from Person) as subq" + # table = "
" # numPartitions = "" # lowerBound = "" # upperBound = "" # fetchSize = "50000" # createDisposition = "CREATE_NEVER" - # bigQuerySchemaPath = "gs://your-bucket/your-schema.json" - # disabledAlgorithms = "SSLv3, RC4" - # extraFilesToStage = "gs:///file.txt,projects//secrets//versions/" + # bigQuerySchemaPath = "" + # disabledAlgorithms = "" + # extraFilesToStage = "" # useStorageWriteApi = "false" # useStorageWriteApiAtLeastOnce = "false" } diff --git a/v2/pubsub-binary-to-bigquery/README_PubSub_Avro_to_BigQuery.md b/v2/pubsub-binary-to-bigquery/README_PubSub_Avro_to_BigQuery.md index 5552b01456..c32974299d 100644 --- a/v2/pubsub-binary-to-bigquery/README_PubSub_Avro_to_BigQuery.md +++ b/v2/pubsub-binary-to-bigquery/README_PubSub_Avro_to_BigQuery.md @@ -19,19 +19,19 @@ on [Metadata Annotations](https://github.com/GoogleCloudPlatform/DataflowTemplat ### Required parameters -* **schemaPath** : The Cloud Storage location of the Avro schema file. For example, `gs://path/to/my/schema.avsc`. -* **inputSubscription** : The Pub/Sub input subscription to read from. (Example: projects//subscription/). -* **outputTableSpec** : The BigQuery output table location to write the output to. For example, `:.`.Depending on the `createDisposition` specified, the output table might be created automatically using the user provided Avro schema. -* **outputTopic** : The Pub/Sub topic to use for unprocessed records. (Example: projects//topics/). +* **schemaPath**: The Cloud Storage location of the Avro schema file. For example, `gs://path/to/my/schema.avsc`. +* **inputSubscription**: The Pub/Sub input subscription to read from. For example, `projects//subscription/`. +* **outputTableSpec**: The BigQuery output table location to write the output to. For example, `:.`.Depending on the `createDisposition` specified, the output table might be created automatically using the user provided Avro schema. +* **outputTopic**: The Pub/Sub topic to use for unprocessed records. For example, `projects//topics/`. ### Optional parameters -* **useStorageWriteApiAtLeastOnce** : When using the Storage Write API, specifies the write semantics. To use at-least-once semantics (https://beam.apache.org/documentation/io/built-in/google-bigquery/#at-least-once-semantics), set this parameter to true. To use exactly-once semantics, set the parameter to `false`. This parameter applies only when `useStorageWriteApi` is `true`. The default value is `false`. -* **writeDisposition** : The BigQuery WriteDisposition (https://cloud.google.com/bigquery/docs/reference/rest/v2/Job#jobconfigurationload) value. For example, `WRITE_APPEND`, `WRITE_EMPTY`, or `WRITE_TRUNCATE`. Defaults to `WRITE_APPEND`. -* **createDisposition** : The BigQuery CreateDisposition (https://cloud.google.com/bigquery/docs/reference/rest/v2/Job#jobconfigurationload). For example, `CREATE_IF_NEEDED` and `CREATE_NEVER`. Defaults to `CREATE_IF_NEEDED`. -* **useStorageWriteApi** : If true, the pipeline uses the BigQuery Storage Write API (https://cloud.google.com/bigquery/docs/write-api). The default value is `false`. For more information, see Using the Storage Write API (https://beam.apache.org/documentation/io/built-in/google-bigquery/#storage-write-api). -* **numStorageWriteApiStreams** : When using the Storage Write API, specifies the number of write streams. If `useStorageWriteApi` is `true` and `useStorageWriteApiAtLeastOnce` is `false`, then you must set this parameter. Defaults to: 0. 
-* **storageWriteApiTriggeringFrequencySec** : When using the Storage Write API, specifies the triggering frequency, in seconds. If `useStorageWriteApi` is `true` and `useStorageWriteApiAtLeastOnce` is `false`, then you must set this parameter. +* **useStorageWriteApiAtLeastOnce**: When using the Storage Write API, specifies the write semantics. To use at-least-once semantics (https://beam.apache.org/documentation/io/built-in/google-bigquery/#at-least-once-semantics), set this parameter to true. To use exactly-once semantics, set the parameter to `false`. This parameter applies only when `useStorageWriteApi` is `true`. The default value is `false`. +* **writeDisposition**: The BigQuery WriteDisposition (https://cloud.google.com/bigquery/docs/reference/rest/v2/Job#jobconfigurationload) value. For example, `WRITE_APPEND`, `WRITE_EMPTY`, or `WRITE_TRUNCATE`. Defaults to `WRITE_APPEND`. +* **createDisposition**: The BigQuery CreateDisposition (https://cloud.google.com/bigquery/docs/reference/rest/v2/Job#jobconfigurationload). For example, `CREATE_IF_NEEDED` and `CREATE_NEVER`. Defaults to `CREATE_IF_NEEDED`. +* **useStorageWriteApi**: If true, the pipeline uses the BigQuery Storage Write API (https://cloud.google.com/bigquery/docs/write-api). The default value is `false`. For more information, see Using the Storage Write API (https://beam.apache.org/documentation/io/built-in/google-bigquery/#storage-write-api). +* **numStorageWriteApiStreams**: When using the Storage Write API, specifies the number of write streams. If `useStorageWriteApi` is `true` and `useStorageWriteApiAtLeastOnce` is `false`, then you must set this parameter. Defaults to: 0. +* **storageWriteApiTriggeringFrequencySec**: When using the Storage Write API, specifies the triggering frequency, in seconds. If `useStorageWriteApi` is `true` and `useStorageWriteApiAtLeastOnce` is `false`, then you must set this parameter. @@ -221,9 +221,9 @@ resource "google_dataflow_flex_template_job" "pubsub_avro_to_bigquery" { region = var.region parameters = { schemaPath = "" - inputSubscription = "projects//subscription/" + inputSubscription = "" outputTableSpec = "" - outputTopic = "projects//topics/" + outputTopic = "" # useStorageWriteApiAtLeastOnce = "false" # writeDisposition = "WRITE_APPEND" # createDisposition = "CREATE_IF_NEEDED" diff --git a/v2/pubsub-binary-to-bigquery/README_PubSub_Proto_to_BigQuery_Flex.md b/v2/pubsub-binary-to-bigquery/README_PubSub_Proto_to_BigQuery_Flex.md index 01d0802b7d..0768dcf896 100644 --- a/v2/pubsub-binary-to-bigquery/README_PubSub_Proto_to_BigQuery_Flex.md +++ b/v2/pubsub-binary-to-bigquery/README_PubSub_Proto_to_BigQuery_Flex.md @@ -23,26 +23,26 @@ on [Metadata Annotations](https://github.com/GoogleCloudPlatform/DataflowTemplat ### Required parameters -* **protoSchemaPath** : The Cloud Storage location of the self-contained proto schema file. For example, gs://path/to/my/file.pb. You can generate this file with the `--descriptor_set_out` flag of the protoc command. The `--include_imports` flag guarantees that the file is self-contained. -* **fullMessageName** : The full proto message name. For example, `package.name`. `MessageName`, where `package.name` is the value provided for the `package` statement and not the `java_package` statement. -* **inputSubscription** : The Pub/Sub input subscription to read from. (Example: projects//subscription/). -* **outputTableSpec** : The BigQuery output table location to write the output to. 
For example, `:.`.Depending on the `createDisposition` specified, the output table might be created automatically using the user provided Avro schema. -* **outputTopic** : The Pub/Sub topic to use for unprocessed records. (Example: projects//topics/). +* **protoSchemaPath**: The Cloud Storage location of the self-contained proto schema file. For example, `gs://path/to/my/file.pb`. You can generate this file with the `--descriptor_set_out` flag of the protoc command. The `--include_imports` flag guarantees that the file is self-contained. +* **fullMessageName**: The full proto message name. For example, `package.name`. `MessageName`, where `package.name` is the value provided for the `package` statement and not the `java_package` statement. +* **inputSubscription**: The Pub/Sub input subscription to read from. For example, `projects//subscription/`. +* **outputTableSpec**: The BigQuery output table location to write the output to. For example, `:.`.Depending on the `createDisposition` specified, the output table might be created automatically using the user provided Avro schema. +* **outputTopic**: The Pub/Sub topic to use for unprocessed records. For example, `projects//topics/`. ### Optional parameters -* **preserveProtoFieldNames** : To preserve the original proto field name in JSON, set this property to true. To use more standard JSON names, set to false. For example, `false` would change `field_name` to `fieldName`. Defaults to: false. -* **bigQueryTableSchemaPath** : The Cloud Storage path to the BigQuery schema path. If this value isn't provided, then the schema is inferred from the Proto schema. (Example: gs://MyBucket/bq_schema.json). -* **udfOutputTopic** : The Pub/Sub topic storing the UDF errors. If this value isn't provided, UDF errors are sent to the same topic as `outputTopic`. (Example: projects/your-project-id/topics/your-topic-name). -* **useStorageWriteApiAtLeastOnce** : When using the Storage Write API, specifies the write semantics. To use at-least-once semantics (https://beam.apache.org/documentation/io/built-in/google-bigquery/#at-least-once-semantics), set this parameter to true`. To use exactly-once semantics, set the parameter to `false`. This parameter applies only when `useStorageWriteApi` is `true`. The default value is `false`. -* **writeDisposition** : The BigQuery WriteDisposition (https://cloud.google.com/bigquery/docs/reference/rest/v2/Job#jobconfigurationload) value. For example, `WRITE_APPEND`, `WRITE_EMPTY`, or `WRITE_TRUNCATE`. Defaults to `WRITE_APPEND`. -* **createDisposition** : The BigQuery CreateDisposition (https://cloud.google.com/bigquery/docs/reference/rest/v2/Job#jobconfigurationload). For example, `CREATE_IF_NEEDED` and `CREATE_NEVER`. Defaults to `CREATE_IF_NEEDED`. -* **javascriptTextTransformGcsPath** : The Cloud Storage URI of the .js file that defines the JavaScript user-defined function (UDF) to use. (Example: gs://my-bucket/my-udfs/my_file.js). -* **javascriptTextTransformFunctionName** : The name of the JavaScript user-defined function (UDF) to use. For example, if your JavaScript function code is `myTransform(inJson) { /*...do stuff...*/ }`, then the function name is `myTransform`. For sample JavaScript UDFs, see UDF Examples (https://github.com/GoogleCloudPlatform/DataflowTemplates#udf-examples). -* **javascriptTextTransformReloadIntervalMinutes** : Specifies how frequently to reload the UDF, in minutes. 
If the value is greater than 0, Dataflow periodically checks the UDF file in Cloud Storage, and reloads the UDF if the file is modified. This parameter allows you to update the UDF while the pipeline is running, without needing to restart the job. If the value is 0, UDF reloading is disabled. The default value is 0. -* **useStorageWriteApi** : If true, the pipeline uses the BigQuery Storage Write API (https://cloud.google.com/bigquery/docs/write-api). The default value is `false`. For more information, see Using the Storage Write API (https://beam.apache.org/documentation/io/built-in/google-bigquery/#storage-write-api). -* **numStorageWriteApiStreams** : When using the Storage Write API, specifies the number of write streams. If `useStorageWriteApi` is `true` and `useStorageWriteApiAtLeastOnce` is `false`, then you must set this parameter. Defaults to: 0. -* **storageWriteApiTriggeringFrequencySec** : When using the Storage Write API, specifies the triggering frequency, in seconds. If `useStorageWriteApi` is `true` and `useStorageWriteApiAtLeastOnce` is `false`, then you must set this parameter. +* **preserveProtoFieldNames**: To preserve the original proto field name in JSON, set this property to `true`. To use more standard JSON names, set to `false`. For example, `false` would change `field_name` to `fieldName`. Defaults to: `false`. +* **bigQueryTableSchemaPath**: The Cloud Storage path to the BigQuery schema path. If this value isn't provided, then the schema is inferred from the Proto schema. For example, `gs://MyBucket/bq_schema.json`. +* **udfOutputTopic**: The Pub/Sub topic storing the UDF errors. If this value isn't provided, UDF errors are sent to the same topic as `outputTopic`. For example, `projects/your-project-id/topics/your-topic-name`. +* **useStorageWriteApiAtLeastOnce**: When using the Storage Write API, specifies the write semantics. To use at-least-once semantics (https://beam.apache.org/documentation/io/built-in/google-bigquery/#at-least-once-semantics), set this parameter to true`. To use exactly-once semantics, set the parameter to `false`. This parameter applies only when `useStorageWriteApi` is `true`. The default value is `false`. +* **writeDisposition**: The BigQuery WriteDisposition (https://cloud.google.com/bigquery/docs/reference/rest/v2/Job#jobconfigurationload) value. For example, `WRITE_APPEND`, `WRITE_EMPTY`, or `WRITE_TRUNCATE`. Defaults to `WRITE_APPEND`. +* **createDisposition**: The BigQuery CreateDisposition (https://cloud.google.com/bigquery/docs/reference/rest/v2/Job#jobconfigurationload). For example, `CREATE_IF_NEEDED` and `CREATE_NEVER`. Defaults to `CREATE_IF_NEEDED`. +* **javascriptTextTransformGcsPath**: The Cloud Storage URI of the .js file that defines the JavaScript user-defined function (UDF) to use. For example, `gs://my-bucket/my-udfs/my_file.js`. +* **javascriptTextTransformFunctionName**: The name of the JavaScript user-defined function (UDF) to use. For example, if your JavaScript function code is `myTransform(inJson) { /*...do stuff...*/ }`, then the function name is `myTransform`. For sample JavaScript UDFs, see UDF Examples (https://github.com/GoogleCloudPlatform/DataflowTemplates#udf-examples). +* **javascriptTextTransformReloadIntervalMinutes**: Specifies how frequently to reload the UDF, in minutes. If the value is greater than 0, Dataflow periodically checks the UDF file in Cloud Storage, and reloads the UDF if the file is modified. 
This parameter allows you to update the UDF while the pipeline is running, without needing to restart the job. If the value is `0`, UDF reloading is disabled. The default value is `0`. +* **useStorageWriteApi**: If true, the pipeline uses the BigQuery Storage Write API (https://cloud.google.com/bigquery/docs/write-api). The default value is `false`. For more information, see Using the Storage Write API (https://beam.apache.org/documentation/io/built-in/google-bigquery/#storage-write-api). +* **numStorageWriteApiStreams**: When using the Storage Write API, specifies the number of write streams. If `useStorageWriteApi` is `true` and `useStorageWriteApiAtLeastOnce` is `false`, then you must set this parameter. Defaults to: 0. +* **storageWriteApiTriggeringFrequencySec**: When using the Storage Write API, specifies the triggering frequency, in seconds. If `useStorageWriteApi` is `true` and `useStorageWriteApiAtLeastOnce` is `false`, then you must set this parameter. ## User-Defined functions (UDFs) @@ -264,16 +264,16 @@ resource "google_dataflow_flex_template_job" "pubsub_proto_to_bigquery_flex" { parameters = { protoSchemaPath = "" fullMessageName = "" - inputSubscription = "projects//subscription/" + inputSubscription = "" outputTableSpec = "" - outputTopic = "projects//topics/" + outputTopic = "" # preserveProtoFieldNames = "false" - # bigQueryTableSchemaPath = "gs://MyBucket/bq_schema.json" - # udfOutputTopic = "projects/your-project-id/topics/your-topic-name" + # bigQueryTableSchemaPath = "" + # udfOutputTopic = "" # useStorageWriteApiAtLeastOnce = "false" # writeDisposition = "WRITE_APPEND" # createDisposition = "CREATE_IF_NEEDED" - # javascriptTextTransformGcsPath = "gs://my-bucket/my-udfs/my_file.js" + # javascriptTextTransformGcsPath = "" # javascriptTextTransformFunctionName = "" # javascriptTextTransformReloadIntervalMinutes = "0" # useStorageWriteApi = "false" diff --git a/v2/pubsub-binary-to-bigquery/README_PubSub_Proto_to_BigQuery_Xlang.md b/v2/pubsub-binary-to-bigquery/README_PubSub_Proto_to_BigQuery_Xlang.md index 7987ba66bd..c0bde36166 100644 --- a/v2/pubsub-binary-to-bigquery/README_PubSub_Proto_to_BigQuery_Xlang.md +++ b/v2/pubsub-binary-to-bigquery/README_PubSub_Proto_to_BigQuery_Xlang.md @@ -23,25 +23,25 @@ on [Metadata Annotations](https://github.com/GoogleCloudPlatform/DataflowTemplat ### Required parameters -* **protoSchemaPath** : The Cloud Storage location of the self-contained proto schema file. For example, gs://path/to/my/file.pb. You can generate this file with the `--descriptor_set_out` flag of the protoc command. The `--include_imports` flag guarantees that the file is self-contained. -* **fullMessageName** : The full proto message name. For example, `package.name`. `MessageName`, where `package.name` is the value provided for the `package` statement and not the `java_package` statement. -* **inputSubscription** : The Pub/Sub input subscription to read from. (Example: projects//subscription/). -* **outputTableSpec** : The BigQuery output table location to write the output to. For example, `:.`.Depending on the `createDisposition` specified, the output table might be created automatically using the user provided Avro schema. -* **outputTopic** : The Pub/Sub topic to use for unprocessed records. (Example: projects//topics/). +* **protoSchemaPath**: The Cloud Storage location of the self-contained proto schema file. For example, `gs://path/to/my/file.pb`. You can generate this file with the `--descriptor_set_out` flag of the protoc command. 
The `--include_imports` flag guarantees that the file is self-contained. +* **fullMessageName**: The full proto message name. For example, `package.name`. `MessageName`, where `package.name` is the value provided for the `package` statement and not the `java_package` statement. +* **inputSubscription**: The Pub/Sub input subscription to read from. For example, `projects//subscription/`. +* **outputTableSpec**: The BigQuery output table location to write the output to. For example, `:.`.Depending on the `createDisposition` specified, the output table might be created automatically using the user provided Avro schema. +* **outputTopic**: The Pub/Sub topic to use for unprocessed records. For example, `projects//topics/`. ### Optional parameters -* **preserveProtoFieldNames** : To preserve the original proto field name in JSON, set this property to true. To use more standard JSON names, set to false. For example, `false` would change `field_name` to `fieldName`. Defaults to: false. -* **bigQueryTableSchemaPath** : The Cloud Storage path to the BigQuery schema path. If this value isn't provided, then the schema is inferred from the Proto schema. (Example: gs://MyBucket/bq_schema.json). -* **udfOutputTopic** : The Pub/Sub topic storing the UDF errors. If this value isn't provided, UDF errors are sent to the same topic as `outputTopic`. (Example: projects/your-project-id/topics/your-topic-name). -* **useStorageWriteApiAtLeastOnce** : When using the Storage Write API, specifies the write semantics. To use at-least-once semantics (https://beam.apache.org/documentation/io/built-in/google-bigquery/#at-least-once-semantics), set this parameter to true`. To use exactly-once semantics, set the parameter to `false`. This parameter applies only when `useStorageWriteApi` is `true`. The default value is `false`. -* **writeDisposition** : The BigQuery WriteDisposition (https://cloud.google.com/bigquery/docs/reference/rest/v2/Job#jobconfigurationload) value. For example, `WRITE_APPEND`, `WRITE_EMPTY`, or `WRITE_TRUNCATE`. Defaults to `WRITE_APPEND`. -* **createDisposition** : The BigQuery CreateDisposition (https://cloud.google.com/bigquery/docs/reference/rest/v2/Job#jobconfigurationload). For example, `CREATE_IF_NEEDED` and `CREATE_NEVER`. Defaults to `CREATE_IF_NEEDED`. -* **pythonExternalTextTransformGcsPath** : The Cloud Storage path pattern for the Python code containing your user-defined functions. (Example: gs://your-bucket/your-function.py). -* **pythonExternalTextTransformFunctionName** : The name of the function to call from your Python file. Use only letters, digits, and underscores. (Example: 'transform' or 'transform_udf1'). -* **useStorageWriteApi** : If true, the pipeline uses the BigQuery Storage Write API (https://cloud.google.com/bigquery/docs/write-api). The default value is `false`. For more information, see Using the Storage Write API (https://beam.apache.org/documentation/io/built-in/google-bigquery/#storage-write-api). -* **numStorageWriteApiStreams** : When using the Storage Write API, specifies the number of write streams. If `useStorageWriteApi` is `true` and `useStorageWriteApiAtLeastOnce` is `false`, then you must set this parameter. Defaults to: 0. -* **storageWriteApiTriggeringFrequencySec** : When using the Storage Write API, specifies the triggering frequency, in seconds. If `useStorageWriteApi` is `true` and `useStorageWriteApiAtLeastOnce` is `false`, then you must set this parameter. 
+* **preserveProtoFieldNames**: To preserve the original proto field name in JSON, set this property to `true`. To use more standard JSON names, set to `false`. For example, `false` would change `field_name` to `fieldName`. Defaults to: `false`. +* **bigQueryTableSchemaPath**: The Cloud Storage path to the BigQuery schema path. If this value isn't provided, then the schema is inferred from the Proto schema. For example, `gs://MyBucket/bq_schema.json`. +* **udfOutputTopic**: The Pub/Sub topic storing the UDF errors. If this value isn't provided, UDF errors are sent to the same topic as `outputTopic`. For example, `projects/your-project-id/topics/your-topic-name`. +* **useStorageWriteApiAtLeastOnce**: When using the Storage Write API, specifies the write semantics. To use at-least-once semantics (https://beam.apache.org/documentation/io/built-in/google-bigquery/#at-least-once-semantics), set this parameter to `true`. To use exactly-once semantics, set the parameter to `false`. This parameter applies only when `useStorageWriteApi` is `true`. The default value is `false`. +* **writeDisposition**: The BigQuery WriteDisposition (https://cloud.google.com/bigquery/docs/reference/rest/v2/Job#jobconfigurationload) value. For example, `WRITE_APPEND`, `WRITE_EMPTY`, or `WRITE_TRUNCATE`. Defaults to `WRITE_APPEND`. +* **createDisposition**: The BigQuery CreateDisposition (https://cloud.google.com/bigquery/docs/reference/rest/v2/Job#jobconfigurationload). For example, `CREATE_IF_NEEDED` and `CREATE_NEVER`. Defaults to `CREATE_IF_NEEDED`. +* **pythonExternalTextTransformGcsPath**: The Cloud Storage path pattern for the Python code containing your user-defined functions. For example, `gs://your-bucket/your-function.py`. +* **pythonExternalTextTransformFunctionName**: The name of the function to call from your Python file. Use only letters, digits, and underscores. For example, `transform` or `transform_udf1`. +* **useStorageWriteApi**: If true, the pipeline uses the BigQuery Storage Write API (https://cloud.google.com/bigquery/docs/write-api). The default value is `false`. For more information, see Using the Storage Write API (https://beam.apache.org/documentation/io/built-in/google-bigquery/#storage-write-api). +* **numStorageWriteApiStreams**: When using the Storage Write API, specifies the number of write streams. If `useStorageWriteApi` is `true` and `useStorageWriteApiAtLeastOnce` is `false`, then you must set this parameter. Defaults to: 0. +* **storageWriteApiTriggeringFrequencySec**: When using the Storage Write API, specifies the triggering frequency, in seconds. If `useStorageWriteApi` is `true` and `useStorageWriteApiAtLeastOnce` is `false`, then you must set this parameter.
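Both Proto-to-BigQuery variants above take `protoSchemaPath`, which must point to a self-contained descriptor set built with the `--descriptor_set_out` and `--include_imports` flags mentioned in the parameter description. A minimal sketch of generating and staging one follows; the proto file, output name, and bucket are placeholders.

```sh
# Build a self-contained descriptor set (--include_imports bundles every
# imported .proto) and stage it where the template's protoSchemaPath points.
protoc \
  --proto_path=. \
  --include_imports \
  --descriptor_set_out=schema.pb \
  my_messages.proto

gsutil cp schema.pb gs://your-bucket/schemas/schema.pb
```

The `fullMessageName` parameter then combines the proto `package` and the message name declared in that file, in the form `package.name.MessageName`.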
@@ -250,17 +250,17 @@ resource "google_dataflow_flex_template_job" "pubsub_proto_to_bigquery_xlang" { parameters = { protoSchemaPath = "" fullMessageName = "" - inputSubscription = "projects//subscription/" + inputSubscription = "" outputTableSpec = "" - outputTopic = "projects//topics/" + outputTopic = "" # preserveProtoFieldNames = "false" - # bigQueryTableSchemaPath = "gs://MyBucket/bq_schema.json" - # udfOutputTopic = "projects/your-project-id/topics/your-topic-name" + # bigQueryTableSchemaPath = "" + # udfOutputTopic = "" # useStorageWriteApiAtLeastOnce = "false" # writeDisposition = "WRITE_APPEND" # createDisposition = "CREATE_IF_NEEDED" - # pythonExternalTextTransformGcsPath = "gs://your-bucket/your-function.py" - # pythonExternalTextTransformFunctionName = "'transform' or 'transform_udf1'" + # pythonExternalTextTransformGcsPath = "" + # pythonExternalTextTransformFunctionName = "" # useStorageWriteApi = "false" # numStorageWriteApiStreams = "0" # storageWriteApiTriggeringFrequencySec = "" diff --git a/v2/pubsub-binary-to-bigquery/src/main/java/com/google/cloud/teleport/v2/templates/PubsubProtoToBigQuery.java b/v2/pubsub-binary-to-bigquery/src/main/java/com/google/cloud/teleport/v2/templates/PubsubProtoToBigQuery.java index a5bcd521b5..8ef6f65738 100644 --- a/v2/pubsub-binary-to-bigquery/src/main/java/com/google/cloud/teleport/v2/templates/PubsubProtoToBigQuery.java +++ b/v2/pubsub-binary-to-bigquery/src/main/java/com/google/cloud/teleport/v2/templates/PubsubProtoToBigQuery.java @@ -164,7 +164,7 @@ public interface PubSubProtoToBigQueryOptions description = "Cloud Storage Path to the Proto Schema File", helpText = "The Cloud Storage location of the self-contained proto schema file. For example," - + " gs://path/to/my/file.pb. You can generate this file with" + + " `gs://path/to/my/file.pb`. You can generate this file with" + " the `--descriptor_set_out` flag of the protoc command." + " The `--include_imports` flag guarantees that the file is self-contained.") @Required @@ -190,9 +190,9 @@ public interface PubSubProtoToBigQueryOptions optional = true, description = "Preserve Proto Field Names", helpText = - "To preserve the original proto field name in JSON, set this property to true. " - + "To use more standard JSON names, set to false." - + " For example, `false` would change `field_name` to `fieldName`. Defaults to: false.") + "To preserve the original proto field name in JSON, set this property to `true`. " + + "To use more standard JSON names, set to `false`." + + " For example, `false` would change `field_name` to `fieldName`. Defaults to: `false`.") @Default.Boolean(false) Boolean getPreserveProtoFieldNames(); diff --git a/v2/pubsub-cdc-to-bigquery/README_PubSub_CDC_to_BigQuery.md b/v2/pubsub-cdc-to-bigquery/README_PubSub_CDC_to_BigQuery.md index 483a0b6f89..1783b80970 100644 --- a/v2/pubsub-cdc-to-bigquery/README_PubSub_CDC_to_BigQuery.md +++ b/v2/pubsub-cdc-to-bigquery/README_PubSub_CDC_to_BigQuery.md @@ -15,30 +15,30 @@ on [Metadata Annotations](https://github.com/GoogleCloudPlatform/DataflowTemplat ### Required parameters -* **inputSubscription** : Pub/Sub subscription to read the input from, in the format of 'projects/your-project-id/subscriptions/your-subscription-name' (Example: projects/your-project-id/subscriptions/your-subscription-name). -* **outputTableNameTemplate** : The location of the BigQuery table to write the output to. If a table does not already exist one will be created automatically. Defaults to: _metadata_table. 
+* **inputSubscription**: Pub/Sub subscription to read the input from, in the format of 'projects/your-project-id/subscriptions/your-subscription-name' For example, `projects/your-project-id/subscriptions/your-subscription-name`. +* **outputTableNameTemplate**: The location of the BigQuery table to write the output to. If a table does not already exist one will be created automatically. Defaults to: _metadata_table. ### Optional parameters -* **autoMapTables** : Determines if new columns and tables should be automatically created in BigQuery. Defaults to: true. -* **schemaFilePath** : This is the file location that contains the table definition to be used when creating the table in BigQuery. If left blank the table will get created with generic string typing. -* **outputDatasetTemplate** : The name for the dataset to contain the replica table. Defaults to: {_metadata_dataset}. -* **outputTableSpec** : BigQuery table location to write the output to. The name should be in the format `:.`. The table's schema must match input objects. -* **outputDeadletterTable** : BigQuery table for failed messages. Messages failed to reach the output table for different reasons (e.g., mismatched schema, malformed json) are written to this table. If it doesn't exist, it will be created during pipeline execution. If not specified, "outputTableSpec_error_records" is used instead. (Example: your-project-id:your-dataset.your-table-name). -* **deadLetterQueueDirectory** : The name of the directory on Cloud Storage you want to write dead letters messages to. -* **windowDuration** : The window duration/size in which DLQ data will be written to Cloud Storage. Allowed formats are: Ns (for seconds, example: 5s), Nm (for minutes, example: 12m), Nh (for hours, example: 2h). (Example: 5m). Defaults to: 5s. -* **threadCount** : The number of parallel threads you want to split your data into. Defaults to: 100. -* **javascriptTextTransformGcsPath** : The Cloud Storage URI of the .js file that defines the JavaScript user-defined function (UDF) to use. (Example: gs://my-bucket/my-udfs/my_file.js). -* **javascriptTextTransformFunctionName** : The name of the JavaScript user-defined function (UDF) to use. For example, if your JavaScript function code is `myTransform(inJson) { /*...do stuff...*/ }`, then the function name is `myTransform`. For sample JavaScript UDFs, see UDF Examples (https://github.com/GoogleCloudPlatform/DataflowTemplates#udf-examples). -* **javascriptTextTransformReloadIntervalMinutes** : Specifies how frequently to reload the UDF, in minutes. If the value is greater than 0, Dataflow periodically checks the UDF file in Cloud Storage, and reloads the UDF if the file is modified. This parameter allows you to update the UDF while the pipeline is running, without needing to restart the job. If the value is 0, UDF reloading is disabled. The default value is 0. -* **pythonTextTransformGcsPath** : The Cloud Storage path pattern for the Python code containing your user-defined functions. (Example: gs://your-bucket/your-transforms/*.py). -* **pythonRuntimeVersion** : The runtime version to use for this Python UDF. -* **pythonTextTransformFunctionName** : The name of the function to call from your JavaScript file. Use only letters, digits, and underscores. (Example: transform_udf1). -* **runtimeRetries** : The number of times a runtime will be retried before failing. Defaults to: 5. -* **useStorageWriteApi** : If true, the pipeline uses the BigQuery Storage Write API (https://cloud.google.com/bigquery/docs/write-api). 
The default value is `false`. For more information, see Using the Storage Write API (https://beam.apache.org/documentation/io/built-in/google-bigquery/#storage-write-api). -* **useStorageWriteApiAtLeastOnce** : When using the Storage Write API, specifies the write semantics. To use at-least once semantics (https://beam.apache.org/documentation/io/built-in/google-bigquery/#at-least-once-semantics), set this parameter to `true`. To use exactly-once semantics, set the parameter to `false`. This parameter applies only when `useStorageWriteApi` is `true`. The default value is `false`. -* **numStorageWriteApiStreams** : When using the Storage Write API, specifies the number of write streams. If `useStorageWriteApi` is `true` and `useStorageWriteApiAtLeastOnce` is `false`, then you must set this parameter. Defaults to: 0. -* **storageWriteApiTriggeringFrequencySec** : When using the Storage Write API, specifies the triggering frequency, in seconds. If `useStorageWriteApi` is `true` and `useStorageWriteApiAtLeastOnce` is `false`, then you must set this parameter. +* **autoMapTables**: Determines if new columns and tables should be automatically created in BigQuery. Defaults to: true. +* **schemaFilePath**: This is the file location that contains the table definition to be used when creating the table in BigQuery. If left blank the table will get created with generic string typing. +* **outputDatasetTemplate**: The name for the dataset to contain the replica table. Defaults to: {_metadata_dataset}. +* **outputTableSpec**: BigQuery table location to write the output to. The name should be in the format `:.`. The table's schema must match input objects. +* **outputDeadletterTable**: BigQuery table for failed messages. Messages failed to reach the output table for different reasons (e.g., mismatched schema, malformed json) are written to this table. If it doesn't exist, it will be created during pipeline execution. If not specified, "outputTableSpec_error_records" is used instead. For example, `your-project-id:your-dataset.your-table-name`. +* **deadLetterQueueDirectory**: The name of the directory on Cloud Storage you want to write dead letters messages to. +* **windowDuration**: The window duration/size in which DLQ data will be written to Cloud Storage. Allowed formats are: Ns (for seconds, example: 5s), Nm (for minutes, example: 12m), Nh (for hours, example: 2h). For example, `5m`. Defaults to: 5s. +* **threadCount**: The number of parallel threads you want to split your data into. Defaults to: 100. +* **javascriptTextTransformGcsPath**: The Cloud Storage URI of the .js file that defines the JavaScript user-defined function (UDF) to use. For example, `gs://my-bucket/my-udfs/my_file.js`. +* **javascriptTextTransformFunctionName**: The name of the JavaScript user-defined function (UDF) to use. For example, if your JavaScript function code is `myTransform(inJson) { /*...do stuff...*/ }`, then the function name is `myTransform`. For sample JavaScript UDFs, see UDF Examples (https://github.com/GoogleCloudPlatform/DataflowTemplates#udf-examples). +* **javascriptTextTransformReloadIntervalMinutes**: Specifies how frequently to reload the UDF, in minutes. If the value is greater than 0, Dataflow periodically checks the UDF file in Cloud Storage, and reloads the UDF if the file is modified. This parameter allows you to update the UDF while the pipeline is running, without needing to restart the job. If the value is `0`, UDF reloading is disabled. The default value is `0`. 
+* **pythonTextTransformGcsPath**: The Cloud Storage path pattern for the Python code containing your user-defined functions. For example, `gs://your-bucket/your-transforms/*.py`. +* **pythonRuntimeVersion**: The runtime version to use for this Python UDF. +* **pythonTextTransformFunctionName**: The name of the function to call from your JavaScript file. Use only letters, digits, and underscores. For example, `transform_udf1`. +* **runtimeRetries**: The number of times a runtime will be retried before failing. Defaults to: 5. +* **useStorageWriteApi**: If true, the pipeline uses the BigQuery Storage Write API (https://cloud.google.com/bigquery/docs/write-api). The default value is `false`. For more information, see Using the Storage Write API (https://beam.apache.org/documentation/io/built-in/google-bigquery/#storage-write-api). +* **useStorageWriteApiAtLeastOnce**: When using the Storage Write API, specifies the write semantics. To use at-least once semantics (https://beam.apache.org/documentation/io/built-in/google-bigquery/#at-least-once-semantics), set this parameter to `true`. To use exactly-once semantics, set the parameter to `false`. This parameter applies only when `useStorageWriteApi` is `true`. The default value is `false`. +* **numStorageWriteApiStreams**: When using the Storage Write API, specifies the number of write streams. If `useStorageWriteApi` is `true` and `useStorageWriteApiAtLeastOnce` is `false`, then you must set this parameter. Defaults to: 0. +* **storageWriteApiTriggeringFrequencySec**: When using the Storage Write API, specifies the triggering frequency, in seconds. If `useStorageWriteApi` is `true` and `useStorageWriteApiAtLeastOnce` is `false`, then you must set this parameter. ## User-Defined functions (UDFs) @@ -270,22 +270,22 @@ resource "google_dataflow_flex_template_job" "pubsub_cdc_to_bigquery" { name = "pubsub-cdc-to-bigquery" region = var.region parameters = { - inputSubscription = "projects/your-project-id/subscriptions/your-subscription-name" + inputSubscription = "" outputTableNameTemplate = "_metadata_table" # autoMapTables = "true" # schemaFilePath = "" # outputDatasetTemplate = "{_metadata_dataset}" # outputTableSpec = "" - # outputDeadletterTable = "your-project-id:your-dataset.your-table-name" + # outputDeadletterTable = "" # deadLetterQueueDirectory = "" - # windowDuration = "5m" + # windowDuration = "5s" # threadCount = "100" - # javascriptTextTransformGcsPath = "gs://my-bucket/my-udfs/my_file.js" + # javascriptTextTransformGcsPath = "" # javascriptTextTransformFunctionName = "" # javascriptTextTransformReloadIntervalMinutes = "0" - # pythonTextTransformGcsPath = "gs://your-bucket/your-transforms/*.py" + # pythonTextTransformGcsPath = "" # pythonRuntimeVersion = "" - # pythonTextTransformFunctionName = "transform_udf1" + # pythonTextTransformFunctionName = "" # runtimeRetries = "5" # useStorageWriteApi = "false" # useStorageWriteApiAtLeastOnce = "false" diff --git a/v2/pubsub-to-jms/README_Pubsub_to_Jms.md b/v2/pubsub-to-jms/README_Pubsub_to_Jms.md index 3f6c19ffcb..765d1431b6 100644 --- a/v2/pubsub-to-jms/README_Pubsub_to_Jms.md +++ b/v2/pubsub-to-jms/README_Pubsub_to_Jms.md @@ -14,15 +14,15 @@ on [Metadata Annotations](https://github.com/GoogleCloudPlatform/DataflowTemplat ### Required parameters -* **inputSubscription** : Pub/Sub subscription to read the input from, in the format of 'projects/your-project-id/subscriptions/your-subscription-name' (Example: projects/your-project-id/subscriptions/your-subscription-name). 
-* **outputName** : JMS Queue/Topic Name to write the input to. (Example: queue). -* **outputType** : JMS Destination Type to Write the input to. (Example: queue). -* **username** : JMS username for authentication with JMS server (Example: sampleusername). -* **password** : Password for username provided for authentication with JMS server (Example: samplepassword). +* **inputSubscription**: Pub/Sub subscription to read the input from, in the format of 'projects/your-project-id/subscriptions/your-subscription-name' For example, `projects/your-project-id/subscriptions/your-subscription-name`. +* **outputName**: JMS Queue/Topic Name to write the input to. For example, `queue`. +* **outputType**: JMS Destination Type to Write the input to. For example, `queue`. +* **username**: JMS username for authentication with JMS server For example, `sampleusername`. +* **password**: Password for username provided for authentication with JMS server For example, `samplepassword`. ### Optional parameters -* **jmsServer** : Server IP for JMS Host (Example: host:5672). +* **jmsServer**: Server IP for JMS Host For example, `host:5672`. @@ -199,12 +199,12 @@ resource "google_dataflow_flex_template_job" "pubsub_to_jms" { name = "pubsub-to-jms" region = var.region parameters = { - inputSubscription = "projects/your-project-id/subscriptions/your-subscription-name" - outputName = "queue" - outputType = "queue" - username = "sampleusername" - password = "samplepassword" - # jmsServer = "host:5672" + inputSubscription = "" + outputName = "" + outputType = "" + username = "" + password = "" + # jmsServer = "" } } ``` diff --git a/v2/pubsub-to-kafka/README_PubSub_to_Kafka.md b/v2/pubsub-to-kafka/README_PubSub_to_Kafka.md index d86eaa04c8..6d4ca3f500 100644 --- a/v2/pubsub-to-kafka/README_PubSub_to_Kafka.md +++ b/v2/pubsub-to-kafka/README_PubSub_to_Kafka.md @@ -14,18 +14,18 @@ on [Metadata Annotations](https://github.com/GoogleCloudPlatform/DataflowTemplat ### Required parameters -* **inputTopic** : The name of the topic from which data should published, in the format of 'projects/your-project-id/topics/your-topic-name' (Example: projects/your-project-id/topics/your-topic-name). -* **outputTopic** : Kafka topic to write the input from pubsub. (Example: topic). -* **outputDeadLetterTopic** : The Pub/Sub topic to publish deadletter records to. The name should be in the format of projects/your-project-id/topics/your-topic-name. +* **inputTopic**: The name of the topic from which data should published, in the format of 'projects/your-project-id/topics/your-topic-name' For example, `projects/your-project-id/topics/your-topic-name`. +* **outputTopic**: Kafka topic to write the input from pubsub. For example, `topic`. +* **outputDeadLetterTopic**: The Pub/Sub topic to publish deadletter records to. The name should be in the format of projects/your-project-id/topics/your-topic-name. ### Optional parameters -* **bootstrapServer** : Kafka Bootstrap Server (Example: localhost:9092). -* **secretStoreUrl** : URL to credentials in Vault. -* **vaultToken** : Token to use for Vault. -* **javascriptTextTransformGcsPath** : The Cloud Storage URI of the .js file that defines the JavaScript user-defined function (UDF) to use. (Example: gs://my-bucket/my-udfs/my_file.js). -* **javascriptTextTransformFunctionName** : The name of the JavaScript user-defined function (UDF) to use. For example, if your JavaScript function code is `myTransform(inJson) { /*...do stuff...*/ }`, then the function name is `myTransform`. 
For sample JavaScript UDFs, see UDF Examples (https://github.com/GoogleCloudPlatform/DataflowTemplates#udf-examples). -* **javascriptTextTransformReloadIntervalMinutes** : Specifies how frequently to reload the UDF, in minutes. If the value is greater than 0, Dataflow periodically checks the UDF file in Cloud Storage, and reloads the UDF if the file is modified. This parameter allows you to update the UDF while the pipeline is running, without needing to restart the job. If the value is 0, UDF reloading is disabled. The default value is 0. +* **bootstrapServer**: Kafka Bootstrap Server. For example, `localhost:9092`. +* **secretStoreUrl**: URL to credentials in Vault. +* **vaultToken**: Token to use for Vault. +* **javascriptTextTransformGcsPath**: The Cloud Storage URI of the .js file that defines the JavaScript user-defined function (UDF) to use. For example, `gs://my-bucket/my-udfs/my_file.js`. +* **javascriptTextTransformFunctionName**: The name of the JavaScript user-defined function (UDF) to use. For example, if your JavaScript function code is `myTransform(inJson) { /*...do stuff...*/ }`, then the function name is `myTransform`. For sample JavaScript UDFs, see UDF Examples (https://github.com/GoogleCloudPlatform/DataflowTemplates#udf-examples). +* **javascriptTextTransformReloadIntervalMinutes**: Specifies how frequently to reload the UDF, in minutes. If the value is greater than 0, Dataflow periodically checks the UDF file in Cloud Storage, and reloads the UDF if the file is modified. This parameter allows you to update the UDF while the pipeline is running, without needing to restart the job. If the value is `0`, UDF reloading is disabled. The default value is `0`. ## User-Defined functions (UDFs) @@ -221,13 +221,13 @@ resource "google_dataflow_flex_template_job" "pubsub_to_kafka" { name = "pubsub-to-kafka" region = var.region parameters = { - inputTopic = "projects/your-project-id/topics/your-topic-name" - outputTopic = "topic" + inputTopic = "" + outputTopic = "" outputDeadLetterTopic = "" - # bootstrapServer = "localhost:9092" + # bootstrapServer = "" # secretStoreUrl = "" # vaultToken = "" - # javascriptTextTransformGcsPath = "gs://my-bucket/my-udfs/my_file.js" + # javascriptTextTransformGcsPath = "" # javascriptTextTransformFunctionName = "" # javascriptTextTransformReloadIntervalMinutes = "0" } diff --git a/v2/pubsub-to-mongodb/README_Cloud_PubSub_to_MongoDB.md b/v2/pubsub-to-mongodb/README_Cloud_PubSub_to_MongoDB.md index 5e13f69c30..1eeb98c5fb 100644 --- a/v2/pubsub-to-mongodb/README_Cloud_PubSub_to_MongoDB.md +++ b/v2/pubsub-to-mongodb/README_Cloud_PubSub_to_MongoDB.md @@ -23,24 +23,24 @@ on [Metadata Annotations](https://github.com/GoogleCloudPlatform/DataflowTemplat ### Required parameters -* **inputSubscription** : Name of the Pub/Sub subscription. (Example: projects/your-project-id/subscriptions/your-subscription-name). -* **mongoDBUri** : Comma separated list of MongoDB servers. (Example: host1:port,host2:port,host3:port). -* **database** : Database in MongoDB to store the collection. (Example: my-db). -* **collection** : Name of the collection in the MongoDB database. (Example: my-collection). -* **deadletterTable** : The BigQuery table that stores messages caused by failures, such as mismatched schema, malformed JSON, and so on. (Example: your-project-id:your-dataset.your-table-name). +* **inputSubscription**: Name of the Pub/Sub subscription. For example, `projects/your-project-id/subscriptions/your-subscription-name`.
+* **mongoDBUri**: Comma separated list of MongoDB servers. For example, `host1:port,host2:port,host3:port`. +* **database**: Database in MongoDB to store the collection. For example, `my-db`. +* **collection**: Name of the collection in the MongoDB database. For example, `my-collection`. +* **deadletterTable**: The BigQuery table that stores messages caused by failures, such as mismatched schema, malformed JSON, and so on. For example, `your-project-id:your-dataset.your-table-name`. ### Optional parameters -* **batchSize** : Batch size used for batch insertion of documents into MongoDB. Defaults to: 1000. -* **batchSizeBytes** : Batch size in bytes. Defaults to: 5242880. -* **maxConnectionIdleTime** : Maximum idle time allowed in seconds before connection timeout occurs. Defaults to: 60000. -* **sslEnabled** : Boolean value indicating whether the connection to MongoDB is SSL enabled. Defaults to: true. -* **ignoreSSLCertificate** : Boolean value indicating whether to ignore the SSL certificate. Defaults to: true. -* **withOrdered** : Boolean value enabling ordered bulk insertions into MongoDB. Defaults to: true. -* **withSSLInvalidHostNameAllowed** : Boolean value indicating whether an invalid hostname is allowed for the SSL connection. Defaults to: true. -* **javascriptTextTransformGcsPath** : The Cloud Storage URI of the .js file that defines the JavaScript user-defined function (UDF) to use. (Example: gs://my-bucket/my-udfs/my_file.js). -* **javascriptTextTransformFunctionName** : The name of the JavaScript user-defined function (UDF) to use. For example, if your JavaScript function code is `myTransform(inJson) { /*...do stuff...*/ }`, then the function name is `myTransform`. For sample JavaScript UDFs, see UDF Examples (https://github.com/GoogleCloudPlatform/DataflowTemplates#udf-examples). -* **javascriptTextTransformReloadIntervalMinutes** : Specifies how frequently to reload the UDF, in minutes. If the value is greater than 0, Dataflow periodically checks the UDF file in Cloud Storage, and reloads the UDF if the file is modified. This parameter allows you to update the UDF while the pipeline is running, without needing to restart the job. If the value is 0, UDF reloading is disabled. The default value is 0. +* **batchSize**: Batch size used for batch insertion of documents into MongoDB. Defaults to: 1000. +* **batchSizeBytes**: Batch size in bytes. Defaults to: 5242880. +* **maxConnectionIdleTime**: Maximum idle time allowed in seconds before connection timeout occurs. Defaults to: 60000. +* **sslEnabled**: Boolean value indicating whether the connection to MongoDB is SSL enabled. Defaults to: true. +* **ignoreSSLCertificate**: Boolean value indicating whether to ignore the SSL certificate. Defaults to: true. +* **withOrdered**: Boolean value enabling ordered bulk insertions into MongoDB. Defaults to: true. +* **withSSLInvalidHostNameAllowed**: Boolean value indicating whether an invalid hostname is allowed for the SSL connection. Defaults to: true. +* **javascriptTextTransformGcsPath**: The Cloud Storage URI of the .js file that defines the JavaScript user-defined function (UDF) to use. For example, `gs://my-bucket/my-udfs/my_file.js`. +* **javascriptTextTransformFunctionName**: The name of the JavaScript user-defined function (UDF) to use. For example, if your JavaScript function code is `myTransform(inJson) { /*...do stuff...*/ }`, then the function name is `myTransform`. 
For sample JavaScript UDFs, see UDF Examples (https://github.com/GoogleCloudPlatform/DataflowTemplates#udf-examples). +* **javascriptTextTransformReloadIntervalMinutes**: Specifies how frequently to reload the UDF, in minutes. If the value is greater than 0, Dataflow periodically checks the UDF file in Cloud Storage, and reloads the UDF if the file is modified. This parameter allows you to update the UDF while the pipeline is running, without needing to restart the job. If the value is `0`, UDF reloading is disabled. The default value is `0`. ## User-Defined functions (UDFs) @@ -254,11 +254,11 @@ resource "google_dataflow_flex_template_job" "cloud_pubsub_to_mongodb" { name = "cloud-pubsub-to-mongodb" region = var.region parameters = { - inputSubscription = "projects/your-project-id/subscriptions/your-subscription-name" - mongoDBUri = "host1:port,host2:port,host3:port" - database = "my-db" - collection = "my-collection" - deadletterTable = "your-project-id:your-dataset.your-table-name" + inputSubscription = "" + mongoDBUri = "" + database = "" + collection = "" + deadletterTable = "" # batchSize = "1000" # batchSizeBytes = "5242880" # maxConnectionIdleTime = "60000" @@ -266,7 +266,7 @@ resource "google_dataflow_flex_template_job" "cloud_pubsub_to_mongodb" { # ignoreSSLCertificate = "true" # withOrdered = "true" # withSSLInvalidHostNameAllowed = "true" - # javascriptTextTransformGcsPath = "gs://my-bucket/my-udfs/my_file.js" + # javascriptTextTransformGcsPath = "" # javascriptTextTransformFunctionName = "" # javascriptTextTransformReloadIntervalMinutes = "0" } diff --git a/v2/pubsub-to-mongodb/README_Cloud_PubSub_to_MongoDB_Xlang.md b/v2/pubsub-to-mongodb/README_Cloud_PubSub_to_MongoDB_Xlang.md index d5a9977e44..21c811395e 100644 --- a/v2/pubsub-to-mongodb/README_Cloud_PubSub_to_MongoDB_Xlang.md +++ b/v2/pubsub-to-mongodb/README_Cloud_PubSub_to_MongoDB_Xlang.md @@ -23,23 +23,23 @@ on [Metadata Annotations](https://github.com/GoogleCloudPlatform/DataflowTemplat ### Required parameters -* **inputSubscription** : Name of the Pub/Sub subscription. (Example: projects/your-project-id/subscriptions/your-subscription-name). -* **mongoDBUri** : Comma separated list of MongoDB servers. (Example: host1:port,host2:port,host3:port). -* **database** : Database in MongoDB to store the collection. (Example: my-db). -* **collection** : Name of the collection in the MongoDB database. (Example: my-collection). -* **deadletterTable** : The BigQuery table that stores messages caused by failures, such as mismatched schema, malformed JSON, and so on. (Example: your-project-id:your-dataset.your-table-name). +* **inputSubscription**: Name of the Pub/Sub subscription. For example, `projects/your-project-id/subscriptions/your-subscription-name`. +* **mongoDBUri**: Comma separated list of MongoDB servers. For example, `host1:port,host2:port,host3:port`. +* **database**: Database in MongoDB to store the collection. For example, `my-db`. +* **collection**: Name of the collection in the MongoDB database. For example, `my-collection`. +* **deadletterTable**: The BigQuery table that stores messages caused by failures, such as mismatched schema, malformed JSON, and so on. For example, `your-project-id:your-dataset.your-table-name`. ### Optional parameters -* **batchSize** : Batch size used for batch insertion of documents into MongoDB. Defaults to: 1000. -* **batchSizeBytes** : Batch size in bytes. Defaults to: 5242880. -* **maxConnectionIdleTime** : Maximum idle time allowed in seconds before connection timeout occurs. 
Defaults to: 60000. -* **sslEnabled** : Boolean value indicating whether the connection to MongoDB is SSL enabled. Defaults to: true. -* **ignoreSSLCertificate** : Boolean value indicating whether to ignore the SSL certificate. Defaults to: true. -* **withOrdered** : Boolean value enabling ordered bulk insertions into MongoDB. Defaults to: true. -* **withSSLInvalidHostNameAllowed** : Boolean value indicating whether an invalid hostname is allowed for the SSL connection. Defaults to: true. -* **pythonExternalTextTransformGcsPath** : The Cloud Storage path pattern for the Python code containing your user-defined functions. (Example: gs://your-bucket/your-function.py). -* **pythonExternalTextTransformFunctionName** : The name of the function to call from your Python file. Use only letters, digits, and underscores. (Example: 'transform' or 'transform_udf1'). +* **batchSize**: Batch size used for batch insertion of documents into MongoDB. Defaults to: 1000. +* **batchSizeBytes**: Batch size in bytes. Defaults to: 5242880. +* **maxConnectionIdleTime**: Maximum idle time allowed in seconds before connection timeout occurs. Defaults to: 60000. +* **sslEnabled**: Boolean value indicating whether the connection to MongoDB is SSL enabled. Defaults to: true. +* **ignoreSSLCertificate**: Boolean value indicating whether to ignore the SSL certificate. Defaults to: true. +* **withOrdered**: Boolean value enabling ordered bulk insertions into MongoDB. Defaults to: true. +* **withSSLInvalidHostNameAllowed**: Boolean value indicating whether an invalid hostname is allowed for the SSL connection. Defaults to: true. +* **pythonExternalTextTransformGcsPath**: The Cloud Storage path pattern for the Python code containing your user-defined functions. For example, `gs://your-bucket/your-function.py`. +* **pythonExternalTextTransformFunctionName**: The name of the function to call from your Python file. Use only letters, digits, and underscores. For example, `transform` or `transform_udf1`.
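As a quick reference for the Cloud_PubSub_to_MongoDB_Xlang parameters renamed above, here is a minimal invocation sketch. All values are placeholders, the job name is made up, and the template spec path assumes the flex staging convention shown elsewhere in these READMEs; only the parameter names themselves come from the documentation.

```
# Hypothetical run of a staged Cloud_PubSub_to_MongoDB_Xlang template (all values are placeholders).
export PROJECT=your-project-id
export REGION=us-central1
export BUCKET_NAME=your-staging-bucket
export TEMPLATE_SPEC_GCSPATH="gs://$BUCKET_NAME/templates/flex/Cloud_PubSub_to_MongoDB_Xlang"

gcloud dataflow flex-template run "cloud-pubsub-to-mongodb-xlang-job" \
  --project "$PROJECT" \
  --region "$REGION" \
  --template-file-gcs-location "$TEMPLATE_SPEC_GCSPATH" \
  --parameters "inputSubscription=projects/$PROJECT/subscriptions/your-subscription-name" \
  --parameters "mongoDBUri=your-mongodb-host:27017" \
  --parameters "database=my-db" \
  --parameters "collection=my-collection" \
  --parameters "deadletterTable=$PROJECT:your-dataset.your-table-name" \
  --parameters "pythonExternalTextTransformGcsPath=gs://your-bucket/your-function.py" \
  --parameters "pythonExternalTextTransformFunctionName=transform_udf1"
```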
@@ -240,11 +240,11 @@ resource "google_dataflow_flex_template_job" "cloud_pubsub_to_mongodb_xlang" { name = "cloud-pubsub-to-mongodb-xlang" region = var.region parameters = { - inputSubscription = "projects/your-project-id/subscriptions/your-subscription-name" - mongoDBUri = "host1:port,host2:port,host3:port" - database = "my-db" - collection = "my-collection" - deadletterTable = "your-project-id:your-dataset.your-table-name" + inputSubscription = "" + mongoDBUri = "" + database = "" + collection = "" + deadletterTable = "" # batchSize = "1000" # batchSizeBytes = "5242880" # maxConnectionIdleTime = "60000" @@ -252,8 +252,8 @@ resource "google_dataflow_flex_template_job" "cloud_pubsub_to_mongodb_xlang" { # ignoreSSLCertificate = "true" # withOrdered = "true" # withSSLInvalidHostNameAllowed = "true" - # pythonExternalTextTransformGcsPath = "gs://your-bucket/your-function.py" - # pythonExternalTextTransformFunctionName = "'transform' or 'transform_udf1'" + # pythonExternalTextTransformGcsPath = "" + # pythonExternalTextTransformFunctionName = "" } } ``` diff --git a/v2/pubsub-to-redis/README_Cloud_PubSub_to_Redis.md b/v2/pubsub-to-redis/README_Cloud_PubSub_to_Redis.md index a654b6cef8..48d20cc9b0 100644 --- a/v2/pubsub-to-redis/README_Cloud_PubSub_to_Redis.md +++ b/v2/pubsub-to-redis/README_Cloud_PubSub_to_Redis.md @@ -26,20 +26,20 @@ on [Metadata Annotations](https://github.com/GoogleCloudPlatform/DataflowTemplat ### Required parameters -* **inputSubscription** : The Pub/Sub subscription to read the input from, in the format projects//subscriptions/. (Example: projects/your-project-id/subscriptions/your-subscription-name). -* **redisHost** : The Redis database host. (Example: your.cloud.db.redislabs.com). Defaults to: 127.0.0.1. -* **redisPort** : The Redis database port. (Example: 12345). Defaults to: 6379. -* **redisPassword** : The Redis database password. Defaults to empty. +* **inputSubscription**: The Pub/Sub subscription to read the input from. For example, `projects//subscriptions/`. +* **redisHost**: The Redis database host. For example, `your.cloud.db.redislabs.com`. Defaults to: 127.0.0.1. +* **redisPort**: The Redis database port. For example, `12345`. Defaults to: 6379. +* **redisPassword**: The Redis database password. Defaults to `empty`. ### Optional parameters -* **sslEnabled** : The Redis database SSL parameter. Defaults to: false. -* **redisSinkType** : The Redis sink. Supported values are `STRING_SINK, HASH_SINK, STREAMS_SINK, and LOGGING_SINK`. (Example: STRING_SINK). Defaults to: STRING_SINK. -* **connectionTimeout** : The Redis connection timeout in milliseconds. (Example: 2000). Defaults to: 2000. -* **ttl** : The key expiration time in seconds. The `ttl` default for `HASH_SINK` is -1, which means it never expires. -* **javascriptTextTransformGcsPath** : The Cloud Storage URI of the .js file that defines the JavaScript user-defined function (UDF) to use. (Example: gs://my-bucket/my-udfs/my_file.js). -* **javascriptTextTransformFunctionName** : The name of the JavaScript user-defined function (UDF) to use. For example, if your JavaScript function code is `myTransform(inJson) { /*...do stuff...*/ }`, then the function name is `myTransform`. For sample JavaScript UDFs, see UDF Examples (https://github.com/GoogleCloudPlatform/DataflowTemplates#udf-examples). -* **javascriptTextTransformReloadIntervalMinutes** : Specifies how frequently to reload the UDF, in minutes. 
If the value is greater than 0, Dataflow periodically checks the UDF file in Cloud Storage, and reloads the UDF if the file is modified. This parameter allows you to update the UDF while the pipeline is running, without needing to restart the job. If the value is 0, UDF reloading is disabled. The default value is 0. +* **sslEnabled**: The Redis database SSL parameter. Defaults to: false. +* **redisSinkType**: The Redis sink. Supported values are `STRING_SINK, HASH_SINK, STREAMS_SINK, and LOGGING_SINK`. For example, `STRING_SINK`. Defaults to: STRING_SINK. +* **connectionTimeout**: The Redis connection timeout in milliseconds. For example, `2000`. Defaults to: 2000. +* **ttl**: The key expiration time in seconds. The `ttl` default for `HASH_SINK` is -1, which means it never expires. +* **javascriptTextTransformGcsPath**: The Cloud Storage URI of the .js file that defines the JavaScript user-defined function (UDF) to use. For example, `gs://my-bucket/my-udfs/my_file.js`. +* **javascriptTextTransformFunctionName**: The name of the JavaScript user-defined function (UDF) to use. For example, if your JavaScript function code is `myTransform(inJson) { /*...do stuff...*/ }`, then the function name is `myTransform`. For sample JavaScript UDFs, see UDF Examples (https://github.com/GoogleCloudPlatform/DataflowTemplates#udf-examples). +* **javascriptTextTransformReloadIntervalMinutes**: Specifies how frequently to reload the UDF, in minutes. If the value is greater than 0, Dataflow periodically checks the UDF file in Cloud Storage, and reloads the UDF if the file is modified. This parameter allows you to update the UDF while the pipeline is running, without needing to restart the job. If the value is `0`, UDF reloading is disabled. The default value is `0`. ## User-Defined functions (UDFs) @@ -241,15 +241,15 @@ resource "google_dataflow_flex_template_job" "cloud_pubsub_to_redis" { name = "cloud-pubsub-to-redis" region = var.region parameters = { - inputSubscription = "projects/your-project-id/subscriptions/your-subscription-name" - redisHost = "your.cloud.db.redislabs.com" - redisPort = "12345" + inputSubscription = "" + redisHost = "127.0.0.1" + redisPort = "6379" redisPassword = "" # sslEnabled = "false" # redisSinkType = "STRING_SINK" # connectionTimeout = "2000" # ttl = "-1" - # javascriptTextTransformGcsPath = "gs://my-bucket/my-udfs/my_file.js" + # javascriptTextTransformGcsPath = "" # javascriptTextTransformFunctionName = "" # javascriptTextTransformReloadIntervalMinutes = "0" } diff --git a/v2/pubsub-to-redis/src/main/java/com/google/cloud/teleport/v2/templates/PubSubToRedis.java b/v2/pubsub-to-redis/src/main/java/com/google/cloud/teleport/v2/templates/PubSubToRedis.java index 25db3a35c1..c0e35e550c 100644 --- a/v2/pubsub-to-redis/src/main/java/com/google/cloud/teleport/v2/templates/PubSubToRedis.java +++ b/v2/pubsub-to-redis/src/main/java/com/google/cloud/teleport/v2/templates/PubSubToRedis.java @@ -135,10 +135,8 @@ public interface PubSubToRedisOptions order = 1, groupName = "Source", description = "Pub/Sub input subscription", - helpText = - "The Pub/Sub subscription to read the input from, in the format" - + " projects//subscriptions/.", - example = "projects/your-project-id/subscriptions/your-subscription-name") + helpText = "The Pub/Sub subscription to read the input from.", + example = "projects//subscriptions/") String getInputSubscription(); void setInputSubscription(String value); @@ -171,7 +169,7 @@ public interface PubSubToRedisOptions order = 4, groupName = "Target", description = 
"Redis DB Password", - helpText = "The Redis database password. Defaults to empty.") + helpText = "The Redis database password. Defaults to `empty`.") @Default.String("") @Validation.Required String getRedisPassword(); diff --git a/v2/sourcedb-to-spanner/README_Cloud_Datastream_to_Spanner.md b/v2/sourcedb-to-spanner/README_Cloud_Datastream_to_Spanner.md index 4045eadecd..d4fc9706a3 100644 --- a/v2/sourcedb-to-spanner/README_Cloud_Datastream_to_Spanner.md +++ b/v2/sourcedb-to-spanner/README_Cloud_Datastream_to_Spanner.md @@ -42,41 +42,37 @@ on [Metadata Annotations](https://github.com/GoogleCloudPlatform/DataflowTemplat ### Required parameters -* **instanceId** : The Spanner instance where the changes are replicated. -* **databaseId** : The Spanner database where the changes are replicated. +* **instanceId**: The Spanner instance where the changes are replicated. +* **databaseId**: The Spanner database where the changes are replicated. +* **streamName**: The name or template for the stream to poll for schema information and source type. ### Optional parameters -* **inputFilePattern** : The Cloud Storage file location that contains the Datastream files to replicate. Typically, this is the root path for a stream. Support for this feature has been disabled. -* **inputFileFormat** : The format of the output file produced by Datastream. For example `avro,json`. Default, `avro`. -* **sessionFilePath** : Session file path in Cloud Storage that contains mapping information from HarbourBridge. -* **projectId** : The Spanner project ID. -* **spannerHost** : The Cloud Spanner endpoint to call in the template. (Example: https://batch-spanner.googleapis.com). Defaults to: https://batch-spanner.googleapis.com. -* **gcsPubSubSubscription** : The Pub/Sub subscription being used in a Cloud Storage notification policy. The name should be in the format of projects//subscriptions/. -* **streamName** : The name or template for the stream to poll for schema information and source type. -* **shadowTablePrefix** : The prefix used to name shadow tables. Default: `shadow_`. -* **shouldCreateShadowTables** : This flag indicates whether shadow tables must be created in Cloud Spanner database. Defaults to: true. -* **rfcStartDateTime** : The starting DateTime used to fetch from Cloud Storage (https://tools.ietf.org/html/rfc3339). Defaults to: 1970-01-01T00:00:00.00Z. -* **fileReadConcurrency** : The number of concurrent DataStream files to read. Defaults to: 30. -* **deadLetterQueueDirectory** : The file path used when storing the error queue output. The default file path is a directory under the Dataflow job's temp location. -* **dlqRetryMinutes** : The number of minutes between dead letter queue retries. Defaults to 10. -* **dlqMaxRetryCount** : The max number of times temporary errors can be retried through DLQ. Defaults to 500. -* **dataStreamRootUrl** : Datastream API Root URL. Defaults to: https://datastream.googleapis.com/. -* **datastreamSourceType** : This is the type of source database that Datastream connects to. Example - mysql/oracle. Need to be set when testing without an actual running Datastream. -* **roundJsonDecimals** : This flag if set, rounds the decimal values in json columns to a number that can be stored without loss of precision. Defaults to: false. -* **runMode** : This is the run mode type, whether regular or with retryDLQ. Defaults to: regular. 
-* **transformationContextFilePath** : Transformation context file path in cloud storage used to populate data used in transformations performed during migrations Eg: The shard id to db name to identify the db from which a row was migrated. -* **directoryWatchDurationInMinutes** : The Duration for which the pipeline should keep polling a directory in GCS. Datastreamoutput files are arranged in a directory structure which depicts the timestamp of the event grouped by minutes. This parameter should be approximately equal tomaximum delay which could occur between event occurring in source database and the same event being written to GCS by Datastream. 99.9 percentile = 10 minutes. Defaults to: 10. -* **spannerPriority** : The request priority for Cloud Spanner calls. The value must be one of: [HIGH,MEDIUM,LOW]. Defaults to HIGH. -* **dlqGcsPubSubSubscription** : The Pub/Sub subscription being used in a Cloud Storage notification policy for DLQ retry directory when running in regular mode. The name should be in the format of projects//subscriptions/. When set, the deadLetterQueueDirectory and dlqRetryMinutes are ignored. -* **transformationJarPath** : Custom jar location in Cloud Storage that contains the custom transformation logic for processing records in forward migration. Defaults to empty. -* **transformationClassName** : Fully qualified class name having the custom transformation logic. It is a mandatory field in case transformationJarPath is specified. Defaults to empty. -* **transformationCustomParameters** : String containing any custom parameters to be passed to the custom transformation class. Defaults to empty. -* **filteredEventsDirectory** : This is the file path to store the events filtered via custom transformation. Default is a directory under the Dataflow job's temp location. The default value is enough under most conditions. -* **shardingContextFilePath** : Sharding context file path in cloud storage is used to populate the shard id in spanner database for each source shard.It is of the format Map>. -* **tableOverrides** : These are the table name overrides from source to spanner. They are written in thefollowing format: [{SourceTableName1, SpannerTableName1}, {SourceTableName2, SpannerTableName2}]This example shows mapping Singers table to Vocalists and Albums table to Records. (Example: [{Singers, Vocalists}, {Albums, Records}]). Defaults to empty. -* **columnOverrides** : These are the column name overrides from source to spanner. They are written in thefollowing format: [{SourceTableName1.SourceColumnName1, SourceTableName1.SpannerColumnName1}, {SourceTableName2.SourceColumnName1, SourceTableName2.SpannerColumnName1}]Note that the SourceTableName should remain the same in both the source and spanner pair. To override table names, use tableOverrides.The example shows mapping SingerName to TalentName and AlbumName to RecordName in Singers and Albums table respectively. (Example: [{Singers.SingerName, Singers.TalentName}, {Albums.AlbumName, Albums.RecordName}]). Defaults to empty. -* **schemaOverridesFilePath** : A file which specifies the table and the column name overrides from source to spanner. Defaults to empty. +* **inputFilePattern**: The Cloud Storage file location that contains the Datastream files to replicate. Typically, this is the root path for a stream. Support for this feature has been disabled. +* **inputFileFormat**: The format of the output file produced by Datastream. For example `avro,json`. Defaults to `avro`. 
+* **sessionFilePath**: Session file path in Cloud Storage that contains mapping information from HarbourBridge. +* **projectId**: The Spanner project ID. +* **spannerHost**: The Cloud Spanner endpoint to call in the template. For example, `https://batch-spanner.googleapis.com`. Defaults to: https://batch-spanner.googleapis.com. +* **gcsPubSubSubscription**: The Pub/Sub subscription being used in a Cloud Storage notification policy. For the name, use the format `projects//subscriptions/`. +* **shadowTablePrefix**: The prefix used to name shadow tables. Default: `shadow_`. +* **shouldCreateShadowTables**: This flag indicates whether shadow tables must be created in the Cloud Spanner database. Defaults to: true. +* **rfcStartDateTime**: The starting DateTime used to fetch from Cloud Storage (https://tools.ietf.org/html/rfc3339). Defaults to: 1970-01-01T00:00:00.00Z. +* **fileReadConcurrency**: The number of concurrent DataStream files to read. Defaults to: 30. +* **deadLetterQueueDirectory**: The file path used when storing the error queue output. The default file path is a directory under the Dataflow job's temp location. +* **dlqRetryMinutes**: The number of minutes between dead letter queue retries. Defaults to `10`. +* **dlqMaxRetryCount**: The max number of times temporary errors can be retried through DLQ. Defaults to `500`. +* **dataStreamRootUrl**: Datastream API Root URL. Defaults to: https://datastream.googleapis.com/. +* **datastreamSourceType**: This is the type of source database that Datastream connects to. For example, `mysql` or `oracle`. Needs to be set when testing without an actual running Datastream. +* **roundJsonDecimals**: If set, this flag rounds the decimal values in JSON columns to a number that can be stored without loss of precision. Defaults to: false. +* **runMode**: This is the run mode type, whether regular or with retryDLQ. Defaults to: regular. +* **transformationContextFilePath**: Transformation context file path in cloud storage used to populate data used in transformations performed during migrations. For example, the shard id to db name mapping that identifies the db from which a row was migrated. +* **directoryWatchDurationInMinutes**: The duration for which the pipeline should keep polling a directory in GCS. Datastream output files are arranged in a directory structure which depicts the timestamp of the event grouped by minutes. This parameter should be approximately equal to the maximum delay which could occur between an event occurring in the source database and the same event being written to GCS by Datastream. 99.9 percentile = 10 minutes. Defaults to: 10. +* **spannerPriority**: The request priority for Cloud Spanner calls. The value must be one of: [`HIGH`,`MEDIUM`,`LOW`]. Defaults to `HIGH`. +* **dlqGcsPubSubSubscription**: The Pub/Sub subscription being used in a Cloud Storage notification policy for DLQ retry directory when running in regular mode. For the name, use the format `projects//subscriptions/`. When set, the deadLetterQueueDirectory and dlqRetryMinutes are ignored. +* **transformationJarPath**: Custom JAR file location in Cloud Storage for the file that contains the custom transformation logic for processing records in forward migration. Defaults to empty. +* **transformationClassName**: Fully qualified class name having the custom transformation logic. It is a mandatory field in case transformationJarPath is specified. Defaults to empty. +* **transformationCustomParameters**: String containing any custom parameters to be passed to the custom transformation class.
Defaults to empty. +* **filteredEventsDirectory**: This is the file path to store the events filtered via custom transformation. Default is a directory under the Dataflow job's temp location. The default value is enough under most conditions. @@ -157,6 +153,7 @@ export TEMPLATE_SPEC_GCSPATH="gs://$BUCKET_NAME/templates/flex/Cloud_Datastream_ ### Required export INSTANCE_ID= export DATABASE_ID= +export STREAM_NAME= ### Optional export INPUT_FILE_PATTERN= @@ -165,7 +162,6 @@ export SESSION_FILE_PATH= export PROJECT_ID= export SPANNER_HOST=https://batch-spanner.googleapis.com export GCS_PUB_SUB_SUBSCRIPTION= -export STREAM_NAME= export SHADOW_TABLE_PREFIX=shadow_ export SHOULD_CREATE_SHADOW_TABLES=true export RFC_START_DATE_TIME=1970-01-01T00:00:00.00Z @@ -185,10 +181,6 @@ export TRANSFORMATION_JAR_PATH="" export TRANSFORMATION_CLASS_NAME="" export TRANSFORMATION_CUSTOM_PARAMETERS="" export FILTERED_EVENTS_DIRECTORY="" -export SHARDING_CONTEXT_FILE_PATH= -export TABLE_OVERRIDES="" -export COLUMN_OVERRIDES="" -export SCHEMA_OVERRIDES_FILE_PATH="" gcloud dataflow flex-template run "cloud-datastream-to-spanner-job" \ --project "$PROJECT" \ @@ -221,11 +213,7 @@ gcloud dataflow flex-template run "cloud-datastream-to-spanner-job" \ --parameters "transformationJarPath=$TRANSFORMATION_JAR_PATH" \ --parameters "transformationClassName=$TRANSFORMATION_CLASS_NAME" \ --parameters "transformationCustomParameters=$TRANSFORMATION_CUSTOM_PARAMETERS" \ - --parameters "filteredEventsDirectory=$FILTERED_EVENTS_DIRECTORY" \ - --parameters "shardingContextFilePath=$SHARDING_CONTEXT_FILE_PATH" \ - --parameters "tableOverrides=$TABLE_OVERRIDES" \ - --parameters "columnOverrides=$COLUMN_OVERRIDES" \ - --parameters "schemaOverridesFilePath=$SCHEMA_OVERRIDES_FILE_PATH" + --parameters "filteredEventsDirectory=$FILTERED_EVENTS_DIRECTORY" ``` For more information about the command, please check: @@ -246,6 +234,7 @@ export REGION=us-central1 ### Required export INSTANCE_ID= export DATABASE_ID= +export STREAM_NAME= ### Optional export INPUT_FILE_PATTERN= @@ -254,7 +243,6 @@ export SESSION_FILE_PATH= export PROJECT_ID= export SPANNER_HOST=https://batch-spanner.googleapis.com export GCS_PUB_SUB_SUBSCRIPTION= -export STREAM_NAME= export SHADOW_TABLE_PREFIX=shadow_ export SHOULD_CREATE_SHADOW_TABLES=true export RFC_START_DATE_TIME=1970-01-01T00:00:00.00Z @@ -274,10 +262,6 @@ export TRANSFORMATION_JAR_PATH="" export TRANSFORMATION_CLASS_NAME="" export TRANSFORMATION_CUSTOM_PARAMETERS="" export FILTERED_EVENTS_DIRECTORY="" -export SHARDING_CONTEXT_FILE_PATH= -export TABLE_OVERRIDES="" -export COLUMN_OVERRIDES="" -export SCHEMA_OVERRIDES_FILE_PATH="" mvn clean package -PtemplatesRun \ -DskipTests \ @@ -286,7 +270,7 @@ mvn clean package -PtemplatesRun \ -Dregion="$REGION" \ -DjobName="cloud-datastream-to-spanner-job" \ -DtemplateName="Cloud_Datastream_to_Spanner" \ 
--Dparameters="inputFilePattern=$INPUT_FILE_PATTERN,inputFileFormat=$INPUT_FILE_FORMAT,sessionFilePath=$SESSION_FILE_PATH,instanceId=$INSTANCE_ID,databaseId=$DATABASE_ID,projectId=$PROJECT_ID,spannerHost=$SPANNER_HOST,gcsPubSubSubscription=$GCS_PUB_SUB_SUBSCRIPTION,streamName=$STREAM_NAME,shadowTablePrefix=$SHADOW_TABLE_PREFIX,shouldCreateShadowTables=$SHOULD_CREATE_SHADOW_TABLES,rfcStartDateTime=$RFC_START_DATE_TIME,fileReadConcurrency=$FILE_READ_CONCURRENCY,deadLetterQueueDirectory=$DEAD_LETTER_QUEUE_DIRECTORY,dlqRetryMinutes=$DLQ_RETRY_MINUTES,dlqMaxRetryCount=$DLQ_MAX_RETRY_COUNT,dataStreamRootUrl=$DATA_STREAM_ROOT_URL,datastreamSourceType=$DATASTREAM_SOURCE_TYPE,roundJsonDecimals=$ROUND_JSON_DECIMALS,runMode=$RUN_MODE,transformationContextFilePath=$TRANSFORMATION_CONTEXT_FILE_PATH,directoryWatchDurationInMinutes=$DIRECTORY_WATCH_DURATION_IN_MINUTES,spannerPriority=$SPANNER_PRIORITY,dlqGcsPubSubSubscription=$DLQ_GCS_PUB_SUB_SUBSCRIPTION,transformationJarPath=$TRANSFORMATION_JAR_PATH,transformationClassName=$TRANSFORMATION_CLASS_NAME,transformationCustomParameters=$TRANSFORMATION_CUSTOM_PARAMETERS,filteredEventsDirectory=$FILTERED_EVENTS_DIRECTORY,shardingContextFilePath=$SHARDING_CONTEXT_FILE_PATH,tableOverrides=$TABLE_OVERRIDES,columnOverrides=$COLUMN_OVERRIDES,schemaOverridesFilePath=$SCHEMA_OVERRIDES_FILE_PATH" \ +-Dparameters="inputFilePattern=$INPUT_FILE_PATTERN,inputFileFormat=$INPUT_FILE_FORMAT,sessionFilePath=$SESSION_FILE_PATH,instanceId=$INSTANCE_ID,databaseId=$DATABASE_ID,projectId=$PROJECT_ID,spannerHost=$SPANNER_HOST,gcsPubSubSubscription=$GCS_PUB_SUB_SUBSCRIPTION,streamName=$STREAM_NAME,shadowTablePrefix=$SHADOW_TABLE_PREFIX,shouldCreateShadowTables=$SHOULD_CREATE_SHADOW_TABLES,rfcStartDateTime=$RFC_START_DATE_TIME,fileReadConcurrency=$FILE_READ_CONCURRENCY,deadLetterQueueDirectory=$DEAD_LETTER_QUEUE_DIRECTORY,dlqRetryMinutes=$DLQ_RETRY_MINUTES,dlqMaxRetryCount=$DLQ_MAX_RETRY_COUNT,dataStreamRootUrl=$DATA_STREAM_ROOT_URL,datastreamSourceType=$DATASTREAM_SOURCE_TYPE,roundJsonDecimals=$ROUND_JSON_DECIMALS,runMode=$RUN_MODE,transformationContextFilePath=$TRANSFORMATION_CONTEXT_FILE_PATH,directoryWatchDurationInMinutes=$DIRECTORY_WATCH_DURATION_IN_MINUTES,spannerPriority=$SPANNER_PRIORITY,dlqGcsPubSubSubscription=$DLQ_GCS_PUB_SUB_SUBSCRIPTION,transformationJarPath=$TRANSFORMATION_JAR_PATH,transformationClassName=$TRANSFORMATION_CLASS_NAME,transformationCustomParameters=$TRANSFORMATION_CUSTOM_PARAMETERS,filteredEventsDirectory=$FILTERED_EVENTS_DIRECTORY" \ -f v2/datastream-to-spanner ``` @@ -333,13 +317,13 @@ resource "google_dataflow_flex_template_job" "cloud_datastream_to_spanner" { parameters = { instanceId = "" databaseId = "" + streamName = "" # inputFilePattern = "" # inputFileFormat = "avro" # sessionFilePath = "" # projectId = "" # spannerHost = "https://batch-spanner.googleapis.com" # gcsPubSubSubscription = "" - # streamName = "" # shadowTablePrefix = "shadow_" # shouldCreateShadowTables = "true" # rfcStartDateTime = "1970-01-01T00:00:00.00Z" @@ -359,10 +343,6 @@ resource "google_dataflow_flex_template_job" "cloud_datastream_to_spanner" { # transformationClassName = "" # transformationCustomParameters = "" # filteredEventsDirectory = "" - # shardingContextFilePath = "" - # tableOverrides = "[{Singers, Vocalists}, {Albums, Records}]" - # columnOverrides = "[{Singers.SingerName, Singers.TalentName}, {Albums.AlbumName, Albums.RecordName}]" - # schemaOverridesFilePath = "" } } ``` diff --git a/v2/sourcedb-to-spanner/README_Sourcedb_to_Spanner_Flex.md 
b/v2/sourcedb-to-spanner/README_Sourcedb_to_Spanner_Flex.md index b3d786e330..c1c484ede5 100644 --- a/v2/sourcedb-to-spanner/README_Sourcedb_to_Spanner_Flex.md +++ b/v2/sourcedb-to-spanner/README_Sourcedb_to_Spanner_Flex.md @@ -27,30 +27,29 @@ on [Metadata Annotations](https://github.com/GoogleCloudPlatform/DataflowTemplat ### Required parameters -* **sourceConfigURL** : The JDBC connection URL string. For example, `jdbc:mysql://127.4.5.30:3306/my-db?autoReconnect=true&maxReconnects=10&unicode=true&characterEncoding=UTF-8` or the shard config. -* **instanceId** : The destination Cloud Spanner instance. -* **databaseId** : The destination Cloud Spanner database. -* **projectId** : This is the name of the Cloud Spanner project. -* **outputDirectory** : This directory is used to dump the failed/skipped/filtered records in a migration. +* **sourceConfigURL**: The JDBC connection URL string. For example, `jdbc:mysql://127.4.5.30:3306/my-db?autoReconnect=true&maxReconnects=10&unicode=true&characterEncoding=UTF-8` or the shard config. +* **instanceId**: The destination Cloud Spanner instance. +* **databaseId**: The destination Cloud Spanner database. +* **projectId**: This is the name of the Cloud Spanner project. +* **outputDirectory**: This directory is used to dump the failed/skipped/filtered records in a migration. ### Optional parameters -* **sourceDbDialect** : Possible values are `MYSQL` and `POSTGRESQL`. Defaults to: MYSQL. -* **jdbcDriverJars** : The comma-separated list of driver JAR files. (Example: gs://your-bucket/driver_jar1.jar,gs://your-bucket/driver_jar2.jar). Defaults to empty. -* **jdbcDriverClassName** : The JDBC driver class name. (Example: com.mysql.jdbc.Driver). Defaults to: com.mysql.jdbc.Driver. -* **username** : The username to be used for the JDBC connection. Defaults to empty. -* **password** : The password to be used for the JDBC connection. Defaults to empty. -* **tables** : Tables to migrate from source. Defaults to empty. -* **numPartitions** : The number of partitions. This, along with the lower and upper bound, form partitions strides for generated WHERE clause expressions used to split the partition column evenly. When the input is less than 1, the number is set to 1. Defaults to: 0. -* **spannerHost** : The Cloud Spanner endpoint to call in the template. (Example: https://batch-spanner.googleapis.com). Defaults to: https://batch-spanner.googleapis.com. -* **maxConnections** : Configures the JDBC connection pool on each worker with maximum number of connections. Use a negative number for no limit. (Example: -1). Defaults to: 0. -* **sessionFilePath** : Session file path in Cloud Storage that contains mapping information from Spanner Migration Tool. Defaults to empty. -* **transformationJarPath** : Custom jar location in Cloud Storage that contains the custom transformation logic for processing records. Defaults to empty. -* **transformationClassName** : Fully qualified class name having the custom transformation logic. It is a mandatory field in case transformationJarPath is specified. Defaults to empty. -* **transformationCustomParameters** : String containing any custom parameters to be passed to the custom transformation class. Defaults to empty. -* **namespace** : Namespace to exported. For PostgreSQL, if no namespace is provided, 'public' will be used. Defaults to empty. -* **disabledAlgorithms** : Comma separated algorithms to disable. If this value is set to none, no algorithm is disabled. 
Use this parameter with caution, because the algorithms disabled by default might have vulnerabilities or performance issues. (Example: SSLv3, RC4). -* **extraFilesToStage** : Comma separated Cloud Storage paths or Secret Manager secrets for files to stage in the worker. These files are saved in the /extra_files directory in each worker. (Example: gs:///file.txt,projects//secrets//versions/). +* **sourceDbDialect**: Possible values are `MYSQL` and `POSTGRESQL`. Defaults to: MYSQL. +* **jdbcDriverJars**: The comma-separated list of driver JAR files. For example, `gs://your-bucket/driver_jar1.jar,gs://your-bucket/driver_jar2.jar`. Defaults to empty. +* **jdbcDriverClassName**: The JDBC driver class name. For example, `com.mysql.jdbc.Driver`. Defaults to: com.mysql.jdbc.Driver. +* **username**: The username to be used for the JDBC connection. Defaults to empty. +* **password**: The password to be used for the JDBC connection. Defaults to empty. +* **tables**: Tables to migrate from source. Defaults to empty. +* **numPartitions**: The number of partitions. This, along with the lower and upper bound, forms partition strides for generated WHERE clause expressions used to split the partition column evenly. When the input is less than 1, the number is set to 1. Defaults to: 0. +* **spannerHost**: The Cloud Spanner endpoint to call in the template. For example, `https://batch-spanner.googleapis.com`. Defaults to: https://batch-spanner.googleapis.com. +* **maxConnections**: Configures the JDBC connection pool on each worker with the maximum number of connections. Use a negative number for no limit. For example, `-1`. Defaults to: 0. +* **sessionFilePath**: Session file path in Cloud Storage that contains mapping information from Spanner Migration Tool. Defaults to empty. +* **transformationJarPath**: Custom jar location in Cloud Storage that contains the custom transformation logic for processing records. Defaults to empty. +* **transformationClassName**: Fully qualified class name having the custom transformation logic. It is a mandatory field in case transformationJarPath is specified. Defaults to empty. +* **transformationCustomParameters**: String containing any custom parameters to be passed to the custom transformation class. Defaults to empty. +* **disabledAlgorithms**: Comma separated algorithms to disable. If this value is set to `none`, no algorithm is disabled. Use this parameter with caution, because the algorithms disabled by default might have vulnerabilities or performance issues. For example, `SSLv3, RC4`. +* **extraFilesToStage**: Comma separated Cloud Storage paths or Secret Manager secrets for files to stage in the worker. These files are saved in the /extra_files directory in each worker. For example, `gs:///file.txt,projects//secrets//versions/`.
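To see how the Sourcedb_to_Spanner_Flex parameters above fit together, here is a minimal invocation sketch. The JDBC URL, credentials, Spanner names, and output path are placeholders, and the spec path assumes the template was staged under the usual flex path; only the parameter names are taken from the documentation.

```
# Hypothetical run of a staged Sourcedb_to_Spanner_Flex template (all values are placeholders).
export PROJECT=your-project-id
export REGION=us-central1
export BUCKET_NAME=your-staging-bucket
export TEMPLATE_SPEC_GCSPATH="gs://$BUCKET_NAME/templates/flex/Sourcedb_to_Spanner_Flex"

gcloud dataflow flex-template run "sourcedb-to-spanner-flex-job" \
  --project "$PROJECT" \
  --region "$REGION" \
  --template-file-gcs-location "$TEMPLATE_SPEC_GCSPATH" \
  --parameters "sourceConfigURL=jdbc:mysql://10.0.0.1:3306/my-db" \
  --parameters "username=your-db-user" \
  --parameters "password=your-db-password" \
  --parameters "instanceId=your-spanner-instance" \
  --parameters "databaseId=your-spanner-database" \
  --parameters "projectId=$PROJECT" \
  --parameters "outputDirectory=gs://your-bucket/output/"
```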
@@ -149,7 +148,6 @@ export SESSION_FILE_PATH="" export TRANSFORMATION_JAR_PATH="" export TRANSFORMATION_CLASS_NAME="" export TRANSFORMATION_CUSTOM_PARAMETERS="" -export NAMESPACE="" export DISABLED_ALGORITHMS= export EXTRA_FILES_TO_STAGE= @@ -175,7 +173,6 @@ gcloud dataflow flex-template run "sourcedb-to-spanner-flex-job" \ --parameters "transformationJarPath=$TRANSFORMATION_JAR_PATH" \ --parameters "transformationClassName=$TRANSFORMATION_CLASS_NAME" \ --parameters "transformationCustomParameters=$TRANSFORMATION_CUSTOM_PARAMETERS" \ - --parameters "namespace=$NAMESPACE" \ --parameters "disabledAlgorithms=$DISABLED_ALGORITHMS" \ --parameters "extraFilesToStage=$EXTRA_FILES_TO_STAGE" ``` @@ -216,7 +213,6 @@ export SESSION_FILE_PATH="" export TRANSFORMATION_JAR_PATH="" export TRANSFORMATION_CLASS_NAME="" export TRANSFORMATION_CUSTOM_PARAMETERS="" -export NAMESPACE="" export DISABLED_ALGORITHMS= export EXTRA_FILES_TO_STAGE= @@ -227,7 +223,7 @@ mvn clean package -PtemplatesRun \ -Dregion="$REGION" \ -DjobName="sourcedb-to-spanner-flex-job" \ -DtemplateName="Sourcedb_to_Spanner_Flex" \ --Dparameters="sourceDbDialect=$SOURCE_DB_DIALECT,jdbcDriverJars=$JDBC_DRIVER_JARS,jdbcDriverClassName=$JDBC_DRIVER_CLASS_NAME,sourceConfigURL=$SOURCE_CONFIG_URL,username=$USERNAME,password=$PASSWORD,tables=$TABLES,numPartitions=$NUM_PARTITIONS,instanceId=$INSTANCE_ID,databaseId=$DATABASE_ID,projectId=$PROJECT_ID,spannerHost=$SPANNER_HOST,maxConnections=$MAX_CONNECTIONS,sessionFilePath=$SESSION_FILE_PATH,outputDirectory=$OUTPUT_DIRECTORY,transformationJarPath=$TRANSFORMATION_JAR_PATH,transformationClassName=$TRANSFORMATION_CLASS_NAME,transformationCustomParameters=$TRANSFORMATION_CUSTOM_PARAMETERS,namespace=$NAMESPACE,disabledAlgorithms=$DISABLED_ALGORITHMS,extraFilesToStage=$EXTRA_FILES_TO_STAGE" \ +-Dparameters="sourceDbDialect=$SOURCE_DB_DIALECT,jdbcDriverJars=$JDBC_DRIVER_JARS,jdbcDriverClassName=$JDBC_DRIVER_CLASS_NAME,sourceConfigURL=$SOURCE_CONFIG_URL,username=$USERNAME,password=$PASSWORD,tables=$TABLES,numPartitions=$NUM_PARTITIONS,instanceId=$INSTANCE_ID,databaseId=$DATABASE_ID,projectId=$PROJECT_ID,spannerHost=$SPANNER_HOST,maxConnections=$MAX_CONNECTIONS,sessionFilePath=$SESSION_FILE_PATH,outputDirectory=$OUTPUT_DIRECTORY,transformationJarPath=$TRANSFORMATION_JAR_PATH,transformationClassName=$TRANSFORMATION_CLASS_NAME,transformationCustomParameters=$TRANSFORMATION_CUSTOM_PARAMETERS,disabledAlgorithms=$DISABLED_ALGORITHMS,extraFilesToStage=$EXTRA_FILES_TO_STAGE" \ -f v2/sourcedb-to-spanner ``` @@ -278,21 +274,20 @@ resource "google_dataflow_flex_template_job" "sourcedb_to_spanner_flex" { projectId = "" outputDirectory = "" # sourceDbDialect = "MYSQL" - # jdbcDriverJars = "gs://your-bucket/driver_jar1.jar,gs://your-bucket/driver_jar2.jar" + # jdbcDriverJars = "" # jdbcDriverClassName = "com.mysql.jdbc.Driver" # username = "" # password = "" # tables = "" # numPartitions = "0" # spannerHost = "https://batch-spanner.googleapis.com" - # maxConnections = "-1" + # maxConnections = "0" # sessionFilePath = "" # transformationJarPath = "" # transformationClassName = "" # transformationCustomParameters = "" - # namespace = "" - # disabledAlgorithms = "SSLv3, RC4" - # extraFilesToStage = "gs:///file.txt,projects//secrets//versions/" + # disabledAlgorithms = "" + # extraFilesToStage = "" } } ``` diff --git a/v2/spanner-change-streams-to-sharded-file-sink/README_Spanner_Change_Streams_to_Sharded_File_Sink.md b/v2/spanner-change-streams-to-sharded-file-sink/README_Spanner_Change_Streams_to_Sharded_File_Sink.md index 
76b2832750..837f474939 100644 --- a/v2/spanner-change-streams-to-sharded-file-sink/README_Spanner_Change_Streams_to_Sharded_File_Sink.md +++ b/v2/spanner-change-streams-to-sharded-file-sink/README_Spanner_Change_Streams_to_Sharded_File_Sink.md @@ -17,29 +17,29 @@ on [Metadata Annotations](https://github.com/GoogleCloudPlatform/DataflowTemplat ### Required parameters -* **changeStreamName** : This is the name of the Spanner change stream that the pipeline will read from. -* **instanceId** : This is the name of the Cloud Spanner instance where the changestream is present. -* **databaseId** : This is the name of the Cloud Spanner database that the changestream is monitoring. -* **spannerProjectId** : This is the name of the Cloud Spanner project. -* **metadataInstance** : This is the instance to store the metadata used by the connector to control the consumption of the change stream API data. -* **metadataDatabase** : This is the database to store the metadata used by the connector to control the consumption of the change stream API data. -* **gcsOutputDirectory** : The path and filename prefix for writing output files. Must end with a slash. DateTime formatting is used to parse directory path for date & time formatters. (Example: gs://your-bucket/your-path/). -* **sourceShardsFilePath** : Source shard details file path in Cloud Storage that contains connection profile of source shards. Atleast one shard information is expected. -* **runIdentifier** : The identifier to distinguish between different runs of reverse replication flows. +* **changeStreamName**: This is the name of the Spanner change stream that the pipeline will read from. +* **instanceId**: This is the name of the Cloud Spanner instance where the changestream is present. +* **databaseId**: This is the name of the Cloud Spanner database that the changestream is monitoring. +* **spannerProjectId**: This is the name of the Cloud Spanner project. +* **metadataInstance**: This is the instance to store the metadata used by the connector to control the consumption of the change stream API data. +* **metadataDatabase**: This is the database to store the metadata used by the connector to control the consumption of the change stream API data. +* **gcsOutputDirectory**: The path and filename prefix for writing output files. Must end with a slash. DateTime formatting is used to parse directory path for date & time formatters. For example, `gs://your-bucket/your-path/`. +* **sourceShardsFilePath**: Source shard details file path in Cloud Storage that contains connection profile of source shards. At least one shard's information is expected. +* **runIdentifier**: The identifier to distinguish between different runs of reverse replication flows. ### Optional parameters -* **startTimestamp** : Read changes from the given timestamp. Defaults to empty. -* **endTimestamp** : Read changes until the given timestamp. If no timestamp provided, reads indefinitely. Defaults to empty. -* **sessionFilePath** : Session file path in Cloud Storage that contains mapping information from HarbourBridge. Needed when doing sharded reverse replication. -* **windowDuration** : The window duration/size in which data will be written to Cloud Storage. Allowed formats are: Ns (for seconds, example: 5s), Nm (for minutes, example: 12m), Nh (for hours, example: 2h). (Example: 5m). Defaults to: 10s. -* **filtrationMode** : Mode of Filtration, decides how to drop certain records based on a criteria.
Currently supported modes are: none (filter nothing), forward_migration (filter records written via the forward migration pipeline). Defaults to forward_migration. -* **metadataTableSuffix** : Suffix appended to the spanner_to_gcs_metadata and shard_file_create_progress metadata tables.Useful when doing multiple runs.Only alpha numeric and underscores are allowed. Defaults to empty. -* **skipDirectoryName** : Records skipped from reverse replication are written to this directory. Default directory name is skip. -* **runMode** : Regular starts from input start time, resume start from last processed time. Defaults to: regular. -* **shardingCustomJarPath** : Custom jar location in Cloud Storage that contains the customization logic for fetching shard id. Defaults to empty. -* **shardingCustomClassName** : Fully qualified class name having the custom shard id implementation. It is a mandatory field in case shardingCustomJarPath is specified. Defaults to empty. -* **shardingCustomParameters** : String containing any custom parameters to be passed to the custom sharding class. Defaults to empty. +* **startTimestamp**: Read changes from the given timestamp. Defaults to empty. +* **endTimestamp**: Read changes until the given timestamp. If no timestamp provided, reads indefinitely. Defaults to empty. +* **sessionFilePath**: Session file path in Cloud Storage that contains mapping information from HarbourBridge. Needed when doing sharded reverse replication. +* **windowDuration**: The window duration/size in which data will be written to Cloud Storage. Allowed formats are: Ns (for seconds, example: 5s), Nm (for minutes, example: 12m), Nh (for hours, example: 2h). For example, `5m`. Defaults to: 10s. +* **filtrationMode**: Mode of Filtration, decides how to drop certain records based on a criteria. Currently supported modes are: none (filter nothing), forward_migration (filter records written via the forward migration pipeline). Defaults to forward_migration. +* **metadataTableSuffix**: Suffix appended to the spanner_to_gcs_metadata and shard_file_create_progress metadata tables. Useful when doing multiple runs. Only alphanumeric characters and underscores are allowed. Defaults to empty. +* **skipDirectoryName**: Records skipped from reverse replication are written to this directory. Default directory name is skip. +* **runMode**: Regular starts from the input start time, resume starts from the last processed time. Defaults to: regular. +* **shardingCustomJarPath**: Custom jar location in Cloud Storage that contains the customization logic for fetching shard id. Defaults to empty. +* **shardingCustomClassName**: Fully qualified class name having the custom shard id implementation. It is a mandatory field in case shardingCustomJarPath is specified. Defaults to empty. +* **shardingCustomParameters**: String containing any custom parameters to be passed to the custom sharding class. Defaults to empty.
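For context on how the Spanner_Change_Streams_to_Sharded_File_Sink parameters above are typically supplied, here is a minimal invocation sketch. The instance, database, shard file, and run identifier values are placeholders, and the spec path assumes the flex staging convention used in the other READMEs; only the parameter names come from the documentation.

```
# Hypothetical run of a staged Spanner_Change_Streams_to_Sharded_File_Sink template (all values are placeholders).
export PROJECT=your-project-id
export REGION=us-central1
export BUCKET_NAME=your-staging-bucket
export TEMPLATE_SPEC_GCSPATH="gs://$BUCKET_NAME/templates/flex/Spanner_Change_Streams_to_Sharded_File_Sink"

gcloud dataflow flex-template run "spanner-change-streams-to-sharded-file-sink-job" \
  --project "$PROJECT" \
  --region "$REGION" \
  --template-file-gcs-location "$TEMPLATE_SPEC_GCSPATH" \
  --parameters "changeStreamName=your-change-stream" \
  --parameters "instanceId=your-spanner-instance" \
  --parameters "databaseId=your-spanner-database" \
  --parameters "spannerProjectId=$PROJECT" \
  --parameters "metadataInstance=your-metadata-instance" \
  --parameters "metadataDatabase=your-metadata-database" \
  --parameters "gcsOutputDirectory=gs://your-bucket/your-path/" \
  --parameters "sourceShardsFilePath=gs://your-bucket/source-shards.json" \
  --parameters "runIdentifier=run-1" \
  --parameters "windowDuration=10s"
```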
@@ -264,13 +264,13 @@ resource "google_dataflow_flex_template_job" "spanner_change_streams_to_sharded_ spannerProjectId = "" metadataInstance = "" metadataDatabase = "" - gcsOutputDirectory = "gs://your-bucket/your-path/" + gcsOutputDirectory = "" sourceShardsFilePath = "" runIdentifier = "" # startTimestamp = "" # endTimestamp = "" # sessionFilePath = "" - # windowDuration = "5m" + # windowDuration = "10s" # filtrationMode = "forward_migration" # metadataTableSuffix = "" # skipDirectoryName = "skip" diff --git a/v2/spanner-to-sourcedb/README_Spanner_to_SourceDb.md b/v2/spanner-to-sourcedb/README_Spanner_to_SourceDb.md index f84df5c26c..0b641e9747 100644 --- a/v2/spanner-to-sourcedb/README_Spanner_to_SourceDb.md +++ b/v2/spanner-to-sourcedb/README_Spanner_to_SourceDb.md @@ -14,33 +14,33 @@ on [Metadata Annotations](https://github.com/GoogleCloudPlatform/DataflowTemplat ### Required parameters -* **changeStreamName** : This is the name of the Spanner change stream that the pipeline will read from. -* **instanceId** : This is the name of the Cloud Spanner instance where the changestream is present. -* **databaseId** : This is the name of the Cloud Spanner database that the changestream is monitoring. -* **spannerProjectId** : This is the name of the Cloud Spanner project. -* **metadataInstance** : This is the instance to store the metadata used by the connector to control the consumption of the change stream API data. -* **metadataDatabase** : This is the database to store the metadata used by the connector to control the consumption of the change stream API data. -* **sourceShardsFilePath** : Path to GCS file containing connection profile info for source shards. +* **changeStreamName**: This is the name of the Spanner change stream that the pipeline will read from. +* **instanceId**: This is the name of the Cloud Spanner instance where the changestream is present. +* **databaseId**: This is the name of the Cloud Spanner database that the changestream is monitoring. +* **spannerProjectId**: This is the name of the Cloud Spanner project. +* **metadataInstance**: This is the instance to store the metadata used by the connector to control the consumption of the change stream API data. +* **metadataDatabase**: This is the database to store the metadata used by the connector to control the consumption of the change stream API data. +* **sourceShardsFilePath**: Path to GCS file containing connection profile info for source shards. ### Optional parameters -* **startTimestamp** : Read changes from the given timestamp. Defaults to empty. -* **endTimestamp** : Read changes until the given timestamp. If no timestamp provided, reads indefinitely. Defaults to empty. -* **shadowTablePrefix** : The prefix used to name shadow tables. Default: `shadow_`. -* **sessionFilePath** : Session file path in Cloud Storage that contains mapping information from HarbourBridge. -* **filtrationMode** : Mode of Filtration, decides how to drop certain records based on a criteria. Currently supported modes are: none (filter nothing), forward_migration (filter records written via the forward migration pipeline). Defaults to forward_migration. -* **shardingCustomJarPath** : Custom jar location in Cloud Storage that contains the customization logic for fetching shard id. Defaults to empty. -* **shardingCustomClassName** : Fully qualified class name having the custom shard id implementation. It is a mandatory field in case shardingCustomJarPath is specified. Defaults to empty. 
-* **shardingCustomParameters** : String containing any custom parameters to be passed to the custom sharding class. Defaults to empty. -* **sourceDbTimezoneOffset** : This is the timezone offset from UTC for the source database. Example value: +10:00. Defaults to: +00:00. -* **dlqGcsPubSubSubscription** : The Pub/Sub subscription being used in a Cloud Storage notification policy for DLQ retry directory when running in regular mode. The name should be in the format of projects//subscriptions/. When set, the deadLetterQueueDirectory and dlqRetryMinutes are ignored. -* **skipDirectoryName** : Records skipped from reverse replication are written to this directory. Default directory name is skip. -* **maxShardConnections** : This will come from shard file eventually. Defaults to: 10000. -* **deadLetterQueueDirectory** : The file path used when storing the error queue output. The default file path is a directory under the Dataflow job's temp location. -* **dlqMaxRetryCount** : The max number of times temporary errors can be retried through DLQ. Defaults to 500. -* **runMode** : This is the run mode type, whether regular or with retryDLQ.Default is regular. retryDLQ is used to retry the severe DLQ records only. -* **dlqRetryMinutes** : The number of minutes between dead letter queue retries. Defaults to 10. -* **sourceType** : The type of source database to reverse replicate to. Defaults to mysql. +* **startTimestamp**: Read changes from the given timestamp. Defaults to empty. +* **endTimestamp**: Read changes until the given timestamp. If no timestamp provided, reads indefinitely. Defaults to empty. +* **shadowTablePrefix**: The prefix used to name shadow tables. Default: `shadow_`. +* **sessionFilePath**: Session file path in Cloud Storage that contains mapping information from HarbourBridge. +* **filtrationMode**: Mode of Filtration, decides how to drop certain records based on a criteria. Currently supported modes are: none (filter nothing), forward_migration (filter records written via the forward migration pipeline). Defaults to forward_migration. +* **shardingCustomJarPath**: Custom jar location in Cloud Storage that contains the customization logic for fetching shard id. Defaults to empty. +* **shardingCustomClassName**: Fully qualified class name having the custom shard id implementation. It is a mandatory field in case shardingCustomJarPath is specified. Defaults to empty. +* **shardingCustomParameters**: String containing any custom parameters to be passed to the custom sharding class. Defaults to empty. +* **sourceDbTimezoneOffset**: This is the timezone offset from UTC for the source database. Example value: +10:00. Defaults to: +00:00. +* **dlqGcsPubSubSubscription**: The Pub/Sub subscription being used in a Cloud Storage notification policy for DLQ retry directory when running in regular mode. The name should be in the format of projects//subscriptions/. When set, the deadLetterQueueDirectory and dlqRetryMinutes are ignored. +* **skipDirectoryName**: Records skipped from reverse replication are written to this directory. Default directory name is skip. +* **maxShardConnections**: This will come from shard file eventually. Defaults to: 10000. +* **deadLetterQueueDirectory**: The file path used when storing the error queue output. The default file path is a directory under the Dataflow job's temp location. +* **dlqMaxRetryCount**: The max number of times temporary errors can be retried through DLQ. Defaults to 500. 
+* **runMode**: This is the run mode type, whether regular or with retryDLQ.Default is regular. retryDLQ is used to retry the severe DLQ records only. +* **dlqRetryMinutes**: The number of minutes between dead letter queue retries. Defaults to 10. + ## Getting Started diff --git a/v2/sqlserver-to-googlecloud/README_Jdbc_to_PubSub.md b/v2/sqlserver-to-googlecloud/README_Jdbc_to_PubSub.md index 0e39a3bd2e..63ce6272a0 100644 --- a/v2/sqlserver-to-googlecloud/README_Jdbc_to_PubSub.md +++ b/v2/sqlserver-to-googlecloud/README_Jdbc_to_PubSub.md @@ -18,20 +18,20 @@ on [Metadata Annotations](https://github.com/GoogleCloudPlatform/DataflowTemplat ### Required parameters -* **driverClassName** : The JDBC driver class name. (Example: com.mysql.jdbc.Driver). -* **connectionUrl** : The JDBC connection URL string. You can pass in this value as a string that's encrypted with a Cloud KMS key and then Base64-encoded. For example: 'echo -n "jdbc:mysql://some-host:3306/sampledb" | gcloud kms encrypt --location= --keyring= --key= --plaintext-file=- --ciphertext-file=- | base64' (Example: jdbc:mysql://some-host:3306/sampledb). -* **driverJars** : Comma-separated Cloud Storage paths for JDBC drivers. (Example: gs://your-bucket/driver_jar1.jar,gs://your-bucket/driver_jar2.jar). -* **query** : The query to run on the source to extract the data. (Example: select * from sampledb.sample_table). -* **outputTopic** : The Pub/Sub topic to publish to, in the format projects//topics/. (Example: projects/your-project-id/topics/your-topic-name). +* **driverClassName**: The JDBC driver class name. For example, `com.mysql.jdbc.Driver`. +* **connectionUrl**: The JDBC connection URL string. You can pass in this value as a string that's encrypted with a Cloud KMS key and then Base64-encoded. For example: 'echo -n "jdbc:mysql://some-host:3306/sampledb" | gcloud kms encrypt --location= --keyring= --key= --plaintext-file=- --ciphertext-file=- | base64' For example, `jdbc:mysql://some-host:3306/sampledb`. +* **driverJars**: Comma-separated Cloud Storage paths for JDBC drivers. For example, `gs://your-bucket/driver_jar1.jar,gs://your-bucket/driver_jar2.jar`. +* **query**: The query to run on the source to extract the data. For example, `select * from sampledb.sample_table`. +* **outputTopic**: The Pub/Sub topic to publish to. For example, `projects//topics/`. ### Optional parameters -* **username** : The username to use for the JDBC connection. You can pass in this value as a string that's encrypted with a Cloud KMS key and then Base64-encoded. For example, `echo -n 'some_username' | glcloud kms encrypt --location=my_location --keyring=mykeyring --key=mykey --plaintext-file=- --ciphertext-file=- | base64`. -* **password** : The password to use for the JDBC connection. You can pass in this value as a string that's encrypted with a Cloud KMS key and then Base64-encoded. For example, `echo -n 'some_password' | glcloud kms encrypt --location=my_location --keyring=mykeyring --key=mykey --plaintext-file=- --ciphertext-file=- | base64`. -* **connectionProperties** : The properties string to use for the JDBC connection. The format of the string must be `[propertyName=property;]*`. (Example: unicode=true;characterEncoding=UTF-8). -* **KMSEncryptionKey** : The Cloud KMS Encryption Key to use to decrypt the username, password, and connection string. If a Cloud KMS key is passed in, the username, password, and connection string must all be passed in encrypted and base64 encoded. 
(Example: projects/your-project/locations/global/keyRings/your-keyring/cryptoKeys/your-key).
-* **disabledAlgorithms** : Comma separated algorithms to disable. If this value is set to none, no algorithm is disabled. Use this parameter with caution, because the algorithms disabled by default might have vulnerabilities or performance issues. (Example: SSLv3, RC4).
-* **extraFilesToStage** : Comma separated Cloud Storage paths or Secret Manager secrets for files to stage in the worker. These files are saved in the /extra_files directory in each worker. (Example: gs:///file.txt,projects//secrets//versions/).
+* **username**: The username to use for the JDBC connection. You can pass in this value as a string that's encrypted with a Cloud KMS key and then Base64-encoded. For example, `echo -n 'some_username' | gcloud kms encrypt --location=my_location --keyring=mykeyring --key=mykey --plaintext-file=- --ciphertext-file=- | base64`.
+* **password**: The password to use for the JDBC connection. You can pass in this value as a string that's encrypted with a Cloud KMS key and then Base64-encoded. For example, `echo -n 'some_password' | gcloud kms encrypt --location=my_location --keyring=mykeyring --key=mykey --plaintext-file=- --ciphertext-file=- | base64`.
+* **connectionProperties**: The properties string to use for the JDBC connection. The format of the string must be `[propertyName=property;]*`. For example, `unicode=true;characterEncoding=UTF-8`.
+* **KMSEncryptionKey**: The Cloud KMS Encryption Key to use to decrypt the username, password, and connection string. If a Cloud KMS key is passed in, the username, password, and connection string must all be passed in encrypted and base64 encoded. For example, `projects/your-project/locations/global/keyRings/your-keyring/cryptoKeys/your-key`.
+* **disabledAlgorithms**: Comma separated algorithms to disable. If this value is set to `none`, no algorithm is disabled. Use this parameter with caution, because the algorithms disabled by default might have vulnerabilities or performance issues. For example, `SSLv3, RC4`.
+* **extraFilesToStage**: Comma separated Cloud Storage paths or Secret Manager secrets for files to stage in the worker. These files are saved in the /extra_files directory in each worker. For example, `gs:///file.txt,projects//secrets//versions/`.
@@ -223,17 +223,17 @@ resource "google_dataflow_flex_template_job" "jdbc_to_pubsub" { name = "jdbc-to-pubsub" region = var.region parameters = { - driverClassName = "com.mysql.jdbc.Driver" - connectionUrl = "jdbc:mysql://some-host:3306/sampledb" - driverJars = "gs://your-bucket/driver_jar1.jar,gs://your-bucket/driver_jar2.jar" - query = "select * from sampledb.sample_table" - outputTopic = "projects/your-project-id/topics/your-topic-name" + driverClassName = "" + connectionUrl = "" + driverJars = "" + query = "" + outputTopic = "" # username = "" # password = "" - # connectionProperties = "unicode=true;characterEncoding=UTF-8" - # KMSEncryptionKey = "projects/your-project/locations/global/keyRings/your-keyring/cryptoKeys/your-key" - # disabledAlgorithms = "SSLv3, RC4" - # extraFilesToStage = "gs:///file.txt,projects//secrets//versions/" + # connectionProperties = "" + # KMSEncryptionKey = "" + # disabledAlgorithms = "" + # extraFilesToStage = "" } } ``` diff --git a/v2/sqlserver-to-googlecloud/README_Jdbc_to_PubSub_Auto.md b/v2/sqlserver-to-googlecloud/README_Jdbc_to_PubSub_Auto.md index 458d522dc3..8441e93d01 100644 --- a/v2/sqlserver-to-googlecloud/README_Jdbc_to_PubSub_Auto.md +++ b/v2/sqlserver-to-googlecloud/README_Jdbc_to_PubSub_Auto.md @@ -15,23 +15,23 @@ on [Metadata Annotations](https://github.com/GoogleCloudPlatform/DataflowTemplat ### Required parameters -* **driverClassName** : JDBC driver class name to use. (Example: com.mysql.jdbc.Driver). -* **connectionUrl** : Url connection string to connect to the JDBC source. Connection string can be passed in as plaintext or as a base64 encoded string encrypted by Google Cloud KMS. (Example: jdbc:mysql://some-host:3306/sampledb). -* **driverJars** : Comma separate Cloud Storage paths for JDBC drivers. (Example: gs://your-bucket/driver_jar1.jar,gs://your-bucket/driver_jar2.jar). -* **query** : Query to be executed on the source to extract the data. (Example: select * from sampledb.sample_table). -* **outputTopic** : The name of the topic to which data should published, in the format of 'projects/your-project-id/topics/your-topic-name' (Example: projects/your-project-id/topics/your-topic-name). +* **driverClassName**: JDBC driver class name to use. For example, `com.mysql.jdbc.Driver`. +* **connectionUrl**: Url connection string to connect to the JDBC source. Connection string can be passed in as plaintext or as a base64 encoded string encrypted by Google Cloud KMS. For example, `jdbc:mysql://some-host:3306/sampledb`. +* **driverJars**: Comma separate Cloud Storage paths for JDBC drivers. For example, `gs://your-bucket/driver_jar1.jar,gs://your-bucket/driver_jar2.jar`. +* **query**: Query to be executed on the source to extract the data. For example, `select * from sampledb.sample_table`. +* **outputTopic**: The name of the topic to publish data to. For example, `projects//topics/`. ### Optional parameters -* **username** : User name to be used for the JDBC connection. User name can be passed in as plaintext or as a base64 encoded string encrypted by Google Cloud KMS. -* **password** : Password to be used for the JDBC connection. Password can be passed in as plaintext or as a base64 encoded string encrypted by Google Cloud KMS. -* **connectionProperties** : Properties string to use for the JDBC connection. Format of the string must be [propertyName=property;]*. (Example: unicode=true;characterEncoding=UTF-8). 
-* **KMSEncryptionKey** : If this parameter is provided, password, user name and connection string should all be passed in encrypted. Encrypt parameters using the KMS API encrypt endpoint. See: https://cloud.google.com/kms/docs/reference/rest/v1/projects.locations.keyRings.cryptoKeys/encrypt (Example: projects/your-project/locations/global/keyRings/your-keyring/cryptoKeys/your-key).
-* **partitionColumn** : If this parameter is provided (along with `table`), JdbcIO reads the table in parallel by executing multiple instances of the query on the same table (subquery) using ranges. Currently, only Long partition columns are supported.
-* **table** : Table to read from using partitions. This parameter also accepts a subquery in parentheses. (Example: (select id, name from Person) as subq).
-* **numPartitions** : The number of partitions. This, along with the lower and upper bound, form partitions strides for generated WHERE clause expressions used to split the partition column evenly. When the input is less than 1, the number is set to 1.
-* **lowerBound** : Lower bound used in the partition scheme. If not provided, it is automatically inferred by Beam (for the supported types).
-* **upperBound** : Upper bound used in partition scheme. If not provided, it is automatically inferred by Beam (for the supported types).
+* **username**: User name to be used for the JDBC connection. User name can be passed in as plaintext or as a base64 encoded string encrypted by Google Cloud KMS.
+* **password**: Password to be used for the JDBC connection. Password can be passed in as plaintext or as a base64 encoded string encrypted by Google Cloud KMS.
+* **connectionProperties**: Properties string to use for the JDBC connection. Format of the string must be [propertyName=property;]*. For example, `unicode=true;characterEncoding=UTF-8`.
+* **KMSEncryptionKey**: If this parameter is provided, password, user name and connection string should all be passed in encrypted. Encrypt parameters using the KMS API encrypt endpoint. See: https://cloud.google.com/kms/docs/reference/rest/v1/projects.locations.keyRings.cryptoKeys/encrypt. For example, `projects/your-project/locations/global/keyRings/your-keyring/cryptoKeys/your-key`.
+* **partitionColumn**: If this parameter is provided (along with `table`), JdbcIO reads the table in parallel by executing multiple instances of the query on the same table (subquery) using ranges. Currently, only Long partition columns are supported.
+* **table**: Table to read from using partitions. This parameter also accepts a subquery in parentheses. For example, `(select id, name from Person) as subq`.
+* **numPartitions**: The number of partitions. This, along with the lower and upper bound, form partitions strides for generated WHERE clause expressions used to split the partition column evenly. When the input is less than 1, the number is set to 1.
+* **lowerBound**: Lower bound used in the partition scheme. If not provided, it is automatically inferred by Beam (for the supported types).
+* **upperBound**: Upper bound used in partition scheme. If not provided, it is automatically inferred by Beam (for the supported types).
@@ -232,17 +232,17 @@ resource "google_dataflow_flex_template_job" "jdbc_to_pubsub_auto" { name = "jdbc-to-pubsub-auto" region = var.region parameters = { - driverClassName = "com.mysql.jdbc.Driver" - connectionUrl = "jdbc:mysql://some-host:3306/sampledb" - driverJars = "gs://your-bucket/driver_jar1.jar,gs://your-bucket/driver_jar2.jar" - query = "select * from sampledb.sample_table" - outputTopic = "projects/your-project-id/topics/your-topic-name" + driverClassName = "" + connectionUrl = "" + driverJars = "" + query = "" + outputTopic = "" # username = "" # password = "" - # connectionProperties = "unicode=true;characterEncoding=UTF-8" - # KMSEncryptionKey = "projects/your-project/locations/global/keyRings/your-keyring/cryptoKeys/your-key" + # connectionProperties = "" + # KMSEncryptionKey = "" # partitionColumn = "" - # table = "(select id, name from Person) as subq" + # table = "
" # numPartitions = "" # lowerBound = "" # upperBound = "" diff --git a/v2/sqlserver-to-googlecloud/README_SQLServer_to_BigQuery.md b/v2/sqlserver-to-googlecloud/README_SQLServer_to_BigQuery.md index 6278b5f3b3..0e040ea9c4 100644 --- a/v2/sqlserver-to-googlecloud/README_SQLServer_to_BigQuery.md +++ b/v2/sqlserver-to-googlecloud/README_SQLServer_to_BigQuery.md @@ -23,34 +23,31 @@ on [Metadata Annotations](https://github.com/GoogleCloudPlatform/DataflowTemplat ### Required parameters -* **connectionURL** : The JDBC connection URL string. Can be passed in as a string that's Base64-encoded and then encrypted with a Cloud KMS key, or can be a Secret Manager secret in the form projects/{project}/secrets/{secret}/versions/{secret_version}. (Example: jdbc:sqlserver://localhost;databaseName=sampledb). -* **outputTable** : The BigQuery output table location. (Example: :.). -* **bigQueryLoadingTemporaryDirectory** : The temporary directory for the BigQuery loading process. (Example: gs://your-bucket/your-files/temp_dir). +* **connectionURL**: The JDBC connection URL string. Can be passed in as a string that's Base64-encoded and then encrypted with a Cloud KMS key. For example, `jdbc:sqlserver://localhost;databaseName=sampledb`. +* **outputTable**: The BigQuery output table location. For example, `:.`. +* **bigQueryLoadingTemporaryDirectory**: The temporary directory for the BigQuery loading process. For example, `gs://your-bucket/your-files/temp_dir`. ### Optional parameters -* **connectionProperties** : The properties string to use for the JDBC connection. The format of the string must be `[propertyName=property;]*`.For more information, see Configuration Properties (https://dev.mysql.com/doc/connector-j/en/connector-j-reference-configuration-properties.html) in the MySQL documentation. (Example: unicode=true;characterEncoding=UTF-8). -* **username** : The username to use for the JDBC connection. Can be passed in as a string that's encrypted with a Cloud KMS key, or can be a Secret Manager secret in the form projects/{project}/secrets/{secret}/versions/{secret_version}. -* **password** : The password to use for the JDBC connection. Can be passed in as a string that's encrypted with a Cloud KMS key, or can be a Secret Manager secret in the form projects/{project}/secrets/{secret}/versions/{secret_version}. -* **query** : The query to run on the source to extract the data. Note that some JDBC SQL and BigQuery types, although sharing the same name, have some differences. Some important SQL -> BigQuery type mappings to keep in mind are: -DATETIME --> TIMESTAMP - -Type casting may be required if your schemas do not match. This parameter can be set to a gs:// path pointing to a file in Cloud Storage to load the query from. The file encoding should be UTF-8. (Example: select * from sampledb.sample_table). -* **KMSEncryptionKey** : The Cloud KMS encryption key to use to decrypt the username, password, and connection string. If you pass in a Cloud KMS key, you must also encrypt the username, password, and connection string. (Example: projects/your-project/locations/global/keyRings/your-keyring/cryptoKeys/your-key). -* **useColumnAlias** : If set to `true`, the pipeline uses the column alias (`AS`) instead of the column name to map the rows to BigQuery. Defaults to `false`. -* **isTruncate** : If set to `true`, the pipeline truncates before loading data into BigQuery. Defaults to `false`, which causes the pipeline to append data. 
-* **partitionColumn** : If this parameter is provided with the name of the `table` defined as an optional parameter, JdbcIO reads the table in parallel by executing multiple instances of the query on the same table (subquery) using ranges. Currently, only supports `Long` partition columns.
-* **table** : The table to read from when using partitions. This parameter also accepts a subquery in parentheses. (Example: (select id, name from Person) as subq).
-* **numPartitions** : The number of partitions. With the lower and upper bound, this value forms partition strides for generated `WHERE` clause expressions that are used to split the partition column evenly. When the input is less than `1`, the number is set to `1`.
-* **lowerBound** : The lower bound to use in the partition scheme. If not provided, this value is automatically inferred by Apache Beam for the supported types.
-* **upperBound** : The upper bound to use in the partition scheme. If not provided, this value is automatically inferred by Apache Beam for the supported types.
-* **fetchSize** : The number of rows to be fetched from database at a time. Not used for partitioned reads. Defaults to: 50000.
-* **createDisposition** : The BigQuery CreateDisposition to use. For example, `CREATE_IF_NEEDED` or `CREATE_NEVER`. Defaults to: CREATE_NEVER.
-* **bigQuerySchemaPath** : The Cloud Storage path for the BigQuery JSON schema. If `createDisposition` is set to CREATE_IF_NEEDED, this parameter must be specified. (Example: gs://your-bucket/your-schema.json).
-* **disabledAlgorithms** : Comma separated algorithms to disable. If this value is set to none, no algorithm is disabled. Use this parameter with caution, because the algorithms disabled by default might have vulnerabilities or performance issues. (Example: SSLv3, RC4).
-* **extraFilesToStage** : Comma separated Cloud Storage paths or Secret Manager secrets for files to stage in the worker. These files are saved in the /extra_files directory in each worker. (Example: gs:///file.txt,projects//secrets//versions/).
-* **useStorageWriteApi** : If `true`, the pipeline uses the BigQuery Storage Write API (https://cloud.google.com/bigquery/docs/write-api). The default value is `false`. For more information, see Using the Storage Write API (https://beam.apache.org/documentation/io/built-in/google-bigquery/#storage-write-api).
-* **useStorageWriteApiAtLeastOnce** : When using the Storage Write API, specifies the write semantics. To use at-least-once semantics (https://beam.apache.org/documentation/io/built-in/google-bigquery/#at-least-once-semantics), set this parameter to `true`. To use exactly-once semantics, set the parameter to `false`. This parameter applies only when `useStorageWriteApi` is `true`. The default value is `false`.
+* **connectionProperties**: The properties string to use for the JDBC connection. The format of the string must be `[propertyName=property;]*`. For more information, see Configuration Properties (https://dev.mysql.com/doc/connector-j/8.1/en/connector-j-reference-configuration-properties.html) in the MySQL documentation. For example, `unicode=true;characterEncoding=UTF-8`.
+* **username**: The username to use for the JDBC connection. You can pass in this value as a string that's encrypted with a Cloud KMS key and then Base64-encoded. Remove whitespace characters from the Base64-encoded string.
+* **password**: The password to use for the JDBC connection. You can pass in this value as a string that's encrypted with a Cloud KMS key and then Base64-encoded.
Remove whitespace characters from the Base64-encoded string. +* **query**: The query to run on the source to extract the data. Note that some JDBC SQL and BigQuery types, although sharing the same name, have some differences. Some important SQL -> BigQuery type mappings to keep in mind are `DATETIME --> TIMESTAMP`. Type casting may be required if your schemas do not match. For example, `select * from sampledb.sample_table`. +* **KMSEncryptionKey**: The Cloud KMS encryption key to use to decrypt the username, password, and connection string. If you pass in a Cloud KMS key, you must also encrypt the username, password, and connection string. For example, `projects/your-project/locations/global/keyRings/your-keyring/cryptoKeys/your-key`. +* **useColumnAlias**: If set to `true`, the pipeline uses the column alias (`AS`) instead of the column name to map the rows to BigQuery. Defaults to `false`. +* **isTruncate**: If set to `true`, the pipeline truncates before loading data into BigQuery. Defaults to `false`, which causes the pipeline to append data. +* **partitionColumn**: If this parameter is provided with the name of the `table` defined as an optional parameter, JdbcIO reads the table in parallel by executing multiple instances of the query on the same table (subquery) using ranges. Currently, only supports `Long` partition columns. +* **table**: The table to read from when using partitions. This parameter also accepts a subquery in parentheses. For example, `(select id, name from Person) as subq`. +* **numPartitions**: The number of partitions. With the lower and upper bound, this value forms partition strides for generated `WHERE` clause expressions that are used to split the partition column evenly. When the input is less than `1`, the number is set to `1`. +* **lowerBound**: The lower bound to use in the partition scheme. If not provided, this value is automatically inferred by Apache Beam for the supported types. +* **upperBound**: The upper bound to use in the partition scheme. If not provided, this value is automatically inferred by Apache Beam for the supported types. +* **fetchSize**: The number of rows to be fetched from database at a time. Not used for partitioned reads. Defaults to: 50000. +* **createDisposition**: The BigQuery CreateDisposition to use. For example, `CREATE_IF_NEEDED` or `CREATE_NEVER`. Defaults to: CREATE_NEVER. +* **bigQuerySchemaPath**: The Cloud Storage path for the BigQuery JSON schema. If `createDisposition` is set to `CREATE_IF_NEEDED`, this parameter must be specified. For example, `gs://your-bucket/your-schema.json`. +* **disabledAlgorithms**: Comma separated algorithms to disable. If this value is set to `none`, no algorithm is disabled. Use this parameter with caution, because the algorithms disabled by default might have vulnerabilities or performance issues. For example, `SSLv3, RC4`. +* **extraFilesToStage**: Comma separated Cloud Storage paths or Secret Manager secrets for files to stage in the worker. These files are saved in the /extra_files directory in each worker. For example, `gs:///file.txt,projects//secrets//versions/`. +* **useStorageWriteApi**: If `true`, the pipeline uses the BigQuery Storage Write API (https://cloud.google.com/bigquery/docs/write-api). The default value is `false`. For more information, see Using the Storage Write API (https://beam.apache.org/documentation/io/built-in/google-bigquery/#storage-write-api). +* **useStorageWriteApiAtLeastOnce**: When using the Storage Write API, specifies the write semantics. 
To use at-least-once semantics (https://beam.apache.org/documentation/io/built-in/google-bigquery/#at-least-once-semantics), set this parameter to `true`. To use exactly-once semantics, set the parameter to `false`. This parameter applies only when `useStorageWriteApi` is `true`. The default value is `false`. @@ -275,26 +272,26 @@ resource "google_dataflow_flex_template_job" "sqlserver_to_bigquery" { name = "sqlserver-to-bigquery" region = var.region parameters = { - connectionURL = "jdbc:sqlserver://localhost;databaseName=sampledb" - outputTable = ":." - bigQueryLoadingTemporaryDirectory = "gs://your-bucket/your-files/temp_dir" - # connectionProperties = "unicode=true;characterEncoding=UTF-8" + connectionURL = "" + outputTable = "" + bigQueryLoadingTemporaryDirectory = "" + # connectionProperties = "" # username = "" # password = "" - # query = "select * from sampledb.sample_table" - # KMSEncryptionKey = "projects/your-project/locations/global/keyRings/your-keyring/cryptoKeys/your-key" + # query = "" + # KMSEncryptionKey = "" # useColumnAlias = "false" # isTruncate = "false" # partitionColumn = "" - # table = "(select id, name from Person) as subq" + # table = "
" # numPartitions = "" # lowerBound = "" # upperBound = "" # fetchSize = "50000" # createDisposition = "CREATE_NEVER" - # bigQuerySchemaPath = "gs://your-bucket/your-schema.json" - # disabledAlgorithms = "SSLv3, RC4" - # extraFilesToStage = "gs:///file.txt,projects//secrets//versions/" + # bigQuerySchemaPath = "" + # disabledAlgorithms = "" + # extraFilesToStage = "" # useStorageWriteApi = "false" # useStorageWriteApiAtLeastOnce = "false" } diff --git a/v2/streaming-data-generator/README_Streaming_Data_Generator.md b/v2/streaming-data-generator/README_Streaming_Data_Generator.md index 0d4e40e71b..0ac35a9c05 100644 --- a/v2/streaming-data-generator/README_Streaming_Data_Generator.md +++ b/v2/streaming-data-generator/README_Streaming_Data_Generator.md @@ -17,40 +17,40 @@ on [Metadata Annotations](https://github.com/GoogleCloudPlatform/DataflowTemplat ### Required parameters -* **qps** : Indicates rate of messages per second to be published to Pub/Sub. +* **qps**: Indicates rate of messages per second to be published to Pub/Sub. ### Optional parameters -* **schemaTemplate** : Pre-existing schema template to use. The value must be one of: [GAME_EVENT]. -* **schemaLocation** : Cloud Storage path of schema location. (Example: gs:///prefix). -* **topic** : The name of the topic to which the pipeline should publish data. (Example: projects//topics/). -* **messagesLimit** : Indicates maximum number of output messages to be generated. 0 means unlimited. Defaults to: 0. -* **outputType** : The message Output type. Default is JSON. -* **avroSchemaLocation** : Cloud Storage path of Avro schema location. Mandatory when output type is AVRO or PARQUET. (Example: gs://your-bucket/your-path/schema.avsc). -* **sinkType** : The message Sink type. Default is PUBSUB. -* **outputTableSpec** : Output BigQuery table. Mandatory when sinkType is BIGQUERY (Example: :.). -* **writeDisposition** : BigQuery WriteDisposition. For example, WRITE_APPEND, WRITE_EMPTY or WRITE_TRUNCATE. Defaults to: WRITE_APPEND. -* **outputDeadletterTable** : Messages failed to reach the output table for all kind of reasons (e.g., mismatched schema, malformed json) are written to this table. If it doesn't exist, it will be created during pipeline execution. (Example: your-project-id:your-dataset.your-table-name). -* **windowDuration** : The window duration/size in which data will be written to Cloud Storage. Allowed formats are: Ns (for seconds, example: 5s), Nm (for minutes, example: 12m), Nh (for hours, example: 2h). (Example: 1m). Defaults to: 1m. -* **outputDirectory** : The path and filename prefix for writing output files. Must end with a slash. DateTime formatting is used to parse directory path for date & time formatters. (Example: gs://your-bucket/your-path/). -* **outputFilenamePrefix** : The prefix to place on each windowed file. (Example: output-). Defaults to: output-. -* **numShards** : The maximum number of output shards produced when writing. A higher number of shards means higher throughput for writing to Cloud Storage, but potentially higher data aggregation cost across shards when processing output Cloud Storage files. Default value is decided by Dataflow. -* **driverClassName** : JDBC driver class name to use. (Example: com.mysql.jdbc.Driver). -* **connectionUrl** : Url connection string to connect to the JDBC source. (Example: jdbc:mysql://some-host:3306/sampledb). -* **username** : User name to be used for the JDBC connection. -* **password** : Password to be used for the JDBC connection. 
-* **connectionProperties** : Properties string to use for the JDBC connection. Format of the string must be [propertyName=property;]*. (Example: unicode=true;characterEncoding=UTF-8). -* **statement** : SQL statement which will be executed to write to the database. The statement must specify the column names of the table in any order. Only the values of the specified column names will be read from the json and added to the statement. (Example: INSERT INTO tableName (column1, column2) VALUES (?,?)). -* **projectId** : GCP Project Id of where the Spanner table lives. -* **spannerInstanceName** : Cloud Spanner instance name. -* **spannerDatabaseName** : Cloud Spanner database name. -* **spannerTableName** : Cloud Spanner table name. -* **maxNumMutations** : Specifies the cell mutation limit (maximum number of mutated cells per batch). Default value is 5000. -* **maxNumRows** : Specifies the row mutation limit (maximum number of mutated rows per batch). Default value is 1000. -* **batchSizeBytes** : Specifies the batch size limit (max number of bytes mutated per batch). Default value is 1MB. -* **commitDeadlineSeconds** : Specifies the deadline in seconds for the Commit API call. -* **bootstrapServer** : Kafka Bootstrap Server (Example: localhost:9092). -* **kafkaTopic** : Kafka topic to write to. (Example: topic). +* **schemaTemplate**: Pre-existing schema template to use. The value must be one of: [GAME_EVENT]. +* **schemaLocation**: Cloud Storage path of schema location. For example, `gs:///prefix`. +* **topic**: The name of the topic to which the pipeline should publish data. For example, `projects//topics/`. +* **messagesLimit**: Indicates maximum number of output messages to be generated. 0 means unlimited. Defaults to: 0. +* **outputType**: The message Output type. Default is JSON. +* **avroSchemaLocation**: Cloud Storage path of Avro schema location. Mandatory when output type is AVRO or PARQUET. For example, `gs://your-bucket/your-path/schema.avsc`. +* **sinkType**: The message Sink type. Default is PUBSUB. +* **outputTableSpec**: Output BigQuery table. Mandatory when sinkType is BIGQUERY For example, `:.`. +* **writeDisposition**: BigQuery WriteDisposition. For example, WRITE_APPEND, WRITE_EMPTY or WRITE_TRUNCATE. Defaults to: WRITE_APPEND. +* **outputDeadletterTable**: Messages failed to reach the output table for all kind of reasons (e.g., mismatched schema, malformed json) are written to this table. If it doesn't exist, it will be created during pipeline execution. For example, `your-project-id:your-dataset.your-table-name`. +* **windowDuration**: The window duration/size in which data will be written to Cloud Storage. Allowed formats are: Ns (for seconds, example: 5s), Nm (for minutes, example: 12m), Nh (for hours, example: 2h). For example, `1m`. Defaults to: 1m. +* **outputDirectory**: The path and filename prefix for writing output files. Must end with a slash. DateTime formatting is used to parse directory path for date & time formatters. For example, `gs://your-bucket/your-path/`. +* **outputFilenamePrefix**: The prefix to place on each windowed file. For example, `output-`. Defaults to: output-. +* **numShards**: The maximum number of output shards produced when writing. A higher number of shards means higher throughput for writing to Cloud Storage, but potentially higher data aggregation cost across shards when processing output Cloud Storage files. Default value is decided by Dataflow. +* **driverClassName**: JDBC driver class name to use. 
For example, `com.mysql.jdbc.Driver`. +* **connectionUrl**: Url connection string to connect to the JDBC source. For example, `jdbc:mysql://some-host:3306/sampledb`. +* **username**: User name to be used for the JDBC connection. +* **password**: Password to be used for the JDBC connection. +* **connectionProperties**: Properties string to use for the JDBC connection. Format of the string must be [propertyName=property;]*. For example, `unicode=true;characterEncoding=UTF-8`. +* **statement**: SQL statement which will be executed to write to the database. The statement must specify the column names of the table in any order. Only the values of the specified column names will be read from the json and added to the statement. For example, `INSERT INTO tableName (column1, column2) VALUES (?,?)`. +* **projectId**: GCP Project Id of where the Spanner table lives. +* **spannerInstanceName**: Cloud Spanner instance name. +* **spannerDatabaseName**: Cloud Spanner database name. +* **spannerTableName**: Cloud Spanner table name. +* **maxNumMutations**: Specifies the cell mutation limit (maximum number of mutated cells per batch). Default value is 5000. +* **maxNumRows**: Specifies the row mutation limit (maximum number of mutated rows per batch). Default value is 1000. +* **batchSizeBytes**: Specifies the batch size limit (max number of bytes mutated per batch). Default value is 1MB. +* **commitDeadlineSeconds**: Specifies the deadline in seconds for the Commit API call. +* **bootstrapServer**: Kafka Bootstrap Server For example, `localhost:9092`. +* **kafkaTopic**: Kafka topic to write to. For example, `topic`. @@ -304,25 +304,25 @@ resource "google_dataflow_flex_template_job" "streaming_data_generator" { parameters = { qps = "" # schemaTemplate = "" - # schemaLocation = "gs:///prefix" - # topic = "projects//topics/" + # schemaLocation = "" + # topic = "" # messagesLimit = "0" # outputType = "JSON" - # avroSchemaLocation = "gs://your-bucket/your-path/schema.avsc" + # avroSchemaLocation = "" # sinkType = "PUBSUB" - # outputTableSpec = ":." + # outputTableSpec = "" # writeDisposition = "WRITE_APPEND" - # outputDeadletterTable = "your-project-id:your-dataset.your-table-name" + # outputDeadletterTable = "" # windowDuration = "1m" - # outputDirectory = "gs://your-bucket/your-path/" + # outputDirectory = "" # outputFilenamePrefix = "output-" # numShards = "0" - # driverClassName = "com.mysql.jdbc.Driver" - # connectionUrl = "jdbc:mysql://some-host:3306/sampledb" + # driverClassName = "" + # connectionUrl = "" # username = "" # password = "" - # connectionProperties = "unicode=true;characterEncoding=UTF-8" - # statement = "INSERT INTO tableName (column1, column2) VALUES (?,?)" + # connectionProperties = "" + # statement = "" # projectId = "" # spannerInstanceName = "" # spannerDatabaseName = "" @@ -331,8 +331,8 @@ resource "google_dataflow_flex_template_job" "streaming_data_generator" { # maxNumRows = "" # batchSizeBytes = "" # commitDeadlineSeconds = "" - # bootstrapServer = "localhost:9092" - # kafkaTopic = "topic" + # bootstrapServer = "" + # kafkaTopic = "" } } ``` diff --git a/yaml/README_Kafka_to_BigQuery_Yaml.md b/yaml/README_Kafka_to_BigQuery_Yaml.md index c1f4fcec75..dc38e0752c 100644 --- a/yaml/README_Kafka_to_BigQuery_Yaml.md +++ b/yaml/README_Kafka_to_BigQuery_Yaml.md @@ -21,17 +21,17 @@ on [Metadata Annotations](https://github.com/GoogleCloudPlatform/DataflowTemplat ### Required parameters -* **outputTableSpec** : BigQuery table location to write the output to. 
The name should be in the format `:.`. The table's schema must match input objects. +* **outputTableSpec**: BigQuery table location to write the output to. The name should be in the format `:.`. The table's schema must match input objects. ### Optional parameters -* **readBootstrapServers** : Kafka Bootstrap Server list, separated by commas. (Example: localhost:9092,127.0.0.1:9093). -* **kafkaReadTopics** : Kafka topic(s) to read input from. (Example: topic1,topic2). -* **outputDeadletterTable** : BigQuery table for failed messages. Messages failed to reach the output table for different reasons (e.g., mismatched schema, malformed json) are written to this table. If it doesn't exist, it will be created during pipeline execution. If not specified, "outputTableSpec_error_records" is used instead. (Example: your-project-id:your-dataset.your-table-name). -* **messageFormat** : The message format. Can be AVRO or JSON. Defaults to: JSON. -* **schema** : Kafka schema. A schema is required if data format is JSON, AVRO or PROTO. -* **numStorageWriteApiStreams** : Number of streams defines the parallelism of the BigQueryIO’s Write transform and roughly corresponds to the number of Storage Write API’s streams which will be used by the pipeline. See https://cloud.google.com/blog/products/data-analytics/streaming-data-into-bigquery-using-storage-write-api for the recommended values. Defaults to: 1. -* **storageWriteApiTriggeringFrequencySec** : Triggering frequency will determine how soon the data will be visible for querying in BigQuery. See https://cloud.google.com/blog/products/data-analytics/streaming-data-into-bigquery-using-storage-write-api for the recommended values. Defaults to: 1. +* **readBootstrapServers**: Kafka Bootstrap Server list, separated by commas. For example, `localhost:9092,127.0.0.1:9093`. +* **kafkaReadTopics**: Kafka topic(s) to read input from. For example, `topic1,topic2`. +* **outputDeadletterTable**: BigQuery table for failed messages. Messages failed to reach the output table for different reasons (e.g., mismatched schema, malformed json) are written to this table. If it doesn't exist, it will be created during pipeline execution. If not specified, "outputTableSpec_error_records" is used instead. For example, `your-project-id:your-dataset.your-table-name`. +* **messageFormat**: The message format. Can be AVRO or JSON. Defaults to: JSON. +* **schema**: Kafka schema. A schema is required if data format is JSON, AVRO or PROTO. +* **numStorageWriteApiStreams**: Number of streams defines the parallelism of the BigQueryIO’s Write transform and roughly corresponds to the number of Storage Write API’s streams which will be used by the pipeline. See https://cloud.google.com/blog/products/data-analytics/streaming-data-into-bigquery-using-storage-write-api for the recommended values. Defaults to: 1. +* **storageWriteApiTriggeringFrequencySec**: Triggering frequency will determine how soon the data will be visible for querying in BigQuery. See https://cloud.google.com/blog/products/data-analytics/streaming-data-into-bigquery-using-storage-write-api for the recommended values. Defaults to: 1. 
@@ -215,9 +215,9 @@ resource "google_dataflow_flex_template_job" "kafka_to_bigquery_yaml" { region = var.region parameters = { outputTableSpec = "" - # readBootstrapServers = "localhost:9092,127.0.0.1:9093" - # kafkaReadTopics = "topic1,topic2" - # outputDeadletterTable = "your-project-id:your-dataset.your-table-name" + # readBootstrapServers = "" + # kafkaReadTopics = "" + # outputDeadletterTable = "" # messageFormat = "JSON" # schema = "" # numStorageWriteApiStreams = "1"