
Commit

changed the descriptions as per code suggestions
sharan-malyala committed Nov 11, 2024
2 parents 0faf929 + 0daaae1 commit c173f1e
Showing 7 changed files with 24 additions and 23 deletions.
@@ -97,7 +97,7 @@ public interface Options
order = 1,
groupName = "Source",
description = "Pub/Sub input subscription",
helpText = "Pub/Sub subscription to read the input from.",
helpText = "The Pub/Sub subscription to read the input from.",
example = "projects/<PROJECT_ID>/subscriptions/<SUBSCRIPTION_NAME>")
ValueProvider<String> getInputSubscription();

@@ -152,7 +152,7 @@ public interface CdcApplierOptions extends PipelineOptions, BigQueryStorageApiSt
optional = true,
description = "Whether to use a single topic for all MySQL table changes.",
helpText =
"Set this to `true` if you have configured your Debezium connector to publish all table"
"Set this to `true` if you configure your Debezium connector to publish all table"
+ " updates to a single topic")
@Default.Boolean(false)
Boolean getUseSingleTopic();
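
As an editorial illustration (not part of this commit): when `useSingleTopic` is `true`, the Debezium connector is expected to publish every table's change events to one topic rather than one topic per table. If the connector runs on Kafka Connect, one common way to do that is the `RegexRouter` single message transform, sketched below; the topic name is hypothetical and the exact property names for the connector used with this template may differ.

```java
import java.util.Properties;

public class SingleTopicRoutingExample {
  public static void main(String[] args) {
    // Sketch only: collapse Debezium's per-table topics into one topic using Kafka
    // Connect's RegexRouter transform. "mysql-cdc-all-tables" is a hypothetical topic
    // name; the connector used with this template may expose a different setting.
    Properties connectorProps = new Properties();
    connectorProps.setProperty("transforms", "route");
    connectorProps.setProperty(
        "transforms.route.type", "org.apache.kafka.connect.transforms.RegexRouter");
    connectorProps.setProperty("transforms.route.regex", "(.*)");
    connectorProps.setProperty("transforms.route.replacement", "mysql-cdc-all-tables");
    connectorProps.forEach((k, v) -> System.out.println(k + "=" + v));
  }
}
```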
@@ -184,7 +184,7 @@ public interface CsvPipelineOptions extends PipelineOptions {
order = 1,
description = "The input filepattern to read from.",
helpText =
"The Cloud Storage file pattern to search for CSV files. Example: `gs://mybucket/test-*.csv`.")
"The Cloud Storage file pattern to search for CSV files. For example, `gs://mybucket/test-*.csv`.")
String getInputFileSpec();

void setInputFileSpec(String inputFileSpec);
@@ -214,7 +214,7 @@ public interface CsvPipelineOptions extends PipelineOptions {
order = 4,
optional = true,
description = "Column delimiter of the data files.",
helpText = "The column delimiter of the input text files. Defaults to `,`",
helpText = "The column delimiter of the input text files. Default: `,`",
example = ",")
@Default.InstanceFactory(DelimiterFactory.class)
String getDelimiter();
@@ -226,7 +226,7 @@ public interface CsvPipelineOptions extends PipelineOptions {
optional = true,
description = "CSV Format to use for parsing records.",
helpText =
"The CSV format specification to use for parsing records. Defaults to `Default`. See https://commons.apache.org/proper/commons-csv/apidocs/org/apache/commons/csv/CSVFormat.html for more details. Must match format names exactly found at: "
"CSV format specification to use for parsing records. Default is: `Default`. See https://commons.apache.org/proper/commons-csv/apidocs/org/apache/commons/csv/CSVFormat.html for more details. Must match format names exactly found at: "
+ "https://commons.apache.org/proper/commons-csv/apidocs/org/apache/commons/csv/CSVFormat.Predefined.html")
@Default.String("Default")
String getCsvFormat();
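
For context, a minimal sketch (not code from this commit) of how a `csvFormat` value and delimiter might be resolved with Apache Commons CSV; the values below are hypothetical stand-ins for the template's options:

```java
import org.apache.commons.csv.CSVFormat;

public class CsvFormatExample {
  public static void main(String[] args) {
    // Hypothetical stand-ins for the template's csvFormat and delimiter options.
    String csvFormatName = "Default";
    String delimiter = ",";

    // Look up the named predefined format; valueOf throws IllegalArgumentException
    // if the name does not exactly match a CSVFormat.Predefined constant.
    CSVFormat format =
        CSVFormat.Predefined.valueOf(csvFormatName)
            .getFormat()
            .withDelimiter(delimiter.charAt(0));
    System.out.println(format);
  }
}
```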
@@ -271,7 +271,8 @@ public interface CsvPipelineOptions extends PipelineOptions {
optional = true,
description = "Log detailed CSV conversion errors",
helpText =
"Set to `true` to enable detailed error logging when CSV parsing fails. This setting might expose sensitive data in the logs, such as password contained in the CSV file. Defaults to `false`.")
"Set to `true` to enable detailed error logging when CSV parsing fails. Note that this may expose sensitive data in the logs (e.g., if the CSV file contains passwords)."
+ " Default: `false`.")
@Default.Boolean(false)
Boolean getLogDetailedCsvConversionErrors();

@@ -149,7 +149,7 @@ public interface Options
groupName = "Source",
description = "File location for Datastream file output in Cloud Storage.",
helpText =
"The file location for Datastream file output in Cloud Storage, in the format: `gs://<BUCKET_NAME>/<ROOT_PATH>/`.")
"The file location for Datastream file output in Cloud Storage, in the format `gs://<BUCKET_NAME>/<ROOT_PATH>/`.")
String getInputFilePattern();

void setInputFilePattern(String value);
@@ -159,7 +159,7 @@ public interface Options
enumOptions = {@TemplateEnumOption("avro"), @TemplateEnumOption("json")},
description = "Datastream output file format (avro/json).",
helpText =
"The format of the output files produced by Datastream. Value can be `avro` or `json`. Defaults to: `avro`.")
"The format of the output files produced by Datastream. Allowed values are `avro` and `json`. Defaults to `avro`.")
@Default.String("avro")
String getInputFileFormat();

@@ -223,7 +223,7 @@ public interface Options
groupName = "Target",
description = "Name or template for the dataset to contain staging tables.",
helpText =
"The name of the dataset that contains staging tables. This parameter supports templates, for example `{_metadata_dataset}_log` or `my_dataset_log`. Normally, this parameter is a dataset name. Defaults to: `{_metadata_dataset}`.")
"The name of the dataset that contains staging tables. This parameter supports templates, for example `{_metadata_dataset}_log` or `my_dataset_log`. Normally, this parameter is a dataset name. Defaults to `{_metadata_dataset}`.")
@Default.String("{_metadata_dataset}")
String getOutputStagingDatasetTemplate();

@@ -235,7 +235,7 @@ public interface Options
groupName = "Target",
description = "Template for the name of staging tables.",
helpText =
"The template to use to name the staging tables. For example, `{_metadata_table}`. Defaults to: `{_metadata_table}_log`.")
"The template to use to name the staging tables. For example, `{_metadata_table}`. Defaults to `{_metadata_table}_log`.")
@Default.String("{_metadata_table}_log")
String getOutputStagingTableNameTemplate();

@@ -246,7 +246,7 @@ public interface Options
groupName = "Target",
description = "Template for the dataset to contain replica tables.",
helpText =
"The name of the dataset that contains the replica tables. This parameter supports templates, for example `{_metadata_dataset}` or `my_dataset`. Normally, this parameter is a dataset name. Defaults to: `{_metadata_dataset}`.")
"The name of the dataset that contains the replica tables. This parameter supports templates, for example `{_metadata_dataset}` or `my_dataset`. Normally, this parameter is a dataset name. Defaults to `{_metadata_dataset}`.")
@Default.String("{_metadata_dataset}")
String getOutputDatasetTemplate();

@@ -258,7 +258,7 @@ public interface Options
optional = true,
description = "Template for the name of replica tables.",
helpText =
"The template to use for the name of the replica tables, for example `{_metadata_table}`. Defaults to: `{_metadata_table}`.")
"The template to use for the name of the replica tables, for example `{_metadata_table}`. Defaults to `{_metadata_table}`.")
@Default.String("{_metadata_table}")
String getOutputTableNameTemplate();

@@ -283,7 +283,7 @@ public interface Options
order = 13,
optional = true,
description = "The number of minutes between merges for a given table",
helpText = "The number of minutes between merges for a given table. Defaults to: `5`.")
helpText = "The number of minutes between merges for a given table. Defaults to `5`.")
@Default.Integer(5)
Integer getMergeFrequencyMinutes();

@@ -303,7 +303,7 @@ public interface Options
order = 15,
optional = true,
description = "The number of minutes between DLQ Retries.",
helpText = "The number of minutes between DLQ Retries. Defaults to: `10`.")
helpText = "The number of minutes between DLQ Retries. Defaults to `10`.")
@Default.Integer(10)
Integer getDlqRetryMinutes();

@@ -323,7 +323,7 @@ public interface Options
order = 17,
optional = true,
description = "A switch to disable MERGE queries for the job.",
helpText = "Whether to disable MERGE queries for the job. Defaults to: `true`.")
helpText = "Whether to disable MERGE queries for the job. Defaults to `true`.")
@Default.Boolean(true)
Boolean getApplyMerge();

@@ -336,7 +336,7 @@ public interface Options
parentTriggerValues = {"true"},
description = "Concurrent queries for merge.",
helpText =
"The number of concurrent BigQuery MERGE queries. Only effective when applyMerge is set to true. Defaults to: `30`.")
"The number of concurrent BigQuery MERGE queries. Only effective when applyMerge is set to true. Defaults to `30`.")
@Default.Integer(MergeConfiguration.DEFAULT_MERGE_CONCURRENCY)
Integer getMergeConcurrency();

@@ -347,7 +347,7 @@ public interface Options
optional = true,
description = "Partition retention days.",
helpText =
"The number of days to use for partition retention when running BigQuery merges. Defaults to: `1`.")
"The number of days to use for partition retention when running BigQuery merges. Defaults to `1`.")
@Default.Integer(MergeConfiguration.DEFAULT_PARTITION_RETENTION_DAYS)
Integer getPartitionRetentionDays();

@@ -360,7 +360,7 @@ public interface Options
parentTriggerValues = {"true"},
description = "Use at at-least-once semantics in BigQuery Storage Write API",
helpText =
"This parameter takes effect only if `Use BigQuery Storage Write API` is enabled. If `true`, at-least-once semantics are used for the Storage Write API. Otherwise, exactly-once semantics are used. Defaults to: `false`.",
"This parameter takes effect only if `Use BigQuery Storage Write API` is enabled. If `true`, at-least-once semantics are used for the Storage Write API. Otherwise, exactly-once semantics are used. Defaults to `false`.",
hiddenUi = true)
@Default.Boolean(false)
@Override
@@ -49,11 +49,11 @@ on [Metadata Annotations](https://github.com/GoogleCloudPlatform/DataflowTemplat

### Optional parameters

* **inputFileFormat** : The format of the output file produced by Datastream. For example `avro,json`. Defaults to: `avro`.
* **inputFileFormat** : The format of the output file produced by Datastream. For example `avro,json`. Defaults to `avro`.
* **sessionFilePath** : Session file path in Cloud Storage that contains mapping information from HarbourBridge.
* **projectId** : The Spanner project ID.
* **spannerHost** : The Cloud Spanner endpoint to call in the template. (Example: https://batch-spanner.googleapis.com). Defaults to: https://batch-spanner.googleapis.com.
* **gcsPubSubSubscription** : The Pub/Sub subscription being used in a Cloud Storage notification policy. For the name, use the format of `projects/<PROJECT_ID>/subscriptions/<SUBSCRIPTION_NAME>`.
* **gcsPubSubSubscription** : The Pub/Sub subscription being used in a Cloud Storage notification policy. For the name, use the format `projects/<PROJECT_ID>/subscriptions/<SUBSCRIPTION_NAME>`.
* **shadowTablePrefix** : The prefix used to name shadow tables. Default: `shadow_`.
* **shouldCreateShadowTables** : This flag indicates whether shadow tables must be created in Cloud Spanner database. Defaults to: true.
* **rfcStartDateTime** : The starting DateTime used to fetch from Cloud Storage (https://tools.ietf.org/html/rfc3339). Defaults to: 1970-01-01T00:00:00.00Z.
@@ -220,7 +220,7 @@ public interface Options extends PipelineOptions, StreamingOptions {
description = "The Pub/Sub subscription being used in a Cloud Storage notification policy.",
helpText =
"The Pub/Sub subscription being used in a Cloud Storage notification policy. For the name,"
+ " use the format of `projects/<PROJECT_ID>/subscriptions/<SUBSCRIPTION_NAME>`.")
+ " use the format `projects/<PROJECT_ID>/subscriptions/<SUBSCRIPTION_NAME>`.")
String getGcsPubSubSubscription();

void setGcsPubSubSubscription(String value);
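
As a hedged illustration of the expected name format (not part of this change), the full subscription string can be built or validated with the Pub/Sub `ProjectSubscriptionName` helper; the project and subscription IDs below are placeholders:

```java
import com.google.pubsub.v1.ProjectSubscriptionName;

public class SubscriptionNameExample {
  public static void main(String[] args) {
    // Placeholder IDs for illustration only.
    String fullName =
        ProjectSubscriptionName.of("my-project", "gcs-notifications-sub").toString();
    // fullName == "projects/my-project/subscriptions/gcs-notifications-sub"

    // parse() rejects strings that do not follow the projects/.../subscriptions/... pattern.
    ProjectSubscriptionName parsed = ProjectSubscriptionName.parse(fullName);
    System.out.println(parsed.getProject() + " / " + parsed.getSubscription());
  }
}
```

The subscription itself is expected to be attached to the topic configured in the bucket's Cloud Storage notification policy.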
@@ -245,7 +245,7 @@ public interface ElasticsearchWriteOptions extends PipelineOptions {
optional = true,
description = "Trust self-signed certificate",
helpText =
"Whether to trust self-signed certificate or not. An Elasticsearch instance installed might have a self-signed certificate. To bypass validation for the certificate, set to `true`. Defaults to `false`.")
"Whether to trust self-signed certificate or not. An Elasticsearch instance installed might have a self-signed certificate, Enable this to true to by-pass the validation on SSL certificate. (Defaults to: `false`)")
@Default.Boolean(false)
Boolean getTrustSelfSignedCerts();

@@ -310,7 +310,7 @@ public interface ElasticsearchWriteOptions extends PipelineOptions {
+ "is required when you use Secret Manager or KMS. If `apiKeySource` is set to `KMS`, "
+ "`apiKeyKMSEncryptionKey` and encrypted apiKey must be provided. If `apiKeySource` is set to "
+ "`SECRET_MANAGER`, `apiKeySecretId` must be provided. If `apiKeySource` is set to `PLAINTEXT`, "
+ "apiKey must be provided.")
+ "`apiKey` must be provided.")
@Default.String("PLAINTEXT")
String getApiKeySource();
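
A rough, hypothetical sketch of how the three `apiKeySource` modes described above could be resolved; this is not the template's actual implementation, and only the Secret Manager branch is shown concretely:

```java
import com.google.cloud.secretmanager.v1.SecretManagerServiceClient;

public class ApiKeyResolutionSketch {
  // Hypothetical helper illustrating the modes named in the helpText above.
  static String resolveApiKey(String apiKeySource, String apiKey, String apiKeySecretId)
      throws Exception {
    switch (apiKeySource) {
      case "PLAINTEXT":
        return apiKey; // the apiKey value is used as-is
      case "SECRET_MANAGER":
        // apiKeySecretId is assumed to be a full secret version resource name.
        try (SecretManagerServiceClient client = SecretManagerServiceClient.create()) {
          return client
              .accessSecretVersion(apiKeySecretId)
              .getPayload()
              .getData()
              .toStringUtf8();
        }
      case "KMS":
        // apiKey would hold a value encrypted with apiKeyKMSEncryptionKey; decryption omitted here.
        throw new UnsupportedOperationException("KMS decryption not shown in this sketch");
      default:
        throw new IllegalArgumentException("Unknown apiKeySource: " + apiKeySource);
    }
  }
}
```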

