diff --git a/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/BasicYarnAutoscalingConfig.java b/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/BasicYarnAutoscalingConfig.java index 8faca2f8..56895ed9 100644 --- a/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/BasicYarnAutoscalingConfig.java +++ b/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/BasicYarnAutoscalingConfig.java @@ -209,11 +209,14 @@ public com.google.protobuf.DurationOrBuilder getGracefulDecommissionTimeoutOrBui * * *
- * Required. Fraction of average pending memory in the last cooldown period + * Required. Fraction of average YARN pending memory in the last cooldown period * for which to add workers. A scale-up factor of 1.0 will result in scaling * up so that there is no pending memory remaining after the update (more * aggressive scaling). A scale-up factor closer to 0 will result in a smaller * magnitude of scaling up (less aggressive scaling). + * See [How autoscaling + * works](/dataproc/docs/concepts/configuring-clusters/autoscaling#how_autoscaling_works) + * for more information. * Bounds: [0.0, 1.0]. ** @@ -232,11 +235,14 @@ public double getScaleUpFactor() { * * *
- * Required. Fraction of average pending memory in the last cooldown period + * Required. Fraction of average YARN pending memory in the last cooldown period * for which to remove workers. A scale-down factor of 1 will result in * scaling down so that there is no available memory remaining after the * update (more aggressive scaling). A scale-down factor of 0 disables * removing workers, which can be beneficial for autoscaling a single job. + * See [How autoscaling + * works](/dataproc/docs/concepts/configuring-clusters/autoscaling#how_autoscaling_works) + * for more information. * Bounds: [0.0, 1.0]. ** @@ -952,11 +958,14 @@ public com.google.protobuf.DurationOrBuilder getGracefulDecommissionTimeoutOrBui * * *
- * Required. Fraction of average pending memory in the last cooldown period + * Required. Fraction of average YARN pending memory in the last cooldown period * for which to add workers. A scale-up factor of 1.0 will result in scaling * up so that there is no pending memory remaining after the update (more * aggressive scaling). A scale-up factor closer to 0 will result in a smaller * magnitude of scaling up (less aggressive scaling). + * See [How autoscaling + * works](/dataproc/docs/concepts/configuring-clusters/autoscaling#how_autoscaling_works) + * for more information. * Bounds: [0.0, 1.0]. ** @@ -972,11 +981,14 @@ public double getScaleUpFactor() { * * *
- * Required. Fraction of average pending memory in the last cooldown period + * Required. Fraction of average YARN pending memory in the last cooldown period * for which to add workers. A scale-up factor of 1.0 will result in scaling * up so that there is no pending memory remaining after the update (more * aggressive scaling). A scale-up factor closer to 0 will result in a smaller * magnitude of scaling up (less aggressive scaling). + * See [How autoscaling + * works](/dataproc/docs/concepts/configuring-clusters/autoscaling#how_autoscaling_works) + * for more information. * Bounds: [0.0, 1.0]. ** @@ -995,11 +1007,14 @@ public Builder setScaleUpFactor(double value) { * * *
- * Required. Fraction of average pending memory in the last cooldown period + * Required. Fraction of average YARN pending memory in the last cooldown period * for which to add workers. A scale-up factor of 1.0 will result in scaling * up so that there is no pending memory remaining after the update (more * aggressive scaling). A scale-up factor closer to 0 will result in a smaller * magnitude of scaling up (less aggressive scaling). + * See [How autoscaling + * works](/dataproc/docs/concepts/configuring-clusters/autoscaling#how_autoscaling_works) + * for more information. * Bounds: [0.0, 1.0]. ** @@ -1019,11 +1034,14 @@ public Builder clearScaleUpFactor() { * * *
- * Required. Fraction of average pending memory in the last cooldown period + * Required. Fraction of average YARN pending memory in the last cooldown period * for which to remove workers. A scale-down factor of 1 will result in * scaling down so that there is no available memory remaining after the * update (more aggressive scaling). A scale-down factor of 0 disables * removing workers, which can be beneficial for autoscaling a single job. + * See [How autoscaling + * works](/dataproc/docs/concepts/configuring-clusters/autoscaling#how_autoscaling_works) + * for more information. * Bounds: [0.0, 1.0]. ** @@ -1039,11 +1057,14 @@ public double getScaleDownFactor() { * * *
- * Required. Fraction of average pending memory in the last cooldown period + * Required. Fraction of average YARN pending memory in the last cooldown period * for which to remove workers. A scale-down factor of 1 will result in * scaling down so that there is no available memory remaining after the * update (more aggressive scaling). A scale-down factor of 0 disables * removing workers, which can be beneficial for autoscaling a single job. + * See [How autoscaling + * works](/dataproc/docs/concepts/configuring-clusters/autoscaling#how_autoscaling_works) + * for more information. * Bounds: [0.0, 1.0]. ** @@ -1062,11 +1083,14 @@ public Builder setScaleDownFactor(double value) { * * *
- * Required. Fraction of average pending memory in the last cooldown period + * Required. Fraction of average YARN pending memory in the last cooldown period * for which to remove workers. A scale-down factor of 1 will result in * scaling down so that there is no available memory remaining after the * update (more aggressive scaling). A scale-down factor of 0 disables * removing workers, which can be beneficial for autoscaling a single job. + * See [How autoscaling + * works](/dataproc/docs/concepts/configuring-clusters/autoscaling#how_autoscaling_works) + * for more information. * Bounds: [0.0, 1.0]. ** diff --git a/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/BasicYarnAutoscalingConfigOrBuilder.java b/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/BasicYarnAutoscalingConfigOrBuilder.java index cae882bd..8cb39296 100644 --- a/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/BasicYarnAutoscalingConfigOrBuilder.java +++ b/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/BasicYarnAutoscalingConfigOrBuilder.java @@ -80,11 +80,14 @@ public interface BasicYarnAutoscalingConfigOrBuilder * * *
- * Required. Fraction of average pending memory in the last cooldown period + * Required. Fraction of average YARN pending memory in the last cooldown period * for which to add workers. A scale-up factor of 1.0 will result in scaling * up so that there is no pending memory remaining after the update (more * aggressive scaling). A scale-up factor closer to 0 will result in a smaller * magnitude of scaling up (less aggressive scaling). + * See [How autoscaling + * works](/dataproc/docs/concepts/configuring-clusters/autoscaling#how_autoscaling_works) + * for more information. * Bounds: [0.0, 1.0]. ** @@ -98,11 +101,14 @@ public interface BasicYarnAutoscalingConfigOrBuilder * * *
- * Required. Fraction of average pending memory in the last cooldown period + * Required. Fraction of average YARN pending memory in the last cooldown period * for which to remove workers. A scale-down factor of 1 will result in * scaling down so that there is no available memory remaining after the * update (more aggressive scaling). A scale-down factor of 0 disables * removing workers, which can be beneficial for autoscaling a single job. + * See [How autoscaling + * works](/dataproc/docs/concepts/configuring-clusters/autoscaling#how_autoscaling_works) + * for more information. * Bounds: [0.0, 1.0]. ** diff --git a/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/ClusterConfig.java b/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/ClusterConfig.java index 65d155c5..4e903d22 100644 --- a/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/ClusterConfig.java +++ b/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/ClusterConfig.java @@ -39,6 +39,7 @@ private ClusterConfig(com.google.protobuf.GeneratedMessageV3.Builder> builder) private ClusterConfig() { configBucket_ = ""; + tempBucket_ = ""; initializationActions_ = java.util.Collections.emptyList(); } @@ -79,6 +80,13 @@ private ClusterConfig( configBucket_ = s; break; } + case 18: + { + java.lang.String s = input.readStringRequireUtf8(); + + tempBucket_ = s; + break; + } case 66: { com.google.cloud.dataproc.v1.GceClusterConfig.Builder subBuilder = null; @@ -235,6 +243,22 @@ private ClusterConfig( autoscalingConfig_ = subBuilder.buildPartial(); } + break; + } + case 154: + { + com.google.cloud.dataproc.v1.EndpointConfig.Builder subBuilder = null; + if (endpointConfig_ != null) { + subBuilder = endpointConfig_.toBuilder(); + } + endpointConfig_ = + input.readMessage( + com.google.cloud.dataproc.v1.EndpointConfig.parser(), extensionRegistry); + if (subBuilder != null) { + subBuilder.mergeFrom(endpointConfig_); + endpointConfig_ = subBuilder.buildPartial(); + } + break; } default: @@ -339,6 +363,71 @@ public com.google.protobuf.ByteString getConfigBucketBytes() { } } + public static final int TEMP_BUCKET_FIELD_NUMBER = 2; + private volatile java.lang.Object tempBucket_; + /** + * + * + *
+ * Optional. A Cloud Storage bucket used to store ephemeral cluster and jobs data, + * such as Spark and MapReduce history files. + * If you do not specify a temp bucket, + * Dataproc will determine a Cloud Storage location (US, + * ASIA, or EU) for your cluster's temp bucket according to the + * Compute Engine zone where your cluster is deployed, and then create + * and manage this project-level, per-location bucket. The default bucket has + * a TTL of 90 days, but you can use any TTL (or none) if you specify a + * bucket. + *+ * + *
string temp_bucket = 2 [(.google.api.field_behavior) = OPTIONAL];
+ *
+ * @return The tempBucket.
+ */
+ @java.lang.Override
+ public java.lang.String getTempBucket() {
+ java.lang.Object ref = tempBucket_;
+ if (ref instanceof java.lang.String) {
+ return (java.lang.String) ref;
+ } else {
+ com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref;
+ java.lang.String s = bs.toStringUtf8();
+ tempBucket_ = s;
+ return s;
+ }
+ }
+ /**
+ *
+ *
+ * + * Optional. A Cloud Storage bucket used to store ephemeral cluster and jobs data, + * such as Spark and MapReduce history files. + * If you do not specify a temp bucket, + * Dataproc will determine a Cloud Storage location (US, + * ASIA, or EU) for your cluster's temp bucket according to the + * Compute Engine zone where your cluster is deployed, and then create + * and manage this project-level, per-location bucket. The default bucket has + * a TTL of 90 days, but you can use any TTL (or none) if you specify a + * bucket. + *+ * + *
string temp_bucket = 2 [(.google.api.field_behavior) = OPTIONAL];
+ *
+ * @return The bytes for tempBucket.
+ */
+ @java.lang.Override
+ public com.google.protobuf.ByteString getTempBucketBytes() {
+ java.lang.Object ref = tempBucket_;
+ if (ref instanceof java.lang.String) {
+ com.google.protobuf.ByteString b =
+ com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref);
+ tempBucket_ = b;
+ return b;
+ } else {
+ return (com.google.protobuf.ByteString) ref;
+ }
+ }
+
public static final int GCE_CLUSTER_CONFIG_FIELD_NUMBER = 8;
private com.google.cloud.dataproc.v1.GceClusterConfig gceClusterConfig_;
/**
@@ -978,6 +1067,60 @@ public com.google.cloud.dataproc.v1.LifecycleConfigOrBuilder getLifecycleConfigO
return getLifecycleConfig();
}
+ public static final int ENDPOINT_CONFIG_FIELD_NUMBER = 19;
+ private com.google.cloud.dataproc.v1.EndpointConfig endpointConfig_;
+ /**
+ *
+ *
+ * + * Optional. Port/endpoint configuration for this cluster + *+ * + *
+ * .google.cloud.dataproc.v1.EndpointConfig endpoint_config = 19 [(.google.api.field_behavior) = OPTIONAL];
+ *
+ *
+ * @return Whether the endpointConfig field is set.
+ */
+ @java.lang.Override
+ public boolean hasEndpointConfig() {
+ return endpointConfig_ != null;
+ }
+ /**
+ *
+ *
+ * + * Optional. Port/endpoint configuration for this cluster + *+ * + *
+ * .google.cloud.dataproc.v1.EndpointConfig endpoint_config = 19 [(.google.api.field_behavior) = OPTIONAL];
+ *
+ *
+ * @return The endpointConfig.
+ */
+ @java.lang.Override
+ public com.google.cloud.dataproc.v1.EndpointConfig getEndpointConfig() {
+ return endpointConfig_ == null
+ ? com.google.cloud.dataproc.v1.EndpointConfig.getDefaultInstance()
+ : endpointConfig_;
+ }
+ /**
+ *
+ *
+ * + * Optional. Port/endpoint configuration for this cluster + *+ * + *
+ * .google.cloud.dataproc.v1.EndpointConfig endpoint_config = 19 [(.google.api.field_behavior) = OPTIONAL];
+ *
+ */
+ @java.lang.Override
+ public com.google.cloud.dataproc.v1.EndpointConfigOrBuilder getEndpointConfigOrBuilder() {
+ return getEndpointConfig();
+ }
+
private byte memoizedIsInitialized = -1;
@java.lang.Override
@@ -995,6 +1138,9 @@ public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io
if (!getConfigBucketBytes().isEmpty()) {
com.google.protobuf.GeneratedMessageV3.writeString(output, 1, configBucket_);
}
+ if (!getTempBucketBytes().isEmpty()) {
+ com.google.protobuf.GeneratedMessageV3.writeString(output, 2, tempBucket_);
+ }
if (gceClusterConfig_ != null) {
output.writeMessage(8, getGceClusterConfig());
}
@@ -1025,6 +1171,9 @@ public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io
if (autoscalingConfig_ != null) {
output.writeMessage(18, getAutoscalingConfig());
}
+ if (endpointConfig_ != null) {
+ output.writeMessage(19, getEndpointConfig());
+ }
unknownFields.writeTo(output);
}
@@ -1037,6 +1186,9 @@ public int getSerializedSize() {
if (!getConfigBucketBytes().isEmpty()) {
size += com.google.protobuf.GeneratedMessageV3.computeStringSize(1, configBucket_);
}
+ if (!getTempBucketBytes().isEmpty()) {
+ size += com.google.protobuf.GeneratedMessageV3.computeStringSize(2, tempBucket_);
+ }
if (gceClusterConfig_ != null) {
size += com.google.protobuf.CodedOutputStream.computeMessageSize(8, getGceClusterConfig());
}
@@ -1070,6 +1222,9 @@ public int getSerializedSize() {
if (autoscalingConfig_ != null) {
size += com.google.protobuf.CodedOutputStream.computeMessageSize(18, getAutoscalingConfig());
}
+ if (endpointConfig_ != null) {
+ size += com.google.protobuf.CodedOutputStream.computeMessageSize(19, getEndpointConfig());
+ }
size += unknownFields.getSerializedSize();
memoizedSize = size;
return size;
@@ -1087,6 +1242,7 @@ public boolean equals(final java.lang.Object obj) {
(com.google.cloud.dataproc.v1.ClusterConfig) obj;
if (!getConfigBucket().equals(other.getConfigBucket())) return false;
+ if (!getTempBucket().equals(other.getTempBucket())) return false;
if (hasGceClusterConfig() != other.hasGceClusterConfig()) return false;
if (hasGceClusterConfig()) {
if (!getGceClusterConfig().equals(other.getGceClusterConfig())) return false;
@@ -1124,6 +1280,10 @@ public boolean equals(final java.lang.Object obj) {
if (hasLifecycleConfig()) {
if (!getLifecycleConfig().equals(other.getLifecycleConfig())) return false;
}
+ if (hasEndpointConfig() != other.hasEndpointConfig()) return false;
+ if (hasEndpointConfig()) {
+ if (!getEndpointConfig().equals(other.getEndpointConfig())) return false;
+ }
if (!unknownFields.equals(other.unknownFields)) return false;
return true;
}
@@ -1137,6 +1297,8 @@ public int hashCode() {
hash = (19 * hash) + getDescriptor().hashCode();
hash = (37 * hash) + CONFIG_BUCKET_FIELD_NUMBER;
hash = (53 * hash) + getConfigBucket().hashCode();
+ hash = (37 * hash) + TEMP_BUCKET_FIELD_NUMBER;
+ hash = (53 * hash) + getTempBucket().hashCode();
if (hasGceClusterConfig()) {
hash = (37 * hash) + GCE_CLUSTER_CONFIG_FIELD_NUMBER;
hash = (53 * hash) + getGceClusterConfig().hashCode();
@@ -1177,6 +1339,10 @@ public int hashCode() {
hash = (37 * hash) + LIFECYCLE_CONFIG_FIELD_NUMBER;
hash = (53 * hash) + getLifecycleConfig().hashCode();
}
+ if (hasEndpointConfig()) {
+ hash = (37 * hash) + ENDPOINT_CONFIG_FIELD_NUMBER;
+ hash = (53 * hash) + getEndpointConfig().hashCode();
+ }
hash = (29 * hash) + unknownFields.hashCode();
memoizedHashCode = hash;
return hash;
@@ -1326,6 +1492,8 @@ public Builder clear() {
super.clear();
configBucket_ = "";
+ tempBucket_ = "";
+
if (gceClusterConfigBuilder_ == null) {
gceClusterConfig_ = null;
} else {
@@ -1386,6 +1554,12 @@ public Builder clear() {
lifecycleConfig_ = null;
lifecycleConfigBuilder_ = null;
}
+ if (endpointConfigBuilder_ == null) {
+ endpointConfig_ = null;
+ } else {
+ endpointConfig_ = null;
+ endpointConfigBuilder_ = null;
+ }
return this;
}
@@ -1415,6 +1589,7 @@ public com.google.cloud.dataproc.v1.ClusterConfig buildPartial() {
new com.google.cloud.dataproc.v1.ClusterConfig(this);
int from_bitField0_ = bitField0_;
result.configBucket_ = configBucket_;
+ result.tempBucket_ = tempBucket_;
if (gceClusterConfigBuilder_ == null) {
result.gceClusterConfig_ = gceClusterConfig_;
} else {
@@ -1469,6 +1644,11 @@ public com.google.cloud.dataproc.v1.ClusterConfig buildPartial() {
} else {
result.lifecycleConfig_ = lifecycleConfigBuilder_.build();
}
+ if (endpointConfigBuilder_ == null) {
+ result.endpointConfig_ = endpointConfig_;
+ } else {
+ result.endpointConfig_ = endpointConfigBuilder_.build();
+ }
onBuilt();
return result;
}
@@ -1522,6 +1702,10 @@ public Builder mergeFrom(com.google.cloud.dataproc.v1.ClusterConfig other) {
configBucket_ = other.configBucket_;
onChanged();
}
+ if (!other.getTempBucket().isEmpty()) {
+ tempBucket_ = other.tempBucket_;
+ onChanged();
+ }
if (other.hasGceClusterConfig()) {
mergeGceClusterConfig(other.getGceClusterConfig());
}
@@ -1576,6 +1760,9 @@ public Builder mergeFrom(com.google.cloud.dataproc.v1.ClusterConfig other) {
if (other.hasLifecycleConfig()) {
mergeLifecycleConfig(other.getLifecycleConfig());
}
+ if (other.hasEndpointConfig()) {
+ mergeEndpointConfig(other.getEndpointConfig());
+ }
this.mergeUnknownFields(other.unknownFields);
onChanged();
return this;
@@ -1753,6 +1940,152 @@ public Builder setConfigBucketBytes(com.google.protobuf.ByteString value) {
return this;
}
+ private java.lang.Object tempBucket_ = "";
+ /**
+ *
+ *
+ * + * Optional. A Cloud Storage bucket used to store ephemeral cluster and jobs data, + * such as Spark and MapReduce history files. + * If you do not specify a temp bucket, + * Dataproc will determine a Cloud Storage location (US, + * ASIA, or EU) for your cluster's temp bucket according to the + * Compute Engine zone where your cluster is deployed, and then create + * and manage this project-level, per-location bucket. The default bucket has + * a TTL of 90 days, but you can use any TTL (or none) if you specify a + * bucket. + *+ * + *
string temp_bucket = 2 [(.google.api.field_behavior) = OPTIONAL];
+ *
+ * @return The tempBucket.
+ */
+ public java.lang.String getTempBucket() {
+ java.lang.Object ref = tempBucket_;
+ if (!(ref instanceof java.lang.String)) {
+ com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref;
+ java.lang.String s = bs.toStringUtf8();
+ tempBucket_ = s;
+ return s;
+ } else {
+ return (java.lang.String) ref;
+ }
+ }
+ /**
+ *
+ *
+ * + * Optional. A Cloud Storage bucket used to store ephemeral cluster and jobs data, + * such as Spark and MapReduce history files. + * If you do not specify a temp bucket, + * Dataproc will determine a Cloud Storage location (US, + * ASIA, or EU) for your cluster's temp bucket according to the + * Compute Engine zone where your cluster is deployed, and then create + * and manage this project-level, per-location bucket. The default bucket has + * a TTL of 90 days, but you can use any TTL (or none) if you specify a + * bucket. + *+ * + *
string temp_bucket = 2 [(.google.api.field_behavior) = OPTIONAL];
+ *
+ * @return The bytes for tempBucket.
+ */
+ public com.google.protobuf.ByteString getTempBucketBytes() {
+ java.lang.Object ref = tempBucket_;
+ if (ref instanceof java.lang.String) {
+ com.google.protobuf.ByteString b =
+ com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref);
+ tempBucket_ = b;
+ return b;
+ } else {
+ return (com.google.protobuf.ByteString) ref;
+ }
+ }
+ /**
+ *
+ *
+ * + * Optional. A Cloud Storage bucket used to store ephemeral cluster and jobs data, + * such as Spark and MapReduce history files. + * If you do not specify a temp bucket, + * Dataproc will determine a Cloud Storage location (US, + * ASIA, or EU) for your cluster's temp bucket according to the + * Compute Engine zone where your cluster is deployed, and then create + * and manage this project-level, per-location bucket. The default bucket has + * a TTL of 90 days, but you can use any TTL (or none) if you specify a + * bucket. + *+ * + *
string temp_bucket = 2 [(.google.api.field_behavior) = OPTIONAL];
+ *
+ * @param value The tempBucket to set.
+ * @return This builder for chaining.
+ */
+ public Builder setTempBucket(java.lang.String value) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+
+ tempBucket_ = value;
+ onChanged();
+ return this;
+ }
+ /**
+ *
+ *
+ * + * Optional. A Cloud Storage bucket used to store ephemeral cluster and jobs data, + * such as Spark and MapReduce history files. + * If you do not specify a temp bucket, + * Dataproc will determine a Cloud Storage location (US, + * ASIA, or EU) for your cluster's temp bucket according to the + * Compute Engine zone where your cluster is deployed, and then create + * and manage this project-level, per-location bucket. The default bucket has + * a TTL of 90 days, but you can use any TTL (or none) if you specify a + * bucket. + *+ * + *
string temp_bucket = 2 [(.google.api.field_behavior) = OPTIONAL];
+ *
+ * @return This builder for chaining.
+ */
+ public Builder clearTempBucket() {
+
+ tempBucket_ = getDefaultInstance().getTempBucket();
+ onChanged();
+ return this;
+ }
+ /**
+ *
+ *
+ * + * Optional. A Cloud Storage bucket used to store ephemeral cluster and jobs data, + * such as Spark and MapReduce history files. + * If you do not specify a temp bucket, + * Dataproc will determine a Cloud Storage location (US, + * ASIA, or EU) for your cluster's temp bucket according to the + * Compute Engine zone where your cluster is deployed, and then create + * and manage this project-level, per-location bucket. The default bucket has + * a TTL of 90 days, but you can use any TTL (or none) if you specify a + * bucket. + *+ * + *
string temp_bucket = 2 [(.google.api.field_behavior) = OPTIONAL];
+ *
+ * @param value The bytes for tempBucket to set.
+ * @return This builder for chaining.
+ */
+ public Builder setTempBucketBytes(com.google.protobuf.ByteString value) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ checkByteStringIsUtf8(value);
+
+ tempBucket_ = value;
+ onChanged();
+ return this;
+ }
+
private com.google.cloud.dataproc.v1.GceClusterConfig gceClusterConfig_;
private com.google.protobuf.SingleFieldBuilderV3<
com.google.cloud.dataproc.v1.GceClusterConfig,
@@ -4238,6 +4571,210 @@ public com.google.cloud.dataproc.v1.LifecycleConfigOrBuilder getLifecycleConfigO
return lifecycleConfigBuilder_;
}
+ private com.google.cloud.dataproc.v1.EndpointConfig endpointConfig_;
+ private com.google.protobuf.SingleFieldBuilderV3<
+ com.google.cloud.dataproc.v1.EndpointConfig,
+ com.google.cloud.dataproc.v1.EndpointConfig.Builder,
+ com.google.cloud.dataproc.v1.EndpointConfigOrBuilder>
+ endpointConfigBuilder_;
+ /**
+ *
+ *
+ * + * Optional. Port/endpoint configuration for this cluster + *+ * + *
+ * .google.cloud.dataproc.v1.EndpointConfig endpoint_config = 19 [(.google.api.field_behavior) = OPTIONAL];
+ *
+ *
+ * @return Whether the endpointConfig field is set.
+ */
+ public boolean hasEndpointConfig() {
+ return endpointConfigBuilder_ != null || endpointConfig_ != null;
+ }
+ /**
+ *
+ *
+ * + * Optional. Port/endpoint configuration for this cluster + *+ * + *
+ * .google.cloud.dataproc.v1.EndpointConfig endpoint_config = 19 [(.google.api.field_behavior) = OPTIONAL];
+ *
+ *
+ * @return The endpointConfig.
+ */
+ public com.google.cloud.dataproc.v1.EndpointConfig getEndpointConfig() {
+ if (endpointConfigBuilder_ == null) {
+ return endpointConfig_ == null
+ ? com.google.cloud.dataproc.v1.EndpointConfig.getDefaultInstance()
+ : endpointConfig_;
+ } else {
+ return endpointConfigBuilder_.getMessage();
+ }
+ }
+ /**
+ *
+ *
+ * + * Optional. Port/endpoint configuration for this cluster + *+ * + *
+ * .google.cloud.dataproc.v1.EndpointConfig endpoint_config = 19 [(.google.api.field_behavior) = OPTIONAL];
+ *
+ */
+ public Builder setEndpointConfig(com.google.cloud.dataproc.v1.EndpointConfig value) {
+ if (endpointConfigBuilder_ == null) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ endpointConfig_ = value;
+ onChanged();
+ } else {
+ endpointConfigBuilder_.setMessage(value);
+ }
+
+ return this;
+ }
+ /**
+ *
+ *
+ * + * Optional. Port/endpoint configuration for this cluster + *+ * + *
+ * .google.cloud.dataproc.v1.EndpointConfig endpoint_config = 19 [(.google.api.field_behavior) = OPTIONAL];
+ *
+ */
+ public Builder setEndpointConfig(
+ com.google.cloud.dataproc.v1.EndpointConfig.Builder builderForValue) {
+ if (endpointConfigBuilder_ == null) {
+ endpointConfig_ = builderForValue.build();
+ onChanged();
+ } else {
+ endpointConfigBuilder_.setMessage(builderForValue.build());
+ }
+
+ return this;
+ }
+ /**
+ *
+ *
+ * + * Optional. Port/endpoint configuration for this cluster + *+ * + *
+ * .google.cloud.dataproc.v1.EndpointConfig endpoint_config = 19 [(.google.api.field_behavior) = OPTIONAL];
+ *
+ */
+ public Builder mergeEndpointConfig(com.google.cloud.dataproc.v1.EndpointConfig value) {
+ if (endpointConfigBuilder_ == null) {
+ if (endpointConfig_ != null) {
+ endpointConfig_ =
+ com.google.cloud.dataproc.v1.EndpointConfig.newBuilder(endpointConfig_)
+ .mergeFrom(value)
+ .buildPartial();
+ } else {
+ endpointConfig_ = value;
+ }
+ onChanged();
+ } else {
+ endpointConfigBuilder_.mergeFrom(value);
+ }
+
+ return this;
+ }
+ /**
+ *
+ *
+ * + * Optional. Port/endpoint configuration for this cluster + *+ * + *
+ * .google.cloud.dataproc.v1.EndpointConfig endpoint_config = 19 [(.google.api.field_behavior) = OPTIONAL];
+ *
+ */
+ public Builder clearEndpointConfig() {
+ if (endpointConfigBuilder_ == null) {
+ endpointConfig_ = null;
+ onChanged();
+ } else {
+ endpointConfig_ = null;
+ endpointConfigBuilder_ = null;
+ }
+
+ return this;
+ }
+ /**
+ *
+ *
+ * + * Optional. Port/endpoint configuration for this cluster + *+ * + *
+ * .google.cloud.dataproc.v1.EndpointConfig endpoint_config = 19 [(.google.api.field_behavior) = OPTIONAL];
+ *
+ */
+ public com.google.cloud.dataproc.v1.EndpointConfig.Builder getEndpointConfigBuilder() {
+
+ onChanged();
+ return getEndpointConfigFieldBuilder().getBuilder();
+ }
+ /**
+ *
+ *
+ * + * Optional. Port/endpoint configuration for this cluster + *+ * + *
+ * .google.cloud.dataproc.v1.EndpointConfig endpoint_config = 19 [(.google.api.field_behavior) = OPTIONAL];
+ *
+ */
+ public com.google.cloud.dataproc.v1.EndpointConfigOrBuilder getEndpointConfigOrBuilder() {
+ if (endpointConfigBuilder_ != null) {
+ return endpointConfigBuilder_.getMessageOrBuilder();
+ } else {
+ return endpointConfig_ == null
+ ? com.google.cloud.dataproc.v1.EndpointConfig.getDefaultInstance()
+ : endpointConfig_;
+ }
+ }
+ /**
+ *
+ *
+ * + * Optional. Port/endpoint configuration for this cluster + *+ * + *
+ * .google.cloud.dataproc.v1.EndpointConfig endpoint_config = 19 [(.google.api.field_behavior) = OPTIONAL];
+ *
+ */
+ private com.google.protobuf.SingleFieldBuilderV3<
+ com.google.cloud.dataproc.v1.EndpointConfig,
+ com.google.cloud.dataproc.v1.EndpointConfig.Builder,
+ com.google.cloud.dataproc.v1.EndpointConfigOrBuilder>
+ getEndpointConfigFieldBuilder() {
+ if (endpointConfigBuilder_ == null) {
+ endpointConfigBuilder_ =
+ new com.google.protobuf.SingleFieldBuilderV3<
+ com.google.cloud.dataproc.v1.EndpointConfig,
+ com.google.cloud.dataproc.v1.EndpointConfig.Builder,
+ com.google.cloud.dataproc.v1.EndpointConfigOrBuilder>(
+ getEndpointConfig(), getParentForChildren(), isClean());
+ endpointConfig_ = null;
+ }
+ return endpointConfigBuilder_;
+ }
+
@java.lang.Override
public final Builder setUnknownFields(final com.google.protobuf.UnknownFieldSet unknownFields) {
return super.setUnknownFields(unknownFields);
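Taken together, the ClusterConfig changes above add a string field (`temp_bucket`, field 2) and a message field (`endpoint_config`, field 19) with the usual generated accessors. A minimal usage sketch, assuming the `EndpointConfig` message generated alongside this diff exposes a standard `setEnableHttpPortAccess(boolean)` builder method for its `enable_http_port_access` field (that class is not shown here), and using hypothetical bucket names:

```java
import com.google.cloud.dataproc.v1.ClusterConfig;
import com.google.cloud.dataproc.v1.EndpointConfig;

public class ClusterConfigSketch {
  public static void main(String[] args) {
    // setTempBucket and setEndpointConfig come from this diff;
    // setEnableHttpPortAccess is assumed from the enable_http_port_access
    // bool field visible in the descriptor, since EndpointConfig.java
    // itself is generated in a separate file.
    ClusterConfig config =
        ClusterConfig.newBuilder()
            .setTempBucket("my-temp-bucket") // hypothetical bucket name
            .setEndpointConfig(
                EndpointConfig.newBuilder().setEnableHttpPortAccess(true))
            .build();

    System.out.println(config.getTempBucket());     // "my-temp-bucket"
    System.out.println(config.hasEndpointConfig()); // true
  }
}
```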
diff --git a/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/ClusterConfigOrBuilder.java b/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/ClusterConfigOrBuilder.java
index 76f2b5e8..faec0811 100644
--- a/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/ClusterConfigOrBuilder.java
+++ b/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/ClusterConfigOrBuilder.java
@@ -64,6 +64,47 @@ public interface ClusterConfigOrBuilder
*/
com.google.protobuf.ByteString getConfigBucketBytes();
+ /**
+ *
+ *
+ * + * Optional. A Cloud Storage bucket used to store ephemeral cluster and jobs data, + * such as Spark and MapReduce history files. + * If you do not specify a temp bucket, + * Dataproc will determine a Cloud Storage location (US, + * ASIA, or EU) for your cluster's temp bucket according to the + * Compute Engine zone where your cluster is deployed, and then create + * and manage this project-level, per-location bucket. The default bucket has + * a TTL of 90 days, but you can use any TTL (or none) if you specify a + * bucket. + *+ * + *
string temp_bucket = 2 [(.google.api.field_behavior) = OPTIONAL];
+ *
+ * @return The tempBucket.
+ */
+ java.lang.String getTempBucket();
+ /**
+ *
+ *
+ * + * Optional. A Cloud Storage bucket used to store ephemeral cluster and jobs data, + * such as Spark and MapReduce history files. + * If you do not specify a temp bucket, + * Dataproc will determine a Cloud Storage location (US, + * ASIA, or EU) for your cluster's temp bucket according to the + * Compute Engine zone where your cluster is deployed, and then create + * and manage this project-level, per-location bucket. The default bucket has + * a TTL of 90 days, but you can use any TTL (or none) if you specify a + * bucket. + *+ * + *
string temp_bucket = 2 [(.google.api.field_behavior) = OPTIONAL];
+ *
+ * @return The bytes for tempBucket.
+ */
+ com.google.protobuf.ByteString getTempBucketBytes();
+
/**
*
*
@@ -566,4 +607,45 @@ com.google.cloud.dataproc.v1.NodeInitializationActionOrBuilder getInitialization
*
*/
com.google.cloud.dataproc.v1.LifecycleConfigOrBuilder getLifecycleConfigOrBuilder();
+
+ /**
+ *
+ *
+ * + * Optional. Port/endpoint configuration for this cluster + *+ * + *
+ * .google.cloud.dataproc.v1.EndpointConfig endpoint_config = 19 [(.google.api.field_behavior) = OPTIONAL];
+ *
+ *
+ * @return Whether the endpointConfig field is set.
+ */
+ boolean hasEndpointConfig();
+ /**
+ *
+ *
+ * + * Optional. Port/endpoint configuration for this cluster + *+ * + *
+ * .google.cloud.dataproc.v1.EndpointConfig endpoint_config = 19 [(.google.api.field_behavior) = OPTIONAL];
+ *
+ *
+ * @return The endpointConfig.
+ */
+ com.google.cloud.dataproc.v1.EndpointConfig getEndpointConfig();
+ /**
+ *
+ *
+ * + * Optional. Port/endpoint configuration for this cluster + *+ * + *
+ * .google.cloud.dataproc.v1.EndpointConfig endpoint_config = 19 [(.google.api.field_behavior) = OPTIONAL];
+ *
+ */
+ com.google.cloud.dataproc.v1.EndpointConfigOrBuilder getEndpointConfigOrBuilder();
}
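For the scale-factor javadoc reworded at the top of this diff, the corresponding builder calls combine as in the sketch below. `setScaleUpFactor` and `setScaleDownFactor` appear in the hunk headers above; the values and the `setGracefulDecommissionTimeout(Duration)` setter name are assumptions, the latter inferred from the `getGracefulDecommissionTimeoutOrBuilder` accessor shown in the hunk headers and standard protoc codegen:

```java
import com.google.cloud.dataproc.v1.BasicYarnAutoscalingConfig;
import com.google.protobuf.Duration;

public class YarnAutoscalingSketch {
  public static void main(String[] args) {
    BasicYarnAutoscalingConfig yarn =
        BasicYarnAutoscalingConfig.newBuilder()
            // Fractions of average YARN pending/available memory over the
            // last cooldown period; both bounded to [0.0, 1.0].
            .setScaleUpFactor(0.5)   // add capacity for half the pending memory
            .setScaleDownFactor(1.0) // scale down until no memory sits idle
            // Setter name inferred from the getGracefulDecommissionTimeout*
            // accessors visible in the hunk headers (an assumption).
            .setGracefulDecommissionTimeout(
                Duration.newBuilder().setSeconds(600).build())
            .build();

    System.out.println(yarn.getScaleUpFactor());
  }
}
```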
diff --git a/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/ClustersProto.java b/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/ClustersProto.java
index dd0174cd..b4e41d27 100644
--- a/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/ClustersProto.java
+++ b/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/ClustersProto.java
@@ -39,6 +39,14 @@ public static void registerAllExtensions(com.google.protobuf.ExtensionRegistry r
internal_static_google_cloud_dataproc_v1_ClusterConfig_descriptor;
static final com.google.protobuf.GeneratedMessageV3.FieldAccessorTable
internal_static_google_cloud_dataproc_v1_ClusterConfig_fieldAccessorTable;
+ static final com.google.protobuf.Descriptors.Descriptor
+ internal_static_google_cloud_dataproc_v1_EndpointConfig_descriptor;
+ static final com.google.protobuf.GeneratedMessageV3.FieldAccessorTable
+ internal_static_google_cloud_dataproc_v1_EndpointConfig_fieldAccessorTable;
+ static final com.google.protobuf.Descriptors.Descriptor
+ internal_static_google_cloud_dataproc_v1_EndpointConfig_HttpPortsEntry_descriptor;
+ static final com.google.protobuf.GeneratedMessageV3.FieldAccessorTable
+ internal_static_google_cloud_dataproc_v1_EndpointConfig_HttpPortsEntry_fieldAccessorTable;
static final com.google.protobuf.Descriptors.Descriptor
internal_static_google_cloud_dataproc_v1_AutoscalingConfig_descriptor;
static final com.google.protobuf.GeneratedMessageV3.FieldAccessorTable
@@ -175,185 +183,196 @@ public static com.google.protobuf.Descriptors.FileDescriptor getDescriptor() {
+ "usterStatusB\003\340A\003\022\031\n\014cluster_uuid\030\006 \001(\tB\003"
+ "\340A\003\022>\n\007metrics\030\t \001(\0132(.google.cloud.data"
+ "proc.v1.ClusterMetricsB\003\340A\003\032-\n\013LabelsEnt"
- + "ry\022\013\n\003key\030\001 \001(\t\022\r\n\005value\030\002 \001(\t:\0028\001\"\260\006\n\rC"
+ + "ry\022\013\n\003key\030\001 \001(\t\022\r\n\005value\030\002 \001(\t:\0028\001\"\222\007\n\rC"
+ "lusterConfig\022\032\n\rconfig_bucket\030\001 \001(\tB\003\340A\001"
- + "\022K\n\022gce_cluster_config\030\010 \001(\0132*.google.cl"
- + "oud.dataproc.v1.GceClusterConfigB\003\340A\001\022I\n"
- + "\rmaster_config\030\t \001(\0132-.google.cloud.data"
- + "proc.v1.InstanceGroupConfigB\003\340A\001\022I\n\rwork"
- + "er_config\030\n \001(\0132-.google.cloud.dataproc."
- + "v1.InstanceGroupConfigB\003\340A\001\022S\n\027secondary"
- + "_worker_config\030\014 \001(\0132-.google.cloud.data"
- + "proc.v1.InstanceGroupConfigB\003\340A\001\022F\n\017soft"
- + "ware_config\030\r \001(\0132(.google.cloud.datapro"
- + "c.v1.SoftwareConfigB\003\340A\001\022W\n\026initializati"
- + "on_actions\030\013 \003(\01322.google.cloud.dataproc"
- + ".v1.NodeInitializationActionB\003\340A\001\022J\n\021enc"
- + "ryption_config\030\017 \001(\0132*.google.cloud.data"
- + "proc.v1.EncryptionConfigB\003\340A\001\022L\n\022autosca"
- + "ling_config\030\022 \001(\0132+.google.cloud.datapro"
- + "c.v1.AutoscalingConfigB\003\340A\001\022F\n\017security_"
- + "config\030\020 \001(\0132(.google.cloud.dataproc.v1."
- + "SecurityConfigB\003\340A\001\022H\n\020lifecycle_config\030"
- + "\021 \001(\0132).google.cloud.dataproc.v1.Lifecyc"
- + "leConfigB\003\340A\001\",\n\021AutoscalingConfig\022\027\n\npo"
- + "licy_uri\030\001 \001(\tB\003\340A\001\"4\n\020EncryptionConfig\022"
- + " \n\023gce_pd_kms_key_name\030\001 \001(\tB\003\340A\001\"\237\003\n\020Gc"
- + "eClusterConfig\022\025\n\010zone_uri\030\001 \001(\tB\003\340A\001\022\030\n"
- + "\013network_uri\030\002 \001(\tB\003\340A\001\022\033\n\016subnetwork_ur"
- + "i\030\006 \001(\tB\003\340A\001\022\035\n\020internal_ip_only\030\007 \001(\010B\003"
- + "\340A\001\022\034\n\017service_account\030\010 \001(\tB\003\340A\001\022#\n\026ser"
- + "vice_account_scopes\030\003 \003(\tB\003\340A\001\022\014\n\004tags\030\004"
- + " \003(\t\022J\n\010metadata\030\005 \003(\01328.google.cloud.da"
- + "taproc.v1.GceClusterConfig.MetadataEntry"
- + "\022P\n\024reservation_affinity\030\013 \001(\0132-.google."
- + "cloud.dataproc.v1.ReservationAffinityB\003\340"
- + "A\001\032/\n\rMetadataEntry\022\013\n\003key\030\001 \001(\t\022\r\n\005valu"
- + "e\030\002 \001(\t:\0028\001\"\232\003\n\023InstanceGroupConfig\022\032\n\rn"
- + "um_instances\030\001 \001(\005B\003\340A\001\022\033\n\016instance_name"
- + "s\030\002 \003(\tB\003\340A\003\022\026\n\timage_uri\030\003 \001(\tB\003\340A\001\022\035\n\020"
- + "machine_type_uri\030\004 \001(\tB\003\340A\001\022>\n\013disk_conf"
- + "ig\030\005 \001(\0132$.google.cloud.dataproc.v1.Disk"
- + "ConfigB\003\340A\001\022\033\n\016is_preemptible\030\006 \001(\010B\003\340A\003"
- + "\022O\n\024managed_group_config\030\007 \001(\0132,.google."
- + "cloud.dataproc.v1.ManagedGroupConfigB\003\340A"
- + "\003\022F\n\014accelerators\030\010 \003(\0132+.google.cloud.d"
- + "ataproc.v1.AcceleratorConfigB\003\340A\001\022\035\n\020min"
- + "_cpu_platform\030\t \001(\tB\003\340A\001\"c\n\022ManagedGroup"
- + "Config\022#\n\026instance_template_name\030\001 \001(\tB\003"
- + "\340A\003\022(\n\033instance_group_manager_name\030\002 \001(\t"
- + "B\003\340A\003\"L\n\021AcceleratorConfig\022\034\n\024accelerato"
- + "r_type_uri\030\001 \001(\t\022\031\n\021accelerator_count\030\002 "
- + "\001(\005\"f\n\nDiskConfig\022\033\n\016boot_disk_type\030\003 \001("
- + "\tB\003\340A\001\022\036\n\021boot_disk_size_gb\030\001 \001(\005B\003\340A\001\022\033"
- + "\n\016num_local_ssds\030\002 \001(\005B\003\340A\001\"s\n\030NodeIniti"
- + "alizationAction\022\034\n\017executable_file\030\001 \001(\t"
- + "B\003\340A\002\0229\n\021execution_timeout\030\002 \001(\0132\031.googl"
- + "e.protobuf.DurationB\003\340A\001\"\204\003\n\rClusterStat"
- + "us\022A\n\005state\030\001 \001(\0162-.google.cloud.datapro"
- + "c.v1.ClusterStatus.StateB\003\340A\003\022\026\n\006detail\030"
- + "\002 \001(\tB\006\340A\003\340A\001\0229\n\020state_start_time\030\003 \001(\0132"
- + "\032.google.protobuf.TimestampB\003\340A\003\022G\n\010subs"
- + "tate\030\004 \001(\01620.google.cloud.dataproc.v1.Cl"
- + "usterStatus.SubstateB\003\340A\003\"V\n\005State\022\013\n\007UN"
- + "KNOWN\020\000\022\014\n\010CREATING\020\001\022\013\n\007RUNNING\020\002\022\t\n\005ER"
- + "ROR\020\003\022\014\n\010DELETING\020\004\022\014\n\010UPDATING\020\005\"<\n\010Sub"
- + "state\022\017\n\013UNSPECIFIED\020\000\022\r\n\tUNHEALTHY\020\001\022\020\n"
- + "\014STALE_STATUS\020\002\"S\n\016SecurityConfig\022A\n\017ker"
- + "beros_config\030\001 \001(\0132(.google.cloud.datapr"
- + "oc.v1.KerberosConfig\"\220\004\n\016KerberosConfig\022"
- + "\034\n\017enable_kerberos\030\001 \001(\010B\003\340A\001\022(\n\033root_pr"
- + "incipal_password_uri\030\002 \001(\tB\003\340A\002\022\030\n\013kms_k"
- + "ey_uri\030\003 \001(\tB\003\340A\002\022\031\n\014keystore_uri\030\004 \001(\tB"
- + "\003\340A\001\022\033\n\016truststore_uri\030\005 \001(\tB\003\340A\001\022\"\n\025key"
- + "store_password_uri\030\006 \001(\tB\003\340A\001\022\035\n\020key_pas"
- + "sword_uri\030\007 \001(\tB\003\340A\001\022$\n\027truststore_passw"
- + "ord_uri\030\010 \001(\tB\003\340A\001\022$\n\027cross_realm_trust_"
- + "realm\030\t \001(\tB\003\340A\001\022\"\n\025cross_realm_trust_kd"
- + "c\030\n \001(\tB\003\340A\001\022+\n\036cross_realm_trust_admin_"
- + "server\030\013 \001(\tB\003\340A\001\0222\n%cross_realm_trust_s"
- + "hared_password_uri\030\014 \001(\tB\003\340A\001\022\033\n\016kdc_db_"
- + "key_uri\030\r \001(\tB\003\340A\001\022\037\n\022tgt_lifetime_hours"
- + "\030\016 \001(\005B\003\340A\001\022\022\n\005realm\030\017 \001(\tB\003\340A\001\"\371\001\n\016Soft"
- + "wareConfig\022\032\n\rimage_version\030\001 \001(\tB\003\340A\001\022Q"
- + "\n\nproperties\030\002 \003(\01328.google.cloud.datapr"
- + "oc.v1.SoftwareConfig.PropertiesEntryB\003\340A"
- + "\001\022E\n\023optional_components\030\003 \003(\0162#.google."
- + "cloud.dataproc.v1.ComponentB\003\340A\001\0321\n\017Prop"
- + "ertiesEntry\022\013\n\003key\030\001 \001(\t\022\r\n\005value\030\002 \001(\t:"
- + "\0028\001\"\203\002\n\017LifecycleConfig\0227\n\017idle_delete_t"
- + "tl\030\001 \001(\0132\031.google.protobuf.DurationB\003\340A\001"
- + "\022;\n\020auto_delete_time\030\002 \001(\0132\032.google.prot"
- + "obuf.TimestampB\003\340A\001H\000\0229\n\017auto_delete_ttl"
- + "\030\003 \001(\0132\031.google.protobuf.DurationB\003\340A\001H\000"
- + "\0228\n\017idle_start_time\030\004 \001(\0132\032.google.proto"
- + "buf.TimestampB\003\340A\003B\005\n\003ttl\"\232\002\n\016ClusterMet"
- + "rics\022O\n\014hdfs_metrics\030\001 \003(\01329.google.clou"
- + "d.dataproc.v1.ClusterMetrics.HdfsMetrics"
- + "Entry\022O\n\014yarn_metrics\030\002 \003(\01329.google.clo"
- + "ud.dataproc.v1.ClusterMetrics.YarnMetric"
- + "sEntry\0322\n\020HdfsMetricsEntry\022\013\n\003key\030\001 \001(\t\022"
- + "\r\n\005value\030\002 \001(\003:\0028\001\0322\n\020YarnMetricsEntry\022\013"
- + "\n\003key\030\001 \001(\t\022\r\n\005value\030\002 \001(\003:\0028\001\"\226\001\n\024Creat"
- + "eClusterRequest\022\027\n\nproject_id\030\001 \001(\tB\003\340A\002"
- + "\022\023\n\006region\030\003 \001(\tB\003\340A\002\0227\n\007cluster\030\002 \001(\0132!"
- + ".google.cloud.dataproc.v1.ClusterB\003\340A\002\022\027"
- + "\n\nrequest_id\030\004 \001(\tB\003\340A\001\"\256\002\n\024UpdateCluste"
- + "rRequest\022\027\n\nproject_id\030\001 \001(\tB\003\340A\002\022\023\n\006reg"
- + "ion\030\005 \001(\tB\003\340A\002\022\031\n\014cluster_name\030\002 \001(\tB\003\340A"
- + "\002\0227\n\007cluster\030\003 \001(\0132!.google.cloud.datapr"
- + "oc.v1.ClusterB\003\340A\002\022E\n\035graceful_decommiss"
- + "ion_timeout\030\006 \001(\0132\031.google.protobuf.Dura"
- + "tionB\003\340A\001\0224\n\013update_mask\030\004 \001(\0132\032.google."
- + "protobuf.FieldMaskB\003\340A\002\022\027\n\nrequest_id\030\007 "
- + "\001(\tB\003\340A\001\"\223\001\n\024DeleteClusterRequest\022\027\n\npro"
- + "ject_id\030\001 \001(\tB\003\340A\002\022\023\n\006region\030\003 \001(\tB\003\340A\002\022"
- + "\031\n\014cluster_name\030\002 \001(\tB\003\340A\002\022\031\n\014cluster_uu"
- + "id\030\004 \001(\tB\003\340A\001\022\027\n\nrequest_id\030\005 \001(\tB\003\340A\001\"\\"
- + "\n\021GetClusterRequest\022\027\n\nproject_id\030\001 \001(\tB"
+ + "\022\030\n\013temp_bucket\030\002 \001(\tB\003\340A\001\022K\n\022gce_cluste"
+ + "r_config\030\010 \001(\0132*.google.cloud.dataproc.v"
+ + "1.GceClusterConfigB\003\340A\001\022I\n\rmaster_config"
+ + "\030\t \001(\0132-.google.cloud.dataproc.v1.Instan"
+ + "ceGroupConfigB\003\340A\001\022I\n\rworker_config\030\n \001("
+ + "\0132-.google.cloud.dataproc.v1.InstanceGro"
+ + "upConfigB\003\340A\001\022S\n\027secondary_worker_config"
+ + "\030\014 \001(\0132-.google.cloud.dataproc.v1.Instan"
+ + "ceGroupConfigB\003\340A\001\022F\n\017software_config\030\r "
+ + "\001(\0132(.google.cloud.dataproc.v1.SoftwareC"
+ + "onfigB\003\340A\001\022W\n\026initialization_actions\030\013 \003"
+ + "(\01322.google.cloud.dataproc.v1.NodeInitia"
+ + "lizationActionB\003\340A\001\022J\n\021encryption_config"
+ + "\030\017 \001(\0132*.google.cloud.dataproc.v1.Encryp"
+ + "tionConfigB\003\340A\001\022L\n\022autoscaling_config\030\022 "
+ + "\001(\0132+.google.cloud.dataproc.v1.Autoscali"
+ + "ngConfigB\003\340A\001\022F\n\017security_config\030\020 \001(\0132("
+ + ".google.cloud.dataproc.v1.SecurityConfig"
+ + "B\003\340A\001\022H\n\020lifecycle_config\030\021 \001(\0132).google"
+ + ".cloud.dataproc.v1.LifecycleConfigB\003\340A\001\022"
+ + "F\n\017endpoint_config\030\023 \001(\0132(.google.cloud."
+ + "dataproc.v1.EndpointConfigB\003\340A\001\"\272\001\n\016Endp"
+ + "ointConfig\022P\n\nhttp_ports\030\001 \003(\01327.google."
+ + "cloud.dataproc.v1.EndpointConfig.HttpPor"
+ + "tsEntryB\003\340A\003\022$\n\027enable_http_port_access\030"
+ + "\002 \001(\010B\003\340A\001\0320\n\016HttpPortsEntry\022\013\n\003key\030\001 \001("
+ + "\t\022\r\n\005value\030\002 \001(\t:\0028\001\",\n\021AutoscalingConfi"
+ + "g\022\027\n\npolicy_uri\030\001 \001(\tB\003\340A\001\"4\n\020Encryption"
+ + "Config\022 \n\023gce_pd_kms_key_name\030\001 \001(\tB\003\340A\001"
+ + "\"\237\003\n\020GceClusterConfig\022\025\n\010zone_uri\030\001 \001(\tB"
+ + "\003\340A\001\022\030\n\013network_uri\030\002 \001(\tB\003\340A\001\022\033\n\016subnet"
+ + "work_uri\030\006 \001(\tB\003\340A\001\022\035\n\020internal_ip_only\030"
+ + "\007 \001(\010B\003\340A\001\022\034\n\017service_account\030\010 \001(\tB\003\340A\001"
+ + "\022#\n\026service_account_scopes\030\003 \003(\tB\003\340A\001\022\014\n"
+ + "\004tags\030\004 \003(\t\022J\n\010metadata\030\005 \003(\01328.google.c"
+ + "loud.dataproc.v1.GceClusterConfig.Metada"
+ + "taEntry\022P\n\024reservation_affinity\030\013 \001(\0132-."
+ + "google.cloud.dataproc.v1.ReservationAffi"
+ + "nityB\003\340A\001\032/\n\rMetadataEntry\022\013\n\003key\030\001 \001(\t\022"
+ + "\r\n\005value\030\002 \001(\t:\0028\001\"\315\004\n\023InstanceGroupConf"
+ + "ig\022\032\n\rnum_instances\030\001 \001(\005B\003\340A\001\022\033\n\016instan"
+ + "ce_names\030\002 \003(\tB\003\340A\003\022\026\n\timage_uri\030\003 \001(\tB\003"
+ + "\340A\001\022\035\n\020machine_type_uri\030\004 \001(\tB\003\340A\001\022>\n\013di"
+ + "sk_config\030\005 \001(\0132$.google.cloud.dataproc."
+ + "v1.DiskConfigB\003\340A\001\022\033\n\016is_preemptible\030\006 \001"
+ + "(\010B\003\340A\003\022Y\n\016preemptibility\030\n \001(\0162<.google"
+ + ".cloud.dataproc.v1.InstanceGroupConfig.P"
+ + "reemptibilityB\003\340A\001\022O\n\024managed_group_conf"
+ + "ig\030\007 \001(\0132,.google.cloud.dataproc.v1.Mana"
+ + "gedGroupConfigB\003\340A\003\022F\n\014accelerators\030\010 \003("
+ + "\0132+.google.cloud.dataproc.v1.Accelerator"
+ + "ConfigB\003\340A\001\022\035\n\020min_cpu_platform\030\t \001(\tB\003\340"
+ + "A\001\"V\n\016Preemptibility\022\036\n\032PREEMPTIBILITY_U"
+ + "NSPECIFIED\020\000\022\023\n\017NON_PREEMPTIBLE\020\001\022\017\n\013PRE"
+ + "EMPTIBLE\020\002\"c\n\022ManagedGroupConfig\022#\n\026inst"
+ + "ance_template_name\030\001 \001(\tB\003\340A\003\022(\n\033instanc"
+ + "e_group_manager_name\030\002 \001(\tB\003\340A\003\"L\n\021Accel"
+ + "eratorConfig\022\034\n\024accelerator_type_uri\030\001 \001"
+ + "(\t\022\031\n\021accelerator_count\030\002 \001(\005\"f\n\nDiskCon"
+ + "fig\022\033\n\016boot_disk_type\030\003 \001(\tB\003\340A\001\022\036\n\021boot"
+ + "_disk_size_gb\030\001 \001(\005B\003\340A\001\022\033\n\016num_local_ss"
+ + "ds\030\002 \001(\005B\003\340A\001\"s\n\030NodeInitializationActio"
+ + "n\022\034\n\017executable_file\030\001 \001(\tB\003\340A\002\0229\n\021execu"
+ + "tion_timeout\030\002 \001(\0132\031.google.protobuf.Dur"
+ + "ationB\003\340A\001\"\204\003\n\rClusterStatus\022A\n\005state\030\001 "
+ + "\001(\0162-.google.cloud.dataproc.v1.ClusterSt"
+ + "atus.StateB\003\340A\003\022\026\n\006detail\030\002 \001(\tB\006\340A\003\340A\001\022"
+ + "9\n\020state_start_time\030\003 \001(\0132\032.google.proto"
+ + "buf.TimestampB\003\340A\003\022G\n\010substate\030\004 \001(\01620.g"
+ + "oogle.cloud.dataproc.v1.ClusterStatus.Su"
+ + "bstateB\003\340A\003\"V\n\005State\022\013\n\007UNKNOWN\020\000\022\014\n\010CRE"
+ + "ATING\020\001\022\013\n\007RUNNING\020\002\022\t\n\005ERROR\020\003\022\014\n\010DELET"
+ + "ING\020\004\022\014\n\010UPDATING\020\005\"<\n\010Substate\022\017\n\013UNSPE"
+ + "CIFIED\020\000\022\r\n\tUNHEALTHY\020\001\022\020\n\014STALE_STATUS\020"
+ + "\002\"S\n\016SecurityConfig\022A\n\017kerberos_config\030\001"
+ + " \001(\0132(.google.cloud.dataproc.v1.Kerberos"
+ + "Config\"\220\004\n\016KerberosConfig\022\034\n\017enable_kerb"
+ + "eros\030\001 \001(\010B\003\340A\001\022(\n\033root_principal_passwo"
+ + "rd_uri\030\002 \001(\tB\003\340A\002\022\030\n\013kms_key_uri\030\003 \001(\tB\003"
+ + "\340A\002\022\031\n\014keystore_uri\030\004 \001(\tB\003\340A\001\022\033\n\016trusts"
+ + "tore_uri\030\005 \001(\tB\003\340A\001\022\"\n\025keystore_password"
+ + "_uri\030\006 \001(\tB\003\340A\001\022\035\n\020key_password_uri\030\007 \001("
+ + "\tB\003\340A\001\022$\n\027truststore_password_uri\030\010 \001(\tB"
+ + "\003\340A\001\022$\n\027cross_realm_trust_realm\030\t \001(\tB\003\340"
+ + "A\001\022\"\n\025cross_realm_trust_kdc\030\n \001(\tB\003\340A\001\022+"
+ + "\n\036cross_realm_trust_admin_server\030\013 \001(\tB\003"
+ + "\340A\001\0222\n%cross_realm_trust_shared_password"
+ + "_uri\030\014 \001(\tB\003\340A\001\022\033\n\016kdc_db_key_uri\030\r \001(\tB"
+ + "\003\340A\001\022\037\n\022tgt_lifetime_hours\030\016 \001(\005B\003\340A\001\022\022\n"
+ + "\005realm\030\017 \001(\tB\003\340A\001\"\371\001\n\016SoftwareConfig\022\032\n\r"
+ + "image_version\030\001 \001(\tB\003\340A\001\022Q\n\nproperties\030\002"
+ + " \003(\01328.google.cloud.dataproc.v1.Software"
+ + "Config.PropertiesEntryB\003\340A\001\022E\n\023optional_"
+ + "components\030\003 \003(\0162#.google.cloud.dataproc"
+ + ".v1.ComponentB\003\340A\001\0321\n\017PropertiesEntry\022\013\n"
+ + "\003key\030\001 \001(\t\022\r\n\005value\030\002 \001(\t:\0028\001\"\203\002\n\017Lifecy"
+ + "cleConfig\0227\n\017idle_delete_ttl\030\001 \001(\0132\031.goo"
+ + "gle.protobuf.DurationB\003\340A\001\022;\n\020auto_delet"
+ + "e_time\030\002 \001(\0132\032.google.protobuf.Timestamp"
+ + "B\003\340A\001H\000\0229\n\017auto_delete_ttl\030\003 \001(\0132\031.googl"
+ + "e.protobuf.DurationB\003\340A\001H\000\0228\n\017idle_start"
+ + "_time\030\004 \001(\0132\032.google.protobuf.TimestampB"
+ + "\003\340A\003B\005\n\003ttl\"\232\002\n\016ClusterMetrics\022O\n\014hdfs_m"
+ + "etrics\030\001 \003(\01329.google.cloud.dataproc.v1."
+ + "ClusterMetrics.HdfsMetricsEntry\022O\n\014yarn_"
+ + "metrics\030\002 \003(\01329.google.cloud.dataproc.v1"
+ + ".ClusterMetrics.YarnMetricsEntry\0322\n\020Hdfs"
+ + "MetricsEntry\022\013\n\003key\030\001 \001(\t\022\r\n\005value\030\002 \001(\003"
+ + ":\0028\001\0322\n\020YarnMetricsEntry\022\013\n\003key\030\001 \001(\t\022\r\n"
+ + "\005value\030\002 \001(\003:\0028\001\"\226\001\n\024CreateClusterReques"
+ + "t\022\027\n\nproject_id\030\001 \001(\tB\003\340A\002\022\023\n\006region\030\003 \001"
+ + "(\tB\003\340A\002\0227\n\007cluster\030\002 \001(\0132!.google.cloud."
+ + "dataproc.v1.ClusterB\003\340A\002\022\027\n\nrequest_id\030\004"
+ + " \001(\tB\003\340A\001\"\256\002\n\024UpdateClusterRequest\022\027\n\npr"
+ + "oject_id\030\001 \001(\tB\003\340A\002\022\023\n\006region\030\005 \001(\tB\003\340A\002"
+ + "\022\031\n\014cluster_name\030\002 \001(\tB\003\340A\002\0227\n\007cluster\030\003"
+ + " \001(\0132!.google.cloud.dataproc.v1.ClusterB"
+ + "\003\340A\002\022E\n\035graceful_decommission_timeout\030\006 "
+ + "\001(\0132\031.google.protobuf.DurationB\003\340A\001\0224\n\013u"
+ + "pdate_mask\030\004 \001(\0132\032.google.protobuf.Field"
+ + "MaskB\003\340A\002\022\027\n\nrequest_id\030\007 \001(\tB\003\340A\001\"\223\001\n\024D"
+ + "eleteClusterRequest\022\027\n\nproject_id\030\001 \001(\tB"
+ "\003\340A\002\022\023\n\006region\030\003 \001(\tB\003\340A\002\022\031\n\014cluster_nam"
- + "e\030\002 \001(\tB\003\340A\002\"\211\001\n\023ListClustersRequest\022\027\n\n"
- + "project_id\030\001 \001(\tB\003\340A\002\022\023\n\006region\030\004 \001(\tB\003\340"
- + "A\002\022\023\n\006filter\030\005 \001(\tB\003\340A\001\022\026\n\tpage_size\030\002 \001"
- + "(\005B\003\340A\001\022\027\n\npage_token\030\003 \001(\tB\003\340A\001\"n\n\024List"
- + "ClustersResponse\0228\n\010clusters\030\001 \003(\0132!.goo"
- + "gle.cloud.dataproc.v1.ClusterB\003\340A\003\022\034\n\017ne"
- + "xt_page_token\030\002 \001(\tB\003\340A\003\"a\n\026DiagnoseClus"
- + "terRequest\022\027\n\nproject_id\030\001 \001(\tB\003\340A\002\022\023\n\006r"
- + "egion\030\003 \001(\tB\003\340A\002\022\031\n\014cluster_name\030\002 \001(\tB\003"
- + "\340A\002\"1\n\026DiagnoseClusterResults\022\027\n\noutput_"
- + "uri\030\001 \001(\tB\003\340A\003\"\370\001\n\023ReservationAffinity\022Y"
- + "\n\030consume_reservation_type\030\001 \001(\01622.googl"
- + "e.cloud.dataproc.v1.ReservationAffinity."
- + "TypeB\003\340A\001\022\020\n\003key\030\002 \001(\tB\003\340A\001\022\023\n\006values\030\003 "
- + "\003(\tB\003\340A\001\"_\n\004Type\022\024\n\020TYPE_UNSPECIFIED\020\000\022\022"
- + "\n\016NO_RESERVATION\020\001\022\023\n\017ANY_RESERVATION\020\002\022"
- + "\030\n\024SPECIFIC_RESERVATION\020\0032\377\014\n\021ClusterCon"
- + "troller\022\200\002\n\rCreateCluster\022..google.cloud"
- + ".dataproc.v1.CreateClusterRequest\032\035.goog"
- + "le.longrunning.Operation\"\237\001\202\323\344\223\002>\"3/v1/p"
- + "rojects/{project_id}/regions/{region}/cl"
- + "usters:\007cluster\332A\031project_id,region,clus"
- + "ter\312A<\n\007Cluster\0221google.cloud.dataproc.v"
- + "1.ClusterOperationMetadata\022\250\002\n\rUpdateClu"
- + "ster\022..google.cloud.dataproc.v1.UpdateCl"
- + "usterRequest\032\035.google.longrunning.Operat"
- + "ion\"\307\001\202\323\344\223\002M2B/v1/projects/{project_id}/"
- + "regions/{region}/clusters/{cluster_name}"
- + ":\007cluster\332A2project_id,region,cluster_na"
- + "me,cluster,update_mask\312A<\n\007Cluster\0221goog"
- + "le.cloud.dataproc.v1.ClusterOperationMet"
- + "adata\022\231\002\n\rDeleteCluster\022..google.cloud.d"
- + "ataproc.v1.DeleteClusterRequest\032\035.google"
- + ".longrunning.Operation\"\270\001\202\323\344\223\002D*B/v1/pro"
- + "jects/{project_id}/regions/{region}/clus"
- + "ters/{cluster_name}\332A\036project_id,region,"
- + "cluster_name\312AJ\n\025google.protobuf.Empty\0221"
- + "google.cloud.dataproc.v1.ClusterOperatio"
- + "nMetadata\022\311\001\n\nGetCluster\022+.google.cloud."
- + "dataproc.v1.GetClusterRequest\032!.google.c"
- + "loud.dataproc.v1.Cluster\"k\202\323\344\223\002D\022B/v1/pr"
- + "ojects/{project_id}/regions/{region}/clu"
- + "sters/{cluster_name}\332A\036project_id,region"
- + ",cluster_name\022\331\001\n\014ListClusters\022-.google."
- + "cloud.dataproc.v1.ListClustersRequest\032.."
- + "google.cloud.dataproc.v1.ListClustersRes"
- + "ponse\"j\202\323\344\223\0025\0223/v1/projects/{project_id}"
- + "/regions/{region}/clusters\332A\021project_id,"
- + "region\332A\030project_id,region,filter\022\252\002\n\017Di"
- + "agnoseCluster\0220.google.cloud.dataproc.v1"
- + ".DiagnoseClusterRequest\032\035.google.longrun"
- + "ning.Operation\"\305\001\202\323\344\223\002P\"K/v1/projects/{p"
- + "roject_id}/regions/{region}/clusters/{cl"
- + "uster_name}:diagnose:\001*\332A\036project_id,reg"
- + "ion,cluster_name\312AK\n\026DiagnoseClusterResu"
- + "lts\0221google.cloud.dataproc.v1.ClusterOpe"
- + "rationMetadata\032K\312A\027dataproc.googleapis.c"
- + "om\322A.https://www.googleapis.com/auth/clo"
- + "ud-platformBq\n\034com.google.cloud.dataproc"
- + ".v1B\rClustersProtoP\001Z@google.golang.org/"
- + "genproto/googleapis/cloud/dataproc/v1;da"
- + "taprocb\006proto3"
+ + "e\030\002 \001(\tB\003\340A\002\022\031\n\014cluster_uuid\030\004 \001(\tB\003\340A\001\022"
+ + "\027\n\nrequest_id\030\005 \001(\tB\003\340A\001\"\\\n\021GetClusterRe"
+ + "quest\022\027\n\nproject_id\030\001 \001(\tB\003\340A\002\022\023\n\006region"
+ + "\030\003 \001(\tB\003\340A\002\022\031\n\014cluster_name\030\002 \001(\tB\003\340A\002\"\211"
+ + "\001\n\023ListClustersRequest\022\027\n\nproject_id\030\001 \001"
+ + "(\tB\003\340A\002\022\023\n\006region\030\004 \001(\tB\003\340A\002\022\023\n\006filter\030\005"
+ + " \001(\tB\003\340A\001\022\026\n\tpage_size\030\002 \001(\005B\003\340A\001\022\027\n\npag"
+ + "e_token\030\003 \001(\tB\003\340A\001\"n\n\024ListClustersRespon"
+ + "se\0228\n\010clusters\030\001 \003(\0132!.google.cloud.data"
+ + "proc.v1.ClusterB\003\340A\003\022\034\n\017next_page_token\030"
+ + "\002 \001(\tB\003\340A\003\"a\n\026DiagnoseClusterRequest\022\027\n\n"
+ + "project_id\030\001 \001(\tB\003\340A\002\022\023\n\006region\030\003 \001(\tB\003\340"
+ + "A\002\022\031\n\014cluster_name\030\002 \001(\tB\003\340A\002\"1\n\026Diagnos"
+ + "eClusterResults\022\027\n\noutput_uri\030\001 \001(\tB\003\340A\003"
+ + "\"\370\001\n\023ReservationAffinity\022Y\n\030consume_rese"
+ + "rvation_type\030\001 \001(\01622.google.cloud.datapr"
+ + "oc.v1.ReservationAffinity.TypeB\003\340A\001\022\020\n\003k"
+ + "ey\030\002 \001(\tB\003\340A\001\022\023\n\006values\030\003 \003(\tB\003\340A\001\"_\n\004Ty"
+ + "pe\022\024\n\020TYPE_UNSPECIFIED\020\000\022\022\n\016NO_RESERVATI"
+ + "ON\020\001\022\023\n\017ANY_RESERVATION\020\002\022\030\n\024SPECIFIC_RE"
+ + "SERVATION\020\0032\377\014\n\021ClusterController\022\200\002\n\rCr"
+ + "eateCluster\022..google.cloud.dataproc.v1.C"
+ + "reateClusterRequest\032\035.google.longrunning"
+ + ".Operation\"\237\001\202\323\344\223\002>\"3/v1/projects/{proje"
+ + "ct_id}/regions/{region}/clusters:\007cluste"
+ + "r\332A\031project_id,region,cluster\312A<\n\007Cluste"
+ + "r\0221google.cloud.dataproc.v1.ClusterOpera"
+ + "tionMetadata\022\250\002\n\rUpdateCluster\022..google."
+ + "cloud.dataproc.v1.UpdateClusterRequest\032\035"
+ + ".google.longrunning.Operation\"\307\001\202\323\344\223\002M2B"
+ + "/v1/projects/{project_id}/regions/{regio"
+ + "n}/clusters/{cluster_name}:\007cluster\332A2pr"
+ + "oject_id,region,cluster_name,cluster,upd"
+ + "ate_mask\312A<\n\007Cluster\0221google.cloud.datap"
+ + "roc.v1.ClusterOperationMetadata\022\231\002\n\rDele"
+ + "teCluster\022..google.cloud.dataproc.v1.Del"
+ + "eteClusterRequest\032\035.google.longrunning.O"
+ + "peration\"\270\001\202\323\344\223\002D*B/v1/projects/{project"
+ + "_id}/regions/{region}/clusters/{cluster_"
+ + "name}\332A\036project_id,region,cluster_name\312A"
+ + "J\n\025google.protobuf.Empty\0221google.cloud.d"
+ + "ataproc.v1.ClusterOperationMetadata\022\311\001\n\n"
+ + "GetCluster\022+.google.cloud.dataproc.v1.Ge"
+ + "tClusterRequest\032!.google.cloud.dataproc."
+ + "v1.Cluster\"k\202\323\344\223\002D\022B/v1/projects/{projec"
+ + "t_id}/regions/{region}/clusters/{cluster"
+ + "_name}\332A\036project_id,region,cluster_name\022"
+ + "\331\001\n\014ListClusters\022-.google.cloud.dataproc"
+ + ".v1.ListClustersRequest\032..google.cloud.d"
+ + "ataproc.v1.ListClustersResponse\"j\202\323\344\223\0025\022"
+ + "3/v1/projects/{project_id}/regions/{regi"
+ + "on}/clusters\332A\021project_id,region\332A\030proje"
+ + "ct_id,region,filter\022\252\002\n\017DiagnoseCluster\022"
+ + "0.google.cloud.dataproc.v1.DiagnoseClust"
+ + "erRequest\032\035.google.longrunning.Operation"
+ + "\"\305\001\202\323\344\223\002P\"K/v1/projects/{project_id}/reg"
+ + "ions/{region}/clusters/{cluster_name}:di"
+ + "agnose:\001*\332A\036project_id,region,cluster_na"
+ + "me\312AK\n\026DiagnoseClusterResults\0221google.cl"
+ + "oud.dataproc.v1.ClusterOperationMetadata"
+ + "\032K\312A\027dataproc.googleapis.com\322A.https://w"
+ + "ww.googleapis.com/auth/cloud-platformBq\n"
+ + "\034com.google.cloud.dataproc.v1B\rClustersP"
+ + "rotoP\001Z@google.golang.org/genproto/googl"
+ + "eapis/cloud/dataproc/v1;dataprocb\006proto3"
};
descriptor =
com.google.protobuf.Descriptors.FileDescriptor.internalBuildGeneratedFileFrom(
@@ -398,6 +417,7 @@ public static com.google.protobuf.Descriptors.FileDescriptor getDescriptor() {
internal_static_google_cloud_dataproc_v1_ClusterConfig_descriptor,
new java.lang.String[] {
"ConfigBucket",
+ "TempBucket",
"GceClusterConfig",
"MasterConfig",
"WorkerConfig",
@@ -408,9 +428,26 @@ public static com.google.protobuf.Descriptors.FileDescriptor getDescriptor() {
"AutoscalingConfig",
"SecurityConfig",
"LifecycleConfig",
+ "EndpointConfig",
});
- internal_static_google_cloud_dataproc_v1_AutoscalingConfig_descriptor =
+ internal_static_google_cloud_dataproc_v1_EndpointConfig_descriptor =
getDescriptor().getMessageTypes().get(2);
+ internal_static_google_cloud_dataproc_v1_EndpointConfig_fieldAccessorTable =
+ new com.google.protobuf.GeneratedMessageV3.FieldAccessorTable(
+ internal_static_google_cloud_dataproc_v1_EndpointConfig_descriptor,
+ new java.lang.String[] {
+ "HttpPorts", "EnableHttpPortAccess",
+ });
+ internal_static_google_cloud_dataproc_v1_EndpointConfig_HttpPortsEntry_descriptor =
+ internal_static_google_cloud_dataproc_v1_EndpointConfig_descriptor.getNestedTypes().get(0);
+ internal_static_google_cloud_dataproc_v1_EndpointConfig_HttpPortsEntry_fieldAccessorTable =
+ new com.google.protobuf.GeneratedMessageV3.FieldAccessorTable(
+ internal_static_google_cloud_dataproc_v1_EndpointConfig_HttpPortsEntry_descriptor,
+ new java.lang.String[] {
+ "Key", "Value",
+ });
+ internal_static_google_cloud_dataproc_v1_AutoscalingConfig_descriptor =
+ getDescriptor().getMessageTypes().get(3);
internal_static_google_cloud_dataproc_v1_AutoscalingConfig_fieldAccessorTable =
new com.google.protobuf.GeneratedMessageV3.FieldAccessorTable(
internal_static_google_cloud_dataproc_v1_AutoscalingConfig_descriptor,
@@ -418,7 +455,7 @@ public static com.google.protobuf.Descriptors.FileDescriptor getDescriptor() {
"PolicyUri",
});
internal_static_google_cloud_dataproc_v1_EncryptionConfig_descriptor =
- getDescriptor().getMessageTypes().get(3);
+ getDescriptor().getMessageTypes().get(4);
internal_static_google_cloud_dataproc_v1_EncryptionConfig_fieldAccessorTable =
new com.google.protobuf.GeneratedMessageV3.FieldAccessorTable(
internal_static_google_cloud_dataproc_v1_EncryptionConfig_descriptor,
@@ -426,7 +463,7 @@ public static com.google.protobuf.Descriptors.FileDescriptor getDescriptor() {
"GcePdKmsKeyName",
});
internal_static_google_cloud_dataproc_v1_GceClusterConfig_descriptor =
- getDescriptor().getMessageTypes().get(4);
+ getDescriptor().getMessageTypes().get(5);
internal_static_google_cloud_dataproc_v1_GceClusterConfig_fieldAccessorTable =
new com.google.protobuf.GeneratedMessageV3.FieldAccessorTable(
internal_static_google_cloud_dataproc_v1_GceClusterConfig_descriptor,
@@ -452,7 +489,7 @@ public static com.google.protobuf.Descriptors.FileDescriptor getDescriptor() {
"Key", "Value",
});
internal_static_google_cloud_dataproc_v1_InstanceGroupConfig_descriptor =
- getDescriptor().getMessageTypes().get(5);
+ getDescriptor().getMessageTypes().get(6);
internal_static_google_cloud_dataproc_v1_InstanceGroupConfig_fieldAccessorTable =
new com.google.protobuf.GeneratedMessageV3.FieldAccessorTable(
internal_static_google_cloud_dataproc_v1_InstanceGroupConfig_descriptor,
@@ -463,12 +500,13 @@ public static com.google.protobuf.Descriptors.FileDescriptor getDescriptor() {
"MachineTypeUri",
"DiskConfig",
"IsPreemptible",
+ "Preemptibility",
"ManagedGroupConfig",
"Accelerators",
"MinCpuPlatform",
});
internal_static_google_cloud_dataproc_v1_ManagedGroupConfig_descriptor =
- getDescriptor().getMessageTypes().get(6);
+ getDescriptor().getMessageTypes().get(7);
internal_static_google_cloud_dataproc_v1_ManagedGroupConfig_fieldAccessorTable =
new com.google.protobuf.GeneratedMessageV3.FieldAccessorTable(
internal_static_google_cloud_dataproc_v1_ManagedGroupConfig_descriptor,
@@ -476,7 +514,7 @@ public static com.google.protobuf.Descriptors.FileDescriptor getDescriptor() {
"InstanceTemplateName", "InstanceGroupManagerName",
});
internal_static_google_cloud_dataproc_v1_AcceleratorConfig_descriptor =
- getDescriptor().getMessageTypes().get(7);
+ getDescriptor().getMessageTypes().get(8);
internal_static_google_cloud_dataproc_v1_AcceleratorConfig_fieldAccessorTable =
new com.google.protobuf.GeneratedMessageV3.FieldAccessorTable(
internal_static_google_cloud_dataproc_v1_AcceleratorConfig_descriptor,
@@ -484,7 +522,7 @@ public static com.google.protobuf.Descriptors.FileDescriptor getDescriptor() {
"AcceleratorTypeUri", "AcceleratorCount",
});
internal_static_google_cloud_dataproc_v1_DiskConfig_descriptor =
- getDescriptor().getMessageTypes().get(8);
+ getDescriptor().getMessageTypes().get(9);
internal_static_google_cloud_dataproc_v1_DiskConfig_fieldAccessorTable =
new com.google.protobuf.GeneratedMessageV3.FieldAccessorTable(
internal_static_google_cloud_dataproc_v1_DiskConfig_descriptor,
@@ -492,7 +530,7 @@ public static com.google.protobuf.Descriptors.FileDescriptor getDescriptor() {
"BootDiskType", "BootDiskSizeGb", "NumLocalSsds",
});
internal_static_google_cloud_dataproc_v1_NodeInitializationAction_descriptor =
- getDescriptor().getMessageTypes().get(9);
+ getDescriptor().getMessageTypes().get(10);
internal_static_google_cloud_dataproc_v1_NodeInitializationAction_fieldAccessorTable =
new com.google.protobuf.GeneratedMessageV3.FieldAccessorTable(
internal_static_google_cloud_dataproc_v1_NodeInitializationAction_descriptor,
@@ -500,7 +538,7 @@ public static com.google.protobuf.Descriptors.FileDescriptor getDescriptor() {
"ExecutableFile", "ExecutionTimeout",
});
internal_static_google_cloud_dataproc_v1_ClusterStatus_descriptor =
- getDescriptor().getMessageTypes().get(10);
+ getDescriptor().getMessageTypes().get(11);
internal_static_google_cloud_dataproc_v1_ClusterStatus_fieldAccessorTable =
new com.google.protobuf.GeneratedMessageV3.FieldAccessorTable(
internal_static_google_cloud_dataproc_v1_ClusterStatus_descriptor,
@@ -508,7 +546,7 @@ public static com.google.protobuf.Descriptors.FileDescriptor getDescriptor() {
"State", "Detail", "StateStartTime", "Substate",
});
internal_static_google_cloud_dataproc_v1_SecurityConfig_descriptor =
- getDescriptor().getMessageTypes().get(11);
+ getDescriptor().getMessageTypes().get(12);
internal_static_google_cloud_dataproc_v1_SecurityConfig_fieldAccessorTable =
new com.google.protobuf.GeneratedMessageV3.FieldAccessorTable(
internal_static_google_cloud_dataproc_v1_SecurityConfig_descriptor,
@@ -516,7 +554,7 @@ public static com.google.protobuf.Descriptors.FileDescriptor getDescriptor() {
"KerberosConfig",
});
internal_static_google_cloud_dataproc_v1_KerberosConfig_descriptor =
- getDescriptor().getMessageTypes().get(12);
+ getDescriptor().getMessageTypes().get(13);
internal_static_google_cloud_dataproc_v1_KerberosConfig_fieldAccessorTable =
new com.google.protobuf.GeneratedMessageV3.FieldAccessorTable(
internal_static_google_cloud_dataproc_v1_KerberosConfig_descriptor,
@@ -538,7 +576,7 @@ public static com.google.protobuf.Descriptors.FileDescriptor getDescriptor() {
"Realm",
});
internal_static_google_cloud_dataproc_v1_SoftwareConfig_descriptor =
- getDescriptor().getMessageTypes().get(13);
+ getDescriptor().getMessageTypes().get(14);
internal_static_google_cloud_dataproc_v1_SoftwareConfig_fieldAccessorTable =
new com.google.protobuf.GeneratedMessageV3.FieldAccessorTable(
internal_static_google_cloud_dataproc_v1_SoftwareConfig_descriptor,
@@ -554,7 +592,7 @@ public static com.google.protobuf.Descriptors.FileDescriptor getDescriptor() {
"Key", "Value",
});
internal_static_google_cloud_dataproc_v1_LifecycleConfig_descriptor =
- getDescriptor().getMessageTypes().get(14);
+ getDescriptor().getMessageTypes().get(15);
internal_static_google_cloud_dataproc_v1_LifecycleConfig_fieldAccessorTable =
new com.google.protobuf.GeneratedMessageV3.FieldAccessorTable(
internal_static_google_cloud_dataproc_v1_LifecycleConfig_descriptor,
@@ -562,7 +600,7 @@ public static com.google.protobuf.Descriptors.FileDescriptor getDescriptor() {
"IdleDeleteTtl", "AutoDeleteTime", "AutoDeleteTtl", "IdleStartTime", "Ttl",
});
internal_static_google_cloud_dataproc_v1_ClusterMetrics_descriptor =
- getDescriptor().getMessageTypes().get(15);
+ getDescriptor().getMessageTypes().get(16);
internal_static_google_cloud_dataproc_v1_ClusterMetrics_fieldAccessorTable =
new com.google.protobuf.GeneratedMessageV3.FieldAccessorTable(
internal_static_google_cloud_dataproc_v1_ClusterMetrics_descriptor,
@@ -586,7 +624,7 @@ public static com.google.protobuf.Descriptors.FileDescriptor getDescriptor() {
"Key", "Value",
});
internal_static_google_cloud_dataproc_v1_CreateClusterRequest_descriptor =
- getDescriptor().getMessageTypes().get(16);
+ getDescriptor().getMessageTypes().get(17);
internal_static_google_cloud_dataproc_v1_CreateClusterRequest_fieldAccessorTable =
new com.google.protobuf.GeneratedMessageV3.FieldAccessorTable(
internal_static_google_cloud_dataproc_v1_CreateClusterRequest_descriptor,
@@ -594,7 +632,7 @@ public static com.google.protobuf.Descriptors.FileDescriptor getDescriptor() {
"ProjectId", "Region", "Cluster", "RequestId",
});
internal_static_google_cloud_dataproc_v1_UpdateClusterRequest_descriptor =
- getDescriptor().getMessageTypes().get(17);
+ getDescriptor().getMessageTypes().get(18);
internal_static_google_cloud_dataproc_v1_UpdateClusterRequest_fieldAccessorTable =
new com.google.protobuf.GeneratedMessageV3.FieldAccessorTable(
internal_static_google_cloud_dataproc_v1_UpdateClusterRequest_descriptor,
@@ -608,7 +646,7 @@ public static com.google.protobuf.Descriptors.FileDescriptor getDescriptor() {
"RequestId",
});
internal_static_google_cloud_dataproc_v1_DeleteClusterRequest_descriptor =
- getDescriptor().getMessageTypes().get(18);
+ getDescriptor().getMessageTypes().get(19);
internal_static_google_cloud_dataproc_v1_DeleteClusterRequest_fieldAccessorTable =
new com.google.protobuf.GeneratedMessageV3.FieldAccessorTable(
internal_static_google_cloud_dataproc_v1_DeleteClusterRequest_descriptor,
@@ -616,7 +654,7 @@ public static com.google.protobuf.Descriptors.FileDescriptor getDescriptor() {
"ProjectId", "Region", "ClusterName", "ClusterUuid", "RequestId",
});
internal_static_google_cloud_dataproc_v1_GetClusterRequest_descriptor =
- getDescriptor().getMessageTypes().get(19);
+ getDescriptor().getMessageTypes().get(20);
internal_static_google_cloud_dataproc_v1_GetClusterRequest_fieldAccessorTable =
new com.google.protobuf.GeneratedMessageV3.FieldAccessorTable(
internal_static_google_cloud_dataproc_v1_GetClusterRequest_descriptor,
@@ -624,7 +662,7 @@ public static com.google.protobuf.Descriptors.FileDescriptor getDescriptor() {
"ProjectId", "Region", "ClusterName",
});
internal_static_google_cloud_dataproc_v1_ListClustersRequest_descriptor =
- getDescriptor().getMessageTypes().get(20);
+ getDescriptor().getMessageTypes().get(21);
internal_static_google_cloud_dataproc_v1_ListClustersRequest_fieldAccessorTable =
new com.google.protobuf.GeneratedMessageV3.FieldAccessorTable(
internal_static_google_cloud_dataproc_v1_ListClustersRequest_descriptor,
@@ -632,7 +670,7 @@ public static com.google.protobuf.Descriptors.FileDescriptor getDescriptor() {
"ProjectId", "Region", "Filter", "PageSize", "PageToken",
});
internal_static_google_cloud_dataproc_v1_ListClustersResponse_descriptor =
- getDescriptor().getMessageTypes().get(21);
+ getDescriptor().getMessageTypes().get(22);
internal_static_google_cloud_dataproc_v1_ListClustersResponse_fieldAccessorTable =
new com.google.protobuf.GeneratedMessageV3.FieldAccessorTable(
internal_static_google_cloud_dataproc_v1_ListClustersResponse_descriptor,
@@ -640,7 +678,7 @@ public static com.google.protobuf.Descriptors.FileDescriptor getDescriptor() {
"Clusters", "NextPageToken",
});
internal_static_google_cloud_dataproc_v1_DiagnoseClusterRequest_descriptor =
- getDescriptor().getMessageTypes().get(22);
+ getDescriptor().getMessageTypes().get(23);
internal_static_google_cloud_dataproc_v1_DiagnoseClusterRequest_fieldAccessorTable =
new com.google.protobuf.GeneratedMessageV3.FieldAccessorTable(
internal_static_google_cloud_dataproc_v1_DiagnoseClusterRequest_descriptor,
@@ -648,7 +686,7 @@ public static com.google.protobuf.Descriptors.FileDescriptor getDescriptor() {
"ProjectId", "Region", "ClusterName",
});
internal_static_google_cloud_dataproc_v1_DiagnoseClusterResults_descriptor =
- getDescriptor().getMessageTypes().get(23);
+ getDescriptor().getMessageTypes().get(24);
internal_static_google_cloud_dataproc_v1_DiagnoseClusterResults_fieldAccessorTable =
new com.google.protobuf.GeneratedMessageV3.FieldAccessorTable(
internal_static_google_cloud_dataproc_v1_DiagnoseClusterResults_descriptor,
@@ -656,7 +694,7 @@ public static com.google.protobuf.Descriptors.FileDescriptor getDescriptor() {
"OutputUri",
});
internal_static_google_cloud_dataproc_v1_ReservationAffinity_descriptor =
- getDescriptor().getMessageTypes().get(24);
+ getDescriptor().getMessageTypes().get(25);
internal_static_google_cloud_dataproc_v1_ReservationAffinity_fieldAccessorTable =
new com.google.protobuf.GeneratedMessageV3.FieldAccessorTable(
internal_static_google_cloud_dataproc_v1_ReservationAffinity_descriptor,
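The index bumps in the hunks above are mechanical: EndpointConfig is registered at message-type index 2, so AutoscalingConfig and every message after it shifts up by one. A minimal sketch of why name-based descriptor lookup is preferable to positional lookup across such renumbering (only protobuf-java runtime calls; the wrapper class is hypothetical):

import com.google.protobuf.Descriptors;

public class DescriptorLookupSketch {
  public static void main(String[] args) {
    // ClustersProto.getDescriptor() is the generated accessor patched above.
    Descriptors.FileDescriptor file =
        com.google.cloud.dataproc.v1.ClustersProto.getDescriptor();

    // Positional lookup: index 2 now resolves to EndpointConfig; before this
    // change the same index resolved to AutoscalingConfig.
    Descriptors.Descriptor byIndex = file.getMessageTypes().get(2);

    // Name-based lookup is unaffected by index renumbering.
    Descriptors.Descriptor byName = file.findMessageTypeByName("EndpointConfig");

    System.out.println(byIndex.getFullName().equals(byName.getFullName())); // true
  }
}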
diff --git a/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/Component.java b/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/Component.java
index 6582b2a5..8a8254b3 100644
--- a/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/Component.java
+++ b/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/Component.java
@@ -32,7 +32,7 @@ public enum Component implements com.google.protobuf.ProtocolMessageEnum {
*
*
* <pre>
- * Unspecified component.
+ * Unspecified component. Specifying this will cause Cluster creation to fail.
* </pre>
*
* <code>COMPONENT_UNSPECIFIED = 0;</code>
@@ -105,7 +105,7 @@ public enum Component implements com.google.protobuf.ProtocolMessageEnum {
*
*
* <pre>
- * Unspecified component.
+ * Unspecified component. Specifying this will cause Cluster creation to fail.
* </pre>
*
* <code>COMPONENT_UNSPECIFIED = 0;</code>
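The clarified COMPONENT_UNSPECIFIED docs mean a concrete component must always be named when requesting optional components. A minimal sketch against the generated SoftwareConfig builder (Component.JUPYTER is assumed to exist in this version of the v1 enum; the wrapper class is hypothetical):

import com.google.cloud.dataproc.v1.Component;
import com.google.cloud.dataproc.v1.SoftwareConfig;

public class OptionalComponentsSketch {
  public static void main(String[] args) {
    // COMPONENT_UNSPECIFIED would now be rejected at cluster creation, so
    // request a concrete optional component instead.
    SoftwareConfig software =
        SoftwareConfig.newBuilder()
            .addOptionalComponents(Component.JUPYTER) // assumed available here
            .build();
    System.out.println(software.getOptionalComponentsList());
  }
}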
diff --git a/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/EndpointConfig.java b/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/EndpointConfig.java
new file mode 100644
index 00000000..da50cb51
--- /dev/null
+++ b/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/EndpointConfig.java
@@ -0,0 +1,899 @@
+/*
+ * Copyright 2020 Google LLC
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * https://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+// Generated by the protocol buffer compiler. DO NOT EDIT!
+// source: google/cloud/dataproc/v1/clusters.proto
+
+package com.google.cloud.dataproc.v1;
+
+/**
+ *
+ *
+ * + * Endpoint config for this cluster + *+ * + * Protobuf type {@code google.cloud.dataproc.v1.EndpointConfig} + */ +public final class EndpointConfig extends com.google.protobuf.GeneratedMessageV3 + implements + // @@protoc_insertion_point(message_implements:google.cloud.dataproc.v1.EndpointConfig) + EndpointConfigOrBuilder { + private static final long serialVersionUID = 0L; + // Use EndpointConfig.newBuilder() to construct. + private EndpointConfig(com.google.protobuf.GeneratedMessageV3.Builder> builder) { + super(builder); + } + + private EndpointConfig() {} + + @java.lang.Override + @SuppressWarnings({"unused"}) + protected java.lang.Object newInstance(UnusedPrivateParameter unused) { + return new EndpointConfig(); + } + + @java.lang.Override + public final com.google.protobuf.UnknownFieldSet getUnknownFields() { + return this.unknownFields; + } + + private EndpointConfig( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + this(); + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + int mutable_bitField0_ = 0; + com.google.protobuf.UnknownFieldSet.Builder unknownFields = + com.google.protobuf.UnknownFieldSet.newBuilder(); + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 10: + { + if (!((mutable_bitField0_ & 0x00000001) != 0)) { + httpPorts_ = + com.google.protobuf.MapField.newMapField( + HttpPortsDefaultEntryHolder.defaultEntry); + mutable_bitField0_ |= 0x00000001; + } + com.google.protobuf.MapEntry
+ * Output only. The map of port descriptions to URLs. Will only be populated
+ * if enable_http_port_access is true.
+ * </pre>
+ *
+ * <code>map&lt;string, string&gt; http_ports = 1 [(.google.api.field_behavior) = OUTPUT_ONLY];
+ * </code>
+ */
+ @java.lang.Override
+ public boolean containsHttpPorts(java.lang.String key) {
+ if (key == null) {
+ throw new java.lang.NullPointerException();
+ }
+ return internalGetHttpPorts().getMap().containsKey(key);
+ }
+ /** Use {@link #getHttpPortsMap()} instead. */
+ @java.lang.Override
+ @java.lang.Deprecated
+ public java.util.Map<java.lang.String, java.lang.String> getHttpPorts() {
+ return getHttpPortsMap();
+ }
+ /**
+ *
+ *
+ * <pre>
+ * Output only. The map of port descriptions to URLs. Will only be populated
+ * if enable_http_port_access is true.
+ * </pre>
+ *
+ * <code>map&lt;string, string&gt; http_ports = 1 [(.google.api.field_behavior) = OUTPUT_ONLY];
+ * </code>
+ */
+ @java.lang.Override
+ public java.util.Map<java.lang.String, java.lang.String> getHttpPortsMap() {
+ return internalGetHttpPorts().getMap();
+ }
+ /**
+ *
+ *
+ * <pre>
+ * Output only. The map of port descriptions to URLs. Will only be populated
+ * if enable_http_port_access is true.
+ * </pre>
+ *
+ * <code>map&lt;string, string&gt; http_ports = 1 [(.google.api.field_behavior) = OUTPUT_ONLY];
+ * </code>
+ */
+ @java.lang.Override
+ public java.lang.String getHttpPortsOrDefault(
+ java.lang.String key, java.lang.String defaultValue) {
+ if (key == null) {
+ throw new java.lang.NullPointerException();
+ }
+ java.util.Map<java.lang.String, java.lang.String> map = internalGetHttpPorts().getMap();
+ return map.containsKey(key) ? map.get(key) : defaultValue;
+ }
+ /**
+ *
+ *
+ * <pre>
+ * Output only. The map of port descriptions to URLs. Will only be populated
+ * if enable_http_port_access is true.
+ * </pre>
+ *
+ * <code>map&lt;string, string&gt; http_ports = 1 [(.google.api.field_behavior) = OUTPUT_ONLY];
+ * </code>
+ */
+ @java.lang.Override
+ public java.lang.String getHttpPortsOrThrow(java.lang.String key) {
+ if (key == null) {
+ throw new java.lang.NullPointerException();
+ }
+ java.util.Map<java.lang.String, java.lang.String> map = internalGetHttpPorts().getMap();
+ if (!map.containsKey(key)) {
+ throw new java.lang.IllegalArgumentException();
+ }
+ return map.get(key);
+ }
+
+ public static final int ENABLE_HTTP_PORT_ACCESS_FIELD_NUMBER = 2;
+ private boolean enableHttpPortAccess_;
+ /**
+ *
+ *
+ * <pre>
+ * Optional. If true, enable http access to specific ports on the cluster
+ * from external sources. Defaults to false.
+ * </pre>
+ *
+ * <code>bool enable_http_port_access = 2 [(.google.api.field_behavior) = OPTIONAL];</code>
+ *
+ * @return The enableHttpPortAccess.
+ */
+ @java.lang.Override
+ public boolean getEnableHttpPortAccess() {
+ return enableHttpPortAccess_;
+ }
+
+ private byte memoizedIsInitialized = -1;
+
+ @java.lang.Override
+ public final boolean isInitialized() {
+ byte isInitialized = memoizedIsInitialized;
+ if (isInitialized == 1) return true;
+ if (isInitialized == 0) return false;
+
+ memoizedIsInitialized = 1;
+ return true;
+ }
+
+ @java.lang.Override
+ public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException {
+ com.google.protobuf.GeneratedMessageV3.serializeStringMapTo(
+ output, internalGetHttpPorts(), HttpPortsDefaultEntryHolder.defaultEntry, 1);
+ if (enableHttpPortAccess_ != false) {
+ output.writeBool(2, enableHttpPortAccess_);
+ }
+ unknownFields.writeTo(output);
+ }
+
+ @java.lang.Override
+ public int getSerializedSize() {
+ int size = memoizedSize;
+ if (size != -1) return size;
+
+ size = 0;
+ for (java.util.Map.Entry+ * Endpoint config for this cluster + *+ * + * Protobuf type {@code google.cloud.dataproc.v1.EndpointConfig} + */ + public static final class Builder extends com.google.protobuf.GeneratedMessageV3.Builder
+ * Output only. The map of port descriptions to URLs. Will only be populated + * if enable_http_port_access is true. + *+ * + *
map<string, string> http_ports = 1 [(.google.api.field_behavior) = OUTPUT_ONLY];
+ *
+ */
+ @java.lang.Override
+ public boolean containsHttpPorts(java.lang.String key) {
+ if (key == null) {
+ throw new java.lang.NullPointerException();
+ }
+ return internalGetHttpPorts().getMap().containsKey(key);
+ }
+ /** Use {@link #getHttpPortsMap()} instead. */
+ @java.lang.Override
+ @java.lang.Deprecated
+ public java.util.Map<java.lang.String, java.lang.String> getHttpPorts() {
+ return getHttpPortsMap();
+ }
+ /**
+ *
+ *
+ * <pre>
+ * Output only. The map of port descriptions to URLs. Will only be populated
+ * if enable_http_port_access is true.
+ * </pre>
+ *
+ * <code>map&lt;string, string&gt; http_ports = 1 [(.google.api.field_behavior) = OUTPUT_ONLY];
+ * </code>
+ */
+ @java.lang.Override
+ public java.util.Map<java.lang.String, java.lang.String> getHttpPortsMap() {
+ return internalGetHttpPorts().getMap();
+ }
+ /**
+ *
+ *
+ * <pre>
+ * Output only. The map of port descriptions to URLs. Will only be populated
+ * if enable_http_port_access is true.
+ * </pre>
+ *
+ * <code>map&lt;string, string&gt; http_ports = 1 [(.google.api.field_behavior) = OUTPUT_ONLY];
+ * </code>
+ */
+ @java.lang.Override
+ public java.lang.String getHttpPortsOrDefault(
+ java.lang.String key, java.lang.String defaultValue) {
+ if (key == null) {
+ throw new java.lang.NullPointerException();
+ }
+ java.util.Map<java.lang.String, java.lang.String> map = internalGetHttpPorts().getMap();
+ return map.containsKey(key) ? map.get(key) : defaultValue;
+ }
+ /**
+ *
+ *
+ * <pre>
+ * Output only. The map of port descriptions to URLs. Will only be populated
+ * if enable_http_port_access is true.
+ * </pre>
+ *
+ * <code>map&lt;string, string&gt; http_ports = 1 [(.google.api.field_behavior) = OUTPUT_ONLY];
+ * </code>
+ */
+ @java.lang.Override
+ public java.lang.String getHttpPortsOrThrow(java.lang.String key) {
+ if (key == null) {
+ throw new java.lang.NullPointerException();
+ }
+ java.util.Map<java.lang.String, java.lang.String> map = internalGetHttpPorts().getMap();
+ if (!map.containsKey(key)) {
+ throw new java.lang.IllegalArgumentException();
+ }
+ return map.get(key);
+ }
+
+ public Builder clearHttpPorts() {
+ internalGetMutableHttpPorts().getMutableMap().clear();
+ return this;
+ }
+ /**
+ *
+ *
+ * <pre>
+ * Output only. The map of port descriptions to URLs. Will only be populated
+ * if enable_http_port_access is true.
+ * </pre>
+ *
+ * <code>map&lt;string, string&gt; http_ports = 1 [(.google.api.field_behavior) = OUTPUT_ONLY];
+ * </code>
+ */
+ public Builder removeHttpPorts(java.lang.String key) {
+ if (key == null) {
+ throw new java.lang.NullPointerException();
+ }
+ internalGetMutableHttpPorts().getMutableMap().remove(key);
+ return this;
+ }
+ /** Use alternate mutation accessors instead. */
+ @java.lang.Deprecated
+ public java.util.Map<java.lang.String, java.lang.String> getMutableHttpPorts() {
+ return internalGetMutableHttpPorts().getMutableMap();
+ }
+ /**
+ *
+ *
+ * <pre>
+ * Output only. The map of port descriptions to URLs. Will only be populated
+ * if enable_http_port_access is true.
+ * </pre>
+ *
+ * <code>map&lt;string, string&gt; http_ports = 1 [(.google.api.field_behavior) = OUTPUT_ONLY];
+ * </code>
+ */
+ public Builder putHttpPorts(java.lang.String key, java.lang.String value) {
+ if (key == null) {
+ throw new java.lang.NullPointerException();
+ }
+ if (value == null) {
+ throw new java.lang.NullPointerException();
+ }
+ internalGetMutableHttpPorts().getMutableMap().put(key, value);
+ return this;
+ }
+ /**
+ *
+ *
+ * <pre>
+ * Output only. The map of port descriptions to URLs. Will only be populated
+ * if enable_http_port_access is true.
+ * </pre>
+ *
+ * <code>map&lt;string, string&gt; http_ports = 1 [(.google.api.field_behavior) = OUTPUT_ONLY];
+ * </code>
+ */
+ public Builder putAllHttpPorts(java.util.Map<java.lang.String, java.lang.String> values) {
+ internalGetMutableHttpPorts().getMutableMap().putAll(values);
+ return this;
+ }
+
+ private boolean enableHttpPortAccess_;
+ /**
+ *
+ *
+ * <pre>
+ * Optional. If true, enable http access to specific ports on the cluster
+ * from external sources. Defaults to false.
+ * </pre>
+ *
+ * <code>bool enable_http_port_access = 2 [(.google.api.field_behavior) = OPTIONAL];</code>
+ *
+ * @return The enableHttpPortAccess.
+ */
+ @java.lang.Override
+ public boolean getEnableHttpPortAccess() {
+ return enableHttpPortAccess_;
+ }
+ /**
+ *
+ *
+ * <pre>
+ * Optional. If true, enable http access to specific ports on the cluster
+ * from external sources. Defaults to false.
+ * </pre>
+ *
+ * <code>bool enable_http_port_access = 2 [(.google.api.field_behavior) = OPTIONAL];</code>
+ *
+ * @param value The enableHttpPortAccess to set.
+ * @return This builder for chaining.
+ */
+ public Builder setEnableHttpPortAccess(boolean value) {
+
+ enableHttpPortAccess_ = value;
+ onChanged();
+ return this;
+ }
+ /**
+ *
+ *
+ * <pre>
+ * Optional. If true, enable http access to specific ports on the cluster
+ * from external sources. Defaults to false.
+ * </pre>
+ *
+ * <code>bool enable_http_port_access = 2 [(.google.api.field_behavior) = OPTIONAL];</code>
+ *
+ * @return This builder for chaining.
+ */
+ public Builder clearEnableHttpPortAccess() {
+
+ enableHttpPortAccess_ = false;
+ onChanged();
+ return this;
+ }
+
+ @java.lang.Override
+ public final Builder setUnknownFields(final com.google.protobuf.UnknownFieldSet unknownFields) {
+ return super.setUnknownFields(unknownFields);
+ }
+
+ @java.lang.Override
+ public final Builder mergeUnknownFields(
+ final com.google.protobuf.UnknownFieldSet unknownFields) {
+ return super.mergeUnknownFields(unknownFields);
+ }
+
+ // @@protoc_insertion_point(builder_scope:google.cloud.dataproc.v1.EndpointConfig)
+ }
+
+ // @@protoc_insertion_point(class_scope:google.cloud.dataproc.v1.EndpointConfig)
+ private static final com.google.cloud.dataproc.v1.EndpointConfig DEFAULT_INSTANCE;
+
+ static {
+ DEFAULT_INSTANCE = new com.google.cloud.dataproc.v1.EndpointConfig();
+ }
+
+ public static com.google.cloud.dataproc.v1.EndpointConfig getDefaultInstance() {
+ return DEFAULT_INSTANCE;
+ }
+
+ private static final com.google.protobuf.Parser+ * Output only. The map of port descriptions to URLs. Will only be populated + * if enable_http_port_access is true. + *+ * + *
+ * <code>map&lt;string, string&gt; http_ports = 1 [(.google.api.field_behavior) = OUTPUT_ONLY];
+ * </code>
+ */
+ int getHttpPortsCount();
+ /**
+ *
+ *
+ * <pre>
+ * Output only. The map of port descriptions to URLs. Will only be populated
+ * if enable_http_port_access is true.
+ * </pre>
+ *
+ * <code>map&lt;string, string&gt; http_ports = 1 [(.google.api.field_behavior) = OUTPUT_ONLY];
+ * </code>
+ */
+ boolean containsHttpPorts(java.lang.String key);
+ /** Use {@link #getHttpPortsMap()} instead. */
+ @java.lang.Deprecated
+ java.util.Map<java.lang.String, java.lang.String> getHttpPorts();
+ /**
+ *
+ *
+ * <pre>
+ * Output only. The map of port descriptions to URLs. Will only be populated
+ * if enable_http_port_access is true.
+ * </pre>
+ *
+ * <code>map&lt;string, string&gt; http_ports = 1 [(.google.api.field_behavior) = OUTPUT_ONLY];
+ * </code>
+ */
+ java.util.Map<java.lang.String, java.lang.String> getHttpPortsMap();
+ /**
+ *
+ *
+ * <pre>
+ * Output only. The map of port descriptions to URLs. Will only be populated
+ * if enable_http_port_access is true.
+ * </pre>
+ *
+ * <code>map&lt;string, string&gt; http_ports = 1 [(.google.api.field_behavior) = OUTPUT_ONLY];
+ * </code>
+ */
+ java.lang.String getHttpPortsOrDefault(java.lang.String key, java.lang.String defaultValue);
+ /**
+ *
+ *
+ * <pre>
+ * Output only. The map of port descriptions to URLs. Will only be populated
+ * if enable_http_port_access is true.
+ * </pre>
+ *
+ * <code>map&lt;string, string&gt; http_ports = 1 [(.google.api.field_behavior) = OUTPUT_ONLY];
+ * </code>
+ */
+ java.lang.String getHttpPortsOrThrow(java.lang.String key);
+
+ /**
+ *
+ *
+ * <pre>
+ * Optional. If true, enable http access to specific ports on the cluster
+ * from external sources. Defaults to false.
+ * </pre>
+ *
+ * <code>bool enable_http_port_access = 2 [(.google.api.field_behavior) = OPTIONAL];</code>
+ *
+ * @return The enableHttpPortAccess.
+ */
+ boolean getEnableHttpPortAccess();
+}
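EndpointConfig reaches callers through the ClusterConfig field registered earlier in this diff. A minimal usage sketch built only from accessors visible in this file plus the existing generated Cluster/ClusterConfig builders (the wrapper class is hypothetical):

import com.google.cloud.dataproc.v1.Cluster;
import com.google.cloud.dataproc.v1.ClusterConfig;
import com.google.cloud.dataproc.v1.EndpointConfig;

public class EndpointConfigSketch {
  public static void main(String[] args) {
    // Request side: opt in to HTTP port access. http_ports is OUTPUT_ONLY,
    // so it is never set here.
    ClusterConfig config =
        ClusterConfig.newBuilder()
            .setEndpointConfig(
                EndpointConfig.newBuilder().setEnableHttpPortAccess(true).build())
            .build();

    // Response side: on a cluster returned by the service, the map of port
    // descriptions to URLs is populated only when access was enabled.
    Cluster cluster = Cluster.newBuilder().setConfig(config).build();
    cluster
        .getConfig()
        .getEndpointConfig()
        .getHttpPortsMap()
        .forEach((name, url) -> System.out.println(name + " -> " + url));
  }
}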
diff --git a/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/GceClusterConfig.java b/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/GceClusterConfig.java
index 95918561..9ec84a3d 100644
--- a/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/GceClusterConfig.java
+++ b/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/GceClusterConfig.java
@@ -433,7 +433,7 @@ public boolean getInternalIpOnly() {
*
* * Optional. The [Dataproc service - * account](https://cloud.google.com/dataproc/docs/concepts/configuring-clusters/service-accounts#service_accounts_in_cloud_dataproc) + * account](https://cloud.google.com/dataproc/docs/concepts/configuring-clusters/service-accounts#service_accounts_in_dataproc) * (also see [VM Data Plane * identity](https://cloud.google.com/dataproc/docs/concepts/iam/dataproc-principals#vm_service_account_data_plane_identity)) * used by Dataproc cluster VM instances to access Google Cloud Platform @@ -465,7 +465,7 @@ public java.lang.String getServiceAccount() { * ** Optional. The [Dataproc service - * account](https://cloud.google.com/dataproc/docs/concepts/configuring-clusters/service-accounts#service_accounts_in_cloud_dataproc) + * account](https://cloud.google.com/dataproc/docs/concepts/configuring-clusters/service-accounts#service_accounts_in_dataproc) * (also see [VM Data Plane * identity](https://cloud.google.com/dataproc/docs/concepts/iam/dataproc-principals#vm_service_account_data_plane_identity)) * used by Dataproc cluster VM instances to access Google Cloud Platform @@ -1846,7 +1846,7 @@ public Builder clearInternalIpOnly() { * ** Optional. The [Dataproc service - * account](https://cloud.google.com/dataproc/docs/concepts/configuring-clusters/service-accounts#service_accounts_in_cloud_dataproc) + * account](https://cloud.google.com/dataproc/docs/concepts/configuring-clusters/service-accounts#service_accounts_in_dataproc) * (also see [VM Data Plane * identity](https://cloud.google.com/dataproc/docs/concepts/iam/dataproc-principals#vm_service_account_data_plane_identity)) * used by Dataproc cluster VM instances to access Google Cloud Platform @@ -1877,7 +1877,7 @@ public java.lang.String getServiceAccount() { * ** Optional. The [Dataproc service - * account](https://cloud.google.com/dataproc/docs/concepts/configuring-clusters/service-accounts#service_accounts_in_cloud_dataproc) + * account](https://cloud.google.com/dataproc/docs/concepts/configuring-clusters/service-accounts#service_accounts_in_dataproc) * (also see [VM Data Plane * identity](https://cloud.google.com/dataproc/docs/concepts/iam/dataproc-principals#vm_service_account_data_plane_identity)) * used by Dataproc cluster VM instances to access Google Cloud Platform @@ -1908,7 +1908,7 @@ public com.google.protobuf.ByteString getServiceAccountBytes() { * ** Optional. The [Dataproc service - * account](https://cloud.google.com/dataproc/docs/concepts/configuring-clusters/service-accounts#service_accounts_in_cloud_dataproc) + * account](https://cloud.google.com/dataproc/docs/concepts/configuring-clusters/service-accounts#service_accounts_in_dataproc) * (also see [VM Data Plane * identity](https://cloud.google.com/dataproc/docs/concepts/iam/dataproc-principals#vm_service_account_data_plane_identity)) * used by Dataproc cluster VM instances to access Google Cloud Platform @@ -1938,7 +1938,7 @@ public Builder setServiceAccount(java.lang.String value) { * ** Optional. 
The [Dataproc service - * account](https://cloud.google.com/dataproc/docs/concepts/configuring-clusters/service-accounts#service_accounts_in_cloud_dataproc) + * account](https://cloud.google.com/dataproc/docs/concepts/configuring-clusters/service-accounts#service_accounts_in_dataproc) * (also see [VM Data Plane * identity](https://cloud.google.com/dataproc/docs/concepts/iam/dataproc-principals#vm_service_account_data_plane_identity)) * used by Dataproc cluster VM instances to access Google Cloud Platform @@ -1964,7 +1964,7 @@ public Builder clearServiceAccount() { * ** Optional. The [Dataproc service - * account](https://cloud.google.com/dataproc/docs/concepts/configuring-clusters/service-accounts#service_accounts_in_cloud_dataproc) + * account](https://cloud.google.com/dataproc/docs/concepts/configuring-clusters/service-accounts#service_accounts_in_dataproc) * (also see [VM Data Plane * identity](https://cloud.google.com/dataproc/docs/concepts/iam/dataproc-principals#vm_service_account_data_plane_identity)) * used by Dataproc cluster VM instances to access Google Cloud Platform diff --git a/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/GceClusterConfigOrBuilder.java b/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/GceClusterConfigOrBuilder.java index 0580305d..047c0eab 100644 --- a/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/GceClusterConfigOrBuilder.java +++ b/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/GceClusterConfigOrBuilder.java @@ -165,7 +165,7 @@ public interface GceClusterConfigOrBuilder * ** Optional. The [Dataproc service - * account](https://cloud.google.com/dataproc/docs/concepts/configuring-clusters/service-accounts#service_accounts_in_cloud_dataproc) + * account](https://cloud.google.com/dataproc/docs/concepts/configuring-clusters/service-accounts#service_accounts_in_dataproc) * (also see [VM Data Plane * identity](https://cloud.google.com/dataproc/docs/concepts/iam/dataproc-principals#vm_service_account_data_plane_identity)) * used by Dataproc cluster VM instances to access Google Cloud Platform @@ -186,7 +186,7 @@ public interface GceClusterConfigOrBuilder * ** Optional. 
The [Dataproc service - * account](https://cloud.google.com/dataproc/docs/concepts/configuring-clusters/service-accounts#service_accounts_in_cloud_dataproc) + * account](https://cloud.google.com/dataproc/docs/concepts/configuring-clusters/service-accounts#service_accounts_in_dataproc) * (also see [VM Data Plane * identity](https://cloud.google.com/dataproc/docs/concepts/iam/dataproc-principals#vm_service_account_data_plane_identity)) * used by Dataproc cluster VM instances to access Google Cloud Platform diff --git a/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/InstanceGroupConfig.java b/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/InstanceGroupConfig.java index 8000e114..9293c072 100644 --- a/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/InstanceGroupConfig.java +++ b/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/InstanceGroupConfig.java @@ -42,6 +42,7 @@ private InstanceGroupConfig() { instanceNames_ = com.google.protobuf.LazyStringArrayList.EMPTY; imageUri_ = ""; machineTypeUri_ = ""; + preemptibility_ = 0; accelerators_ = java.util.Collections.emptyList(); minCpuPlatform_ = ""; } @@ -161,6 +162,13 @@ private InstanceGroupConfig( minCpuPlatform_ = s; break; } + case 80: + { + int rawValue = input.readEnum(); + + preemptibility_ = rawValue; + break; + } default: { if (!parseUnknownField(input, unknownFields, extensionRegistry, tag)) { @@ -201,6 +209,174 @@ public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { com.google.cloud.dataproc.v1.InstanceGroupConfig.Builder.class); } + /** + * + * + *+ * Controls the use of + * [preemptible instances] + * (https://cloud.google.com/compute/docs/instances/preemptible) + * within the group. + *+ * + * Protobuf enum {@code google.cloud.dataproc.v1.InstanceGroupConfig.Preemptibility} + */ + public enum Preemptibility implements com.google.protobuf.ProtocolMessageEnum { + /** + * + * + *+ * Preemptibility is unspecified, the system will choose the + * appropriate setting for each instance group. + *+ * + *PREEMPTIBILITY_UNSPECIFIED = 0;
+ */ + PREEMPTIBILITY_UNSPECIFIED(0), + /** + * + * + *+ * Instances are non-preemptible. + * This option is allowed for all instance groups and is the only valid + * value for Master and Worker instance groups. + *+ * + *NON_PREEMPTIBLE = 1;
+ */ + NON_PREEMPTIBLE(1), + /** + * + * + *+ * Instances are preemptible. + * This option is allowed only for secondary worker groups. + *+ * + *PREEMPTIBLE = 2;
+ */ + PREEMPTIBLE(2), + UNRECOGNIZED(-1), + ; + + /** + * + * + *+ * Preemptibility is unspecified, the system will choose the + * appropriate setting for each instance group. + *+ * + *PREEMPTIBILITY_UNSPECIFIED = 0;
+ */ + public static final int PREEMPTIBILITY_UNSPECIFIED_VALUE = 0; + /** + * + * + *+ * Instances are non-preemptible. + * This option is allowed for all instance groups and is the only valid + * value for Master and Worker instance groups. + *+ * + *NON_PREEMPTIBLE = 1;
+ */ + public static final int NON_PREEMPTIBLE_VALUE = 1; + /** + * + * + *+ * Instances are preemptible. + * This option is allowed only for secondary worker groups. + *+ * + *PREEMPTIBLE = 2;
+ */ + public static final int PREEMPTIBLE_VALUE = 2; + + public final int getNumber() { + if (this == UNRECOGNIZED) { + throw new java.lang.IllegalArgumentException( + "Can't get the number of an unknown enum value."); + } + return value; + } + + /** + * @param value The numeric wire value of the corresponding enum entry. + * @return The enum associated with the given numeric wire value. + * @deprecated Use {@link #forNumber(int)} instead. + */ + @java.lang.Deprecated + public static Preemptibility valueOf(int value) { + return forNumber(value); + } + + /** + * @param value The numeric wire value of the corresponding enum entry. + * @return The enum associated with the given numeric wire value. + */ + public static Preemptibility forNumber(int value) { + switch (value) { + case 0: + return PREEMPTIBILITY_UNSPECIFIED; + case 1: + return NON_PREEMPTIBLE; + case 2: + return PREEMPTIBLE; + default: + return null; + } + } + + public static com.google.protobuf.Internal.EnumLiteMapinternalGetValueMap() { + return internalValueMap; + } + + private static final com.google.protobuf.Internal.EnumLiteMap internalValueMap = + new com.google.protobuf.Internal.EnumLiteMap () { + public Preemptibility findValueByNumber(int number) { + return Preemptibility.forNumber(number); + } + }; + + public final com.google.protobuf.Descriptors.EnumValueDescriptor getValueDescriptor() { + if (this == UNRECOGNIZED) { + throw new java.lang.IllegalStateException( + "Can't get the descriptor of an unrecognized enum value."); + } + return getDescriptor().getValues().get(ordinal()); + } + + public final com.google.protobuf.Descriptors.EnumDescriptor getDescriptorForType() { + return getDescriptor(); + } + + public static final com.google.protobuf.Descriptors.EnumDescriptor getDescriptor() { + return com.google.cloud.dataproc.v1.InstanceGroupConfig.getDescriptor().getEnumTypes().get(0); + } + + private static final Preemptibility[] VALUES = values(); + + public static Preemptibility valueOf(com.google.protobuf.Descriptors.EnumValueDescriptor desc) { + if (desc.getType() != getDescriptor()) { + throw new java.lang.IllegalArgumentException("EnumValueDescriptor is not for this type."); + } + if (desc.getIndex() == -1) { + return UNRECOGNIZED; + } + return VALUES[desc.getIndex()]; + } + + private final int value; + + private Preemptibility(int value) { + this.value = value; + } + + // @@protoc_insertion_point(enum_scope:google.cloud.dataproc.v1.InstanceGroupConfig.Preemptibility) + } + public static final int NUM_INSTANCES_FIELD_NUMBER = 1; private int numInstances_; /** @@ -496,6 +672,56 @@ public boolean getIsPreemptible() { return isPreemptible_; } + public static final int PREEMPTIBILITY_FIELD_NUMBER = 10; + private int preemptibility_; + /** + * + * + * + * Optional. Specifies the preemptibility of the instance group. + * The default value for master and worker groups is + * `NON_PREEMPTIBLE`. This default cannot be changed. + * The default value for secondary instances is + * `PREEMPTIBLE`. + *+ * + *+ * .google.cloud.dataproc.v1.InstanceGroupConfig.Preemptibility preemptibility = 10 [(.google.api.field_behavior) = OPTIONAL]; + *
+ * + * @return The enum numeric value on the wire for preemptibility. + */ + @java.lang.Override + public int getPreemptibilityValue() { + return preemptibility_; + } + /** + * + * + *+ * Optional. Specifies the preemptibility of the instance group. + * The default value for master and worker groups is + * `NON_PREEMPTIBLE`. This default cannot be changed. + * The default value for secondary instances is + * `PREEMPTIBLE`. + *+ * + *+ * .google.cloud.dataproc.v1.InstanceGroupConfig.Preemptibility preemptibility = 10 [(.google.api.field_behavior) = OPTIONAL]; + *
+ * + * @return The preemptibility. + */ + @java.lang.Override + public com.google.cloud.dataproc.v1.InstanceGroupConfig.Preemptibility getPreemptibility() { + @SuppressWarnings("deprecation") + com.google.cloud.dataproc.v1.InstanceGroupConfig.Preemptibility result = + com.google.cloud.dataproc.v1.InstanceGroupConfig.Preemptibility.valueOf(preemptibility_); + return result == null + ? com.google.cloud.dataproc.v1.InstanceGroupConfig.Preemptibility.UNRECOGNIZED + : result; + } + public static final int MANAGED_GROUP_CONFIG_FIELD_NUMBER = 7; private com.google.cloud.dataproc.v1.ManagedGroupConfig managedGroupConfig_; /** @@ -735,6 +961,12 @@ public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io if (!getMinCpuPlatformBytes().isEmpty()) { com.google.protobuf.GeneratedMessageV3.writeString(output, 9, minCpuPlatform_); } + if (preemptibility_ + != com.google.cloud.dataproc.v1.InstanceGroupConfig.Preemptibility + .PREEMPTIBILITY_UNSPECIFIED + .getNumber()) { + output.writeEnum(10, preemptibility_); + } unknownFields.writeTo(output); } @@ -776,6 +1008,12 @@ public int getSerializedSize() { if (!getMinCpuPlatformBytes().isEmpty()) { size += com.google.protobuf.GeneratedMessageV3.computeStringSize(9, minCpuPlatform_); } + if (preemptibility_ + != com.google.cloud.dataproc.v1.InstanceGroupConfig.Preemptibility + .PREEMPTIBILITY_UNSPECIFIED + .getNumber()) { + size += com.google.protobuf.CodedOutputStream.computeEnumSize(10, preemptibility_); + } size += unknownFields.getSerializedSize(); memoizedSize = size; return size; @@ -801,6 +1039,7 @@ public boolean equals(final java.lang.Object obj) { if (!getDiskConfig().equals(other.getDiskConfig())) return false; } if (getIsPreemptible() != other.getIsPreemptible()) return false; + if (preemptibility_ != other.preemptibility_) return false; if (hasManagedGroupConfig() != other.hasManagedGroupConfig()) return false; if (hasManagedGroupConfig()) { if (!getManagedGroupConfig().equals(other.getManagedGroupConfig())) return false; @@ -834,6 +1073,8 @@ public int hashCode() { } hash = (37 * hash) + IS_PREEMPTIBLE_FIELD_NUMBER; hash = (53 * hash) + com.google.protobuf.Internal.hashBoolean(getIsPreemptible()); + hash = (37 * hash) + PREEMPTIBILITY_FIELD_NUMBER; + hash = (53 * hash) + preemptibility_; if (hasManagedGroupConfig()) { hash = (37 * hash) + MANAGED_GROUP_CONFIG_FIELD_NUMBER; hash = (53 * hash) + getManagedGroupConfig().hashCode(); @@ -1008,6 +1249,8 @@ public Builder clear() { } isPreemptible_ = false; + preemptibility_ = 0; + if (managedGroupConfigBuilder_ == null) { managedGroupConfig_ = null; } else { @@ -1064,6 +1307,7 @@ public com.google.cloud.dataproc.v1.InstanceGroupConfig buildPartial() { result.diskConfig_ = diskConfigBuilder_.build(); } result.isPreemptible_ = isPreemptible_; + result.preemptibility_ = preemptibility_; if (managedGroupConfigBuilder_ == null) { result.managedGroupConfig_ = managedGroupConfig_; } else { @@ -1156,6 +1400,9 @@ public Builder mergeFrom(com.google.cloud.dataproc.v1.InstanceGroupConfig other) if (other.getIsPreemptible() != false) { setIsPreemptible(other.getIsPreemptible()); } + if (other.preemptibility_ != 0) { + setPreemptibilityValue(other.getPreemptibilityValue()); + } if (other.hasManagedGroupConfig()) { mergeManagedGroupConfig(other.getManagedGroupConfig()); } @@ -2023,6 +2270,130 @@ public Builder clearIsPreemptible() { return this; } + private int preemptibility_ = 0; + /** + * + * + *+ * Optional. Specifies the preemptibility of the instance group. 
+ * The default value for master and worker groups is + * `NON_PREEMPTIBLE`. This default cannot be changed. + * The default value for secondary instances is + * `PREEMPTIBLE`. + *+ * + *+ * .google.cloud.dataproc.v1.InstanceGroupConfig.Preemptibility preemptibility = 10 [(.google.api.field_behavior) = OPTIONAL]; + *
+ * + * @return The enum numeric value on the wire for preemptibility. + */ + @java.lang.Override + public int getPreemptibilityValue() { + return preemptibility_; + } + /** + * + * + *+ * Optional. Specifies the preemptibility of the instance group. + * The default value for master and worker groups is + * `NON_PREEMPTIBLE`. This default cannot be changed. + * The default value for secondary instances is + * `PREEMPTIBLE`. + *+ * + *+ * .google.cloud.dataproc.v1.InstanceGroupConfig.Preemptibility preemptibility = 10 [(.google.api.field_behavior) = OPTIONAL]; + *
+ * + * @param value The enum numeric value on the wire for preemptibility to set. + * @return This builder for chaining. + */ + public Builder setPreemptibilityValue(int value) { + + preemptibility_ = value; + onChanged(); + return this; + } + /** + * + * + *+ * Optional. Specifies the preemptibility of the instance group. + * The default value for master and worker groups is + * `NON_PREEMPTIBLE`. This default cannot be changed. + * The default value for secondary instances is + * `PREEMPTIBLE`. + *+ * + *+ * .google.cloud.dataproc.v1.InstanceGroupConfig.Preemptibility preemptibility = 10 [(.google.api.field_behavior) = OPTIONAL]; + *
+ * + * @return The preemptibility. + */ + @java.lang.Override + public com.google.cloud.dataproc.v1.InstanceGroupConfig.Preemptibility getPreemptibility() { + @SuppressWarnings("deprecation") + com.google.cloud.dataproc.v1.InstanceGroupConfig.Preemptibility result = + com.google.cloud.dataproc.v1.InstanceGroupConfig.Preemptibility.valueOf(preemptibility_); + return result == null + ? com.google.cloud.dataproc.v1.InstanceGroupConfig.Preemptibility.UNRECOGNIZED + : result; + } + /** + * + * + *+ * Optional. Specifies the preemptibility of the instance group. + * The default value for master and worker groups is + * `NON_PREEMPTIBLE`. This default cannot be changed. + * The default value for secondary instances is + * `PREEMPTIBLE`. + *+ * + *+ * .google.cloud.dataproc.v1.InstanceGroupConfig.Preemptibility preemptibility = 10 [(.google.api.field_behavior) = OPTIONAL]; + *
+ * + * @param value The preemptibility to set. + * @return This builder for chaining. + */ + public Builder setPreemptibility( + com.google.cloud.dataproc.v1.InstanceGroupConfig.Preemptibility value) { + if (value == null) { + throw new NullPointerException(); + } + + preemptibility_ = value.getNumber(); + onChanged(); + return this; + } + /** + * + * + *+ * Optional. Specifies the preemptibility of the instance group. + * The default value for master and worker groups is + * `NON_PREEMPTIBLE`. This default cannot be changed. + * The default value for secondary instances is + * `PREEMPTIBLE`. + *+ * + *+ * .google.cloud.dataproc.v1.InstanceGroupConfig.Preemptibility preemptibility = 10 [(.google.api.field_behavior) = OPTIONAL]; + *
+ * + * @return This builder for chaining. + */ + public Builder clearPreemptibility() { + + preemptibility_ = 0; + onChanged(); + return this; + } + private com.google.cloud.dataproc.v1.ManagedGroupConfig managedGroupConfig_; private com.google.protobuf.SingleFieldBuilderV3< com.google.cloud.dataproc.v1.ManagedGroupConfig, diff --git a/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/InstanceGroupConfigOrBuilder.java b/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/InstanceGroupConfigOrBuilder.java index dda45e3f..9709605d 100644 --- a/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/InstanceGroupConfigOrBuilder.java +++ b/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/InstanceGroupConfigOrBuilder.java @@ -237,6 +237,43 @@ public interface InstanceGroupConfigOrBuilder */ boolean getIsPreemptible(); + /** + * + * + *+ * Optional. Specifies the preemptibility of the instance group. + * The default value for master and worker groups is + * `NON_PREEMPTIBLE`. This default cannot be changed. + * The default value for secondary instances is + * `PREEMPTIBLE`. + *+ * + *+ * .google.cloud.dataproc.v1.InstanceGroupConfig.Preemptibility preemptibility = 10 [(.google.api.field_behavior) = OPTIONAL]; + *
+ * + * @return The enum numeric value on the wire for preemptibility. + */ + int getPreemptibilityValue(); + /** + * + * + *+ * Optional. Specifies the preemptibility of the instance group. + * The default value for master and worker groups is + * `NON_PREEMPTIBLE`. This default cannot be changed. + * The default value for secondary instances is + * `PREEMPTIBLE`. + *+ * + *+ * .google.cloud.dataproc.v1.InstanceGroupConfig.Preemptibility preemptibility = 10 [(.google.api.field_behavior) = OPTIONAL]; + *
+ * + * @return The preemptibility. + */ + com.google.cloud.dataproc.v1.InstanceGroupConfig.Preemptibility getPreemptibility(); + /** * * diff --git a/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/JobReference.java b/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/JobReference.java index 22b74dc6..3aa591a6 100644 --- a/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/JobReference.java +++ b/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/JobReference.java @@ -125,11 +125,11 @@ public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { * * *- * Required. The ID of the Google Cloud Platform project that the job - * belongs to. + * Optional. The ID of the Google Cloud Platform project that the job belongs to. If + * specified, must match the request project ID. ** - *string project_id = 1 [(.google.api.field_behavior) = REQUIRED];
+ * <code>string project_id = 1 [(.google.api.field_behavior) = OPTIONAL];</code>
* * @return The projectId. */ @@ -149,11 +149,11 @@ public java.lang.String getProjectId() { * * *- * Required. The ID of the Google Cloud Platform project that the job - * belongs to. + * Optional. The ID of the Google Cloud Platform project that the job belongs to. If + * specified, must match the request project ID. ** - *string project_id = 1 [(.google.api.field_behavior) = REQUIRED];
+ * <code>string project_id = 1 [(.google.api.field_behavior) = OPTIONAL];</code>
* * @return The bytes for projectId. */ @@ -562,11 +562,11 @@ public Builder mergeFrom( * * *- * Required. The ID of the Google Cloud Platform project that the job - * belongs to. + * Optional. The ID of the Google Cloud Platform project that the job belongs to. If + * specified, must match the request project ID. ** - *string project_id = 1 [(.google.api.field_behavior) = REQUIRED];
+ * <code>string project_id = 1 [(.google.api.field_behavior) = OPTIONAL];</code>
* * @return The projectId. */ @@ -585,11 +585,11 @@ public java.lang.String getProjectId() { * * *- * Required. The ID of the Google Cloud Platform project that the job - * belongs to. + * Optional. The ID of the Google Cloud Platform project that the job belongs to. If + * specified, must match the request project ID. ** - *string project_id = 1 [(.google.api.field_behavior) = REQUIRED];
+ * <code>string project_id = 1 [(.google.api.field_behavior) = OPTIONAL];</code>
* * @return The bytes for projectId. */ @@ -608,11 +608,11 @@ public com.google.protobuf.ByteString getProjectIdBytes() { * * *- * Required. The ID of the Google Cloud Platform project that the job - * belongs to. + * Optional. The ID of the Google Cloud Platform project that the job belongs to. If + * specified, must match the request project ID. ** - *string project_id = 1 [(.google.api.field_behavior) = REQUIRED];
+ * <code>string project_id = 1 [(.google.api.field_behavior) = OPTIONAL];</code>
* * @param value The projectId to set. * @return This builder for chaining. @@ -630,11 +630,11 @@ public Builder setProjectId(java.lang.String value) { * * *- * Required. The ID of the Google Cloud Platform project that the job - * belongs to. + * Optional. The ID of the Google Cloud Platform project that the job belongs to. If + * specified, must match the request project ID. ** - *string project_id = 1 [(.google.api.field_behavior) = REQUIRED];
+ * <code>string project_id = 1 [(.google.api.field_behavior) = OPTIONAL];</code>
* * @return This builder for chaining. */ @@ -648,11 +648,11 @@ public Builder clearProjectId() { * * *- * Required. The ID of the Google Cloud Platform project that the job - * belongs to. + * Optional. The ID of the Google Cloud Platform project that the job belongs to. If + * specified, must match the request project ID. ** - *string project_id = 1 [(.google.api.field_behavior) = REQUIRED];
+ * <code>string project_id = 1 [(.google.api.field_behavior) = OPTIONAL];</code>
* * @param value The bytes for projectId to set. * @return This builder for chaining. diff --git a/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/JobReferenceOrBuilder.java b/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/JobReferenceOrBuilder.java index 5329294a..df102fa4 100644 --- a/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/JobReferenceOrBuilder.java +++ b/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/JobReferenceOrBuilder.java @@ -27,11 +27,11 @@ public interface JobReferenceOrBuilder * * *- * Required. The ID of the Google Cloud Platform project that the job - * belongs to. + * Optional. The ID of the Google Cloud Platform project that the job belongs to. If + * specified, must match the request project ID. ** - *string project_id = 1 [(.google.api.field_behavior) = REQUIRED];
+ * <code>string project_id = 1 [(.google.api.field_behavior) = OPTIONAL];</code>
* * @return The projectId. */ @@ -40,11 +40,11 @@ public interface JobReferenceOrBuilder * * *- * Required. The ID of the Google Cloud Platform project that the job - * belongs to. + * Optional. The ID of the Google Cloud Platform project that the job belongs to. If + * specified, must match the request project ID. ** - *string project_id = 1 [(.google.api.field_behavior) = REQUIRED];
+ * <code>string project_id = 1 [(.google.api.field_behavior) = OPTIONAL];</code>
* * @return The bytes for projectId. */ diff --git a/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/JobsProto.java b/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/JobsProto.java index 78929c12..982ab839 100644 --- a/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/JobsProto.java +++ b/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/JobsProto.java @@ -296,7 +296,7 @@ public static com.google.protobuf.Descriptors.FileDescriptor getDescriptor() { + "\022\023\n\017ATTEMPT_FAILURE\020\t\"H\n\010Substate\022\017\n\013UNS" + "PECIFIED\020\000\022\r\n\tSUBMITTED\020\001\022\n\n\006QUEUED\020\002\022\020\n" + "\014STALE_STATUS\020\003\"<\n\014JobReference\022\027\n\nproje" - + "ct_id\030\001 \001(\tB\003\340A\002\022\023\n\006job_id\030\002 \001(\tB\003\340A\001\"\245\002" + + "ct_id\030\001 \001(\tB\003\340A\001\022\023\n\006job_id\030\002 \001(\tB\003\340A\001\"\245\002" + "\n\017YarnApplication\022\021\n\004name\030\001 \001(\tB\003\340A\002\022C\n\005" + "state\030\002 \001(\0162/.google.cloud.dataproc.v1.Y" + "arnApplication.StateB\003\340A\002\022\025\n\010progress\030\003 " diff --git a/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/OrderedJob.java b/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/OrderedJob.java index 7c35af75..959abd9f 100644 --- a/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/OrderedJob.java +++ b/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/OrderedJob.java @@ -426,7 +426,15 @@ public com.google.protobuf.ByteString getStepIdBytes() { public static final int HADOOP_JOB_FIELD_NUMBER = 2; /** - *.google.cloud.dataproc.v1.HadoopJob hadoop_job = 2;
+ * + * + *+ * Optional. Job is a Hadoop job. + *+ * + *+ * .google.cloud.dataproc.v1.HadoopJob hadoop_job = 2 [(.google.api.field_behavior) = OPTIONAL]; + *
* * @return Whether the hadoopJob field is set. */ @@ -435,7 +443,15 @@ public boolean hasHadoopJob() { return jobTypeCase_ == 2; } /** - *.google.cloud.dataproc.v1.HadoopJob hadoop_job = 2;
+ * + * + *+ * Optional. Job is a Hadoop job. + *+ * + *+ * .google.cloud.dataproc.v1.HadoopJob hadoop_job = 2 [(.google.api.field_behavior) = OPTIONAL]; + *
* * @return The hadoopJob. */ @@ -446,7 +462,17 @@ public com.google.cloud.dataproc.v1.HadoopJob getHadoopJob() { } return com.google.cloud.dataproc.v1.HadoopJob.getDefaultInstance(); } - /**.google.cloud.dataproc.v1.HadoopJob hadoop_job = 2;
*/ + /** + * + * + *+ * Optional. Job is a Hadoop job. + *+ * + *+ * .google.cloud.dataproc.v1.HadoopJob hadoop_job = 2 [(.google.api.field_behavior) = OPTIONAL]; + *
+ */ @java.lang.Override public com.google.cloud.dataproc.v1.HadoopJobOrBuilder getHadoopJobOrBuilder() { if (jobTypeCase_ == 2) { @@ -457,7 +483,15 @@ public com.google.cloud.dataproc.v1.HadoopJobOrBuilder getHadoopJobOrBuilder() { public static final int SPARK_JOB_FIELD_NUMBER = 3; /** - *.google.cloud.dataproc.v1.SparkJob spark_job = 3;
+ * + * + *+ * Optional. Job is a Spark job. + *+ * + *+ * .google.cloud.dataproc.v1.SparkJob spark_job = 3 [(.google.api.field_behavior) = OPTIONAL]; + *
* * @return Whether the sparkJob field is set. */ @@ -466,7 +500,15 @@ public boolean hasSparkJob() { return jobTypeCase_ == 3; } /** - *.google.cloud.dataproc.v1.SparkJob spark_job = 3;
+ * + * + *+ * Optional. Job is a Spark job. + *+ * + *+ * .google.cloud.dataproc.v1.SparkJob spark_job = 3 [(.google.api.field_behavior) = OPTIONAL]; + *
* * @return The sparkJob. */ @@ -477,7 +519,17 @@ public com.google.cloud.dataproc.v1.SparkJob getSparkJob() { } return com.google.cloud.dataproc.v1.SparkJob.getDefaultInstance(); } - /**.google.cloud.dataproc.v1.SparkJob spark_job = 3;
*/ + /** + * + * + *+ * Optional. Job is a Spark job. + *+ * + *+ * .google.cloud.dataproc.v1.SparkJob spark_job = 3 [(.google.api.field_behavior) = OPTIONAL]; + *
+ */ @java.lang.Override public com.google.cloud.dataproc.v1.SparkJobOrBuilder getSparkJobOrBuilder() { if (jobTypeCase_ == 3) { @@ -488,7 +540,15 @@ public com.google.cloud.dataproc.v1.SparkJobOrBuilder getSparkJobOrBuilder() { public static final int PYSPARK_JOB_FIELD_NUMBER = 4; /** - *.google.cloud.dataproc.v1.PySparkJob pyspark_job = 4;
+ * + * + *+ * Optional. Job is a PySpark job. + *+ * + *+ * .google.cloud.dataproc.v1.PySparkJob pyspark_job = 4 [(.google.api.field_behavior) = OPTIONAL]; + *
* * @return Whether the pysparkJob field is set. */ @@ -497,7 +557,15 @@ public boolean hasPysparkJob() { return jobTypeCase_ == 4; } /** - *.google.cloud.dataproc.v1.PySparkJob pyspark_job = 4;
+ * + * + *+ * Optional. Job is a PySpark job. + *+ * + *+ * .google.cloud.dataproc.v1.PySparkJob pyspark_job = 4 [(.google.api.field_behavior) = OPTIONAL]; + *
* * @return The pysparkJob. */ @@ -508,7 +576,17 @@ public com.google.cloud.dataproc.v1.PySparkJob getPysparkJob() { } return com.google.cloud.dataproc.v1.PySparkJob.getDefaultInstance(); } - /**.google.cloud.dataproc.v1.PySparkJob pyspark_job = 4;
*/ + /** + * + * + *+ * Optional. Job is a PySpark job. + *+ * + *+ * .google.cloud.dataproc.v1.PySparkJob pyspark_job = 4 [(.google.api.field_behavior) = OPTIONAL]; + *
+ */ @java.lang.Override public com.google.cloud.dataproc.v1.PySparkJobOrBuilder getPysparkJobOrBuilder() { if (jobTypeCase_ == 4) { @@ -519,7 +597,14 @@ public com.google.cloud.dataproc.v1.PySparkJobOrBuilder getPysparkJobOrBuilder() public static final int HIVE_JOB_FIELD_NUMBER = 5; /** - *.google.cloud.dataproc.v1.HiveJob hive_job = 5;
+ * + * + *+ * Optional. Job is a Hive job. + *+ * + *.google.cloud.dataproc.v1.HiveJob hive_job = 5 [(.google.api.field_behavior) = OPTIONAL]; + *
* * @return Whether the hiveJob field is set. */ @@ -528,7 +613,14 @@ public boolean hasHiveJob() { return jobTypeCase_ == 5; } /** - *.google.cloud.dataproc.v1.HiveJob hive_job = 5;
+ * + * + *+ * Optional. Job is a Hive job. + *+ * + *.google.cloud.dataproc.v1.HiveJob hive_job = 5 [(.google.api.field_behavior) = OPTIONAL]; + *
* * @return The hiveJob. */ @@ -539,7 +631,16 @@ public com.google.cloud.dataproc.v1.HiveJob getHiveJob() { } return com.google.cloud.dataproc.v1.HiveJob.getDefaultInstance(); } - /**.google.cloud.dataproc.v1.HiveJob hive_job = 5;
*/ + /** + * + * + *+ * Optional. Job is a Hive job. + *+ * + *.google.cloud.dataproc.v1.HiveJob hive_job = 5 [(.google.api.field_behavior) = OPTIONAL]; + *
+ */ @java.lang.Override public com.google.cloud.dataproc.v1.HiveJobOrBuilder getHiveJobOrBuilder() { if (jobTypeCase_ == 5) { @@ -550,7 +651,14 @@ public com.google.cloud.dataproc.v1.HiveJobOrBuilder getHiveJobOrBuilder() { public static final int PIG_JOB_FIELD_NUMBER = 6; /** - *.google.cloud.dataproc.v1.PigJob pig_job = 6;
+ * + * + *+ * Optional. Job is a Pig job. + *+ * + *.google.cloud.dataproc.v1.PigJob pig_job = 6 [(.google.api.field_behavior) = OPTIONAL]; + *
* * @return Whether the pigJob field is set. */ @@ -559,7 +667,14 @@ public boolean hasPigJob() { return jobTypeCase_ == 6; } /** - *.google.cloud.dataproc.v1.PigJob pig_job = 6;
+ * + * + *+ * Optional. Job is a Pig job. + *+ * + *.google.cloud.dataproc.v1.PigJob pig_job = 6 [(.google.api.field_behavior) = OPTIONAL]; + *
* * @return The pigJob. */ @@ -570,7 +685,16 @@ public com.google.cloud.dataproc.v1.PigJob getPigJob() { } return com.google.cloud.dataproc.v1.PigJob.getDefaultInstance(); } - /**.google.cloud.dataproc.v1.PigJob pig_job = 6;
*/ + /** + * + * + *+ * Optional. Job is a Pig job. + *+ * + *.google.cloud.dataproc.v1.PigJob pig_job = 6 [(.google.api.field_behavior) = OPTIONAL]; + *
+ */ @java.lang.Override public com.google.cloud.dataproc.v1.PigJobOrBuilder getPigJobOrBuilder() { if (jobTypeCase_ == 6) { @@ -584,10 +708,12 @@ public com.google.cloud.dataproc.v1.PigJobOrBuilder getPigJobOrBuilder() { * * *- * Spark R job + * Optional. Job is a SparkR job. ** - *.google.cloud.dataproc.v1.SparkRJob spark_r_job = 11;
+ *+ * .google.cloud.dataproc.v1.SparkRJob spark_r_job = 11 [(.google.api.field_behavior) = OPTIONAL]; + *
* * @return Whether the sparkRJob field is set. */ @@ -599,10 +725,12 @@ public boolean hasSparkRJob() { * * *- * Spark R job + * Optional. Job is a SparkR job. ** - *.google.cloud.dataproc.v1.SparkRJob spark_r_job = 11;
+ *+ * .google.cloud.dataproc.v1.SparkRJob spark_r_job = 11 [(.google.api.field_behavior) = OPTIONAL]; + *
* * @return The sparkRJob. */ @@ -617,10 +745,12 @@ public com.google.cloud.dataproc.v1.SparkRJob getSparkRJob() { * * *- * Spark R job + * Optional. Job is a SparkR job. ** - *.google.cloud.dataproc.v1.SparkRJob spark_r_job = 11;
+ *+ * .google.cloud.dataproc.v1.SparkRJob spark_r_job = 11 [(.google.api.field_behavior) = OPTIONAL]; + *
*/ @java.lang.Override public com.google.cloud.dataproc.v1.SparkRJobOrBuilder getSparkRJobOrBuilder() { @@ -632,7 +762,15 @@ public com.google.cloud.dataproc.v1.SparkRJobOrBuilder getSparkRJobOrBuilder() { public static final int SPARK_SQL_JOB_FIELD_NUMBER = 7; /** - *.google.cloud.dataproc.v1.SparkSqlJob spark_sql_job = 7;
+ * + * + *+ * Optional. Job is a SparkSql job. + *+ * + *+ * .google.cloud.dataproc.v1.SparkSqlJob spark_sql_job = 7 [(.google.api.field_behavior) = OPTIONAL]; + *
* * @return Whether the sparkSqlJob field is set. */ @@ -641,7 +779,15 @@ public boolean hasSparkSqlJob() { return jobTypeCase_ == 7; } /** - *.google.cloud.dataproc.v1.SparkSqlJob spark_sql_job = 7;
+ * + * + *+ * Optional. Job is a SparkSql job. + *+ * + *+ * .google.cloud.dataproc.v1.SparkSqlJob spark_sql_job = 7 [(.google.api.field_behavior) = OPTIONAL]; + *
* * @return The sparkSqlJob. */ @@ -652,7 +798,17 @@ public com.google.cloud.dataproc.v1.SparkSqlJob getSparkSqlJob() { } return com.google.cloud.dataproc.v1.SparkSqlJob.getDefaultInstance(); } - /**.google.cloud.dataproc.v1.SparkSqlJob spark_sql_job = 7;
*/ + /** + * + * + *+ * Optional. Job is a SparkSql job. + *+ * + *+ * .google.cloud.dataproc.v1.SparkSqlJob spark_sql_job = 7 [(.google.api.field_behavior) = OPTIONAL]; + *
+ */ @java.lang.Override public com.google.cloud.dataproc.v1.SparkSqlJobOrBuilder getSparkSqlJobOrBuilder() { if (jobTypeCase_ == 7) { @@ -666,10 +822,12 @@ public com.google.cloud.dataproc.v1.SparkSqlJobOrBuilder getSparkSqlJobOrBuilder * * *- * Presto job + * Optional. Job is a Presto job. ** - *.google.cloud.dataproc.v1.PrestoJob presto_job = 12;
+ *+ * .google.cloud.dataproc.v1.PrestoJob presto_job = 12 [(.google.api.field_behavior) = OPTIONAL]; + *
* * @return Whether the prestoJob field is set. */ @@ -681,10 +839,12 @@ public boolean hasPrestoJob() { * * *- * Presto job + * Optional. Job is a Presto job. ** - *.google.cloud.dataproc.v1.PrestoJob presto_job = 12;
+ *+ * .google.cloud.dataproc.v1.PrestoJob presto_job = 12 [(.google.api.field_behavior) = OPTIONAL]; + *
* * @return The prestoJob. */ @@ -699,10 +859,12 @@ public com.google.cloud.dataproc.v1.PrestoJob getPrestoJob() { * * *- * Presto job + * Optional. Job is a Presto job. ** - *.google.cloud.dataproc.v1.PrestoJob presto_job = 12;
+ *+ * .google.cloud.dataproc.v1.PrestoJob presto_job = 12 [(.google.api.field_behavior) = OPTIONAL]; + *
*/ @java.lang.Override public com.google.cloud.dataproc.v1.PrestoJobOrBuilder getPrestoJobOrBuilder() { @@ -1778,7 +1940,15 @@ public Builder setStepIdBytes(com.google.protobuf.ByteString value) { com.google.cloud.dataproc.v1.HadoopJobOrBuilder> hadoopJobBuilder_; /** - *.google.cloud.dataproc.v1.HadoopJob hadoop_job = 2;
+ * + * + *+ * Optional. Job is a Hadoop job. + *+ * + *+ * .google.cloud.dataproc.v1.HadoopJob hadoop_job = 2 [(.google.api.field_behavior) = OPTIONAL]; + *
* * @return Whether the hadoopJob field is set. */ @@ -1787,7 +1957,15 @@ public boolean hasHadoopJob() { return jobTypeCase_ == 2; } /** - *.google.cloud.dataproc.v1.HadoopJob hadoop_job = 2;
+ * + * + *+ * Optional. Job is a Hadoop job. + *+ * + *+ * .google.cloud.dataproc.v1.HadoopJob hadoop_job = 2 [(.google.api.field_behavior) = OPTIONAL]; + *
* * @return The hadoopJob. */ @@ -1805,7 +1983,17 @@ public com.google.cloud.dataproc.v1.HadoopJob getHadoopJob() { return com.google.cloud.dataproc.v1.HadoopJob.getDefaultInstance(); } } - /**.google.cloud.dataproc.v1.HadoopJob hadoop_job = 2;
*/ + /** + * + * + *+ * Optional. Job is a Hadoop job. + *+ * + *+ * .google.cloud.dataproc.v1.HadoopJob hadoop_job = 2 [(.google.api.field_behavior) = OPTIONAL]; + *
+ */ public Builder setHadoopJob(com.google.cloud.dataproc.v1.HadoopJob value) { if (hadoopJobBuilder_ == null) { if (value == null) { @@ -1819,7 +2007,17 @@ public Builder setHadoopJob(com.google.cloud.dataproc.v1.HadoopJob value) { jobTypeCase_ = 2; return this; } - /**.google.cloud.dataproc.v1.HadoopJob hadoop_job = 2;
*/ + /** + * + * + *+ * Optional. Job is a Hadoop job. + *+ * + *+ * .google.cloud.dataproc.v1.HadoopJob hadoop_job = 2 [(.google.api.field_behavior) = OPTIONAL]; + *
+ */ public Builder setHadoopJob(com.google.cloud.dataproc.v1.HadoopJob.Builder builderForValue) { if (hadoopJobBuilder_ == null) { jobType_ = builderForValue.build(); @@ -1830,7 +2028,17 @@ public Builder setHadoopJob(com.google.cloud.dataproc.v1.HadoopJob.Builder build jobTypeCase_ = 2; return this; } - /**.google.cloud.dataproc.v1.HadoopJob hadoop_job = 2;
*/ + /** + * + * + *+ * Optional. Job is a Hadoop job. + *+ * + *+ * .google.cloud.dataproc.v1.HadoopJob hadoop_job = 2 [(.google.api.field_behavior) = OPTIONAL]; + *
+ */ public Builder mergeHadoopJob(com.google.cloud.dataproc.v1.HadoopJob value) { if (hadoopJobBuilder_ == null) { if (jobTypeCase_ == 2 @@ -1853,7 +2061,17 @@ public Builder mergeHadoopJob(com.google.cloud.dataproc.v1.HadoopJob value) { jobTypeCase_ = 2; return this; } - /**.google.cloud.dataproc.v1.HadoopJob hadoop_job = 2;
*/ + /** + * + * + *+ * Optional. Job is a Hadoop job. + *+ * + *+ * .google.cloud.dataproc.v1.HadoopJob hadoop_job = 2 [(.google.api.field_behavior) = OPTIONAL]; + *
+ */ public Builder clearHadoopJob() { if (hadoopJobBuilder_ == null) { if (jobTypeCase_ == 2) { @@ -1870,11 +2088,31 @@ public Builder clearHadoopJob() { } return this; } - /**.google.cloud.dataproc.v1.HadoopJob hadoop_job = 2;
*/ + /** + * + * + *+ * Optional. Job is a Hadoop job. + *+ * + *+ * .google.cloud.dataproc.v1.HadoopJob hadoop_job = 2 [(.google.api.field_behavior) = OPTIONAL]; + *
+ */ public com.google.cloud.dataproc.v1.HadoopJob.Builder getHadoopJobBuilder() { return getHadoopJobFieldBuilder().getBuilder(); } - /**.google.cloud.dataproc.v1.HadoopJob hadoop_job = 2;
*/ + /** + * + * + *+ * Optional. Job is a Hadoop job. + *+ * + *+ * .google.cloud.dataproc.v1.HadoopJob hadoop_job = 2 [(.google.api.field_behavior) = OPTIONAL]; + *
+ */ @java.lang.Override public com.google.cloud.dataproc.v1.HadoopJobOrBuilder getHadoopJobOrBuilder() { if ((jobTypeCase_ == 2) && (hadoopJobBuilder_ != null)) { @@ -1886,7 +2124,17 @@ public com.google.cloud.dataproc.v1.HadoopJobOrBuilder getHadoopJobOrBuilder() { return com.google.cloud.dataproc.v1.HadoopJob.getDefaultInstance(); } } - /**.google.cloud.dataproc.v1.HadoopJob hadoop_job = 2;
*/ + /** + * + * + *+ * Optional. Job is a Hadoop job. + *+ * + *+ * .google.cloud.dataproc.v1.HadoopJob hadoop_job = 2 [(.google.api.field_behavior) = OPTIONAL]; + *
+ */ private com.google.protobuf.SingleFieldBuilderV3< com.google.cloud.dataproc.v1.HadoopJob, com.google.cloud.dataproc.v1.HadoopJob.Builder, @@ -1918,7 +2166,15 @@ public com.google.cloud.dataproc.v1.HadoopJobOrBuilder getHadoopJobOrBuilder() { com.google.cloud.dataproc.v1.SparkJobOrBuilder> sparkJobBuilder_; /** - *.google.cloud.dataproc.v1.SparkJob spark_job = 3;
+ * + * + *+ * Optional. Job is a Spark job. + *+ * + *+ * .google.cloud.dataproc.v1.SparkJob spark_job = 3 [(.google.api.field_behavior) = OPTIONAL]; + *
* * @return Whether the sparkJob field is set. */ @@ -1927,7 +2183,15 @@ public boolean hasSparkJob() { return jobTypeCase_ == 3; } /** - *.google.cloud.dataproc.v1.SparkJob spark_job = 3;
+ * + * + *+ * Optional. Job is a Spark job. + *+ * + *+ * .google.cloud.dataproc.v1.SparkJob spark_job = 3 [(.google.api.field_behavior) = OPTIONAL]; + *
* * @return The sparkJob. */ @@ -1945,7 +2209,17 @@ public com.google.cloud.dataproc.v1.SparkJob getSparkJob() { return com.google.cloud.dataproc.v1.SparkJob.getDefaultInstance(); } } - /**.google.cloud.dataproc.v1.SparkJob spark_job = 3;
*/ + /** + * + * + *+ * Optional. Job is a Spark job. + *+ * + *+ * .google.cloud.dataproc.v1.SparkJob spark_job = 3 [(.google.api.field_behavior) = OPTIONAL]; + *
+ */ public Builder setSparkJob(com.google.cloud.dataproc.v1.SparkJob value) { if (sparkJobBuilder_ == null) { if (value == null) { @@ -1959,7 +2233,17 @@ public Builder setSparkJob(com.google.cloud.dataproc.v1.SparkJob value) { jobTypeCase_ = 3; return this; } - /**.google.cloud.dataproc.v1.SparkJob spark_job = 3;
*/ + /** + * + * + *+ * Optional. Job is a Spark job. + *+ * + *+ * .google.cloud.dataproc.v1.SparkJob spark_job = 3 [(.google.api.field_behavior) = OPTIONAL]; + *
+ */ public Builder setSparkJob(com.google.cloud.dataproc.v1.SparkJob.Builder builderForValue) { if (sparkJobBuilder_ == null) { jobType_ = builderForValue.build(); @@ -1970,10 +2254,20 @@ public Builder setSparkJob(com.google.cloud.dataproc.v1.SparkJob.Builder builder jobTypeCase_ = 3; return this; } - /**.google.cloud.dataproc.v1.SparkJob spark_job = 3;
*/ - public Builder mergeSparkJob(com.google.cloud.dataproc.v1.SparkJob value) { - if (sparkJobBuilder_ == null) { - if (jobTypeCase_ == 3 + /** + * + * + *+ * Optional. Job is a Spark job. + *+ * + *+ * .google.cloud.dataproc.v1.SparkJob spark_job = 3 [(.google.api.field_behavior) = OPTIONAL]; + *
+ */ + public Builder mergeSparkJob(com.google.cloud.dataproc.v1.SparkJob value) { + if (sparkJobBuilder_ == null) { + if (jobTypeCase_ == 3 && jobType_ != com.google.cloud.dataproc.v1.SparkJob.getDefaultInstance()) { jobType_ = com.google.cloud.dataproc.v1.SparkJob.newBuilder( @@ -1993,7 +2287,17 @@ public Builder mergeSparkJob(com.google.cloud.dataproc.v1.SparkJob value) { jobTypeCase_ = 3; return this; } - /**.google.cloud.dataproc.v1.SparkJob spark_job = 3;
*/ + /** + * + * + *+ * Optional. Job is a Spark job. + *+ * + *+ * .google.cloud.dataproc.v1.SparkJob spark_job = 3 [(.google.api.field_behavior) = OPTIONAL]; + *
+ */ public Builder clearSparkJob() { if (sparkJobBuilder_ == null) { if (jobTypeCase_ == 3) { @@ -2010,11 +2314,31 @@ public Builder clearSparkJob() { } return this; } - /**.google.cloud.dataproc.v1.SparkJob spark_job = 3;
*/ + /** + * + * + *+ * Optional. Job is a Spark job. + *+ * + *+ * .google.cloud.dataproc.v1.SparkJob spark_job = 3 [(.google.api.field_behavior) = OPTIONAL]; + *
+ */ public com.google.cloud.dataproc.v1.SparkJob.Builder getSparkJobBuilder() { return getSparkJobFieldBuilder().getBuilder(); } - /**.google.cloud.dataproc.v1.SparkJob spark_job = 3;
*/ + /** + * + * + *+ * Optional. Job is a Spark job. + *+ * + *+ * .google.cloud.dataproc.v1.SparkJob spark_job = 3 [(.google.api.field_behavior) = OPTIONAL]; + *
+ */ @java.lang.Override public com.google.cloud.dataproc.v1.SparkJobOrBuilder getSparkJobOrBuilder() { if ((jobTypeCase_ == 3) && (sparkJobBuilder_ != null)) { @@ -2026,7 +2350,17 @@ public com.google.cloud.dataproc.v1.SparkJobOrBuilder getSparkJobOrBuilder() { return com.google.cloud.dataproc.v1.SparkJob.getDefaultInstance(); } } - /**.google.cloud.dataproc.v1.SparkJob spark_job = 3;
*/ + /** + * + * + *+ * Optional. Job is a Spark job. + *+ * + *+ * .google.cloud.dataproc.v1.SparkJob spark_job = 3 [(.google.api.field_behavior) = OPTIONAL]; + *
+ */ private com.google.protobuf.SingleFieldBuilderV3< com.google.cloud.dataproc.v1.SparkJob, com.google.cloud.dataproc.v1.SparkJob.Builder, @@ -2058,7 +2392,15 @@ public com.google.cloud.dataproc.v1.SparkJobOrBuilder getSparkJobOrBuilder() { com.google.cloud.dataproc.v1.PySparkJobOrBuilder> pysparkJobBuilder_; /** - *.google.cloud.dataproc.v1.PySparkJob pyspark_job = 4;
+ * + * + *+ * Optional. Job is a PySpark job. + *+ * + *+ * .google.cloud.dataproc.v1.PySparkJob pyspark_job = 4 [(.google.api.field_behavior) = OPTIONAL]; + *
* * @return Whether the pysparkJob field is set. */ @@ -2067,7 +2409,15 @@ public boolean hasPysparkJob() { return jobTypeCase_ == 4; } /** - *.google.cloud.dataproc.v1.PySparkJob pyspark_job = 4;
+ * + * + *+ * Optional. Job is a PySpark job. + *+ * + *+ * .google.cloud.dataproc.v1.PySparkJob pyspark_job = 4 [(.google.api.field_behavior) = OPTIONAL]; + *
* * @return The pysparkJob. */ @@ -2085,7 +2435,17 @@ public com.google.cloud.dataproc.v1.PySparkJob getPysparkJob() { return com.google.cloud.dataproc.v1.PySparkJob.getDefaultInstance(); } } - /**.google.cloud.dataproc.v1.PySparkJob pyspark_job = 4;
*/ + /** + * + * + *+ * Optional. Job is a PySpark job. + *+ * + *+ * .google.cloud.dataproc.v1.PySparkJob pyspark_job = 4 [(.google.api.field_behavior) = OPTIONAL]; + *
+ */ public Builder setPysparkJob(com.google.cloud.dataproc.v1.PySparkJob value) { if (pysparkJobBuilder_ == null) { if (value == null) { @@ -2099,7 +2459,17 @@ public Builder setPysparkJob(com.google.cloud.dataproc.v1.PySparkJob value) { jobTypeCase_ = 4; return this; } - /**.google.cloud.dataproc.v1.PySparkJob pyspark_job = 4;
*/ + /** + * + * + *+ * Optional. Job is a PySpark job. + *+ * + *+ * .google.cloud.dataproc.v1.PySparkJob pyspark_job = 4 [(.google.api.field_behavior) = OPTIONAL]; + *
+ */ public Builder setPysparkJob(com.google.cloud.dataproc.v1.PySparkJob.Builder builderForValue) { if (pysparkJobBuilder_ == null) { jobType_ = builderForValue.build(); @@ -2110,7 +2480,17 @@ public Builder setPysparkJob(com.google.cloud.dataproc.v1.PySparkJob.Builder bui jobTypeCase_ = 4; return this; } - /**.google.cloud.dataproc.v1.PySparkJob pyspark_job = 4;
*/ + /** + * + * + *+ * Optional. Job is a PySpark job. + *+ * + *+ * .google.cloud.dataproc.v1.PySparkJob pyspark_job = 4 [(.google.api.field_behavior) = OPTIONAL]; + *
+ */ public Builder mergePysparkJob(com.google.cloud.dataproc.v1.PySparkJob value) { if (pysparkJobBuilder_ == null) { if (jobTypeCase_ == 4 @@ -2133,7 +2513,17 @@ public Builder mergePysparkJob(com.google.cloud.dataproc.v1.PySparkJob value) { jobTypeCase_ = 4; return this; } - /**.google.cloud.dataproc.v1.PySparkJob pyspark_job = 4;
*/ + /** + * + * + *+ * Optional. Job is a PySpark job. + *+ * + *+ * .google.cloud.dataproc.v1.PySparkJob pyspark_job = 4 [(.google.api.field_behavior) = OPTIONAL]; + *
+ */ public Builder clearPysparkJob() { if (pysparkJobBuilder_ == null) { if (jobTypeCase_ == 4) { @@ -2150,11 +2540,31 @@ public Builder clearPysparkJob() { } return this; } - /**.google.cloud.dataproc.v1.PySparkJob pyspark_job = 4;
*/ + /** + * + * + *+ * Optional. Job is a PySpark job. + *+ * + *+ * .google.cloud.dataproc.v1.PySparkJob pyspark_job = 4 [(.google.api.field_behavior) = OPTIONAL]; + *
+ */ public com.google.cloud.dataproc.v1.PySparkJob.Builder getPysparkJobBuilder() { return getPysparkJobFieldBuilder().getBuilder(); } - /**.google.cloud.dataproc.v1.PySparkJob pyspark_job = 4;
*/ + /** + * + * + *+ * Optional. Job is a PySpark job. + *+ * + *+ * .google.cloud.dataproc.v1.PySparkJob pyspark_job = 4 [(.google.api.field_behavior) = OPTIONAL]; + *
+ */ @java.lang.Override public com.google.cloud.dataproc.v1.PySparkJobOrBuilder getPysparkJobOrBuilder() { if ((jobTypeCase_ == 4) && (pysparkJobBuilder_ != null)) { @@ -2166,7 +2576,17 @@ public com.google.cloud.dataproc.v1.PySparkJobOrBuilder getPysparkJobOrBuilder() return com.google.cloud.dataproc.v1.PySparkJob.getDefaultInstance(); } } - /**.google.cloud.dataproc.v1.PySparkJob pyspark_job = 4;
*/ + /** + * + * + *+ * Optional. Job is a PySpark job. + *+ * + *+ * .google.cloud.dataproc.v1.PySparkJob pyspark_job = 4 [(.google.api.field_behavior) = OPTIONAL]; + *
+ */ private com.google.protobuf.SingleFieldBuilderV3< com.google.cloud.dataproc.v1.PySparkJob, com.google.cloud.dataproc.v1.PySparkJob.Builder, @@ -2198,7 +2618,15 @@ public com.google.cloud.dataproc.v1.PySparkJobOrBuilder getPysparkJobOrBuilder() com.google.cloud.dataproc.v1.HiveJobOrBuilder> hiveJobBuilder_; /** - *.google.cloud.dataproc.v1.HiveJob hive_job = 5;
+ * + * + *+ * Optional. Job is a Hive job. + *+ * + *+ * .google.cloud.dataproc.v1.HiveJob hive_job = 5 [(.google.api.field_behavior) = OPTIONAL]; + *
* * @return Whether the hiveJob field is set. */ @@ -2207,7 +2635,15 @@ public boolean hasHiveJob() { return jobTypeCase_ == 5; } /** - *.google.cloud.dataproc.v1.HiveJob hive_job = 5;
+ * + * + *+ * Optional. Job is a Hive job. + *+ * + *+ * .google.cloud.dataproc.v1.HiveJob hive_job = 5 [(.google.api.field_behavior) = OPTIONAL]; + *
* * @return The hiveJob. */ @@ -2225,7 +2661,17 @@ public com.google.cloud.dataproc.v1.HiveJob getHiveJob() { return com.google.cloud.dataproc.v1.HiveJob.getDefaultInstance(); } } - /**.google.cloud.dataproc.v1.HiveJob hive_job = 5;
*/ + /** + * + * + *+ * Optional. Job is a Hive job. + *+ * + *+ * .google.cloud.dataproc.v1.HiveJob hive_job = 5 [(.google.api.field_behavior) = OPTIONAL]; + *
+ */ public Builder setHiveJob(com.google.cloud.dataproc.v1.HiveJob value) { if (hiveJobBuilder_ == null) { if (value == null) { @@ -2239,7 +2685,17 @@ public Builder setHiveJob(com.google.cloud.dataproc.v1.HiveJob value) { jobTypeCase_ = 5; return this; } - /**.google.cloud.dataproc.v1.HiveJob hive_job = 5;
*/ + /** + * + * + *+ * Optional. Job is a Hive job. + *+ * + *+ * .google.cloud.dataproc.v1.HiveJob hive_job = 5 [(.google.api.field_behavior) = OPTIONAL]; + *
+ */ public Builder setHiveJob(com.google.cloud.dataproc.v1.HiveJob.Builder builderForValue) { if (hiveJobBuilder_ == null) { jobType_ = builderForValue.build(); @@ -2250,7 +2706,17 @@ public Builder setHiveJob(com.google.cloud.dataproc.v1.HiveJob.Builder builderFo jobTypeCase_ = 5; return this; } - /**.google.cloud.dataproc.v1.HiveJob hive_job = 5;
*/ + /** + * + * + *+ * Optional. Job is a Hive job. + *+ * + *+ * .google.cloud.dataproc.v1.HiveJob hive_job = 5 [(.google.api.field_behavior) = OPTIONAL]; + *
+ */ public Builder mergeHiveJob(com.google.cloud.dataproc.v1.HiveJob value) { if (hiveJobBuilder_ == null) { if (jobTypeCase_ == 5 @@ -2273,7 +2739,17 @@ public Builder mergeHiveJob(com.google.cloud.dataproc.v1.HiveJob value) { jobTypeCase_ = 5; return this; } - /**.google.cloud.dataproc.v1.HiveJob hive_job = 5;
*/ + /** + * + * + *+ * Optional. Job is a Hive job. + *+ * + *+ * .google.cloud.dataproc.v1.HiveJob hive_job = 5 [(.google.api.field_behavior) = OPTIONAL]; + *
+ */ public Builder clearHiveJob() { if (hiveJobBuilder_ == null) { if (jobTypeCase_ == 5) { @@ -2290,11 +2766,31 @@ public Builder clearHiveJob() { } return this; } - /**.google.cloud.dataproc.v1.HiveJob hive_job = 5;
*/ + /** + * + * + *+ * Optional. Job is a Hive job. + *+ * + *+ * .google.cloud.dataproc.v1.HiveJob hive_job = 5 [(.google.api.field_behavior) = OPTIONAL]; + *
+ */ public com.google.cloud.dataproc.v1.HiveJob.Builder getHiveJobBuilder() { return getHiveJobFieldBuilder().getBuilder(); } - /**.google.cloud.dataproc.v1.HiveJob hive_job = 5;
*/ + /** + * + * + *+ * Optional. Job is a Hive job. + *+ * + *+ * .google.cloud.dataproc.v1.HiveJob hive_job = 5 [(.google.api.field_behavior) = OPTIONAL]; + *
+ */ @java.lang.Override public com.google.cloud.dataproc.v1.HiveJobOrBuilder getHiveJobOrBuilder() { if ((jobTypeCase_ == 5) && (hiveJobBuilder_ != null)) { @@ -2306,7 +2802,17 @@ public com.google.cloud.dataproc.v1.HiveJobOrBuilder getHiveJobOrBuilder() { return com.google.cloud.dataproc.v1.HiveJob.getDefaultInstance(); } } - /**.google.cloud.dataproc.v1.HiveJob hive_job = 5;
*/ + /** + * + * + *+ * Optional. Job is a Hive job. + *+ * + *+ * .google.cloud.dataproc.v1.HiveJob hive_job = 5 [(.google.api.field_behavior) = OPTIONAL]; + *
+ */ private com.google.protobuf.SingleFieldBuilderV3< com.google.cloud.dataproc.v1.HiveJob, com.google.cloud.dataproc.v1.HiveJob.Builder, @@ -2336,7 +2842,14 @@ public com.google.cloud.dataproc.v1.HiveJobOrBuilder getHiveJobOrBuilder() { com.google.cloud.dataproc.v1.PigJobOrBuilder> pigJobBuilder_; /** - *.google.cloud.dataproc.v1.PigJob pig_job = 6;
+ * + * + *+ * Optional. Job is a Pig job. + *+ * + *.google.cloud.dataproc.v1.PigJob pig_job = 6 [(.google.api.field_behavior) = OPTIONAL]; + *
* * @return Whether the pigJob field is set. */ @@ -2345,7 +2858,14 @@ public boolean hasPigJob() { return jobTypeCase_ == 6; } /** - *.google.cloud.dataproc.v1.PigJob pig_job = 6;
+ * + * + *+ * Optional. Job is a Pig job. + *+ * + *.google.cloud.dataproc.v1.PigJob pig_job = 6 [(.google.api.field_behavior) = OPTIONAL]; + *
* * @return The pigJob. */ @@ -2363,7 +2883,16 @@ public com.google.cloud.dataproc.v1.PigJob getPigJob() { return com.google.cloud.dataproc.v1.PigJob.getDefaultInstance(); } } - /**.google.cloud.dataproc.v1.PigJob pig_job = 6;
*/ + /** + * + * + *+ * Optional. Job is a Pig job. + *+ * + *.google.cloud.dataproc.v1.PigJob pig_job = 6 [(.google.api.field_behavior) = OPTIONAL]; + *
+ */ public Builder setPigJob(com.google.cloud.dataproc.v1.PigJob value) { if (pigJobBuilder_ == null) { if (value == null) { @@ -2377,7 +2906,16 @@ public Builder setPigJob(com.google.cloud.dataproc.v1.PigJob value) { jobTypeCase_ = 6; return this; } - /**.google.cloud.dataproc.v1.PigJob pig_job = 6;
*/ + /** + * + * + *+ * Optional. Job is a Pig job. + *+ * + *.google.cloud.dataproc.v1.PigJob pig_job = 6 [(.google.api.field_behavior) = OPTIONAL]; + *
+ */ public Builder setPigJob(com.google.cloud.dataproc.v1.PigJob.Builder builderForValue) { if (pigJobBuilder_ == null) { jobType_ = builderForValue.build(); @@ -2388,7 +2926,16 @@ public Builder setPigJob(com.google.cloud.dataproc.v1.PigJob.Builder builderForV jobTypeCase_ = 6; return this; } - /**.google.cloud.dataproc.v1.PigJob pig_job = 6;
*/ + /** + * + * + *+ * Optional. Job is a Pig job. + *+ * + *.google.cloud.dataproc.v1.PigJob pig_job = 6 [(.google.api.field_behavior) = OPTIONAL]; + *
+ */ public Builder mergePigJob(com.google.cloud.dataproc.v1.PigJob value) { if (pigJobBuilder_ == null) { if (jobTypeCase_ == 6 @@ -2411,7 +2958,16 @@ public Builder mergePigJob(com.google.cloud.dataproc.v1.PigJob value) { jobTypeCase_ = 6; return this; } - /**.google.cloud.dataproc.v1.PigJob pig_job = 6;
*/ + /** + * + * + *+ * Optional. Job is a Pig job. + *+ * + *.google.cloud.dataproc.v1.PigJob pig_job = 6 [(.google.api.field_behavior) = OPTIONAL]; + *
+ */ public Builder clearPigJob() { if (pigJobBuilder_ == null) { if (jobTypeCase_ == 6) { @@ -2428,11 +2984,29 @@ public Builder clearPigJob() { } return this; } - /**.google.cloud.dataproc.v1.PigJob pig_job = 6;
*/ + /** + * + * + *+ * Optional. Job is a Pig job. + *+ * + *.google.cloud.dataproc.v1.PigJob pig_job = 6 [(.google.api.field_behavior) = OPTIONAL]; + *
+ */ public com.google.cloud.dataproc.v1.PigJob.Builder getPigJobBuilder() { return getPigJobFieldBuilder().getBuilder(); } - /**.google.cloud.dataproc.v1.PigJob pig_job = 6;
*/ + /** + * + * + *+ * Optional. Job is a Pig job. + *+ * + *.google.cloud.dataproc.v1.PigJob pig_job = 6 [(.google.api.field_behavior) = OPTIONAL]; + *
+ */ @java.lang.Override public com.google.cloud.dataproc.v1.PigJobOrBuilder getPigJobOrBuilder() { if ((jobTypeCase_ == 6) && (pigJobBuilder_ != null)) { @@ -2444,7 +3018,16 @@ public com.google.cloud.dataproc.v1.PigJobOrBuilder getPigJobOrBuilder() { return com.google.cloud.dataproc.v1.PigJob.getDefaultInstance(); } } - /**.google.cloud.dataproc.v1.PigJob pig_job = 6;
*/ + /** + * + * + *+ * Optional. Job is a Pig job. + *+ * + *.google.cloud.dataproc.v1.PigJob pig_job = 6 [(.google.api.field_behavior) = OPTIONAL]; + *
+ */ private com.google.protobuf.SingleFieldBuilderV3< com.google.cloud.dataproc.v1.PigJob, com.google.cloud.dataproc.v1.PigJob.Builder, @@ -2477,10 +3060,12 @@ public com.google.cloud.dataproc.v1.PigJobOrBuilder getPigJobOrBuilder() { * * *- * Spark R job + * Optional. Job is a SparkR job. ** - *.google.cloud.dataproc.v1.SparkRJob spark_r_job = 11;
+ *+ * .google.cloud.dataproc.v1.SparkRJob spark_r_job = 11 [(.google.api.field_behavior) = OPTIONAL]; + *
* * @return Whether the sparkRJob field is set. */ @@ -2492,10 +3077,12 @@ public boolean hasSparkRJob() { * * *- * Spark R job + * Optional. Job is a SparkR job. ** - *.google.cloud.dataproc.v1.SparkRJob spark_r_job = 11;
+ *+ * .google.cloud.dataproc.v1.SparkRJob spark_r_job = 11 [(.google.api.field_behavior) = OPTIONAL]; + *
* * @return The sparkRJob. */ @@ -2517,10 +3104,12 @@ public com.google.cloud.dataproc.v1.SparkRJob getSparkRJob() { * * *- * Spark R job + * Optional. Job is a SparkR job. ** - *.google.cloud.dataproc.v1.SparkRJob spark_r_job = 11;
+ *+ * .google.cloud.dataproc.v1.SparkRJob spark_r_job = 11 [(.google.api.field_behavior) = OPTIONAL]; + *
*/ public Builder setSparkRJob(com.google.cloud.dataproc.v1.SparkRJob value) { if (sparkRJobBuilder_ == null) { @@ -2539,10 +3128,12 @@ public Builder setSparkRJob(com.google.cloud.dataproc.v1.SparkRJob value) { * * *- * Spark R job + * Optional. Job is a SparkR job. ** - *.google.cloud.dataproc.v1.SparkRJob spark_r_job = 11;
+ *+ * .google.cloud.dataproc.v1.SparkRJob spark_r_job = 11 [(.google.api.field_behavior) = OPTIONAL]; + *
*/ public Builder setSparkRJob(com.google.cloud.dataproc.v1.SparkRJob.Builder builderForValue) { if (sparkRJobBuilder_ == null) { @@ -2558,10 +3149,12 @@ public Builder setSparkRJob(com.google.cloud.dataproc.v1.SparkRJob.Builder build * * *- * Spark R job + * Optional. Job is a SparkR job. ** - *.google.cloud.dataproc.v1.SparkRJob spark_r_job = 11;
+ *+ * .google.cloud.dataproc.v1.SparkRJob spark_r_job = 11 [(.google.api.field_behavior) = OPTIONAL]; + *
*/ public Builder mergeSparkRJob(com.google.cloud.dataproc.v1.SparkRJob value) { if (sparkRJobBuilder_ == null) { @@ -2589,10 +3182,12 @@ public Builder mergeSparkRJob(com.google.cloud.dataproc.v1.SparkRJob value) { * * *- * Spark R job + * Optional. Job is a SparkR job. ** - *.google.cloud.dataproc.v1.SparkRJob spark_r_job = 11;
+ *+ * .google.cloud.dataproc.v1.SparkRJob spark_r_job = 11 [(.google.api.field_behavior) = OPTIONAL]; + *
*/ public Builder clearSparkRJob() { if (sparkRJobBuilder_ == null) { @@ -2614,10 +3209,12 @@ public Builder clearSparkRJob() { * * *- * Spark R job + * Optional. Job is a SparkR job. ** - *.google.cloud.dataproc.v1.SparkRJob spark_r_job = 11;
+ *+ * .google.cloud.dataproc.v1.SparkRJob spark_r_job = 11 [(.google.api.field_behavior) = OPTIONAL]; + *
*/ public com.google.cloud.dataproc.v1.SparkRJob.Builder getSparkRJobBuilder() { return getSparkRJobFieldBuilder().getBuilder(); @@ -2626,10 +3223,12 @@ public com.google.cloud.dataproc.v1.SparkRJob.Builder getSparkRJobBuilder() { * * *- * Spark R job + * Optional. Job is a SparkR job. ** - *.google.cloud.dataproc.v1.SparkRJob spark_r_job = 11;
+ *+ * .google.cloud.dataproc.v1.SparkRJob spark_r_job = 11 [(.google.api.field_behavior) = OPTIONAL]; + *
*/ @java.lang.Override public com.google.cloud.dataproc.v1.SparkRJobOrBuilder getSparkRJobOrBuilder() { @@ -2646,10 +3245,12 @@ public com.google.cloud.dataproc.v1.SparkRJobOrBuilder getSparkRJobOrBuilder() { * * *- * Spark R job + * Optional. Job is a SparkR job. ** - *.google.cloud.dataproc.v1.SparkRJob spark_r_job = 11;
+ *+ * .google.cloud.dataproc.v1.SparkRJob spark_r_job = 11 [(.google.api.field_behavior) = OPTIONAL]; + *
*/ private com.google.protobuf.SingleFieldBuilderV3< com.google.cloud.dataproc.v1.SparkRJob, @@ -2682,7 +3283,15 @@ public com.google.cloud.dataproc.v1.SparkRJobOrBuilder getSparkRJobOrBuilder() { com.google.cloud.dataproc.v1.SparkSqlJobOrBuilder> sparkSqlJobBuilder_; /** - *.google.cloud.dataproc.v1.SparkSqlJob spark_sql_job = 7;
+ * + * + *+ * Optional. Job is a SparkSql job. + *+ * + *+ * .google.cloud.dataproc.v1.SparkSqlJob spark_sql_job = 7 [(.google.api.field_behavior) = OPTIONAL]; + *
* * @return Whether the sparkSqlJob field is set. */ @@ -2691,7 +3300,15 @@ public boolean hasSparkSqlJob() { return jobTypeCase_ == 7; } /** - *.google.cloud.dataproc.v1.SparkSqlJob spark_sql_job = 7;
+ * + * + *+ * Optional. Job is a SparkSql job. + *+ * + *+ * .google.cloud.dataproc.v1.SparkSqlJob spark_sql_job = 7 [(.google.api.field_behavior) = OPTIONAL]; + *
* * @return The sparkSqlJob. */ @@ -2709,7 +3326,17 @@ public com.google.cloud.dataproc.v1.SparkSqlJob getSparkSqlJob() { return com.google.cloud.dataproc.v1.SparkSqlJob.getDefaultInstance(); } } - /**.google.cloud.dataproc.v1.SparkSqlJob spark_sql_job = 7;
*/ + /** + * + * + *+ * Optional. Job is a SparkSql job. + *+ * + *+ * .google.cloud.dataproc.v1.SparkSqlJob spark_sql_job = 7 [(.google.api.field_behavior) = OPTIONAL]; + *
+ */ public Builder setSparkSqlJob(com.google.cloud.dataproc.v1.SparkSqlJob value) { if (sparkSqlJobBuilder_ == null) { if (value == null) { @@ -2723,7 +3350,17 @@ public Builder setSparkSqlJob(com.google.cloud.dataproc.v1.SparkSqlJob value) { jobTypeCase_ = 7; return this; } - /**.google.cloud.dataproc.v1.SparkSqlJob spark_sql_job = 7;
*/ + /** + * + * + *+ * Optional. Job is a SparkSql job. + *+ * + *+ * .google.cloud.dataproc.v1.SparkSqlJob spark_sql_job = 7 [(.google.api.field_behavior) = OPTIONAL]; + *
+ */ public Builder setSparkSqlJob( com.google.cloud.dataproc.v1.SparkSqlJob.Builder builderForValue) { if (sparkSqlJobBuilder_ == null) { @@ -2735,7 +3372,17 @@ public Builder setSparkSqlJob( jobTypeCase_ = 7; return this; } - /**.google.cloud.dataproc.v1.SparkSqlJob spark_sql_job = 7;
*/ + /** + * + * + *+ * Optional. Job is a SparkSql job. + *+ * + *+ * .google.cloud.dataproc.v1.SparkSqlJob spark_sql_job = 7 [(.google.api.field_behavior) = OPTIONAL]; + *
+ */ public Builder mergeSparkSqlJob(com.google.cloud.dataproc.v1.SparkSqlJob value) { if (sparkSqlJobBuilder_ == null) { if (jobTypeCase_ == 7 @@ -2758,7 +3405,17 @@ public Builder mergeSparkSqlJob(com.google.cloud.dataproc.v1.SparkSqlJob value) jobTypeCase_ = 7; return this; } - /**.google.cloud.dataproc.v1.SparkSqlJob spark_sql_job = 7;
*/ + /** + * + * + *+ * Optional. Job is a SparkSql job. + *+ * + *+ * .google.cloud.dataproc.v1.SparkSqlJob spark_sql_job = 7 [(.google.api.field_behavior) = OPTIONAL]; + *
+ */ public Builder clearSparkSqlJob() { if (sparkSqlJobBuilder_ == null) { if (jobTypeCase_ == 7) { @@ -2775,11 +3432,31 @@ public Builder clearSparkSqlJob() { } return this; } - /**.google.cloud.dataproc.v1.SparkSqlJob spark_sql_job = 7;
*/ + /** + * + * + *+ * Optional. Job is a SparkSql job. + *+ * + *+ * .google.cloud.dataproc.v1.SparkSqlJob spark_sql_job = 7 [(.google.api.field_behavior) = OPTIONAL]; + *
+ */ public com.google.cloud.dataproc.v1.SparkSqlJob.Builder getSparkSqlJobBuilder() { return getSparkSqlJobFieldBuilder().getBuilder(); } - /**.google.cloud.dataproc.v1.SparkSqlJob spark_sql_job = 7;
*/ + /** + * + * + *+ * Optional. Job is a SparkSql job. + *+ * + *+ * .google.cloud.dataproc.v1.SparkSqlJob spark_sql_job = 7 [(.google.api.field_behavior) = OPTIONAL]; + *
+ */ @java.lang.Override public com.google.cloud.dataproc.v1.SparkSqlJobOrBuilder getSparkSqlJobOrBuilder() { if ((jobTypeCase_ == 7) && (sparkSqlJobBuilder_ != null)) { @@ -2791,7 +3468,17 @@ public com.google.cloud.dataproc.v1.SparkSqlJobOrBuilder getSparkSqlJobOrBuilder return com.google.cloud.dataproc.v1.SparkSqlJob.getDefaultInstance(); } } - /**.google.cloud.dataproc.v1.SparkSqlJob spark_sql_job = 7;
*/ + /** + * + * + *+ * Optional. Job is a SparkSql job. + *+ * + *+ * .google.cloud.dataproc.v1.SparkSqlJob spark_sql_job = 7 [(.google.api.field_behavior) = OPTIONAL]; + *
+ */ private com.google.protobuf.SingleFieldBuilderV3< com.google.cloud.dataproc.v1.SparkSqlJob, com.google.cloud.dataproc.v1.SparkSqlJob.Builder, @@ -2826,10 +3513,12 @@ public com.google.cloud.dataproc.v1.SparkSqlJobOrBuilder getSparkSqlJobOrBuilder * * *- * Presto job + * Optional. Job is a Presto job. ** - *.google.cloud.dataproc.v1.PrestoJob presto_job = 12;
+ *+ * .google.cloud.dataproc.v1.PrestoJob presto_job = 12 [(.google.api.field_behavior) = OPTIONAL]; + *
* * @return Whether the prestoJob field is set. */ @@ -2841,10 +3530,12 @@ public boolean hasPrestoJob() { * * *- * Presto job + * Optional. Job is a Presto job. ** - *.google.cloud.dataproc.v1.PrestoJob presto_job = 12;
+ *+ * .google.cloud.dataproc.v1.PrestoJob presto_job = 12 [(.google.api.field_behavior) = OPTIONAL]; + *
* * @return The prestoJob. */ @@ -2866,10 +3557,12 @@ public com.google.cloud.dataproc.v1.PrestoJob getPrestoJob() { * * *- * Presto job + * Optional. Job is a Presto job. ** - *.google.cloud.dataproc.v1.PrestoJob presto_job = 12;
+ *+ * .google.cloud.dataproc.v1.PrestoJob presto_job = 12 [(.google.api.field_behavior) = OPTIONAL]; + *
*/ public Builder setPrestoJob(com.google.cloud.dataproc.v1.PrestoJob value) { if (prestoJobBuilder_ == null) { @@ -2888,10 +3581,12 @@ public Builder setPrestoJob(com.google.cloud.dataproc.v1.PrestoJob value) { * * *- * Presto job + * Optional. Job is a Presto job. ** - *.google.cloud.dataproc.v1.PrestoJob presto_job = 12;
+ *+ * .google.cloud.dataproc.v1.PrestoJob presto_job = 12 [(.google.api.field_behavior) = OPTIONAL]; + *
*/ public Builder setPrestoJob(com.google.cloud.dataproc.v1.PrestoJob.Builder builderForValue) { if (prestoJobBuilder_ == null) { @@ -2907,10 +3602,12 @@ public Builder setPrestoJob(com.google.cloud.dataproc.v1.PrestoJob.Builder build * * *- * Presto job + * Optional. Job is a Presto job. ** - *.google.cloud.dataproc.v1.PrestoJob presto_job = 12;
+ *+ * .google.cloud.dataproc.v1.PrestoJob presto_job = 12 [(.google.api.field_behavior) = OPTIONAL]; + *
*/ public Builder mergePrestoJob(com.google.cloud.dataproc.v1.PrestoJob value) { if (prestoJobBuilder_ == null) { @@ -2938,10 +3635,12 @@ public Builder mergePrestoJob(com.google.cloud.dataproc.v1.PrestoJob value) { * * *- * Presto job + * Optional. Job is a Presto job. ** - *.google.cloud.dataproc.v1.PrestoJob presto_job = 12;
+ *+ * .google.cloud.dataproc.v1.PrestoJob presto_job = 12 [(.google.api.field_behavior) = OPTIONAL]; + *
*/ public Builder clearPrestoJob() { if (prestoJobBuilder_ == null) { @@ -2963,10 +3662,12 @@ public Builder clearPrestoJob() { * * *- * Presto job + * Optional. Job is a Presto job. ** - *.google.cloud.dataproc.v1.PrestoJob presto_job = 12;
+ *+ * .google.cloud.dataproc.v1.PrestoJob presto_job = 12 [(.google.api.field_behavior) = OPTIONAL]; + *
*/ public com.google.cloud.dataproc.v1.PrestoJob.Builder getPrestoJobBuilder() { return getPrestoJobFieldBuilder().getBuilder(); @@ -2975,10 +3676,12 @@ public com.google.cloud.dataproc.v1.PrestoJob.Builder getPrestoJobBuilder() { * * *- * Presto job + * Optional. Job is a Presto job. ** - *.google.cloud.dataproc.v1.PrestoJob presto_job = 12;
+ *+ * .google.cloud.dataproc.v1.PrestoJob presto_job = 12 [(.google.api.field_behavior) = OPTIONAL]; + *
*/ @java.lang.Override public com.google.cloud.dataproc.v1.PrestoJobOrBuilder getPrestoJobOrBuilder() { @@ -2995,10 +3698,12 @@ public com.google.cloud.dataproc.v1.PrestoJobOrBuilder getPrestoJobOrBuilder() { * * *- * Presto job + * Optional. Job is a Presto job. ** - *.google.cloud.dataproc.v1.PrestoJob presto_job = 12;
+ *+ * .google.cloud.dataproc.v1.PrestoJob presto_job = 12 [(.google.api.field_behavior) = OPTIONAL]; + *
*/ private com.google.protobuf.SingleFieldBuilderV3< com.google.cloud.dataproc.v1.PrestoJob, diff --git a/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/OrderedJobOrBuilder.java b/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/OrderedJobOrBuilder.java index 27230208..198e4fca 100644 --- a/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/OrderedJobOrBuilder.java +++ b/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/OrderedJobOrBuilder.java @@ -65,88 +65,214 @@ public interface OrderedJobOrBuilder com.google.protobuf.ByteString getStepIdBytes(); /** - *.google.cloud.dataproc.v1.HadoopJob hadoop_job = 2;
+ * + * + *+ * Optional. Job is a Hadoop job. + *+ * + *+ * .google.cloud.dataproc.v1.HadoopJob hadoop_job = 2 [(.google.api.field_behavior) = OPTIONAL]; + *
* * @return Whether the hadoopJob field is set. */ boolean hasHadoopJob(); /** - *.google.cloud.dataproc.v1.HadoopJob hadoop_job = 2;
+ * + * + *+ * Optional. Job is a Hadoop job. + *+ * + *+ * .google.cloud.dataproc.v1.HadoopJob hadoop_job = 2 [(.google.api.field_behavior) = OPTIONAL]; + *
* * @return The hadoopJob. */ com.google.cloud.dataproc.v1.HadoopJob getHadoopJob(); - /**.google.cloud.dataproc.v1.HadoopJob hadoop_job = 2;
*/ + /** + * + * + *+ * Optional. Job is a Hadoop job. + *+ * + *+ * .google.cloud.dataproc.v1.HadoopJob hadoop_job = 2 [(.google.api.field_behavior) = OPTIONAL]; + *
+ */ com.google.cloud.dataproc.v1.HadoopJobOrBuilder getHadoopJobOrBuilder(); /** - *.google.cloud.dataproc.v1.SparkJob spark_job = 3;
+ * + * + *+ * Optional. Job is a Spark job. + *+ * + *+ * .google.cloud.dataproc.v1.SparkJob spark_job = 3 [(.google.api.field_behavior) = OPTIONAL]; + *
* * @return Whether the sparkJob field is set. */ boolean hasSparkJob(); /** - *.google.cloud.dataproc.v1.SparkJob spark_job = 3;
+ * + * + *+ * Optional. Job is a Spark job. + *+ * + *+ * .google.cloud.dataproc.v1.SparkJob spark_job = 3 [(.google.api.field_behavior) = OPTIONAL]; + *
* * @return The sparkJob. */ com.google.cloud.dataproc.v1.SparkJob getSparkJob(); - /**.google.cloud.dataproc.v1.SparkJob spark_job = 3;
*/ + /** + * + * + *+ * Optional. Job is a Spark job. + *+ * + *+ * .google.cloud.dataproc.v1.SparkJob spark_job = 3 [(.google.api.field_behavior) = OPTIONAL]; + *
+ */ com.google.cloud.dataproc.v1.SparkJobOrBuilder getSparkJobOrBuilder(); /** - *.google.cloud.dataproc.v1.PySparkJob pyspark_job = 4;
+ * + * + *+ * Optional. Job is a PySpark job. + *+ * + *+ * .google.cloud.dataproc.v1.PySparkJob pyspark_job = 4 [(.google.api.field_behavior) = OPTIONAL]; + *
* * @return Whether the pysparkJob field is set. */ boolean hasPysparkJob(); /** - *.google.cloud.dataproc.v1.PySparkJob pyspark_job = 4;
+ * + * + *+ * Optional. Job is a PySpark job. + *+ * + *+ * .google.cloud.dataproc.v1.PySparkJob pyspark_job = 4 [(.google.api.field_behavior) = OPTIONAL]; + *
* * @return The pysparkJob. */ com.google.cloud.dataproc.v1.PySparkJob getPysparkJob(); - /**.google.cloud.dataproc.v1.PySparkJob pyspark_job = 4;
*/ + /** + * + * + *+ * Optional. Job is a PySpark job. + *+ * + *+ * .google.cloud.dataproc.v1.PySparkJob pyspark_job = 4 [(.google.api.field_behavior) = OPTIONAL]; + *
+ */ com.google.cloud.dataproc.v1.PySparkJobOrBuilder getPysparkJobOrBuilder(); /** - *.google.cloud.dataproc.v1.HiveJob hive_job = 5;
+ * + * + *+ * Optional. Job is a Hive job. + *+ * + *.google.cloud.dataproc.v1.HiveJob hive_job = 5 [(.google.api.field_behavior) = OPTIONAL]; + *
* * @return Whether the hiveJob field is set. */ boolean hasHiveJob(); /** - *.google.cloud.dataproc.v1.HiveJob hive_job = 5;
+ * + * + *+ * Optional. Job is a Hive job. + *+ * + *.google.cloud.dataproc.v1.HiveJob hive_job = 5 [(.google.api.field_behavior) = OPTIONAL]; + *
* * @return The hiveJob. */ com.google.cloud.dataproc.v1.HiveJob getHiveJob(); - /**.google.cloud.dataproc.v1.HiveJob hive_job = 5;
*/ + /** + * + * + *+ * Optional. Job is a Hive job. + *+ * + *.google.cloud.dataproc.v1.HiveJob hive_job = 5 [(.google.api.field_behavior) = OPTIONAL]; + *
+ */ com.google.cloud.dataproc.v1.HiveJobOrBuilder getHiveJobOrBuilder(); /** - *.google.cloud.dataproc.v1.PigJob pig_job = 6;
+ * + * + *+ * Optional. Job is a Pig job. + *+ * + *.google.cloud.dataproc.v1.PigJob pig_job = 6 [(.google.api.field_behavior) = OPTIONAL]; + *
* * @return Whether the pigJob field is set. */ boolean hasPigJob(); /** - *.google.cloud.dataproc.v1.PigJob pig_job = 6;
+ * + * + *+ * Optional. Job is a Pig job. + *+ * + *.google.cloud.dataproc.v1.PigJob pig_job = 6 [(.google.api.field_behavior) = OPTIONAL]; + *
* * @return The pigJob. */ com.google.cloud.dataproc.v1.PigJob getPigJob(); - /**.google.cloud.dataproc.v1.PigJob pig_job = 6;
*/ + /** + * + * + *+ * Optional. Job is a Pig job. + *+ * + *.google.cloud.dataproc.v1.PigJob pig_job = 6 [(.google.api.field_behavior) = OPTIONAL]; + *
+ */ com.google.cloud.dataproc.v1.PigJobOrBuilder getPigJobOrBuilder(); /** * * *- * Spark R job + * Optional. Job is a SparkR job. ** - *.google.cloud.dataproc.v1.SparkRJob spark_r_job = 11;
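A reader-side sketch of the same oneof, assuming the generated getJobTypeCase() alongside the per-type getters declared in this interface (JOBTYPE_NOT_SET is the standard generated not-set constant):

    import com.google.cloud.dataproc.v1.OrderedJob;

    public class JobTypeDispatch {
      // Dispatches on the oneof case rather than probing each hasXxxJob().
      static String describe(OrderedJob job) {
        switch (job.getJobTypeCase()) {
          case HADOOP_JOB:
            return "Hadoop: " + job.getHadoopJob().getMainClass();
          case PYSPARK_JOB:
            return "PySpark: " + job.getPysparkJob().getMainPythonFileUri();
          case JOBTYPE_NOT_SET:
            return "no job type set";
          default:
            return job.getJobTypeCase().name();
        }
      }
    }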
+ *+ * .google.cloud.dataproc.v1.SparkRJob spark_r_job = 11 [(.google.api.field_behavior) = OPTIONAL]; + *
* * @return Whether the sparkRJob field is set. */ @@ -155,10 +281,12 @@ public interface OrderedJobOrBuilder * * *- * Spark R job + * Optional. Job is a SparkR job. ** - *.google.cloud.dataproc.v1.SparkRJob spark_r_job = 11;
+ *+ * .google.cloud.dataproc.v1.SparkRJob spark_r_job = 11 [(.google.api.field_behavior) = OPTIONAL]; + *
* * @return The sparkRJob. */ @@ -167,36 +295,66 @@ public interface OrderedJobOrBuilder * * *- * Spark R job + * Optional. Job is a SparkR job. ** - *.google.cloud.dataproc.v1.SparkRJob spark_r_job = 11;
+ *+ * .google.cloud.dataproc.v1.SparkRJob spark_r_job = 11 [(.google.api.field_behavior) = OPTIONAL]; + *
*/ com.google.cloud.dataproc.v1.SparkRJobOrBuilder getSparkRJobOrBuilder(); /** - *.google.cloud.dataproc.v1.SparkSqlJob spark_sql_job = 7;
+ * + * + *+ * Optional. Job is a SparkSql job. + *+ * + *+ * .google.cloud.dataproc.v1.SparkSqlJob spark_sql_job = 7 [(.google.api.field_behavior) = OPTIONAL]; + *
* * @return Whether the sparkSqlJob field is set. */ boolean hasSparkSqlJob(); /** - *.google.cloud.dataproc.v1.SparkSqlJob spark_sql_job = 7;
+ * + * + *+ * Optional. Job is a SparkSql job. + *+ * + *+ * .google.cloud.dataproc.v1.SparkSqlJob spark_sql_job = 7 [(.google.api.field_behavior) = OPTIONAL]; + *
* * @return The sparkSqlJob. */ com.google.cloud.dataproc.v1.SparkSqlJob getSparkSqlJob(); - /**.google.cloud.dataproc.v1.SparkSqlJob spark_sql_job = 7;
*/ + /** + * + * + *+ * Optional. Job is a SparkSql job. + *+ * + *+ * .google.cloud.dataproc.v1.SparkSqlJob spark_sql_job = 7 [(.google.api.field_behavior) = OPTIONAL]; + *
+ */ com.google.cloud.dataproc.v1.SparkSqlJobOrBuilder getSparkSqlJobOrBuilder(); /** * * *- * Presto job + * Optional. Job is a Presto job. ** - *.google.cloud.dataproc.v1.PrestoJob presto_job = 12;
+ *+ * .google.cloud.dataproc.v1.PrestoJob presto_job = 12 [(.google.api.field_behavior) = OPTIONAL]; + *
* * @return Whether the prestoJob field is set. */ @@ -205,10 +363,12 @@ public interface OrderedJobOrBuilder * * *- * Presto job + * Optional. Job is a Presto job. ** - *.google.cloud.dataproc.v1.PrestoJob presto_job = 12;
+ *+ * .google.cloud.dataproc.v1.PrestoJob presto_job = 12 [(.google.api.field_behavior) = OPTIONAL]; + *
* * @return The prestoJob. */ @@ -217,10 +377,12 @@ public interface OrderedJobOrBuilder * * *- * Presto job + * Optional. Job is a Presto job. ** - *.google.cloud.dataproc.v1.PrestoJob presto_job = 12;
+ *+ * .google.cloud.dataproc.v1.PrestoJob presto_job = 12 [(.google.api.field_behavior) = OPTIONAL]; + *
*/ com.google.cloud.dataproc.v1.PrestoJobOrBuilder getPrestoJobOrBuilder(); diff --git a/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/PySparkJob.java b/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/PySparkJob.java index 8b77cef3..4fd5c99e 100644 --- a/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/PySparkJob.java +++ b/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/PySparkJob.java @@ -483,8 +483,8 @@ public com.google.protobuf.ByteString getJarFileUrisBytes(int index) { * * *- * Optional. HCFS URIs of files to be copied to the working directory of - * Python drivers and distributed tasks. Useful for naively parallel tasks. + * Optional. HCFS URIs of files to be placed in the working directory of + * each executor. Useful for naively parallel tasks. ** *repeated string file_uris = 5 [(.google.api.field_behavior) = OPTIONAL];
@@ -498,8 +498,8 @@ public com.google.protobuf.ProtocolStringList getFileUrisList() { * * *- * Optional. HCFS URIs of files to be copied to the working directory of - * Python drivers and distributed tasks. Useful for naively parallel tasks. + * Optional. HCFS URIs of files to be placed in the working directory of + * each executor. Useful for naively parallel tasks. ** *repeated string file_uris = 5 [(.google.api.field_behavior) = OPTIONAL];
@@ -513,8 +513,8 @@ public int getFileUrisCount() { * * *- * Optional. HCFS URIs of files to be copied to the working directory of - * Python drivers and distributed tasks. Useful for naively parallel tasks. + * Optional. HCFS URIs of files to be placed in the working directory of + * each executor. Useful for naively parallel tasks. ** *repeated string file_uris = 5 [(.google.api.field_behavior) = OPTIONAL];
@@ -529,8 +529,8 @@ public java.lang.String getFileUris(int index) { * * *- * Optional. HCFS URIs of files to be copied to the working directory of - * Python drivers and distributed tasks. Useful for naively parallel tasks. + * Optional. HCFS URIs of files to be placed in the working directory of + * each executor. Useful for naively parallel tasks. ** *repeated string file_uris = 5 [(.google.api.field_behavior) = OPTIONAL];
@@ -548,7 +548,8 @@ public com.google.protobuf.ByteString getFileUrisBytes(int index) { * * *- * Optional. HCFS URIs of archives to be extracted in the working directory of + * Optional. HCFS URIs of archives to be extracted into the working directory + * of each executor. Supported file types: * .jar, .tar, .tar.gz, .tgz, and .zip. ** @@ -563,7 +564,8 @@ public com.google.protobuf.ProtocolStringList getArchiveUrisList() { * * *- * Optional. HCFS URIs of archives to be extracted in the working directory of + * Optional. HCFS URIs of archives to be extracted into the working directory + * of each executor. Supported file types: * .jar, .tar, .tar.gz, .tgz, and .zip. ** @@ -578,7 +580,8 @@ public int getArchiveUrisCount() { * * *- * Optional. HCFS URIs of archives to be extracted in the working directory of + * Optional. HCFS URIs of archives to be extracted into the working directory + * of each executor. Supported file types: * .jar, .tar, .tar.gz, .tgz, and .zip. ** @@ -594,7 +597,8 @@ public java.lang.String getArchiveUris(int index) { * * *- * Optional. HCFS URIs of archives to be extracted in the working directory of + * Optional. HCFS URIs of archives to be extracted into the working directory + * of each executor. Supported file types: * .jar, .tar, .tar.gz, .tgz, and .zip. ** @@ -1995,8 +1999,8 @@ private void ensureFileUrisIsMutable() { * * *- * Optional. HCFS URIs of files to be copied to the working directory of - * Python drivers and distributed tasks. Useful for naively parallel tasks. + * Optional. HCFS URIs of files to be placed in the working directory of + * each executor. Useful for naively parallel tasks. ** *repeated string file_uris = 5 [(.google.api.field_behavior) = OPTIONAL];
@@ -2010,8 +2014,8 @@ public com.google.protobuf.ProtocolStringList getFileUrisList() { * * *- * Optional. HCFS URIs of files to be copied to the working directory of - * Python drivers and distributed tasks. Useful for naively parallel tasks. + * Optional. HCFS URIs of files to be placed in the working directory of + * each executor. Useful for naively parallel tasks. ** *repeated string file_uris = 5 [(.google.api.field_behavior) = OPTIONAL];
@@ -2025,8 +2029,8 @@ public int getFileUrisCount() { * * *- * Optional. HCFS URIs of files to be copied to the working directory of - * Python drivers and distributed tasks. Useful for naively parallel tasks. + * Optional. HCFS URIs of files to be placed in the working directory of + * each executor. Useful for naively parallel tasks. ** *repeated string file_uris = 5 [(.google.api.field_behavior) = OPTIONAL];
@@ -2041,8 +2045,8 @@ public java.lang.String getFileUris(int index) { * * *- * Optional. HCFS URIs of files to be copied to the working directory of - * Python drivers and distributed tasks. Useful for naively parallel tasks. + * Optional. HCFS URIs of files to be placed in the working directory of + * each executor. Useful for naively parallel tasks. ** *repeated string file_uris = 5 [(.google.api.field_behavior) = OPTIONAL];
@@ -2057,8 +2061,8 @@ public com.google.protobuf.ByteString getFileUrisBytes(int index) { * * *- * Optional. HCFS URIs of files to be copied to the working directory of - * Python drivers and distributed tasks. Useful for naively parallel tasks. + * Optional. HCFS URIs of files to be placed in the working directory of + * each executor. Useful for naively parallel tasks. ** *repeated string file_uris = 5 [(.google.api.field_behavior) = OPTIONAL];
@@ -2080,8 +2084,8 @@ public Builder setFileUris(int index, java.lang.String value) { * * *- * Optional. HCFS URIs of files to be copied to the working directory of - * Python drivers and distributed tasks. Useful for naively parallel tasks. + * Optional. HCFS URIs of files to be placed in the working directory of + * each executor. Useful for naively parallel tasks. ** *repeated string file_uris = 5 [(.google.api.field_behavior) = OPTIONAL];
@@ -2102,8 +2106,8 @@ public Builder addFileUris(java.lang.String value) { * * *- * Optional. HCFS URIs of files to be copied to the working directory of - * Python drivers and distributed tasks. Useful for naively parallel tasks. + * Optional. HCFS URIs of files to be placed in the working directory of + * each executor. Useful for naively parallel tasks. ** *repeated string file_uris = 5 [(.google.api.field_behavior) = OPTIONAL];
@@ -2121,8 +2125,8 @@ public Builder addAllFileUris(java.lang.Iterablevalues) { * * * - * Optional. HCFS URIs of files to be copied to the working directory of - * Python drivers and distributed tasks. Useful for naively parallel tasks. + * Optional. HCFS URIs of files to be placed in the working directory of + * each executor. Useful for naively parallel tasks. ** *repeated string file_uris = 5 [(.google.api.field_behavior) = OPTIONAL];
@@ -2139,8 +2143,8 @@ public Builder clearFileUris() { * * *- * Optional. HCFS URIs of files to be copied to the working directory of - * Python drivers and distributed tasks. Useful for naively parallel tasks. + * Optional. HCFS URIs of files to be placed in the working directory of + * each executor. Useful for naively parallel tasks. ** *repeated string file_uris = 5 [(.google.api.field_behavior) = OPTIONAL];
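A small sketch tying the file_uris and archive_uris semantics above together, assuming the generated repeated-field adders (all URIs are hypothetical):

    import com.google.cloud.dataproc.v1.PySparkJob;

    public class PySparkJobSketch {
      public static void main(String[] args) {
        // file_uris are placed in each executor's working directory as-is;
        // archive_uris are extracted there (.jar, .tar, .tar.gz, .tgz, .zip).
        PySparkJob job =
            PySparkJob.newBuilder()
                .setMainPythonFileUri("gs://my-bucket/main.py")  // hypothetical URI
                .addFileUris("gs://my-bucket/lookup.csv")        // hypothetical URI
                .addArchiveUris("gs://my-bucket/deps.tar.gz")    // hypothetical URI
                .build();
        System.out.println(job.getFileUrisCount() + job.getArchiveUrisCount());  // 2
      }
    }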
@@ -2172,7 +2176,8 @@ private void ensureArchiveUrisIsMutable() { * * *- * Optional. HCFS URIs of archives to be extracted in the working directory of + * Optional. HCFS URIs of archives to be extracted into the working directory + * of each executor. Supported file types: * .jar, .tar, .tar.gz, .tgz, and .zip. ** @@ -2187,7 +2192,8 @@ public com.google.protobuf.ProtocolStringList getArchiveUrisList() { * * *- * Optional. HCFS URIs of archives to be extracted in the working directory of + * Optional. HCFS URIs of archives to be extracted into the working directory + * of each executor. Supported file types: * .jar, .tar, .tar.gz, .tgz, and .zip. ** @@ -2202,7 +2208,8 @@ public int getArchiveUrisCount() { * * *- * Optional. HCFS URIs of archives to be extracted in the working directory of + * Optional. HCFS URIs of archives to be extracted into the working directory + * of each executor. Supported file types: * .jar, .tar, .tar.gz, .tgz, and .zip. ** @@ -2218,7 +2225,8 @@ public java.lang.String getArchiveUris(int index) { * * *- * Optional. HCFS URIs of archives to be extracted in the working directory of + * Optional. HCFS URIs of archives to be extracted into the working directory + * of each executor. Supported file types: * .jar, .tar, .tar.gz, .tgz, and .zip. ** @@ -2234,7 +2242,8 @@ public com.google.protobuf.ByteString getArchiveUrisBytes(int index) { * * *- * Optional. HCFS URIs of archives to be extracted in the working directory of + * Optional. HCFS URIs of archives to be extracted into the working directory + * of each executor. Supported file types: * .jar, .tar, .tar.gz, .tgz, and .zip. ** @@ -2257,7 +2266,8 @@ public Builder setArchiveUris(int index, java.lang.String value) { * * *- * Optional. HCFS URIs of archives to be extracted in the working directory of + * Optional. HCFS URIs of archives to be extracted into the working directory + * of each executor. Supported file types: * .jar, .tar, .tar.gz, .tgz, and .zip. ** @@ -2279,7 +2289,8 @@ public Builder addArchiveUris(java.lang.String value) { * * *- * Optional. HCFS URIs of archives to be extracted in the working directory of + * Optional. HCFS URIs of archives to be extracted into the working directory + * of each executor. Supported file types: * .jar, .tar, .tar.gz, .tgz, and .zip. ** @@ -2298,7 +2309,8 @@ public Builder addAllArchiveUris(java.lang.Iterablevalues) { * * * - * Optional. HCFS URIs of archives to be extracted in the working directory of + * Optional. HCFS URIs of archives to be extracted into the working directory + * of each executor. Supported file types: * .jar, .tar, .tar.gz, .tgz, and .zip. ** @@ -2316,7 +2328,8 @@ public Builder clearArchiveUris() { * * *- * Optional. HCFS URIs of archives to be extracted in the working directory of + * Optional. HCFS URIs of archives to be extracted into the working directory + * of each executor. Supported file types: * .jar, .tar, .tar.gz, .tgz, and .zip. ** diff --git a/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/PySparkJobOrBuilder.java b/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/PySparkJobOrBuilder.java index beca8d0b..89648ac7 100644 --- a/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/PySparkJobOrBuilder.java +++ b/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/PySparkJobOrBuilder.java @@ -223,8 +223,8 @@ public interface PySparkJobOrBuilder * * *- * Optional. 
HCFS URIs of files to be copied to the working directory of - * Python drivers and distributed tasks. Useful for naively parallel tasks. + * Optional. HCFS URIs of files to be placed in the working directory of + * each executor. Useful for naively parallel tasks. ** *repeated string file_uris = 5 [(.google.api.field_behavior) = OPTIONAL];
@@ -236,8 +236,8 @@ public interface PySparkJobOrBuilder * * *- * Optional. HCFS URIs of files to be copied to the working directory of - * Python drivers and distributed tasks. Useful for naively parallel tasks. + * Optional. HCFS URIs of files to be placed in the working directory of + * each executor. Useful for naively parallel tasks. ** *repeated string file_uris = 5 [(.google.api.field_behavior) = OPTIONAL];
@@ -249,8 +249,8 @@ public interface PySparkJobOrBuilder * * *- * Optional. HCFS URIs of files to be copied to the working directory of - * Python drivers and distributed tasks. Useful for naively parallel tasks. + * Optional. HCFS URIs of files to be placed in the working directory of + * each executor. Useful for naively parallel tasks. ** *repeated string file_uris = 5 [(.google.api.field_behavior) = OPTIONAL];
@@ -263,8 +263,8 @@ public interface PySparkJobOrBuilder * * *- * Optional. HCFS URIs of files to be copied to the working directory of - * Python drivers and distributed tasks. Useful for naively parallel tasks. + * Optional. HCFS URIs of files to be placed in the working directory of + * each executor. Useful for naively parallel tasks. ** *repeated string file_uris = 5 [(.google.api.field_behavior) = OPTIONAL];
@@ -278,7 +278,8 @@ public interface PySparkJobOrBuilder * * *- * Optional. HCFS URIs of archives to be extracted in the working directory of + * Optional. HCFS URIs of archives to be extracted into the working directory + * of each executor. Supported file types: * .jar, .tar, .tar.gz, .tgz, and .zip. ** @@ -291,7 +292,8 @@ public interface PySparkJobOrBuilder * * *- * Optional. HCFS URIs of archives to be extracted in the working directory of + * Optional. HCFS URIs of archives to be extracted into the working directory + * of each executor. Supported file types: * .jar, .tar, .tar.gz, .tgz, and .zip. ** @@ -304,7 +306,8 @@ public interface PySparkJobOrBuilder * * *- * Optional. HCFS URIs of archives to be extracted in the working directory of + * Optional. HCFS URIs of archives to be extracted into the working directory + * of each executor. Supported file types: * .jar, .tar, .tar.gz, .tgz, and .zip. ** @@ -318,7 +321,8 @@ public interface PySparkJobOrBuilder * * *- * Optional. HCFS URIs of archives to be extracted in the working directory of + * Optional. HCFS URIs of archives to be extracted into the working directory + * of each executor. Supported file types: * .jar, .tar, .tar.gz, .tgz, and .zip. ** diff --git a/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/SoftwareConfig.java b/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/SoftwareConfig.java index 5102bb48..df14bda8 100644 --- a/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/SoftwareConfig.java +++ b/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/SoftwareConfig.java @@ -175,7 +175,7 @@ protected com.google.protobuf.MapField internalGetMapField(int number) { ** Optional. The version of software inside the cluster. It must be one of the * supported [Dataproc - * Versions](https://cloud.google.com/dataproc/docs/concepts/versioning/dataproc-versions#supported_cloud_dataproc_versions), + * Versions](https://cloud.google.com/dataproc/docs/concepts/versioning/dataproc-versions#supported_dataproc_versions), * such as "1.2" (including a subminor version, such as "1.2.29"), or the * ["preview" * version](https://cloud.google.com/dataproc/docs/concepts/versioning/dataproc-versions#other_versions). @@ -204,7 +204,7 @@ public java.lang.String getImageVersion() { ** Optional. The version of software inside the cluster. It must be one of the * supported [Dataproc - * Versions](https://cloud.google.com/dataproc/docs/concepts/versioning/dataproc-versions#supported_cloud_dataproc_versions), + * Versions](https://cloud.google.com/dataproc/docs/concepts/versioning/dataproc-versions#supported_dataproc_versions), * such as "1.2" (including a subminor version, such as "1.2.29"), or the * ["preview" * version](https://cloud.google.com/dataproc/docs/concepts/versioning/dataproc-versions#other_versions). @@ -903,7 +903,7 @@ public Builder mergeFrom( ** Optional. The version of software inside the cluster. It must be one of the * supported [Dataproc - * Versions](https://cloud.google.com/dataproc/docs/concepts/versioning/dataproc-versions#supported_cloud_dataproc_versions), + * Versions](https://cloud.google.com/dataproc/docs/concepts/versioning/dataproc-versions#supported_dataproc_versions), * such as "1.2" (including a subminor version, such as "1.2.29"), or the * ["preview" * version](https://cloud.google.com/dataproc/docs/concepts/versioning/dataproc-versions#other_versions). 
@@ -931,7 +931,7 @@ public java.lang.String getImageVersion() { ** Optional. The version of software inside the cluster. It must be one of the * supported [Dataproc - * Versions](https://cloud.google.com/dataproc/docs/concepts/versioning/dataproc-versions#supported_cloud_dataproc_versions), + * Versions](https://cloud.google.com/dataproc/docs/concepts/versioning/dataproc-versions#supported_dataproc_versions), * such as "1.2" (including a subminor version, such as "1.2.29"), or the * ["preview" * version](https://cloud.google.com/dataproc/docs/concepts/versioning/dataproc-versions#other_versions). @@ -959,7 +959,7 @@ public com.google.protobuf.ByteString getImageVersionBytes() { ** Optional. The version of software inside the cluster. It must be one of the * supported [Dataproc - * Versions](https://cloud.google.com/dataproc/docs/concepts/versioning/dataproc-versions#supported_cloud_dataproc_versions), + * Versions](https://cloud.google.com/dataproc/docs/concepts/versioning/dataproc-versions#supported_dataproc_versions), * such as "1.2" (including a subminor version, such as "1.2.29"), or the * ["preview" * version](https://cloud.google.com/dataproc/docs/concepts/versioning/dataproc-versions#other_versions). @@ -986,7 +986,7 @@ public Builder setImageVersion(java.lang.String value) { ** Optional. The version of software inside the cluster. It must be one of the * supported [Dataproc - * Versions](https://cloud.google.com/dataproc/docs/concepts/versioning/dataproc-versions#supported_cloud_dataproc_versions), + * Versions](https://cloud.google.com/dataproc/docs/concepts/versioning/dataproc-versions#supported_dataproc_versions), * such as "1.2" (including a subminor version, such as "1.2.29"), or the * ["preview" * version](https://cloud.google.com/dataproc/docs/concepts/versioning/dataproc-versions#other_versions). @@ -1009,7 +1009,7 @@ public Builder clearImageVersion() { ** Optional. The version of software inside the cluster. It must be one of the * supported [Dataproc - * Versions](https://cloud.google.com/dataproc/docs/concepts/versioning/dataproc-versions#supported_cloud_dataproc_versions), + * Versions](https://cloud.google.com/dataproc/docs/concepts/versioning/dataproc-versions#supported_dataproc_versions), * such as "1.2" (including a subminor version, such as "1.2.29"), or the * ["preview" * version](https://cloud.google.com/dataproc/docs/concepts/versioning/dataproc-versions#other_versions). diff --git a/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/SoftwareConfigOrBuilder.java b/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/SoftwareConfigOrBuilder.java index 063de799..427f8991 100644 --- a/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/SoftwareConfigOrBuilder.java +++ b/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/SoftwareConfigOrBuilder.java @@ -29,7 +29,7 @@ public interface SoftwareConfigOrBuilder ** Optional. The version of software inside the cluster. It must be one of the * supported [Dataproc - * Versions](https://cloud.google.com/dataproc/docs/concepts/versioning/dataproc-versions#supported_cloud_dataproc_versions), + * Versions](https://cloud.google.com/dataproc/docs/concepts/versioning/dataproc-versions#supported_dataproc_versions), * such as "1.2" (including a subminor version, such as "1.2.29"), or the * ["preview" * version](https://cloud.google.com/dataproc/docs/concepts/versioning/dataproc-versions#other_versions). 
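The hunks above only fix the anchor in the linked page; the `image_version` field itself still accepts a major.minor version, a full subminor version, or the "preview" version. A hedged sketch of setting it via the builder (the property shown is an illustrative example, not part of this change):

```java
import com.google.cloud.dataproc.v1.SoftwareConfig;

public class SoftwareConfigExample {
  public static void main(String[] args) {
    // "1.2" selects the latest subminor release of that minor version;
    // a full version string such as "1.2.29" pins it exactly.
    SoftwareConfig config =
        SoftwareConfig.newBuilder()
            .setImageVersion("1.2")
            .putProperties("spark:spark.executor.memory", "4g") // example property
            .build();
    System.out.println(config.getImageVersion());
  }
}
```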
@@ -47,7 +47,7 @@ public interface SoftwareConfigOrBuilder ** Optional. The version of software inside the cluster. It must be one of the * supported [Dataproc - * Versions](https://cloud.google.com/dataproc/docs/concepts/versioning/dataproc-versions#supported_cloud_dataproc_versions), + * Versions](https://cloud.google.com/dataproc/docs/concepts/versioning/dataproc-versions#supported_dataproc_versions), * such as "1.2" (including a subminor version, such as "1.2.29"), or the * ["preview" * version](https://cloud.google.com/dataproc/docs/concepts/versioning/dataproc-versions#other_versions). diff --git a/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/SparkJob.java b/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/SparkJob.java index 5753b6f3..27fad2e2 100644 --- a/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/SparkJob.java +++ b/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/SparkJob.java @@ -518,8 +518,8 @@ public com.google.protobuf.ByteString getJarFileUrisBytes(int index) { * * *- * Optional. HCFS URIs of files to be copied to the working directory of - * Spark drivers and distributed tasks. Useful for naively parallel tasks. + * Optional. HCFS URIs of files to be placed in the working directory of + * each executor. Useful for naively parallel tasks. ** *repeated string file_uris = 5 [(.google.api.field_behavior) = OPTIONAL];
@@ -533,8 +533,8 @@ public com.google.protobuf.ProtocolStringList getFileUrisList() { * * *- * Optional. HCFS URIs of files to be copied to the working directory of - * Spark drivers and distributed tasks. Useful for naively parallel tasks. + * Optional. HCFS URIs of files to be placed in the working directory of + * each executor. Useful for naively parallel tasks. ** *repeated string file_uris = 5 [(.google.api.field_behavior) = OPTIONAL];
@@ -548,8 +548,8 @@ public int getFileUrisCount() { * * *- * Optional. HCFS URIs of files to be copied to the working directory of - * Spark drivers and distributed tasks. Useful for naively parallel tasks. + * Optional. HCFS URIs of files to be placed in the working directory of + * each executor. Useful for naively parallel tasks. ** *repeated string file_uris = 5 [(.google.api.field_behavior) = OPTIONAL];
@@ -564,8 +564,8 @@ public java.lang.String getFileUris(int index) { * * *- * Optional. HCFS URIs of files to be copied to the working directory of - * Spark drivers and distributed tasks. Useful for naively parallel tasks. + * Optional. HCFS URIs of files to be placed in the working directory of + * each executor. Useful for naively parallel tasks. ** *repeated string file_uris = 5 [(.google.api.field_behavior) = OPTIONAL];
@@ -583,8 +583,8 @@ public com.google.protobuf.ByteString getFileUrisBytes(int index) { * * *- * Optional. HCFS URIs of archives to be extracted in the working directory - * of Spark drivers and tasks. Supported file types: + * Optional. HCFS URIs of archives to be extracted into the working directory + * of each executor. Supported file types: * .jar, .tar, .tar.gz, .tgz, and .zip. ** @@ -599,8 +599,8 @@ public com.google.protobuf.ProtocolStringList getArchiveUrisList() { * * *- * Optional. HCFS URIs of archives to be extracted in the working directory - * of Spark drivers and tasks. Supported file types: + * Optional. HCFS URIs of archives to be extracted into the working directory + * of each executor. Supported file types: * .jar, .tar, .tar.gz, .tgz, and .zip. ** @@ -615,8 +615,8 @@ public int getArchiveUrisCount() { * * *- * Optional. HCFS URIs of archives to be extracted in the working directory - * of Spark drivers and tasks. Supported file types: + * Optional. HCFS URIs of archives to be extracted into the working directory + * of each executor. Supported file types: * .jar, .tar, .tar.gz, .tgz, and .zip. ** @@ -632,8 +632,8 @@ public java.lang.String getArchiveUris(int index) { * * *- * Optional. HCFS URIs of archives to be extracted in the working directory - * of Spark drivers and tasks. Supported file types: + * Optional. HCFS URIs of archives to be extracted into the working directory + * of each executor. Supported file types: * .jar, .tar, .tar.gz, .tgz, and .zip. ** @@ -2015,8 +2015,8 @@ private void ensureFileUrisIsMutable() { * * *- * Optional. HCFS URIs of files to be copied to the working directory of - * Spark drivers and distributed tasks. Useful for naively parallel tasks. + * Optional. HCFS URIs of files to be placed in the working directory of + * each executor. Useful for naively parallel tasks. ** *repeated string file_uris = 5 [(.google.api.field_behavior) = OPTIONAL];
@@ -2030,8 +2030,8 @@ public com.google.protobuf.ProtocolStringList getFileUrisList() { * * *- * Optional. HCFS URIs of files to be copied to the working directory of - * Spark drivers and distributed tasks. Useful for naively parallel tasks. + * Optional. HCFS URIs of files to be placed in the working directory of + * each executor. Useful for naively parallel tasks. ** *repeated string file_uris = 5 [(.google.api.field_behavior) = OPTIONAL];
@@ -2045,8 +2045,8 @@ public int getFileUrisCount() { * * *- * Optional. HCFS URIs of files to be copied to the working directory of - * Spark drivers and distributed tasks. Useful for naively parallel tasks. + * Optional. HCFS URIs of files to be placed in the working directory of + * each executor. Useful for naively parallel tasks. ** *repeated string file_uris = 5 [(.google.api.field_behavior) = OPTIONAL];
@@ -2061,8 +2061,8 @@ public java.lang.String getFileUris(int index) { * * *- * Optional. HCFS URIs of files to be copied to the working directory of - * Spark drivers and distributed tasks. Useful for naively parallel tasks. + * Optional. HCFS URIs of files to be placed in the working directory of + * each executor. Useful for naively parallel tasks. ** *repeated string file_uris = 5 [(.google.api.field_behavior) = OPTIONAL];
@@ -2077,8 +2077,8 @@ public com.google.protobuf.ByteString getFileUrisBytes(int index) { * * *- * Optional. HCFS URIs of files to be copied to the working directory of - * Spark drivers and distributed tasks. Useful for naively parallel tasks. + * Optional. HCFS URIs of files to be placed in the working directory of + * each executor. Useful for naively parallel tasks. ** *repeated string file_uris = 5 [(.google.api.field_behavior) = OPTIONAL];
@@ -2100,8 +2100,8 @@ public Builder setFileUris(int index, java.lang.String value) { * * *- * Optional. HCFS URIs of files to be copied to the working directory of - * Spark drivers and distributed tasks. Useful for naively parallel tasks. + * Optional. HCFS URIs of files to be placed in the working directory of + * each executor. Useful for naively parallel tasks. ** *repeated string file_uris = 5 [(.google.api.field_behavior) = OPTIONAL];
@@ -2122,8 +2122,8 @@ public Builder addFileUris(java.lang.String value) { * * *- * Optional. HCFS URIs of files to be copied to the working directory of - * Spark drivers and distributed tasks. Useful for naively parallel tasks. + * Optional. HCFS URIs of files to be placed in the working directory of + * each executor. Useful for naively parallel tasks. ** *repeated string file_uris = 5 [(.google.api.field_behavior) = OPTIONAL];
@@ -2141,8 +2141,8 @@ public Builder addAllFileUris(java.lang.Iterablevalues) { * * * - * Optional. HCFS URIs of files to be copied to the working directory of - * Spark drivers and distributed tasks. Useful for naively parallel tasks. + * Optional. HCFS URIs of files to be placed in the working directory of + * each executor. Useful for naively parallel tasks. ** *repeated string file_uris = 5 [(.google.api.field_behavior) = OPTIONAL];
@@ -2159,8 +2159,8 @@ public Builder clearFileUris() { * * *- * Optional. HCFS URIs of files to be copied to the working directory of - * Spark drivers and distributed tasks. Useful for naively parallel tasks. + * Optional. HCFS URIs of files to be placed in the working directory of + * each executor. Useful for naively parallel tasks. ** *repeated string file_uris = 5 [(.google.api.field_behavior) = OPTIONAL];
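As the updated comments state, `file_uris` entries are copied into each executor's working directory, while `archive_uris` entries are extracted there. A short sketch combining both on a SparkJob; the class name and bucket paths are placeholders:

```java
import com.google.cloud.dataproc.v1.SparkJob;

public class SparkJobUrisExample {
  public static void main(String[] args) {
    SparkJob job =
        SparkJob.newBuilder()
            .setMainClass("com.example.WordCount")          // hypothetical driver class
            .addJarFileUris("gs://my-bucket/wordcount.jar") // placeholder bucket
            // Copied into each executor's working directory.
            .addFileUris("gs://my-bucket/stopwords.txt")
            // Extracted into each executor's working directory; must be one of
            // the supported archive types: .jar, .tar, .tar.gz, .tgz, .zip.
            .addArchiveUris("gs://my-bucket/resources.zip")
            .build();
    System.out.println(job.getArchiveUris(0));
  }
}
```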
@@ -2192,8 +2192,8 @@ private void ensureArchiveUrisIsMutable() { * * *- * Optional. HCFS URIs of archives to be extracted in the working directory - * of Spark drivers and tasks. Supported file types: + * Optional. HCFS URIs of archives to be extracted into the working directory + * of each executor. Supported file types: * .jar, .tar, .tar.gz, .tgz, and .zip. ** @@ -2208,8 +2208,8 @@ public com.google.protobuf.ProtocolStringList getArchiveUrisList() { * * *- * Optional. HCFS URIs of archives to be extracted in the working directory - * of Spark drivers and tasks. Supported file types: + * Optional. HCFS URIs of archives to be extracted into the working directory + * of each executor. Supported file types: * .jar, .tar, .tar.gz, .tgz, and .zip. ** @@ -2224,8 +2224,8 @@ public int getArchiveUrisCount() { * * *- * Optional. HCFS URIs of archives to be extracted in the working directory - * of Spark drivers and tasks. Supported file types: + * Optional. HCFS URIs of archives to be extracted into the working directory + * of each executor. Supported file types: * .jar, .tar, .tar.gz, .tgz, and .zip. ** @@ -2241,8 +2241,8 @@ public java.lang.String getArchiveUris(int index) { * * *- * Optional. HCFS URIs of archives to be extracted in the working directory - * of Spark drivers and tasks. Supported file types: + * Optional. HCFS URIs of archives to be extracted into the working directory + * of each executor. Supported file types: * .jar, .tar, .tar.gz, .tgz, and .zip. ** @@ -2258,8 +2258,8 @@ public com.google.protobuf.ByteString getArchiveUrisBytes(int index) { * * *- * Optional. HCFS URIs of archives to be extracted in the working directory - * of Spark drivers and tasks. Supported file types: + * Optional. HCFS URIs of archives to be extracted into the working directory + * of each executor. Supported file types: * .jar, .tar, .tar.gz, .tgz, and .zip. ** @@ -2282,8 +2282,8 @@ public Builder setArchiveUris(int index, java.lang.String value) { * * *- * Optional. HCFS URIs of archives to be extracted in the working directory - * of Spark drivers and tasks. Supported file types: + * Optional. HCFS URIs of archives to be extracted into the working directory + * of each executor. Supported file types: * .jar, .tar, .tar.gz, .tgz, and .zip. ** @@ -2305,8 +2305,8 @@ public Builder addArchiveUris(java.lang.String value) { * * *- * Optional. HCFS URIs of archives to be extracted in the working directory - * of Spark drivers and tasks. Supported file types: + * Optional. HCFS URIs of archives to be extracted into the working directory + * of each executor. Supported file types: * .jar, .tar, .tar.gz, .tgz, and .zip. ** @@ -2325,8 +2325,8 @@ public Builder addAllArchiveUris(java.lang.Iterablevalues) { * * * - * Optional. HCFS URIs of archives to be extracted in the working directory - * of Spark drivers and tasks. Supported file types: + * Optional. HCFS URIs of archives to be extracted into the working directory + * of each executor. Supported file types: * .jar, .tar, .tar.gz, .tgz, and .zip. ** @@ -2344,8 +2344,8 @@ public Builder clearArchiveUris() { * * *- * Optional. HCFS URIs of archives to be extracted in the working directory - * of Spark drivers and tasks. Supported file types: + * Optional. HCFS URIs of archives to be extracted into the working directory + * of each executor. Supported file types: * .jar, .tar, .tar.gz, .tgz, and .zip. 
** diff --git a/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/SparkJobOrBuilder.java b/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/SparkJobOrBuilder.java index cbca0e8e..d7bee0cc 100644 --- a/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/SparkJobOrBuilder.java +++ b/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/SparkJobOrBuilder.java @@ -193,8 +193,8 @@ public interface SparkJobOrBuilder * * *- * Optional. HCFS URIs of files to be copied to the working directory of - * Spark drivers and distributed tasks. Useful for naively parallel tasks. + * Optional. HCFS URIs of files to be placed in the working directory of + * each executor. Useful for naively parallel tasks. ** *repeated string file_uris = 5 [(.google.api.field_behavior) = OPTIONAL];
@@ -206,8 +206,8 @@ public interface SparkJobOrBuilder * * *- * Optional. HCFS URIs of files to be copied to the working directory of - * Spark drivers and distributed tasks. Useful for naively parallel tasks. + * Optional. HCFS URIs of files to be placed in the working directory of + * each executor. Useful for naively parallel tasks. ** *repeated string file_uris = 5 [(.google.api.field_behavior) = OPTIONAL];
@@ -219,8 +219,8 @@ public interface SparkJobOrBuilder * * *- * Optional. HCFS URIs of files to be copied to the working directory of - * Spark drivers and distributed tasks. Useful for naively parallel tasks. + * Optional. HCFS URIs of files to be placed in the working directory of + * each executor. Useful for naively parallel tasks. ** *repeated string file_uris = 5 [(.google.api.field_behavior) = OPTIONAL];
@@ -233,8 +233,8 @@ public interface SparkJobOrBuilder * * *- * Optional. HCFS URIs of files to be copied to the working directory of - * Spark drivers and distributed tasks. Useful for naively parallel tasks. + * Optional. HCFS URIs of files to be placed in the working directory of + * each executor. Useful for naively parallel tasks. ** *repeated string file_uris = 5 [(.google.api.field_behavior) = OPTIONAL];
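The interface hunks change only documentation; the accessor surface remains the standard repeated-string protobuf API. A small sketch of reading the field through the `OrBuilder` interface, which both the message and its builder implement:

```java
import com.google.cloud.dataproc.v1.SparkJob;
import com.google.cloud.dataproc.v1.SparkJobOrBuilder;

public class ReadFileUrisExample {
  // Accepts either a built SparkJob or a SparkJob.Builder.
  static void printFileUris(SparkJobOrBuilder job) {
    for (int i = 0; i < job.getFileUrisCount(); i++) {
      System.out.println(job.getFileUris(i));
    }
  }

  public static void main(String[] args) {
    SparkJob.Builder builder =
        SparkJob.newBuilder().addFileUris("gs://my-bucket/data.csv"); // placeholder
    printFileUris(builder);         // the builder implements SparkJobOrBuilder
    printFileUris(builder.build()); // so does the immutable message
  }
}
```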
@@ -248,8 +248,8 @@ public interface SparkJobOrBuilder * * *- * Optional. HCFS URIs of archives to be extracted in the working directory - * of Spark drivers and tasks. Supported file types: + * Optional. HCFS URIs of archives to be extracted into the working directory + * of each executor. Supported file types: * .jar, .tar, .tar.gz, .tgz, and .zip. ** @@ -262,8 +262,8 @@ public interface SparkJobOrBuilder * * *- * Optional. HCFS URIs of archives to be extracted in the working directory - * of Spark drivers and tasks. Supported file types: + * Optional. HCFS URIs of archives to be extracted into the working directory + * of each executor. Supported file types: * .jar, .tar, .tar.gz, .tgz, and .zip. ** @@ -276,8 +276,8 @@ public interface SparkJobOrBuilder * * *- * Optional. HCFS URIs of archives to be extracted in the working directory - * of Spark drivers and tasks. Supported file types: + * Optional. HCFS URIs of archives to be extracted into the working directory + * of each executor. Supported file types: * .jar, .tar, .tar.gz, .tgz, and .zip. ** @@ -291,8 +291,8 @@ public interface SparkJobOrBuilder * * *- * Optional. HCFS URIs of archives to be extracted in the working directory - * of Spark drivers and tasks. Supported file types: + * Optional. HCFS URIs of archives to be extracted into the working directory + * of each executor. Supported file types: * .jar, .tar, .tar.gz, .tgz, and .zip. ** diff --git a/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/SparkRJob.java b/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/SparkRJob.java index 9fda5150..b4ed383c 100644 --- a/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/SparkRJob.java +++ b/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/SparkRJob.java @@ -324,8 +324,8 @@ public com.google.protobuf.ByteString getArgsBytes(int index) { * * *- * Optional. HCFS URIs of files to be copied to the working directory of - * R drivers and distributed tasks. Useful for naively parallel tasks. + * Optional. HCFS URIs of files to be placed in the working directory of + * each executor. Useful for naively parallel tasks. ** *repeated string file_uris = 3 [(.google.api.field_behavior) = OPTIONAL];
@@ -339,8 +339,8 @@ public com.google.protobuf.ProtocolStringList getFileUrisList() { * * *- * Optional. HCFS URIs of files to be copied to the working directory of - * R drivers and distributed tasks. Useful for naively parallel tasks. + * Optional. HCFS URIs of files to be placed in the working directory of + * each executor. Useful for naively parallel tasks. ** *repeated string file_uris = 3 [(.google.api.field_behavior) = OPTIONAL];
@@ -354,8 +354,8 @@ public int getFileUrisCount() { * * *- * Optional. HCFS URIs of files to be copied to the working directory of - * R drivers and distributed tasks. Useful for naively parallel tasks. + * Optional. HCFS URIs of files to be placed in the working directory of + * each executor. Useful for naively parallel tasks. ** *repeated string file_uris = 3 [(.google.api.field_behavior) = OPTIONAL];
@@ -370,8 +370,8 @@ public java.lang.String getFileUris(int index) { * * *- * Optional. HCFS URIs of files to be copied to the working directory of - * R drivers and distributed tasks. Useful for naively parallel tasks. + * Optional. HCFS URIs of files to be placed in the working directory of + * each executor. Useful for naively parallel tasks. ** *repeated string file_uris = 3 [(.google.api.field_behavior) = OPTIONAL];
@@ -389,8 +389,8 @@ public com.google.protobuf.ByteString getFileUrisBytes(int index) { * * *- * Optional. HCFS URIs of archives to be extracted in the working directory of - * Spark drivers and tasks. Supported file types: + * Optional. HCFS URIs of archives to be extracted into the working directory + * of each executor. Supported file types: * .jar, .tar, .tar.gz, .tgz, and .zip. ** @@ -405,8 +405,8 @@ public com.google.protobuf.ProtocolStringList getArchiveUrisList() { * * *- * Optional. HCFS URIs of archives to be extracted in the working directory of - * Spark drivers and tasks. Supported file types: + * Optional. HCFS URIs of archives to be extracted into the working directory + * of each executor. Supported file types: * .jar, .tar, .tar.gz, .tgz, and .zip. ** @@ -421,8 +421,8 @@ public int getArchiveUrisCount() { * * *- * Optional. HCFS URIs of archives to be extracted in the working directory of - * Spark drivers and tasks. Supported file types: + * Optional. HCFS URIs of archives to be extracted into the working directory + * of each executor. Supported file types: * .jar, .tar, .tar.gz, .tgz, and .zip. ** @@ -438,8 +438,8 @@ public java.lang.String getArchiveUris(int index) { * * *- * Optional. HCFS URIs of archives to be extracted in the working directory of - * Spark drivers and tasks. Supported file types: + * Optional. HCFS URIs of archives to be extracted into the working directory + * of each executor. Supported file types: * .jar, .tar, .tar.gz, .tgz, and .zip. ** @@ -1419,8 +1419,8 @@ private void ensureFileUrisIsMutable() { * * *- * Optional. HCFS URIs of files to be copied to the working directory of - * R drivers and distributed tasks. Useful for naively parallel tasks. + * Optional. HCFS URIs of files to be placed in the working directory of + * each executor. Useful for naively parallel tasks. ** *repeated string file_uris = 3 [(.google.api.field_behavior) = OPTIONAL];
@@ -1434,8 +1434,8 @@ public com.google.protobuf.ProtocolStringList getFileUrisList() { * * *- * Optional. HCFS URIs of files to be copied to the working directory of - * R drivers and distributed tasks. Useful for naively parallel tasks. + * Optional. HCFS URIs of files to be placed in the working directory of + * each executor. Useful for naively parallel tasks. ** *repeated string file_uris = 3 [(.google.api.field_behavior) = OPTIONAL];
@@ -1449,8 +1449,8 @@ public int getFileUrisCount() { * * *- * Optional. HCFS URIs of files to be copied to the working directory of - * R drivers and distributed tasks. Useful for naively parallel tasks. + * Optional. HCFS URIs of files to be placed in the working directory of + * each executor. Useful for naively parallel tasks. ** *repeated string file_uris = 3 [(.google.api.field_behavior) = OPTIONAL];
@@ -1465,8 +1465,8 @@ public java.lang.String getFileUris(int index) { * * *- * Optional. HCFS URIs of files to be copied to the working directory of - * R drivers and distributed tasks. Useful for naively parallel tasks. + * Optional. HCFS URIs of files to be placed in the working directory of + * each executor. Useful for naively parallel tasks. ** *repeated string file_uris = 3 [(.google.api.field_behavior) = OPTIONAL];
@@ -1481,8 +1481,8 @@ public com.google.protobuf.ByteString getFileUrisBytes(int index) { * * *- * Optional. HCFS URIs of files to be copied to the working directory of - * R drivers and distributed tasks. Useful for naively parallel tasks. + * Optional. HCFS URIs of files to be placed in the working directory of + * each executor. Useful for naively parallel tasks. ** *repeated string file_uris = 3 [(.google.api.field_behavior) = OPTIONAL];
@@ -1504,8 +1504,8 @@ public Builder setFileUris(int index, java.lang.String value) { * * *- * Optional. HCFS URIs of files to be copied to the working directory of - * R drivers and distributed tasks. Useful for naively parallel tasks. + * Optional. HCFS URIs of files to be placed in the working directory of + * each executor. Useful for naively parallel tasks. ** *repeated string file_uris = 3 [(.google.api.field_behavior) = OPTIONAL];
@@ -1526,8 +1526,8 @@ public Builder addFileUris(java.lang.String value) { * * *- * Optional. HCFS URIs of files to be copied to the working directory of - * R drivers and distributed tasks. Useful for naively parallel tasks. + * Optional. HCFS URIs of files to be placed in the working directory of + * each executor. Useful for naively parallel tasks. ** *repeated string file_uris = 3 [(.google.api.field_behavior) = OPTIONAL];
@@ -1545,8 +1545,8 @@ public Builder addAllFileUris(java.lang.Iterablevalues) { * * * - * Optional. HCFS URIs of files to be copied to the working directory of - * R drivers and distributed tasks. Useful for naively parallel tasks. + * Optional. HCFS URIs of files to be placed in the working directory of + * each executor. Useful for naively parallel tasks. ** *repeated string file_uris = 3 [(.google.api.field_behavior) = OPTIONAL];
@@ -1563,8 +1563,8 @@ public Builder clearFileUris() { * * *- * Optional. HCFS URIs of files to be copied to the working directory of - * R drivers and distributed tasks. Useful for naively parallel tasks. + * Optional. HCFS URIs of files to be placed in the working directory of + * each executor. Useful for naively parallel tasks. ** *repeated string file_uris = 3 [(.google.api.field_behavior) = OPTIONAL];
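The SparkRJob hunks carry the same clarification: files land in, and archives are extracted into, each executor's working directory. A hedged builder sketch with placeholder paths:

```java
import com.google.cloud.dataproc.v1.SparkRJob;

public class SparkRJobExample {
  public static void main(String[] args) {
    SparkRJob job =
        SparkRJob.newBuilder()
            .setMainRFileUri("gs://my-bucket/analysis.R") // placeholder bucket
            // Staged into the working directory of each executor.
            .addFileUris("gs://my-bucket/input-shard-1.csv")
            .addFileUris("gs://my-bucket/input-shard-2.csv")
            // Extracted into each executor's working directory.
            .addArchiveUris("gs://my-bucket/r-packages.tar.gz")
            .build();
    System.out.println(job.getFileUrisList());
  }
}
```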
@@ -1596,8 +1596,8 @@ private void ensureArchiveUrisIsMutable() { * * *- * Optional. HCFS URIs of archives to be extracted in the working directory of - * Spark drivers and tasks. Supported file types: + * Optional. HCFS URIs of archives to be extracted into the working directory + * of each executor. Supported file types: * .jar, .tar, .tar.gz, .tgz, and .zip. ** @@ -1612,8 +1612,8 @@ public com.google.protobuf.ProtocolStringList getArchiveUrisList() { * * *- * Optional. HCFS URIs of archives to be extracted in the working directory of - * Spark drivers and tasks. Supported file types: + * Optional. HCFS URIs of archives to be extracted into the working directory + * of each executor. Supported file types: * .jar, .tar, .tar.gz, .tgz, and .zip. ** @@ -1628,8 +1628,8 @@ public int getArchiveUrisCount() { * * *- * Optional. HCFS URIs of archives to be extracted in the working directory of - * Spark drivers and tasks. Supported file types: + * Optional. HCFS URIs of archives to be extracted into the working directory + * of each executor. Supported file types: * .jar, .tar, .tar.gz, .tgz, and .zip. ** @@ -1645,8 +1645,8 @@ public java.lang.String getArchiveUris(int index) { * * *- * Optional. HCFS URIs of archives to be extracted in the working directory of - * Spark drivers and tasks. Supported file types: + * Optional. HCFS URIs of archives to be extracted into the working directory + * of each executor. Supported file types: * .jar, .tar, .tar.gz, .tgz, and .zip. ** @@ -1662,8 +1662,8 @@ public com.google.protobuf.ByteString getArchiveUrisBytes(int index) { * * *- * Optional. HCFS URIs of archives to be extracted in the working directory of - * Spark drivers and tasks. Supported file types: + * Optional. HCFS URIs of archives to be extracted into the working directory + * of each executor. Supported file types: * .jar, .tar, .tar.gz, .tgz, and .zip. ** @@ -1686,8 +1686,8 @@ public Builder setArchiveUris(int index, java.lang.String value) { * * *- * Optional. HCFS URIs of archives to be extracted in the working directory of - * Spark drivers and tasks. Supported file types: + * Optional. HCFS URIs of archives to be extracted into the working directory + * of each executor. Supported file types: * .jar, .tar, .tar.gz, .tgz, and .zip. ** @@ -1709,8 +1709,8 @@ public Builder addArchiveUris(java.lang.String value) { * * *- * Optional. HCFS URIs of archives to be extracted in the working directory of - * Spark drivers and tasks. Supported file types: + * Optional. HCFS URIs of archives to be extracted into the working directory + * of each executor. Supported file types: * .jar, .tar, .tar.gz, .tgz, and .zip. ** @@ -1729,8 +1729,8 @@ public Builder addAllArchiveUris(java.lang.Iterablevalues) { * * * - * Optional. HCFS URIs of archives to be extracted in the working directory of - * Spark drivers and tasks. Supported file types: + * Optional. HCFS URIs of archives to be extracted into the working directory + * of each executor. Supported file types: * .jar, .tar, .tar.gz, .tgz, and .zip. ** @@ -1748,8 +1748,8 @@ public Builder clearArchiveUris() { * * *- * Optional. HCFS URIs of archives to be extracted in the working directory of - * Spark drivers and tasks. Supported file types: + * Optional. HCFS URIs of archives to be extracted into the working directory + * of each executor. Supported file types: * .jar, .tar, .tar.gz, .tgz, and .zip. 
** diff --git a/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/SparkRJobOrBuilder.java b/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/SparkRJobOrBuilder.java index 10bdca04..583317e7 100644 --- a/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/SparkRJobOrBuilder.java +++ b/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/SparkRJobOrBuilder.java @@ -113,8 +113,8 @@ public interface SparkRJobOrBuilder * * *- * Optional. HCFS URIs of files to be copied to the working directory of - * R drivers and distributed tasks. Useful for naively parallel tasks. + * Optional. HCFS URIs of files to be placed in the working directory of + * each executor. Useful for naively parallel tasks. ** *repeated string file_uris = 3 [(.google.api.field_behavior) = OPTIONAL];
@@ -126,8 +126,8 @@ public interface SparkRJobOrBuilder * * *- * Optional. HCFS URIs of files to be copied to the working directory of - * R drivers and distributed tasks. Useful for naively parallel tasks. + * Optional. HCFS URIs of files to be placed in the working directory of + * each executor. Useful for naively parallel tasks. ** *repeated string file_uris = 3 [(.google.api.field_behavior) = OPTIONAL];
@@ -139,8 +139,8 @@ public interface SparkRJobOrBuilder * * *- * Optional. HCFS URIs of files to be copied to the working directory of - * R drivers and distributed tasks. Useful for naively parallel tasks. + * Optional. HCFS URIs of files to be placed in the working directory of + * each executor. Useful for naively parallel tasks. ** *repeated string file_uris = 3 [(.google.api.field_behavior) = OPTIONAL];
@@ -153,8 +153,8 @@ public interface SparkRJobOrBuilder * * *- * Optional. HCFS URIs of files to be copied to the working directory of - * R drivers and distributed tasks. Useful for naively parallel tasks. + * Optional. HCFS URIs of files to be placed in the working directory of + * each executor. Useful for naively parallel tasks. ** *repeated string file_uris = 3 [(.google.api.field_behavior) = OPTIONAL];
@@ -168,8 +168,8 @@ public interface SparkRJobOrBuilder * * *- * Optional. HCFS URIs of archives to be extracted in the working directory of - * Spark drivers and tasks. Supported file types: + * Optional. HCFS URIs of archives to be extracted into the working directory + * of each executor. Supported file types: * .jar, .tar, .tar.gz, .tgz, and .zip. ** @@ -182,8 +182,8 @@ public interface SparkRJobOrBuilder * * *- * Optional. HCFS URIs of archives to be extracted in the working directory of - * Spark drivers and tasks. Supported file types: + * Optional. HCFS URIs of archives to be extracted into the working directory + * of each executor. Supported file types: * .jar, .tar, .tar.gz, .tgz, and .zip. ** @@ -196,8 +196,8 @@ public interface SparkRJobOrBuilder * * *- * Optional. HCFS URIs of archives to be extracted in the working directory of - * Spark drivers and tasks. Supported file types: + * Optional. HCFS URIs of archives to be extracted into the working directory + * of each executor. Supported file types: * .jar, .tar, .tar.gz, .tgz, and .zip. ** @@ -211,8 +211,8 @@ public interface SparkRJobOrBuilder * * *- * Optional. HCFS URIs of archives to be extracted in the working directory of - * Spark drivers and tasks. Supported file types: + * Optional. HCFS URIs of archives to be extracted into the working directory + * of each executor. Supported file types: * .jar, .tar, .tar.gz, .tgz, and .zip. ** diff --git a/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/WorkflowTemplate.java b/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/WorkflowTemplate.java index 7997e5fe..f6fa56c0 100644 --- a/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/WorkflowTemplate.java +++ b/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/WorkflowTemplate.java @@ -719,7 +719,7 @@ public com.google.cloud.dataproc.v1.OrderedJobOrBuilder getJobsOrBuilder(int ind * * *- * Optional. emplate parameters whose values are substituted into the + * Optional. Template parameters whose values are substituted into the * template. Values for parameters must be provided when the template is * instantiated. *@@ -736,7 +736,7 @@ public java.util.ListgetParamet * * * - * Optional. emplate parameters whose values are substituted into the + * Optional. Template parameters whose values are substituted into the * template. Values for parameters must be provided when the template is * instantiated. *@@ -754,7 +754,7 @@ public java.util.ListgetParamet * * * - * Optional. emplate parameters whose values are substituted into the + * Optional. Template parameters whose values are substituted into the * template. Values for parameters must be provided when the template is * instantiated. *@@ -771,7 +771,7 @@ public int getParametersCount() { * * *- * Optional. emplate parameters whose values are substituted into the + * Optional. Template parameters whose values are substituted into the * template. Values for parameters must be provided when the template is * instantiated. *@@ -788,7 +788,7 @@ public com.google.cloud.dataproc.v1.TemplateParameter getParameters(int index) { * * *- * Optional. emplate parameters whose values are substituted into the + * Optional. Template parameters whose values are substituted into the * template. Values for parameters must be provided when the template is * instantiated. *@@ -2900,7 +2900,7 @@ private void ensureParametersIsMutable() { * * *- * Optional. 
emplate parameters whose values are substituted into the + * Optional. Template parameters whose values are substituted into the * template. Values for parameters must be provided when the template is * instantiated. *@@ -2920,7 +2920,7 @@ public java.util.ListgetParamet * * * - * Optional. emplate parameters whose values are substituted into the + * Optional. Template parameters whose values are substituted into the * template. Values for parameters must be provided when the template is * instantiated. *@@ -2940,7 +2940,7 @@ public int getParametersCount() { * * *- * Optional. emplate parameters whose values are substituted into the + * Optional. Template parameters whose values are substituted into the * template. Values for parameters must be provided when the template is * instantiated. *@@ -2960,7 +2960,7 @@ public com.google.cloud.dataproc.v1.TemplateParameter getParameters(int index) { * * *- * Optional. emplate parameters whose values are substituted into the + * Optional. Template parameters whose values are substituted into the * template. Values for parameters must be provided when the template is * instantiated. *@@ -2986,7 +2986,7 @@ public Builder setParameters(int index, com.google.cloud.dataproc.v1.TemplatePar * * *- * Optional. emplate parameters whose values are substituted into the + * Optional. Template parameters whose values are substituted into the * template. Values for parameters must be provided when the template is * instantiated. *@@ -3010,7 +3010,7 @@ public Builder setParameters( * * *- * Optional. emplate parameters whose values are substituted into the + * Optional. Template parameters whose values are substituted into the * template. Values for parameters must be provided when the template is * instantiated. *@@ -3036,7 +3036,7 @@ public Builder addParameters(com.google.cloud.dataproc.v1.TemplateParameter valu * * *- * Optional. emplate parameters whose values are substituted into the + * Optional. Template parameters whose values are substituted into the * template. Values for parameters must be provided when the template is * instantiated. *@@ -3062,7 +3062,7 @@ public Builder addParameters(int index, com.google.cloud.dataproc.v1.TemplatePar * * *- * Optional. emplate parameters whose values are substituted into the + * Optional. Template parameters whose values are substituted into the * template. Values for parameters must be provided when the template is * instantiated. *@@ -3086,7 +3086,7 @@ public Builder addParameters( * * *- * Optional. emplate parameters whose values are substituted into the + * Optional. Template parameters whose values are substituted into the * template. Values for parameters must be provided when the template is * instantiated. *@@ -3110,7 +3110,7 @@ public Builder addParameters( * * *- * Optional. emplate parameters whose values are substituted into the + * Optional. Template parameters whose values are substituted into the * template. Values for parameters must be provided when the template is * instantiated. *@@ -3134,7 +3134,7 @@ public Builder addAllParameters( * * *- * Optional. emplate parameters whose values are substituted into the + * Optional. Template parameters whose values are substituted into the * template. Values for parameters must be provided when the template is * instantiated. *@@ -3157,7 +3157,7 @@ public Builder clearParameters() { * * *- * Optional. emplate parameters whose values are substituted into the + * Optional. Template parameters whose values are substituted into the * template. 
Values for parameters must be provided when the template is * instantiated. *@@ -3180,7 +3180,7 @@ public Builder removeParameters(int index) { * * *- * Optional. emplate parameters whose values are substituted into the + * Optional. Template parameters whose values are substituted into the * template. Values for parameters must be provided when the template is * instantiated. *@@ -3196,7 +3196,7 @@ public com.google.cloud.dataproc.v1.TemplateParameter.Builder getParametersBuild * * *- * Optional. emplate parameters whose values are substituted into the + * Optional. Template parameters whose values are substituted into the * template. Values for parameters must be provided when the template is * instantiated. *@@ -3217,7 +3217,7 @@ public com.google.cloud.dataproc.v1.TemplateParameterOrBuilder getParametersOrBu * * *- * Optional. emplate parameters whose values are substituted into the + * Optional. Template parameters whose values are substituted into the * template. Values for parameters must be provided when the template is * instantiated. *@@ -3238,7 +3238,7 @@ public com.google.cloud.dataproc.v1.TemplateParameterOrBuilder getParametersOrBu * * *- * Optional. emplate parameters whose values are substituted into the + * Optional. Template parameters whose values are substituted into the * template. Values for parameters must be provided when the template is * instantiated. *@@ -3255,7 +3255,7 @@ public com.google.cloud.dataproc.v1.TemplateParameter.Builder addParametersBuild * * *- * Optional. emplate parameters whose values are substituted into the + * Optional. Template parameters whose values are substituted into the * template. Values for parameters must be provided when the template is * instantiated. *@@ -3272,7 +3272,7 @@ public com.google.cloud.dataproc.v1.TemplateParameter.Builder addParametersBuild * * *- * Optional. emplate parameters whose values are substituted into the + * Optional. Template parameters whose values are substituted into the * template. Values for parameters must be provided when the template is * instantiated. *diff --git a/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/WorkflowTemplateOrBuilder.java b/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/WorkflowTemplateOrBuilder.java index b1c121e1..524587ca 100644 --- a/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/WorkflowTemplateOrBuilder.java +++ b/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/WorkflowTemplateOrBuilder.java @@ -371,7 +371,7 @@ public interface WorkflowTemplateOrBuilder * * *- * Optional. emplate parameters whose values are substituted into the + * Optional. Template parameters whose values are substituted into the * template. Values for parameters must be provided when the template is * instantiated. *@@ -385,7 +385,7 @@ public interface WorkflowTemplateOrBuilder * * *- * Optional. emplate parameters whose values are substituted into the + * Optional. Template parameters whose values are substituted into the * template. Values for parameters must be provided when the template is * instantiated. *@@ -399,7 +399,7 @@ public interface WorkflowTemplateOrBuilder * * *- * Optional. emplate parameters whose values are substituted into the + * Optional. Template parameters whose values are substituted into the * template. Values for parameters must be provided when the template is * instantiated. *@@ -413,7 +413,7 @@ public interface WorkflowTemplateOrBuilder * * *- * Optional. 
emplate parameters whose values are substituted into the + * Optional. Template parameters whose values are substituted into the * template. Values for parameters must be provided when the template is * instantiated. *@@ -428,7 +428,7 @@ public interface WorkflowTemplateOrBuilder * * *- * Optional. emplate parameters whose values are substituted into the + * Optional. Template parameters whose values are substituted into the * template. Values for parameters must be provided when the template is * instantiated. *diff --git a/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/WorkflowTemplatesProto.java b/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/WorkflowTemplatesProto.java index df69232f..c44ab556 100644 --- a/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/WorkflowTemplatesProto.java +++ b/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/WorkflowTemplatesProto.java @@ -186,19 +186,20 @@ public static com.google.protobuf.Descriptors.FileDescriptor getDescriptor() { + "\002 \003(\0132<.google.cloud.dataproc.v1.Cluster" + "Selector.ClusterLabelsEntryB\003\340A\002\0324\n\022Clus" + "terLabelsEntry\022\013\n\003key\030\001 \001(\t\022\r\n\005value\030\002 \001" - + "(\t:\0028\001\"\336\005\n\nOrderedJob\022\024\n\007step_id\030\001 \001(\tB\003" - + "\340A\002\0229\n\nhadoop_job\030\002 \001(\0132#.google.cloud.d" - + "ataproc.v1.HadoopJobH\000\0227\n\tspark_job\030\003 \001(" - + "\0132\".google.cloud.dataproc.v1.SparkJobH\000\022" - + ";\n\013pyspark_job\030\004 \001(\0132$.google.cloud.data" - + "proc.v1.PySparkJobH\000\0225\n\010hive_job\030\005 \001(\0132!" - + ".google.cloud.dataproc.v1.HiveJobH\000\0223\n\007p" - + "ig_job\030\006 \001(\0132 .google.cloud.dataproc.v1." - + "PigJobH\000\022:\n\013spark_r_job\030\013 \001(\0132#.google.c" - + "loud.dataproc.v1.SparkRJobH\000\022>\n\rspark_sq" - + "l_job\030\007 \001(\0132%.google.cloud.dataproc.v1.S" - + "parkSqlJobH\000\0229\n\npresto_job\030\014 \001(\0132#.googl" - + "e.cloud.dataproc.v1.PrestoJobH\000\022E\n\006label" + + "(\t:\0028\001\"\206\006\n\nOrderedJob\022\024\n\007step_id\030\001 \001(\tB\003" + + "\340A\002\022>\n\nhadoop_job\030\002 \001(\0132#.google.cloud.d" + + "ataproc.v1.HadoopJobB\003\340A\001H\000\022<\n\tspark_job" + + "\030\003 \001(\0132\".google.cloud.dataproc.v1.SparkJ" + + "obB\003\340A\001H\000\022@\n\013pyspark_job\030\004 \001(\0132$.google." 
+ + "cloud.dataproc.v1.PySparkJobB\003\340A\001H\000\022:\n\010h" + + "ive_job\030\005 \001(\0132!.google.cloud.dataproc.v1" + + ".HiveJobB\003\340A\001H\000\0228\n\007pig_job\030\006 \001(\0132 .googl" + + "e.cloud.dataproc.v1.PigJobB\003\340A\001H\000\022?\n\013spa" + + "rk_r_job\030\013 \001(\0132#.google.cloud.dataproc.v" + + "1.SparkRJobB\003\340A\001H\000\022C\n\rspark_sql_job\030\007 \001(" + + "\0132%.google.cloud.dataproc.v1.SparkSqlJob" + + "B\003\340A\001H\000\022>\n\npresto_job\030\014 \001(\0132#.google.clo" + + "ud.dataproc.v1.PrestoJobB\003\340A\001H\000\022E\n\006label" + "s\030\010 \003(\01320.google.cloud.dataproc.v1.Order" + "edJob.LabelsEntryB\003\340A\001\022@\n\nscheduling\030\t \001" + "(\0132\'.google.cloud.dataproc.v1.JobSchedul" diff --git a/proto-google-cloud-dataproc-v1/src/main/proto/google/cloud/dataproc/v1/autoscaling_policies.proto b/proto-google-cloud-dataproc-v1/src/main/proto/google/cloud/dataproc/v1/autoscaling_policies.proto index 4ce5868d..8d10a86f 100644 --- a/proto-google-cloud-dataproc-v1/src/main/proto/google/cloud/dataproc/v1/autoscaling_policies.proto +++ b/proto-google-cloud-dataproc-v1/src/main/proto/google/cloud/dataproc/v1/autoscaling_policies.proto @@ -164,20 +164,26 @@ message BasicYarnAutoscalingConfig { // Bounds: [0s, 1d]. google.protobuf.Duration graceful_decommission_timeout = 5 [(google.api.field_behavior) = REQUIRED]; - // Required. Fraction of average pending memory in the last cooldown period + // Required. Fraction of average YARN pending memory in the last cooldown period // for which to add workers. A scale-up factor of 1.0 will result in scaling // up so that there is no pending memory remaining after the update (more // aggressive scaling). A scale-up factor closer to 0 will result in a smaller // magnitude of scaling up (less aggressive scaling). + // See [How autoscaling + // works](/dataproc/docs/concepts/configuring-clusters/autoscaling#how_autoscaling_works) + // for more information. // // Bounds: [0.0, 1.0]. double scale_up_factor = 1 [(google.api.field_behavior) = REQUIRED]; - // Required. Fraction of average pending memory in the last cooldown period + // Required. Fraction of average YARN pending memory in the last cooldown period // for which to remove workers. A scale-down factor of 1 will result in // scaling down so that there is no available memory remaining after the // update (more aggressive scaling). A scale-down factor of 0 disables // removing workers, which can be beneficial for autoscaling a single job. + // See [How autoscaling + // works](/dataproc/docs/concepts/configuring-clusters/autoscaling#how_autoscaling_works) + // for more information. // // Bounds: [0.0, 1.0]. double scale_down_factor = 2 [(google.api.field_behavior) = REQUIRED]; diff --git a/proto-google-cloud-dataproc-v1/src/main/proto/google/cloud/dataproc/v1/clusters.proto b/proto-google-cloud-dataproc-v1/src/main/proto/google/cloud/dataproc/v1/clusters.proto index c66d35d3..ccff3522 100644 --- a/proto-google-cloud-dataproc-v1/src/main/proto/google/cloud/dataproc/v1/clusters.proto +++ b/proto-google-cloud-dataproc-v1/src/main/proto/google/cloud/dataproc/v1/clusters.proto @@ -170,6 +170,17 @@ message ClusterConfig { // bucket](https://cloud.google.com/dataproc/docs/concepts/configuring-clusters/staging-bucket)). string config_bucket = 1 [(google.api.field_behavior) = OPTIONAL]; + // Optional. A Cloud Storage bucket used to store ephemeral cluster and jobs data, + // such as Spark and MapReduce history files. 
+ // If you do not specify a temp bucket, + // Dataproc will determine a Cloud Storage location (US, + // ASIA, or EU) for your cluster's temp bucket according to the + // Compute Engine zone where your cluster is deployed, and then create + // and manage this project-level, per-location bucket. The default bucket has + // a TTL of 90 days, but you can use any TTL (or none) if you specify a + // bucket. + string temp_bucket = 2 [(google.api.field_behavior) = OPTIONAL]; + // Optional. The shared Compute Engine config settings for // all instances in a cluster. GceClusterConfig gce_cluster_config = 8 [(google.api.field_behavior) = OPTIONAL]; @@ -216,6 +227,20 @@ message ClusterConfig { // Optional. Lifecycle setting for the cluster. LifecycleConfig lifecycle_config = 17 [(google.api.field_behavior) = OPTIONAL]; + + // Optional. Port/endpoint configuration for this cluster + EndpointConfig endpoint_config = 19 [(google.api.field_behavior) = OPTIONAL]; +} + +// Endpoint config for this cluster +message EndpointConfig { + // Output only. The map of port descriptions to URLs. Will only be populated + // if enable_http_port_access is true. + maphttp_ports = 1 [(google.api.field_behavior) = OUTPUT_ONLY]; + + // Optional. If true, enable http access to specific ports on the cluster + // from external sources. Defaults to false. + bool enable_http_port_access = 2 [(google.api.field_behavior) = OPTIONAL]; } // Autoscaling Policy config associated with the cluster. @@ -288,7 +313,7 @@ message GceClusterConfig { bool internal_ip_only = 7 [(google.api.field_behavior) = OPTIONAL]; // Optional. The [Dataproc service - // account](https://cloud.google.com/dataproc/docs/concepts/configuring-clusters/service-accounts#service_accounts_in_cloud_dataproc) + // account](https://cloud.google.com/dataproc/docs/concepts/configuring-clusters/service-accounts#service_accounts_in_dataproc) // (also see [VM Data Plane // identity](https://cloud.google.com/dataproc/docs/concepts/iam/dataproc-principals#vm_service_account_data_plane_identity)) // used by Dataproc cluster VM instances to access Google Cloud Platform @@ -332,6 +357,27 @@ message GceClusterConfig { // The config settings for Compute Engine resources in // an instance group, such as a master or worker group. message InstanceGroupConfig { + // Controls the use of + // [preemptible instances] + // (https://cloud.google.com/compute/docs/instances/preemptible) + // within the group. + enum Preemptibility { + // Preemptibility is unspecified, the system will choose the + // appropriate setting for each instance group. + PREEMPTIBILITY_UNSPECIFIED = 0; + + // Instances are non-preemptible. + // + // This option is allowed for all instance groups and is the only valid + // value for Master and Worker instance groups. + NON_PREEMPTIBLE = 1; + + // Instances are preemptible. + // + // This option is allowed only for secondary worker groups. + PREEMPTIBLE = 2; + } + // Optional. The number of VM instances in the instance group. // For master instance groups, must be set to 1. int32 num_instances = 1 [(google.api.field_behavior) = OPTIONAL]; @@ -382,6 +428,15 @@ message InstanceGroupConfig { // instances. bool is_preemptible = 6 [(google.api.field_behavior) = OUTPUT_ONLY]; + // Optional. Specifies the preemptibility of the instance group. + // + // The default value for master and worker groups is + // `NON_PREEMPTIBLE`. This default cannot be changed. + // + // The default value for secondary instances is + // `PREEMPTIBLE`. 
+  Preemptibility preemptibility = 10 [(google.api.field_behavior) = OPTIONAL];
+
   // Output only. The config for Compute Engine Instance Group
   // Manager that manages this group.
   // This is only used for preemptible instance groups.
@@ -608,7 +663,7 @@ message KerberosConfig {
 message SoftwareConfig {
   // Optional. The version of software inside the cluster. It must be one of the
   // supported [Dataproc
-  // Versions](https://cloud.google.com/dataproc/docs/concepts/versioning/dataproc-versions#supported_cloud_dataproc_versions),
+  // Versions](https://cloud.google.com/dataproc/docs/concepts/versioning/dataproc-versions#supported_dataproc_versions),
   // such as "1.2" (including a subminor version, such as "1.2.29"), or the
   // ["preview"
   // version](https://cloud.google.com/dataproc/docs/concepts/versioning/dataproc-versions#other_versions).
diff --git a/proto-google-cloud-dataproc-v1/src/main/proto/google/cloud/dataproc/v1/jobs.proto b/proto-google-cloud-dataproc-v1/src/main/proto/google/cloud/dataproc/v1/jobs.proto
index b9026f62..065530f3 100644
--- a/proto-google-cloud-dataproc-v1/src/main/proto/google/cloud/dataproc/v1/jobs.proto
+++ b/proto-google-cloud-dataproc-v1/src/main/proto/google/cloud/dataproc/v1/jobs.proto
@@ -224,12 +224,12 @@ message SparkJob {
   // Spark driver and tasks.
   repeated string jar_file_uris = 4 [(google.api.field_behavior) = OPTIONAL];

-  // Optional. HCFS URIs of files to be copied to the working directory of
-  // Spark drivers and distributed tasks. Useful for naively parallel tasks.
+  // Optional. HCFS URIs of files to be placed in the working directory of
+  // each executor. Useful for naively parallel tasks.
   repeated string file_uris = 5 [(google.api.field_behavior) = OPTIONAL];

-  // Optional. HCFS URIs of archives to be extracted in the working directory
-  // of Spark drivers and tasks. Supported file types:
+  // Optional. HCFS URIs of archives to be extracted into the working directory
+  // of each executor. Supported file types:
   // .jar, .tar, .tar.gz, .tgz, and .zip.
   repeated string archive_uris = 6 [(google.api.field_behavior) = OPTIONAL];

@@ -265,11 +265,12 @@ message PySparkJob {
   // Python driver and tasks.
   repeated string jar_file_uris = 4 [(google.api.field_behavior) = OPTIONAL];

-  // Optional. HCFS URIs of files to be copied to the working directory of
-  // Python drivers and distributed tasks. Useful for naively parallel tasks.
+  // Optional. HCFS URIs of files to be placed in the working directory of
+  // each executor. Useful for naively parallel tasks.
   repeated string file_uris = 5 [(google.api.field_behavior) = OPTIONAL];

-  // Optional. HCFS URIs of archives to be extracted in the working directory of
+  // Optional. HCFS URIs of archives to be extracted into the working directory
+  // of each executor. Supported file types:
   // .jar, .tar, .tar.gz, .tgz, and .zip.
   repeated string archive_uris = 6 [(google.api.field_behavior) = OPTIONAL];

@@ -414,12 +415,12 @@ message SparkRJob {
   // occur that causes an incorrect job submission.
   repeated string args = 2 [(google.api.field_behavior) = OPTIONAL];

-  // Optional. HCFS URIs of files to be copied to the working directory of
-  // R drivers and distributed tasks. Useful for naively parallel tasks.
+  // Optional. HCFS URIs of files to be placed in the working directory of
+  // each executor. Useful for naively parallel tasks.
   repeated string file_uris = 3 [(google.api.field_behavior) = OPTIONAL];

-  // Optional. HCFS URIs of archives to be extracted in the working directory of
-  // Spark drivers and tasks. Supported file types:
+  // Optional. HCFS URIs of archives to be extracted into the working directory
+  // of each executor. Supported file types:
   // .jar, .tar, .tar.gz, .tgz, and .zip.
   repeated string archive_uris = 4 [(google.api.field_behavior) = OPTIONAL];

@@ -565,9 +566,9 @@ message JobStatus {

 // Encapsulates the full scoping used to reference a job.
 message JobReference {
-  // Required. The ID of the Google Cloud Platform project that the job
-  // belongs to.
-  string project_id = 1 [(google.api.field_behavior) = REQUIRED];
+  // Optional. The ID of the Google Cloud Platform project that the job belongs to. If
+  // specified, must match the request project ID.
+  string project_id = 1 [(google.api.field_behavior) = OPTIONAL];

   // Optional. The job ID, which must be unique within the project.
   //
diff --git a/proto-google-cloud-dataproc-v1/src/main/proto/google/cloud/dataproc/v1/shared.proto b/proto-google-cloud-dataproc-v1/src/main/proto/google/cloud/dataproc/v1/shared.proto
index 99d6e776..7a1382f1 100644
--- a/proto-google-cloud-dataproc-v1/src/main/proto/google/cloud/dataproc/v1/shared.proto
+++ b/proto-google-cloud-dataproc-v1/src/main/proto/google/cloud/dataproc/v1/shared.proto
@@ -25,7 +25,7 @@ option java_package = "com.google.cloud.dataproc.v1";

 // Cluster components that can be activated.
 enum Component {
-  // Unspecified component.
+  // Unspecified component. Specifying this will cause Cluster creation to fail.
   COMPONENT_UNSPECIFIED = 0;

   // The Anaconda python distribution.
diff --git a/proto-google-cloud-dataproc-v1/src/main/proto/google/cloud/dataproc/v1/workflow_templates.proto b/proto-google-cloud-dataproc-v1/src/main/proto/google/cloud/dataproc/v1/workflow_templates.proto
index d1cfcc09..04f81004 100644
--- a/proto-google-cloud-dataproc-v1/src/main/proto/google/cloud/dataproc/v1/workflow_templates.proto
+++ b/proto-google-cloud-dataproc-v1/src/main/proto/google/cloud/dataproc/v1/workflow_templates.proto
@@ -238,7 +238,7 @@ message WorkflowTemplate {
   // Required. The Directed Acyclic Graph of Jobs to submit.
   repeated OrderedJob jobs = 8 [(google.api.field_behavior) = REQUIRED];

-  // Optional. emplate parameters whose values are substituted into the
+  // Optional. Template parameters whose values are substituted into the
   // template. Values for parameters must be provided when the template is
   // instantiated.
   repeated TemplateParameter parameters = 9 [(google.api.field_behavior) = OPTIONAL];
@@ -319,23 +319,29 @@ message OrderedJob {
   // Required. The job definition.
   oneof job_type {
-    HadoopJob hadoop_job = 2;
+    // Optional. Job is a Hadoop job.
+    HadoopJob hadoop_job = 2 [(google.api.field_behavior) = OPTIONAL];

-    SparkJob spark_job = 3;
+    // Optional. Job is a Spark job.
+    SparkJob spark_job = 3 [(google.api.field_behavior) = OPTIONAL];

-    PySparkJob pyspark_job = 4;
+    // Optional. Job is a PySpark job.
+    PySparkJob pyspark_job = 4 [(google.api.field_behavior) = OPTIONAL];

-    HiveJob hive_job = 5;
+    // Optional. Job is a Hive job.
+    HiveJob hive_job = 5 [(google.api.field_behavior) = OPTIONAL];

-    PigJob pig_job = 6;
+    // Optional. Job is a Pig job.
+    PigJob pig_job = 6 [(google.api.field_behavior) = OPTIONAL];

-    // Spark R job
-    SparkRJob spark_r_job = 11;
+    // Optional. Job is a SparkR job.
+    SparkRJob spark_r_job = 11 [(google.api.field_behavior) = OPTIONAL];

-    SparkSqlJob spark_sql_job = 7;
+    // Optional. Job is a SparkSql job.
+    SparkSqlJob spark_sql_job = 7 [(google.api.field_behavior) = OPTIONAL];

-    // Presto job
-    PrestoJob presto_job = 12;
+    // Optional. Job is a Presto job.
+    PrestoJob presto_job = 12 [(google.api.field_behavior) = OPTIONAL];
   }

   // Optional. The labels to associate with this job.
diff --git a/synth.metadata b/synth.metadata
index f24079fc..610f8a4e 100644
--- a/synth.metadata
+++ b/synth.metadata
@@ -4,23 +4,23 @@
       "git": {
         "name": ".",
         "remote": "https://github.com/googleapis/java-dataproc.git",
-        "sha": "50f0ad0e3f3184d227927c172a1055bbaa8bdcb0"
+        "sha": "8c3bb0b5b4c2fe6b818ae8eb754238dae5468b51"
       }
     },
     {
       "git": {
         "name": "googleapis",
         "remote": "https://github.com/googleapis/googleapis.git",
-        "sha": "0e7900e3950d32ccafae22a2ccb85fa61ffe08f9",
-        "internalRef": "321255718"
+        "sha": "d8a3dfb82f5cae3f1bcdcec7c5726581532da7d5",
+        "internalRef": "323829608"
       }
     },
     {
       "git": {
         "name": "googleapis",
         "remote": "https://github.com/googleapis/googleapis.git",
-        "sha": "0e7900e3950d32ccafae22a2ccb85fa61ffe08f9",
-        "internalRef": "321255718"
+        "sha": "d8a3dfb82f5cae3f1bcdcec7c5726581532da7d5",
+        "internalRef": "323829608"
       }
     },
     {
@@ -260,6 +260,8 @@
         "proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/DiskConfigOrBuilder.java",
         "proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/EncryptionConfig.java",
         "proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/EncryptionConfigOrBuilder.java",
+        "proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/EndpointConfig.java",
+        "proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/EndpointConfigOrBuilder.java",
         "proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/GceClusterConfig.java",
         "proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/GceClusterConfigOrBuilder.java",
         "proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/GetAutoscalingPolicyRequest.java",