diff --git a/google/cloud/dataproc_v1/gapic/cluster_controller_client.py b/google/cloud/dataproc_v1/gapic/cluster_controller_client.py index f849ff06..fdf10818 100644 --- a/google/cloud/dataproc_v1/gapic/cluster_controller_client.py +++ b/google/cloud/dataproc_v1/gapic/cluster_controller_client.py @@ -416,10 +416,12 @@ def update_cluster( message :class:`~google.cloud.dataproc_v1.types.FieldMask` graceful_decommission_timeout (Union[dict, ~google.cloud.dataproc_v1.types.Duration]): Optional. Timeout for graceful YARN decommissioning. Graceful decommissioning allows removing nodes from the cluster without - interrupting jobs in progress. Timeout specifies how long to wait for jobs - in progress to finish before forcefully removing nodes (and potentially - interrupting jobs). Default timeout is 0 (for forceful decommission), and - the maximum allowed timeout is 1 day. + interrupting jobs in progress. Timeout specifies how long to wait for + jobs in progress to finish before forcefully removing nodes (and + potentially interrupting jobs). Default timeout is 0 (for forceful + decommission), and the maximum allowed timeout is 1 day (see JSON + representation of + `Duration <https://developers.google.com/protocol-buffers/docs/proto3#json>`__). Only supported on Dataproc image versions 1.2 and higher. diff --git a/google/cloud/dataproc_v1/gapic/enums.py b/google/cloud/dataproc_v1/gapic/enums.py index 9bbaf2a6..b7d7023c 100644 --- a/google/cloud/dataproc_v1/gapic/enums.py +++ b/google/cloud/dataproc_v1/gapic/enums.py @@ -207,6 +207,25 @@ class Level(enum.IntEnum): OFF = 8 +class ReservationAffinity(object): + class Type(enum.IntEnum): + """ + Indicates whether to consume capacity from a reservation or not. + + Attributes: + TYPE_UNSPECIFIED (int) + NO_RESERVATION (int): Do not consume from any allocated capacity. + ANY_RESERVATION (int): Consume any reservation available. + SPECIFIC_RESERVATION (int): Must consume from a specific reservation. Must specify key and value fields + for identifying the reservation. + """ + + TYPE_UNSPECIFIED = 0 + NO_RESERVATION = 1 + ANY_RESERVATION = 2 + SPECIFIC_RESERVATION = 3 + + class WorkflowMetadata(object): class State(enum.IntEnum): """ diff --git a/google/cloud/dataproc_v1/proto/autoscaling_policies.proto b/google/cloud/dataproc_v1/proto/autoscaling_policies.proto index 51fbc87d..53321d89 100644 --- a/google/cloud/dataproc_v1/proto/autoscaling_policies.proto +++ b/google/cloud/dataproc_v1/proto/autoscaling_policies.proto @@ -1,4 +1,4 @@ -// Copyright 2019 Google LLC. +// Copyright 2020 Google LLC // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -11,7 +11,6 @@ // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. -// syntax = "proto3"; @@ -28,7 +27,6 @@ option go_package = "google.golang.org/genproto/googleapis/cloud/dataproc/v1;dat option java_multiple_files = true; option java_outer_classname = "AutoscalingPoliciesProto"; option java_package = "com.google.cloud.dataproc.v1"; - option (google.api.resource_definition) = { type: "dataproc.googleapis.com/Region" pattern: "projects/{project}/regions/{region}" diff --git a/google/cloud/dataproc_v1/proto/clusters.proto b/google/cloud/dataproc_v1/proto/clusters.proto index bc254589..a20b8f31 100644 --- a/google/cloud/dataproc_v1/proto/clusters.proto +++ b/google/cloud/dataproc_v1/proto/clusters.proto @@ -1,4 +1,4 @@ -// Copyright 2019 Google LLC. 
+// Copyright 2020 Google LLC // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -11,7 +11,6 @@ // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. -// syntax = "proto3"; @@ -20,7 +19,6 @@ package google.cloud.dataproc.v1; import "google/api/annotations.proto"; import "google/api/client.proto"; import "google/api/field_behavior.proto"; -import "google/cloud/dataproc/v1/operations.proto"; import "google/cloud/dataproc/v1/shared.proto"; import "google/longrunning/operations.proto"; import "google/protobuf/duration.proto"; @@ -40,7 +38,7 @@ service ClusterController { // Creates a cluster in a project. The returned // [Operation.metadata][google.longrunning.Operation.metadata] will be - // [ClusterOperationMetadata](/dataproc/docs/reference/rpc/google.cloud.dataproc.v1#clusteroperationmetadata). + // [ClusterOperationMetadata](https://cloud.google.com/dataproc/docs/reference/rpc/google.cloud.dataproc.v1#clusteroperationmetadata). rpc CreateCluster(CreateClusterRequest) returns (google.longrunning.Operation) { option (google.api.http) = { post: "/v1/projects/{project_id}/regions/{region}/clusters" @@ -55,22 +53,22 @@ service ClusterController { // Updates a cluster in a project. The returned // [Operation.metadata][google.longrunning.Operation.metadata] will be - // [ClusterOperationMetadata](/dataproc/docs/reference/rpc/google.cloud.dataproc.v1#clusteroperationmetadata). + // [ClusterOperationMetadata](https://cloud.google.com/dataproc/docs/reference/rpc/google.cloud.dataproc.v1#clusteroperationmetadata). rpc UpdateCluster(UpdateClusterRequest) returns (google.longrunning.Operation) { option (google.api.http) = { patch: "/v1/projects/{project_id}/regions/{region}/clusters/{cluster_name}" body: "cluster" }; + option (google.api.method_signature) = "project_id,region,cluster_name,cluster,update_mask"; option (google.longrunning.operation_info) = { response_type: "Cluster" metadata_type: "google.cloud.dataproc.v1.ClusterOperationMetadata" }; - option (google.api.method_signature) = "project_id,region,cluster_name,cluster,update_mask"; } // Deletes a cluster in a project. The returned // [Operation.metadata][google.longrunning.Operation.metadata] will be - // [ClusterOperationMetadata](/dataproc/docs/reference/rpc/google.cloud.dataproc.v1#clusteroperationmetadata). + // [ClusterOperationMetadata](https://cloud.google.com/dataproc/docs/reference/rpc/google.cloud.dataproc.v1#clusteroperationmetadata). rpc DeleteCluster(DeleteClusterRequest) returns (google.longrunning.Operation) { option (google.api.http) = { delete: "/v1/projects/{project_id}/regions/{region}/clusters/{cluster_name}" @@ -101,11 +99,11 @@ service ClusterController { // Gets cluster diagnostic information. The returned // [Operation.metadata][google.longrunning.Operation.metadata] will be - // [ClusterOperationMetadata](/dataproc/docs/reference/rpc/google.cloud.dataproc.v1#clusteroperationmetadata). + // [ClusterOperationMetadata](https://cloud.google.com/dataproc/docs/reference/rpc/google.cloud.dataproc.v1#clusteroperationmetadata). // After the operation completes, // [Operation.response][google.longrunning.Operation.response] // contains - // [DiagnoseClusterResults](/dataproc/docs/reference/rpc/google.cloud.dataproc.v1#diagnoseclusterresults). 
+ // [DiagnoseClusterResults](https://cloud.google.com/dataproc/docs/reference/rpc/google.cloud.dataproc.v1#diagnoseclusterresults). rpc DiagnoseCluster(DiagnoseClusterRequest) returns (google.longrunning.Operation) { option (google.api.http) = { post: "/v1/projects/{project_id}/regions/{region}/clusters/{cluster_name}:diagnose" @@ -215,6 +213,9 @@ message ClusterConfig { // Optional. Security settings for the cluster. SecurityConfig security_config = 16 [(google.api.field_behavior) = OPTIONAL]; + + // Optional. Lifecycle setting for the cluster. + LifecycleConfig lifecycle_config = 17 [(google.api.field_behavior) = OPTIONAL]; } // Autoscaling Policy config associated with the cluster. @@ -322,9 +323,12 @@ message GceClusterConfig { // [Project and instance // metadata](https://cloud.google.com/compute/docs/storing-retrieving-metadata#project_and_instance_metadata)). map metadata = 5; + + // Optional. Reservation Affinity for consuming Zonal reservation. + ReservationAffinity reservation_affinity = 11 [(google.api.field_behavior) = OPTIONAL]; } -// Optional. The config settings for Compute Engine resources in +// The config settings for Compute Engine resources in // an instance group, such as a master or worker group. message InstanceGroupConfig { // Optional. The number of VM instances in the instance group. @@ -438,7 +442,10 @@ string executable_file = 1 [(google.api.field_behavior) = REQUIRED]; // Optional. Amount of time executable has to complete. Default is - // 10 minutes. Cluster creation fails with an explanatory error message (the + // 10 minutes (see JSON representation of + // [Duration](https://developers.google.com/protocol-buffers/docs/proto3#json)). + // + // Cluster creation fails with an explanatory error message (the // name of the executable that caused the error and the exceeded timeout // period) if the executable is not completed at end of the timeout period. google.protobuf.Duration execution_timeout = 2 [(google.api.field_behavior) = OPTIONAL]; @@ -495,7 +502,8 @@ message ClusterStatus { (google.api.field_behavior) = OPTIONAL ]; - // Output only. Time when this state was entered. + // Output only. Time when this state was entered (see JSON representation of + // [Timestamp](https://developers.google.com/protocol-buffers/docs/proto3#json)). google.protobuf.Timestamp state_start_time = 3 [(google.api.field_behavior) = OUTPUT_ONLY]; // Output only. Additional state information that includes @@ -613,6 +621,36 @@ repeated Component optional_components = 3 [(google.api.field_behavior) = OPTIONAL]; } +// Specifies the cluster auto-delete schedule configuration. +message LifecycleConfig { + // Optional. The duration to keep the cluster alive while idling (when no jobs + // are running). Passing this threshold will cause the cluster to be + // deleted. Minimum value is 10 minutes; maximum value is 14 days (see JSON + // representation of + // [Duration](https://developers.google.com/protocol-buffers/docs/proto3#json)). + google.protobuf.Duration idle_delete_ttl = 1 [(google.api.field_behavior) = OPTIONAL]; + + // Either the exact time the cluster should be deleted at or + // the cluster's maximum age. + oneof ttl { + // Optional. The time when the cluster will be auto-deleted (see JSON representation of + // [Timestamp](https://developers.google.com/protocol-buffers/docs/proto3#json)). + google.protobuf.Timestamp auto_delete_time = 2 [(google.api.field_behavior) = OPTIONAL]; + + // Optional. 
The lifetime duration of the cluster. The cluster will be + // auto-deleted at the end of this period. Minimum value is 10 minutes; + // maximum value is 14 days (see JSON representation of + // [Duration](https://developers.google.com/protocol-buffers/docs/proto3#json)). + google.protobuf.Duration auto_delete_ttl = 3 [(google.api.field_behavior) = OPTIONAL]; + } + + // Output only. The time when the cluster became idle (most recent job finished) + // and became eligible for deletion due to idleness (see JSON representation + // of + // [Timestamp](https://developers.google.com/protocol-buffers/docs/proto3#json)). + google.protobuf.Timestamp idle_start_time = 4 [(google.api.field_behavior) = OUTPUT_ONLY]; +} + // Contains cluster daemon metrics, such as HDFS and YARN stats. // // **Beta Feature**: This report is available for testing purposes only. It may @@ -671,7 +709,8 @@ message UpdateClusterRequest { // interrupting jobs in progress. Timeout specifies how long to wait for jobs // in progress to finish before forcefully removing nodes (and potentially // interrupting jobs). Default timeout is 0 (for forceful decommission), and - // the maximum allowed timeout is 1 day. + // the maximum allowed timeout is 1 day (see JSON representation of + // [Duration](https://developers.google.com/protocol-buffers/docs/proto3#json)). // // Only supported on Dataproc image versions 1.2 and higher. google.protobuf.Duration graceful_decommission_timeout = 6 [(google.api.field_behavior) = OPTIONAL]; @@ -854,3 +893,30 @@ message DiagnoseClusterResults { // diagnostics. string output_uri = 1 [(google.api.field_behavior) = OUTPUT_ONLY]; } + +// Reservation Affinity for consuming Zonal reservation. +message ReservationAffinity { + // Indicates whether to consume capacity from a reservation or not. + enum Type { + TYPE_UNSPECIFIED = 0; + + // Do not consume from any allocated capacity. + NO_RESERVATION = 1; + + // Consume any reservation available. + ANY_RESERVATION = 2; + + // Must consume from a specific reservation. Must specify key and value fields + // for identifying the reservation. + SPECIFIC_RESERVATION = 3; + } + + // Optional. Type of reservation to consume. + Type consume_reservation_type = 1 [(google.api.field_behavior) = OPTIONAL]; + + // Optional. Corresponds to the label key of reservation resource. + string key = 2 [(google.api.field_behavior) = OPTIONAL]; + + // Optional. Corresponds to the label values of reservation resource. 
+ repeated string values = 3 [(google.api.field_behavior) = OPTIONAL]; +} diff --git a/google/cloud/dataproc_v1/proto/clusters_pb2.py b/google/cloud/dataproc_v1/proto/clusters_pb2.py index b4c0aa2b..3c4ebdae 100644 --- a/google/cloud/dataproc_v1/proto/clusters_pb2.py +++ b/google/cloud/dataproc_v1/proto/clusters_pb2.py @@ -18,9 +18,6 @@ from google.api import annotations_pb2 as google_dot_api_dot_annotations__pb2 from google.api import client_pb2 as google_dot_api_dot_client__pb2 from google.api import field_behavior_pb2 as google_dot_api_dot_field__behavior__pb2 -from google.cloud.dataproc_v1.proto import ( - operations_pb2 as google_dot_cloud_dot_dataproc__v1_dot_proto_dot_operations__pb2, -) from google.cloud.dataproc_v1.proto import ( shared_pb2 as google_dot_cloud_dot_dataproc__v1_dot_proto_dot_shared__pb2, ) @@ -40,13 +37,12 @@ "\n\034com.google.cloud.dataproc.v1B\rClustersProtoP\001Z@google.golang.org/genproto/googleapis/cloud/dataproc/v1;dataproc" ), serialized_pb=_b( - '\n-google/cloud/dataproc_v1/proto/clusters.proto\x12\x18google.cloud.dataproc.v1\x1a\x1cgoogle/api/annotations.proto\x1a\x17google/api/client.proto\x1a\x1fgoogle/api/field_behavior.proto\x1a/google/cloud/dataproc_v1/proto/operations.proto\x1a+google/cloud/dataproc_v1/proto/shared.proto\x1a#google/longrunning/operations.proto\x1a\x1egoogle/protobuf/duration.proto\x1a google/protobuf/field_mask.proto\x1a\x1fgoogle/protobuf/timestamp.proto"\xc8\x03\n\x07\x43luster\x12\x17\n\nproject_id\x18\x01 \x01(\tB\x03\xe0\x41\x02\x12\x19\n\x0c\x63luster_name\x18\x02 \x01(\tB\x03\xe0\x41\x02\x12<\n\x06\x63onfig\x18\x03 \x01(\x0b\x32\'.google.cloud.dataproc.v1.ClusterConfigB\x03\xe0\x41\x02\x12\x42\n\x06labels\x18\x08 \x03(\x0b\x32-.google.cloud.dataproc.v1.Cluster.LabelsEntryB\x03\xe0\x41\x01\x12<\n\x06status\x18\x04 \x01(\x0b\x32\'.google.cloud.dataproc.v1.ClusterStatusB\x03\xe0\x41\x03\x12\x44\n\x0estatus_history\x18\x07 \x03(\x0b\x32\'.google.cloud.dataproc.v1.ClusterStatusB\x03\xe0\x41\x03\x12\x19\n\x0c\x63luster_uuid\x18\x06 \x01(\tB\x03\xe0\x41\x03\x12\x39\n\x07metrics\x18\t \x01(\x0b\x32(.google.cloud.dataproc.v1.ClusterMetrics\x1a-\n\x0bLabelsEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t:\x02\x38\x01"\xe6\x05\n\rClusterConfig\x12\x1a\n\rconfig_bucket\x18\x01 \x01(\tB\x03\xe0\x41\x01\x12K\n\x12gce_cluster_config\x18\x08 \x01(\x0b\x32*.google.cloud.dataproc.v1.GceClusterConfigB\x03\xe0\x41\x01\x12I\n\rmaster_config\x18\t \x01(\x0b\x32-.google.cloud.dataproc.v1.InstanceGroupConfigB\x03\xe0\x41\x01\x12I\n\rworker_config\x18\n \x01(\x0b\x32-.google.cloud.dataproc.v1.InstanceGroupConfigB\x03\xe0\x41\x01\x12S\n\x17secondary_worker_config\x18\x0c \x01(\x0b\x32-.google.cloud.dataproc.v1.InstanceGroupConfigB\x03\xe0\x41\x01\x12\x46\n\x0fsoftware_config\x18\r \x01(\x0b\x32(.google.cloud.dataproc.v1.SoftwareConfigB\x03\xe0\x41\x01\x12W\n\x16initialization_actions\x18\x0b \x03(\x0b\x32\x32.google.cloud.dataproc.v1.NodeInitializationActionB\x03\xe0\x41\x01\x12J\n\x11\x65ncryption_config\x18\x0f \x01(\x0b\x32*.google.cloud.dataproc.v1.EncryptionConfigB\x03\xe0\x41\x01\x12L\n\x12\x61utoscaling_config\x18\x12 \x01(\x0b\x32+.google.cloud.dataproc.v1.AutoscalingConfigB\x03\xe0\x41\x01\x12\x46\n\x0fsecurity_config\x18\x10 \x01(\x0b\x32(.google.cloud.dataproc.v1.SecurityConfigB\x03\xe0\x41\x01",\n\x11\x41utoscalingConfig\x12\x17\n\npolicy_uri\x18\x01 \x01(\tB\x03\xe0\x41\x01"4\n\x10\x45ncryptionConfig\x12 \n\x13gce_pd_kms_key_name\x18\x01 
\x01(\tB\x03\xe0\x41\x01"\xcd\x02\n\x10GceClusterConfig\x12\x15\n\x08zone_uri\x18\x01 \x01(\tB\x03\xe0\x41\x01\x12\x18\n\x0bnetwork_uri\x18\x02 \x01(\tB\x03\xe0\x41\x01\x12\x1b\n\x0esubnetwork_uri\x18\x06 \x01(\tB\x03\xe0\x41\x01\x12\x1d\n\x10internal_ip_only\x18\x07 \x01(\x08\x42\x03\xe0\x41\x01\x12\x1c\n\x0fservice_account\x18\x08 \x01(\tB\x03\xe0\x41\x01\x12#\n\x16service_account_scopes\x18\x03 \x03(\tB\x03\xe0\x41\x01\x12\x0c\n\x04tags\x18\x04 \x03(\t\x12J\n\x08metadata\x18\x05 \x03(\x0b\x32\x38.google.cloud.dataproc.v1.GceClusterConfig.MetadataEntry\x1a/\n\rMetadataEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t:\x02\x38\x01"\x9a\x03\n\x13InstanceGroupConfig\x12\x1a\n\rnum_instances\x18\x01 \x01(\x05\x42\x03\xe0\x41\x01\x12\x1b\n\x0einstance_names\x18\x02 \x03(\tB\x03\xe0\x41\x03\x12\x16\n\timage_uri\x18\x03 \x01(\tB\x03\xe0\x41\x01\x12\x1d\n\x10machine_type_uri\x18\x04 \x01(\tB\x03\xe0\x41\x01\x12>\n\x0b\x64isk_config\x18\x05 \x01(\x0b\x32$.google.cloud.dataproc.v1.DiskConfigB\x03\xe0\x41\x01\x12\x1b\n\x0eis_preemptible\x18\x06 \x01(\x08\x42\x03\xe0\x41\x01\x12O\n\x14managed_group_config\x18\x07 \x01(\x0b\x32,.google.cloud.dataproc.v1.ManagedGroupConfigB\x03\xe0\x41\x03\x12\x46\n\x0c\x61\x63\x63\x65lerators\x18\x08 \x03(\x0b\x32+.google.cloud.dataproc.v1.AcceleratorConfigB\x03\xe0\x41\x01\x12\x1d\n\x10min_cpu_platform\x18\t \x01(\tB\x03\xe0\x41\x01"c\n\x12ManagedGroupConfig\x12#\n\x16instance_template_name\x18\x01 \x01(\tB\x03\xe0\x41\x03\x12(\n\x1binstance_group_manager_name\x18\x02 \x01(\tB\x03\xe0\x41\x03"L\n\x11\x41\x63\x63\x65leratorConfig\x12\x1c\n\x14\x61\x63\x63\x65lerator_type_uri\x18\x01 \x01(\t\x12\x19\n\x11\x61\x63\x63\x65lerator_count\x18\x02 \x01(\x05"f\n\nDiskConfig\x12\x1b\n\x0e\x62oot_disk_type\x18\x03 \x01(\tB\x03\xe0\x41\x01\x12\x1e\n\x11\x62oot_disk_size_gb\x18\x01 \x01(\x05\x42\x03\xe0\x41\x01\x12\x1b\n\x0enum_local_ssds\x18\x02 \x01(\x05\x42\x03\xe0\x41\x01"s\n\x18NodeInitializationAction\x12\x1c\n\x0f\x65xecutable_file\x18\x01 \x01(\tB\x03\xe0\x41\x02\x12\x39\n\x11\x65xecution_timeout\x18\x02 \x01(\x0b\x32\x19.google.protobuf.DurationB\x03\xe0\x41\x01"\x84\x03\n\rClusterStatus\x12\x41\n\x05state\x18\x01 \x01(\x0e\x32-.google.cloud.dataproc.v1.ClusterStatus.StateB\x03\xe0\x41\x03\x12\x16\n\x06\x64\x65tail\x18\x02 \x01(\tB\x06\xe0\x41\x03\xe0\x41\x01\x12\x39\n\x10state_start_time\x18\x03 \x01(\x0b\x32\x1a.google.protobuf.TimestampB\x03\xe0\x41\x03\x12G\n\x08substate\x18\x04 \x01(\x0e\x32\x30.google.cloud.dataproc.v1.ClusterStatus.SubstateB\x03\xe0\x41\x03"V\n\x05State\x12\x0b\n\x07UNKNOWN\x10\x00\x12\x0c\n\x08\x43REATING\x10\x01\x12\x0b\n\x07RUNNING\x10\x02\x12\t\n\x05\x45RROR\x10\x03\x12\x0c\n\x08\x44\x45LETING\x10\x04\x12\x0c\n\x08UPDATING\x10\x05"<\n\x08Substate\x12\x0f\n\x0bUNSPECIFIED\x10\x00\x12\r\n\tUNHEALTHY\x10\x01\x12\x10\n\x0cSTALE_STATUS\x10\x02"S\n\x0eSecurityConfig\x12\x41\n\x0fkerberos_config\x18\x01 \x01(\x0b\x32(.google.cloud.dataproc.v1.KerberosConfig"\x90\x04\n\x0eKerberosConfig\x12\x1c\n\x0f\x65nable_kerberos\x18\x01 \x01(\x08\x42\x03\xe0\x41\x01\x12(\n\x1broot_principal_password_uri\x18\x02 \x01(\tB\x03\xe0\x41\x02\x12\x18\n\x0bkms_key_uri\x18\x03 \x01(\tB\x03\xe0\x41\x02\x12\x19\n\x0ckeystore_uri\x18\x04 \x01(\tB\x03\xe0\x41\x01\x12\x1b\n\x0etruststore_uri\x18\x05 \x01(\tB\x03\xe0\x41\x01\x12"\n\x15keystore_password_uri\x18\x06 \x01(\tB\x03\xe0\x41\x01\x12\x1d\n\x10key_password_uri\x18\x07 \x01(\tB\x03\xe0\x41\x01\x12$\n\x17truststore_password_uri\x18\x08 
\x01(\tB\x03\xe0\x41\x01\x12$\n\x17\x63ross_realm_trust_realm\x18\t \x01(\tB\x03\xe0\x41\x01\x12"\n\x15\x63ross_realm_trust_kdc\x18\n \x01(\tB\x03\xe0\x41\x01\x12+\n\x1e\x63ross_realm_trust_admin_server\x18\x0b \x01(\tB\x03\xe0\x41\x01\x12\x32\n%cross_realm_trust_shared_password_uri\x18\x0c \x01(\tB\x03\xe0\x41\x01\x12\x1b\n\x0ekdc_db_key_uri\x18\r \x01(\tB\x03\xe0\x41\x01\x12\x1f\n\x12tgt_lifetime_hours\x18\x0e \x01(\x05\x42\x03\xe0\x41\x01\x12\x12\n\x05realm\x18\x0f \x01(\tB\x03\xe0\x41\x01"\xf9\x01\n\x0eSoftwareConfig\x12\x1a\n\rimage_version\x18\x01 \x01(\tB\x03\xe0\x41\x01\x12Q\n\nproperties\x18\x02 \x03(\x0b\x32\x38.google.cloud.dataproc.v1.SoftwareConfig.PropertiesEntryB\x03\xe0\x41\x01\x12\x45\n\x13optional_components\x18\x03 \x03(\x0e\x32#.google.cloud.dataproc.v1.ComponentB\x03\xe0\x41\x01\x1a\x31\n\x0fPropertiesEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t:\x02\x38\x01"\x9a\x02\n\x0e\x43lusterMetrics\x12O\n\x0chdfs_metrics\x18\x01 \x03(\x0b\x32\x39.google.cloud.dataproc.v1.ClusterMetrics.HdfsMetricsEntry\x12O\n\x0cyarn_metrics\x18\x02 \x03(\x0b\x32\x39.google.cloud.dataproc.v1.ClusterMetrics.YarnMetricsEntry\x1a\x32\n\x10HdfsMetricsEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\x03:\x02\x38\x01\x1a\x32\n\x10YarnMetricsEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\x03:\x02\x38\x01"\x96\x01\n\x14\x43reateClusterRequest\x12\x17\n\nproject_id\x18\x01 \x01(\tB\x03\xe0\x41\x02\x12\x13\n\x06region\x18\x03 \x01(\tB\x03\xe0\x41\x02\x12\x37\n\x07\x63luster\x18\x02 \x01(\x0b\x32!.google.cloud.dataproc.v1.ClusterB\x03\xe0\x41\x02\x12\x17\n\nrequest_id\x18\x04 \x01(\tB\x03\xe0\x41\x01"\xae\x02\n\x14UpdateClusterRequest\x12\x17\n\nproject_id\x18\x01 \x01(\tB\x03\xe0\x41\x02\x12\x13\n\x06region\x18\x05 \x01(\tB\x03\xe0\x41\x02\x12\x19\n\x0c\x63luster_name\x18\x02 \x01(\tB\x03\xe0\x41\x02\x12\x37\n\x07\x63luster\x18\x03 \x01(\x0b\x32!.google.cloud.dataproc.v1.ClusterB\x03\xe0\x41\x02\x12\x45\n\x1dgraceful_decommission_timeout\x18\x06 \x01(\x0b\x32\x19.google.protobuf.DurationB\x03\xe0\x41\x01\x12\x34\n\x0bupdate_mask\x18\x04 \x01(\x0b\x32\x1a.google.protobuf.FieldMaskB\x03\xe0\x41\x02\x12\x17\n\nrequest_id\x18\x07 \x01(\tB\x03\xe0\x41\x01"\x93\x01\n\x14\x44\x65leteClusterRequest\x12\x17\n\nproject_id\x18\x01 \x01(\tB\x03\xe0\x41\x02\x12\x13\n\x06region\x18\x03 \x01(\tB\x03\xe0\x41\x02\x12\x19\n\x0c\x63luster_name\x18\x02 \x01(\tB\x03\xe0\x41\x02\x12\x19\n\x0c\x63luster_uuid\x18\x04 \x01(\tB\x03\xe0\x41\x01\x12\x17\n\nrequest_id\x18\x05 \x01(\tB\x03\xe0\x41\x01"\\\n\x11GetClusterRequest\x12\x17\n\nproject_id\x18\x01 \x01(\tB\x03\xe0\x41\x02\x12\x13\n\x06region\x18\x03 \x01(\tB\x03\xe0\x41\x02\x12\x19\n\x0c\x63luster_name\x18\x02 \x01(\tB\x03\xe0\x41\x02"\x89\x01\n\x13ListClustersRequest\x12\x17\n\nproject_id\x18\x01 \x01(\tB\x03\xe0\x41\x02\x12\x13\n\x06region\x18\x04 \x01(\tB\x03\xe0\x41\x02\x12\x13\n\x06\x66ilter\x18\x05 \x01(\tB\x03\xe0\x41\x01\x12\x16\n\tpage_size\x18\x02 \x01(\x05\x42\x03\xe0\x41\x01\x12\x17\n\npage_token\x18\x03 \x01(\tB\x03\xe0\x41\x01"n\n\x14ListClustersResponse\x12\x38\n\x08\x63lusters\x18\x01 \x03(\x0b\x32!.google.cloud.dataproc.v1.ClusterB\x03\xe0\x41\x03\x12\x1c\n\x0fnext_page_token\x18\x02 \x01(\tB\x03\xe0\x41\x03"a\n\x16\x44iagnoseClusterRequest\x12\x17\n\nproject_id\x18\x01 \x01(\tB\x03\xe0\x41\x02\x12\x13\n\x06region\x18\x03 \x01(\tB\x03\xe0\x41\x02\x12\x19\n\x0c\x63luster_name\x18\x02 \x01(\tB\x03\xe0\x41\x02"1\n\x16\x44iagnoseClusterResults\x12\x17\n\noutput_uri\x18\x01 
\x01(\tB\x03\xe0\x41\x03\x32\xe3\x0c\n\x11\x43lusterController\x12\x80\x02\n\rCreateCluster\x12..google.cloud.dataproc.v1.CreateClusterRequest\x1a\x1d.google.longrunning.Operation"\x9f\x01\x82\xd3\xe4\x93\x02>"3/v1/projects/{project_id}/regions/{region}/clusters:\x07\x63luster\xda\x41\x19project_id,region,cluster\xca\x41<\n\x07\x43luster\x12\x31google.cloud.dataproc.v1.ClusterOperationMetadata\x12\xa8\x02\n\rUpdateCluster\x12..google.cloud.dataproc.v1.UpdateClusterRequest\x1a\x1d.google.longrunning.Operation"\xc7\x01\x82\xd3\xe4\x93\x02M2B/v1/projects/{project_id}/regions/{region}/clusters/{cluster_name}:\x07\x63luster\xca\x41<\n\x07\x43luster\x12\x31google.cloud.dataproc.v1.ClusterOperationMetadata\xda\x41\x32project_id,region,cluster_name,cluster,update_mask\x12\x99\x02\n\rDeleteCluster\x12..google.cloud.dataproc.v1.DeleteClusterRequest\x1a\x1d.google.longrunning.Operation"\xb8\x01\x82\xd3\xe4\x93\x02\x44*B/v1/projects/{project_id}/regions/{region}/clusters/{cluster_name}\xda\x41\x1eproject_id,region,cluster_name\xca\x41J\n\x15google.protobuf.Empty\x12\x31google.cloud.dataproc.v1.ClusterOperationMetadata\x12\xc9\x01\n\nGetCluster\x12+.google.cloud.dataproc.v1.GetClusterRequest\x1a!.google.cloud.dataproc.v1.Cluster"k\x82\xd3\xe4\x93\x02\x44\x12\x42/v1/projects/{project_id}/regions/{region}/clusters/{cluster_name}\xda\x41\x1eproject_id,region,cluster_name\x12\xd9\x01\n\x0cListClusters\x12-.google.cloud.dataproc.v1.ListClustersRequest\x1a..google.cloud.dataproc.v1.ListClustersResponse"j\x82\xd3\xe4\x93\x02\x35\x12\x33/v1/projects/{project_id}/regions/{region}/clusters\xda\x41\x11project_id,region\xda\x41\x18project_id,region,filter\x12\x8e\x02\n\x0f\x44iagnoseCluster\x12\x30.google.cloud.dataproc.v1.DiagnoseClusterRequest\x1a\x1d.google.longrunning.Operation"\xa9\x01\x82\xd3\xe4\x93\x02P"K/v1/projects/{project_id}/regions/{region}/clusters/{cluster_name}:diagnose:\x01*\xda\x41\x1eproject_id,region,cluster_name\xca\x41/\n\x15google.protobuf.Empty\x12\x16\x44iagnoseClusterResults\x1aK\xca\x41\x17\x64\x61taproc.googleapis.com\xd2\x41.https://www.googleapis.com/auth/cloud-platformBq\n\x1c\x63om.google.cloud.dataproc.v1B\rClustersProtoP\x01Z@google.golang.org/genproto/googleapis/cloud/dataproc/v1;dataprocb\x06proto3' + '\n-google/cloud/dataproc_v1/proto/clusters.proto\x12\x18google.cloud.dataproc.v1\x1a\x1cgoogle/api/annotations.proto\x1a\x17google/api/client.proto\x1a\x1fgoogle/api/field_behavior.proto\x1a+google/cloud/dataproc_v1/proto/shared.proto\x1a#google/longrunning/operations.proto\x1a\x1egoogle/protobuf/duration.proto\x1a google/protobuf/field_mask.proto\x1a\x1fgoogle/protobuf/timestamp.proto"\xc8\x03\n\x07\x43luster\x12\x17\n\nproject_id\x18\x01 \x01(\tB\x03\xe0\x41\x02\x12\x19\n\x0c\x63luster_name\x18\x02 \x01(\tB\x03\xe0\x41\x02\x12<\n\x06\x63onfig\x18\x03 \x01(\x0b\x32\'.google.cloud.dataproc.v1.ClusterConfigB\x03\xe0\x41\x02\x12\x42\n\x06labels\x18\x08 \x03(\x0b\x32-.google.cloud.dataproc.v1.Cluster.LabelsEntryB\x03\xe0\x41\x01\x12<\n\x06status\x18\x04 \x01(\x0b\x32\'.google.cloud.dataproc.v1.ClusterStatusB\x03\xe0\x41\x03\x12\x44\n\x0estatus_history\x18\x07 \x03(\x0b\x32\'.google.cloud.dataproc.v1.ClusterStatusB\x03\xe0\x41\x03\x12\x19\n\x0c\x63luster_uuid\x18\x06 \x01(\tB\x03\xe0\x41\x03\x12\x39\n\x07metrics\x18\t \x01(\x0b\x32(.google.cloud.dataproc.v1.ClusterMetrics\x1a-\n\x0bLabelsEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t:\x02\x38\x01"\xb0\x06\n\rClusterConfig\x12\x1a\n\rconfig_bucket\x18\x01 
\x01(\tB\x03\xe0\x41\x01\x12K\n\x12gce_cluster_config\x18\x08 \x01(\x0b\x32*.google.cloud.dataproc.v1.GceClusterConfigB\x03\xe0\x41\x01\x12I\n\rmaster_config\x18\t \x01(\x0b\x32-.google.cloud.dataproc.v1.InstanceGroupConfigB\x03\xe0\x41\x01\x12I\n\rworker_config\x18\n \x01(\x0b\x32-.google.cloud.dataproc.v1.InstanceGroupConfigB\x03\xe0\x41\x01\x12S\n\x17secondary_worker_config\x18\x0c \x01(\x0b\x32-.google.cloud.dataproc.v1.InstanceGroupConfigB\x03\xe0\x41\x01\x12\x46\n\x0fsoftware_config\x18\r \x01(\x0b\x32(.google.cloud.dataproc.v1.SoftwareConfigB\x03\xe0\x41\x01\x12W\n\x16initialization_actions\x18\x0b \x03(\x0b\x32\x32.google.cloud.dataproc.v1.NodeInitializationActionB\x03\xe0\x41\x01\x12J\n\x11\x65ncryption_config\x18\x0f \x01(\x0b\x32*.google.cloud.dataproc.v1.EncryptionConfigB\x03\xe0\x41\x01\x12L\n\x12\x61utoscaling_config\x18\x12 \x01(\x0b\x32+.google.cloud.dataproc.v1.AutoscalingConfigB\x03\xe0\x41\x01\x12\x46\n\x0fsecurity_config\x18\x10 \x01(\x0b\x32(.google.cloud.dataproc.v1.SecurityConfigB\x03\xe0\x41\x01\x12H\n\x10lifecycle_config\x18\x11 \x01(\x0b\x32).google.cloud.dataproc.v1.LifecycleConfigB\x03\xe0\x41\x01",\n\x11\x41utoscalingConfig\x12\x17\n\npolicy_uri\x18\x01 \x01(\tB\x03\xe0\x41\x01"4\n\x10\x45ncryptionConfig\x12 \n\x13gce_pd_kms_key_name\x18\x01 \x01(\tB\x03\xe0\x41\x01"\x9f\x03\n\x10GceClusterConfig\x12\x15\n\x08zone_uri\x18\x01 \x01(\tB\x03\xe0\x41\x01\x12\x18\n\x0bnetwork_uri\x18\x02 \x01(\tB\x03\xe0\x41\x01\x12\x1b\n\x0esubnetwork_uri\x18\x06 \x01(\tB\x03\xe0\x41\x01\x12\x1d\n\x10internal_ip_only\x18\x07 \x01(\x08\x42\x03\xe0\x41\x01\x12\x1c\n\x0fservice_account\x18\x08 \x01(\tB\x03\xe0\x41\x01\x12#\n\x16service_account_scopes\x18\x03 \x03(\tB\x03\xe0\x41\x01\x12\x0c\n\x04tags\x18\x04 \x03(\t\x12J\n\x08metadata\x18\x05 \x03(\x0b\x32\x38.google.cloud.dataproc.v1.GceClusterConfig.MetadataEntry\x12P\n\x14reservation_affinity\x18\x0b \x01(\x0b\x32-.google.cloud.dataproc.v1.ReservationAffinityB\x03\xe0\x41\x01\x1a/\n\rMetadataEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t:\x02\x38\x01"\x9a\x03\n\x13InstanceGroupConfig\x12\x1a\n\rnum_instances\x18\x01 \x01(\x05\x42\x03\xe0\x41\x01\x12\x1b\n\x0einstance_names\x18\x02 \x03(\tB\x03\xe0\x41\x03\x12\x16\n\timage_uri\x18\x03 \x01(\tB\x03\xe0\x41\x01\x12\x1d\n\x10machine_type_uri\x18\x04 \x01(\tB\x03\xe0\x41\x01\x12>\n\x0b\x64isk_config\x18\x05 \x01(\x0b\x32$.google.cloud.dataproc.v1.DiskConfigB\x03\xe0\x41\x01\x12\x1b\n\x0eis_preemptible\x18\x06 \x01(\x08\x42\x03\xe0\x41\x01\x12O\n\x14managed_group_config\x18\x07 \x01(\x0b\x32,.google.cloud.dataproc.v1.ManagedGroupConfigB\x03\xe0\x41\x03\x12\x46\n\x0c\x61\x63\x63\x65lerators\x18\x08 \x03(\x0b\x32+.google.cloud.dataproc.v1.AcceleratorConfigB\x03\xe0\x41\x01\x12\x1d\n\x10min_cpu_platform\x18\t \x01(\tB\x03\xe0\x41\x01"c\n\x12ManagedGroupConfig\x12#\n\x16instance_template_name\x18\x01 \x01(\tB\x03\xe0\x41\x03\x12(\n\x1binstance_group_manager_name\x18\x02 \x01(\tB\x03\xe0\x41\x03"L\n\x11\x41\x63\x63\x65leratorConfig\x12\x1c\n\x14\x61\x63\x63\x65lerator_type_uri\x18\x01 \x01(\t\x12\x19\n\x11\x61\x63\x63\x65lerator_count\x18\x02 \x01(\x05"f\n\nDiskConfig\x12\x1b\n\x0e\x62oot_disk_type\x18\x03 \x01(\tB\x03\xe0\x41\x01\x12\x1e\n\x11\x62oot_disk_size_gb\x18\x01 \x01(\x05\x42\x03\xe0\x41\x01\x12\x1b\n\x0enum_local_ssds\x18\x02 \x01(\x05\x42\x03\xe0\x41\x01"s\n\x18NodeInitializationAction\x12\x1c\n\x0f\x65xecutable_file\x18\x01 \x01(\tB\x03\xe0\x41\x02\x12\x39\n\x11\x65xecution_timeout\x18\x02 
\x01(\x0b\x32\x19.google.protobuf.DurationB\x03\xe0\x41\x01"\x84\x03\n\rClusterStatus\x12\x41\n\x05state\x18\x01 \x01(\x0e\x32-.google.cloud.dataproc.v1.ClusterStatus.StateB\x03\xe0\x41\x03\x12\x16\n\x06\x64\x65tail\x18\x02 \x01(\tB\x06\xe0\x41\x03\xe0\x41\x01\x12\x39\n\x10state_start_time\x18\x03 \x01(\x0b\x32\x1a.google.protobuf.TimestampB\x03\xe0\x41\x03\x12G\n\x08substate\x18\x04 \x01(\x0e\x32\x30.google.cloud.dataproc.v1.ClusterStatus.SubstateB\x03\xe0\x41\x03"V\n\x05State\x12\x0b\n\x07UNKNOWN\x10\x00\x12\x0c\n\x08\x43REATING\x10\x01\x12\x0b\n\x07RUNNING\x10\x02\x12\t\n\x05\x45RROR\x10\x03\x12\x0c\n\x08\x44\x45LETING\x10\x04\x12\x0c\n\x08UPDATING\x10\x05"<\n\x08Substate\x12\x0f\n\x0bUNSPECIFIED\x10\x00\x12\r\n\tUNHEALTHY\x10\x01\x12\x10\n\x0cSTALE_STATUS\x10\x02"S\n\x0eSecurityConfig\x12\x41\n\x0fkerberos_config\x18\x01 \x01(\x0b\x32(.google.cloud.dataproc.v1.KerberosConfig"\x90\x04\n\x0eKerberosConfig\x12\x1c\n\x0f\x65nable_kerberos\x18\x01 \x01(\x08\x42\x03\xe0\x41\x01\x12(\n\x1broot_principal_password_uri\x18\x02 \x01(\tB\x03\xe0\x41\x02\x12\x18\n\x0bkms_key_uri\x18\x03 \x01(\tB\x03\xe0\x41\x02\x12\x19\n\x0ckeystore_uri\x18\x04 \x01(\tB\x03\xe0\x41\x01\x12\x1b\n\x0etruststore_uri\x18\x05 \x01(\tB\x03\xe0\x41\x01\x12"\n\x15keystore_password_uri\x18\x06 \x01(\tB\x03\xe0\x41\x01\x12\x1d\n\x10key_password_uri\x18\x07 \x01(\tB\x03\xe0\x41\x01\x12$\n\x17truststore_password_uri\x18\x08 \x01(\tB\x03\xe0\x41\x01\x12$\n\x17\x63ross_realm_trust_realm\x18\t \x01(\tB\x03\xe0\x41\x01\x12"\n\x15\x63ross_realm_trust_kdc\x18\n \x01(\tB\x03\xe0\x41\x01\x12+\n\x1e\x63ross_realm_trust_admin_server\x18\x0b \x01(\tB\x03\xe0\x41\x01\x12\x32\n%cross_realm_trust_shared_password_uri\x18\x0c \x01(\tB\x03\xe0\x41\x01\x12\x1b\n\x0ekdc_db_key_uri\x18\r \x01(\tB\x03\xe0\x41\x01\x12\x1f\n\x12tgt_lifetime_hours\x18\x0e \x01(\x05\x42\x03\xe0\x41\x01\x12\x12\n\x05realm\x18\x0f \x01(\tB\x03\xe0\x41\x01"\xf9\x01\n\x0eSoftwareConfig\x12\x1a\n\rimage_version\x18\x01 \x01(\tB\x03\xe0\x41\x01\x12Q\n\nproperties\x18\x02 \x03(\x0b\x32\x38.google.cloud.dataproc.v1.SoftwareConfig.PropertiesEntryB\x03\xe0\x41\x01\x12\x45\n\x13optional_components\x18\x03 \x03(\x0e\x32#.google.cloud.dataproc.v1.ComponentB\x03\xe0\x41\x01\x1a\x31\n\x0fPropertiesEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t:\x02\x38\x01"\x83\x02\n\x0fLifecycleConfig\x12\x37\n\x0fidle_delete_ttl\x18\x01 \x01(\x0b\x32\x19.google.protobuf.DurationB\x03\xe0\x41\x01\x12;\n\x10\x61uto_delete_time\x18\x02 \x01(\x0b\x32\x1a.google.protobuf.TimestampB\x03\xe0\x41\x01H\x00\x12\x39\n\x0f\x61uto_delete_ttl\x18\x03 \x01(\x0b\x32\x19.google.protobuf.DurationB\x03\xe0\x41\x01H\x00\x12\x38\n\x0fidle_start_time\x18\x04 \x01(\x0b\x32\x1a.google.protobuf.TimestampB\x03\xe0\x41\x03\x42\x05\n\x03ttl"\x9a\x02\n\x0e\x43lusterMetrics\x12O\n\x0chdfs_metrics\x18\x01 \x03(\x0b\x32\x39.google.cloud.dataproc.v1.ClusterMetrics.HdfsMetricsEntry\x12O\n\x0cyarn_metrics\x18\x02 \x03(\x0b\x32\x39.google.cloud.dataproc.v1.ClusterMetrics.YarnMetricsEntry\x1a\x32\n\x10HdfsMetricsEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\x03:\x02\x38\x01\x1a\x32\n\x10YarnMetricsEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\x03:\x02\x38\x01"\x96\x01\n\x14\x43reateClusterRequest\x12\x17\n\nproject_id\x18\x01 \x01(\tB\x03\xe0\x41\x02\x12\x13\n\x06region\x18\x03 \x01(\tB\x03\xe0\x41\x02\x12\x37\n\x07\x63luster\x18\x02 \x01(\x0b\x32!.google.cloud.dataproc.v1.ClusterB\x03\xe0\x41\x02\x12\x17\n\nrequest_id\x18\x04 
\x01(\tB\x03\xe0\x41\x01"\xae\x02\n\x14UpdateClusterRequest\x12\x17\n\nproject_id\x18\x01 \x01(\tB\x03\xe0\x41\x02\x12\x13\n\x06region\x18\x05 \x01(\tB\x03\xe0\x41\x02\x12\x19\n\x0c\x63luster_name\x18\x02 \x01(\tB\x03\xe0\x41\x02\x12\x37\n\x07\x63luster\x18\x03 \x01(\x0b\x32!.google.cloud.dataproc.v1.ClusterB\x03\xe0\x41\x02\x12\x45\n\x1dgraceful_decommission_timeout\x18\x06 \x01(\x0b\x32\x19.google.protobuf.DurationB\x03\xe0\x41\x01\x12\x34\n\x0bupdate_mask\x18\x04 \x01(\x0b\x32\x1a.google.protobuf.FieldMaskB\x03\xe0\x41\x02\x12\x17\n\nrequest_id\x18\x07 \x01(\tB\x03\xe0\x41\x01"\x93\x01\n\x14\x44\x65leteClusterRequest\x12\x17\n\nproject_id\x18\x01 \x01(\tB\x03\xe0\x41\x02\x12\x13\n\x06region\x18\x03 \x01(\tB\x03\xe0\x41\x02\x12\x19\n\x0c\x63luster_name\x18\x02 \x01(\tB\x03\xe0\x41\x02\x12\x19\n\x0c\x63luster_uuid\x18\x04 \x01(\tB\x03\xe0\x41\x01\x12\x17\n\nrequest_id\x18\x05 \x01(\tB\x03\xe0\x41\x01"\\\n\x11GetClusterRequest\x12\x17\n\nproject_id\x18\x01 \x01(\tB\x03\xe0\x41\x02\x12\x13\n\x06region\x18\x03 \x01(\tB\x03\xe0\x41\x02\x12\x19\n\x0c\x63luster_name\x18\x02 \x01(\tB\x03\xe0\x41\x02"\x89\x01\n\x13ListClustersRequest\x12\x17\n\nproject_id\x18\x01 \x01(\tB\x03\xe0\x41\x02\x12\x13\n\x06region\x18\x04 \x01(\tB\x03\xe0\x41\x02\x12\x13\n\x06\x66ilter\x18\x05 \x01(\tB\x03\xe0\x41\x01\x12\x16\n\tpage_size\x18\x02 \x01(\x05\x42\x03\xe0\x41\x01\x12\x17\n\npage_token\x18\x03 \x01(\tB\x03\xe0\x41\x01"n\n\x14ListClustersResponse\x12\x38\n\x08\x63lusters\x18\x01 \x03(\x0b\x32!.google.cloud.dataproc.v1.ClusterB\x03\xe0\x41\x03\x12\x1c\n\x0fnext_page_token\x18\x02 \x01(\tB\x03\xe0\x41\x03"a\n\x16\x44iagnoseClusterRequest\x12\x17\n\nproject_id\x18\x01 \x01(\tB\x03\xe0\x41\x02\x12\x13\n\x06region\x18\x03 \x01(\tB\x03\xe0\x41\x02\x12\x19\n\x0c\x63luster_name\x18\x02 \x01(\tB\x03\xe0\x41\x02"1\n\x16\x44iagnoseClusterResults\x12\x17\n\noutput_uri\x18\x01 \x01(\tB\x03\xe0\x41\x03"\xf8\x01\n\x13ReservationAffinity\x12Y\n\x18\x63onsume_reservation_type\x18\x01 \x01(\x0e\x32\x32.google.cloud.dataproc.v1.ReservationAffinity.TypeB\x03\xe0\x41\x01\x12\x10\n\x03key\x18\x02 \x01(\tB\x03\xe0\x41\x01\x12\x13\n\x06values\x18\x03 
\x03(\tB\x03\xe0\x41\x01"_\n\x04Type\x12\x14\n\x10TYPE_UNSPECIFIED\x10\x00\x12\x12\n\x0eNO_RESERVATION\x10\x01\x12\x13\n\x0f\x41NY_RESERVATION\x10\x02\x12\x18\n\x14SPECIFIC_RESERVATION\x10\x03\x32\xe3\x0c\n\x11\x43lusterController\x12\x80\x02\n\rCreateCluster\x12..google.cloud.dataproc.v1.CreateClusterRequest\x1a\x1d.google.longrunning.Operation"\x9f\x01\x82\xd3\xe4\x93\x02>"3/v1/projects/{project_id}/regions/{region}/clusters:\x07\x63luster\xda\x41\x19project_id,region,cluster\xca\x41<\n\x07\x43luster\x12\x31google.cloud.dataproc.v1.ClusterOperationMetadata\x12\xa8\x02\n\rUpdateCluster\x12..google.cloud.dataproc.v1.UpdateClusterRequest\x1a\x1d.google.longrunning.Operation"\xc7\x01\x82\xd3\xe4\x93\x02M2B/v1/projects/{project_id}/regions/{region}/clusters/{cluster_name}:\x07\x63luster\xda\x41\x32project_id,region,cluster_name,cluster,update_mask\xca\x41<\n\x07\x43luster\x12\x31google.cloud.dataproc.v1.ClusterOperationMetadata\x12\x99\x02\n\rDeleteCluster\x12..google.cloud.dataproc.v1.DeleteClusterRequest\x1a\x1d.google.longrunning.Operation"\xb8\x01\x82\xd3\xe4\x93\x02\x44*B/v1/projects/{project_id}/regions/{region}/clusters/{cluster_name}\xda\x41\x1eproject_id,region,cluster_name\xca\x41J\n\x15google.protobuf.Empty\x12\x31google.cloud.dataproc.v1.ClusterOperationMetadata\x12\xc9\x01\n\nGetCluster\x12+.google.cloud.dataproc.v1.GetClusterRequest\x1a!.google.cloud.dataproc.v1.Cluster"k\x82\xd3\xe4\x93\x02\x44\x12\x42/v1/projects/{project_id}/regions/{region}/clusters/{cluster_name}\xda\x41\x1eproject_id,region,cluster_name\x12\xd9\x01\n\x0cListClusters\x12-.google.cloud.dataproc.v1.ListClustersRequest\x1a..google.cloud.dataproc.v1.ListClustersResponse"j\x82\xd3\xe4\x93\x02\x35\x12\x33/v1/projects/{project_id}/regions/{region}/clusters\xda\x41\x11project_id,region\xda\x41\x18project_id,region,filter\x12\x8e\x02\n\x0f\x44iagnoseCluster\x12\x30.google.cloud.dataproc.v1.DiagnoseClusterRequest\x1a\x1d.google.longrunning.Operation"\xa9\x01\x82\xd3\xe4\x93\x02P"K/v1/projects/{project_id}/regions/{region}/clusters/{cluster_name}:diagnose:\x01*\xda\x41\x1eproject_id,region,cluster_name\xca\x41/\n\x15google.protobuf.Empty\x12\x16\x44iagnoseClusterResults\x1aK\xca\x41\x17\x64\x61taproc.googleapis.com\xd2\x41.https://www.googleapis.com/auth/cloud-platformBq\n\x1c\x63om.google.cloud.dataproc.v1B\rClustersProtoP\x01Z@google.golang.org/genproto/googleapis/cloud/dataproc/v1;dataprocb\x06proto3' ), dependencies=[ google_dot_api_dot_annotations__pb2.DESCRIPTOR, google_dot_api_dot_client__pb2.DESCRIPTOR, google_dot_api_dot_field__behavior__pb2.DESCRIPTOR, - google_dot_cloud_dot_dataproc__v1_dot_proto_dot_operations__pb2.DESCRIPTOR, google_dot_cloud_dot_dataproc__v1_dot_proto_dot_shared__pb2.DESCRIPTOR, google_dot_longrunning_dot_operations__pb2.DESCRIPTOR, google_dot_protobuf_dot_duration__pb2.DESCRIPTOR, @@ -83,8 +79,8 @@ ], containing_type=None, serialized_options=None, - serialized_start=3087, - serialized_end=3173, + serialized_start=3194, + serialized_end=3280, ) _sym_db.RegisterEnumDescriptor(_CLUSTERSTATUS_STATE) @@ -106,11 +102,49 @@ ], containing_type=None, serialized_options=None, - serialized_start=3175, - serialized_end=3235, + serialized_start=3282, + serialized_end=3342, ) _sym_db.RegisterEnumDescriptor(_CLUSTERSTATUS_SUBSTATE) +_RESERVATIONAFFINITY_TYPE = _descriptor.EnumDescriptor( + name="Type", + full_name="google.cloud.dataproc.v1.ReservationAffinity.Type", + filename=None, + file=DESCRIPTOR, + values=[ + _descriptor.EnumValueDescriptor( + name="TYPE_UNSPECIFIED", + index=0, + number=0, + 
serialized_options=None, + type=None, + ), + _descriptor.EnumValueDescriptor( + name="NO_RESERVATION", index=1, number=1, serialized_options=None, type=None + ), + _descriptor.EnumValueDescriptor( + name="ANY_RESERVATION", + index=2, + number=2, + serialized_options=None, + type=None, + ), + _descriptor.EnumValueDescriptor( + name="SPECIFIC_RESERVATION", + index=3, + number=3, + serialized_options=None, + type=None, + ), + ], + containing_type=None, + serialized_options=None, + serialized_start=6017, + serialized_end=6112, +) +_sym_db.RegisterEnumDescriptor(_RESERVATIONAFFINITY_TYPE) + _CLUSTER_LABELSENTRY = _descriptor.Descriptor( name="LabelsEntry", @@ -164,8 +198,8 @@ syntax="proto3", extension_ranges=[], oneofs=[], - serialized_start=805, - serialized_end=850, + serialized_start=756, + serialized_end=801, ) _CLUSTER = _descriptor.Descriptor( @@ -328,8 +362,8 @@ syntax="proto3", extension_ranges=[], oneofs=[], - serialized_start=394, - serialized_end=850, + serialized_start=345, + serialized_end=801, ) @@ -520,6 +554,24 @@ serialized_options=_b("\340A\001"), file=DESCRIPTOR, ), + _descriptor.FieldDescriptor( + name="lifecycle_config", + full_name="google.cloud.dataproc.v1.ClusterConfig.lifecycle_config", + index=10, + number=17, + type=11, + cpp_type=10, + label=1, + has_default_value=False, + default_value=None, + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + serialized_options=_b("\340A\001"), + file=DESCRIPTOR, + ), ], extensions=[], nested_types=[], @@ -529,8 +581,8 @@ syntax="proto3", extension_ranges=[], oneofs=[], - serialized_start=853, - serialized_end=1595, + serialized_start=804, + serialized_end=1620, ) @@ -568,8 +620,8 @@ syntax="proto3", extension_ranges=[], oneofs=[], - serialized_start=1597, - serialized_end=1641, + serialized_start=1622, + serialized_end=1666, ) @@ -607,8 +659,8 @@ syntax="proto3", extension_ranges=[], oneofs=[], - serialized_start=1643, - serialized_end=1695, + serialized_start=1668, + serialized_end=1720, ) @@ -664,8 +716,8 @@ syntax="proto3", extension_ranges=[], oneofs=[], - serialized_start=1984, - serialized_end=2031, + serialized_start=2091, + serialized_end=2138, ) _GCECLUSTERCONFIG = _descriptor.Descriptor( @@ -819,6 +871,24 @@ serialized_options=None, file=DESCRIPTOR, ), + _descriptor.FieldDescriptor( + name="reservation_affinity", + full_name="google.cloud.dataproc.v1.GceClusterConfig.reservation_affinity", + index=8, + number=11, + type=11, + cpp_type=10, + label=1, + has_default_value=False, + default_value=None, + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + serialized_options=_b("\340A\001"), + file=DESCRIPTOR, + ), ], extensions=[], nested_types=[_GCECLUSTERCONFIG_METADATAENTRY], @@ -828,8 +898,8 @@ syntax="proto3", extension_ranges=[], oneofs=[], - serialized_start=1698, - serialized_end=2031, + serialized_start=1723, + serialized_end=2138, ) @@ -1011,8 +1081,8 @@ syntax="proto3", extension_ranges=[], oneofs=[], - serialized_start=2034, - serialized_end=2444, + serialized_start=2141, + serialized_end=2551, ) @@ -1068,8 +1138,8 @@ syntax="proto3", extension_ranges=[], oneofs=[], - serialized_start=2446, - serialized_end=2545, + serialized_start=2553, + serialized_end=2652, ) @@ -1125,8 +1195,8 @@ syntax="proto3", extension_ranges=[], oneofs=[], - serialized_start=2547, - serialized_end=2623, + serialized_start=2654, + serialized_end=2730, ) @@ -1200,8 +1270,8 @@ syntax="proto3", extension_ranges=[], oneofs=[], - 
serialized_start=2625, - serialized_end=2727, + serialized_start=2732, + serialized_end=2834, ) @@ -1257,8 +1327,8 @@ syntax="proto3", extension_ranges=[], oneofs=[], - serialized_start=2729, - serialized_end=2844, + serialized_start=2836, + serialized_end=2951, ) @@ -1350,8 +1420,8 @@ syntax="proto3", extension_ranges=[], oneofs=[], - serialized_start=2847, - serialized_end=3235, + serialized_start=2954, + serialized_end=3342, ) @@ -1389,8 +1459,8 @@ syntax="proto3", extension_ranges=[], oneofs=[], - serialized_start=3237, - serialized_end=3320, + serialized_start=3344, + serialized_end=3427, ) @@ -1680,8 +1750,8 @@ syntax="proto3", extension_ranges=[], oneofs=[], - serialized_start=3323, - serialized_end=3851, + serialized_start=3430, + serialized_end=3958, ) @@ -1737,8 +1807,8 @@ syntax="proto3", extension_ranges=[], oneofs=[], - serialized_start=4054, - serialized_end=4103, + serialized_start=4161, + serialized_end=4210, ) _SOFTWARECONFIG = _descriptor.Descriptor( @@ -1811,8 +1881,109 @@ syntax="proto3", extension_ranges=[], oneofs=[], - serialized_start=3854, - serialized_end=4103, + serialized_start=3961, + serialized_end=4210, +) + + +_LIFECYCLECONFIG = _descriptor.Descriptor( + name="LifecycleConfig", + full_name="google.cloud.dataproc.v1.LifecycleConfig", + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name="idle_delete_ttl", + full_name="google.cloud.dataproc.v1.LifecycleConfig.idle_delete_ttl", + index=0, + number=1, + type=11, + cpp_type=10, + label=1, + has_default_value=False, + default_value=None, + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + serialized_options=_b("\340A\001"), + file=DESCRIPTOR, + ), + _descriptor.FieldDescriptor( + name="auto_delete_time", + full_name="google.cloud.dataproc.v1.LifecycleConfig.auto_delete_time", + index=1, + number=2, + type=11, + cpp_type=10, + label=1, + has_default_value=False, + default_value=None, + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + serialized_options=_b("\340A\001"), + file=DESCRIPTOR, + ), + _descriptor.FieldDescriptor( + name="auto_delete_ttl", + full_name="google.cloud.dataproc.v1.LifecycleConfig.auto_delete_ttl", + index=2, + number=3, + type=11, + cpp_type=10, + label=1, + has_default_value=False, + default_value=None, + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + serialized_options=_b("\340A\001"), + file=DESCRIPTOR, + ), + _descriptor.FieldDescriptor( + name="idle_start_time", + full_name="google.cloud.dataproc.v1.LifecycleConfig.idle_start_time", + index=3, + number=4, + type=11, + cpp_type=10, + label=1, + has_default_value=False, + default_value=None, + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + serialized_options=_b("\340A\003"), + file=DESCRIPTOR, + ), + ], + extensions=[], + nested_types=[], + enum_types=[], + serialized_options=None, + is_extendable=False, + syntax="proto3", + extension_ranges=[], + oneofs=[ + _descriptor.OneofDescriptor( + name="ttl", + full_name="google.cloud.dataproc.v1.LifecycleConfig.ttl", + index=0, + containing_type=None, + fields=[], + ) + ], + serialized_start=4213, + serialized_end=4472, ) @@ -1868,8 +2039,8 @@ syntax="proto3", extension_ranges=[], oneofs=[], - serialized_start=4286, - serialized_end=4336, + serialized_start=4655, + serialized_end=4705, ) 
_CLUSTERMETRICS_YARNMETRICSENTRY = _descriptor.Descriptor( @@ -1924,8 +2095,8 @@ syntax="proto3", extension_ranges=[], oneofs=[], - serialized_start=4338, - serialized_end=4388, + serialized_start=4707, + serialized_end=4757, ) _CLUSTERMETRICS = _descriptor.Descriptor( @@ -1980,8 +2151,8 @@ syntax="proto3", extension_ranges=[], oneofs=[], - serialized_start=4106, - serialized_end=4388, + serialized_start=4475, + serialized_end=4757, ) @@ -2073,8 +2244,8 @@ syntax="proto3", extension_ranges=[], oneofs=[], - serialized_start=4391, - serialized_end=4541, + serialized_start=4760, + serialized_end=4910, ) @@ -2220,8 +2391,8 @@ syntax="proto3", extension_ranges=[], oneofs=[], - serialized_start=4544, - serialized_end=4846, + serialized_start=4913, + serialized_end=5215, ) @@ -2331,8 +2502,8 @@ syntax="proto3", extension_ranges=[], oneofs=[], - serialized_start=4849, - serialized_end=4996, + serialized_start=5218, + serialized_end=5365, ) @@ -2406,8 +2577,8 @@ syntax="proto3", extension_ranges=[], oneofs=[], - serialized_start=4998, - serialized_end=5090, + serialized_start=5367, + serialized_end=5459, ) @@ -2517,8 +2688,8 @@ syntax="proto3", extension_ranges=[], oneofs=[], - serialized_start=5093, - serialized_end=5230, + serialized_start=5462, + serialized_end=5599, ) @@ -2574,8 +2745,8 @@ syntax="proto3", extension_ranges=[], oneofs=[], - serialized_start=5232, - serialized_end=5342, + serialized_start=5601, + serialized_end=5711, ) @@ -2649,8 +2820,8 @@ syntax="proto3", extension_ranges=[], oneofs=[], - serialized_start=5344, - serialized_end=5441, + serialized_start=5713, + serialized_end=5810, ) @@ -2688,8 +2859,83 @@ syntax="proto3", extension_ranges=[], oneofs=[], - serialized_start=5443, - serialized_end=5492, + serialized_start=5812, + serialized_end=5861, +) + + +_RESERVATIONAFFINITY = _descriptor.Descriptor( + name="ReservationAffinity", + full_name="google.cloud.dataproc.v1.ReservationAffinity", + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name="consume_reservation_type", + full_name="google.cloud.dataproc.v1.ReservationAffinity.consume_reservation_type", + index=0, + number=1, + type=14, + cpp_type=8, + label=1, + has_default_value=False, + default_value=0, + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + serialized_options=_b("\340A\001"), + file=DESCRIPTOR, + ), + _descriptor.FieldDescriptor( + name="key", + full_name="google.cloud.dataproc.v1.ReservationAffinity.key", + index=1, + number=2, + type=9, + cpp_type=9, + label=1, + has_default_value=False, + default_value=_b("").decode("utf-8"), + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + serialized_options=_b("\340A\001"), + file=DESCRIPTOR, + ), + _descriptor.FieldDescriptor( + name="values", + full_name="google.cloud.dataproc.v1.ReservationAffinity.values", + index=2, + number=3, + type=9, + cpp_type=9, + label=3, + has_default_value=False, + default_value=[], + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + serialized_options=_b("\340A\001"), + file=DESCRIPTOR, + ), + ], + extensions=[], + nested_types=[], + enum_types=[_RESERVATIONAFFINITY_TYPE], + serialized_options=None, + is_extendable=False, + syntax="proto3", + extension_ranges=[], + oneofs=[], + serialized_start=5864, + serialized_end=6112, ) _CLUSTER_LABELSENTRY.containing_type = _CLUSTER @@ -2711,10 +2957,14 @@ 
_CLUSTERCONFIG.fields_by_name["encryption_config"].message_type = _ENCRYPTIONCONFIG _CLUSTERCONFIG.fields_by_name["autoscaling_config"].message_type = _AUTOSCALINGCONFIG _CLUSTERCONFIG.fields_by_name["security_config"].message_type = _SECURITYCONFIG +_CLUSTERCONFIG.fields_by_name["lifecycle_config"].message_type = _LIFECYCLECONFIG _GCECLUSTERCONFIG_METADATAENTRY.containing_type = _GCECLUSTERCONFIG _GCECLUSTERCONFIG.fields_by_name[ "metadata" ].message_type = _GCECLUSTERCONFIG_METADATAENTRY +_GCECLUSTERCONFIG.fields_by_name[ + "reservation_affinity" +].message_type = _RESERVATIONAFFINITY _INSTANCEGROUPCONFIG.fields_by_name["disk_config"].message_type = _DISKCONFIG _INSTANCEGROUPCONFIG.fields_by_name[ "managed_group_config" @@ -2738,6 +2988,30 @@ _SOFTWARECONFIG.fields_by_name[ "optional_components" ].enum_type = google_dot_cloud_dot_dataproc__v1_dot_proto_dot_shared__pb2._COMPONENT +_LIFECYCLECONFIG.fields_by_name[ + "idle_delete_ttl" +].message_type = google_dot_protobuf_dot_duration__pb2._DURATION +_LIFECYCLECONFIG.fields_by_name[ + "auto_delete_time" +].message_type = google_dot_protobuf_dot_timestamp__pb2._TIMESTAMP +_LIFECYCLECONFIG.fields_by_name[ + "auto_delete_ttl" +].message_type = google_dot_protobuf_dot_duration__pb2._DURATION +_LIFECYCLECONFIG.fields_by_name[ + "idle_start_time" +].message_type = google_dot_protobuf_dot_timestamp__pb2._TIMESTAMP +_LIFECYCLECONFIG.oneofs_by_name["ttl"].fields.append( + _LIFECYCLECONFIG.fields_by_name["auto_delete_time"] +) +_LIFECYCLECONFIG.fields_by_name[ + "auto_delete_time" +].containing_oneof = _LIFECYCLECONFIG.oneofs_by_name["ttl"] +_LIFECYCLECONFIG.oneofs_by_name["ttl"].fields.append( + _LIFECYCLECONFIG.fields_by_name["auto_delete_ttl"] +) +_LIFECYCLECONFIG.fields_by_name[ + "auto_delete_ttl" +].containing_oneof = _LIFECYCLECONFIG.oneofs_by_name["ttl"] _CLUSTERMETRICS_HDFSMETRICSENTRY.containing_type = _CLUSTERMETRICS _CLUSTERMETRICS_YARNMETRICSENTRY.containing_type = _CLUSTERMETRICS _CLUSTERMETRICS.fields_by_name[ @@ -2755,6 +3029,10 @@ "update_mask" ].message_type = google_dot_protobuf_dot_field__mask__pb2._FIELDMASK _LISTCLUSTERSRESPONSE.fields_by_name["clusters"].message_type = _CLUSTER +_RESERVATIONAFFINITY.fields_by_name[ + "consume_reservation_type" +].enum_type = _RESERVATIONAFFINITY_TYPE +_RESERVATIONAFFINITY_TYPE.containing_type = _RESERVATIONAFFINITY DESCRIPTOR.message_types_by_name["Cluster"] = _CLUSTER DESCRIPTOR.message_types_by_name["ClusterConfig"] = _CLUSTERCONFIG DESCRIPTOR.message_types_by_name["AutoscalingConfig"] = _AUTOSCALINGCONFIG @@ -2769,6 +3047,7 @@ DESCRIPTOR.message_types_by_name["SecurityConfig"] = _SECURITYCONFIG DESCRIPTOR.message_types_by_name["KerberosConfig"] = _KERBEROSCONFIG DESCRIPTOR.message_types_by_name["SoftwareConfig"] = _SOFTWARECONFIG +DESCRIPTOR.message_types_by_name["LifecycleConfig"] = _LIFECYCLECONFIG DESCRIPTOR.message_types_by_name["ClusterMetrics"] = _CLUSTERMETRICS DESCRIPTOR.message_types_by_name["CreateClusterRequest"] = _CREATECLUSTERREQUEST DESCRIPTOR.message_types_by_name["UpdateClusterRequest"] = _UPDATECLUSTERREQUEST @@ -2778,6 +3057,7 @@ DESCRIPTOR.message_types_by_name["ListClustersResponse"] = _LISTCLUSTERSRESPONSE DESCRIPTOR.message_types_by_name["DiagnoseClusterRequest"] = _DIAGNOSECLUSTERREQUEST DESCRIPTOR.message_types_by_name["DiagnoseClusterResults"] = _DIAGNOSECLUSTERRESULTS +DESCRIPTOR.message_types_by_name["ReservationAffinity"] = _RESERVATIONAFFINITY _sym_db.RegisterFileDescriptor(DESCRIPTOR) Cluster = _reflection.GeneratedProtocolMessageType( @@ -2888,6 +3168,8 @@ 
unset. security_config: Optional. Security settings for the cluster. + lifecycle_config: + Optional. Lifecycle setting for the cluster. """, # @@protoc_insertion_point(class_scope:google.cloud.dataproc.v1.ClusterConfig) ), @@ -3025,6 +3307,9 @@ (see `Project and instance metadata <https://cloud.google.com/compute/docs/storing-retrieving-metadata#project_and_instance_metadata>`__). + reservation_affinity: + Optional. Reservation Affinity for consuming Zonal + reservation. """, # @@protoc_insertion_point(class_scope:google.cloud.dataproc.v1.GceClusterConfig) ), @@ -3038,8 +3323,8 @@ dict( DESCRIPTOR=_INSTANCEGROUPCONFIG, __module__="google.cloud.dataproc_v1.proto.clusters_pb2", - __doc__="""Optional. The config settings for Compute Engine resources - in an instance group, such as a master or worker group. + __doc__="""The config settings for Compute Engine resources in an + instance group, such as a master or worker group. Attributes: @@ -3193,10 +3478,12 @@ Required. Cloud Storage URI of executable file. execution_timeout: Optional. Amount of time executable has to complete. Default - is 10 minutes. Cluster creation fails with an explanatory - error message (the name of the executable that caused the - error and the exceeded timeout period) if the executable is - not completed at end of the timeout period. + is 10 minutes (see JSON representation of `Duration + <https://developers.google.com/protocol-buffers/docs/proto3#json>`__). Cluster creation fails with an + explanatory error message (the name of the executable that + caused the error and the exceeded timeout period) if the + executable is not completed at end of the timeout period. """, # @@protoc_insertion_point(class_scope:google.cloud.dataproc.v1.NodeInitializationAction) ), @@ -3218,7 +3505,10 @@ detail: Optional. Output only. Details of cluster's state. state_start_time: - Output only. Time when this state was entered. + Output only. Time when this state was entered (see JSON + representation of `Timestamp + <https://developers.google.com/protocol-buffers/docs/proto3#json>`__). substate: Output only. Additional state information that includes status reported by the agent. @@ -3368,6 +3658,49 @@ _sym_db.RegisterMessage(SoftwareConfig) _sym_db.RegisterMessage(SoftwareConfig.PropertiesEntry) +LifecycleConfig = _reflection.GeneratedProtocolMessageType( + "LifecycleConfig", + (_message.Message,), + dict( + DESCRIPTOR=_LIFECYCLECONFIG, + __module__="google.cloud.dataproc_v1.proto.clusters_pb2", + __doc__="""Specifies the cluster auto-delete schedule configuration. + + + Attributes: + idle_delete_ttl: + Optional. The duration to keep the cluster alive while idling + (when no jobs are running). Passing this threshold will cause + the cluster to be deleted. Minimum value is 10 minutes; + maximum value is 14 days (see JSON representation of `Duration + <https://developers.google.com/protocol-buffers/docs/proto3#json>`__). + ttl: + Either the exact time the cluster should be deleted at or the + cluster's maximum age. + auto_delete_time: + Optional. The time when the cluster will be auto-deleted (see JSON + representation of `Timestamp + <https://developers.google.com/protocol-buffers/docs/proto3#json>`__). + auto_delete_ttl: + Optional. The lifetime duration of the cluster. The cluster will + be auto-deleted at the end of this period. Minimum value is 10 + minutes; maximum value is 14 days (see JSON representation of + `Duration <https://developers.google.com/protocol-buffers/docs/proto3#json>`__). + idle_start_time: + Output only. The time when the cluster became idle (most recent + job finished) and became eligible for deletion due to idleness + (see JSON representation of `Timestamp + <https://developers.google.com/protocol-buffers/docs/proto3#json>`__). 
+ """, + # @@protoc_insertion_point(class_scope:google.cloud.dataproc.v1.LifecycleConfig) + ), +) +_sym_db.RegisterMessage(LifecycleConfig) + ClusterMetrics = _reflection.GeneratedProtocolMessageType( "ClusterMetrics", (_message.Message,), @@ -3473,8 +3806,10 @@ wait for jobs in progress to finish before forcefully removing nodes (and potentially interrupting jobs). Default timeout is 0 (for forceful decommission), and the maximum allowed timeout - is 1 day. Only supported on Dataproc image versions 1.2 and - higher. + is 1 day. (see JSON representation of `Duration + `__). Only supported on Dataproc + image versions 1.2 and higher. update_mask: Required. Specifies the path, relative to ``Cluster``, of the field to update. For example, to change the number of workers @@ -3698,6 +4033,30 @@ ) _sym_db.RegisterMessage(DiagnoseClusterResults) +ReservationAffinity = _reflection.GeneratedProtocolMessageType( + "ReservationAffinity", + (_message.Message,), + dict( + DESCRIPTOR=_RESERVATIONAFFINITY, + __module__="google.cloud.dataproc_v1.proto.clusters_pb2", + __doc__="""Reservation Affinity for consuming Zonal reservation. + + + Attributes: + consume_reservation_type: + Optional. Type of reservation to consume + key: + Optional. Corresponds to the label key of reservation + resource. + values: + Optional. Corresponds to the label values of reservation + resource. + """, + # @@protoc_insertion_point(class_scope:google.cloud.dataproc.v1.ReservationAffinity) + ), +) +_sym_db.RegisterMessage(ReservationAffinity) + DESCRIPTOR._options = None _CLUSTER_LABELSENTRY._options = None @@ -3718,6 +4077,7 @@ _CLUSTERCONFIG.fields_by_name["encryption_config"]._options = None _CLUSTERCONFIG.fields_by_name["autoscaling_config"]._options = None _CLUSTERCONFIG.fields_by_name["security_config"]._options = None +_CLUSTERCONFIG.fields_by_name["lifecycle_config"]._options = None _AUTOSCALINGCONFIG.fields_by_name["policy_uri"]._options = None _ENCRYPTIONCONFIG.fields_by_name["gce_pd_kms_key_name"]._options = None _GCECLUSTERCONFIG_METADATAENTRY._options = None @@ -3727,6 +4087,7 @@ _GCECLUSTERCONFIG.fields_by_name["internal_ip_only"]._options = None _GCECLUSTERCONFIG.fields_by_name["service_account"]._options = None _GCECLUSTERCONFIG.fields_by_name["service_account_scopes"]._options = None +_GCECLUSTERCONFIG.fields_by_name["reservation_affinity"]._options = None _INSTANCEGROUPCONFIG.fields_by_name["num_instances"]._options = None _INSTANCEGROUPCONFIG.fields_by_name["instance_names"]._options = None _INSTANCEGROUPCONFIG.fields_by_name["image_uri"]._options = None @@ -3766,6 +4127,10 @@ _SOFTWARECONFIG.fields_by_name["image_version"]._options = None _SOFTWARECONFIG.fields_by_name["properties"]._options = None _SOFTWARECONFIG.fields_by_name["optional_components"]._options = None +_LIFECYCLECONFIG.fields_by_name["idle_delete_ttl"]._options = None +_LIFECYCLECONFIG.fields_by_name["auto_delete_time"]._options = None +_LIFECYCLECONFIG.fields_by_name["auto_delete_ttl"]._options = None +_LIFECYCLECONFIG.fields_by_name["idle_start_time"]._options = None _CLUSTERMETRICS_HDFSMETRICSENTRY._options = None _CLUSTERMETRICS_YARNMETRICSENTRY._options = None _CREATECLUSTERREQUEST.fields_by_name["project_id"]._options = None @@ -3798,6 +4163,9 @@ _DIAGNOSECLUSTERREQUEST.fields_by_name["region"]._options = None _DIAGNOSECLUSTERREQUEST.fields_by_name["cluster_name"]._options = None _DIAGNOSECLUSTERRESULTS.fields_by_name["output_uri"]._options = None +_RESERVATIONAFFINITY.fields_by_name["consume_reservation_type"]._options = None 
+_RESERVATIONAFFINITY.fields_by_name["key"]._options = None +_RESERVATIONAFFINITY.fields_by_name["values"]._options = None _CLUSTERCONTROLLER = _descriptor.ServiceDescriptor( name="ClusterController", @@ -3807,8 +4175,8 @@ serialized_options=_b( "\312A\027dataproc.googleapis.com\322A.https://www.googleapis.com/auth/cloud-platform" ), - serialized_start=5495, - serialized_end=7130, + serialized_start=6115, + serialized_end=7750, methods=[ _descriptor.MethodDescriptor( name="CreateCluster", @@ -3829,7 +4197,7 @@ input_type=_UPDATECLUSTERREQUEST, output_type=google_dot_longrunning_dot_operations__pb2._OPERATION, serialized_options=_b( - "\202\323\344\223\002M2B/v1/projects/{project_id}/regions/{region}/clusters/{cluster_name}:\007cluster\312A<\n\007Cluster\0221google.cloud.dataproc.v1.ClusterOperationMetadata\332A2project_id,region,cluster_name,cluster,update_mask" + "\202\323\344\223\002M2B/v1/projects/{project_id}/regions/{region}/clusters/{cluster_name}:\007cluster\332A2project_id,region,cluster_name,cluster,update_mask\312A<\n\007Cluster\0221google.cloud.dataproc.v1.ClusterOperationMetadata" ), ), _descriptor.MethodDescriptor( diff --git a/google/cloud/dataproc_v1/proto/clusters_pb2_grpc.py b/google/cloud/dataproc_v1/proto/clusters_pb2_grpc.py index def69f14..8d5bbde7 100644 --- a/google/cloud/dataproc_v1/proto/clusters_pb2_grpc.py +++ b/google/cloud/dataproc_v1/proto/clusters_pb2_grpc.py @@ -60,7 +60,7 @@ class ClusterControllerServicer(object): def CreateCluster(self, request, context): """Creates a cluster in a project. The returned [Operation.metadata][google.longrunning.Operation.metadata] will be - [ClusterOperationMetadata](/dataproc/docs/reference/rpc/google.cloud.dataproc.v1#clusteroperationmetadata). + [ClusterOperationMetadata](https://cloud.google.com/dataproc/docs/reference/rpc/google.cloud.dataproc.v1#clusteroperationmetadata). """ context.set_code(grpc.StatusCode.UNIMPLEMENTED) context.set_details("Method not implemented!") @@ -69,7 +69,7 @@ def CreateCluster(self, request, context): def UpdateCluster(self, request, context): """Updates a cluster in a project. The returned [Operation.metadata][google.longrunning.Operation.metadata] will be - [ClusterOperationMetadata](/dataproc/docs/reference/rpc/google.cloud.dataproc.v1#clusteroperationmetadata). + [ClusterOperationMetadata](https://cloud.google.com/dataproc/docs/reference/rpc/google.cloud.dataproc.v1#clusteroperationmetadata). """ context.set_code(grpc.StatusCode.UNIMPLEMENTED) context.set_details("Method not implemented!") @@ -78,7 +78,7 @@ def UpdateCluster(self, request, context): def DeleteCluster(self, request, context): """Deletes a cluster in a project. The returned [Operation.metadata][google.longrunning.Operation.metadata] will be - [ClusterOperationMetadata](/dataproc/docs/reference/rpc/google.cloud.dataproc.v1#clusteroperationmetadata). + [ClusterOperationMetadata](https://cloud.google.com/dataproc/docs/reference/rpc/google.cloud.dataproc.v1#clusteroperationmetadata). """ context.set_code(grpc.StatusCode.UNIMPLEMENTED) context.set_details("Method not implemented!") @@ -101,11 +101,11 @@ def ListClusters(self, request, context): def DiagnoseCluster(self, request, context): """Gets cluster diagnostic information. The returned [Operation.metadata][google.longrunning.Operation.metadata] will be - [ClusterOperationMetadata](/dataproc/docs/reference/rpc/google.cloud.dataproc.v1#clusteroperationmetadata). 
+ [ClusterOperationMetadata](https://cloud.google.com/dataproc/docs/reference/rpc/google.cloud.dataproc.v1#clusteroperationmetadata). After the operation completes, [Operation.response][google.longrunning.Operation.response] contains - [DiagnoseClusterResults](/dataproc/docs/reference/rpc/google.cloud.dataproc.v1#diagnoseclusterresults). + [DiagnoseClusterResults](https://cloud.google.com/dataproc/docs/reference/rpc/google.cloud.dataproc.v1#diagnoseclusterresults). """ context.set_code(grpc.StatusCode.UNIMPLEMENTED) context.set_details("Method not implemented!") diff --git a/google/cloud/dataproc_v1/proto/jobs.proto b/google/cloud/dataproc_v1/proto/jobs.proto index bcb68fed..85921dc4 100644 --- a/google/cloud/dataproc_v1/proto/jobs.proto +++ b/google/cloud/dataproc_v1/proto/jobs.proto @@ -1,4 +1,4 @@ -// Copyright 2019 Google LLC. +// Copyright 2020 Google LLC // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -19,6 +19,7 @@ package google.cloud.dataproc.v1; import "google/api/annotations.proto"; import "google/api/client.proto"; import "google/api/field_behavior.proto"; +import "google/longrunning/operations.proto"; import "google/protobuf/empty.proto"; import "google/protobuf/field_mask.proto"; import "google/protobuf/timestamp.proto"; @@ -69,9 +70,9 @@ service JobController { // Starts a job cancellation request. To access the job resource // after cancellation, call - // [regions/{region}/jobs.list](/dataproc/docs/reference/rest/v1/projects.regions.jobs/list) + // [regions/{region}/jobs.list](https://cloud.google.com/dataproc/docs/reference/rest/v1/projects.regions.jobs/list) // or - // [regions/{region}/jobs.get](/dataproc/docs/reference/rest/v1/projects.regions.jobs/get). + // [regions/{region}/jobs.get](https://cloud.google.com/dataproc/docs/reference/rest/v1/projects.regions.jobs/get). rpc CancelJob(CancelJobRequest) returns (Job) { option (google.api.http) = { post: "/v1/projects/{project_id}/regions/{region}/jobs/{job_id}:cancel" @@ -387,6 +388,71 @@ message PigJob { LoggingConfig logging_config = 7 [(google.api.field_behavior) = OPTIONAL]; } +// A Dataproc job for running +// [Apache SparkR](https://spark.apache.org/docs/latest/sparkr.html) +// applications on YARN. +message SparkRJob { + // Required. The HCFS URI of the main R file to use as the driver. + // Must be a .R file. + string main_r_file_uri = 1 [(google.api.field_behavior) = REQUIRED]; + + // Optional. The arguments to pass to the driver. Do not include arguments, + // such as `--conf`, that can be set as job properties, since a collision may + // occur that causes an incorrect job submission. + repeated string args = 2 [(google.api.field_behavior) = OPTIONAL]; + + // Optional. HCFS URIs of files to be copied to the working directory of + // R drivers and distributed tasks. Useful for naively parallel tasks. + repeated string file_uris = 3 [(google.api.field_behavior) = OPTIONAL]; + + // Optional. HCFS URIs of archives to be extracted in the working directory of + // Spark drivers and tasks. Supported file types: + // .jar, .tar, .tar.gz, .tgz, and .zip. + repeated string archive_uris = 4 [(google.api.field_behavior) = OPTIONAL]; + + // Optional. A mapping of property names to values, used to configure SparkR. + // Properties that conflict with values set by the Dataproc API may be + // overwritten. Can include properties set in + // /etc/spark/conf/spark-defaults.conf and classes in user code. 
+  map<string, string> properties = 5 [(google.api.field_behavior) = OPTIONAL];
+
+  // Optional. The runtime log config for job execution.
+  LoggingConfig logging_config = 6 [(google.api.field_behavior) = OPTIONAL];
+}
+
+// A Dataproc job for running [Presto](https://prestosql.io/) queries
+message PrestoJob {
+  // Required. The sequence of Presto queries to execute, specified as
+  // either an HCFS file URI or as a list of queries.
+  oneof queries {
+    // The HCFS URI of the script that contains SQL queries.
+    string query_file_uri = 1;
+
+    // A list of queries.
+    QueryList query_list = 2;
+  }
+
+  // Optional. Whether to continue executing queries if a query fails.
+  // The default value is `false`. Setting to `true` can be useful when
+  // executing independent parallel queries.
+  bool continue_on_failure = 3 [(google.api.field_behavior) = OPTIONAL];
+
+  // Optional. The format in which query output will be displayed. See the
+  // Presto documentation for supported output formats
+  string output_format = 4 [(google.api.field_behavior) = OPTIONAL];
+
+  // Optional. Presto client tags to attach to this query
+  repeated string client_tags = 5 [(google.api.field_behavior) = OPTIONAL];
+
+  // Optional. A mapping of property names to values. Used to set Presto
+  // [session properties](https://prestodb.io/docs/current/sql/set-session.html)
+  // Equivalent to using the --session flag in the Presto CLI
+  map<string, string> properties = 6 [(google.api.field_behavior) = OPTIONAL];
+
+  // Optional. The runtime log config for job execution.
+  LoggingConfig logging_config = 7 [(google.api.field_behavior) = OPTIONAL];
+}
+
 // Dataproc job config.
 message JobPlacement {
   // Required. The name of the cluster where the job will be submitted.
@@ -562,23 +628,29 @@ message Job {

   // Required. The application/framework-specific portion of the job.
   oneof type_job {
-    // Job is a Hadoop job.
-    HadoopJob hadoop_job = 3;
+    // Optional. Job is a Hadoop job.
+    HadoopJob hadoop_job = 3 [(google.api.field_behavior) = OPTIONAL];
+
+    // Optional. Job is a Spark job.
+    SparkJob spark_job = 4 [(google.api.field_behavior) = OPTIONAL];
+
+    // Optional. Job is a PySpark job.
+    PySparkJob pyspark_job = 5 [(google.api.field_behavior) = OPTIONAL];

-    // Job is a Spark job.
-    SparkJob spark_job = 4;
+    // Optional. Job is a Hive job.
+    HiveJob hive_job = 6 [(google.api.field_behavior) = OPTIONAL];

-    // Job is a Pyspark job.
-    PySparkJob pyspark_job = 5;
+    // Optional. Job is a Pig job.
+    PigJob pig_job = 7 [(google.api.field_behavior) = OPTIONAL];

-    // Job is a Hive job.
-    HiveJob hive_job = 6;
+    // Optional. Job is a SparkR job.
+    SparkRJob spark_r_job = 21 [(google.api.field_behavior) = OPTIONAL];

-    // Job is a Pig job.
-    PigJob pig_job = 7;
+    // Optional. Job is a SparkSql job.
+    SparkSqlJob spark_sql_job = 12 [(google.api.field_behavior) = OPTIONAL];

-    // Job is a SparkSql job.
-    SparkSqlJob spark_sql_job = 12;
+    // Optional. Job is a Presto job.
+    PrestoJob presto_job = 23 [(google.api.field_behavior) = OPTIONAL];
   }

   // Output only. The job status.
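With spark_r_job and presto_job added to the type_job oneof above, submission works like any other job type. A hypothetical sketch against the v1 JobControllerClient, not part of the diff; the bucket path, query, cluster, project, and region names are placeholders:

# Illustrative sketch only -- placeholder URIs, queries, and names.
from google.cloud import dataproc_v1

jobs = dataproc_v1.JobControllerClient()

spark_r_job = {
    "placement": {"cluster_name": "example-cluster"},
    # New oneof variant: field 21 in Job.type_job.
    "spark_r_job": {"main_r_file_uri": "gs://my-bucket/analysis.R"},
}

presto_job = {
    "placement": {"cluster_name": "example-cluster"},
    # New oneof variant: field 23 in Job.type_job.
    "presto_job": {
        "query_list": {"queries": ["SELECT 1"]},
        "continue_on_failure": False,
    },
}

for job in (spark_r_job, presto_job):
    submitted = jobs.submit_job("my-project", "global", job)
    print(submitted.reference.job_id, submitted.status.state)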
Additional application-specific diff --git a/google/cloud/dataproc_v1/proto/jobs_pb2.py b/google/cloud/dataproc_v1/proto/jobs_pb2.py index 68b7392f..b7f6ef45 100644 --- a/google/cloud/dataproc_v1/proto/jobs_pb2.py +++ b/google/cloud/dataproc_v1/proto/jobs_pb2.py @@ -18,6 +18,9 @@ from google.api import annotations_pb2 as google_dot_api_dot_annotations__pb2 from google.api import client_pb2 as google_dot_api_dot_client__pb2 from google.api import field_behavior_pb2 as google_dot_api_dot_field__behavior__pb2 +from google.longrunning import ( + operations_pb2 as google_dot_longrunning_dot_operations__pb2, +) from google.protobuf import empty_pb2 as google_dot_protobuf_dot_empty__pb2 from google.protobuf import field_mask_pb2 as google_dot_protobuf_dot_field__mask__pb2 from google.protobuf import timestamp_pb2 as google_dot_protobuf_dot_timestamp__pb2 @@ -31,12 +34,13 @@ "\n\034com.google.cloud.dataproc.v1B\tJobsProtoP\001Z@google.golang.org/genproto/googleapis/cloud/dataproc/v1;dataproc" ), serialized_pb=_b( - '\n)google/cloud/dataproc_v1/proto/jobs.proto\x12\x18google.cloud.dataproc.v1\x1a\x1cgoogle/api/annotations.proto\x1a\x17google/api/client.proto\x1a\x1fgoogle/api/field_behavior.proto\x1a\x1bgoogle/protobuf/empty.proto\x1a google/protobuf/field_mask.proto\x1a\x1fgoogle/protobuf/timestamp.proto"\xc1\x02\n\rLoggingConfig\x12W\n\x11\x64river_log_levels\x18\x02 \x03(\x0b\x32<.google.cloud.dataproc.v1.LoggingConfig.DriverLogLevelsEntry\x1a\x65\n\x14\x44riverLogLevelsEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12<\n\x05value\x18\x02 \x01(\x0e\x32-.google.cloud.dataproc.v1.LoggingConfig.Level:\x02\x38\x01"p\n\x05Level\x12\x15\n\x11LEVEL_UNSPECIFIED\x10\x00\x12\x07\n\x03\x41LL\x10\x01\x12\t\n\x05TRACE\x10\x02\x12\t\n\x05\x44\x45\x42UG\x10\x03\x12\x08\n\x04INFO\x10\x04\x12\x08\n\x04WARN\x10\x05\x12\t\n\x05\x45RROR\x10\x06\x12\t\n\x05\x46\x41TAL\x10\x07\x12\x07\n\x03OFF\x10\x08"\xf1\x02\n\tHadoopJob\x12\x1b\n\x11main_jar_file_uri\x18\x01 \x01(\tH\x00\x12\x14\n\nmain_class\x18\x02 \x01(\tH\x00\x12\x11\n\x04\x61rgs\x18\x03 \x03(\tB\x03\xe0\x41\x01\x12\x1a\n\rjar_file_uris\x18\x04 \x03(\tB\x03\xe0\x41\x01\x12\x16\n\tfile_uris\x18\x05 \x03(\tB\x03\xe0\x41\x01\x12\x19\n\x0c\x61rchive_uris\x18\x06 \x03(\tB\x03\xe0\x41\x01\x12L\n\nproperties\x18\x07 \x03(\x0b\x32\x33.google.cloud.dataproc.v1.HadoopJob.PropertiesEntryB\x03\xe0\x41\x01\x12\x44\n\x0elogging_config\x18\x08 \x01(\x0b\x32\'.google.cloud.dataproc.v1.LoggingConfigB\x03\xe0\x41\x01\x1a\x31\n\x0fPropertiesEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t:\x02\x38\x01\x42\x08\n\x06\x64river"\xef\x02\n\x08SparkJob\x12\x1b\n\x11main_jar_file_uri\x18\x01 \x01(\tH\x00\x12\x14\n\nmain_class\x18\x02 \x01(\tH\x00\x12\x11\n\x04\x61rgs\x18\x03 \x03(\tB\x03\xe0\x41\x01\x12\x1a\n\rjar_file_uris\x18\x04 \x03(\tB\x03\xe0\x41\x01\x12\x16\n\tfile_uris\x18\x05 \x03(\tB\x03\xe0\x41\x01\x12\x19\n\x0c\x61rchive_uris\x18\x06 \x03(\tB\x03\xe0\x41\x01\x12K\n\nproperties\x18\x07 \x03(\x0b\x32\x32.google.cloud.dataproc.v1.SparkJob.PropertiesEntryB\x03\xe0\x41\x01\x12\x44\n\x0elogging_config\x18\x08 \x01(\x0b\x32\'.google.cloud.dataproc.v1.LoggingConfigB\x03\xe0\x41\x01\x1a\x31\n\x0fPropertiesEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t:\x02\x38\x01\x42\x08\n\x06\x64river"\xf8\x02\n\nPySparkJob\x12!\n\x14main_python_file_uri\x18\x01 \x01(\tB\x03\xe0\x41\x02\x12\x11\n\x04\x61rgs\x18\x02 \x03(\tB\x03\xe0\x41\x01\x12\x1d\n\x10python_file_uris\x18\x03 \x03(\tB\x03\xe0\x41\x01\x12\x1a\n\rjar_file_uris\x18\x04 
\x03(\tB\x03\xe0\x41\x01\x12\x16\n\tfile_uris\x18\x05 \x03(\tB\x03\xe0\x41\x01\x12\x19\n\x0c\x61rchive_uris\x18\x06 \x03(\tB\x03\xe0\x41\x01\x12M\n\nproperties\x18\x07 \x03(\x0b\x32\x34.google.cloud.dataproc.v1.PySparkJob.PropertiesEntryB\x03\xe0\x41\x01\x12\x44\n\x0elogging_config\x18\x08 \x01(\x0b\x32\'.google.cloud.dataproc.v1.LoggingConfigB\x03\xe0\x41\x01\x1a\x31\n\x0fPropertiesEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t:\x02\x38\x01"!\n\tQueryList\x12\x14\n\x07queries\x18\x01 \x03(\tB\x03\xe0\x41\x02"\xb5\x03\n\x07HiveJob\x12\x18\n\x0equery_file_uri\x18\x01 \x01(\tH\x00\x12\x39\n\nquery_list\x18\x02 \x01(\x0b\x32#.google.cloud.dataproc.v1.QueryListH\x00\x12 \n\x13\x63ontinue_on_failure\x18\x03 \x01(\x08\x42\x03\xe0\x41\x01\x12U\n\x10script_variables\x18\x04 \x03(\x0b\x32\x36.google.cloud.dataproc.v1.HiveJob.ScriptVariablesEntryB\x03\xe0\x41\x01\x12J\n\nproperties\x18\x05 \x03(\x0b\x32\x31.google.cloud.dataproc.v1.HiveJob.PropertiesEntryB\x03\xe0\x41\x01\x12\x1a\n\rjar_file_uris\x18\x06 \x03(\tB\x03\xe0\x41\x01\x1a\x36\n\x14ScriptVariablesEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t:\x02\x38\x01\x1a\x31\n\x0fPropertiesEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t:\x02\x38\x01\x42\t\n\x07queries"\xe5\x03\n\x0bSparkSqlJob\x12\x18\n\x0equery_file_uri\x18\x01 \x01(\tH\x00\x12\x39\n\nquery_list\x18\x02 \x01(\x0b\x32#.google.cloud.dataproc.v1.QueryListH\x00\x12Y\n\x10script_variables\x18\x03 \x03(\x0b\x32:.google.cloud.dataproc.v1.SparkSqlJob.ScriptVariablesEntryB\x03\xe0\x41\x01\x12N\n\nproperties\x18\x04 \x03(\x0b\x32\x35.google.cloud.dataproc.v1.SparkSqlJob.PropertiesEntryB\x03\xe0\x41\x01\x12\x1a\n\rjar_file_uris\x18\x38 \x03(\tB\x03\xe0\x41\x01\x12\x44\n\x0elogging_config\x18\x06 \x01(\x0b\x32\'.google.cloud.dataproc.v1.LoggingConfigB\x03\xe0\x41\x01\x1a\x36\n\x14ScriptVariablesEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t:\x02\x38\x01\x1a\x31\n\x0fPropertiesEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t:\x02\x38\x01\x42\t\n\x07queries"\xf8\x03\n\x06PigJob\x12\x18\n\x0equery_file_uri\x18\x01 \x01(\tH\x00\x12\x39\n\nquery_list\x18\x02 \x01(\x0b\x32#.google.cloud.dataproc.v1.QueryListH\x00\x12 \n\x13\x63ontinue_on_failure\x18\x03 \x01(\x08\x42\x03\xe0\x41\x01\x12T\n\x10script_variables\x18\x04 \x03(\x0b\x32\x35.google.cloud.dataproc.v1.PigJob.ScriptVariablesEntryB\x03\xe0\x41\x01\x12I\n\nproperties\x18\x05 \x03(\x0b\x32\x30.google.cloud.dataproc.v1.PigJob.PropertiesEntryB\x03\xe0\x41\x01\x12\x1a\n\rjar_file_uris\x18\x06 \x03(\tB\x03\xe0\x41\x01\x12\x44\n\x0elogging_config\x18\x07 \x01(\x0b\x32\'.google.cloud.dataproc.v1.LoggingConfigB\x03\xe0\x41\x01\x1a\x36\n\x14ScriptVariablesEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t:\x02\x38\x01\x1a\x31\n\x0fPropertiesEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t:\x02\x38\x01\x42\t\n\x07queries"D\n\x0cJobPlacement\x12\x19\n\x0c\x63luster_name\x18\x01 \x01(\tB\x03\xe0\x41\x02\x12\x19\n\x0c\x63luster_uuid\x18\x02 \x01(\tB\x03\xe0\x41\x03"\xd9\x03\n\tJobStatus\x12=\n\x05state\x18\x01 \x01(\x0e\x32).google.cloud.dataproc.v1.JobStatus.StateB\x03\xe0\x41\x03\x12\x17\n\x07\x64\x65tails\x18\x02 \x01(\tB\x06\xe0\x41\x03\xe0\x41\x01\x12\x39\n\x10state_start_time\x18\x06 \x01(\x0b\x32\x1a.google.protobuf.TimestampB\x03\xe0\x41\x03\x12\x43\n\x08substate\x18\x07 
\x01(\x0e\x32,.google.cloud.dataproc.v1.JobStatus.SubstateB\x03\xe0\x41\x03"\xa9\x01\n\x05State\x12\x15\n\x11STATE_UNSPECIFIED\x10\x00\x12\x0b\n\x07PENDING\x10\x01\x12\x0e\n\nSETUP_DONE\x10\x08\x12\x0b\n\x07RUNNING\x10\x02\x12\x12\n\x0e\x43\x41NCEL_PENDING\x10\x03\x12\x12\n\x0e\x43\x41NCEL_STARTED\x10\x07\x12\r\n\tCANCELLED\x10\x04\x12\x08\n\x04\x44ONE\x10\x05\x12\t\n\x05\x45RROR\x10\x06\x12\x13\n\x0f\x41TTEMPT_FAILURE\x10\t"H\n\x08Substate\x12\x0f\n\x0bUNSPECIFIED\x10\x00\x12\r\n\tSUBMITTED\x10\x01\x12\n\n\x06QUEUED\x10\x02\x12\x10\n\x0cSTALE_STATUS\x10\x03"<\n\x0cJobReference\x12\x17\n\nproject_id\x18\x01 \x01(\tB\x03\xe0\x41\x02\x12\x13\n\x06job_id\x18\x02 \x01(\tB\x03\xe0\x41\x01"\xa5\x02\n\x0fYarnApplication\x12\x11\n\x04name\x18\x01 \x01(\tB\x03\xe0\x41\x02\x12\x43\n\x05state\x18\x02 \x01(\x0e\x32/.google.cloud.dataproc.v1.YarnApplication.StateB\x03\xe0\x41\x02\x12\x15\n\x08progress\x18\x03 \x01(\x02\x42\x03\xe0\x41\x02\x12\x19\n\x0ctracking_url\x18\x04 \x01(\tB\x03\xe0\x41\x01"\x87\x01\n\x05State\x12\x15\n\x11STATE_UNSPECIFIED\x10\x00\x12\x07\n\x03NEW\x10\x01\x12\x0e\n\nNEW_SAVING\x10\x02\x12\r\n\tSUBMITTED\x10\x03\x12\x0c\n\x08\x41\x43\x43\x45PTED\x10\x04\x12\x0b\n\x07RUNNING\x10\x05\x12\x0c\n\x08\x46INISHED\x10\x06\x12\n\n\x06\x46\x41ILED\x10\x07\x12\n\n\x06KILLED\x10\x08"\xcd\x07\n\x03Job\x12>\n\treference\x18\x01 \x01(\x0b\x32&.google.cloud.dataproc.v1.JobReferenceB\x03\xe0\x41\x01\x12>\n\tplacement\x18\x02 \x01(\x0b\x32&.google.cloud.dataproc.v1.JobPlacementB\x03\xe0\x41\x02\x12\x39\n\nhadoop_job\x18\x03 \x01(\x0b\x32#.google.cloud.dataproc.v1.HadoopJobH\x00\x12\x37\n\tspark_job\x18\x04 \x01(\x0b\x32".google.cloud.dataproc.v1.SparkJobH\x00\x12;\n\x0bpyspark_job\x18\x05 \x01(\x0b\x32$.google.cloud.dataproc.v1.PySparkJobH\x00\x12\x35\n\x08hive_job\x18\x06 \x01(\x0b\x32!.google.cloud.dataproc.v1.HiveJobH\x00\x12\x33\n\x07pig_job\x18\x07 \x01(\x0b\x32 .google.cloud.dataproc.v1.PigJobH\x00\x12>\n\rspark_sql_job\x18\x0c \x01(\x0b\x32%.google.cloud.dataproc.v1.SparkSqlJobH\x00\x12\x38\n\x06status\x18\x08 \x01(\x0b\x32#.google.cloud.dataproc.v1.JobStatusB\x03\xe0\x41\x03\x12@\n\x0estatus_history\x18\r \x03(\x0b\x32#.google.cloud.dataproc.v1.JobStatusB\x03\xe0\x41\x03\x12I\n\x11yarn_applications\x18\t \x03(\x0b\x32).google.cloud.dataproc.v1.YarnApplicationB\x03\xe0\x41\x03\x12\'\n\x1a\x64river_output_resource_uri\x18\x11 \x01(\tB\x03\xe0\x41\x03\x12%\n\x18\x64river_control_files_uri\x18\x0f \x01(\tB\x03\xe0\x41\x03\x12>\n\x06labels\x18\x12 \x03(\x0b\x32).google.cloud.dataproc.v1.Job.LabelsEntryB\x03\xe0\x41\x01\x12@\n\nscheduling\x18\x14 \x01(\x0b\x32\'.google.cloud.dataproc.v1.JobSchedulingB\x03\xe0\x41\x01\x12\x15\n\x08job_uuid\x18\x16 \x01(\tB\x03\xe0\x41\x03\x1a-\n\x0bLabelsEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t:\x02\x38\x01\x42\n\n\x08type_job"3\n\rJobScheduling\x12"\n\x15max_failures_per_hour\x18\x01 \x01(\x05\x42\x03\xe0\x41\x01"\x8a\x01\n\x10SubmitJobRequest\x12\x17\n\nproject_id\x18\x01 \x01(\tB\x03\xe0\x41\x02\x12\x13\n\x06region\x18\x03 \x01(\tB\x03\xe0\x41\x02\x12/\n\x03job\x18\x02 \x01(\x0b\x32\x1d.google.cloud.dataproc.v1.JobB\x03\xe0\x41\x02\x12\x17\n\nrequest_id\x18\x04 \x01(\tB\x03\xe0\x41\x01"R\n\rGetJobRequest\x12\x17\n\nproject_id\x18\x01 \x01(\tB\x03\xe0\x41\x02\x12\x13\n\x06region\x18\x03 \x01(\tB\x03\xe0\x41\x02\x12\x13\n\x06job_id\x18\x02 \x01(\tB\x03\xe0\x41\x02"\xb3\x02\n\x0fListJobsRequest\x12\x17\n\nproject_id\x18\x01 \x01(\tB\x03\xe0\x41\x02\x12\x13\n\x06region\x18\x06 \x01(\tB\x03\xe0\x41\x02\x12\x16\n\tpage_size\x18\x02 
\x01(\x05\x42\x03\xe0\x41\x01\x12\x17\n\npage_token\x18\x03 \x01(\tB\x03\xe0\x41\x01\x12\x19\n\x0c\x63luster_name\x18\x04 \x01(\tB\x03\xe0\x41\x01\x12Y\n\x11job_state_matcher\x18\x05 \x01(\x0e\x32\x39.google.cloud.dataproc.v1.ListJobsRequest.JobStateMatcherB\x03\xe0\x41\x01\x12\x13\n\x06\x66ilter\x18\x07 \x01(\tB\x03\xe0\x41\x01"6\n\x0fJobStateMatcher\x12\x07\n\x03\x41LL\x10\x00\x12\n\n\x06\x41\x43TIVE\x10\x01\x12\x0e\n\nNON_ACTIVE\x10\x02"\xbc\x01\n\x10UpdateJobRequest\x12\x17\n\nproject_id\x18\x01 \x01(\tB\x03\xe0\x41\x02\x12\x13\n\x06region\x18\x02 \x01(\tB\x03\xe0\x41\x02\x12\x13\n\x06job_id\x18\x03 \x01(\tB\x03\xe0\x41\x02\x12/\n\x03job\x18\x04 \x01(\x0b\x32\x1d.google.cloud.dataproc.v1.JobB\x03\xe0\x41\x02\x12\x34\n\x0bupdate_mask\x18\x05 \x01(\x0b\x32\x1a.google.protobuf.FieldMaskB\x03\xe0\x41\x02"b\n\x10ListJobsResponse\x12\x30\n\x04jobs\x18\x01 \x03(\x0b\x32\x1d.google.cloud.dataproc.v1.JobB\x03\xe0\x41\x03\x12\x1c\n\x0fnext_page_token\x18\x02 \x01(\tB\x03\xe0\x41\x01"U\n\x10\x43\x61ncelJobRequest\x12\x17\n\nproject_id\x18\x01 \x01(\tB\x03\xe0\x41\x02\x12\x13\n\x06region\x18\x03 \x01(\tB\x03\xe0\x41\x02\x12\x13\n\x06job_id\x18\x02 \x01(\tB\x03\xe0\x41\x02"U\n\x10\x44\x65leteJobRequest\x12\x17\n\nproject_id\x18\x01 \x01(\tB\x03\xe0\x41\x02\x12\x13\n\x06region\x18\x03 \x01(\tB\x03\xe0\x41\x02\x12\x13\n\x06job_id\x18\x02 \x01(\tB\x03\xe0\x41\x02\x32\x9b\t\n\rJobController\x12\xb1\x01\n\tSubmitJob\x12*.google.cloud.dataproc.v1.SubmitJobRequest\x1a\x1d.google.cloud.dataproc.v1.Job"Y\x82\xd3\xe4\x93\x02;"6/v1/projects/{project_id}/regions/{region}/jobs:submit:\x01*\xda\x41\x15project_id,region,job\x12\xad\x01\n\x06GetJob\x12\'.google.cloud.dataproc.v1.GetJobRequest\x1a\x1d.google.cloud.dataproc.v1.Job"[\x82\xd3\xe4\x93\x02:\x12\x38/v1/projects/{project_id}/regions/{region}/jobs/{job_id}\xda\x41\x18project_id,region,job_id\x12\xc9\x01\n\x08ListJobs\x12).google.cloud.dataproc.v1.ListJobsRequest\x1a*.google.cloud.dataproc.v1.ListJobsResponse"f\x82\xd3\xe4\x93\x02\x31\x12//v1/projects/{project_id}/regions/{region}/jobs\xda\x41\x11project_id,region\xda\x41\x18project_id,region,filter\x12\x9d\x01\n\tUpdateJob\x12*.google.cloud.dataproc.v1.UpdateJobRequest\x1a\x1d.google.cloud.dataproc.v1.Job"E\x82\xd3\xe4\x93\x02?28/v1/projects/{project_id}/regions/{region}/jobs/{job_id}:\x03job\x12\xbd\x01\n\tCancelJob\x12*.google.cloud.dataproc.v1.CancelJobRequest\x1a\x1d.google.cloud.dataproc.v1.Job"e\x82\xd3\xe4\x93\x02\x44"?/v1/projects/{project_id}/regions/{region}/jobs/{job_id}:cancel:\x01*\xda\x41\x18project_id,region,job_id\x12\xac\x01\n\tDeleteJob\x12*.google.cloud.dataproc.v1.DeleteJobRequest\x1a\x16.google.protobuf.Empty"[\x82\xd3\xe4\x93\x02:*8/v1/projects/{project_id}/regions/{region}/jobs/{job_id}\xda\x41\x18project_id,region,job_id\x1aK\xca\x41\x17\x64\x61taproc.googleapis.com\xd2\x41.https://www.googleapis.com/auth/cloud-platformBm\n\x1c\x63om.google.cloud.dataproc.v1B\tJobsProtoP\x01Z@google.golang.org/genproto/googleapis/cloud/dataproc/v1;dataprocb\x06proto3' + '\n)google/cloud/dataproc_v1/proto/jobs.proto\x12\x18google.cloud.dataproc.v1\x1a\x1cgoogle/api/annotations.proto\x1a\x17google/api/client.proto\x1a\x1fgoogle/api/field_behavior.proto\x1a#google/longrunning/operations.proto\x1a\x1bgoogle/protobuf/empty.proto\x1a google/protobuf/field_mask.proto\x1a\x1fgoogle/protobuf/timestamp.proto"\xc1\x02\n\rLoggingConfig\x12W\n\x11\x64river_log_levels\x18\x02 \x03(\x0b\x32<.google.cloud.dataproc.v1.LoggingConfig.DriverLogLevelsEntry\x1a\x65\n\x14\x44riverLogLevelsEntry\x12\x0b\n\x03key\x18\x01 
\x01(\t\x12<\n\x05value\x18\x02 \x01(\x0e\x32-.google.cloud.dataproc.v1.LoggingConfig.Level:\x02\x38\x01"p\n\x05Level\x12\x15\n\x11LEVEL_UNSPECIFIED\x10\x00\x12\x07\n\x03\x41LL\x10\x01\x12\t\n\x05TRACE\x10\x02\x12\t\n\x05\x44\x45\x42UG\x10\x03\x12\x08\n\x04INFO\x10\x04\x12\x08\n\x04WARN\x10\x05\x12\t\n\x05\x45RROR\x10\x06\x12\t\n\x05\x46\x41TAL\x10\x07\x12\x07\n\x03OFF\x10\x08"\xf1\x02\n\tHadoopJob\x12\x1b\n\x11main_jar_file_uri\x18\x01 \x01(\tH\x00\x12\x14\n\nmain_class\x18\x02 \x01(\tH\x00\x12\x11\n\x04\x61rgs\x18\x03 \x03(\tB\x03\xe0\x41\x01\x12\x1a\n\rjar_file_uris\x18\x04 \x03(\tB\x03\xe0\x41\x01\x12\x16\n\tfile_uris\x18\x05 \x03(\tB\x03\xe0\x41\x01\x12\x19\n\x0c\x61rchive_uris\x18\x06 \x03(\tB\x03\xe0\x41\x01\x12L\n\nproperties\x18\x07 \x03(\x0b\x32\x33.google.cloud.dataproc.v1.HadoopJob.PropertiesEntryB\x03\xe0\x41\x01\x12\x44\n\x0elogging_config\x18\x08 \x01(\x0b\x32\'.google.cloud.dataproc.v1.LoggingConfigB\x03\xe0\x41\x01\x1a\x31\n\x0fPropertiesEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t:\x02\x38\x01\x42\x08\n\x06\x64river"\xef\x02\n\x08SparkJob\x12\x1b\n\x11main_jar_file_uri\x18\x01 \x01(\tH\x00\x12\x14\n\nmain_class\x18\x02 \x01(\tH\x00\x12\x11\n\x04\x61rgs\x18\x03 \x03(\tB\x03\xe0\x41\x01\x12\x1a\n\rjar_file_uris\x18\x04 \x03(\tB\x03\xe0\x41\x01\x12\x16\n\tfile_uris\x18\x05 \x03(\tB\x03\xe0\x41\x01\x12\x19\n\x0c\x61rchive_uris\x18\x06 \x03(\tB\x03\xe0\x41\x01\x12K\n\nproperties\x18\x07 \x03(\x0b\x32\x32.google.cloud.dataproc.v1.SparkJob.PropertiesEntryB\x03\xe0\x41\x01\x12\x44\n\x0elogging_config\x18\x08 \x01(\x0b\x32\'.google.cloud.dataproc.v1.LoggingConfigB\x03\xe0\x41\x01\x1a\x31\n\x0fPropertiesEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t:\x02\x38\x01\x42\x08\n\x06\x64river"\xf8\x02\n\nPySparkJob\x12!\n\x14main_python_file_uri\x18\x01 \x01(\tB\x03\xe0\x41\x02\x12\x11\n\x04\x61rgs\x18\x02 \x03(\tB\x03\xe0\x41\x01\x12\x1d\n\x10python_file_uris\x18\x03 \x03(\tB\x03\xe0\x41\x01\x12\x1a\n\rjar_file_uris\x18\x04 \x03(\tB\x03\xe0\x41\x01\x12\x16\n\tfile_uris\x18\x05 \x03(\tB\x03\xe0\x41\x01\x12\x19\n\x0c\x61rchive_uris\x18\x06 \x03(\tB\x03\xe0\x41\x01\x12M\n\nproperties\x18\x07 \x03(\x0b\x32\x34.google.cloud.dataproc.v1.PySparkJob.PropertiesEntryB\x03\xe0\x41\x01\x12\x44\n\x0elogging_config\x18\x08 \x01(\x0b\x32\'.google.cloud.dataproc.v1.LoggingConfigB\x03\xe0\x41\x01\x1a\x31\n\x0fPropertiesEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t:\x02\x38\x01"!\n\tQueryList\x12\x14\n\x07queries\x18\x01 \x03(\tB\x03\xe0\x41\x02"\xb5\x03\n\x07HiveJob\x12\x18\n\x0equery_file_uri\x18\x01 \x01(\tH\x00\x12\x39\n\nquery_list\x18\x02 \x01(\x0b\x32#.google.cloud.dataproc.v1.QueryListH\x00\x12 \n\x13\x63ontinue_on_failure\x18\x03 \x01(\x08\x42\x03\xe0\x41\x01\x12U\n\x10script_variables\x18\x04 \x03(\x0b\x32\x36.google.cloud.dataproc.v1.HiveJob.ScriptVariablesEntryB\x03\xe0\x41\x01\x12J\n\nproperties\x18\x05 \x03(\x0b\x32\x31.google.cloud.dataproc.v1.HiveJob.PropertiesEntryB\x03\xe0\x41\x01\x12\x1a\n\rjar_file_uris\x18\x06 \x03(\tB\x03\xe0\x41\x01\x1a\x36\n\x14ScriptVariablesEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t:\x02\x38\x01\x1a\x31\n\x0fPropertiesEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t:\x02\x38\x01\x42\t\n\x07queries"\xe5\x03\n\x0bSparkSqlJob\x12\x18\n\x0equery_file_uri\x18\x01 \x01(\tH\x00\x12\x39\n\nquery_list\x18\x02 \x01(\x0b\x32#.google.cloud.dataproc.v1.QueryListH\x00\x12Y\n\x10script_variables\x18\x03 
\x03(\x0b\x32:.google.cloud.dataproc.v1.SparkSqlJob.ScriptVariablesEntryB\x03\xe0\x41\x01\x12N\n\nproperties\x18\x04 \x03(\x0b\x32\x35.google.cloud.dataproc.v1.SparkSqlJob.PropertiesEntryB\x03\xe0\x41\x01\x12\x1a\n\rjar_file_uris\x18\x38 \x03(\tB\x03\xe0\x41\x01\x12\x44\n\x0elogging_config\x18\x06 \x01(\x0b\x32\'.google.cloud.dataproc.v1.LoggingConfigB\x03\xe0\x41\x01\x1a\x36\n\x14ScriptVariablesEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t:\x02\x38\x01\x1a\x31\n\x0fPropertiesEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t:\x02\x38\x01\x42\t\n\x07queries"\xf8\x03\n\x06PigJob\x12\x18\n\x0equery_file_uri\x18\x01 \x01(\tH\x00\x12\x39\n\nquery_list\x18\x02 \x01(\x0b\x32#.google.cloud.dataproc.v1.QueryListH\x00\x12 \n\x13\x63ontinue_on_failure\x18\x03 \x01(\x08\x42\x03\xe0\x41\x01\x12T\n\x10script_variables\x18\x04 \x03(\x0b\x32\x35.google.cloud.dataproc.v1.PigJob.ScriptVariablesEntryB\x03\xe0\x41\x01\x12I\n\nproperties\x18\x05 \x03(\x0b\x32\x30.google.cloud.dataproc.v1.PigJob.PropertiesEntryB\x03\xe0\x41\x01\x12\x1a\n\rjar_file_uris\x18\x06 \x03(\tB\x03\xe0\x41\x01\x12\x44\n\x0elogging_config\x18\x07 \x01(\x0b\x32\'.google.cloud.dataproc.v1.LoggingConfigB\x03\xe0\x41\x01\x1a\x36\n\x14ScriptVariablesEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t:\x02\x38\x01\x1a\x31\n\x0fPropertiesEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t:\x02\x38\x01\x42\t\n\x07queries"\xb6\x02\n\tSparkRJob\x12\x1c\n\x0fmain_r_file_uri\x18\x01 \x01(\tB\x03\xe0\x41\x02\x12\x11\n\x04\x61rgs\x18\x02 \x03(\tB\x03\xe0\x41\x01\x12\x16\n\tfile_uris\x18\x03 \x03(\tB\x03\xe0\x41\x01\x12\x19\n\x0c\x61rchive_uris\x18\x04 \x03(\tB\x03\xe0\x41\x01\x12L\n\nproperties\x18\x05 \x03(\x0b\x32\x33.google.cloud.dataproc.v1.SparkRJob.PropertiesEntryB\x03\xe0\x41\x01\x12\x44\n\x0elogging_config\x18\x06 \x01(\x0b\x32\'.google.cloud.dataproc.v1.LoggingConfigB\x03\xe0\x41\x01\x1a\x31\n\x0fPropertiesEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t:\x02\x38\x01"\x8a\x03\n\tPrestoJob\x12\x18\n\x0equery_file_uri\x18\x01 \x01(\tH\x00\x12\x39\n\nquery_list\x18\x02 \x01(\x0b\x32#.google.cloud.dataproc.v1.QueryListH\x00\x12 \n\x13\x63ontinue_on_failure\x18\x03 \x01(\x08\x42\x03\xe0\x41\x01\x12\x1a\n\routput_format\x18\x04 \x01(\tB\x03\xe0\x41\x01\x12\x18\n\x0b\x63lient_tags\x18\x05 \x03(\tB\x03\xe0\x41\x01\x12L\n\nproperties\x18\x06 \x03(\x0b\x32\x33.google.cloud.dataproc.v1.PrestoJob.PropertiesEntryB\x03\xe0\x41\x01\x12\x44\n\x0elogging_config\x18\x07 \x01(\x0b\x32\'.google.cloud.dataproc.v1.LoggingConfigB\x03\xe0\x41\x01\x1a\x31\n\x0fPropertiesEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t:\x02\x38\x01\x42\t\n\x07queries"D\n\x0cJobPlacement\x12\x19\n\x0c\x63luster_name\x18\x01 \x01(\tB\x03\xe0\x41\x02\x12\x19\n\x0c\x63luster_uuid\x18\x02 \x01(\tB\x03\xe0\x41\x03"\xd9\x03\n\tJobStatus\x12=\n\x05state\x18\x01 \x01(\x0e\x32).google.cloud.dataproc.v1.JobStatus.StateB\x03\xe0\x41\x03\x12\x17\n\x07\x64\x65tails\x18\x02 \x01(\tB\x06\xe0\x41\x03\xe0\x41\x01\x12\x39\n\x10state_start_time\x18\x06 \x01(\x0b\x32\x1a.google.protobuf.TimestampB\x03\xe0\x41\x03\x12\x43\n\x08substate\x18\x07 
\x01(\x0e\x32,.google.cloud.dataproc.v1.JobStatus.SubstateB\x03\xe0\x41\x03"\xa9\x01\n\x05State\x12\x15\n\x11STATE_UNSPECIFIED\x10\x00\x12\x0b\n\x07PENDING\x10\x01\x12\x0e\n\nSETUP_DONE\x10\x08\x12\x0b\n\x07RUNNING\x10\x02\x12\x12\n\x0e\x43\x41NCEL_PENDING\x10\x03\x12\x12\n\x0e\x43\x41NCEL_STARTED\x10\x07\x12\r\n\tCANCELLED\x10\x04\x12\x08\n\x04\x44ONE\x10\x05\x12\t\n\x05\x45RROR\x10\x06\x12\x13\n\x0f\x41TTEMPT_FAILURE\x10\t"H\n\x08Substate\x12\x0f\n\x0bUNSPECIFIED\x10\x00\x12\r\n\tSUBMITTED\x10\x01\x12\n\n\x06QUEUED\x10\x02\x12\x10\n\x0cSTALE_STATUS\x10\x03"<\n\x0cJobReference\x12\x17\n\nproject_id\x18\x01 \x01(\tB\x03\xe0\x41\x02\x12\x13\n\x06job_id\x18\x02 \x01(\tB\x03\xe0\x41\x01"\xa5\x02\n\x0fYarnApplication\x12\x11\n\x04name\x18\x01 \x01(\tB\x03\xe0\x41\x02\x12\x43\n\x05state\x18\x02 \x01(\x0e\x32/.google.cloud.dataproc.v1.YarnApplication.StateB\x03\xe0\x41\x02\x12\x15\n\x08progress\x18\x03 \x01(\x02\x42\x03\xe0\x41\x02\x12\x19\n\x0ctracking_url\x18\x04 \x01(\tB\x03\xe0\x41\x01"\x87\x01\n\x05State\x12\x15\n\x11STATE_UNSPECIFIED\x10\x00\x12\x07\n\x03NEW\x10\x01\x12\x0e\n\nNEW_SAVING\x10\x02\x12\r\n\tSUBMITTED\x10\x03\x12\x0c\n\x08\x41\x43\x43\x45PTED\x10\x04\x12\x0b\n\x07RUNNING\x10\x05\x12\x0c\n\x08\x46INISHED\x10\x06\x12\n\n\x06\x46\x41ILED\x10\x07\x12\n\n\x06KILLED\x10\x08"\xec\x08\n\x03Job\x12>\n\treference\x18\x01 \x01(\x0b\x32&.google.cloud.dataproc.v1.JobReferenceB\x03\xe0\x41\x01\x12>\n\tplacement\x18\x02 \x01(\x0b\x32&.google.cloud.dataproc.v1.JobPlacementB\x03\xe0\x41\x02\x12>\n\nhadoop_job\x18\x03 \x01(\x0b\x32#.google.cloud.dataproc.v1.HadoopJobB\x03\xe0\x41\x01H\x00\x12<\n\tspark_job\x18\x04 \x01(\x0b\x32".google.cloud.dataproc.v1.SparkJobB\x03\xe0\x41\x01H\x00\x12@\n\x0bpyspark_job\x18\x05 \x01(\x0b\x32$.google.cloud.dataproc.v1.PySparkJobB\x03\xe0\x41\x01H\x00\x12:\n\x08hive_job\x18\x06 \x01(\x0b\x32!.google.cloud.dataproc.v1.HiveJobB\x03\xe0\x41\x01H\x00\x12\x38\n\x07pig_job\x18\x07 \x01(\x0b\x32 .google.cloud.dataproc.v1.PigJobB\x03\xe0\x41\x01H\x00\x12?\n\x0bspark_r_job\x18\x15 \x01(\x0b\x32#.google.cloud.dataproc.v1.SparkRJobB\x03\xe0\x41\x01H\x00\x12\x43\n\rspark_sql_job\x18\x0c \x01(\x0b\x32%.google.cloud.dataproc.v1.SparkSqlJobB\x03\xe0\x41\x01H\x00\x12>\n\npresto_job\x18\x17 \x01(\x0b\x32#.google.cloud.dataproc.v1.PrestoJobB\x03\xe0\x41\x01H\x00\x12\x38\n\x06status\x18\x08 \x01(\x0b\x32#.google.cloud.dataproc.v1.JobStatusB\x03\xe0\x41\x03\x12@\n\x0estatus_history\x18\r \x03(\x0b\x32#.google.cloud.dataproc.v1.JobStatusB\x03\xe0\x41\x03\x12I\n\x11yarn_applications\x18\t \x03(\x0b\x32).google.cloud.dataproc.v1.YarnApplicationB\x03\xe0\x41\x03\x12\'\n\x1a\x64river_output_resource_uri\x18\x11 \x01(\tB\x03\xe0\x41\x03\x12%\n\x18\x64river_control_files_uri\x18\x0f \x01(\tB\x03\xe0\x41\x03\x12>\n\x06labels\x18\x12 \x03(\x0b\x32).google.cloud.dataproc.v1.Job.LabelsEntryB\x03\xe0\x41\x01\x12@\n\nscheduling\x18\x14 \x01(\x0b\x32\'.google.cloud.dataproc.v1.JobSchedulingB\x03\xe0\x41\x01\x12\x15\n\x08job_uuid\x18\x16 \x01(\tB\x03\xe0\x41\x03\x1a-\n\x0bLabelsEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t:\x02\x38\x01\x42\n\n\x08type_job"3\n\rJobScheduling\x12"\n\x15max_failures_per_hour\x18\x01 \x01(\x05\x42\x03\xe0\x41\x01"\x8a\x01\n\x10SubmitJobRequest\x12\x17\n\nproject_id\x18\x01 \x01(\tB\x03\xe0\x41\x02\x12\x13\n\x06region\x18\x03 \x01(\tB\x03\xe0\x41\x02\x12/\n\x03job\x18\x02 \x01(\x0b\x32\x1d.google.cloud.dataproc.v1.JobB\x03\xe0\x41\x02\x12\x17\n\nrequest_id\x18\x04 \x01(\tB\x03\xe0\x41\x01"R\n\rGetJobRequest\x12\x17\n\nproject_id\x18\x01 
\x01(\tB\x03\xe0\x41\x02\x12\x13\n\x06region\x18\x03 \x01(\tB\x03\xe0\x41\x02\x12\x13\n\x06job_id\x18\x02 \x01(\tB\x03\xe0\x41\x02"\xb3\x02\n\x0fListJobsRequest\x12\x17\n\nproject_id\x18\x01 \x01(\tB\x03\xe0\x41\x02\x12\x13\n\x06region\x18\x06 \x01(\tB\x03\xe0\x41\x02\x12\x16\n\tpage_size\x18\x02 \x01(\x05\x42\x03\xe0\x41\x01\x12\x17\n\npage_token\x18\x03 \x01(\tB\x03\xe0\x41\x01\x12\x19\n\x0c\x63luster_name\x18\x04 \x01(\tB\x03\xe0\x41\x01\x12Y\n\x11job_state_matcher\x18\x05 \x01(\x0e\x32\x39.google.cloud.dataproc.v1.ListJobsRequest.JobStateMatcherB\x03\xe0\x41\x01\x12\x13\n\x06\x66ilter\x18\x07 \x01(\tB\x03\xe0\x41\x01"6\n\x0fJobStateMatcher\x12\x07\n\x03\x41LL\x10\x00\x12\n\n\x06\x41\x43TIVE\x10\x01\x12\x0e\n\nNON_ACTIVE\x10\x02"\xbc\x01\n\x10UpdateJobRequest\x12\x17\n\nproject_id\x18\x01 \x01(\tB\x03\xe0\x41\x02\x12\x13\n\x06region\x18\x02 \x01(\tB\x03\xe0\x41\x02\x12\x13\n\x06job_id\x18\x03 \x01(\tB\x03\xe0\x41\x02\x12/\n\x03job\x18\x04 \x01(\x0b\x32\x1d.google.cloud.dataproc.v1.JobB\x03\xe0\x41\x02\x12\x34\n\x0bupdate_mask\x18\x05 \x01(\x0b\x32\x1a.google.protobuf.FieldMaskB\x03\xe0\x41\x02"b\n\x10ListJobsResponse\x12\x30\n\x04jobs\x18\x01 \x03(\x0b\x32\x1d.google.cloud.dataproc.v1.JobB\x03\xe0\x41\x03\x12\x1c\n\x0fnext_page_token\x18\x02 \x01(\tB\x03\xe0\x41\x01"U\n\x10\x43\x61ncelJobRequest\x12\x17\n\nproject_id\x18\x01 \x01(\tB\x03\xe0\x41\x02\x12\x13\n\x06region\x18\x03 \x01(\tB\x03\xe0\x41\x02\x12\x13\n\x06job_id\x18\x02 \x01(\tB\x03\xe0\x41\x02"U\n\x10\x44\x65leteJobRequest\x12\x17\n\nproject_id\x18\x01 \x01(\tB\x03\xe0\x41\x02\x12\x13\n\x06region\x18\x03 \x01(\tB\x03\xe0\x41\x02\x12\x13\n\x06job_id\x18\x02 \x01(\tB\x03\xe0\x41\x02\x32\x9b\t\n\rJobController\x12\xb1\x01\n\tSubmitJob\x12*.google.cloud.dataproc.v1.SubmitJobRequest\x1a\x1d.google.cloud.dataproc.v1.Job"Y\x82\xd3\xe4\x93\x02;"6/v1/projects/{project_id}/regions/{region}/jobs:submit:\x01*\xda\x41\x15project_id,region,job\x12\xad\x01\n\x06GetJob\x12\'.google.cloud.dataproc.v1.GetJobRequest\x1a\x1d.google.cloud.dataproc.v1.Job"[\x82\xd3\xe4\x93\x02:\x12\x38/v1/projects/{project_id}/regions/{region}/jobs/{job_id}\xda\x41\x18project_id,region,job_id\x12\xc9\x01\n\x08ListJobs\x12).google.cloud.dataproc.v1.ListJobsRequest\x1a*.google.cloud.dataproc.v1.ListJobsResponse"f\x82\xd3\xe4\x93\x02\x31\x12//v1/projects/{project_id}/regions/{region}/jobs\xda\x41\x11project_id,region\xda\x41\x18project_id,region,filter\x12\x9d\x01\n\tUpdateJob\x12*.google.cloud.dataproc.v1.UpdateJobRequest\x1a\x1d.google.cloud.dataproc.v1.Job"E\x82\xd3\xe4\x93\x02?28/v1/projects/{project_id}/regions/{region}/jobs/{job_id}:\x03job\x12\xbd\x01\n\tCancelJob\x12*.google.cloud.dataproc.v1.CancelJobRequest\x1a\x1d.google.cloud.dataproc.v1.Job"e\x82\xd3\xe4\x93\x02\x44"?/v1/projects/{project_id}/regions/{region}/jobs/{job_id}:cancel:\x01*\xda\x41\x18project_id,region,job_id\x12\xac\x01\n\tDeleteJob\x12*.google.cloud.dataproc.v1.DeleteJobRequest\x1a\x16.google.protobuf.Empty"[\x82\xd3\xe4\x93\x02:*8/v1/projects/{project_id}/regions/{region}/jobs/{job_id}\xda\x41\x18project_id,region,job_id\x1aK\xca\x41\x17\x64\x61taproc.googleapis.com\xd2\x41.https://www.googleapis.com/auth/cloud-platformBm\n\x1c\x63om.google.cloud.dataproc.v1B\tJobsProtoP\x01Z@google.golang.org/genproto/googleapis/cloud/dataproc/v1;dataprocb\x06proto3' ), dependencies=[ google_dot_api_dot_annotations__pb2.DESCRIPTOR, google_dot_api_dot_client__pb2.DESCRIPTOR, google_dot_api_dot_field__behavior__pb2.DESCRIPTOR, + google_dot_longrunning_dot_operations__pb2.DESCRIPTOR, 
google_dot_protobuf_dot_empty__pb2.DESCRIPTOR, google_dot_protobuf_dot_field__mask__pb2.DESCRIPTOR, google_dot_protobuf_dot_timestamp__pb2.DESCRIPTOR, @@ -84,8 +88,8 @@ ], containing_type=None, serialized_options=None, - serialized_start=465, - serialized_end=577, + serialized_start=502, + serialized_end=614, ) _sym_db.RegisterEnumDescriptor(_LOGGINGCONFIG_LEVEL) @@ -136,8 +140,8 @@ ], containing_type=None, serialized_options=None, - serialized_start=3471, - serialized_end=3640, + serialized_start=4218, + serialized_end=4387, ) _sym_db.RegisterEnumDescriptor(_JOBSTATUS_STATE) @@ -162,8 +166,8 @@ ], containing_type=None, serialized_options=None, - serialized_start=3642, - serialized_end=3714, + serialized_start=4389, + serialized_end=4461, ) _sym_db.RegisterEnumDescriptor(_JOBSTATUS_SUBSTATE) @@ -207,8 +211,8 @@ ], containing_type=None, serialized_options=None, - serialized_start=3937, - serialized_end=4072, + serialized_start=4684, + serialized_end=4819, ) _sym_db.RegisterEnumDescriptor(_YARNAPPLICATION_STATE) @@ -230,8 +234,8 @@ ], containing_type=None, serialized_options=None, - serialized_start=5582, - serialized_end=5636, + serialized_start=6488, + serialized_end=6542, ) _sym_db.RegisterEnumDescriptor(_LISTJOBSREQUEST_JOBSTATEMATCHER) @@ -288,8 +292,8 @@ syntax="proto3", extension_ranges=[], oneofs=[], - serialized_start=362, - serialized_end=463, + serialized_start=399, + serialized_end=500, ) _LOGGINGCONFIG = _descriptor.Descriptor( @@ -326,8 +330,8 @@ syntax="proto3", extension_ranges=[], oneofs=[], - serialized_start=256, - serialized_end=577, + serialized_start=293, + serialized_end=614, ) @@ -383,8 +387,8 @@ syntax="proto3", extension_ranges=[], oneofs=[], - serialized_start=890, - serialized_end=939, + serialized_start=927, + serialized_end=976, ) _HADOOPJOB = _descriptor.Descriptor( @@ -555,8 +559,8 @@ fields=[], ) ], - serialized_start=580, - serialized_end=949, + serialized_start=617, + serialized_end=986, ) @@ -612,8 +616,8 @@ syntax="proto3", extension_ranges=[], oneofs=[], - serialized_start=890, - serialized_end=939, + serialized_start=927, + serialized_end=976, ) _SPARKJOB = _descriptor.Descriptor( @@ -784,8 +788,8 @@ fields=[], ) ], - serialized_start=952, - serialized_end=1319, + serialized_start=989, + serialized_end=1356, ) @@ -841,8 +845,8 @@ syntax="proto3", extension_ranges=[], oneofs=[], - serialized_start=890, - serialized_end=939, + serialized_start=927, + serialized_end=976, ) _PYSPARKJOB = _descriptor.Descriptor( @@ -1005,8 +1009,8 @@ syntax="proto3", extension_ranges=[], oneofs=[], - serialized_start=1322, - serialized_end=1698, + serialized_start=1359, + serialized_end=1735, ) @@ -1044,8 +1048,8 @@ syntax="proto3", extension_ranges=[], oneofs=[], - serialized_start=1700, - serialized_end=1733, + serialized_start=1737, + serialized_end=1770, ) @@ -1101,8 +1105,8 @@ syntax="proto3", extension_ranges=[], oneofs=[], - serialized_start=2057, - serialized_end=2111, + serialized_start=2094, + serialized_end=2148, ) _HIVEJOB_PROPERTIESENTRY = _descriptor.Descriptor( @@ -1157,8 +1161,8 @@ syntax="proto3", extension_ranges=[], oneofs=[], - serialized_start=890, - serialized_end=939, + serialized_start=927, + serialized_end=976, ) _HIVEJOB = _descriptor.Descriptor( @@ -1293,8 +1297,8 @@ fields=[], ) ], - serialized_start=1736, - serialized_end=2173, + serialized_start=1773, + serialized_end=2210, ) @@ -1350,8 +1354,8 @@ syntax="proto3", extension_ranges=[], oneofs=[], - serialized_start=2057, - serialized_end=2111, + serialized_start=2094, + serialized_end=2148, ) 
_SPARKSQLJOB_PROPERTIESENTRY = _descriptor.Descriptor( @@ -1406,8 +1410,8 @@ syntax="proto3", extension_ranges=[], oneofs=[], - serialized_start=890, - serialized_end=939, + serialized_start=927, + serialized_end=976, ) _SPARKSQLJOB = _descriptor.Descriptor( @@ -1542,8 +1546,8 @@ fields=[], ) ], - serialized_start=2176, - serialized_end=2661, + serialized_start=2213, + serialized_end=2698, ) @@ -1599,8 +1603,8 @@ syntax="proto3", extension_ranges=[], oneofs=[], - serialized_start=2057, - serialized_end=2111, + serialized_start=2094, + serialized_end=2148, ) _PIGJOB_PROPERTIESENTRY = _descriptor.Descriptor( @@ -1655,8 +1659,8 @@ syntax="proto3", extension_ranges=[], oneofs=[], - serialized_start=890, - serialized_end=939, + serialized_start=927, + serialized_end=976, ) _PIGJOB = _descriptor.Descriptor( @@ -1809,8 +1813,404 @@ fields=[], ) ], - serialized_start=2664, - serialized_end=3168, + serialized_start=2701, + serialized_end=3205, +) + + +_SPARKRJOB_PROPERTIESENTRY = _descriptor.Descriptor( + name="PropertiesEntry", + full_name="google.cloud.dataproc.v1.SparkRJob.PropertiesEntry", + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name="key", + full_name="google.cloud.dataproc.v1.SparkRJob.PropertiesEntry.key", + index=0, + number=1, + type=9, + cpp_type=9, + label=1, + has_default_value=False, + default_value=_b("").decode("utf-8"), + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + serialized_options=None, + file=DESCRIPTOR, + ), + _descriptor.FieldDescriptor( + name="value", + full_name="google.cloud.dataproc.v1.SparkRJob.PropertiesEntry.value", + index=1, + number=2, + type=9, + cpp_type=9, + label=1, + has_default_value=False, + default_value=_b("").decode("utf-8"), + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + serialized_options=None, + file=DESCRIPTOR, + ), + ], + extensions=[], + nested_types=[], + enum_types=[], + serialized_options=_b("8\001"), + is_extendable=False, + syntax="proto3", + extension_ranges=[], + oneofs=[], + serialized_start=927, + serialized_end=976, +) + +_SPARKRJOB = _descriptor.Descriptor( + name="SparkRJob", + full_name="google.cloud.dataproc.v1.SparkRJob", + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name="main_r_file_uri", + full_name="google.cloud.dataproc.v1.SparkRJob.main_r_file_uri", + index=0, + number=1, + type=9, + cpp_type=9, + label=1, + has_default_value=False, + default_value=_b("").decode("utf-8"), + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + serialized_options=_b("\340A\002"), + file=DESCRIPTOR, + ), + _descriptor.FieldDescriptor( + name="args", + full_name="google.cloud.dataproc.v1.SparkRJob.args", + index=1, + number=2, + type=9, + cpp_type=9, + label=3, + has_default_value=False, + default_value=[], + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + serialized_options=_b("\340A\001"), + file=DESCRIPTOR, + ), + _descriptor.FieldDescriptor( + name="file_uris", + full_name="google.cloud.dataproc.v1.SparkRJob.file_uris", + index=2, + number=3, + type=9, + cpp_type=9, + label=3, + has_default_value=False, + default_value=[], + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + serialized_options=_b("\340A\001"), + file=DESCRIPTOR, 
+ ), + _descriptor.FieldDescriptor( + name="archive_uris", + full_name="google.cloud.dataproc.v1.SparkRJob.archive_uris", + index=3, + number=4, + type=9, + cpp_type=9, + label=3, + has_default_value=False, + default_value=[], + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + serialized_options=_b("\340A\001"), + file=DESCRIPTOR, + ), + _descriptor.FieldDescriptor( + name="properties", + full_name="google.cloud.dataproc.v1.SparkRJob.properties", + index=4, + number=5, + type=11, + cpp_type=10, + label=3, + has_default_value=False, + default_value=[], + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + serialized_options=_b("\340A\001"), + file=DESCRIPTOR, + ), + _descriptor.FieldDescriptor( + name="logging_config", + full_name="google.cloud.dataproc.v1.SparkRJob.logging_config", + index=5, + number=6, + type=11, + cpp_type=10, + label=1, + has_default_value=False, + default_value=None, + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + serialized_options=_b("\340A\001"), + file=DESCRIPTOR, + ), + ], + extensions=[], + nested_types=[_SPARKRJOB_PROPERTIESENTRY], + enum_types=[], + serialized_options=None, + is_extendable=False, + syntax="proto3", + extension_ranges=[], + oneofs=[], + serialized_start=3208, + serialized_end=3518, +) + + +_PRESTOJOB_PROPERTIESENTRY = _descriptor.Descriptor( + name="PropertiesEntry", + full_name="google.cloud.dataproc.v1.PrestoJob.PropertiesEntry", + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name="key", + full_name="google.cloud.dataproc.v1.PrestoJob.PropertiesEntry.key", + index=0, + number=1, + type=9, + cpp_type=9, + label=1, + has_default_value=False, + default_value=_b("").decode("utf-8"), + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + serialized_options=None, + file=DESCRIPTOR, + ), + _descriptor.FieldDescriptor( + name="value", + full_name="google.cloud.dataproc.v1.PrestoJob.PropertiesEntry.value", + index=1, + number=2, + type=9, + cpp_type=9, + label=1, + has_default_value=False, + default_value=_b("").decode("utf-8"), + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + serialized_options=None, + file=DESCRIPTOR, + ), + ], + extensions=[], + nested_types=[], + enum_types=[], + serialized_options=_b("8\001"), + is_extendable=False, + syntax="proto3", + extension_ranges=[], + oneofs=[], + serialized_start=927, + serialized_end=976, +) + +_PRESTOJOB = _descriptor.Descriptor( + name="PrestoJob", + full_name="google.cloud.dataproc.v1.PrestoJob", + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name="query_file_uri", + full_name="google.cloud.dataproc.v1.PrestoJob.query_file_uri", + index=0, + number=1, + type=9, + cpp_type=9, + label=1, + has_default_value=False, + default_value=_b("").decode("utf-8"), + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + serialized_options=None, + file=DESCRIPTOR, + ), + _descriptor.FieldDescriptor( + name="query_list", + full_name="google.cloud.dataproc.v1.PrestoJob.query_list", + index=1, + number=2, + type=11, + cpp_type=10, + label=1, + has_default_value=False, + default_value=None, + message_type=None, + enum_type=None, + containing_type=None, + 
is_extension=False, + extension_scope=None, + serialized_options=None, + file=DESCRIPTOR, + ), + _descriptor.FieldDescriptor( + name="continue_on_failure", + full_name="google.cloud.dataproc.v1.PrestoJob.continue_on_failure", + index=2, + number=3, + type=8, + cpp_type=7, + label=1, + has_default_value=False, + default_value=False, + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + serialized_options=_b("\340A\001"), + file=DESCRIPTOR, + ), + _descriptor.FieldDescriptor( + name="output_format", + full_name="google.cloud.dataproc.v1.PrestoJob.output_format", + index=3, + number=4, + type=9, + cpp_type=9, + label=1, + has_default_value=False, + default_value=_b("").decode("utf-8"), + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + serialized_options=_b("\340A\001"), + file=DESCRIPTOR, + ), + _descriptor.FieldDescriptor( + name="client_tags", + full_name="google.cloud.dataproc.v1.PrestoJob.client_tags", + index=4, + number=5, + type=9, + cpp_type=9, + label=3, + has_default_value=False, + default_value=[], + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + serialized_options=_b("\340A\001"), + file=DESCRIPTOR, + ), + _descriptor.FieldDescriptor( + name="properties", + full_name="google.cloud.dataproc.v1.PrestoJob.properties", + index=5, + number=6, + type=11, + cpp_type=10, + label=3, + has_default_value=False, + default_value=[], + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + serialized_options=_b("\340A\001"), + file=DESCRIPTOR, + ), + _descriptor.FieldDescriptor( + name="logging_config", + full_name="google.cloud.dataproc.v1.PrestoJob.logging_config", + index=6, + number=7, + type=11, + cpp_type=10, + label=1, + has_default_value=False, + default_value=None, + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + serialized_options=_b("\340A\001"), + file=DESCRIPTOR, + ), + ], + extensions=[], + nested_types=[_PRESTOJOB_PROPERTIESENTRY], + enum_types=[], + serialized_options=None, + is_extendable=False, + syntax="proto3", + extension_ranges=[], + oneofs=[ + _descriptor.OneofDescriptor( + name="queries", + full_name="google.cloud.dataproc.v1.PrestoJob.queries", + index=0, + containing_type=None, + fields=[], + ) + ], + serialized_start=3521, + serialized_end=3915, ) @@ -1866,8 +2266,8 @@ syntax="proto3", extension_ranges=[], oneofs=[], - serialized_start=3170, - serialized_end=3238, + serialized_start=3917, + serialized_end=3985, ) @@ -1959,8 +2359,8 @@ syntax="proto3", extension_ranges=[], oneofs=[], - serialized_start=3241, - serialized_end=3714, + serialized_start=3988, + serialized_end=4461, ) @@ -2016,8 +2416,8 @@ syntax="proto3", extension_ranges=[], oneofs=[], - serialized_start=3716, - serialized_end=3776, + serialized_start=4463, + serialized_end=4523, ) @@ -2109,8 +2509,8 @@ syntax="proto3", extension_ranges=[], oneofs=[], - serialized_start=3779, - serialized_end=4072, + serialized_start=4526, + serialized_end=4819, ) @@ -2166,8 +2566,8 @@ syntax="proto3", extension_ranges=[], oneofs=[], - serialized_start=4991, - serialized_end=5036, + serialized_start=5897, + serialized_end=5942, ) _JOB = _descriptor.Descriptor( @@ -2228,7 +2628,7 @@ containing_type=None, is_extension=False, extension_scope=None, - serialized_options=None, + serialized_options=_b("\340A\001"), file=DESCRIPTOR, ), 
_descriptor.FieldDescriptor( @@ -2246,7 +2646,7 @@ containing_type=None, is_extension=False, extension_scope=None, - serialized_options=None, + serialized_options=_b("\340A\001"), file=DESCRIPTOR, ), _descriptor.FieldDescriptor( @@ -2264,7 +2664,7 @@ containing_type=None, is_extension=False, extension_scope=None, - serialized_options=None, + serialized_options=_b("\340A\001"), file=DESCRIPTOR, ), _descriptor.FieldDescriptor( @@ -2282,7 +2682,7 @@ containing_type=None, is_extension=False, extension_scope=None, - serialized_options=None, + serialized_options=_b("\340A\001"), file=DESCRIPTOR, ), _descriptor.FieldDescriptor( @@ -2300,13 +2700,31 @@ containing_type=None, is_extension=False, extension_scope=None, - serialized_options=None, + serialized_options=_b("\340A\001"), + file=DESCRIPTOR, + ), + _descriptor.FieldDescriptor( + name="spark_r_job", + full_name="google.cloud.dataproc.v1.Job.spark_r_job", + index=7, + number=21, + type=11, + cpp_type=10, + label=1, + has_default_value=False, + default_value=None, + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + serialized_options=_b("\340A\001"), file=DESCRIPTOR, ), _descriptor.FieldDescriptor( name="spark_sql_job", full_name="google.cloud.dataproc.v1.Job.spark_sql_job", - index=7, + index=8, number=12, type=11, cpp_type=10, @@ -2318,13 +2736,31 @@ containing_type=None, is_extension=False, extension_scope=None, - serialized_options=None, + serialized_options=_b("\340A\001"), + file=DESCRIPTOR, + ), + _descriptor.FieldDescriptor( + name="presto_job", + full_name="google.cloud.dataproc.v1.Job.presto_job", + index=9, + number=23, + type=11, + cpp_type=10, + label=1, + has_default_value=False, + default_value=None, + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + serialized_options=_b("\340A\001"), file=DESCRIPTOR, ), _descriptor.FieldDescriptor( name="status", full_name="google.cloud.dataproc.v1.Job.status", - index=8, + index=10, number=8, type=11, cpp_type=10, @@ -2342,7 +2778,7 @@ _descriptor.FieldDescriptor( name="status_history", full_name="google.cloud.dataproc.v1.Job.status_history", - index=9, + index=11, number=13, type=11, cpp_type=10, @@ -2360,7 +2796,7 @@ _descriptor.FieldDescriptor( name="yarn_applications", full_name="google.cloud.dataproc.v1.Job.yarn_applications", - index=10, + index=12, number=9, type=11, cpp_type=10, @@ -2378,7 +2814,7 @@ _descriptor.FieldDescriptor( name="driver_output_resource_uri", full_name="google.cloud.dataproc.v1.Job.driver_output_resource_uri", - index=11, + index=13, number=17, type=9, cpp_type=9, @@ -2396,7 +2832,7 @@ _descriptor.FieldDescriptor( name="driver_control_files_uri", full_name="google.cloud.dataproc.v1.Job.driver_control_files_uri", - index=12, + index=14, number=15, type=9, cpp_type=9, @@ -2414,7 +2850,7 @@ _descriptor.FieldDescriptor( name="labels", full_name="google.cloud.dataproc.v1.Job.labels", - index=13, + index=15, number=18, type=11, cpp_type=10, @@ -2432,7 +2868,7 @@ _descriptor.FieldDescriptor( name="scheduling", full_name="google.cloud.dataproc.v1.Job.scheduling", - index=14, + index=16, number=20, type=11, cpp_type=10, @@ -2450,7 +2886,7 @@ _descriptor.FieldDescriptor( name="job_uuid", full_name="google.cloud.dataproc.v1.Job.job_uuid", - index=15, + index=17, number=22, type=9, cpp_type=9, @@ -2482,8 +2918,8 @@ fields=[], ) ], - serialized_start=4075, - serialized_end=5048, + serialized_start=4822, + serialized_end=5954, ) @@ -2521,8 +2957,8 @@ 
syntax="proto3", extension_ranges=[], oneofs=[], - serialized_start=5050, - serialized_end=5101, + serialized_start=5956, + serialized_end=6007, ) @@ -2614,8 +3050,8 @@ syntax="proto3", extension_ranges=[], oneofs=[], - serialized_start=5104, - serialized_end=5242, + serialized_start=6010, + serialized_end=6148, ) @@ -2689,8 +3125,8 @@ syntax="proto3", extension_ranges=[], oneofs=[], - serialized_start=5244, - serialized_end=5326, + serialized_start=6150, + serialized_end=6232, ) @@ -2836,8 +3272,8 @@ syntax="proto3", extension_ranges=[], oneofs=[], - serialized_start=5329, - serialized_end=5636, + serialized_start=6235, + serialized_end=6542, ) @@ -2947,8 +3383,8 @@ syntax="proto3", extension_ranges=[], oneofs=[], - serialized_start=5639, - serialized_end=5827, + serialized_start=6545, + serialized_end=6733, ) @@ -3004,8 +3440,8 @@ syntax="proto3", extension_ranges=[], oneofs=[], - serialized_start=5829, - serialized_end=5927, + serialized_start=6735, + serialized_end=6833, ) @@ -3079,8 +3515,8 @@ syntax="proto3", extension_ranges=[], oneofs=[], - serialized_start=5929, - serialized_end=6014, + serialized_start=6835, + serialized_end=6920, ) @@ -3154,8 +3590,8 @@ syntax="proto3", extension_ranges=[], oneofs=[], - serialized_start=6016, - serialized_end=6101, + serialized_start=6922, + serialized_end=7007, ) _LOGGINGCONFIG_DRIVERLOGLEVELSENTRY.fields_by_name[ @@ -3248,6 +3684,25 @@ _PIGJOB.fields_by_name["query_list"].containing_oneof = _PIGJOB.oneofs_by_name[ "queries" ] +_SPARKRJOB_PROPERTIESENTRY.containing_type = _SPARKRJOB +_SPARKRJOB.fields_by_name["properties"].message_type = _SPARKRJOB_PROPERTIESENTRY +_SPARKRJOB.fields_by_name["logging_config"].message_type = _LOGGINGCONFIG +_PRESTOJOB_PROPERTIESENTRY.containing_type = _PRESTOJOB +_PRESTOJOB.fields_by_name["query_list"].message_type = _QUERYLIST +_PRESTOJOB.fields_by_name["properties"].message_type = _PRESTOJOB_PROPERTIESENTRY +_PRESTOJOB.fields_by_name["logging_config"].message_type = _LOGGINGCONFIG +_PRESTOJOB.oneofs_by_name["queries"].fields.append( + _PRESTOJOB.fields_by_name["query_file_uri"] +) +_PRESTOJOB.fields_by_name[ + "query_file_uri" +].containing_oneof = _PRESTOJOB.oneofs_by_name["queries"] +_PRESTOJOB.oneofs_by_name["queries"].fields.append( + _PRESTOJOB.fields_by_name["query_list"] +) +_PRESTOJOB.fields_by_name["query_list"].containing_oneof = _PRESTOJOB.oneofs_by_name[ + "queries" +] _JOBSTATUS.fields_by_name["state"].enum_type = _JOBSTATUS_STATE _JOBSTATUS.fields_by_name[ "state_start_time" @@ -3265,7 +3720,9 @@ _JOB.fields_by_name["pyspark_job"].message_type = _PYSPARKJOB _JOB.fields_by_name["hive_job"].message_type = _HIVEJOB _JOB.fields_by_name["pig_job"].message_type = _PIGJOB +_JOB.fields_by_name["spark_r_job"].message_type = _SPARKRJOB _JOB.fields_by_name["spark_sql_job"].message_type = _SPARKSQLJOB +_JOB.fields_by_name["presto_job"].message_type = _PRESTOJOB _JOB.fields_by_name["status"].message_type = _JOBSTATUS _JOB.fields_by_name["status_history"].message_type = _JOBSTATUS _JOB.fields_by_name["yarn_applications"].message_type = _YARNAPPLICATION @@ -3281,8 +3738,12 @@ _JOB.fields_by_name["hive_job"].containing_oneof = _JOB.oneofs_by_name["type_job"] _JOB.oneofs_by_name["type_job"].fields.append(_JOB.fields_by_name["pig_job"]) _JOB.fields_by_name["pig_job"].containing_oneof = _JOB.oneofs_by_name["type_job"] +_JOB.oneofs_by_name["type_job"].fields.append(_JOB.fields_by_name["spark_r_job"]) +_JOB.fields_by_name["spark_r_job"].containing_oneof = _JOB.oneofs_by_name["type_job"] 
_JOB.oneofs_by_name["type_job"].fields.append(_JOB.fields_by_name["spark_sql_job"]) _JOB.fields_by_name["spark_sql_job"].containing_oneof = _JOB.oneofs_by_name["type_job"] +_JOB.oneofs_by_name["type_job"].fields.append(_JOB.fields_by_name["presto_job"]) +_JOB.fields_by_name["presto_job"].containing_oneof = _JOB.oneofs_by_name["type_job"] _SUBMITJOBREQUEST.fields_by_name["job"].message_type = _JOB _LISTJOBSREQUEST.fields_by_name[ "job_state_matcher" ] @@ -3301,6 +3762,8 @@ DESCRIPTOR.message_types_by_name["HiveJob"] = _HIVEJOB DESCRIPTOR.message_types_by_name["SparkSqlJob"] = _SPARKSQLJOB DESCRIPTOR.message_types_by_name["PigJob"] = _PIGJOB +DESCRIPTOR.message_types_by_name["SparkRJob"] = _SPARKRJOB +DESCRIPTOR.message_types_by_name["PrestoJob"] = _PRESTOJOB DESCRIPTOR.message_types_by_name["JobPlacement"] = _JOBPLACEMENT DESCRIPTOR.message_types_by_name["JobStatus"] = _JOBSTATUS DESCRIPTOR.message_types_by_name["JobReference"] = _JOBREFERENCE @@ -3739,6 +4202,108 @@ _sym_db.RegisterMessage(PigJob.ScriptVariablesEntry) _sym_db.RegisterMessage(PigJob.PropertiesEntry) +SparkRJob = _reflection.GeneratedProtocolMessageType( + "SparkRJob", + (_message.Message,), + dict( + PropertiesEntry=_reflection.GeneratedProtocolMessageType( + "PropertiesEntry", + (_message.Message,), + dict( + DESCRIPTOR=_SPARKRJOB_PROPERTIESENTRY, + __module__="google.cloud.dataproc_v1.proto.jobs_pb2" + # @@protoc_insertion_point(class_scope:google.cloud.dataproc.v1.SparkRJob.PropertiesEntry) + ), + ), + DESCRIPTOR=_SPARKRJOB, + __module__="google.cloud.dataproc_v1.proto.jobs_pb2", + __doc__="""A Dataproc job for running `Apache + SparkR `__ + applications on YARN. + + + Attributes: + main_r_file_uri: + Required. The HCFS URI of the main R file to use as the + driver. Must be a .R file. + args: + Optional. The arguments to pass to the driver. Do not include + arguments, such as ``--conf``, that can be set as job + properties, since a collision may occur that causes an + incorrect job submission. + file_uris: + Optional. HCFS URIs of files to be copied to the working + directory of R drivers and distributed tasks. Useful for + naively parallel tasks. + archive_uris: + Optional. HCFS URIs of archives to be extracted in the working + directory of Spark drivers and tasks. Supported file types: + .jar, .tar, .tar.gz, .tgz, and .zip. + properties: + Optional. A mapping of property names to values, used to + configure SparkR. Properties that conflict with values set by + the Dataproc API may be overwritten. Can include properties + set in /etc/spark/conf/spark-defaults.conf and classes in user + code. + logging_config: + Optional. The runtime log config for job execution. + """, + # @@protoc_insertion_point(class_scope:google.cloud.dataproc.v1.SparkRJob) + ), +) +_sym_db.RegisterMessage(SparkRJob) +_sym_db.RegisterMessage(SparkRJob.PropertiesEntry) + +PrestoJob = _reflection.GeneratedProtocolMessageType( + "PrestoJob", + (_message.Message,), + dict( + PropertiesEntry=_reflection.GeneratedProtocolMessageType( + "PropertiesEntry", + (_message.Message,), + dict( + DESCRIPTOR=_PRESTOJOB_PROPERTIESENTRY, + __module__="google.cloud.dataproc_v1.proto.jobs_pb2" + # @@protoc_insertion_point(class_scope:google.cloud.dataproc.v1.PrestoJob.PropertiesEntry) + ), + ), + DESCRIPTOR=_PRESTOJOB, + __module__="google.cloud.dataproc_v1.proto.jobs_pb2", + __doc__="""A Dataproc job for running + `Presto `__ queries. + + + Attributes: + queries: + Required.
The sequence of Presto queries to execute, specified + as either an HCFS file URI or as a list of queries. + query_file_uri: + The HCFS URI of the script that contains SQL queries. + query_list: + A list of queries. + continue_on_failure: + Optional. Whether to continue executing queries if a query + fails. The default value is ``false``. Setting to ``true`` can + be useful when executing independent parallel queries. + output_format: + Optional. The format in which query output will be displayed. + See the Presto documentation for supported output formats. + client_tags: + Optional. Presto client tags to attach to this query. + properties: + Optional. A mapping of property names to values. Used to set + Presto `session properties + `__. + Equivalent to using the --session flag in the Presto CLI. + logging_config: + Optional. The runtime log config for job execution. + """, + # @@protoc_insertion_point(class_scope:google.cloud.dataproc.v1.PrestoJob) + ), +) +_sym_db.RegisterMessage(PrestoJob) +_sym_db.RegisterMessage(PrestoJob.PropertiesEntry) + JobPlacement = _reflection.GeneratedProtocolMessageType( "JobPlacement", (_message.Message,), @@ -3876,17 +4441,21 @@ Required. The application/framework-specific portion of the job. hadoop_job: - Job is a Hadoop job. + Optional. Job is a Hadoop job. spark_job: - Job is a Spark job. + Optional. Job is a Spark job. pyspark_job: - Job is a Pyspark job. + Optional. Job is a PySpark job. hive_job: - Job is a Hive job. + Optional. Job is a Hive job. pig_job: - Job is a Pig job. + Optional. Job is a Pig job. + spark_r_job: + Optional. Job is a SparkR job. spark_sql_job: - Job is a SparkSql job. + Optional. Job is a SparkSql job. + presto_job: + Optional. Job is a Presto job. status: Output only. The job status. Additional application-specific status information may be contained in the type\_job and @@ -4197,6 +4766,19 @@ _PIGJOB.fields_by_name["properties"]._options = None _PIGJOB.fields_by_name["jar_file_uris"]._options = None _PIGJOB.fields_by_name["logging_config"]._options = None +_SPARKRJOB_PROPERTIESENTRY._options = None +_SPARKRJOB.fields_by_name["main_r_file_uri"]._options = None +_SPARKRJOB.fields_by_name["args"]._options = None +_SPARKRJOB.fields_by_name["file_uris"]._options = None +_SPARKRJOB.fields_by_name["archive_uris"]._options = None +_SPARKRJOB.fields_by_name["properties"]._options = None +_SPARKRJOB.fields_by_name["logging_config"]._options = None +_PRESTOJOB_PROPERTIESENTRY._options = None +_PRESTOJOB.fields_by_name["continue_on_failure"]._options = None +_PRESTOJOB.fields_by_name["output_format"]._options = None +_PRESTOJOB.fields_by_name["client_tags"]._options = None +_PRESTOJOB.fields_by_name["properties"]._options = None +_PRESTOJOB.fields_by_name["logging_config"]._options = None _JOBPLACEMENT.fields_by_name["cluster_name"]._options = None _JOBPLACEMENT.fields_by_name["cluster_uuid"]._options = None _JOBSTATUS.fields_by_name["state"]._options = None @@ -4212,6 +4794,14 @@ _JOB_LABELSENTRY._options = None _JOB.fields_by_name["reference"]._options = None _JOB.fields_by_name["placement"]._options = None +_JOB.fields_by_name["hadoop_job"]._options = None +_JOB.fields_by_name["spark_job"]._options = None +_JOB.fields_by_name["pyspark_job"]._options = None +_JOB.fields_by_name["hive_job"]._options = None +_JOB.fields_by_name["pig_job"]._options = None +_JOB.fields_by_name["spark_r_job"]._options = None +_JOB.fields_by_name["spark_sql_job"]._options = None +_JOB.fields_by_name["presto_job"]._options = None
_JOB.fields_by_name["status"]._options = None _JOB.fields_by_name["status_history"]._options = None _JOB.fields_by_name["yarn_applications"]._options = None @@ -4257,8 +4847,8 @@ serialized_options=_b( "\312A\027dataproc.googleapis.com\322A.https://www.googleapis.com/auth/cloud-platform" ), - serialized_start=6104, - serialized_end=7283, + serialized_start=7010, + serialized_end=8189, methods=[ _descriptor.MethodDescriptor( name="SubmitJob", diff --git a/google/cloud/dataproc_v1/proto/jobs_pb2_grpc.py b/google/cloud/dataproc_v1/proto/jobs_pb2_grpc.py index d2706382..f10f0252 100644 --- a/google/cloud/dataproc_v1/proto/jobs_pb2_grpc.py +++ b/google/cloud/dataproc_v1/proto/jobs_pb2_grpc.py @@ -84,9 +84,9 @@ def UpdateJob(self, request, context): def CancelJob(self, request, context): """Starts a job cancellation request. To access the job resource after cancellation, call - [regions/{region}/jobs.list](/dataproc/docs/reference/rest/v1/projects.regions.jobs/list) + [regions/{region}/jobs.list](https://cloud.google.com/dataproc/docs/reference/rest/v1/projects.regions.jobs/list) or - [regions/{region}/jobs.get](/dataproc/docs/reference/rest/v1/projects.regions.jobs/get). + [regions/{region}/jobs.get](https://cloud.google.com/dataproc/docs/reference/rest/v1/projects.regions.jobs/get). """ context.set_code(grpc.StatusCode.UNIMPLEMENTED) context.set_details("Method not implemented!") diff --git a/google/cloud/dataproc_v1/proto/operations.proto b/google/cloud/dataproc_v1/proto/operations.proto index 4af2a5f8..724d2a89 100644 --- a/google/cloud/dataproc_v1/proto/operations.proto +++ b/google/cloud/dataproc_v1/proto/operations.proto @@ -1,4 +1,4 @@ -// Copyright 2019 Google LLC. +// Copyright 2020 Google LLC // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -11,7 +11,6 @@ // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. -// syntax = "proto3"; diff --git a/google/cloud/dataproc_v1/proto/shared.proto b/google/cloud/dataproc_v1/proto/shared.proto index 74bd56a8..c6ff8f28 100644 --- a/google/cloud/dataproc_v1/proto/shared.proto +++ b/google/cloud/dataproc_v1/proto/shared.proto @@ -1,4 +1,4 @@ -// Copyright 2019 Google LLC. +// Copyright 2020 Google LLC // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -11,7 +11,6 @@ // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. -// syntax = "proto3"; diff --git a/google/cloud/dataproc_v1/proto/workflow_templates.proto b/google/cloud/dataproc_v1/proto/workflow_templates.proto index 30b5ced4..2db55798 100644 --- a/google/cloud/dataproc_v1/proto/workflow_templates.proto +++ b/google/cloud/dataproc_v1/proto/workflow_templates.proto @@ -1,4 +1,4 @@ -// Copyright 2019 Google LLC. +// Copyright 2020 Google LLC // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -11,7 +11,6 @@ // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. 
-// syntax = "proto3"; @@ -78,9 +77,9 @@ service WorkflowTemplateService { // clusters to be deleted. // // The [Operation.metadata][google.longrunning.Operation.metadata] will be - // [WorkflowMetadata](/dataproc/docs/reference/rpc/google.cloud.dataproc.v1#workflowmetadata). + // [WorkflowMetadata](https://cloud.google.com/dataproc/docs/reference/rpc/google.cloud.dataproc.v1#workflowmetadata). // Also see [Using - // WorkflowMetadata](/dataproc/docs/concepts/workflows/debugging#using_workflowmetadata). + // WorkflowMetadata](https://cloud.google.com/dataproc/docs/concepts/workflows/debugging#using_workflowmetadata). // // On successful completion, // [Operation.response][google.longrunning.Operation.response] will be @@ -119,9 +118,9 @@ service WorkflowTemplateService { // clusters to be deleted. // // The [Operation.metadata][google.longrunning.Operation.metadata] will be - // [WorkflowMetadata](/dataproc/docs/reference/rpc/google.cloud.dataproc.v1#workflowmetadata). + // [WorkflowMetadata](https://cloud.google.com/dataproc/docs/reference/rpc/google.cloud.dataproc.v1#workflowmetadata). // Also see [Using - // WorkflowMetadata](/dataproc/docs/concepts/workflows/debugging#using_workflowmetadata). + // WorkflowMetadata](https://cloud.google.com/dataproc/docs/concepts/workflows/debugging#using_workflowmetadata). // // On successful completion, // [Operation.response][google.longrunning.Operation.response] will be @@ -320,22 +319,16 @@ message OrderedJob { // Required. The job definition. oneof job_type { - // Job is a Hadoop job. HadoopJob hadoop_job = 2; - // Job is a Spark job. SparkJob spark_job = 3; - // Job is a Pyspark job. PySparkJob pyspark_job = 4; - // Job is a Hive job. HiveJob hive_job = 5; - // Job is a Pig job. PigJob pig_job = 6; - // Job is a SparkSql job. SparkSqlJob spark_sql_job = 7; } @@ -708,9 +701,7 @@ message UpdateWorkflowTemplateRequest { // Required. The updated workflow template. // // The `template.version` field must match the current version. - WorkflowTemplate template = 1 [ - (google.api.field_behavior) = REQUIRED - ]; + WorkflowTemplate template = 1 [(google.api.field_behavior) = REQUIRED]; } // A request to list workflow templates in a project. diff --git a/google/cloud/dataproc_v1/proto/workflow_templates_pb2.py b/google/cloud/dataproc_v1/proto/workflow_templates_pb2.py index 0c3125b1..10d4ade0 100644 --- a/google/cloud/dataproc_v1/proto/workflow_templates_pb2.py +++ b/google/cloud/dataproc_v1/proto/workflow_templates_pb2.py @@ -2641,18 +2641,6 @@ hyphen. Must consist of between 3 and 50 characters. job_type: Required. The job definition. - hadoop_job: - Job is a Hadoop job. - spark_job: - Job is a Spark job. - pyspark_job: - Job is a Pyspark job. - hive_job: - Job is a Hive job. - pig_job: - Job is a Pig job. - spark_sql_job: - Job is a SparkSql job. labels: Optional. The labels to associate with this job. Label keys must be between 1 and 63 characters long. Label values must be between diff --git a/google/cloud/dataproc_v1/proto/workflow_templates_pb2_grpc.py b/google/cloud/dataproc_v1/proto/workflow_templates_pb2_grpc.py index 3d9079ac..f766e9ea 100644 --- a/google/cloud/dataproc_v1/proto/workflow_templates_pb2_grpc.py +++ b/google/cloud/dataproc_v1/proto/workflow_templates_pb2_grpc.py @@ -94,9 +94,9 @@ def InstantiateWorkflowTemplate(self, request, context): clusters to be deleted. The [Operation.metadata][google.longrunning.Operation.metadata] will be - [WorkflowMetadata](/dataproc/docs/reference/rpc/google.cloud.dataproc.v1#workflowmetadata). 
+ [WorkflowMetadata](https://cloud.google.com/dataproc/docs/reference/rpc/google.cloud.dataproc.v1#workflowmetadata). Also see [Using - WorkflowMetadata](/dataproc/docs/concepts/workflows/debugging#using_workflowmetadata). + WorkflowMetadata](https://cloud.google.com/dataproc/docs/concepts/workflows/debugging#using_workflowmetadata). On successful completion, [Operation.response][google.longrunning.Operation.response] will be @@ -124,9 +124,9 @@ def InstantiateInlineWorkflowTemplate(self, request, context): clusters to be deleted. The [Operation.metadata][google.longrunning.Operation.metadata] will be - [WorkflowMetadata](/dataproc/docs/reference/rpc/google.cloud.dataproc.v1#workflowmetadata). + [WorkflowMetadata](https://cloud.google.com/dataproc/docs/reference/rpc/google.cloud.dataproc.v1#workflowmetadata). Also see [Using - WorkflowMetadata](/dataproc/docs/concepts/workflows/debugging#using_workflowmetadata). + WorkflowMetadata](https://cloud.google.com/dataproc/docs/concepts/workflows/debugging#using_workflowmetadata). On successful completion, [Operation.response][google.longrunning.Operation.response] will be diff --git a/google/cloud/dataproc_v1beta2/gapic/autoscaling_policy_service_client.py b/google/cloud/dataproc_v1beta2/gapic/autoscaling_policy_service_client.py index 9f7b4695..e2d19713 100644 --- a/google/cloud/dataproc_v1beta2/gapic/autoscaling_policy_service_client.py +++ b/google/cloud/dataproc_v1beta2/gapic/autoscaling_policy_service_client.py @@ -47,7 +47,7 @@ class AutoscalingPolicyServiceClient(object): """ The API interface for managing autoscaling policies in the - Google Cloud Dataproc API. + Cloud Dataproc API. """ SERVICE_ADDRESS = "dataproc.googleapis.com:443" diff --git a/google/cloud/dataproc_v1beta2/gapic/cluster_controller_client.py b/google/cloud/dataproc_v1beta2/gapic/cluster_controller_client.py index e64fc0d7..267dee2a 100644 --- a/google/cloud/dataproc_v1beta2/gapic/cluster_controller_client.py +++ b/google/cloud/dataproc_v1beta2/gapic/cluster_controller_client.py @@ -239,7 +239,7 @@ def create_cluster( Args: project_id (str): Required. The ID of the Google Cloud Platform project that the cluster belongs to. - region (str): Required. The Cloud Dataproc region in which to handle the request. + region (str): Required. The Dataproc region in which to handle the request. cluster (Union[dict, ~google.cloud.dataproc_v1beta2.types.Cluster]): Required. The cluster to create. If a dict is provided, it must be of the same form as the protobuf @@ -350,7 +350,7 @@ def update_cluster( Args: project_id (str): Required. The ID of the Google Cloud Platform project the cluster belongs to. - region (str): Required. The Cloud Dataproc region in which to handle the request. + region (str): Required. The Dataproc region in which to handle the request. cluster_name (str): Required. The cluster name. cluster (Union[dict, ~google.cloud.dataproc_v1beta2.types.Cluster]): Required. The changes to the cluster. @@ -428,10 +428,12 @@ def update_cluster( message :class:`~google.cloud.dataproc_v1beta2.types.FieldMask` graceful_decommission_timeout (Union[dict, ~google.cloud.dataproc_v1beta2.types.Duration]): Optional. Timeout for graceful YARN decomissioning. Graceful decommissioning allows removing nodes from the cluster without - interrupting jobs in progress. Timeout specifies how long to wait for jobs - in progress to finish before forcefully removing nodes (and potentially - interrupting jobs). 
Default timeout is 0 (for forceful decommission), and - the maximum allowed timeout is 1 day. + interrupting jobs in progress. Timeout specifies how long to wait for + jobs in progress to finish before forcefully removing nodes (and + potentially interrupting jobs). Default timeout is 0 (for forceful + decommission), and the maximum allowed timeout is 1 day (see JSON + representation of + `Duration `__). Only supported on Dataproc image versions 1.2 and higher. @@ -541,7 +543,7 @@ def delete_cluster( Args: project_id (str): Required. The ID of the Google Cloud Platform project that the cluster belongs to. - region (str): Required. The Cloud Dataproc region in which to handle the request. + region (str): Required. The Dataproc region in which to handle the request. cluster_name (str): Required. The cluster name. cluster_uuid (str): Optional. Specifying the ``cluster_uuid`` means the RPC should fail (with error NOT\_FOUND) if cluster with specified UUID does not exist. @@ -634,7 +636,7 @@ def get_cluster( Args: project_id (str): Required. The ID of the Google Cloud Platform project that the cluster belongs to. - region (str): Required. The Cloud Dataproc region in which to handle the request. + region (str): Required. The Dataproc region in which to handle the request. cluster_name (str): Required. The cluster name. retry (Optional[google.api_core.retry.Retry]): A retry object used to retry requests. If ``None`` is specified, requests will @@ -714,7 +716,7 @@ def list_clusters( Args: project_id (str): Required. The ID of the Google Cloud Platform project that the cluster belongs to. - region (str): Required. The Cloud Dataproc region in which to handle the request. + region (str): Required. The Dataproc region in which to handle the request. filter_ (str): Optional. A filter constraining the clusters to list. Filters are case-sensitive and have the following syntax: @@ -805,7 +807,7 @@ def diagnose_cluster( will be `ClusterOperationMetadata `__. After the operation completes, ``Operation.response`` contains - `Empty `__. + ``Empty``. Example: >>> from google.cloud import dataproc_v1beta2 @@ -835,7 +837,7 @@ def diagnose_cluster( Args: project_id (str): Required. The ID of the Google Cloud Platform project that the cluster belongs to. - region (str): Required. The Cloud Dataproc region in which to handle the request. + region (str): Required. The Dataproc region in which to handle the request. cluster_name (str): Required. The cluster name. retry (Optional[google.api_core.retry.Retry]): A retry object used to retry requests. If ``None`` is specified, requests will diff --git a/google/cloud/dataproc_v1beta2/gapic/enums.py b/google/cloud/dataproc_v1beta2/gapic/enums.py index 8c3b0980..d29e1992 100644 --- a/google/cloud/dataproc_v1beta2/gapic/enums.py +++ b/google/cloud/dataproc_v1beta2/gapic/enums.py @@ -97,7 +97,7 @@ class Substate(enum.IntEnum): Applies to RUNNING state. STALE_STATUS (int): The agent-reported status is out of date (may occur if - Cloud Dataproc loses communication with Agent). + Dataproc loses communication with Agent). Applies to RUNNING state. """ @@ -156,7 +156,7 @@ class Substate(enum.IntEnum): Applies to RUNNING state. STALE_STATUS (int): The agent-reported status is out of date, which may be caused by a - loss of communication between the agent and Cloud Dataproc. If the + loss of communication between the agent and Dataproc. If the agent does not send a timely update, the job will fail. Applies to RUNNING state. 
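Reviewer note (not part of the patch): the v1beta2 ``update_cluster`` docstring above now points at the JSON representation of ``Duration``. A minimal sketch of what that means for a caller, using the doctest style of the client docstrings; the project, region, and cluster names are placeholders, and the timeout is passed as a dict of the same form as the protobuf ``Duration`` message:

>>> from google.cloud import dataproc_v1beta2
>>>
>>> client = dataproc_v1beta2.ClusterControllerClient()
>>> # Shrink the primary worker group; wait up to 1 hour (maximum is 1 day)
>>> # for in-progress work before nodes are forcefully removed.
>>> cluster = {"config": {"worker_config": {"num_instances": 2}}}
>>> update_mask = {"paths": ["config.worker_config.num_instances"]}
>>> response = client.update_cluster(
...     "my-project-id",
...     "us-central1",
...     "my-cluster",
...     cluster,
...     update_mask,
...     graceful_decommission_timeout={"seconds": 3600},
... )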
diff --git a/google/cloud/dataproc_v1beta2/gapic/job_controller_client.py b/google/cloud/dataproc_v1beta2/gapic/job_controller_client.py index 21b6ca49..65adbc7e 100644 --- a/google/cloud/dataproc_v1beta2/gapic/job_controller_client.py +++ b/google/cloud/dataproc_v1beta2/gapic/job_controller_client.py @@ -223,7 +223,7 @@ def submit_job( Args: project_id (str): Required. The ID of the Google Cloud Platform project that the job belongs to. - region (str): Required. The Cloud Dataproc region in which to handle the request. + region (str): Required. The Dataproc region in which to handle the request. job (Union[dict, ~google.cloud.dataproc_v1beta2.types.Job]): Required. The job resource. If a dict is provided, it must be of the same form as the protobuf @@ -306,7 +306,7 @@ def get_job( Args: project_id (str): Required. The ID of the Google Cloud Platform project that the job belongs to. - region (str): Required. The Cloud Dataproc region in which to handle the request. + region (str): Required. The Dataproc region in which to handle the request. job_id (str): Required. The job ID. retry (Optional[google.api_core.retry.Retry]): A retry object used to retry requests. If ``None`` is specified, requests will @@ -388,7 +388,7 @@ def list_jobs( Args: project_id (str): Required. The ID of the Google Cloud Platform project that the job belongs to. - region (str): Required. The Cloud Dataproc region in which to handle the request. + region (str): Required. The Dataproc region in which to handle the request. page_size (int): The maximum number of resources contained in the underlying API response. If page streaming is performed per- resource, this parameter does not affect the return value. If page @@ -509,7 +509,7 @@ def update_job( Args: project_id (str): Required. The ID of the Google Cloud Platform project that the job belongs to. - region (str): Required. The Cloud Dataproc region in which to handle the request. + region (str): Required. The Dataproc region in which to handle the request. job_id (str): Required. The job ID. job (Union[dict, ~google.cloud.dataproc_v1beta2.types.Job]): Required. The changes to the job. @@ -599,7 +599,7 @@ def cancel_job( Args: project_id (str): Required. The ID of the Google Cloud Platform project that the job belongs to. - region (str): Required. The Cloud Dataproc region in which to handle the request. + region (str): Required. The Dataproc region in which to handle the request. job_id (str): Required. The job ID. retry (Optional[google.api_core.retry.Retry]): A retry object used to retry requests. If ``None`` is specified, requests will @@ -670,7 +670,7 @@ def delete_job( Args: project_id (str): Required. The ID of the Google Cloud Platform project that the job belongs to. - region (str): Required. The Cloud Dataproc region in which to handle the request. + region (str): Required. The Dataproc region in which to handle the request. job_id (str): Required. The job ID. retry (Optional[google.api_core.retry.Retry]): A retry object used to retry requests. If ``None`` is specified, requests will diff --git a/google/cloud/dataproc_v1beta2/gapic/transports/cluster_controller_grpc_transport.py b/google/cloud/dataproc_v1beta2/gapic/transports/cluster_controller_grpc_transport.py index 767268e5..25cd8277 100644 --- a/google/cloud/dataproc_v1beta2/gapic/transports/cluster_controller_grpc_transport.py +++ b/google/cloud/dataproc_v1beta2/gapic/transports/cluster_controller_grpc_transport.py @@ -194,7 +194,7 @@ def diagnose_cluster(self): will be `ClusterOperationMetadata `__. 
After the operation completes, ``Operation.response`` contains - `Empty `__. + ``Empty``. Returns: Callable: A callable which accepts the appropriate diff --git a/google/cloud/dataproc_v1beta2/gapic/workflow_template_service_client.py b/google/cloud/dataproc_v1beta2/gapic/workflow_template_service_client.py index 5319e2f1..c39f0267 100644 --- a/google/cloud/dataproc_v1beta2/gapic/workflow_template_service_client.py +++ b/google/cloud/dataproc_v1beta2/gapic/workflow_template_service_client.py @@ -59,7 +59,7 @@ class WorkflowTemplateServiceClient(object): """ The API interface for managing Workflow Templates in the - Cloud Dataproc API. + Dataproc API. """ SERVICE_ADDRESS = "dataproc.googleapis.com:443" diff --git a/google/cloud/dataproc_v1beta2/proto/autoscaling_policies.proto b/google/cloud/dataproc_v1beta2/proto/autoscaling_policies.proto index 36d507c8..e5d16fd9 100644 --- a/google/cloud/dataproc_v1beta2/proto/autoscaling_policies.proto +++ b/google/cloud/dataproc_v1beta2/proto/autoscaling_policies.proto @@ -1,4 +1,4 @@ -// Copyright 2019 Google LLC. +// Copyright 2020 Google LLC // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -11,7 +11,6 @@ // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. -// syntax = "proto3"; @@ -30,7 +29,7 @@ option java_outer_classname = "AutoscalingPoliciesProto"; option java_package = "com.google.cloud.dataproc.v1beta2"; // The API interface for managing autoscaling policies in the -// Google Cloud Dataproc API. +// Cloud Dataproc API. service AutoscalingPolicyService { option (google.api.default_host) = "dataproc.googleapis.com"; option (google.api.oauth_scopes) = "https://www.googleapis.com/auth/cloud-platform"; diff --git a/google/cloud/dataproc_v1beta2/proto/autoscaling_policies_pb2.py b/google/cloud/dataproc_v1beta2/proto/autoscaling_policies_pb2.py index e10b0b75..7c3be028 100644 --- a/google/cloud/dataproc_v1beta2/proto/autoscaling_policies_pb2.py +++ b/google/cloud/dataproc_v1beta2/proto/autoscaling_policies_pb2.py @@ -792,7 +792,8 @@ dict( DESCRIPTOR=_AUTOSCALINGPOLICY, __module__="google.cloud.dataproc_v1beta2.proto.autoscaling_policies_pb2", - __doc__="""Describes an autoscaling policy for Dataproc cluster autoscaler. + __doc__="""Describes an autoscaling policy for Dataproc cluster + autoscaler. Attributes: diff --git a/google/cloud/dataproc_v1beta2/proto/autoscaling_policies_pb2_grpc.py b/google/cloud/dataproc_v1beta2/proto/autoscaling_policies_pb2_grpc.py index 1e6910a5..0163633a 100644 --- a/google/cloud/dataproc_v1beta2/proto/autoscaling_policies_pb2_grpc.py +++ b/google/cloud/dataproc_v1beta2/proto/autoscaling_policies_pb2_grpc.py @@ -9,7 +9,7 @@ class AutoscalingPolicyServiceStub(object): """The API interface for managing autoscaling policies in the - Google Cloud Dataproc API. + Cloud Dataproc API. """ def __init__(self, channel): @@ -47,7 +47,7 @@ def __init__(self, channel): class AutoscalingPolicyServiceServicer(object): """The API interface for managing autoscaling policies in the - Google Cloud Dataproc API. + Cloud Dataproc API. 
""" def CreateAutoscalingPolicy(self, request, context): diff --git a/google/cloud/dataproc_v1beta2/proto/clusters.proto b/google/cloud/dataproc_v1beta2/proto/clusters.proto index 4b2ee649..2e9e648c 100644 --- a/google/cloud/dataproc_v1beta2/proto/clusters.proto +++ b/google/cloud/dataproc_v1beta2/proto/clusters.proto @@ -1,4 +1,4 @@ -// Copyright 2019 Google LLC. +// Copyright 2020 Google LLC // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -11,7 +11,6 @@ // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. -// syntax = "proto3"; @@ -20,7 +19,7 @@ package google.cloud.dataproc.v1beta2; import "google/api/annotations.proto"; import "google/api/client.proto"; import "google/api/field_behavior.proto"; -import "google/cloud/dataproc/v1beta2/operations.proto"; +import "google/api/resource.proto"; import "google/cloud/dataproc/v1beta2/shared.proto"; import "google/longrunning/operations.proto"; import "google/protobuf/duration.proto"; @@ -40,7 +39,7 @@ service ClusterController { // Creates a cluster in a project. The returned // [Operation.metadata][google.longrunning.Operation.metadata] will be - // [ClusterOperationMetadata](/dataproc/docs/reference/rpc/google.cloud.dataproc.v1beta2#clusteroperationmetadata). + // [ClusterOperationMetadata](https://cloud.google.com/dataproc/docs/reference/rpc/google.cloud.dataproc.v1beta2#clusteroperationmetadata). rpc CreateCluster(CreateClusterRequest) returns (google.longrunning.Operation) { option (google.api.http) = { post: "/v1beta2/projects/{project_id}/regions/{region}/clusters" @@ -55,7 +54,7 @@ service ClusterController { // Updates a cluster in a project. The returned // [Operation.metadata][google.longrunning.Operation.metadata] will be - // [ClusterOperationMetadata](/dataproc/docs/reference/rpc/google.cloud.dataproc.v1beta2#clusteroperationmetadata). + // [ClusterOperationMetadata](https://cloud.google.com/dataproc/docs/reference/rpc/google.cloud.dataproc.v1beta2#clusteroperationmetadata). rpc UpdateCluster(UpdateClusterRequest) returns (google.longrunning.Operation) { option (google.api.http) = { patch: "/v1beta2/projects/{project_id}/regions/{region}/clusters/{cluster_name}" @@ -70,7 +69,7 @@ service ClusterController { // Deletes a cluster in a project. The returned // [Operation.metadata][google.longrunning.Operation.metadata] will be - // [ClusterOperationMetadata](/dataproc/docs/reference/rpc/google.cloud.dataproc.v1beta2#clusteroperationmetadata). + // [ClusterOperationMetadata](https://cloud.google.com/dataproc/docs/reference/rpc/google.cloud.dataproc.v1beta2#clusteroperationmetadata). rpc DeleteCluster(DeleteClusterRequest) returns (google.longrunning.Operation) { option (google.api.http) = { delete: "/v1beta2/projects/{project_id}/regions/{region}/clusters/{cluster_name}" @@ -101,11 +100,11 @@ service ClusterController { // Gets cluster diagnostic information. The returned // [Operation.metadata][google.longrunning.Operation.metadata] will be - // [ClusterOperationMetadata](/dataproc/docs/reference/rpc/google.cloud.dataproc.v1beta2#clusteroperationmetadata). + // [ClusterOperationMetadata](https://cloud.google.com/dataproc/docs/reference/rpc/google.cloud.dataproc.v1beta2#clusteroperationmetadata). 
// After the operation completes, // [Operation.response][google.longrunning.Operation.response] // contains - // [Empty](google.protobuf.Empty). + // [Empty][google.protobuf.Empty]. rpc DiagnoseCluster(DiagnoseClusterRequest) returns (google.longrunning.Operation) { option (google.api.http) = { post: "/v1beta2/projects/{project_id}/regions/{region}/clusters/{cluster_name}:diagnose" @@ -129,7 +128,7 @@ message Cluster { // unique. Names of deleted clusters can be reused. string cluster_name = 2 [(google.api.field_behavior) = REQUIRED]; - // Required. The cluster config. Note that Cloud Dataproc may set + // Required. The cluster config. Note that Dataproc may set // default values, and values may change when clusters are updated. ClusterConfig config = 3 [(google.api.field_behavior) = REQUIRED]; @@ -148,7 +147,7 @@ message Cluster { // Output only. The previous cluster status. repeated ClusterStatus status_history = 7 [(google.api.field_behavior) = OUTPUT_ONLY]; - // Output only. A cluster UUID (Unique Universal Identifier). Cloud Dataproc + // Output only. A cluster UUID (Unique Universal Identifier). Dataproc // generates this value when it creates the cluster. string cluster_uuid = 6 [(google.api.field_behavior) = OUTPUT_ONLY]; @@ -161,14 +160,14 @@ message Cluster { // The cluster config. message ClusterConfig { - // Optional. A Google Cloud Storage bucket used to stage job + // Optional. A Cloud Storage bucket used to stage job // dependencies, config files, and job driver console output. // If you do not specify a staging bucket, Cloud // Dataproc will determine a Cloud Storage location (US, - // ASIA, or EU) for your cluster's staging bucket according to the Google + // ASIA, or EU) for your cluster's staging bucket according to the // Compute Engine zone where your cluster is deployed, and then create // and manage this project-level, per-location bucket (see - // [Cloud Dataproc staging + // [Dataproc staging // bucket](/dataproc/docs/concepts/configuring-clusters/staging-bucket)). string config_bucket = 1 [(google.api.field_behavior) = OPTIONAL]; @@ -244,7 +243,7 @@ message AutoscalingConfig { // * `https://www.googleapis.com/compute/v1/projects/[project_id]/locations/[dataproc_region]/autoscalingPolicies/[policy_id]` // * `projects/[project_id]/locations/[dataproc_region]/autoscalingPolicies/[policy_id]` // - // Note that the policy must be in the same project and Cloud Dataproc region. + // Note that the policy must be in the same project and Dataproc region. string policy_uri = 1 [(google.api.field_behavior) = OPTIONAL]; } @@ -260,7 +259,7 @@ message EncryptionConfig { message GceClusterConfig { // Optional. The zone where the Compute Engine cluster will be located. // On a create request, it is required in the "global" region. If omitted - // in a non-global Cloud Dataproc region, the service will pick a zone in the + // in a non-global Dataproc region, the service will pick a zone in the // corresponding Compute Engine region. On a get request, zone will always be // present. // @@ -302,17 +301,17 @@ message GceClusterConfig { // configured to be accessible without external IP addresses. bool internal_ip_only = 7 [(google.api.field_behavior) = OPTIONAL]; - // Optional. The service account of the instances. Defaults to the default - // Compute Engine service account. 
Custom service accounts need - // permissions equivalent to the following IAM roles: - // - // * roles/logging.logWriter - // * roles/storage.objectAdmin - // - // (see - // https://cloud.google.com/compute/docs/access/service-accounts#custom_service_accounts - // for more information). - // Example: `[account_id]@[project_id].iam.gserviceaccount.com` + // Optional. The [Dataproc service + // account](/dataproc/docs/concepts/configuring-clusters/service-accounts#service_accounts_in_cloud_dataproc) + // (also see [VM Data Plane + // identity](/dataproc/docs/concepts/iam/dataproc-principals#vm_service_account_data_plane_identity)) + // used by Dataproc cluster VM instances to access Google Cloud Platform + // services. + // + // If not specified, the + // [Compute Engine default service + // account](/compute/docs/access/service-accounts#default_service_account) + // is used. string service_account = 8 [(google.api.field_behavior) = OPTIONAL]; // Optional. The URIs of service account scopes to be included in @@ -351,7 +350,7 @@ message InstanceGroupConfig { // For master instance groups, must be set to 1. int32 num_instances = 1 [(google.api.field_behavior) = OPTIONAL]; - // Output only. The list of instance names. Cloud Dataproc derives the names + // Output only. The list of instance names. Dataproc derives the names // from `cluster_name`, `num_instances`, and the instance group. repeated string instance_names = 2 [(google.api.field_behavior) = OUTPUT_ONLY]; @@ -368,7 +367,7 @@ // * `projects/[project_id]/zones/us-east1-a/machineTypes/n1-standard-2` // * `n1-standard-2` // - // **Auto Zone Exception**: If you are using the Cloud Dataproc + // **Auto Zone Exception**: If you are using the Dataproc // [Auto Zone // Placement](/dataproc/docs/concepts/configuring-clusters/auto-zone#using_auto_zone_placement) // feature, you must use the short name of the machine type @@ -392,7 +391,7 @@ repeated AcceleratorConfig accelerators = 8 [(google.api.field_behavior) = OPTIONAL]; // Specifies the minimum cpu platform for the Instance Group. - // See [Cloud Dataproc→Minimum CPU Platform] + // See [Dataproc→Minimum CPU Platform] // (/dataproc/docs/concepts/compute/dataproc-min-cpu). string min_cpu_platform = 9; } @@ -420,7 +419,7 @@ // * `projects/[project_id]/zones/us-east1-a/acceleratorTypes/nvidia-tesla-k80` // * `nvidia-tesla-k80` // - // **Auto Zone Exception**: If you are using the Cloud Dataproc + // **Auto Zone Exception**: If you are using the Dataproc // [Auto Zone // Placement](/dataproc/docs/concepts/configuring-clusters/auto-zone#using_auto_zone_placement) // feature, you must use the short name of the accelerator type @@ -452,29 +451,31 @@ message DiskConfig { // Specifies the cluster auto-delete schedule configuration. message LifecycleConfig { - // Optional. The duration to keep the cluster alive while idling. - // Passing this threshold will cause the cluster to be - // deleted. Valid range: **[10m, 14d]**. - // - // Example: **"10m"**, the minimum value, to delete the - // cluster when it has had no jobs running for 10 minutes. + // Optional. The duration to keep the cluster alive while idling (when no jobs + // are running). Passing this threshold will cause the cluster to be + // deleted. Minimum value is 10 minutes; maximum value is 14 days (see JSON + // representation of + // [Duration](https://developers.google.com/protocol-buffers/docs/proto3#json)).
google.protobuf.Duration idle_delete_ttl = 1 [(google.api.field_behavior) = OPTIONAL]; // Either the exact time the cluster should be deleted at or // the cluster maximum age. oneof ttl { - // Optional. The time when cluster will be auto-deleted. - google.protobuf.Timestamp auto_delete_time = 2; + // Optional. The time when cluster will be auto-deleted (see JSON representation of + // [Timestamp](https://developers.google.com/protocol-buffers/docs/proto3#json)). + google.protobuf.Timestamp auto_delete_time = 2 [(google.api.field_behavior) = OPTIONAL]; // Optional. The lifetime duration of cluster. The cluster will be - // auto-deleted at the end of this period. Valid range: **[10m, 14d]**. - // - // Example: **"1d"**, to delete the cluster 1 day after its creation.. - google.protobuf.Duration auto_delete_ttl = 3; + // auto-deleted at the end of this period. Minimum value is 10 minutes; + // maximum value is 14 days (see JSON representation of + // [Duration](https://developers.google.com/protocol-buffers/docs/proto3#json)). + google.protobuf.Duration auto_delete_ttl = 3 [(google.api.field_behavior) = OPTIONAL]; } // Output only. The time when cluster became idle (most recent job finished) - // and became eligible for deletion due to idleness. + // and became eligible for deletion due to idleness (see JSON representation + // of + // [Timestamp](https://developers.google.com/protocol-buffers/docs/proto3#json)). google.protobuf.Timestamp idle_start_time = 4 [(google.api.field_behavior) = OUTPUT_ONLY]; } @@ -560,7 +561,10 @@ string executable_file = 1 [(google.api.field_behavior) = REQUIRED]; // Optional. Amount of time executable has to complete. Default is - // 10 minutes. Cluster creation fails with an explanatory error message (the + // 10 minutes (see JSON representation of + // [Duration](https://developers.google.com/protocol-buffers/docs/proto3#json)). + // + // Cluster creation fails with an explanatory error message (the // name of the executable that caused the error and the exceeded timeout // period) if the executable is not completed at end of the timeout period. google.protobuf.Duration execution_timeout = 2 [(google.api.field_behavior) = OPTIONAL]; @@ -602,7 +606,7 @@ UNHEALTHY = 1; // The agent-reported status is out of date (may occur if - // Cloud Dataproc loses communication with Agent). + // Dataproc loses communication with Agent). // // Applies to RUNNING state. STALE_STATUS = 2; @@ -614,7 +618,7 @@ // Output only. Optional details of cluster's state. string detail = 2 [(google.api.field_behavior) = OUTPUT_ONLY]; - // Output only. Time when this state was entered. + // Output only. Time when this state was entered (see JSON representation of + // [Timestamp](https://developers.google.com/protocol-buffers/docs/proto3#json)). google.protobuf.Timestamp state_start_time = 3 [(google.api.field_behavior) = OUTPUT_ONLY]; // Output only. Additional state information that includes @@ -625,7 +630,7 @@ // Specifies the selection and config of software inside the cluster. message SoftwareConfig { // Optional. The version of software inside the cluster.
It must be one of the - // supported [Cloud Dataproc + // supported [Dataproc // Versions](/dataproc/docs/concepts/versioning/dataproc-versions#supported_cloud_dataproc_versions), // such as "1.2" (including a subminor version, such as "1.2.29"), or the // ["preview" @@ -675,7 +680,7 @@ message CreateClusterRequest { // belongs to. string project_id = 1 [(google.api.field_behavior) = REQUIRED]; - // Required. The Cloud Dataproc region in which to handle the request. + // Required. The Dataproc region in which to handle the request. string region = 3 [(google.api.field_behavior) = REQUIRED]; // Required. The cluster to create. @@ -701,7 +706,7 @@ message UpdateClusterRequest { // cluster belongs to. string project_id = 1 [(google.api.field_behavior) = REQUIRED]; - // Required. The Cloud Dataproc region in which to handle the request. + // Required. The Dataproc region in which to handle the request. string region = 5 [(google.api.field_behavior) = REQUIRED]; // Required. The cluster name. @@ -715,7 +720,8 @@ message UpdateClusterRequest { // interrupting jobs in progress. Timeout specifies how long to wait for jobs // in progress to finish before forcefully removing nodes (and potentially // interrupting jobs). Default timeout is 0 (for forceful decommission), and - // the maximum allowed timeout is 1 day. + // the maximum allowed timeout is 1 day (see JSON representation of + // [Duration](https://developers.google.com/protocol-buffers/docs/proto3#json)). // // Only supported on Dataproc image versions 1.2 and higher. google.protobuf.Duration graceful_decommission_timeout = 6 [(google.api.field_behavior) = OPTIONAL]; @@ -802,7 +808,7 @@ message DeleteClusterRequest { // belongs to. string project_id = 1 [(google.api.field_behavior) = REQUIRED]; - // Required. The Cloud Dataproc region in which to handle the request. + // Required. The Dataproc region in which to handle the request. string region = 3 [(google.api.field_behavior) = REQUIRED]; // Required. The cluster name. @@ -832,7 +838,7 @@ message GetClusterRequest { // belongs to. string project_id = 1 [(google.api.field_behavior) = REQUIRED]; - // Required. The Cloud Dataproc region in which to handle the request. + // Required. The Dataproc region in which to handle the request. string region = 3 [(google.api.field_behavior) = REQUIRED]; // Required. The cluster name. @@ -845,7 +851,7 @@ message ListClustersRequest { // belongs to. string project_id = 1 [(google.api.field_behavior) = REQUIRED]; - // Required. The Cloud Dataproc region in which to handle the request. + // Required. The Dataproc region in which to handle the request. string region = 4 [(google.api.field_behavior) = REQUIRED]; // Optional. A filter constraining the clusters to list. Filters are @@ -893,7 +899,7 @@ message DiagnoseClusterRequest { // belongs to. string project_id = 1 [(google.api.field_behavior) = REQUIRED]; - // Required. The Cloud Dataproc region in which to handle the request. + // Required. The Dataproc region in which to handle the request. string region = 3 [(google.api.field_behavior) = REQUIRED]; // Required. The cluster name. 
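Reviewer note (not part of the patch): the reworked ``LifecycleConfig`` comments above describe the ``Duration`` fields by their JSON representation, which the Python client accepts as plain dicts. A minimal sketch with placeholder project, region, and cluster names, combining the documented 10-minute minimum for ``idle_delete_ttl`` with an ``auto_delete_ttl`` inside the 14-day maximum:

>>> from google.cloud import dataproc_v1beta2
>>>
>>> client = dataproc_v1beta2.ClusterControllerClient()
>>> cluster = {
...     "project_id": "my-project-id",
...     "cluster_name": "my-cluster",
...     "config": {
...         "lifecycle_config": {
...             # Delete the cluster after 10 idle minutes (the documented minimum).
...             "idle_delete_ttl": {"seconds": 600},
...             # And unconditionally after 7 days (must not exceed 14 days).
...             "auto_delete_ttl": {"seconds": 7 * 24 * 3600},
...         }
...     },
... }
>>> response = client.create_cluster("my-project-id", "us-central1", cluster)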
diff --git a/google/cloud/dataproc_v1beta2/proto/clusters_pb2.py b/google/cloud/dataproc_v1beta2/proto/clusters_pb2.py index d043480d..33bd6924 100644 --- a/google/cloud/dataproc_v1beta2/proto/clusters_pb2.py +++ b/google/cloud/dataproc_v1beta2/proto/clusters_pb2.py @@ -19,9 +19,7 @@ from google.api import annotations_pb2 as google_dot_api_dot_annotations__pb2 from google.api import client_pb2 as google_dot_api_dot_client__pb2 from google.api import field_behavior_pb2 as google_dot_api_dot_field__behavior__pb2 -from google.cloud.dataproc_v1beta2.proto import ( - operations_pb2 as google_dot_cloud_dot_dataproc__v1beta2_dot_proto_dot_operations__pb2, -) +from google.api import resource_pb2 as google_dot_api_dot_resource__pb2 from google.cloud.dataproc_v1beta2.proto import ( shared_pb2 as google_dot_cloud_dot_dataproc__v1beta2_dot_proto_dot_shared__pb2, ) @@ -41,13 +39,13 @@ "\n!com.google.cloud.dataproc.v1beta2B\rClustersProtoP\001ZEgoogle.golang.org/genproto/googleapis/cloud/dataproc/v1beta2;dataproc" ), serialized_pb=_b( - '\n2google/cloud/dataproc_v1beta2/proto/clusters.proto\x12\x1dgoogle.cloud.dataproc.v1beta2\x1a\x1cgoogle/api/annotations.proto\x1a\x17google/api/client.proto\x1a\x1fgoogle/api/field_behavior.proto\x1a\x34google/cloud/dataproc_v1beta2/proto/operations.proto\x1a\x30google/cloud/dataproc_v1beta2/proto/shared.proto\x1a#google/longrunning/operations.proto\x1a\x1egoogle/protobuf/duration.proto\x1a google/protobuf/field_mask.proto\x1a\x1fgoogle/protobuf/timestamp.proto"\xe6\x03\n\x07\x43luster\x12\x17\n\nproject_id\x18\x01 \x01(\tB\x03\xe0\x41\x02\x12\x19\n\x0c\x63luster_name\x18\x02 \x01(\tB\x03\xe0\x41\x02\x12\x41\n\x06\x63onfig\x18\x03 \x01(\x0b\x32,.google.cloud.dataproc.v1beta2.ClusterConfigB\x03\xe0\x41\x02\x12G\n\x06labels\x18\x08 \x03(\x0b\x32\x32.google.cloud.dataproc.v1beta2.Cluster.LabelsEntryB\x03\xe0\x41\x01\x12\x41\n\x06status\x18\x04 \x01(\x0b\x32,.google.cloud.dataproc.v1beta2.ClusterStatusB\x03\xe0\x41\x03\x12I\n\x0estatus_history\x18\x07 \x03(\x0b\x32,.google.cloud.dataproc.v1beta2.ClusterStatusB\x03\xe0\x41\x03\x12\x19\n\x0c\x63luster_uuid\x18\x06 \x01(\tB\x03\xe0\x41\x03\x12\x43\n\x07metrics\x18\t \x01(\x0b\x32-.google.cloud.dataproc.v1beta2.ClusterMetricsB\x03\xe0\x41\x03\x1a-\n\x0bLabelsEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t:\x02\x38\x01"\xaf\x07\n\rClusterConfig\x12\x1a\n\rconfig_bucket\x18\x01 \x01(\tB\x03\xe0\x41\x01\x12P\n\x12gce_cluster_config\x18\x08 \x01(\x0b\x32/.google.cloud.dataproc.v1beta2.GceClusterConfigB\x03\xe0\x41\x01\x12N\n\rmaster_config\x18\t \x01(\x0b\x32\x32.google.cloud.dataproc.v1beta2.InstanceGroupConfigB\x03\xe0\x41\x01\x12N\n\rworker_config\x18\n \x01(\x0b\x32\x32.google.cloud.dataproc.v1beta2.InstanceGroupConfigB\x03\xe0\x41\x01\x12X\n\x17secondary_worker_config\x18\x0c \x01(\x0b\x32\x32.google.cloud.dataproc.v1beta2.InstanceGroupConfigB\x03\xe0\x41\x01\x12K\n\x0fsoftware_config\x18\r \x01(\x0b\x32-.google.cloud.dataproc.v1beta2.SoftwareConfigB\x03\xe0\x41\x01\x12M\n\x10lifecycle_config\x18\x0e \x01(\x0b\x32..google.cloud.dataproc.v1beta2.LifecycleConfigB\x03\xe0\x41\x01\x12\\\n\x16initialization_actions\x18\x0b \x03(\x0b\x32\x37.google.cloud.dataproc.v1beta2.NodeInitializationActionB\x03\xe0\x41\x01\x12O\n\x11\x65ncryption_config\x18\x0f \x01(\x0b\x32/.google.cloud.dataproc.v1beta2.EncryptionConfigB\x03\xe0\x41\x01\x12Q\n\x12\x61utoscaling_config\x18\x10 \x01(\x0b\x32\x30.google.cloud.dataproc.v1beta2.AutoscalingConfigB\x03\xe0\x41\x01\x12K\n\x0f\x65ndpoint_config\x18\x11 
\x01(\x0b\x32-.google.cloud.dataproc.v1beta2.EndpointConfigB\x03\xe0\x41\x01\x12K\n\x0fsecurity_config\x18\x12 \x01(\x0b\x32-.google.cloud.dataproc.v1beta2.SecurityConfigB\x03\xe0\x41\x01"\xbf\x01\n\x0e\x45ndpointConfig\x12U\n\nhttp_ports\x18\x01 \x03(\x0b\x32<.google.cloud.dataproc.v1beta2.EndpointConfig.HttpPortsEntryB\x03\xe0\x41\x03\x12$\n\x17\x65nable_http_port_access\x18\x02 \x01(\x08\x42\x03\xe0\x41\x01\x1a\x30\n\x0eHttpPortsEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t:\x02\x38\x01",\n\x11\x41utoscalingConfig\x12\x17\n\npolicy_uri\x18\x01 \x01(\tB\x03\xe0\x41\x01"4\n\x10\x45ncryptionConfig\x12 \n\x13gce_pd_kms_key_name\x18\x01 \x01(\tB\x03\xe0\x41\x01"\xa9\x03\n\x10GceClusterConfig\x12\x15\n\x08zone_uri\x18\x01 \x01(\tB\x03\xe0\x41\x01\x12\x18\n\x0bnetwork_uri\x18\x02 \x01(\tB\x03\xe0\x41\x01\x12\x1b\n\x0esubnetwork_uri\x18\x06 \x01(\tB\x03\xe0\x41\x01\x12\x1d\n\x10internal_ip_only\x18\x07 \x01(\x08\x42\x03\xe0\x41\x01\x12\x1c\n\x0fservice_account\x18\x08 \x01(\tB\x03\xe0\x41\x01\x12#\n\x16service_account_scopes\x18\x03 \x03(\tB\x03\xe0\x41\x01\x12\x0c\n\x04tags\x18\x04 \x03(\t\x12O\n\x08metadata\x18\x05 \x03(\x0b\x32=.google.cloud.dataproc.v1beta2.GceClusterConfig.MetadataEntry\x12U\n\x14reservation_affinity\x18\x0b \x01(\x0b\x32\x32.google.cloud.dataproc.v1beta2.ReservationAffinityB\x03\xe0\x41\x01\x1a/\n\rMetadataEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t:\x02\x38\x01"\xa4\x03\n\x13InstanceGroupConfig\x12\x1a\n\rnum_instances\x18\x01 \x01(\x05\x42\x03\xe0\x41\x01\x12\x1b\n\x0einstance_names\x18\x02 \x03(\tB\x03\xe0\x41\x03\x12\x16\n\timage_uri\x18\x03 \x01(\tB\x03\xe0\x41\x01\x12\x1d\n\x10machine_type_uri\x18\x04 \x01(\tB\x03\xe0\x41\x01\x12\x43\n\x0b\x64isk_config\x18\x05 \x01(\x0b\x32).google.cloud.dataproc.v1beta2.DiskConfigB\x03\xe0\x41\x01\x12\x1b\n\x0eis_preemptible\x18\x06 \x01(\x08\x42\x03\xe0\x41\x01\x12T\n\x14managed_group_config\x18\x07 \x01(\x0b\x32\x31.google.cloud.dataproc.v1beta2.ManagedGroupConfigB\x03\xe0\x41\x03\x12K\n\x0c\x61\x63\x63\x65lerators\x18\x08 \x03(\x0b\x32\x30.google.cloud.dataproc.v1beta2.AcceleratorConfigB\x03\xe0\x41\x01\x12\x18\n\x10min_cpu_platform\x18\t \x01(\t"c\n\x12ManagedGroupConfig\x12#\n\x16instance_template_name\x18\x01 \x01(\tB\x03\xe0\x41\x03\x12(\n\x1binstance_group_manager_name\x18\x02 \x01(\tB\x03\xe0\x41\x03"L\n\x11\x41\x63\x63\x65leratorConfig\x12\x1c\n\x14\x61\x63\x63\x65lerator_type_uri\x18\x01 \x01(\t\x12\x19\n\x11\x61\x63\x63\x65lerator_count\x18\x02 \x01(\x05"a\n\nDiskConfig\x12\x1b\n\x0e\x62oot_disk_type\x18\x03 \x01(\tB\x03\xe0\x41\x01\x12\x1e\n\x11\x62oot_disk_size_gb\x18\x01 \x01(\x05\x42\x03\xe0\x41\x01\x12\x16\n\x0enum_local_ssds\x18\x02 \x01(\x05"\xf9\x01\n\x0fLifecycleConfig\x12\x37\n\x0fidle_delete_ttl\x18\x01 \x01(\x0b\x32\x19.google.protobuf.DurationB\x03\xe0\x41\x01\x12\x36\n\x10\x61uto_delete_time\x18\x02 \x01(\x0b\x32\x1a.google.protobuf.TimestampH\x00\x12\x34\n\x0f\x61uto_delete_ttl\x18\x03 \x01(\x0b\x32\x19.google.protobuf.DurationH\x00\x12\x38\n\x0fidle_start_time\x18\x04 \x01(\x0b\x32\x1a.google.protobuf.TimestampB\x03\xe0\x41\x03\x42\x05\n\x03ttl"X\n\x0eSecurityConfig\x12\x46\n\x0fkerberos_config\x18\x01 \x01(\x0b\x32-.google.cloud.dataproc.v1beta2.KerberosConfig"\x90\x04\n\x0eKerberosConfig\x12\x1c\n\x0f\x65nable_kerberos\x18\x01 \x01(\x08\x42\x03\xe0\x41\x01\x12(\n\x1broot_principal_password_uri\x18\x02 \x01(\tB\x03\xe0\x41\x02\x12\x18\n\x0bkms_key_uri\x18\x03 \x01(\tB\x03\xe0\x41\x02\x12\x19\n\x0ckeystore_uri\x18\x04 
\x01(\tB\x03\xe0\x41\x01\x12\x1b\n\x0etruststore_uri\x18\x05 \x01(\tB\x03\xe0\x41\x01\x12"\n\x15keystore_password_uri\x18\x06 \x01(\tB\x03\xe0\x41\x01\x12\x1d\n\x10key_password_uri\x18\x07 \x01(\tB\x03\xe0\x41\x01\x12$\n\x17truststore_password_uri\x18\x08 \x01(\tB\x03\xe0\x41\x01\x12$\n\x17\x63ross_realm_trust_realm\x18\t \x01(\tB\x03\xe0\x41\x01\x12"\n\x15\x63ross_realm_trust_kdc\x18\n \x01(\tB\x03\xe0\x41\x01\x12+\n\x1e\x63ross_realm_trust_admin_server\x18\x0b \x01(\tB\x03\xe0\x41\x01\x12\x32\n%cross_realm_trust_shared_password_uri\x18\x0c \x01(\tB\x03\xe0\x41\x01\x12\x1b\n\x0ekdc_db_key_uri\x18\r \x01(\tB\x03\xe0\x41\x01\x12\x1f\n\x12tgt_lifetime_hours\x18\x0e \x01(\x05\x42\x03\xe0\x41\x01\x12\x12\n\x05realm\x18\x0f \x01(\tB\x03\xe0\x41\x01"s\n\x18NodeInitializationAction\x12\x1c\n\x0f\x65xecutable_file\x18\x01 \x01(\tB\x03\xe0\x41\x02\x12\x39\n\x11\x65xecution_timeout\x18\x02 \x01(\x0b\x32\x19.google.protobuf.DurationB\x03\xe0\x41\x01"\x8b\x03\n\rClusterStatus\x12\x46\n\x05state\x18\x01 \x01(\x0e\x32\x32.google.cloud.dataproc.v1beta2.ClusterStatus.StateB\x03\xe0\x41\x03\x12\x13\n\x06\x64\x65tail\x18\x02 \x01(\tB\x03\xe0\x41\x03\x12\x39\n\x10state_start_time\x18\x03 \x01(\x0b\x32\x1a.google.protobuf.TimestampB\x03\xe0\x41\x03\x12L\n\x08substate\x18\x04 \x01(\x0e\x32\x35.google.cloud.dataproc.v1beta2.ClusterStatus.SubstateB\x03\xe0\x41\x03"V\n\x05State\x12\x0b\n\x07UNKNOWN\x10\x00\x12\x0c\n\x08\x43REATING\x10\x01\x12\x0b\n\x07RUNNING\x10\x02\x12\t\n\x05\x45RROR\x10\x03\x12\x0c\n\x08\x44\x45LETING\x10\x04\x12\x0c\n\x08UPDATING\x10\x05"<\n\x08Substate\x12\x0f\n\x0bUNSPECIFIED\x10\x00\x12\r\n\tUNHEALTHY\x10\x01\x12\x10\n\x0cSTALE_STATUS\x10\x02"\xfe\x01\n\x0eSoftwareConfig\x12\x1a\n\rimage_version\x18\x01 \x01(\tB\x03\xe0\x41\x01\x12V\n\nproperties\x18\x02 \x03(\x0b\x32=.google.cloud.dataproc.v1beta2.SoftwareConfig.PropertiesEntryB\x03\xe0\x41\x01\x12\x45\n\x13optional_components\x18\x03 \x03(\x0e\x32(.google.cloud.dataproc.v1beta2.Component\x1a\x31\n\x0fPropertiesEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t:\x02\x38\x01"\xa4\x02\n\x0e\x43lusterMetrics\x12T\n\x0chdfs_metrics\x18\x01 \x03(\x0b\x32>.google.cloud.dataproc.v1beta2.ClusterMetrics.HdfsMetricsEntry\x12T\n\x0cyarn_metrics\x18\x02 \x03(\x0b\x32>.google.cloud.dataproc.v1beta2.ClusterMetrics.YarnMetricsEntry\x1a\x32\n\x10HdfsMetricsEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\x03:\x02\x38\x01\x1a\x32\n\x10YarnMetricsEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\x03:\x02\x38\x01"\x9b\x01\n\x14\x43reateClusterRequest\x12\x17\n\nproject_id\x18\x01 \x01(\tB\x03\xe0\x41\x02\x12\x13\n\x06region\x18\x03 \x01(\tB\x03\xe0\x41\x02\x12<\n\x07\x63luster\x18\x02 \x01(\x0b\x32&.google.cloud.dataproc.v1beta2.ClusterB\x03\xe0\x41\x02\x12\x17\n\nrequest_id\x18\x04 \x01(\tB\x03\xe0\x41\x01"\xb3\x02\n\x14UpdateClusterRequest\x12\x17\n\nproject_id\x18\x01 \x01(\tB\x03\xe0\x41\x02\x12\x13\n\x06region\x18\x05 \x01(\tB\x03\xe0\x41\x02\x12\x19\n\x0c\x63luster_name\x18\x02 \x01(\tB\x03\xe0\x41\x02\x12<\n\x07\x63luster\x18\x03 \x01(\x0b\x32&.google.cloud.dataproc.v1beta2.ClusterB\x03\xe0\x41\x02\x12\x45\n\x1dgraceful_decommission_timeout\x18\x06 \x01(\x0b\x32\x19.google.protobuf.DurationB\x03\xe0\x41\x01\x12\x34\n\x0bupdate_mask\x18\x04 \x01(\x0b\x32\x1a.google.protobuf.FieldMaskB\x03\xe0\x41\x02\x12\x17\n\nrequest_id\x18\x07 \x01(\tB\x03\xe0\x41\x01"\x93\x01\n\x14\x44\x65leteClusterRequest\x12\x17\n\nproject_id\x18\x01 \x01(\tB\x03\xe0\x41\x02\x12\x13\n\x06region\x18\x03 
\x01(\tB\x03\xe0\x41\x02\x12\x19\n\x0c\x63luster_name\x18\x02 \x01(\tB\x03\xe0\x41\x02\x12\x19\n\x0c\x63luster_uuid\x18\x04 \x01(\tB\x03\xe0\x41\x01\x12\x17\n\nrequest_id\x18\x05 \x01(\tB\x03\xe0\x41\x01"\\\n\x11GetClusterRequest\x12\x17\n\nproject_id\x18\x01 \x01(\tB\x03\xe0\x41\x02\x12\x13\n\x06region\x18\x03 \x01(\tB\x03\xe0\x41\x02\x12\x19\n\x0c\x63luster_name\x18\x02 \x01(\tB\x03\xe0\x41\x02"\x89\x01\n\x13ListClustersRequest\x12\x17\n\nproject_id\x18\x01 \x01(\tB\x03\xe0\x41\x02\x12\x13\n\x06region\x18\x04 \x01(\tB\x03\xe0\x41\x02\x12\x13\n\x06\x66ilter\x18\x05 \x01(\tB\x03\xe0\x41\x01\x12\x16\n\tpage_size\x18\x02 \x01(\x05\x42\x03\xe0\x41\x01\x12\x17\n\npage_token\x18\x03 \x01(\tB\x03\xe0\x41\x01"s\n\x14ListClustersResponse\x12=\n\x08\x63lusters\x18\x01 \x03(\x0b\x32&.google.cloud.dataproc.v1beta2.ClusterB\x03\xe0\x41\x03\x12\x1c\n\x0fnext_page_token\x18\x02 \x01(\tB\x03\xe0\x41\x03"a\n\x16\x44iagnoseClusterRequest\x12\x17\n\nproject_id\x18\x01 \x01(\tB\x03\xe0\x41\x02\x12\x13\n\x06region\x18\x03 \x01(\tB\x03\xe0\x41\x02\x12\x19\n\x0c\x63luster_name\x18\x02 \x01(\tB\x03\xe0\x41\x02"1\n\x16\x44iagnoseClusterResults\x12\x17\n\noutput_uri\x18\x01 \x01(\tB\x03\xe0\x41\x03"\xfd\x01\n\x13ReservationAffinity\x12^\n\x18\x63onsume_reservation_type\x18\x01 \x01(\x0e\x32\x37.google.cloud.dataproc.v1beta2.ReservationAffinity.TypeB\x03\xe0\x41\x01\x12\x10\n\x03key\x18\x02 \x01(\tB\x03\xe0\x41\x01\x12\x13\n\x06values\x18\x03 \x03(\tB\x03\xe0\x41\x01"_\n\x04Type\x12\x14\n\x10TYPE_UNSPECIFIED\x10\x00\x12\x12\n\x0eNO_RESERVATION\x10\x01\x12\x13\n\x0f\x41NY_RESERVATION\x10\x02\x12\x18\n\x14SPECIFIC_RESERVATION\x10\x03\x32\xe7\r\n\x11\x43lusterController\x12\x91\x02\n\rCreateCluster\x12\x33.google.cloud.dataproc.v1beta2.CreateClusterRequest\x1a\x1d.google.longrunning.Operation"\xab\x01\x82\xd3\xe4\x93\x02\x43"8/v1beta2/projects/{project_id}/regions/{region}/clusters:\x07\x63luster\xda\x41\x1bproject_id, region, cluster\xca\x41\x41\n\x07\x43luster\x12\x36google.cloud.dataproc.v1beta2.ClusterOperationMetadata\x12\xbb\x02\n\rUpdateCluster\x12\x33.google.cloud.dataproc.v1beta2.UpdateClusterRequest\x1a\x1d.google.longrunning.Operation"\xd5\x01\x82\xd3\xe4\x93\x02R2G/v1beta2/projects/{project_id}/regions/{region}/clusters/{cluster_name}:\x07\x63luster\xda\x41\x36project_id, region, cluster_name, cluster, update_mask\xca\x41\x41\n\x07\x43luster\x12\x36google.cloud.dataproc.v1beta2.ClusterOperationMetadata\x12\xaa\x02\n\rDeleteCluster\x12\x33.google.cloud.dataproc.v1beta2.DeleteClusterRequest\x1a\x1d.google.longrunning.Operation"\xc4\x01\x82\xd3\xe4\x93\x02I*G/v1beta2/projects/{project_id}/regions/{region}/clusters/{cluster_name}\xda\x41 project_id, region, cluster_name\xca\x41O\n\x15google.protobuf.Empty\x12\x36google.cloud.dataproc.v1beta2.ClusterOperationMetadata\x12\xda\x01\n\nGetCluster\x12\x30.google.cloud.dataproc.v1beta2.GetClusterRequest\x1a&.google.cloud.dataproc.v1beta2.Cluster"r\x82\xd3\xe4\x93\x02I\x12G/v1beta2/projects/{project_id}/regions/{region}/clusters/{cluster_name}\xda\x41 project_id, region, cluster_name\x12\xeb\x01\n\x0cListClusters\x12\x32.google.cloud.dataproc.v1beta2.ListClustersRequest\x1a\x33.google.cloud.dataproc.v1beta2.ListClustersResponse"r\x82\xd3\xe4\x93\x02:\x12\x38/v1beta2/projects/{project_id}/regions/{region}/clusters\xda\x41\x12project_id, region\xda\x41\x1aproject_id, region, 
filter\x12\xba\x02\n\x0f\x44iagnoseCluster\x12\x35.google.cloud.dataproc.v1beta2.DiagnoseClusterRequest\x1a\x1d.google.longrunning.Operation"\xd0\x01\x82\xd3\xe4\x93\x02U"P/v1beta2/projects/{project_id}/regions/{region}/clusters/{cluster_name}:diagnose:\x01*\xda\x41 project_id, region, cluster_name\xca\x41O\n\x15google.protobuf.Empty\x12\x36google.cloud.dataproc.v1beta2.ClusterOperationMetadata\x1aK\xca\x41\x17\x64\x61taproc.googleapis.com\xd2\x41.https://www.googleapis.com/auth/cloud-platformB{\n!com.google.cloud.dataproc.v1beta2B\rClustersProtoP\x01ZEgoogle.golang.org/genproto/googleapis/cloud/dataproc/v1beta2;dataprocb\x06proto3' + '\n2google/cloud/dataproc_v1beta2/proto/clusters.proto\x12\x1dgoogle.cloud.dataproc.v1beta2\x1a\x1cgoogle/api/annotations.proto\x1a\x17google/api/client.proto\x1a\x1fgoogle/api/field_behavior.proto\x1a\x19google/api/resource.proto\x1a\x30google/cloud/dataproc_v1beta2/proto/shared.proto\x1a#google/longrunning/operations.proto\x1a\x1egoogle/protobuf/duration.proto\x1a google/protobuf/field_mask.proto\x1a\x1fgoogle/protobuf/timestamp.proto"\xe6\x03\n\x07\x43luster\x12\x17\n\nproject_id\x18\x01 \x01(\tB\x03\xe0\x41\x02\x12\x19\n\x0c\x63luster_name\x18\x02 \x01(\tB\x03\xe0\x41\x02\x12\x41\n\x06\x63onfig\x18\x03 \x01(\x0b\x32,.google.cloud.dataproc.v1beta2.ClusterConfigB\x03\xe0\x41\x02\x12G\n\x06labels\x18\x08 \x03(\x0b\x32\x32.google.cloud.dataproc.v1beta2.Cluster.LabelsEntryB\x03\xe0\x41\x01\x12\x41\n\x06status\x18\x04 \x01(\x0b\x32,.google.cloud.dataproc.v1beta2.ClusterStatusB\x03\xe0\x41\x03\x12I\n\x0estatus_history\x18\x07 \x03(\x0b\x32,.google.cloud.dataproc.v1beta2.ClusterStatusB\x03\xe0\x41\x03\x12\x19\n\x0c\x63luster_uuid\x18\x06 \x01(\tB\x03\xe0\x41\x03\x12\x43\n\x07metrics\x18\t \x01(\x0b\x32-.google.cloud.dataproc.v1beta2.ClusterMetricsB\x03\xe0\x41\x03\x1a-\n\x0bLabelsEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t:\x02\x38\x01"\xaf\x07\n\rClusterConfig\x12\x1a\n\rconfig_bucket\x18\x01 \x01(\tB\x03\xe0\x41\x01\x12P\n\x12gce_cluster_config\x18\x08 \x01(\x0b\x32/.google.cloud.dataproc.v1beta2.GceClusterConfigB\x03\xe0\x41\x01\x12N\n\rmaster_config\x18\t \x01(\x0b\x32\x32.google.cloud.dataproc.v1beta2.InstanceGroupConfigB\x03\xe0\x41\x01\x12N\n\rworker_config\x18\n \x01(\x0b\x32\x32.google.cloud.dataproc.v1beta2.InstanceGroupConfigB\x03\xe0\x41\x01\x12X\n\x17secondary_worker_config\x18\x0c \x01(\x0b\x32\x32.google.cloud.dataproc.v1beta2.InstanceGroupConfigB\x03\xe0\x41\x01\x12K\n\x0fsoftware_config\x18\r \x01(\x0b\x32-.google.cloud.dataproc.v1beta2.SoftwareConfigB\x03\xe0\x41\x01\x12M\n\x10lifecycle_config\x18\x0e \x01(\x0b\x32..google.cloud.dataproc.v1beta2.LifecycleConfigB\x03\xe0\x41\x01\x12\\\n\x16initialization_actions\x18\x0b \x03(\x0b\x32\x37.google.cloud.dataproc.v1beta2.NodeInitializationActionB\x03\xe0\x41\x01\x12O\n\x11\x65ncryption_config\x18\x0f \x01(\x0b\x32/.google.cloud.dataproc.v1beta2.EncryptionConfigB\x03\xe0\x41\x01\x12Q\n\x12\x61utoscaling_config\x18\x10 \x01(\x0b\x32\x30.google.cloud.dataproc.v1beta2.AutoscalingConfigB\x03\xe0\x41\x01\x12K\n\x0f\x65ndpoint_config\x18\x11 \x01(\x0b\x32-.google.cloud.dataproc.v1beta2.EndpointConfigB\x03\xe0\x41\x01\x12K\n\x0fsecurity_config\x18\x12 \x01(\x0b\x32-.google.cloud.dataproc.v1beta2.SecurityConfigB\x03\xe0\x41\x01"\xbf\x01\n\x0e\x45ndpointConfig\x12U\n\nhttp_ports\x18\x01 \x03(\x0b\x32<.google.cloud.dataproc.v1beta2.EndpointConfig.HttpPortsEntryB\x03\xe0\x41\x03\x12$\n\x17\x65nable_http_port_access\x18\x02 
\x01(\x08\x42\x03\xe0\x41\x01\x1a\x30\n\x0eHttpPortsEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t:\x02\x38\x01",\n\x11\x41utoscalingConfig\x12\x17\n\npolicy_uri\x18\x01 \x01(\tB\x03\xe0\x41\x01"4\n\x10\x45ncryptionConfig\x12 \n\x13gce_pd_kms_key_name\x18\x01 \x01(\tB\x03\xe0\x41\x01"\xa9\x03\n\x10GceClusterConfig\x12\x15\n\x08zone_uri\x18\x01 \x01(\tB\x03\xe0\x41\x01\x12\x18\n\x0bnetwork_uri\x18\x02 \x01(\tB\x03\xe0\x41\x01\x12\x1b\n\x0esubnetwork_uri\x18\x06 \x01(\tB\x03\xe0\x41\x01\x12\x1d\n\x10internal_ip_only\x18\x07 \x01(\x08\x42\x03\xe0\x41\x01\x12\x1c\n\x0fservice_account\x18\x08 \x01(\tB\x03\xe0\x41\x01\x12#\n\x16service_account_scopes\x18\x03 \x03(\tB\x03\xe0\x41\x01\x12\x0c\n\x04tags\x18\x04 \x03(\t\x12O\n\x08metadata\x18\x05 \x03(\x0b\x32=.google.cloud.dataproc.v1beta2.GceClusterConfig.MetadataEntry\x12U\n\x14reservation_affinity\x18\x0b \x01(\x0b\x32\x32.google.cloud.dataproc.v1beta2.ReservationAffinityB\x03\xe0\x41\x01\x1a/\n\rMetadataEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t:\x02\x38\x01"\xa4\x03\n\x13InstanceGroupConfig\x12\x1a\n\rnum_instances\x18\x01 \x01(\x05\x42\x03\xe0\x41\x01\x12\x1b\n\x0einstance_names\x18\x02 \x03(\tB\x03\xe0\x41\x03\x12\x16\n\timage_uri\x18\x03 \x01(\tB\x03\xe0\x41\x01\x12\x1d\n\x10machine_type_uri\x18\x04 \x01(\tB\x03\xe0\x41\x01\x12\x43\n\x0b\x64isk_config\x18\x05 \x01(\x0b\x32).google.cloud.dataproc.v1beta2.DiskConfigB\x03\xe0\x41\x01\x12\x1b\n\x0eis_preemptible\x18\x06 \x01(\x08\x42\x03\xe0\x41\x01\x12T\n\x14managed_group_config\x18\x07 \x01(\x0b\x32\x31.google.cloud.dataproc.v1beta2.ManagedGroupConfigB\x03\xe0\x41\x03\x12K\n\x0c\x61\x63\x63\x65lerators\x18\x08 \x03(\x0b\x32\x30.google.cloud.dataproc.v1beta2.AcceleratorConfigB\x03\xe0\x41\x01\x12\x18\n\x10min_cpu_platform\x18\t \x01(\t"c\n\x12ManagedGroupConfig\x12#\n\x16instance_template_name\x18\x01 \x01(\tB\x03\xe0\x41\x03\x12(\n\x1binstance_group_manager_name\x18\x02 \x01(\tB\x03\xe0\x41\x03"L\n\x11\x41\x63\x63\x65leratorConfig\x12\x1c\n\x14\x61\x63\x63\x65lerator_type_uri\x18\x01 \x01(\t\x12\x19\n\x11\x61\x63\x63\x65lerator_count\x18\x02 \x01(\x05"a\n\nDiskConfig\x12\x1b\n\x0e\x62oot_disk_type\x18\x03 \x01(\tB\x03\xe0\x41\x01\x12\x1e\n\x11\x62oot_disk_size_gb\x18\x01 \x01(\x05\x42\x03\xe0\x41\x01\x12\x16\n\x0enum_local_ssds\x18\x02 \x01(\x05"\x83\x02\n\x0fLifecycleConfig\x12\x37\n\x0fidle_delete_ttl\x18\x01 \x01(\x0b\x32\x19.google.protobuf.DurationB\x03\xe0\x41\x01\x12;\n\x10\x61uto_delete_time\x18\x02 \x01(\x0b\x32\x1a.google.protobuf.TimestampB\x03\xe0\x41\x01H\x00\x12\x39\n\x0f\x61uto_delete_ttl\x18\x03 \x01(\x0b\x32\x19.google.protobuf.DurationB\x03\xe0\x41\x01H\x00\x12\x38\n\x0fidle_start_time\x18\x04 \x01(\x0b\x32\x1a.google.protobuf.TimestampB\x03\xe0\x41\x03\x42\x05\n\x03ttl"X\n\x0eSecurityConfig\x12\x46\n\x0fkerberos_config\x18\x01 \x01(\x0b\x32-.google.cloud.dataproc.v1beta2.KerberosConfig"\x90\x04\n\x0eKerberosConfig\x12\x1c\n\x0f\x65nable_kerberos\x18\x01 \x01(\x08\x42\x03\xe0\x41\x01\x12(\n\x1broot_principal_password_uri\x18\x02 \x01(\tB\x03\xe0\x41\x02\x12\x18\n\x0bkms_key_uri\x18\x03 \x01(\tB\x03\xe0\x41\x02\x12\x19\n\x0ckeystore_uri\x18\x04 \x01(\tB\x03\xe0\x41\x01\x12\x1b\n\x0etruststore_uri\x18\x05 \x01(\tB\x03\xe0\x41\x01\x12"\n\x15keystore_password_uri\x18\x06 \x01(\tB\x03\xe0\x41\x01\x12\x1d\n\x10key_password_uri\x18\x07 \x01(\tB\x03\xe0\x41\x01\x12$\n\x17truststore_password_uri\x18\x08 \x01(\tB\x03\xe0\x41\x01\x12$\n\x17\x63ross_realm_trust_realm\x18\t 
\x01(\tB\x03\xe0\x41\x01\x12"\n\x15\x63ross_realm_trust_kdc\x18\n \x01(\tB\x03\xe0\x41\x01\x12+\n\x1e\x63ross_realm_trust_admin_server\x18\x0b \x01(\tB\x03\xe0\x41\x01\x12\x32\n%cross_realm_trust_shared_password_uri\x18\x0c \x01(\tB\x03\xe0\x41\x01\x12\x1b\n\x0ekdc_db_key_uri\x18\r \x01(\tB\x03\xe0\x41\x01\x12\x1f\n\x12tgt_lifetime_hours\x18\x0e \x01(\x05\x42\x03\xe0\x41\x01\x12\x12\n\x05realm\x18\x0f \x01(\tB\x03\xe0\x41\x01"s\n\x18NodeInitializationAction\x12\x1c\n\x0f\x65xecutable_file\x18\x01 \x01(\tB\x03\xe0\x41\x02\x12\x39\n\x11\x65xecution_timeout\x18\x02 \x01(\x0b\x32\x19.google.protobuf.DurationB\x03\xe0\x41\x01"\x8b\x03\n\rClusterStatus\x12\x46\n\x05state\x18\x01 \x01(\x0e\x32\x32.google.cloud.dataproc.v1beta2.ClusterStatus.StateB\x03\xe0\x41\x03\x12\x13\n\x06\x64\x65tail\x18\x02 \x01(\tB\x03\xe0\x41\x03\x12\x39\n\x10state_start_time\x18\x03 \x01(\x0b\x32\x1a.google.protobuf.TimestampB\x03\xe0\x41\x03\x12L\n\x08substate\x18\x04 \x01(\x0e\x32\x35.google.cloud.dataproc.v1beta2.ClusterStatus.SubstateB\x03\xe0\x41\x03"V\n\x05State\x12\x0b\n\x07UNKNOWN\x10\x00\x12\x0c\n\x08\x43REATING\x10\x01\x12\x0b\n\x07RUNNING\x10\x02\x12\t\n\x05\x45RROR\x10\x03\x12\x0c\n\x08\x44\x45LETING\x10\x04\x12\x0c\n\x08UPDATING\x10\x05"<\n\x08Substate\x12\x0f\n\x0bUNSPECIFIED\x10\x00\x12\r\n\tUNHEALTHY\x10\x01\x12\x10\n\x0cSTALE_STATUS\x10\x02"\xfe\x01\n\x0eSoftwareConfig\x12\x1a\n\rimage_version\x18\x01 \x01(\tB\x03\xe0\x41\x01\x12V\n\nproperties\x18\x02 \x03(\x0b\x32=.google.cloud.dataproc.v1beta2.SoftwareConfig.PropertiesEntryB\x03\xe0\x41\x01\x12\x45\n\x13optional_components\x18\x03 \x03(\x0e\x32(.google.cloud.dataproc.v1beta2.Component\x1a\x31\n\x0fPropertiesEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t:\x02\x38\x01"\xa4\x02\n\x0e\x43lusterMetrics\x12T\n\x0chdfs_metrics\x18\x01 \x03(\x0b\x32>.google.cloud.dataproc.v1beta2.ClusterMetrics.HdfsMetricsEntry\x12T\n\x0cyarn_metrics\x18\x02 \x03(\x0b\x32>.google.cloud.dataproc.v1beta2.ClusterMetrics.YarnMetricsEntry\x1a\x32\n\x10HdfsMetricsEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\x03:\x02\x38\x01\x1a\x32\n\x10YarnMetricsEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\x03:\x02\x38\x01"\x9b\x01\n\x14\x43reateClusterRequest\x12\x17\n\nproject_id\x18\x01 \x01(\tB\x03\xe0\x41\x02\x12\x13\n\x06region\x18\x03 \x01(\tB\x03\xe0\x41\x02\x12<\n\x07\x63luster\x18\x02 \x01(\x0b\x32&.google.cloud.dataproc.v1beta2.ClusterB\x03\xe0\x41\x02\x12\x17\n\nrequest_id\x18\x04 \x01(\tB\x03\xe0\x41\x01"\xb3\x02\n\x14UpdateClusterRequest\x12\x17\n\nproject_id\x18\x01 \x01(\tB\x03\xe0\x41\x02\x12\x13\n\x06region\x18\x05 \x01(\tB\x03\xe0\x41\x02\x12\x19\n\x0c\x63luster_name\x18\x02 \x01(\tB\x03\xe0\x41\x02\x12<\n\x07\x63luster\x18\x03 \x01(\x0b\x32&.google.cloud.dataproc.v1beta2.ClusterB\x03\xe0\x41\x02\x12\x45\n\x1dgraceful_decommission_timeout\x18\x06 \x01(\x0b\x32\x19.google.protobuf.DurationB\x03\xe0\x41\x01\x12\x34\n\x0bupdate_mask\x18\x04 \x01(\x0b\x32\x1a.google.protobuf.FieldMaskB\x03\xe0\x41\x02\x12\x17\n\nrequest_id\x18\x07 \x01(\tB\x03\xe0\x41\x01"\x93\x01\n\x14\x44\x65leteClusterRequest\x12\x17\n\nproject_id\x18\x01 \x01(\tB\x03\xe0\x41\x02\x12\x13\n\x06region\x18\x03 \x01(\tB\x03\xe0\x41\x02\x12\x19\n\x0c\x63luster_name\x18\x02 \x01(\tB\x03\xe0\x41\x02\x12\x19\n\x0c\x63luster_uuid\x18\x04 \x01(\tB\x03\xe0\x41\x01\x12\x17\n\nrequest_id\x18\x05 \x01(\tB\x03\xe0\x41\x01"\\\n\x11GetClusterRequest\x12\x17\n\nproject_id\x18\x01 \x01(\tB\x03\xe0\x41\x02\x12\x13\n\x06region\x18\x03 
\x01(\tB\x03\xe0\x41\x02\x12\x19\n\x0c\x63luster_name\x18\x02 \x01(\tB\x03\xe0\x41\x02"\x89\x01\n\x13ListClustersRequest\x12\x17\n\nproject_id\x18\x01 \x01(\tB\x03\xe0\x41\x02\x12\x13\n\x06region\x18\x04 \x01(\tB\x03\xe0\x41\x02\x12\x13\n\x06\x66ilter\x18\x05 \x01(\tB\x03\xe0\x41\x01\x12\x16\n\tpage_size\x18\x02 \x01(\x05\x42\x03\xe0\x41\x01\x12\x17\n\npage_token\x18\x03 \x01(\tB\x03\xe0\x41\x01"s\n\x14ListClustersResponse\x12=\n\x08\x63lusters\x18\x01 \x03(\x0b\x32&.google.cloud.dataproc.v1beta2.ClusterB\x03\xe0\x41\x03\x12\x1c\n\x0fnext_page_token\x18\x02 \x01(\tB\x03\xe0\x41\x03"a\n\x16\x44iagnoseClusterRequest\x12\x17\n\nproject_id\x18\x01 \x01(\tB\x03\xe0\x41\x02\x12\x13\n\x06region\x18\x03 \x01(\tB\x03\xe0\x41\x02\x12\x19\n\x0c\x63luster_name\x18\x02 \x01(\tB\x03\xe0\x41\x02"1\n\x16\x44iagnoseClusterResults\x12\x17\n\noutput_uri\x18\x01 \x01(\tB\x03\xe0\x41\x03"\xfd\x01\n\x13ReservationAffinity\x12^\n\x18\x63onsume_reservation_type\x18\x01 \x01(\x0e\x32\x37.google.cloud.dataproc.v1beta2.ReservationAffinity.TypeB\x03\xe0\x41\x01\x12\x10\n\x03key\x18\x02 \x01(\tB\x03\xe0\x41\x01\x12\x13\n\x06values\x18\x03 \x03(\tB\x03\xe0\x41\x01"_\n\x04Type\x12\x14\n\x10TYPE_UNSPECIFIED\x10\x00\x12\x12\n\x0eNO_RESERVATION\x10\x01\x12\x13\n\x0f\x41NY_RESERVATION\x10\x02\x12\x18\n\x14SPECIFIC_RESERVATION\x10\x03\x32\xe7\r\n\x11\x43lusterController\x12\x91\x02\n\rCreateCluster\x12\x33.google.cloud.dataproc.v1beta2.CreateClusterRequest\x1a\x1d.google.longrunning.Operation"\xab\x01\x82\xd3\xe4\x93\x02\x43"8/v1beta2/projects/{project_id}/regions/{region}/clusters:\x07\x63luster\xda\x41\x1bproject_id, region, cluster\xca\x41\x41\n\x07\x43luster\x12\x36google.cloud.dataproc.v1beta2.ClusterOperationMetadata\x12\xbb\x02\n\rUpdateCluster\x12\x33.google.cloud.dataproc.v1beta2.UpdateClusterRequest\x1a\x1d.google.longrunning.Operation"\xd5\x01\x82\xd3\xe4\x93\x02R2G/v1beta2/projects/{project_id}/regions/{region}/clusters/{cluster_name}:\x07\x63luster\xda\x41\x36project_id, region, cluster_name, cluster, update_mask\xca\x41\x41\n\x07\x43luster\x12\x36google.cloud.dataproc.v1beta2.ClusterOperationMetadata\x12\xaa\x02\n\rDeleteCluster\x12\x33.google.cloud.dataproc.v1beta2.DeleteClusterRequest\x1a\x1d.google.longrunning.Operation"\xc4\x01\x82\xd3\xe4\x93\x02I*G/v1beta2/projects/{project_id}/regions/{region}/clusters/{cluster_name}\xda\x41 project_id, region, cluster_name\xca\x41O\n\x15google.protobuf.Empty\x12\x36google.cloud.dataproc.v1beta2.ClusterOperationMetadata\x12\xda\x01\n\nGetCluster\x12\x30.google.cloud.dataproc.v1beta2.GetClusterRequest\x1a&.google.cloud.dataproc.v1beta2.Cluster"r\x82\xd3\xe4\x93\x02I\x12G/v1beta2/projects/{project_id}/regions/{region}/clusters/{cluster_name}\xda\x41 project_id, region, cluster_name\x12\xeb\x01\n\x0cListClusters\x12\x32.google.cloud.dataproc.v1beta2.ListClustersRequest\x1a\x33.google.cloud.dataproc.v1beta2.ListClustersResponse"r\x82\xd3\xe4\x93\x02:\x12\x38/v1beta2/projects/{project_id}/regions/{region}/clusters\xda\x41\x12project_id, region\xda\x41\x1aproject_id, region, filter\x12\xba\x02\n\x0f\x44iagnoseCluster\x12\x35.google.cloud.dataproc.v1beta2.DiagnoseClusterRequest\x1a\x1d.google.longrunning.Operation"\xd0\x01\x82\xd3\xe4\x93\x02U"P/v1beta2/projects/{project_id}/regions/{region}/clusters/{cluster_name}:diagnose:\x01*\xda\x41 project_id, region, 
cluster_name\xca\x41O\n\x15google.protobuf.Empty\x12\x36google.cloud.dataproc.v1beta2.ClusterOperationMetadata\x1aK\xca\x41\x17\x64\x61taproc.googleapis.com\xd2\x41.https://www.googleapis.com/auth/cloud-platformB{\n!com.google.cloud.dataproc.v1beta2B\rClustersProtoP\x01ZEgoogle.golang.org/genproto/googleapis/cloud/dataproc/v1beta2;dataprocb\x06proto3' ), dependencies=[ google_dot_api_dot_annotations__pb2.DESCRIPTOR, google_dot_api_dot_client__pb2.DESCRIPTOR, google_dot_api_dot_field__behavior__pb2.DESCRIPTOR, - google_dot_cloud_dot_dataproc__v1beta2_dot_proto_dot_operations__pb2.DESCRIPTOR, + google_dot_api_dot_resource__pb2.DESCRIPTOR, google_dot_cloud_dot_dataproc__v1beta2_dot_proto_dot_shared__pb2.DESCRIPTOR, google_dot_longrunning_dot_operations__pb2.DESCRIPTOR, google_dot_protobuf_dot_duration__pb2.DESCRIPTOR, @@ -84,8 +82,8 @@ ], containing_type=None, serialized_options=None, - serialized_start=4509, - serialized_end=4595, + serialized_start=4492, + serialized_end=4578, ) _sym_db.RegisterEnumDescriptor(_CLUSTERSTATUS_STATE) @@ -107,8 +105,8 @@ ], containing_type=None, serialized_options=None, - serialized_start=4597, - serialized_end=4657, + serialized_start=4580, + serialized_end=4640, ) _sym_db.RegisterEnumDescriptor(_CLUSTERSTATUS_SUBSTATE) @@ -145,8 +143,8 @@ ], containing_type=None, serialized_options=None, - serialized_start=6489, - serialized_end=6584, + serialized_start=6472, + serialized_end=6567, ) _sym_db.RegisterEnumDescriptor(_RESERVATIONAFFINITY_TYPE) @@ -203,8 +201,8 @@ syntax="proto3", extension_ranges=[], oneofs=[], - serialized_start=855, - serialized_end=900, + serialized_start=828, + serialized_end=873, ) _CLUSTER = _descriptor.Descriptor( @@ -367,8 +365,8 @@ syntax="proto3", extension_ranges=[], oneofs=[], - serialized_start=414, - serialized_end=900, + serialized_start=387, + serialized_end=873, ) @@ -604,8 +602,8 @@ syntax="proto3", extension_ranges=[], oneofs=[], - serialized_start=903, - serialized_end=1846, + serialized_start=876, + serialized_end=1819, ) @@ -661,8 +659,8 @@ syntax="proto3", extension_ranges=[], oneofs=[], - serialized_start=1992, - serialized_end=2040, + serialized_start=1965, + serialized_end=2013, ) _ENDPOINTCONFIG = _descriptor.Descriptor( @@ -717,8 +715,8 @@ syntax="proto3", extension_ranges=[], oneofs=[], - serialized_start=1849, - serialized_end=2040, + serialized_start=1822, + serialized_end=2013, ) @@ -756,8 +754,8 @@ syntax="proto3", extension_ranges=[], oneofs=[], - serialized_start=2042, - serialized_end=2086, + serialized_start=2015, + serialized_end=2059, ) @@ -795,8 +793,8 @@ syntax="proto3", extension_ranges=[], oneofs=[], - serialized_start=2088, - serialized_end=2140, + serialized_start=2061, + serialized_end=2113, ) @@ -852,8 +850,8 @@ syntax="proto3", extension_ranges=[], oneofs=[], - serialized_start=2521, - serialized_end=2568, + serialized_start=2494, + serialized_end=2541, ) _GCECLUSTERCONFIG = _descriptor.Descriptor( @@ -1034,8 +1032,8 @@ syntax="proto3", extension_ranges=[], oneofs=[], - serialized_start=2143, - serialized_end=2568, + serialized_start=2116, + serialized_end=2541, ) @@ -1217,8 +1215,8 @@ syntax="proto3", extension_ranges=[], oneofs=[], - serialized_start=2571, - serialized_end=2991, + serialized_start=2544, + serialized_end=2964, ) @@ -1274,8 +1272,8 @@ syntax="proto3", extension_ranges=[], oneofs=[], - serialized_start=2993, - serialized_end=3092, + serialized_start=2966, + serialized_end=3065, ) @@ -1331,8 +1329,8 @@ syntax="proto3", extension_ranges=[], oneofs=[], - serialized_start=3094, - 
serialized_end=3170, + serialized_start=3067, + serialized_end=3143, ) @@ -1406,8 +1404,8 @@ syntax="proto3", extension_ranges=[], oneofs=[], - serialized_start=3172, - serialized_end=3269, + serialized_start=3145, + serialized_end=3242, ) @@ -1451,7 +1449,7 @@ containing_type=None, is_extension=False, extension_scope=None, - serialized_options=None, + serialized_options=_b("\340A\001"), file=DESCRIPTOR, ), _descriptor.FieldDescriptor( @@ -1469,7 +1467,7 @@ containing_type=None, is_extension=False, extension_scope=None, - serialized_options=None, + serialized_options=_b("\340A\001"), file=DESCRIPTOR, ), _descriptor.FieldDescriptor( @@ -1507,8 +1505,8 @@ fields=[], ) ], - serialized_start=3272, - serialized_end=3521, + serialized_start=3245, + serialized_end=3504, ) @@ -1546,8 +1544,8 @@ syntax="proto3", extension_ranges=[], oneofs=[], - serialized_start=3523, - serialized_end=3611, + serialized_start=3506, + serialized_end=3594, ) @@ -1837,8 +1835,8 @@ syntax="proto3", extension_ranges=[], oneofs=[], - serialized_start=3614, - serialized_end=4142, + serialized_start=3597, + serialized_end=4125, ) @@ -1894,8 +1892,8 @@ syntax="proto3", extension_ranges=[], oneofs=[], - serialized_start=4144, - serialized_end=4259, + serialized_start=4127, + serialized_end=4242, ) @@ -1987,8 +1985,8 @@ syntax="proto3", extension_ranges=[], oneofs=[], - serialized_start=4262, - serialized_end=4657, + serialized_start=4245, + serialized_end=4640, ) @@ -2044,8 +2042,8 @@ syntax="proto3", extension_ranges=[], oneofs=[], - serialized_start=4865, - serialized_end=4914, + serialized_start=4848, + serialized_end=4897, ) _SOFTWARECONFIG = _descriptor.Descriptor( @@ -2118,8 +2116,8 @@ syntax="proto3", extension_ranges=[], oneofs=[], - serialized_start=4660, - serialized_end=4914, + serialized_start=4643, + serialized_end=4897, ) @@ -2175,8 +2173,8 @@ syntax="proto3", extension_ranges=[], oneofs=[], - serialized_start=5107, - serialized_end=5157, + serialized_start=5090, + serialized_end=5140, ) _CLUSTERMETRICS_YARNMETRICSENTRY = _descriptor.Descriptor( @@ -2231,8 +2229,8 @@ syntax="proto3", extension_ranges=[], oneofs=[], - serialized_start=5159, - serialized_end=5209, + serialized_start=5142, + serialized_end=5192, ) _CLUSTERMETRICS = _descriptor.Descriptor( @@ -2287,8 +2285,8 @@ syntax="proto3", extension_ranges=[], oneofs=[], - serialized_start=4917, - serialized_end=5209, + serialized_start=4900, + serialized_end=5192, ) @@ -2380,8 +2378,8 @@ syntax="proto3", extension_ranges=[], oneofs=[], - serialized_start=5212, - serialized_end=5367, + serialized_start=5195, + serialized_end=5350, ) @@ -2527,8 +2525,8 @@ syntax="proto3", extension_ranges=[], oneofs=[], - serialized_start=5370, - serialized_end=5677, + serialized_start=5353, + serialized_end=5660, ) @@ -2638,8 +2636,8 @@ syntax="proto3", extension_ranges=[], oneofs=[], - serialized_start=5680, - serialized_end=5827, + serialized_start=5663, + serialized_end=5810, ) @@ -2713,8 +2711,8 @@ syntax="proto3", extension_ranges=[], oneofs=[], - serialized_start=5829, - serialized_end=5921, + serialized_start=5812, + serialized_end=5904, ) @@ -2824,8 +2822,8 @@ syntax="proto3", extension_ranges=[], oneofs=[], - serialized_start=5924, - serialized_end=6061, + serialized_start=5907, + serialized_end=6044, ) @@ -2881,8 +2879,8 @@ syntax="proto3", extension_ranges=[], oneofs=[], - serialized_start=6063, - serialized_end=6178, + serialized_start=6046, + serialized_end=6161, ) @@ -2956,8 +2954,8 @@ syntax="proto3", extension_ranges=[], oneofs=[], - serialized_start=6180, - 
serialized_end=6277, + serialized_start=6163, + serialized_end=6260, ) @@ -2995,8 +2993,8 @@ syntax="proto3", extension_ranges=[], oneofs=[], - serialized_start=6279, - serialized_end=6328, + serialized_start=6262, + serialized_end=6311, ) @@ -3070,8 +3068,8 @@ syntax="proto3", extension_ranges=[], oneofs=[], - serialized_start=6331, - serialized_end=6584, + serialized_start=6314, + serialized_end=6567, ) _CLUSTER_LABELSENTRY.containing_type = _CLUSTER @@ -3231,7 +3229,7 @@ Required. The cluster name. Cluster names within a project must be unique. Names of deleted clusters can be reused. config: - Required. The cluster config. Note that Cloud Dataproc may set + Required. The cluster config. Note that Dataproc may set default values, and values may change when clusters are updated. labels: @@ -3248,8 +3246,7 @@ Output only. The previous cluster status. cluster_uuid: Output only. A cluster UUID (Unique Universal Identifier). - Cloud Dataproc generates this value when it creates the - cluster. + Dataproc generates this value when it creates the cluster. metrics: Output only. Contains cluster daemon metrics such as HDFS and YARN stats. **Beta Feature**: This report is available for @@ -3272,15 +3269,15 @@ Attributes: config_bucket: - Optional. A Google Cloud Storage bucket used to stage job + Optional. A Cloud Storage bucket used to stage job dependencies, config files, and job driver console output. If you do not specify a staging bucket, Cloud Dataproc will determine a Cloud Storage location (US, ASIA, or EU) for your - cluster's staging bucket according to the Google Compute - Engine zone where your cluster is deployed, and then create - and manage this project-level, per-location bucket (see `Cloud - Dataproc staging bucket `__). + cluster's staging bucket according to the Compute Engine zone + where your cluster is deployed, and then create and manage + this project-level, per-location bucket (see `Dataproc staging + bucket `__). gce_cluster_config: Optional. The shared Compute Engine config settings for all instances in a cluster. @@ -3372,7 +3369,7 @@ rojects/[project_id]/locations/[dataproc_region]/autoscalingPo licies/[policy_id]`` - ``projects/[project_id]/locations/[dat aproc_region]/autoscalingPolicies/[policy_id]`` Note that the - policy must be in the same project and Cloud Dataproc region. + policy must be in the same project and Dataproc region. """, # @@protoc_insertion_point(class_scope:google.cloud.dataproc.v1beta2.AutoscalingConfig) ), @@ -3421,7 +3418,7 @@ zone_uri: Optional. The zone where the Compute Engine cluster will be located. On a create request, it is required in the "global" - region. If omitted in a non-global Cloud Dataproc region, the + region. If omitted in a non-global Dataproc region, the service will pick a zone in the corresponding Compute Engine region. On a get request, zone will always be present. A full URL, partial URI, or short name are valid. Examples: - ``htt @@ -3457,14 +3454,15 @@ networks, and all off-cluster dependencies must be configured to be accessible without external IP addresses. service_account: - Optional. The service account of the instances. Defaults to - the default Compute Engine service account. Custom service - accounts need permissions equivalent to the following IAM - roles: - roles/logging.logWriter - - roles/storage.objectAdmin (see - https://cloud.google.com/compute/docs/access/service- - accounts#custom\_service\_accounts for more information). - Example: ``[account_id]@[project_id].iam.gserviceaccount.com`` + Optional. 
The `Dataproc service account + `__ (also see `VM + Data Plane identity `__) used by + Dataproc cluster VM instances to access Google Cloud Platform + services. If not specified, the `Compute Engine default + service account `__ is used. service_account_scopes: Optional. The URIs of service account scopes to be included in Compute Engine instances. The following base set of scopes is @@ -3510,9 +3508,9 @@ Optional. The number of VM instances in the instance group. For master instance groups, must be set to 1. instance_names: - Output only. The list of instance names. Cloud Dataproc - derives the names from ``cluster_name``, ``num_instances``, - and the instance group. + Output only. The list of instance names. Dataproc derives the + names from ``cluster_name``, ``num_instances``, and the + instance group. image_uri: Optional. The Compute Engine image resource used for cluster instances. It can be specified or may be inferred from @@ -3524,8 +3522,8 @@ /[project_id]/zones/us-east1-a/machineTypes/n1-standard-2`` - ``projects/[project_id]/zones/us- east1-a/machineTypes/n1-standard-2`` - ``n1-standard-2`` - **Auto Zone Exception**: If you are using the Cloud Dataproc - `Auto Zone Placement `__ feature, you must use the short name of the machine type resource, for example, ``n1-standard-2``. @@ -3543,7 +3541,7 @@ these instances. min_cpu_platform: Specifies the minimum cpu platform for the Instance Group. See - [Cloud Dataproc→Minimum CPU Platform] + [Dataproc→Minimum CPU Platform] (/dataproc/docs/concepts/compute/dataproc-min-cpu). """, # @@protoc_insertion_point(class_scope:google.cloud.dataproc.v1beta2.InstanceGroupConfig) @@ -3596,7 +3594,7 @@ ``projects/[project_id]/zones/us- east1-a/acceleratorTypes/nvidia-tesla-k80`` \* ``nvidia- tesla-k80`` **Auto Zone Exception**: If you are using the - Cloud Dataproc `Auto Zone Placement + Dataproc `Auto Zone Placement `__ feature, you must use the short name of the accelerator type resource, for example, @@ -3651,25 +3649,32 @@ Attributes: idle_delete_ttl: - Optional. The duration to keep the cluster alive while idling. - Passing this threshold will cause the cluster to be deleted. - Valid range: **[10m, 14d]**. Example: **"10m"**, the minimum - value, to delete the cluster when it has had no jobs running - for 10 minutes. + Optional. The duration to keep the cluster alive while idling + (when no jobs are running). Passing this threshold will cause + the cluster to be deleted. Minimum value is 10 minutes; + maximum value is 14 days (see JSON representation of `Duration + `__. ttl: Either the exact time the cluster should be deleted at or the cluster maximum age. auto_delete_time: - Optional. The time when cluster will be auto-deleted. + Optional. The time when cluster will be auto-deleted. (see + JSON representation of `Timestamp + `__). auto_delete_ttl: Optional. The lifetime duration of cluster. The cluster will - be auto-deleted at the end of this period. Valid range: - **[10m, 14d]**. Example: **"1d"**, to delete the cluster 1 - day after its creation.. + be auto-deleted at the end of this period. Minimum value is 10 + minutes; maximum value is 14 days (see JSON representation of + `Duration `__). idle_start_time: Output only. The time when cluster became idle (most recent - job finished) and became eligible for deletion due to - idleness. + job finished) and became eligible for deletion due to idleness + (see JSON representation of `Timestamp + `__). 
""", # @@protoc_insertion_point(class_scope:google.cloud.dataproc.v1beta2.LifecycleConfig) ), @@ -3782,10 +3787,12 @@ Required. Cloud Storage URI of executable file. execution_timeout: Optional. Amount of time executable has to complete. Default - is 10 minutes. Cluster creation fails with an explanatory - error message (the name of the executable that caused the - error and the exceeded timeout period) if the executable is - not completed at end of the timeout period. + is 10 minutes (see JSON representation of `Duration + `__). Cluster creation fails with an + explanatory error message (the name of the executable that + caused the error and the exceeded timeout period) if the + executable is not completed at end of the timeout period. """, # @@protoc_insertion_point(class_scope:google.cloud.dataproc.v1beta2.NodeInitializationAction) ), @@ -3807,7 +3814,10 @@ detail: Output only. Optional details of cluster's state. state_start_time: - Output only. Time when this state was entered. + Output only. Time when this state was entered (see JSON + representation of `Timestamp + `__). substate: Output only. Additional state information that includes status reported by the agent. @@ -3839,7 +3849,7 @@ Attributes: image_version: Optional. The version of software inside the cluster. It must - be one of the supported `Cloud Dataproc Versions + be one of the supported `Dataproc Versions `__, such as "1.2" (including a subminor version, such as "1.2.29"), or the @@ -3925,8 +3935,7 @@ Required. The ID of the Google Cloud Platform project that the cluster belongs to. region: - Required. The Cloud Dataproc region in which to handle the - request. + Required. The Dataproc region in which to handle the request. cluster: Required. The cluster to create. request_id: @@ -3961,8 +3970,7 @@ Required. The ID of the Google Cloud Platform project the cluster belongs to. region: - Required. The Cloud Dataproc region in which to handle the - request. + Required. The Dataproc region in which to handle the request. cluster_name: Required. The cluster name. cluster: @@ -3974,8 +3982,10 @@ wait for jobs in progress to finish before forcefully removing nodes (and potentially interrupting jobs). Default timeout is 0 (for forceful decommission), and the maximum allowed timeout - is 1 day. Only supported on Dataproc image versions 1.2 and - higher. + is 1 day (see JSON representation of `Duration + `__). Only supported on Dataproc + image versions 1.2 and higher. update_mask: Required. Specifies the path, relative to ``Cluster``, of the field to update. For example, to change the number of workers @@ -4053,8 +4063,7 @@ Required. The ID of the Google Cloud Platform project that the cluster belongs to. region: - Required. The Cloud Dataproc region in which to handle the - request. + Required. The Dataproc region in which to handle the request. cluster_name: Required. The cluster name. cluster_uuid: @@ -4094,8 +4103,7 @@ Required. The ID of the Google Cloud Platform project that the cluster belongs to. region: - Required. The Cloud Dataproc region in which to handle the - request. + Required. The Dataproc region in which to handle the request. cluster_name: Required. The cluster name. """, @@ -4118,8 +4126,7 @@ Required. The ID of the Google Cloud Platform project that the cluster belongs to. region: - Required. The Cloud Dataproc region in which to handle the - request. + Required. The Dataproc region in which to handle the request. filter: Optional. A filter constraining the clusters to list. 
Filters are case-sensitive and have the following syntax: field = @@ -4184,8 +4191,7 @@ Required. The ID of the Google Cloud Platform project that the cluster belongs to. region: - Required. The Cloud Dataproc region in which to handle the - request. + Required. The Dataproc region in which to handle the request. cluster_name: Required. The cluster name. """, @@ -4287,6 +4293,8 @@ _DISKCONFIG.fields_by_name["boot_disk_type"]._options = None _DISKCONFIG.fields_by_name["boot_disk_size_gb"]._options = None _LIFECYCLECONFIG.fields_by_name["idle_delete_ttl"]._options = None +_LIFECYCLECONFIG.fields_by_name["auto_delete_time"]._options = None +_LIFECYCLECONFIG.fields_by_name["auto_delete_ttl"]._options = None _LIFECYCLECONFIG.fields_by_name["idle_start_time"]._options = None _KERBEROSCONFIG.fields_by_name["enable_kerberos"]._options = None _KERBEROSCONFIG.fields_by_name["root_principal_password_uri"]._options = None @@ -4356,8 +4364,8 @@ serialized_options=_b( "\312A\027dataproc.googleapis.com\322A.https://www.googleapis.com/auth/cloud-platform" ), - serialized_start=6587, - serialized_end=8354, + serialized_start=6570, + serialized_end=8337, methods=[ _descriptor.MethodDescriptor( name="CreateCluster", diff --git a/google/cloud/dataproc_v1beta2/proto/clusters_pb2_grpc.py b/google/cloud/dataproc_v1beta2/proto/clusters_pb2_grpc.py index de982140..7623f9d7 100644 --- a/google/cloud/dataproc_v1beta2/proto/clusters_pb2_grpc.py +++ b/google/cloud/dataproc_v1beta2/proto/clusters_pb2_grpc.py @@ -60,7 +60,7 @@ class ClusterControllerServicer(object): def CreateCluster(self, request, context): """Creates a cluster in a project. The returned [Operation.metadata][google.longrunning.Operation.metadata] will be - [ClusterOperationMetadata](/dataproc/docs/reference/rpc/google.cloud.dataproc.v1beta2#clusteroperationmetadata). + [ClusterOperationMetadata](https://cloud.google.com/dataproc/docs/reference/rpc/google.cloud.dataproc.v1beta2#clusteroperationmetadata). """ context.set_code(grpc.StatusCode.UNIMPLEMENTED) context.set_details("Method not implemented!") @@ -69,7 +69,7 @@ def CreateCluster(self, request, context): def UpdateCluster(self, request, context): """Updates a cluster in a project. The returned [Operation.metadata][google.longrunning.Operation.metadata] will be - [ClusterOperationMetadata](/dataproc/docs/reference/rpc/google.cloud.dataproc.v1beta2#clusteroperationmetadata). + [ClusterOperationMetadata](https://cloud.google.com/dataproc/docs/reference/rpc/google.cloud.dataproc.v1beta2#clusteroperationmetadata). """ context.set_code(grpc.StatusCode.UNIMPLEMENTED) context.set_details("Method not implemented!") @@ -78,7 +78,7 @@ def UpdateCluster(self, request, context): def DeleteCluster(self, request, context): """Deletes a cluster in a project. The returned [Operation.metadata][google.longrunning.Operation.metadata] will be - [ClusterOperationMetadata](/dataproc/docs/reference/rpc/google.cloud.dataproc.v1beta2#clusteroperationmetadata). + [ClusterOperationMetadata](https://cloud.google.com/dataproc/docs/reference/rpc/google.cloud.dataproc.v1beta2#clusteroperationmetadata). """ context.set_code(grpc.StatusCode.UNIMPLEMENTED) context.set_details("Method not implemented!") @@ -101,11 +101,11 @@ def ListClusters(self, request, context): def DiagnoseCluster(self, request, context): """Gets cluster diagnostic information. 
The returned [Operation.metadata][google.longrunning.Operation.metadata] will be - [ClusterOperationMetadata](/dataproc/docs/reference/rpc/google.cloud.dataproc.v1beta2#clusteroperationmetadata). + [ClusterOperationMetadata](https://cloud.google.com/dataproc/docs/reference/rpc/google.cloud.dataproc.v1beta2#clusteroperationmetadata). After the operation completes, [Operation.response][google.longrunning.Operation.response] contains - [Empty](google.protobuf.Empty). + [Empty][google.protobuf.Empty]. """ context.set_code(grpc.StatusCode.UNIMPLEMENTED) context.set_details("Method not implemented!") diff --git a/google/cloud/dataproc_v1beta2/proto/jobs.proto b/google/cloud/dataproc_v1beta2/proto/jobs.proto index c1e643c9..3208822f 100644 --- a/google/cloud/dataproc_v1beta2/proto/jobs.proto +++ b/google/cloud/dataproc_v1beta2/proto/jobs.proto @@ -1,4 +1,4 @@ -// Copyright 2019 Google LLC. +// Copyright 2020 Google LLC // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -11,7 +11,6 @@ // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. -// syntax = "proto3"; @@ -20,6 +19,7 @@ package google.cloud.dataproc.v1beta2; import "google/api/annotations.proto"; import "google/api/client.proto"; import "google/api/field_behavior.proto"; +import "google/longrunning/operations.proto"; import "google/protobuf/empty.proto"; import "google/protobuf/field_mask.proto"; import "google/protobuf/timestamp.proto"; @@ -70,9 +70,9 @@ service JobController { // Starts a job cancellation request. To access the job resource // after cancellation, call - // [regions/{region}/jobs.list](/dataproc/docs/reference/rest/v1beta2/projects.regions.jobs/list) + // [regions/{region}/jobs.list](https://cloud.google.com/dataproc/docs/reference/rest/v1beta2/projects.regions.jobs/list) // or - // [regions/{region}/jobs.get](/dataproc/docs/reference/rest/v1beta2/projects.regions.jobs/get). + // [regions/{region}/jobs.get](https://cloud.google.com/dataproc/docs/reference/rest/v1beta2/projects.regions.jobs/get). rpc CancelJob(CancelJobRequest) returns (Job) { option (google.api.http) = { post: "/v1beta2/projects/{project_id}/regions/{region}/jobs/{job_id}:cancel" @@ -132,7 +132,7 @@ message LoggingConfig { map driver_log_levels = 2; } -// A Cloud Dataproc job for running +// A Dataproc job for running // [Apache Hadoop // MapReduce](https://hadoop.apache.org/docs/current/hadoop-mapreduce-client/hadoop-mapreduce-client-core/MapReduceTutorial.html) // jobs on [Apache Hadoop @@ -159,33 +159,33 @@ message HadoopJob { // include arguments, such as `-libjars` or `-Dfoo=bar`, that can be set as // job properties, since a collision may occur that causes an incorrect job // submission. - repeated string args = 3; + repeated string args = 3 [(google.api.field_behavior) = OPTIONAL]; // Optional. Jar file URIs to add to the CLASSPATHs of the // Hadoop driver and tasks. - repeated string jar_file_uris = 4; + repeated string jar_file_uris = 4 [(google.api.field_behavior) = OPTIONAL]; // Optional. HCFS (Hadoop Compatible Filesystem) URIs of files to be copied // to the working directory of Hadoop drivers and distributed tasks. Useful // for naively parallel tasks. - repeated string file_uris = 5; + repeated string file_uris = 5 [(google.api.field_behavior) = OPTIONAL]; // Optional. 
HCFS URIs of archives to be extracted in the working directory of // Hadoop drivers and tasks. Supported file types: // .jar, .tar, .tar.gz, .tgz, or .zip. - repeated string archive_uris = 6; + repeated string archive_uris = 6 [(google.api.field_behavior) = OPTIONAL]; // Optional. A mapping of property names to values, used to configure Hadoop. - // Properties that conflict with values set by the Cloud Dataproc API may be + // Properties that conflict with values set by the Dataproc API may be // overwritten. Can include properties set in /etc/hadoop/conf/*-site and // classes in user code. - map properties = 7; + map properties = 7 [(google.api.field_behavior) = OPTIONAL]; // Optional. The runtime log config for job execution. - LoggingConfig logging_config = 8; + LoggingConfig logging_config = 8 [(google.api.field_behavior) = OPTIONAL]; } -// A Cloud Dataproc job for running [Apache Spark](http://spark.apache.org/) +// A Dataproc job for running [Apache Spark](http://spark.apache.org/) // applications on YARN. // The specification of the main method to call to drive the job. // Specify either the jar file that contains the main class or the main class @@ -205,32 +205,32 @@ message SparkJob { // Optional. The arguments to pass to the driver. Do not include arguments, // such as `--conf`, that can be set as job properties, since a collision may // occur that causes an incorrect job submission. - repeated string args = 3; + repeated string args = 3 [(google.api.field_behavior) = OPTIONAL]; // Optional. HCFS URIs of jar files to add to the CLASSPATHs of the // Spark driver and tasks. - repeated string jar_file_uris = 4; + repeated string jar_file_uris = 4 [(google.api.field_behavior) = OPTIONAL]; // Optional. HCFS URIs of files to be copied to the working directory of // Spark drivers and distributed tasks. Useful for naively parallel tasks. - repeated string file_uris = 5; + repeated string file_uris = 5 [(google.api.field_behavior) = OPTIONAL]; // Optional. HCFS URIs of archives to be extracted in the working directory // of Spark drivers and tasks. Supported file types: // .jar, .tar, .tar.gz, .tgz, and .zip. - repeated string archive_uris = 6; + repeated string archive_uris = 6 [(google.api.field_behavior) = OPTIONAL]; // Optional. A mapping of property names to values, used to configure Spark. - // Properties that conflict with values set by the Cloud Dataproc API may be + // Properties that conflict with values set by the Dataproc API may be // overwritten. Can include properties set in // /etc/spark/conf/spark-defaults.conf and classes in user code. - map properties = 7; + map properties = 7 [(google.api.field_behavior) = OPTIONAL]; // Optional. The runtime log config for job execution. - LoggingConfig logging_config = 8; + LoggingConfig logging_config = 8 [(google.api.field_behavior) = OPTIONAL]; } -// A Cloud Dataproc job for running +// A Dataproc job for running // [Apache // PySpark](https://spark.apache.org/docs/0.9.0/python-programming-guide.html) // applications on YARN. @@ -242,32 +242,32 @@ message PySparkJob { // Optional. The arguments to pass to the driver. Do not include arguments, // such as `--conf`, that can be set as job properties, since a collision may // occur that causes an incorrect job submission. - repeated string args = 2; + repeated string args = 2 [(google.api.field_behavior) = OPTIONAL]; // Optional. HCFS file URIs of Python files to pass to the PySpark // framework. Supported file types: .py, .egg, and .zip. 
- repeated string python_file_uris = 3; + repeated string python_file_uris = 3 [(google.api.field_behavior) = OPTIONAL]; // Optional. HCFS URIs of jar files to add to the CLASSPATHs of the // Python driver and tasks. - repeated string jar_file_uris = 4; + repeated string jar_file_uris = 4 [(google.api.field_behavior) = OPTIONAL]; // Optional. HCFS URIs of files to be copied to the working directory of // Python drivers and distributed tasks. Useful for naively parallel tasks. - repeated string file_uris = 5; + repeated string file_uris = 5 [(google.api.field_behavior) = OPTIONAL]; // Optional. HCFS URIs of archives to be extracted in the working directory of // .jar, .tar, .tar.gz, .tgz, and .zip. - repeated string archive_uris = 6; + repeated string archive_uris = 6 [(google.api.field_behavior) = OPTIONAL]; // Optional. A mapping of property names to values, used to configure PySpark. - // Properties that conflict with values set by the Cloud Dataproc API may be + // Properties that conflict with values set by the Dataproc API may be // overwritten. Can include properties set in // /etc/spark/conf/spark-defaults.conf and classes in user code. - map properties = 7; + map properties = 7 [(google.api.field_behavior) = OPTIONAL]; // Optional. The runtime log config for job execution. - LoggingConfig logging_config = 8; + LoggingConfig logging_config = 8 [(google.api.field_behavior) = OPTIONAL]; } // A list of queries to run on a cluster. @@ -289,7 +289,7 @@ message QueryList { repeated string queries = 1 [(google.api.field_behavior) = REQUIRED]; } -// A Cloud Dataproc job for running [Apache Hive](https://hive.apache.org/) +// A Dataproc job for running [Apache Hive](https://hive.apache.org/) // queries on YARN. message HiveJob { // Required. The sequence of Hive queries to execute, specified as either @@ -305,25 +305,25 @@ message HiveJob { // Optional. Whether to continue executing queries if a query fails. // The default value is `false`. Setting to `true` can be useful when // executing independent parallel queries. - bool continue_on_failure = 3; + bool continue_on_failure = 3 [(google.api.field_behavior) = OPTIONAL]; // Optional. Mapping of query variable names to values (equivalent to the // Hive command: `SET name="value";`). - map script_variables = 4; + map script_variables = 4 [(google.api.field_behavior) = OPTIONAL]; // Optional. A mapping of property names and values, used to configure Hive. - // Properties that conflict with values set by the Cloud Dataproc API may be + // Properties that conflict with values set by the Dataproc API may be // overwritten. Can include properties set in /etc/hadoop/conf/*-site.xml, // /etc/hive/conf/hive-site.xml, and classes in user code. - map properties = 5; + map properties = 5 [(google.api.field_behavior) = OPTIONAL]; // Optional. HCFS URIs of jar files to add to the CLASSPATH of the // Hive server and Hadoop MapReduce (MR) tasks. Can contain Hive SerDes // and UDFs. - repeated string jar_file_uris = 6; + repeated string jar_file_uris = 6 [(google.api.field_behavior) = OPTIONAL]; } -// A Cloud Dataproc job for running [Apache Spark +// A Dataproc job for running [Apache Spark // SQL](http://spark.apache.org/sql/) queries. message SparkSqlJob { // Required. The sequence of Spark SQL queries to execute, specified as @@ -338,21 +338,21 @@ message SparkSqlJob { // Optional. Mapping of query variable names to values (equivalent to the // Spark SQL command: SET `name="value";`). 
- map script_variables = 3; + map script_variables = 3 [(google.api.field_behavior) = OPTIONAL]; // Optional. A mapping of property names to values, used to configure // Spark SQL's SparkConf. Properties that conflict with values set by the - // Cloud Dataproc API may be overwritten. - map properties = 4; + // Dataproc API may be overwritten. + map properties = 4 [(google.api.field_behavior) = OPTIONAL]; // Optional. HCFS URIs of jar files to be added to the Spark CLASSPATH. - repeated string jar_file_uris = 56; + repeated string jar_file_uris = 56 [(google.api.field_behavior) = OPTIONAL]; // Optional. The runtime log config for job execution. - LoggingConfig logging_config = 6; + LoggingConfig logging_config = 6 [(google.api.field_behavior) = OPTIONAL]; } -// A Cloud Dataproc job for running [Apache Pig](https://pig.apache.org/) +// A Dataproc job for running [Apache Pig](https://pig.apache.org/) // queries on YARN. message PigJob { // Required. The sequence of Pig queries to execute, specified as an HCFS @@ -368,27 +368,27 @@ message PigJob { // Optional. Whether to continue executing queries if a query fails. // The default value is `false`. Setting to `true` can be useful when // executing independent parallel queries. - bool continue_on_failure = 3; + bool continue_on_failure = 3 [(google.api.field_behavior) = OPTIONAL]; // Optional. Mapping of query variable names to values (equivalent to the Pig // command: `name=[value]`). - map script_variables = 4; + map script_variables = 4 [(google.api.field_behavior) = OPTIONAL]; // Optional. A mapping of property names to values, used to configure Pig. - // Properties that conflict with values set by the Cloud Dataproc API may be + // Properties that conflict with values set by the Dataproc API may be // overwritten. Can include properties set in /etc/hadoop/conf/*-site.xml, // /etc/pig/conf/pig.properties, and classes in user code. - map properties = 5; + map properties = 5 [(google.api.field_behavior) = OPTIONAL]; // Optional. HCFS URIs of jar files to add to the CLASSPATH of // the Pig Client and Hadoop MapReduce (MR) tasks. Can contain Pig UDFs. - repeated string jar_file_uris = 6; + repeated string jar_file_uris = 6 [(google.api.field_behavior) = OPTIONAL]; // Optional. The runtime log config for job execution. - LoggingConfig logging_config = 7; + LoggingConfig logging_config = 7 [(google.api.field_behavior) = OPTIONAL]; } -// A Cloud Dataproc job for running +// A Dataproc job for running // [Apache SparkR](https://spark.apache.org/docs/latest/sparkr.html) // applications on YARN. message SparkRJob { @@ -399,38 +399,38 @@ message SparkRJob { // Optional. The arguments to pass to the driver. Do not include arguments, // such as `--conf`, that can be set as job properties, since a collision may // occur that causes an incorrect job submission. - repeated string args = 2; + repeated string args = 2 [(google.api.field_behavior) = OPTIONAL]; // Optional. HCFS URIs of files to be copied to the working directory of // R drivers and distributed tasks. Useful for naively parallel tasks. - repeated string file_uris = 3; + repeated string file_uris = 3 [(google.api.field_behavior) = OPTIONAL]; // Optional. HCFS URIs of archives to be extracted in the working directory of // Spark drivers and tasks. Supported file types: // .jar, .tar, .tar.gz, .tgz, and .zip. - repeated string archive_uris = 4; + repeated string archive_uris = 4 [(google.api.field_behavior) = OPTIONAL]; // Optional. A mapping of property names to values, used to configure SparkR. 
- // Properties that conflict with values set by the Cloud Dataproc API may be + // Properties that conflict with values set by the Dataproc API may be // overwritten. Can include properties set in // /etc/spark/conf/spark-defaults.conf and classes in user code. - map properties = 5; + map properties = 5 [(google.api.field_behavior) = OPTIONAL]; // Optional. The runtime log config for job execution. - LoggingConfig logging_config = 6; + LoggingConfig logging_config = 6 [(google.api.field_behavior) = OPTIONAL]; } -// Cloud Dataproc job config. +// Dataproc job config. message JobPlacement { // Required. The name of the cluster where the job will be submitted. string cluster_name = 1 [(google.api.field_behavior) = REQUIRED]; - // Output only. A cluster UUID generated by the Cloud Dataproc service when + // Output only. A cluster UUID generated by the Dataproc service when // the job is submitted. - string cluster_uuid = 2; + string cluster_uuid = 2 [(google.api.field_behavior) = OUTPUT_ONLY]; } -// Cloud Dataproc job status. +// Dataproc job status. message JobStatus { // The job state. enum State { @@ -488,7 +488,7 @@ message JobStatus { QUEUED = 2; // The agent-reported status is out of date, which may be caused by a - // loss of communication between the agent and Cloud Dataproc. If the + // loss of communication between the agent and Dataproc. If the // agent does not send a timely update, the job will fail. // // Applies to RUNNING state. @@ -496,18 +496,18 @@ message JobStatus { } // Output only. A state message specifying the overall job state. - State state = 1; + State state = 1 [(google.api.field_behavior) = OUTPUT_ONLY]; - // Output only. Optional job state details, such as an error + // Output only. Optional Job state details, such as an error // description if the state is ERROR. - string details = 2; + string details = 2 [(google.api.field_behavior) = OUTPUT_ONLY]; // Output only. The time when this state was entered. - google.protobuf.Timestamp state_start_time = 6; + google.protobuf.Timestamp state_start_time = 6 [(google.api.field_behavior) = OUTPUT_ONLY]; // Output only. Additional state information, which includes // status reported by the agent. - Substate substate = 7; + Substate substate = 7 [(google.api.field_behavior) = OUTPUT_ONLY]; } // Encapsulates the full scoping used to reference a job. @@ -517,12 +517,11 @@ message JobReference { string project_id = 1 [(google.api.field_behavior) = REQUIRED]; // Optional. The job ID, which must be unique within the project. - // // The ID must contain only letters (a-z, A-Z), numbers (0-9), // underscores (_), or hyphens (-). The maximum length is 100 characters. // // If not specified by the caller, the job ID will be provided by the server. - string job_id = 2; + string job_id = 2 [(google.api.field_behavior) = OPTIONAL]; } // A YARN application created by a job. Application information is a subset of @@ -571,20 +570,20 @@ message YarnApplication { // Output only. The numerical progress of the application, from 1 to 100. float progress = 3 [(google.api.field_behavior) = OUTPUT_ONLY]; - // Optional. Output only. The HTTP URL of the ApplicationMaster, HistoryServer, or + // Output only. The HTTP URL of the ApplicationMaster, HistoryServer, or // TimelineServer that provides application-specific information. The URL uses // the internal hostname, and requires a proxy server for resolution and, // possibly, access. string tracking_url = 4 [(google.api.field_behavior) = OUTPUT_ONLY]; } -// A Cloud Dataproc job resource. 
+// A Dataproc job resource. message Job { // Optional. The fully qualified reference to the job, which can be used to // obtain the equivalent REST path of the job resource. If this property // is not specified when a job is created, the server generates a // job_id. - JobReference reference = 1; + JobReference reference = 1 [(google.api.field_behavior) = OPTIONAL]; // Required. Job information, including how, when, and where to // run the job. @@ -592,54 +591,47 @@ message Job { // Required. The application/framework-specific portion of the job. oneof type_job { - // Job is a Hadoop job. HadoopJob hadoop_job = 3; - // Job is a Spark job. SparkJob spark_job = 4; - // Job is a Pyspark job. PySparkJob pyspark_job = 5; - // Job is a Hive job. HiveJob hive_job = 6; - // Job is a Pig job. PigJob pig_job = 7; - // Job is a SparkR job. SparkRJob spark_r_job = 21; - // Job is a SparkSql job. SparkSqlJob spark_sql_job = 12; } // Output only. The job status. Additional application-specific // status information may be contained in the type_job // and yarn_applications fields. - JobStatus status = 8; + JobStatus status = 8 [(google.api.field_behavior) = OUTPUT_ONLY]; // Output only. The previous job status. - repeated JobStatus status_history = 13; + repeated JobStatus status_history = 13 [(google.api.field_behavior) = OUTPUT_ONLY]; // Output only. The collection of YARN applications spun up by this job. // // **Beta** Feature: This report is available for testing purposes only. It // may be changed before final release. - repeated YarnApplication yarn_applications = 9; + repeated YarnApplication yarn_applications = 9 [(google.api.field_behavior) = OUTPUT_ONLY]; // Output only. The email address of the user submitting the job. For jobs // submitted on the cluster, the address is username@hostname. - string submitted_by = 10; + string submitted_by = 10 [(google.api.field_behavior) = OUTPUT_ONLY]; // Output only. A URI pointing to the location of the stdout of the job's // driver program. - string driver_output_resource_uri = 17; + string driver_output_resource_uri = 17 [(google.api.field_behavior) = OUTPUT_ONLY]; // Output only. If present, the location of miscellaneous control files // which may be used as part of job setup and handling. If not present, // control files may be placed in the same location as `driver_output_uri`. - string driver_control_files_uri = 15; + string driver_control_files_uri = 15 [(google.api.field_behavior) = OUTPUT_ONLY]; // Optional. The labels to associate with this job. // Label **keys** must contain 1 to 63 characters, and must conform to @@ -648,15 +640,15 @@ message Job { // characters, and must conform to [RFC // 1035](https://www.ietf.org/rfc/rfc1035.txt). No more than 32 labels can be // associated with a job. - map labels = 18; + map labels = 18 [(google.api.field_behavior) = OPTIONAL]; // Optional. Job scheduling configuration. - JobScheduling scheduling = 20; + JobScheduling scheduling = 20 [(google.api.field_behavior) = OPTIONAL]; // Output only. A UUID that uniquely identifies a job within the project // over time. This is in contrast to a user-settable reference.job_id that // may be reused over time. - string job_uuid = 22; + string job_uuid = 22 [(google.api.field_behavior) = OUTPUT_ONLY]; } // Job scheduling options. @@ -669,7 +661,7 @@ message JobScheduling { // 4 times within 10 minute window. // // Maximum value is 10. 
@@ -669,7 +661,7 @@ message JobScheduling {
   // 4 times within 10 minute window.
   //
   // Maximum value is 10.
-  int32 max_failures_per_hour = 1;
+  int32 max_failures_per_hour = 1 [(google.api.field_behavior) = OPTIONAL];
 }
 
 // A request to submit a job.
@@ -678,7 +670,7 @@ message SubmitJobRequest {
   // belongs to.
   string project_id = 1 [(google.api.field_behavior) = REQUIRED];
 
-  // Required. The Cloud Dataproc region in which to handle the request.
+  // Required. The Dataproc region in which to handle the request.
   string region = 3 [(google.api.field_behavior) = REQUIRED];
 
   // Required. The job resource.
@@ -695,7 +687,7 @@ message SubmitJobRequest {
   //
   // The id must contain only letters (a-z, A-Z), numbers (0-9),
   // underscores (_), and hyphens (-). The maximum length is 40 characters.
-  string request_id = 4;
+  string request_id = 4 [(google.api.field_behavior) = OPTIONAL];
 }
 
 // A request to get the resource representation for a job in a project.
@@ -704,7 +696,7 @@ message GetJobRequest {
   // belongs to.
   string project_id = 1 [(google.api.field_behavior) = REQUIRED];
 
-  // Required. The Cloud Dataproc region in which to handle the request.
+  // Required. The Dataproc region in which to handle the request.
   string region = 3 [(google.api.field_behavior) = REQUIRED];
 
   // Required. The job ID.
@@ -730,25 +722,25 @@ message ListJobsRequest {
   // belongs to.
   string project_id = 1 [(google.api.field_behavior) = REQUIRED];
 
-  // Required. The Cloud Dataproc region in which to handle the request.
+  // Required. The Dataproc region in which to handle the request.
   string region = 6 [(google.api.field_behavior) = REQUIRED];
 
   // Optional. The number of results to return in each response.
-  int32 page_size = 2;
+  int32 page_size = 2 [(google.api.field_behavior) = OPTIONAL];
 
   // Optional. The page token, returned by a previous call, to request the
   // next page of results.
-  string page_token = 3;
+  string page_token = 3 [(google.api.field_behavior) = OPTIONAL];
 
   // Optional. If set, the returned jobs list includes only jobs that were
   // submitted to the named cluster.
-  string cluster_name = 4;
+  string cluster_name = 4 [(google.api.field_behavior) = OPTIONAL];
 
   // Optional. Specifies enumerated categories of jobs to list.
   // (default = match ALL jobs).
   //
   // If `filter` is provided, `jobStateMatcher` will be ignored.
-  JobStateMatcher job_state_matcher = 5;
+  JobStateMatcher job_state_matcher = 5 [(google.api.field_behavior) = OPTIONAL];
 
   // Optional. A filter constraining the jobs to list. Filters are
   // case-sensitive and have the following syntax:
@@ -764,7 +756,7 @@ message ListJobsRequest {
   // Example filter:
   //
   // status.state = ACTIVE AND labels.env = staging AND labels.starred = *
-  string filter = 7;
+  string filter = 7 [(google.api.field_behavior) = OPTIONAL];
 }
 
 // A request to update a job.
@@ -773,7 +765,7 @@ message UpdateJobRequest {
   // belongs to.
   string project_id = 1 [(google.api.field_behavior) = REQUIRED];
 
-  // Required. The Cloud Dataproc region in which to handle the request.
+  // Required. The Dataproc region in which to handle the request.
   string region = 2 [(google.api.field_behavior) = REQUIRED];
 
   // Required. The job ID.
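The ListJobsRequest filter grammar documented above is usable directly from the generated client, where the keyword is spelled filter_ to avoid shadowing the Python builtin. A sketch (0.x surface; the project and region are placeholder assumptions; the returned iterator follows next_page_token transparently):

    from google.cloud import dataproc_v1beta2

    client = dataproc_v1beta2.JobControllerClient()

    # Reuses the example filter from the proto comment above; job_state_matcher
    # would be ignored here because a filter is provided.
    for job in client.list_jobs(
        "my-project",
        "global",
        filter_="status.state = ACTIVE AND labels.env = staging",
    ):
        print(job.reference.job_id, job.status.state)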
@@ -794,12 +786,12 @@ message UpdateJobRequest {
 // A list of jobs in a project.
 message ListJobsResponse {
   // Output only. Jobs list.
-  repeated Job jobs = 1;
+  repeated Job jobs = 1 [(google.api.field_behavior) = OUTPUT_ONLY];
 
   // Optional. This token is included in the response if there are more results
   // to fetch. To fetch additional results, provide this value as the
   // `page_token` in a subsequent ListJobsRequest.
-  string next_page_token = 2;
+  string next_page_token = 2 [(google.api.field_behavior) = OPTIONAL];
 }
 
 // A request to cancel a job.
@@ -808,7 +800,7 @@ message CancelJobRequest {
   // belongs to.
   string project_id = 1 [(google.api.field_behavior) = REQUIRED];
 
-  // Required. The Cloud Dataproc region in which to handle the request.
+  // Required. The Dataproc region in which to handle the request.
   string region = 3 [(google.api.field_behavior) = REQUIRED];
 
   // Required. The job ID.
@@ -821,7 +813,7 @@ message DeleteJobRequest {
   // belongs to.
   string project_id = 1 [(google.api.field_behavior) = REQUIRED];
 
-  // Required. The Cloud Dataproc region in which to handle the request.
+  // Required. The Dataproc region in which to handle the request.
   string region = 3 [(google.api.field_behavior) = REQUIRED];
 
   // Required. The job ID.
diff --git a/google/cloud/dataproc_v1beta2/proto/jobs_pb2.py b/google/cloud/dataproc_v1beta2/proto/jobs_pb2.py
index c40e358b..37e4a1bb 100644
--- a/google/cloud/dataproc_v1beta2/proto/jobs_pb2.py
+++ b/google/cloud/dataproc_v1beta2/proto/jobs_pb2.py
@@ -18,6 +18,9 @@
 from google.api import annotations_pb2 as google_dot_api_dot_annotations__pb2
 from google.api import client_pb2 as google_dot_api_dot_client__pb2
 from google.api import field_behavior_pb2 as google_dot_api_dot_field__behavior__pb2
+from google.longrunning import (
+    operations_pb2 as google_dot_longrunning_dot_operations__pb2,
+)
 from google.protobuf import empty_pb2 as google_dot_protobuf_dot_empty__pb2
 from google.protobuf import field_mask_pb2 as google_dot_protobuf_dot_field__mask__pb2
 from google.protobuf import timestamp_pb2 as google_dot_protobuf_dot_timestamp__pb2
@@ -31,12 +34,13 @@
         "\n!com.google.cloud.dataproc.v1beta2B\tJobsProtoP\001ZEgoogle.golang.org/genproto/googleapis/cloud/dataproc/v1beta2;dataproc"
     ),
     serialized_pb=_b(
-        '\n.google/cloud/dataproc_v1beta2/proto/jobs.proto\x12\x1dgoogle.cloud.dataproc.v1beta2\x1a\x1cgoogle/api/annotations.proto\x1a\x17google/api/client.proto\x1a\x1fgoogle/api/field_behavior.proto\x1a\x1bgoogle/protobuf/empty.proto\x1a google/protobuf/field_mask.proto\x1a\x1fgoogle/protobuf/timestamp.proto"\xcb\x02\n\rLoggingConfig\x12\\\n\x11\x64river_log_levels\x18\x02 \x03(\x0b\x32\x41.google.cloud.dataproc.v1beta2.LoggingConfig.DriverLogLevelsEntry\x1aj\n\x14\x44riverLogLevelsEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\x41\n\x05value\x18\x02 \x01(\x0e\x32\x32.google.cloud.dataproc.v1beta2.LoggingConfig.Level:\x02\x38\x01"p\n\x05Level\x12\x15\n\x11LEVEL_UNSPECIFIED\x10\x00\x12\x07\n\x03\x41LL\x10\x01\x12\t\n\x05TRACE\x10\x02\x12\t\n\x05\x44\x45\x42UG\x10\x03\x12\x08\n\x04INFO\x10\x04\x12\x08\n\x04WARN\x10\x05\x12\t\n\x05\x45RROR\x10\x06\x12\t\n\x05\x46\x41TAL\x10\x07\x12\x07\n\x03OFF\x10\x08"\xdd\x02\n\tHadoopJob\x12\x1b\n\x11main_jar_file_uri\x18\x01 \x01(\tH\x00\x12\x14\n\nmain_class\x18\x02 \x01(\tH\x00\x12\x0c\n\x04\x61rgs\x18\x03 \x03(\t\x12\x15\n\rjar_file_uris\x18\x04 \x03(\t\x12\x11\n\tfile_uris\x18\x05 \x03(\t\x12\x14\n\x0c\x61rchive_uris\x18\x06 \x03(\t\x12L\n\nproperties\x18\x07 \x03(\x0b\x32\x38.google.cloud.dataproc.v1beta2.HadoopJob.PropertiesEntry\x12\x44\n\x0elogging_config\x18\x08 \x01(\x0b\x32,.google.cloud.dataproc.v1beta2.LoggingConfig\x1a\x31\n\x0fPropertiesEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t:\x02\x38\x01\x42\x08\n\x06\x64river"\xdb\x02\n\x08SparkJob\x12\x1b\n\x11main_jar_file_uri\x18\x01 \x01(\tH\x00\x12\x14\n\nmain_class\x18\x02 \x01(\tH\x00\x12\x0c\n\x04\x61rgs\x18\x03 \x03(\t\x12\x15\n\rjar_file_uris\x18\x04 
\x03(\t\x12\x11\n\tfile_uris\x18\x05 \x03(\t\x12\x14\n\x0c\x61rchive_uris\x18\x06 \x03(\t\x12K\n\nproperties\x18\x07 \x03(\x0b\x32\x37.google.cloud.dataproc.v1beta2.SparkJob.PropertiesEntry\x12\x44\n\x0elogging_config\x18\x08 \x01(\x0b\x32,.google.cloud.dataproc.v1beta2.LoggingConfig\x1a\x31\n\x0fPropertiesEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t:\x02\x38\x01\x42\x08\n\x06\x64river"\xdf\x02\n\nPySparkJob\x12!\n\x14main_python_file_uri\x18\x01 \x01(\tB\x03\xe0\x41\x02\x12\x0c\n\x04\x61rgs\x18\x02 \x03(\t\x12\x18\n\x10python_file_uris\x18\x03 \x03(\t\x12\x15\n\rjar_file_uris\x18\x04 \x03(\t\x12\x11\n\tfile_uris\x18\x05 \x03(\t\x12\x14\n\x0c\x61rchive_uris\x18\x06 \x03(\t\x12M\n\nproperties\x18\x07 \x03(\x0b\x32\x39.google.cloud.dataproc.v1beta2.PySparkJob.PropertiesEntry\x12\x44\n\x0elogging_config\x18\x08 \x01(\x0b\x32,.google.cloud.dataproc.v1beta2.LoggingConfig\x1a\x31\n\x0fPropertiesEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t:\x02\x38\x01"!\n\tQueryList\x12\x14\n\x07queries\x18\x01 \x03(\tB\x03\xe0\x41\x02"\xb0\x03\n\x07HiveJob\x12\x18\n\x0equery_file_uri\x18\x01 \x01(\tH\x00\x12>\n\nquery_list\x18\x02 \x01(\x0b\x32(.google.cloud.dataproc.v1beta2.QueryListH\x00\x12\x1b\n\x13\x63ontinue_on_failure\x18\x03 \x01(\x08\x12U\n\x10script_variables\x18\x04 \x03(\x0b\x32;.google.cloud.dataproc.v1beta2.HiveJob.ScriptVariablesEntry\x12J\n\nproperties\x18\x05 \x03(\x0b\x32\x36.google.cloud.dataproc.v1beta2.HiveJob.PropertiesEntry\x12\x15\n\rjar_file_uris\x18\x06 \x03(\t\x1a\x36\n\x14ScriptVariablesEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t:\x02\x38\x01\x1a\x31\n\x0fPropertiesEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t:\x02\x38\x01\x42\t\n\x07queries"\xe5\x03\n\x0bSparkSqlJob\x12\x18\n\x0equery_file_uri\x18\x01 \x01(\tH\x00\x12>\n\nquery_list\x18\x02 \x01(\x0b\x32(.google.cloud.dataproc.v1beta2.QueryListH\x00\x12Y\n\x10script_variables\x18\x03 \x03(\x0b\x32?.google.cloud.dataproc.v1beta2.SparkSqlJob.ScriptVariablesEntry\x12N\n\nproperties\x18\x04 \x03(\x0b\x32:.google.cloud.dataproc.v1beta2.SparkSqlJob.PropertiesEntry\x12\x15\n\rjar_file_uris\x18\x38 \x03(\t\x12\x44\n\x0elogging_config\x18\x06 \x01(\x0b\x32,.google.cloud.dataproc.v1beta2.LoggingConfig\x1a\x36\n\x14ScriptVariablesEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t:\x02\x38\x01\x1a\x31\n\x0fPropertiesEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t:\x02\x38\x01\x42\t\n\x07queries"\xf3\x03\n\x06PigJob\x12\x18\n\x0equery_file_uri\x18\x01 \x01(\tH\x00\x12>\n\nquery_list\x18\x02 \x01(\x0b\x32(.google.cloud.dataproc.v1beta2.QueryListH\x00\x12\x1b\n\x13\x63ontinue_on_failure\x18\x03 \x01(\x08\x12T\n\x10script_variables\x18\x04 \x03(\x0b\x32:.google.cloud.dataproc.v1beta2.PigJob.ScriptVariablesEntry\x12I\n\nproperties\x18\x05 \x03(\x0b\x32\x35.google.cloud.dataproc.v1beta2.PigJob.PropertiesEntry\x12\x15\n\rjar_file_uris\x18\x06 \x03(\t\x12\x44\n\x0elogging_config\x18\x07 \x01(\x0b\x32,.google.cloud.dataproc.v1beta2.LoggingConfig\x1a\x36\n\x14ScriptVariablesEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t:\x02\x38\x01\x1a\x31\n\x0fPropertiesEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t:\x02\x38\x01\x42\t\n\x07queries"\xa7\x02\n\tSparkRJob\x12\x1c\n\x0fmain_r_file_uri\x18\x01 \x01(\tB\x03\xe0\x41\x02\x12\x0c\n\x04\x61rgs\x18\x02 \x03(\t\x12\x11\n\tfile_uris\x18\x03 \x03(\t\x12\x14\n\x0c\x61rchive_uris\x18\x04 
\x03(\t\x12L\n\nproperties\x18\x05 \x03(\x0b\x32\x38.google.cloud.dataproc.v1beta2.SparkRJob.PropertiesEntry\x12\x44\n\x0elogging_config\x18\x06 \x01(\x0b\x32,.google.cloud.dataproc.v1beta2.LoggingConfig\x1a\x31\n\x0fPropertiesEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t:\x02\x38\x01"?\n\x0cJobPlacement\x12\x19\n\x0c\x63luster_name\x18\x01 \x01(\tB\x03\xe0\x41\x02\x12\x14\n\x0c\x63luster_uuid\x18\x02 \x01(\t"\xcc\x03\n\tJobStatus\x12=\n\x05state\x18\x01 \x01(\x0e\x32..google.cloud.dataproc.v1beta2.JobStatus.State\x12\x0f\n\x07\x64\x65tails\x18\x02 \x01(\t\x12\x34\n\x10state_start_time\x18\x06 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12\x43\n\x08substate\x18\x07 \x01(\x0e\x32\x31.google.cloud.dataproc.v1beta2.JobStatus.Substate"\xa9\x01\n\x05State\x12\x15\n\x11STATE_UNSPECIFIED\x10\x00\x12\x0b\n\x07PENDING\x10\x01\x12\x0e\n\nSETUP_DONE\x10\x08\x12\x0b\n\x07RUNNING\x10\x02\x12\x12\n\x0e\x43\x41NCEL_PENDING\x10\x03\x12\x12\n\x0e\x43\x41NCEL_STARTED\x10\x07\x12\r\n\tCANCELLED\x10\x04\x12\x08\n\x04\x44ONE\x10\x05\x12\t\n\x05\x45RROR\x10\x06\x12\x13\n\x0f\x41TTEMPT_FAILURE\x10\t"H\n\x08Substate\x12\x0f\n\x0bUNSPECIFIED\x10\x00\x12\r\n\tSUBMITTED\x10\x01\x12\n\n\x06QUEUED\x10\x02\x12\x10\n\x0cSTALE_STATUS\x10\x03"7\n\x0cJobReference\x12\x17\n\nproject_id\x18\x01 \x01(\tB\x03\xe0\x41\x02\x12\x0e\n\x06job_id\x18\x02 \x01(\t"\xaa\x02\n\x0fYarnApplication\x12\x11\n\x04name\x18\x01 \x01(\tB\x03\xe0\x41\x03\x12H\n\x05state\x18\x02 \x01(\x0e\x32\x34.google.cloud.dataproc.v1beta2.YarnApplication.StateB\x03\xe0\x41\x03\x12\x15\n\x08progress\x18\x03 \x01(\x02\x42\x03\xe0\x41\x03\x12\x19\n\x0ctracking_url\x18\x04 \x01(\tB\x03\xe0\x41\x03"\x87\x01\n\x05State\x12\x15\n\x11STATE_UNSPECIFIED\x10\x00\x12\x07\n\x03NEW\x10\x01\x12\x0e\n\nNEW_SAVING\x10\x02\x12\r\n\tSUBMITTED\x10\x03\x12\x0c\n\x08\x41\x43\x43\x45PTED\x10\x04\x12\x0b\n\x07RUNNING\x10\x05\x12\x0c\n\x08\x46INISHED\x10\x06\x12\n\n\x06\x46\x41ILED\x10\x07\x12\n\n\x06KILLED\x10\x08"\xb8\x08\n\x03Job\x12>\n\treference\x18\x01 \x01(\x0b\x32+.google.cloud.dataproc.v1beta2.JobReference\x12\x43\n\tplacement\x18\x02 \x01(\x0b\x32+.google.cloud.dataproc.v1beta2.JobPlacementB\x03\xe0\x41\x02\x12>\n\nhadoop_job\x18\x03 \x01(\x0b\x32(.google.cloud.dataproc.v1beta2.HadoopJobH\x00\x12<\n\tspark_job\x18\x04 \x01(\x0b\x32\'.google.cloud.dataproc.v1beta2.SparkJobH\x00\x12@\n\x0bpyspark_job\x18\x05 \x01(\x0b\x32).google.cloud.dataproc.v1beta2.PySparkJobH\x00\x12:\n\x08hive_job\x18\x06 \x01(\x0b\x32&.google.cloud.dataproc.v1beta2.HiveJobH\x00\x12\x38\n\x07pig_job\x18\x07 \x01(\x0b\x32%.google.cloud.dataproc.v1beta2.PigJobH\x00\x12?\n\x0bspark_r_job\x18\x15 \x01(\x0b\x32(.google.cloud.dataproc.v1beta2.SparkRJobH\x00\x12\x43\n\rspark_sql_job\x18\x0c \x01(\x0b\x32*.google.cloud.dataproc.v1beta2.SparkSqlJobH\x00\x12\x38\n\x06status\x18\x08 \x01(\x0b\x32(.google.cloud.dataproc.v1beta2.JobStatus\x12@\n\x0estatus_history\x18\r \x03(\x0b\x32(.google.cloud.dataproc.v1beta2.JobStatus\x12I\n\x11yarn_applications\x18\t \x03(\x0b\x32..google.cloud.dataproc.v1beta2.YarnApplication\x12\x14\n\x0csubmitted_by\x18\n \x01(\t\x12"\n\x1a\x64river_output_resource_uri\x18\x11 \x01(\t\x12 \n\x18\x64river_control_files_uri\x18\x0f \x01(\t\x12>\n\x06labels\x18\x12 \x03(\x0b\x32..google.cloud.dataproc.v1beta2.Job.LabelsEntry\x12@\n\nscheduling\x18\x14 \x01(\x0b\x32,.google.cloud.dataproc.v1beta2.JobScheduling\x12\x10\n\x08job_uuid\x18\x16 \x01(\t\x1a-\n\x0bLabelsEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 
\x01(\t:\x02\x38\x01\x42\n\n\x08type_job".\n\rJobScheduling\x12\x1d\n\x15max_failures_per_hour\x18\x01 \x01(\x05"\x8a\x01\n\x10SubmitJobRequest\x12\x17\n\nproject_id\x18\x01 \x01(\tB\x03\xe0\x41\x02\x12\x13\n\x06region\x18\x03 \x01(\tB\x03\xe0\x41\x02\x12\x34\n\x03job\x18\x02 \x01(\x0b\x32".google.cloud.dataproc.v1beta2.JobB\x03\xe0\x41\x02\x12\x12\n\nrequest_id\x18\x04 \x01(\t"R\n\rGetJobRequest\x12\x17\n\nproject_id\x18\x01 \x01(\tB\x03\xe0\x41\x02\x12\x13\n\x06region\x18\x03 \x01(\tB\x03\xe0\x41\x02\x12\x13\n\x06job_id\x18\x02 \x01(\tB\x03\xe0\x41\x02"\x9f\x02\n\x0fListJobsRequest\x12\x17\n\nproject_id\x18\x01 \x01(\tB\x03\xe0\x41\x02\x12\x13\n\x06region\x18\x06 \x01(\tB\x03\xe0\x41\x02\x12\x11\n\tpage_size\x18\x02 \x01(\x05\x12\x12\n\npage_token\x18\x03 \x01(\t\x12\x14\n\x0c\x63luster_name\x18\x04 \x01(\t\x12Y\n\x11job_state_matcher\x18\x05 \x01(\x0e\x32>.google.cloud.dataproc.v1beta2.ListJobsRequest.JobStateMatcher\x12\x0e\n\x06\x66ilter\x18\x07 \x01(\t"6\n\x0fJobStateMatcher\x12\x07\n\x03\x41LL\x10\x00\x12\n\n\x06\x41\x43TIVE\x10\x01\x12\x0e\n\nNON_ACTIVE\x10\x02"\xc1\x01\n\x10UpdateJobRequest\x12\x17\n\nproject_id\x18\x01 \x01(\tB\x03\xe0\x41\x02\x12\x13\n\x06region\x18\x02 \x01(\tB\x03\xe0\x41\x02\x12\x13\n\x06job_id\x18\x03 \x01(\tB\x03\xe0\x41\x02\x12\x34\n\x03job\x18\x04 \x01(\x0b\x32".google.cloud.dataproc.v1beta2.JobB\x03\xe0\x41\x02\x12\x34\n\x0bupdate_mask\x18\x05 \x01(\x0b\x32\x1a.google.protobuf.FieldMaskB\x03\xe0\x41\x02"]\n\x10ListJobsResponse\x12\x30\n\x04jobs\x18\x01 \x03(\x0b\x32".google.cloud.dataproc.v1beta2.Job\x12\x17\n\x0fnext_page_token\x18\x02 \x01(\t"U\n\x10\x43\x61ncelJobRequest\x12\x17\n\nproject_id\x18\x01 \x01(\tB\x03\xe0\x41\x02\x12\x13\n\x06region\x18\x03 \x01(\tB\x03\xe0\x41\x02\x12\x13\n\x06job_id\x18\x02 \x01(\tB\x03\xe0\x41\x02"U\n\x10\x44\x65leteJobRequest\x12\x17\n\nproject_id\x18\x01 \x01(\tB\x03\xe0\x41\x02\x12\x13\n\x06region\x18\x03 \x01(\tB\x03\xe0\x41\x02\x12\x13\n\x06job_id\x18\x02 \x01(\tB\x03\xe0\x41\x02\x32\xfb\t\n\rJobController\x12\xc2\x01\n\tSubmitJob\x12/.google.cloud.dataproc.v1beta2.SubmitJobRequest\x1a".google.cloud.dataproc.v1beta2.Job"`\x82\xd3\xe4\x93\x02@";/v1beta2/projects/{project_id}/regions/{region}/jobs:submit:\x01*\xda\x41\x17project_id, region, job\x12\xbe\x01\n\x06GetJob\x12,.google.cloud.dataproc.v1beta2.GetJobRequest\x1a".google.cloud.dataproc.v1beta2.Job"b\x82\xd3\xe4\x93\x02?\x12=/v1beta2/projects/{project_id}/regions/{region}/jobs/{job_id}\xda\x41\x1aproject_id, region, job_id\x12\xdb\x01\n\x08ListJobs\x12..google.cloud.dataproc.v1beta2.ListJobsRequest\x1a/.google.cloud.dataproc.v1beta2.ListJobsResponse"n\x82\xd3\xe4\x93\x02\x36\x12\x34/v1beta2/projects/{project_id}/regions/{region}/jobs\xda\x41\x12project_id, region\xda\x41\x1aproject_id, region, filter\x12\xac\x01\n\tUpdateJob\x12/.google.cloud.dataproc.v1beta2.UpdateJobRequest\x1a".google.cloud.dataproc.v1beta2.Job"J\x82\xd3\xe4\x93\x02\x44\x32=/v1beta2/projects/{project_id}/regions/{region}/jobs/{job_id}:\x03job\x12\xce\x01\n\tCancelJob\x12/.google.cloud.dataproc.v1beta2.CancelJobRequest\x1a".google.cloud.dataproc.v1beta2.Job"l\x82\xd3\xe4\x93\x02I"D/v1beta2/projects/{project_id}/regions/{region}/jobs/{job_id}:cancel:\x01*\xda\x41\x1aproject_id, region, job_id\x12\xb8\x01\n\tDeleteJob\x12/.google.cloud.dataproc.v1beta2.DeleteJobRequest\x1a\x16.google.protobuf.Empty"b\x82\xd3\xe4\x93\x02?*=/v1beta2/projects/{project_id}/regions/{region}/jobs/{job_id}\xda\x41\x1aproject_id, region, 
job_id\x1aK\xca\x41\x17\x64\x61taproc.googleapis.com\xd2\x41.https://www.googleapis.com/auth/cloud-platformBw\n!com.google.cloud.dataproc.v1beta2B\tJobsProtoP\x01ZEgoogle.golang.org/genproto/googleapis/cloud/dataproc/v1beta2;dataprocb\x06proto3' + '\n.google/cloud/dataproc_v1beta2/proto/jobs.proto\x12\x1dgoogle.cloud.dataproc.v1beta2\x1a\x1cgoogle/api/annotations.proto\x1a\x17google/api/client.proto\x1a\x1fgoogle/api/field_behavior.proto\x1a#google/longrunning/operations.proto\x1a\x1bgoogle/protobuf/empty.proto\x1a google/protobuf/field_mask.proto\x1a\x1fgoogle/protobuf/timestamp.proto"\xcb\x02\n\rLoggingConfig\x12\\\n\x11\x64river_log_levels\x18\x02 \x03(\x0b\x32\x41.google.cloud.dataproc.v1beta2.LoggingConfig.DriverLogLevelsEntry\x1aj\n\x14\x44riverLogLevelsEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\x41\n\x05value\x18\x02 \x01(\x0e\x32\x32.google.cloud.dataproc.v1beta2.LoggingConfig.Level:\x02\x38\x01"p\n\x05Level\x12\x15\n\x11LEVEL_UNSPECIFIED\x10\x00\x12\x07\n\x03\x41LL\x10\x01\x12\t\n\x05TRACE\x10\x02\x12\t\n\x05\x44\x45\x42UG\x10\x03\x12\x08\n\x04INFO\x10\x04\x12\x08\n\x04WARN\x10\x05\x12\t\n\x05\x45RROR\x10\x06\x12\t\n\x05\x46\x41TAL\x10\x07\x12\x07\n\x03OFF\x10\x08"\xfb\x02\n\tHadoopJob\x12\x1b\n\x11main_jar_file_uri\x18\x01 \x01(\tH\x00\x12\x14\n\nmain_class\x18\x02 \x01(\tH\x00\x12\x11\n\x04\x61rgs\x18\x03 \x03(\tB\x03\xe0\x41\x01\x12\x1a\n\rjar_file_uris\x18\x04 \x03(\tB\x03\xe0\x41\x01\x12\x16\n\tfile_uris\x18\x05 \x03(\tB\x03\xe0\x41\x01\x12\x19\n\x0c\x61rchive_uris\x18\x06 \x03(\tB\x03\xe0\x41\x01\x12Q\n\nproperties\x18\x07 \x03(\x0b\x32\x38.google.cloud.dataproc.v1beta2.HadoopJob.PropertiesEntryB\x03\xe0\x41\x01\x12I\n\x0elogging_config\x18\x08 \x01(\x0b\x32,.google.cloud.dataproc.v1beta2.LoggingConfigB\x03\xe0\x41\x01\x1a\x31\n\x0fPropertiesEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t:\x02\x38\x01\x42\x08\n\x06\x64river"\xf9\x02\n\x08SparkJob\x12\x1b\n\x11main_jar_file_uri\x18\x01 \x01(\tH\x00\x12\x14\n\nmain_class\x18\x02 \x01(\tH\x00\x12\x11\n\x04\x61rgs\x18\x03 \x03(\tB\x03\xe0\x41\x01\x12\x1a\n\rjar_file_uris\x18\x04 \x03(\tB\x03\xe0\x41\x01\x12\x16\n\tfile_uris\x18\x05 \x03(\tB\x03\xe0\x41\x01\x12\x19\n\x0c\x61rchive_uris\x18\x06 \x03(\tB\x03\xe0\x41\x01\x12P\n\nproperties\x18\x07 \x03(\x0b\x32\x37.google.cloud.dataproc.v1beta2.SparkJob.PropertiesEntryB\x03\xe0\x41\x01\x12I\n\x0elogging_config\x18\x08 \x01(\x0b\x32,.google.cloud.dataproc.v1beta2.LoggingConfigB\x03\xe0\x41\x01\x1a\x31\n\x0fPropertiesEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t:\x02\x38\x01\x42\x08\n\x06\x64river"\x82\x03\n\nPySparkJob\x12!\n\x14main_python_file_uri\x18\x01 \x01(\tB\x03\xe0\x41\x02\x12\x11\n\x04\x61rgs\x18\x02 \x03(\tB\x03\xe0\x41\x01\x12\x1d\n\x10python_file_uris\x18\x03 \x03(\tB\x03\xe0\x41\x01\x12\x1a\n\rjar_file_uris\x18\x04 \x03(\tB\x03\xe0\x41\x01\x12\x16\n\tfile_uris\x18\x05 \x03(\tB\x03\xe0\x41\x01\x12\x19\n\x0c\x61rchive_uris\x18\x06 \x03(\tB\x03\xe0\x41\x01\x12R\n\nproperties\x18\x07 \x03(\x0b\x32\x39.google.cloud.dataproc.v1beta2.PySparkJob.PropertiesEntryB\x03\xe0\x41\x01\x12I\n\x0elogging_config\x18\x08 \x01(\x0b\x32,.google.cloud.dataproc.v1beta2.LoggingConfigB\x03\xe0\x41\x01\x1a\x31\n\x0fPropertiesEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t:\x02\x38\x01"!\n\tQueryList\x12\x14\n\x07queries\x18\x01 \x03(\tB\x03\xe0\x41\x02"\xc4\x03\n\x07HiveJob\x12\x18\n\x0equery_file_uri\x18\x01 \x01(\tH\x00\x12>\n\nquery_list\x18\x02 \x01(\x0b\x32(.google.cloud.dataproc.v1beta2.QueryListH\x00\x12 
\n\x13\x63ontinue_on_failure\x18\x03 \x01(\x08\x42\x03\xe0\x41\x01\x12Z\n\x10script_variables\x18\x04 \x03(\x0b\x32;.google.cloud.dataproc.v1beta2.HiveJob.ScriptVariablesEntryB\x03\xe0\x41\x01\x12O\n\nproperties\x18\x05 \x03(\x0b\x32\x36.google.cloud.dataproc.v1beta2.HiveJob.PropertiesEntryB\x03\xe0\x41\x01\x12\x1a\n\rjar_file_uris\x18\x06 \x03(\tB\x03\xe0\x41\x01\x1a\x36\n\x14ScriptVariablesEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t:\x02\x38\x01\x1a\x31\n\x0fPropertiesEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t:\x02\x38\x01\x42\t\n\x07queries"\xf9\x03\n\x0bSparkSqlJob\x12\x18\n\x0equery_file_uri\x18\x01 \x01(\tH\x00\x12>\n\nquery_list\x18\x02 \x01(\x0b\x32(.google.cloud.dataproc.v1beta2.QueryListH\x00\x12^\n\x10script_variables\x18\x03 \x03(\x0b\x32?.google.cloud.dataproc.v1beta2.SparkSqlJob.ScriptVariablesEntryB\x03\xe0\x41\x01\x12S\n\nproperties\x18\x04 \x03(\x0b\x32:.google.cloud.dataproc.v1beta2.SparkSqlJob.PropertiesEntryB\x03\xe0\x41\x01\x12\x1a\n\rjar_file_uris\x18\x38 \x03(\tB\x03\xe0\x41\x01\x12I\n\x0elogging_config\x18\x06 \x01(\x0b\x32,.google.cloud.dataproc.v1beta2.LoggingConfigB\x03\xe0\x41\x01\x1a\x36\n\x14ScriptVariablesEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t:\x02\x38\x01\x1a\x31\n\x0fPropertiesEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t:\x02\x38\x01\x42\t\n\x07queries"\x8c\x04\n\x06PigJob\x12\x18\n\x0equery_file_uri\x18\x01 \x01(\tH\x00\x12>\n\nquery_list\x18\x02 \x01(\x0b\x32(.google.cloud.dataproc.v1beta2.QueryListH\x00\x12 \n\x13\x63ontinue_on_failure\x18\x03 \x01(\x08\x42\x03\xe0\x41\x01\x12Y\n\x10script_variables\x18\x04 \x03(\x0b\x32:.google.cloud.dataproc.v1beta2.PigJob.ScriptVariablesEntryB\x03\xe0\x41\x01\x12N\n\nproperties\x18\x05 \x03(\x0b\x32\x35.google.cloud.dataproc.v1beta2.PigJob.PropertiesEntryB\x03\xe0\x41\x01\x12\x1a\n\rjar_file_uris\x18\x06 \x03(\tB\x03\xe0\x41\x01\x12I\n\x0elogging_config\x18\x07 \x01(\x0b\x32,.google.cloud.dataproc.v1beta2.LoggingConfigB\x03\xe0\x41\x01\x1a\x36\n\x14ScriptVariablesEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t:\x02\x38\x01\x1a\x31\n\x0fPropertiesEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t:\x02\x38\x01\x42\t\n\x07queries"\xc0\x02\n\tSparkRJob\x12\x1c\n\x0fmain_r_file_uri\x18\x01 \x01(\tB\x03\xe0\x41\x02\x12\x11\n\x04\x61rgs\x18\x02 \x03(\tB\x03\xe0\x41\x01\x12\x16\n\tfile_uris\x18\x03 \x03(\tB\x03\xe0\x41\x01\x12\x19\n\x0c\x61rchive_uris\x18\x04 \x03(\tB\x03\xe0\x41\x01\x12Q\n\nproperties\x18\x05 \x03(\x0b\x32\x38.google.cloud.dataproc.v1beta2.SparkRJob.PropertiesEntryB\x03\xe0\x41\x01\x12I\n\x0elogging_config\x18\x06 \x01(\x0b\x32,.google.cloud.dataproc.v1beta2.LoggingConfigB\x03\xe0\x41\x01\x1a\x31\n\x0fPropertiesEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t:\x02\x38\x01"D\n\x0cJobPlacement\x12\x19\n\x0c\x63luster_name\x18\x01 \x01(\tB\x03\xe0\x41\x02\x12\x19\n\x0c\x63luster_uuid\x18\x02 \x01(\tB\x03\xe0\x41\x03"\xe0\x03\n\tJobStatus\x12\x42\n\x05state\x18\x01 \x01(\x0e\x32..google.cloud.dataproc.v1beta2.JobStatus.StateB\x03\xe0\x41\x03\x12\x14\n\x07\x64\x65tails\x18\x02 \x01(\tB\x03\xe0\x41\x03\x12\x39\n\x10state_start_time\x18\x06 \x01(\x0b\x32\x1a.google.protobuf.TimestampB\x03\xe0\x41\x03\x12H\n\x08substate\x18\x07 
\x01(\x0e\x32\x31.google.cloud.dataproc.v1beta2.JobStatus.SubstateB\x03\xe0\x41\x03"\xa9\x01\n\x05State\x12\x15\n\x11STATE_UNSPECIFIED\x10\x00\x12\x0b\n\x07PENDING\x10\x01\x12\x0e\n\nSETUP_DONE\x10\x08\x12\x0b\n\x07RUNNING\x10\x02\x12\x12\n\x0e\x43\x41NCEL_PENDING\x10\x03\x12\x12\n\x0e\x43\x41NCEL_STARTED\x10\x07\x12\r\n\tCANCELLED\x10\x04\x12\x08\n\x04\x44ONE\x10\x05\x12\t\n\x05\x45RROR\x10\x06\x12\x13\n\x0f\x41TTEMPT_FAILURE\x10\t"H\n\x08Substate\x12\x0f\n\x0bUNSPECIFIED\x10\x00\x12\r\n\tSUBMITTED\x10\x01\x12\n\n\x06QUEUED\x10\x02\x12\x10\n\x0cSTALE_STATUS\x10\x03"<\n\x0cJobReference\x12\x17\n\nproject_id\x18\x01 \x01(\tB\x03\xe0\x41\x02\x12\x13\n\x06job_id\x18\x02 \x01(\tB\x03\xe0\x41\x01"\xaa\x02\n\x0fYarnApplication\x12\x11\n\x04name\x18\x01 \x01(\tB\x03\xe0\x41\x03\x12H\n\x05state\x18\x02 \x01(\x0e\x32\x34.google.cloud.dataproc.v1beta2.YarnApplication.StateB\x03\xe0\x41\x03\x12\x15\n\x08progress\x18\x03 \x01(\x02\x42\x03\xe0\x41\x03\x12\x19\n\x0ctracking_url\x18\x04 \x01(\tB\x03\xe0\x41\x03"\x87\x01\n\x05State\x12\x15\n\x11STATE_UNSPECIFIED\x10\x00\x12\x07\n\x03NEW\x10\x01\x12\x0e\n\nNEW_SAVING\x10\x02\x12\r\n\tSUBMITTED\x10\x03\x12\x0c\n\x08\x41\x43\x43\x45PTED\x10\x04\x12\x0b\n\x07RUNNING\x10\x05\x12\x0c\n\x08\x46INISHED\x10\x06\x12\n\n\x06\x46\x41ILED\x10\x07\x12\n\n\x06KILLED\x10\x08"\xea\x08\n\x03Job\x12\x43\n\treference\x18\x01 \x01(\x0b\x32+.google.cloud.dataproc.v1beta2.JobReferenceB\x03\xe0\x41\x01\x12\x43\n\tplacement\x18\x02 \x01(\x0b\x32+.google.cloud.dataproc.v1beta2.JobPlacementB\x03\xe0\x41\x02\x12>\n\nhadoop_job\x18\x03 \x01(\x0b\x32(.google.cloud.dataproc.v1beta2.HadoopJobH\x00\x12<\n\tspark_job\x18\x04 \x01(\x0b\x32\'.google.cloud.dataproc.v1beta2.SparkJobH\x00\x12@\n\x0bpyspark_job\x18\x05 \x01(\x0b\x32).google.cloud.dataproc.v1beta2.PySparkJobH\x00\x12:\n\x08hive_job\x18\x06 \x01(\x0b\x32&.google.cloud.dataproc.v1beta2.HiveJobH\x00\x12\x38\n\x07pig_job\x18\x07 \x01(\x0b\x32%.google.cloud.dataproc.v1beta2.PigJobH\x00\x12?\n\x0bspark_r_job\x18\x15 \x01(\x0b\x32(.google.cloud.dataproc.v1beta2.SparkRJobH\x00\x12\x43\n\rspark_sql_job\x18\x0c \x01(\x0b\x32*.google.cloud.dataproc.v1beta2.SparkSqlJobH\x00\x12=\n\x06status\x18\x08 \x01(\x0b\x32(.google.cloud.dataproc.v1beta2.JobStatusB\x03\xe0\x41\x03\x12\x45\n\x0estatus_history\x18\r \x03(\x0b\x32(.google.cloud.dataproc.v1beta2.JobStatusB\x03\xe0\x41\x03\x12N\n\x11yarn_applications\x18\t \x03(\x0b\x32..google.cloud.dataproc.v1beta2.YarnApplicationB\x03\xe0\x41\x03\x12\x19\n\x0csubmitted_by\x18\n \x01(\tB\x03\xe0\x41\x03\x12\'\n\x1a\x64river_output_resource_uri\x18\x11 \x01(\tB\x03\xe0\x41\x03\x12%\n\x18\x64river_control_files_uri\x18\x0f \x01(\tB\x03\xe0\x41\x03\x12\x43\n\x06labels\x18\x12 \x03(\x0b\x32..google.cloud.dataproc.v1beta2.Job.LabelsEntryB\x03\xe0\x41\x01\x12\x45\n\nscheduling\x18\x14 \x01(\x0b\x32,.google.cloud.dataproc.v1beta2.JobSchedulingB\x03\xe0\x41\x01\x12\x15\n\x08job_uuid\x18\x16 \x01(\tB\x03\xe0\x41\x03\x1a-\n\x0bLabelsEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t:\x02\x38\x01\x42\n\n\x08type_job"3\n\rJobScheduling\x12"\n\x15max_failures_per_hour\x18\x01 \x01(\x05\x42\x03\xe0\x41\x01"\x8f\x01\n\x10SubmitJobRequest\x12\x17\n\nproject_id\x18\x01 \x01(\tB\x03\xe0\x41\x02\x12\x13\n\x06region\x18\x03 \x01(\tB\x03\xe0\x41\x02\x12\x34\n\x03job\x18\x02 \x01(\x0b\x32".google.cloud.dataproc.v1beta2.JobB\x03\xe0\x41\x02\x12\x17\n\nrequest_id\x18\x04 \x01(\tB\x03\xe0\x41\x01"R\n\rGetJobRequest\x12\x17\n\nproject_id\x18\x01 \x01(\tB\x03\xe0\x41\x02\x12\x13\n\x06region\x18\x03 
\x01(\tB\x03\xe0\x41\x02\x12\x13\n\x06job_id\x18\x02 \x01(\tB\x03\xe0\x41\x02"\xb8\x02\n\x0fListJobsRequest\x12\x17\n\nproject_id\x18\x01 \x01(\tB\x03\xe0\x41\x02\x12\x13\n\x06region\x18\x06 \x01(\tB\x03\xe0\x41\x02\x12\x16\n\tpage_size\x18\x02 \x01(\x05\x42\x03\xe0\x41\x01\x12\x17\n\npage_token\x18\x03 \x01(\tB\x03\xe0\x41\x01\x12\x19\n\x0c\x63luster_name\x18\x04 \x01(\tB\x03\xe0\x41\x01\x12^\n\x11job_state_matcher\x18\x05 \x01(\x0e\x32>.google.cloud.dataproc.v1beta2.ListJobsRequest.JobStateMatcherB\x03\xe0\x41\x01\x12\x13\n\x06\x66ilter\x18\x07 \x01(\tB\x03\xe0\x41\x01"6\n\x0fJobStateMatcher\x12\x07\n\x03\x41LL\x10\x00\x12\n\n\x06\x41\x43TIVE\x10\x01\x12\x0e\n\nNON_ACTIVE\x10\x02"\xc1\x01\n\x10UpdateJobRequest\x12\x17\n\nproject_id\x18\x01 \x01(\tB\x03\xe0\x41\x02\x12\x13\n\x06region\x18\x02 \x01(\tB\x03\xe0\x41\x02\x12\x13\n\x06job_id\x18\x03 \x01(\tB\x03\xe0\x41\x02\x12\x34\n\x03job\x18\x04 \x01(\x0b\x32".google.cloud.dataproc.v1beta2.JobB\x03\xe0\x41\x02\x12\x34\n\x0bupdate_mask\x18\x05 \x01(\x0b\x32\x1a.google.protobuf.FieldMaskB\x03\xe0\x41\x02"g\n\x10ListJobsResponse\x12\x35\n\x04jobs\x18\x01 \x03(\x0b\x32".google.cloud.dataproc.v1beta2.JobB\x03\xe0\x41\x03\x12\x1c\n\x0fnext_page_token\x18\x02 \x01(\tB\x03\xe0\x41\x01"U\n\x10\x43\x61ncelJobRequest\x12\x17\n\nproject_id\x18\x01 \x01(\tB\x03\xe0\x41\x02\x12\x13\n\x06region\x18\x03 \x01(\tB\x03\xe0\x41\x02\x12\x13\n\x06job_id\x18\x02 \x01(\tB\x03\xe0\x41\x02"U\n\x10\x44\x65leteJobRequest\x12\x17\n\nproject_id\x18\x01 \x01(\tB\x03\xe0\x41\x02\x12\x13\n\x06region\x18\x03 \x01(\tB\x03\xe0\x41\x02\x12\x13\n\x06job_id\x18\x02 \x01(\tB\x03\xe0\x41\x02\x32\xfb\t\n\rJobController\x12\xc2\x01\n\tSubmitJob\x12/.google.cloud.dataproc.v1beta2.SubmitJobRequest\x1a".google.cloud.dataproc.v1beta2.Job"`\x82\xd3\xe4\x93\x02@";/v1beta2/projects/{project_id}/regions/{region}/jobs:submit:\x01*\xda\x41\x17project_id, region, job\x12\xbe\x01\n\x06GetJob\x12,.google.cloud.dataproc.v1beta2.GetJobRequest\x1a".google.cloud.dataproc.v1beta2.Job"b\x82\xd3\xe4\x93\x02?\x12=/v1beta2/projects/{project_id}/regions/{region}/jobs/{job_id}\xda\x41\x1aproject_id, region, job_id\x12\xdb\x01\n\x08ListJobs\x12..google.cloud.dataproc.v1beta2.ListJobsRequest\x1a/.google.cloud.dataproc.v1beta2.ListJobsResponse"n\x82\xd3\xe4\x93\x02\x36\x12\x34/v1beta2/projects/{project_id}/regions/{region}/jobs\xda\x41\x12project_id, region\xda\x41\x1aproject_id, region, filter\x12\xac\x01\n\tUpdateJob\x12/.google.cloud.dataproc.v1beta2.UpdateJobRequest\x1a".google.cloud.dataproc.v1beta2.Job"J\x82\xd3\xe4\x93\x02\x44\x32=/v1beta2/projects/{project_id}/regions/{region}/jobs/{job_id}:\x03job\x12\xce\x01\n\tCancelJob\x12/.google.cloud.dataproc.v1beta2.CancelJobRequest\x1a".google.cloud.dataproc.v1beta2.Job"l\x82\xd3\xe4\x93\x02I"D/v1beta2/projects/{project_id}/regions/{region}/jobs/{job_id}:cancel:\x01*\xda\x41\x1aproject_id, region, job_id\x12\xb8\x01\n\tDeleteJob\x12/.google.cloud.dataproc.v1beta2.DeleteJobRequest\x1a\x16.google.protobuf.Empty"b\x82\xd3\xe4\x93\x02?*=/v1beta2/projects/{project_id}/regions/{region}/jobs/{job_id}\xda\x41\x1aproject_id, region, job_id\x1aK\xca\x41\x17\x64\x61taproc.googleapis.com\xd2\x41.https://www.googleapis.com/auth/cloud-platformBw\n!com.google.cloud.dataproc.v1beta2B\tJobsProtoP\x01ZEgoogle.golang.org/genproto/googleapis/cloud/dataproc/v1beta2;dataprocb\x06proto3' ), dependencies=[ google_dot_api_dot_annotations__pb2.DESCRIPTOR, google_dot_api_dot_client__pb2.DESCRIPTOR, google_dot_api_dot_field__behavior__pb2.DESCRIPTOR, + 
google_dot_longrunning_dot_operations__pb2.DESCRIPTOR, google_dot_protobuf_dot_empty__pb2.DESCRIPTOR, google_dot_protobuf_dot_field__mask__pb2.DESCRIPTOR, google_dot_protobuf_dot_timestamp__pb2.DESCRIPTOR, @@ -84,8 +88,8 @@ ], containing_type=None, serialized_options=None, - serialized_start=485, - serialized_end=597, + serialized_start=522, + serialized_end=634, ) _sym_db.RegisterEnumDescriptor(_LOGGINGCONFIG_LEVEL) @@ -136,8 +140,8 @@ ], containing_type=None, serialized_options=None, - serialized_start=3696, - serialized_end=3865, + serialized_start=3943, + serialized_end=4112, ) _sym_db.RegisterEnumDescriptor(_JOBSTATUS_STATE) @@ -162,8 +166,8 @@ ], containing_type=None, serialized_options=None, - serialized_start=3867, - serialized_end=3939, + serialized_start=4114, + serialized_end=4186, ) _sym_db.RegisterEnumDescriptor(_JOBSTATUS_SUBSTATE) @@ -207,8 +211,8 @@ ], containing_type=None, serialized_options=None, - serialized_start=4162, - serialized_end=4297, + serialized_start=4414, + serialized_end=4549, ) _sym_db.RegisterEnumDescriptor(_YARNAPPLICATION_STATE) @@ -230,8 +234,8 @@ ], containing_type=None, serialized_options=None, - serialized_start=5889, - serialized_end=5943, + serialized_start=6226, + serialized_end=6280, ) _sym_db.RegisterEnumDescriptor(_LISTJOBSREQUEST_JOBSTATEMATCHER) @@ -288,8 +292,8 @@ syntax="proto3", extension_ranges=[], oneofs=[], - serialized_start=377, - serialized_end=483, + serialized_start=414, + serialized_end=520, ) _LOGGINGCONFIG = _descriptor.Descriptor( @@ -326,8 +330,8 @@ syntax="proto3", extension_ranges=[], oneofs=[], - serialized_start=266, - serialized_end=597, + serialized_start=303, + serialized_end=634, ) @@ -383,8 +387,8 @@ syntax="proto3", extension_ranges=[], oneofs=[], - serialized_start=890, - serialized_end=939, + serialized_start=957, + serialized_end=1006, ) _HADOOPJOB = _descriptor.Descriptor( @@ -445,7 +449,7 @@ containing_type=None, is_extension=False, extension_scope=None, - serialized_options=None, + serialized_options=_b("\340A\001"), file=DESCRIPTOR, ), _descriptor.FieldDescriptor( @@ -463,7 +467,7 @@ containing_type=None, is_extension=False, extension_scope=None, - serialized_options=None, + serialized_options=_b("\340A\001"), file=DESCRIPTOR, ), _descriptor.FieldDescriptor( @@ -481,7 +485,7 @@ containing_type=None, is_extension=False, extension_scope=None, - serialized_options=None, + serialized_options=_b("\340A\001"), file=DESCRIPTOR, ), _descriptor.FieldDescriptor( @@ -499,7 +503,7 @@ containing_type=None, is_extension=False, extension_scope=None, - serialized_options=None, + serialized_options=_b("\340A\001"), file=DESCRIPTOR, ), _descriptor.FieldDescriptor( @@ -517,7 +521,7 @@ containing_type=None, is_extension=False, extension_scope=None, - serialized_options=None, + serialized_options=_b("\340A\001"), file=DESCRIPTOR, ), _descriptor.FieldDescriptor( @@ -535,7 +539,7 @@ containing_type=None, is_extension=False, extension_scope=None, - serialized_options=None, + serialized_options=_b("\340A\001"), file=DESCRIPTOR, ), ], @@ -555,8 +559,8 @@ fields=[], ) ], - serialized_start=600, - serialized_end=949, + serialized_start=637, + serialized_end=1016, ) @@ -612,8 +616,8 @@ syntax="proto3", extension_ranges=[], oneofs=[], - serialized_start=890, - serialized_end=939, + serialized_start=957, + serialized_end=1006, ) _SPARKJOB = _descriptor.Descriptor( @@ -674,7 +678,7 @@ containing_type=None, is_extension=False, extension_scope=None, - serialized_options=None, + serialized_options=_b("\340A\001"), file=DESCRIPTOR, ), 
_descriptor.FieldDescriptor( @@ -692,7 +696,7 @@ containing_type=None, is_extension=False, extension_scope=None, - serialized_options=None, + serialized_options=_b("\340A\001"), file=DESCRIPTOR, ), _descriptor.FieldDescriptor( @@ -710,7 +714,7 @@ containing_type=None, is_extension=False, extension_scope=None, - serialized_options=None, + serialized_options=_b("\340A\001"), file=DESCRIPTOR, ), _descriptor.FieldDescriptor( @@ -728,7 +732,7 @@ containing_type=None, is_extension=False, extension_scope=None, - serialized_options=None, + serialized_options=_b("\340A\001"), file=DESCRIPTOR, ), _descriptor.FieldDescriptor( @@ -746,7 +750,7 @@ containing_type=None, is_extension=False, extension_scope=None, - serialized_options=None, + serialized_options=_b("\340A\001"), file=DESCRIPTOR, ), _descriptor.FieldDescriptor( @@ -764,7 +768,7 @@ containing_type=None, is_extension=False, extension_scope=None, - serialized_options=None, + serialized_options=_b("\340A\001"), file=DESCRIPTOR, ), ], @@ -784,8 +788,8 @@ fields=[], ) ], - serialized_start=952, - serialized_end=1299, + serialized_start=1019, + serialized_end=1396, ) @@ -841,8 +845,8 @@ syntax="proto3", extension_ranges=[], oneofs=[], - serialized_start=890, - serialized_end=939, + serialized_start=957, + serialized_end=1006, ) _PYSPARKJOB = _descriptor.Descriptor( @@ -885,7 +889,7 @@ containing_type=None, is_extension=False, extension_scope=None, - serialized_options=None, + serialized_options=_b("\340A\001"), file=DESCRIPTOR, ), _descriptor.FieldDescriptor( @@ -903,7 +907,7 @@ containing_type=None, is_extension=False, extension_scope=None, - serialized_options=None, + serialized_options=_b("\340A\001"), file=DESCRIPTOR, ), _descriptor.FieldDescriptor( @@ -921,7 +925,7 @@ containing_type=None, is_extension=False, extension_scope=None, - serialized_options=None, + serialized_options=_b("\340A\001"), file=DESCRIPTOR, ), _descriptor.FieldDescriptor( @@ -939,7 +943,7 @@ containing_type=None, is_extension=False, extension_scope=None, - serialized_options=None, + serialized_options=_b("\340A\001"), file=DESCRIPTOR, ), _descriptor.FieldDescriptor( @@ -957,7 +961,7 @@ containing_type=None, is_extension=False, extension_scope=None, - serialized_options=None, + serialized_options=_b("\340A\001"), file=DESCRIPTOR, ), _descriptor.FieldDescriptor( @@ -975,7 +979,7 @@ containing_type=None, is_extension=False, extension_scope=None, - serialized_options=None, + serialized_options=_b("\340A\001"), file=DESCRIPTOR, ), _descriptor.FieldDescriptor( @@ -993,7 +997,7 @@ containing_type=None, is_extension=False, extension_scope=None, - serialized_options=None, + serialized_options=_b("\340A\001"), file=DESCRIPTOR, ), ], @@ -1005,8 +1009,8 @@ syntax="proto3", extension_ranges=[], oneofs=[], - serialized_start=1302, - serialized_end=1653, + serialized_start=1399, + serialized_end=1785, ) @@ -1044,8 +1048,8 @@ syntax="proto3", extension_ranges=[], oneofs=[], - serialized_start=1655, - serialized_end=1688, + serialized_start=1787, + serialized_end=1820, ) @@ -1101,8 +1105,8 @@ syntax="proto3", extension_ranges=[], oneofs=[], - serialized_start=2007, - serialized_end=2061, + serialized_start=2159, + serialized_end=2213, ) _HIVEJOB_PROPERTIESENTRY = _descriptor.Descriptor( @@ -1157,8 +1161,8 @@ syntax="proto3", extension_ranges=[], oneofs=[], - serialized_start=890, - serialized_end=939, + serialized_start=957, + serialized_end=1006, ) _HIVEJOB = _descriptor.Descriptor( @@ -1219,7 +1223,7 @@ containing_type=None, is_extension=False, extension_scope=None, - 
serialized_options=None, + serialized_options=_b("\340A\001"), file=DESCRIPTOR, ), _descriptor.FieldDescriptor( @@ -1237,7 +1241,7 @@ containing_type=None, is_extension=False, extension_scope=None, - serialized_options=None, + serialized_options=_b("\340A\001"), file=DESCRIPTOR, ), _descriptor.FieldDescriptor( @@ -1255,7 +1259,7 @@ containing_type=None, is_extension=False, extension_scope=None, - serialized_options=None, + serialized_options=_b("\340A\001"), file=DESCRIPTOR, ), _descriptor.FieldDescriptor( @@ -1273,7 +1277,7 @@ containing_type=None, is_extension=False, extension_scope=None, - serialized_options=None, + serialized_options=_b("\340A\001"), file=DESCRIPTOR, ), ], @@ -1293,8 +1297,8 @@ fields=[], ) ], - serialized_start=1691, - serialized_end=2123, + serialized_start=1823, + serialized_end=2275, ) @@ -1350,8 +1354,8 @@ syntax="proto3", extension_ranges=[], oneofs=[], - serialized_start=2007, - serialized_end=2061, + serialized_start=2159, + serialized_end=2213, ) _SPARKSQLJOB_PROPERTIESENTRY = _descriptor.Descriptor( @@ -1406,8 +1410,8 @@ syntax="proto3", extension_ranges=[], oneofs=[], - serialized_start=890, - serialized_end=939, + serialized_start=957, + serialized_end=1006, ) _SPARKSQLJOB = _descriptor.Descriptor( @@ -1468,7 +1472,7 @@ containing_type=None, is_extension=False, extension_scope=None, - serialized_options=None, + serialized_options=_b("\340A\001"), file=DESCRIPTOR, ), _descriptor.FieldDescriptor( @@ -1486,7 +1490,7 @@ containing_type=None, is_extension=False, extension_scope=None, - serialized_options=None, + serialized_options=_b("\340A\001"), file=DESCRIPTOR, ), _descriptor.FieldDescriptor( @@ -1504,7 +1508,7 @@ containing_type=None, is_extension=False, extension_scope=None, - serialized_options=None, + serialized_options=_b("\340A\001"), file=DESCRIPTOR, ), _descriptor.FieldDescriptor( @@ -1522,7 +1526,7 @@ containing_type=None, is_extension=False, extension_scope=None, - serialized_options=None, + serialized_options=_b("\340A\001"), file=DESCRIPTOR, ), ], @@ -1542,8 +1546,8 @@ fields=[], ) ], - serialized_start=2126, - serialized_end=2611, + serialized_start=2278, + serialized_end=2783, ) @@ -1599,8 +1603,8 @@ syntax="proto3", extension_ranges=[], oneofs=[], - serialized_start=2007, - serialized_end=2061, + serialized_start=2159, + serialized_end=2213, ) _PIGJOB_PROPERTIESENTRY = _descriptor.Descriptor( @@ -1655,8 +1659,8 @@ syntax="proto3", extension_ranges=[], oneofs=[], - serialized_start=890, - serialized_end=939, + serialized_start=957, + serialized_end=1006, ) _PIGJOB = _descriptor.Descriptor( @@ -1717,7 +1721,7 @@ containing_type=None, is_extension=False, extension_scope=None, - serialized_options=None, + serialized_options=_b("\340A\001"), file=DESCRIPTOR, ), _descriptor.FieldDescriptor( @@ -1735,7 +1739,7 @@ containing_type=None, is_extension=False, extension_scope=None, - serialized_options=None, + serialized_options=_b("\340A\001"), file=DESCRIPTOR, ), _descriptor.FieldDescriptor( @@ -1753,7 +1757,7 @@ containing_type=None, is_extension=False, extension_scope=None, - serialized_options=None, + serialized_options=_b("\340A\001"), file=DESCRIPTOR, ), _descriptor.FieldDescriptor( @@ -1771,7 +1775,7 @@ containing_type=None, is_extension=False, extension_scope=None, - serialized_options=None, + serialized_options=_b("\340A\001"), file=DESCRIPTOR, ), _descriptor.FieldDescriptor( @@ -1789,7 +1793,7 @@ containing_type=None, is_extension=False, extension_scope=None, - serialized_options=None, + serialized_options=_b("\340A\001"), file=DESCRIPTOR, ), ], 
@@ -1809,8 +1813,8 @@ fields=[], ) ], - serialized_start=2614, - serialized_end=3113, + serialized_start=2786, + serialized_end=3310, ) @@ -1866,8 +1870,8 @@ syntax="proto3", extension_ranges=[], oneofs=[], - serialized_start=890, - serialized_end=939, + serialized_start=957, + serialized_end=1006, ) _SPARKRJOB = _descriptor.Descriptor( @@ -1910,7 +1914,7 @@ containing_type=None, is_extension=False, extension_scope=None, - serialized_options=None, + serialized_options=_b("\340A\001"), file=DESCRIPTOR, ), _descriptor.FieldDescriptor( @@ -1928,7 +1932,7 @@ containing_type=None, is_extension=False, extension_scope=None, - serialized_options=None, + serialized_options=_b("\340A\001"), file=DESCRIPTOR, ), _descriptor.FieldDescriptor( @@ -1946,7 +1950,7 @@ containing_type=None, is_extension=False, extension_scope=None, - serialized_options=None, + serialized_options=_b("\340A\001"), file=DESCRIPTOR, ), _descriptor.FieldDescriptor( @@ -1964,7 +1968,7 @@ containing_type=None, is_extension=False, extension_scope=None, - serialized_options=None, + serialized_options=_b("\340A\001"), file=DESCRIPTOR, ), _descriptor.FieldDescriptor( @@ -1982,7 +1986,7 @@ containing_type=None, is_extension=False, extension_scope=None, - serialized_options=None, + serialized_options=_b("\340A\001"), file=DESCRIPTOR, ), ], @@ -1994,8 +1998,8 @@ syntax="proto3", extension_ranges=[], oneofs=[], - serialized_start=3116, - serialized_end=3411, + serialized_start=3313, + serialized_end=3633, ) @@ -2039,7 +2043,7 @@ containing_type=None, is_extension=False, extension_scope=None, - serialized_options=None, + serialized_options=_b("\340A\003"), file=DESCRIPTOR, ), ], @@ -2051,8 +2055,8 @@ syntax="proto3", extension_ranges=[], oneofs=[], - serialized_start=3413, - serialized_end=3476, + serialized_start=3635, + serialized_end=3703, ) @@ -2078,7 +2082,7 @@ containing_type=None, is_extension=False, extension_scope=None, - serialized_options=None, + serialized_options=_b("\340A\003"), file=DESCRIPTOR, ), _descriptor.FieldDescriptor( @@ -2096,7 +2100,7 @@ containing_type=None, is_extension=False, extension_scope=None, - serialized_options=None, + serialized_options=_b("\340A\003"), file=DESCRIPTOR, ), _descriptor.FieldDescriptor( @@ -2114,7 +2118,7 @@ containing_type=None, is_extension=False, extension_scope=None, - serialized_options=None, + serialized_options=_b("\340A\003"), file=DESCRIPTOR, ), _descriptor.FieldDescriptor( @@ -2132,7 +2136,7 @@ containing_type=None, is_extension=False, extension_scope=None, - serialized_options=None, + serialized_options=_b("\340A\003"), file=DESCRIPTOR, ), ], @@ -2144,8 +2148,8 @@ syntax="proto3", extension_ranges=[], oneofs=[], - serialized_start=3479, - serialized_end=3939, + serialized_start=3706, + serialized_end=4186, ) @@ -2189,7 +2193,7 @@ containing_type=None, is_extension=False, extension_scope=None, - serialized_options=None, + serialized_options=_b("\340A\001"), file=DESCRIPTOR, ), ], @@ -2201,8 +2205,8 @@ syntax="proto3", extension_ranges=[], oneofs=[], - serialized_start=3941, - serialized_end=3996, + serialized_start=4188, + serialized_end=4248, ) @@ -2294,8 +2298,8 @@ syntax="proto3", extension_ranges=[], oneofs=[], - serialized_start=3999, - serialized_end=4297, + serialized_start=4251, + serialized_end=4549, ) @@ -2351,8 +2355,8 @@ syntax="proto3", extension_ranges=[], oneofs=[], - serialized_start=5323, - serialized_end=5368, + serialized_start=5625, + serialized_end=5670, ) _JOB = _descriptor.Descriptor( @@ -2377,7 +2381,7 @@ containing_type=None, is_extension=False, 
extension_scope=None, - serialized_options=None, + serialized_options=_b("\340A\001"), file=DESCRIPTOR, ), _descriptor.FieldDescriptor( @@ -2539,7 +2543,7 @@ containing_type=None, is_extension=False, extension_scope=None, - serialized_options=None, + serialized_options=_b("\340A\003"), file=DESCRIPTOR, ), _descriptor.FieldDescriptor( @@ -2557,7 +2561,7 @@ containing_type=None, is_extension=False, extension_scope=None, - serialized_options=None, + serialized_options=_b("\340A\003"), file=DESCRIPTOR, ), _descriptor.FieldDescriptor( @@ -2575,7 +2579,7 @@ containing_type=None, is_extension=False, extension_scope=None, - serialized_options=None, + serialized_options=_b("\340A\003"), file=DESCRIPTOR, ), _descriptor.FieldDescriptor( @@ -2593,7 +2597,7 @@ containing_type=None, is_extension=False, extension_scope=None, - serialized_options=None, + serialized_options=_b("\340A\003"), file=DESCRIPTOR, ), _descriptor.FieldDescriptor( @@ -2611,7 +2615,7 @@ containing_type=None, is_extension=False, extension_scope=None, - serialized_options=None, + serialized_options=_b("\340A\003"), file=DESCRIPTOR, ), _descriptor.FieldDescriptor( @@ -2629,7 +2633,7 @@ containing_type=None, is_extension=False, extension_scope=None, - serialized_options=None, + serialized_options=_b("\340A\003"), file=DESCRIPTOR, ), _descriptor.FieldDescriptor( @@ -2647,7 +2651,7 @@ containing_type=None, is_extension=False, extension_scope=None, - serialized_options=None, + serialized_options=_b("\340A\001"), file=DESCRIPTOR, ), _descriptor.FieldDescriptor( @@ -2665,7 +2669,7 @@ containing_type=None, is_extension=False, extension_scope=None, - serialized_options=None, + serialized_options=_b("\340A\001"), file=DESCRIPTOR, ), _descriptor.FieldDescriptor( @@ -2683,7 +2687,7 @@ containing_type=None, is_extension=False, extension_scope=None, - serialized_options=None, + serialized_options=_b("\340A\003"), file=DESCRIPTOR, ), ], @@ -2703,8 +2707,8 @@ fields=[], ) ], - serialized_start=4300, - serialized_end=5380, + serialized_start=4552, + serialized_end=5682, ) @@ -2730,7 +2734,7 @@ containing_type=None, is_extension=False, extension_scope=None, - serialized_options=None, + serialized_options=_b("\340A\001"), file=DESCRIPTOR, ) ], @@ -2742,8 +2746,8 @@ syntax="proto3", extension_ranges=[], oneofs=[], - serialized_start=5382, - serialized_end=5428, + serialized_start=5684, + serialized_end=5735, ) @@ -2823,7 +2827,7 @@ containing_type=None, is_extension=False, extension_scope=None, - serialized_options=None, + serialized_options=_b("\340A\001"), file=DESCRIPTOR, ), ], @@ -2835,8 +2839,8 @@ syntax="proto3", extension_ranges=[], oneofs=[], - serialized_start=5431, - serialized_end=5569, + serialized_start=5738, + serialized_end=5881, ) @@ -2910,8 +2914,8 @@ syntax="proto3", extension_ranges=[], oneofs=[], - serialized_start=5571, - serialized_end=5653, + serialized_start=5883, + serialized_end=5965, ) @@ -2973,7 +2977,7 @@ containing_type=None, is_extension=False, extension_scope=None, - serialized_options=None, + serialized_options=_b("\340A\001"), file=DESCRIPTOR, ), _descriptor.FieldDescriptor( @@ -2991,7 +2995,7 @@ containing_type=None, is_extension=False, extension_scope=None, - serialized_options=None, + serialized_options=_b("\340A\001"), file=DESCRIPTOR, ), _descriptor.FieldDescriptor( @@ -3009,7 +3013,7 @@ containing_type=None, is_extension=False, extension_scope=None, - serialized_options=None, + serialized_options=_b("\340A\001"), file=DESCRIPTOR, ), _descriptor.FieldDescriptor( @@ -3027,7 +3031,7 @@ containing_type=None, 
is_extension=False, extension_scope=None, - serialized_options=None, + serialized_options=_b("\340A\001"), file=DESCRIPTOR, ), _descriptor.FieldDescriptor( @@ -3045,7 +3049,7 @@ containing_type=None, is_extension=False, extension_scope=None, - serialized_options=None, + serialized_options=_b("\340A\001"), file=DESCRIPTOR, ), ], @@ -3057,8 +3061,8 @@ syntax="proto3", extension_ranges=[], oneofs=[], - serialized_start=5656, - serialized_end=5943, + serialized_start=5968, + serialized_end=6280, ) @@ -3168,8 +3172,8 @@ syntax="proto3", extension_ranges=[], oneofs=[], - serialized_start=5946, - serialized_end=6139, + serialized_start=6283, + serialized_end=6476, ) @@ -3195,7 +3199,7 @@ containing_type=None, is_extension=False, extension_scope=None, - serialized_options=None, + serialized_options=_b("\340A\003"), file=DESCRIPTOR, ), _descriptor.FieldDescriptor( @@ -3213,7 +3217,7 @@ containing_type=None, is_extension=False, extension_scope=None, - serialized_options=None, + serialized_options=_b("\340A\001"), file=DESCRIPTOR, ), ], @@ -3225,8 +3229,8 @@ syntax="proto3", extension_ranges=[], oneofs=[], - serialized_start=6141, - serialized_end=6234, + serialized_start=6478, + serialized_end=6581, ) @@ -3300,8 +3304,8 @@ syntax="proto3", extension_ranges=[], oneofs=[], - serialized_start=6236, - serialized_end=6321, + serialized_start=6583, + serialized_end=6668, ) @@ -3375,8 +3379,8 @@ syntax="proto3", extension_ranges=[], oneofs=[], - serialized_start=6323, - serialized_end=6408, + serialized_start=6670, + serialized_end=6755, ) _LOGGINGCONFIG_DRIVERLOGLEVELSENTRY.fields_by_name[ @@ -3589,7 +3593,7 @@ ), DESCRIPTOR=_HADOOPJOB, __module__="google.cloud.dataproc_v1beta2.proto.jobs_pb2", - __doc__="""A Cloud Dataproc job for running `Apache Hadoop + __doc__="""A Dataproc job for running `Apache Hadoop MapReduce `__ jobs on `Apache Hadoop YARN `__. @@ -3631,9 +3635,8 @@ properties: Optional. A mapping of property names to values, used to configure Hadoop. Properties that conflict with values set by - the Cloud Dataproc API may be overwritten. Can include - properties set in /etc/hadoop/conf/\*-site and classes in user - code. + the Dataproc API may be overwritten. Can include properties + set in /etc/hadoop/conf/\*-site and classes in user code. logging_config: Optional. The runtime log config for job execution. """, @@ -3658,7 +3661,7 @@ ), DESCRIPTOR=_SPARKJOB, __module__="google.cloud.dataproc_v1beta2.proto.jobs_pb2", - __doc__="""A Cloud Dataproc job for running `Apache + __doc__="""A Dataproc job for running `Apache Spark `__ applications on YARN. The specification of the main method to call to drive the job. Specify either the jar file that contains the main class or the main class name. @@ -3693,9 +3696,9 @@ properties: Optional. A mapping of property names to values, used to configure Spark. Properties that conflict with values set by - the Cloud Dataproc API may be overwritten. Can include - properties set in /etc/spark/conf/spark-defaults.conf and - classes in user code. + the Dataproc API may be overwritten. Can include properties + set in /etc/spark/conf/spark-defaults.conf and classes in user + code. logging_config: Optional. The runtime log config for job execution. """, @@ -3720,7 +3723,7 @@ ), DESCRIPTOR=_PYSPARKJOB, __module__="google.cloud.dataproc_v1beta2.proto.jobs_pb2", - __doc__="""A Cloud Dataproc job for running `Apache + __doc__="""A Dataproc job for running `Apache PySpark `__ applications on YARN. @@ -3750,9 +3753,9 @@ properties: Optional. 
A mapping of property names to values, used to configure PySpark. Properties that conflict with values set by - the Cloud Dataproc API may be overwritten. Can include - properties set in /etc/spark/conf/spark-defaults.conf and - classes in user code. + the Dataproc API may be overwritten. Can include properties + set in /etc/spark/conf/spark-defaults.conf and classes in user + code. logging_config: Optional. The runtime log config for job execution. """, @@ -3810,7 +3813,7 @@ ), DESCRIPTOR=_HIVEJOB, __module__="google.cloud.dataproc_v1beta2.proto.jobs_pb2", - __doc__="""A Cloud Dataproc job for running `Apache + __doc__="""A Dataproc job for running `Apache Hive `__ queries on YARN. @@ -3832,9 +3835,9 @@ properties: Optional. A mapping of property names and values, used to configure Hive. Properties that conflict with values set by - the Cloud Dataproc API may be overwritten. Can include - properties set in /etc/hadoop/conf/\*-site.xml, - /etc/hive/conf/hive-site.xml, and classes in user code. + the Dataproc API may be overwritten. Can include properties + set in /etc/hadoop/conf/\*-site.xml, /etc/hive/conf/hive- + site.xml, and classes in user code. jar_file_uris: Optional. HCFS URIs of jar files to add to the CLASSPATH of the Hive server and Hadoop MapReduce (MR) tasks. Can contain @@ -3871,7 +3874,7 @@ ), DESCRIPTOR=_SPARKSQLJOB, __module__="google.cloud.dataproc_v1beta2.proto.jobs_pb2", - __doc__="""A Cloud Dataproc job for running `Apache Spark + __doc__="""A Dataproc job for running `Apache Spark SQL `__ queries. @@ -3889,7 +3892,7 @@ properties: Optional. A mapping of property names to values, used to configure Spark SQL's SparkConf. Properties that conflict with - values set by the Cloud Dataproc API may be overwritten. + values set by the Dataproc API may be overwritten. jar_file_uris: Optional. HCFS URIs of jar files to be added to the Spark CLASSPATH. @@ -3927,7 +3930,7 @@ ), DESCRIPTOR=_PIGJOB, __module__="google.cloud.dataproc_v1beta2.proto.jobs_pb2", - __doc__="""A Cloud Dataproc job for running `Apache + __doc__="""A Dataproc job for running `Apache Pig `__ queries on YARN. @@ -3949,9 +3952,9 @@ properties: Optional. A mapping of property names to values, used to configure Pig. Properties that conflict with values set by the - Cloud Dataproc API may be overwritten. Can include properties - set in /etc/hadoop/conf/\*-site.xml, - /etc/pig/conf/pig.properties, and classes in user code. + Dataproc API may be overwritten. Can include properties set in + /etc/hadoop/conf/\*-site.xml, /etc/pig/conf/pig.properties, + and classes in user code. jar_file_uris: Optional. HCFS URIs of jar files to add to the CLASSPATH of the Pig Client and Hadoop MapReduce (MR) tasks. Can contain @@ -3981,7 +3984,7 @@ ), DESCRIPTOR=_SPARKRJOB, __module__="google.cloud.dataproc_v1beta2.proto.jobs_pb2", - __doc__="""A Cloud Dataproc job for running `Apache + __doc__="""A Dataproc job for running `Apache SparkR `__ applications on YARN. @@ -4006,9 +4009,9 @@ properties: Optional. A mapping of property names to values, used to configure SparkR. Properties that conflict with values set by - the Cloud Dataproc API may be overwritten. Can include - properties set in /etc/spark/conf/spark-defaults.conf and - classes in user code. + the Dataproc API may be overwritten. Can include properties + set in /etc/spark/conf/spark-defaults.conf and classes in user + code. logging_config: Optional. The runtime log config for job execution. 
""", @@ -4024,7 +4027,7 @@ dict( DESCRIPTOR=_JOBPLACEMENT, __module__="google.cloud.dataproc_v1beta2.proto.jobs_pb2", - __doc__="""Cloud Dataproc job config. + __doc__="""Dataproc job config. Attributes: @@ -4032,8 +4035,8 @@ Required. The name of the cluster where the job will be submitted. cluster_uuid: - Output only. A cluster UUID generated by the Cloud Dataproc - service when the job is submitted. + Output only. A cluster UUID generated by the Dataproc service + when the job is submitted. """, # @@protoc_insertion_point(class_scope:google.cloud.dataproc.v1beta2.JobPlacement) ), @@ -4046,14 +4049,14 @@ dict( DESCRIPTOR=_JOBSTATUS, __module__="google.cloud.dataproc_v1beta2.proto.jobs_pb2", - __doc__="""Cloud Dataproc job status. + __doc__="""Dataproc job status. Attributes: state: Output only. A state message specifying the overall job state. details: - Output only. Optional job state details, such as an error + Output only. Optional Job state details, such as an error description if the state is ERROR. state_start_time: Output only. The time when this state was entered. @@ -4114,7 +4117,7 @@ Output only. The numerical progress of the application, from 1 to 100. tracking_url: - Optional. Output only. The HTTP URL of the ApplicationMaster, + Output only. The HTTP URL of the ApplicationMaster, HistoryServer, or TimelineServer that provides application- specific information. The URL uses the internal hostname, and requires a proxy server for resolution and, possibly, access. @@ -4139,7 +4142,7 @@ ), DESCRIPTOR=_JOB, __module__="google.cloud.dataproc_v1beta2.proto.jobs_pb2", - __doc__="""A Cloud Dataproc job resource. + __doc__="""A Dataproc job resource. Attributes: @@ -4154,20 +4157,6 @@ type_job: Required. The application/framework-specific portion of the job. - hadoop_job: - Job is a Hadoop job. - spark_job: - Job is a Spark job. - pyspark_job: - Job is a Pyspark job. - hive_job: - Job is a Hive job. - pig_job: - Job is a Pig job. - spark_r_job: - Job is a SparkR job. - spark_sql_job: - Job is a SparkSql job. status: Output only. The job status. Additional application-specific status information may be contained in the type\_job and @@ -4247,8 +4236,7 @@ Required. The ID of the Google Cloud Platform project that the job belongs to. region: - Required. The Cloud Dataproc region in which to handle the - request. + Required. The Dataproc region in which to handle the request. job: Required. The job resource. request_id: @@ -4283,8 +4271,7 @@ Required. The ID of the Google Cloud Platform project that the job belongs to. region: - Required. The Cloud Dataproc region in which to handle the - request. + Required. The Dataproc region in which to handle the request. job_id: Required. The job ID. """, @@ -4307,8 +4294,7 @@ Required. The ID of the Google Cloud Platform project that the job belongs to. region: - Required. The Cloud Dataproc region in which to handle the - request. + Required. The Dataproc region in which to handle the request. page_size: Optional. The number of results to return in each response. page_token: @@ -4352,8 +4338,7 @@ Required. The ID of the Google Cloud Platform project that the job belongs to. region: - Required. The Cloud Dataproc region in which to handle the - request. + Required. The Dataproc region in which to handle the request. job_id: Required. The job ID. job: @@ -4407,8 +4392,7 @@ Required. The ID of the Google Cloud Platform project that the job belongs to. region: - Required. The Cloud Dataproc region in which to handle the - request. + Required. 
     job_id:
         Required. The job ID.
     """,
@@ -4431,8 +4415,7 @@
         Required. The ID of the Google Cloud Platform project that the
         job belongs to.
     region:
-        Required. The Cloud Dataproc region in which to handle the
-        request.
+        Required. The Dataproc region in which to handle the request.
     job_id:
         Required. The job ID.
     """,
@@ -4445,39 +4428,101 @@
 DESCRIPTOR._options = None
 _LOGGINGCONFIG_DRIVERLOGLEVELSENTRY._options = None
 _HADOOPJOB_PROPERTIESENTRY._options = None
+_HADOOPJOB.fields_by_name["args"]._options = None
+_HADOOPJOB.fields_by_name["jar_file_uris"]._options = None
+_HADOOPJOB.fields_by_name["file_uris"]._options = None
+_HADOOPJOB.fields_by_name["archive_uris"]._options = None
+_HADOOPJOB.fields_by_name["properties"]._options = None
+_HADOOPJOB.fields_by_name["logging_config"]._options = None
 _SPARKJOB_PROPERTIESENTRY._options = None
+_SPARKJOB.fields_by_name["args"]._options = None
+_SPARKJOB.fields_by_name["jar_file_uris"]._options = None
+_SPARKJOB.fields_by_name["file_uris"]._options = None
+_SPARKJOB.fields_by_name["archive_uris"]._options = None
+_SPARKJOB.fields_by_name["properties"]._options = None
+_SPARKJOB.fields_by_name["logging_config"]._options = None
 _PYSPARKJOB_PROPERTIESENTRY._options = None
 _PYSPARKJOB.fields_by_name["main_python_file_uri"]._options = None
+_PYSPARKJOB.fields_by_name["args"]._options = None
+_PYSPARKJOB.fields_by_name["python_file_uris"]._options = None
+_PYSPARKJOB.fields_by_name["jar_file_uris"]._options = None
+_PYSPARKJOB.fields_by_name["file_uris"]._options = None
+_PYSPARKJOB.fields_by_name["archive_uris"]._options = None
+_PYSPARKJOB.fields_by_name["properties"]._options = None
+_PYSPARKJOB.fields_by_name["logging_config"]._options = None
 _QUERYLIST.fields_by_name["queries"]._options = None
 _HIVEJOB_SCRIPTVARIABLESENTRY._options = None
 _HIVEJOB_PROPERTIESENTRY._options = None
+_HIVEJOB.fields_by_name["continue_on_failure"]._options = None
+_HIVEJOB.fields_by_name["script_variables"]._options = None
+_HIVEJOB.fields_by_name["properties"]._options = None
+_HIVEJOB.fields_by_name["jar_file_uris"]._options = None
 _SPARKSQLJOB_SCRIPTVARIABLESENTRY._options = None
 _SPARKSQLJOB_PROPERTIESENTRY._options = None
+_SPARKSQLJOB.fields_by_name["script_variables"]._options = None
+_SPARKSQLJOB.fields_by_name["properties"]._options = None
+_SPARKSQLJOB.fields_by_name["jar_file_uris"]._options = None
+_SPARKSQLJOB.fields_by_name["logging_config"]._options = None
 _PIGJOB_SCRIPTVARIABLESENTRY._options = None
 _PIGJOB_PROPERTIESENTRY._options = None
+_PIGJOB.fields_by_name["continue_on_failure"]._options = None
+_PIGJOB.fields_by_name["script_variables"]._options = None
+_PIGJOB.fields_by_name["properties"]._options = None
+_PIGJOB.fields_by_name["jar_file_uris"]._options = None
+_PIGJOB.fields_by_name["logging_config"]._options = None
 _SPARKRJOB_PROPERTIESENTRY._options = None
 _SPARKRJOB.fields_by_name["main_r_file_uri"]._options = None
+_SPARKRJOB.fields_by_name["args"]._options = None
+_SPARKRJOB.fields_by_name["file_uris"]._options = None
+_SPARKRJOB.fields_by_name["archive_uris"]._options = None
+_SPARKRJOB.fields_by_name["properties"]._options = None
+_SPARKRJOB.fields_by_name["logging_config"]._options = None
 _JOBPLACEMENT.fields_by_name["cluster_name"]._options = None
+_JOBPLACEMENT.fields_by_name["cluster_uuid"]._options = None
+_JOBSTATUS.fields_by_name["state"]._options = None
+_JOBSTATUS.fields_by_name["details"]._options = None
+_JOBSTATUS.fields_by_name["state_start_time"]._options = None
+_JOBSTATUS.fields_by_name["substate"]._options = None
 _JOBREFERENCE.fields_by_name["project_id"]._options = None
+_JOBREFERENCE.fields_by_name["job_id"]._options = None
 _YARNAPPLICATION.fields_by_name["name"]._options = None
 _YARNAPPLICATION.fields_by_name["state"]._options = None
 _YARNAPPLICATION.fields_by_name["progress"]._options = None
 _YARNAPPLICATION.fields_by_name["tracking_url"]._options = None
 _JOB_LABELSENTRY._options = None
+_JOB.fields_by_name["reference"]._options = None
 _JOB.fields_by_name["placement"]._options = None
+_JOB.fields_by_name["status"]._options = None
+_JOB.fields_by_name["status_history"]._options = None
+_JOB.fields_by_name["yarn_applications"]._options = None
+_JOB.fields_by_name["submitted_by"]._options = None
+_JOB.fields_by_name["driver_output_resource_uri"]._options = None
+_JOB.fields_by_name["driver_control_files_uri"]._options = None
+_JOB.fields_by_name["labels"]._options = None
+_JOB.fields_by_name["scheduling"]._options = None
+_JOB.fields_by_name["job_uuid"]._options = None
+_JOBSCHEDULING.fields_by_name["max_failures_per_hour"]._options = None
 _SUBMITJOBREQUEST.fields_by_name["project_id"]._options = None
 _SUBMITJOBREQUEST.fields_by_name["region"]._options = None
 _SUBMITJOBREQUEST.fields_by_name["job"]._options = None
+_SUBMITJOBREQUEST.fields_by_name["request_id"]._options = None
 _GETJOBREQUEST.fields_by_name["project_id"]._options = None
 _GETJOBREQUEST.fields_by_name["region"]._options = None
 _GETJOBREQUEST.fields_by_name["job_id"]._options = None
 _LISTJOBSREQUEST.fields_by_name["project_id"]._options = None
 _LISTJOBSREQUEST.fields_by_name["region"]._options = None
+_LISTJOBSREQUEST.fields_by_name["page_size"]._options = None
+_LISTJOBSREQUEST.fields_by_name["page_token"]._options = None
+_LISTJOBSREQUEST.fields_by_name["cluster_name"]._options = None
+_LISTJOBSREQUEST.fields_by_name["job_state_matcher"]._options = None
+_LISTJOBSREQUEST.fields_by_name["filter"]._options = None
 _UPDATEJOBREQUEST.fields_by_name["project_id"]._options = None
 _UPDATEJOBREQUEST.fields_by_name["region"]._options = None
 _UPDATEJOBREQUEST.fields_by_name["job_id"]._options = None
 _UPDATEJOBREQUEST.fields_by_name["job"]._options = None
 _UPDATEJOBREQUEST.fields_by_name["update_mask"]._options = None
+_LISTJOBSRESPONSE.fields_by_name["jobs"]._options = None
+_LISTJOBSRESPONSE.fields_by_name["next_page_token"]._options = None
 _CANCELJOBREQUEST.fields_by_name["project_id"]._options = None
 _CANCELJOBREQUEST.fields_by_name["region"]._options = None
 _CANCELJOBREQUEST.fields_by_name["job_id"]._options = None
@@ -4493,8 +4538,8 @@
     serialized_options=_b(
         "\312A\027dataproc.googleapis.com\322A.https://www.googleapis.com/auth/cloud-platform"
     ),
-    serialized_start=6411,
-    serialized_end=7686,
+    serialized_start=6758,
+    serialized_end=8033,
     methods=[
         _descriptor.MethodDescriptor(
             name="SubmitJob",
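The descriptors above back the v1beta2 `JobControllerClient`. A minimal sketch of the submit path those docstrings document, against the pre-1.0 GAPIC surface regenerated here; the project, region, cluster, and bucket names are hypothetical:

```python
from google.cloud import dataproc_v1beta2

client = dataproc_v1beta2.JobControllerClient()

job = {
    "placement": {"cluster_name": "example-cluster"},  # hypothetical cluster
    "pyspark_job": {"main_python_file_uri": "gs://example-bucket/job.py"},
}

# SubmitJob returns immediately; the job itself runs asynchronously,
# so poll get_job until job.status.state reaches a terminal value.
submitted = client.submit_job("example-project", "us-central1", job)
print(submitted.reference.job_id)
```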
diff --git a/google/cloud/dataproc_v1beta2/proto/jobs_pb2_grpc.py b/google/cloud/dataproc_v1beta2/proto/jobs_pb2_grpc.py
index 9a07fdbb..e9a36ead 100644
--- a/google/cloud/dataproc_v1beta2/proto/jobs_pb2_grpc.py
+++ b/google/cloud/dataproc_v1beta2/proto/jobs_pb2_grpc.py
@@ -84,9 +84,9 @@ def UpdateJob(self, request, context):
     def CancelJob(self, request, context):
         """Starts a job cancellation request.
 
         To access the job resource after cancellation, call
-        [regions/{region}/jobs.list](/dataproc/docs/reference/rest/v1beta2/projects.regions.jobs/list)
+        [regions/{region}/jobs.list](https://cloud.google.com/dataproc/docs/reference/rest/v1beta2/projects.regions.jobs/list)
         or
-        [regions/{region}/jobs.get](/dataproc/docs/reference/rest/v1beta2/projects.regions.jobs/get).
+        [regions/{region}/jobs.get](https://cloud.google.com/dataproc/docs/reference/rest/v1beta2/projects.regions.jobs/get).
         """
         context.set_code(grpc.StatusCode.UNIMPLEMENTED)
         context.set_details("Method not implemented!")
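A minimal sketch of the flow that docstring describes, using the GAPIC client rather than the raw stub; all identifiers are hypothetical:

```python
from google.cloud import dataproc_v1beta2

client = dataproc_v1beta2.JobControllerClient()

# cancel_job only *requests* cancellation; it does not wait for it.
client.cancel_job("example-project", "us-central1", "example-job-id")

# Confirm the resulting state through jobs.get (or jobs.list), as the
# CancelJob docstring advises.
job = client.get_job("example-project", "us-central1", "example-job-id")
print(job.status.state)
```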
diff --git a/google/cloud/dataproc_v1beta2/proto/operations.proto b/google/cloud/dataproc_v1beta2/proto/operations.proto
index 74cbde3c..2e98fb82 100644
--- a/google/cloud/dataproc_v1beta2/proto/operations.proto
+++ b/google/cloud/dataproc_v1beta2/proto/operations.proto
@@ -1,4 +1,4 @@
-// Copyright 2019 Google LLC.
+// Copyright 2020 Google LLC
 //
 // Licensed under the Apache License, Version 2.0 (the "License");
 // you may not use this file except in compliance with the License.
@@ -11,7 +11,6 @@
 // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 // See the License for the specific language governing permissions and
 // limitations under the License.
-//
 
 syntax = "proto3";
diff --git a/google/cloud/dataproc_v1beta2/proto/shared.proto b/google/cloud/dataproc_v1beta2/proto/shared.proto
index de1130d9..eba80918 100644
--- a/google/cloud/dataproc_v1beta2/proto/shared.proto
+++ b/google/cloud/dataproc_v1beta2/proto/shared.proto
@@ -1,4 +1,4 @@
-// Copyright 2019 Google LLC.
+// Copyright 2020 Google LLC
 //
 // Licensed under the Apache License, Version 2.0 (the "License");
 // you may not use this file except in compliance with the License.
@@ -11,7 +11,6 @@
 // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 // See the License for the specific language governing permissions and
 // limitations under the License.
-//
 
 syntax = "proto3";
diff --git a/google/cloud/dataproc_v1beta2/proto/workflow_templates.proto b/google/cloud/dataproc_v1beta2/proto/workflow_templates.proto
index 2979593d..b8497e83 100644
--- a/google/cloud/dataproc_v1beta2/proto/workflow_templates.proto
+++ b/google/cloud/dataproc_v1beta2/proto/workflow_templates.proto
@@ -1,4 +1,4 @@
-// Copyright 2019 Google LLC.
+// Copyright 2020 Google LLC
 //
 // Licensed under the Apache License, Version 2.0 (the "License");
 // you may not use this file except in compliance with the License.
@@ -11,7 +11,6 @@
 // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 // See the License for the specific language governing permissions and
 // limitations under the License.
-//
 
 syntax = "proto3";
@@ -33,7 +32,7 @@ option java_outer_classname = "WorkflowTemplatesProto";
 option java_package = "com.google.cloud.dataproc.v1beta2";
 
 // The API interface for managing Workflow Templates in the
-// Cloud Dataproc API.
+// Dataproc API.
 service WorkflowTemplateService {
   option (google.api.default_host) = "dataproc.googleapis.com";
   option (google.api.oauth_scopes) = "https://www.googleapis.com/auth/cloud-platform";
@@ -78,9 +77,9 @@ service WorkflowTemplateService {
   // clusters to be deleted.
   //
   // The [Operation.metadata][google.longrunning.Operation.metadata] will be
-  // [WorkflowMetadata](/dataproc/docs/reference/rpc/google.cloud.dataproc.v1beta2#workflowmetadata).
+  // [WorkflowMetadata](https://cloud.google.com/dataproc/docs/reference/rpc/google.cloud.dataproc.v1beta2#workflowmetadata).
   // Also see [Using
-  // WorkflowMetadata](/dataproc/docs/concepts/workflows/debugging#using_workflowmetadata).
+  // WorkflowMetadata](https://cloud.google.com/dataproc/docs/concepts/workflows/debugging#using_workflowmetadata).
   //
   // On successful completion,
   // [Operation.response][google.longrunning.Operation.response] will be
@@ -119,9 +118,9 @@ service WorkflowTemplateService {
   // clusters to be deleted.
   //
   // The [Operation.metadata][google.longrunning.Operation.metadata] will be
-  // [WorkflowMetadata](/dataproc/docs/reference/rpc/google.cloud.dataproc.v1#workflowmetadata).
+  // [WorkflowMetadata](https://cloud.google.com/dataproc/docs/reference/rpc/google.cloud.dataproc.v1#workflowmetadata).
   // Also see [Using
-  // WorkflowMetadata](/dataproc/docs/concepts/workflows/debugging#using_workflowmetadata).
+  // WorkflowMetadata](https://cloud.google.com/dataproc/docs/concepts/workflows/debugging#using_workflowmetadata).
   //
   // On successful completion,
   // [Operation.response][google.longrunning.Operation.response] will be
@@ -179,7 +178,7 @@ service WorkflowTemplateService {
   }
 }
 
-// A Cloud Dataproc workflow template resource.
+// A Dataproc workflow template resource.
 message WorkflowTemplate {
   option (google.api.resource) = {
     type: "dataproc.googleapis.com/WorkflowTemplate"
@@ -327,22 +326,16 @@ message OrderedJob {
   // Required. The job definition.
   oneof job_type {
-    // Job is a Hadoop job.
     HadoopJob hadoop_job = 2;
 
-    // Job is a Spark job.
     SparkJob spark_job = 3;
 
-    // Job is a Pyspark job.
     PySparkJob pyspark_job = 4;
 
-    // Job is a Hive job.
     HiveJob hive_job = 5;
 
-    // Job is a Pig job.
     PigJob pig_job = 6;
 
-    // Job is a SparkSql job.
     SparkSqlJob spark_sql_job = 7;
   }
@@ -465,7 +458,7 @@ message ValueValidation {
   repeated string values = 1;
 }
 
-// A Cloud Dataproc workflow template resource.
+// A Dataproc workflow template resource.
 message WorkflowMetadata {
   // The operation state.
   enum State {
@@ -721,9 +714,7 @@ message UpdateWorkflowTemplateRequest {
   // Required. The updated workflow template.
   //
   // The `template.version` field must match the current version.
-  WorkflowTemplate template = 1 [
-    (google.api.field_behavior) = REQUIRED
-  ];
+  WorkflowTemplate template = 1 [(google.api.field_behavior) = REQUIRED];
 }
 
 // A request to list workflow templates in a project.
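The `OrderedJob` oneof above is what an inline workflow carries. A minimal sketch of instantiating such a template with the v1beta2 client; the project, region, step id, and cluster name are hypothetical, and the cluster config is elided:

```python
from google.cloud import dataproc_v1beta2

client = dataproc_v1beta2.WorkflowTemplateServiceClient()
parent = client.region_path("example-project", "us-central1")

template = {
    "placement": {
        "managed_cluster": {
            "cluster_name": "example-managed-cluster",
            "config": {},  # real cluster config elided for brevity
        }
    },
    "jobs": [
        {
            "step_id": "example-step",
            # Exactly one job_type branch may be set, per the oneof above.
            "hadoop_job": {
                "main_jar_file_uri": "file:///usr/lib/hadoop-mapreduce/hadoop-mapreduce-examples.jar",
                "args": ["teragen", "1000", "hdfs:///gen/"],
            },
        }
    ],
}

# Returns a long-running operation whose metadata is WorkflowMetadata.
operation = client.instantiate_inline_workflow_template(parent, template)
operation.result()  # block until the workflow (and cluster teardown) completes
```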
+ __doc__="""A Dataproc workflow template resource. Attributes: diff --git a/google/cloud/dataproc_v1beta2/proto/workflow_templates_pb2_grpc.py b/google/cloud/dataproc_v1beta2/proto/workflow_templates_pb2_grpc.py index e05372f5..f9ea0bd6 100644 --- a/google/cloud/dataproc_v1beta2/proto/workflow_templates_pb2_grpc.py +++ b/google/cloud/dataproc_v1beta2/proto/workflow_templates_pb2_grpc.py @@ -12,7 +12,7 @@ class WorkflowTemplateServiceStub(object): """The API interface for managing Workflow Templates in the - Cloud Dataproc API. + Dataproc API. """ def __init__(self, channel): @@ -60,7 +60,7 @@ def __init__(self, channel): class WorkflowTemplateServiceServicer(object): """The API interface for managing Workflow Templates in the - Cloud Dataproc API. + Dataproc API. """ def CreateWorkflowTemplate(self, request, context): @@ -94,9 +94,9 @@ def InstantiateWorkflowTemplate(self, request, context): clusters to be deleted. The [Operation.metadata][google.longrunning.Operation.metadata] will be - [WorkflowMetadata](/dataproc/docs/reference/rpc/google.cloud.dataproc.v1beta2#workflowmetadata). + [WorkflowMetadata](https://cloud.google.com/dataproc/docs/reference/rpc/google.cloud.dataproc.v1beta2#workflowmetadata). Also see [Using - WorkflowMetadata](/dataproc/docs/concepts/workflows/debugging#using_workflowmetadata). + WorkflowMetadata](https://cloud.google.com/dataproc/docs/concepts/workflows/debugging#using_workflowmetadata). On successful completion, [Operation.response][google.longrunning.Operation.response] will be @@ -124,9 +124,9 @@ def InstantiateInlineWorkflowTemplate(self, request, context): clusters to be deleted. The [Operation.metadata][google.longrunning.Operation.metadata] will be - [WorkflowMetadata](/dataproc/docs/reference/rpc/google.cloud.dataproc.v1#workflowmetadata). + [WorkflowMetadata](https://cloud.google.com/dataproc/docs/reference/rpc/google.cloud.dataproc.v1#workflowmetadata). Also see [Using - WorkflowMetadata](/dataproc/docs/concepts/workflows/debugging#using_workflowmetadata). + WorkflowMetadata](https://cloud.google.com/dataproc/docs/concepts/workflows/debugging#using_workflowmetadata). 
 
         On successful completion,
         [Operation.response][google.longrunning.Operation.response] will be
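A minimal sketch of instantiating a stored template by resource name, showing where the `WorkflowMetadata` referenced in these docstrings surfaces on the client side; the template id is hypothetical:

```python
from google.cloud import dataproc_v1beta2

client = dataproc_v1beta2.WorkflowTemplateServiceClient()
name = client.workflow_template_path(
    "example-project", "us-central1", "example-template"
)

operation = client.instantiate_workflow_template(name)
# Operation.metadata carries the WorkflowMetadata message (workflow graph,
# per-node state, and so on) once the service has reported it.
print(operation.metadata)
operation.result()  # block until the workflow finishes
```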
diff --git a/synth.metadata b/synth.metadata
index c746925d..f7c667d5 100644
--- a/synth.metadata
+++ b/synth.metadata
@@ -1,39 +1,25 @@
 {
-    "updateTime": "2020-02-20T23:02:30.926314Z",
+    "updateTime": "2020-03-03T13:17:24.564852Z",
     "sources": [
         {
             "generator": {
                 "name": "artman",
-                "version": "0.45.0",
-                "dockerImage": "googleapis/artman@sha256:6aec9c34db0e4be221cdaf6faba27bdc07cfea846808b3d3b964dfce3a9a0f9b"
-            }
-        },
-        {
-            "git": {
-                "name": ".",
-                "remote": "https://github.com/googleapis/python-dataproc.git",
-                "sha": "3f7fd5bef4ba959b9a6e153fb4ab6d8b6819948e"
+                "version": "0.47.0",
+                "dockerImage": "googleapis/artman@sha256:b3e50d6b8de03920b9f065bbc3d210e2ca93a043446f1fa16cdf567393c09678"
             }
         },
         {
             "git": {
                 "name": "googleapis",
                 "remote": "https://github.com/googleapis/googleapis.git",
-                "sha": "3eaaaf8626ce5b0c0bc7eee05e143beffa373b01",
-                "internalRef": "296274723",
-                "log": "3eaaaf8626ce5b0c0bc7eee05e143beffa373b01\nAdd BUILD.bazel for v1 secretmanager.googleapis.com\n\nPiperOrigin-RevId: 296274723\n\ne76149c3d992337f85eeb45643106aacae7ede82\nMove securitycenter v1 to use generate from annotations.\n\nPiperOrigin-RevId: 296266862\n\n203740c78ac69ee07c3bf6be7408048751f618f8\nAdd StackdriverLoggingConfig field to Cloud Tasks v2 API.\n\nPiperOrigin-RevId: 296256388\n\ne4117d5e9ed8bbca28da4a60a94947ca51cb2083\nCreate a Bazel BUILD file for the google.actions.type export.\n\nPiperOrigin-RevId: 296212567\n\na9639a0a9854fd6e1be08bba1ac3897f4f16cb2f\nAdd secretmanager.googleapis.com v1 protos\n\nPiperOrigin-RevId: 295983266\n\n"
-            }
-        },
-        {
-            "git": {
-                "name": "synthtool",
-                "remote": "rpc://devrel/cloud/libraries/tools/autosynth",
-                "sha": "706a38c26db42299845396cdae55db635c38794a"
+                "sha": "4a180bfff8a21645b3a935c2756e8d6ab18a74e0",
+                "internalRef": "298484782",
+                "log": "4a180bfff8a21645b3a935c2756e8d6ab18a74e0\nautoml/v1beta1 publish proto updates\n\nPiperOrigin-RevId: 298484782\n\n6de6e938b7df1cd62396563a067334abeedb9676\nchore: use the latest gapic-generator and protoc-java-resource-name-plugin in Bazel workspace.\n\nPiperOrigin-RevId: 298474513\n\n244ab2b83a82076a1fa7be63b7e0671af73f5c02\nAdds service config definition for bigqueryreservation v1\n\nPiperOrigin-RevId: 298455048\n\n83c6f84035ee0f80eaa44d8b688a010461cc4080\nUpdate google/api/auth.proto to make AuthProvider to have JwtLocation\n\nPiperOrigin-RevId: 297918498\n\ne9e90a787703ec5d388902e2cb796aaed3a385b4\nDialogflow weekly v2/v2beta1 library update:\n - adding get validation result\n - adding field mask override control for output audio config\nImportant updates are also posted at:\nhttps://cloud.google.com/dialogflow/docs/release-notes\n\nPiperOrigin-RevId: 297671458\n\n1a2b05cc3541a5f7714529c665aecc3ea042c646\nAdding .yaml and .json config files.\n\nPiperOrigin-RevId: 297570622\n\ndfe1cf7be44dee31d78f78e485d8c95430981d6e\nPublish `QueryOptions` proto.\n\nIntroduced a `query_options` input in `ExecuteSqlRequest`.\n\nPiperOrigin-RevId: 297497710\n\ndafc905f71e5d46f500b41ed715aad585be062c3\npubsub: revert pull init_rpc_timeout & max_rpc_timeout back to 25 seconds and reset multiplier to 1.0\n\nPiperOrigin-RevId: 297486523\n\nf077632ba7fee588922d9e8717ee272039be126d\nfirestore: add update_transform\n\nPiperOrigin-RevId: 297405063\n\n0aba1900ffef672ec5f0da677cf590ee5686e13b\ncluster: use square brace for cross-reference\n\nPiperOrigin-RevId: 297204568\n\n5dac2da18f6325cbaed54603c43f0667ecd50247\nRestore retry params in gapic config because securitycenter has non-standard default retry params.\nRestore a few retry codes for some idempotent methods.\n\nPiperOrigin-RevId: 297196720\n\n1eb61455530252bba8b2c8d4bc9832960e5a56f6\npubsub: v1 replace IAM HTTP rules\n\nPiperOrigin-RevId: 297188590\n\n80b2d25f8d43d9d47024ff06ead7f7166548a7ba\nDialogflow weekly v2/v2beta1 library update:\n - updates to mega agent api\n - adding field mask override control for output audio config\nImportant updates are also posted at:\nhttps://cloud.google.com/dialogflow/docs/release-notes\n\nPiperOrigin-RevId: 297187629\n\n0b1876b35e98f560f9c9ca9797955f020238a092\nUse an older version of protoc-docs-plugin that is compatible with the specified gapic-generator and protobuf versions.\n\nprotoc-docs-plugin >=0.4.0 (see commit https://github.com/googleapis/protoc-docs-plugin/commit/979f03ede6678c487337f3d7e88bae58df5207af) is incompatible with protobuf 3.9.1.\n\nPiperOrigin-RevId: 296986742\n\n1e47e676cddbbd8d93f19ba0665af15b5532417e\nFix: Restore a method signature for UpdateCluster\n\nPiperOrigin-RevId: 296901854\n\n7f910bcc4fc4704947ccfd3ceed015d16b9e00c2\nUpdate Dataproc v1beta2 client.\n\nPiperOrigin-RevId: 296451205\n\nde287524405a3dce124d301634731584fc0432d7\nFix: Reinstate method signatures that had been missed off some RPCs\nFix: Correct resource types for two fields\n\nPiperOrigin-RevId: 296435091\n\ne5bc9566ae057fb4c92f8b7e047f1c8958235b53\nDeprecate the endpoint_uris field, as it is unused.\n\nPiperOrigin-RevId: 296357191\n\n8c12e2b4dca94e12bff9f538bdac29524ff7ef7a\nUpdate Dataproc v1 client.\n\nPiperOrigin-RevId: 296336662\n\n17567c4a1ef0a9b50faa87024d66f8acbb561089\nRemoving erroneous comment, a la https://github.com/googleapis/java-speech/pull/103\n\nPiperOrigin-RevId: 296332968\n\n"
             }
         },
         {
             "template": {
-                "name": "python_split_library",
+                "name": "python_library",
                 "origin": "synthtool.gcp",
                 "version": "2020.2.4"
             }