From ee093a88841c7f9c9ea41b066993e56b4abe267d Mon Sep 17 00:00:00 2001 From: Yoshi Automation Bot Date: Mon, 28 Dec 2020 11:52:24 -0800 Subject: [PATCH] feat(v1beta2): remove DOCKER/FLINK from Component enum; adds HBASE (#108) Breaking change in v1beta2: 1. The `DOCKER` and `FLINK` values have been removed from the `Component` enum, and an `HBASE` value was added. Other changes: 1. There is a new `temp_bucket` field in `ClusterConfig`. 2. There is a new `preemptibility` field in `InstanceGroupConfig`. 3. The `project_id` field of `JobReference` is now optional instead of required. 4. There is a new `dag_timeout` field in `WorkflowTemplate`. 5. There are new `dag_timeout`, `dag_start_time`, and `dag_end_time` fields in `WorkflowMetadata`. 6. There are various updates to the doc comments. --- google/cloud/dataproc_v1/__init__.py | 4 +- google/cloud/dataproc_v1beta2/__init__.py | 4 +- .../proto/autoscaling_policies.proto | 64 ++++-- .../dataproc_v1beta2/proto/clusters.proto | 45 ++++- .../cloud/dataproc_v1beta2/proto/jobs.proto | 29 +-- .../cloud/dataproc_v1beta2/proto/shared.proto | 9 +- .../proto/workflow_templates.proto | 129 ++++++++---- .../workflow_template_service/async_client.py | 1 + .../workflow_template_service/client.py | 1 + .../types/autoscaling_policies.py | 31 +-- .../cloud/dataproc_v1beta2/types/clusters.py | 38 +++- google/cloud/dataproc_v1beta2/types/jobs.py | 43 ++-- google/cloud/dataproc_v1beta2/types/shared.py | 3 +- .../types/workflow_templates.py | 52 ++++- synth.metadata | 191 +++++++++++++++++- 15 files changed, 510 insertions(+), 134 deletions(-) diff --git a/google/cloud/dataproc_v1/__init__.py b/google/cloud/dataproc_v1/__init__.py index 82d780ab..b17ac4f8 100644 --- a/google/cloud/dataproc_v1/__init__.py +++ b/google/cloud/dataproc_v1/__init__.py @@ -115,7 +115,6 @@ "CancelJobRequest", "Cluster", "ClusterConfig", - "ClusterControllerClient", "ClusterMetrics", "ClusterOperation", "ClusterOperationMetadata", @@ -192,6 +191,7 @@ 
"WorkflowNode", "WorkflowTemplate", "WorkflowTemplatePlacement", - "YarnApplication", "WorkflowTemplateServiceClient", + "YarnApplication", + "ClusterControllerClient", ) diff --git a/google/cloud/dataproc_v1beta2/__init__.py b/google/cloud/dataproc_v1beta2/__init__.py index 1a0d3c1a..f7a24164 100644 --- a/google/cloud/dataproc_v1beta2/__init__.py +++ b/google/cloud/dataproc_v1beta2/__init__.py @@ -149,7 +149,6 @@ "InstantiateInlineWorkflowTemplateRequest", "InstantiateWorkflowTemplateRequest", "Job", - "JobControllerClient", "JobMetadata", "JobPlacement", "JobReference", @@ -194,6 +193,7 @@ "WorkflowNode", "WorkflowTemplate", "WorkflowTemplatePlacement", - "YarnApplication", "WorkflowTemplateServiceClient", + "YarnApplication", + "JobControllerClient", ) diff --git a/google/cloud/dataproc_v1beta2/proto/autoscaling_policies.proto b/google/cloud/dataproc_v1beta2/proto/autoscaling_policies.proto index a7d6376b..7601cca8 100644 --- a/google/cloud/dataproc_v1beta2/proto/autoscaling_policies.proto +++ b/google/cloud/dataproc_v1beta2/proto/autoscaling_policies.proto @@ -36,10 +36,12 @@ option (google.api.resource_definition) = { // Cloud Dataproc API. service AutoscalingPolicyService { option (google.api.default_host) = "dataproc.googleapis.com"; - option (google.api.oauth_scopes) = "https://www.googleapis.com/auth/cloud-platform"; + option (google.api.oauth_scopes) = + "https://www.googleapis.com/auth/cloud-platform"; // Creates new autoscaling policy. - rpc CreateAutoscalingPolicy(CreateAutoscalingPolicyRequest) returns (AutoscalingPolicy) { + rpc CreateAutoscalingPolicy(CreateAutoscalingPolicyRequest) + returns (AutoscalingPolicy) { option (google.api.http) = { post: "/v1beta2/{parent=projects/*/locations/*}/autoscalingPolicies" body: "policy" @@ -55,7 +57,8 @@ service AutoscalingPolicyService { // // Disabled check for update_mask, because all updates will be full // replacements. 
- rpc UpdateAutoscalingPolicy(UpdateAutoscalingPolicyRequest) returns (AutoscalingPolicy) { + rpc UpdateAutoscalingPolicy(UpdateAutoscalingPolicyRequest) + returns (AutoscalingPolicy) { option (google.api.http) = { put: "/v1beta2/{policy.name=projects/*/locations/*/autoscalingPolicies/*}" body: "policy" @@ -68,7 +71,8 @@ service AutoscalingPolicyService { } // Retrieves autoscaling policy. - rpc GetAutoscalingPolicy(GetAutoscalingPolicyRequest) returns (AutoscalingPolicy) { + rpc GetAutoscalingPolicy(GetAutoscalingPolicyRequest) + returns (AutoscalingPolicy) { option (google.api.http) = { get: "/v1beta2/{name=projects/*/locations/*/autoscalingPolicies/*}" additional_bindings { @@ -79,7 +83,8 @@ service AutoscalingPolicyService { } // Lists autoscaling policies in the project. - rpc ListAutoscalingPolicies(ListAutoscalingPoliciesRequest) returns (ListAutoscalingPoliciesResponse) { + rpc ListAutoscalingPolicies(ListAutoscalingPoliciesRequest) + returns (ListAutoscalingPoliciesResponse) { option (google.api.http) = { get: "/v1beta2/{parent=projects/*/locations/*}/autoscalingPolicies" additional_bindings { @@ -91,7 +96,8 @@ service AutoscalingPolicyService { // Deletes an autoscaling policy. It is an error to delete an autoscaling // policy that is in use by one or more clusters. - rpc DeleteAutoscalingPolicy(DeleteAutoscalingPolicyRequest) returns (google.protobuf.Empty) { + rpc DeleteAutoscalingPolicy(DeleteAutoscalingPolicyRequest) + returns (google.protobuf.Empty) { option (google.api.http) = { delete: "/v1beta2/{name=projects/*/locations/*/autoscalingPolicies/*}" additional_bindings { @@ -136,22 +142,26 @@ message AutoscalingPolicy { } // Required. Describes how the autoscaler will operate for primary workers. - InstanceGroupAutoscalingPolicyConfig worker_config = 4 [(google.api.field_behavior) = REQUIRED]; + InstanceGroupAutoscalingPolicyConfig worker_config = 4 + [(google.api.field_behavior) = REQUIRED]; // Optional. 
Describes how the autoscaler will operate for secondary workers. - InstanceGroupAutoscalingPolicyConfig secondary_worker_config = 5 [(google.api.field_behavior) = OPTIONAL]; + InstanceGroupAutoscalingPolicyConfig secondary_worker_config = 5 + [(google.api.field_behavior) = OPTIONAL]; } // Basic algorithm for autoscaling. message BasicAutoscalingAlgorithm { // Required. YARN autoscaling configuration. - BasicYarnAutoscalingConfig yarn_config = 1 [(google.api.field_behavior) = REQUIRED]; + BasicYarnAutoscalingConfig yarn_config = 1 + [(google.api.field_behavior) = REQUIRED]; // Optional. Duration between scaling events. A scaling period starts after // the update operation from the previous event has completed. // // Bounds: [2m, 1d]. Default: 2m. - google.protobuf.Duration cooldown_period = 2 [(google.api.field_behavior) = OPTIONAL]; + google.protobuf.Duration cooldown_period = 2 + [(google.api.field_behavior) = OPTIONAL]; } // Basic autoscaling configurations for YARN. @@ -162,22 +172,29 @@ message BasicYarnAutoscalingConfig { // downscaling operations. // // Bounds: [0s, 1d]. - google.protobuf.Duration graceful_decommission_timeout = 5 [(google.api.field_behavior) = REQUIRED]; - - // Required. Fraction of average pending memory in the last cooldown period - // for which to add workers. A scale-up factor of 1.0 will result in scaling - // up so that there is no pending memory remaining after the update (more - // aggressive scaling). A scale-up factor closer to 0 will result in a smaller - // magnitude of scaling up (less aggressive scaling). + google.protobuf.Duration graceful_decommission_timeout = 5 + [(google.api.field_behavior) = REQUIRED]; + + // Required. Fraction of average YARN pending memory in the last cooldown + // period for which to add workers. A scale-up factor of 1.0 will result in + // scaling up so that there is no pending memory remaining after the update + // (more aggressive scaling). 
A scale-up factor closer to 0 will result in a + // smaller magnitude of scaling up (less aggressive scaling). See [How + // autoscaling + // works](https://cloud.google.com/dataproc/docs/concepts/configuring-clusters/autoscaling#how_autoscaling_works) + // for more information. // // Bounds: [0.0, 1.0]. double scale_up_factor = 1 [(google.api.field_behavior) = REQUIRED]; - // Required. Fraction of average pending memory in the last cooldown period - // for which to remove workers. A scale-down factor of 1 will result in + // Required. Fraction of average YARN pending memory in the last cooldown + // period for which to remove workers. A scale-down factor of 1 will result in // scaling down so that there is no available memory remaining after the // update (more aggressive scaling). A scale-down factor of 0 disables // removing workers, which can be beneficial for autoscaling a single job. + // See [How autoscaling + // works](https://cloud.google.com/dataproc/docs/concepts/configuring-clusters/autoscaling#how_autoscaling_works) + // for more information. // // Bounds: [0.0, 1.0]. double scale_down_factor = 2 [(google.api.field_behavior) = REQUIRED]; @@ -189,7 +206,8 @@ message BasicYarnAutoscalingConfig { // on any recommended change. // // Bounds: [0.0, 1.0]. Default: 0.0. - double scale_up_min_worker_fraction = 3 [(google.api.field_behavior) = OPTIONAL]; + double scale_up_min_worker_fraction = 3 + [(google.api.field_behavior) = OPTIONAL]; // Optional. Minimum scale-down threshold as a fraction of total cluster size // before scaling occurs. For example, in a 20-worker cluster, a threshold of @@ -198,7 +216,8 @@ message BasicYarnAutoscalingConfig { // on any recommended change. // // Bounds: [0.0, 1.0]. Default: 0.0. 
- double scale_down_min_worker_fraction = 4 [(google.api.field_behavior) = OPTIONAL]; + double scale_down_min_worker_fraction = 4 + [(google.api.field_behavior) = OPTIONAL]; } // Configuration for the size bounds of an instance group, including its @@ -341,7 +360,8 @@ message ListAutoscalingPoliciesRequest { // A response to a request to list autoscaling policies in a project. message ListAutoscalingPoliciesResponse { // Output only. Autoscaling policies list. - repeated AutoscalingPolicy policies = 1 [(google.api.field_behavior) = OUTPUT_ONLY]; + repeated AutoscalingPolicy policies = 1 + [(google.api.field_behavior) = OUTPUT_ONLY]; // Output only. This token is included in the response if there are more // results to fetch. diff --git a/google/cloud/dataproc_v1beta2/proto/clusters.proto b/google/cloud/dataproc_v1beta2/proto/clusters.proto index 93e6fe79..49e32305 100644 --- a/google/cloud/dataproc_v1beta2/proto/clusters.proto +++ b/google/cloud/dataproc_v1beta2/proto/clusters.proto @@ -171,6 +171,17 @@ message ClusterConfig { // bucket](https://cloud.google.com/dataproc/docs/concepts/configuring-clusters/staging-bucket)). string config_bucket = 1 [(google.api.field_behavior) = OPTIONAL]; + // Optional. A Cloud Storage bucket used to store ephemeral cluster and jobs data, + // such as Spark and MapReduce history files. + // If you do not specify a temp bucket, + // Dataproc will determine a Cloud Storage location (US, + // ASIA, or EU) for your cluster's temp bucket according to the + // Compute Engine zone where your cluster is deployed, and then create + // and manage this project-level, per-location bucket. The default bucket has + // a TTL of 90 days, but you can use any TTL (or none) if you specify a + // bucket. + string temp_bucket = 2 [(google.api.field_behavior) = OPTIONAL]; + // Optional. The shared Compute Engine config settings for // all instances in a cluster. 
GceClusterConfig gce_cluster_config = 8 [(google.api.field_behavior) = OPTIONAL]; @@ -330,7 +341,7 @@ message GceClusterConfig { bool internal_ip_only = 7 [(google.api.field_behavior) = OPTIONAL]; // Optional. The [Dataproc service - // account](https://cloud.google.com/dataproc/docs/concepts/configuring-clusters/service-accounts#service_accounts_in_cloud_dataproc) + // account](https://cloud.google.com/dataproc/docs/concepts/configuring-clusters/service-accounts#service_accounts_in_dataproc) // (also see [VM Data Plane // identity](https://cloud.google.com/dataproc/docs/concepts/iam/dataproc-principals#vm_service_account_data_plane_identity)) // used by Dataproc cluster VM instances to access Google Cloud Platform @@ -374,6 +385,27 @@ message GceClusterConfig { // The config settings for Compute Engine resources in // an instance group, such as a master or worker group. message InstanceGroupConfig { + // Controls the use of + // [preemptible instances] + // (https://cloud.google.com/compute/docs/instances/preemptible) + // within the group. + enum Preemptibility { + // Preemptibility is unspecified, the system will choose the + // appropriate setting for each instance group. + PREEMPTIBILITY_UNSPECIFIED = 0; + + // Instances are non-preemptible. + // + // This option is allowed for all instance groups and is the only valid + // value for Master and Worker instance groups. + NON_PREEMPTIBLE = 1; + + // Instances are preemptible. + // + // This option is allowed only for secondary worker groups. + PREEMPTIBLE = 2; + } + // Optional. The number of VM instances in the instance group. // For master instance groups, must be set to 1. int32 num_instances = 1 [(google.api.field_behavior) = OPTIONAL]; @@ -424,6 +456,15 @@ message InstanceGroupConfig { // instances. bool is_preemptible = 6 [(google.api.field_behavior) = OUTPUT_ONLY]; + // Optional. Specifies the preemptibility of the instance group. 
+ // + // The default value for master and worker groups is + // `NON_PREEMPTIBLE`. This default cannot be changed. + // + // The default value for secondary instances is + // `PREEMPTIBLE`. + Preemptibility preemptibility = 10 [(google.api.field_behavior) = OPTIONAL]; + // Output only. The config for Compute Engine Instance Group // Manager that manages this group. // This is only used for preemptible instance groups. @@ -685,7 +726,7 @@ message ClusterStatus { message SoftwareConfig { // Optional. The version of software inside the cluster. It must be one of the // supported [Dataproc - // Versions](https://cloud.google.com/dataproc/docs/concepts/versioning/dataproc-versions#supported_cloud_dataproc_versions), + // Versions](https://cloud.google.com/dataproc/docs/concepts/versioning/dataproc-versions#supported_dataproc_versions), // such as "1.2" (including a subminor version, such as "1.2.29"), or the // ["preview" // version](https://cloud.google.com/dataproc/docs/concepts/versioning/dataproc-versions#other_versions). diff --git a/google/cloud/dataproc_v1beta2/proto/jobs.proto b/google/cloud/dataproc_v1beta2/proto/jobs.proto index 9d9aaae0..c99f6791 100644 --- a/google/cloud/dataproc_v1beta2/proto/jobs.proto +++ b/google/cloud/dataproc_v1beta2/proto/jobs.proto @@ -224,12 +224,12 @@ message SparkJob { // Spark driver and tasks. repeated string jar_file_uris = 4 [(google.api.field_behavior) = OPTIONAL]; - // Optional. HCFS URIs of files to be copied to the working directory of - // Spark drivers and distributed tasks. Useful for naively parallel tasks. + // Optional. HCFS URIs of files to be placed in the working directory of + // each executor. Useful for naively parallel tasks. repeated string file_uris = 5 [(google.api.field_behavior) = OPTIONAL]; - // Optional. HCFS URIs of archives to be extracted in the working directory - // of Spark drivers and tasks. Supported file types: + // Optional. 
HCFS URIs of archives to be extracted into the working directory + // of each executor. Supported file types: // .jar, .tar, .tar.gz, .tgz, and .zip. repeated string archive_uris = 6 [(google.api.field_behavior) = OPTIONAL]; @@ -265,11 +265,12 @@ message PySparkJob { // Python driver and tasks. repeated string jar_file_uris = 4 [(google.api.field_behavior) = OPTIONAL]; - // Optional. HCFS URIs of files to be copied to the working directory of - // Python drivers and distributed tasks. Useful for naively parallel tasks. + // Optional. HCFS URIs of files to be placed in the working directory of + // each executor. Useful for naively parallel tasks. repeated string file_uris = 5 [(google.api.field_behavior) = OPTIONAL]; - // Optional. HCFS URIs of archives to be extracted in the working directory of + // Optional. HCFS URIs of archives to be extracted into the working directory + // of each executor. Supported file types: // .jar, .tar, .tar.gz, .tgz, and .zip. repeated string archive_uris = 6 [(google.api.field_behavior) = OPTIONAL]; @@ -414,12 +415,12 @@ message SparkRJob { // occur that causes an incorrect job submission. repeated string args = 2 [(google.api.field_behavior) = OPTIONAL]; - // Optional. HCFS URIs of files to be copied to the working directory of - // R drivers and distributed tasks. Useful for naively parallel tasks. + // Optional. HCFS URIs of files to be placed in the working directory of + // each executor. Useful for naively parallel tasks. repeated string file_uris = 3 [(google.api.field_behavior) = OPTIONAL]; - // Optional. HCFS URIs of archives to be extracted in the working directory of - // Spark drivers and tasks. Supported file types: + // Optional. HCFS URIs of archives to be extracted into the working directory + // of each executor. Supported file types: // .jar, .tar, .tar.gz, .tgz, and .zip. 
repeated string archive_uris = 4 [(google.api.field_behavior) = OPTIONAL]; @@ -562,9 +563,9 @@ message JobStatus { // Encapsulates the full scoping used to reference a job. message JobReference { - // Required. The ID of the Google Cloud Platform project that the job - // belongs to. - string project_id = 1 [(google.api.field_behavior) = REQUIRED]; + // Optional. The ID of the Google Cloud Platform project that the job belongs to. If + // specified, must match the request project ID. + string project_id = 1 [(google.api.field_behavior) = OPTIONAL]; // Optional. The job ID, which must be unique within the project. // The ID must contain only letters (a-z, A-Z), numbers (0-9), diff --git a/google/cloud/dataproc_v1beta2/proto/shared.proto b/google/cloud/dataproc_v1beta2/proto/shared.proto index 130ae554..ac474aa5 100644 --- a/google/cloud/dataproc_v1beta2/proto/shared.proto +++ b/google/cloud/dataproc_v1beta2/proto/shared.proto @@ -25,20 +25,17 @@ option java_package = "com.google.cloud.dataproc.v1beta2"; // Cluster components that can be activated. enum Component { - // Unspecified component. + // Unspecified component. Specifying this will cause Cluster creation to fail. COMPONENT_UNSPECIFIED = 0; // The Anaconda python distribution. ANACONDA = 5; - // Docker - DOCKER = 13; - // The Druid query engine. DRUID = 9; - // Flink - FLINK = 14; + // HBase. + HBASE = 11; // The Hive Web HCatalog (the REST service for accessing HCatalog). 
HIVE_WEBHCAT = 3; diff --git a/google/cloud/dataproc_v1beta2/proto/workflow_templates.proto b/google/cloud/dataproc_v1beta2/proto/workflow_templates.proto index e5ef680b..48f2f719 100644 --- a/google/cloud/dataproc_v1beta2/proto/workflow_templates.proto +++ b/google/cloud/dataproc_v1beta2/proto/workflow_templates.proto @@ -23,6 +23,7 @@ import "google/api/resource.proto"; import "google/cloud/dataproc/v1beta2/clusters.proto"; import "google/cloud/dataproc/v1beta2/jobs.proto"; import "google/longrunning/operations.proto"; +import "google/protobuf/duration.proto"; import "google/protobuf/empty.proto"; import "google/protobuf/timestamp.proto"; @@ -35,10 +36,12 @@ option java_package = "com.google.cloud.dataproc.v1beta2"; // Dataproc API. service WorkflowTemplateService { option (google.api.default_host) = "dataproc.googleapis.com"; - option (google.api.oauth_scopes) = "https://www.googleapis.com/auth/cloud-platform"; + option (google.api.oauth_scopes) = + "https://www.googleapis.com/auth/cloud-platform"; // Creates new workflow template. - rpc CreateWorkflowTemplate(CreateWorkflowTemplateRequest) returns (WorkflowTemplate) { + rpc CreateWorkflowTemplate(CreateWorkflowTemplateRequest) + returns (WorkflowTemplate) { option (google.api.http) = { post: "/v1beta2/{parent=projects/*/regions/*}/workflowTemplates" body: "template" @@ -54,7 +57,8 @@ service WorkflowTemplateService { // // Can retrieve previously instantiated template by specifying optional // version parameter. - rpc GetWorkflowTemplate(GetWorkflowTemplateRequest) returns (WorkflowTemplate) { + rpc GetWorkflowTemplate(GetWorkflowTemplateRequest) + returns (WorkflowTemplate) { option (google.api.http) = { get: "/v1beta2/{name=projects/*/regions/*/workflowTemplates/*}" additional_bindings { @@ -84,7 +88,8 @@ service WorkflowTemplateService { // On successful completion, // [Operation.response][google.longrunning.Operation.response] will be // [Empty][google.protobuf.Empty]. 
- rpc InstantiateWorkflowTemplate(InstantiateWorkflowTemplateRequest) returns (google.longrunning.Operation) { + rpc InstantiateWorkflowTemplate(InstantiateWorkflowTemplateRequest) + returns (google.longrunning.Operation) { option (google.api.http) = { post: "/v1beta2/{name=projects/*/regions/*/workflowTemplates/*}:instantiate" body: "*" @@ -104,7 +109,8 @@ service WorkflowTemplateService { // Instantiates a template and begins execution. // // This method is equivalent to executing the sequence - // [CreateWorkflowTemplate][google.cloud.dataproc.v1beta2.WorkflowTemplateService.CreateWorkflowTemplate], [InstantiateWorkflowTemplate][google.cloud.dataproc.v1beta2.WorkflowTemplateService.InstantiateWorkflowTemplate], + // [CreateWorkflowTemplate][google.cloud.dataproc.v1beta2.WorkflowTemplateService.CreateWorkflowTemplate], + // [InstantiateWorkflowTemplate][google.cloud.dataproc.v1beta2.WorkflowTemplateService.InstantiateWorkflowTemplate], // [DeleteWorkflowTemplate][google.cloud.dataproc.v1beta2.WorkflowTemplateService.DeleteWorkflowTemplate]. // // The returned Operation can be used to track execution of @@ -125,7 +131,9 @@ service WorkflowTemplateService { // On successful completion, // [Operation.response][google.longrunning.Operation.response] will be // [Empty][google.protobuf.Empty]. - rpc InstantiateInlineWorkflowTemplate(InstantiateInlineWorkflowTemplateRequest) returns (google.longrunning.Operation) { + rpc InstantiateInlineWorkflowTemplate( + InstantiateInlineWorkflowTemplateRequest) + returns (google.longrunning.Operation) { option (google.api.http) = { post: "/v1beta2/{parent=projects/*/locations/*}/workflowTemplates:instantiateInline" body: "template" @@ -143,7 +151,8 @@ service WorkflowTemplateService { // Updates (replaces) workflow template. The updated template // must contain version that matches the current server version. 
- rpc UpdateWorkflowTemplate(UpdateWorkflowTemplateRequest) returns (WorkflowTemplate) { + rpc UpdateWorkflowTemplate(UpdateWorkflowTemplateRequest) + returns (WorkflowTemplate) { option (google.api.http) = { put: "/v1beta2/{template.name=projects/*/regions/*/workflowTemplates/*}" body: "template" @@ -156,7 +165,8 @@ service WorkflowTemplateService { } // Lists workflows that match the specified filter in the request. - rpc ListWorkflowTemplates(ListWorkflowTemplatesRequest) returns (ListWorkflowTemplatesResponse) { + rpc ListWorkflowTemplates(ListWorkflowTemplatesRequest) + returns (ListWorkflowTemplatesResponse) { option (google.api.http) = { get: "/v1beta2/{parent=projects/*/regions/*}/workflowTemplates" additional_bindings { @@ -167,7 +177,8 @@ service WorkflowTemplateService { } // Deletes a workflow template. It does not cancel in-progress workflows. - rpc DeleteWorkflowTemplate(DeleteWorkflowTemplateRequest) returns (google.protobuf.Empty) { + rpc DeleteWorkflowTemplate(DeleteWorkflowTemplateRequest) + returns (google.protobuf.Empty) { option (google.api.http) = { delete: "/v1beta2/{name=projects/*/regions/*/workflowTemplates/*}" additional_bindings { @@ -220,10 +231,12 @@ message WorkflowTemplate { int32 version = 3 [(google.api.field_behavior) = OPTIONAL]; // Output only. The time template was created. - google.protobuf.Timestamp create_time = 4 [(google.api.field_behavior) = OUTPUT_ONLY]; + google.protobuf.Timestamp create_time = 4 + [(google.api.field_behavior) = OUTPUT_ONLY]; // Output only. The time template was last updated. - google.protobuf.Timestamp update_time = 5 [(google.api.field_behavior) = OUTPUT_ONLY]; + google.protobuf.Timestamp update_time = 5 + [(google.api.field_behavior) = OUTPUT_ONLY]; // Optional. The labels to associate with this template. These labels // will be propagated to all jobs and clusters created by the workflow @@ -248,7 +261,20 @@ message WorkflowTemplate { // Optional. 
Template parameters whose values are substituted into the // template. Values for parameters must be provided when the template is // instantiated. - repeated TemplateParameter parameters = 9 [(google.api.field_behavior) = OPTIONAL]; + repeated TemplateParameter parameters = 9 + [(google.api.field_behavior) = OPTIONAL]; + + // Optional. Timeout duration for the DAG of jobs. You can use "s", "m", "h", + // and "d" suffixes for second, minute, hour, and day duration values, + // respectively. The timeout duration must be from 10 minutes ("10m") to 24 + // hours ("24h" or "1d"). The timer begins when the first job is submitted. If + // the workflow is running at the end of the timeout period, any remaining + // jobs are cancelled, the workflow is terminated, and if the workflow was + // running on a [managed + // cluster](https://cloud.google.com/dataproc/docs/concepts/workflows/using-workflows#configuring_or_selecting_a_cluster), + // the cluster is deleted. + google.protobuf.Duration dag_timeout = 10 + [(google.api.field_behavior) = OPTIONAL]; } // Specifies workflow execution target. @@ -316,8 +342,8 @@ message OrderedJob { // // The step id is used as prefix for job id, as job // `goog-dataproc-workflow-step-id` label, and in - // [prerequisiteStepIds][google.cloud.dataproc.v1beta2.OrderedJob.prerequisite_step_ids] field from other - // steps. + // [prerequisiteStepIds][google.cloud.dataproc.v1beta2.OrderedJob.prerequisite_step_ids] + // field from other steps. // // The id must contain only letters (a-z, A-Z), numbers (0-9), // underscores (_), and hyphens (-). Cannot begin or end with underscore @@ -326,23 +352,29 @@ message OrderedJob { // Required. The job definition. oneof job_type { - HadoopJob hadoop_job = 2; + // Optional. Job is a Hadoop job. + HadoopJob hadoop_job = 2 [(google.api.field_behavior) = OPTIONAL]; - SparkJob spark_job = 3; + // Optional. Job is a Spark job. 
+ SparkJob spark_job = 3 [(google.api.field_behavior) = OPTIONAL]; - PySparkJob pyspark_job = 4; + // Optional. Job is a PySpark job. + PySparkJob pyspark_job = 4 [(google.api.field_behavior) = OPTIONAL]; - HiveJob hive_job = 5; + // Optional. Job is a Hive job. + HiveJob hive_job = 5 [(google.api.field_behavior) = OPTIONAL]; - PigJob pig_job = 6; + // Optional. Job is a Pig job. + PigJob pig_job = 6 [(google.api.field_behavior) = OPTIONAL]; - // Spark R job - SparkRJob spark_r_job = 11; + // Optional. Job is a SparkR job. + SparkRJob spark_r_job = 11 [(google.api.field_behavior) = OPTIONAL]; - SparkSqlJob spark_sql_job = 7; + // Optional. Job is a SparkSql job. + SparkSqlJob spark_sql_job = 7 [(google.api.field_behavior) = OPTIONAL]; - // Presto job - PrestoJob presto_job = 12; + // Optional. Job is a Presto job. + PrestoJob presto_job = 12 [(google.api.field_behavior) = OPTIONAL]; } // Optional. The labels to associate with this job. @@ -362,7 +394,8 @@ message OrderedJob { // Optional. The optional list of prerequisite job step_ids. // If not specified, the job will start at the beginning of workflow. - repeated string prerequisite_step_ids = 10 [(google.api.field_behavior) = OPTIONAL]; + repeated string prerequisite_step_ids = 10 + [(google.api.field_behavior) = OPTIONAL]; } // A configurable parameter that replaces one or more fields in the template. @@ -388,10 +421,10 @@ message TemplateParameter { // A field is allowed to appear in at most one parameter's list of field // paths. // - // A field path is similar in syntax to a [google.protobuf.FieldMask][google.protobuf.FieldMask]. - // For example, a field path that references the zone field of a workflow - // template's cluster selector would be specified as - // `placement.clusterSelector.zone`. + // A field path is similar in syntax to a + // [google.protobuf.FieldMask][google.protobuf.FieldMask]. 
For example, a + // field path that references the zone field of a workflow template's cluster + // selector would be specified as `placement.clusterSelector.zone`. // // Also, field paths can reference fields using the following syntax: // @@ -498,13 +531,15 @@ message WorkflowMetadata { int32 version = 2 [(google.api.field_behavior) = OUTPUT_ONLY]; // Output only. The create cluster operation metadata. - ClusterOperation create_cluster = 3 [(google.api.field_behavior) = OUTPUT_ONLY]; + ClusterOperation create_cluster = 3 + [(google.api.field_behavior) = OUTPUT_ONLY]; // Output only. The workflow graph. WorkflowGraph graph = 4 [(google.api.field_behavior) = OUTPUT_ONLY]; // Output only. The delete cluster operation metadata. - ClusterOperation delete_cluster = 5 [(google.api.field_behavior) = OUTPUT_ONLY]; + ClusterOperation delete_cluster = 5 + [(google.api.field_behavior) = OUTPUT_ONLY]; // Output only. The workflow state. State state = 6 [(google.api.field_behavior) = OUTPUT_ONLY]; @@ -516,13 +551,35 @@ message WorkflowMetadata { map parameters = 8; // Output only. Workflow start time. - google.protobuf.Timestamp start_time = 9 [(google.api.field_behavior) = OUTPUT_ONLY]; + google.protobuf.Timestamp start_time = 9 + [(google.api.field_behavior) = OUTPUT_ONLY]; // Output only. Workflow end time. - google.protobuf.Timestamp end_time = 10 [(google.api.field_behavior) = OUTPUT_ONLY]; + google.protobuf.Timestamp end_time = 10 + [(google.api.field_behavior) = OUTPUT_ONLY]; // Output only. The UUID of target cluster. string cluster_uuid = 11 [(google.api.field_behavior) = OUTPUT_ONLY]; + + // Output only. The timeout duration for the DAG of jobs. + // Minimum timeout duration is 10 minutes and maximum is 24 hours, expressed + // as a + // [google.protobuf.Duration][https://developers.google.com/protocol-buffers/docs/proto3#json_mapping]. + // For example, "1800" = 1800 seconds/30 minutes duration. 
+ google.protobuf.Duration dag_timeout = 12 + [(google.api.field_behavior) = OUTPUT_ONLY]; + + // Output only. DAG start time, which is only set for workflows with + // [dag_timeout][google.cloud.dataproc.v1beta2.WorkflowMetadata.dag_timeout] + // when the DAG begins. + google.protobuf.Timestamp dag_start_time = 13 + [(google.api.field_behavior) = OUTPUT_ONLY]; + + // Output only. DAG end time, which is only set for workflows with + // [dag_timeout][google.cloud.dataproc.v1beta2.WorkflowMetadata.dag_timeout] + // when the DAG ends. + google.protobuf.Timestamp dag_end_time = 14 + [(google.api.field_behavior) = OUTPUT_ONLY]; } // The cluster operation triggered by a workflow. @@ -571,7 +628,8 @@ message WorkflowNode { string step_id = 1 [(google.api.field_behavior) = OUTPUT_ONLY]; // Output only. Node's prerequisite nodes. - repeated string prerequisite_step_ids = 2 [(google.api.field_behavior) = OUTPUT_ONLY]; + repeated string prerequisite_step_ids = 2 + [(google.api.field_behavior) = OUTPUT_ONLY]; // Output only. The job id; populated after the node enters RUNNING state. string job_id = 3 [(google.api.field_behavior) = OUTPUT_ONLY]; @@ -753,7 +811,8 @@ message ListWorkflowTemplatesRequest { // A response to a request to list workflow templates in a project. message ListWorkflowTemplatesResponse { // Output only. WorkflowTemplates list. - repeated WorkflowTemplate templates = 1 [(google.api.field_behavior) = OUTPUT_ONLY]; + repeated WorkflowTemplate templates = 1 + [(google.api.field_behavior) = OUTPUT_ONLY]; // Output only. This token is included in the response if there are more // results to fetch. 
To fetch additional results, provide this value as the diff --git a/google/cloud/dataproc_v1beta2/services/workflow_template_service/async_client.py b/google/cloud/dataproc_v1beta2/services/workflow_template_service/async_client.py index e6dbfb51..346e6875 100644 --- a/google/cloud/dataproc_v1beta2/services/workflow_template_service/async_client.py +++ b/google/cloud/dataproc_v1beta2/services/workflow_template_service/async_client.py @@ -32,6 +32,7 @@ from google.api_core import operation_async # type: ignore from google.cloud.dataproc_v1beta2.services.workflow_template_service import pagers from google.cloud.dataproc_v1beta2.types import workflow_templates +from google.protobuf import duration_pb2 as duration # type: ignore from google.protobuf import empty_pb2 as empty # type: ignore from google.protobuf import timestamp_pb2 as timestamp # type: ignore diff --git a/google/cloud/dataproc_v1beta2/services/workflow_template_service/client.py b/google/cloud/dataproc_v1beta2/services/workflow_template_service/client.py index f383a694..0584fd77 100644 --- a/google/cloud/dataproc_v1beta2/services/workflow_template_service/client.py +++ b/google/cloud/dataproc_v1beta2/services/workflow_template_service/client.py @@ -36,6 +36,7 @@ from google.api_core import operation_async # type: ignore from google.cloud.dataproc_v1beta2.services.workflow_template_service import pagers from google.cloud.dataproc_v1beta2.types import workflow_templates +from google.protobuf import duration_pb2 as duration # type: ignore from google.protobuf import empty_pb2 as empty # type: ignore from google.protobuf import timestamp_pb2 as timestamp # type: ignore diff --git a/google/cloud/dataproc_v1beta2/types/autoscaling_policies.py b/google/cloud/dataproc_v1beta2/types/autoscaling_policies.py index ebc355c6..1a3c408f 100644 --- a/google/cloud/dataproc_v1beta2/types/autoscaling_policies.py +++ b/google/cloud/dataproc_v1beta2/types/autoscaling_policies.py @@ -123,22 +123,27 @@ class 
BasicYarnAutoscalingConfig(proto.Message): Bounds: [0s, 1d]. scale_up_factor (float): - Required. Fraction of average pending memory in the last - cooldown period for which to add workers. A scale-up factor - of 1.0 will result in scaling up so that there is no pending - memory remaining after the update (more aggressive scaling). - A scale-up factor closer to 0 will result in a smaller - magnitude of scaling up (less aggressive scaling). + Required. Fraction of average YARN pending memory in the + last cooldown period for which to add workers. A scale-up + factor of 1.0 will result in scaling up so that there is no + pending memory remaining after the update (more aggressive + scaling). A scale-up factor closer to 0 will result in a + smaller magnitude of scaling up (less aggressive scaling). + See `How autoscaling + works `__ + for more information. Bounds: [0.0, 1.0]. scale_down_factor (float): - Required. Fraction of average pending memory in the last - cooldown period for which to remove workers. A scale-down - factor of 1 will result in scaling down so that there is no - available memory remaining after the update (more aggressive - scaling). A scale-down factor of 0 disables removing - workers, which can be beneficial for autoscaling a single - job. + Required. Fraction of average YARN pending memory in the + last cooldown period for which to remove workers. A + scale-down factor of 1 will result in scaling down so that + there is no available memory remaining after the update + (more aggressive scaling). A scale-down factor of 0 disables + removing workers, which can be beneficial for autoscaling a + single job. See `How autoscaling + works `__ + for more information. Bounds: [0.0, 1.0]. 
scale_up_min_worker_fraction (float): diff --git a/google/cloud/dataproc_v1beta2/types/clusters.py b/google/cloud/dataproc_v1beta2/types/clusters.py index bb747d0a..a81c517f 100644 --- a/google/cloud/dataproc_v1beta2/types/clusters.py +++ b/google/cloud/dataproc_v1beta2/types/clusters.py @@ -131,6 +131,18 @@ class ClusterConfig(proto.Message): and manage this project-level, per-location bucket (see `Dataproc staging bucket `__). + temp_bucket (str): + Optional. A Cloud Storage bucket used to + store ephemeral cluster and jobs data, such as + Spark and MapReduce history files. If you do not + specify a temp bucket, + Dataproc will determine a Cloud Storage location + (US, ASIA, or EU) for your cluster's temp bucket + according to the Compute Engine zone where your + cluster is deployed, and then create and manage + this project-level, per-location bucket. The + default bucket has a TTL of 90 days, but you can + use any TTL (or none) if you specify a bucket. gce_cluster_config (~.gcd_clusters.GceClusterConfig): Optional. The shared Compute Engine config settings for all instances in a cluster. @@ -188,6 +200,8 @@ class ClusterConfig(proto.Message): config_bucket = proto.Field(proto.STRING, number=1) + temp_bucket = proto.Field(proto.STRING, number=2) + gce_cluster_config = proto.Field( proto.MESSAGE, number=8, message="GceClusterConfig", ) @@ -360,7 +374,7 @@ class GceClusterConfig(proto.Message): external IP addresses. service_account (str): Optional. The `Dataproc service - account `__ + account `__ (also see `VM Data Plane identity `__) used by Dataproc cluster VM instances to access Google Cloud @@ -472,6 +486,15 @@ class InstanceGroupConfig(proto.Message): is_preemptible (bool): Output only. Specifies that this instance group contains preemptible instances. + preemptibility (~.gcd_clusters.InstanceGroupConfig.Preemptibility): + Optional. Specifies the preemptibility of the instance + group. 
+ + The default value for master and worker groups is + ``NON_PREEMPTIBLE``. This default cannot be changed. + + The default value for secondary instances is + ``PREEMPTIBLE``. managed_group_config (~.gcd_clusters.ManagedGroupConfig): Output only. The config for Compute Engine Instance Group Manager that manages this group. @@ -486,6 +509,15 @@ class InstanceGroupConfig(proto.Message): Platform `__. """ + class Preemptibility(proto.Enum): + r"""Controls the use of [preemptible instances] + (https://cloud.google.com/compute/docs/instances/preemptible) within + the group. + """ + PREEMPTIBILITY_UNSPECIFIED = 0 + NON_PREEMPTIBLE = 1 + PREEMPTIBLE = 2 + num_instances = proto.Field(proto.INT32, number=1) instance_names = proto.RepeatedField(proto.STRING, number=2) @@ -498,6 +530,8 @@ class InstanceGroupConfig(proto.Message): is_preemptible = proto.Field(proto.BOOL, number=6) + preemptibility = proto.Field(proto.ENUM, number=10, enum=Preemptibility,) + managed_group_config = proto.Field( proto.MESSAGE, number=7, message="ManagedGroupConfig", ) @@ -827,7 +861,7 @@ class SoftwareConfig(proto.Message): image_version (str): Optional. The version of software inside the cluster. It must be one of the supported `Dataproc - Versions `__, + Versions `__, such as "1.2" (including a subminor version, such as "1.2.29"), or the `"preview" version `__. diff --git a/google/cloud/dataproc_v1beta2/types/jobs.py b/google/cloud/dataproc_v1beta2/types/jobs.py index 02a3e4ec..c3b57d43 100644 --- a/google/cloud/dataproc_v1beta2/types/jobs.py +++ b/google/cloud/dataproc_v1beta2/types/jobs.py @@ -174,15 +174,14 @@ class SparkJob(proto.Message): Optional. HCFS URIs of jar files to add to the CLASSPATHs of the Spark driver and tasks. file_uris (Sequence[str]): - Optional. HCFS URIs of files to be copied to - the working directory of Spark drivers and - distributed tasks. Useful for naively parallel - tasks. + Optional. HCFS URIs of files to be placed in + the working directory of each executor. 
Useful + for naively parallel tasks. archive_uris (Sequence[str]): Optional. HCFS URIs of archives to be - extracted in the working directory of Spark - drivers and tasks. Supported file types: .jar, - .tar, .tar.gz, .tgz, and .zip. + extracted into the working directory of each + executor. Supported file types: .jar, .tar, + .tar.gz, .tgz, and .zip. properties (Sequence[~.gcd_jobs.SparkJob.PropertiesEntry]): Optional. A mapping of property names to values, used to configure Spark. Properties that @@ -234,14 +233,14 @@ class PySparkJob(proto.Message): Optional. HCFS URIs of jar files to add to the CLASSPATHs of the Python driver and tasks. file_uris (Sequence[str]): - Optional. HCFS URIs of files to be copied to - the working directory of Python drivers and - distributed tasks. Useful for naively parallel - tasks. + Optional. HCFS URIs of files to be placed in + the working directory of each executor. Useful + for naively parallel tasks. archive_uris (Sequence[str]): Optional. HCFS URIs of archives to be - extracted in the working directory of .jar, - .tar, .tar.gz, .tgz, and .zip. + extracted into the working directory of each + executor. Supported file types: .jar, .tar, + .tar.gz, .tgz, and .zip. properties (Sequence[~.gcd_jobs.PySparkJob.PropertiesEntry]): Optional. A mapping of property names to values, used to configure PySpark. Properties @@ -450,15 +449,14 @@ class SparkRJob(proto.Message): job properties, since a collision may occur that causes an incorrect job submission. file_uris (Sequence[str]): - Optional. HCFS URIs of files to be copied to - the working directory of R drivers and - distributed tasks. Useful for naively parallel - tasks. + Optional. HCFS URIs of files to be placed in + the working directory of each executor. Useful + for naively parallel tasks. archive_uris (Sequence[str]): Optional. HCFS URIs of archives to be - extracted in the working directory of Spark - drivers and tasks. 
Supported file types: .jar, - .tar, .tar.gz, .tgz, and .zip. + extracted into the working directory of each + executor. Supported file types: .jar, .tar, + .tar.gz, .tgz, and .zip. properties (Sequence[~.gcd_jobs.SparkRJob.PropertiesEntry]): Optional. A mapping of property names to values, used to configure SparkR. Properties @@ -608,8 +606,9 @@ class JobReference(proto.Message): Attributes: project_id (str): - Required. The ID of the Google Cloud Platform - project that the job belongs to. + Optional. The ID of the Google Cloud Platform + project that the job belongs to. If specified, + must match the request project ID. job_id (str): Optional. The job ID, which must be unique within the project. The ID must contain only letters (a-z, A-Z), diff --git a/google/cloud/dataproc_v1beta2/types/shared.py b/google/cloud/dataproc_v1beta2/types/shared.py index d1c3e288..524f0916 100644 --- a/google/cloud/dataproc_v1beta2/types/shared.py +++ b/google/cloud/dataproc_v1beta2/types/shared.py @@ -27,9 +27,8 @@ class Component(proto.Enum): r"""Cluster components that can be activated.""" COMPONENT_UNSPECIFIED = 0 ANACONDA = 5 - DOCKER = 13 DRUID = 9 - FLINK = 14 + HBASE = 11 HIVE_WEBHCAT = 3 JUPYTER = 1 KERBEROS = 7 diff --git a/google/cloud/dataproc_v1beta2/types/workflow_templates.py b/google/cloud/dataproc_v1beta2/types/workflow_templates.py index 31b80e6d..22b8d11d 100644 --- a/google/cloud/dataproc_v1beta2/types/workflow_templates.py +++ b/google/cloud/dataproc_v1beta2/types/workflow_templates.py @@ -20,6 +20,7 @@ from google.cloud.dataproc_v1beta2.types import clusters from google.cloud.dataproc_v1beta2.types import jobs as gcd_jobs +from google.protobuf import duration_pb2 as duration # type: ignore from google.protobuf import timestamp_pb2 as timestamp # type: ignore @@ -118,6 +119,17 @@ class WorkflowTemplate(proto.Message): are substituted into the template. Values for parameters must be provided when the template is instantiated. 
+ dag_timeout (~.duration.Duration): + Optional. Timeout duration for the DAG of jobs. You can use + "s", "m", "h", and "d" suffixes for second, minute, hour, + and day duration values, respectively. The timeout duration + must be from 10 minutes ("10m") to 24 hours ("24h" or "1d"). + The timer begins when the first job is submitted. If the + workflow is running at the end of the timeout period, any + remaining jobs are cancelled, the workflow is terminated, + and if the workflow was running on a `managed + cluster `__, + the cluster is deleted. """ id = proto.Field(proto.STRING, number=2) @@ -142,6 +154,8 @@ class WorkflowTemplate(proto.Message): proto.MESSAGE, number=9, message="TemplateParameter", ) + dag_timeout = proto.Field(proto.MESSAGE, number=10, message=duration.Duration,) + class WorkflowTemplatePlacement(proto.Message): r"""Specifies workflow execution target. @@ -245,21 +259,21 @@ class OrderedJob(proto.Message): underscore or hyphen. Must consist of between 3 and 50 characters. hadoop_job (~.gcd_jobs.HadoopJob): - + Optional. Job is a Hadoop job. spark_job (~.gcd_jobs.SparkJob): - + Optional. Job is a Spark job. pyspark_job (~.gcd_jobs.PySparkJob): - + Optional. Job is a PySpark job. hive_job (~.gcd_jobs.HiveJob): - + Optional. Job is a Hive job. pig_job (~.gcd_jobs.PigJob): - + Optional. Job is a Pig job. spark_r_job (~.gcd_jobs.SparkRJob): - Spark R job + Optional. Job is a SparkR job. spark_sql_job (~.gcd_jobs.SparkSqlJob): - + Optional. Job is a SparkSql job. presto_job (~.gcd_jobs.PrestoJob): - Presto job + Optional. Job is a Presto job. labels (Sequence[~.workflow_templates.OrderedJob.LabelsEntry]): Optional. The labels to associate with this job. @@ -494,6 +508,22 @@ class WorkflowMetadata(proto.Message): Output only. Workflow end time. cluster_uuid (str): Output only. The UUID of target cluster. + dag_timeout (~.duration.Duration): + Output only. The timeout duration for the DAG of jobs. 
+ Minimum timeout duration is 10 minutes and maximum is 24 + hours, expressed as a + [google.protobuf.Duration][https://developers.google.com/protocol-buffers/docs/proto3#json_mapping]. + For example, "1800" = 1800 seconds/30 minutes duration. + dag_start_time (~.timestamp.Timestamp): + Output only. DAG start time, which is only set for workflows + with + [dag_timeout][google.cloud.dataproc.v1beta2.WorkflowMetadata.dag_timeout] + when the DAG begins. + dag_end_time (~.timestamp.Timestamp): + Output only. DAG end time, which is only set for workflows + with + [dag_timeout][google.cloud.dataproc.v1beta2.WorkflowMetadata.dag_timeout] + when the DAG ends. """ class State(proto.Enum): @@ -525,6 +555,12 @@ class State(proto.Enum): cluster_uuid = proto.Field(proto.STRING, number=11) + dag_timeout = proto.Field(proto.MESSAGE, number=12, message=duration.Duration,) + + dag_start_time = proto.Field(proto.MESSAGE, number=13, message=timestamp.Timestamp,) + + dag_end_time = proto.Field(proto.MESSAGE, number=14, message=timestamp.Timestamp,) + class ClusterOperation(proto.Message): r"""The cluster operation triggered by a workflow. 
diff --git a/synth.metadata b/synth.metadata index e348f44c..c119770d 100644 --- a/synth.metadata +++ b/synth.metadata @@ -3,16 +3,16 @@ { "git": { "name": ".", - "remote": "git@github.com:googleapis/python-dataproc.git", - "sha": "c9fd4b33071b3e51d6c1feeb16ffb3c32c4be7cd" + "remote": "https://github.com/googleapis/python-dataproc.git", + "sha": "05c933c1a3f3df0d35705e99604da20b353d72e7" } }, { "git": { "name": "googleapis", "remote": "https://github.com/googleapis/googleapis.git", - "sha": "e3e7e7ddb0fecd7bc61ca03b5a9ddb29cc9b48d8", - "internalRef": "342967619" + "sha": "dabe30b45bc86fdaf4126f26cbcfd73babbfa3fe", + "internalRef": "345194337" } }, { @@ -49,5 +49,188 @@ "generator": "bazel" } } + ], + "generatedFiles": [ + ".flake8", + ".github/CONTRIBUTING.md", + ".github/ISSUE_TEMPLATE/bug_report.md", + ".github/ISSUE_TEMPLATE/feature_request.md", + ".github/ISSUE_TEMPLATE/support_request.md", + ".github/PULL_REQUEST_TEMPLATE.md", + ".github/release-please.yml", + ".github/snippet-bot.yml", + ".gitignore", + ".kokoro/build.sh", + ".kokoro/continuous/common.cfg", + ".kokoro/continuous/continuous.cfg", + ".kokoro/docker/docs/Dockerfile", + ".kokoro/docker/docs/fetch_gpg_keys.sh", + ".kokoro/docs/common.cfg", + ".kokoro/docs/docs-presubmit.cfg", + ".kokoro/docs/docs.cfg", + ".kokoro/populate-secrets.sh", + ".kokoro/presubmit/common.cfg", + ".kokoro/presubmit/presubmit.cfg", + ".kokoro/publish-docs.sh", + ".kokoro/release.sh", + ".kokoro/release/common.cfg", + ".kokoro/release/release.cfg", + ".kokoro/samples/lint/common.cfg", + ".kokoro/samples/lint/continuous.cfg", + ".kokoro/samples/lint/periodic.cfg", + ".kokoro/samples/lint/presubmit.cfg", + ".kokoro/samples/python3.6/common.cfg", + ".kokoro/samples/python3.6/continuous.cfg", + ".kokoro/samples/python3.6/periodic.cfg", + ".kokoro/samples/python3.6/presubmit.cfg", + ".kokoro/samples/python3.7/common.cfg", + ".kokoro/samples/python3.7/continuous.cfg", + ".kokoro/samples/python3.7/periodic.cfg", + 
".kokoro/samples/python3.7/presubmit.cfg", + ".kokoro/samples/python3.8/common.cfg", + ".kokoro/samples/python3.8/continuous.cfg", + ".kokoro/samples/python3.8/periodic.cfg", + ".kokoro/samples/python3.8/presubmit.cfg", + ".kokoro/test-samples.sh", + ".kokoro/trampoline.sh", + ".kokoro/trampoline_v2.sh", + ".trampolinerc", + "CODE_OF_CONDUCT.md", + "CONTRIBUTING.rst", + "LICENSE", + "MANIFEST.in", + "docs/_static/custom.css", + "docs/_templates/layout.html", + "docs/conf.py", + "docs/dataproc_v1/services.rst", + "docs/dataproc_v1/types.rst", + "docs/dataproc_v1beta2/services.rst", + "docs/dataproc_v1beta2/types.rst", + "docs/multiprocessing.rst", + "google/cloud/dataproc/__init__.py", + "google/cloud/dataproc/py.typed", + "google/cloud/dataproc_v1/__init__.py", + "google/cloud/dataproc_v1/proto/autoscaling_policies.proto", + "google/cloud/dataproc_v1/proto/clusters.proto", + "google/cloud/dataproc_v1/proto/jobs.proto", + "google/cloud/dataproc_v1/proto/operations.proto", + "google/cloud/dataproc_v1/proto/shared.proto", + "google/cloud/dataproc_v1/proto/workflow_templates.proto", + "google/cloud/dataproc_v1/py.typed", + "google/cloud/dataproc_v1/services/__init__.py", + "google/cloud/dataproc_v1/services/autoscaling_policy_service/__init__.py", + "google/cloud/dataproc_v1/services/autoscaling_policy_service/async_client.py", + "google/cloud/dataproc_v1/services/autoscaling_policy_service/client.py", + "google/cloud/dataproc_v1/services/autoscaling_policy_service/pagers.py", + "google/cloud/dataproc_v1/services/autoscaling_policy_service/transports/__init__.py", + "google/cloud/dataproc_v1/services/autoscaling_policy_service/transports/base.py", + "google/cloud/dataproc_v1/services/autoscaling_policy_service/transports/grpc.py", + "google/cloud/dataproc_v1/services/autoscaling_policy_service/transports/grpc_asyncio.py", + "google/cloud/dataproc_v1/services/cluster_controller/__init__.py", + "google/cloud/dataproc_v1/services/cluster_controller/async_client.py", + 
"google/cloud/dataproc_v1/services/cluster_controller/client.py", + "google/cloud/dataproc_v1/services/cluster_controller/pagers.py", + "google/cloud/dataproc_v1/services/cluster_controller/transports/__init__.py", + "google/cloud/dataproc_v1/services/cluster_controller/transports/base.py", + "google/cloud/dataproc_v1/services/cluster_controller/transports/grpc.py", + "google/cloud/dataproc_v1/services/cluster_controller/transports/grpc_asyncio.py", + "google/cloud/dataproc_v1/services/job_controller/__init__.py", + "google/cloud/dataproc_v1/services/job_controller/async_client.py", + "google/cloud/dataproc_v1/services/job_controller/client.py", + "google/cloud/dataproc_v1/services/job_controller/pagers.py", + "google/cloud/dataproc_v1/services/job_controller/transports/__init__.py", + "google/cloud/dataproc_v1/services/job_controller/transports/base.py", + "google/cloud/dataproc_v1/services/job_controller/transports/grpc.py", + "google/cloud/dataproc_v1/services/job_controller/transports/grpc_asyncio.py", + "google/cloud/dataproc_v1/services/workflow_template_service/__init__.py", + "google/cloud/dataproc_v1/services/workflow_template_service/async_client.py", + "google/cloud/dataproc_v1/services/workflow_template_service/client.py", + "google/cloud/dataproc_v1/services/workflow_template_service/pagers.py", + "google/cloud/dataproc_v1/services/workflow_template_service/transports/__init__.py", + "google/cloud/dataproc_v1/services/workflow_template_service/transports/base.py", + "google/cloud/dataproc_v1/services/workflow_template_service/transports/grpc.py", + "google/cloud/dataproc_v1/services/workflow_template_service/transports/grpc_asyncio.py", + "google/cloud/dataproc_v1/types/__init__.py", + "google/cloud/dataproc_v1/types/autoscaling_policies.py", + "google/cloud/dataproc_v1/types/clusters.py", + "google/cloud/dataproc_v1/types/jobs.py", + "google/cloud/dataproc_v1/types/operations.py", + "google/cloud/dataproc_v1/types/shared.py", + 
"google/cloud/dataproc_v1/types/workflow_templates.py", + "google/cloud/dataproc_v1beta2/__init__.py", + "google/cloud/dataproc_v1beta2/proto/autoscaling_policies.proto", + "google/cloud/dataproc_v1beta2/proto/clusters.proto", + "google/cloud/dataproc_v1beta2/proto/jobs.proto", + "google/cloud/dataproc_v1beta2/proto/operations.proto", + "google/cloud/dataproc_v1beta2/proto/shared.proto", + "google/cloud/dataproc_v1beta2/proto/workflow_templates.proto", + "google/cloud/dataproc_v1beta2/py.typed", + "google/cloud/dataproc_v1beta2/services/__init__.py", + "google/cloud/dataproc_v1beta2/services/autoscaling_policy_service/__init__.py", + "google/cloud/dataproc_v1beta2/services/autoscaling_policy_service/async_client.py", + "google/cloud/dataproc_v1beta2/services/autoscaling_policy_service/client.py", + "google/cloud/dataproc_v1beta2/services/autoscaling_policy_service/pagers.py", + "google/cloud/dataproc_v1beta2/services/autoscaling_policy_service/transports/__init__.py", + "google/cloud/dataproc_v1beta2/services/autoscaling_policy_service/transports/base.py", + "google/cloud/dataproc_v1beta2/services/autoscaling_policy_service/transports/grpc.py", + "google/cloud/dataproc_v1beta2/services/autoscaling_policy_service/transports/grpc_asyncio.py", + "google/cloud/dataproc_v1beta2/services/cluster_controller/__init__.py", + "google/cloud/dataproc_v1beta2/services/cluster_controller/async_client.py", + "google/cloud/dataproc_v1beta2/services/cluster_controller/client.py", + "google/cloud/dataproc_v1beta2/services/cluster_controller/pagers.py", + "google/cloud/dataproc_v1beta2/services/cluster_controller/transports/__init__.py", + "google/cloud/dataproc_v1beta2/services/cluster_controller/transports/base.py", + "google/cloud/dataproc_v1beta2/services/cluster_controller/transports/grpc.py", + "google/cloud/dataproc_v1beta2/services/cluster_controller/transports/grpc_asyncio.py", + "google/cloud/dataproc_v1beta2/services/job_controller/__init__.py", + 
"google/cloud/dataproc_v1beta2/services/job_controller/async_client.py", + "google/cloud/dataproc_v1beta2/services/job_controller/client.py", + "google/cloud/dataproc_v1beta2/services/job_controller/pagers.py", + "google/cloud/dataproc_v1beta2/services/job_controller/transports/__init__.py", + "google/cloud/dataproc_v1beta2/services/job_controller/transports/base.py", + "google/cloud/dataproc_v1beta2/services/job_controller/transports/grpc.py", + "google/cloud/dataproc_v1beta2/services/job_controller/transports/grpc_asyncio.py", + "google/cloud/dataproc_v1beta2/services/workflow_template_service/__init__.py", + "google/cloud/dataproc_v1beta2/services/workflow_template_service/async_client.py", + "google/cloud/dataproc_v1beta2/services/workflow_template_service/client.py", + "google/cloud/dataproc_v1beta2/services/workflow_template_service/pagers.py", + "google/cloud/dataproc_v1beta2/services/workflow_template_service/transports/__init__.py", + "google/cloud/dataproc_v1beta2/services/workflow_template_service/transports/base.py", + "google/cloud/dataproc_v1beta2/services/workflow_template_service/transports/grpc.py", + "google/cloud/dataproc_v1beta2/services/workflow_template_service/transports/grpc_asyncio.py", + "google/cloud/dataproc_v1beta2/types/__init__.py", + "google/cloud/dataproc_v1beta2/types/autoscaling_policies.py", + "google/cloud/dataproc_v1beta2/types/clusters.py", + "google/cloud/dataproc_v1beta2/types/jobs.py", + "google/cloud/dataproc_v1beta2/types/operations.py", + "google/cloud/dataproc_v1beta2/types/shared.py", + "google/cloud/dataproc_v1beta2/types/workflow_templates.py", + "mypy.ini", + "noxfile.py", + "renovate.json", + "samples/AUTHORING_GUIDE.md", + "samples/CONTRIBUTING.md", + "samples/snippets/noxfile.py", + "scripts/decrypt-secrets.sh", + "scripts/fixup_dataproc_v1_keywords.py", + "scripts/fixup_dataproc_v1beta2_keywords.py", + "scripts/readme-gen/readme_gen.py", + "scripts/readme-gen/templates/README.tmpl.rst", + 
"scripts/readme-gen/templates/auth.tmpl.rst", + "scripts/readme-gen/templates/auth_api_key.tmpl.rst", + "scripts/readme-gen/templates/install_deps.tmpl.rst", + "scripts/readme-gen/templates/install_portaudio.tmpl.rst", + "setup.cfg", + "testing/.gitignore", + "tests/unit/gapic/dataproc_v1/__init__.py", + "tests/unit/gapic/dataproc_v1/test_autoscaling_policy_service.py", + "tests/unit/gapic/dataproc_v1/test_cluster_controller.py", + "tests/unit/gapic/dataproc_v1/test_job_controller.py", + "tests/unit/gapic/dataproc_v1/test_workflow_template_service.py", + "tests/unit/gapic/dataproc_v1beta2/__init__.py", + "tests/unit/gapic/dataproc_v1beta2/test_autoscaling_policy_service.py", + "tests/unit/gapic/dataproc_v1beta2/test_cluster_controller.py", + "tests/unit/gapic/dataproc_v1beta2/test_job_controller.py", + "tests/unit/gapic/dataproc_v1beta2/test_workflow_template_service.py" ] } \ No newline at end of file