From ce4bd9177439ff9431cec1f8e56ee2c07ab75870 Mon Sep 17 00:00:00 2001 From: Yoshi Automation Bot Date: Fri, 21 Feb 2020 16:19:18 -0800 Subject: [PATCH] feat: add LifecycleConfig, ReservationAffinity, SparkRJob, PrestoJob to v1 This PR was generated using Autosynth. :rainbow: Commits in this repo since last synth:
Log from Synthtool ``` synthtool > Executing /tmpfs/src/git/autosynth/working_repo/synth.py. On branch autosynth nothing to commit, working tree clean HEAD detached at FETCH_HEAD nothing to commit, working tree clean synthtool > Ensuring dependencies. synthtool > Pulling artman image. latest: Pulling from googleapis/artman Digest: sha256:6aec9c34db0e4be221cdaf6faba27bdc07cfea846808b3d3b964dfce3a9a0f9b Status: Image is up to date for googleapis/artman:latest synthtool > Cloning googleapis. synthtool > Running generator for google/cloud/dataproc/artman_dataproc_v1.yaml. synthtool > Generated code into /home/kbuilder/.cache/synthtool/googleapis/artman-genfiles/java. synthtool > Replaced '// Generated by the protocol buffer compiler. DO NOT EDIT!' in /home/kbuilder/.cache/synthtool/googleapis/artman-genfiles/java/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/GetAutoscalingPolicyRequest.java. synthtool > Replaced '// Generated by the protocol buffer compiler. DO NOT EDIT!' in /home/kbuilder/.cache/synthtool/googleapis/artman-genfiles/java/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/KerberosConfigOrBuilder.java. synthtool > Replaced '// Generated by the protocol buffer compiler. DO NOT EDIT!' in /home/kbuilder/.cache/synthtool/googleapis/artman-genfiles/java/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/SparkRJob.java. synthtool > Replaced '// Generated by the protocol buffer compiler. DO NOT EDIT!' in /home/kbuilder/.cache/synthtool/googleapis/artman-genfiles/java/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/ListJobsRequestOrBuilder.java. synthtool > Replaced '// Generated by the protocol buffer compiler. DO NOT EDIT!' in /home/kbuilder/.cache/synthtool/googleapis/artman-genfiles/java/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/InstantiateWorkflowTemplateRequest.java. 
synthtool > Replaced '// Generated by the protocol buffer compiler. DO NOT EDIT!' in /home/kbuilder/.cache/synthtool/googleapis/artman-genfiles/java/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/LifecycleConfig.java. synthtool > Replaced '// Generated by the protocol buffer compiler. DO NOT EDIT!' in /home/kbuilder/.cache/synthtool/googleapis/artman-genfiles/java/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/DeleteWorkflowTemplateRequestOrBuilder.java. synthtool > Replaced '// Generated by the protocol buffer compiler. DO NOT EDIT!' in /home/kbuilder/.cache/synthtool/googleapis/artman-genfiles/java/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/DeleteJobRequest.java. synthtool > Replaced '// Generated by the protocol buffer compiler. DO NOT EDIT!' in /home/kbuilder/.cache/synthtool/googleapis/artman-genfiles/java/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/GetJobRequestOrBuilder.java. synthtool > Replaced '// Generated by the protocol buffer compiler. DO NOT EDIT!' in /home/kbuilder/.cache/synthtool/googleapis/artman-genfiles/java/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/InstanceGroupConfigOrBuilder.java. synthtool > Replaced '// Generated by the protocol buffer compiler. DO NOT EDIT!' in /home/kbuilder/.cache/synthtool/googleapis/artman-genfiles/java/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/ParameterValidation.java. synthtool > Replaced '// Generated by the protocol buffer compiler. DO NOT EDIT!' in /home/kbuilder/.cache/synthtool/googleapis/artman-genfiles/java/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/UpdateAutoscalingPolicyRequestOrBuilder.java. synthtool > Replaced '// Generated by the protocol buffer compiler. DO NOT EDIT!' 
in /home/kbuilder/.cache/synthtool/googleapis/artman-genfiles/java/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/DeleteJobRequestOrBuilder.java. synthtool > Replaced '// Generated by the protocol buffer compiler. DO NOT EDIT!' in /home/kbuilder/.cache/synthtool/googleapis/artman-genfiles/java/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/PigJob.java. synthtool > Replaced '// Generated by the protocol buffer compiler. DO NOT EDIT!' in /home/kbuilder/.cache/synthtool/googleapis/artman-genfiles/java/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/InstantiateWorkflowTemplateRequestOrBuilder.java. synthtool > Replaced '// Generated by the protocol buffer compiler. DO NOT EDIT!' in /home/kbuilder/.cache/synthtool/googleapis/artman-genfiles/java/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/ReservationAffinityOrBuilder.java. synthtool > Replaced '// Generated by the protocol buffer compiler. DO NOT EDIT!' in /home/kbuilder/.cache/synthtool/googleapis/artman-genfiles/java/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/OperationsProto.java. synthtool > Replaced '// Generated by the protocol buffer compiler. DO NOT EDIT!' in /home/kbuilder/.cache/synthtool/googleapis/artman-genfiles/java/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/SubmitJobRequestOrBuilder.java. synthtool > Replaced '// Generated by the protocol buffer compiler. DO NOT EDIT!' in /home/kbuilder/.cache/synthtool/googleapis/artman-genfiles/java/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/ListAutoscalingPoliciesResponseOrBuilder.java. synthtool > Replaced '// Generated by the protocol buffer compiler. DO NOT EDIT!' in /home/kbuilder/.cache/synthtool/googleapis/artman-genfiles/java/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/DiagnoseClusterResultsOrBuilder.java. 
synthtool > Replaced '// Generated by the protocol buffer compiler. DO NOT EDIT!' in /home/kbuilder/.cache/synthtool/googleapis/artman-genfiles/java/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/Cluster.java. synthtool > Replaced '// Generated by the protocol buffer compiler. DO NOT EDIT!' in /home/kbuilder/.cache/synthtool/googleapis/artman-genfiles/java/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/SoftwareConfigOrBuilder.java. synthtool > Replaced '// Generated by the protocol buffer compiler. DO NOT EDIT!' in /home/kbuilder/.cache/synthtool/googleapis/artman-genfiles/java/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/LoggingConfig.java. synthtool > Replaced '// Generated by the protocol buffer compiler. DO NOT EDIT!' in /home/kbuilder/.cache/synthtool/googleapis/artman-genfiles/java/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/UpdateClusterRequestOrBuilder.java. synthtool > Replaced '// Generated by the protocol buffer compiler. DO NOT EDIT!' in /home/kbuilder/.cache/synthtool/googleapis/artman-genfiles/java/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/WorkflowNode.java. synthtool > Replaced '// Generated by the protocol buffer compiler. DO NOT EDIT!' in /home/kbuilder/.cache/synthtool/googleapis/artman-genfiles/java/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/HadoopJob.java. synthtool > Replaced '// Generated by the protocol buffer compiler. DO NOT EDIT!' in /home/kbuilder/.cache/synthtool/googleapis/artman-genfiles/java/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/UpdateJobRequest.java. synthtool > Replaced '// Generated by the protocol buffer compiler. DO NOT EDIT!' in /home/kbuilder/.cache/synthtool/googleapis/artman-genfiles/java/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/SecurityConfigOrBuilder.java. 
synthtool > Replaced '// Generated by the protocol buffer compiler. DO NOT EDIT!' in /home/kbuilder/.cache/synthtool/googleapis/artman-genfiles/java/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/ClusterSelector.java. synthtool > Replaced '// Generated by the protocol buffer compiler. DO NOT EDIT!' in /home/kbuilder/.cache/synthtool/googleapis/artman-genfiles/java/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/UpdateJobRequestOrBuilder.java. synthtool > Replaced '// Generated by the protocol buffer compiler. DO NOT EDIT!' in /home/kbuilder/.cache/synthtool/googleapis/artman-genfiles/java/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/WorkflowMetadata.java. synthtool > Replaced '// Generated by the protocol buffer compiler. DO NOT EDIT!' in /home/kbuilder/.cache/synthtool/googleapis/artman-genfiles/java/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/ClusterStatus.java. synthtool > Replaced '// Generated by the protocol buffer compiler. DO NOT EDIT!' in /home/kbuilder/.cache/synthtool/googleapis/artman-genfiles/java/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/SubmitJobRequest.java. synthtool > Replaced '// Generated by the protocol buffer compiler. DO NOT EDIT!' in /home/kbuilder/.cache/synthtool/googleapis/artman-genfiles/java/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/DiagnoseClusterResults.java. synthtool > Replaced '// Generated by the protocol buffer compiler. DO NOT EDIT!' in /home/kbuilder/.cache/synthtool/googleapis/artman-genfiles/java/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/ListAutoscalingPoliciesRequest.java. synthtool > Replaced '// Generated by the protocol buffer compiler. DO NOT EDIT!' 
in /home/kbuilder/.cache/synthtool/googleapis/artman-genfiles/java/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/ClusterOperationMetadataOrBuilder.java. synthtool > Replaced '// Generated by the protocol buffer compiler. DO NOT EDIT!' in /home/kbuilder/.cache/synthtool/googleapis/artman-genfiles/java/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/ClusterOperation.java. synthtool > Replaced '// Generated by the protocol buffer compiler. DO NOT EDIT!' in /home/kbuilder/.cache/synthtool/googleapis/artman-genfiles/java/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/CreateClusterRequest.java. synthtool > Replaced '// Generated by the protocol buffer compiler. DO NOT EDIT!' in /home/kbuilder/.cache/synthtool/googleapis/artman-genfiles/java/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/YarnApplicationOrBuilder.java. synthtool > Replaced '// Generated by the protocol buffer compiler. DO NOT EDIT!' in /home/kbuilder/.cache/synthtool/googleapis/artman-genfiles/java/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/CancelJobRequest.java. synthtool > Replaced '// Generated by the protocol buffer compiler. DO NOT EDIT!' in /home/kbuilder/.cache/synthtool/googleapis/artman-genfiles/java/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/LifecycleConfigOrBuilder.java. synthtool > Replaced '// Generated by the protocol buffer compiler. DO NOT EDIT!' in /home/kbuilder/.cache/synthtool/googleapis/artman-genfiles/java/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/AutoscalingPoliciesProto.java. synthtool > Replaced '// Generated by the protocol buffer compiler. DO NOT EDIT!' in /home/kbuilder/.cache/synthtool/googleapis/artman-genfiles/java/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/NodeInitializationAction.java. 
synthtool > Replaced '// Generated by the protocol buffer compiler. DO NOT EDIT!' in /home/kbuilder/.cache/synthtool/googleapis/artman-genfiles/java/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/WorkflowTemplatePlacement.java. synthtool > Replaced '// Generated by the protocol buffer compiler. DO NOT EDIT!' in /home/kbuilder/.cache/synthtool/googleapis/artman-genfiles/java/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/ListJobsResponseOrBuilder.java. synthtool > Replaced '// Generated by the protocol buffer compiler. DO NOT EDIT!' in /home/kbuilder/.cache/synthtool/googleapis/artman-genfiles/java/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/ListWorkflowTemplatesRequestOrBuilder.java. synthtool > Replaced '// Generated by the protocol buffer compiler. DO NOT EDIT!' in /home/kbuilder/.cache/synthtool/googleapis/artman-genfiles/java/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/SharedProto.java. synthtool > Replaced '// Generated by the protocol buffer compiler. DO NOT EDIT!' in /home/kbuilder/.cache/synthtool/googleapis/artman-genfiles/java/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/WorkflowTemplatePlacementOrBuilder.java. synthtool > Replaced '// Generated by the protocol buffer compiler. DO NOT EDIT!' in /home/kbuilder/.cache/synthtool/googleapis/artman-genfiles/java/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/DiagnoseClusterRequest.java. synthtool > Replaced '// Generated by the protocol buffer compiler. DO NOT EDIT!' in /home/kbuilder/.cache/synthtool/googleapis/artman-genfiles/java/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/DiagnoseClusterRequestOrBuilder.java. synthtool > Replaced '// Generated by the protocol buffer compiler. DO NOT EDIT!' 
in /home/kbuilder/.cache/synthtool/googleapis/artman-genfiles/java/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/WorkflowTemplateOrBuilder.java. synthtool > Replaced '// Generated by the protocol buffer compiler. DO NOT EDIT!' in /home/kbuilder/.cache/synthtool/googleapis/artman-genfiles/java/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/AutoscalingPolicy.java. synthtool > Replaced '// Generated by the protocol buffer compiler. DO NOT EDIT!' in /home/kbuilder/.cache/synthtool/googleapis/artman-genfiles/java/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/ClusterOperationOrBuilder.java. synthtool > Replaced '// Generated by the protocol buffer compiler. DO NOT EDIT!' in /home/kbuilder/.cache/synthtool/googleapis/artman-genfiles/java/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/PySparkJobOrBuilder.java. synthtool > Replaced '// Generated by the protocol buffer compiler. DO NOT EDIT!' in /home/kbuilder/.cache/synthtool/googleapis/artman-genfiles/java/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/PrestoJobOrBuilder.java. synthtool > Replaced '// Generated by the protocol buffer compiler. DO NOT EDIT!' in /home/kbuilder/.cache/synthtool/googleapis/artman-genfiles/java/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/ReservationAffinity.java. synthtool > Replaced '// Generated by the protocol buffer compiler. DO NOT EDIT!' in /home/kbuilder/.cache/synthtool/googleapis/artman-genfiles/java/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/WorkflowGraphOrBuilder.java. synthtool > Replaced '// Generated by the protocol buffer compiler. DO NOT EDIT!' in /home/kbuilder/.cache/synthtool/googleapis/artman-genfiles/java/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/BasicYarnAutoscalingConfigOrBuilder.java. 
synthtool > Replaced '// Generated by the protocol buffer compiler. DO NOT EDIT!' in /home/kbuilder/.cache/synthtool/googleapis/artman-genfiles/java/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/ClusterConfigOrBuilder.java. synthtool > Replaced '// Generated by the protocol buffer compiler. DO NOT EDIT!' in /home/kbuilder/.cache/synthtool/googleapis/artman-genfiles/java/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/JobScheduling.java. synthtool > Replaced '// Generated by the protocol buffer compiler. DO NOT EDIT!' in /home/kbuilder/.cache/synthtool/googleapis/artman-genfiles/java/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/ListJobsResponse.java. synthtool > Replaced '// Generated by the protocol buffer compiler. DO NOT EDIT!' in /home/kbuilder/.cache/synthtool/googleapis/artman-genfiles/java/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/WorkflowTemplatesProto.java. synthtool > Replaced '// Generated by the protocol buffer compiler. DO NOT EDIT!' in /home/kbuilder/.cache/synthtool/googleapis/artman-genfiles/java/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/CreateClusterRequestOrBuilder.java. synthtool > Replaced '// Generated by the protocol buffer compiler. DO NOT EDIT!' in /home/kbuilder/.cache/synthtool/googleapis/artman-genfiles/java/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/ValueValidation.java. synthtool > Replaced '// Generated by the protocol buffer compiler. DO NOT EDIT!' in /home/kbuilder/.cache/synthtool/googleapis/artman-genfiles/java/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/ListClustersRequest.java. synthtool > Replaced '// Generated by the protocol buffer compiler. DO NOT EDIT!' 
in /home/kbuilder/.cache/synthtool/googleapis/artman-genfiles/java/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/InstanceGroupAutoscalingPolicyConfig.java. synthtool > Replaced '// Generated by the protocol buffer compiler. DO NOT EDIT!' in /home/kbuilder/.cache/synthtool/googleapis/artman-genfiles/java/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/AutoscalingConfigOrBuilder.java. synthtool > Replaced '// Generated by the protocol buffer compiler. DO NOT EDIT!' in /home/kbuilder/.cache/synthtool/googleapis/artman-genfiles/java/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/DeleteAutoscalingPolicyRequest.java. synthtool > Replaced '// Generated by the protocol buffer compiler. DO NOT EDIT!' in /home/kbuilder/.cache/synthtool/googleapis/artman-genfiles/java/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/ListClustersResponse.java. synthtool > Replaced '// Generated by the protocol buffer compiler. DO NOT EDIT!' in /home/kbuilder/.cache/synthtool/googleapis/artman-genfiles/java/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/ListAutoscalingPoliciesRequestOrBuilder.java. synthtool > Replaced '// Generated by the protocol buffer compiler. DO NOT EDIT!' in /home/kbuilder/.cache/synthtool/googleapis/artman-genfiles/java/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/PigJobOrBuilder.java. synthtool > Replaced '// Generated by the protocol buffer compiler. DO NOT EDIT!' in /home/kbuilder/.cache/synthtool/googleapis/artman-genfiles/java/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/JobSchedulingOrBuilder.java. synthtool > Replaced '// Generated by the protocol buffer compiler. DO NOT EDIT!' in /home/kbuilder/.cache/synthtool/googleapis/artman-genfiles/java/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/DeleteClusterRequest.java. 
synthtool > Replaced '// Generated by the protocol buffer compiler. DO NOT EDIT!' in /home/kbuilder/.cache/synthtool/googleapis/artman-genfiles/java/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/JobStatus.java. synthtool > Replaced '// Generated by the protocol buffer compiler. DO NOT EDIT!' in /home/kbuilder/.cache/synthtool/googleapis/artman-genfiles/java/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/ListClustersResponseOrBuilder.java. synthtool > Replaced '// Generated by the protocol buffer compiler. DO NOT EDIT!' in /home/kbuilder/.cache/synthtool/googleapis/artman-genfiles/java/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/ClusterConfig.java. synthtool > Replaced '// Generated by the protocol buffer compiler. DO NOT EDIT!' in /home/kbuilder/.cache/synthtool/googleapis/artman-genfiles/java/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/ListWorkflowTemplatesRequest.java. synthtool > Replaced '// Generated by the protocol buffer compiler. DO NOT EDIT!' in /home/kbuilder/.cache/synthtool/googleapis/artman-genfiles/java/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/WorkflowNodeOrBuilder.java. synthtool > Replaced '// Generated by the protocol buffer compiler. DO NOT EDIT!' in /home/kbuilder/.cache/synthtool/googleapis/artman-genfiles/java/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/DeleteWorkflowTemplateRequest.java. synthtool > Replaced '// Generated by the protocol buffer compiler. DO NOT EDIT!' in /home/kbuilder/.cache/synthtool/googleapis/artman-genfiles/java/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/GceClusterConfig.java. synthtool > Replaced '// Generated by the protocol buffer compiler. DO NOT EDIT!' 
in /home/kbuilder/.cache/synthtool/googleapis/artman-genfiles/java/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/NodeInitializationActionOrBuilder.java. synthtool > Replaced '// Generated by the protocol buffer compiler. DO NOT EDIT!' in /home/kbuilder/.cache/synthtool/googleapis/artman-genfiles/java/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/WorkflowTemplate.java. synthtool > Replaced '// Generated by the protocol buffer compiler. DO NOT EDIT!' in /home/kbuilder/.cache/synthtool/googleapis/artman-genfiles/java/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/GetAutoscalingPolicyRequestOrBuilder.java. synthtool > Replaced '// Generated by the protocol buffer compiler. DO NOT EDIT!' in /home/kbuilder/.cache/synthtool/googleapis/artman-genfiles/java/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/ManagedClusterOrBuilder.java. synthtool > Replaced '// Generated by the protocol buffer compiler. DO NOT EDIT!' in /home/kbuilder/.cache/synthtool/googleapis/artman-genfiles/java/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/DeleteAutoscalingPolicyRequestOrBuilder.java. synthtool > Replaced '// Generated by the protocol buffer compiler. DO NOT EDIT!' in /home/kbuilder/.cache/synthtool/googleapis/artman-genfiles/java/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/GetWorkflowTemplateRequestOrBuilder.java. synthtool > Replaced '// Generated by the protocol buffer compiler. DO NOT EDIT!' in /home/kbuilder/.cache/synthtool/googleapis/artman-genfiles/java/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/CreateWorkflowTemplateRequestOrBuilder.java. synthtool > Replaced '// Generated by the protocol buffer compiler. DO NOT EDIT!' in /home/kbuilder/.cache/synthtool/googleapis/artman-genfiles/java/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/HadoopJobOrBuilder.java. 
synthtool > Replaced '// Generated by the protocol buffer compiler. DO NOT EDIT!' in /home/kbuilder/.cache/synthtool/googleapis/artman-genfiles/java/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/JobOrBuilder.java. synthtool > Replaced '// Generated by the protocol buffer compiler. DO NOT EDIT!' in /home/kbuilder/.cache/synthtool/googleapis/artman-genfiles/java/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/JobsProto.java. synthtool > Replaced '// Generated by the protocol buffer compiler. DO NOT EDIT!' in /home/kbuilder/.cache/synthtool/googleapis/artman-genfiles/java/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/WorkflowMetadataOrBuilder.java. synthtool > Replaced '// Generated by the protocol buffer compiler. DO NOT EDIT!' in /home/kbuilder/.cache/synthtool/googleapis/artman-genfiles/java/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/CancelJobRequestOrBuilder.java. synthtool > Replaced '// Generated by the protocol buffer compiler. DO NOT EDIT!' in /home/kbuilder/.cache/synthtool/googleapis/artman-genfiles/java/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/InstanceGroupAutoscalingPolicyConfigOrBuilder.java. synthtool > Replaced '// Generated by the protocol buffer compiler. DO NOT EDIT!' in /home/kbuilder/.cache/synthtool/googleapis/artman-genfiles/java/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/GetClusterRequestOrBuilder.java. synthtool > Replaced '// Generated by the protocol buffer compiler. DO NOT EDIT!' in /home/kbuilder/.cache/synthtool/googleapis/artman-genfiles/java/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/ClusterOperationMetadata.java. synthtool > Replaced '// Generated by the protocol buffer compiler. DO NOT EDIT!' 
in /home/kbuilder/.cache/synthtool/googleapis/artman-genfiles/java/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/TemplateParameter.java. synthtool > Replaced '// Generated by the protocol buffer compiler. DO NOT EDIT!' in /home/kbuilder/.cache/synthtool/googleapis/artman-genfiles/java/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/OrderedJob.java. synthtool > Replaced '// Generated by the protocol buffer compiler. DO NOT EDIT!' in /home/kbuilder/.cache/synthtool/googleapis/artman-genfiles/java/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/WorkflowGraph.java. synthtool > Replaced '// Generated by the protocol buffer compiler. DO NOT EDIT!' in /home/kbuilder/.cache/synthtool/googleapis/artman-genfiles/java/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/QueryListOrBuilder.java. synthtool > Replaced '// Generated by the protocol buffer compiler. DO NOT EDIT!' in /home/kbuilder/.cache/synthtool/googleapis/artman-genfiles/java/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/EncryptionConfigOrBuilder.java. synthtool > Replaced '// Generated by the protocol buffer compiler. DO NOT EDIT!' in /home/kbuilder/.cache/synthtool/googleapis/artman-genfiles/java/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/HiveJobOrBuilder.java. synthtool > Replaced '// Generated by the protocol buffer compiler. DO NOT EDIT!' in /home/kbuilder/.cache/synthtool/googleapis/artman-genfiles/java/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/UpdateClusterRequest.java. synthtool > Replaced '// Generated by the protocol buffer compiler. DO NOT EDIT!' in /home/kbuilder/.cache/synthtool/googleapis/artman-genfiles/java/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/UpdateWorkflowTemplateRequestOrBuilder.java. synthtool > Replaced '// Generated by the protocol buffer compiler. DO NOT EDIT!' 
in /home/kbuilder/.cache/synthtool/googleapis/artman-genfiles/java/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/JobPlacementOrBuilder.java. synthtool > Replaced '// Generated by the protocol buffer compiler. DO NOT EDIT!' in /home/kbuilder/.cache/synthtool/googleapis/artman-genfiles/java/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/AcceleratorConfigOrBuilder.java. synthtool > Replaced '// Generated by the protocol buffer compiler. DO NOT EDIT!' in /home/kbuilder/.cache/synthtool/googleapis/artman-genfiles/java/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/TemplateParameterOrBuilder.java. synthtool > Replaced '// Generated by the protocol buffer compiler. DO NOT EDIT!' in /home/kbuilder/.cache/synthtool/googleapis/artman-genfiles/java/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/CreateAutoscalingPolicyRequestOrBuilder.java. synthtool > Replaced '// Generated by the protocol buffer compiler. DO NOT EDIT!' in /home/kbuilder/.cache/synthtool/googleapis/artman-genfiles/java/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/KerberosConfig.java. synthtool > Replaced '// Generated by the protocol buffer compiler. DO NOT EDIT!' in /home/kbuilder/.cache/synthtool/googleapis/artman-genfiles/java/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/AutoscalingConfig.java. synthtool > Replaced '// Generated by the protocol buffer compiler. DO NOT EDIT!' in /home/kbuilder/.cache/synthtool/googleapis/artman-genfiles/java/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/AutoscalingPolicyOrBuilder.java. synthtool > Replaced '// Generated by the protocol buffer compiler. DO NOT EDIT!' in /home/kbuilder/.cache/synthtool/googleapis/artman-genfiles/java/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/SparkSqlJob.java. 
synthtool > Replaced '// Generated by the protocol buffer compiler. DO NOT EDIT!' in /home/kbuilder/.cache/synthtool/googleapis/artman-genfiles/java/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/GetClusterRequest.java. synthtool > Replaced '// Generated by the protocol buffer compiler. DO NOT EDIT!' in /home/kbuilder/.cache/synthtool/googleapis/artman-genfiles/java/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/ClusterStatusOrBuilder.java. synthtool > Replaced '// Generated by the protocol buffer compiler. DO NOT EDIT!' in /home/kbuilder/.cache/synthtool/googleapis/artman-genfiles/java/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/UpdateWorkflowTemplateRequest.java. synthtool > Replaced '// Generated by the protocol buffer compiler. DO NOT EDIT!' in /home/kbuilder/.cache/synthtool/googleapis/artman-genfiles/java/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/ListClustersRequestOrBuilder.java. synthtool > Replaced '// Generated by the protocol buffer compiler. DO NOT EDIT!' in /home/kbuilder/.cache/synthtool/googleapis/artman-genfiles/java/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/RegexValidationOrBuilder.java. synthtool > Replaced '// Generated by the protocol buffer compiler. DO NOT EDIT!' in /home/kbuilder/.cache/synthtool/googleapis/artman-genfiles/java/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/ClustersProto.java. synthtool > Replaced '// Generated by the protocol buffer compiler. DO NOT EDIT!' in /home/kbuilder/.cache/synthtool/googleapis/artman-genfiles/java/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/ParameterValidationOrBuilder.java. synthtool > Replaced '// Generated by the protocol buffer compiler. DO NOT EDIT!' 
in /home/kbuilder/.cache/synthtool/googleapis/artman-genfiles/java/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/ListWorkflowTemplatesResponse.java. synthtool > Replaced '// Generated by the protocol buffer compiler. DO NOT EDIT!' in /home/kbuilder/.cache/synthtool/googleapis/artman-genfiles/java/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/InstanceGroupConfig.java. synthtool > Replaced '// Generated by the protocol buffer compiler. DO NOT EDIT!' in /home/kbuilder/.cache/synthtool/googleapis/artman-genfiles/java/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/SoftwareConfig.java. synthtool > Replaced '// Generated by the protocol buffer compiler. DO NOT EDIT!' in /home/kbuilder/.cache/synthtool/googleapis/artman-genfiles/java/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/ClusterMetrics.java. synthtool > Replaced '// Generated by the protocol buffer compiler. DO NOT EDIT!' in /home/kbuilder/.cache/synthtool/googleapis/artman-genfiles/java/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/UpdateAutoscalingPolicyRequest.java. synthtool > Replaced '// Generated by the protocol buffer compiler. DO NOT EDIT!' in /home/kbuilder/.cache/synthtool/googleapis/artman-genfiles/java/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/OrderedJobOrBuilder.java. synthtool > Replaced '// Generated by the protocol buffer compiler. DO NOT EDIT!' in /home/kbuilder/.cache/synthtool/googleapis/artman-genfiles/java/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/SparkSqlJobOrBuilder.java. synthtool > Replaced '// Generated by the protocol buffer compiler. DO NOT EDIT!' in /home/kbuilder/.cache/synthtool/googleapis/artman-genfiles/java/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/ManagedGroupConfig.java. synthtool > Replaced '// Generated by the protocol buffer compiler. 
DO NOT EDIT!' in /home/kbuilder/.cache/synthtool/googleapis/artman-genfiles/java/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/ManagedGroupConfigOrBuilder.java. synthtool > Replaced '// Generated by the protocol buffer compiler. DO NOT EDIT!' in /home/kbuilder/.cache/synthtool/googleapis/artman-genfiles/java/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/BasicAutoscalingAlgorithmOrBuilder.java. synthtool > Replaced '// Generated by the protocol buffer compiler. DO NOT EDIT!' in /home/kbuilder/.cache/synthtool/googleapis/artman-genfiles/java/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/ListAutoscalingPoliciesResponse.java. synthtool > Replaced '// Generated by the protocol buffer compiler. DO NOT EDIT!' in /home/kbuilder/.cache/synthtool/googleapis/artman-genfiles/java/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/ValueValidationOrBuilder.java. synthtool > Replaced '// Generated by the protocol buffer compiler. DO NOT EDIT!' in /home/kbuilder/.cache/synthtool/googleapis/artman-genfiles/java/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/Job.java. synthtool > Replaced '// Generated by the protocol buffer compiler. DO NOT EDIT!' in /home/kbuilder/.cache/synthtool/googleapis/artman-genfiles/java/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/PySparkJob.java. synthtool > Replaced '// Generated by the protocol buffer compiler. DO NOT EDIT!' in /home/kbuilder/.cache/synthtool/googleapis/artman-genfiles/java/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/ManagedCluster.java. synthtool > Replaced '// Generated by the protocol buffer compiler. DO NOT EDIT!' in /home/kbuilder/.cache/synthtool/googleapis/artman-genfiles/java/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/CreateAutoscalingPolicyRequest.java. 
synthtool > Replaced '// Generated by the protocol buffer compiler. DO NOT EDIT!' in /home/kbuilder/.cache/synthtool/googleapis/artman-genfiles/java/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/SparkRJobOrBuilder.java. synthtool > Replaced '// Generated by the protocol buffer compiler. DO NOT EDIT!' in /home/kbuilder/.cache/synthtool/googleapis/artman-genfiles/java/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/ClusterOperationStatusOrBuilder.java. synthtool > Replaced '// Generated by the protocol buffer compiler. DO NOT EDIT!' in /home/kbuilder/.cache/synthtool/googleapis/artman-genfiles/java/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/BasicYarnAutoscalingConfig.java. synthtool > Replaced '// Generated by the protocol buffer compiler. DO NOT EDIT!' in /home/kbuilder/.cache/synthtool/googleapis/artman-genfiles/java/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/BasicAutoscalingAlgorithm.java. synthtool > Replaced '// Generated by the protocol buffer compiler. DO NOT EDIT!' in /home/kbuilder/.cache/synthtool/googleapis/artman-genfiles/java/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/GetWorkflowTemplateRequest.java. synthtool > Replaced '// Generated by the protocol buffer compiler. DO NOT EDIT!' in /home/kbuilder/.cache/synthtool/googleapis/artman-genfiles/java/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/DeleteClusterRequestOrBuilder.java. synthtool > Replaced '// Generated by the protocol buffer compiler. DO NOT EDIT!' in /home/kbuilder/.cache/synthtool/googleapis/artman-genfiles/java/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/AcceleratorConfig.java. synthtool > Replaced '// Generated by the protocol buffer compiler. DO NOT EDIT!' 
in /home/kbuilder/.cache/synthtool/googleapis/artman-genfiles/java/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/InstantiateInlineWorkflowTemplateRequestOrBuilder.java. synthtool > Replaced '// Generated by the protocol buffer compiler. DO NOT EDIT!' in /home/kbuilder/.cache/synthtool/googleapis/artman-genfiles/java/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/Component.java. synthtool > Replaced '// Generated by the protocol buffer compiler. DO NOT EDIT!' in /home/kbuilder/.cache/synthtool/googleapis/artman-genfiles/java/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/JobPlacement.java. synthtool > Replaced '// Generated by the protocol buffer compiler. DO NOT EDIT!' in /home/kbuilder/.cache/synthtool/googleapis/artman-genfiles/java/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/CreateWorkflowTemplateRequest.java. synthtool > Replaced '// Generated by the protocol buffer compiler. DO NOT EDIT!' in /home/kbuilder/.cache/synthtool/googleapis/artman-genfiles/java/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/JobStatusOrBuilder.java. synthtool > Replaced '// Generated by the protocol buffer compiler. DO NOT EDIT!' in /home/kbuilder/.cache/synthtool/googleapis/artman-genfiles/java/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/HiveJob.java. synthtool > Replaced '// Generated by the protocol buffer compiler. DO NOT EDIT!' in /home/kbuilder/.cache/synthtool/googleapis/artman-genfiles/java/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/ClusterOrBuilder.java. synthtool > Replaced '// Generated by the protocol buffer compiler. DO NOT EDIT!' in /home/kbuilder/.cache/synthtool/googleapis/artman-genfiles/java/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/YarnApplication.java. synthtool > Replaced '// Generated by the protocol buffer compiler. DO NOT EDIT!' 
in /home/kbuilder/.cache/synthtool/googleapis/artman-genfiles/java/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/InstantiateInlineWorkflowTemplateRequest.java. synthtool > Replaced '// Generated by the protocol buffer compiler. DO NOT EDIT!' in /home/kbuilder/.cache/synthtool/googleapis/artman-genfiles/java/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/QueryList.java. synthtool > Replaced '// Generated by the protocol buffer compiler. DO NOT EDIT!' in /home/kbuilder/.cache/synthtool/googleapis/artman-genfiles/java/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/ClusterSelectorOrBuilder.java. synthtool > Replaced '// Generated by the protocol buffer compiler. DO NOT EDIT!' in /home/kbuilder/.cache/synthtool/googleapis/artman-genfiles/java/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/ClusterOperationStatus.java. synthtool > Replaced '// Generated by the protocol buffer compiler. DO NOT EDIT!' in /home/kbuilder/.cache/synthtool/googleapis/artman-genfiles/java/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/GceClusterConfigOrBuilder.java. synthtool > Replaced '// Generated by the protocol buffer compiler. DO NOT EDIT!' in /home/kbuilder/.cache/synthtool/googleapis/artman-genfiles/java/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/SparkJob.java. synthtool > Replaced '// Generated by the protocol buffer compiler. DO NOT EDIT!' in /home/kbuilder/.cache/synthtool/googleapis/artman-genfiles/java/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/RegexValidation.java. synthtool > Replaced '// Generated by the protocol buffer compiler. DO NOT EDIT!' in /home/kbuilder/.cache/synthtool/googleapis/artman-genfiles/java/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/ListWorkflowTemplatesResponseOrBuilder.java. 
synthtool > Replaced '// Generated by the protocol buffer compiler. DO NOT EDIT!' in /home/kbuilder/.cache/synthtool/googleapis/artman-genfiles/java/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/PrestoJob.java. synthtool > Replaced '// Generated by the protocol buffer compiler. DO NOT EDIT!' in /home/kbuilder/.cache/synthtool/googleapis/artman-genfiles/java/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/SecurityConfig.java. synthtool > Replaced '// Generated by the protocol buffer compiler. DO NOT EDIT!' in /home/kbuilder/.cache/synthtool/googleapis/artman-genfiles/java/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/DiskConfig.java. synthtool > Replaced '// Generated by the protocol buffer compiler. DO NOT EDIT!' in /home/kbuilder/.cache/synthtool/googleapis/artman-genfiles/java/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/ListJobsRequest.java. synthtool > Replaced '// Generated by the protocol buffer compiler. DO NOT EDIT!' in /home/kbuilder/.cache/synthtool/googleapis/artman-genfiles/java/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/EncryptionConfig.java. synthtool > Replaced '// Generated by the protocol buffer compiler. DO NOT EDIT!' in /home/kbuilder/.cache/synthtool/googleapis/artman-genfiles/java/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/DiskConfigOrBuilder.java. synthtool > Replaced '// Generated by the protocol buffer compiler. DO NOT EDIT!' in /home/kbuilder/.cache/synthtool/googleapis/artman-genfiles/java/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/SparkJobOrBuilder.java. synthtool > Replaced '// Generated by the protocol buffer compiler. DO NOT EDIT!' in /home/kbuilder/.cache/synthtool/googleapis/artman-genfiles/java/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/LoggingConfigOrBuilder.java. 
synthtool > Replaced '// Generated by the protocol buffer compiler. DO NOT EDIT!' in /home/kbuilder/.cache/synthtool/googleapis/artman-genfiles/java/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/JobReferenceOrBuilder.java. synthtool > Replaced '// Generated by the protocol buffer compiler. DO NOT EDIT!' in /home/kbuilder/.cache/synthtool/googleapis/artman-genfiles/java/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/GetJobRequest.java. synthtool > Replaced '// Generated by the protocol buffer compiler. DO NOT EDIT!' in /home/kbuilder/.cache/synthtool/googleapis/artman-genfiles/java/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/JobReference.java. synthtool > Replaced '// Generated by the protocol buffer compiler. DO NOT EDIT!' in /home/kbuilder/.cache/synthtool/googleapis/artman-genfiles/java/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/ClusterMetricsOrBuilder.java. synthtool > Replaced '/\\*\n \\* Copyright \\d{4} Google LLC\n \\*\n \\* Licensed under the Apache License, Version 2.0 \\(the "License"\\); you may not use this file except\n \\* in compliance with the License. You may obtain a copy of the License at\n \\*\n \\* http://www.apache.org/licenses/LICENSE-2.0\n \\*\n \\* Unless required by applicable law or agreed to in writing, software distributed under the License\n \\* is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express\n \\* or implied. See the License for the specific language governing permissions and limitations under\n \\* the License.\n \\*/\n' in /home/kbuilder/.cache/synthtool/googleapis/artman-genfiles/java/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/WorkflowTemplateName.java. 
synthtool > Replaced '/\\*\n \\* Copyright \\d{4} Google LLC\n \\*\n \\* Licensed under the Apache License, Version 2.0 \\(the "License"\\); you may not use this file except\n \\* in compliance with the License. You may obtain a copy of the License at\n \\*\n \\* http://www.apache.org/licenses/LICENSE-2.0\n \\*\n \\* Unless required by applicable law or agreed to in writing, software distributed under the License\n \\* is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express\n \\* or implied. See the License for the specific language governing permissions and limitations under\n \\* the License.\n \\*/\n' in /home/kbuilder/.cache/synthtool/googleapis/artman-genfiles/java/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/AutoscalingPolicyName.java. synthtool > Replaced '/\\*\n \\* Copyright \\d{4} Google LLC\n \\*\n \\* Licensed under the Apache License, Version 2.0 \\(the "License"\\); you may not use this file except\n \\* in compliance with the License. You may obtain a copy of the License at\n \\*\n \\* http://www.apache.org/licenses/LICENSE-2.0\n \\*\n \\* Unless required by applicable law or agreed to in writing, software distributed under the License\n \\* is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express\n \\* or implied. See the License for the specific language governing permissions and limitations under\n \\* the License.\n \\*/\n' in /home/kbuilder/.cache/synthtool/googleapis/artman-genfiles/java/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/RegionName.java. synthtool > Replaced 'package com.google.cloud.dataproc.v1;' in /home/kbuilder/.cache/synthtool/googleapis/artman-genfiles/java/grpc-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/AutoscalingPolicyServiceGrpc.java. 
synthtool > Replaced 'package com.google.cloud.dataproc.v1;' in /home/kbuilder/.cache/synthtool/googleapis/artman-genfiles/java/grpc-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/ClusterControllerGrpc.java. synthtool > Replaced 'package com.google.cloud.dataproc.v1;' in /home/kbuilder/.cache/synthtool/googleapis/artman-genfiles/java/grpc-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/JobControllerGrpc.java. synthtool > Replaced 'package com.google.cloud.dataproc.v1;' in /home/kbuilder/.cache/synthtool/googleapis/artman-genfiles/java/grpc-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/WorkflowTemplateServiceGrpc.java. synthtool > No files in sources [PosixPath('/home/kbuilder/.cache/synthtool/googleapis/artman-genfiles/java/gapic-google-cloud-dataproc-v1/samples/src')] were copied. Does the source contain files? synthtool > No files in sources [PosixPath('/home/kbuilder/.cache/synthtool/googleapis/artman-genfiles/java/gapic-google-cloud-dataproc-v1/samples/resources')] were copied. Does the source contain files? synthtool > No files in sources [PosixPath('/home/kbuilder/.cache/synthtool/googleapis/artman-genfiles/java/gapic-google-cloud-dataproc-v1/samples/src/**/*.manifest.yaml')] were copied. Does the source contain files? synthtool > Running java formatter on 78 files synthtool > Running java formatter on 4 files synthtool > Running java formatter on 172 files synthtool > Running java formatter on 0 files synthtool > Running generator for google/cloud/dataproc/artman_dataproc_v1beta2.yaml. synthtool > Generated code into /home/kbuilder/.cache/synthtool/googleapis/artman-genfiles/java. synthtool > Replaced '// Generated by the protocol buffer compiler. DO NOT EDIT!' in /home/kbuilder/.cache/synthtool/googleapis/artman-genfiles/java/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/GetAutoscalingPolicyRequest.java. 
synthtool > Replaced '// Generated by the protocol buffer compiler. DO NOT EDIT!' in /home/kbuilder/.cache/synthtool/googleapis/artman-genfiles/java/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/KerberosConfigOrBuilder.java. synthtool > Replaced '// Generated by the protocol buffer compiler. DO NOT EDIT!' in /home/kbuilder/.cache/synthtool/googleapis/artman-genfiles/java/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/SparkRJob.java. synthtool > Replaced '// Generated by the protocol buffer compiler. DO NOT EDIT!' in /home/kbuilder/.cache/synthtool/googleapis/artman-genfiles/java/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/ListJobsRequestOrBuilder.java. synthtool > Replaced '// Generated by the protocol buffer compiler. DO NOT EDIT!' in /home/kbuilder/.cache/synthtool/googleapis/artman-genfiles/java/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/InstantiateWorkflowTemplateRequest.java. synthtool > Replaced '// Generated by the protocol buffer compiler. DO NOT EDIT!' in /home/kbuilder/.cache/synthtool/googleapis/artman-genfiles/java/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/LifecycleConfig.java. synthtool > Replaced '// Generated by the protocol buffer compiler. DO NOT EDIT!' in /home/kbuilder/.cache/synthtool/googleapis/artman-genfiles/java/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/DeleteWorkflowTemplateRequestOrBuilder.java. synthtool > Replaced '// Generated by the protocol buffer compiler. DO NOT EDIT!' in /home/kbuilder/.cache/synthtool/googleapis/artman-genfiles/java/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/DeleteJobRequest.java. synthtool > Replaced '// Generated by the protocol buffer compiler. DO NOT EDIT!' 
in /home/kbuilder/.cache/synthtool/googleapis/artman-genfiles/java/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/GetJobRequestOrBuilder.java. synthtool > Replaced '// Generated by the protocol buffer compiler. DO NOT EDIT!' in /home/kbuilder/.cache/synthtool/googleapis/artman-genfiles/java/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/InstanceGroupConfigOrBuilder.java. synthtool > Replaced '// Generated by the protocol buffer compiler. DO NOT EDIT!' in /home/kbuilder/.cache/synthtool/googleapis/artman-genfiles/java/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/ParameterValidation.java. synthtool > Replaced '// Generated by the protocol buffer compiler. DO NOT EDIT!' in /home/kbuilder/.cache/synthtool/googleapis/artman-genfiles/java/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/UpdateAutoscalingPolicyRequestOrBuilder.java. synthtool > Replaced '// Generated by the protocol buffer compiler. DO NOT EDIT!' in /home/kbuilder/.cache/synthtool/googleapis/artman-genfiles/java/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/DeleteJobRequestOrBuilder.java. synthtool > Replaced '// Generated by the protocol buffer compiler. DO NOT EDIT!' in /home/kbuilder/.cache/synthtool/googleapis/artman-genfiles/java/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/PigJob.java. synthtool > Replaced '// Generated by the protocol buffer compiler. DO NOT EDIT!' in /home/kbuilder/.cache/synthtool/googleapis/artman-genfiles/java/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/InstantiateWorkflowTemplateRequestOrBuilder.java. synthtool > Replaced '// Generated by the protocol buffer compiler. DO NOT EDIT!' 
in /home/kbuilder/.cache/synthtool/googleapis/artman-genfiles/java/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/ReservationAffinityOrBuilder.java. synthtool > Replaced '// Generated by the protocol buffer compiler. DO NOT EDIT!' in /home/kbuilder/.cache/synthtool/googleapis/artman-genfiles/java/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/OperationsProto.java. synthtool > Replaced '// Generated by the protocol buffer compiler. DO NOT EDIT!' in /home/kbuilder/.cache/synthtool/googleapis/artman-genfiles/java/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/SubmitJobRequestOrBuilder.java. synthtool > Replaced '// Generated by the protocol buffer compiler. DO NOT EDIT!' in /home/kbuilder/.cache/synthtool/googleapis/artman-genfiles/java/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/ListAutoscalingPoliciesResponseOrBuilder.java. synthtool > Replaced '// Generated by the protocol buffer compiler. DO NOT EDIT!' in /home/kbuilder/.cache/synthtool/googleapis/artman-genfiles/java/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/DiagnoseClusterResultsOrBuilder.java. synthtool > Replaced '// Generated by the protocol buffer compiler. DO NOT EDIT!' in /home/kbuilder/.cache/synthtool/googleapis/artman-genfiles/java/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/Cluster.java. synthtool > Replaced '// Generated by the protocol buffer compiler. DO NOT EDIT!' in /home/kbuilder/.cache/synthtool/googleapis/artman-genfiles/java/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/SoftwareConfigOrBuilder.java. synthtool > Replaced '// Generated by the protocol buffer compiler. DO NOT EDIT!' 
in /home/kbuilder/.cache/synthtool/googleapis/artman-genfiles/java/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/LoggingConfig.java. synthtool > Replaced '// Generated by the protocol buffer compiler. DO NOT EDIT!' in /home/kbuilder/.cache/synthtool/googleapis/artman-genfiles/java/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/UpdateClusterRequestOrBuilder.java. synthtool > Replaced '// Generated by the protocol buffer compiler. DO NOT EDIT!' in /home/kbuilder/.cache/synthtool/googleapis/artman-genfiles/java/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/WorkflowNode.java. synthtool > Replaced '// Generated by the protocol buffer compiler. DO NOT EDIT!' in /home/kbuilder/.cache/synthtool/googleapis/artman-genfiles/java/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/HadoopJob.java. synthtool > Replaced '// Generated by the protocol buffer compiler. DO NOT EDIT!' in /home/kbuilder/.cache/synthtool/googleapis/artman-genfiles/java/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/UpdateJobRequest.java. synthtool > Replaced '// Generated by the protocol buffer compiler. DO NOT EDIT!' in /home/kbuilder/.cache/synthtool/googleapis/artman-genfiles/java/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/SecurityConfigOrBuilder.java. synthtool > Replaced '// Generated by the protocol buffer compiler. DO NOT EDIT!' in /home/kbuilder/.cache/synthtool/googleapis/artman-genfiles/java/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/ClusterSelector.java. synthtool > Replaced '// Generated by the protocol buffer compiler. DO NOT EDIT!' in /home/kbuilder/.cache/synthtool/googleapis/artman-genfiles/java/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/UpdateJobRequestOrBuilder.java. 
synthtool > Replaced '// Generated by the protocol buffer compiler. DO NOT EDIT!' in /home/kbuilder/.cache/synthtool/googleapis/artman-genfiles/java/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/WorkflowMetadata.java. synthtool > Replaced '// Generated by the protocol buffer compiler. DO NOT EDIT!' in /home/kbuilder/.cache/synthtool/googleapis/artman-genfiles/java/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/ClusterStatus.java. synthtool > Replaced '// Generated by the protocol buffer compiler. DO NOT EDIT!' in /home/kbuilder/.cache/synthtool/googleapis/artman-genfiles/java/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/SubmitJobRequest.java. synthtool > Replaced '// Generated by the protocol buffer compiler. DO NOT EDIT!' in /home/kbuilder/.cache/synthtool/googleapis/artman-genfiles/java/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/DiagnoseClusterResults.java. synthtool > Replaced '// Generated by the protocol buffer compiler. DO NOT EDIT!' in /home/kbuilder/.cache/synthtool/googleapis/artman-genfiles/java/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/ListAutoscalingPoliciesRequest.java. synthtool > Replaced '// Generated by the protocol buffer compiler. DO NOT EDIT!' in /home/kbuilder/.cache/synthtool/googleapis/artman-genfiles/java/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/ClusterOperationMetadataOrBuilder.java. synthtool > Replaced '// Generated by the protocol buffer compiler. DO NOT EDIT!' in /home/kbuilder/.cache/synthtool/googleapis/artman-genfiles/java/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/ClusterOperation.java. synthtool > Replaced '// Generated by the protocol buffer compiler. DO NOT EDIT!' 
in /home/kbuilder/.cache/synthtool/googleapis/artman-genfiles/java/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/CreateClusterRequest.java. synthtool > Replaced '// Generated by the protocol buffer compiler. DO NOT EDIT!' in /home/kbuilder/.cache/synthtool/googleapis/artman-genfiles/java/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/YarnApplicationOrBuilder.java. synthtool > Replaced '// Generated by the protocol buffer compiler. DO NOT EDIT!' in /home/kbuilder/.cache/synthtool/googleapis/artman-genfiles/java/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/CancelJobRequest.java. synthtool > Replaced '// Generated by the protocol buffer compiler. DO NOT EDIT!' in /home/kbuilder/.cache/synthtool/googleapis/artman-genfiles/java/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/LifecycleConfigOrBuilder.java. synthtool > Replaced '// Generated by the protocol buffer compiler. DO NOT EDIT!' in /home/kbuilder/.cache/synthtool/googleapis/artman-genfiles/java/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/AutoscalingPoliciesProto.java. synthtool > Replaced '// Generated by the protocol buffer compiler. DO NOT EDIT!' in /home/kbuilder/.cache/synthtool/googleapis/artman-genfiles/java/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/NodeInitializationAction.java. synthtool > Replaced '// Generated by the protocol buffer compiler. DO NOT EDIT!' in /home/kbuilder/.cache/synthtool/googleapis/artman-genfiles/java/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/WorkflowTemplatePlacement.java. synthtool > Replaced '// Generated by the protocol buffer compiler. DO NOT EDIT!' 
in /home/kbuilder/.cache/synthtool/googleapis/artman-genfiles/java/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/ListJobsResponseOrBuilder.java. synthtool > Replaced '// Generated by the protocol buffer compiler. DO NOT EDIT!' in /home/kbuilder/.cache/synthtool/googleapis/artman-genfiles/java/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/ListWorkflowTemplatesRequestOrBuilder.java. synthtool > Replaced '// Generated by the protocol buffer compiler. DO NOT EDIT!' in /home/kbuilder/.cache/synthtool/googleapis/artman-genfiles/java/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/SharedProto.java. synthtool > Replaced '// Generated by the protocol buffer compiler. DO NOT EDIT!' in /home/kbuilder/.cache/synthtool/googleapis/artman-genfiles/java/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/WorkflowTemplatePlacementOrBuilder.java. synthtool > Replaced '// Generated by the protocol buffer compiler. DO NOT EDIT!' in /home/kbuilder/.cache/synthtool/googleapis/artman-genfiles/java/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/EndpointConfigOrBuilder.java. synthtool > Replaced '// Generated by the protocol buffer compiler. DO NOT EDIT!' in /home/kbuilder/.cache/synthtool/googleapis/artman-genfiles/java/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/DiagnoseClusterRequest.java. synthtool > Replaced '// Generated by the protocol buffer compiler. DO NOT EDIT!' in /home/kbuilder/.cache/synthtool/googleapis/artman-genfiles/java/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/DiagnoseClusterRequestOrBuilder.java. synthtool > Replaced '// Generated by the protocol buffer compiler. DO NOT EDIT!' 
in /home/kbuilder/.cache/synthtool/googleapis/artman-genfiles/java/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/WorkflowTemplateOrBuilder.java. synthtool > Replaced '// Generated by the protocol buffer compiler. DO NOT EDIT!' in /home/kbuilder/.cache/synthtool/googleapis/artman-genfiles/java/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/AutoscalingPolicy.java. synthtool > Replaced '// Generated by the protocol buffer compiler. DO NOT EDIT!' in /home/kbuilder/.cache/synthtool/googleapis/artman-genfiles/java/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/ClusterOperationOrBuilder.java. synthtool > Replaced '// Generated by the protocol buffer compiler. DO NOT EDIT!' in /home/kbuilder/.cache/synthtool/googleapis/artman-genfiles/java/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/PySparkJobOrBuilder.java. synthtool > Replaced '// Generated by the protocol buffer compiler. DO NOT EDIT!' in /home/kbuilder/.cache/synthtool/googleapis/artman-genfiles/java/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/ReservationAffinity.java. synthtool > Replaced '// Generated by the protocol buffer compiler. DO NOT EDIT!' in /home/kbuilder/.cache/synthtool/googleapis/artman-genfiles/java/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/WorkflowGraphOrBuilder.java. synthtool > Replaced '// Generated by the protocol buffer compiler. DO NOT EDIT!' in /home/kbuilder/.cache/synthtool/googleapis/artman-genfiles/java/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/BasicYarnAutoscalingConfigOrBuilder.java. synthtool > Replaced '// Generated by the protocol buffer compiler. DO NOT EDIT!' 
in /home/kbuilder/.cache/synthtool/googleapis/artman-genfiles/java/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/ClusterConfigOrBuilder.java. synthtool > Replaced '// Generated by the protocol buffer compiler. DO NOT EDIT!' in /home/kbuilder/.cache/synthtool/googleapis/artman-genfiles/java/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/JobScheduling.java. synthtool > Replaced '// Generated by the protocol buffer compiler. DO NOT EDIT!' in /home/kbuilder/.cache/synthtool/googleapis/artman-genfiles/java/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/ListJobsResponse.java. synthtool > Replaced '// Generated by the protocol buffer compiler. DO NOT EDIT!' in /home/kbuilder/.cache/synthtool/googleapis/artman-genfiles/java/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/WorkflowTemplatesProto.java. synthtool > Replaced '// Generated by the protocol buffer compiler. DO NOT EDIT!' in /home/kbuilder/.cache/synthtool/googleapis/artman-genfiles/java/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/CreateClusterRequestOrBuilder.java. synthtool > Replaced '// Generated by the protocol buffer compiler. DO NOT EDIT!' in /home/kbuilder/.cache/synthtool/googleapis/artman-genfiles/java/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/ValueValidation.java. synthtool > Replaced '// Generated by the protocol buffer compiler. DO NOT EDIT!' in /home/kbuilder/.cache/synthtool/googleapis/artman-genfiles/java/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/ListClustersRequest.java. synthtool > Replaced '// Generated by the protocol buffer compiler. DO NOT EDIT!' 
in /home/kbuilder/.cache/synthtool/googleapis/artman-genfiles/java/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/InstanceGroupAutoscalingPolicyConfig.java. synthtool > Replaced '// Generated by the protocol buffer compiler. DO NOT EDIT!' in /home/kbuilder/.cache/synthtool/googleapis/artman-genfiles/java/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/AutoscalingConfigOrBuilder.java. synthtool > Replaced '// Generated by the protocol buffer compiler. DO NOT EDIT!' in /home/kbuilder/.cache/synthtool/googleapis/artman-genfiles/java/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/DeleteAutoscalingPolicyRequest.java. synthtool > Replaced '// Generated by the protocol buffer compiler. DO NOT EDIT!' in /home/kbuilder/.cache/synthtool/googleapis/artman-genfiles/java/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/ListClustersResponse.java. synthtool > Replaced '// Generated by the protocol buffer compiler. DO NOT EDIT!' in /home/kbuilder/.cache/synthtool/googleapis/artman-genfiles/java/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/ListAutoscalingPoliciesRequestOrBuilder.java. synthtool > Replaced '// Generated by the protocol buffer compiler. DO NOT EDIT!' in /home/kbuilder/.cache/synthtool/googleapis/artman-genfiles/java/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/PigJobOrBuilder.java. synthtool > Replaced '// Generated by the protocol buffer compiler. DO NOT EDIT!' in /home/kbuilder/.cache/synthtool/googleapis/artman-genfiles/java/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/JobSchedulingOrBuilder.java. synthtool > Replaced '// Generated by the protocol buffer compiler. DO NOT EDIT!' 
in /home/kbuilder/.cache/synthtool/googleapis/artman-genfiles/java/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/DeleteClusterRequest.java. synthtool > Replaced '// Generated by the protocol buffer compiler. DO NOT EDIT!' in /home/kbuilder/.cache/synthtool/googleapis/artman-genfiles/java/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/JobStatus.java. synthtool > Replaced '// Generated by the protocol buffer compiler. DO NOT EDIT!' in /home/kbuilder/.cache/synthtool/googleapis/artman-genfiles/java/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/ListClustersResponseOrBuilder.java. synthtool > Replaced '// Generated by the protocol buffer compiler. DO NOT EDIT!' in /home/kbuilder/.cache/synthtool/googleapis/artman-genfiles/java/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/ClusterConfig.java. synthtool > Replaced '// Generated by the protocol buffer compiler. DO NOT EDIT!' in /home/kbuilder/.cache/synthtool/googleapis/artman-genfiles/java/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/ListWorkflowTemplatesRequest.java. synthtool > Replaced '// Generated by the protocol buffer compiler. DO NOT EDIT!' in /home/kbuilder/.cache/synthtool/googleapis/artman-genfiles/java/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/WorkflowNodeOrBuilder.java. synthtool > Replaced '// Generated by the protocol buffer compiler. DO NOT EDIT!' in /home/kbuilder/.cache/synthtool/googleapis/artman-genfiles/java/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/DeleteWorkflowTemplateRequest.java. synthtool > Replaced '// Generated by the protocol buffer compiler. DO NOT EDIT!' 
in /home/kbuilder/.cache/synthtool/googleapis/artman-genfiles/java/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/GceClusterConfig.java. synthtool > Replaced '// Generated by the protocol buffer compiler. DO NOT EDIT!' in /home/kbuilder/.cache/synthtool/googleapis/artman-genfiles/java/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/NodeInitializationActionOrBuilder.java. synthtool > Replaced '// Generated by the protocol buffer compiler. DO NOT EDIT!' in /home/kbuilder/.cache/synthtool/googleapis/artman-genfiles/java/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/WorkflowTemplate.java. synthtool > Replaced '// Generated by the protocol buffer compiler. DO NOT EDIT!' in /home/kbuilder/.cache/synthtool/googleapis/artman-genfiles/java/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/GetAutoscalingPolicyRequestOrBuilder.java. synthtool > Replaced '// Generated by the protocol buffer compiler. DO NOT EDIT!' in /home/kbuilder/.cache/synthtool/googleapis/artman-genfiles/java/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/ManagedClusterOrBuilder.java. synthtool > Replaced '// Generated by the protocol buffer compiler. DO NOT EDIT!' in /home/kbuilder/.cache/synthtool/googleapis/artman-genfiles/java/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/DeleteAutoscalingPolicyRequestOrBuilder.java. synthtool > Replaced '// Generated by the protocol buffer compiler. DO NOT EDIT!' in /home/kbuilder/.cache/synthtool/googleapis/artman-genfiles/java/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/GetWorkflowTemplateRequestOrBuilder.java. synthtool > Replaced '// Generated by the protocol buffer compiler. DO NOT EDIT!' 
in /home/kbuilder/.cache/synthtool/googleapis/artman-genfiles/java/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/CreateWorkflowTemplateRequestOrBuilder.java. synthtool > Replaced '// Generated by the protocol buffer compiler. DO NOT EDIT!' in /home/kbuilder/.cache/synthtool/googleapis/artman-genfiles/java/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/HadoopJobOrBuilder.java. synthtool > Replaced '// Generated by the protocol buffer compiler. DO NOT EDIT!' in /home/kbuilder/.cache/synthtool/googleapis/artman-genfiles/java/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/JobOrBuilder.java. synthtool > Replaced '// Generated by the protocol buffer compiler. DO NOT EDIT!' in /home/kbuilder/.cache/synthtool/googleapis/artman-genfiles/java/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/JobsProto.java. synthtool > Replaced '// Generated by the protocol buffer compiler. DO NOT EDIT!' in /home/kbuilder/.cache/synthtool/googleapis/artman-genfiles/java/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/WorkflowMetadataOrBuilder.java. synthtool > Replaced '// Generated by the protocol buffer compiler. DO NOT EDIT!' in /home/kbuilder/.cache/synthtool/googleapis/artman-genfiles/java/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/CancelJobRequestOrBuilder.java. synthtool > Replaced '// Generated by the protocol buffer compiler. DO NOT EDIT!' in /home/kbuilder/.cache/synthtool/googleapis/artman-genfiles/java/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/InstanceGroupAutoscalingPolicyConfigOrBuilder.java. synthtool > Replaced '// Generated by the protocol buffer compiler. DO NOT EDIT!' 
in /home/kbuilder/.cache/synthtool/googleapis/artman-genfiles/java/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/GetClusterRequestOrBuilder.java. synthtool > Replaced '// Generated by the protocol buffer compiler. DO NOT EDIT!' in /home/kbuilder/.cache/synthtool/googleapis/artman-genfiles/java/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/ClusterOperationMetadata.java. synthtool > Replaced '// Generated by the protocol buffer compiler. DO NOT EDIT!' in /home/kbuilder/.cache/synthtool/googleapis/artman-genfiles/java/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/TemplateParameter.java. synthtool > Replaced '// Generated by the protocol buffer compiler. DO NOT EDIT!' in /home/kbuilder/.cache/synthtool/googleapis/artman-genfiles/java/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/OrderedJob.java. synthtool > Replaced '// Generated by the protocol buffer compiler. DO NOT EDIT!' in /home/kbuilder/.cache/synthtool/googleapis/artman-genfiles/java/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/WorkflowGraph.java. synthtool > Replaced '// Generated by the protocol buffer compiler. DO NOT EDIT!' in /home/kbuilder/.cache/synthtool/googleapis/artman-genfiles/java/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/QueryListOrBuilder.java. synthtool > Replaced '// Generated by the protocol buffer compiler. DO NOT EDIT!' in /home/kbuilder/.cache/synthtool/googleapis/artman-genfiles/java/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/EncryptionConfigOrBuilder.java. synthtool > Replaced '// Generated by the protocol buffer compiler. DO NOT EDIT!' in /home/kbuilder/.cache/synthtool/googleapis/artman-genfiles/java/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/HiveJobOrBuilder.java. 
synthtool > Replaced '// Generated by the protocol buffer compiler. DO NOT EDIT!' in /home/kbuilder/.cache/synthtool/googleapis/artman-genfiles/java/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/UpdateClusterRequest.java. synthtool > Replaced '// Generated by the protocol buffer compiler. DO NOT EDIT!' in /home/kbuilder/.cache/synthtool/googleapis/artman-genfiles/java/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/UpdateWorkflowTemplateRequestOrBuilder.java. synthtool > Replaced '// Generated by the protocol buffer compiler. DO NOT EDIT!' in /home/kbuilder/.cache/synthtool/googleapis/artman-genfiles/java/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/JobPlacementOrBuilder.java. synthtool > Replaced '// Generated by the protocol buffer compiler. DO NOT EDIT!' in /home/kbuilder/.cache/synthtool/googleapis/artman-genfiles/java/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/AcceleratorConfigOrBuilder.java. synthtool > Replaced '// Generated by the protocol buffer compiler. DO NOT EDIT!' in /home/kbuilder/.cache/synthtool/googleapis/artman-genfiles/java/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/TemplateParameterOrBuilder.java. synthtool > Replaced '// Generated by the protocol buffer compiler. DO NOT EDIT!' in /home/kbuilder/.cache/synthtool/googleapis/artman-genfiles/java/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/CreateAutoscalingPolicyRequestOrBuilder.java. synthtool > Replaced '// Generated by the protocol buffer compiler. DO NOT EDIT!' in /home/kbuilder/.cache/synthtool/googleapis/artman-genfiles/java/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/KerberosConfig.java. synthtool > Replaced '// Generated by the protocol buffer compiler. DO NOT EDIT!' 
in /home/kbuilder/.cache/synthtool/googleapis/artman-genfiles/java/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/AutoscalingConfig.java. synthtool > Replaced '// Generated by the protocol buffer compiler. DO NOT EDIT!' in /home/kbuilder/.cache/synthtool/googleapis/artman-genfiles/java/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/AutoscalingPolicyOrBuilder.java. synthtool > Replaced '// Generated by the protocol buffer compiler. DO NOT EDIT!' in /home/kbuilder/.cache/synthtool/googleapis/artman-genfiles/java/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/SparkSqlJob.java. synthtool > Replaced '// Generated by the protocol buffer compiler. DO NOT EDIT!' in /home/kbuilder/.cache/synthtool/googleapis/artman-genfiles/java/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/GetClusterRequest.java. synthtool > Replaced '// Generated by the protocol buffer compiler. DO NOT EDIT!' in /home/kbuilder/.cache/synthtool/googleapis/artman-genfiles/java/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/ClusterStatusOrBuilder.java. synthtool > Replaced '// Generated by the protocol buffer compiler. DO NOT EDIT!' in /home/kbuilder/.cache/synthtool/googleapis/artman-genfiles/java/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/UpdateWorkflowTemplateRequest.java. synthtool > Replaced '// Generated by the protocol buffer compiler. DO NOT EDIT!' in /home/kbuilder/.cache/synthtool/googleapis/artman-genfiles/java/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/ListClustersRequestOrBuilder.java. synthtool > Replaced '// Generated by the protocol buffer compiler. DO NOT EDIT!' 
in /home/kbuilder/.cache/synthtool/googleapis/artman-genfiles/java/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/RegexValidationOrBuilder.java. synthtool > Replaced '// Generated by the protocol buffer compiler. DO NOT EDIT!' in /home/kbuilder/.cache/synthtool/googleapis/artman-genfiles/java/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/ClustersProto.java. synthtool > Replaced '// Generated by the protocol buffer compiler. DO NOT EDIT!' in /home/kbuilder/.cache/synthtool/googleapis/artman-genfiles/java/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/ParameterValidationOrBuilder.java. synthtool > Replaced '// Generated by the protocol buffer compiler. DO NOT EDIT!' in /home/kbuilder/.cache/synthtool/googleapis/artman-genfiles/java/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/ListWorkflowTemplatesResponse.java. synthtool > Replaced '// Generated by the protocol buffer compiler. DO NOT EDIT!' in /home/kbuilder/.cache/synthtool/googleapis/artman-genfiles/java/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/InstanceGroupConfig.java. synthtool > Replaced '// Generated by the protocol buffer compiler. DO NOT EDIT!' in /home/kbuilder/.cache/synthtool/googleapis/artman-genfiles/java/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/SoftwareConfig.java. synthtool > Replaced '// Generated by the protocol buffer compiler. DO NOT EDIT!' in /home/kbuilder/.cache/synthtool/googleapis/artman-genfiles/java/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/ClusterMetrics.java. synthtool > Replaced '// Generated by the protocol buffer compiler. DO NOT EDIT!' 
in /home/kbuilder/.cache/synthtool/googleapis/artman-genfiles/java/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/UpdateAutoscalingPolicyRequest.java. synthtool > Replaced '// Generated by the protocol buffer compiler. DO NOT EDIT!' in /home/kbuilder/.cache/synthtool/googleapis/artman-genfiles/java/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/OrderedJobOrBuilder.java. synthtool > Replaced '// Generated by the protocol buffer compiler. DO NOT EDIT!' in /home/kbuilder/.cache/synthtool/googleapis/artman-genfiles/java/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/SparkSqlJobOrBuilder.java. synthtool > Replaced '// Generated by the protocol buffer compiler. DO NOT EDIT!' in /home/kbuilder/.cache/synthtool/googleapis/artman-genfiles/java/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/ManagedGroupConfig.java. synthtool > Replaced '// Generated by the protocol buffer compiler. DO NOT EDIT!' in /home/kbuilder/.cache/synthtool/googleapis/artman-genfiles/java/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/ManagedGroupConfigOrBuilder.java. synthtool > Replaced '// Generated by the protocol buffer compiler. DO NOT EDIT!' in /home/kbuilder/.cache/synthtool/googleapis/artman-genfiles/java/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/BasicAutoscalingAlgorithmOrBuilder.java. synthtool > Replaced '// Generated by the protocol buffer compiler. DO NOT EDIT!' in /home/kbuilder/.cache/synthtool/googleapis/artman-genfiles/java/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/ListAutoscalingPoliciesResponse.java. synthtool > Replaced '// Generated by the protocol buffer compiler. DO NOT EDIT!' 
in /home/kbuilder/.cache/synthtool/googleapis/artman-genfiles/java/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/EndpointConfig.java. synthtool > Replaced '// Generated by the protocol buffer compiler. DO NOT EDIT!' in /home/kbuilder/.cache/synthtool/googleapis/artman-genfiles/java/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/ValueValidationOrBuilder.java. synthtool > Replaced '// Generated by the protocol buffer compiler. DO NOT EDIT!' in /home/kbuilder/.cache/synthtool/googleapis/artman-genfiles/java/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/Job.java. synthtool > Replaced '// Generated by the protocol buffer compiler. DO NOT EDIT!' in /home/kbuilder/.cache/synthtool/googleapis/artman-genfiles/java/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/PySparkJob.java. synthtool > Replaced '// Generated by the protocol buffer compiler. DO NOT EDIT!' in /home/kbuilder/.cache/synthtool/googleapis/artman-genfiles/java/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/ManagedCluster.java. synthtool > Replaced '// Generated by the protocol buffer compiler. DO NOT EDIT!' in /home/kbuilder/.cache/synthtool/googleapis/artman-genfiles/java/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/CreateAutoscalingPolicyRequest.java. synthtool > Replaced '// Generated by the protocol buffer compiler. DO NOT EDIT!' in /home/kbuilder/.cache/synthtool/googleapis/artman-genfiles/java/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/SparkRJobOrBuilder.java. synthtool > Replaced '// Generated by the protocol buffer compiler. DO NOT EDIT!' in /home/kbuilder/.cache/synthtool/googleapis/artman-genfiles/java/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/ClusterOperationStatusOrBuilder.java. 
synthtool > Replaced '// Generated by the protocol buffer compiler. DO NOT EDIT!' in /home/kbuilder/.cache/synthtool/googleapis/artman-genfiles/java/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/BasicYarnAutoscalingConfig.java. synthtool > Replaced '// Generated by the protocol buffer compiler. DO NOT EDIT!' in /home/kbuilder/.cache/synthtool/googleapis/artman-genfiles/java/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/BasicAutoscalingAlgorithm.java. synthtool > Replaced '// Generated by the protocol buffer compiler. DO NOT EDIT!' in /home/kbuilder/.cache/synthtool/googleapis/artman-genfiles/java/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/GetWorkflowTemplateRequest.java. synthtool > Replaced '// Generated by the protocol buffer compiler. DO NOT EDIT!' in /home/kbuilder/.cache/synthtool/googleapis/artman-genfiles/java/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/DeleteClusterRequestOrBuilder.java. synthtool > Replaced '// Generated by the protocol buffer compiler. DO NOT EDIT!' in /home/kbuilder/.cache/synthtool/googleapis/artman-genfiles/java/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/AcceleratorConfig.java. synthtool > Replaced '// Generated by the protocol buffer compiler. DO NOT EDIT!' in /home/kbuilder/.cache/synthtool/googleapis/artman-genfiles/java/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/InstantiateInlineWorkflowTemplateRequestOrBuilder.java. synthtool > Replaced '// Generated by the protocol buffer compiler. DO NOT EDIT!' in /home/kbuilder/.cache/synthtool/googleapis/artman-genfiles/java/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/Component.java. synthtool > Replaced '// Generated by the protocol buffer compiler. DO NOT EDIT!' 
in /home/kbuilder/.cache/synthtool/googleapis/artman-genfiles/java/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/JobPlacement.java. synthtool > Replaced '// Generated by the protocol buffer compiler. DO NOT EDIT!' in /home/kbuilder/.cache/synthtool/googleapis/artman-genfiles/java/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/CreateWorkflowTemplateRequest.java. synthtool > Replaced '// Generated by the protocol buffer compiler. DO NOT EDIT!' in /home/kbuilder/.cache/synthtool/googleapis/artman-genfiles/java/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/JobStatusOrBuilder.java. synthtool > Replaced '// Generated by the protocol buffer compiler. DO NOT EDIT!' in /home/kbuilder/.cache/synthtool/googleapis/artman-genfiles/java/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/HiveJob.java. synthtool > Replaced '// Generated by the protocol buffer compiler. DO NOT EDIT!' in /home/kbuilder/.cache/synthtool/googleapis/artman-genfiles/java/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/ClusterOrBuilder.java. synthtool > Replaced '// Generated by the protocol buffer compiler. DO NOT EDIT!' in /home/kbuilder/.cache/synthtool/googleapis/artman-genfiles/java/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/YarnApplication.java. synthtool > Replaced '// Generated by the protocol buffer compiler. DO NOT EDIT!' in /home/kbuilder/.cache/synthtool/googleapis/artman-genfiles/java/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/InstantiateInlineWorkflowTemplateRequest.java. synthtool > Replaced '// Generated by the protocol buffer compiler. DO NOT EDIT!' in /home/kbuilder/.cache/synthtool/googleapis/artman-genfiles/java/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/QueryList.java. 
synthtool > Replaced '// Generated by the protocol buffer compiler. DO NOT EDIT!' in /home/kbuilder/.cache/synthtool/googleapis/artman-genfiles/java/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/ClusterSelectorOrBuilder.java. synthtool > Replaced '// Generated by the protocol buffer compiler. DO NOT EDIT!' in /home/kbuilder/.cache/synthtool/googleapis/artman-genfiles/java/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/ClusterOperationStatus.java. synthtool > Replaced '// Generated by the protocol buffer compiler. DO NOT EDIT!' in /home/kbuilder/.cache/synthtool/googleapis/artman-genfiles/java/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/GceClusterConfigOrBuilder.java. synthtool > Replaced '// Generated by the protocol buffer compiler. DO NOT EDIT!' in /home/kbuilder/.cache/synthtool/googleapis/artman-genfiles/java/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/SparkJob.java. synthtool > Replaced '// Generated by the protocol buffer compiler. DO NOT EDIT!' in /home/kbuilder/.cache/synthtool/googleapis/artman-genfiles/java/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/RegexValidation.java. synthtool > Replaced '// Generated by the protocol buffer compiler. DO NOT EDIT!' in /home/kbuilder/.cache/synthtool/googleapis/artman-genfiles/java/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/ListWorkflowTemplatesResponseOrBuilder.java. synthtool > Replaced '// Generated by the protocol buffer compiler. DO NOT EDIT!' in /home/kbuilder/.cache/synthtool/googleapis/artman-genfiles/java/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/SecurityConfig.java. synthtool > Replaced '// Generated by the protocol buffer compiler. DO NOT EDIT!' 
in /home/kbuilder/.cache/synthtool/googleapis/artman-genfiles/java/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/DiskConfig.java. synthtool > Replaced '// Generated by the protocol buffer compiler. DO NOT EDIT!' in /home/kbuilder/.cache/synthtool/googleapis/artman-genfiles/java/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/ListJobsRequest.java. synthtool > Replaced '// Generated by the protocol buffer compiler. DO NOT EDIT!' in /home/kbuilder/.cache/synthtool/googleapis/artman-genfiles/java/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/EncryptionConfig.java. synthtool > Replaced '// Generated by the protocol buffer compiler. DO NOT EDIT!' in /home/kbuilder/.cache/synthtool/googleapis/artman-genfiles/java/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/DiskConfigOrBuilder.java. synthtool > Replaced '// Generated by the protocol buffer compiler. DO NOT EDIT!' in /home/kbuilder/.cache/synthtool/googleapis/artman-genfiles/java/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/SparkJobOrBuilder.java. synthtool > Replaced '// Generated by the protocol buffer compiler. DO NOT EDIT!' in /home/kbuilder/.cache/synthtool/googleapis/artman-genfiles/java/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/LoggingConfigOrBuilder.java. synthtool > Replaced '// Generated by the protocol buffer compiler. DO NOT EDIT!' in /home/kbuilder/.cache/synthtool/googleapis/artman-genfiles/java/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/JobReferenceOrBuilder.java. synthtool > Replaced '// Generated by the protocol buffer compiler. DO NOT EDIT!' in /home/kbuilder/.cache/synthtool/googleapis/artman-genfiles/java/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/GetJobRequest.java. 
synthtool > Replaced '// Generated by the protocol buffer compiler. DO NOT EDIT!' in /home/kbuilder/.cache/synthtool/googleapis/artman-genfiles/java/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/JobReference.java. synthtool > Replaced '// Generated by the protocol buffer compiler. DO NOT EDIT!' in /home/kbuilder/.cache/synthtool/googleapis/artman-genfiles/java/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/ClusterMetricsOrBuilder.java. synthtool > Replaced '/\\*\n \\* Copyright \\d{4} Google LLC\n \\*\n \\* Licensed under the Apache License, Version 2.0 \\(the "License"\\); you may not use this file except\n \\* in compliance with the License. You may obtain a copy of the License at\n \\*\n \\* http://www.apache.org/licenses/LICENSE-2.0\n \\*\n \\* Unless required by applicable law or agreed to in writing, software distributed under the License\n \\* is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express\n \\* or implied. See the License for the specific language governing permissions and limitations under\n \\* the License.\n \\*/\n' in /home/kbuilder/.cache/synthtool/googleapis/artman-genfiles/java/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/WorkflowTemplateName.java. synthtool > Replaced '/\\*\n \\* Copyright \\d{4} Google LLC\n \\*\n \\* Licensed under the Apache License, Version 2.0 \\(the "License"\\); you may not use this file except\n \\* in compliance with the License. You may obtain a copy of the License at\n \\*\n \\* http://www.apache.org/licenses/LICENSE-2.0\n \\*\n \\* Unless required by applicable law or agreed to in writing, software distributed under the License\n \\* is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express\n \\* or implied. 
See the License for the specific language governing permissions and limitations under\n \\* the License.\n \\*/\n' in /home/kbuilder/.cache/synthtool/googleapis/artman-genfiles/java/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/AutoscalingPolicyName.java. synthtool > Replaced '/\\*\n \\* Copyright \\d{4} Google LLC\n \\*\n \\* Licensed under the Apache License, Version 2.0 \\(the "License"\\); you may not use this file except\n \\* in compliance with the License. You may obtain a copy of the License at\n \\*\n \\* http://www.apache.org/licenses/LICENSE-2.0\n \\*\n \\* Unless required by applicable law or agreed to in writing, software distributed under the License\n \\* is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express\n \\* or implied. See the License for the specific language governing permissions and limitations under\n \\* the License.\n \\*/\n' in /home/kbuilder/.cache/synthtool/googleapis/artman-genfiles/java/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/RegionName.java. synthtool > Replaced 'package com.google.cloud.dataproc.v1beta2;' in /home/kbuilder/.cache/synthtool/googleapis/artman-genfiles/java/grpc-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/AutoscalingPolicyServiceGrpc.java. synthtool > Replaced 'package com.google.cloud.dataproc.v1beta2;' in /home/kbuilder/.cache/synthtool/googleapis/artman-genfiles/java/grpc-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/ClusterControllerGrpc.java. synthtool > Replaced 'package com.google.cloud.dataproc.v1beta2;' in /home/kbuilder/.cache/synthtool/googleapis/artman-genfiles/java/grpc-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/JobControllerGrpc.java. 
synthtool > Replaced 'package com.google.cloud.dataproc.v1beta2;' in /home/kbuilder/.cache/synthtool/googleapis/artman-genfiles/java/grpc-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/WorkflowTemplateServiceGrpc.java. synthtool > No files in sources [PosixPath('/home/kbuilder/.cache/synthtool/googleapis/artman-genfiles/java/gapic-google-cloud-dataproc-v1beta2/samples/src')] were copied. Does the source contain files? synthtool > No files in sources [PosixPath('/home/kbuilder/.cache/synthtool/googleapis/artman-genfiles/java/gapic-google-cloud-dataproc-v1beta2/samples/resources')] were copied. Does the source contain files? synthtool > No files in sources [PosixPath('/home/kbuilder/.cache/synthtool/googleapis/artman-genfiles/java/gapic-google-cloud-dataproc-v1beta2/samples/src/**/*.manifest.yaml')] were copied. Does the source contain files? synthtool > Running java formatter on 78 files synthtool > Running java formatter on 4 files synthtool > Running java formatter on 172 files synthtool > Running java formatter on 0 files .github/ISSUE_TEMPLATE/bug_report.md .github/ISSUE_TEMPLATE/feature_request.md .github/ISSUE_TEMPLATE/support_request.md .github/PULL_REQUEST_TEMPLATE.md .github/release-please.yml .kokoro/build.bat .kokoro/build.sh .kokoro/coerce_logs.sh .kokoro/common.cfg .kokoro/continuous/common.cfg .kokoro/continuous/dependencies.cfg .kokoro/continuous/integration.cfg .kokoro/continuous/java11.cfg .kokoro/continuous/java7.cfg .kokoro/continuous/java8-osx.cfg .kokoro/continuous/java8-win.cfg .kokoro/continuous/java8.cfg .kokoro/continuous/lint.cfg .kokoro/continuous/propose_release.cfg .kokoro/continuous/propose_release.sh .kokoro/continuous/samples.cfg .kokoro/dependencies.sh .kokoro/linkage-monitor.sh .kokoro/nightly/common.cfg .kokoro/nightly/dependencies.cfg .kokoro/nightly/integration.cfg .kokoro/nightly/java11.cfg .kokoro/nightly/java7.cfg .kokoro/nightly/java8-osx.cfg .kokoro/nightly/java8-win.cfg 
.kokoro/nightly/java8.cfg .kokoro/nightly/lint.cfg .kokoro/nightly/samples.cfg .kokoro/presubmit/clirr.cfg .kokoro/presubmit/common.cfg .kokoro/presubmit/dependencies.cfg .kokoro/presubmit/integration.cfg .kokoro/presubmit/java11.cfg .kokoro/presubmit/java7.cfg .kokoro/presubmit/java8-osx.cfg .kokoro/presubmit/java8-win.cfg .kokoro/presubmit/java8.cfg .kokoro/presubmit/linkage-monitor.cfg .kokoro/presubmit/lint.cfg .kokoro/presubmit/samples.cfg .kokoro/release/bump_snapshot.cfg .kokoro/release/bump_snapshot.sh .kokoro/release/common.cfg .kokoro/release/common.sh .kokoro/release/drop.cfg .kokoro/release/drop.sh .kokoro/release/promote.cfg .kokoro/release/promote.sh .kokoro/release/publish_javadoc.cfg .kokoro/release/publish_javadoc.sh .kokoro/release/snapshot.cfg .kokoro/release/snapshot.sh .kokoro/release/stage.cfg .kokoro/release/stage.sh .kokoro/trampoline.sh CODE_OF_CONDUCT.md CONTRIBUTING.md LICENSE README.md codecov.yaml java.header license-checks.xml renovate.json synthtool > Wrote metadata to synth.metadata. ```
--- .../dataproc/v1/ClusterControllerClient.java | 40 +- .../dataproc/v1/JobControllerClient.java | 15 +- .../v1/WorkflowTemplateServiceClient.java | 48 +- .../dataproc/v1/ClusterControllerGrpc.java | 40 +- .../cloud/dataproc/v1/JobControllerGrpc.java | 16 +- .../v1/WorkflowTemplateServiceGrpc.java | 32 +- .../clirr-ignored-differences.xml | 19 + .../cloud/dataproc/v1/ClusterConfig.java | 299 +++ .../dataproc/v1/ClusterConfigOrBuilder.java | 41 + .../cloud/dataproc/v1/ClusterStatus.java | 36 +- .../dataproc/v1/ClusterStatusOrBuilder.java | 9 +- .../cloud/dataproc/v1/ClustersProto.java | 417 +-- .../cloud/dataproc/v1/GceClusterConfig.java | 304 +++ .../v1/GceClusterConfigOrBuilder.java | 41 + .../dataproc/v1/InstanceGroupConfig.java | 4 +- .../com/google/cloud/dataproc/v1/Job.java | 1109 ++++++-- .../cloud/dataproc/v1/JobOrBuilder.java | 184 +- .../google/cloud/dataproc/v1/JobsProto.java | 468 ++-- .../cloud/dataproc/v1/LifecycleConfig.java | 1881 ++++++++++++++ .../dataproc/v1/LifecycleConfigOrBuilder.java | 218 ++ .../dataproc/v1/NodeInitializationAction.java | 48 +- .../v1/NodeInitializationActionOrBuilder.java | 12 +- .../google/cloud/dataproc/v1/OrderedJob.java | 624 +---- .../dataproc/v1/OrderedJobOrBuilder.java | 132 +- .../google/cloud/dataproc/v1/PrestoJob.java | 2240 +++++++++++++++++ .../cloud/dataproc/v1/PrestoJobOrBuilder.java | 290 +++ .../dataproc/v1/ReservationAffinity.java | 1240 +++++++++ .../v1/ReservationAffinityOrBuilder.java | 130 + .../google/cloud/dataproc/v1/SparkRJob.java | 2198 ++++++++++++++++ .../cloud/dataproc/v1/SparkRJobOrBuilder.java | 340 +++ .../dataproc/v1/UpdateClusterRequest.java | 36 +- .../v1/UpdateClusterRequestOrBuilder.java | 9 +- .../dataproc/v1/autoscaling_policies.proto | 4 +- .../google/cloud/dataproc/v1/clusters.proto | 91 +- .../proto/google/cloud/dataproc/v1/jobs.proto | 102 +- .../google/cloud/dataproc/v1/operations.proto | 3 +- .../google/cloud/dataproc/v1/shared.proto | 3 +- 
.../dataproc/v1/workflow_templates.proto | 21 +- synth.metadata | 12 +- 39 files changed, 11272 insertions(+), 1484 deletions(-) create mode 100644 proto-google-cloud-dataproc-v1/clirr-ignored-differences.xml create mode 100644 proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/LifecycleConfig.java create mode 100644 proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/LifecycleConfigOrBuilder.java create mode 100644 proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/PrestoJob.java create mode 100644 proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/PrestoJobOrBuilder.java create mode 100644 proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/ReservationAffinity.java create mode 100644 proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/ReservationAffinityOrBuilder.java create mode 100644 proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/SparkRJob.java create mode 100644 proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/SparkRJobOrBuilder.java diff --git a/google-cloud-dataproc/src/main/java/com/google/cloud/dataproc/v1/ClusterControllerClient.java b/google-cloud-dataproc/src/main/java/com/google/cloud/dataproc/v1/ClusterControllerClient.java index b1ca4ad6..9da0987a 100644 --- a/google-cloud-dataproc/src/main/java/com/google/cloud/dataproc/v1/ClusterControllerClient.java +++ b/google-cloud-dataproc/src/main/java/com/google/cloud/dataproc/v1/ClusterControllerClient.java @@ -180,7 +180,7 @@ public final OperationsClient getOperationsClient() { /** * Creates a cluster in a project. The returned * [Operation.metadata][google.longrunning.Operation.metadata] will be - * [ClusterOperationMetadata](/dataproc/docs/reference/rpc/google.cloud.dataproc.v1#clusteroperationmetadata). 
+ * [ClusterOperationMetadata](https://cloud.google.com/dataproc/docs/reference/rpc/google.cloud.dataproc.v1#clusteroperationmetadata). * *

Sample code: * @@ -216,7 +216,7 @@ public final OperationFuture createClusterAsy /** * Creates a cluster in a project. The returned * [Operation.metadata][google.longrunning.Operation.metadata] will be - * [ClusterOperationMetadata](/dataproc/docs/reference/rpc/google.cloud.dataproc.v1#clusteroperationmetadata). + * [ClusterOperationMetadata](https://cloud.google.com/dataproc/docs/reference/rpc/google.cloud.dataproc.v1#clusteroperationmetadata). * *

Sample code: * @@ -248,7 +248,7 @@ public final OperationFuture createClusterAsy /** * Creates a cluster in a project. The returned * [Operation.metadata][google.longrunning.Operation.metadata] will be - * [ClusterOperationMetadata](/dataproc/docs/reference/rpc/google.cloud.dataproc.v1#clusteroperationmetadata). + * [ClusterOperationMetadata](https://cloud.google.com/dataproc/docs/reference/rpc/google.cloud.dataproc.v1#clusteroperationmetadata). * *

Sample code: * @@ -278,7 +278,7 @@ public final OperationFuture createClusterAsy /** * Creates a cluster in a project. The returned * [Operation.metadata][google.longrunning.Operation.metadata] will be - * [ClusterOperationMetadata](/dataproc/docs/reference/rpc/google.cloud.dataproc.v1#clusteroperationmetadata). + * [ClusterOperationMetadata](https://cloud.google.com/dataproc/docs/reference/rpc/google.cloud.dataproc.v1#clusteroperationmetadata). * *

Sample code: * @@ -306,7 +306,7 @@ public final UnaryCallable createClusterCallabl /** * Updates a cluster in a project. The returned * [Operation.metadata][google.longrunning.Operation.metadata] will be - * [ClusterOperationMetadata](/dataproc/docs/reference/rpc/google.cloud.dataproc.v1#clusteroperationmetadata). + * [ClusterOperationMetadata](https://cloud.google.com/dataproc/docs/reference/rpc/google.cloud.dataproc.v1#clusteroperationmetadata). * *

Sample code: * @@ -367,7 +367,7 @@ public final OperationFuture updateClusterAsy /** * Updates a cluster in a project. The returned * [Operation.metadata][google.longrunning.Operation.metadata] will be - * [ClusterOperationMetadata](/dataproc/docs/reference/rpc/google.cloud.dataproc.v1#clusteroperationmetadata). + * [ClusterOperationMetadata](https://cloud.google.com/dataproc/docs/reference/rpc/google.cloud.dataproc.v1#clusteroperationmetadata). * *

Sample code: * @@ -403,7 +403,7 @@ public final OperationFuture updateClusterAsy /** * Updates a cluster in a project. The returned * [Operation.metadata][google.longrunning.Operation.metadata] will be - * [ClusterOperationMetadata](/dataproc/docs/reference/rpc/google.cloud.dataproc.v1#clusteroperationmetadata). + * [ClusterOperationMetadata](https://cloud.google.com/dataproc/docs/reference/rpc/google.cloud.dataproc.v1#clusteroperationmetadata). * *

Sample code: * @@ -437,7 +437,7 @@ public final OperationFuture updateClusterAsy /** * Updates a cluster in a project. The returned * [Operation.metadata][google.longrunning.Operation.metadata] will be - * [ClusterOperationMetadata](/dataproc/docs/reference/rpc/google.cloud.dataproc.v1#clusteroperationmetadata). + * [ClusterOperationMetadata](https://cloud.google.com/dataproc/docs/reference/rpc/google.cloud.dataproc.v1#clusteroperationmetadata). * *

Sample code: * @@ -469,7 +469,7 @@ public final UnaryCallable updateClusterCallabl /** * Deletes a cluster in a project. The returned * [Operation.metadata][google.longrunning.Operation.metadata] will be - * [ClusterOperationMetadata](/dataproc/docs/reference/rpc/google.cloud.dataproc.v1#clusteroperationmetadata). + * [ClusterOperationMetadata](https://cloud.google.com/dataproc/docs/reference/rpc/google.cloud.dataproc.v1#clusteroperationmetadata). * *

Sample code: * @@ -505,7 +505,7 @@ public final OperationFuture deleteClusterAsync /** * Deletes a cluster in a project. The returned * [Operation.metadata][google.longrunning.Operation.metadata] will be - * [ClusterOperationMetadata](/dataproc/docs/reference/rpc/google.cloud.dataproc.v1#clusteroperationmetadata). + * [ClusterOperationMetadata](https://cloud.google.com/dataproc/docs/reference/rpc/google.cloud.dataproc.v1#clusteroperationmetadata). * *

Sample code: * @@ -537,7 +537,7 @@ public final OperationFuture deleteClusterAsync /** * Deletes a cluster in a project. The returned * [Operation.metadata][google.longrunning.Operation.metadata] will be - * [ClusterOperationMetadata](/dataproc/docs/reference/rpc/google.cloud.dataproc.v1#clusteroperationmetadata). + * [ClusterOperationMetadata](https://cloud.google.com/dataproc/docs/reference/rpc/google.cloud.dataproc.v1#clusteroperationmetadata). * *

Sample code: * @@ -567,7 +567,7 @@ public final OperationFuture deleteClusterAsync /** * Deletes a cluster in a project. The returned * [Operation.metadata][google.longrunning.Operation.metadata] will be - * [ClusterOperationMetadata](/dataproc/docs/reference/rpc/google.cloud.dataproc.v1#clusteroperationmetadata). + * [ClusterOperationMetadata](https://cloud.google.com/dataproc/docs/reference/rpc/google.cloud.dataproc.v1#clusteroperationmetadata). * *

Sample code: * @@ -840,10 +840,10 @@ public final UnaryCallable listCluste /** * Gets cluster diagnostic information. The returned * [Operation.metadata][google.longrunning.Operation.metadata] will be - * [ClusterOperationMetadata](/dataproc/docs/reference/rpc/google.cloud.dataproc.v1#clusteroperationmetadata). + * [ClusterOperationMetadata](https://cloud.google.com/dataproc/docs/reference/rpc/google.cloud.dataproc.v1#clusteroperationmetadata). * After the operation completes, [Operation.response][google.longrunning.Operation.response] * contains - * [DiagnoseClusterResults](/dataproc/docs/reference/rpc/google.cloud.dataproc.v1#diagnoseclusterresults). + * [DiagnoseClusterResults](https://cloud.google.com/dataproc/docs/reference/rpc/google.cloud.dataproc.v1#diagnoseclusterresults). * *

Sample code: * @@ -879,10 +879,10 @@ public final OperationFuture diagnoseClusterAsync /** * Gets cluster diagnostic information. The returned * [Operation.metadata][google.longrunning.Operation.metadata] will be - * [ClusterOperationMetadata](/dataproc/docs/reference/rpc/google.cloud.dataproc.v1#clusteroperationmetadata). + * [ClusterOperationMetadata](https://cloud.google.com/dataproc/docs/reference/rpc/google.cloud.dataproc.v1#clusteroperationmetadata). * After the operation completes, [Operation.response][google.longrunning.Operation.response] * contains - * [DiagnoseClusterResults](/dataproc/docs/reference/rpc/google.cloud.dataproc.v1#diagnoseclusterresults). + * [DiagnoseClusterResults](https://cloud.google.com/dataproc/docs/reference/rpc/google.cloud.dataproc.v1#diagnoseclusterresults). * *

Sample code: * @@ -914,10 +914,10 @@ public final OperationFuture diagnoseClusterAsync /** * Gets cluster diagnostic information. The returned * [Operation.metadata][google.longrunning.Operation.metadata] will be - * [ClusterOperationMetadata](/dataproc/docs/reference/rpc/google.cloud.dataproc.v1#clusteroperationmetadata). + * [ClusterOperationMetadata](https://cloud.google.com/dataproc/docs/reference/rpc/google.cloud.dataproc.v1#clusteroperationmetadata). * After the operation completes, [Operation.response][google.longrunning.Operation.response] * contains - * [DiagnoseClusterResults](/dataproc/docs/reference/rpc/google.cloud.dataproc.v1#diagnoseclusterresults). + * [DiagnoseClusterResults](https://cloud.google.com/dataproc/docs/reference/rpc/google.cloud.dataproc.v1#diagnoseclusterresults). * *

Sample code: * @@ -947,10 +947,10 @@ public final OperationFuture diagnoseClusterAsync /** * Gets cluster diagnostic information. The returned * [Operation.metadata][google.longrunning.Operation.metadata] will be - * [ClusterOperationMetadata](/dataproc/docs/reference/rpc/google.cloud.dataproc.v1#clusteroperationmetadata). + * [ClusterOperationMetadata](https://cloud.google.com/dataproc/docs/reference/rpc/google.cloud.dataproc.v1#clusteroperationmetadata). * After the operation completes, [Operation.response][google.longrunning.Operation.response] * contains - * [DiagnoseClusterResults](/dataproc/docs/reference/rpc/google.cloud.dataproc.v1#diagnoseclusterresults). + * [DiagnoseClusterResults](https://cloud.google.com/dataproc/docs/reference/rpc/google.cloud.dataproc.v1#diagnoseclusterresults). * *

Sample code: * diff --git a/google-cloud-dataproc/src/main/java/com/google/cloud/dataproc/v1/JobControllerClient.java b/google-cloud-dataproc/src/main/java/com/google/cloud/dataproc/v1/JobControllerClient.java index cd01f64e..ad10a4f5 100644 --- a/google-cloud-dataproc/src/main/java/com/google/cloud/dataproc/v1/JobControllerClient.java +++ b/google-cloud-dataproc/src/main/java/com/google/cloud/dataproc/v1/JobControllerClient.java @@ -534,8 +534,9 @@ public final UnaryCallable updateJobCallable() { // AUTO-GENERATED DOCUMENTATION AND METHOD /** * Starts a job cancellation request. To access the job resource after cancellation, call - * [regions/{region}/jobs.list](/dataproc/docs/reference/rest/v1/projects.regions.jobs/list) or - * [regions/{region}/jobs.get](/dataproc/docs/reference/rest/v1/projects.regions.jobs/get). + * [regions/{region}/jobs.list](https://cloud.google.com/dataproc/docs/reference/rest/v1/projects.regions.jobs/list) + * or + * [regions/{region}/jobs.get](https://cloud.google.com/dataproc/docs/reference/rest/v1/projects.regions.jobs/get). * *

Sample code: * @@ -566,8 +567,9 @@ public final Job cancelJob(String projectId, String region, String jobId) { // AUTO-GENERATED DOCUMENTATION AND METHOD /** * Starts a job cancellation request. To access the job resource after cancellation, call - * [regions/{region}/jobs.list](/dataproc/docs/reference/rest/v1/projects.regions.jobs/list) or - * [regions/{region}/jobs.get](/dataproc/docs/reference/rest/v1/projects.regions.jobs/get). + * [regions/{region}/jobs.list](https://cloud.google.com/dataproc/docs/reference/rest/v1/projects.regions.jobs/list) + * or + * [regions/{region}/jobs.get](https://cloud.google.com/dataproc/docs/reference/rest/v1/projects.regions.jobs/get). * *

Sample code: * @@ -595,8 +597,9 @@ public final Job cancelJob(CancelJobRequest request) { // AUTO-GENERATED DOCUMENTATION AND METHOD /** * Starts a job cancellation request. To access the job resource after cancellation, call - * [regions/{region}/jobs.list](/dataproc/docs/reference/rest/v1/projects.regions.jobs/list) or - * [regions/{region}/jobs.get](/dataproc/docs/reference/rest/v1/projects.regions.jobs/get). + * [regions/{region}/jobs.list](https://cloud.google.com/dataproc/docs/reference/rest/v1/projects.regions.jobs/list) + * or + * [regions/{region}/jobs.get](https://cloud.google.com/dataproc/docs/reference/rest/v1/projects.regions.jobs/get). * *

Sample code: * diff --git a/google-cloud-dataproc/src/main/java/com/google/cloud/dataproc/v1/WorkflowTemplateServiceClient.java b/google-cloud-dataproc/src/main/java/com/google/cloud/dataproc/v1/WorkflowTemplateServiceClient.java index 4c651b3d..8db2a56e 100644 --- a/google-cloud-dataproc/src/main/java/com/google/cloud/dataproc/v1/WorkflowTemplateServiceClient.java +++ b/google-cloud-dataproc/src/main/java/com/google/cloud/dataproc/v1/WorkflowTemplateServiceClient.java @@ -414,9 +414,9 @@ public final WorkflowTemplate getWorkflowTemplate(GetWorkflowTemplateRequest req * inflight jobs to be cancelled and workflow-owned clusters to be deleted. * *

The [Operation.metadata][google.longrunning.Operation.metadata] will be - * [WorkflowMetadata](/dataproc/docs/reference/rpc/google.cloud.dataproc.v1#workflowmetadata). + * [WorkflowMetadata](https://cloud.google.com/dataproc/docs/reference/rpc/google.cloud.dataproc.v1#workflowmetadata). * Also see [Using - * WorkflowMetadata](/dataproc/docs/concepts/workflows/debugging#using_workflowmetadata). + * WorkflowMetadata](https://cloud.google.com/dataproc/docs/concepts/workflows/debugging#using_workflowmetadata). * *

On successful completion, [Operation.response][google.longrunning.Operation.response] will * be [Empty][google.protobuf.Empty]. @@ -464,9 +464,9 @@ public final OperationFuture instantiateWorkflowTemplat * inflight jobs to be cancelled and workflow-owned clusters to be deleted. * *

The [Operation.metadata][google.longrunning.Operation.metadata] will be - * [WorkflowMetadata](/dataproc/docs/reference/rpc/google.cloud.dataproc.v1#workflowmetadata). + * [WorkflowMetadata](https://cloud.google.com/dataproc/docs/reference/rpc/google.cloud.dataproc.v1#workflowmetadata). * Also see [Using - * WorkflowMetadata](/dataproc/docs/concepts/workflows/debugging#using_workflowmetadata). + * WorkflowMetadata](https://cloud.google.com/dataproc/docs/concepts/workflows/debugging#using_workflowmetadata). * *

On successful completion, [Operation.response][google.longrunning.Operation.response] will * be [Empty][google.protobuf.Empty]. @@ -512,9 +512,9 @@ public final OperationFuture instantiateWorkflowTemplat * inflight jobs to be cancelled and workflow-owned clusters to be deleted. * *

The [Operation.metadata][google.longrunning.Operation.metadata] will be - * [WorkflowMetadata](/dataproc/docs/reference/rpc/google.cloud.dataproc.v1#workflowmetadata). + * [WorkflowMetadata](https://cloud.google.com/dataproc/docs/reference/rpc/google.cloud.dataproc.v1#workflowmetadata). * Also see [Using - * WorkflowMetadata](/dataproc/docs/concepts/workflows/debugging#using_workflowmetadata). + * WorkflowMetadata](https://cloud.google.com/dataproc/docs/concepts/workflows/debugging#using_workflowmetadata). * *

On successful completion, [Operation.response][google.longrunning.Operation.response] will * be [Empty][google.protobuf.Empty]. @@ -566,9 +566,9 @@ public final OperationFuture instantiateWorkflowTemplat * inflight jobs to be cancelled and workflow-owned clusters to be deleted. * *

The [Operation.metadata][google.longrunning.Operation.metadata] will be - * [WorkflowMetadata](/dataproc/docs/reference/rpc/google.cloud.dataproc.v1#workflowmetadata). + * [WorkflowMetadata](https://cloud.google.com/dataproc/docs/reference/rpc/google.cloud.dataproc.v1#workflowmetadata). * Also see [Using - * WorkflowMetadata](/dataproc/docs/concepts/workflows/debugging#using_workflowmetadata). + * WorkflowMetadata](https://cloud.google.com/dataproc/docs/concepts/workflows/debugging#using_workflowmetadata). * *

On successful completion, [Operation.response][google.longrunning.Operation.response] will * be [Empty][google.protobuf.Empty]. @@ -620,9 +620,9 @@ public final OperationFuture instantiateWorkflowTemplat * inflight jobs to be cancelled and workflow-owned clusters to be deleted. * *

The [Operation.metadata][google.longrunning.Operation.metadata] will be - * [WorkflowMetadata](/dataproc/docs/reference/rpc/google.cloud.dataproc.v1#workflowmetadata). + * [WorkflowMetadata](https://cloud.google.com/dataproc/docs/reference/rpc/google.cloud.dataproc.v1#workflowmetadata). * Also see [Using - * WorkflowMetadata](/dataproc/docs/concepts/workflows/debugging#using_workflowmetadata). + * WorkflowMetadata](https://cloud.google.com/dataproc/docs/concepts/workflows/debugging#using_workflowmetadata). * *

On successful completion, [Operation.response][google.longrunning.Operation.response] will * be [Empty][google.protobuf.Empty]. @@ -662,9 +662,9 @@ public final OperationFuture instantiateWorkflowTemplat * inflight jobs to be cancelled and workflow-owned clusters to be deleted. * *

The [Operation.metadata][google.longrunning.Operation.metadata] will be - * [WorkflowMetadata](/dataproc/docs/reference/rpc/google.cloud.dataproc.v1#workflowmetadata). + * [WorkflowMetadata](https://cloud.google.com/dataproc/docs/reference/rpc/google.cloud.dataproc.v1#workflowmetadata). * Also see [Using - * WorkflowMetadata](/dataproc/docs/concepts/workflows/debugging#using_workflowmetadata). + * WorkflowMetadata](https://cloud.google.com/dataproc/docs/concepts/workflows/debugging#using_workflowmetadata). * *

On successful completion, [Operation.response][google.longrunning.Operation.response] will * be [Empty][google.protobuf.Empty]. @@ -702,9 +702,9 @@ public final OperationFuture instantiateWorkflowTemplat * inflight jobs to be cancelled and workflow-owned clusters to be deleted. * *

The [Operation.metadata][google.longrunning.Operation.metadata] will be - * [WorkflowMetadata](/dataproc/docs/reference/rpc/google.cloud.dataproc.v1#workflowmetadata). + * [WorkflowMetadata](https://cloud.google.com/dataproc/docs/reference/rpc/google.cloud.dataproc.v1#workflowmetadata). * Also see [Using - * WorkflowMetadata](/dataproc/docs/concepts/workflows/debugging#using_workflowmetadata). + * WorkflowMetadata](https://cloud.google.com/dataproc/docs/concepts/workflows/debugging#using_workflowmetadata). * *

On successful completion, [Operation.response][google.longrunning.Operation.response] will * be [Empty][google.protobuf.Empty]. @@ -746,9 +746,9 @@ public final OperationFuture instantiateWorkflowTemplat * inflight jobs to be cancelled and workflow-owned clusters to be deleted. * *

The [Operation.metadata][google.longrunning.Operation.metadata] will be - * [WorkflowMetadata](/dataproc/docs/reference/rpc/google.cloud.dataproc.v1#workflowmetadata). + * [WorkflowMetadata](https://cloud.google.com/dataproc/docs/reference/rpc/google.cloud.dataproc.v1#workflowmetadata). * Also see [Using - * WorkflowMetadata](/dataproc/docs/concepts/workflows/debugging#using_workflowmetadata). + * WorkflowMetadata](https://cloud.google.com/dataproc/docs/concepts/workflows/debugging#using_workflowmetadata). * *

On successful completion, [Operation.response][google.longrunning.Operation.response] will * be [Empty][google.protobuf.Empty]. @@ -802,9 +802,9 @@ public final OperationFuture instantiateInlineWorkflowT * inflight jobs to be cancelled and workflow-owned clusters to be deleted. * *

The [Operation.metadata][google.longrunning.Operation.metadata] will be - * [WorkflowMetadata](/dataproc/docs/reference/rpc/google.cloud.dataproc.v1#workflowmetadata). + * [WorkflowMetadata](https://cloud.google.com/dataproc/docs/reference/rpc/google.cloud.dataproc.v1#workflowmetadata). * Also see [Using - * WorkflowMetadata](/dataproc/docs/concepts/workflows/debugging#using_workflowmetadata). + * WorkflowMetadata](https://cloud.google.com/dataproc/docs/concepts/workflows/debugging#using_workflowmetadata). * *

On successful completion, [Operation.response][google.longrunning.Operation.response] will * be [Empty][google.protobuf.Empty]. @@ -858,9 +858,9 @@ public final OperationFuture instantiateInlineWorkflowT * inflight jobs to be cancelled and workflow-owned clusters to be deleted. * *

The [Operation.metadata][google.longrunning.Operation.metadata] will be - * [WorkflowMetadata](/dataproc/docs/reference/rpc/google.cloud.dataproc.v1#workflowmetadata). + * [WorkflowMetadata](https://cloud.google.com/dataproc/docs/reference/rpc/google.cloud.dataproc.v1#workflowmetadata). * Also see [Using - * WorkflowMetadata](/dataproc/docs/concepts/workflows/debugging#using_workflowmetadata). + * WorkflowMetadata](https://cloud.google.com/dataproc/docs/concepts/workflows/debugging#using_workflowmetadata). * *

On successful completion, [Operation.response][google.longrunning.Operation.response] will * be [Empty][google.protobuf.Empty]. @@ -907,9 +907,9 @@ public final OperationFuture instantiateInlineWorkflowT * inflight jobs to be cancelled and workflow-owned clusters to be deleted. * *

The [Operation.metadata][google.longrunning.Operation.metadata] will be - * [WorkflowMetadata](/dataproc/docs/reference/rpc/google.cloud.dataproc.v1#workflowmetadata). + * [WorkflowMetadata](https://cloud.google.com/dataproc/docs/reference/rpc/google.cloud.dataproc.v1#workflowmetadata). * Also see [Using - * WorkflowMetadata](/dataproc/docs/concepts/workflows/debugging#using_workflowmetadata). + * WorkflowMetadata](https://cloud.google.com/dataproc/docs/concepts/workflows/debugging#using_workflowmetadata). * *

On successful completion, [Operation.response][google.longrunning.Operation.response] will * be [Empty][google.protobuf.Empty]. @@ -954,9 +954,9 @@ public final OperationFuture instantiateInlineWorkflowT * inflight jobs to be cancelled and workflow-owned clusters to be deleted. * *

The [Operation.metadata][google.longrunning.Operation.metadata] will be - * [WorkflowMetadata](/dataproc/docs/reference/rpc/google.cloud.dataproc.v1#workflowmetadata). + * [WorkflowMetadata](https://cloud.google.com/dataproc/docs/reference/rpc/google.cloud.dataproc.v1#workflowmetadata). * Also see [Using - * WorkflowMetadata](/dataproc/docs/concepts/workflows/debugging#using_workflowmetadata). + * WorkflowMetadata](https://cloud.google.com/dataproc/docs/concepts/workflows/debugging#using_workflowmetadata). * *

On successful completion, [Operation.response][google.longrunning.Operation.response] will * be [Empty][google.protobuf.Empty]. diff --git a/grpc-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/ClusterControllerGrpc.java b/grpc-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/ClusterControllerGrpc.java index d35fa1ce..ff2c5246 100644 --- a/grpc-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/ClusterControllerGrpc.java +++ b/grpc-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/ClusterControllerGrpc.java @@ -396,7 +396,7 @@ public abstract static class ClusterControllerImplBase implements io.grpc.Bindab *

      * Creates a cluster in a project. The returned
      * [Operation.metadata][google.longrunning.Operation.metadata] will be
-     * [ClusterOperationMetadata](/dataproc/docs/reference/rpc/google.cloud.dataproc.v1#clusteroperationmetadata).
+     * [ClusterOperationMetadata](https://cloud.google.com/dataproc/docs/reference/rpc/google.cloud.dataproc.v1#clusteroperationmetadata).
      * 
*/ public void createCluster( @@ -411,7 +411,7 @@ public void createCluster( *
      * Updates a cluster in a project. The returned
      * [Operation.metadata][google.longrunning.Operation.metadata] will be
-     * [ClusterOperationMetadata](/dataproc/docs/reference/rpc/google.cloud.dataproc.v1#clusteroperationmetadata).
+     * [ClusterOperationMetadata](https://cloud.google.com/dataproc/docs/reference/rpc/google.cloud.dataproc.v1#clusteroperationmetadata).
      * 
*/ public void updateCluster( @@ -426,7 +426,7 @@ public void updateCluster( *
      * Deletes a cluster in a project. The returned
      * [Operation.metadata][google.longrunning.Operation.metadata] will be
-     * [ClusterOperationMetadata](/dataproc/docs/reference/rpc/google.cloud.dataproc.v1#clusteroperationmetadata).
+     * [ClusterOperationMetadata](https://cloud.google.com/dataproc/docs/reference/rpc/google.cloud.dataproc.v1#clusteroperationmetadata).
      * 
*/ public void deleteCluster( @@ -468,11 +468,11 @@ public void listClusters( *
      * Gets cluster diagnostic information. The returned
      * [Operation.metadata][google.longrunning.Operation.metadata] will be
-     * [ClusterOperationMetadata](/dataproc/docs/reference/rpc/google.cloud.dataproc.v1#clusteroperationmetadata).
+     * [ClusterOperationMetadata](https://cloud.google.com/dataproc/docs/reference/rpc/google.cloud.dataproc.v1#clusteroperationmetadata).
      * After the operation completes,
      * [Operation.response][google.longrunning.Operation.response]
      * contains
-     * [DiagnoseClusterResults](/dataproc/docs/reference/rpc/google.cloud.dataproc.v1#diagnoseclusterresults).
+     * [DiagnoseClusterResults](https://cloud.google.com/dataproc/docs/reference/rpc/google.cloud.dataproc.v1#diagnoseclusterresults).
      * 
*/ public void diagnoseCluster( @@ -555,7 +555,7 @@ protected ClusterControllerStub build( *
      * Creates a cluster in a project. The returned
      * [Operation.metadata][google.longrunning.Operation.metadata] will be
-     * [ClusterOperationMetadata](/dataproc/docs/reference/rpc/google.cloud.dataproc.v1#clusteroperationmetadata).
+     * [ClusterOperationMetadata](https://cloud.google.com/dataproc/docs/reference/rpc/google.cloud.dataproc.v1#clusteroperationmetadata).
      * 
*/ public void createCluster( @@ -573,7 +573,7 @@ public void createCluster( *
      * Updates a cluster in a project. The returned
      * [Operation.metadata][google.longrunning.Operation.metadata] will be
-     * [ClusterOperationMetadata](/dataproc/docs/reference/rpc/google.cloud.dataproc.v1#clusteroperationmetadata).
+     * [ClusterOperationMetadata](https://cloud.google.com/dataproc/docs/reference/rpc/google.cloud.dataproc.v1#clusteroperationmetadata).
      * 
*/ public void updateCluster( @@ -591,7 +591,7 @@ public void updateCluster( *
      * Deletes a cluster in a project. The returned
      * [Operation.metadata][google.longrunning.Operation.metadata] will be
-     * [ClusterOperationMetadata](/dataproc/docs/reference/rpc/google.cloud.dataproc.v1#clusteroperationmetadata).
+     * [ClusterOperationMetadata](https://cloud.google.com/dataproc/docs/reference/rpc/google.cloud.dataproc.v1#clusteroperationmetadata).
      * 
*/ public void deleteCluster( @@ -642,11 +642,11 @@ public void listClusters( *
      * Gets cluster diagnostic information. The returned
      * [Operation.metadata][google.longrunning.Operation.metadata] will be
-     * [ClusterOperationMetadata](/dataproc/docs/reference/rpc/google.cloud.dataproc.v1#clusteroperationmetadata).
+     * [ClusterOperationMetadata](https://cloud.google.com/dataproc/docs/reference/rpc/google.cloud.dataproc.v1#clusteroperationmetadata).
      * After the operation completes,
      * [Operation.response][google.longrunning.Operation.response]
      * contains
-     * [DiagnoseClusterResults](/dataproc/docs/reference/rpc/google.cloud.dataproc.v1#diagnoseclusterresults).
+     * [DiagnoseClusterResults](https://cloud.google.com/dataproc/docs/reference/rpc/google.cloud.dataproc.v1#diagnoseclusterresults).
      * 
*/ public void diagnoseCluster( @@ -690,7 +690,7 @@ protected ClusterControllerBlockingStub build( *
      * Creates a cluster in a project. The returned
      * [Operation.metadata][google.longrunning.Operation.metadata] will be
-     * [ClusterOperationMetadata](/dataproc/docs/reference/rpc/google.cloud.dataproc.v1#clusteroperationmetadata).
+     * [ClusterOperationMetadata](https://cloud.google.com/dataproc/docs/reference/rpc/google.cloud.dataproc.v1#clusteroperationmetadata).
      * 
*/ public com.google.longrunning.Operation createCluster( @@ -705,7 +705,7 @@ public com.google.longrunning.Operation createCluster( *
      * Updates a cluster in a project. The returned
      * [Operation.metadata][google.longrunning.Operation.metadata] will be
-     * [ClusterOperationMetadata](/dataproc/docs/reference/rpc/google.cloud.dataproc.v1#clusteroperationmetadata).
+     * [ClusterOperationMetadata](https://cloud.google.com/dataproc/docs/reference/rpc/google.cloud.dataproc.v1#clusteroperationmetadata).
      * 
*/ public com.google.longrunning.Operation updateCluster( @@ -720,7 +720,7 @@ public com.google.longrunning.Operation updateCluster( *
      * Deletes a cluster in a project. The returned
      * [Operation.metadata][google.longrunning.Operation.metadata] will be
-     * [ClusterOperationMetadata](/dataproc/docs/reference/rpc/google.cloud.dataproc.v1#clusteroperationmetadata).
+     * [ClusterOperationMetadata](https://cloud.google.com/dataproc/docs/reference/rpc/google.cloud.dataproc.v1#clusteroperationmetadata).
      * 
*/ public com.google.longrunning.Operation deleteCluster( @@ -761,11 +761,11 @@ public com.google.cloud.dataproc.v1.ListClustersResponse listClusters( *
      * Gets cluster diagnostic information. The returned
      * [Operation.metadata][google.longrunning.Operation.metadata] will be
-     * [ClusterOperationMetadata](/dataproc/docs/reference/rpc/google.cloud.dataproc.v1#clusteroperationmetadata).
+     * [ClusterOperationMetadata](https://cloud.google.com/dataproc/docs/reference/rpc/google.cloud.dataproc.v1#clusteroperationmetadata).
      * After the operation completes,
      * [Operation.response][google.longrunning.Operation.response]
      * contains
-     * [DiagnoseClusterResults](/dataproc/docs/reference/rpc/google.cloud.dataproc.v1#diagnoseclusterresults).
+     * [DiagnoseClusterResults](https://cloud.google.com/dataproc/docs/reference/rpc/google.cloud.dataproc.v1#diagnoseclusterresults).
      * 
*/ public com.google.longrunning.Operation diagnoseCluster( @@ -805,7 +805,7 @@ protected ClusterControllerFutureStub build( *
      * Creates a cluster in a project. The returned
      * [Operation.metadata][google.longrunning.Operation.metadata] will be
-     * [ClusterOperationMetadata](/dataproc/docs/reference/rpc/google.cloud.dataproc.v1#clusteroperationmetadata).
+     * [ClusterOperationMetadata](https://cloud.google.com/dataproc/docs/reference/rpc/google.cloud.dataproc.v1#clusteroperationmetadata).
      * 
*/ public com.google.common.util.concurrent.ListenableFuture @@ -820,7 +820,7 @@ protected ClusterControllerFutureStub build( *
      * Updates a cluster in a project. The returned
      * [Operation.metadata][google.longrunning.Operation.metadata] will be
-     * [ClusterOperationMetadata](/dataproc/docs/reference/rpc/google.cloud.dataproc.v1#clusteroperationmetadata).
+     * [ClusterOperationMetadata](https://cloud.google.com/dataproc/docs/reference/rpc/google.cloud.dataproc.v1#clusteroperationmetadata).
      * 
*/ public com.google.common.util.concurrent.ListenableFuture @@ -835,7 +835,7 @@ protected ClusterControllerFutureStub build( *
      * Deletes a cluster in a project. The returned
      * [Operation.metadata][google.longrunning.Operation.metadata] will be
-     * [ClusterOperationMetadata](/dataproc/docs/reference/rpc/google.cloud.dataproc.v1#clusteroperationmetadata).
+     * [ClusterOperationMetadata](https://cloud.google.com/dataproc/docs/reference/rpc/google.cloud.dataproc.v1#clusteroperationmetadata).
      * 
*/ public com.google.common.util.concurrent.ListenableFuture @@ -877,11 +877,11 @@ protected ClusterControllerFutureStub build( *
      * Gets cluster diagnostic information. The returned
      * [Operation.metadata][google.longrunning.Operation.metadata] will be
-     * [ClusterOperationMetadata](/dataproc/docs/reference/rpc/google.cloud.dataproc.v1#clusteroperationmetadata).
+     * [ClusterOperationMetadata](https://cloud.google.com/dataproc/docs/reference/rpc/google.cloud.dataproc.v1#clusteroperationmetadata).
      * After the operation completes,
      * [Operation.response][google.longrunning.Operation.response]
      * contains
-     * [DiagnoseClusterResults](/dataproc/docs/reference/rpc/google.cloud.dataproc.v1#diagnoseclusterresults).
+     * [DiagnoseClusterResults](https://cloud.google.com/dataproc/docs/reference/rpc/google.cloud.dataproc.v1#diagnoseclusterresults).
      * 
*/ public com.google.common.util.concurrent.ListenableFuture diff --git a/grpc-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/JobControllerGrpc.java b/grpc-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/JobControllerGrpc.java index 3f0e79f4..0e99fef9 100644 --- a/grpc-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/JobControllerGrpc.java +++ b/grpc-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/JobControllerGrpc.java @@ -434,9 +434,9 @@ public void updateJob( *
      * Starts a job cancellation request. To access the job resource
      * after cancellation, call
-     * [regions/{region}/jobs.list](/dataproc/docs/reference/rest/v1/projects.regions.jobs/list)
+     * [regions/{region}/jobs.list](https://cloud.google.com/dataproc/docs/reference/rest/v1/projects.regions.jobs/list)
      * or
-     * [regions/{region}/jobs.get](/dataproc/docs/reference/rest/v1/projects.regions.jobs/get).
+     * [regions/{region}/jobs.get](https://cloud.google.com/dataproc/docs/reference/rest/v1/projects.regions.jobs/get).
      * 
*/ public void cancelJob( @@ -594,9 +594,9 @@ public void updateJob( *
      * Starts a job cancellation request. To access the job resource
      * after cancellation, call
-     * [regions/{region}/jobs.list](/dataproc/docs/reference/rest/v1/projects.regions.jobs/list)
+     * [regions/{region}/jobs.list](https://cloud.google.com/dataproc/docs/reference/rest/v1/projects.regions.jobs/list)
      * or
-     * [regions/{region}/jobs.get](/dataproc/docs/reference/rest/v1/projects.regions.jobs/get).
+     * [regions/{region}/jobs.get](https://cloud.google.com/dataproc/docs/reference/rest/v1/projects.regions.jobs/get).
      * 
*/ public void cancelJob( @@ -703,9 +703,9 @@ public com.google.cloud.dataproc.v1.Job updateJob( *
      * Starts a job cancellation request. To access the job resource
      * after cancellation, call
-     * [regions/{region}/jobs.list](/dataproc/docs/reference/rest/v1/projects.regions.jobs/list)
+     * [regions/{region}/jobs.list](https://cloud.google.com/dataproc/docs/reference/rest/v1/projects.regions.jobs/list)
      * or
-     * [regions/{region}/jobs.get](/dataproc/docs/reference/rest/v1/projects.regions.jobs/get).
+     * [regions/{region}/jobs.get](https://cloud.google.com/dataproc/docs/reference/rest/v1/projects.regions.jobs/get).
      * 
*/ public com.google.cloud.dataproc.v1.Job cancelJob( @@ -809,9 +809,9 @@ protected JobControllerFutureStub build( *
      * Starts a job cancellation request. To access the job resource
      * after cancellation, call
-     * [regions/{region}/jobs.list](/dataproc/docs/reference/rest/v1/projects.regions.jobs/list)
+     * [regions/{region}/jobs.list](https://cloud.google.com/dataproc/docs/reference/rest/v1/projects.regions.jobs/list)
      * or
-     * [regions/{region}/jobs.get](/dataproc/docs/reference/rest/v1/projects.regions.jobs/get).
+     * [regions/{region}/jobs.get](https://cloud.google.com/dataproc/docs/reference/rest/v1/projects.regions.jobs/get).
      * 
*/ public com.google.common.util.concurrent.ListenableFuture diff --git a/grpc-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/WorkflowTemplateServiceGrpc.java b/grpc-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/WorkflowTemplateServiceGrpc.java index ed8c3f82..186c91ce 100644 --- a/grpc-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/WorkflowTemplateServiceGrpc.java +++ b/grpc-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/WorkflowTemplateServiceGrpc.java @@ -555,9 +555,9 @@ public void getWorkflowTemplate( * This will cause any inflight jobs to be cancelled and workflow-owned * clusters to be deleted. * The [Operation.metadata][google.longrunning.Operation.metadata] will be - * [WorkflowMetadata](/dataproc/docs/reference/rpc/google.cloud.dataproc.v1#workflowmetadata). + * [WorkflowMetadata](https://cloud.google.com/dataproc/docs/reference/rpc/google.cloud.dataproc.v1#workflowmetadata). * Also see [Using - * WorkflowMetadata](/dataproc/docs/concepts/workflows/debugging#using_workflowmetadata). + * WorkflowMetadata](https://cloud.google.com/dataproc/docs/concepts/workflows/debugging#using_workflowmetadata). * On successful completion, * [Operation.response][google.longrunning.Operation.response] will be * [Empty][google.protobuf.Empty]. @@ -586,9 +586,9 @@ public void instantiateWorkflowTemplate( * This will cause any inflight jobs to be cancelled and workflow-owned * clusters to be deleted. * The [Operation.metadata][google.longrunning.Operation.metadata] will be - * [WorkflowMetadata](/dataproc/docs/reference/rpc/google.cloud.dataproc.v1#workflowmetadata). + * [WorkflowMetadata](https://cloud.google.com/dataproc/docs/reference/rpc/google.cloud.dataproc.v1#workflowmetadata). * Also see [Using - * WorkflowMetadata](/dataproc/docs/concepts/workflows/debugging#using_workflowmetadata). 
+ * WorkflowMetadata](https://cloud.google.com/dataproc/docs/concepts/workflows/debugging#using_workflowmetadata). * On successful completion, * [Operation.response][google.longrunning.Operation.response] will be * [Empty][google.protobuf.Empty]. @@ -772,9 +772,9 @@ public void getWorkflowTemplate( * This will cause any inflight jobs to be cancelled and workflow-owned * clusters to be deleted. * The [Operation.metadata][google.longrunning.Operation.metadata] will be - * [WorkflowMetadata](/dataproc/docs/reference/rpc/google.cloud.dataproc.v1#workflowmetadata). + * [WorkflowMetadata](https://cloud.google.com/dataproc/docs/reference/rpc/google.cloud.dataproc.v1#workflowmetadata). * Also see [Using - * WorkflowMetadata](/dataproc/docs/concepts/workflows/debugging#using_workflowmetadata). + * WorkflowMetadata](https://cloud.google.com/dataproc/docs/concepts/workflows/debugging#using_workflowmetadata). * On successful completion, * [Operation.response][google.longrunning.Operation.response] will be * [Empty][google.protobuf.Empty]. @@ -806,9 +806,9 @@ public void instantiateWorkflowTemplate( * This will cause any inflight jobs to be cancelled and workflow-owned * clusters to be deleted. * The [Operation.metadata][google.longrunning.Operation.metadata] will be - * [WorkflowMetadata](/dataproc/docs/reference/rpc/google.cloud.dataproc.v1#workflowmetadata). + * [WorkflowMetadata](https://cloud.google.com/dataproc/docs/reference/rpc/google.cloud.dataproc.v1#workflowmetadata). * Also see [Using - * WorkflowMetadata](/dataproc/docs/concepts/workflows/debugging#using_workflowmetadata). + * WorkflowMetadata](https://cloud.google.com/dataproc/docs/concepts/workflows/debugging#using_workflowmetadata). * On successful completion, * [Operation.response][google.longrunning.Operation.response] will be * [Empty][google.protobuf.Empty]. 
@@ -943,9 +943,9 @@ public com.google.cloud.dataproc.v1.WorkflowTemplate getWorkflowTemplate( * This will cause any inflight jobs to be cancelled and workflow-owned * clusters to be deleted. * The [Operation.metadata][google.longrunning.Operation.metadata] will be - * [WorkflowMetadata](/dataproc/docs/reference/rpc/google.cloud.dataproc.v1#workflowmetadata). + * [WorkflowMetadata](https://cloud.google.com/dataproc/docs/reference/rpc/google.cloud.dataproc.v1#workflowmetadata). * Also see [Using - * WorkflowMetadata](/dataproc/docs/concepts/workflows/debugging#using_workflowmetadata). + * WorkflowMetadata](https://cloud.google.com/dataproc/docs/concepts/workflows/debugging#using_workflowmetadata). * On successful completion, * [Operation.response][google.longrunning.Operation.response] will be * [Empty][google.protobuf.Empty]. @@ -974,9 +974,9 @@ public com.google.longrunning.Operation instantiateWorkflowTemplate( * This will cause any inflight jobs to be cancelled and workflow-owned * clusters to be deleted. * The [Operation.metadata][google.longrunning.Operation.metadata] will be - * [WorkflowMetadata](/dataproc/docs/reference/rpc/google.cloud.dataproc.v1#workflowmetadata). + * [WorkflowMetadata](https://cloud.google.com/dataproc/docs/reference/rpc/google.cloud.dataproc.v1#workflowmetadata). * Also see [Using - * WorkflowMetadata](/dataproc/docs/concepts/workflows/debugging#using_workflowmetadata). + * WorkflowMetadata](https://cloud.google.com/dataproc/docs/concepts/workflows/debugging#using_workflowmetadata). * On successful completion, * [Operation.response][google.longrunning.Operation.response] will be * [Empty][google.protobuf.Empty]. @@ -1101,9 +1101,9 @@ protected WorkflowTemplateServiceFutureStub build( * This will cause any inflight jobs to be cancelled and workflow-owned * clusters to be deleted. 
* The [Operation.metadata][google.longrunning.Operation.metadata] will be - * [WorkflowMetadata](/dataproc/docs/reference/rpc/google.cloud.dataproc.v1#workflowmetadata). + * [WorkflowMetadata](https://cloud.google.com/dataproc/docs/reference/rpc/google.cloud.dataproc.v1#workflowmetadata). * Also see [Using - * WorkflowMetadata](/dataproc/docs/concepts/workflows/debugging#using_workflowmetadata). + * WorkflowMetadata](https://cloud.google.com/dataproc/docs/concepts/workflows/debugging#using_workflowmetadata). * On successful completion, * [Operation.response][google.longrunning.Operation.response] will be * [Empty][google.protobuf.Empty]. @@ -1134,9 +1134,9 @@ protected WorkflowTemplateServiceFutureStub build( * This will cause any inflight jobs to be cancelled and workflow-owned * clusters to be deleted. * The [Operation.metadata][google.longrunning.Operation.metadata] will be - * [WorkflowMetadata](/dataproc/docs/reference/rpc/google.cloud.dataproc.v1#workflowmetadata). + * [WorkflowMetadata](https://cloud.google.com/dataproc/docs/reference/rpc/google.cloud.dataproc.v1#workflowmetadata). * Also see [Using - * WorkflowMetadata](/dataproc/docs/concepts/workflows/debugging#using_workflowmetadata). + * WorkflowMetadata](https://cloud.google.com/dataproc/docs/concepts/workflows/debugging#using_workflowmetadata). * On successful completion, * [Operation.response][google.longrunning.Operation.response] will be * [Empty][google.protobuf.Empty]. 
diff --git a/proto-google-cloud-dataproc-v1/clirr-ignored-differences.xml b/proto-google-cloud-dataproc-v1/clirr-ignored-differences.xml new file mode 100644 index 00000000..fa53d8b6 --- /dev/null +++ b/proto-google-cloud-dataproc-v1/clirr-ignored-differences.xml @@ -0,0 +1,19 @@ + + + + + 7012 + com/google/cloud/dataproc/v1/*OrBuilder + * get*(*) + + + 7012 + com/google/cloud/dataproc/v1/*OrBuilder + boolean contains*(*) + + + 7012 + com/google/cloud/dataproc/v1/*OrBuilder + boolean has*(*) + + diff --git a/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/ClusterConfig.java b/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/ClusterConfig.java index 777305d3..07936bde 100644 --- a/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/ClusterConfig.java +++ b/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/ClusterConfig.java @@ -203,6 +203,22 @@ private ClusterConfig( securityConfig_ = subBuilder.buildPartial(); } + break; + } + case 138: + { + com.google.cloud.dataproc.v1.LifecycleConfig.Builder subBuilder = null; + if (lifecycleConfig_ != null) { + subBuilder = lifecycleConfig_.toBuilder(); + } + lifecycleConfig_ = + input.readMessage( + com.google.cloud.dataproc.v1.LifecycleConfig.parser(), extensionRegistry); + if (subBuilder != null) { + subBuilder.mergeFrom(lifecycleConfig_); + lifecycleConfig_ = subBuilder.buildPartial(); + } + break; } case 146: @@ -877,6 +893,57 @@ public com.google.cloud.dataproc.v1.SecurityConfigOrBuilder getSecurityConfigOrB return getSecurityConfig(); } + public static final int LIFECYCLE_CONFIG_FIELD_NUMBER = 17; + private com.google.cloud.dataproc.v1.LifecycleConfig lifecycleConfig_; + /** + * + * + *
+   * Optional. Lifecycle setting for the cluster.
+   * 
+ * + * + * .google.cloud.dataproc.v1.LifecycleConfig lifecycle_config = 17 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return Whether the lifecycleConfig field is set. + */ + public boolean hasLifecycleConfig() { + return lifecycleConfig_ != null; + } + /** + * + * + *
+   * Optional. Lifecycle setting for the cluster.
+   * 
+ * + * + * .google.cloud.dataproc.v1.LifecycleConfig lifecycle_config = 17 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return The lifecycleConfig. + */ + public com.google.cloud.dataproc.v1.LifecycleConfig getLifecycleConfig() { + return lifecycleConfig_ == null + ? com.google.cloud.dataproc.v1.LifecycleConfig.getDefaultInstance() + : lifecycleConfig_; + } + /** + * + * + *
+   * Optional. Lifecycle setting for the cluster.
+   * 
+ * + * + * .google.cloud.dataproc.v1.LifecycleConfig lifecycle_config = 17 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public com.google.cloud.dataproc.v1.LifecycleConfigOrBuilder getLifecycleConfigOrBuilder() { + return getLifecycleConfig(); + } + private byte memoizedIsInitialized = -1; @java.lang.Override @@ -918,6 +985,9 @@ public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io if (securityConfig_ != null) { output.writeMessage(16, getSecurityConfig()); } + if (lifecycleConfig_ != null) { + output.writeMessage(17, getLifecycleConfig()); + } if (autoscalingConfig_ != null) { output.writeMessage(18, getAutoscalingConfig()); } @@ -960,6 +1030,9 @@ public int getSerializedSize() { if (securityConfig_ != null) { size += com.google.protobuf.CodedOutputStream.computeMessageSize(16, getSecurityConfig()); } + if (lifecycleConfig_ != null) { + size += com.google.protobuf.CodedOutputStream.computeMessageSize(17, getLifecycleConfig()); + } if (autoscalingConfig_ != null) { size += com.google.protobuf.CodedOutputStream.computeMessageSize(18, getAutoscalingConfig()); } @@ -1013,6 +1086,10 @@ public boolean equals(final java.lang.Object obj) { if (hasSecurityConfig()) { if (!getSecurityConfig().equals(other.getSecurityConfig())) return false; } + if (hasLifecycleConfig() != other.hasLifecycleConfig()) return false; + if (hasLifecycleConfig()) { + if (!getLifecycleConfig().equals(other.getLifecycleConfig())) return false; + } if (!unknownFields.equals(other.unknownFields)) return false; return true; } @@ -1062,6 +1139,10 @@ public int hashCode() { hash = (37 * hash) + SECURITY_CONFIG_FIELD_NUMBER; hash = (53 * hash) + getSecurityConfig().hashCode(); } + if (hasLifecycleConfig()) { + hash = (37 * hash) + LIFECYCLE_CONFIG_FIELD_NUMBER; + hash = (53 * hash) + getLifecycleConfig().hashCode(); + } hash = (29 * hash) + unknownFields.hashCode(); memoizedHashCode = hash; return hash; @@ -1265,6 +1346,12 @@ public Builder clear() { securityConfig_ = 
null; securityConfigBuilder_ = null; } + if (lifecycleConfigBuilder_ == null) { + lifecycleConfig_ = null; + } else { + lifecycleConfig_ = null; + lifecycleConfigBuilder_ = null; + } return this; } @@ -1343,6 +1430,11 @@ public com.google.cloud.dataproc.v1.ClusterConfig buildPartial() { } else { result.securityConfig_ = securityConfigBuilder_.build(); } + if (lifecycleConfigBuilder_ == null) { + result.lifecycleConfig_ = lifecycleConfig_; + } else { + result.lifecycleConfig_ = lifecycleConfigBuilder_.build(); + } onBuilt(); return result; } @@ -1447,6 +1539,9 @@ public Builder mergeFrom(com.google.cloud.dataproc.v1.ClusterConfig other) { if (other.hasSecurityConfig()) { mergeSecurityConfig(other.getSecurityConfig()); } + if (other.hasLifecycleConfig()) { + mergeLifecycleConfig(other.getLifecycleConfig()); + } this.mergeUnknownFields(other.unknownFields); onChanged(); return this; @@ -3905,6 +4000,210 @@ public com.google.cloud.dataproc.v1.SecurityConfigOrBuilder getSecurityConfigOrB return securityConfigBuilder_; } + private com.google.cloud.dataproc.v1.LifecycleConfig lifecycleConfig_; + private com.google.protobuf.SingleFieldBuilderV3< + com.google.cloud.dataproc.v1.LifecycleConfig, + com.google.cloud.dataproc.v1.LifecycleConfig.Builder, + com.google.cloud.dataproc.v1.LifecycleConfigOrBuilder> + lifecycleConfigBuilder_; + /** + * + * + *
+     * Optional. Lifecycle setting for the cluster.
+     * 
+ * + * + * .google.cloud.dataproc.v1.LifecycleConfig lifecycle_config = 17 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return Whether the lifecycleConfig field is set. + */ + public boolean hasLifecycleConfig() { + return lifecycleConfigBuilder_ != null || lifecycleConfig_ != null; + } + /** + * + * + *
+     * Optional. Lifecycle setting for the cluster.
+     * 
+ * + * + * .google.cloud.dataproc.v1.LifecycleConfig lifecycle_config = 17 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return The lifecycleConfig. + */ + public com.google.cloud.dataproc.v1.LifecycleConfig getLifecycleConfig() { + if (lifecycleConfigBuilder_ == null) { + return lifecycleConfig_ == null + ? com.google.cloud.dataproc.v1.LifecycleConfig.getDefaultInstance() + : lifecycleConfig_; + } else { + return lifecycleConfigBuilder_.getMessage(); + } + } + /** + * + * + *
+     * Optional. Lifecycle setting for the cluster.
+     * 
+ * + * + * .google.cloud.dataproc.v1.LifecycleConfig lifecycle_config = 17 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder setLifecycleConfig(com.google.cloud.dataproc.v1.LifecycleConfig value) { + if (lifecycleConfigBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + lifecycleConfig_ = value; + onChanged(); + } else { + lifecycleConfigBuilder_.setMessage(value); + } + + return this; + } + /** + * + * + *
+     * Optional. Lifecycle setting for the cluster.
+     * 
+ * + * + * .google.cloud.dataproc.v1.LifecycleConfig lifecycle_config = 17 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder setLifecycleConfig( + com.google.cloud.dataproc.v1.LifecycleConfig.Builder builderForValue) { + if (lifecycleConfigBuilder_ == null) { + lifecycleConfig_ = builderForValue.build(); + onChanged(); + } else { + lifecycleConfigBuilder_.setMessage(builderForValue.build()); + } + + return this; + } + /** + * + * + *
+     * Optional. Lifecycle setting for the cluster.
+     * 
+ * + * + * .google.cloud.dataproc.v1.LifecycleConfig lifecycle_config = 17 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder mergeLifecycleConfig(com.google.cloud.dataproc.v1.LifecycleConfig value) { + if (lifecycleConfigBuilder_ == null) { + if (lifecycleConfig_ != null) { + lifecycleConfig_ = + com.google.cloud.dataproc.v1.LifecycleConfig.newBuilder(lifecycleConfig_) + .mergeFrom(value) + .buildPartial(); + } else { + lifecycleConfig_ = value; + } + onChanged(); + } else { + lifecycleConfigBuilder_.mergeFrom(value); + } + + return this; + } + /** + * + * + *
+     * Optional. Lifecycle setting for the cluster.
+     * 
+ * + * + * .google.cloud.dataproc.v1.LifecycleConfig lifecycle_config = 17 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder clearLifecycleConfig() { + if (lifecycleConfigBuilder_ == null) { + lifecycleConfig_ = null; + onChanged(); + } else { + lifecycleConfig_ = null; + lifecycleConfigBuilder_ = null; + } + + return this; + } + /** + * + * + *
+     * Optional. Lifecycle setting for the cluster.
+     * 
+ * + * + * .google.cloud.dataproc.v1.LifecycleConfig lifecycle_config = 17 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public com.google.cloud.dataproc.v1.LifecycleConfig.Builder getLifecycleConfigBuilder() { + + onChanged(); + return getLifecycleConfigFieldBuilder().getBuilder(); + } + /** + * + * + *
+     * Optional. Lifecycle setting for the cluster.
+     * 
+ * + * + * .google.cloud.dataproc.v1.LifecycleConfig lifecycle_config = 17 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public com.google.cloud.dataproc.v1.LifecycleConfigOrBuilder getLifecycleConfigOrBuilder() { + if (lifecycleConfigBuilder_ != null) { + return lifecycleConfigBuilder_.getMessageOrBuilder(); + } else { + return lifecycleConfig_ == null + ? com.google.cloud.dataproc.v1.LifecycleConfig.getDefaultInstance() + : lifecycleConfig_; + } + } + /** + * + * + *
+     * Optional. Lifecycle setting for the cluster.
+     * 
+ * + * + * .google.cloud.dataproc.v1.LifecycleConfig lifecycle_config = 17 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + private com.google.protobuf.SingleFieldBuilderV3< + com.google.cloud.dataproc.v1.LifecycleConfig, + com.google.cloud.dataproc.v1.LifecycleConfig.Builder, + com.google.cloud.dataproc.v1.LifecycleConfigOrBuilder> + getLifecycleConfigFieldBuilder() { + if (lifecycleConfigBuilder_ == null) { + lifecycleConfigBuilder_ = + new com.google.protobuf.SingleFieldBuilderV3< + com.google.cloud.dataproc.v1.LifecycleConfig, + com.google.cloud.dataproc.v1.LifecycleConfig.Builder, + com.google.cloud.dataproc.v1.LifecycleConfigOrBuilder>( + getLifecycleConfig(), getParentForChildren(), isClean()); + lifecycleConfig_ = null; + } + return lifecycleConfigBuilder_; + } + @java.lang.Override public final Builder setUnknownFields(final com.google.protobuf.UnknownFieldSet unknownFields) { return super.setUnknownFields(unknownFields); diff --git a/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/ClusterConfigOrBuilder.java b/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/ClusterConfigOrBuilder.java index 6f4f78ae..0cc4b2d4 100644 --- a/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/ClusterConfigOrBuilder.java +++ b/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/ClusterConfigOrBuilder.java @@ -525,4 +525,45 @@ com.google.cloud.dataproc.v1.NodeInitializationActionOrBuilder getInitialization * */ com.google.cloud.dataproc.v1.SecurityConfigOrBuilder getSecurityConfigOrBuilder(); + + /** + * + * + *
+   * Optional. Lifecycle setting for the cluster.
+   * 
+ * + * + * .google.cloud.dataproc.v1.LifecycleConfig lifecycle_config = 17 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return Whether the lifecycleConfig field is set. + */ + boolean hasLifecycleConfig(); + /** + * + * + *
+   * Optional. Lifecycle setting for the cluster.
+   * 
+ * + * + * .google.cloud.dataproc.v1.LifecycleConfig lifecycle_config = 17 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return The lifecycleConfig. + */ + com.google.cloud.dataproc.v1.LifecycleConfig getLifecycleConfig(); + /** + * + * + *
+   * Optional. Lifecycle setting for the cluster.
+   * 
+ * + * + * .google.cloud.dataproc.v1.LifecycleConfig lifecycle_config = 17 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + com.google.cloud.dataproc.v1.LifecycleConfigOrBuilder getLifecycleConfigOrBuilder(); } diff --git a/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/ClusterStatus.java b/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/ClusterStatus.java index 78b0eec8..603dd511 100644 --- a/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/ClusterStatus.java +++ b/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/ClusterStatus.java @@ -619,7 +619,8 @@ public com.google.protobuf.ByteString getDetailBytes() { * * *
-   * Output only. Time when this state was entered.
+   * Output only. Time when this state was entered (see JSON representation of
+   * [Timestamp](https://developers.google.com/protocol-buffers/docs/proto3#json)).
    * 
* * @@ -635,7 +636,8 @@ public boolean hasStateStartTime() { * * *
-   * Output only. Time when this state was entered.
+   * Output only. Time when this state was entered (see JSON representation of
+   * [Timestamp](https://developers.google.com/protocol-buffers/docs/proto3#json)).
    * 
* * @@ -653,7 +655,8 @@ public com.google.protobuf.Timestamp getStateStartTime() { * * *
-   * Output only. Time when this state was entered.
+   * Output only. Time when this state was entered (see JSON representation of
+   * [Timestamp](https://developers.google.com/protocol-buffers/docs/proto3#json)).
    * 
* * @@ -1306,7 +1309,8 @@ public Builder setDetailBytes(com.google.protobuf.ByteString value) { * * *
-     * Output only. Time when this state was entered.
+     * Output only. Time when this state was entered (see JSON representation of
+     * [Timestamp](https://developers.google.com/protocol-buffers/docs/proto3#json)).
      * 
* * @@ -1322,7 +1326,8 @@ public boolean hasStateStartTime() { * * *
-     * Output only. Time when this state was entered.
+     * Output only. Time when this state was entered (see JSON representation of
+     * [Timestamp](https://developers.google.com/protocol-buffers/docs/proto3#json)).
      * 
* * @@ -1344,7 +1349,8 @@ public com.google.protobuf.Timestamp getStateStartTime() { * * *
-     * Output only. Time when this state was entered.
+     * Output only. Time when this state was entered (see JSON representation of
+     * [Timestamp](https://developers.google.com/protocol-buffers/docs/proto3#json)).
      * 
* * @@ -1368,7 +1374,8 @@ public Builder setStateStartTime(com.google.protobuf.Timestamp value) { * * *
-     * Output only. Time when this state was entered.
+     * Output only. Time when this state was entered (see JSON representation of
+     * [Timestamp](https://developers.google.com/protocol-buffers/docs/proto3#json)).
      * 
* * @@ -1389,7 +1396,8 @@ public Builder setStateStartTime(com.google.protobuf.Timestamp.Builder builderFo * * *
-     * Output only. Time when this state was entered.
+     * Output only. Time when this state was entered (see JSON representation of
+     * [Timestamp](https://developers.google.com/protocol-buffers/docs/proto3#json)).
      * 
* * @@ -1417,7 +1425,8 @@ public Builder mergeStateStartTime(com.google.protobuf.Timestamp value) { * * *
-     * Output only. Time when this state was entered.
+     * Output only. Time when this state was entered (see JSON representation of
+     * [Timestamp](https://developers.google.com/protocol-buffers/docs/proto3#json)).
      * 
* * @@ -1439,7 +1448,8 @@ public Builder clearStateStartTime() { * * *
-     * Output only. Time when this state was entered.
+     * Output only. Time when this state was entered (see JSON representation of
+     * [Timestamp](https://developers.google.com/protocol-buffers/docs/proto3#json)).
      * 
* * @@ -1455,7 +1465,8 @@ public com.google.protobuf.Timestamp.Builder getStateStartTimeBuilder() { * * *
-     * Output only. Time when this state was entered.
+     * Output only. Time when this state was entered (see JSON representation of
+     * [Timestamp](https://developers.google.com/protocol-buffers/docs/proto3#json)).
      * 
* * @@ -1475,7 +1486,8 @@ public com.google.protobuf.TimestampOrBuilder getStateStartTimeOrBuilder() { * * *
-     * Output only. Time when this state was entered.
+     * Output only. Time when this state was entered (see JSON representation of
+     * [Timestamp](https://developers.google.com/protocol-buffers/docs/proto3#json)).
      * 
* * diff --git a/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/ClusterStatusOrBuilder.java b/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/ClusterStatusOrBuilder.java index 87693046..9abedcb6 100644 --- a/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/ClusterStatusOrBuilder.java +++ b/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/ClusterStatusOrBuilder.java @@ -85,7 +85,8 @@ public interface ClusterStatusOrBuilder * * *
-   * Output only. Time when this state was entered.
+   * Output only. Time when this state was entered (see JSON representation of
+   * [Timestamp](https://developers.google.com/protocol-buffers/docs/proto3#json)).
    * 
* * @@ -99,7 +100,8 @@ public interface ClusterStatusOrBuilder * * *
-   * Output only. Time when this state was entered.
+   * Output only. Time when this state was entered (see JSON representation of
+   * [Timestamp](https://developers.google.com/protocol-buffers/docs/proto3#json)).
    * 
* * @@ -113,7 +115,8 @@ public interface ClusterStatusOrBuilder * * *
-   * Output only. Time when this state was entered.
+   * Output only. Time when this state was entered (see JSON representation of
+   * [Timestamp](https://developers.google.com/protocol-buffers/docs/proto3#json)).
    * 
* * diff --git a/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/ClustersProto.java b/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/ClustersProto.java index 54bb2249..b87b3151 100644 --- a/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/ClustersProto.java +++ b/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/ClustersProto.java @@ -95,6 +95,10 @@ public static void registerAllExtensions(com.google.protobuf.ExtensionRegistry r internal_static_google_cloud_dataproc_v1_SoftwareConfig_PropertiesEntry_descriptor; static final com.google.protobuf.GeneratedMessageV3.FieldAccessorTable internal_static_google_cloud_dataproc_v1_SoftwareConfig_PropertiesEntry_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_cloud_dataproc_v1_LifecycleConfig_descriptor; + static final com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internal_static_google_cloud_dataproc_v1_LifecycleConfig_fieldAccessorTable; static final com.google.protobuf.Descriptors.Descriptor internal_static_google_cloud_dataproc_v1_ClusterMetrics_descriptor; static final com.google.protobuf.GeneratedMessageV3.FieldAccessorTable @@ -139,6 +143,10 @@ public static void registerAllExtensions(com.google.protobuf.ExtensionRegistry r internal_static_google_cloud_dataproc_v1_DiagnoseClusterResults_descriptor; static final com.google.protobuf.GeneratedMessageV3.FieldAccessorTable internal_static_google_cloud_dataproc_v1_DiagnoseClusterResults_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_cloud_dataproc_v1_ReservationAffinity_descriptor; + static final com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internal_static_google_cloud_dataproc_v1_ReservationAffinity_fieldAccessorTable; public static com.google.protobuf.Descriptors.FileDescriptor getDescriptor() { return descriptor; @@ -151,184 +159,199 
@@ public static com.google.protobuf.Descriptors.FileDescriptor getDescriptor() { "\n\'google/cloud/dataproc/v1/clusters.prot" + "o\022\030google.cloud.dataproc.v1\032\034google/api/" + "annotations.proto\032\027google/api/client.pro" - + "to\032\037google/api/field_behavior.proto\032)goo" - + "gle/cloud/dataproc/v1/operations.proto\032%" - + "google/cloud/dataproc/v1/shared.proto\032#g" - + "oogle/longrunning/operations.proto\032\036goog" - + "le/protobuf/duration.proto\032 google/proto" - + "buf/field_mask.proto\032\037google/protobuf/ti" - + "mestamp.proto\"\310\003\n\007Cluster\022\027\n\nproject_id\030" - + "\001 \001(\tB\003\340A\002\022\031\n\014cluster_name\030\002 \001(\tB\003\340A\002\022<\n" - + "\006config\030\003 \001(\0132\'.google.cloud.dataproc.v1" - + ".ClusterConfigB\003\340A\002\022B\n\006labels\030\010 \003(\0132-.go" - + "ogle.cloud.dataproc.v1.Cluster.LabelsEnt" - + "ryB\003\340A\001\022<\n\006status\030\004 \001(\0132\'.google.cloud.d" - + "ataproc.v1.ClusterStatusB\003\340A\003\022D\n\016status_" - + "history\030\007 \003(\0132\'.google.cloud.dataproc.v1" - + ".ClusterStatusB\003\340A\003\022\031\n\014cluster_uuid\030\006 \001(" - + "\tB\003\340A\003\0229\n\007metrics\030\t \001(\0132(.google.cloud.d" - + "ataproc.v1.ClusterMetrics\032-\n\013LabelsEntry" - + "\022\013\n\003key\030\001 \001(\t\022\r\n\005value\030\002 \001(\t:\0028\001\"\346\005\n\rClu" - + "sterConfig\022\032\n\rconfig_bucket\030\001 \001(\tB\003\340A\001\022K" - + "\n\022gce_cluster_config\030\010 \001(\0132*.google.clou" - + "d.dataproc.v1.GceClusterConfigB\003\340A\001\022I\n\rm" - + "aster_config\030\t \001(\0132-.google.cloud.datapr" - + "oc.v1.InstanceGroupConfigB\003\340A\001\022I\n\rworker" - + "_config\030\n \001(\0132-.google.cloud.dataproc.v1" - + ".InstanceGroupConfigB\003\340A\001\022S\n\027secondary_w" - + "orker_config\030\014 \001(\0132-.google.cloud.datapr" - + "oc.v1.InstanceGroupConfigB\003\340A\001\022F\n\017softwa" - + "re_config\030\r 
\001(\0132(.google.cloud.dataproc." - + "v1.SoftwareConfigB\003\340A\001\022W\n\026initialization" - + "_actions\030\013 \003(\01322.google.cloud.dataproc.v" - + "1.NodeInitializationActionB\003\340A\001\022J\n\021encry" - + "ption_config\030\017 \001(\0132*.google.cloud.datapr" - + "oc.v1.EncryptionConfigB\003\340A\001\022L\n\022autoscali" - + "ng_config\030\022 \001(\0132+.google.cloud.dataproc." - + "v1.AutoscalingConfigB\003\340A\001\022F\n\017security_co" - + "nfig\030\020 \001(\0132(.google.cloud.dataproc.v1.Se" - + "curityConfigB\003\340A\001\",\n\021AutoscalingConfig\022\027" - + "\n\npolicy_uri\030\001 \001(\tB\003\340A\001\"4\n\020EncryptionCon" - + "fig\022 \n\023gce_pd_kms_key_name\030\001 \001(\tB\003\340A\001\"\315\002" - + "\n\020GceClusterConfig\022\025\n\010zone_uri\030\001 \001(\tB\003\340A" - + "\001\022\030\n\013network_uri\030\002 \001(\tB\003\340A\001\022\033\n\016subnetwor" - + "k_uri\030\006 \001(\tB\003\340A\001\022\035\n\020internal_ip_only\030\007 \001" - + "(\010B\003\340A\001\022\034\n\017service_account\030\010 \001(\tB\003\340A\001\022#\n" - + "\026service_account_scopes\030\003 \003(\tB\003\340A\001\022\014\n\004ta" - + "gs\030\004 \003(\t\022J\n\010metadata\030\005 \003(\01328.google.clou" - + "d.dataproc.v1.GceClusterConfig.MetadataE" - + "ntry\032/\n\rMetadataEntry\022\013\n\003key\030\001 \001(\t\022\r\n\005va" - + "lue\030\002 \001(\t:\0028\001\"\232\003\n\023InstanceGroupConfig\022\032\n" - + "\rnum_instances\030\001 \001(\005B\003\340A\001\022\033\n\016instance_na" - + "mes\030\002 \003(\tB\003\340A\003\022\026\n\timage_uri\030\003 \001(\tB\003\340A\001\022\035" - + "\n\020machine_type_uri\030\004 \001(\tB\003\340A\001\022>\n\013disk_co" - + "nfig\030\005 \001(\0132$.google.cloud.dataproc.v1.Di" - + "skConfigB\003\340A\001\022\033\n\016is_preemptible\030\006 \001(\010B\003\340" - + "A\001\022O\n\024managed_group_config\030\007 \001(\0132,.googl" - + "e.cloud.dataproc.v1.ManagedGroupConfigB\003" - + 
"\340A\003\022F\n\014accelerators\030\010 \003(\0132+.google.cloud" - + ".dataproc.v1.AcceleratorConfigB\003\340A\001\022\035\n\020m" - + "in_cpu_platform\030\t \001(\tB\003\340A\001\"c\n\022ManagedGro" - + "upConfig\022#\n\026instance_template_name\030\001 \001(\t" - + "B\003\340A\003\022(\n\033instance_group_manager_name\030\002 \001" - + "(\tB\003\340A\003\"L\n\021AcceleratorConfig\022\034\n\024accelera" - + "tor_type_uri\030\001 \001(\t\022\031\n\021accelerator_count\030" - + "\002 \001(\005\"f\n\nDiskConfig\022\033\n\016boot_disk_type\030\003 " - + "\001(\tB\003\340A\001\022\036\n\021boot_disk_size_gb\030\001 \001(\005B\003\340A\001" - + "\022\033\n\016num_local_ssds\030\002 \001(\005B\003\340A\001\"s\n\030NodeIni" - + "tializationAction\022\034\n\017executable_file\030\001 \001" - + "(\tB\003\340A\002\0229\n\021execution_timeout\030\002 \001(\0132\031.goo" - + "gle.protobuf.DurationB\003\340A\001\"\204\003\n\rClusterSt" - + "atus\022A\n\005state\030\001 \001(\0162-.google.cloud.datap" - + "roc.v1.ClusterStatus.StateB\003\340A\003\022\026\n\006detai" - + "l\030\002 \001(\tB\006\340A\003\340A\001\0229\n\020state_start_time\030\003 \001(" - + "\0132\032.google.protobuf.TimestampB\003\340A\003\022G\n\010su" - + "bstate\030\004 \001(\01620.google.cloud.dataproc.v1." 
- + "ClusterStatus.SubstateB\003\340A\003\"V\n\005State\022\013\n\007" - + "UNKNOWN\020\000\022\014\n\010CREATING\020\001\022\013\n\007RUNNING\020\002\022\t\n\005" - + "ERROR\020\003\022\014\n\010DELETING\020\004\022\014\n\010UPDATING\020\005\"<\n\010S" - + "ubstate\022\017\n\013UNSPECIFIED\020\000\022\r\n\tUNHEALTHY\020\001\022" - + "\020\n\014STALE_STATUS\020\002\"S\n\016SecurityConfig\022A\n\017k" - + "erberos_config\030\001 \001(\0132(.google.cloud.data" - + "proc.v1.KerberosConfig\"\220\004\n\016KerberosConfi" - + "g\022\034\n\017enable_kerberos\030\001 \001(\010B\003\340A\001\022(\n\033root_" - + "principal_password_uri\030\002 \001(\tB\003\340A\002\022\030\n\013kms" - + "_key_uri\030\003 \001(\tB\003\340A\002\022\031\n\014keystore_uri\030\004 \001(" - + "\tB\003\340A\001\022\033\n\016truststore_uri\030\005 \001(\tB\003\340A\001\022\"\n\025k" - + "eystore_password_uri\030\006 \001(\tB\003\340A\001\022\035\n\020key_p" - + "assword_uri\030\007 \001(\tB\003\340A\001\022$\n\027truststore_pas" - + "sword_uri\030\010 \001(\tB\003\340A\001\022$\n\027cross_realm_trus" - + "t_realm\030\t \001(\tB\003\340A\001\022\"\n\025cross_realm_trust_" - + "kdc\030\n \001(\tB\003\340A\001\022+\n\036cross_realm_trust_admi" - + "n_server\030\013 \001(\tB\003\340A\001\0222\n%cross_realm_trust" - + "_shared_password_uri\030\014 \001(\tB\003\340A\001\022\033\n\016kdc_d" - + "b_key_uri\030\r \001(\tB\003\340A\001\022\037\n\022tgt_lifetime_hou" - + "rs\030\016 \001(\005B\003\340A\001\022\022\n\005realm\030\017 \001(\tB\003\340A\001\"\371\001\n\016So" - + "ftwareConfig\022\032\n\rimage_version\030\001 \001(\tB\003\340A\001" - + "\022Q\n\nproperties\030\002 \003(\01328.google.cloud.data" - + "proc.v1.SoftwareConfig.PropertiesEntryB\003" - + "\340A\001\022E\n\023optional_components\030\003 \003(\0162#.googl" - + "e.cloud.dataproc.v1.ComponentB\003\340A\001\0321\n\017Pr" - + "opertiesEntry\022\013\n\003key\030\001 \001(\t\022\r\n\005value\030\002 \001(" - + 
"\t:\0028\001\"\232\002\n\016ClusterMetrics\022O\n\014hdfs_metrics" - + "\030\001 \003(\01329.google.cloud.dataproc.v1.Cluste" - + "rMetrics.HdfsMetricsEntry\022O\n\014yarn_metric" - + "s\030\002 \003(\01329.google.cloud.dataproc.v1.Clust" - + "erMetrics.YarnMetricsEntry\0322\n\020HdfsMetric" - + "sEntry\022\013\n\003key\030\001 \001(\t\022\r\n\005value\030\002 \001(\003:\0028\001\0322" - + "\n\020YarnMetricsEntry\022\013\n\003key\030\001 \001(\t\022\r\n\005value" - + "\030\002 \001(\003:\0028\001\"\226\001\n\024CreateClusterRequest\022\027\n\np" - + "roject_id\030\001 \001(\tB\003\340A\002\022\023\n\006region\030\003 \001(\tB\003\340A" - + "\002\0227\n\007cluster\030\002 \001(\0132!.google.cloud.datapr" - + "oc.v1.ClusterB\003\340A\002\022\027\n\nrequest_id\030\004 \001(\tB\003" - + "\340A\001\"\256\002\n\024UpdateClusterRequest\022\027\n\nproject_" - + "id\030\001 \001(\tB\003\340A\002\022\023\n\006region\030\005 \001(\tB\003\340A\002\022\031\n\014cl" - + "uster_name\030\002 \001(\tB\003\340A\002\0227\n\007cluster\030\003 \001(\0132!" - + ".google.cloud.dataproc.v1.ClusterB\003\340A\002\022E" - + "\n\035graceful_decommission_timeout\030\006 \001(\0132\031." 
- + "google.protobuf.DurationB\003\340A\001\0224\n\013update_" - + "mask\030\004 \001(\0132\032.google.protobuf.FieldMaskB\003" - + "\340A\002\022\027\n\nrequest_id\030\007 \001(\tB\003\340A\001\"\223\001\n\024DeleteC" - + "lusterRequest\022\027\n\nproject_id\030\001 \001(\tB\003\340A\002\022\023" - + "\n\006region\030\003 \001(\tB\003\340A\002\022\031\n\014cluster_name\030\002 \001(" - + "\tB\003\340A\002\022\031\n\014cluster_uuid\030\004 \001(\tB\003\340A\001\022\027\n\nreq" - + "uest_id\030\005 \001(\tB\003\340A\001\"\\\n\021GetClusterRequest\022" - + "\027\n\nproject_id\030\001 \001(\tB\003\340A\002\022\023\n\006region\030\003 \001(\t" - + "B\003\340A\002\022\031\n\014cluster_name\030\002 \001(\tB\003\340A\002\"\211\001\n\023Lis" - + "tClustersRequest\022\027\n\nproject_id\030\001 \001(\tB\003\340A" - + "\002\022\023\n\006region\030\004 \001(\tB\003\340A\002\022\023\n\006filter\030\005 \001(\tB\003" - + "\340A\001\022\026\n\tpage_size\030\002 \001(\005B\003\340A\001\022\027\n\npage_toke" - + "n\030\003 \001(\tB\003\340A\001\"n\n\024ListClustersResponse\0228\n\010" - + "clusters\030\001 \003(\0132!.google.cloud.dataproc.v" - + "1.ClusterB\003\340A\003\022\034\n\017next_page_token\030\002 \001(\tB" - + "\003\340A\003\"a\n\026DiagnoseClusterRequest\022\027\n\nprojec" - + "t_id\030\001 \001(\tB\003\340A\002\022\023\n\006region\030\003 \001(\tB\003\340A\002\022\031\n\014" - + "cluster_name\030\002 \001(\tB\003\340A\002\"1\n\026DiagnoseClust" - + "erResults\022\027\n\noutput_uri\030\001 \001(\tB\003\340A\0032\343\014\n\021C" - + "lusterController\022\200\002\n\rCreateCluster\022..goo" - + "gle.cloud.dataproc.v1.CreateClusterReque" - + "st\032\035.google.longrunning.Operation\"\237\001\202\323\344\223" - + "\002>\"3/v1/projects/{project_id}/regions/{r" - + "egion}/clusters:\007cluster\332A\031project_id,re" - + "gion,cluster\312A<\n\007Cluster\0221google.cloud.d" - + "ataproc.v1.ClusterOperationMetadata\022\250\002\n\r" - + 
"UpdateCluster\022..google.cloud.dataproc.v1" - + ".UpdateClusterRequest\032\035.google.longrunni" - + "ng.Operation\"\307\001\202\323\344\223\002M2B/v1/projects/{pro" - + "ject_id}/regions/{region}/clusters/{clus" - + "ter_name}:\007cluster\312A<\n\007Cluster\0221google.c" - + "loud.dataproc.v1.ClusterOperationMetadat" - + "a\332A2project_id,region,cluster_name,clust" - + "er,update_mask\022\231\002\n\rDeleteCluster\022..googl" - + "e.cloud.dataproc.v1.DeleteClusterRequest" - + "\032\035.google.longrunning.Operation\"\270\001\202\323\344\223\002D" - + "*B/v1/projects/{project_id}/regions/{reg" - + "ion}/clusters/{cluster_name}\332A\036project_i" - + "d,region,cluster_name\312AJ\n\025google.protobu" - + "f.Empty\0221google.cloud.dataproc.v1.Cluste" - + "rOperationMetadata\022\311\001\n\nGetCluster\022+.goog" - + "le.cloud.dataproc.v1.GetClusterRequest\032!" - + ".google.cloud.dataproc.v1.Cluster\"k\202\323\344\223\002" - + "D\022B/v1/projects/{project_id}/regions/{re" - + "gion}/clusters/{cluster_name}\332A\036project_" - + "id,region,cluster_name\022\331\001\n\014ListClusters\022" - + "-.google.cloud.dataproc.v1.ListClustersR" - + "equest\032..google.cloud.dataproc.v1.ListCl" - + "ustersResponse\"j\202\323\344\223\0025\0223/v1/projects/{pr" - + "oject_id}/regions/{region}/clusters\332A\021pr" - + "oject_id,region\332A\030project_id,region,filt" - + "er\022\216\002\n\017DiagnoseCluster\0220.google.cloud.da" - + "taproc.v1.DiagnoseClusterRequest\032\035.googl" - + "e.longrunning.Operation\"\251\001\202\323\344\223\002P\"K/v1/pr" - + "ojects/{project_id}/regions/{region}/clu" - + "sters/{cluster_name}:diagnose:\001*\332A\036proje" - + "ct_id,region,cluster_name\312A/\n\025google.pro" - + "tobuf.Empty\022\026DiagnoseClusterResults\032K\312A\027" - + "dataproc.googleapis.com\322A.https://www.go" - + "ogleapis.com/auth/cloud-platformBq\n\034com." 
- + "google.cloud.dataproc.v1B\rClustersProtoP" - + "\001Z@google.golang.org/genproto/googleapis" - + "/cloud/dataproc/v1;dataprocb\006proto3" + + "to\032\037google/api/field_behavior.proto\032%goo" + + "gle/cloud/dataproc/v1/shared.proto\032#goog" + + "le/longrunning/operations.proto\032\036google/" + + "protobuf/duration.proto\032 google/protobuf" + + "/field_mask.proto\032\037google/protobuf/times" + + "tamp.proto\"\310\003\n\007Cluster\022\027\n\nproject_id\030\001 \001" + + "(\tB\003\340A\002\022\031\n\014cluster_name\030\002 \001(\tB\003\340A\002\022<\n\006co" + + "nfig\030\003 \001(\0132\'.google.cloud.dataproc.v1.Cl" + + "usterConfigB\003\340A\002\022B\n\006labels\030\010 \003(\0132-.googl" + + "e.cloud.dataproc.v1.Cluster.LabelsEntryB" + + "\003\340A\001\022<\n\006status\030\004 \001(\0132\'.google.cloud.data" + + "proc.v1.ClusterStatusB\003\340A\003\022D\n\016status_his" + + "tory\030\007 \003(\0132\'.google.cloud.dataproc.v1.Cl" + + "usterStatusB\003\340A\003\022\031\n\014cluster_uuid\030\006 \001(\tB\003" + + "\340A\003\0229\n\007metrics\030\t \001(\0132(.google.cloud.data" + + "proc.v1.ClusterMetrics\032-\n\013LabelsEntry\022\013\n" + + "\003key\030\001 \001(\t\022\r\n\005value\030\002 \001(\t:\0028\001\"\260\006\n\rCluste" + + "rConfig\022\032\n\rconfig_bucket\030\001 \001(\tB\003\340A\001\022K\n\022g" + + "ce_cluster_config\030\010 \001(\0132*.google.cloud.d" + + "ataproc.v1.GceClusterConfigB\003\340A\001\022I\n\rmast" + + "er_config\030\t \001(\0132-.google.cloud.dataproc." + + "v1.InstanceGroupConfigB\003\340A\001\022I\n\rworker_co" + + "nfig\030\n \001(\0132-.google.cloud.dataproc.v1.In" + + "stanceGroupConfigB\003\340A\001\022S\n\027secondary_work" + + "er_config\030\014 \001(\0132-.google.cloud.dataproc." + + "v1.InstanceGroupConfigB\003\340A\001\022F\n\017software_" + + "config\030\r \001(\0132(.google.cloud.dataproc.v1." 
+ + "SoftwareConfigB\003\340A\001\022W\n\026initialization_ac" + + "tions\030\013 \003(\01322.google.cloud.dataproc.v1.N" + + "odeInitializationActionB\003\340A\001\022J\n\021encrypti" + + "on_config\030\017 \001(\0132*.google.cloud.dataproc." + + "v1.EncryptionConfigB\003\340A\001\022L\n\022autoscaling_" + + "config\030\022 \001(\0132+.google.cloud.dataproc.v1." + + "AutoscalingConfigB\003\340A\001\022F\n\017security_confi" + + "g\030\020 \001(\0132(.google.cloud.dataproc.v1.Secur" + + "ityConfigB\003\340A\001\022H\n\020lifecycle_config\030\021 \001(\013" + + "2).google.cloud.dataproc.v1.LifecycleCon" + + "figB\003\340A\001\",\n\021AutoscalingConfig\022\027\n\npolicy_" + + "uri\030\001 \001(\tB\003\340A\001\"4\n\020EncryptionConfig\022 \n\023gc" + + "e_pd_kms_key_name\030\001 \001(\tB\003\340A\001\"\237\003\n\020GceClus" + + "terConfig\022\025\n\010zone_uri\030\001 \001(\tB\003\340A\001\022\030\n\013netw" + + "ork_uri\030\002 \001(\tB\003\340A\001\022\033\n\016subnetwork_uri\030\006 \001" + + "(\tB\003\340A\001\022\035\n\020internal_ip_only\030\007 \001(\010B\003\340A\001\022\034" + + "\n\017service_account\030\010 \001(\tB\003\340A\001\022#\n\026service_" + + "account_scopes\030\003 \003(\tB\003\340A\001\022\014\n\004tags\030\004 \003(\t\022" + + "J\n\010metadata\030\005 \003(\01328.google.cloud.datapro" + + "c.v1.GceClusterConfig.MetadataEntry\022P\n\024r" + + "eservation_affinity\030\013 \001(\0132-.google.cloud" + + ".dataproc.v1.ReservationAffinityB\003\340A\001\032/\n" + + "\rMetadataEntry\022\013\n\003key\030\001 \001(\t\022\r\n\005value\030\002 \001" + + "(\t:\0028\001\"\232\003\n\023InstanceGroupConfig\022\032\n\rnum_in" + + "stances\030\001 \001(\005B\003\340A\001\022\033\n\016instance_names\030\002 \003" + + "(\tB\003\340A\003\022\026\n\timage_uri\030\003 \001(\tB\003\340A\001\022\035\n\020machi" + + "ne_type_uri\030\004 \001(\tB\003\340A\001\022>\n\013disk_config\030\005 " + + "\001(\0132$.google.cloud.dataproc.v1.DiskConfi" + + 
"gB\003\340A\001\022\033\n\016is_preemptible\030\006 \001(\010B\003\340A\001\022O\n\024m" + + "anaged_group_config\030\007 \001(\0132,.google.cloud" + + ".dataproc.v1.ManagedGroupConfigB\003\340A\003\022F\n\014" + + "accelerators\030\010 \003(\0132+.google.cloud.datapr" + + "oc.v1.AcceleratorConfigB\003\340A\001\022\035\n\020min_cpu_" + + "platform\030\t \001(\tB\003\340A\001\"c\n\022ManagedGroupConfi" + + "g\022#\n\026instance_template_name\030\001 \001(\tB\003\340A\003\022(" + + "\n\033instance_group_manager_name\030\002 \001(\tB\003\340A\003" + + "\"L\n\021AcceleratorConfig\022\034\n\024accelerator_typ" + + "e_uri\030\001 \001(\t\022\031\n\021accelerator_count\030\002 \001(\005\"f" + + "\n\nDiskConfig\022\033\n\016boot_disk_type\030\003 \001(\tB\003\340A" + + "\001\022\036\n\021boot_disk_size_gb\030\001 \001(\005B\003\340A\001\022\033\n\016num" + + "_local_ssds\030\002 \001(\005B\003\340A\001\"s\n\030NodeInitializa" + + "tionAction\022\034\n\017executable_file\030\001 \001(\tB\003\340A\002" + + "\0229\n\021execution_timeout\030\002 \001(\0132\031.google.pro" + + "tobuf.DurationB\003\340A\001\"\204\003\n\rClusterStatus\022A\n" + + "\005state\030\001 \001(\0162-.google.cloud.dataproc.v1." 
+ + "ClusterStatus.StateB\003\340A\003\022\026\n\006detail\030\002 \001(\t" + + "B\006\340A\003\340A\001\0229\n\020state_start_time\030\003 \001(\0132\032.goo" + + "gle.protobuf.TimestampB\003\340A\003\022G\n\010substate\030" + + "\004 \001(\01620.google.cloud.dataproc.v1.Cluster" + + "Status.SubstateB\003\340A\003\"V\n\005State\022\013\n\007UNKNOWN" + + "\020\000\022\014\n\010CREATING\020\001\022\013\n\007RUNNING\020\002\022\t\n\005ERROR\020\003" + + "\022\014\n\010DELETING\020\004\022\014\n\010UPDATING\020\005\"<\n\010Substate" + + "\022\017\n\013UNSPECIFIED\020\000\022\r\n\tUNHEALTHY\020\001\022\020\n\014STAL" + + "E_STATUS\020\002\"S\n\016SecurityConfig\022A\n\017kerberos" + + "_config\030\001 \001(\0132(.google.cloud.dataproc.v1" + + ".KerberosConfig\"\220\004\n\016KerberosConfig\022\034\n\017en" + + "able_kerberos\030\001 \001(\010B\003\340A\001\022(\n\033root_princip" + + "al_password_uri\030\002 \001(\tB\003\340A\002\022\030\n\013kms_key_ur" + + "i\030\003 \001(\tB\003\340A\002\022\031\n\014keystore_uri\030\004 \001(\tB\003\340A\001\022" + + "\033\n\016truststore_uri\030\005 \001(\tB\003\340A\001\022\"\n\025keystore" + + "_password_uri\030\006 \001(\tB\003\340A\001\022\035\n\020key_password" + + "_uri\030\007 \001(\tB\003\340A\001\022$\n\027truststore_password_u" + + "ri\030\010 \001(\tB\003\340A\001\022$\n\027cross_realm_trust_realm" + + "\030\t \001(\tB\003\340A\001\022\"\n\025cross_realm_trust_kdc\030\n \001" + + "(\tB\003\340A\001\022+\n\036cross_realm_trust_admin_serve" + + "r\030\013 \001(\tB\003\340A\001\0222\n%cross_realm_trust_shared" + + "_password_uri\030\014 \001(\tB\003\340A\001\022\033\n\016kdc_db_key_u" + + "ri\030\r \001(\tB\003\340A\001\022\037\n\022tgt_lifetime_hours\030\016 \001(" + + "\005B\003\340A\001\022\022\n\005realm\030\017 \001(\tB\003\340A\001\"\371\001\n\016SoftwareC" + + "onfig\022\032\n\rimage_version\030\001 \001(\tB\003\340A\001\022Q\n\npro" + + "perties\030\002 \003(\01328.google.cloud.dataproc.v1" + + 
".SoftwareConfig.PropertiesEntryB\003\340A\001\022E\n\023" + + "optional_components\030\003 \003(\0162#.google.cloud" + + ".dataproc.v1.ComponentB\003\340A\001\0321\n\017Propertie" + + "sEntry\022\013\n\003key\030\001 \001(\t\022\r\n\005value\030\002 \001(\t:\0028\001\"\203" + + "\002\n\017LifecycleConfig\0227\n\017idle_delete_ttl\030\001 " + + "\001(\0132\031.google.protobuf.DurationB\003\340A\001\022;\n\020a" + + "uto_delete_time\030\002 \001(\0132\032.google.protobuf." + + "TimestampB\003\340A\001H\000\0229\n\017auto_delete_ttl\030\003 \001(" + + "\0132\031.google.protobuf.DurationB\003\340A\001H\000\0228\n\017i" + + "dle_start_time\030\004 \001(\0132\032.google.protobuf.T" + + "imestampB\003\340A\003B\005\n\003ttl\"\232\002\n\016ClusterMetrics\022" + + "O\n\014hdfs_metrics\030\001 \003(\01329.google.cloud.dat" + + "aproc.v1.ClusterMetrics.HdfsMetricsEntry" + + "\022O\n\014yarn_metrics\030\002 \003(\01329.google.cloud.da" + + "taproc.v1.ClusterMetrics.YarnMetricsEntr" + + "y\0322\n\020HdfsMetricsEntry\022\013\n\003key\030\001 \001(\t\022\r\n\005va" + + "lue\030\002 \001(\003:\0028\001\0322\n\020YarnMetricsEntry\022\013\n\003key" + + "\030\001 \001(\t\022\r\n\005value\030\002 \001(\003:\0028\001\"\226\001\n\024CreateClus" + + "terRequest\022\027\n\nproject_id\030\001 \001(\tB\003\340A\002\022\023\n\006r" + + "egion\030\003 \001(\tB\003\340A\002\0227\n\007cluster\030\002 \001(\0132!.goog" + + "le.cloud.dataproc.v1.ClusterB\003\340A\002\022\027\n\nreq" + + "uest_id\030\004 \001(\tB\003\340A\001\"\256\002\n\024UpdateClusterRequ" + + "est\022\027\n\nproject_id\030\001 \001(\tB\003\340A\002\022\023\n\006region\030\005" + + " \001(\tB\003\340A\002\022\031\n\014cluster_name\030\002 \001(\tB\003\340A\002\0227\n\007" + + "cluster\030\003 \001(\0132!.google.cloud.dataproc.v1" + + ".ClusterB\003\340A\002\022E\n\035graceful_decommission_t" + + "imeout\030\006 \001(\0132\031.google.protobuf.DurationB" + + "\003\340A\001\0224\n\013update_mask\030\004 
\001(\0132\032.google.proto" + + "buf.FieldMaskB\003\340A\002\022\027\n\nrequest_id\030\007 \001(\tB\003" + + "\340A\001\"\223\001\n\024DeleteClusterRequest\022\027\n\nproject_" + + "id\030\001 \001(\tB\003\340A\002\022\023\n\006region\030\003 \001(\tB\003\340A\002\022\031\n\014cl" + + "uster_name\030\002 \001(\tB\003\340A\002\022\031\n\014cluster_uuid\030\004 " + + "\001(\tB\003\340A\001\022\027\n\nrequest_id\030\005 \001(\tB\003\340A\001\"\\\n\021Get" + + "ClusterRequest\022\027\n\nproject_id\030\001 \001(\tB\003\340A\002\022" + + "\023\n\006region\030\003 \001(\tB\003\340A\002\022\031\n\014cluster_name\030\002 \001" + + "(\tB\003\340A\002\"\211\001\n\023ListClustersRequest\022\027\n\nproje" + + "ct_id\030\001 \001(\tB\003\340A\002\022\023\n\006region\030\004 \001(\tB\003\340A\002\022\023\n" + + "\006filter\030\005 \001(\tB\003\340A\001\022\026\n\tpage_size\030\002 \001(\005B\003\340" + + "A\001\022\027\n\npage_token\030\003 \001(\tB\003\340A\001\"n\n\024ListClust" + + "ersResponse\0228\n\010clusters\030\001 \003(\0132!.google.c" + + "loud.dataproc.v1.ClusterB\003\340A\003\022\034\n\017next_pa" + + "ge_token\030\002 \001(\tB\003\340A\003\"a\n\026DiagnoseClusterRe" + + "quest\022\027\n\nproject_id\030\001 \001(\tB\003\340A\002\022\023\n\006region" + + "\030\003 \001(\tB\003\340A\002\022\031\n\014cluster_name\030\002 \001(\tB\003\340A\002\"1" + + "\n\026DiagnoseClusterResults\022\027\n\noutput_uri\030\001" + + " \001(\tB\003\340A\003\"\370\001\n\023ReservationAffinity\022Y\n\030con" + + "sume_reservation_type\030\001 \001(\01622.google.clo" + + "ud.dataproc.v1.ReservationAffinity.TypeB" + + "\003\340A\001\022\020\n\003key\030\002 \001(\tB\003\340A\001\022\023\n\006values\030\003 \003(\tB\003" + + "\340A\001\"_\n\004Type\022\024\n\020TYPE_UNSPECIFIED\020\000\022\022\n\016NO_" + + "RESERVATION\020\001\022\023\n\017ANY_RESERVATION\020\002\022\030\n\024SP" + + "ECIFIC_RESERVATION\020\0032\256\014\n\021ClusterControll" + + 
"er\022\200\002\n\rCreateCluster\022..google.cloud.data" + + "proc.v1.CreateClusterRequest\032\035.google.lo" + + "ngrunning.Operation\"\237\001\202\323\344\223\002>\"3/v1/projec" + + "ts/{project_id}/regions/{region}/cluster" + + "s:\007cluster\332A\031project_id,region,cluster\312A" + + "<\n\007Cluster\0221google.cloud.dataproc.v1.Clu" + + "sterOperationMetadata\022\363\001\n\rUpdateCluster\022" + + "..google.cloud.dataproc.v1.UpdateCluster" + + "Request\032\035.google.longrunning.Operation\"\222" + + "\001\202\323\344\223\002M2B/v1/projects/{project_id}/regio" + + "ns/{region}/clusters/{cluster_name}:\007clu" + + "ster\312A<\n\007Cluster\0221google.cloud.dataproc." + + "v1.ClusterOperationMetadata\022\231\002\n\rDeleteCl" + + "uster\022..google.cloud.dataproc.v1.DeleteC" + + "lusterRequest\032\035.google.longrunning.Opera" + + "tion\"\270\001\202\323\344\223\002D*B/v1/projects/{project_id}" + + "/regions/{region}/clusters/{cluster_name" + + "}\332A\036project_id,region,cluster_name\312AJ\n\025g" + + "oogle.protobuf.Empty\0221google.cloud.datap" + + "roc.v1.ClusterOperationMetadata\022\311\001\n\nGetC" + + "luster\022+.google.cloud.dataproc.v1.GetClu" + + "sterRequest\032!.google.cloud.dataproc.v1.C" + + "luster\"k\202\323\344\223\002D\022B/v1/projects/{project_id" + + "}/regions/{region}/clusters/{cluster_nam" + + "e}\332A\036project_id,region,cluster_name\022\331\001\n\014" + + "ListClusters\022-.google.cloud.dataproc.v1." 
+ + "ListClustersRequest\032..google.cloud.datap" + + "roc.v1.ListClustersResponse\"j\202\323\344\223\0025\0223/v1" + + "/projects/{project_id}/regions/{region}/" + + "clusters\332A\021project_id,region\332A\030project_i" + + "d,region,filter\022\216\002\n\017DiagnoseCluster\0220.go" + + "ogle.cloud.dataproc.v1.DiagnoseClusterRe" + + "quest\032\035.google.longrunning.Operation\"\251\001\202" + + "\323\344\223\002P\"K/v1/projects/{project_id}/regions" + + "/{region}/clusters/{cluster_name}:diagno" + + "se:\001*\332A\036project_id,region,cluster_name\312A" + + "/\n\025google.protobuf.Empty\022\026DiagnoseCluste" + + "rResults\032K\312A\027dataproc.googleapis.com\322A.h" + + "ttps://www.googleapis.com/auth/cloud-pla" + + "tformBq\n\034com.google.cloud.dataproc.v1B\rC" + + "lustersProtoP\001Z@google.golang.org/genpro" + + "to/googleapis/cloud/dataproc/v1;dataproc" + + "b\006proto3" }; descriptor = com.google.protobuf.Descriptors.FileDescriptor.internalBuildGeneratedFileFrom( @@ -337,7 +360,6 @@ public static com.google.protobuf.Descriptors.FileDescriptor getDescriptor() { com.google.api.AnnotationsProto.getDescriptor(), com.google.api.ClientProto.getDescriptor(), com.google.api.FieldBehaviorProto.getDescriptor(), - com.google.cloud.dataproc.v1.OperationsProto.getDescriptor(), com.google.cloud.dataproc.v1.SharedProto.getDescriptor(), com.google.longrunning.OperationsProto.getDescriptor(), com.google.protobuf.DurationProto.getDescriptor(), @@ -383,6 +405,7 @@ public static com.google.protobuf.Descriptors.FileDescriptor getDescriptor() { "EncryptionConfig", "AutoscalingConfig", "SecurityConfig", + "LifecycleConfig", }); internal_static_google_cloud_dataproc_v1_AutoscalingConfig_descriptor = getDescriptor().getMessageTypes().get(2); @@ -414,6 +437,7 @@ public static com.google.protobuf.Descriptors.FileDescriptor getDescriptor() { "ServiceAccountScopes", "Tags", "Metadata", + "ReservationAffinity", }); 
internal_static_google_cloud_dataproc_v1_GceClusterConfig_MetadataEntry_descriptor = internal_static_google_cloud_dataproc_v1_GceClusterConfig_descriptor @@ -527,8 +551,16 @@ public static com.google.protobuf.Descriptors.FileDescriptor getDescriptor() { new java.lang.String[] { "Key", "Value", }); - internal_static_google_cloud_dataproc_v1_ClusterMetrics_descriptor = + internal_static_google_cloud_dataproc_v1_LifecycleConfig_descriptor = getDescriptor().getMessageTypes().get(14); + internal_static_google_cloud_dataproc_v1_LifecycleConfig_fieldAccessorTable = + new com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( + internal_static_google_cloud_dataproc_v1_LifecycleConfig_descriptor, + new java.lang.String[] { + "IdleDeleteTtl", "AutoDeleteTime", "AutoDeleteTtl", "IdleStartTime", "Ttl", + }); + internal_static_google_cloud_dataproc_v1_ClusterMetrics_descriptor = + getDescriptor().getMessageTypes().get(15); internal_static_google_cloud_dataproc_v1_ClusterMetrics_fieldAccessorTable = new com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( internal_static_google_cloud_dataproc_v1_ClusterMetrics_descriptor, @@ -552,7 +584,7 @@ public static com.google.protobuf.Descriptors.FileDescriptor getDescriptor() { "Key", "Value", }); internal_static_google_cloud_dataproc_v1_CreateClusterRequest_descriptor = - getDescriptor().getMessageTypes().get(15); + getDescriptor().getMessageTypes().get(16); internal_static_google_cloud_dataproc_v1_CreateClusterRequest_fieldAccessorTable = new com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( internal_static_google_cloud_dataproc_v1_CreateClusterRequest_descriptor, @@ -560,7 +592,7 @@ public static com.google.protobuf.Descriptors.FileDescriptor getDescriptor() { "ProjectId", "Region", "Cluster", "RequestId", }); internal_static_google_cloud_dataproc_v1_UpdateClusterRequest_descriptor = - getDescriptor().getMessageTypes().get(16); + getDescriptor().getMessageTypes().get(17); 
internal_static_google_cloud_dataproc_v1_UpdateClusterRequest_fieldAccessorTable = new com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( internal_static_google_cloud_dataproc_v1_UpdateClusterRequest_descriptor, @@ -574,7 +606,7 @@ public static com.google.protobuf.Descriptors.FileDescriptor getDescriptor() { "RequestId", }); internal_static_google_cloud_dataproc_v1_DeleteClusterRequest_descriptor = - getDescriptor().getMessageTypes().get(17); + getDescriptor().getMessageTypes().get(18); internal_static_google_cloud_dataproc_v1_DeleteClusterRequest_fieldAccessorTable = new com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( internal_static_google_cloud_dataproc_v1_DeleteClusterRequest_descriptor, @@ -582,7 +614,7 @@ public static com.google.protobuf.Descriptors.FileDescriptor getDescriptor() { "ProjectId", "Region", "ClusterName", "ClusterUuid", "RequestId", }); internal_static_google_cloud_dataproc_v1_GetClusterRequest_descriptor = - getDescriptor().getMessageTypes().get(18); + getDescriptor().getMessageTypes().get(19); internal_static_google_cloud_dataproc_v1_GetClusterRequest_fieldAccessorTable = new com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( internal_static_google_cloud_dataproc_v1_GetClusterRequest_descriptor, @@ -590,7 +622,7 @@ public static com.google.protobuf.Descriptors.FileDescriptor getDescriptor() { "ProjectId", "Region", "ClusterName", }); internal_static_google_cloud_dataproc_v1_ListClustersRequest_descriptor = - getDescriptor().getMessageTypes().get(19); + getDescriptor().getMessageTypes().get(20); internal_static_google_cloud_dataproc_v1_ListClustersRequest_fieldAccessorTable = new com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( internal_static_google_cloud_dataproc_v1_ListClustersRequest_descriptor, @@ -598,7 +630,7 @@ public static com.google.protobuf.Descriptors.FileDescriptor getDescriptor() { "ProjectId", "Region", "Filter", "PageSize", "PageToken", }); 
internal_static_google_cloud_dataproc_v1_ListClustersResponse_descriptor = - getDescriptor().getMessageTypes().get(20); + getDescriptor().getMessageTypes().get(21); internal_static_google_cloud_dataproc_v1_ListClustersResponse_fieldAccessorTable = new com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( internal_static_google_cloud_dataproc_v1_ListClustersResponse_descriptor, @@ -606,7 +638,7 @@ public static com.google.protobuf.Descriptors.FileDescriptor getDescriptor() { "Clusters", "NextPageToken", }); internal_static_google_cloud_dataproc_v1_DiagnoseClusterRequest_descriptor = - getDescriptor().getMessageTypes().get(21); + getDescriptor().getMessageTypes().get(22); internal_static_google_cloud_dataproc_v1_DiagnoseClusterRequest_fieldAccessorTable = new com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( internal_static_google_cloud_dataproc_v1_DiagnoseClusterRequest_descriptor, @@ -614,13 +646,21 @@ public static com.google.protobuf.Descriptors.FileDescriptor getDescriptor() { "ProjectId", "Region", "ClusterName", }); internal_static_google_cloud_dataproc_v1_DiagnoseClusterResults_descriptor = - getDescriptor().getMessageTypes().get(22); + getDescriptor().getMessageTypes().get(23); internal_static_google_cloud_dataproc_v1_DiagnoseClusterResults_fieldAccessorTable = new com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( internal_static_google_cloud_dataproc_v1_DiagnoseClusterResults_descriptor, new java.lang.String[] { "OutputUri", }); + internal_static_google_cloud_dataproc_v1_ReservationAffinity_descriptor = + getDescriptor().getMessageTypes().get(24); + internal_static_google_cloud_dataproc_v1_ReservationAffinity_fieldAccessorTable = + new com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( + internal_static_google_cloud_dataproc_v1_ReservationAffinity_descriptor, + new java.lang.String[] { + "ConsumeReservationType", "Key", "Values", + }); com.google.protobuf.ExtensionRegistry registry = 
com.google.protobuf.ExtensionRegistry.newInstance(); registry.add(com.google.api.ClientProto.defaultHost); @@ -634,7 +674,6 @@ public static com.google.protobuf.Descriptors.FileDescriptor getDescriptor() { com.google.api.AnnotationsProto.getDescriptor(); com.google.api.ClientProto.getDescriptor(); com.google.api.FieldBehaviorProto.getDescriptor(); - com.google.cloud.dataproc.v1.OperationsProto.getDescriptor(); com.google.cloud.dataproc.v1.SharedProto.getDescriptor(); com.google.longrunning.OperationsProto.getDescriptor(); com.google.protobuf.DurationProto.getDescriptor(); diff --git a/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/GceClusterConfig.java b/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/GceClusterConfig.java index e16119d5..c8ece660 100644 --- a/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/GceClusterConfig.java +++ b/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/GceClusterConfig.java @@ -143,6 +143,22 @@ private GceClusterConfig( java.lang.String s = input.readStringRequireUtf8(); serviceAccount_ = s; + break; + } + case 90: + { + com.google.cloud.dataproc.v1.ReservationAffinity.Builder subBuilder = null; + if (reservationAffinity_ != null) { + subBuilder = reservationAffinity_.toBuilder(); + } + reservationAffinity_ = + input.readMessage( + com.google.cloud.dataproc.v1.ReservationAffinity.parser(), extensionRegistry); + if (subBuilder != null) { + subBuilder.mergeFrom(reservationAffinity_); + reservationAffinity_ = subBuilder.buildPartial(); + } + break; } default: @@ -738,6 +754,58 @@ public java.lang.String getMetadataOrThrow(java.lang.String key) { return map.get(key); } + public static final int RESERVATION_AFFINITY_FIELD_NUMBER = 11; + private com.google.cloud.dataproc.v1.ReservationAffinity reservationAffinity_; + /** + * + * + *
+   * Optional. Reservation Affinity for consuming Zonal reservation.
+   * 
+ * + * + * .google.cloud.dataproc.v1.ReservationAffinity reservation_affinity = 11 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return Whether the reservationAffinity field is set. + */ + public boolean hasReservationAffinity() { + return reservationAffinity_ != null; + } + /** + * + * + *
+   * Optional. Reservation Affinity for consuming Zonal reservation.
+   * 
+ * + * + * .google.cloud.dataproc.v1.ReservationAffinity reservation_affinity = 11 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return The reservationAffinity. + */ + public com.google.cloud.dataproc.v1.ReservationAffinity getReservationAffinity() { + return reservationAffinity_ == null + ? com.google.cloud.dataproc.v1.ReservationAffinity.getDefaultInstance() + : reservationAffinity_; + } + /** + * + * + *
+   * Optional. Reservation Affinity for consuming Zonal reservation.
+   * 
+ * + * + * .google.cloud.dataproc.v1.ReservationAffinity reservation_affinity = 11 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public com.google.cloud.dataproc.v1.ReservationAffinityOrBuilder + getReservationAffinityOrBuilder() { + return getReservationAffinity(); + } + private byte memoizedIsInitialized = -1; @java.lang.Override @@ -776,6 +844,9 @@ public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io if (!getServiceAccountBytes().isEmpty()) { com.google.protobuf.GeneratedMessageV3.writeString(output, 8, serviceAccount_); } + if (reservationAffinity_ != null) { + output.writeMessage(11, getReservationAffinity()); + } unknownFields.writeTo(output); } @@ -826,6 +897,10 @@ public int getSerializedSize() { if (!getServiceAccountBytes().isEmpty()) { size += com.google.protobuf.GeneratedMessageV3.computeStringSize(8, serviceAccount_); } + if (reservationAffinity_ != null) { + size += + com.google.protobuf.CodedOutputStream.computeMessageSize(11, getReservationAffinity()); + } size += unknownFields.getSerializedSize(); memoizedSize = size; return size; @@ -850,6 +925,10 @@ public boolean equals(final java.lang.Object obj) { if (!getServiceAccountScopesList().equals(other.getServiceAccountScopesList())) return false; if (!getTagsList().equals(other.getTagsList())) return false; if (!internalGetMetadata().equals(other.internalGetMetadata())) return false; + if (hasReservationAffinity() != other.hasReservationAffinity()) return false; + if (hasReservationAffinity()) { + if (!getReservationAffinity().equals(other.getReservationAffinity())) return false; + } if (!unknownFields.equals(other.unknownFields)) return false; return true; } @@ -883,6 +962,10 @@ public int hashCode() { hash = (37 * hash) + METADATA_FIELD_NUMBER; hash = (53 * hash) + internalGetMetadata().hashCode(); } + if (hasReservationAffinity()) { + hash = (37 * hash) + RESERVATION_AFFINITY_FIELD_NUMBER; + hash = (53 * hash) + getReservationAffinity().hashCode(); + } hash = 
(29 * hash) + unknownFields.hashCode(); memoizedHashCode = hash; return hash; @@ -1064,6 +1147,12 @@ public Builder clear() { tags_ = com.google.protobuf.LazyStringArrayList.EMPTY; bitField0_ = (bitField0_ & ~0x00000002); internalGetMutableMetadata().clear(); + if (reservationAffinityBuilder_ == null) { + reservationAffinity_ = null; + } else { + reservationAffinity_ = null; + reservationAffinityBuilder_ = null; + } return this; } @@ -1109,6 +1198,11 @@ public com.google.cloud.dataproc.v1.GceClusterConfig buildPartial() { result.tags_ = tags_; result.metadata_ = internalGetMetadata(); result.metadata_.makeImmutable(); + if (reservationAffinityBuilder_ == null) { + result.reservationAffinity_ = reservationAffinity_; + } else { + result.reservationAffinity_ = reservationAffinityBuilder_.build(); + } onBuilt(); return result; } @@ -1198,6 +1292,9 @@ public Builder mergeFrom(com.google.cloud.dataproc.v1.GceClusterConfig other) { onChanged(); } internalGetMutableMetadata().mergeFrom(other.internalGetMetadata()); + if (other.hasReservationAffinity()) { + mergeReservationAffinity(other.getReservationAffinity()); + } this.mergeUnknownFields(other.unknownFields); onChanged(); return this; @@ -2480,6 +2577,213 @@ public Builder putAllMetadata(java.util.Map return this; } + private com.google.cloud.dataproc.v1.ReservationAffinity reservationAffinity_; + private com.google.protobuf.SingleFieldBuilderV3< + com.google.cloud.dataproc.v1.ReservationAffinity, + com.google.cloud.dataproc.v1.ReservationAffinity.Builder, + com.google.cloud.dataproc.v1.ReservationAffinityOrBuilder> + reservationAffinityBuilder_; + /** + * + * + *
+     * Optional. Reservation Affinity for consuming Zonal reservation.
+     * 
+ * + * + * .google.cloud.dataproc.v1.ReservationAffinity reservation_affinity = 11 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return Whether the reservationAffinity field is set. + */ + public boolean hasReservationAffinity() { + return reservationAffinityBuilder_ != null || reservationAffinity_ != null; + } + /** + * + * + *
+     * Optional. Reservation Affinity for consuming Zonal reservation.
+     * 
+ * + * + * .google.cloud.dataproc.v1.ReservationAffinity reservation_affinity = 11 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return The reservationAffinity. + */ + public com.google.cloud.dataproc.v1.ReservationAffinity getReservationAffinity() { + if (reservationAffinityBuilder_ == null) { + return reservationAffinity_ == null + ? com.google.cloud.dataproc.v1.ReservationAffinity.getDefaultInstance() + : reservationAffinity_; + } else { + return reservationAffinityBuilder_.getMessage(); + } + } + /** + * + * + *
+     * Optional. Reservation Affinity for consuming Zonal reservation.
+     * 
+ * + * + * .google.cloud.dataproc.v1.ReservationAffinity reservation_affinity = 11 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder setReservationAffinity(com.google.cloud.dataproc.v1.ReservationAffinity value) { + if (reservationAffinityBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + reservationAffinity_ = value; + onChanged(); + } else { + reservationAffinityBuilder_.setMessage(value); + } + + return this; + } + /** + * + * + *
+     * Optional. Reservation Affinity for consuming Zonal reservation.
+     * 
+ * + * + * .google.cloud.dataproc.v1.ReservationAffinity reservation_affinity = 11 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder setReservationAffinity( + com.google.cloud.dataproc.v1.ReservationAffinity.Builder builderForValue) { + if (reservationAffinityBuilder_ == null) { + reservationAffinity_ = builderForValue.build(); + onChanged(); + } else { + reservationAffinityBuilder_.setMessage(builderForValue.build()); + } + + return this; + } + /** + * + * + *
+     * Optional. Reservation Affinity for consuming Zonal reservation.
+     * 
+ * + * + * .google.cloud.dataproc.v1.ReservationAffinity reservation_affinity = 11 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder mergeReservationAffinity( + com.google.cloud.dataproc.v1.ReservationAffinity value) { + if (reservationAffinityBuilder_ == null) { + if (reservationAffinity_ != null) { + reservationAffinity_ = + com.google.cloud.dataproc.v1.ReservationAffinity.newBuilder(reservationAffinity_) + .mergeFrom(value) + .buildPartial(); + } else { + reservationAffinity_ = value; + } + onChanged(); + } else { + reservationAffinityBuilder_.mergeFrom(value); + } + + return this; + } + /** + * + * + *
+     * Optional. Reservation Affinity for consuming Zonal reservation.
+     * 
+ * + * + * .google.cloud.dataproc.v1.ReservationAffinity reservation_affinity = 11 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder clearReservationAffinity() { + if (reservationAffinityBuilder_ == null) { + reservationAffinity_ = null; + onChanged(); + } else { + reservationAffinity_ = null; + reservationAffinityBuilder_ = null; + } + + return this; + } + /** + * + * + *
+     * Optional. Reservation Affinity for consuming Zonal reservation.
+     * 
+ * + * + * .google.cloud.dataproc.v1.ReservationAffinity reservation_affinity = 11 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public com.google.cloud.dataproc.v1.ReservationAffinity.Builder + getReservationAffinityBuilder() { + + onChanged(); + return getReservationAffinityFieldBuilder().getBuilder(); + } + /** + * + * + *
+     * Optional. Reservation Affinity for consuming Zonal reservation.
+     * 
+ * + * + * .google.cloud.dataproc.v1.ReservationAffinity reservation_affinity = 11 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public com.google.cloud.dataproc.v1.ReservationAffinityOrBuilder + getReservationAffinityOrBuilder() { + if (reservationAffinityBuilder_ != null) { + return reservationAffinityBuilder_.getMessageOrBuilder(); + } else { + return reservationAffinity_ == null + ? com.google.cloud.dataproc.v1.ReservationAffinity.getDefaultInstance() + : reservationAffinity_; + } + } + /** + * + * + *
+     * Optional. Reservation Affinity for consuming Zonal reservation.
+     * 
+ * + * + * .google.cloud.dataproc.v1.ReservationAffinity reservation_affinity = 11 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + private com.google.protobuf.SingleFieldBuilderV3< + com.google.cloud.dataproc.v1.ReservationAffinity, + com.google.cloud.dataproc.v1.ReservationAffinity.Builder, + com.google.cloud.dataproc.v1.ReservationAffinityOrBuilder> + getReservationAffinityFieldBuilder() { + if (reservationAffinityBuilder_ == null) { + reservationAffinityBuilder_ = + new com.google.protobuf.SingleFieldBuilderV3< + com.google.cloud.dataproc.v1.ReservationAffinity, + com.google.cloud.dataproc.v1.ReservationAffinity.Builder, + com.google.cloud.dataproc.v1.ReservationAffinityOrBuilder>( + getReservationAffinity(), getParentForChildren(), isClean()); + reservationAffinity_ = null; + } + return reservationAffinityBuilder_; + } + @java.lang.Override public final Builder setUnknownFields(final com.google.protobuf.UnknownFieldSet unknownFields) { return super.setUnknownFields(unknownFields); diff --git a/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/GceClusterConfigOrBuilder.java b/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/GceClusterConfigOrBuilder.java index e4243f7c..2dfec7d0 100644 --- a/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/GceClusterConfigOrBuilder.java +++ b/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/GceClusterConfigOrBuilder.java @@ -414,4 +414,45 @@ public interface GceClusterConfigOrBuilder * map<string, string> metadata = 5; */ java.lang.String getMetadataOrThrow(java.lang.String key); + + /** + * + * + *
+   * Optional. Reservation Affinity for consuming Zonal reservation.
+   * 
+ * + * + * .google.cloud.dataproc.v1.ReservationAffinity reservation_affinity = 11 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return Whether the reservationAffinity field is set. + */ + boolean hasReservationAffinity(); + /** + * + * + *
+   * Optional. Reservation Affinity for consuming Zonal reservation.
+   * 
+ * + * + * .google.cloud.dataproc.v1.ReservationAffinity reservation_affinity = 11 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return The reservationAffinity. + */ + com.google.cloud.dataproc.v1.ReservationAffinity getReservationAffinity(); + /** + * + * + *
+   * Optional. Reservation Affinity for consuming Zonal reservation.
+   * 
+ * + * + * .google.cloud.dataproc.v1.ReservationAffinity reservation_affinity = 11 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + com.google.cloud.dataproc.v1.ReservationAffinityOrBuilder getReservationAffinityOrBuilder(); } diff --git a/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/InstanceGroupConfig.java b/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/InstanceGroupConfig.java index c3d0c592..1d3ab0c1 100644 --- a/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/InstanceGroupConfig.java +++ b/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/InstanceGroupConfig.java @@ -22,7 +22,7 @@ * * *
- * Optional. The config settings for Compute Engine resources in
+ * The config settings for Compute Engine resources in
  * an instance group, such as a master or worker group.
  * 
* @@ -911,7 +911,7 @@ protected Builder newBuilderForType(com.google.protobuf.GeneratedMessageV3.Build * * *
-   * Optional. The config settings for Compute Engine resources in
+   * The config settings for Compute Engine resources in
    * an instance group, such as a master or worker group.
    * 
* diff --git a/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/Job.java b/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/Job.java index 55c125b7..39fcea9d 100644 --- a/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/Job.java +++ b/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/Job.java @@ -283,6 +283,22 @@ private Job( scheduling_ = subBuilder.buildPartial(); } + break; + } + case 170: + { + com.google.cloud.dataproc.v1.SparkRJob.Builder subBuilder = null; + if (typeJobCase_ == 21) { + subBuilder = ((com.google.cloud.dataproc.v1.SparkRJob) typeJob_).toBuilder(); + } + typeJob_ = + input.readMessage( + com.google.cloud.dataproc.v1.SparkRJob.parser(), extensionRegistry); + if (subBuilder != null) { + subBuilder.mergeFrom((com.google.cloud.dataproc.v1.SparkRJob) typeJob_); + typeJob_ = subBuilder.buildPartial(); + } + typeJobCase_ = 21; break; } case 178: @@ -292,6 +308,22 @@ private Job( jobUuid_ = s; break; } + case 186: + { + com.google.cloud.dataproc.v1.PrestoJob.Builder subBuilder = null; + if (typeJobCase_ == 23) { + subBuilder = ((com.google.cloud.dataproc.v1.PrestoJob) typeJob_).toBuilder(); + } + typeJob_ = + input.readMessage( + com.google.cloud.dataproc.v1.PrestoJob.parser(), extensionRegistry); + if (subBuilder != null) { + subBuilder.mergeFrom((com.google.cloud.dataproc.v1.PrestoJob) typeJob_); + typeJob_ = subBuilder.buildPartial(); + } + typeJobCase_ = 23; + break; + } default: { if (!parseUnknownField(input, unknownFields, extensionRegistry, tag)) { @@ -354,7 +386,9 @@ public enum TypeJobCase PYSPARK_JOB(5), HIVE_JOB(6), PIG_JOB(7), + SPARK_R_JOB(21), SPARK_SQL_JOB(12), + PRESTO_JOB(23), TYPEJOB_NOT_SET(0); private final int value; @@ -383,8 +417,12 @@ public static TypeJobCase forNumber(int value) { return HIVE_JOB; case 7: return PIG_JOB; + case 21: + return SPARK_R_JOB; case 12: return SPARK_SQL_JOB; + case 23: + return PRESTO_JOB; case 0: 
return TYPEJOB_NOT_SET; default: @@ -520,10 +558,12 @@ public com.google.cloud.dataproc.v1.JobPlacementOrBuilder getPlacementOrBuilder( * * *
-   * Job is a Hadoop job.
+   * Optional. Job is a Hadoop job.
    * 
* - * .google.cloud.dataproc.v1.HadoopJob hadoop_job = 3; + * + * .google.cloud.dataproc.v1.HadoopJob hadoop_job = 3 [(.google.api.field_behavior) = OPTIONAL]; + * * * @return Whether the hadoopJob field is set. */ @@ -534,10 +574,12 @@ public boolean hasHadoopJob() { * * *
-   * Job is a Hadoop job.
+   * Optional. Job is a Hadoop job.
    * 
* - * .google.cloud.dataproc.v1.HadoopJob hadoop_job = 3; + * + * .google.cloud.dataproc.v1.HadoopJob hadoop_job = 3 [(.google.api.field_behavior) = OPTIONAL]; + * * * @return The hadoopJob. */ @@ -551,10 +593,12 @@ public com.google.cloud.dataproc.v1.HadoopJob getHadoopJob() { * * *
-   * Job is a Hadoop job.
+   * Optional. Job is a Hadoop job.
    * 
* - * .google.cloud.dataproc.v1.HadoopJob hadoop_job = 3; + * + * .google.cloud.dataproc.v1.HadoopJob hadoop_job = 3 [(.google.api.field_behavior) = OPTIONAL]; + * */ public com.google.cloud.dataproc.v1.HadoopJobOrBuilder getHadoopJobOrBuilder() { if (typeJobCase_ == 3) { @@ -568,10 +612,12 @@ public com.google.cloud.dataproc.v1.HadoopJobOrBuilder getHadoopJobOrBuilder() { * * *
-   * Job is a Spark job.
+   * Optional. Job is a Spark job.
    * 
* - * .google.cloud.dataproc.v1.SparkJob spark_job = 4; + * + * .google.cloud.dataproc.v1.SparkJob spark_job = 4 [(.google.api.field_behavior) = OPTIONAL]; + * * * @return Whether the sparkJob field is set. */ @@ -582,10 +628,12 @@ public boolean hasSparkJob() { * * *
-   * Job is a Spark job.
+   * Optional. Job is a Spark job.
    * 
* - * .google.cloud.dataproc.v1.SparkJob spark_job = 4; + * + * .google.cloud.dataproc.v1.SparkJob spark_job = 4 [(.google.api.field_behavior) = OPTIONAL]; + * * * @return The sparkJob. */ @@ -599,10 +647,12 @@ public com.google.cloud.dataproc.v1.SparkJob getSparkJob() { * * *
-   * Job is a Spark job.
+   * Optional. Job is a Spark job.
    * 
* - * .google.cloud.dataproc.v1.SparkJob spark_job = 4; + * + * .google.cloud.dataproc.v1.SparkJob spark_job = 4 [(.google.api.field_behavior) = OPTIONAL]; + * */ public com.google.cloud.dataproc.v1.SparkJobOrBuilder getSparkJobOrBuilder() { if (typeJobCase_ == 4) { @@ -616,10 +666,12 @@ public com.google.cloud.dataproc.v1.SparkJobOrBuilder getSparkJobOrBuilder() { * * *
-   * Job is a Pyspark job.
+   * Optional. Job is a PySpark job.
    * 
* - * .google.cloud.dataproc.v1.PySparkJob pyspark_job = 5; + * + * .google.cloud.dataproc.v1.PySparkJob pyspark_job = 5 [(.google.api.field_behavior) = OPTIONAL]; + * * * @return Whether the pysparkJob field is set. */ @@ -630,10 +682,12 @@ public boolean hasPysparkJob() { * * *
-   * Job is a Pyspark job.
+   * Optional. Job is a PySpark job.
    * 
* - * .google.cloud.dataproc.v1.PySparkJob pyspark_job = 5; + * + * .google.cloud.dataproc.v1.PySparkJob pyspark_job = 5 [(.google.api.field_behavior) = OPTIONAL]; + * * * @return The pysparkJob. */ @@ -647,10 +701,12 @@ public com.google.cloud.dataproc.v1.PySparkJob getPysparkJob() { * * *
-   * Job is a Pyspark job.
+   * Optional. Job is a PySpark job.
    * 
* - * .google.cloud.dataproc.v1.PySparkJob pyspark_job = 5; + * + * .google.cloud.dataproc.v1.PySparkJob pyspark_job = 5 [(.google.api.field_behavior) = OPTIONAL]; + * */ public com.google.cloud.dataproc.v1.PySparkJobOrBuilder getPysparkJobOrBuilder() { if (typeJobCase_ == 5) { @@ -664,10 +720,11 @@ public com.google.cloud.dataproc.v1.PySparkJobOrBuilder getPysparkJobOrBuilder() * * *
-   * Job is a Hive job.
+   * Optional. Job is a Hive job.
    * 
* - * .google.cloud.dataproc.v1.HiveJob hive_job = 6; + * .google.cloud.dataproc.v1.HiveJob hive_job = 6 [(.google.api.field_behavior) = OPTIONAL]; + * * * @return Whether the hiveJob field is set. */ @@ -678,10 +735,11 @@ public boolean hasHiveJob() { * * *
-   * Job is a Hive job.
+   * Optional. Job is a Hive job.
    * 
* - * .google.cloud.dataproc.v1.HiveJob hive_job = 6; + * .google.cloud.dataproc.v1.HiveJob hive_job = 6 [(.google.api.field_behavior) = OPTIONAL]; + * * * @return The hiveJob. */ @@ -695,10 +753,11 @@ public com.google.cloud.dataproc.v1.HiveJob getHiveJob() { * * *
-   * Job is a Hive job.
+   * Optional. Job is a Hive job.
    * 
* - * .google.cloud.dataproc.v1.HiveJob hive_job = 6; + * .google.cloud.dataproc.v1.HiveJob hive_job = 6 [(.google.api.field_behavior) = OPTIONAL]; + * */ public com.google.cloud.dataproc.v1.HiveJobOrBuilder getHiveJobOrBuilder() { if (typeJobCase_ == 6) { @@ -712,10 +771,11 @@ public com.google.cloud.dataproc.v1.HiveJobOrBuilder getHiveJobOrBuilder() { * * *
-   * Job is a Pig job.
+   * Optional. Job is a Pig job.
    * 
* - * .google.cloud.dataproc.v1.PigJob pig_job = 7; + * .google.cloud.dataproc.v1.PigJob pig_job = 7 [(.google.api.field_behavior) = OPTIONAL]; + * * * @return Whether the pigJob field is set. */ @@ -726,10 +786,11 @@ public boolean hasPigJob() { * * *
-   * Job is a Pig job.
+   * Optional. Job is a Pig job.
    * 
* - * .google.cloud.dataproc.v1.PigJob pig_job = 7; + * .google.cloud.dataproc.v1.PigJob pig_job = 7 [(.google.api.field_behavior) = OPTIONAL]; + * * * @return The pigJob. */ @@ -743,10 +804,11 @@ public com.google.cloud.dataproc.v1.PigJob getPigJob() { * * *
-   * Job is a Pig job.
+   * Optional. Job is a Pig job.
    * 
* - * .google.cloud.dataproc.v1.PigJob pig_job = 7; + * .google.cloud.dataproc.v1.PigJob pig_job = 7 [(.google.api.field_behavior) = OPTIONAL]; + * */ public com.google.cloud.dataproc.v1.PigJobOrBuilder getPigJobOrBuilder() { if (typeJobCase_ == 7) { @@ -755,15 +817,71 @@ public com.google.cloud.dataproc.v1.PigJobOrBuilder getPigJobOrBuilder() { return com.google.cloud.dataproc.v1.PigJob.getDefaultInstance(); } + public static final int SPARK_R_JOB_FIELD_NUMBER = 21; + /** + * + * + *
+   * Optional. Job is a SparkR job.
+   * 
+ * + * + * .google.cloud.dataproc.v1.SparkRJob spark_r_job = 21 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return Whether the sparkRJob field is set. + */ + public boolean hasSparkRJob() { + return typeJobCase_ == 21; + } + /** + * + * + *
+   * Optional. Job is a SparkR job.
+   * 
+ * + * + * .google.cloud.dataproc.v1.SparkRJob spark_r_job = 21 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return The sparkRJob. + */ + public com.google.cloud.dataproc.v1.SparkRJob getSparkRJob() { + if (typeJobCase_ == 21) { + return (com.google.cloud.dataproc.v1.SparkRJob) typeJob_; + } + return com.google.cloud.dataproc.v1.SparkRJob.getDefaultInstance(); + } + /** + * + * + *
+   * Optional. Job is a SparkR job.
+   * 
+ * + * + * .google.cloud.dataproc.v1.SparkRJob spark_r_job = 21 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public com.google.cloud.dataproc.v1.SparkRJobOrBuilder getSparkRJobOrBuilder() { + if (typeJobCase_ == 21) { + return (com.google.cloud.dataproc.v1.SparkRJob) typeJob_; + } + return com.google.cloud.dataproc.v1.SparkRJob.getDefaultInstance(); + } + public static final int SPARK_SQL_JOB_FIELD_NUMBER = 12; /** * * *
-   * Job is a SparkSql job.
+   * Optional. Job is a SparkSql job.
    * 
* - * .google.cloud.dataproc.v1.SparkSqlJob spark_sql_job = 12; + * + * .google.cloud.dataproc.v1.SparkSqlJob spark_sql_job = 12 [(.google.api.field_behavior) = OPTIONAL]; + * * * @return Whether the sparkSqlJob field is set. */ @@ -774,10 +892,12 @@ public boolean hasSparkSqlJob() { * * *
-   * Job is a SparkSql job.
+   * Optional. Job is a SparkSql job.
    * 
* - * .google.cloud.dataproc.v1.SparkSqlJob spark_sql_job = 12; + * + * .google.cloud.dataproc.v1.SparkSqlJob spark_sql_job = 12 [(.google.api.field_behavior) = OPTIONAL]; + * * * @return The sparkSqlJob. */ @@ -791,10 +911,12 @@ public com.google.cloud.dataproc.v1.SparkSqlJob getSparkSqlJob() { * * *
-   * Job is a SparkSql job.
+   * Optional. Job is a SparkSql job.
    * 
* - * .google.cloud.dataproc.v1.SparkSqlJob spark_sql_job = 12; + * + * .google.cloud.dataproc.v1.SparkSqlJob spark_sql_job = 12 [(.google.api.field_behavior) = OPTIONAL]; + * */ public com.google.cloud.dataproc.v1.SparkSqlJobOrBuilder getSparkSqlJobOrBuilder() { if (typeJobCase_ == 12) { @@ -803,6 +925,60 @@ public com.google.cloud.dataproc.v1.SparkSqlJobOrBuilder getSparkSqlJobOrBuilder return com.google.cloud.dataproc.v1.SparkSqlJob.getDefaultInstance(); } + public static final int PRESTO_JOB_FIELD_NUMBER = 23; + /** + * + * + *
+   * Optional. Job is a Presto job.
+   * 
+ * + * + * .google.cloud.dataproc.v1.PrestoJob presto_job = 23 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return Whether the prestoJob field is set. + */ + public boolean hasPrestoJob() { + return typeJobCase_ == 23; + } + /** + * + * + *
+   * Optional. Job is a Presto job.
+   * 
+ * + * + * .google.cloud.dataproc.v1.PrestoJob presto_job = 23 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return The prestoJob. + */ + public com.google.cloud.dataproc.v1.PrestoJob getPrestoJob() { + if (typeJobCase_ == 23) { + return (com.google.cloud.dataproc.v1.PrestoJob) typeJob_; + } + return com.google.cloud.dataproc.v1.PrestoJob.getDefaultInstance(); + } + /** + * + * + *
+   * Optional. Job is a Presto job.
+   * 
+ * + * + * .google.cloud.dataproc.v1.PrestoJob presto_job = 23 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public com.google.cloud.dataproc.v1.PrestoJobOrBuilder getPrestoJobOrBuilder() { + if (typeJobCase_ == 23) { + return (com.google.cloud.dataproc.v1.PrestoJob) typeJob_; + } + return com.google.cloud.dataproc.v1.PrestoJob.getDefaultInstance(); + } + public static final int STATUS_FIELD_NUMBER = 8; private com.google.cloud.dataproc.v1.JobStatus status_; /** @@ -1396,9 +1572,15 @@ public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io if (scheduling_ != null) { output.writeMessage(20, getScheduling()); } + if (typeJobCase_ == 21) { + output.writeMessage(21, (com.google.cloud.dataproc.v1.SparkRJob) typeJob_); + } if (!getJobUuidBytes().isEmpty()) { com.google.protobuf.GeneratedMessageV3.writeString(output, 22, jobUuid_); } + if (typeJobCase_ == 23) { + output.writeMessage(23, (com.google.cloud.dataproc.v1.PrestoJob) typeJob_); + } unknownFields.writeTo(output); } @@ -1473,9 +1655,19 @@ public int getSerializedSize() { if (scheduling_ != null) { size += com.google.protobuf.CodedOutputStream.computeMessageSize(20, getScheduling()); } + if (typeJobCase_ == 21) { + size += + com.google.protobuf.CodedOutputStream.computeMessageSize( + 21, (com.google.cloud.dataproc.v1.SparkRJob) typeJob_); + } if (!getJobUuidBytes().isEmpty()) { size += com.google.protobuf.GeneratedMessageV3.computeStringSize(22, jobUuid_); } + if (typeJobCase_ == 23) { + size += + com.google.protobuf.CodedOutputStream.computeMessageSize( + 23, (com.google.cloud.dataproc.v1.PrestoJob) typeJob_); + } size += unknownFields.getSerializedSize(); memoizedSize = size; return size; @@ -1530,9 +1722,15 @@ public boolean equals(final java.lang.Object obj) { case 7: if (!getPigJob().equals(other.getPigJob())) return false; break; + case 21: + if (!getSparkRJob().equals(other.getSparkRJob())) return false; + break; case 12: if (!getSparkSqlJob().equals(other.getSparkSqlJob())) 
return false; break; + case 23: + if (!getPrestoJob().equals(other.getPrestoJob())) return false; + break; case 0: default: } @@ -1602,10 +1800,18 @@ public int hashCode() { hash = (37 * hash) + PIG_JOB_FIELD_NUMBER; hash = (53 * hash) + getPigJob().hashCode(); break; + case 21: + hash = (37 * hash) + SPARK_R_JOB_FIELD_NUMBER; + hash = (53 * hash) + getSparkRJob().hashCode(); + break; case 12: hash = (37 * hash) + SPARK_SQL_JOB_FIELD_NUMBER; hash = (53 * hash) + getSparkSqlJob().hashCode(); break; + case 23: + hash = (37 * hash) + PRESTO_JOB_FIELD_NUMBER; + hash = (53 * hash) + getPrestoJob().hashCode(); + break; case 0: default: } @@ -1893,6 +2099,13 @@ public com.google.cloud.dataproc.v1.Job buildPartial() { result.typeJob_ = pigJobBuilder_.build(); } } + if (typeJobCase_ == 21) { + if (sparkRJobBuilder_ == null) { + result.typeJob_ = typeJob_; + } else { + result.typeJob_ = sparkRJobBuilder_.build(); + } + } if (typeJobCase_ == 12) { if (sparkSqlJobBuilder_ == null) { result.typeJob_ = typeJob_; @@ -1900,6 +2113,13 @@ public com.google.cloud.dataproc.v1.Job buildPartial() { result.typeJob_ = sparkSqlJobBuilder_.build(); } } + if (typeJobCase_ == 23) { + if (prestoJobBuilder_ == null) { + result.typeJob_ = typeJob_; + } else { + result.typeJob_ = prestoJobBuilder_.build(); + } + } if (statusBuilder_ == null) { result.status_ = status_; } else { @@ -2088,11 +2308,21 @@ public Builder mergeFrom(com.google.cloud.dataproc.v1.Job other) { mergePigJob(other.getPigJob()); break; } + case SPARK_R_JOB: + { + mergeSparkRJob(other.getSparkRJob()); + break; + } case SPARK_SQL_JOB: { mergeSparkSqlJob(other.getSparkSqlJob()); break; } + case PRESTO_JOB: + { + mergePrestoJob(other.getPrestoJob()); + break; + } case TYPEJOB_NOT_SET: { break; @@ -2594,10 +2824,12 @@ public com.google.cloud.dataproc.v1.JobPlacementOrBuilder getPlacementOrBuilder( * * *
-     * Job is a Hadoop job.
+     * Optional. Job is a Hadoop job.
      * 
* - * .google.cloud.dataproc.v1.HadoopJob hadoop_job = 3; + * + * .google.cloud.dataproc.v1.HadoopJob hadoop_job = 3 [(.google.api.field_behavior) = OPTIONAL]; + * * * @return Whether the hadoopJob field is set. */ @@ -2608,10 +2840,12 @@ public boolean hasHadoopJob() { * * *
-     * Job is a Hadoop job.
+     * Optional. Job is a Hadoop job.
      * 
* - * .google.cloud.dataproc.v1.HadoopJob hadoop_job = 3; + * + * .google.cloud.dataproc.v1.HadoopJob hadoop_job = 3 [(.google.api.field_behavior) = OPTIONAL]; + * * * @return The hadoopJob. */ @@ -2632,10 +2866,12 @@ public com.google.cloud.dataproc.v1.HadoopJob getHadoopJob() { * * *
-     * Job is a Hadoop job.
+     * Optional. Job is a Hadoop job.
      * 
* - * .google.cloud.dataproc.v1.HadoopJob hadoop_job = 3; + * + * .google.cloud.dataproc.v1.HadoopJob hadoop_job = 3 [(.google.api.field_behavior) = OPTIONAL]; + * */ public Builder setHadoopJob(com.google.cloud.dataproc.v1.HadoopJob value) { if (hadoopJobBuilder_ == null) { @@ -2654,10 +2890,12 @@ public Builder setHadoopJob(com.google.cloud.dataproc.v1.HadoopJob value) { * * *
-     * Job is a Hadoop job.
+     * Optional. Job is a Hadoop job.
      * 
* - * .google.cloud.dataproc.v1.HadoopJob hadoop_job = 3; + * + * .google.cloud.dataproc.v1.HadoopJob hadoop_job = 3 [(.google.api.field_behavior) = OPTIONAL]; + * */ public Builder setHadoopJob(com.google.cloud.dataproc.v1.HadoopJob.Builder builderForValue) { if (hadoopJobBuilder_ == null) { @@ -2673,10 +2911,12 @@ public Builder setHadoopJob(com.google.cloud.dataproc.v1.HadoopJob.Builder build * * *
-     * Job is a Hadoop job.
+     * Optional. Job is a Hadoop job.
      * 
* - * .google.cloud.dataproc.v1.HadoopJob hadoop_job = 3; + * + * .google.cloud.dataproc.v1.HadoopJob hadoop_job = 3 [(.google.api.field_behavior) = OPTIONAL]; + * */ public Builder mergeHadoopJob(com.google.cloud.dataproc.v1.HadoopJob value) { if (hadoopJobBuilder_ == null) { @@ -2704,10 +2944,12 @@ public Builder mergeHadoopJob(com.google.cloud.dataproc.v1.HadoopJob value) { * * *
-     * Job is a Hadoop job.
+     * Optional. Job is a Hadoop job.
      * 
* - * .google.cloud.dataproc.v1.HadoopJob hadoop_job = 3; + * + * .google.cloud.dataproc.v1.HadoopJob hadoop_job = 3 [(.google.api.field_behavior) = OPTIONAL]; + * */ public Builder clearHadoopJob() { if (hadoopJobBuilder_ == null) { @@ -2729,10 +2971,12 @@ public Builder clearHadoopJob() { * * *
-     * Job is a Hadoop job.
+     * Optional. Job is a Hadoop job.
      * 
* - * .google.cloud.dataproc.v1.HadoopJob hadoop_job = 3; + * + * .google.cloud.dataproc.v1.HadoopJob hadoop_job = 3 [(.google.api.field_behavior) = OPTIONAL]; + * */ public com.google.cloud.dataproc.v1.HadoopJob.Builder getHadoopJobBuilder() { return getHadoopJobFieldBuilder().getBuilder(); @@ -2741,10 +2985,12 @@ public com.google.cloud.dataproc.v1.HadoopJob.Builder getHadoopJobBuilder() { * * *
-     * Job is a Hadoop job.
+     * Optional. Job is a Hadoop job.
      * 
* - * .google.cloud.dataproc.v1.HadoopJob hadoop_job = 3; + * + * .google.cloud.dataproc.v1.HadoopJob hadoop_job = 3 [(.google.api.field_behavior) = OPTIONAL]; + * */ public com.google.cloud.dataproc.v1.HadoopJobOrBuilder getHadoopJobOrBuilder() { if ((typeJobCase_ == 3) && (hadoopJobBuilder_ != null)) { @@ -2760,10 +3006,12 @@ public com.google.cloud.dataproc.v1.HadoopJobOrBuilder getHadoopJobOrBuilder() { * * *
-     * Job is a Hadoop job.
+     * Optional. Job is a Hadoop job.
      * 
* - * .google.cloud.dataproc.v1.HadoopJob hadoop_job = 3; + * + * .google.cloud.dataproc.v1.HadoopJob hadoop_job = 3 [(.google.api.field_behavior) = OPTIONAL]; + * */ private com.google.protobuf.SingleFieldBuilderV3< com.google.cloud.dataproc.v1.HadoopJob, @@ -2799,10 +3047,12 @@ public com.google.cloud.dataproc.v1.HadoopJobOrBuilder getHadoopJobOrBuilder() { * * *
-     * Job is a Spark job.
+     * Optional. Job is a Spark job.
      * 
* - * .google.cloud.dataproc.v1.SparkJob spark_job = 4; + * + * .google.cloud.dataproc.v1.SparkJob spark_job = 4 [(.google.api.field_behavior) = OPTIONAL]; + * * * @return Whether the sparkJob field is set. */ @@ -2813,10 +3063,12 @@ public boolean hasSparkJob() { * * *
-     * Job is a Spark job.
+     * Optional. Job is a Spark job.
      * 
* - * .google.cloud.dataproc.v1.SparkJob spark_job = 4; + * + * .google.cloud.dataproc.v1.SparkJob spark_job = 4 [(.google.api.field_behavior) = OPTIONAL]; + * * * @return The sparkJob. */ @@ -2837,10 +3089,12 @@ public com.google.cloud.dataproc.v1.SparkJob getSparkJob() { * * *
-     * Job is a Spark job.
+     * Optional. Job is a Spark job.
      * 
* - * .google.cloud.dataproc.v1.SparkJob spark_job = 4; + * + * .google.cloud.dataproc.v1.SparkJob spark_job = 4 [(.google.api.field_behavior) = OPTIONAL]; + * */ public Builder setSparkJob(com.google.cloud.dataproc.v1.SparkJob value) { if (sparkJobBuilder_ == null) { @@ -2859,10 +3113,12 @@ public Builder setSparkJob(com.google.cloud.dataproc.v1.SparkJob value) { * * *
-     * Job is a Spark job.
+     * Optional. Job is a Spark job.
      * 
* - * .google.cloud.dataproc.v1.SparkJob spark_job = 4; + * + * .google.cloud.dataproc.v1.SparkJob spark_job = 4 [(.google.api.field_behavior) = OPTIONAL]; + * */ public Builder setSparkJob(com.google.cloud.dataproc.v1.SparkJob.Builder builderForValue) { if (sparkJobBuilder_ == null) { @@ -2878,10 +3134,12 @@ public Builder setSparkJob(com.google.cloud.dataproc.v1.SparkJob.Builder builder * * *
-     * Job is a Spark job.
+     * Optional. Job is a Spark job.
      * 
* - * .google.cloud.dataproc.v1.SparkJob spark_job = 4; + * + * .google.cloud.dataproc.v1.SparkJob spark_job = 4 [(.google.api.field_behavior) = OPTIONAL]; + * */ public Builder mergeSparkJob(com.google.cloud.dataproc.v1.SparkJob value) { if (sparkJobBuilder_ == null) { @@ -2909,10 +3167,12 @@ public Builder mergeSparkJob(com.google.cloud.dataproc.v1.SparkJob value) { * * *
-     * Job is a Spark job.
+     * Optional. Job is a Spark job.
      * 
* - * .google.cloud.dataproc.v1.SparkJob spark_job = 4; + * + * .google.cloud.dataproc.v1.SparkJob spark_job = 4 [(.google.api.field_behavior) = OPTIONAL]; + * */ public Builder clearSparkJob() { if (sparkJobBuilder_ == null) { @@ -2934,10 +3194,12 @@ public Builder clearSparkJob() { * * *
-     * Job is a Spark job.
+     * Optional. Job is a Spark job.
      * 
* - * .google.cloud.dataproc.v1.SparkJob spark_job = 4; + * + * .google.cloud.dataproc.v1.SparkJob spark_job = 4 [(.google.api.field_behavior) = OPTIONAL]; + * */ public com.google.cloud.dataproc.v1.SparkJob.Builder getSparkJobBuilder() { return getSparkJobFieldBuilder().getBuilder(); @@ -2946,10 +3208,12 @@ public com.google.cloud.dataproc.v1.SparkJob.Builder getSparkJobBuilder() { * * *
-     * Job is a Spark job.
+     * Optional. Job is a Spark job.
      * 
* - * .google.cloud.dataproc.v1.SparkJob spark_job = 4; + * + * .google.cloud.dataproc.v1.SparkJob spark_job = 4 [(.google.api.field_behavior) = OPTIONAL]; + * */ public com.google.cloud.dataproc.v1.SparkJobOrBuilder getSparkJobOrBuilder() { if ((typeJobCase_ == 4) && (sparkJobBuilder_ != null)) { @@ -2965,10 +3229,12 @@ public com.google.cloud.dataproc.v1.SparkJobOrBuilder getSparkJobOrBuilder() { * * *
-     * Job is a Spark job.
+     * Optional. Job is a Spark job.
      * 
* - * .google.cloud.dataproc.v1.SparkJob spark_job = 4; + * + * .google.cloud.dataproc.v1.SparkJob spark_job = 4 [(.google.api.field_behavior) = OPTIONAL]; + * */ private com.google.protobuf.SingleFieldBuilderV3< com.google.cloud.dataproc.v1.SparkJob, @@ -3004,10 +3270,12 @@ public com.google.cloud.dataproc.v1.SparkJobOrBuilder getSparkJobOrBuilder() { * * *
-     * Job is a Pyspark job.
+     * Optional. Job is a PySpark job.
      * 
* - * .google.cloud.dataproc.v1.PySparkJob pyspark_job = 5; + * + * .google.cloud.dataproc.v1.PySparkJob pyspark_job = 5 [(.google.api.field_behavior) = OPTIONAL]; + * * * @return Whether the pysparkJob field is set. */ @@ -3018,10 +3286,12 @@ public boolean hasPysparkJob() { * * *
-     * Job is a Pyspark job.
+     * Optional. Job is a PySpark job.
      * 
* - * .google.cloud.dataproc.v1.PySparkJob pyspark_job = 5; + * + * .google.cloud.dataproc.v1.PySparkJob pyspark_job = 5 [(.google.api.field_behavior) = OPTIONAL]; + * * * @return The pysparkJob. */ @@ -3042,10 +3312,12 @@ public com.google.cloud.dataproc.v1.PySparkJob getPysparkJob() { * * *
-     * Job is a Pyspark job.
+     * Optional. Job is a PySpark job.
      * 
* - * .google.cloud.dataproc.v1.PySparkJob pyspark_job = 5; + * + * .google.cloud.dataproc.v1.PySparkJob pyspark_job = 5 [(.google.api.field_behavior) = OPTIONAL]; + * */ public Builder setPysparkJob(com.google.cloud.dataproc.v1.PySparkJob value) { if (pysparkJobBuilder_ == null) { @@ -3064,10 +3336,12 @@ public Builder setPysparkJob(com.google.cloud.dataproc.v1.PySparkJob value) { * * *
-     * Job is a Pyspark job.
+     * Optional. Job is a PySpark job.
      * 
* - * .google.cloud.dataproc.v1.PySparkJob pyspark_job = 5; + * + * .google.cloud.dataproc.v1.PySparkJob pyspark_job = 5 [(.google.api.field_behavior) = OPTIONAL]; + * */ public Builder setPysparkJob(com.google.cloud.dataproc.v1.PySparkJob.Builder builderForValue) { if (pysparkJobBuilder_ == null) { @@ -3083,10 +3357,12 @@ public Builder setPysparkJob(com.google.cloud.dataproc.v1.PySparkJob.Builder bui * * *
-     * Job is a Pyspark job.
+     * Optional. Job is a PySpark job.
      * 
* - * .google.cloud.dataproc.v1.PySparkJob pyspark_job = 5; + * + * .google.cloud.dataproc.v1.PySparkJob pyspark_job = 5 [(.google.api.field_behavior) = OPTIONAL]; + * */ public Builder mergePysparkJob(com.google.cloud.dataproc.v1.PySparkJob value) { if (pysparkJobBuilder_ == null) { @@ -3114,10 +3390,12 @@ public Builder mergePysparkJob(com.google.cloud.dataproc.v1.PySparkJob value) { * * *
-     * Job is a Pyspark job.
+     * Optional. Job is a PySpark job.
      * 
* - * .google.cloud.dataproc.v1.PySparkJob pyspark_job = 5; + * + * .google.cloud.dataproc.v1.PySparkJob pyspark_job = 5 [(.google.api.field_behavior) = OPTIONAL]; + * */ public Builder clearPysparkJob() { if (pysparkJobBuilder_ == null) { @@ -3139,10 +3417,12 @@ public Builder clearPysparkJob() { * * *
-     * Job is a Pyspark job.
+     * Optional. Job is a PySpark job.
      * 
* - * .google.cloud.dataproc.v1.PySparkJob pyspark_job = 5; + * + * .google.cloud.dataproc.v1.PySparkJob pyspark_job = 5 [(.google.api.field_behavior) = OPTIONAL]; + * */ public com.google.cloud.dataproc.v1.PySparkJob.Builder getPysparkJobBuilder() { return getPysparkJobFieldBuilder().getBuilder(); @@ -3151,10 +3431,12 @@ public com.google.cloud.dataproc.v1.PySparkJob.Builder getPysparkJobBuilder() { * * *
-     * Job is a Pyspark job.
+     * Optional. Job is a PySpark job.
      * 
* - * .google.cloud.dataproc.v1.PySparkJob pyspark_job = 5; + * + * .google.cloud.dataproc.v1.PySparkJob pyspark_job = 5 [(.google.api.field_behavior) = OPTIONAL]; + * */ public com.google.cloud.dataproc.v1.PySparkJobOrBuilder getPysparkJobOrBuilder() { if ((typeJobCase_ == 5) && (pysparkJobBuilder_ != null)) { @@ -3170,10 +3452,12 @@ public com.google.cloud.dataproc.v1.PySparkJobOrBuilder getPysparkJobOrBuilder() * * *
-     * Job is a Pyspark job.
+     * Optional. Job is a PySpark job.
      * 
* - * .google.cloud.dataproc.v1.PySparkJob pyspark_job = 5; + * + * .google.cloud.dataproc.v1.PySparkJob pyspark_job = 5 [(.google.api.field_behavior) = OPTIONAL]; + * */ private com.google.protobuf.SingleFieldBuilderV3< com.google.cloud.dataproc.v1.PySparkJob, @@ -3209,10 +3493,12 @@ public com.google.cloud.dataproc.v1.PySparkJobOrBuilder getPysparkJobOrBuilder() * * *
-     * Job is a Hive job.
+     * Optional. Job is a Hive job.
      * 
* - * .google.cloud.dataproc.v1.HiveJob hive_job = 6; + * + * .google.cloud.dataproc.v1.HiveJob hive_job = 6 [(.google.api.field_behavior) = OPTIONAL]; + * * * @return Whether the hiveJob field is set. */ @@ -3223,10 +3509,12 @@ public boolean hasHiveJob() { * * *
-     * Job is a Hive job.
+     * Optional. Job is a Hive job.
      * 
* - * .google.cloud.dataproc.v1.HiveJob hive_job = 6; + * + * .google.cloud.dataproc.v1.HiveJob hive_job = 6 [(.google.api.field_behavior) = OPTIONAL]; + * * * @return The hiveJob. */ @@ -3247,10 +3535,12 @@ public com.google.cloud.dataproc.v1.HiveJob getHiveJob() { * * *
-     * Job is a Hive job.
+     * Optional. Job is a Hive job.
      * 
* - * .google.cloud.dataproc.v1.HiveJob hive_job = 6; + * + * .google.cloud.dataproc.v1.HiveJob hive_job = 6 [(.google.api.field_behavior) = OPTIONAL]; + * */ public Builder setHiveJob(com.google.cloud.dataproc.v1.HiveJob value) { if (hiveJobBuilder_ == null) { @@ -3269,10 +3559,12 @@ public Builder setHiveJob(com.google.cloud.dataproc.v1.HiveJob value) { * * *
-     * Job is a Hive job.
+     * Optional. Job is a Hive job.
      * 
* - * .google.cloud.dataproc.v1.HiveJob hive_job = 6; + * + * .google.cloud.dataproc.v1.HiveJob hive_job = 6 [(.google.api.field_behavior) = OPTIONAL]; + * */ public Builder setHiveJob(com.google.cloud.dataproc.v1.HiveJob.Builder builderForValue) { if (hiveJobBuilder_ == null) { @@ -3288,10 +3580,12 @@ public Builder setHiveJob(com.google.cloud.dataproc.v1.HiveJob.Builder builderFo * * *
-     * Job is a Hive job.
+     * Optional. Job is a Hive job.
      * 
* - * .google.cloud.dataproc.v1.HiveJob hive_job = 6; + * + * .google.cloud.dataproc.v1.HiveJob hive_job = 6 [(.google.api.field_behavior) = OPTIONAL]; + * */ public Builder mergeHiveJob(com.google.cloud.dataproc.v1.HiveJob value) { if (hiveJobBuilder_ == null) { @@ -3319,10 +3613,12 @@ public Builder mergeHiveJob(com.google.cloud.dataproc.v1.HiveJob value) { * * *
-     * Job is a Hive job.
+     * Optional. Job is a Hive job.
      * 
* - * .google.cloud.dataproc.v1.HiveJob hive_job = 6; + * + * .google.cloud.dataproc.v1.HiveJob hive_job = 6 [(.google.api.field_behavior) = OPTIONAL]; + * */ public Builder clearHiveJob() { if (hiveJobBuilder_ == null) { @@ -3344,10 +3640,12 @@ public Builder clearHiveJob() { * * *
-     * Job is a Hive job.
+     * Optional. Job is a Hive job.
      * 
* - * .google.cloud.dataproc.v1.HiveJob hive_job = 6; + * + * .google.cloud.dataproc.v1.HiveJob hive_job = 6 [(.google.api.field_behavior) = OPTIONAL]; + * */ public com.google.cloud.dataproc.v1.HiveJob.Builder getHiveJobBuilder() { return getHiveJobFieldBuilder().getBuilder(); @@ -3356,10 +3654,12 @@ public com.google.cloud.dataproc.v1.HiveJob.Builder getHiveJobBuilder() { * * *
-     * Job is a Hive job.
+     * Optional. Job is a Hive job.
      * 
* - * .google.cloud.dataproc.v1.HiveJob hive_job = 6; + * + * .google.cloud.dataproc.v1.HiveJob hive_job = 6 [(.google.api.field_behavior) = OPTIONAL]; + * */ public com.google.cloud.dataproc.v1.HiveJobOrBuilder getHiveJobOrBuilder() { if ((typeJobCase_ == 6) && (hiveJobBuilder_ != null)) { @@ -3375,10 +3675,12 @@ public com.google.cloud.dataproc.v1.HiveJobOrBuilder getHiveJobOrBuilder() { * * *
-     * Job is a Hive job.
+     * Optional. Job is a Hive job.
      * 
* - * .google.cloud.dataproc.v1.HiveJob hive_job = 6; + * + * .google.cloud.dataproc.v1.HiveJob hive_job = 6 [(.google.api.field_behavior) = OPTIONAL]; + * */ private com.google.protobuf.SingleFieldBuilderV3< com.google.cloud.dataproc.v1.HiveJob, @@ -3412,10 +3714,11 @@ public com.google.cloud.dataproc.v1.HiveJobOrBuilder getHiveJobOrBuilder() { * * *
-     * Job is a Pig job.
+     * Optional. Job is a Pig job.
      * 
* - * .google.cloud.dataproc.v1.PigJob pig_job = 7; + * .google.cloud.dataproc.v1.PigJob pig_job = 7 [(.google.api.field_behavior) = OPTIONAL]; + * * * @return Whether the pigJob field is set. */ @@ -3426,10 +3729,11 @@ public boolean hasPigJob() { * * *
-     * Job is a Pig job.
+     * Optional. Job is a Pig job.
      * 
* - * .google.cloud.dataproc.v1.PigJob pig_job = 7; + * .google.cloud.dataproc.v1.PigJob pig_job = 7 [(.google.api.field_behavior) = OPTIONAL]; + * * * @return The pigJob. */ @@ -3450,10 +3754,11 @@ public com.google.cloud.dataproc.v1.PigJob getPigJob() { * * *
-     * Job is a Pig job.
+     * Optional. Job is a Pig job.
      * 
* - * .google.cloud.dataproc.v1.PigJob pig_job = 7; + * .google.cloud.dataproc.v1.PigJob pig_job = 7 [(.google.api.field_behavior) = OPTIONAL]; + * */ public Builder setPigJob(com.google.cloud.dataproc.v1.PigJob value) { if (pigJobBuilder_ == null) { @@ -3472,10 +3777,11 @@ public Builder setPigJob(com.google.cloud.dataproc.v1.PigJob value) { * * *
-     * Job is a Pig job.
+     * Optional. Job is a Pig job.
      * 
* - * .google.cloud.dataproc.v1.PigJob pig_job = 7; + * .google.cloud.dataproc.v1.PigJob pig_job = 7 [(.google.api.field_behavior) = OPTIONAL]; + * */ public Builder setPigJob(com.google.cloud.dataproc.v1.PigJob.Builder builderForValue) { if (pigJobBuilder_ == null) { @@ -3491,10 +3797,11 @@ public Builder setPigJob(com.google.cloud.dataproc.v1.PigJob.Builder builderForV * * *
-     * Job is a Pig job.
+     * Optional. Job is a Pig job.
      * 
* - * .google.cloud.dataproc.v1.PigJob pig_job = 7; + * .google.cloud.dataproc.v1.PigJob pig_job = 7 [(.google.api.field_behavior) = OPTIONAL]; + * */ public Builder mergePigJob(com.google.cloud.dataproc.v1.PigJob value) { if (pigJobBuilder_ == null) { @@ -3522,10 +3829,11 @@ public Builder mergePigJob(com.google.cloud.dataproc.v1.PigJob value) { * * *
-     * Job is a Pig job.
+     * Optional. Job is a Pig job.
      * 
* - * .google.cloud.dataproc.v1.PigJob pig_job = 7; + * .google.cloud.dataproc.v1.PigJob pig_job = 7 [(.google.api.field_behavior) = OPTIONAL]; + * */ public Builder clearPigJob() { if (pigJobBuilder_ == null) { @@ -3547,10 +3855,11 @@ public Builder clearPigJob() { * * *
-     * Job is a Pig job.
+     * Optional. Job is a Pig job.
      * 
* - * .google.cloud.dataproc.v1.PigJob pig_job = 7; + * .google.cloud.dataproc.v1.PigJob pig_job = 7 [(.google.api.field_behavior) = OPTIONAL]; + * */ public com.google.cloud.dataproc.v1.PigJob.Builder getPigJobBuilder() { return getPigJobFieldBuilder().getBuilder(); @@ -3559,10 +3868,11 @@ public com.google.cloud.dataproc.v1.PigJob.Builder getPigJobBuilder() { * * *
-     * Job is a Pig job.
+     * Optional. Job is a Pig job.
      * 
* - * .google.cloud.dataproc.v1.PigJob pig_job = 7; + * .google.cloud.dataproc.v1.PigJob pig_job = 7 [(.google.api.field_behavior) = OPTIONAL]; + * */ public com.google.cloud.dataproc.v1.PigJobOrBuilder getPigJobOrBuilder() { if ((typeJobCase_ == 7) && (pigJobBuilder_ != null)) { @@ -3578,10 +3888,11 @@ public com.google.cloud.dataproc.v1.PigJobOrBuilder getPigJobOrBuilder() { * * *
-     * Job is a Pig job.
+     * Optional. Job is a Pig job.
      * 
* - * .google.cloud.dataproc.v1.PigJob pig_job = 7; + * .google.cloud.dataproc.v1.PigJob pig_job = 7 [(.google.api.field_behavior) = OPTIONAL]; + * */ private com.google.protobuf.SingleFieldBuilderV3< com.google.cloud.dataproc.v1.PigJob, @@ -3607,82 +3918,313 @@ public com.google.cloud.dataproc.v1.PigJobOrBuilder getPigJobOrBuilder() { } private com.google.protobuf.SingleFieldBuilderV3< - com.google.cloud.dataproc.v1.SparkSqlJob, - com.google.cloud.dataproc.v1.SparkSqlJob.Builder, - com.google.cloud.dataproc.v1.SparkSqlJobOrBuilder> - sparkSqlJobBuilder_; + com.google.cloud.dataproc.v1.SparkRJob, + com.google.cloud.dataproc.v1.SparkRJob.Builder, + com.google.cloud.dataproc.v1.SparkRJobOrBuilder> + sparkRJobBuilder_; /** * * *
-     * Job is a SparkSql job.
+     * Optional. Job is a SparkR job.
      * 
* - * .google.cloud.dataproc.v1.SparkSqlJob spark_sql_job = 12; + * + * .google.cloud.dataproc.v1.SparkRJob spark_r_job = 21 [(.google.api.field_behavior) = OPTIONAL]; + * * - * @return Whether the sparkSqlJob field is set. + * @return Whether the sparkRJob field is set. */ - public boolean hasSparkSqlJob() { - return typeJobCase_ == 12; + public boolean hasSparkRJob() { + return typeJobCase_ == 21; } /** * * *
-     * Job is a SparkSql job.
+     * Optional. Job is a SparkR job.
      * 
* - * .google.cloud.dataproc.v1.SparkSqlJob spark_sql_job = 12; + * + * .google.cloud.dataproc.v1.SparkRJob spark_r_job = 21 [(.google.api.field_behavior) = OPTIONAL]; + * * - * @return The sparkSqlJob. + * @return The sparkRJob. */ - public com.google.cloud.dataproc.v1.SparkSqlJob getSparkSqlJob() { - if (sparkSqlJobBuilder_ == null) { - if (typeJobCase_ == 12) { - return (com.google.cloud.dataproc.v1.SparkSqlJob) typeJob_; + public com.google.cloud.dataproc.v1.SparkRJob getSparkRJob() { + if (sparkRJobBuilder_ == null) { + if (typeJobCase_ == 21) { + return (com.google.cloud.dataproc.v1.SparkRJob) typeJob_; } - return com.google.cloud.dataproc.v1.SparkSqlJob.getDefaultInstance(); + return com.google.cloud.dataproc.v1.SparkRJob.getDefaultInstance(); } else { - if (typeJobCase_ == 12) { - return sparkSqlJobBuilder_.getMessage(); + if (typeJobCase_ == 21) { + return sparkRJobBuilder_.getMessage(); } - return com.google.cloud.dataproc.v1.SparkSqlJob.getDefaultInstance(); + return com.google.cloud.dataproc.v1.SparkRJob.getDefaultInstance(); } } /** * * *
-     * Job is a SparkSql job.
+     * Optional. Job is a SparkR job.
      * 
* - * .google.cloud.dataproc.v1.SparkSqlJob spark_sql_job = 12; + * + * .google.cloud.dataproc.v1.SparkRJob spark_r_job = 21 [(.google.api.field_behavior) = OPTIONAL]; + * */ - public Builder setSparkSqlJob(com.google.cloud.dataproc.v1.SparkSqlJob value) { - if (sparkSqlJobBuilder_ == null) { + public Builder setSparkRJob(com.google.cloud.dataproc.v1.SparkRJob value) { + if (sparkRJobBuilder_ == null) { if (value == null) { throw new NullPointerException(); } typeJob_ = value; onChanged(); } else { - sparkSqlJobBuilder_.setMessage(value); + sparkRJobBuilder_.setMessage(value); } - typeJobCase_ = 12; + typeJobCase_ = 21; return this; } /** * * *
-     * Job is a SparkSql job.
+     * Optional. Job is a SparkR job.
      * 
* - * .google.cloud.dataproc.v1.SparkSqlJob spark_sql_job = 12; + * + * .google.cloud.dataproc.v1.SparkRJob spark_r_job = 21 [(.google.api.field_behavior) = OPTIONAL]; + * */ - public Builder setSparkSqlJob( - com.google.cloud.dataproc.v1.SparkSqlJob.Builder builderForValue) { - if (sparkSqlJobBuilder_ == null) { + public Builder setSparkRJob(com.google.cloud.dataproc.v1.SparkRJob.Builder builderForValue) { + if (sparkRJobBuilder_ == null) { + typeJob_ = builderForValue.build(); + onChanged(); + } else { + sparkRJobBuilder_.setMessage(builderForValue.build()); + } + typeJobCase_ = 21; + return this; + } + /** + * + * + *
+     * Optional. Job is a SparkR job.
+     * 
+ * + * + * .google.cloud.dataproc.v1.SparkRJob spark_r_job = 21 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder mergeSparkRJob(com.google.cloud.dataproc.v1.SparkRJob value) { + if (sparkRJobBuilder_ == null) { + if (typeJobCase_ == 21 + && typeJob_ != com.google.cloud.dataproc.v1.SparkRJob.getDefaultInstance()) { + typeJob_ = + com.google.cloud.dataproc.v1.SparkRJob.newBuilder( + (com.google.cloud.dataproc.v1.SparkRJob) typeJob_) + .mergeFrom(value) + .buildPartial(); + } else { + typeJob_ = value; + } + onChanged(); + } else { + if (typeJobCase_ == 21) { + sparkRJobBuilder_.mergeFrom(value); + } + sparkRJobBuilder_.setMessage(value); + } + typeJobCase_ = 21; + return this; + } + /** + * + * + *
+     * Optional. Job is a SparkR job.
+     * 
+ * + * + * .google.cloud.dataproc.v1.SparkRJob spark_r_job = 21 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder clearSparkRJob() { + if (sparkRJobBuilder_ == null) { + if (typeJobCase_ == 21) { + typeJobCase_ = 0; + typeJob_ = null; + onChanged(); + } + } else { + if (typeJobCase_ == 21) { + typeJobCase_ = 0; + typeJob_ = null; + } + sparkRJobBuilder_.clear(); + } + return this; + } + /** + * + * + *
+     * Optional. Job is a SparkR job.
+     * 
+ * + * + * .google.cloud.dataproc.v1.SparkRJob spark_r_job = 21 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public com.google.cloud.dataproc.v1.SparkRJob.Builder getSparkRJobBuilder() { + return getSparkRJobFieldBuilder().getBuilder(); + } + /** + * + * + *
+     * Optional. Job is a SparkR job.
+     * 
+ * + * + * .google.cloud.dataproc.v1.SparkRJob spark_r_job = 21 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public com.google.cloud.dataproc.v1.SparkRJobOrBuilder getSparkRJobOrBuilder() { + if ((typeJobCase_ == 21) && (sparkRJobBuilder_ != null)) { + return sparkRJobBuilder_.getMessageOrBuilder(); + } else { + if (typeJobCase_ == 21) { + return (com.google.cloud.dataproc.v1.SparkRJob) typeJob_; + } + return com.google.cloud.dataproc.v1.SparkRJob.getDefaultInstance(); + } + } + /** + * + * + *
+     * Optional. Job is a SparkR job.
+     * 
+ * + * + * .google.cloud.dataproc.v1.SparkRJob spark_r_job = 21 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + private com.google.protobuf.SingleFieldBuilderV3< + com.google.cloud.dataproc.v1.SparkRJob, + com.google.cloud.dataproc.v1.SparkRJob.Builder, + com.google.cloud.dataproc.v1.SparkRJobOrBuilder> + getSparkRJobFieldBuilder() { + if (sparkRJobBuilder_ == null) { + if (!(typeJobCase_ == 21)) { + typeJob_ = com.google.cloud.dataproc.v1.SparkRJob.getDefaultInstance(); + } + sparkRJobBuilder_ = + new com.google.protobuf.SingleFieldBuilderV3< + com.google.cloud.dataproc.v1.SparkRJob, + com.google.cloud.dataproc.v1.SparkRJob.Builder, + com.google.cloud.dataproc.v1.SparkRJobOrBuilder>( + (com.google.cloud.dataproc.v1.SparkRJob) typeJob_, + getParentForChildren(), + isClean()); + typeJob_ = null; + } + typeJobCase_ = 21; + onChanged(); + ; + return sparkRJobBuilder_; + } + + private com.google.protobuf.SingleFieldBuilderV3< + com.google.cloud.dataproc.v1.SparkSqlJob, + com.google.cloud.dataproc.v1.SparkSqlJob.Builder, + com.google.cloud.dataproc.v1.SparkSqlJobOrBuilder> + sparkSqlJobBuilder_; + /** + * + * + *
+     * Optional. Job is a SparkSql job.
+     * 
+ * + * + * .google.cloud.dataproc.v1.SparkSqlJob spark_sql_job = 12 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return Whether the sparkSqlJob field is set. + */ + public boolean hasSparkSqlJob() { + return typeJobCase_ == 12; + } + /** + * + * + *
+     * Optional. Job is a SparkSql job.
+     * 
+ * + * + * .google.cloud.dataproc.v1.SparkSqlJob spark_sql_job = 12 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return The sparkSqlJob. + */ + public com.google.cloud.dataproc.v1.SparkSqlJob getSparkSqlJob() { + if (sparkSqlJobBuilder_ == null) { + if (typeJobCase_ == 12) { + return (com.google.cloud.dataproc.v1.SparkSqlJob) typeJob_; + } + return com.google.cloud.dataproc.v1.SparkSqlJob.getDefaultInstance(); + } else { + if (typeJobCase_ == 12) { + return sparkSqlJobBuilder_.getMessage(); + } + return com.google.cloud.dataproc.v1.SparkSqlJob.getDefaultInstance(); + } + } + /** + * + * + *
+     * Optional. Job is a SparkSql job.
+     * 
+ * + * + * .google.cloud.dataproc.v1.SparkSqlJob spark_sql_job = 12 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder setSparkSqlJob(com.google.cloud.dataproc.v1.SparkSqlJob value) { + if (sparkSqlJobBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + typeJob_ = value; + onChanged(); + } else { + sparkSqlJobBuilder_.setMessage(value); + } + typeJobCase_ = 12; + return this; + } + /** + * + * + *
+     * Optional. Job is a SparkSql job.
+     * 
+ * + * + * .google.cloud.dataproc.v1.SparkSqlJob spark_sql_job = 12 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder setSparkSqlJob( + com.google.cloud.dataproc.v1.SparkSqlJob.Builder builderForValue) { + if (sparkSqlJobBuilder_ == null) { typeJob_ = builderForValue.build(); onChanged(); } else { @@ -3695,10 +4237,12 @@ public Builder setSparkSqlJob( * * *
-     * Job is a SparkSql job.
+     * Optional. Job is a SparkSql job.
      * 
* - * .google.cloud.dataproc.v1.SparkSqlJob spark_sql_job = 12; + * + * .google.cloud.dataproc.v1.SparkSqlJob spark_sql_job = 12 [(.google.api.field_behavior) = OPTIONAL]; + * */ public Builder mergeSparkSqlJob(com.google.cloud.dataproc.v1.SparkSqlJob value) { if (sparkSqlJobBuilder_ == null) { @@ -3726,10 +4270,12 @@ public Builder mergeSparkSqlJob(com.google.cloud.dataproc.v1.SparkSqlJob value) * * *
-     * Job is a SparkSql job.
+     * Optional. Job is a SparkSql job.
      * 
* - * .google.cloud.dataproc.v1.SparkSqlJob spark_sql_job = 12; + * + * .google.cloud.dataproc.v1.SparkSqlJob spark_sql_job = 12 [(.google.api.field_behavior) = OPTIONAL]; + * */ public Builder clearSparkSqlJob() { if (sparkSqlJobBuilder_ == null) { @@ -3751,10 +4297,12 @@ public Builder clearSparkSqlJob() { * * *
-     * Job is a SparkSql job.
+     * Optional. Job is a SparkSql job.
      * 
* - * .google.cloud.dataproc.v1.SparkSqlJob spark_sql_job = 12; + * + * .google.cloud.dataproc.v1.SparkSqlJob spark_sql_job = 12 [(.google.api.field_behavior) = OPTIONAL]; + * */ public com.google.cloud.dataproc.v1.SparkSqlJob.Builder getSparkSqlJobBuilder() { return getSparkSqlJobFieldBuilder().getBuilder(); @@ -3763,10 +4311,12 @@ public com.google.cloud.dataproc.v1.SparkSqlJob.Builder getSparkSqlJobBuilder() * * *
-     * Job is a SparkSql job.
+     * Optional. Job is a SparkSql job.
      * 
* - * .google.cloud.dataproc.v1.SparkSqlJob spark_sql_job = 12; + * + * .google.cloud.dataproc.v1.SparkSqlJob spark_sql_job = 12 [(.google.api.field_behavior) = OPTIONAL]; + * */ public com.google.cloud.dataproc.v1.SparkSqlJobOrBuilder getSparkSqlJobOrBuilder() { if ((typeJobCase_ == 12) && (sparkSqlJobBuilder_ != null)) { @@ -3782,10 +4332,12 @@ public com.google.cloud.dataproc.v1.SparkSqlJobOrBuilder getSparkSqlJobOrBuilder * * *
-     * Job is a SparkSql job.
+     * Optional. Job is a SparkSql job.
      * 
* - * .google.cloud.dataproc.v1.SparkSqlJob spark_sql_job = 12; + * + * .google.cloud.dataproc.v1.SparkSqlJob spark_sql_job = 12 [(.google.api.field_behavior) = OPTIONAL]; + * */ private com.google.protobuf.SingleFieldBuilderV3< com.google.cloud.dataproc.v1.SparkSqlJob, @@ -3812,6 +4364,229 @@ public com.google.cloud.dataproc.v1.SparkSqlJobOrBuilder getSparkSqlJobOrBuilder return sparkSqlJobBuilder_; } + private com.google.protobuf.SingleFieldBuilderV3< + com.google.cloud.dataproc.v1.PrestoJob, + com.google.cloud.dataproc.v1.PrestoJob.Builder, + com.google.cloud.dataproc.v1.PrestoJobOrBuilder> + prestoJobBuilder_; + /** + * + * + *
+     * Optional. Job is a Presto job.
+     * 
+ * + * + * .google.cloud.dataproc.v1.PrestoJob presto_job = 23 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return Whether the prestoJob field is set. + */ + public boolean hasPrestoJob() { + return typeJobCase_ == 23; + } + /** + * + * + *
+     * Optional. Job is a Presto job.
+     * 
+ * + * + * .google.cloud.dataproc.v1.PrestoJob presto_job = 23 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return The prestoJob. + */ + public com.google.cloud.dataproc.v1.PrestoJob getPrestoJob() { + if (prestoJobBuilder_ == null) { + if (typeJobCase_ == 23) { + return (com.google.cloud.dataproc.v1.PrestoJob) typeJob_; + } + return com.google.cloud.dataproc.v1.PrestoJob.getDefaultInstance(); + } else { + if (typeJobCase_ == 23) { + return prestoJobBuilder_.getMessage(); + } + return com.google.cloud.dataproc.v1.PrestoJob.getDefaultInstance(); + } + } + /** + * + * + *
+     * Optional. Job is a Presto job.
+     * 
+ * + * + * .google.cloud.dataproc.v1.PrestoJob presto_job = 23 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder setPrestoJob(com.google.cloud.dataproc.v1.PrestoJob value) { + if (prestoJobBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + typeJob_ = value; + onChanged(); + } else { + prestoJobBuilder_.setMessage(value); + } + typeJobCase_ = 23; + return this; + } + /** + * + * + *
+     * Optional. Job is a Presto job.
+     * 
+ * + * + * .google.cloud.dataproc.v1.PrestoJob presto_job = 23 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder setPrestoJob(com.google.cloud.dataproc.v1.PrestoJob.Builder builderForValue) { + if (prestoJobBuilder_ == null) { + typeJob_ = builderForValue.build(); + onChanged(); + } else { + prestoJobBuilder_.setMessage(builderForValue.build()); + } + typeJobCase_ = 23; + return this; + } + /** + * + * + *
+     * Optional. Job is a Presto job.
+     * 
+ * + * + * .google.cloud.dataproc.v1.PrestoJob presto_job = 23 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder mergePrestoJob(com.google.cloud.dataproc.v1.PrestoJob value) { + if (prestoJobBuilder_ == null) { + if (typeJobCase_ == 23 + && typeJob_ != com.google.cloud.dataproc.v1.PrestoJob.getDefaultInstance()) { + typeJob_ = + com.google.cloud.dataproc.v1.PrestoJob.newBuilder( + (com.google.cloud.dataproc.v1.PrestoJob) typeJob_) + .mergeFrom(value) + .buildPartial(); + } else { + typeJob_ = value; + } + onChanged(); + } else { + if (typeJobCase_ == 23) { + prestoJobBuilder_.mergeFrom(value); + } + prestoJobBuilder_.setMessage(value); + } + typeJobCase_ = 23; + return this; + } + /** + * + * + *
+     * Optional. Job is a Presto job.
+     * 
+ * + * + * .google.cloud.dataproc.v1.PrestoJob presto_job = 23 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder clearPrestoJob() { + if (prestoJobBuilder_ == null) { + if (typeJobCase_ == 23) { + typeJobCase_ = 0; + typeJob_ = null; + onChanged(); + } + } else { + if (typeJobCase_ == 23) { + typeJobCase_ = 0; + typeJob_ = null; + } + prestoJobBuilder_.clear(); + } + return this; + } + /** + * + * + *
+     * Optional. Job is a Presto job.
+     * 
+ * + * + * .google.cloud.dataproc.v1.PrestoJob presto_job = 23 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public com.google.cloud.dataproc.v1.PrestoJob.Builder getPrestoJobBuilder() { + return getPrestoJobFieldBuilder().getBuilder(); + } + /** + * + * + *
+     * Optional. Job is a Presto job.
+     * 
+ * + * + * .google.cloud.dataproc.v1.PrestoJob presto_job = 23 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public com.google.cloud.dataproc.v1.PrestoJobOrBuilder getPrestoJobOrBuilder() { + if ((typeJobCase_ == 23) && (prestoJobBuilder_ != null)) { + return prestoJobBuilder_.getMessageOrBuilder(); + } else { + if (typeJobCase_ == 23) { + return (com.google.cloud.dataproc.v1.PrestoJob) typeJob_; + } + return com.google.cloud.dataproc.v1.PrestoJob.getDefaultInstance(); + } + } + /** + * + * + *
+     * Optional. Job is a Presto job.
+     * 
+ * + * + * .google.cloud.dataproc.v1.PrestoJob presto_job = 23 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + private com.google.protobuf.SingleFieldBuilderV3< + com.google.cloud.dataproc.v1.PrestoJob, + com.google.cloud.dataproc.v1.PrestoJob.Builder, + com.google.cloud.dataproc.v1.PrestoJobOrBuilder> + getPrestoJobFieldBuilder() { + if (prestoJobBuilder_ == null) { + if (!(typeJobCase_ == 23)) { + typeJob_ = com.google.cloud.dataproc.v1.PrestoJob.getDefaultInstance(); + } + prestoJobBuilder_ = + new com.google.protobuf.SingleFieldBuilderV3< + com.google.cloud.dataproc.v1.PrestoJob, + com.google.cloud.dataproc.v1.PrestoJob.Builder, + com.google.cloud.dataproc.v1.PrestoJobOrBuilder>( + (com.google.cloud.dataproc.v1.PrestoJob) typeJob_, + getParentForChildren(), + isClean()); + typeJob_ = null; + } + typeJobCase_ = 23; + onChanged(); + ; + return prestoJobBuilder_; + } + private com.google.cloud.dataproc.v1.JobStatus status_; private com.google.protobuf.SingleFieldBuilderV3< com.google.cloud.dataproc.v1.JobStatus, diff --git a/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/JobOrBuilder.java b/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/JobOrBuilder.java index 45bb6c7c..b1b8548b 100644 --- a/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/JobOrBuilder.java +++ b/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/JobOrBuilder.java @@ -121,10 +121,12 @@ public interface JobOrBuilder * * *
-   * Job is a Hadoop job.
+   * Optional. Job is a Hadoop job.
    * 
* - * .google.cloud.dataproc.v1.HadoopJob hadoop_job = 3; + * + * .google.cloud.dataproc.v1.HadoopJob hadoop_job = 3 [(.google.api.field_behavior) = OPTIONAL]; + * * * @return Whether the hadoopJob field is set. */ @@ -133,10 +135,12 @@ public interface JobOrBuilder * * *
-   * Job is a Hadoop job.
+   * Optional. Job is a Hadoop job.
    * 
* - * .google.cloud.dataproc.v1.HadoopJob hadoop_job = 3; + * + * .google.cloud.dataproc.v1.HadoopJob hadoop_job = 3 [(.google.api.field_behavior) = OPTIONAL]; + * * * @return The hadoopJob. */ @@ -145,10 +149,12 @@ public interface JobOrBuilder * * *
-   * Job is a Hadoop job.
+   * Optional. Job is a Hadoop job.
    * 
* - * .google.cloud.dataproc.v1.HadoopJob hadoop_job = 3; + * + * .google.cloud.dataproc.v1.HadoopJob hadoop_job = 3 [(.google.api.field_behavior) = OPTIONAL]; + * */ com.google.cloud.dataproc.v1.HadoopJobOrBuilder getHadoopJobOrBuilder(); @@ -156,10 +162,12 @@ public interface JobOrBuilder * * *
-   * Job is a Spark job.
+   * Optional. Job is a Spark job.
    * 
* - * .google.cloud.dataproc.v1.SparkJob spark_job = 4; + * + * .google.cloud.dataproc.v1.SparkJob spark_job = 4 [(.google.api.field_behavior) = OPTIONAL]; + * * * @return Whether the sparkJob field is set. */ @@ -168,10 +176,12 @@ public interface JobOrBuilder * * *
-   * Job is a Spark job.
+   * Optional. Job is a Spark job.
    * 
* - * .google.cloud.dataproc.v1.SparkJob spark_job = 4; + * + * .google.cloud.dataproc.v1.SparkJob spark_job = 4 [(.google.api.field_behavior) = OPTIONAL]; + * * * @return The sparkJob. */ @@ -180,10 +190,12 @@ public interface JobOrBuilder * * *
-   * Job is a Spark job.
+   * Optional. Job is a Spark job.
    * 
* - * .google.cloud.dataproc.v1.SparkJob spark_job = 4; + * + * .google.cloud.dataproc.v1.SparkJob spark_job = 4 [(.google.api.field_behavior) = OPTIONAL]; + * */ com.google.cloud.dataproc.v1.SparkJobOrBuilder getSparkJobOrBuilder(); @@ -191,10 +203,12 @@ public interface JobOrBuilder * * *
-   * Job is a Pyspark job.
+   * Optional. Job is a PySpark job.
    * 
* - * .google.cloud.dataproc.v1.PySparkJob pyspark_job = 5; + * + * .google.cloud.dataproc.v1.PySparkJob pyspark_job = 5 [(.google.api.field_behavior) = OPTIONAL]; + * * * @return Whether the pysparkJob field is set. */ @@ -203,10 +217,12 @@ public interface JobOrBuilder * * *
-   * Job is a Pyspark job.
+   * Optional. Job is a PySpark job.
    * 
* - * .google.cloud.dataproc.v1.PySparkJob pyspark_job = 5; + * + * .google.cloud.dataproc.v1.PySparkJob pyspark_job = 5 [(.google.api.field_behavior) = OPTIONAL]; + * * * @return The pysparkJob. */ @@ -215,10 +231,12 @@ public interface JobOrBuilder * * *
-   * Job is a Pyspark job.
+   * Optional. Job is a PySpark job.
    * 
* - * .google.cloud.dataproc.v1.PySparkJob pyspark_job = 5; + * + * .google.cloud.dataproc.v1.PySparkJob pyspark_job = 5 [(.google.api.field_behavior) = OPTIONAL]; + * */ com.google.cloud.dataproc.v1.PySparkJobOrBuilder getPysparkJobOrBuilder(); @@ -226,10 +244,11 @@ public interface JobOrBuilder * * *
-   * Job is a Hive job.
+   * Optional. Job is a Hive job.
    * 
* - * .google.cloud.dataproc.v1.HiveJob hive_job = 6; + * .google.cloud.dataproc.v1.HiveJob hive_job = 6 [(.google.api.field_behavior) = OPTIONAL]; + * * * @return Whether the hiveJob field is set. */ @@ -238,10 +257,11 @@ public interface JobOrBuilder * * *
-   * Job is a Hive job.
+   * Optional. Job is a Hive job.
    * 
* - * .google.cloud.dataproc.v1.HiveJob hive_job = 6; + * .google.cloud.dataproc.v1.HiveJob hive_job = 6 [(.google.api.field_behavior) = OPTIONAL]; + * * * @return The hiveJob. */ @@ -250,10 +270,11 @@ public interface JobOrBuilder * * *
-   * Job is a Hive job.
+   * Optional. Job is a Hive job.
    * 
* - * .google.cloud.dataproc.v1.HiveJob hive_job = 6; + * .google.cloud.dataproc.v1.HiveJob hive_job = 6 [(.google.api.field_behavior) = OPTIONAL]; + * */ com.google.cloud.dataproc.v1.HiveJobOrBuilder getHiveJobOrBuilder(); @@ -261,10 +282,11 @@ public interface JobOrBuilder * * *
-   * Job is a Pig job.
+   * Optional. Job is a Pig job.
    * 
* - * .google.cloud.dataproc.v1.PigJob pig_job = 7; + * .google.cloud.dataproc.v1.PigJob pig_job = 7 [(.google.api.field_behavior) = OPTIONAL]; + * * * @return Whether the pigJob field is set. */ @@ -273,10 +295,11 @@ public interface JobOrBuilder * * *
-   * Job is a Pig job.
+   * Optional. Job is a Pig job.
    * 
* - * .google.cloud.dataproc.v1.PigJob pig_job = 7; + * .google.cloud.dataproc.v1.PigJob pig_job = 7 [(.google.api.field_behavior) = OPTIONAL]; + * * * @return The pigJob. */ @@ -285,10 +308,11 @@ public interface JobOrBuilder * * *
-   * Job is a Pig job.
+   * Optional. Job is a Pig job.
    * 
* - * .google.cloud.dataproc.v1.PigJob pig_job = 7; + * .google.cloud.dataproc.v1.PigJob pig_job = 7 [(.google.api.field_behavior) = OPTIONAL]; + * */ com.google.cloud.dataproc.v1.PigJobOrBuilder getPigJobOrBuilder(); @@ -296,10 +320,53 @@ public interface JobOrBuilder * * *
-   * Job is a SparkSql job.
+   * Optional. Job is a SparkR job.
+   * 
+ * + * + * .google.cloud.dataproc.v1.SparkRJob spark_r_job = 21 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return Whether the sparkRJob field is set. + */ + boolean hasSparkRJob(); + /** + * + * + *
+   * Optional. Job is a SparkR job.
+   * 
+ * + * + * .google.cloud.dataproc.v1.SparkRJob spark_r_job = 21 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return The sparkRJob. + */ + com.google.cloud.dataproc.v1.SparkRJob getSparkRJob(); + /** + * + * + *
+   * Optional. Job is a SparkR job.
+   * 
+ * + * + * .google.cloud.dataproc.v1.SparkRJob spark_r_job = 21 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + com.google.cloud.dataproc.v1.SparkRJobOrBuilder getSparkRJobOrBuilder(); + + /** + * + * + *
+   * Optional. Job is a SparkSql job.
    * 
* - * .google.cloud.dataproc.v1.SparkSqlJob spark_sql_job = 12; + * + * .google.cloud.dataproc.v1.SparkSqlJob spark_sql_job = 12 [(.google.api.field_behavior) = OPTIONAL]; + * * * @return Whether the sparkSqlJob field is set. */ @@ -308,10 +375,12 @@ public interface JobOrBuilder * * *
-   * Job is a SparkSql job.
+   * Optional. Job is a SparkSql job.
    * 
* - * .google.cloud.dataproc.v1.SparkSqlJob spark_sql_job = 12; + * + * .google.cloud.dataproc.v1.SparkSqlJob spark_sql_job = 12 [(.google.api.field_behavior) = OPTIONAL]; + * * * @return The sparkSqlJob. */ @@ -320,13 +389,56 @@ public interface JobOrBuilder * * *
-   * Job is a SparkSql job.
+   * Optional. Job is a SparkSql job.
    * 
* - * .google.cloud.dataproc.v1.SparkSqlJob spark_sql_job = 12; + * + * .google.cloud.dataproc.v1.SparkSqlJob spark_sql_job = 12 [(.google.api.field_behavior) = OPTIONAL]; + * */ com.google.cloud.dataproc.v1.SparkSqlJobOrBuilder getSparkSqlJobOrBuilder(); + /** + * + * + *
+   * Optional. Job is a Presto job.
+   * 
+ * + * + * .google.cloud.dataproc.v1.PrestoJob presto_job = 23 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return Whether the prestoJob field is set. + */ + boolean hasPrestoJob(); + /** + * + * + *
+   * Optional. Job is a Presto job.
+   * 
+ * + * + * .google.cloud.dataproc.v1.PrestoJob presto_job = 23 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return The prestoJob. + */ + com.google.cloud.dataproc.v1.PrestoJob getPrestoJob(); + /** + * + * + *
+   * Optional. Job is a Presto job.
+   * 
+ * + * + * .google.cloud.dataproc.v1.PrestoJob presto_job = 23 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + com.google.cloud.dataproc.v1.PrestoJobOrBuilder getPrestoJobOrBuilder(); + /** * * diff --git a/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/JobsProto.java b/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/JobsProto.java index 3b84b277..9a960862 100644 --- a/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/JobsProto.java +++ b/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/JobsProto.java @@ -99,6 +99,22 @@ public static void registerAllExtensions(com.google.protobuf.ExtensionRegistry r internal_static_google_cloud_dataproc_v1_PigJob_PropertiesEntry_descriptor; static final com.google.protobuf.GeneratedMessageV3.FieldAccessorTable internal_static_google_cloud_dataproc_v1_PigJob_PropertiesEntry_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_cloud_dataproc_v1_SparkRJob_descriptor; + static final com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internal_static_google_cloud_dataproc_v1_SparkRJob_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_cloud_dataproc_v1_SparkRJob_PropertiesEntry_descriptor; + static final com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internal_static_google_cloud_dataproc_v1_SparkRJob_PropertiesEntry_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_cloud_dataproc_v1_PrestoJob_descriptor; + static final com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internal_static_google_cloud_dataproc_v1_PrestoJob_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_cloud_dataproc_v1_PrestoJob_PropertiesEntry_descriptor; + static final com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + 
internal_static_google_cloud_dataproc_v1_PrestoJob_PropertiesEntry_fieldAccessorTable; static final com.google.protobuf.Descriptors.Descriptor internal_static_google_cloud_dataproc_v1_JobPlacement_descriptor; static final com.google.protobuf.GeneratedMessageV3.FieldAccessorTable @@ -167,188 +183,211 @@ public static com.google.protobuf.Descriptors.FileDescriptor getDescriptor() { "\n#google/cloud/dataproc/v1/jobs.proto\022\030g" + "oogle.cloud.dataproc.v1\032\034google/api/anno" + "tations.proto\032\027google/api/client.proto\032\037" - + "google/api/field_behavior.proto\032\033google/" - + "protobuf/empty.proto\032 google/protobuf/fi" - + "eld_mask.proto\032\037google/protobuf/timestam" - + "p.proto\"\301\002\n\rLoggingConfig\022W\n\021driver_log_" - + "levels\030\002 \003(\0132<.google.cloud.dataproc.v1." - + "LoggingConfig.DriverLogLevelsEntry\032e\n\024Dr" - + "iverLogLevelsEntry\022\013\n\003key\030\001 \001(\t\022<\n\005value" - + "\030\002 \001(\0162-.google.cloud.dataproc.v1.Loggin" - + "gConfig.Level:\0028\001\"p\n\005Level\022\025\n\021LEVEL_UNSP" - + "ECIFIED\020\000\022\007\n\003ALL\020\001\022\t\n\005TRACE\020\002\022\t\n\005DEBUG\020\003" - + "\022\010\n\004INFO\020\004\022\010\n\004WARN\020\005\022\t\n\005ERROR\020\006\022\t\n\005FATAL" - + "\020\007\022\007\n\003OFF\020\010\"\361\002\n\tHadoopJob\022\033\n\021main_jar_fi" - + "le_uri\030\001 \001(\tH\000\022\024\n\nmain_class\030\002 \001(\tH\000\022\021\n\004" - + "args\030\003 \003(\tB\003\340A\001\022\032\n\rjar_file_uris\030\004 \003(\tB\003" - + "\340A\001\022\026\n\tfile_uris\030\005 \003(\tB\003\340A\001\022\031\n\014archive_u" - + "ris\030\006 \003(\tB\003\340A\001\022L\n\nproperties\030\007 \003(\01323.goo" - + "gle.cloud.dataproc.v1.HadoopJob.Properti" - + "esEntryB\003\340A\001\022D\n\016logging_config\030\010 \001(\0132\'.g" - + "oogle.cloud.dataproc.v1.LoggingConfigB\003\340" - + "A\001\0321\n\017PropertiesEntry\022\013\n\003key\030\001 \001(\t\022\r\n\005va" - + "lue\030\002 
\001(\t:\0028\001B\010\n\006driver\"\357\002\n\010SparkJob\022\033\n\021" - + "main_jar_file_uri\030\001 \001(\tH\000\022\024\n\nmain_class\030" - + "\002 \001(\tH\000\022\021\n\004args\030\003 \003(\tB\003\340A\001\022\032\n\rjar_file_u" - + "ris\030\004 \003(\tB\003\340A\001\022\026\n\tfile_uris\030\005 \003(\tB\003\340A\001\022\031" - + "\n\014archive_uris\030\006 \003(\tB\003\340A\001\022K\n\nproperties\030" - + "\007 \003(\01322.google.cloud.dataproc.v1.SparkJo" - + "b.PropertiesEntryB\003\340A\001\022D\n\016logging_config" - + "\030\010 \001(\0132\'.google.cloud.dataproc.v1.Loggin" - + "gConfigB\003\340A\001\0321\n\017PropertiesEntry\022\013\n\003key\030\001" - + " \001(\t\022\r\n\005value\030\002 \001(\t:\0028\001B\010\n\006driver\"\370\002\n\nPy" - + "SparkJob\022!\n\024main_python_file_uri\030\001 \001(\tB\003" - + "\340A\002\022\021\n\004args\030\002 \003(\tB\003\340A\001\022\035\n\020python_file_ur" - + "is\030\003 \003(\tB\003\340A\001\022\032\n\rjar_file_uris\030\004 \003(\tB\003\340A" - + "\001\022\026\n\tfile_uris\030\005 \003(\tB\003\340A\001\022\031\n\014archive_uri" - + "s\030\006 \003(\tB\003\340A\001\022M\n\nproperties\030\007 \003(\01324.googl" - + "e.cloud.dataproc.v1.PySparkJob.Propertie" - + "sEntryB\003\340A\001\022D\n\016logging_config\030\010 \001(\0132\'.go" - + "ogle.cloud.dataproc.v1.LoggingConfigB\003\340A" - + "\001\0321\n\017PropertiesEntry\022\013\n\003key\030\001 \001(\t\022\r\n\005val" - + "ue\030\002 \001(\t:\0028\001\"!\n\tQueryList\022\024\n\007queries\030\001 \003" - + "(\tB\003\340A\002\"\265\003\n\007HiveJob\022\030\n\016query_file_uri\030\001 " - + "\001(\tH\000\0229\n\nquery_list\030\002 \001(\0132#.google.cloud" - + ".dataproc.v1.QueryListH\000\022 \n\023continue_on_" - + "failure\030\003 \001(\010B\003\340A\001\022U\n\020script_variables\030\004" - + " \003(\01326.google.cloud.dataproc.v1.HiveJob." 
- + "ScriptVariablesEntryB\003\340A\001\022J\n\nproperties\030" - + "\005 \003(\01321.google.cloud.dataproc.v1.HiveJob" - + ".PropertiesEntryB\003\340A\001\022\032\n\rjar_file_uris\030\006" - + " \003(\tB\003\340A\001\0326\n\024ScriptVariablesEntry\022\013\n\003key" - + "\030\001 \001(\t\022\r\n\005value\030\002 \001(\t:\0028\001\0321\n\017PropertiesE" - + "ntry\022\013\n\003key\030\001 \001(\t\022\r\n\005value\030\002 \001(\t:\0028\001B\t\n\007" - + "queries\"\345\003\n\013SparkSqlJob\022\030\n\016query_file_ur" + + "google/api/field_behavior.proto\032#google/" + + "longrunning/operations.proto\032\033google/pro" + + "tobuf/empty.proto\032 google/protobuf/field" + + "_mask.proto\032\037google/protobuf/timestamp.p" + + "roto\"\301\002\n\rLoggingConfig\022W\n\021driver_log_lev" + + "els\030\002 \003(\0132<.google.cloud.dataproc.v1.Log" + + "gingConfig.DriverLogLevelsEntry\032e\n\024Drive" + + "rLogLevelsEntry\022\013\n\003key\030\001 \001(\t\022<\n\005value\030\002 " + + "\001(\0162-.google.cloud.dataproc.v1.LoggingCo" + + "nfig.Level:\0028\001\"p\n\005Level\022\025\n\021LEVEL_UNSPECI" + + "FIED\020\000\022\007\n\003ALL\020\001\022\t\n\005TRACE\020\002\022\t\n\005DEBUG\020\003\022\010\n" + + "\004INFO\020\004\022\010\n\004WARN\020\005\022\t\n\005ERROR\020\006\022\t\n\005FATAL\020\007\022" + + "\007\n\003OFF\020\010\"\361\002\n\tHadoopJob\022\033\n\021main_jar_file_" + + "uri\030\001 \001(\tH\000\022\024\n\nmain_class\030\002 \001(\tH\000\022\021\n\004arg" + + "s\030\003 \003(\tB\003\340A\001\022\032\n\rjar_file_uris\030\004 \003(\tB\003\340A\001" + + "\022\026\n\tfile_uris\030\005 \003(\tB\003\340A\001\022\031\n\014archive_uris" + + "\030\006 \003(\tB\003\340A\001\022L\n\nproperties\030\007 \003(\01323.google" + + ".cloud.dataproc.v1.HadoopJob.PropertiesE" + + "ntryB\003\340A\001\022D\n\016logging_config\030\010 \001(\0132\'.goog" + + "le.cloud.dataproc.v1.LoggingConfigB\003\340A\001\032" + + "1\n\017PropertiesEntry\022\013\n\003key\030\001 \001(\t\022\r\n\005value" 
+ + "\030\002 \001(\t:\0028\001B\010\n\006driver\"\357\002\n\010SparkJob\022\033\n\021mai" + + "n_jar_file_uri\030\001 \001(\tH\000\022\024\n\nmain_class\030\002 \001" + + "(\tH\000\022\021\n\004args\030\003 \003(\tB\003\340A\001\022\032\n\rjar_file_uris" + + "\030\004 \003(\tB\003\340A\001\022\026\n\tfile_uris\030\005 \003(\tB\003\340A\001\022\031\n\014a" + + "rchive_uris\030\006 \003(\tB\003\340A\001\022K\n\nproperties\030\007 \003" + + "(\01322.google.cloud.dataproc.v1.SparkJob.P" + + "ropertiesEntryB\003\340A\001\022D\n\016logging_config\030\010 " + + "\001(\0132\'.google.cloud.dataproc.v1.LoggingCo" + + "nfigB\003\340A\001\0321\n\017PropertiesEntry\022\013\n\003key\030\001 \001(" + + "\t\022\r\n\005value\030\002 \001(\t:\0028\001B\010\n\006driver\"\370\002\n\nPySpa" + + "rkJob\022!\n\024main_python_file_uri\030\001 \001(\tB\003\340A\002" + + "\022\021\n\004args\030\002 \003(\tB\003\340A\001\022\035\n\020python_file_uris\030" + + "\003 \003(\tB\003\340A\001\022\032\n\rjar_file_uris\030\004 \003(\tB\003\340A\001\022\026" + + "\n\tfile_uris\030\005 \003(\tB\003\340A\001\022\031\n\014archive_uris\030\006" + + " \003(\tB\003\340A\001\022M\n\nproperties\030\007 \003(\01324.google.c" + + "loud.dataproc.v1.PySparkJob.PropertiesEn" + + "tryB\003\340A\001\022D\n\016logging_config\030\010 \001(\0132\'.googl" + + "e.cloud.dataproc.v1.LoggingConfigB\003\340A\001\0321" + + "\n\017PropertiesEntry\022\013\n\003key\030\001 \001(\t\022\r\n\005value\030" + + "\002 \001(\t:\0028\001\"!\n\tQueryList\022\024\n\007queries\030\001 \003(\tB" + + "\003\340A\002\"\265\003\n\007HiveJob\022\030\n\016query_file_uri\030\001 \001(\t" + + "H\000\0229\n\nquery_list\030\002 \001(\0132#.google.cloud.da" + + "taproc.v1.QueryListH\000\022 \n\023continue_on_fai" + + "lure\030\003 \001(\010B\003\340A\001\022U\n\020script_variables\030\004 \003(" + + "\01326.google.cloud.dataproc.v1.HiveJob.Scr" + + "iptVariablesEntryB\003\340A\001\022J\n\nproperties\030\005 \003" + + 
"(\01321.google.cloud.dataproc.v1.HiveJob.Pr" + + "opertiesEntryB\003\340A\001\022\032\n\rjar_file_uris\030\006 \003(" + + "\tB\003\340A\001\0326\n\024ScriptVariablesEntry\022\013\n\003key\030\001 " + + "\001(\t\022\r\n\005value\030\002 \001(\t:\0028\001\0321\n\017PropertiesEntr" + + "y\022\013\n\003key\030\001 \001(\t\022\r\n\005value\030\002 \001(\t:\0028\001B\t\n\007que" + + "ries\"\345\003\n\013SparkSqlJob\022\030\n\016query_file_uri\030\001" + + " \001(\tH\000\0229\n\nquery_list\030\002 \001(\0132#.google.clou" + + "d.dataproc.v1.QueryListH\000\022Y\n\020script_vari" + + "ables\030\003 \003(\0132:.google.cloud.dataproc.v1.S" + + "parkSqlJob.ScriptVariablesEntryB\003\340A\001\022N\n\n" + + "properties\030\004 \003(\01325.google.cloud.dataproc" + + ".v1.SparkSqlJob.PropertiesEntryB\003\340A\001\022\032\n\r" + + "jar_file_uris\0308 \003(\tB\003\340A\001\022D\n\016logging_conf" + + "ig\030\006 \001(\0132\'.google.cloud.dataproc.v1.Logg" + + "ingConfigB\003\340A\001\0326\n\024ScriptVariablesEntry\022\013" + + "\n\003key\030\001 \001(\t\022\r\n\005value\030\002 \001(\t:\0028\001\0321\n\017Proper" + + "tiesEntry\022\013\n\003key\030\001 \001(\t\022\r\n\005value\030\002 \001(\t:\0028" + + "\001B\t\n\007queries\"\370\003\n\006PigJob\022\030\n\016query_file_ur" + "i\030\001 \001(\tH\000\0229\n\nquery_list\030\002 \001(\0132#.google.c" - + "loud.dataproc.v1.QueryListH\000\022Y\n\020script_v" - + "ariables\030\003 \003(\0132:.google.cloud.dataproc.v" - + "1.SparkSqlJob.ScriptVariablesEntryB\003\340A\001\022" - + "N\n\nproperties\030\004 \003(\01325.google.cloud.datap" - + "roc.v1.SparkSqlJob.PropertiesEntryB\003\340A\001\022" - + "\032\n\rjar_file_uris\0308 \003(\tB\003\340A\001\022D\n\016logging_c" - + "onfig\030\006 \001(\0132\'.google.cloud.dataproc.v1.L" - + "oggingConfigB\003\340A\001\0326\n\024ScriptVariablesEntr" - + "y\022\013\n\003key\030\001 \001(\t\022\r\n\005value\030\002 \001(\t:\0028\001\0321\n\017Pro" - + "pertiesEntry\022\013\n\003key\030\001 
\001(\t\022\r\n\005value\030\002 \001(\t" - + ":\0028\001B\t\n\007queries\"\370\003\n\006PigJob\022\030\n\016query_file" - + "_uri\030\001 \001(\tH\000\0229\n\nquery_list\030\002 \001(\0132#.googl" - + "e.cloud.dataproc.v1.QueryListH\000\022 \n\023conti" - + "nue_on_failure\030\003 \001(\010B\003\340A\001\022T\n\020script_vari" - + "ables\030\004 \003(\01325.google.cloud.dataproc.v1.P" - + "igJob.ScriptVariablesEntryB\003\340A\001\022I\n\nprope" - + "rties\030\005 \003(\01320.google.cloud.dataproc.v1.P" - + "igJob.PropertiesEntryB\003\340A\001\022\032\n\rjar_file_u" - + "ris\030\006 \003(\tB\003\340A\001\022D\n\016logging_config\030\007 \001(\0132\'" - + ".google.cloud.dataproc.v1.LoggingConfigB" - + "\003\340A\001\0326\n\024ScriptVariablesEntry\022\013\n\003key\030\001 \001(" - + "\t\022\r\n\005value\030\002 \001(\t:\0028\001\0321\n\017PropertiesEntry\022" - + "\013\n\003key\030\001 \001(\t\022\r\n\005value\030\002 \001(\t:\0028\001B\t\n\007queri" - + "es\"D\n\014JobPlacement\022\031\n\014cluster_name\030\001 \001(\t" - + "B\003\340A\002\022\031\n\014cluster_uuid\030\002 \001(\tB\003\340A\003\"\331\003\n\tJob" - + "Status\022=\n\005state\030\001 \001(\0162).google.cloud.dat" - + "aproc.v1.JobStatus.StateB\003\340A\003\022\027\n\007details" - + "\030\002 \001(\tB\006\340A\003\340A\001\0229\n\020state_start_time\030\006 \001(\013" - + "2\032.google.protobuf.TimestampB\003\340A\003\022C\n\010sub" - + "state\030\007 \001(\0162,.google.cloud.dataproc.v1.J" - + "obStatus.SubstateB\003\340A\003\"\251\001\n\005State\022\025\n\021STAT" - + "E_UNSPECIFIED\020\000\022\013\n\007PENDING\020\001\022\016\n\nSETUP_DO" - + "NE\020\010\022\013\n\007RUNNING\020\002\022\022\n\016CANCEL_PENDING\020\003\022\022\n" - + "\016CANCEL_STARTED\020\007\022\r\n\tCANCELLED\020\004\022\010\n\004DONE" - + "\020\005\022\t\n\005ERROR\020\006\022\023\n\017ATTEMPT_FAILURE\020\t\"H\n\010Su" - + "bstate\022\017\n\013UNSPECIFIED\020\000\022\r\n\tSUBMITTED\020\001\022\n" - + 
"\n\006QUEUED\020\002\022\020\n\014STALE_STATUS\020\003\"<\n\014JobRefer" - + "ence\022\027\n\nproject_id\030\001 \001(\tB\003\340A\002\022\023\n\006job_id\030" - + "\002 \001(\tB\003\340A\001\"\245\002\n\017YarnApplication\022\021\n\004name\030\001" - + " \001(\tB\003\340A\002\022C\n\005state\030\002 \001(\0162/.google.cloud." - + "dataproc.v1.YarnApplication.StateB\003\340A\002\022\025" - + "\n\010progress\030\003 \001(\002B\003\340A\002\022\031\n\014tracking_url\030\004 " - + "\001(\tB\003\340A\001\"\207\001\n\005State\022\025\n\021STATE_UNSPECIFIED\020" - + "\000\022\007\n\003NEW\020\001\022\016\n\nNEW_SAVING\020\002\022\r\n\tSUBMITTED\020" - + "\003\022\014\n\010ACCEPTED\020\004\022\013\n\007RUNNING\020\005\022\014\n\010FINISHED" - + "\020\006\022\n\n\006FAILED\020\007\022\n\n\006KILLED\020\010\"\315\007\n\003Job\022>\n\tre" - + "ference\030\001 \001(\0132&.google.cloud.dataproc.v1" - + ".JobReferenceB\003\340A\001\022>\n\tplacement\030\002 \001(\0132&." - + "google.cloud.dataproc.v1.JobPlacementB\003\340" - + "A\002\0229\n\nhadoop_job\030\003 \001(\0132#.google.cloud.da" - + "taproc.v1.HadoopJobH\000\0227\n\tspark_job\030\004 \001(\013" - + "2\".google.cloud.dataproc.v1.SparkJobH\000\022;" - + "\n\013pyspark_job\030\005 \001(\0132$.google.cloud.datap" - + "roc.v1.PySparkJobH\000\0225\n\010hive_job\030\006 \001(\0132!." - + "google.cloud.dataproc.v1.HiveJobH\000\0223\n\007pi" - + "g_job\030\007 \001(\0132 .google.cloud.dataproc.v1.P" - + "igJobH\000\022>\n\rspark_sql_job\030\014 \001(\0132%.google." 
- + "cloud.dataproc.v1.SparkSqlJobH\000\0228\n\006statu" - + "s\030\010 \001(\0132#.google.cloud.dataproc.v1.JobSt" - + "atusB\003\340A\003\022@\n\016status_history\030\r \003(\0132#.goog" - + "le.cloud.dataproc.v1.JobStatusB\003\340A\003\022I\n\021y" - + "arn_applications\030\t \003(\0132).google.cloud.da" - + "taproc.v1.YarnApplicationB\003\340A\003\022\'\n\032driver" - + "_output_resource_uri\030\021 \001(\tB\003\340A\003\022%\n\030drive" - + "r_control_files_uri\030\017 \001(\tB\003\340A\003\022>\n\006labels" - + "\030\022 \003(\0132).google.cloud.dataproc.v1.Job.La" - + "belsEntryB\003\340A\001\022@\n\nscheduling\030\024 \001(\0132\'.goo" - + "gle.cloud.dataproc.v1.JobSchedulingB\003\340A\001" - + "\022\025\n\010job_uuid\030\026 \001(\tB\003\340A\003\032-\n\013LabelsEntry\022\013" - + "\n\003key\030\001 \001(\t\022\r\n\005value\030\002 \001(\t:\0028\001B\n\n\010type_j" - + "ob\"3\n\rJobScheduling\022\"\n\025max_failures_per_" - + "hour\030\001 \001(\005B\003\340A\001\"\212\001\n\020SubmitJobRequest\022\027\n\n" - + "project_id\030\001 \001(\tB\003\340A\002\022\023\n\006region\030\003 \001(\tB\003\340" - + "A\002\022/\n\003job\030\002 \001(\0132\035.google.cloud.dataproc." 
- + "v1.JobB\003\340A\002\022\027\n\nrequest_id\030\004 \001(\tB\003\340A\001\"R\n\r" - + "GetJobRequest\022\027\n\nproject_id\030\001 \001(\tB\003\340A\002\022\023" - + "\n\006region\030\003 \001(\tB\003\340A\002\022\023\n\006job_id\030\002 \001(\tB\003\340A\002" - + "\"\263\002\n\017ListJobsRequest\022\027\n\nproject_id\030\001 \001(\t" - + "B\003\340A\002\022\023\n\006region\030\006 \001(\tB\003\340A\002\022\026\n\tpage_size\030" - + "\002 \001(\005B\003\340A\001\022\027\n\npage_token\030\003 \001(\tB\003\340A\001\022\031\n\014c" - + "luster_name\030\004 \001(\tB\003\340A\001\022Y\n\021job_state_matc" - + "her\030\005 \001(\01629.google.cloud.dataproc.v1.Lis" - + "tJobsRequest.JobStateMatcherB\003\340A\001\022\023\n\006fil" - + "ter\030\007 \001(\tB\003\340A\001\"6\n\017JobStateMatcher\022\007\n\003ALL" - + "\020\000\022\n\n\006ACTIVE\020\001\022\016\n\nNON_ACTIVE\020\002\"\274\001\n\020Updat" - + "eJobRequest\022\027\n\nproject_id\030\001 \001(\tB\003\340A\002\022\023\n\006" - + "region\030\002 \001(\tB\003\340A\002\022\023\n\006job_id\030\003 \001(\tB\003\340A\002\022/" - + "\n\003job\030\004 \001(\0132\035.google.cloud.dataproc.v1.J" - + "obB\003\340A\002\0224\n\013update_mask\030\005 \001(\0132\032.google.pr" - + "otobuf.FieldMaskB\003\340A\002\"b\n\020ListJobsRespons" - + "e\0220\n\004jobs\030\001 \003(\0132\035.google.cloud.dataproc." 
- + "v1.JobB\003\340A\003\022\034\n\017next_page_token\030\002 \001(\tB\003\340A" - + "\001\"U\n\020CancelJobRequest\022\027\n\nproject_id\030\001 \001(" - + "\tB\003\340A\002\022\023\n\006region\030\003 \001(\tB\003\340A\002\022\023\n\006job_id\030\002 " - + "\001(\tB\003\340A\002\"U\n\020DeleteJobRequest\022\027\n\nproject_" - + "id\030\001 \001(\tB\003\340A\002\022\023\n\006region\030\003 \001(\tB\003\340A\002\022\023\n\006jo" - + "b_id\030\002 \001(\tB\003\340A\0022\233\t\n\rJobController\022\261\001\n\tSu" - + "bmitJob\022*.google.cloud.dataproc.v1.Submi" + + "loud.dataproc.v1.QueryListH\000\022 \n\023continue" + + "_on_failure\030\003 \001(\010B\003\340A\001\022T\n\020script_variabl" + + "es\030\004 \003(\01325.google.cloud.dataproc.v1.PigJ" + + "ob.ScriptVariablesEntryB\003\340A\001\022I\n\nproperti" + + "es\030\005 \003(\01320.google.cloud.dataproc.v1.PigJ" + + "ob.PropertiesEntryB\003\340A\001\022\032\n\rjar_file_uris" + + "\030\006 \003(\tB\003\340A\001\022D\n\016logging_config\030\007 \001(\0132\'.go" + + "ogle.cloud.dataproc.v1.LoggingConfigB\003\340A" + + "\001\0326\n\024ScriptVariablesEntry\022\013\n\003key\030\001 \001(\t\022\r" + + "\n\005value\030\002 \001(\t:\0028\001\0321\n\017PropertiesEntry\022\013\n\003" + + "key\030\001 \001(\t\022\r\n\005value\030\002 \001(\t:\0028\001B\t\n\007queries\"" + + "\266\002\n\tSparkRJob\022\034\n\017main_r_file_uri\030\001 \001(\tB\003" + + "\340A\002\022\021\n\004args\030\002 \003(\tB\003\340A\001\022\026\n\tfile_uris\030\003 \003(" + + "\tB\003\340A\001\022\031\n\014archive_uris\030\004 \003(\tB\003\340A\001\022L\n\npro" + + "perties\030\005 \003(\01323.google.cloud.dataproc.v1" + + ".SparkRJob.PropertiesEntryB\003\340A\001\022D\n\016loggi" + + "ng_config\030\006 \001(\0132\'.google.cloud.dataproc." 
+ + "v1.LoggingConfigB\003\340A\001\0321\n\017PropertiesEntry" + + "\022\013\n\003key\030\001 \001(\t\022\r\n\005value\030\002 \001(\t:\0028\001\"\212\003\n\tPre" + + "stoJob\022\030\n\016query_file_uri\030\001 \001(\tH\000\0229\n\nquer" + + "y_list\030\002 \001(\0132#.google.cloud.dataproc.v1." + + "QueryListH\000\022 \n\023continue_on_failure\030\003 \001(\010" + + "B\003\340A\001\022\032\n\routput_format\030\004 \001(\tB\003\340A\001\022\030\n\013cli" + + "ent_tags\030\005 \003(\tB\003\340A\001\022L\n\nproperties\030\006 \003(\0132" + + "3.google.cloud.dataproc.v1.PrestoJob.Pro" + + "pertiesEntryB\003\340A\001\022D\n\016logging_config\030\007 \001(" + + "\0132\'.google.cloud.dataproc.v1.LoggingConf" + + "igB\003\340A\001\0321\n\017PropertiesEntry\022\013\n\003key\030\001 \001(\t\022" + + "\r\n\005value\030\002 \001(\t:\0028\001B\t\n\007queries\"D\n\014JobPlac" + + "ement\022\031\n\014cluster_name\030\001 \001(\tB\003\340A\002\022\031\n\014clus" + + "ter_uuid\030\002 \001(\tB\003\340A\003\"\331\003\n\tJobStatus\022=\n\005sta" + + "te\030\001 \001(\0162).google.cloud.dataproc.v1.JobS" + + "tatus.StateB\003\340A\003\022\027\n\007details\030\002 \001(\tB\006\340A\003\340A" + + "\001\0229\n\020state_start_time\030\006 \001(\0132\032.google.pro" + + "tobuf.TimestampB\003\340A\003\022C\n\010substate\030\007 \001(\0162," + + ".google.cloud.dataproc.v1.JobStatus.Subs" + + "tateB\003\340A\003\"\251\001\n\005State\022\025\n\021STATE_UNSPECIFIED" + + "\020\000\022\013\n\007PENDING\020\001\022\016\n\nSETUP_DONE\020\010\022\013\n\007RUNNI" + + "NG\020\002\022\022\n\016CANCEL_PENDING\020\003\022\022\n\016CANCEL_START" + + "ED\020\007\022\r\n\tCANCELLED\020\004\022\010\n\004DONE\020\005\022\t\n\005ERROR\020\006" + + "\022\023\n\017ATTEMPT_FAILURE\020\t\"H\n\010Substate\022\017\n\013UNS" + + "PECIFIED\020\000\022\r\n\tSUBMITTED\020\001\022\n\n\006QUEUED\020\002\022\020\n" + + "\014STALE_STATUS\020\003\"<\n\014JobReference\022\027\n\nproje" + + "ct_id\030\001 
\001(\tB\003\340A\002\022\023\n\006job_id\030\002 \001(\tB\003\340A\001\"\245\002" + + "\n\017YarnApplication\022\021\n\004name\030\001 \001(\tB\003\340A\002\022C\n\005" + + "state\030\002 \001(\0162/.google.cloud.dataproc.v1.Y" + + "arnApplication.StateB\003\340A\002\022\025\n\010progress\030\003 " + + "\001(\002B\003\340A\002\022\031\n\014tracking_url\030\004 \001(\tB\003\340A\001\"\207\001\n\005" + + "State\022\025\n\021STATE_UNSPECIFIED\020\000\022\007\n\003NEW\020\001\022\016\n" + + "\nNEW_SAVING\020\002\022\r\n\tSUBMITTED\020\003\022\014\n\010ACCEPTED" + + "\020\004\022\013\n\007RUNNING\020\005\022\014\n\010FINISHED\020\006\022\n\n\006FAILED\020" + + "\007\022\n\n\006KILLED\020\010\"\354\010\n\003Job\022>\n\treference\030\001 \001(\013" + + "2&.google.cloud.dataproc.v1.JobReference" + + "B\003\340A\001\022>\n\tplacement\030\002 \001(\0132&.google.cloud." + + "dataproc.v1.JobPlacementB\003\340A\002\022>\n\nhadoop_" + + "job\030\003 \001(\0132#.google.cloud.dataproc.v1.Had" + + "oopJobB\003\340A\001H\000\022<\n\tspark_job\030\004 \001(\0132\".googl" + + "e.cloud.dataproc.v1.SparkJobB\003\340A\001H\000\022@\n\013p" + + "yspark_job\030\005 \001(\0132$.google.cloud.dataproc" + + ".v1.PySparkJobB\003\340A\001H\000\022:\n\010hive_job\030\006 \001(\0132" + + "!.google.cloud.dataproc.v1.HiveJobB\003\340A\001H" + + "\000\0228\n\007pig_job\030\007 \001(\0132 .google.cloud.datapr" + + "oc.v1.PigJobB\003\340A\001H\000\022?\n\013spark_r_job\030\025 \001(\013" + + "2#.google.cloud.dataproc.v1.SparkRJobB\003\340" + + "A\001H\000\022C\n\rspark_sql_job\030\014 \001(\0132%.google.clo" + + "ud.dataproc.v1.SparkSqlJobB\003\340A\001H\000\022>\n\npre" + + "sto_job\030\027 \001(\0132#.google.cloud.dataproc.v1" + + ".PrestoJobB\003\340A\001H\000\0228\n\006status\030\010 \001(\0132#.goog" + + "le.cloud.dataproc.v1.JobStatusB\003\340A\003\022@\n\016s" + + "tatus_history\030\r \003(\0132#.google.cloud.datap" + + "roc.v1.JobStatusB\003\340A\003\022I\n\021yarn_applicatio" + + "ns\030\t 
\003(\0132).google.cloud.dataproc.v1.Yarn" + + "ApplicationB\003\340A\003\022\'\n\032driver_output_resour" + + "ce_uri\030\021 \001(\tB\003\340A\003\022%\n\030driver_control_file" + + "s_uri\030\017 \001(\tB\003\340A\003\022>\n\006labels\030\022 \003(\0132).googl" + + "e.cloud.dataproc.v1.Job.LabelsEntryB\003\340A\001" + + "\022@\n\nscheduling\030\024 \001(\0132\'.google.cloud.data" + + "proc.v1.JobSchedulingB\003\340A\001\022\025\n\010job_uuid\030\026" + + " \001(\tB\003\340A\003\032-\n\013LabelsEntry\022\013\n\003key\030\001 \001(\t\022\r\n" + + "\005value\030\002 \001(\t:\0028\001B\n\n\010type_job\"3\n\rJobSched" + + "uling\022\"\n\025max_failures_per_hour\030\001 \001(\005B\003\340A" + + "\001\"\212\001\n\020SubmitJobRequest\022\027\n\nproject_id\030\001 \001" + + "(\tB\003\340A\002\022\023\n\006region\030\003 \001(\tB\003\340A\002\022/\n\003job\030\002 \001(" + + "\0132\035.google.cloud.dataproc.v1.JobB\003\340A\002\022\027\n" + + "\nrequest_id\030\004 \001(\tB\003\340A\001\"R\n\rGetJobRequest\022" + + "\027\n\nproject_id\030\001 \001(\tB\003\340A\002\022\023\n\006region\030\003 \001(\t" + + "B\003\340A\002\022\023\n\006job_id\030\002 \001(\tB\003\340A\002\"\263\002\n\017ListJobsR" + + "equest\022\027\n\nproject_id\030\001 \001(\tB\003\340A\002\022\023\n\006regio" + + "n\030\006 \001(\tB\003\340A\002\022\026\n\tpage_size\030\002 \001(\005B\003\340A\001\022\027\n\n" + + "page_token\030\003 \001(\tB\003\340A\001\022\031\n\014cluster_name\030\004 " + + "\001(\tB\003\340A\001\022Y\n\021job_state_matcher\030\005 \001(\01629.go" + + "ogle.cloud.dataproc.v1.ListJobsRequest.J" + + "obStateMatcherB\003\340A\001\022\023\n\006filter\030\007 \001(\tB\003\340A\001" + + "\"6\n\017JobStateMatcher\022\007\n\003ALL\020\000\022\n\n\006ACTIVE\020\001" + + "\022\016\n\nNON_ACTIVE\020\002\"\274\001\n\020UpdateJobRequest\022\027\n" + + "\nproject_id\030\001 \001(\tB\003\340A\002\022\023\n\006region\030\002 \001(\tB\003" + + "\340A\002\022\023\n\006job_id\030\003 
\001(\tB\003\340A\002\022/\n\003job\030\004 \001(\0132\035." + + "google.cloud.dataproc.v1.JobB\003\340A\002\0224\n\013upd" + + "ate_mask\030\005 \001(\0132\032.google.protobuf.FieldMa" + + "skB\003\340A\002\"b\n\020ListJobsResponse\0220\n\004jobs\030\001 \003(" + + "\0132\035.google.cloud.dataproc.v1.JobB\003\340A\003\022\034\n" + + "\017next_page_token\030\002 \001(\tB\003\340A\001\"U\n\020CancelJob" + + "Request\022\027\n\nproject_id\030\001 \001(\tB\003\340A\002\022\023\n\006regi" + + "on\030\003 \001(\tB\003\340A\002\022\023\n\006job_id\030\002 \001(\tB\003\340A\002\"U\n\020De" + + "leteJobRequest\022\027\n\nproject_id\030\001 \001(\tB\003\340A\002\022" + + "\023\n\006region\030\003 \001(\tB\003\340A\002\022\023\n\006job_id\030\002 \001(\tB\003\340A" + + "\0022\233\t\n\rJobController\022\261\001\n\tSubmitJob\022*.goog" + + "le.cloud.dataproc.v1.SubmitJobRequest\032\035." + + "google.cloud.dataproc.v1.Job\"Y\202\323\344\223\002;\"6/v" + + "1/projects/{project_id}/regions/{region}" + + "/jobs:submit:\001*\332A\025project_id,region,job\022" + + "\255\001\n\006GetJob\022\'.google.cloud.dataproc.v1.Ge" + "tJobRequest\032\035.google.cloud.dataproc.v1.J" - + "ob\"Y\202\323\344\223\002;\"6/v1/projects/{project_id}/re" - + "gions/{region}/jobs:submit:\001*\332A\025project_" - + "id,region,job\022\255\001\n\006GetJob\022\'.google.cloud." 
- + "dataproc.v1.GetJobRequest\032\035.google.cloud" - + ".dataproc.v1.Job\"[\202\323\344\223\002:\0228/v1/projects/{" - + "project_id}/regions/{region}/jobs/{job_i" - + "d}\332A\030project_id,region,job_id\022\311\001\n\010ListJo" - + "bs\022).google.cloud.dataproc.v1.ListJobsRe" - + "quest\032*.google.cloud.dataproc.v1.ListJob" - + "sResponse\"f\202\323\344\223\0021\022//v1/projects/{project" - + "_id}/regions/{region}/jobs\332A\021project_id," - + "region\332A\030project_id,region,filter\022\235\001\n\tUp" - + "dateJob\022*.google.cloud.dataproc.v1.Updat" - + "eJobRequest\032\035.google.cloud.dataproc.v1.J" - + "ob\"E\202\323\344\223\002?28/v1/projects/{project_id}/re" - + "gions/{region}/jobs/{job_id}:\003job\022\275\001\n\tCa" - + "ncelJob\022*.google.cloud.dataproc.v1.Cance" - + "lJobRequest\032\035.google.cloud.dataproc.v1.J" - + "ob\"e\202\323\344\223\002D\"?/v1/projects/{project_id}/re" - + "gions/{region}/jobs/{job_id}:cancel:\001*\332A" - + "\030project_id,region,job_id\022\254\001\n\tDeleteJob\022" - + "*.google.cloud.dataproc.v1.DeleteJobRequ" - + "est\032\026.google.protobuf.Empty\"[\202\323\344\223\002:*8/v1" - + "/projects/{project_id}/regions/{region}/" - + "jobs/{job_id}\332A\030project_id,region,job_id" - + "\032K\312A\027dataproc.googleapis.com\322A.https://w" - + "ww.googleapis.com/auth/cloud-platformBm\n" - + "\034com.google.cloud.dataproc.v1B\tJobsProto" - + "P\001Z@google.golang.org/genproto/googleapi" - + "s/cloud/dataproc/v1;dataprocb\006proto3" + + "ob\"[\202\323\344\223\002:\0228/v1/projects/{project_id}/re" + + "gions/{region}/jobs/{job_id}\332A\030project_i" + + "d,region,job_id\022\311\001\n\010ListJobs\022).google.cl" + + "oud.dataproc.v1.ListJobsRequest\032*.google" + + ".cloud.dataproc.v1.ListJobsResponse\"f\202\323\344" + + "\223\0021\022//v1/projects/{project_id}/regions/{" + + "region}/jobs\332A\021project_id,region\332A\030proje" + + "ct_id,region,filter\022\235\001\n\tUpdateJob\022*.goog" + + 
"le.cloud.dataproc.v1.UpdateJobRequest\032\035." + + "google.cloud.dataproc.v1.Job\"E\202\323\344\223\002?28/v" + + "1/projects/{project_id}/regions/{region}" + + "/jobs/{job_id}:\003job\022\275\001\n\tCancelJob\022*.goog" + + "le.cloud.dataproc.v1.CancelJobRequest\032\035." + + "google.cloud.dataproc.v1.Job\"e\202\323\344\223\002D\"?/v" + + "1/projects/{project_id}/regions/{region}" + + "/jobs/{job_id}:cancel:\001*\332A\030project_id,re" + + "gion,job_id\022\254\001\n\tDeleteJob\022*.google.cloud" + + ".dataproc.v1.DeleteJobRequest\032\026.google.p" + + "rotobuf.Empty\"[\202\323\344\223\002:*8/v1/projects/{pro" + + "ject_id}/regions/{region}/jobs/{job_id}\332" + + "A\030project_id,region,job_id\032K\312A\027dataproc." + + "googleapis.com\322A.https://www.googleapis." + + "com/auth/cloud-platformBm\n\034com.google.cl" + + "oud.dataproc.v1B\tJobsProtoP\001Z@google.gol" + + "ang.org/genproto/googleapis/cloud/datapr" + + "oc/v1;dataprocb\006proto3" }; descriptor = com.google.protobuf.Descriptors.FileDescriptor.internalBuildGeneratedFileFrom( @@ -357,6 +396,7 @@ public static com.google.protobuf.Descriptors.FileDescriptor getDescriptor() { com.google.api.AnnotationsProto.getDescriptor(), com.google.api.ClientProto.getDescriptor(), com.google.api.FieldBehaviorProto.getDescriptor(), + com.google.longrunning.OperationsProto.getDescriptor(), com.google.protobuf.EmptyProto.getDescriptor(), com.google.protobuf.FieldMaskProto.getDescriptor(), com.google.protobuf.TimestampProto.getDescriptor(), @@ -547,8 +587,47 @@ public static com.google.protobuf.Descriptors.FileDescriptor getDescriptor() { new java.lang.String[] { "Key", "Value", }); - internal_static_google_cloud_dataproc_v1_JobPlacement_descriptor = + internal_static_google_cloud_dataproc_v1_SparkRJob_descriptor = getDescriptor().getMessageTypes().get(8); + internal_static_google_cloud_dataproc_v1_SparkRJob_fieldAccessorTable = + new com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( + 
internal_static_google_cloud_dataproc_v1_SparkRJob_descriptor, + new java.lang.String[] { + "MainRFileUri", "Args", "FileUris", "ArchiveUris", "Properties", "LoggingConfig", + }); + internal_static_google_cloud_dataproc_v1_SparkRJob_PropertiesEntry_descriptor = + internal_static_google_cloud_dataproc_v1_SparkRJob_descriptor.getNestedTypes().get(0); + internal_static_google_cloud_dataproc_v1_SparkRJob_PropertiesEntry_fieldAccessorTable = + new com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( + internal_static_google_cloud_dataproc_v1_SparkRJob_PropertiesEntry_descriptor, + new java.lang.String[] { + "Key", "Value", + }); + internal_static_google_cloud_dataproc_v1_PrestoJob_descriptor = + getDescriptor().getMessageTypes().get(9); + internal_static_google_cloud_dataproc_v1_PrestoJob_fieldAccessorTable = + new com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( + internal_static_google_cloud_dataproc_v1_PrestoJob_descriptor, + new java.lang.String[] { + "QueryFileUri", + "QueryList", + "ContinueOnFailure", + "OutputFormat", + "ClientTags", + "Properties", + "LoggingConfig", + "Queries", + }); + internal_static_google_cloud_dataproc_v1_PrestoJob_PropertiesEntry_descriptor = + internal_static_google_cloud_dataproc_v1_PrestoJob_descriptor.getNestedTypes().get(0); + internal_static_google_cloud_dataproc_v1_PrestoJob_PropertiesEntry_fieldAccessorTable = + new com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( + internal_static_google_cloud_dataproc_v1_PrestoJob_PropertiesEntry_descriptor, + new java.lang.String[] { + "Key", "Value", + }); + internal_static_google_cloud_dataproc_v1_JobPlacement_descriptor = + getDescriptor().getMessageTypes().get(10); internal_static_google_cloud_dataproc_v1_JobPlacement_fieldAccessorTable = new com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( internal_static_google_cloud_dataproc_v1_JobPlacement_descriptor, @@ -556,7 +635,7 @@ public static com.google.protobuf.Descriptors.FileDescriptor getDescriptor() { 
"ClusterName", "ClusterUuid", }); internal_static_google_cloud_dataproc_v1_JobStatus_descriptor = - getDescriptor().getMessageTypes().get(9); + getDescriptor().getMessageTypes().get(11); internal_static_google_cloud_dataproc_v1_JobStatus_fieldAccessorTable = new com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( internal_static_google_cloud_dataproc_v1_JobStatus_descriptor, @@ -564,7 +643,7 @@ public static com.google.protobuf.Descriptors.FileDescriptor getDescriptor() { "State", "Details", "StateStartTime", "Substate", }); internal_static_google_cloud_dataproc_v1_JobReference_descriptor = - getDescriptor().getMessageTypes().get(10); + getDescriptor().getMessageTypes().get(12); internal_static_google_cloud_dataproc_v1_JobReference_fieldAccessorTable = new com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( internal_static_google_cloud_dataproc_v1_JobReference_descriptor, @@ -572,7 +651,7 @@ public static com.google.protobuf.Descriptors.FileDescriptor getDescriptor() { "ProjectId", "JobId", }); internal_static_google_cloud_dataproc_v1_YarnApplication_descriptor = - getDescriptor().getMessageTypes().get(11); + getDescriptor().getMessageTypes().get(13); internal_static_google_cloud_dataproc_v1_YarnApplication_fieldAccessorTable = new com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( internal_static_google_cloud_dataproc_v1_YarnApplication_descriptor, @@ -580,7 +659,7 @@ public static com.google.protobuf.Descriptors.FileDescriptor getDescriptor() { "Name", "State", "Progress", "TrackingUrl", }); internal_static_google_cloud_dataproc_v1_Job_descriptor = - getDescriptor().getMessageTypes().get(12); + getDescriptor().getMessageTypes().get(14); internal_static_google_cloud_dataproc_v1_Job_fieldAccessorTable = new com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( internal_static_google_cloud_dataproc_v1_Job_descriptor, @@ -592,7 +671,9 @@ public static com.google.protobuf.Descriptors.FileDescriptor getDescriptor() { "PysparkJob", "HiveJob", 
"PigJob", + "SparkRJob", "SparkSqlJob", + "PrestoJob", "Status", "StatusHistory", "YarnApplications", @@ -612,7 +693,7 @@ public static com.google.protobuf.Descriptors.FileDescriptor getDescriptor() { "Key", "Value", }); internal_static_google_cloud_dataproc_v1_JobScheduling_descriptor = - getDescriptor().getMessageTypes().get(13); + getDescriptor().getMessageTypes().get(15); internal_static_google_cloud_dataproc_v1_JobScheduling_fieldAccessorTable = new com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( internal_static_google_cloud_dataproc_v1_JobScheduling_descriptor, @@ -620,7 +701,7 @@ public static com.google.protobuf.Descriptors.FileDescriptor getDescriptor() { "MaxFailuresPerHour", }); internal_static_google_cloud_dataproc_v1_SubmitJobRequest_descriptor = - getDescriptor().getMessageTypes().get(14); + getDescriptor().getMessageTypes().get(16); internal_static_google_cloud_dataproc_v1_SubmitJobRequest_fieldAccessorTable = new com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( internal_static_google_cloud_dataproc_v1_SubmitJobRequest_descriptor, @@ -628,7 +709,7 @@ public static com.google.protobuf.Descriptors.FileDescriptor getDescriptor() { "ProjectId", "Region", "Job", "RequestId", }); internal_static_google_cloud_dataproc_v1_GetJobRequest_descriptor = - getDescriptor().getMessageTypes().get(15); + getDescriptor().getMessageTypes().get(17); internal_static_google_cloud_dataproc_v1_GetJobRequest_fieldAccessorTable = new com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( internal_static_google_cloud_dataproc_v1_GetJobRequest_descriptor, @@ -636,7 +717,7 @@ public static com.google.protobuf.Descriptors.FileDescriptor getDescriptor() { "ProjectId", "Region", "JobId", }); internal_static_google_cloud_dataproc_v1_ListJobsRequest_descriptor = - getDescriptor().getMessageTypes().get(16); + getDescriptor().getMessageTypes().get(18); internal_static_google_cloud_dataproc_v1_ListJobsRequest_fieldAccessorTable = new 
com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( internal_static_google_cloud_dataproc_v1_ListJobsRequest_descriptor, @@ -650,7 +731,7 @@ public static com.google.protobuf.Descriptors.FileDescriptor getDescriptor() { "Filter", }); internal_static_google_cloud_dataproc_v1_UpdateJobRequest_descriptor = - getDescriptor().getMessageTypes().get(17); + getDescriptor().getMessageTypes().get(19); internal_static_google_cloud_dataproc_v1_UpdateJobRequest_fieldAccessorTable = new com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( internal_static_google_cloud_dataproc_v1_UpdateJobRequest_descriptor, @@ -658,7 +739,7 @@ public static com.google.protobuf.Descriptors.FileDescriptor getDescriptor() { "ProjectId", "Region", "JobId", "Job", "UpdateMask", }); internal_static_google_cloud_dataproc_v1_ListJobsResponse_descriptor = - getDescriptor().getMessageTypes().get(18); + getDescriptor().getMessageTypes().get(20); internal_static_google_cloud_dataproc_v1_ListJobsResponse_fieldAccessorTable = new com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( internal_static_google_cloud_dataproc_v1_ListJobsResponse_descriptor, @@ -666,7 +747,7 @@ public static com.google.protobuf.Descriptors.FileDescriptor getDescriptor() { "Jobs", "NextPageToken", }); internal_static_google_cloud_dataproc_v1_CancelJobRequest_descriptor = - getDescriptor().getMessageTypes().get(19); + getDescriptor().getMessageTypes().get(21); internal_static_google_cloud_dataproc_v1_CancelJobRequest_fieldAccessorTable = new com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( internal_static_google_cloud_dataproc_v1_CancelJobRequest_descriptor, @@ -674,7 +755,7 @@ public static com.google.protobuf.Descriptors.FileDescriptor getDescriptor() { "ProjectId", "Region", "JobId", }); internal_static_google_cloud_dataproc_v1_DeleteJobRequest_descriptor = - getDescriptor().getMessageTypes().get(20); + getDescriptor().getMessageTypes().get(22); 
internal_static_google_cloud_dataproc_v1_DeleteJobRequest_fieldAccessorTable = new com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( internal_static_google_cloud_dataproc_v1_DeleteJobRequest_descriptor, @@ -693,6 +774,7 @@ public static com.google.protobuf.Descriptors.FileDescriptor getDescriptor() { com.google.api.AnnotationsProto.getDescriptor(); com.google.api.ClientProto.getDescriptor(); com.google.api.FieldBehaviorProto.getDescriptor(); + com.google.longrunning.OperationsProto.getDescriptor(); com.google.protobuf.EmptyProto.getDescriptor(); com.google.protobuf.FieldMaskProto.getDescriptor(); com.google.protobuf.TimestampProto.getDescriptor(); diff --git a/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/LifecycleConfig.java b/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/LifecycleConfig.java new file mode 100644 index 00000000..1c239f35 --- /dev/null +++ b/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/LifecycleConfig.java @@ -0,0 +1,1881 @@ +/* + * Copyright 2020 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// source: google/cloud/dataproc/v1/clusters.proto + +package com.google.cloud.dataproc.v1; + +/** + * + * + *
+ * Specifies the cluster auto-delete schedule configuration.
+ * 
+ * + * Protobuf type {@code google.cloud.dataproc.v1.LifecycleConfig} + */ +public final class LifecycleConfig extends com.google.protobuf.GeneratedMessageV3 + implements + // @@protoc_insertion_point(message_implements:google.cloud.dataproc.v1.LifecycleConfig) + LifecycleConfigOrBuilder { + private static final long serialVersionUID = 0L; + // Use LifecycleConfig.newBuilder() to construct. + private LifecycleConfig(com.google.protobuf.GeneratedMessageV3.Builder builder) { + super(builder); + } + + private LifecycleConfig() {} + + @java.lang.Override + @SuppressWarnings({"unused"}) + protected java.lang.Object newInstance(UnusedPrivateParameter unused) { + return new LifecycleConfig(); + } + + @java.lang.Override + public final com.google.protobuf.UnknownFieldSet getUnknownFields() { + return this.unknownFields; + } + + private LifecycleConfig( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + this(); + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + com.google.protobuf.UnknownFieldSet.Builder unknownFields = + com.google.protobuf.UnknownFieldSet.newBuilder(); + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 10: + { + com.google.protobuf.Duration.Builder subBuilder = null; + if (idleDeleteTtl_ != null) { + subBuilder = idleDeleteTtl_.toBuilder(); + } + idleDeleteTtl_ = + input.readMessage(com.google.protobuf.Duration.parser(), extensionRegistry); + if (subBuilder != null) { + subBuilder.mergeFrom(idleDeleteTtl_); + idleDeleteTtl_ = subBuilder.buildPartial(); + } + + break; + } + case 18: + { + com.google.protobuf.Timestamp.Builder subBuilder = null; + if (ttlCase_ == 2) { + subBuilder = ((com.google.protobuf.Timestamp) ttl_).toBuilder(); + } + ttl_ = input.readMessage(com.google.protobuf.Timestamp.parser(), 
extensionRegistry); + if (subBuilder != null) { + subBuilder.mergeFrom((com.google.protobuf.Timestamp) ttl_); + ttl_ = subBuilder.buildPartial(); + } + ttlCase_ = 2; + break; + } + case 26: + { + com.google.protobuf.Duration.Builder subBuilder = null; + if (ttlCase_ == 3) { + subBuilder = ((com.google.protobuf.Duration) ttl_).toBuilder(); + } + ttl_ = input.readMessage(com.google.protobuf.Duration.parser(), extensionRegistry); + if (subBuilder != null) { + subBuilder.mergeFrom((com.google.protobuf.Duration) ttl_); + ttl_ = subBuilder.buildPartial(); + } + ttlCase_ = 3; + break; + } + case 34: + { + com.google.protobuf.Timestamp.Builder subBuilder = null; + if (idleStartTime_ != null) { + subBuilder = idleStartTime_.toBuilder(); + } + idleStartTime_ = + input.readMessage(com.google.protobuf.Timestamp.parser(), extensionRegistry); + if (subBuilder != null) { + subBuilder.mergeFrom(idleStartTime_); + idleStartTime_ = subBuilder.buildPartial(); + } + + break; + } + default: + { + if (!parseUnknownField(input, unknownFields, extensionRegistry, tag)) { + done = true; + } + break; + } + } + } + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(this); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException(e).setUnfinishedMessage(this); + } finally { + this.unknownFields = unknownFields.build(); + makeExtensionsImmutable(); + } + } + + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.cloud.dataproc.v1.ClustersProto + .internal_static_google_cloud_dataproc_v1_LifecycleConfig_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.cloud.dataproc.v1.ClustersProto + .internal_static_google_cloud_dataproc_v1_LifecycleConfig_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.cloud.dataproc.v1.LifecycleConfig.class, 
+ com.google.cloud.dataproc.v1.LifecycleConfig.Builder.class); + } + + private int ttlCase_ = 0; + private java.lang.Object ttl_; + + public enum TtlCase + implements + com.google.protobuf.Internal.EnumLite, + com.google.protobuf.AbstractMessage.InternalOneOfEnum { + AUTO_DELETE_TIME(2), + AUTO_DELETE_TTL(3), + TTL_NOT_SET(0); + private final int value; + + private TtlCase(int value) { + this.value = value; + } + /** + * @param value The number of the enum to look for. + * @return The enum associated with the given number. + * @deprecated Use {@link #forNumber(int)} instead. + */ + @java.lang.Deprecated + public static TtlCase valueOf(int value) { + return forNumber(value); + } + + public static TtlCase forNumber(int value) { + switch (value) { + case 2: + return AUTO_DELETE_TIME; + case 3: + return AUTO_DELETE_TTL; + case 0: + return TTL_NOT_SET; + default: + return null; + } + } + + public int getNumber() { + return this.value; + } + }; + + public TtlCase getTtlCase() { + return TtlCase.forNumber(ttlCase_); + } + + public static final int IDLE_DELETE_TTL_FIELD_NUMBER = 1; + private com.google.protobuf.Duration idleDeleteTtl_; + /** + * + * + *
+   * Optional. The duration to keep the cluster alive while idling (when no jobs
+   * are running). Passing this threshold will cause the cluster to be
+   * deleted. Minimum value is 10 minutes; maximum value is 14 days (see JSON
+   * representation of
+   * [Duration](https://developers.google.com/protocol-buffers/docs/proto3#json).
+   * 
+ * + * .google.protobuf.Duration idle_delete_ttl = 1 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return Whether the idleDeleteTtl field is set. + */ + public boolean hasIdleDeleteTtl() { + return idleDeleteTtl_ != null; + } + /** + * + * + *
+   * Optional. The duration to keep the cluster alive while idling (when no jobs
+   * are running). Passing this threshold will cause the cluster to be
+   * deleted. Minimum value is 10 minutes; maximum value is 14 days (see JSON
+   * representation of
+   * [Duration](https://developers.google.com/protocol-buffers/docs/proto3#json).
+   * 
+ * + * .google.protobuf.Duration idle_delete_ttl = 1 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return The idleDeleteTtl. + */ + public com.google.protobuf.Duration getIdleDeleteTtl() { + return idleDeleteTtl_ == null + ? com.google.protobuf.Duration.getDefaultInstance() + : idleDeleteTtl_; + } + /** + * + * + *
+   * Optional. The duration to keep the cluster alive while idling (when no jobs
+   * are running). Passing this threshold will cause the cluster to be
+   * deleted. Minimum value is 10 minutes; maximum value is 14 days (see JSON
+   * representation of
+   * [Duration](https://developers.google.com/protocol-buffers/docs/proto3#json).
+   * 
+ * + * .google.protobuf.Duration idle_delete_ttl = 1 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public com.google.protobuf.DurationOrBuilder getIdleDeleteTtlOrBuilder() { + return getIdleDeleteTtl(); + } + + public static final int AUTO_DELETE_TIME_FIELD_NUMBER = 2; + /** + * + * + *
+   * Optional. The time when cluster will be auto-deleted (see JSON representation of
+   * [Timestamp](https://developers.google.com/protocol-buffers/docs/proto3#json)).
+   * 
+ * + * + * .google.protobuf.Timestamp auto_delete_time = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return Whether the autoDeleteTime field is set. + */ + public boolean hasAutoDeleteTime() { + return ttlCase_ == 2; + } + /** + * + * + *
+   * Optional. The time when cluster will be auto-deleted (see JSON representation of
+   * [Timestamp](https://developers.google.com/protocol-buffers/docs/proto3#json)).
+   * 
+ * + * + * .google.protobuf.Timestamp auto_delete_time = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return The autoDeleteTime. + */ + public com.google.protobuf.Timestamp getAutoDeleteTime() { + if (ttlCase_ == 2) { + return (com.google.protobuf.Timestamp) ttl_; + } + return com.google.protobuf.Timestamp.getDefaultInstance(); + } + /** + * + * + *
+   * Optional. The time when cluster will be auto-deleted (see JSON representation of
+   * [Timestamp](https://developers.google.com/protocol-buffers/docs/proto3#json)).
+   * 
+ * + * + * .google.protobuf.Timestamp auto_delete_time = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public com.google.protobuf.TimestampOrBuilder getAutoDeleteTimeOrBuilder() { + if (ttlCase_ == 2) { + return (com.google.protobuf.Timestamp) ttl_; + } + return com.google.protobuf.Timestamp.getDefaultInstance(); + } + + public static final int AUTO_DELETE_TTL_FIELD_NUMBER = 3; + /** + * + * + *
+   * Optional. The lifetime duration of cluster. The cluster will be
+   * auto-deleted at the end of this period. Minimum value is 10 minutes;
+   * maximum value is 14 days (see JSON representation of
+   * [Duration](https://developers.google.com/protocol-buffers/docs/proto3#json)).
+   * 
+ * + * .google.protobuf.Duration auto_delete_ttl = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return Whether the autoDeleteTtl field is set. + */ + public boolean hasAutoDeleteTtl() { + return ttlCase_ == 3; + } + /** + * + * + *
+   * Optional. The lifetime duration of cluster. The cluster will be
+   * auto-deleted at the end of this period. Minimum value is 10 minutes;
+   * maximum value is 14 days (see JSON representation of
+   * [Duration](https://developers.google.com/protocol-buffers/docs/proto3#json)).
+   * 
+ * + * .google.protobuf.Duration auto_delete_ttl = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return The autoDeleteTtl. + */ + public com.google.protobuf.Duration getAutoDeleteTtl() { + if (ttlCase_ == 3) { + return (com.google.protobuf.Duration) ttl_; + } + return com.google.protobuf.Duration.getDefaultInstance(); + } + /** + * + * + *
+   * Optional. The lifetime duration of cluster. The cluster will be
+   * auto-deleted at the end of this period. Minimum value is 10 minutes;
+   * maximum value is 14 days (see JSON representation of
+   * [Duration](https://developers.google.com/protocol-buffers/docs/proto3#json)).
+   * 
+ * + * .google.protobuf.Duration auto_delete_ttl = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public com.google.protobuf.DurationOrBuilder getAutoDeleteTtlOrBuilder() { + if (ttlCase_ == 3) { + return (com.google.protobuf.Duration) ttl_; + } + return com.google.protobuf.Duration.getDefaultInstance(); + } + + public static final int IDLE_START_TIME_FIELD_NUMBER = 4; + private com.google.protobuf.Timestamp idleStartTime_; + /** + * + * + *
+   * Output only. The time when cluster became idle (most recent job finished)
+   * and became eligible for deletion due to idleness (see JSON representation
+   * of
+   * [Timestamp](https://developers.google.com/protocol-buffers/docs/proto3#json)).
+   * 
+ * + * + * .google.protobuf.Timestamp idle_start_time = 4 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @return Whether the idleStartTime field is set. + */ + public boolean hasIdleStartTime() { + return idleStartTime_ != null; + } + /** + * + * + *
+   * Output only. The time when cluster became idle (most recent job finished)
+   * and became eligible for deletion due to idleness (see JSON representation
+   * of
+   * [Timestamp](https://developers.google.com/protocol-buffers/docs/proto3#json)).
+   * 
+ * + * + * .google.protobuf.Timestamp idle_start_time = 4 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @return The idleStartTime. + */ + public com.google.protobuf.Timestamp getIdleStartTime() { + return idleStartTime_ == null + ? com.google.protobuf.Timestamp.getDefaultInstance() + : idleStartTime_; + } + /** + * + * + *
+   * Output only. The time when cluster became idle (most recent job finished)
+   * and became eligible for deletion due to idleness (see JSON representation
+   * of
+   * [Timestamp](https://developers.google.com/protocol-buffers/docs/proto3#json)).
+   * 
+ * + * + * .google.protobuf.Timestamp idle_start_time = 4 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public com.google.protobuf.TimestampOrBuilder getIdleStartTimeOrBuilder() { + return getIdleStartTime(); + } + + private byte memoizedIsInitialized = -1; + + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { + if (idleDeleteTtl_ != null) { + output.writeMessage(1, getIdleDeleteTtl()); + } + if (ttlCase_ == 2) { + output.writeMessage(2, (com.google.protobuf.Timestamp) ttl_); + } + if (ttlCase_ == 3) { + output.writeMessage(3, (com.google.protobuf.Duration) ttl_); + } + if (idleStartTime_ != null) { + output.writeMessage(4, getIdleStartTime()); + } + unknownFields.writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + if (idleDeleteTtl_ != null) { + size += com.google.protobuf.CodedOutputStream.computeMessageSize(1, getIdleDeleteTtl()); + } + if (ttlCase_ == 2) { + size += + com.google.protobuf.CodedOutputStream.computeMessageSize( + 2, (com.google.protobuf.Timestamp) ttl_); + } + if (ttlCase_ == 3) { + size += + com.google.protobuf.CodedOutputStream.computeMessageSize( + 3, (com.google.protobuf.Duration) ttl_); + } + if (idleStartTime_ != null) { + size += com.google.protobuf.CodedOutputStream.computeMessageSize(4, getIdleStartTime()); + } + size += unknownFields.getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof com.google.cloud.dataproc.v1.LifecycleConfig)) { + return super.equals(obj); + } + 
com.google.cloud.dataproc.v1.LifecycleConfig other = + (com.google.cloud.dataproc.v1.LifecycleConfig) obj; + + if (hasIdleDeleteTtl() != other.hasIdleDeleteTtl()) return false; + if (hasIdleDeleteTtl()) { + if (!getIdleDeleteTtl().equals(other.getIdleDeleteTtl())) return false; + } + if (hasIdleStartTime() != other.hasIdleStartTime()) return false; + if (hasIdleStartTime()) { + if (!getIdleStartTime().equals(other.getIdleStartTime())) return false; + } + if (!getTtlCase().equals(other.getTtlCase())) return false; + switch (ttlCase_) { + case 2: + if (!getAutoDeleteTime().equals(other.getAutoDeleteTime())) return false; + break; + case 3: + if (!getAutoDeleteTtl().equals(other.getAutoDeleteTtl())) return false; + break; + case 0: + default: + } + if (!unknownFields.equals(other.unknownFields)) return false; + return true; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + if (hasIdleDeleteTtl()) { + hash = (37 * hash) + IDLE_DELETE_TTL_FIELD_NUMBER; + hash = (53 * hash) + getIdleDeleteTtl().hashCode(); + } + if (hasIdleStartTime()) { + hash = (37 * hash) + IDLE_START_TIME_FIELD_NUMBER; + hash = (53 * hash) + getIdleStartTime().hashCode(); + } + switch (ttlCase_) { + case 2: + hash = (37 * hash) + AUTO_DELETE_TIME_FIELD_NUMBER; + hash = (53 * hash) + getAutoDeleteTime().hashCode(); + break; + case 3: + hash = (37 * hash) + AUTO_DELETE_TTL_FIELD_NUMBER; + hash = (53 * hash) + getAutoDeleteTtl().hashCode(); + break; + case 0: + default: + } + hash = (29 * hash) + unknownFields.hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static com.google.cloud.dataproc.v1.LifecycleConfig parseFrom(java.nio.ByteBuffer data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.cloud.dataproc.v1.LifecycleConfig parseFrom( + java.nio.ByteBuffer data, 
com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.dataproc.v1.LifecycleConfig parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.cloud.dataproc.v1.LifecycleConfig parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.dataproc.v1.LifecycleConfig parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.cloud.dataproc.v1.LifecycleConfig parseFrom( + byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.dataproc.v1.LifecycleConfig parseFrom(java.io.InputStream input) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException(PARSER, input); + } + + public static com.google.cloud.dataproc.v1.LifecycleConfig parseFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.cloud.dataproc.v1.LifecycleConfig parseDelimitedFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseDelimitedWithIOException(PARSER, input); + } + + public static com.google.cloud.dataproc.v1.LifecycleConfig parseDelimitedFrom( + 
java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseDelimitedWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.cloud.dataproc.v1.LifecycleConfig parseFrom( + com.google.protobuf.CodedInputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException(PARSER, input); + } + + public static com.google.cloud.dataproc.v1.LifecycleConfig parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException( + PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { + return newBuilder(); + } + + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + + public static Builder newBuilder(com.google.cloud.dataproc.v1.LifecycleConfig prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType(com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + /** + * + * + *
+   * Specifies the cluster auto-delete schedule configuration.
+   * 
+ * + * Protobuf type {@code google.cloud.dataproc.v1.LifecycleConfig} + */ + public static final class Builder extends com.google.protobuf.GeneratedMessageV3.Builder + implements + // @@protoc_insertion_point(builder_implements:google.cloud.dataproc.v1.LifecycleConfig) + com.google.cloud.dataproc.v1.LifecycleConfigOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.cloud.dataproc.v1.ClustersProto + .internal_static_google_cloud_dataproc_v1_LifecycleConfig_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.cloud.dataproc.v1.ClustersProto + .internal_static_google_cloud_dataproc_v1_LifecycleConfig_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.cloud.dataproc.v1.LifecycleConfig.class, + com.google.cloud.dataproc.v1.LifecycleConfig.Builder.class); + } + + // Construct using com.google.cloud.dataproc.v1.LifecycleConfig.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder(com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessageV3.alwaysUseFieldBuilders) {} + } + + @java.lang.Override + public Builder clear() { + super.clear(); + if (idleDeleteTtlBuilder_ == null) { + idleDeleteTtl_ = null; + } else { + idleDeleteTtl_ = null; + idleDeleteTtlBuilder_ = null; + } + if (idleStartTimeBuilder_ == null) { + idleStartTime_ = null; + } else { + idleStartTime_ = null; + idleStartTimeBuilder_ = null; + } + ttlCase_ = 0; + ttl_ = null; + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { + return com.google.cloud.dataproc.v1.ClustersProto + .internal_static_google_cloud_dataproc_v1_LifecycleConfig_descriptor; + } + + 
@java.lang.Override + public com.google.cloud.dataproc.v1.LifecycleConfig getDefaultInstanceForType() { + return com.google.cloud.dataproc.v1.LifecycleConfig.getDefaultInstance(); + } + + @java.lang.Override + public com.google.cloud.dataproc.v1.LifecycleConfig build() { + com.google.cloud.dataproc.v1.LifecycleConfig result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public com.google.cloud.dataproc.v1.LifecycleConfig buildPartial() { + com.google.cloud.dataproc.v1.LifecycleConfig result = + new com.google.cloud.dataproc.v1.LifecycleConfig(this); + if (idleDeleteTtlBuilder_ == null) { + result.idleDeleteTtl_ = idleDeleteTtl_; + } else { + result.idleDeleteTtl_ = idleDeleteTtlBuilder_.build(); + } + if (ttlCase_ == 2) { + if (autoDeleteTimeBuilder_ == null) { + result.ttl_ = ttl_; + } else { + result.ttl_ = autoDeleteTimeBuilder_.build(); + } + } + if (ttlCase_ == 3) { + if (autoDeleteTtlBuilder_ == null) { + result.ttl_ = ttl_; + } else { + result.ttl_ = autoDeleteTtlBuilder_.build(); + } + } + if (idleStartTimeBuilder_ == null) { + result.idleStartTime_ = idleStartTime_; + } else { + result.idleStartTime_ = idleStartTimeBuilder_.build(); + } + result.ttlCase_ = ttlCase_; + onBuilt(); + return result; + } + + @java.lang.Override + public Builder clone() { + return super.clone(); + } + + @java.lang.Override + public Builder setField( + com.google.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { + return super.setField(field, value); + } + + @java.lang.Override + public Builder clearField(com.google.protobuf.Descriptors.FieldDescriptor field) { + return super.clearField(field); + } + + @java.lang.Override + public Builder clearOneof(com.google.protobuf.Descriptors.OneofDescriptor oneof) { + return super.clearOneof(oneof); + } + + @java.lang.Override + public Builder setRepeatedField( + com.google.protobuf.Descriptors.FieldDescriptor field, 
int index, java.lang.Object value) { + return super.setRepeatedField(field, index, value); + } + + @java.lang.Override + public Builder addRepeatedField( + com.google.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { + return super.addRepeatedField(field, value); + } + + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof com.google.cloud.dataproc.v1.LifecycleConfig) { + return mergeFrom((com.google.cloud.dataproc.v1.LifecycleConfig) other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(com.google.cloud.dataproc.v1.LifecycleConfig other) { + if (other == com.google.cloud.dataproc.v1.LifecycleConfig.getDefaultInstance()) return this; + if (other.hasIdleDeleteTtl()) { + mergeIdleDeleteTtl(other.getIdleDeleteTtl()); + } + if (other.hasIdleStartTime()) { + mergeIdleStartTime(other.getIdleStartTime()); + } + switch (other.getTtlCase()) { + case AUTO_DELETE_TIME: + { + mergeAutoDeleteTime(other.getAutoDeleteTime()); + break; + } + case AUTO_DELETE_TTL: + { + mergeAutoDeleteTtl(other.getAutoDeleteTtl()); + break; + } + case TTL_NOT_SET: + { + break; + } + } + this.mergeUnknownFields(other.unknownFields); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + com.google.cloud.dataproc.v1.LifecycleConfig parsedMessage = null; + try { + parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + parsedMessage = (com.google.cloud.dataproc.v1.LifecycleConfig) e.getUnfinishedMessage(); + throw e.unwrapIOException(); + } finally { + if (parsedMessage != null) { + mergeFrom(parsedMessage); + } + } + return this; + } + + private int 
ttlCase_ = 0; + private java.lang.Object ttl_; + + public TtlCase getTtlCase() { + return TtlCase.forNumber(ttlCase_); + } + + public Builder clearTtl() { + ttlCase_ = 0; + ttl_ = null; + onChanged(); + return this; + } + + private com.google.protobuf.Duration idleDeleteTtl_; + private com.google.protobuf.SingleFieldBuilderV3< + com.google.protobuf.Duration, + com.google.protobuf.Duration.Builder, + com.google.protobuf.DurationOrBuilder> + idleDeleteTtlBuilder_; + /** + * + * + *
+     * Optional. The duration to keep the cluster alive while idling (when no jobs
+     * are running). Passing this threshold will cause the cluster to be
+     * deleted. Minimum value is 10 minutes; maximum value is 14 days (see JSON
+     * representation of
+     * [Duration](https://developers.google.com/protocol-buffers/docs/proto3#json).
+     * 
+ * + * + * .google.protobuf.Duration idle_delete_ttl = 1 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return Whether the idleDeleteTtl field is set. + */ + public boolean hasIdleDeleteTtl() { + return idleDeleteTtlBuilder_ != null || idleDeleteTtl_ != null; + } + /** + * + * + *
+     * Optional. The duration to keep the cluster alive while idling (when no jobs
+     * are running). Passing this threshold will cause the cluster to be
+     * deleted. Minimum value is 10 minutes; maximum value is 14 days (see JSON
+     * representation of
+     * [Duration](https://developers.google.com/protocol-buffers/docs/proto3#json).
+     * 
+ * + * + * .google.protobuf.Duration idle_delete_ttl = 1 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return The idleDeleteTtl. + */ + public com.google.protobuf.Duration getIdleDeleteTtl() { + if (idleDeleteTtlBuilder_ == null) { + return idleDeleteTtl_ == null + ? com.google.protobuf.Duration.getDefaultInstance() + : idleDeleteTtl_; + } else { + return idleDeleteTtlBuilder_.getMessage(); + } + } + /** + * + * + *
+     * Optional. The duration to keep the cluster alive while idling (when no jobs
+     * are running). Passing this threshold will cause the cluster to be
+     * deleted. Minimum value is 10 minutes; maximum value is 14 days (see JSON
+     * representation of
+     * [Duration](https://developers.google.com/protocol-buffers/docs/proto3#json).
+     * 
+ * + * + * .google.protobuf.Duration idle_delete_ttl = 1 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder setIdleDeleteTtl(com.google.protobuf.Duration value) { + if (idleDeleteTtlBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + idleDeleteTtl_ = value; + onChanged(); + } else { + idleDeleteTtlBuilder_.setMessage(value); + } + + return this; + } + /** + * + * + *
+     * Optional. The duration to keep the cluster alive while idling (when no jobs
+     * are running). Passing this threshold will cause the cluster to be
+     * deleted. Minimum value is 10 minutes; maximum value is 14 days (see JSON
+     * representation of
+     * [Duration](https://developers.google.com/protocol-buffers/docs/proto3#json).
+     * 
+ * + * + * .google.protobuf.Duration idle_delete_ttl = 1 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder setIdleDeleteTtl(com.google.protobuf.Duration.Builder builderForValue) { + if (idleDeleteTtlBuilder_ == null) { + idleDeleteTtl_ = builderForValue.build(); + onChanged(); + } else { + idleDeleteTtlBuilder_.setMessage(builderForValue.build()); + } + + return this; + } + /** + * + * + *
+     * Optional. The duration to keep the cluster alive while idling (when no jobs
+     * are running). Passing this threshold will cause the cluster to be
+     * deleted. Minimum value is 10 minutes; maximum value is 14 days (see JSON
+     * representation of
+     * [Duration](https://developers.google.com/protocol-buffers/docs/proto3#json).
+     * 
+ * + * + * .google.protobuf.Duration idle_delete_ttl = 1 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder mergeIdleDeleteTtl(com.google.protobuf.Duration value) { + if (idleDeleteTtlBuilder_ == null) { + if (idleDeleteTtl_ != null) { + idleDeleteTtl_ = + com.google.protobuf.Duration.newBuilder(idleDeleteTtl_) + .mergeFrom(value) + .buildPartial(); + } else { + idleDeleteTtl_ = value; + } + onChanged(); + } else { + idleDeleteTtlBuilder_.mergeFrom(value); + } + + return this; + } + /** + * + * + *
+     * Optional. The duration to keep the cluster alive while idling (when no jobs
+     * are running). Passing this threshold will cause the cluster to be
+     * deleted. Minimum value is 10 minutes; maximum value is 14 days (see JSON
+     * representation of
+     * [Duration](https://developers.google.com/protocol-buffers/docs/proto3#json).
+     * 
+ * + * + * .google.protobuf.Duration idle_delete_ttl = 1 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder clearIdleDeleteTtl() { + if (idleDeleteTtlBuilder_ == null) { + idleDeleteTtl_ = null; + onChanged(); + } else { + idleDeleteTtl_ = null; + idleDeleteTtlBuilder_ = null; + } + + return this; + } + /** + * + * + *
+     * Optional. The duration to keep the cluster alive while idling (when no jobs
+     * are running). Passing this threshold will cause the cluster to be
+     * deleted. Minimum value is 10 minutes; maximum value is 14 days (see JSON
+     * representation of
+     * [Duration](https://developers.google.com/protocol-buffers/docs/proto3#json).
+     * 
+ * + * + * .google.protobuf.Duration idle_delete_ttl = 1 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public com.google.protobuf.Duration.Builder getIdleDeleteTtlBuilder() { + + onChanged(); + return getIdleDeleteTtlFieldBuilder().getBuilder(); + } + /** + * + * + *
+     * Optional. The duration to keep the cluster alive while idling (when no jobs
+     * are running). Passing this threshold will cause the cluster to be
+     * deleted. Minimum value is 10 minutes; maximum value is 14 days (see JSON
+     * representation of
+     * [Duration](https://developers.google.com/protocol-buffers/docs/proto3#json).
+     * 
+ * + * + * .google.protobuf.Duration idle_delete_ttl = 1 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public com.google.protobuf.DurationOrBuilder getIdleDeleteTtlOrBuilder() { + if (idleDeleteTtlBuilder_ != null) { + return idleDeleteTtlBuilder_.getMessageOrBuilder(); + } else { + return idleDeleteTtl_ == null + ? com.google.protobuf.Duration.getDefaultInstance() + : idleDeleteTtl_; + } + } + /** + * + * + *
+     * Optional. The duration to keep the cluster alive while idling (when no jobs
+     * are running). Passing this threshold will cause the cluster to be
+     * deleted. Minimum value is 10 minutes; maximum value is 14 days (see JSON
+     * representation of
+     * [Duration](https://developers.google.com/protocol-buffers/docs/proto3#json).
+     * 
+ * + * + * .google.protobuf.Duration idle_delete_ttl = 1 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + private com.google.protobuf.SingleFieldBuilderV3< + com.google.protobuf.Duration, + com.google.protobuf.Duration.Builder, + com.google.protobuf.DurationOrBuilder> + getIdleDeleteTtlFieldBuilder() { + if (idleDeleteTtlBuilder_ == null) { + idleDeleteTtlBuilder_ = + new com.google.protobuf.SingleFieldBuilderV3< + com.google.protobuf.Duration, + com.google.protobuf.Duration.Builder, + com.google.protobuf.DurationOrBuilder>( + getIdleDeleteTtl(), getParentForChildren(), isClean()); + idleDeleteTtl_ = null; + } + return idleDeleteTtlBuilder_; + } + + private com.google.protobuf.SingleFieldBuilderV3< + com.google.protobuf.Timestamp, + com.google.protobuf.Timestamp.Builder, + com.google.protobuf.TimestampOrBuilder> + autoDeleteTimeBuilder_; + /** + * + * + *
+     * Optional. The time when the cluster will be auto-deleted (see JSON representation of
+     * [Timestamp](https://developers.google.com/protocol-buffers/docs/proto3#json)).
+     * 
+ * + * + * .google.protobuf.Timestamp auto_delete_time = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return Whether the autoDeleteTime field is set. + */ + public boolean hasAutoDeleteTime() { + return ttlCase_ == 2; + } + /** + * + * + *
+     * Optional. The time when the cluster will be auto-deleted (see JSON representation of
+     * [Timestamp](https://developers.google.com/protocol-buffers/docs/proto3#json)).
+     * 
+ * + * + * .google.protobuf.Timestamp auto_delete_time = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return The autoDeleteTime. + */ + public com.google.protobuf.Timestamp getAutoDeleteTime() { + if (autoDeleteTimeBuilder_ == null) { + if (ttlCase_ == 2) { + return (com.google.protobuf.Timestamp) ttl_; + } + return com.google.protobuf.Timestamp.getDefaultInstance(); + } else { + if (ttlCase_ == 2) { + return autoDeleteTimeBuilder_.getMessage(); + } + return com.google.protobuf.Timestamp.getDefaultInstance(); + } + } + /** + * + * + *
+     * Optional. The time when the cluster will be auto-deleted (see JSON representation of
+     * [Timestamp](https://developers.google.com/protocol-buffers/docs/proto3#json)).
+     * 
+ * + * + * .google.protobuf.Timestamp auto_delete_time = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder setAutoDeleteTime(com.google.protobuf.Timestamp value) { + if (autoDeleteTimeBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ttl_ = value; + onChanged(); + } else { + autoDeleteTimeBuilder_.setMessage(value); + } + ttlCase_ = 2; + return this; + } + /** + * + * + *
+     * Optional. The time when the cluster will be auto-deleted (see JSON representation of
+     * [Timestamp](https://developers.google.com/protocol-buffers/docs/proto3#json)).
+     * 
+ * + * + * .google.protobuf.Timestamp auto_delete_time = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder setAutoDeleteTime(com.google.protobuf.Timestamp.Builder builderForValue) { + if (autoDeleteTimeBuilder_ == null) { + ttl_ = builderForValue.build(); + onChanged(); + } else { + autoDeleteTimeBuilder_.setMessage(builderForValue.build()); + } + ttlCase_ = 2; + return this; + } + /** + * + * + *
+     * Optional. The time when the cluster will be auto-deleted (see JSON representation of
+     * [Timestamp](https://developers.google.com/protocol-buffers/docs/proto3#json)).
+     * 
+ * + * + * .google.protobuf.Timestamp auto_delete_time = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder mergeAutoDeleteTime(com.google.protobuf.Timestamp value) { + if (autoDeleteTimeBuilder_ == null) { + if (ttlCase_ == 2 && ttl_ != com.google.protobuf.Timestamp.getDefaultInstance()) { + ttl_ = + com.google.protobuf.Timestamp.newBuilder((com.google.protobuf.Timestamp) ttl_) + .mergeFrom(value) + .buildPartial(); + } else { + ttl_ = value; + } + onChanged(); + } else { + if (ttlCase_ == 2) { + autoDeleteTimeBuilder_.mergeFrom(value); + } + autoDeleteTimeBuilder_.setMessage(value); + } + ttlCase_ = 2; + return this; + } + /** + * + * + *
+     * Optional. The time when the cluster will be auto-deleted (see JSON representation of
+     * [Timestamp](https://developers.google.com/protocol-buffers/docs/proto3#json)).
+     * 
+ * + * + * .google.protobuf.Timestamp auto_delete_time = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder clearAutoDeleteTime() { + if (autoDeleteTimeBuilder_ == null) { + if (ttlCase_ == 2) { + ttlCase_ = 0; + ttl_ = null; + onChanged(); + } + } else { + if (ttlCase_ == 2) { + ttlCase_ = 0; + ttl_ = null; + } + autoDeleteTimeBuilder_.clear(); + } + return this; + } + /** + * + * + *
+     * Optional. The time when the cluster will be auto-deleted (see JSON representation of
+     * [Timestamp](https://developers.google.com/protocol-buffers/docs/proto3#json)).
+     * 
+ * + * + * .google.protobuf.Timestamp auto_delete_time = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public com.google.protobuf.Timestamp.Builder getAutoDeleteTimeBuilder() { + return getAutoDeleteTimeFieldBuilder().getBuilder(); + } + /** + * + * + *
+     * Optional. The time when the cluster will be auto-deleted (see JSON representation of
+     * [Timestamp](https://developers.google.com/protocol-buffers/docs/proto3#json)).
+     * 
+ * + * + * .google.protobuf.Timestamp auto_delete_time = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public com.google.protobuf.TimestampOrBuilder getAutoDeleteTimeOrBuilder() { + if ((ttlCase_ == 2) && (autoDeleteTimeBuilder_ != null)) { + return autoDeleteTimeBuilder_.getMessageOrBuilder(); + } else { + if (ttlCase_ == 2) { + return (com.google.protobuf.Timestamp) ttl_; + } + return com.google.protobuf.Timestamp.getDefaultInstance(); + } + } + /** + * + * + *
+     * Optional. The time when the cluster will be auto-deleted (see JSON representation of
+     * [Timestamp](https://developers.google.com/protocol-buffers/docs/proto3#json)).
+     * 
+ * + * + * .google.protobuf.Timestamp auto_delete_time = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + private com.google.protobuf.SingleFieldBuilderV3< + com.google.protobuf.Timestamp, + com.google.protobuf.Timestamp.Builder, + com.google.protobuf.TimestampOrBuilder> + getAutoDeleteTimeFieldBuilder() { + if (autoDeleteTimeBuilder_ == null) { + if (!(ttlCase_ == 2)) { + ttl_ = com.google.protobuf.Timestamp.getDefaultInstance(); + } + autoDeleteTimeBuilder_ = + new com.google.protobuf.SingleFieldBuilderV3< + com.google.protobuf.Timestamp, + com.google.protobuf.Timestamp.Builder, + com.google.protobuf.TimestampOrBuilder>( + (com.google.protobuf.Timestamp) ttl_, getParentForChildren(), isClean()); + ttl_ = null; + } + ttlCase_ = 2; + onChanged(); + ; + return autoDeleteTimeBuilder_; + } + + private com.google.protobuf.SingleFieldBuilderV3< + com.google.protobuf.Duration, + com.google.protobuf.Duration.Builder, + com.google.protobuf.DurationOrBuilder> + autoDeleteTtlBuilder_; + /** + * + * + *
+     * Optional. The lifetime duration of the cluster. The cluster will be
+     * auto-deleted at the end of this period. Minimum value is 10 minutes;
+     * maximum value is 14 days (see JSON representation of
+     * [Duration](https://developers.google.com/protocol-buffers/docs/proto3#json)).
+     * 
+ * + * + * .google.protobuf.Duration auto_delete_ttl = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return Whether the autoDeleteTtl field is set. + */ + public boolean hasAutoDeleteTtl() { + return ttlCase_ == 3; + } + /** + * + * + *
+     * Optional. The lifetime duration of the cluster. The cluster will be
+     * auto-deleted at the end of this period. Minimum value is 10 minutes;
+     * maximum value is 14 days (see JSON representation of
+     * [Duration](https://developers.google.com/protocol-buffers/docs/proto3#json)).
+     * 
+ * + * + * .google.protobuf.Duration auto_delete_ttl = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return The autoDeleteTtl. + */ + public com.google.protobuf.Duration getAutoDeleteTtl() { + if (autoDeleteTtlBuilder_ == null) { + if (ttlCase_ == 3) { + return (com.google.protobuf.Duration) ttl_; + } + return com.google.protobuf.Duration.getDefaultInstance(); + } else { + if (ttlCase_ == 3) { + return autoDeleteTtlBuilder_.getMessage(); + } + return com.google.protobuf.Duration.getDefaultInstance(); + } + } + /** + * + * + *
+     * Optional. The lifetime duration of the cluster. The cluster will be
+     * auto-deleted at the end of this period. Minimum value is 10 minutes;
+     * maximum value is 14 days (see JSON representation of
+     * [Duration](https://developers.google.com/protocol-buffers/docs/proto3#json)).
+     * 
+ * + * + * .google.protobuf.Duration auto_delete_ttl = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder setAutoDeleteTtl(com.google.protobuf.Duration value) { + if (autoDeleteTtlBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ttl_ = value; + onChanged(); + } else { + autoDeleteTtlBuilder_.setMessage(value); + } + ttlCase_ = 3; + return this; + } + /** + * + * + *
+     * Optional. The lifetime duration of the cluster. The cluster will be
+     * auto-deleted at the end of this period. Minimum value is 10 minutes;
+     * maximum value is 14 days (see JSON representation of
+     * [Duration](https://developers.google.com/protocol-buffers/docs/proto3#json)).
+     * 
+ * + * + * .google.protobuf.Duration auto_delete_ttl = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder setAutoDeleteTtl(com.google.protobuf.Duration.Builder builderForValue) { + if (autoDeleteTtlBuilder_ == null) { + ttl_ = builderForValue.build(); + onChanged(); + } else { + autoDeleteTtlBuilder_.setMessage(builderForValue.build()); + } + ttlCase_ = 3; + return this; + } + /** + * + * + *
+     * Optional. The lifetime duration of the cluster. The cluster will be
+     * auto-deleted at the end of this period. Minimum value is 10 minutes;
+     * maximum value is 14 days (see JSON representation of
+     * [Duration](https://developers.google.com/protocol-buffers/docs/proto3#json)).
+     * 
+ * + * + * .google.protobuf.Duration auto_delete_ttl = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder mergeAutoDeleteTtl(com.google.protobuf.Duration value) { + if (autoDeleteTtlBuilder_ == null) { + if (ttlCase_ == 3 && ttl_ != com.google.protobuf.Duration.getDefaultInstance()) { + ttl_ = + com.google.protobuf.Duration.newBuilder((com.google.protobuf.Duration) ttl_) + .mergeFrom(value) + .buildPartial(); + } else { + ttl_ = value; + } + onChanged(); + } else { + if (ttlCase_ == 3) { + autoDeleteTtlBuilder_.mergeFrom(value); + } + autoDeleteTtlBuilder_.setMessage(value); + } + ttlCase_ = 3; + return this; + } + /** + * + * + *
+     * Optional. The lifetime duration of the cluster. The cluster will be
+     * auto-deleted at the end of this period. Minimum value is 10 minutes;
+     * maximum value is 14 days (see JSON representation of
+     * [Duration](https://developers.google.com/protocol-buffers/docs/proto3#json)).
+     * 
+ * + * + * .google.protobuf.Duration auto_delete_ttl = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder clearAutoDeleteTtl() { + if (autoDeleteTtlBuilder_ == null) { + if (ttlCase_ == 3) { + ttlCase_ = 0; + ttl_ = null; + onChanged(); + } + } else { + if (ttlCase_ == 3) { + ttlCase_ = 0; + ttl_ = null; + } + autoDeleteTtlBuilder_.clear(); + } + return this; + } + /** + * + * + *
+     * Optional. The lifetime duration of the cluster. The cluster will be
+     * auto-deleted at the end of this period. Minimum value is 10 minutes;
+     * maximum value is 14 days (see JSON representation of
+     * [Duration](https://developers.google.com/protocol-buffers/docs/proto3#json)).
+     * 
+ * + * + * .google.protobuf.Duration auto_delete_ttl = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public com.google.protobuf.Duration.Builder getAutoDeleteTtlBuilder() { + return getAutoDeleteTtlFieldBuilder().getBuilder(); + } + /** + * + * + *
+     * Optional. The lifetime duration of the cluster. The cluster will be
+     * auto-deleted at the end of this period. Minimum value is 10 minutes;
+     * maximum value is 14 days (see JSON representation of
+     * [Duration](https://developers.google.com/protocol-buffers/docs/proto3#json)).
+     * 
+ * + * + * .google.protobuf.Duration auto_delete_ttl = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public com.google.protobuf.DurationOrBuilder getAutoDeleteTtlOrBuilder() { + if ((ttlCase_ == 3) && (autoDeleteTtlBuilder_ != null)) { + return autoDeleteTtlBuilder_.getMessageOrBuilder(); + } else { + if (ttlCase_ == 3) { + return (com.google.protobuf.Duration) ttl_; + } + return com.google.protobuf.Duration.getDefaultInstance(); + } + } + /** + * + * + *
+     * Optional. The lifetime duration of the cluster. The cluster will be
+     * auto-deleted at the end of this period. Minimum value is 10 minutes;
+     * maximum value is 14 days (see JSON representation of
+     * [Duration](https://developers.google.com/protocol-buffers/docs/proto3#json)).
+     * 
+ * + * + * .google.protobuf.Duration auto_delete_ttl = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + private com.google.protobuf.SingleFieldBuilderV3< + com.google.protobuf.Duration, + com.google.protobuf.Duration.Builder, + com.google.protobuf.DurationOrBuilder> + getAutoDeleteTtlFieldBuilder() { + if (autoDeleteTtlBuilder_ == null) { + if (!(ttlCase_ == 3)) { + ttl_ = com.google.protobuf.Duration.getDefaultInstance(); + } + autoDeleteTtlBuilder_ = + new com.google.protobuf.SingleFieldBuilderV3< + com.google.protobuf.Duration, + com.google.protobuf.Duration.Builder, + com.google.protobuf.DurationOrBuilder>( + (com.google.protobuf.Duration) ttl_, getParentForChildren(), isClean()); + ttl_ = null; + } + ttlCase_ = 3; + onChanged(); + ; + return autoDeleteTtlBuilder_; + } + + private com.google.protobuf.Timestamp idleStartTime_; + private com.google.protobuf.SingleFieldBuilderV3< + com.google.protobuf.Timestamp, + com.google.protobuf.Timestamp.Builder, + com.google.protobuf.TimestampOrBuilder> + idleStartTimeBuilder_; + /** + * + * + *
+     * Output only. The time when the cluster became idle (most recent job finished)
+     * and became eligible for deletion due to idleness (see JSON representation
+     * of
+     * [Timestamp](https://developers.google.com/protocol-buffers/docs/proto3#json)).
+     * 
+ * + * + * .google.protobuf.Timestamp idle_start_time = 4 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @return Whether the idleStartTime field is set. + */ + public boolean hasIdleStartTime() { + return idleStartTimeBuilder_ != null || idleStartTime_ != null; + } + /** + * + * + *
+     * Output only. The time when the cluster became idle (most recent job finished)
+     * and became eligible for deletion due to idleness (see JSON representation
+     * of
+     * [Timestamp](https://developers.google.com/protocol-buffers/docs/proto3#json)).
+     * 
+ * + * + * .google.protobuf.Timestamp idle_start_time = 4 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @return The idleStartTime. + */ + public com.google.protobuf.Timestamp getIdleStartTime() { + if (idleStartTimeBuilder_ == null) { + return idleStartTime_ == null + ? com.google.protobuf.Timestamp.getDefaultInstance() + : idleStartTime_; + } else { + return idleStartTimeBuilder_.getMessage(); + } + } + /** + * + * + *
+     * Output only. The time when the cluster became idle (most recent job finished)
+     * and became eligible for deletion due to idleness (see JSON representation
+     * of
+     * [Timestamp](https://developers.google.com/protocol-buffers/docs/proto3#json)).
+     * 
+ * + * + * .google.protobuf.Timestamp idle_start_time = 4 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public Builder setIdleStartTime(com.google.protobuf.Timestamp value) { + if (idleStartTimeBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + idleStartTime_ = value; + onChanged(); + } else { + idleStartTimeBuilder_.setMessage(value); + } + + return this; + } + /** + * + * + *
+     * Output only. The time when the cluster became idle (most recent job finished)
+     * and became eligible for deletion due to idleness (see JSON representation
+     * of
+     * [Timestamp](https://developers.google.com/protocol-buffers/docs/proto3#json)).
+     * 
+ * + * + * .google.protobuf.Timestamp idle_start_time = 4 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public Builder setIdleStartTime(com.google.protobuf.Timestamp.Builder builderForValue) { + if (idleStartTimeBuilder_ == null) { + idleStartTime_ = builderForValue.build(); + onChanged(); + } else { + idleStartTimeBuilder_.setMessage(builderForValue.build()); + } + + return this; + } + /** + * + * + *
+     * Output only. The time when the cluster became idle (most recent job finished)
+     * and became eligible for deletion due to idleness (see JSON representation
+     * of
+     * [Timestamp](https://developers.google.com/protocol-buffers/docs/proto3#json)).
+     * 
+ * + * + * .google.protobuf.Timestamp idle_start_time = 4 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public Builder mergeIdleStartTime(com.google.protobuf.Timestamp value) { + if (idleStartTimeBuilder_ == null) { + if (idleStartTime_ != null) { + idleStartTime_ = + com.google.protobuf.Timestamp.newBuilder(idleStartTime_) + .mergeFrom(value) + .buildPartial(); + } else { + idleStartTime_ = value; + } + onChanged(); + } else { + idleStartTimeBuilder_.mergeFrom(value); + } + + return this; + } + /** + * + * + *
+     * Output only. The time when cluster became idle (most recent job finished)
+     * and became eligible for deletion due to idleness (see JSON representation
+     * of
+     * [Timestamp](https://developers.google.com/protocol-buffers/docs/proto3#json)).
+     * 
+ * + * + * .google.protobuf.Timestamp idle_start_time = 4 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public Builder clearIdleStartTime() { + if (idleStartTimeBuilder_ == null) { + idleStartTime_ = null; + onChanged(); + } else { + idleStartTime_ = null; + idleStartTimeBuilder_ = null; + } + + return this; + } + /** + * + * + *
+     * Output only. The time when cluster became idle (most recent job finished)
+     * and became eligible for deletion due to idleness (see JSON representation
+     * of
+     * [Timestamp](https://developers.google.com/protocol-buffers/docs/proto3#json)).
+     * 
+ * + * + * .google.protobuf.Timestamp idle_start_time = 4 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public com.google.protobuf.Timestamp.Builder getIdleStartTimeBuilder() { + + onChanged(); + return getIdleStartTimeFieldBuilder().getBuilder(); + } + /** + * + * + *
+     * Output only. The time when cluster became idle (most recent job finished)
+     * and became eligible for deletion due to idleness (see JSON representation
+     * of
+     * [Timestamp](https://developers.google.com/protocol-buffers/docs/proto3#json)).
+     * 
+ * + * + * .google.protobuf.Timestamp idle_start_time = 4 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public com.google.protobuf.TimestampOrBuilder getIdleStartTimeOrBuilder() { + if (idleStartTimeBuilder_ != null) { + return idleStartTimeBuilder_.getMessageOrBuilder(); + } else { + return idleStartTime_ == null + ? com.google.protobuf.Timestamp.getDefaultInstance() + : idleStartTime_; + } + } + /** + * + * + *
+     * Output only. The time when cluster became idle (most recent job finished)
+     * and became eligible for deletion due to idleness (see JSON representation
+     * of
+     * [Timestamp](https://developers.google.com/protocol-buffers/docs/proto3#json)).
+     * 
+ * + * + * .google.protobuf.Timestamp idle_start_time = 4 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + private com.google.protobuf.SingleFieldBuilderV3< + com.google.protobuf.Timestamp, + com.google.protobuf.Timestamp.Builder, + com.google.protobuf.TimestampOrBuilder> + getIdleStartTimeFieldBuilder() { + if (idleStartTimeBuilder_ == null) { + idleStartTimeBuilder_ = + new com.google.protobuf.SingleFieldBuilderV3< + com.google.protobuf.Timestamp, + com.google.protobuf.Timestamp.Builder, + com.google.protobuf.TimestampOrBuilder>( + getIdleStartTime(), getParentForChildren(), isClean()); + idleStartTime_ = null; + } + return idleStartTimeBuilder_; + } + + @java.lang.Override + public final Builder setUnknownFields(final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.setUnknownFields(unknownFields); + } + + @java.lang.Override + public final Builder mergeUnknownFields( + final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.mergeUnknownFields(unknownFields); + } + + // @@protoc_insertion_point(builder_scope:google.cloud.dataproc.v1.LifecycleConfig) + } + + // @@protoc_insertion_point(class_scope:google.cloud.dataproc.v1.LifecycleConfig) + private static final com.google.cloud.dataproc.v1.LifecycleConfig DEFAULT_INSTANCE; + + static { + DEFAULT_INSTANCE = new com.google.cloud.dataproc.v1.LifecycleConfig(); + } + + public static com.google.cloud.dataproc.v1.LifecycleConfig getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + @java.lang.Override + public LifecycleConfig parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return new LifecycleConfig(input, extensionRegistry); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + 
@java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override + public com.google.cloud.dataproc.v1.LifecycleConfig getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } +} diff --git a/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/LifecycleConfigOrBuilder.java b/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/LifecycleConfigOrBuilder.java new file mode 100644 index 00000000..624e8e12 --- /dev/null +++ b/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/LifecycleConfigOrBuilder.java @@ -0,0 +1,218 @@ +/* + * Copyright 2020 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// source: google/cloud/dataproc/v1/clusters.proto + +package com.google.cloud.dataproc.v1; + +public interface LifecycleConfigOrBuilder + extends + // @@protoc_insertion_point(interface_extends:google.cloud.dataproc.v1.LifecycleConfig) + com.google.protobuf.MessageOrBuilder { + + /** + * + * + *
+   * Optional. The duration to keep the cluster alive while idling (when no jobs
+   * are running). Passing this threshold will cause the cluster to be
+   * deleted. Minimum value is 10 minutes; maximum value is 14 days (see JSON
+   * representation of
+   * [Duration](https://developers.google.com/protocol-buffers/docs/proto3#json)).
+   * 
+ * + * .google.protobuf.Duration idle_delete_ttl = 1 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return Whether the idleDeleteTtl field is set. + */ + boolean hasIdleDeleteTtl(); + /** + * + * + *
+   * Optional. The duration to keep the cluster alive while idling (when no jobs
+   * are running). Passing this threshold will cause the cluster to be
+   * deleted. Minimum value is 10 minutes; maximum value is 14 days (see JSON
+   * representation of
+   * [Duration](https://developers.google.com/protocol-buffers/docs/proto3#json)).
+   * 
+ * + * .google.protobuf.Duration idle_delete_ttl = 1 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return The idleDeleteTtl. + */ + com.google.protobuf.Duration getIdleDeleteTtl(); + /** + * + * + *
+   * Optional. The duration to keep the cluster alive while idling (when no jobs
+   * are running). Passing this threshold will cause the cluster to be
+   * deleted. Minimum value is 10 minutes; maximum value is 14 days (see JSON
+   * representation of
+   * [Duration](https://developers.google.com/protocol-buffers/docs/proto3#json)).
+   * 
+ * + * .google.protobuf.Duration idle_delete_ttl = 1 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + com.google.protobuf.DurationOrBuilder getIdleDeleteTtlOrBuilder(); + + /** + * + * + *
+   * Optional. The time when cluster will be auto-deleted (see JSON representation of
+   * [Timestamp](https://developers.google.com/protocol-buffers/docs/proto3#json)).
+   * 
+ * + * + * .google.protobuf.Timestamp auto_delete_time = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return Whether the autoDeleteTime field is set. + */ + boolean hasAutoDeleteTime(); + /** + * + * + *
+   * Optional. The time when cluster will be auto-deleted (see JSON representation of
+   * [Timestamp](https://developers.google.com/protocol-buffers/docs/proto3#json)).
+   * 
+ * + * + * .google.protobuf.Timestamp auto_delete_time = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return The autoDeleteTime. + */ + com.google.protobuf.Timestamp getAutoDeleteTime(); + /** + * + * + *
+   * Optional. The time when cluster will be auto-deleted (see JSON representation of
+   * [Timestamp](https://developers.google.com/protocol-buffers/docs/proto3#json)).
+   * 
+ * + * + * .google.protobuf.Timestamp auto_delete_time = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + com.google.protobuf.TimestampOrBuilder getAutoDeleteTimeOrBuilder(); + + /** + * + * + *
+   * Optional. The lifetime duration of cluster. The cluster will be
+   * auto-deleted at the end of this period. Minimum value is 10 minutes;
+   * maximum value is 14 days (see JSON representation of
+   * [Duration](https://developers.google.com/protocol-buffers/docs/proto3#json)).
+   * 
+ * + * .google.protobuf.Duration auto_delete_ttl = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return Whether the autoDeleteTtl field is set. + */ + boolean hasAutoDeleteTtl(); + /** + * + * + *
+   * Optional. The lifetime duration of cluster. The cluster will be
+   * auto-deleted at the end of this period. Minimum value is 10 minutes;
+   * maximum value is 14 days (see JSON representation of
+   * [Duration](https://developers.google.com/protocol-buffers/docs/proto3#json)).
+   * 
+ * + * .google.protobuf.Duration auto_delete_ttl = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return The autoDeleteTtl. + */ + com.google.protobuf.Duration getAutoDeleteTtl(); + /** + * + * + *
+   * Optional. The lifetime duration of cluster. The cluster will be
+   * auto-deleted at the end of this period. Minimum value is 10 minutes;
+   * maximum value is 14 days (see JSON representation of
+   * [Duration](https://developers.google.com/protocol-buffers/docs/proto3#json)).
+   * 
+ * + * .google.protobuf.Duration auto_delete_ttl = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + com.google.protobuf.DurationOrBuilder getAutoDeleteTtlOrBuilder(); + + /** + * + * + *
+   * Output only. The time when cluster became idle (most recent job finished)
+   * and became eligible for deletion due to idleness (see JSON representation
+   * of
+   * [Timestamp](https://developers.google.com/protocol-buffers/docs/proto3#json)).
+   * 
+ * + * + * .google.protobuf.Timestamp idle_start_time = 4 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @return Whether the idleStartTime field is set. + */ + boolean hasIdleStartTime(); + /** + * + * + *
+   * Output only. The time when cluster became idle (most recent job finished)
+   * and became eligible for deletion due to idleness (see JSON representation
+   * of
+   * [Timestamp](https://developers.google.com/protocol-buffers/docs/proto3#json)).
+   * 
+ * + * + * .google.protobuf.Timestamp idle_start_time = 4 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @return The idleStartTime. + */ + com.google.protobuf.Timestamp getIdleStartTime(); + /** + * + * + *
+   * Output only. The time when cluster became idle (most recent job finished)
+   * and became eligible for deletion due to idleness (see JSON representation
+   * of
+   * [Timestamp](https://developers.google.com/protocol-buffers/docs/proto3#json)).
+   * 
+ * + * + * .google.protobuf.Timestamp idle_start_time = 4 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + com.google.protobuf.TimestampOrBuilder getIdleStartTimeOrBuilder(); + + public com.google.cloud.dataproc.v1.LifecycleConfig.TtlCase getTtlCase(); +} diff --git a/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/NodeInitializationAction.java b/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/NodeInitializationAction.java index efa2c1d8..ebbd7de2 100644 --- a/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/NodeInitializationAction.java +++ b/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/NodeInitializationAction.java @@ -181,7 +181,9 @@ public com.google.protobuf.ByteString getExecutableFileBytes() { * *
    * Optional. Amount of time executable has to complete. Default is
-   * 10 minutes. Cluster creation fails with an explanatory error message (the
+   * 10 minutes (see JSON representation of
+   * [Duration](https://developers.google.com/protocol-buffers/docs/proto3#json)).
+   * Cluster creation fails with an explanatory error message (the
    * name of the executable that caused the error and the exceeded timeout
    * period) if the executable is not completed at end of the timeout period.
    * 
@@ -200,7 +202,9 @@ public boolean hasExecutionTimeout() { * *
    * Optional. Amount of time executable has to complete. Default is
-   * 10 minutes. Cluster creation fails with an explanatory error message (the
+   * 10 minutes (see JSON representation of
+   * [Duration](https://developers.google.com/protocol-buffers/docs/proto3#json)).
+   * Cluster creation fails with an explanatory error message (the
    * name of the executable that caused the error and the exceeded timeout
    * period) if the executable is not completed at end of the timeout period.
    * 
@@ -221,7 +225,9 @@ public com.google.protobuf.Duration getExecutionTimeout() { * *
    * Optional. Amount of time executable has to complete. Default is
-   * 10 minutes. Cluster creation fails with an explanatory error message (the
+   * 10 minutes (see JSON representation of
+   * [Duration](https://developers.google.com/protocol-buffers/docs/proto3#json)).
+   * Cluster creation fails with an explanatory error message (the
    * name of the executable that caused the error and the exceeded timeout
    * period) if the executable is not completed at end of the timeout period.
    * 
@@ -699,7 +705,9 @@ public Builder setExecutableFileBytes(com.google.protobuf.ByteString value) { * *
      * Optional. Amount of time executable has to complete. Default is
-     * 10 minutes. Cluster creation fails with an explanatory error message (the
+     * 10 minutes (see JSON representation of
+     * [Duration](https://developers.google.com/protocol-buffers/docs/proto3#json)).
+     * Cluster creation fails with an explanatory error message (the
      * name of the executable that caused the error and the exceeded timeout
      * period) if the executable is not completed at end of the timeout period.
      * 
@@ -718,7 +726,9 @@ public boolean hasExecutionTimeout() { * *
      * Optional. Amount of time executable has to complete. Default is
-     * 10 minutes. Cluster creation fails with an explanatory error message (the
+     * 10 minutes (see JSON representation of
+     * [Duration](https://developers.google.com/protocol-buffers/docs/proto3#json)).
+     * Cluster creation fails with an explanatory error message (the
      * name of the executable that caused the error and the exceeded timeout
      * period) if the executable is not completed at end of the timeout period.
      * 
@@ -743,7 +753,9 @@ public com.google.protobuf.Duration getExecutionTimeout() { * *
      * Optional. Amount of time executable has to complete. Default is
-     * 10 minutes. Cluster creation fails with an explanatory error message (the
+     * 10 minutes (see JSON representation of
+     * [Duration](https://developers.google.com/protocol-buffers/docs/proto3#json)).
+     * Cluster creation fails with an explanatory error message (the
      * name of the executable that caused the error and the exceeded timeout
      * period) if the executable is not completed at end of the timeout period.
      * 
@@ -770,7 +782,9 @@ public Builder setExecutionTimeout(com.google.protobuf.Duration value) { * *
      * Optional. Amount of time executable has to complete. Default is
-     * 10 minutes. Cluster creation fails with an explanatory error message (the
+     * 10 minutes (see JSON representation of
+     * [Duration](https://developers.google.com/protocol-buffers/docs/proto3#json)).
+     * Cluster creation fails with an explanatory error message (the
      * name of the executable that caused the error and the exceeded timeout
      * period) if the executable is not completed at end of the timeout period.
      * 
@@ -794,7 +808,9 @@ public Builder setExecutionTimeout(com.google.protobuf.Duration.Builder builderF * *
      * Optional. Amount of time executable has to complete. Default is
-     * 10 minutes. Cluster creation fails with an explanatory error message (the
+     * 10 minutes (see JSON representation of
+     * [Duration](https://developers.google.com/protocol-buffers/docs/proto3#json)).
+     * Cluster creation fails with an explanatory error message (the
      * name of the executable that caused the error and the exceeded timeout
      * period) if the executable is not completed at end of the timeout period.
      * 
@@ -825,7 +841,9 @@ public Builder mergeExecutionTimeout(com.google.protobuf.Duration value) { * *
      * Optional. Amount of time executable has to complete. Default is
-     * 10 minutes. Cluster creation fails with an explanatory error message (the
+     * 10 minutes (see JSON representation of
+     * [Duration](https://developers.google.com/protocol-buffers/docs/proto3#json)).
+     * Cluster creation fails with an explanatory error message (the
      * name of the executable that caused the error and the exceeded timeout
      * period) if the executable is not completed at end of the timeout period.
      * 
@@ -850,7 +868,9 @@ public Builder clearExecutionTimeout() { * *
      * Optional. Amount of time executable has to complete. Default is
-     * 10 minutes. Cluster creation fails with an explanatory error message (the
+     * 10 minutes (see JSON representation of
+     * [Duration](https://developers.google.com/protocol-buffers/docs/proto3#json)).
+     * Cluster creation fails with an explanatory error message (the
      * name of the executable that caused the error and the exceeded timeout
      * period) if the executable is not completed at end of the timeout period.
      * 
@@ -869,7 +889,9 @@ public com.google.protobuf.Duration.Builder getExecutionTimeoutBuilder() { * *
      * Optional. Amount of time executable has to complete. Default is
-     * 10 minutes. Cluster creation fails with an explanatory error message (the
+     * 10 minutes (see JSON representation of
+     * [Duration](https://developers.google.com/protocol-buffers/docs/proto3#json)).
+     * Cluster creation fails with an explanatory error message (the
      * name of the executable that caused the error and the exceeded timeout
      * period) if the executable is not completed at end of the timeout period.
      * 
@@ -892,7 +914,9 @@ public com.google.protobuf.DurationOrBuilder getExecutionTimeoutOrBuilder() { * *
      * Optional. Amount of time executable has to complete. Default is
-     * 10 minutes. Cluster creation fails with an explanatory error message (the
+     * 10 minutes (see JSON representation of
+     * [Duration](https://developers.google.com/protocol-buffers/docs/proto3#json)).
+     * Cluster creation fails with an explanatory error message (the
      * name of the executable that caused the error and the exceeded timeout
      * period) if the executable is not completed at end of the timeout period.
      * 
diff --git a/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/NodeInitializationActionOrBuilder.java b/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/NodeInitializationActionOrBuilder.java index 716e0840..a16592c0 100644 --- a/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/NodeInitializationActionOrBuilder.java +++ b/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/NodeInitializationActionOrBuilder.java @@ -53,7 +53,9 @@ public interface NodeInitializationActionOrBuilder * *
    * Optional. Amount of time executable has to complete. Default is
-   * 10 minutes. Cluster creation fails with an explanatory error message (the
+   * 10 minutes (see JSON representation of
+   * [Duration](https://developers.google.com/protocol-buffers/docs/proto3#json)).
+   * Cluster creation fails with an explanatory error message (the
    * name of the executable that caused the error and the exceeded timeout
    * period) if the executable is not completed at end of the timeout period.
    * 
@@ -70,7 +72,9 @@ public interface NodeInitializationActionOrBuilder * *
    * Optional. Amount of time executable has to complete. Default is
-   * 10 minutes. Cluster creation fails with an explanatory error message (the
+   * 10 minutes (see JSON representation of
+   * [Duration](https://developers.google.com/protocol-buffers/docs/proto3#json)).
+   * Cluster creation fails with an explanatory error message (the
    * name of the executable that caused the error and the exceeded timeout
    * period) if the executable is not completed at end of the timeout period.
    * 
@@ -87,7 +91,9 @@ public interface NodeInitializationActionOrBuilder * *
    * Optional. Amount of time executable has to complete. Default is
-   * 10 minutes. Cluster creation fails with an explanatory error message (the
+   * 10 minutes (see JSON representation of
+   * [Duration](https://developers.google.com/protocol-buffers/docs/proto3#json)).
+   * Cluster creation fails with an explanatory error message (the
    * name of the executable that caused the error and the exceeded timeout
    * period) if the executable is not completed at end of the timeout period.
    * 
diff --git a/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/OrderedJob.java b/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/OrderedJob.java index 33d73bc8..db3dfd1f 100644 --- a/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/OrderedJob.java +++ b/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/OrderedJob.java @@ -386,12 +386,6 @@ public com.google.protobuf.ByteString getStepIdBytes() { public static final int HADOOP_JOB_FIELD_NUMBER = 2; /** - * - * - *
-   * Job is a Hadoop job.
-   * 
- * * .google.cloud.dataproc.v1.HadoopJob hadoop_job = 2; * * @return Whether the hadoopJob field is set. @@ -400,12 +394,6 @@ public boolean hasHadoopJob() { return jobTypeCase_ == 2; } /** - * - * - *
-   * Job is a Hadoop job.
-   * 
- * * .google.cloud.dataproc.v1.HadoopJob hadoop_job = 2; * * @return The hadoopJob. @@ -416,15 +404,7 @@ public com.google.cloud.dataproc.v1.HadoopJob getHadoopJob() { } return com.google.cloud.dataproc.v1.HadoopJob.getDefaultInstance(); } - /** - * - * - *
-   * Job is a Hadoop job.
-   * 
- * - * .google.cloud.dataproc.v1.HadoopJob hadoop_job = 2; - */ + /** .google.cloud.dataproc.v1.HadoopJob hadoop_job = 2; */ public com.google.cloud.dataproc.v1.HadoopJobOrBuilder getHadoopJobOrBuilder() { if (jobTypeCase_ == 2) { return (com.google.cloud.dataproc.v1.HadoopJob) jobType_; @@ -434,12 +414,6 @@ public com.google.cloud.dataproc.v1.HadoopJobOrBuilder getHadoopJobOrBuilder() { public static final int SPARK_JOB_FIELD_NUMBER = 3; /** - * - * - *
-   * Job is a Spark job.
-   * 
- * * .google.cloud.dataproc.v1.SparkJob spark_job = 3; * * @return Whether the sparkJob field is set. @@ -448,12 +422,6 @@ public boolean hasSparkJob() { return jobTypeCase_ == 3; } /** - * - * - *
-   * Job is a Spark job.
-   * 
- * * .google.cloud.dataproc.v1.SparkJob spark_job = 3; * * @return The sparkJob. @@ -464,15 +432,7 @@ public com.google.cloud.dataproc.v1.SparkJob getSparkJob() { } return com.google.cloud.dataproc.v1.SparkJob.getDefaultInstance(); } - /** - * - * - *
-   * Job is a Spark job.
-   * 
- * - * .google.cloud.dataproc.v1.SparkJob spark_job = 3; - */ + /** .google.cloud.dataproc.v1.SparkJob spark_job = 3; */ public com.google.cloud.dataproc.v1.SparkJobOrBuilder getSparkJobOrBuilder() { if (jobTypeCase_ == 3) { return (com.google.cloud.dataproc.v1.SparkJob) jobType_; @@ -482,12 +442,6 @@ public com.google.cloud.dataproc.v1.SparkJobOrBuilder getSparkJobOrBuilder() { public static final int PYSPARK_JOB_FIELD_NUMBER = 4; /** - * - * - *
-   * Job is a Pyspark job.
-   * 
- * * .google.cloud.dataproc.v1.PySparkJob pyspark_job = 4; * * @return Whether the pysparkJob field is set. @@ -496,12 +450,6 @@ public boolean hasPysparkJob() { return jobTypeCase_ == 4; } /** - * - * - *
-   * Job is a Pyspark job.
-   * 
- * * .google.cloud.dataproc.v1.PySparkJob pyspark_job = 4; * * @return The pysparkJob. @@ -512,15 +460,7 @@ public com.google.cloud.dataproc.v1.PySparkJob getPysparkJob() { } return com.google.cloud.dataproc.v1.PySparkJob.getDefaultInstance(); } - /** - * - * - *
-   * Job is a Pyspark job.
-   * 
- * - * .google.cloud.dataproc.v1.PySparkJob pyspark_job = 4; - */ + /** .google.cloud.dataproc.v1.PySparkJob pyspark_job = 4; */ public com.google.cloud.dataproc.v1.PySparkJobOrBuilder getPysparkJobOrBuilder() { if (jobTypeCase_ == 4) { return (com.google.cloud.dataproc.v1.PySparkJob) jobType_; @@ -530,12 +470,6 @@ public com.google.cloud.dataproc.v1.PySparkJobOrBuilder getPysparkJobOrBuilder() public static final int HIVE_JOB_FIELD_NUMBER = 5; /** - * - * - *
-   * Job is a Hive job.
-   * 
- * * .google.cloud.dataproc.v1.HiveJob hive_job = 5; * * @return Whether the hiveJob field is set. @@ -544,12 +478,6 @@ public boolean hasHiveJob() { return jobTypeCase_ == 5; } /** - * - * - *
-   * Job is a Hive job.
-   * 
- * * .google.cloud.dataproc.v1.HiveJob hive_job = 5; * * @return The hiveJob. @@ -560,15 +488,7 @@ public com.google.cloud.dataproc.v1.HiveJob getHiveJob() { } return com.google.cloud.dataproc.v1.HiveJob.getDefaultInstance(); } - /** - * - * - *
-   * Job is a Hive job.
-   * 
- * - * .google.cloud.dataproc.v1.HiveJob hive_job = 5; - */ + /** .google.cloud.dataproc.v1.HiveJob hive_job = 5; */ public com.google.cloud.dataproc.v1.HiveJobOrBuilder getHiveJobOrBuilder() { if (jobTypeCase_ == 5) { return (com.google.cloud.dataproc.v1.HiveJob) jobType_; @@ -578,12 +498,6 @@ public com.google.cloud.dataproc.v1.HiveJobOrBuilder getHiveJobOrBuilder() { public static final int PIG_JOB_FIELD_NUMBER = 6; /** - * - * - *
-   * Job is a Pig job.
-   * 
- * * .google.cloud.dataproc.v1.PigJob pig_job = 6; * * @return Whether the pigJob field is set. @@ -592,12 +506,6 @@ public boolean hasPigJob() { return jobTypeCase_ == 6; } /** - * - * - *
-   * Job is a Pig job.
-   * 
- * * .google.cloud.dataproc.v1.PigJob pig_job = 6; * * @return The pigJob. @@ -608,15 +516,7 @@ public com.google.cloud.dataproc.v1.PigJob getPigJob() { } return com.google.cloud.dataproc.v1.PigJob.getDefaultInstance(); } - /** - * - * - *
-   * Job is a Pig job.
-   * 
- * - * .google.cloud.dataproc.v1.PigJob pig_job = 6; - */ + /** .google.cloud.dataproc.v1.PigJob pig_job = 6; */ public com.google.cloud.dataproc.v1.PigJobOrBuilder getPigJobOrBuilder() { if (jobTypeCase_ == 6) { return (com.google.cloud.dataproc.v1.PigJob) jobType_; @@ -626,12 +526,6 @@ public com.google.cloud.dataproc.v1.PigJobOrBuilder getPigJobOrBuilder() { public static final int SPARK_SQL_JOB_FIELD_NUMBER = 7; /** - * - * - *
-   * Job is a SparkSql job.
-   * 
- * * .google.cloud.dataproc.v1.SparkSqlJob spark_sql_job = 7; * * @return Whether the sparkSqlJob field is set. @@ -640,12 +534,6 @@ public boolean hasSparkSqlJob() { return jobTypeCase_ == 7; } /** - * - * - *
-   * Job is a SparkSql job.
-   * 
- * * .google.cloud.dataproc.v1.SparkSqlJob spark_sql_job = 7; * * @return The sparkSqlJob. @@ -656,15 +544,7 @@ public com.google.cloud.dataproc.v1.SparkSqlJob getSparkSqlJob() { } return com.google.cloud.dataproc.v1.SparkSqlJob.getDefaultInstance(); } - /** - * - * - *
-   * Job is a SparkSql job.
-   * 
- * - * .google.cloud.dataproc.v1.SparkSqlJob spark_sql_job = 7; - */ + /** .google.cloud.dataproc.v1.SparkSqlJob spark_sql_job = 7; */ public com.google.cloud.dataproc.v1.SparkSqlJobOrBuilder getSparkSqlJobOrBuilder() { if (jobTypeCase_ == 7) { return (com.google.cloud.dataproc.v1.SparkSqlJob) jobType_; @@ -1676,12 +1556,6 @@ public Builder setStepIdBytes(com.google.protobuf.ByteString value) { com.google.cloud.dataproc.v1.HadoopJobOrBuilder> hadoopJobBuilder_; /** - * - * - *
-     * Job is a Hadoop job.
-     * 
- * * .google.cloud.dataproc.v1.HadoopJob hadoop_job = 2; * * @return Whether the hadoopJob field is set. @@ -1690,12 +1564,6 @@ public boolean hasHadoopJob() { return jobTypeCase_ == 2; } /** - * - * - *
-     * Job is a Hadoop job.
-     * 
- * * .google.cloud.dataproc.v1.HadoopJob hadoop_job = 2; * * @return The hadoopJob. @@ -1713,15 +1581,7 @@ public com.google.cloud.dataproc.v1.HadoopJob getHadoopJob() { return com.google.cloud.dataproc.v1.HadoopJob.getDefaultInstance(); } } - /** - * - * - *
-     * Job is a Hadoop job.
-     * 
- * - * .google.cloud.dataproc.v1.HadoopJob hadoop_job = 2; - */ + /** .google.cloud.dataproc.v1.HadoopJob hadoop_job = 2; */ public Builder setHadoopJob(com.google.cloud.dataproc.v1.HadoopJob value) { if (hadoopJobBuilder_ == null) { if (value == null) { @@ -1735,15 +1595,7 @@ public Builder setHadoopJob(com.google.cloud.dataproc.v1.HadoopJob value) { jobTypeCase_ = 2; return this; } - /** - * - * - *
-     * Job is a Hadoop job.
-     * 
- * - * .google.cloud.dataproc.v1.HadoopJob hadoop_job = 2; - */ + /** .google.cloud.dataproc.v1.HadoopJob hadoop_job = 2; */ public Builder setHadoopJob(com.google.cloud.dataproc.v1.HadoopJob.Builder builderForValue) { if (hadoopJobBuilder_ == null) { jobType_ = builderForValue.build(); @@ -1754,15 +1606,7 @@ public Builder setHadoopJob(com.google.cloud.dataproc.v1.HadoopJob.Builder build jobTypeCase_ = 2; return this; } - /** - * - * - *
-     * Job is a Hadoop job.
-     * 
- * - * .google.cloud.dataproc.v1.HadoopJob hadoop_job = 2; - */ + /** .google.cloud.dataproc.v1.HadoopJob hadoop_job = 2; */ public Builder mergeHadoopJob(com.google.cloud.dataproc.v1.HadoopJob value) { if (hadoopJobBuilder_ == null) { if (jobTypeCase_ == 2 @@ -1785,15 +1629,7 @@ public Builder mergeHadoopJob(com.google.cloud.dataproc.v1.HadoopJob value) { jobTypeCase_ = 2; return this; } - /** - * - * - *
-     * Job is a Hadoop job.
-     * 
- * - * .google.cloud.dataproc.v1.HadoopJob hadoop_job = 2; - */ + /** .google.cloud.dataproc.v1.HadoopJob hadoop_job = 2; */ public Builder clearHadoopJob() { if (hadoopJobBuilder_ == null) { if (jobTypeCase_ == 2) { @@ -1810,27 +1646,11 @@ public Builder clearHadoopJob() { } return this; } - /** - * - * - *
-     * Job is a Hadoop job.
-     * 
- * - * .google.cloud.dataproc.v1.HadoopJob hadoop_job = 2; - */ + /** .google.cloud.dataproc.v1.HadoopJob hadoop_job = 2; */ public com.google.cloud.dataproc.v1.HadoopJob.Builder getHadoopJobBuilder() { return getHadoopJobFieldBuilder().getBuilder(); } - /** - * - * - *
-     * Job is a Hadoop job.
-     * 
- * - * .google.cloud.dataproc.v1.HadoopJob hadoop_job = 2; - */ + /** .google.cloud.dataproc.v1.HadoopJob hadoop_job = 2; */ public com.google.cloud.dataproc.v1.HadoopJobOrBuilder getHadoopJobOrBuilder() { if ((jobTypeCase_ == 2) && (hadoopJobBuilder_ != null)) { return hadoopJobBuilder_.getMessageOrBuilder(); @@ -1841,15 +1661,7 @@ public com.google.cloud.dataproc.v1.HadoopJobOrBuilder getHadoopJobOrBuilder() { return com.google.cloud.dataproc.v1.HadoopJob.getDefaultInstance(); } } - /** - * - * - *
-     * Job is a Hadoop job.
-     * 
- * - * .google.cloud.dataproc.v1.HadoopJob hadoop_job = 2; - */ + /** .google.cloud.dataproc.v1.HadoopJob hadoop_job = 2; */ private com.google.protobuf.SingleFieldBuilderV3< com.google.cloud.dataproc.v1.HadoopJob, com.google.cloud.dataproc.v1.HadoopJob.Builder, @@ -1881,12 +1693,6 @@ public com.google.cloud.dataproc.v1.HadoopJobOrBuilder getHadoopJobOrBuilder() { com.google.cloud.dataproc.v1.SparkJobOrBuilder> sparkJobBuilder_; /** - * - * - *
-     * Job is a Spark job.
-     * 
- * * .google.cloud.dataproc.v1.SparkJob spark_job = 3; * * @return Whether the sparkJob field is set. @@ -1895,12 +1701,6 @@ public boolean hasSparkJob() { return jobTypeCase_ == 3; } /** - * - * - *
-     * Job is a Spark job.
-     * 
- * * .google.cloud.dataproc.v1.SparkJob spark_job = 3; * * @return The sparkJob. @@ -1918,15 +1718,7 @@ public com.google.cloud.dataproc.v1.SparkJob getSparkJob() { return com.google.cloud.dataproc.v1.SparkJob.getDefaultInstance(); } } - /** - * - * - *
-     * Job is a Spark job.
-     * 
- * - * .google.cloud.dataproc.v1.SparkJob spark_job = 3; - */ + /** .google.cloud.dataproc.v1.SparkJob spark_job = 3; */ public Builder setSparkJob(com.google.cloud.dataproc.v1.SparkJob value) { if (sparkJobBuilder_ == null) { if (value == null) { @@ -1940,15 +1732,7 @@ public Builder setSparkJob(com.google.cloud.dataproc.v1.SparkJob value) { jobTypeCase_ = 3; return this; } - /** - * - * - *
-     * Job is a Spark job.
-     * 
- * - * .google.cloud.dataproc.v1.SparkJob spark_job = 3; - */ + /** .google.cloud.dataproc.v1.SparkJob spark_job = 3; */ public Builder setSparkJob(com.google.cloud.dataproc.v1.SparkJob.Builder builderForValue) { if (sparkJobBuilder_ == null) { jobType_ = builderForValue.build(); @@ -1959,15 +1743,7 @@ public Builder setSparkJob(com.google.cloud.dataproc.v1.SparkJob.Builder builder jobTypeCase_ = 3; return this; } - /** - * - * - *
-     * Job is a Spark job.
-     * 
- * - * .google.cloud.dataproc.v1.SparkJob spark_job = 3; - */ + /** .google.cloud.dataproc.v1.SparkJob spark_job = 3; */ public Builder mergeSparkJob(com.google.cloud.dataproc.v1.SparkJob value) { if (sparkJobBuilder_ == null) { if (jobTypeCase_ == 3 @@ -1990,15 +1766,7 @@ public Builder mergeSparkJob(com.google.cloud.dataproc.v1.SparkJob value) { jobTypeCase_ = 3; return this; } - /** - * - * - *
-     * Job is a Spark job.
-     * 
- * - * .google.cloud.dataproc.v1.SparkJob spark_job = 3; - */ + /** .google.cloud.dataproc.v1.SparkJob spark_job = 3; */ public Builder clearSparkJob() { if (sparkJobBuilder_ == null) { if (jobTypeCase_ == 3) { @@ -2015,27 +1783,11 @@ public Builder clearSparkJob() { } return this; } - /** - * - * - *
-     * Job is a Spark job.
-     * 
- * - * .google.cloud.dataproc.v1.SparkJob spark_job = 3; - */ + /** .google.cloud.dataproc.v1.SparkJob spark_job = 3; */ public com.google.cloud.dataproc.v1.SparkJob.Builder getSparkJobBuilder() { return getSparkJobFieldBuilder().getBuilder(); } - /** - * - * - *
-     * Job is a Spark job.
-     * 
- * - * .google.cloud.dataproc.v1.SparkJob spark_job = 3; - */ + /** .google.cloud.dataproc.v1.SparkJob spark_job = 3; */ public com.google.cloud.dataproc.v1.SparkJobOrBuilder getSparkJobOrBuilder() { if ((jobTypeCase_ == 3) && (sparkJobBuilder_ != null)) { return sparkJobBuilder_.getMessageOrBuilder(); @@ -2046,15 +1798,7 @@ public com.google.cloud.dataproc.v1.SparkJobOrBuilder getSparkJobOrBuilder() { return com.google.cloud.dataproc.v1.SparkJob.getDefaultInstance(); } } - /** - * - * - *
-     * Job is a Spark job.
-     * 
- * - * .google.cloud.dataproc.v1.SparkJob spark_job = 3; - */ + /** .google.cloud.dataproc.v1.SparkJob spark_job = 3; */ private com.google.protobuf.SingleFieldBuilderV3< com.google.cloud.dataproc.v1.SparkJob, com.google.cloud.dataproc.v1.SparkJob.Builder, @@ -2086,12 +1830,6 @@ public com.google.cloud.dataproc.v1.SparkJobOrBuilder getSparkJobOrBuilder() { com.google.cloud.dataproc.v1.PySparkJobOrBuilder> pysparkJobBuilder_; /** - * - * - *
-     * Job is a Pyspark job.
-     * 
- * * .google.cloud.dataproc.v1.PySparkJob pyspark_job = 4; * * @return Whether the pysparkJob field is set. @@ -2100,12 +1838,6 @@ public boolean hasPysparkJob() { return jobTypeCase_ == 4; } /** - * - * - *
-     * Job is a Pyspark job.
-     * 
- * * .google.cloud.dataproc.v1.PySparkJob pyspark_job = 4; * * @return The pysparkJob. @@ -2123,15 +1855,7 @@ public com.google.cloud.dataproc.v1.PySparkJob getPysparkJob() { return com.google.cloud.dataproc.v1.PySparkJob.getDefaultInstance(); } } - /** - * - * - *
-     * Job is a Pyspark job.
-     * 
- * - * .google.cloud.dataproc.v1.PySparkJob pyspark_job = 4; - */ + /** .google.cloud.dataproc.v1.PySparkJob pyspark_job = 4; */ public Builder setPysparkJob(com.google.cloud.dataproc.v1.PySparkJob value) { if (pysparkJobBuilder_ == null) { if (value == null) { @@ -2145,15 +1869,7 @@ public Builder setPysparkJob(com.google.cloud.dataproc.v1.PySparkJob value) { jobTypeCase_ = 4; return this; } - /** - * - * - *
-     * Job is a Pyspark job.
-     * 
- * - * .google.cloud.dataproc.v1.PySparkJob pyspark_job = 4; - */ + /** .google.cloud.dataproc.v1.PySparkJob pyspark_job = 4; */ public Builder setPysparkJob(com.google.cloud.dataproc.v1.PySparkJob.Builder builderForValue) { if (pysparkJobBuilder_ == null) { jobType_ = builderForValue.build(); @@ -2164,15 +1880,7 @@ public Builder setPysparkJob(com.google.cloud.dataproc.v1.PySparkJob.Builder bui jobTypeCase_ = 4; return this; } - /** - * - * - *
-     * Job is a Pyspark job.
-     * 
- * - * .google.cloud.dataproc.v1.PySparkJob pyspark_job = 4; - */ + /** .google.cloud.dataproc.v1.PySparkJob pyspark_job = 4; */ public Builder mergePysparkJob(com.google.cloud.dataproc.v1.PySparkJob value) { if (pysparkJobBuilder_ == null) { if (jobTypeCase_ == 4 @@ -2195,15 +1903,7 @@ public Builder mergePysparkJob(com.google.cloud.dataproc.v1.PySparkJob value) { jobTypeCase_ = 4; return this; } - /** - * - * - *
-     * Job is a Pyspark job.
-     * 
- * - * .google.cloud.dataproc.v1.PySparkJob pyspark_job = 4; - */ + /** .google.cloud.dataproc.v1.PySparkJob pyspark_job = 4; */ public Builder clearPysparkJob() { if (pysparkJobBuilder_ == null) { if (jobTypeCase_ == 4) { @@ -2220,27 +1920,11 @@ public Builder clearPysparkJob() { } return this; } - /** - * - * - *
-     * Job is a Pyspark job.
-     * 
- * - * .google.cloud.dataproc.v1.PySparkJob pyspark_job = 4; - */ + /** .google.cloud.dataproc.v1.PySparkJob pyspark_job = 4; */ public com.google.cloud.dataproc.v1.PySparkJob.Builder getPysparkJobBuilder() { return getPysparkJobFieldBuilder().getBuilder(); } - /** - * - * - *
-     * Job is a Pyspark job.
-     * 
- * - * .google.cloud.dataproc.v1.PySparkJob pyspark_job = 4; - */ + /** .google.cloud.dataproc.v1.PySparkJob pyspark_job = 4; */ public com.google.cloud.dataproc.v1.PySparkJobOrBuilder getPysparkJobOrBuilder() { if ((jobTypeCase_ == 4) && (pysparkJobBuilder_ != null)) { return pysparkJobBuilder_.getMessageOrBuilder(); @@ -2251,15 +1935,7 @@ public com.google.cloud.dataproc.v1.PySparkJobOrBuilder getPysparkJobOrBuilder() return com.google.cloud.dataproc.v1.PySparkJob.getDefaultInstance(); } } - /** - * - * - *
-     * Job is a Pyspark job.
-     * 
- * - * .google.cloud.dataproc.v1.PySparkJob pyspark_job = 4; - */ + /** .google.cloud.dataproc.v1.PySparkJob pyspark_job = 4; */ private com.google.protobuf.SingleFieldBuilderV3< com.google.cloud.dataproc.v1.PySparkJob, com.google.cloud.dataproc.v1.PySparkJob.Builder, @@ -2291,12 +1967,6 @@ public com.google.cloud.dataproc.v1.PySparkJobOrBuilder getPysparkJobOrBuilder() com.google.cloud.dataproc.v1.HiveJobOrBuilder> hiveJobBuilder_; /** - * - * - *
-     * Job is a Hive job.
-     * 
- * * .google.cloud.dataproc.v1.HiveJob hive_job = 5; * * @return Whether the hiveJob field is set. @@ -2305,12 +1975,6 @@ public boolean hasHiveJob() { return jobTypeCase_ == 5; } /** - * - * - *
-     * Job is a Hive job.
-     * 
- * * .google.cloud.dataproc.v1.HiveJob hive_job = 5; * * @return The hiveJob. @@ -2328,15 +1992,7 @@ public com.google.cloud.dataproc.v1.HiveJob getHiveJob() { return com.google.cloud.dataproc.v1.HiveJob.getDefaultInstance(); } } - /** - * - * - *
-     * Job is a Hive job.
-     * 
- * - * .google.cloud.dataproc.v1.HiveJob hive_job = 5; - */ + /** .google.cloud.dataproc.v1.HiveJob hive_job = 5; */ public Builder setHiveJob(com.google.cloud.dataproc.v1.HiveJob value) { if (hiveJobBuilder_ == null) { if (value == null) { @@ -2350,15 +2006,7 @@ public Builder setHiveJob(com.google.cloud.dataproc.v1.HiveJob value) { jobTypeCase_ = 5; return this; } - /** - * - * - *
-     * Job is a Hive job.
-     * 
- * - * .google.cloud.dataproc.v1.HiveJob hive_job = 5; - */ + /** .google.cloud.dataproc.v1.HiveJob hive_job = 5; */ public Builder setHiveJob(com.google.cloud.dataproc.v1.HiveJob.Builder builderForValue) { if (hiveJobBuilder_ == null) { jobType_ = builderForValue.build(); @@ -2369,15 +2017,7 @@ public Builder setHiveJob(com.google.cloud.dataproc.v1.HiveJob.Builder builderFo jobTypeCase_ = 5; return this; } - /** - * - * - *
-     * Job is a Hive job.
-     * 
- * - * .google.cloud.dataproc.v1.HiveJob hive_job = 5; - */ + /** .google.cloud.dataproc.v1.HiveJob hive_job = 5; */ public Builder mergeHiveJob(com.google.cloud.dataproc.v1.HiveJob value) { if (hiveJobBuilder_ == null) { if (jobTypeCase_ == 5 @@ -2400,15 +2040,7 @@ public Builder mergeHiveJob(com.google.cloud.dataproc.v1.HiveJob value) { jobTypeCase_ = 5; return this; } - /** - * - * - *
-     * Job is a Hive job.
-     * 
- * - * .google.cloud.dataproc.v1.HiveJob hive_job = 5; - */ + /** .google.cloud.dataproc.v1.HiveJob hive_job = 5; */ public Builder clearHiveJob() { if (hiveJobBuilder_ == null) { if (jobTypeCase_ == 5) { @@ -2425,27 +2057,11 @@ public Builder clearHiveJob() { } return this; } - /** - * - * - *
-     * Job is a Hive job.
-     * 
- * - * .google.cloud.dataproc.v1.HiveJob hive_job = 5; - */ + /** .google.cloud.dataproc.v1.HiveJob hive_job = 5; */ public com.google.cloud.dataproc.v1.HiveJob.Builder getHiveJobBuilder() { return getHiveJobFieldBuilder().getBuilder(); } - /** - * - * - *
-     * Job is a Hive job.
-     * 
- * - * .google.cloud.dataproc.v1.HiveJob hive_job = 5; - */ + /** .google.cloud.dataproc.v1.HiveJob hive_job = 5; */ public com.google.cloud.dataproc.v1.HiveJobOrBuilder getHiveJobOrBuilder() { if ((jobTypeCase_ == 5) && (hiveJobBuilder_ != null)) { return hiveJobBuilder_.getMessageOrBuilder(); @@ -2456,15 +2072,7 @@ public com.google.cloud.dataproc.v1.HiveJobOrBuilder getHiveJobOrBuilder() { return com.google.cloud.dataproc.v1.HiveJob.getDefaultInstance(); } } - /** - * - * - *
-     * Job is a Hive job.
-     * 
- * - * .google.cloud.dataproc.v1.HiveJob hive_job = 5; - */ + /** .google.cloud.dataproc.v1.HiveJob hive_job = 5; */ private com.google.protobuf.SingleFieldBuilderV3< com.google.cloud.dataproc.v1.HiveJob, com.google.cloud.dataproc.v1.HiveJob.Builder, @@ -2494,12 +2102,6 @@ public com.google.cloud.dataproc.v1.HiveJobOrBuilder getHiveJobOrBuilder() { com.google.cloud.dataproc.v1.PigJobOrBuilder> pigJobBuilder_; /** - * - * - *
-     * Job is a Pig job.
-     * 
- * * .google.cloud.dataproc.v1.PigJob pig_job = 6; * * @return Whether the pigJob field is set. @@ -2508,12 +2110,6 @@ public boolean hasPigJob() { return jobTypeCase_ == 6; } /** - * - * - *
-     * Job is a Pig job.
-     * 
- * * .google.cloud.dataproc.v1.PigJob pig_job = 6; * * @return The pigJob. @@ -2531,15 +2127,7 @@ public com.google.cloud.dataproc.v1.PigJob getPigJob() { return com.google.cloud.dataproc.v1.PigJob.getDefaultInstance(); } } - /** - * - * - *
-     * Job is a Pig job.
-     * 
- * - * .google.cloud.dataproc.v1.PigJob pig_job = 6; - */ + /** .google.cloud.dataproc.v1.PigJob pig_job = 6; */ public Builder setPigJob(com.google.cloud.dataproc.v1.PigJob value) { if (pigJobBuilder_ == null) { if (value == null) { @@ -2553,15 +2141,7 @@ public Builder setPigJob(com.google.cloud.dataproc.v1.PigJob value) { jobTypeCase_ = 6; return this; } - /** - * - * - *
-     * Job is a Pig job.
-     * 
- * - * .google.cloud.dataproc.v1.PigJob pig_job = 6; - */ + /** .google.cloud.dataproc.v1.PigJob pig_job = 6; */ public Builder setPigJob(com.google.cloud.dataproc.v1.PigJob.Builder builderForValue) { if (pigJobBuilder_ == null) { jobType_ = builderForValue.build(); @@ -2572,15 +2152,7 @@ public Builder setPigJob(com.google.cloud.dataproc.v1.PigJob.Builder builderForV jobTypeCase_ = 6; return this; } - /** - * - * - *
-     * Job is a Pig job.
-     * 
- * - * .google.cloud.dataproc.v1.PigJob pig_job = 6; - */ + /** .google.cloud.dataproc.v1.PigJob pig_job = 6; */ public Builder mergePigJob(com.google.cloud.dataproc.v1.PigJob value) { if (pigJobBuilder_ == null) { if (jobTypeCase_ == 6 @@ -2603,15 +2175,7 @@ public Builder mergePigJob(com.google.cloud.dataproc.v1.PigJob value) { jobTypeCase_ = 6; return this; } - /** - * - * - *
-     * Job is a Pig job.
-     * 
- * - * .google.cloud.dataproc.v1.PigJob pig_job = 6; - */ + /** .google.cloud.dataproc.v1.PigJob pig_job = 6; */ public Builder clearPigJob() { if (pigJobBuilder_ == null) { if (jobTypeCase_ == 6) { @@ -2628,27 +2192,11 @@ public Builder clearPigJob() { } return this; } - /** - * - * - *
-     * Job is a Pig job.
-     * 
- * - * .google.cloud.dataproc.v1.PigJob pig_job = 6; - */ + /** .google.cloud.dataproc.v1.PigJob pig_job = 6; */ public com.google.cloud.dataproc.v1.PigJob.Builder getPigJobBuilder() { return getPigJobFieldBuilder().getBuilder(); } - /** - * - * - *
-     * Job is a Pig job.
-     * 
- * - * .google.cloud.dataproc.v1.PigJob pig_job = 6; - */ + /** .google.cloud.dataproc.v1.PigJob pig_job = 6; */ public com.google.cloud.dataproc.v1.PigJobOrBuilder getPigJobOrBuilder() { if ((jobTypeCase_ == 6) && (pigJobBuilder_ != null)) { return pigJobBuilder_.getMessageOrBuilder(); @@ -2659,15 +2207,7 @@ public com.google.cloud.dataproc.v1.PigJobOrBuilder getPigJobOrBuilder() { return com.google.cloud.dataproc.v1.PigJob.getDefaultInstance(); } } - /** - * - * - *
-     * Job is a Pig job.
-     * 
- * - * .google.cloud.dataproc.v1.PigJob pig_job = 6; - */ + /** .google.cloud.dataproc.v1.PigJob pig_job = 6; */ private com.google.protobuf.SingleFieldBuilderV3< com.google.cloud.dataproc.v1.PigJob, com.google.cloud.dataproc.v1.PigJob.Builder, @@ -2697,12 +2237,6 @@ public com.google.cloud.dataproc.v1.PigJobOrBuilder getPigJobOrBuilder() { com.google.cloud.dataproc.v1.SparkSqlJobOrBuilder> sparkSqlJobBuilder_; /** - * - * - *
-     * Job is a SparkSql job.
-     * 
- * * .google.cloud.dataproc.v1.SparkSqlJob spark_sql_job = 7; * * @return Whether the sparkSqlJob field is set. @@ -2711,12 +2245,6 @@ public boolean hasSparkSqlJob() { return jobTypeCase_ == 7; } /** - * - * - *
-     * Job is a SparkSql job.
-     * 
- * * .google.cloud.dataproc.v1.SparkSqlJob spark_sql_job = 7; * * @return The sparkSqlJob. @@ -2734,15 +2262,7 @@ public com.google.cloud.dataproc.v1.SparkSqlJob getSparkSqlJob() { return com.google.cloud.dataproc.v1.SparkSqlJob.getDefaultInstance(); } } - /** - * - * - *
-     * Job is a SparkSql job.
-     * 
- * - * .google.cloud.dataproc.v1.SparkSqlJob spark_sql_job = 7; - */ + /** .google.cloud.dataproc.v1.SparkSqlJob spark_sql_job = 7; */ public Builder setSparkSqlJob(com.google.cloud.dataproc.v1.SparkSqlJob value) { if (sparkSqlJobBuilder_ == null) { if (value == null) { @@ -2756,15 +2276,7 @@ public Builder setSparkSqlJob(com.google.cloud.dataproc.v1.SparkSqlJob value) { jobTypeCase_ = 7; return this; } - /** - * - * - *
-     * Job is a SparkSql job.
-     * 
- * - * .google.cloud.dataproc.v1.SparkSqlJob spark_sql_job = 7; - */ + /** .google.cloud.dataproc.v1.SparkSqlJob spark_sql_job = 7; */ public Builder setSparkSqlJob( com.google.cloud.dataproc.v1.SparkSqlJob.Builder builderForValue) { if (sparkSqlJobBuilder_ == null) { @@ -2776,15 +2288,7 @@ public Builder setSparkSqlJob( jobTypeCase_ = 7; return this; } - /** - * - * - *
-     * Job is a SparkSql job.
-     * 
- * - * .google.cloud.dataproc.v1.SparkSqlJob spark_sql_job = 7; - */ + /** .google.cloud.dataproc.v1.SparkSqlJob spark_sql_job = 7; */ public Builder mergeSparkSqlJob(com.google.cloud.dataproc.v1.SparkSqlJob value) { if (sparkSqlJobBuilder_ == null) { if (jobTypeCase_ == 7 @@ -2807,15 +2311,7 @@ public Builder mergeSparkSqlJob(com.google.cloud.dataproc.v1.SparkSqlJob value) jobTypeCase_ = 7; return this; } - /** - * - * - *
-     * Job is a SparkSql job.
-     * 
- * - * .google.cloud.dataproc.v1.SparkSqlJob spark_sql_job = 7; - */ + /** .google.cloud.dataproc.v1.SparkSqlJob spark_sql_job = 7; */ public Builder clearSparkSqlJob() { if (sparkSqlJobBuilder_ == null) { if (jobTypeCase_ == 7) { @@ -2832,27 +2328,11 @@ public Builder clearSparkSqlJob() { } return this; } - /** - * - * - *
-     * Job is a SparkSql job.
-     * 
- * - * .google.cloud.dataproc.v1.SparkSqlJob spark_sql_job = 7; - */ + /** .google.cloud.dataproc.v1.SparkSqlJob spark_sql_job = 7; */ public com.google.cloud.dataproc.v1.SparkSqlJob.Builder getSparkSqlJobBuilder() { return getSparkSqlJobFieldBuilder().getBuilder(); } - /** - * - * - *
-     * Job is a SparkSql job.
-     * 
- * - * .google.cloud.dataproc.v1.SparkSqlJob spark_sql_job = 7; - */ + /** .google.cloud.dataproc.v1.SparkSqlJob spark_sql_job = 7; */ public com.google.cloud.dataproc.v1.SparkSqlJobOrBuilder getSparkSqlJobOrBuilder() { if ((jobTypeCase_ == 7) && (sparkSqlJobBuilder_ != null)) { return sparkSqlJobBuilder_.getMessageOrBuilder(); @@ -2863,15 +2343,7 @@ public com.google.cloud.dataproc.v1.SparkSqlJobOrBuilder getSparkSqlJobOrBuilder return com.google.cloud.dataproc.v1.SparkSqlJob.getDefaultInstance(); } } - /** - * - * - *
-     * Job is a SparkSql job.
-     * 
- * - * .google.cloud.dataproc.v1.SparkSqlJob spark_sql_job = 7; - */ + /** .google.cloud.dataproc.v1.SparkSqlJob spark_sql_job = 7; */ private com.google.protobuf.SingleFieldBuilderV3< com.google.cloud.dataproc.v1.SparkSqlJob, com.google.cloud.dataproc.v1.SparkSqlJob.Builder, diff --git a/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/OrderedJobOrBuilder.java b/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/OrderedJobOrBuilder.java index abdc5e94..00b7799d 100644 --- a/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/OrderedJobOrBuilder.java +++ b/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/OrderedJobOrBuilder.java @@ -65,213 +65,93 @@ public interface OrderedJobOrBuilder com.google.protobuf.ByteString getStepIdBytes(); /** - * - * - *
-   * Job is a Hadoop job.
-   * 
- * * .google.cloud.dataproc.v1.HadoopJob hadoop_job = 2; * * @return Whether the hadoopJob field is set. */ boolean hasHadoopJob(); /** - * - * - *
-   * Job is a Hadoop job.
-   * 
- * * .google.cloud.dataproc.v1.HadoopJob hadoop_job = 2; * * @return The hadoopJob. */ com.google.cloud.dataproc.v1.HadoopJob getHadoopJob(); - /** - * - * - *
-   * Job is a Hadoop job.
-   * 
- * - * .google.cloud.dataproc.v1.HadoopJob hadoop_job = 2; - */ + /** .google.cloud.dataproc.v1.HadoopJob hadoop_job = 2; */ com.google.cloud.dataproc.v1.HadoopJobOrBuilder getHadoopJobOrBuilder(); /** - * - * - *
-   * Job is a Spark job.
-   * 
- * * .google.cloud.dataproc.v1.SparkJob spark_job = 3; * * @return Whether the sparkJob field is set. */ boolean hasSparkJob(); /** - * - * - *
-   * Job is a Spark job.
-   * 
- * * .google.cloud.dataproc.v1.SparkJob spark_job = 3; * * @return The sparkJob. */ com.google.cloud.dataproc.v1.SparkJob getSparkJob(); - /** - * - * - *
-   * Job is a Spark job.
-   * 
- * - * .google.cloud.dataproc.v1.SparkJob spark_job = 3; - */ + /** .google.cloud.dataproc.v1.SparkJob spark_job = 3; */ com.google.cloud.dataproc.v1.SparkJobOrBuilder getSparkJobOrBuilder(); /** - * - * - *
-   * Job is a Pyspark job.
-   * 
- * * .google.cloud.dataproc.v1.PySparkJob pyspark_job = 4; * * @return Whether the pysparkJob field is set. */ boolean hasPysparkJob(); /** - * - * - *
-   * Job is a Pyspark job.
-   * 
- * * .google.cloud.dataproc.v1.PySparkJob pyspark_job = 4; * * @return The pysparkJob. */ com.google.cloud.dataproc.v1.PySparkJob getPysparkJob(); - /** - * - * - *
-   * Job is a Pyspark job.
-   * 
- * - * .google.cloud.dataproc.v1.PySparkJob pyspark_job = 4; - */ + /** .google.cloud.dataproc.v1.PySparkJob pyspark_job = 4; */ com.google.cloud.dataproc.v1.PySparkJobOrBuilder getPysparkJobOrBuilder(); /** - * - * - *
-   * Job is a Hive job.
-   * 
- * * .google.cloud.dataproc.v1.HiveJob hive_job = 5; * * @return Whether the hiveJob field is set. */ boolean hasHiveJob(); /** - * - * - *
-   * Job is a Hive job.
-   * 
- * * .google.cloud.dataproc.v1.HiveJob hive_job = 5; * * @return The hiveJob. */ com.google.cloud.dataproc.v1.HiveJob getHiveJob(); - /** - * - * - *
-   * Job is a Hive job.
-   * 
- * - * .google.cloud.dataproc.v1.HiveJob hive_job = 5; - */ + /** .google.cloud.dataproc.v1.HiveJob hive_job = 5; */ com.google.cloud.dataproc.v1.HiveJobOrBuilder getHiveJobOrBuilder(); /** - * - * - *
-   * Job is a Pig job.
-   * 
- * * .google.cloud.dataproc.v1.PigJob pig_job = 6; * * @return Whether the pigJob field is set. */ boolean hasPigJob(); /** - * - * - *
-   * Job is a Pig job.
-   * 
- * * .google.cloud.dataproc.v1.PigJob pig_job = 6; * * @return The pigJob. */ com.google.cloud.dataproc.v1.PigJob getPigJob(); - /** - * - * - *
-   * Job is a Pig job.
-   * 
- * - * .google.cloud.dataproc.v1.PigJob pig_job = 6; - */ + /** .google.cloud.dataproc.v1.PigJob pig_job = 6; */ com.google.cloud.dataproc.v1.PigJobOrBuilder getPigJobOrBuilder(); /** - * - * - *
-   * Job is a SparkSql job.
-   * 
- * * .google.cloud.dataproc.v1.SparkSqlJob spark_sql_job = 7; * * @return Whether the sparkSqlJob field is set. */ boolean hasSparkSqlJob(); /** - * - * - *
-   * Job is a SparkSql job.
-   * 
- * * .google.cloud.dataproc.v1.SparkSqlJob spark_sql_job = 7; * * @return The sparkSqlJob. */ com.google.cloud.dataproc.v1.SparkSqlJob getSparkSqlJob(); - /** - * - * - *
-   * Job is a SparkSql job.
-   * 
- * - * .google.cloud.dataproc.v1.SparkSqlJob spark_sql_job = 7; - */ + /** .google.cloud.dataproc.v1.SparkSqlJob spark_sql_job = 7; */ com.google.cloud.dataproc.v1.SparkSqlJobOrBuilder getSparkSqlJobOrBuilder(); /** diff --git a/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/PrestoJob.java b/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/PrestoJob.java new file mode 100644 index 00000000..ef1ab155 --- /dev/null +++ b/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/PrestoJob.java @@ -0,0 +1,2240 @@ +/* + * Copyright 2020 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// source: google/cloud/dataproc/v1/jobs.proto + +package com.google.cloud.dataproc.v1; + +/** + * + * + *
+ * A Dataproc job for running [Presto](https://prestosql.io/) queries
+ * 
+ * + * Protobuf type {@code google.cloud.dataproc.v1.PrestoJob} + */ +public final class PrestoJob extends com.google.protobuf.GeneratedMessageV3 + implements + // @@protoc_insertion_point(message_implements:google.cloud.dataproc.v1.PrestoJob) + PrestoJobOrBuilder { + private static final long serialVersionUID = 0L; + // Use PrestoJob.newBuilder() to construct. + private PrestoJob(com.google.protobuf.GeneratedMessageV3.Builder builder) { + super(builder); + } + + private PrestoJob() { + outputFormat_ = ""; + clientTags_ = com.google.protobuf.LazyStringArrayList.EMPTY; + } + + @java.lang.Override + @SuppressWarnings({"unused"}) + protected java.lang.Object newInstance(UnusedPrivateParameter unused) { + return new PrestoJob(); + } + + @java.lang.Override + public final com.google.protobuf.UnknownFieldSet getUnknownFields() { + return this.unknownFields; + } + + private PrestoJob( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + this(); + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + int mutable_bitField0_ = 0; + com.google.protobuf.UnknownFieldSet.Builder unknownFields = + com.google.protobuf.UnknownFieldSet.newBuilder(); + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 10: + { + java.lang.String s = input.readStringRequireUtf8(); + queriesCase_ = 1; + queries_ = s; + break; + } + case 18: + { + com.google.cloud.dataproc.v1.QueryList.Builder subBuilder = null; + if (queriesCase_ == 2) { + subBuilder = ((com.google.cloud.dataproc.v1.QueryList) queries_).toBuilder(); + } + queries_ = + input.readMessage( + com.google.cloud.dataproc.v1.QueryList.parser(), extensionRegistry); + if (subBuilder != null) { + subBuilder.mergeFrom((com.google.cloud.dataproc.v1.QueryList) queries_); + queries_ = subBuilder.buildPartial(); + } + 
queriesCase_ = 2; + break; + } + case 24: + { + continueOnFailure_ = input.readBool(); + break; + } + case 34: + { + java.lang.String s = input.readStringRequireUtf8(); + + outputFormat_ = s; + break; + } + case 42: + { + java.lang.String s = input.readStringRequireUtf8(); + if (!((mutable_bitField0_ & 0x00000001) != 0)) { + clientTags_ = new com.google.protobuf.LazyStringArrayList(); + mutable_bitField0_ |= 0x00000001; + } + clientTags_.add(s); + break; + } + case 50: + { + if (!((mutable_bitField0_ & 0x00000002) != 0)) { + properties_ = + com.google.protobuf.MapField.newMapField( + PropertiesDefaultEntryHolder.defaultEntry); + mutable_bitField0_ |= 0x00000002; + } + com.google.protobuf.MapEntry properties__ = + input.readMessage( + PropertiesDefaultEntryHolder.defaultEntry.getParserForType(), + extensionRegistry); + properties_.getMutableMap().put(properties__.getKey(), properties__.getValue()); + break; + } + case 58: + { + com.google.cloud.dataproc.v1.LoggingConfig.Builder subBuilder = null; + if (loggingConfig_ != null) { + subBuilder = loggingConfig_.toBuilder(); + } + loggingConfig_ = + input.readMessage( + com.google.cloud.dataproc.v1.LoggingConfig.parser(), extensionRegistry); + if (subBuilder != null) { + subBuilder.mergeFrom(loggingConfig_); + loggingConfig_ = subBuilder.buildPartial(); + } + + break; + } + default: + { + if (!parseUnknownField(input, unknownFields, extensionRegistry, tag)) { + done = true; + } + break; + } + } + } + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(this); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException(e).setUnfinishedMessage(this); + } finally { + if (((mutable_bitField0_ & 0x00000001) != 0)) { + clientTags_ = clientTags_.getUnmodifiableView(); + } + this.unknownFields = unknownFields.build(); + makeExtensionsImmutable(); + } + } + + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return 
com.google.cloud.dataproc.v1.JobsProto + .internal_static_google_cloud_dataproc_v1_PrestoJob_descriptor; + } + + @SuppressWarnings({"rawtypes"}) + @java.lang.Override + protected com.google.protobuf.MapField internalGetMapField(int number) { + switch (number) { + case 6: + return internalGetProperties(); + default: + throw new RuntimeException("Invalid map field number: " + number); + } + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.cloud.dataproc.v1.JobsProto + .internal_static_google_cloud_dataproc_v1_PrestoJob_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.cloud.dataproc.v1.PrestoJob.class, + com.google.cloud.dataproc.v1.PrestoJob.Builder.class); + } + + private int queriesCase_ = 0; + private java.lang.Object queries_; + + public enum QueriesCase + implements + com.google.protobuf.Internal.EnumLite, + com.google.protobuf.AbstractMessage.InternalOneOfEnum { + QUERY_FILE_URI(1), + QUERY_LIST(2), + QUERIES_NOT_SET(0); + private final int value; + + private QueriesCase(int value) { + this.value = value; + } + /** + * @param value The number of the enum to look for. + * @return The enum associated with the given number. + * @deprecated Use {@link #forNumber(int)} instead. + */ + @java.lang.Deprecated + public static QueriesCase valueOf(int value) { + return forNumber(value); + } + + public static QueriesCase forNumber(int value) { + switch (value) { + case 1: + return QUERY_FILE_URI; + case 2: + return QUERY_LIST; + case 0: + return QUERIES_NOT_SET; + default: + return null; + } + } + + public int getNumber() { + return this.value; + } + }; + + public QueriesCase getQueriesCase() { + return QueriesCase.forNumber(queriesCase_); + } + + public static final int QUERY_FILE_URI_FIELD_NUMBER = 1; + /** + * + * + *
+   * The HCFS URI of the script that contains SQL queries.
+   * 
+ * + * string query_file_uri = 1; + * + * @return The queryFileUri. + */ + public java.lang.String getQueryFileUri() { + java.lang.Object ref = ""; + if (queriesCase_ == 1) { + ref = queries_; + } + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + if (queriesCase_ == 1) { + queries_ = s; + } + return s; + } + } + /** + * + * + *
+   * The HCFS URI of the script that contains SQL queries.
+   * 
+ * + * string query_file_uri = 1; + * + * @return The bytes for queryFileUri. + */ + public com.google.protobuf.ByteString getQueryFileUriBytes() { + java.lang.Object ref = ""; + if (queriesCase_ == 1) { + ref = queries_; + } + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + if (queriesCase_ == 1) { + queries_ = b; + } + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + public static final int QUERY_LIST_FIELD_NUMBER = 2; + /** + * + * + *
+   * A list of queries.
+   * 
+ * + * .google.cloud.dataproc.v1.QueryList query_list = 2; + * + * @return Whether the queryList field is set. + */ + public boolean hasQueryList() { + return queriesCase_ == 2; + } + /** + * + * + *
+   * A list of queries.
+   * 
+ * + * .google.cloud.dataproc.v1.QueryList query_list = 2; + * + * @return The queryList. + */ + public com.google.cloud.dataproc.v1.QueryList getQueryList() { + if (queriesCase_ == 2) { + return (com.google.cloud.dataproc.v1.QueryList) queries_; + } + return com.google.cloud.dataproc.v1.QueryList.getDefaultInstance(); + } + /** + * + * + *
+   * A list of queries.
+   * 
+ * + * .google.cloud.dataproc.v1.QueryList query_list = 2; + */ + public com.google.cloud.dataproc.v1.QueryListOrBuilder getQueryListOrBuilder() { + if (queriesCase_ == 2) { + return (com.google.cloud.dataproc.v1.QueryList) queries_; + } + return com.google.cloud.dataproc.v1.QueryList.getDefaultInstance(); + } + + public static final int CONTINUE_ON_FAILURE_FIELD_NUMBER = 3; + private boolean continueOnFailure_; + /** + * + * + *
+   * Optional. Whether to continue executing queries if a query fails.
+   * The default value is `false`. Setting to `true` can be useful when
+   * executing independent parallel queries.
+   * 
+ * + * bool continue_on_failure = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The continueOnFailure. + */ + public boolean getContinueOnFailure() { + return continueOnFailure_; + } + + public static final int OUTPUT_FORMAT_FIELD_NUMBER = 4; + private volatile java.lang.Object outputFormat_; + /** + * + * + *
+   * Optional. The format in which query output will be displayed. See the
+   * Presto documentation for supported output formats
+   * 
+ * + * string output_format = 4 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The outputFormat. + */ + public java.lang.String getOutputFormat() { + java.lang.Object ref = outputFormat_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + outputFormat_ = s; + return s; + } + } + /** + * + * + *
+   * Optional. The format in which query output will be displayed. See the
+   * Presto documentation for supported output formats.
+   * 
+ * + * string output_format = 4 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The bytes for outputFormat. + */ + public com.google.protobuf.ByteString getOutputFormatBytes() { + java.lang.Object ref = outputFormat_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + outputFormat_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + public static final int CLIENT_TAGS_FIELD_NUMBER = 5; + private com.google.protobuf.LazyStringList clientTags_; + /** + * + * + *
+   * Optional. Presto client tags to attach to this query.
+   * 
+ * + * repeated string client_tags = 5 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return A list containing the clientTags. + */ + public com.google.protobuf.ProtocolStringList getClientTagsList() { + return clientTags_; + } + /** + * + * + *
+   * Optional. Presto client tags to attach to this query.
+   * 
+ * + * repeated string client_tags = 5 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The count of clientTags. + */ + public int getClientTagsCount() { + return clientTags_.size(); + } + /** + * + * + *
+   * Optional. Presto client tags to attach to this query.
+   * 
+ * + * repeated string client_tags = 5 [(.google.api.field_behavior) = OPTIONAL]; + * + * @param index The index of the element to return. + * @return The clientTags at the given index. + */ + public java.lang.String getClientTags(int index) { + return clientTags_.get(index); + } + /** + * + * + *
+   * Optional. Presto client tags to attach to this query.
+   * 
+ * + * repeated string client_tags = 5 [(.google.api.field_behavior) = OPTIONAL]; + * + * @param index The index of the value to return. + * @return The bytes of the clientTags at the given index. + */ + public com.google.protobuf.ByteString getClientTagsBytes(int index) { + return clientTags_.getByteString(index); + } + + public static final int PROPERTIES_FIELD_NUMBER = 6; + + private static final class PropertiesDefaultEntryHolder { + static final com.google.protobuf.MapEntry defaultEntry = + com.google.protobuf.MapEntry.newDefaultInstance( + com.google.cloud.dataproc.v1.JobsProto + .internal_static_google_cloud_dataproc_v1_PrestoJob_PropertiesEntry_descriptor, + com.google.protobuf.WireFormat.FieldType.STRING, + "", + com.google.protobuf.WireFormat.FieldType.STRING, + ""); + } + + private com.google.protobuf.MapField properties_; + + private com.google.protobuf.MapField internalGetProperties() { + if (properties_ == null) { + return com.google.protobuf.MapField.emptyMapField(PropertiesDefaultEntryHolder.defaultEntry); + } + return properties_; + } + + public int getPropertiesCount() { + return internalGetProperties().getMap().size(); + } + /** + * + * + *
+   * Optional. A mapping of property names to values. Used to set Presto
+   * [session properties](https://prestodb.io/docs/current/sql/set-session.html)
+   * Equivalent to using the --session flag in the Presto CLI.
+   * 
+ * + * map<string, string> properties = 6 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public boolean containsProperties(java.lang.String key) { + if (key == null) { + throw new java.lang.NullPointerException(); + } + return internalGetProperties().getMap().containsKey(key); + } + /** Use {@link #getPropertiesMap()} instead. */ + @java.lang.Deprecated + public java.util.Map getProperties() { + return getPropertiesMap(); + } + /** + * + * + *
+   * Optional. A mapping of property names to values. Used to set Presto
+   * [session properties](https://prestodb.io/docs/current/sql/set-session.html)
+   * Equivalent to using the --session flag in the Presto CLI.
+   * 
+ * + * map<string, string> properties = 6 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public java.util.Map getPropertiesMap() { + return internalGetProperties().getMap(); + } + /** + * + * + *
+   * Optional. A mapping of property names to values. Used to set Presto
+   * [session properties](https://prestodb.io/docs/current/sql/set-session.html)
+   * Equivalent to using the --session flag in the Presto CLI.
+   * 
+ * + * map<string, string> properties = 6 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public java.lang.String getPropertiesOrDefault( + java.lang.String key, java.lang.String defaultValue) { + if (key == null) { + throw new java.lang.NullPointerException(); + } + java.util.Map map = internalGetProperties().getMap(); + return map.containsKey(key) ? map.get(key) : defaultValue; + } + /** + * + * + *
+   * Optional. A mapping of property names to values. Used to set Presto
+   * [session properties](https://prestodb.io/docs/current/sql/set-session.html)
+   * Equivalent to using the --session flag in the Presto CLI.
+   * 
+ * + * map<string, string> properties = 6 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public java.lang.String getPropertiesOrThrow(java.lang.String key) { + if (key == null) { + throw new java.lang.NullPointerException(); + } + java.util.Map map = internalGetProperties().getMap(); + if (!map.containsKey(key)) { + throw new java.lang.IllegalArgumentException(); + } + return map.get(key); + } + + public static final int LOGGING_CONFIG_FIELD_NUMBER = 7; + private com.google.cloud.dataproc.v1.LoggingConfig loggingConfig_; + /** + * + * + *
+   * Optional. The runtime log config for job execution.
+   * 
+ * + * + * .google.cloud.dataproc.v1.LoggingConfig logging_config = 7 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return Whether the loggingConfig field is set. + */ + public boolean hasLoggingConfig() { + return loggingConfig_ != null; + } + /** + * + * + *
+   * Optional. The runtime log config for job execution.
+   * 
+ * + * + * .google.cloud.dataproc.v1.LoggingConfig logging_config = 7 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return The loggingConfig. + */ + public com.google.cloud.dataproc.v1.LoggingConfig getLoggingConfig() { + return loggingConfig_ == null + ? com.google.cloud.dataproc.v1.LoggingConfig.getDefaultInstance() + : loggingConfig_; + } + /** + * + * + *
+   * Optional. The runtime log config for job execution.
+   * 
+ * + * + * .google.cloud.dataproc.v1.LoggingConfig logging_config = 7 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public com.google.cloud.dataproc.v1.LoggingConfigOrBuilder getLoggingConfigOrBuilder() { + return getLoggingConfig(); + } + + private byte memoizedIsInitialized = -1; + + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { + if (queriesCase_ == 1) { + com.google.protobuf.GeneratedMessageV3.writeString(output, 1, queries_); + } + if (queriesCase_ == 2) { + output.writeMessage(2, (com.google.cloud.dataproc.v1.QueryList) queries_); + } + if (continueOnFailure_ != false) { + output.writeBool(3, continueOnFailure_); + } + if (!getOutputFormatBytes().isEmpty()) { + com.google.protobuf.GeneratedMessageV3.writeString(output, 4, outputFormat_); + } + for (int i = 0; i < clientTags_.size(); i++) { + com.google.protobuf.GeneratedMessageV3.writeString(output, 5, clientTags_.getRaw(i)); + } + com.google.protobuf.GeneratedMessageV3.serializeStringMapTo( + output, internalGetProperties(), PropertiesDefaultEntryHolder.defaultEntry, 6); + if (loggingConfig_ != null) { + output.writeMessage(7, getLoggingConfig()); + } + unknownFields.writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + if (queriesCase_ == 1) { + size += com.google.protobuf.GeneratedMessageV3.computeStringSize(1, queries_); + } + if (queriesCase_ == 2) { + size += + com.google.protobuf.CodedOutputStream.computeMessageSize( + 2, (com.google.cloud.dataproc.v1.QueryList) queries_); + } + if (continueOnFailure_ != false) { + size += com.google.protobuf.CodedOutputStream.computeBoolSize(3, 
continueOnFailure_); + } + if (!getOutputFormatBytes().isEmpty()) { + size += com.google.protobuf.GeneratedMessageV3.computeStringSize(4, outputFormat_); + } + { + int dataSize = 0; + for (int i = 0; i < clientTags_.size(); i++) { + dataSize += computeStringSizeNoTag(clientTags_.getRaw(i)); + } + size += dataSize; + size += 1 * getClientTagsList().size(); + } + for (java.util.Map.Entry entry : + internalGetProperties().getMap().entrySet()) { + com.google.protobuf.MapEntry properties__ = + PropertiesDefaultEntryHolder.defaultEntry + .newBuilderForType() + .setKey(entry.getKey()) + .setValue(entry.getValue()) + .build(); + size += com.google.protobuf.CodedOutputStream.computeMessageSize(6, properties__); + } + if (loggingConfig_ != null) { + size += com.google.protobuf.CodedOutputStream.computeMessageSize(7, getLoggingConfig()); + } + size += unknownFields.getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof com.google.cloud.dataproc.v1.PrestoJob)) { + return super.equals(obj); + } + com.google.cloud.dataproc.v1.PrestoJob other = (com.google.cloud.dataproc.v1.PrestoJob) obj; + + if (getContinueOnFailure() != other.getContinueOnFailure()) return false; + if (!getOutputFormat().equals(other.getOutputFormat())) return false; + if (!getClientTagsList().equals(other.getClientTagsList())) return false; + if (!internalGetProperties().equals(other.internalGetProperties())) return false; + if (hasLoggingConfig() != other.hasLoggingConfig()) return false; + if (hasLoggingConfig()) { + if (!getLoggingConfig().equals(other.getLoggingConfig())) return false; + } + if (!getQueriesCase().equals(other.getQueriesCase())) return false; + switch (queriesCase_) { + case 1: + if (!getQueryFileUri().equals(other.getQueryFileUri())) return false; + break; + case 2: + if (!getQueryList().equals(other.getQueryList())) return false; + break; + 
case 0: + default: + } + if (!unknownFields.equals(other.unknownFields)) return false; + return true; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + hash = (37 * hash) + CONTINUE_ON_FAILURE_FIELD_NUMBER; + hash = (53 * hash) + com.google.protobuf.Internal.hashBoolean(getContinueOnFailure()); + hash = (37 * hash) + OUTPUT_FORMAT_FIELD_NUMBER; + hash = (53 * hash) + getOutputFormat().hashCode(); + if (getClientTagsCount() > 0) { + hash = (37 * hash) + CLIENT_TAGS_FIELD_NUMBER; + hash = (53 * hash) + getClientTagsList().hashCode(); + } + if (!internalGetProperties().getMap().isEmpty()) { + hash = (37 * hash) + PROPERTIES_FIELD_NUMBER; + hash = (53 * hash) + internalGetProperties().hashCode(); + } + if (hasLoggingConfig()) { + hash = (37 * hash) + LOGGING_CONFIG_FIELD_NUMBER; + hash = (53 * hash) + getLoggingConfig().hashCode(); + } + switch (queriesCase_) { + case 1: + hash = (37 * hash) + QUERY_FILE_URI_FIELD_NUMBER; + hash = (53 * hash) + getQueryFileUri().hashCode(); + break; + case 2: + hash = (37 * hash) + QUERY_LIST_FIELD_NUMBER; + hash = (53 * hash) + getQueryList().hashCode(); + break; + case 0: + default: + } + hash = (29 * hash) + unknownFields.hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static com.google.cloud.dataproc.v1.PrestoJob parseFrom(java.nio.ByteBuffer data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.cloud.dataproc.v1.PrestoJob parseFrom( + java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.dataproc.v1.PrestoJob parseFrom( + com.google.protobuf.ByteString data) + throws 
com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.cloud.dataproc.v1.PrestoJob parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.dataproc.v1.PrestoJob parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.cloud.dataproc.v1.PrestoJob parseFrom( + byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.dataproc.v1.PrestoJob parseFrom(java.io.InputStream input) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException(PARSER, input); + } + + public static com.google.cloud.dataproc.v1.PrestoJob parseFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.cloud.dataproc.v1.PrestoJob parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseDelimitedWithIOException(PARSER, input); + } + + public static com.google.cloud.dataproc.v1.PrestoJob parseDelimitedFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseDelimitedWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.cloud.dataproc.v1.PrestoJob parseFrom( + com.google.protobuf.CodedInputStream input) 
throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException(PARSER, input); + } + + public static com.google.cloud.dataproc.v1.PrestoJob parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException( + PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { + return newBuilder(); + } + + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + + public static Builder newBuilder(com.google.cloud.dataproc.v1.PrestoJob prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType(com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + /** + * + * + *
+   * A Dataproc job for running [Presto](https://prestosql.io/) queries.
+   * 
+ * + * Protobuf type {@code google.cloud.dataproc.v1.PrestoJob} + */ + public static final class Builder extends com.google.protobuf.GeneratedMessageV3.Builder + implements + // @@protoc_insertion_point(builder_implements:google.cloud.dataproc.v1.PrestoJob) + com.google.cloud.dataproc.v1.PrestoJobOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.cloud.dataproc.v1.JobsProto + .internal_static_google_cloud_dataproc_v1_PrestoJob_descriptor; + } + + @SuppressWarnings({"rawtypes"}) + protected com.google.protobuf.MapField internalGetMapField(int number) { + switch (number) { + case 6: + return internalGetProperties(); + default: + throw new RuntimeException("Invalid map field number: " + number); + } + } + + @SuppressWarnings({"rawtypes"}) + protected com.google.protobuf.MapField internalGetMutableMapField(int number) { + switch (number) { + case 6: + return internalGetMutableProperties(); + default: + throw new RuntimeException("Invalid map field number: " + number); + } + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.cloud.dataproc.v1.JobsProto + .internal_static_google_cloud_dataproc_v1_PrestoJob_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.cloud.dataproc.v1.PrestoJob.class, + com.google.cloud.dataproc.v1.PrestoJob.Builder.class); + } + + // Construct using com.google.cloud.dataproc.v1.PrestoJob.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder(com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessageV3.alwaysUseFieldBuilders) {} + } + + @java.lang.Override + public Builder clear() { + super.clear(); + continueOnFailure_ = false; + + outputFormat_ = ""; + + clientTags_ = 
com.google.protobuf.LazyStringArrayList.EMPTY; + bitField0_ = (bitField0_ & ~0x00000001); + internalGetMutableProperties().clear(); + if (loggingConfigBuilder_ == null) { + loggingConfig_ = null; + } else { + loggingConfig_ = null; + loggingConfigBuilder_ = null; + } + queriesCase_ = 0; + queries_ = null; + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { + return com.google.cloud.dataproc.v1.JobsProto + .internal_static_google_cloud_dataproc_v1_PrestoJob_descriptor; + } + + @java.lang.Override + public com.google.cloud.dataproc.v1.PrestoJob getDefaultInstanceForType() { + return com.google.cloud.dataproc.v1.PrestoJob.getDefaultInstance(); + } + + @java.lang.Override + public com.google.cloud.dataproc.v1.PrestoJob build() { + com.google.cloud.dataproc.v1.PrestoJob result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public com.google.cloud.dataproc.v1.PrestoJob buildPartial() { + com.google.cloud.dataproc.v1.PrestoJob result = + new com.google.cloud.dataproc.v1.PrestoJob(this); + int from_bitField0_ = bitField0_; + if (queriesCase_ == 1) { + result.queries_ = queries_; + } + if (queriesCase_ == 2) { + if (queryListBuilder_ == null) { + result.queries_ = queries_; + } else { + result.queries_ = queryListBuilder_.build(); + } + } + result.continueOnFailure_ = continueOnFailure_; + result.outputFormat_ = outputFormat_; + if (((bitField0_ & 0x00000001) != 0)) { + clientTags_ = clientTags_.getUnmodifiableView(); + bitField0_ = (bitField0_ & ~0x00000001); + } + result.clientTags_ = clientTags_; + result.properties_ = internalGetProperties(); + result.properties_.makeImmutable(); + if (loggingConfigBuilder_ == null) { + result.loggingConfig_ = loggingConfig_; + } else { + result.loggingConfig_ = loggingConfigBuilder_.build(); + } + result.queriesCase_ = queriesCase_; + onBuilt(); + return result; + } 
+ + @java.lang.Override + public Builder clone() { + return super.clone(); + } + + @java.lang.Override + public Builder setField( + com.google.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { + return super.setField(field, value); + } + + @java.lang.Override + public Builder clearField(com.google.protobuf.Descriptors.FieldDescriptor field) { + return super.clearField(field); + } + + @java.lang.Override + public Builder clearOneof(com.google.protobuf.Descriptors.OneofDescriptor oneof) { + return super.clearOneof(oneof); + } + + @java.lang.Override + public Builder setRepeatedField( + com.google.protobuf.Descriptors.FieldDescriptor field, int index, java.lang.Object value) { + return super.setRepeatedField(field, index, value); + } + + @java.lang.Override + public Builder addRepeatedField( + com.google.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { + return super.addRepeatedField(field, value); + } + + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof com.google.cloud.dataproc.v1.PrestoJob) { + return mergeFrom((com.google.cloud.dataproc.v1.PrestoJob) other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(com.google.cloud.dataproc.v1.PrestoJob other) { + if (other == com.google.cloud.dataproc.v1.PrestoJob.getDefaultInstance()) return this; + if (other.getContinueOnFailure() != false) { + setContinueOnFailure(other.getContinueOnFailure()); + } + if (!other.getOutputFormat().isEmpty()) { + outputFormat_ = other.outputFormat_; + onChanged(); + } + if (!other.clientTags_.isEmpty()) { + if (clientTags_.isEmpty()) { + clientTags_ = other.clientTags_; + bitField0_ = (bitField0_ & ~0x00000001); + } else { + ensureClientTagsIsMutable(); + clientTags_.addAll(other.clientTags_); + } + onChanged(); + } + internalGetMutableProperties().mergeFrom(other.internalGetProperties()); + if (other.hasLoggingConfig()) { + 
mergeLoggingConfig(other.getLoggingConfig()); + } + switch (other.getQueriesCase()) { + case QUERY_FILE_URI: + { + queriesCase_ = 1; + queries_ = other.queries_; + onChanged(); + break; + } + case QUERY_LIST: + { + mergeQueryList(other.getQueryList()); + break; + } + case QUERIES_NOT_SET: + { + break; + } + } + this.mergeUnknownFields(other.unknownFields); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + com.google.cloud.dataproc.v1.PrestoJob parsedMessage = null; + try { + parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + parsedMessage = (com.google.cloud.dataproc.v1.PrestoJob) e.getUnfinishedMessage(); + throw e.unwrapIOException(); + } finally { + if (parsedMessage != null) { + mergeFrom(parsedMessage); + } + } + return this; + } + + private int queriesCase_ = 0; + private java.lang.Object queries_; + + public QueriesCase getQueriesCase() { + return QueriesCase.forNumber(queriesCase_); + } + + public Builder clearQueries() { + queriesCase_ = 0; + queries_ = null; + onChanged(); + return this; + } + + private int bitField0_; + + /** + * + * + *
+     * The HCFS URI of the script that contains SQL queries.
+     * 
+ * + * string query_file_uri = 1; + * + * @return The queryFileUri. + */ + public java.lang.String getQueryFileUri() { + java.lang.Object ref = ""; + if (queriesCase_ == 1) { + ref = queries_; + } + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + if (queriesCase_ == 1) { + queries_ = s; + } + return s; + } else { + return (java.lang.String) ref; + } + } + /** + * + * + *
+     * The HCFS URI of the script that contains SQL queries.
+     * 
+ * + * string query_file_uri = 1; + * + * @return The bytes for queryFileUri. + */ + public com.google.protobuf.ByteString getQueryFileUriBytes() { + java.lang.Object ref = ""; + if (queriesCase_ == 1) { + ref = queries_; + } + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + if (queriesCase_ == 1) { + queries_ = b; + } + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + /** + * + * + *
+     * The HCFS URI of the script that contains SQL queries.
+     * 
+ * + * string query_file_uri = 1; + * + * @param value The queryFileUri to set. + * @return This builder for chaining. + */ + public Builder setQueryFileUri(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + queriesCase_ = 1; + queries_ = value; + onChanged(); + return this; + } + /** + * + * + *
+     * The HCFS URI of the script that contains SQL queries.
+     * 
+ * + * string query_file_uri = 1; + * + * @return This builder for chaining. + */ + public Builder clearQueryFileUri() { + if (queriesCase_ == 1) { + queriesCase_ = 0; + queries_ = null; + onChanged(); + } + return this; + } + /** + * + * + *
+     * The HCFS URI of the script that contains SQL queries.
+     * 
+ * + * string query_file_uri = 1; + * + * @param value The bytes for queryFileUri to set. + * @return This builder for chaining. + */ + public Builder setQueryFileUriBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + queriesCase_ = 1; + queries_ = value; + onChanged(); + return this; + } + + private com.google.protobuf.SingleFieldBuilderV3< + com.google.cloud.dataproc.v1.QueryList, + com.google.cloud.dataproc.v1.QueryList.Builder, + com.google.cloud.dataproc.v1.QueryListOrBuilder> + queryListBuilder_; + /** + * + * + *
+     * A list of queries.
+     * 
+ * + * .google.cloud.dataproc.v1.QueryList query_list = 2; + * + * @return Whether the queryList field is set. + */ + public boolean hasQueryList() { + return queriesCase_ == 2; + } + /** + * + * + *
+     * A list of queries.
+     * 
+ * + * .google.cloud.dataproc.v1.QueryList query_list = 2; + * + * @return The queryList. + */ + public com.google.cloud.dataproc.v1.QueryList getQueryList() { + if (queryListBuilder_ == null) { + if (queriesCase_ == 2) { + return (com.google.cloud.dataproc.v1.QueryList) queries_; + } + return com.google.cloud.dataproc.v1.QueryList.getDefaultInstance(); + } else { + if (queriesCase_ == 2) { + return queryListBuilder_.getMessage(); + } + return com.google.cloud.dataproc.v1.QueryList.getDefaultInstance(); + } + } + /** + * + * + *
+     * A list of queries.
+     * 
+ * + * .google.cloud.dataproc.v1.QueryList query_list = 2; + */ + public Builder setQueryList(com.google.cloud.dataproc.v1.QueryList value) { + if (queryListBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + queries_ = value; + onChanged(); + } else { + queryListBuilder_.setMessage(value); + } + queriesCase_ = 2; + return this; + } + /** + * + * + *
+     * A list of queries.
+     * 
+ * + * .google.cloud.dataproc.v1.QueryList query_list = 2; + */ + public Builder setQueryList(com.google.cloud.dataproc.v1.QueryList.Builder builderForValue) { + if (queryListBuilder_ == null) { + queries_ = builderForValue.build(); + onChanged(); + } else { + queryListBuilder_.setMessage(builderForValue.build()); + } + queriesCase_ = 2; + return this; + } + /** + * + * + *
+     * A list of queries.
+     * 
+ * + * .google.cloud.dataproc.v1.QueryList query_list = 2; + */ + public Builder mergeQueryList(com.google.cloud.dataproc.v1.QueryList value) { + if (queryListBuilder_ == null) { + if (queriesCase_ == 2 + && queries_ != com.google.cloud.dataproc.v1.QueryList.getDefaultInstance()) { + queries_ = + com.google.cloud.dataproc.v1.QueryList.newBuilder( + (com.google.cloud.dataproc.v1.QueryList) queries_) + .mergeFrom(value) + .buildPartial(); + } else { + queries_ = value; + } + onChanged(); + } else { + if (queriesCase_ == 2) { + queryListBuilder_.mergeFrom(value); + } + queryListBuilder_.setMessage(value); + } + queriesCase_ = 2; + return this; + } + /** + * + * + *
+     * A list of queries.
+     * 
+ * + * .google.cloud.dataproc.v1.QueryList query_list = 2; + */ + public Builder clearQueryList() { + if (queryListBuilder_ == null) { + if (queriesCase_ == 2) { + queriesCase_ = 0; + queries_ = null; + onChanged(); + } + } else { + if (queriesCase_ == 2) { + queriesCase_ = 0; + queries_ = null; + } + queryListBuilder_.clear(); + } + return this; + } + /** + * + * + *
+     * A list of queries.
+     * 
+ * + * .google.cloud.dataproc.v1.QueryList query_list = 2; + */ + public com.google.cloud.dataproc.v1.QueryList.Builder getQueryListBuilder() { + return getQueryListFieldBuilder().getBuilder(); + } + /** + * + * + *
+     * A list of queries.
+     * 
+ * + * .google.cloud.dataproc.v1.QueryList query_list = 2; + */ + public com.google.cloud.dataproc.v1.QueryListOrBuilder getQueryListOrBuilder() { + if ((queriesCase_ == 2) && (queryListBuilder_ != null)) { + return queryListBuilder_.getMessageOrBuilder(); + } else { + if (queriesCase_ == 2) { + return (com.google.cloud.dataproc.v1.QueryList) queries_; + } + return com.google.cloud.dataproc.v1.QueryList.getDefaultInstance(); + } + } + /** + * + * + *
+     * A list of queries.
+     * 
+ * + * .google.cloud.dataproc.v1.QueryList query_list = 2; + */ + private com.google.protobuf.SingleFieldBuilderV3< + com.google.cloud.dataproc.v1.QueryList, + com.google.cloud.dataproc.v1.QueryList.Builder, + com.google.cloud.dataproc.v1.QueryListOrBuilder> + getQueryListFieldBuilder() { + if (queryListBuilder_ == null) { + if (!(queriesCase_ == 2)) { + queries_ = com.google.cloud.dataproc.v1.QueryList.getDefaultInstance(); + } + queryListBuilder_ = + new com.google.protobuf.SingleFieldBuilderV3< + com.google.cloud.dataproc.v1.QueryList, + com.google.cloud.dataproc.v1.QueryList.Builder, + com.google.cloud.dataproc.v1.QueryListOrBuilder>( + (com.google.cloud.dataproc.v1.QueryList) queries_, + getParentForChildren(), + isClean()); + queries_ = null; + } + queriesCase_ = 2; + onChanged(); + ; + return queryListBuilder_; + } + + private boolean continueOnFailure_; + /** + * + * + *
+     * Optional. Whether to continue executing queries if a query fails.
+     * The default value is `false`. Setting to `true` can be useful when
+     * executing independent parallel queries.
+     * 
+ * + * bool continue_on_failure = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The continueOnFailure. + */ + public boolean getContinueOnFailure() { + return continueOnFailure_; + } + /** + * + * + *
+     * Optional. Whether to continue executing queries if a query fails.
+     * The default value is `false`. Setting to `true` can be useful when
+     * executing independent parallel queries.
+     * 
+ * + * bool continue_on_failure = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + * @param value The continueOnFailure to set. + * @return This builder for chaining. + */ + public Builder setContinueOnFailure(boolean value) { + + continueOnFailure_ = value; + onChanged(); + return this; + } + /** + * + * + *
+     * Optional. Whether to continue executing queries if a query fails.
+     * The default value is `false`. Setting to `true` can be useful when
+     * executing independent parallel queries.
+     * 
+ * + * bool continue_on_failure = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return This builder for chaining. + */ + public Builder clearContinueOnFailure() { + + continueOnFailure_ = false; + onChanged(); + return this; + } + + private java.lang.Object outputFormat_ = ""; + /** + * + * + *
+     * Optional. The format in which query output will be displayed. See the
+     * Presto documentation for supported output formats.
+     * 
+ * + * string output_format = 4 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The outputFormat. + */ + public java.lang.String getOutputFormat() { + java.lang.Object ref = outputFormat_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + outputFormat_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + /** + * + * + *
+     * Optional. The format in which query output will be displayed. See the
+     * Presto documentation for supported output formats.
+     * 
+ * + * string output_format = 4 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The bytes for outputFormat. + */ + public com.google.protobuf.ByteString getOutputFormatBytes() { + java.lang.Object ref = outputFormat_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + outputFormat_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + /** + * + * + *
+     * Optional. The format in which query output will be displayed. See the
+     * Presto documentation for supported output formats
+     * 
+ * + * string output_format = 4 [(.google.api.field_behavior) = OPTIONAL]; + * + * @param value The outputFormat to set. + * @return This builder for chaining. + */ + public Builder setOutputFormat(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + + outputFormat_ = value; + onChanged(); + return this; + } + /** + * + * + *
+     * Optional. The format in which query output will be displayed. See the
+     * Presto documentation for supported output formats
+     * 
+ * + * string output_format = 4 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return This builder for chaining. + */ + public Builder clearOutputFormat() { + + outputFormat_ = getDefaultInstance().getOutputFormat(); + onChanged(); + return this; + } + /** + * + * + *
+     * Optional. The format in which query output will be displayed. See the
+     * Presto documentation for supported output formats
+     * 
+ * + * string output_format = 4 [(.google.api.field_behavior) = OPTIONAL]; + * + * @param value The bytes for outputFormat to set. + * @return This builder for chaining. + */ + public Builder setOutputFormatBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + + outputFormat_ = value; + onChanged(); + return this; + } + + private com.google.protobuf.LazyStringList clientTags_ = + com.google.protobuf.LazyStringArrayList.EMPTY; + + private void ensureClientTagsIsMutable() { + if (!((bitField0_ & 0x00000001) != 0)) { + clientTags_ = new com.google.protobuf.LazyStringArrayList(clientTags_); + bitField0_ |= 0x00000001; + } + } + /** + * + * + *
+     * Optional. Presto client tags to attach to this query
+     * 
+ * + * repeated string client_tags = 5 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return A list containing the clientTags. + */ + public com.google.protobuf.ProtocolStringList getClientTagsList() { + return clientTags_.getUnmodifiableView(); + } + /** + * + * + *
+     * Optional. Presto client tags to attach to this query
+     * 
+ * + * repeated string client_tags = 5 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The count of clientTags. + */ + public int getClientTagsCount() { + return clientTags_.size(); + } + /** + * + * + *
+     * Optional. Presto client tags to attach to this query
+     * 
+ * + * repeated string client_tags = 5 [(.google.api.field_behavior) = OPTIONAL]; + * + * @param index The index of the element to return. + * @return The clientTags at the given index. + */ + public java.lang.String getClientTags(int index) { + return clientTags_.get(index); + } + /** + * + * + *
+     * Optional. Presto client tags to attach to this query
+     * 
+ * + * repeated string client_tags = 5 [(.google.api.field_behavior) = OPTIONAL]; + * + * @param index The index of the value to return. + * @return The bytes of the clientTags at the given index. + */ + public com.google.protobuf.ByteString getClientTagsBytes(int index) { + return clientTags_.getByteString(index); + } + /** + * + * + *
+     * Optional. Presto client tags to attach to this query
+     * 
+ * + * repeated string client_tags = 5 [(.google.api.field_behavior) = OPTIONAL]; + * + * @param index The index to set the value at. + * @param value The clientTags to set. + * @return This builder for chaining. + */ + public Builder setClientTags(int index, java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + ensureClientTagsIsMutable(); + clientTags_.set(index, value); + onChanged(); + return this; + } + /** + * + * + *
+     * Optional. Presto client tags to attach to this query
+     * 
+ * + * repeated string client_tags = 5 [(.google.api.field_behavior) = OPTIONAL]; + * + * @param value The clientTags to add. + * @return This builder for chaining. + */ + public Builder addClientTags(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + ensureClientTagsIsMutable(); + clientTags_.add(value); + onChanged(); + return this; + } + /** + * + * + *
+     * Optional. Presto client tags to attach to this query
+     * 
+ * + * repeated string client_tags = 5 [(.google.api.field_behavior) = OPTIONAL]; + * + * @param values The clientTags to add. + * @return This builder for chaining. + */ + public Builder addAllClientTags(java.lang.Iterable values) { + ensureClientTagsIsMutable(); + com.google.protobuf.AbstractMessageLite.Builder.addAll(values, clientTags_); + onChanged(); + return this; + } + /** + * + * + *
+     * Optional. Presto client tags to attach to this query
+     * 
+ * + * repeated string client_tags = 5 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return This builder for chaining. + */ + public Builder clearClientTags() { + clientTags_ = com.google.protobuf.LazyStringArrayList.EMPTY; + bitField0_ = (bitField0_ & ~0x00000001); + onChanged(); + return this; + } + /** + * + * + *
+     * Optional. Presto client tags to attach to this query
+     * 
+ * + * repeated string client_tags = 5 [(.google.api.field_behavior) = OPTIONAL]; + * + * @param value The bytes of the clientTags to add. + * @return This builder for chaining. + */ + public Builder addClientTagsBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + ensureClientTagsIsMutable(); + clientTags_.add(value); + onChanged(); + return this; + } + + private com.google.protobuf.MapField properties_; + + private com.google.protobuf.MapField + internalGetProperties() { + if (properties_ == null) { + return com.google.protobuf.MapField.emptyMapField( + PropertiesDefaultEntryHolder.defaultEntry); + } + return properties_; + } + + private com.google.protobuf.MapField + internalGetMutableProperties() { + onChanged(); + ; + if (properties_ == null) { + properties_ = + com.google.protobuf.MapField.newMapField(PropertiesDefaultEntryHolder.defaultEntry); + } + if (!properties_.isMutable()) { + properties_ = properties_.copy(); + } + return properties_; + } + + public int getPropertiesCount() { + return internalGetProperties().getMap().size(); + } + /** + * + * + *
+     * Optional. A mapping of property names to values. Used to set Presto
+     * [session properties](https://prestodb.io/docs/current/sql/set-session.html)
+     * Equivalent to using the --session flag in the Presto CLI
+     * 
+ * + * map<string, string> properties = 6 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public boolean containsProperties(java.lang.String key) { + if (key == null) { + throw new java.lang.NullPointerException(); + } + return internalGetProperties().getMap().containsKey(key); + } + /** Use {@link #getPropertiesMap()} instead. */ + @java.lang.Deprecated + public java.util.Map getProperties() { + return getPropertiesMap(); + } + /** + * + * + *
+     * Optional. A mapping of property names to values. Used to set Presto
+     * [session properties](https://prestodb.io/docs/current/sql/set-session.html)
+     * Equivalent to using the --session flag in the Presto CLI
+     * 
+ * + * map<string, string> properties = 6 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public java.util.Map getPropertiesMap() { + return internalGetProperties().getMap(); + } + /** + * + * + *
+     * Optional. A mapping of property names to values. Used to set Presto
+     * [session properties](https://prestodb.io/docs/current/sql/set-session.html)
+     * Equivalent to using the --session flag in the Presto CLI
+     * 
+ * + * map<string, string> properties = 6 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public java.lang.String getPropertiesOrDefault( + java.lang.String key, java.lang.String defaultValue) { + if (key == null) { + throw new java.lang.NullPointerException(); + } + java.util.Map map = internalGetProperties().getMap(); + return map.containsKey(key) ? map.get(key) : defaultValue; + } + /** + * + * + *
+     * Optional. A mapping of property names to values. Used to set Presto
+     * [session properties](https://prestodb.io/docs/current/sql/set-session.html)
+     * Equivalent to using the --session flag in the Presto CLI
+     * 
+ * + * map<string, string> properties = 6 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public java.lang.String getPropertiesOrThrow(java.lang.String key) { + if (key == null) { + throw new java.lang.NullPointerException(); + } + java.util.Map map = internalGetProperties().getMap(); + if (!map.containsKey(key)) { + throw new java.lang.IllegalArgumentException(); + } + return map.get(key); + } + + public Builder clearProperties() { + internalGetMutableProperties().getMutableMap().clear(); + return this; + } + /** + * + * + *
+     * Optional. A mapping of property names to values. Used to set Presto
+     * [session properties](https://prestodb.io/docs/current/sql/set-session.html)
+     * Equivalent to using the --session flag in the Presto CLI
+     * 
+ * + * map<string, string> properties = 6 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder removeProperties(java.lang.String key) { + if (key == null) { + throw new java.lang.NullPointerException(); + } + internalGetMutableProperties().getMutableMap().remove(key); + return this; + } + /** Use alternate mutation accessors instead. */ + @java.lang.Deprecated + public java.util.Map getMutableProperties() { + return internalGetMutableProperties().getMutableMap(); + } + /** + * + * + *
+     * Optional. A mapping of property names to values. Used to set Presto
+     * [session properties](https://prestodb.io/docs/current/sql/set-session.html)
+     * Equivalent to using the --session flag in the Presto CLI
+     * 
+ * + * map<string, string> properties = 6 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder putProperties(java.lang.String key, java.lang.String value) { + if (key == null) { + throw new java.lang.NullPointerException(); + } + if (value == null) { + throw new java.lang.NullPointerException(); + } + internalGetMutableProperties().getMutableMap().put(key, value); + return this; + } + /** + * + * + *
+     * Optional. A mapping of property names to values. Used to set Presto
+     * [session properties](https://prestodb.io/docs/current/sql/set-session.html)
+     * Equivalent to using the --session flag in the Presto CLI
+     * 
+ * + * map<string, string> properties = 6 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder putAllProperties(java.util.Map values) { + internalGetMutableProperties().getMutableMap().putAll(values); + return this; + } + + private com.google.cloud.dataproc.v1.LoggingConfig loggingConfig_; + private com.google.protobuf.SingleFieldBuilderV3< + com.google.cloud.dataproc.v1.LoggingConfig, + com.google.cloud.dataproc.v1.LoggingConfig.Builder, + com.google.cloud.dataproc.v1.LoggingConfigOrBuilder> + loggingConfigBuilder_; + /** + * + * + *
+     * Optional. The runtime log config for job execution.
+     * 
+ * + * + * .google.cloud.dataproc.v1.LoggingConfig logging_config = 7 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return Whether the loggingConfig field is set. + */ + public boolean hasLoggingConfig() { + return loggingConfigBuilder_ != null || loggingConfig_ != null; + } + /** + * + * + *
+     * Optional. The runtime log config for job execution.
+     * 
+ * + * + * .google.cloud.dataproc.v1.LoggingConfig logging_config = 7 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return The loggingConfig. + */ + public com.google.cloud.dataproc.v1.LoggingConfig getLoggingConfig() { + if (loggingConfigBuilder_ == null) { + return loggingConfig_ == null + ? com.google.cloud.dataproc.v1.LoggingConfig.getDefaultInstance() + : loggingConfig_; + } else { + return loggingConfigBuilder_.getMessage(); + } + } + /** + * + * + *
+     * Optional. The runtime log config for job execution.
+     * 
+ * + * + * .google.cloud.dataproc.v1.LoggingConfig logging_config = 7 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder setLoggingConfig(com.google.cloud.dataproc.v1.LoggingConfig value) { + if (loggingConfigBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + loggingConfig_ = value; + onChanged(); + } else { + loggingConfigBuilder_.setMessage(value); + } + + return this; + } + /** + * + * + *
+     * Optional. The runtime log config for job execution.
+     * 
+ * + * + * .google.cloud.dataproc.v1.LoggingConfig logging_config = 7 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder setLoggingConfig( + com.google.cloud.dataproc.v1.LoggingConfig.Builder builderForValue) { + if (loggingConfigBuilder_ == null) { + loggingConfig_ = builderForValue.build(); + onChanged(); + } else { + loggingConfigBuilder_.setMessage(builderForValue.build()); + } + + return this; + } + /** + * + * + *
+     * Optional. The runtime log config for job execution.
+     * 
+ * + * + * .google.cloud.dataproc.v1.LoggingConfig logging_config = 7 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder mergeLoggingConfig(com.google.cloud.dataproc.v1.LoggingConfig value) { + if (loggingConfigBuilder_ == null) { + if (loggingConfig_ != null) { + loggingConfig_ = + com.google.cloud.dataproc.v1.LoggingConfig.newBuilder(loggingConfig_) + .mergeFrom(value) + .buildPartial(); + } else { + loggingConfig_ = value; + } + onChanged(); + } else { + loggingConfigBuilder_.mergeFrom(value); + } + + return this; + } + /** + * + * + *
+     * Optional. The runtime log config for job execution.
+     * 
+ * + * + * .google.cloud.dataproc.v1.LoggingConfig logging_config = 7 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder clearLoggingConfig() { + if (loggingConfigBuilder_ == null) { + loggingConfig_ = null; + onChanged(); + } else { + loggingConfig_ = null; + loggingConfigBuilder_ = null; + } + + return this; + } + /** + * + * + *
+     * Optional. The runtime log config for job execution.
+     * 
+ * + * + * .google.cloud.dataproc.v1.LoggingConfig logging_config = 7 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public com.google.cloud.dataproc.v1.LoggingConfig.Builder getLoggingConfigBuilder() { + + onChanged(); + return getLoggingConfigFieldBuilder().getBuilder(); + } + /** + * + * + *
+     * Optional. The runtime log config for job execution.
+     * 
+ * + * + * .google.cloud.dataproc.v1.LoggingConfig logging_config = 7 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public com.google.cloud.dataproc.v1.LoggingConfigOrBuilder getLoggingConfigOrBuilder() { + if (loggingConfigBuilder_ != null) { + return loggingConfigBuilder_.getMessageOrBuilder(); + } else { + return loggingConfig_ == null + ? com.google.cloud.dataproc.v1.LoggingConfig.getDefaultInstance() + : loggingConfig_; + } + } + /** + * + * + *
+     * Optional. The runtime log config for job execution.
+     * 
+ * + * + * .google.cloud.dataproc.v1.LoggingConfig logging_config = 7 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + private com.google.protobuf.SingleFieldBuilderV3< + com.google.cloud.dataproc.v1.LoggingConfig, + com.google.cloud.dataproc.v1.LoggingConfig.Builder, + com.google.cloud.dataproc.v1.LoggingConfigOrBuilder> + getLoggingConfigFieldBuilder() { + if (loggingConfigBuilder_ == null) { + loggingConfigBuilder_ = + new com.google.protobuf.SingleFieldBuilderV3< + com.google.cloud.dataproc.v1.LoggingConfig, + com.google.cloud.dataproc.v1.LoggingConfig.Builder, + com.google.cloud.dataproc.v1.LoggingConfigOrBuilder>( + getLoggingConfig(), getParentForChildren(), isClean()); + loggingConfig_ = null; + } + return loggingConfigBuilder_; + } + + @java.lang.Override + public final Builder setUnknownFields(final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.setUnknownFields(unknownFields); + } + + @java.lang.Override + public final Builder mergeUnknownFields( + final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.mergeUnknownFields(unknownFields); + } + + // @@protoc_insertion_point(builder_scope:google.cloud.dataproc.v1.PrestoJob) + } + + // @@protoc_insertion_point(class_scope:google.cloud.dataproc.v1.PrestoJob) + private static final com.google.cloud.dataproc.v1.PrestoJob DEFAULT_INSTANCE; + + static { + DEFAULT_INSTANCE = new com.google.cloud.dataproc.v1.PrestoJob(); + } + + public static com.google.cloud.dataproc.v1.PrestoJob getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + @java.lang.Override + public PrestoJob parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return new PrestoJob(input, extensionRegistry); + } + }; + + public static com.google.protobuf.Parser 
parser() { + return PARSER; + } + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override + public com.google.cloud.dataproc.v1.PrestoJob getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } +} diff --git a/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/PrestoJobOrBuilder.java b/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/PrestoJobOrBuilder.java new file mode 100644 index 00000000..c5c3173a --- /dev/null +++ b/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/PrestoJobOrBuilder.java @@ -0,0 +1,290 @@ +/* + * Copyright 2020 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// source: google/cloud/dataproc/v1/jobs.proto + +package com.google.cloud.dataproc.v1; + +public interface PrestoJobOrBuilder + extends + // @@protoc_insertion_point(interface_extends:google.cloud.dataproc.v1.PrestoJob) + com.google.protobuf.MessageOrBuilder { + + /** + * + * + *
+   * The HCFS URI of the script that contains SQL queries.
+   * 
+ * + * string query_file_uri = 1; + * + * @return The queryFileUri. + */ + java.lang.String getQueryFileUri(); + /** + * + * + *
+   * The HCFS URI of the script that contains SQL queries.
+   * 
+ * + * string query_file_uri = 1; + * + * @return The bytes for queryFileUri. + */ + com.google.protobuf.ByteString getQueryFileUriBytes(); + + /** + * + * + *
+   * A list of queries.
+   * 
+ * + * .google.cloud.dataproc.v1.QueryList query_list = 2; + * + * @return Whether the queryList field is set. + */ + boolean hasQueryList(); + /** + * + * + *
+   * A list of queries.
+   * 
+ * + * .google.cloud.dataproc.v1.QueryList query_list = 2; + * + * @return The queryList. + */ + com.google.cloud.dataproc.v1.QueryList getQueryList(); + /** + * + * + *
+   * A list of queries.
+   * 
+ * + * .google.cloud.dataproc.v1.QueryList query_list = 2; + */ + com.google.cloud.dataproc.v1.QueryListOrBuilder getQueryListOrBuilder(); + + /** + * + * + *
+   * Optional. Whether to continue executing queries if a query fails.
+   * The default value is `false`. Setting to `true` can be useful when
+   * executing independent parallel queries.
+   * 
+ * + * bool continue_on_failure = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The continueOnFailure. + */ + boolean getContinueOnFailure(); + + /** + * + * + *
+   * Optional. The format in which query output will be displayed. See the
+   * Presto documentation for supported output formats
+   * 
+ * + * string output_format = 4 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The outputFormat. + */ + java.lang.String getOutputFormat(); + /** + * + * + *
+   * Optional. The format in which query output will be displayed. See the
+   * Presto documentation for supported output formats
+   * 
+ * + * string output_format = 4 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The bytes for outputFormat. + */ + com.google.protobuf.ByteString getOutputFormatBytes(); + + /** + * + * + *
+   * Optional. Presto client tags to attach to this query
+   * 
+ * + * repeated string client_tags = 5 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return A list containing the clientTags. + */ + java.util.List getClientTagsList(); + /** + * + * + *
+   * Optional. Presto client tags to attach to this query
+   * 
+ * + * repeated string client_tags = 5 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The count of clientTags. + */ + int getClientTagsCount(); + /** + * + * + *
+   * Optional. Presto client tags to attach to this query
+   * 
+ * + * repeated string client_tags = 5 [(.google.api.field_behavior) = OPTIONAL]; + * + * @param index The index of the element to return. + * @return The clientTags at the given index. + */ + java.lang.String getClientTags(int index); + /** + * + * + *
+   * Optional. Presto client tags to attach to this query
+   * 
+ * + * repeated string client_tags = 5 [(.google.api.field_behavior) = OPTIONAL]; + * + * @param index The index of the value to return. + * @return The bytes of the clientTags at the given index. + */ + com.google.protobuf.ByteString getClientTagsBytes(int index); + + /** + * + * + *
+   * Optional. A mapping of property names to values. Used to set Presto
+   * [session properties](https://prestodb.io/docs/current/sql/set-session.html)
+   * Equivalent to using the --session flag in the Presto CLI
+   * 
+ * + * map<string, string> properties = 6 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + int getPropertiesCount(); + /** + * + * + *
+   * Optional. A mapping of property names to values. Used to set Presto
+   * [session properties](https://prestodb.io/docs/current/sql/set-session.html)
+   * Equivalent to using the --session flag in the Presto CLI
+   * 
+ * + * map<string, string> properties = 6 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + boolean containsProperties(java.lang.String key); + /** Use {@link #getPropertiesMap()} instead. */ + @java.lang.Deprecated + java.util.Map getProperties(); + /** + * + * + *
+   * Optional. A mapping of property names to values. Used to set Presto
+   * [session properties](https://prestodb.io/docs/current/sql/set-session.html)
+   * Equivalent to using the --session flag in the Presto CLI
+   * 
+ * + * map<string, string> properties = 6 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + java.util.Map getPropertiesMap(); + /** + * + * + *
+   * Optional. A mapping of property names to values. Used to set Presto
+   * [session properties](https://prestodb.io/docs/current/sql/set-session.html)
+   * Equivalent to using the --session flag in the Presto CLI
+   * 
+ * + * map<string, string> properties = 6 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + java.lang.String getPropertiesOrDefault(java.lang.String key, java.lang.String defaultValue); + /** + * + * + *
+   * Optional. A mapping of property names to values. Used to set Presto
+   * [session properties](https://prestodb.io/docs/current/sql/set-session.html)
+   * Equivalent to using the --session flag in the Presto CLI
+   * 
+ * + * map<string, string> properties = 6 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + java.lang.String getPropertiesOrThrow(java.lang.String key); + + /** + * + * + *
+   * Optional. The runtime log config for job execution.
+   * 
+ * + * + * .google.cloud.dataproc.v1.LoggingConfig logging_config = 7 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return Whether the loggingConfig field is set. + */ + boolean hasLoggingConfig(); + /** + * + * + *
+   * Optional. The runtime log config for job execution.
+   * 
+ * + * + * .google.cloud.dataproc.v1.LoggingConfig logging_config = 7 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return The loggingConfig. + */ + com.google.cloud.dataproc.v1.LoggingConfig getLoggingConfig(); + /** + * + * + *
+   * Optional. The runtime log config for job execution.
+   * 
+ * + * + * .google.cloud.dataproc.v1.LoggingConfig logging_config = 7 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + com.google.cloud.dataproc.v1.LoggingConfigOrBuilder getLoggingConfigOrBuilder(); + + public com.google.cloud.dataproc.v1.PrestoJob.QueriesCase getQueriesCase(); +} diff --git a/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/ReservationAffinity.java b/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/ReservationAffinity.java new file mode 100644 index 00000000..b287ce7e --- /dev/null +++ b/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/ReservationAffinity.java @@ -0,0 +1,1240 @@ +/* + * Copyright 2020 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// source: google/cloud/dataproc/v1/clusters.proto + +package com.google.cloud.dataproc.v1; + +/** + * + * + *
+ * Reservation Affinity for consuming Zonal reservation.
+ * 
+ * + * Protobuf type {@code google.cloud.dataproc.v1.ReservationAffinity} + */ +public final class ReservationAffinity extends com.google.protobuf.GeneratedMessageV3 + implements + // @@protoc_insertion_point(message_implements:google.cloud.dataproc.v1.ReservationAffinity) + ReservationAffinityOrBuilder { + private static final long serialVersionUID = 0L; + // Use ReservationAffinity.newBuilder() to construct. + private ReservationAffinity(com.google.protobuf.GeneratedMessageV3.Builder builder) { + super(builder); + } + + private ReservationAffinity() { + consumeReservationType_ = 0; + key_ = ""; + values_ = com.google.protobuf.LazyStringArrayList.EMPTY; + } + + @java.lang.Override + @SuppressWarnings({"unused"}) + protected java.lang.Object newInstance(UnusedPrivateParameter unused) { + return new ReservationAffinity(); + } + + @java.lang.Override + public final com.google.protobuf.UnknownFieldSet getUnknownFields() { + return this.unknownFields; + } + + private ReservationAffinity( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + this(); + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + int mutable_bitField0_ = 0; + com.google.protobuf.UnknownFieldSet.Builder unknownFields = + com.google.protobuf.UnknownFieldSet.newBuilder(); + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 8: + { + int rawValue = input.readEnum(); + + consumeReservationType_ = rawValue; + break; + } + case 18: + { + java.lang.String s = input.readStringRequireUtf8(); + + key_ = s; + break; + } + case 26: + { + java.lang.String s = input.readStringRequireUtf8(); + if (!((mutable_bitField0_ & 0x00000001) != 0)) { + values_ = new com.google.protobuf.LazyStringArrayList(); + mutable_bitField0_ |= 0x00000001; + } + values_.add(s); + break; + } + default: 
+ { + if (!parseUnknownField(input, unknownFields, extensionRegistry, tag)) { + done = true; + } + break; + } + } + } + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(this); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException(e).setUnfinishedMessage(this); + } finally { + if (((mutable_bitField0_ & 0x00000001) != 0)) { + values_ = values_.getUnmodifiableView(); + } + this.unknownFields = unknownFields.build(); + makeExtensionsImmutable(); + } + } + + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.cloud.dataproc.v1.ClustersProto + .internal_static_google_cloud_dataproc_v1_ReservationAffinity_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.cloud.dataproc.v1.ClustersProto + .internal_static_google_cloud_dataproc_v1_ReservationAffinity_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.cloud.dataproc.v1.ReservationAffinity.class, + com.google.cloud.dataproc.v1.ReservationAffinity.Builder.class); + } + + /** + * + * + *
+   * Indicates whether to consume capacity from an reservation or not.
+   * 
+ * + * Protobuf enum {@code google.cloud.dataproc.v1.ReservationAffinity.Type} + */ + public enum Type implements com.google.protobuf.ProtocolMessageEnum { + /** TYPE_UNSPECIFIED = 0; */ + TYPE_UNSPECIFIED(0), + /** + * + * + *
+     * Do not consume from any allocated capacity.
+     * 
+ * + * NO_RESERVATION = 1; + */ + NO_RESERVATION(1), + /** + * + * + *
+     * Consume any reservation available.
+     * 
+ * + * ANY_RESERVATION = 2; + */ + ANY_RESERVATION(2), + /** + * + * + *
+     * Must consume from a specific reservation. Must specify key value fields
+     * for specifying the reservations.
+     * 
+ * + * SPECIFIC_RESERVATION = 3; + */ + SPECIFIC_RESERVATION(3), + UNRECOGNIZED(-1), + ; + + /** TYPE_UNSPECIFIED = 0; */ + public static final int TYPE_UNSPECIFIED_VALUE = 0; + /** + * + * + *
+     * Do not consume from any allocated capacity.
+     * 
+ * + * NO_RESERVATION = 1; + */ + public static final int NO_RESERVATION_VALUE = 1; + /** + * + * + *
+     * Consume any reservation available.
+     * 
+ * + * ANY_RESERVATION = 2; + */ + public static final int ANY_RESERVATION_VALUE = 2; + /** + * + * + *
+     * Must consume from a specific reservation. Must specify key value fields
+     * for specifying the reservations.
+     * 
+ * + * SPECIFIC_RESERVATION = 3; + */ + public static final int SPECIFIC_RESERVATION_VALUE = 3; + + public final int getNumber() { + if (this == UNRECOGNIZED) { + throw new java.lang.IllegalArgumentException( + "Can't get the number of an unknown enum value."); + } + return value; + } + + /** + * @param value The numeric wire value of the corresponding enum entry. + * @return The enum associated with the given numeric wire value. + * @deprecated Use {@link #forNumber(int)} instead. + */ + @java.lang.Deprecated + public static Type valueOf(int value) { + return forNumber(value); + } + + /** + * @param value The numeric wire value of the corresponding enum entry. + * @return The enum associated with the given numeric wire value. + */ + public static Type forNumber(int value) { + switch (value) { + case 0: + return TYPE_UNSPECIFIED; + case 1: + return NO_RESERVATION; + case 2: + return ANY_RESERVATION; + case 3: + return SPECIFIC_RESERVATION; + default: + return null; + } + } + + public static com.google.protobuf.Internal.EnumLiteMap internalGetValueMap() { + return internalValueMap; + } + + private static final com.google.protobuf.Internal.EnumLiteMap internalValueMap = + new com.google.protobuf.Internal.EnumLiteMap() { + public Type findValueByNumber(int number) { + return Type.forNumber(number); + } + }; + + public final com.google.protobuf.Descriptors.EnumValueDescriptor getValueDescriptor() { + return getDescriptor().getValues().get(ordinal()); + } + + public final com.google.protobuf.Descriptors.EnumDescriptor getDescriptorForType() { + return getDescriptor(); + } + + public static final com.google.protobuf.Descriptors.EnumDescriptor getDescriptor() { + return com.google.cloud.dataproc.v1.ReservationAffinity.getDescriptor().getEnumTypes().get(0); + } + + private static final Type[] VALUES = values(); + + public static Type valueOf(com.google.protobuf.Descriptors.EnumValueDescriptor desc) { + if (desc.getType() != getDescriptor()) { + throw new 
java.lang.IllegalArgumentException("EnumValueDescriptor is not for this type."); + } + if (desc.getIndex() == -1) { + return UNRECOGNIZED; + } + return VALUES[desc.getIndex()]; + } + + private final int value; + + private Type(int value) { + this.value = value; + } + + // @@protoc_insertion_point(enum_scope:google.cloud.dataproc.v1.ReservationAffinity.Type) + } + + public static final int CONSUME_RESERVATION_TYPE_FIELD_NUMBER = 1; + private int consumeReservationType_; + /** + * + * + *
+   * Optional. Type of reservation to consume
+   * 
+ * + * + * .google.cloud.dataproc.v1.ReservationAffinity.Type consume_reservation_type = 1 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return The enum numeric value on the wire for consumeReservationType. + */ + public int getConsumeReservationTypeValue() { + return consumeReservationType_; + } + /** + * + * + *
+   * Optional. Type of reservation to consume
+   * 
+ * + * + * .google.cloud.dataproc.v1.ReservationAffinity.Type consume_reservation_type = 1 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return The consumeReservationType. + */ + public com.google.cloud.dataproc.v1.ReservationAffinity.Type getConsumeReservationType() { + @SuppressWarnings("deprecation") + com.google.cloud.dataproc.v1.ReservationAffinity.Type result = + com.google.cloud.dataproc.v1.ReservationAffinity.Type.valueOf(consumeReservationType_); + return result == null + ? com.google.cloud.dataproc.v1.ReservationAffinity.Type.UNRECOGNIZED + : result; + } + + public static final int KEY_FIELD_NUMBER = 2; + private volatile java.lang.Object key_; + /** + * + * + *
+   * Optional. Corresponds to the label key of reservation resource.
+   * 
+ * + * string key = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The key. + */ + public java.lang.String getKey() { + java.lang.Object ref = key_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + key_ = s; + return s; + } + } + /** + * + * + *
+   * Optional. Corresponds to the label key of reservation resource.
+   * 
+ * + * string key = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The bytes for key. + */ + public com.google.protobuf.ByteString getKeyBytes() { + java.lang.Object ref = key_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + key_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + public static final int VALUES_FIELD_NUMBER = 3; + private com.google.protobuf.LazyStringList values_; + /** + * + * + *
+   * Optional. Corresponds to the label values of reservation resource.
+   * 
+ * + * repeated string values = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return A list containing the values. + */ + public com.google.protobuf.ProtocolStringList getValuesList() { + return values_; + } + /** + * + * + *
+   * Optional. Corresponds to the label values of reservation resource.
+   * 
+ * + * repeated string values = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The count of values. + */ + public int getValuesCount() { + return values_.size(); + } + /** + * + * + *
+   * Optional. Corresponds to the label values of reservation resource.
+   * 
+ * + * repeated string values = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + * @param index The index of the element to return. + * @return The values at the given index. + */ + public java.lang.String getValues(int index) { + return values_.get(index); + } + /** + * + * + *
+   * Optional. Corresponds to the label values of reservation resource.
+   * 
+ * + * repeated string values = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + * @param index The index of the value to return. + * @return The bytes of the values at the given index. + */ + public com.google.protobuf.ByteString getValuesBytes(int index) { + return values_.getByteString(index); + } + + private byte memoizedIsInitialized = -1; + + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { + if (consumeReservationType_ + != com.google.cloud.dataproc.v1.ReservationAffinity.Type.TYPE_UNSPECIFIED.getNumber()) { + output.writeEnum(1, consumeReservationType_); + } + if (!getKeyBytes().isEmpty()) { + com.google.protobuf.GeneratedMessageV3.writeString(output, 2, key_); + } + for (int i = 0; i < values_.size(); i++) { + com.google.protobuf.GeneratedMessageV3.writeString(output, 3, values_.getRaw(i)); + } + unknownFields.writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + if (consumeReservationType_ + != com.google.cloud.dataproc.v1.ReservationAffinity.Type.TYPE_UNSPECIFIED.getNumber()) { + size += com.google.protobuf.CodedOutputStream.computeEnumSize(1, consumeReservationType_); + } + if (!getKeyBytes().isEmpty()) { + size += com.google.protobuf.GeneratedMessageV3.computeStringSize(2, key_); + } + { + int dataSize = 0; + for (int i = 0; i < values_.size(); i++) { + dataSize += computeStringSizeNoTag(values_.getRaw(i)); + } + size += dataSize; + size += 1 * getValuesList().size(); + } + size += unknownFields.getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == 
this) { + return true; + } + if (!(obj instanceof com.google.cloud.dataproc.v1.ReservationAffinity)) { + return super.equals(obj); + } + com.google.cloud.dataproc.v1.ReservationAffinity other = + (com.google.cloud.dataproc.v1.ReservationAffinity) obj; + + if (consumeReservationType_ != other.consumeReservationType_) return false; + if (!getKey().equals(other.getKey())) return false; + if (!getValuesList().equals(other.getValuesList())) return false; + if (!unknownFields.equals(other.unknownFields)) return false; + return true; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + hash = (37 * hash) + CONSUME_RESERVATION_TYPE_FIELD_NUMBER; + hash = (53 * hash) + consumeReservationType_; + hash = (37 * hash) + KEY_FIELD_NUMBER; + hash = (53 * hash) + getKey().hashCode(); + if (getValuesCount() > 0) { + hash = (37 * hash) + VALUES_FIELD_NUMBER; + hash = (53 * hash) + getValuesList().hashCode(); + } + hash = (29 * hash) + unknownFields.hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static com.google.cloud.dataproc.v1.ReservationAffinity parseFrom(java.nio.ByteBuffer data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.cloud.dataproc.v1.ReservationAffinity parseFrom( + java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.dataproc.v1.ReservationAffinity parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.cloud.dataproc.v1.ReservationAffinity parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite 
extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.dataproc.v1.ReservationAffinity parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.cloud.dataproc.v1.ReservationAffinity parseFrom( + byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.dataproc.v1.ReservationAffinity parseFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException(PARSER, input); + } + + public static com.google.cloud.dataproc.v1.ReservationAffinity parseFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.cloud.dataproc.v1.ReservationAffinity parseDelimitedFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseDelimitedWithIOException(PARSER, input); + } + + public static com.google.cloud.dataproc.v1.ReservationAffinity parseDelimitedFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseDelimitedWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.cloud.dataproc.v1.ReservationAffinity parseFrom( + com.google.protobuf.CodedInputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException(PARSER, input); + } + + public static 
com.google.cloud.dataproc.v1.ReservationAffinity parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException( + PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { + return newBuilder(); + } + + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + + public static Builder newBuilder(com.google.cloud.dataproc.v1.ReservationAffinity prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType(com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + /** + * + * + *
+   * Reservation Affinity for consuming Zonal reservation.
+   * 
+ * + * Protobuf type {@code google.cloud.dataproc.v1.ReservationAffinity} + */ + public static final class Builder extends com.google.protobuf.GeneratedMessageV3.Builder + implements + // @@protoc_insertion_point(builder_implements:google.cloud.dataproc.v1.ReservationAffinity) + com.google.cloud.dataproc.v1.ReservationAffinityOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.cloud.dataproc.v1.ClustersProto + .internal_static_google_cloud_dataproc_v1_ReservationAffinity_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.cloud.dataproc.v1.ClustersProto + .internal_static_google_cloud_dataproc_v1_ReservationAffinity_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.cloud.dataproc.v1.ReservationAffinity.class, + com.google.cloud.dataproc.v1.ReservationAffinity.Builder.class); + } + + // Construct using com.google.cloud.dataproc.v1.ReservationAffinity.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder(com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessageV3.alwaysUseFieldBuilders) {} + } + + @java.lang.Override + public Builder clear() { + super.clear(); + consumeReservationType_ = 0; + + key_ = ""; + + values_ = com.google.protobuf.LazyStringArrayList.EMPTY; + bitField0_ = (bitField0_ & ~0x00000001); + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { + return com.google.cloud.dataproc.v1.ClustersProto + .internal_static_google_cloud_dataproc_v1_ReservationAffinity_descriptor; + } + + @java.lang.Override + public com.google.cloud.dataproc.v1.ReservationAffinity getDefaultInstanceForType() { + return 
com.google.cloud.dataproc.v1.ReservationAffinity.getDefaultInstance(); + } + + @java.lang.Override + public com.google.cloud.dataproc.v1.ReservationAffinity build() { + com.google.cloud.dataproc.v1.ReservationAffinity result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public com.google.cloud.dataproc.v1.ReservationAffinity buildPartial() { + com.google.cloud.dataproc.v1.ReservationAffinity result = + new com.google.cloud.dataproc.v1.ReservationAffinity(this); + int from_bitField0_ = bitField0_; + result.consumeReservationType_ = consumeReservationType_; + result.key_ = key_; + if (((bitField0_ & 0x00000001) != 0)) { + values_ = values_.getUnmodifiableView(); + bitField0_ = (bitField0_ & ~0x00000001); + } + result.values_ = values_; + onBuilt(); + return result; + } + + @java.lang.Override + public Builder clone() { + return super.clone(); + } + + @java.lang.Override + public Builder setField( + com.google.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { + return super.setField(field, value); + } + + @java.lang.Override + public Builder clearField(com.google.protobuf.Descriptors.FieldDescriptor field) { + return super.clearField(field); + } + + @java.lang.Override + public Builder clearOneof(com.google.protobuf.Descriptors.OneofDescriptor oneof) { + return super.clearOneof(oneof); + } + + @java.lang.Override + public Builder setRepeatedField( + com.google.protobuf.Descriptors.FieldDescriptor field, int index, java.lang.Object value) { + return super.setRepeatedField(field, index, value); + } + + @java.lang.Override + public Builder addRepeatedField( + com.google.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { + return super.addRepeatedField(field, value); + } + + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof com.google.cloud.dataproc.v1.ReservationAffinity) 
{ + return mergeFrom((com.google.cloud.dataproc.v1.ReservationAffinity) other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(com.google.cloud.dataproc.v1.ReservationAffinity other) { + if (other == com.google.cloud.dataproc.v1.ReservationAffinity.getDefaultInstance()) + return this; + if (other.consumeReservationType_ != 0) { + setConsumeReservationTypeValue(other.getConsumeReservationTypeValue()); + } + if (!other.getKey().isEmpty()) { + key_ = other.key_; + onChanged(); + } + if (!other.values_.isEmpty()) { + if (values_.isEmpty()) { + values_ = other.values_; + bitField0_ = (bitField0_ & ~0x00000001); + } else { + ensureValuesIsMutable(); + values_.addAll(other.values_); + } + onChanged(); + } + this.mergeUnknownFields(other.unknownFields); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + com.google.cloud.dataproc.v1.ReservationAffinity parsedMessage = null; + try { + parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + parsedMessage = (com.google.cloud.dataproc.v1.ReservationAffinity) e.getUnfinishedMessage(); + throw e.unwrapIOException(); + } finally { + if (parsedMessage != null) { + mergeFrom(parsedMessage); + } + } + return this; + } + + private int bitField0_; + + private int consumeReservationType_ = 0; + /** + * + * + *
+     * Optional. Type of reservation to consume
+     * 
+ * + * + * .google.cloud.dataproc.v1.ReservationAffinity.Type consume_reservation_type = 1 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return The enum numeric value on the wire for consumeReservationType. + */ + public int getConsumeReservationTypeValue() { + return consumeReservationType_; + } + /** + * + * + *
+     * Optional. Type of reservation to consume
+     * 
+ * + * + * .google.cloud.dataproc.v1.ReservationAffinity.Type consume_reservation_type = 1 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @param value The enum numeric value on the wire for consumeReservationType to set. + * @return This builder for chaining. + */ + public Builder setConsumeReservationTypeValue(int value) { + consumeReservationType_ = value; + onChanged(); + return this; + } + /** + * + * + *
+     * Optional. Type of reservation to consume
+     * 
+ * + * + * .google.cloud.dataproc.v1.ReservationAffinity.Type consume_reservation_type = 1 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return The consumeReservationType. + */ + public com.google.cloud.dataproc.v1.ReservationAffinity.Type getConsumeReservationType() { + @SuppressWarnings("deprecation") + com.google.cloud.dataproc.v1.ReservationAffinity.Type result = + com.google.cloud.dataproc.v1.ReservationAffinity.Type.valueOf(consumeReservationType_); + return result == null + ? com.google.cloud.dataproc.v1.ReservationAffinity.Type.UNRECOGNIZED + : result; + } + /** + * + * + *
+     * Optional. Type of reservation to consume
+     * 
+ * + * + * .google.cloud.dataproc.v1.ReservationAffinity.Type consume_reservation_type = 1 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @param value The consumeReservationType to set. + * @return This builder for chaining. + */ + public Builder setConsumeReservationType( + com.google.cloud.dataproc.v1.ReservationAffinity.Type value) { + if (value == null) { + throw new NullPointerException(); + } + + consumeReservationType_ = value.getNumber(); + onChanged(); + return this; + } + /** + * + * + *
+     * Optional. Type of reservation to consume
+     * 
+ * + * + * .google.cloud.dataproc.v1.ReservationAffinity.Type consume_reservation_type = 1 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return This builder for chaining. + */ + public Builder clearConsumeReservationType() { + + consumeReservationType_ = 0; + onChanged(); + return this; + } + + private java.lang.Object key_ = ""; + /** + * + * + *
+     * Optional. Corresponds to the label key of reservation resource.
+     * 
+ * + * string key = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The key. + */ + public java.lang.String getKey() { + java.lang.Object ref = key_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + key_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + /** + * + * + *
+     * Optional. Corresponds to the label key of reservation resource.
+     * 
+ * + * string key = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The bytes for key. + */ + public com.google.protobuf.ByteString getKeyBytes() { + java.lang.Object ref = key_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + key_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + /** + * + * + *
+     * Optional. Corresponds to the label key of reservation resource.
+     * 
+ * + * string key = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + * @param value The key to set. + * @return This builder for chaining. + */ + public Builder setKey(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + + key_ = value; + onChanged(); + return this; + } + /** + * + * + *
+     * Optional. Corresponds to the label key of reservation resource.
+     * 
+ * + * string key = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return This builder for chaining. + */ + public Builder clearKey() { + + key_ = getDefaultInstance().getKey(); + onChanged(); + return this; + } + /** + * + * + *
+     * Optional. Corresponds to the label key of reservation resource.
+     * 
+ * + * string key = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + * @param value The bytes for key to set. + * @return This builder for chaining. + */ + public Builder setKeyBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + + key_ = value; + onChanged(); + return this; + } + + private com.google.protobuf.LazyStringList values_ = + com.google.protobuf.LazyStringArrayList.EMPTY; + + private void ensureValuesIsMutable() { + if (!((bitField0_ & 0x00000001) != 0)) { + values_ = new com.google.protobuf.LazyStringArrayList(values_); + bitField0_ |= 0x00000001; + } + } + /** + * + * + *
+     * Optional. Corresponds to the label values of reservation resource.
+     * 
+ * + * repeated string values = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return A list containing the values. + */ + public com.google.protobuf.ProtocolStringList getValuesList() { + return values_.getUnmodifiableView(); + } + /** + * + * + *
+     * Optional. Corresponds to the label values of reservation resource.
+     * 
+ * + * repeated string values = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The count of values. + */ + public int getValuesCount() { + return values_.size(); + } + /** + * + * + *
+     * Optional. Corresponds to the label values of reservation resource.
+     * 
+ * + * repeated string values = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + * @param index The index of the element to return. + * @return The values at the given index. + */ + public java.lang.String getValues(int index) { + return values_.get(index); + } + /** + * + * + *
+     * Optional. Corresponds to the label values of reservation resource.
+     * 
+ * + * repeated string values = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + * @param index The index of the value to return. + * @return The bytes of the values at the given index. + */ + public com.google.protobuf.ByteString getValuesBytes(int index) { + return values_.getByteString(index); + } + /** + * + * + *
+     * Optional. Corresponds to the label values of reservation resource.
+     * 
+ * + * repeated string values = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + * @param index The index to set the value at. + * @param value The values to set. + * @return This builder for chaining. + */ + public Builder setValues(int index, java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + ensureValuesIsMutable(); + values_.set(index, value); + onChanged(); + return this; + } + /** + * + * + *
+     * Optional. Corresponds to the label values of reservation resource.
+     * 
+ * + * repeated string values = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + * @param value The values to add. + * @return This builder for chaining. + */ + public Builder addValues(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + ensureValuesIsMutable(); + values_.add(value); + onChanged(); + return this; + } + /** + * + * + *
+     * Optional. Corresponds to the label values of reservation resource.
+     * 
+ * + * repeated string values = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + * @param values The values to add. + * @return This builder for chaining. + */ + public Builder addAllValues(java.lang.Iterable values) { + ensureValuesIsMutable(); + com.google.protobuf.AbstractMessageLite.Builder.addAll(values, values_); + onChanged(); + return this; + } + /** + * + * + *
+     * Optional. Corresponds to the label values of reservation resource.
+     * 
+ * + * repeated string values = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return This builder for chaining. + */ + public Builder clearValues() { + values_ = com.google.protobuf.LazyStringArrayList.EMPTY; + bitField0_ = (bitField0_ & ~0x00000001); + onChanged(); + return this; + } + /** + * + * + *
+     * Optional. Corresponds to the label values of reservation resource.
+     * 
+ * + * repeated string values = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + * @param value The bytes of the values to add. + * @return This builder for chaining. + */ + public Builder addValuesBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + ensureValuesIsMutable(); + values_.add(value); + onChanged(); + return this; + } + + @java.lang.Override + public final Builder setUnknownFields(final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.setUnknownFields(unknownFields); + } + + @java.lang.Override + public final Builder mergeUnknownFields( + final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.mergeUnknownFields(unknownFields); + } + + // @@protoc_insertion_point(builder_scope:google.cloud.dataproc.v1.ReservationAffinity) + } + + // @@protoc_insertion_point(class_scope:google.cloud.dataproc.v1.ReservationAffinity) + private static final com.google.cloud.dataproc.v1.ReservationAffinity DEFAULT_INSTANCE; + + static { + DEFAULT_INSTANCE = new com.google.cloud.dataproc.v1.ReservationAffinity(); + } + + public static com.google.cloud.dataproc.v1.ReservationAffinity getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + @java.lang.Override + public ReservationAffinity parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return new ReservationAffinity(input, extensionRegistry); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override + public com.google.cloud.dataproc.v1.ReservationAffinity getDefaultInstanceForType() { + return 
DEFAULT_INSTANCE; + } +} diff --git a/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/ReservationAffinityOrBuilder.java b/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/ReservationAffinityOrBuilder.java new file mode 100644 index 00000000..21a9f125 --- /dev/null +++ b/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/ReservationAffinityOrBuilder.java @@ -0,0 +1,130 @@ +/* + * Copyright 2020 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// source: google/cloud/dataproc/v1/clusters.proto + +package com.google.cloud.dataproc.v1; + +public interface ReservationAffinityOrBuilder + extends + // @@protoc_insertion_point(interface_extends:google.cloud.dataproc.v1.ReservationAffinity) + com.google.protobuf.MessageOrBuilder { + + /** + * + * + *
+   * Optional. Type of reservation to consume
+   * 
+ * + * + * .google.cloud.dataproc.v1.ReservationAffinity.Type consume_reservation_type = 1 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return The enum numeric value on the wire for consumeReservationType. + */ + int getConsumeReservationTypeValue(); + /** + * + * + *
+   * Optional. Type of reservation to consume
+   * 
+ * + * + * .google.cloud.dataproc.v1.ReservationAffinity.Type consume_reservation_type = 1 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return The consumeReservationType. + */ + com.google.cloud.dataproc.v1.ReservationAffinity.Type getConsumeReservationType(); + + /** + * + * + *
+   * Optional. Corresponds to the label key of reservation resource.
+   * 
+ * + * string key = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The key. + */ + java.lang.String getKey(); + /** + * + * + *
+   * Optional. Corresponds to the label key of reservation resource.
+   * 
+ * + * string key = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The bytes for key. + */ + com.google.protobuf.ByteString getKeyBytes(); + + /** + * + * + *
+   * Optional. Corresponds to the label values of reservation resource.
+   * 
+ * + * repeated string values = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return A list containing the values. + */ + java.util.List getValuesList(); + /** + * + * + *
+   * Optional. Corresponds to the label values of reservation resource.
+   * 
+ * + * repeated string values = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The count of values. + */ + int getValuesCount(); + /** + * + * + *
+   * Optional. Corresponds to the label values of reservation resource.
+   * 
+ * + * repeated string values = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + * @param index The index of the element to return. + * @return The values at the given index. + */ + java.lang.String getValues(int index); + /** + * + * + *
+   * Optional. Corresponds to the label values of reservation resource.
+   * 
+ * + * repeated string values = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + * @param index The index of the value to return. + * @return The bytes of the values at the given index. + */ + com.google.protobuf.ByteString getValuesBytes(int index); +} diff --git a/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/SparkRJob.java b/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/SparkRJob.java new file mode 100644 index 00000000..dc7b0988 --- /dev/null +++ b/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/SparkRJob.java @@ -0,0 +1,2198 @@ +/* + * Copyright 2020 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// source: google/cloud/dataproc/v1/jobs.proto + +package com.google.cloud.dataproc.v1; + +/** + * + * + *
+ * A Dataproc job for running
+ * [Apache SparkR](https://spark.apache.org/docs/latest/sparkr.html)
+ * applications on YARN.
+ * 
+ * + * Protobuf type {@code google.cloud.dataproc.v1.SparkRJob} + */ +public final class SparkRJob extends com.google.protobuf.GeneratedMessageV3 + implements + // @@protoc_insertion_point(message_implements:google.cloud.dataproc.v1.SparkRJob) + SparkRJobOrBuilder { + private static final long serialVersionUID = 0L; + // Use SparkRJob.newBuilder() to construct. + private SparkRJob(com.google.protobuf.GeneratedMessageV3.Builder builder) { + super(builder); + } + + private SparkRJob() { + mainRFileUri_ = ""; + args_ = com.google.protobuf.LazyStringArrayList.EMPTY; + fileUris_ = com.google.protobuf.LazyStringArrayList.EMPTY; + archiveUris_ = com.google.protobuf.LazyStringArrayList.EMPTY; + } + + @java.lang.Override + @SuppressWarnings({"unused"}) + protected java.lang.Object newInstance(UnusedPrivateParameter unused) { + return new SparkRJob(); + } + + @java.lang.Override + public final com.google.protobuf.UnknownFieldSet getUnknownFields() { + return this.unknownFields; + } + + private SparkRJob( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + this(); + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + int mutable_bitField0_ = 0; + com.google.protobuf.UnknownFieldSet.Builder unknownFields = + com.google.protobuf.UnknownFieldSet.newBuilder(); + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 10: + { + java.lang.String s = input.readStringRequireUtf8(); + + mainRFileUri_ = s; + break; + } + case 18: + { + java.lang.String s = input.readStringRequireUtf8(); + if (!((mutable_bitField0_ & 0x00000001) != 0)) { + args_ = new com.google.protobuf.LazyStringArrayList(); + mutable_bitField0_ |= 0x00000001; + } + args_.add(s); + break; + } + case 26: + { + java.lang.String s = input.readStringRequireUtf8(); + if (!((mutable_bitField0_ 
& 0x00000002) != 0)) { + fileUris_ = new com.google.protobuf.LazyStringArrayList(); + mutable_bitField0_ |= 0x00000002; + } + fileUris_.add(s); + break; + } + case 34: + { + java.lang.String s = input.readStringRequireUtf8(); + if (!((mutable_bitField0_ & 0x00000004) != 0)) { + archiveUris_ = new com.google.protobuf.LazyStringArrayList(); + mutable_bitField0_ |= 0x00000004; + } + archiveUris_.add(s); + break; + } + case 42: + { + if (!((mutable_bitField0_ & 0x00000008) != 0)) { + properties_ = + com.google.protobuf.MapField.newMapField( + PropertiesDefaultEntryHolder.defaultEntry); + mutable_bitField0_ |= 0x00000008; + } + com.google.protobuf.MapEntry properties__ = + input.readMessage( + PropertiesDefaultEntryHolder.defaultEntry.getParserForType(), + extensionRegistry); + properties_.getMutableMap().put(properties__.getKey(), properties__.getValue()); + break; + } + case 50: + { + com.google.cloud.dataproc.v1.LoggingConfig.Builder subBuilder = null; + if (loggingConfig_ != null) { + subBuilder = loggingConfig_.toBuilder(); + } + loggingConfig_ = + input.readMessage( + com.google.cloud.dataproc.v1.LoggingConfig.parser(), extensionRegistry); + if (subBuilder != null) { + subBuilder.mergeFrom(loggingConfig_); + loggingConfig_ = subBuilder.buildPartial(); + } + + break; + } + default: + { + if (!parseUnknownField(input, unknownFields, extensionRegistry, tag)) { + done = true; + } + break; + } + } + } + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(this); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException(e).setUnfinishedMessage(this); + } finally { + if (((mutable_bitField0_ & 0x00000001) != 0)) { + args_ = args_.getUnmodifiableView(); + } + if (((mutable_bitField0_ & 0x00000002) != 0)) { + fileUris_ = fileUris_.getUnmodifiableView(); + } + if (((mutable_bitField0_ & 0x00000004) != 0)) { + archiveUris_ = archiveUris_.getUnmodifiableView(); + } + this.unknownFields = 
unknownFields.build(); + makeExtensionsImmutable(); + } + } + + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.cloud.dataproc.v1.JobsProto + .internal_static_google_cloud_dataproc_v1_SparkRJob_descriptor; + } + + @SuppressWarnings({"rawtypes"}) + @java.lang.Override + protected com.google.protobuf.MapField internalGetMapField(int number) { + switch (number) { + case 5: + return internalGetProperties(); + default: + throw new RuntimeException("Invalid map field number: " + number); + } + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.cloud.dataproc.v1.JobsProto + .internal_static_google_cloud_dataproc_v1_SparkRJob_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.cloud.dataproc.v1.SparkRJob.class, + com.google.cloud.dataproc.v1.SparkRJob.Builder.class); + } + + public static final int MAIN_R_FILE_URI_FIELD_NUMBER = 1; + private volatile java.lang.Object mainRFileUri_; + /** + * + * + *
+   * Required. The HCFS URI of the main R file to use as the driver.
+   * Must be a .R file.
+   * 
+ * + * string main_r_file_uri = 1 [(.google.api.field_behavior) = REQUIRED]; + * + * @return The mainRFileUri. + */ + public java.lang.String getMainRFileUri() { + java.lang.Object ref = mainRFileUri_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + mainRFileUri_ = s; + return s; + } + } + /** + * + * + *
+   * Required. The HCFS URI of the main R file to use as the driver.
+   * Must be a .R file.
+   * 
+ * + * string main_r_file_uri = 1 [(.google.api.field_behavior) = REQUIRED]; + * + * @return The bytes for mainRFileUri. + */ + public com.google.protobuf.ByteString getMainRFileUriBytes() { + java.lang.Object ref = mainRFileUri_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + mainRFileUri_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + public static final int ARGS_FIELD_NUMBER = 2; + private com.google.protobuf.LazyStringList args_; + /** + * + * + *
+   * Optional. The arguments to pass to the driver.  Do not include arguments,
+   * such as `--conf`, that can be set as job properties, since a collision may
+   * occur that causes an incorrect job submission.
+   * 
+ * + * repeated string args = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return A list containing the args. + */ + public com.google.protobuf.ProtocolStringList getArgsList() { + return args_; + } + /** + * + * + *
+   * Optional. The arguments to pass to the driver.  Do not include arguments,
+   * such as `--conf`, that can be set as job properties, since a collision may
+   * occur that causes an incorrect job submission.
+   * 
+ * + * repeated string args = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The count of args. + */ + public int getArgsCount() { + return args_.size(); + } + /** + * + * + *
+   * Optional. The arguments to pass to the driver.  Do not include arguments,
+   * such as `--conf`, that can be set as job properties, since a collision may
+   * occur that causes an incorrect job submission.
+   * 
+ * + * repeated string args = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + * @param index The index of the element to return. + * @return The args at the given index. + */ + public java.lang.String getArgs(int index) { + return args_.get(index); + } + /** + * + * + *
+   * Optional. The arguments to pass to the driver.  Do not include arguments,
+   * such as `--conf`, that can be set as job properties, since a collision may
+   * occur that causes an incorrect job submission.
+   * 
+ * + * repeated string args = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + * @param index The index of the value to return. + * @return The bytes of the args at the given index. + */ + public com.google.protobuf.ByteString getArgsBytes(int index) { + return args_.getByteString(index); + } + + public static final int FILE_URIS_FIELD_NUMBER = 3; + private com.google.protobuf.LazyStringList fileUris_; + /** + * + * + *
+   * Optional. HCFS URIs of files to be copied to the working directory of
+   * R drivers and distributed tasks. Useful for naively parallel tasks.
+   * 
+ * + * repeated string file_uris = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return A list containing the fileUris. + */ + public com.google.protobuf.ProtocolStringList getFileUrisList() { + return fileUris_; + } + /** + * + * + *
+   * Optional. HCFS URIs of files to be copied to the working directory of
+   * R drivers and distributed tasks. Useful for naively parallel tasks.
+   * 
+ * + * repeated string file_uris = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The count of fileUris. + */ + public int getFileUrisCount() { + return fileUris_.size(); + } + /** + * + * + *
+   * Optional. HCFS URIs of files to be copied to the working directory of
+   * R drivers and distributed tasks. Useful for naively parallel tasks.
+   * 
+ * + * repeated string file_uris = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + * @param index The index of the element to return. + * @return The fileUris at the given index. + */ + public java.lang.String getFileUris(int index) { + return fileUris_.get(index); + } + /** + * + * + *
+   * Optional. HCFS URIs of files to be copied to the working directory of
+   * R drivers and distributed tasks. Useful for naively parallel tasks.
+   * 
+ * + * repeated string file_uris = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + * @param index The index of the value to return. + * @return The bytes of the fileUris at the given index. + */ + public com.google.protobuf.ByteString getFileUrisBytes(int index) { + return fileUris_.getByteString(index); + } + + public static final int ARCHIVE_URIS_FIELD_NUMBER = 4; + private com.google.protobuf.LazyStringList archiveUris_; + /** + * + * + *
+   * Optional. HCFS URIs of archives to be extracted in the working directory of
+   * Spark drivers and tasks. Supported file types:
+   * .jar, .tar, .tar.gz, .tgz, and .zip.
+   * 
+ * + * repeated string archive_uris = 4 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return A list containing the archiveUris. + */ + public com.google.protobuf.ProtocolStringList getArchiveUrisList() { + return archiveUris_; + } + /** + * + * + *
+   * Optional. HCFS URIs of archives to be extracted in the working directory of
+   * Spark drivers and tasks. Supported file types:
+   * .jar, .tar, .tar.gz, .tgz, and .zip.
+   * 
+ * + * repeated string archive_uris = 4 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The count of archiveUris. + */ + public int getArchiveUrisCount() { + return archiveUris_.size(); + } + /** + * + * + *
+   * Optional. HCFS URIs of archives to be extracted in the working directory of
+   * Spark drivers and tasks. Supported file types:
+   * .jar, .tar, .tar.gz, .tgz, and .zip.
+   * 
+ * + * repeated string archive_uris = 4 [(.google.api.field_behavior) = OPTIONAL]; + * + * @param index The index of the element to return. + * @return The archiveUris at the given index. + */ + public java.lang.String getArchiveUris(int index) { + return archiveUris_.get(index); + } + /** + * + * + *
+   * Optional. HCFS URIs of archives to be extracted in the working directory of
+   * Spark drivers and tasks. Supported file types:
+   * .jar, .tar, .tar.gz, .tgz, and .zip.
+   * 
+ * + * repeated string archive_uris = 4 [(.google.api.field_behavior) = OPTIONAL]; + * + * @param index The index of the value to return. + * @return The bytes of the archiveUris at the given index. + */ + public com.google.protobuf.ByteString getArchiveUrisBytes(int index) { + return archiveUris_.getByteString(index); + } + + public static final int PROPERTIES_FIELD_NUMBER = 5; + + private static final class PropertiesDefaultEntryHolder { + static final com.google.protobuf.MapEntry defaultEntry = + com.google.protobuf.MapEntry.newDefaultInstance( + com.google.cloud.dataproc.v1.JobsProto + .internal_static_google_cloud_dataproc_v1_SparkRJob_PropertiesEntry_descriptor, + com.google.protobuf.WireFormat.FieldType.STRING, + "", + com.google.protobuf.WireFormat.FieldType.STRING, + ""); + } + + private com.google.protobuf.MapField properties_; + + private com.google.protobuf.MapField internalGetProperties() { + if (properties_ == null) { + return com.google.protobuf.MapField.emptyMapField(PropertiesDefaultEntryHolder.defaultEntry); + } + return properties_; + } + + public int getPropertiesCount() { + return internalGetProperties().getMap().size(); + } + /** + * + * + *
+   * Optional. A mapping of property names to values, used to configure SparkR.
+   * Properties that conflict with values set by the Dataproc API may be
+   * overwritten. Can include properties set in
+   * /etc/spark/conf/spark-defaults.conf and classes in user code.
+   * 
+ * + * map<string, string> properties = 5 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public boolean containsProperties(java.lang.String key) { + if (key == null) { + throw new java.lang.NullPointerException(); + } + return internalGetProperties().getMap().containsKey(key); + } + /** Use {@link #getPropertiesMap()} instead. */ + @java.lang.Deprecated + public java.util.Map getProperties() { + return getPropertiesMap(); + } + /** + * + * + *
+   * Optional. A mapping of property names to values, used to configure SparkR.
+   * Properties that conflict with values set by the Dataproc API may be
+   * overwritten. Can include properties set in
+   * /etc/spark/conf/spark-defaults.conf and classes in user code.
+   * 
+ * + * map<string, string> properties = 5 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public java.util.Map getPropertiesMap() { + return internalGetProperties().getMap(); + } + /** + * + * + *
+   * Optional. A mapping of property names to values, used to configure SparkR.
+   * Properties that conflict with values set by the Dataproc API may be
+   * overwritten. Can include properties set in
+   * /etc/spark/conf/spark-defaults.conf and classes in user code.
+   * 
+ * + * map<string, string> properties = 5 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public java.lang.String getPropertiesOrDefault( + java.lang.String key, java.lang.String defaultValue) { + if (key == null) { + throw new java.lang.NullPointerException(); + } + java.util.Map map = internalGetProperties().getMap(); + return map.containsKey(key) ? map.get(key) : defaultValue; + } + /** + * + * + *
+   * Optional. A mapping of property names to values, used to configure SparkR.
+   * Properties that conflict with values set by the Dataproc API may be
+   * overwritten. Can include properties set in
+   * /etc/spark/conf/spark-defaults.conf and classes in user code.
+   * 
+ * + * map<string, string> properties = 5 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public java.lang.String getPropertiesOrThrow(java.lang.String key) { + if (key == null) { + throw new java.lang.NullPointerException(); + } + java.util.Map map = internalGetProperties().getMap(); + if (!map.containsKey(key)) { + throw new java.lang.IllegalArgumentException(); + } + return map.get(key); + } + + public static final int LOGGING_CONFIG_FIELD_NUMBER = 6; + private com.google.cloud.dataproc.v1.LoggingConfig loggingConfig_; + /** + * + * + *
+   * Optional. The runtime log config for job execution.
+   * 
+ * + * + * .google.cloud.dataproc.v1.LoggingConfig logging_config = 6 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return Whether the loggingConfig field is set. + */ + public boolean hasLoggingConfig() { + return loggingConfig_ != null; + } + /** + * + * + *
+   * Optional. The runtime log config for job execution.
+   * 
+ * + * + * .google.cloud.dataproc.v1.LoggingConfig logging_config = 6 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return The loggingConfig. + */ + public com.google.cloud.dataproc.v1.LoggingConfig getLoggingConfig() { + return loggingConfig_ == null + ? com.google.cloud.dataproc.v1.LoggingConfig.getDefaultInstance() + : loggingConfig_; + } + /** + * + * + *
+   * Optional. The runtime log config for job execution.
+   * 
+ * + * + * .google.cloud.dataproc.v1.LoggingConfig logging_config = 6 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public com.google.cloud.dataproc.v1.LoggingConfigOrBuilder getLoggingConfigOrBuilder() { + return getLoggingConfig(); + } + + private byte memoizedIsInitialized = -1; + + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { + if (!getMainRFileUriBytes().isEmpty()) { + com.google.protobuf.GeneratedMessageV3.writeString(output, 1, mainRFileUri_); + } + for (int i = 0; i < args_.size(); i++) { + com.google.protobuf.GeneratedMessageV3.writeString(output, 2, args_.getRaw(i)); + } + for (int i = 0; i < fileUris_.size(); i++) { + com.google.protobuf.GeneratedMessageV3.writeString(output, 3, fileUris_.getRaw(i)); + } + for (int i = 0; i < archiveUris_.size(); i++) { + com.google.protobuf.GeneratedMessageV3.writeString(output, 4, archiveUris_.getRaw(i)); + } + com.google.protobuf.GeneratedMessageV3.serializeStringMapTo( + output, internalGetProperties(), PropertiesDefaultEntryHolder.defaultEntry, 5); + if (loggingConfig_ != null) { + output.writeMessage(6, getLoggingConfig()); + } + unknownFields.writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + if (!getMainRFileUriBytes().isEmpty()) { + size += com.google.protobuf.GeneratedMessageV3.computeStringSize(1, mainRFileUri_); + } + { + int dataSize = 0; + for (int i = 0; i < args_.size(); i++) { + dataSize += computeStringSizeNoTag(args_.getRaw(i)); + } + size += dataSize; + size += 1 * getArgsList().size(); + } + { + int dataSize = 0; + for (int i = 0; i < fileUris_.size(); i++) { + dataSize += 
computeStringSizeNoTag(fileUris_.getRaw(i)); + } + size += dataSize; + size += 1 * getFileUrisList().size(); + } + { + int dataSize = 0; + for (int i = 0; i < archiveUris_.size(); i++) { + dataSize += computeStringSizeNoTag(archiveUris_.getRaw(i)); + } + size += dataSize; + size += 1 * getArchiveUrisList().size(); + } + for (java.util.Map.Entry entry : + internalGetProperties().getMap().entrySet()) { + com.google.protobuf.MapEntry properties__ = + PropertiesDefaultEntryHolder.defaultEntry + .newBuilderForType() + .setKey(entry.getKey()) + .setValue(entry.getValue()) + .build(); + size += com.google.protobuf.CodedOutputStream.computeMessageSize(5, properties__); + } + if (loggingConfig_ != null) { + size += com.google.protobuf.CodedOutputStream.computeMessageSize(6, getLoggingConfig()); + } + size += unknownFields.getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof com.google.cloud.dataproc.v1.SparkRJob)) { + return super.equals(obj); + } + com.google.cloud.dataproc.v1.SparkRJob other = (com.google.cloud.dataproc.v1.SparkRJob) obj; + + if (!getMainRFileUri().equals(other.getMainRFileUri())) return false; + if (!getArgsList().equals(other.getArgsList())) return false; + if (!getFileUrisList().equals(other.getFileUrisList())) return false; + if (!getArchiveUrisList().equals(other.getArchiveUrisList())) return false; + if (!internalGetProperties().equals(other.internalGetProperties())) return false; + if (hasLoggingConfig() != other.hasLoggingConfig()) return false; + if (hasLoggingConfig()) { + if (!getLoggingConfig().equals(other.getLoggingConfig())) return false; + } + if (!unknownFields.equals(other.unknownFields)) return false; + return true; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + 
getDescriptor().hashCode(); + hash = (37 * hash) + MAIN_R_FILE_URI_FIELD_NUMBER; + hash = (53 * hash) + getMainRFileUri().hashCode(); + if (getArgsCount() > 0) { + hash = (37 * hash) + ARGS_FIELD_NUMBER; + hash = (53 * hash) + getArgsList().hashCode(); + } + if (getFileUrisCount() > 0) { + hash = (37 * hash) + FILE_URIS_FIELD_NUMBER; + hash = (53 * hash) + getFileUrisList().hashCode(); + } + if (getArchiveUrisCount() > 0) { + hash = (37 * hash) + ARCHIVE_URIS_FIELD_NUMBER; + hash = (53 * hash) + getArchiveUrisList().hashCode(); + } + if (!internalGetProperties().getMap().isEmpty()) { + hash = (37 * hash) + PROPERTIES_FIELD_NUMBER; + hash = (53 * hash) + internalGetProperties().hashCode(); + } + if (hasLoggingConfig()) { + hash = (37 * hash) + LOGGING_CONFIG_FIELD_NUMBER; + hash = (53 * hash) + getLoggingConfig().hashCode(); + } + hash = (29 * hash) + unknownFields.hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static com.google.cloud.dataproc.v1.SparkRJob parseFrom(java.nio.ByteBuffer data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.cloud.dataproc.v1.SparkRJob parseFrom( + java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.dataproc.v1.SparkRJob parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.cloud.dataproc.v1.SparkRJob parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.dataproc.v1.SparkRJob parseFrom(byte[] data) + throws 
com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.cloud.dataproc.v1.SparkRJob parseFrom( + byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.dataproc.v1.SparkRJob parseFrom(java.io.InputStream input) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException(PARSER, input); + } + + public static com.google.cloud.dataproc.v1.SparkRJob parseFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.cloud.dataproc.v1.SparkRJob parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseDelimitedWithIOException(PARSER, input); + } + + public static com.google.cloud.dataproc.v1.SparkRJob parseDelimitedFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseDelimitedWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.cloud.dataproc.v1.SparkRJob parseFrom( + com.google.protobuf.CodedInputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException(PARSER, input); + } + + public static com.google.cloud.dataproc.v1.SparkRJob parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException( + PARSER, input, extensionRegistry); + } + + @java.lang.Override + public 
Builder newBuilderForType() { + return newBuilder(); + } + + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + + public static Builder newBuilder(com.google.cloud.dataproc.v1.SparkRJob prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType(com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + /** + * + * + *
+   * A Dataproc job for running
+   * [Apache SparkR](https://spark.apache.org/docs/latest/sparkr.html)
+   * applications on YARN.
+   * 
+ * + * Protobuf type {@code google.cloud.dataproc.v1.SparkRJob} + */ + public static final class Builder extends com.google.protobuf.GeneratedMessageV3.Builder + implements + // @@protoc_insertion_point(builder_implements:google.cloud.dataproc.v1.SparkRJob) + com.google.cloud.dataproc.v1.SparkRJobOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.cloud.dataproc.v1.JobsProto + .internal_static_google_cloud_dataproc_v1_SparkRJob_descriptor; + } + + @SuppressWarnings({"rawtypes"}) + protected com.google.protobuf.MapField internalGetMapField(int number) { + switch (number) { + case 5: + return internalGetProperties(); + default: + throw new RuntimeException("Invalid map field number: " + number); + } + } + + @SuppressWarnings({"rawtypes"}) + protected com.google.protobuf.MapField internalGetMutableMapField(int number) { + switch (number) { + case 5: + return internalGetMutableProperties(); + default: + throw new RuntimeException("Invalid map field number: " + number); + } + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.cloud.dataproc.v1.JobsProto + .internal_static_google_cloud_dataproc_v1_SparkRJob_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.cloud.dataproc.v1.SparkRJob.class, + com.google.cloud.dataproc.v1.SparkRJob.Builder.class); + } + + // Construct using com.google.cloud.dataproc.v1.SparkRJob.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder(com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessageV3.alwaysUseFieldBuilders) {} + } + + @java.lang.Override + public Builder clear() { + super.clear(); + mainRFileUri_ = ""; + + args_ = 
com.google.protobuf.LazyStringArrayList.EMPTY; + bitField0_ = (bitField0_ & ~0x00000001); + fileUris_ = com.google.protobuf.LazyStringArrayList.EMPTY; + bitField0_ = (bitField0_ & ~0x00000002); + archiveUris_ = com.google.protobuf.LazyStringArrayList.EMPTY; + bitField0_ = (bitField0_ & ~0x00000004); + internalGetMutableProperties().clear(); + if (loggingConfigBuilder_ == null) { + loggingConfig_ = null; + } else { + loggingConfig_ = null; + loggingConfigBuilder_ = null; + } + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { + return com.google.cloud.dataproc.v1.JobsProto + .internal_static_google_cloud_dataproc_v1_SparkRJob_descriptor; + } + + @java.lang.Override + public com.google.cloud.dataproc.v1.SparkRJob getDefaultInstanceForType() { + return com.google.cloud.dataproc.v1.SparkRJob.getDefaultInstance(); + } + + @java.lang.Override + public com.google.cloud.dataproc.v1.SparkRJob build() { + com.google.cloud.dataproc.v1.SparkRJob result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public com.google.cloud.dataproc.v1.SparkRJob buildPartial() { + com.google.cloud.dataproc.v1.SparkRJob result = + new com.google.cloud.dataproc.v1.SparkRJob(this); + int from_bitField0_ = bitField0_; + result.mainRFileUri_ = mainRFileUri_; + if (((bitField0_ & 0x00000001) != 0)) { + args_ = args_.getUnmodifiableView(); + bitField0_ = (bitField0_ & ~0x00000001); + } + result.args_ = args_; + if (((bitField0_ & 0x00000002) != 0)) { + fileUris_ = fileUris_.getUnmodifiableView(); + bitField0_ = (bitField0_ & ~0x00000002); + } + result.fileUris_ = fileUris_; + if (((bitField0_ & 0x00000004) != 0)) { + archiveUris_ = archiveUris_.getUnmodifiableView(); + bitField0_ = (bitField0_ & ~0x00000004); + } + result.archiveUris_ = archiveUris_; + result.properties_ = internalGetProperties(); + 
result.properties_.makeImmutable(); + if (loggingConfigBuilder_ == null) { + result.loggingConfig_ = loggingConfig_; + } else { + result.loggingConfig_ = loggingConfigBuilder_.build(); + } + onBuilt(); + return result; + } + + @java.lang.Override + public Builder clone() { + return super.clone(); + } + + @java.lang.Override + public Builder setField( + com.google.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { + return super.setField(field, value); + } + + @java.lang.Override + public Builder clearField(com.google.protobuf.Descriptors.FieldDescriptor field) { + return super.clearField(field); + } + + @java.lang.Override + public Builder clearOneof(com.google.protobuf.Descriptors.OneofDescriptor oneof) { + return super.clearOneof(oneof); + } + + @java.lang.Override + public Builder setRepeatedField( + com.google.protobuf.Descriptors.FieldDescriptor field, int index, java.lang.Object value) { + return super.setRepeatedField(field, index, value); + } + + @java.lang.Override + public Builder addRepeatedField( + com.google.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { + return super.addRepeatedField(field, value); + } + + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof com.google.cloud.dataproc.v1.SparkRJob) { + return mergeFrom((com.google.cloud.dataproc.v1.SparkRJob) other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(com.google.cloud.dataproc.v1.SparkRJob other) { + if (other == com.google.cloud.dataproc.v1.SparkRJob.getDefaultInstance()) return this; + if (!other.getMainRFileUri().isEmpty()) { + mainRFileUri_ = other.mainRFileUri_; + onChanged(); + } + if (!other.args_.isEmpty()) { + if (args_.isEmpty()) { + args_ = other.args_; + bitField0_ = (bitField0_ & ~0x00000001); + } else { + ensureArgsIsMutable(); + args_.addAll(other.args_); + } + onChanged(); + } + if (!other.fileUris_.isEmpty()) { + if (fileUris_.isEmpty()) 
{ + fileUris_ = other.fileUris_; + bitField0_ = (bitField0_ & ~0x00000002); + } else { + ensureFileUrisIsMutable(); + fileUris_.addAll(other.fileUris_); + } + onChanged(); + } + if (!other.archiveUris_.isEmpty()) { + if (archiveUris_.isEmpty()) { + archiveUris_ = other.archiveUris_; + bitField0_ = (bitField0_ & ~0x00000004); + } else { + ensureArchiveUrisIsMutable(); + archiveUris_.addAll(other.archiveUris_); + } + onChanged(); + } + internalGetMutableProperties().mergeFrom(other.internalGetProperties()); + if (other.hasLoggingConfig()) { + mergeLoggingConfig(other.getLoggingConfig()); + } + this.mergeUnknownFields(other.unknownFields); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + com.google.cloud.dataproc.v1.SparkRJob parsedMessage = null; + try { + parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + parsedMessage = (com.google.cloud.dataproc.v1.SparkRJob) e.getUnfinishedMessage(); + throw e.unwrapIOException(); + } finally { + if (parsedMessage != null) { + mergeFrom(parsedMessage); + } + } + return this; + } + + private int bitField0_; + + private java.lang.Object mainRFileUri_ = ""; + /** + * + * + *
+     * Required. The HCFS URI of the main R file to use as the driver.
+     * Must be a .R file.
+     * 
+ * + * string main_r_file_uri = 1 [(.google.api.field_behavior) = REQUIRED]; + * + * @return The mainRFileUri. + */ + public java.lang.String getMainRFileUri() { + java.lang.Object ref = mainRFileUri_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + mainRFileUri_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + /** + * + * + *
+     * Required. The HCFS URI of the main R file to use as the driver.
+     * Must be a .R file.
+     * 
+ * + * string main_r_file_uri = 1 [(.google.api.field_behavior) = REQUIRED]; + * + * @return The bytes for mainRFileUri. + */ + public com.google.protobuf.ByteString getMainRFileUriBytes() { + java.lang.Object ref = mainRFileUri_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + mainRFileUri_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + /** + * + * + *
+     * Required. The HCFS URI of the main R file to use as the driver.
+     * Must be a .R file.
+     * 
+ * + * string main_r_file_uri = 1 [(.google.api.field_behavior) = REQUIRED]; + * + * @param value The mainRFileUri to set. + * @return This builder for chaining. + */ + public Builder setMainRFileUri(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + + mainRFileUri_ = value; + onChanged(); + return this; + } + /** + * + * + *
+     * Required. The HCFS URI of the main R file to use as the driver.
+     * Must be a .R file.
+     * 
+ * + * string main_r_file_uri = 1 [(.google.api.field_behavior) = REQUIRED]; + * + * @return This builder for chaining. + */ + public Builder clearMainRFileUri() { + + mainRFileUri_ = getDefaultInstance().getMainRFileUri(); + onChanged(); + return this; + } + /** + * + * + *
+     * Required. The HCFS URI of the main R file to use as the driver.
+     * Must be a .R file.
+     * 
+ * + * string main_r_file_uri = 1 [(.google.api.field_behavior) = REQUIRED]; + * + * @param value The bytes for mainRFileUri to set. + * @return This builder for chaining. + */ + public Builder setMainRFileUriBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + + mainRFileUri_ = value; + onChanged(); + return this; + } + + private com.google.protobuf.LazyStringList args_ = + com.google.protobuf.LazyStringArrayList.EMPTY; + + private void ensureArgsIsMutable() { + if (!((bitField0_ & 0x00000001) != 0)) { + args_ = new com.google.protobuf.LazyStringArrayList(args_); + bitField0_ |= 0x00000001; + } + } + /** + * + * + *
+     * Optional. The arguments to pass to the driver.  Do not include arguments,
+     * such as `--conf`, that can be set as job properties, since a collision may
+     * occur that causes an incorrect job submission.
+     * 
+ * + * repeated string args = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return A list containing the args. + */ + public com.google.protobuf.ProtocolStringList getArgsList() { + return args_.getUnmodifiableView(); + } + /** + * + * + *
+     * Optional. The arguments to pass to the driver.  Do not include arguments,
+     * such as `--conf`, that can be set as job properties, since a collision may
+     * occur that causes an incorrect job submission.
+     * 
+ * + * repeated string args = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The count of args. + */ + public int getArgsCount() { + return args_.size(); + } + /** + * + * + *
+     * Optional. The arguments to pass to the driver.  Do not include arguments,
+     * such as `--conf`, that can be set as job properties, since a collision may
+     * occur that causes an incorrect job submission.
+     * 
+ * + * repeated string args = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + * @param index The index of the element to return. + * @return The args at the given index. + */ + public java.lang.String getArgs(int index) { + return args_.get(index); + } + /** + * + * + *
+     * Optional. The arguments to pass to the driver.  Do not include arguments,
+     * such as `--conf`, that can be set as job properties, since a collision may
+     * occur that causes an incorrect job submission.
+     * 
+ * + * repeated string args = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + * @param index The index of the value to return. + * @return The bytes of the args at the given index. + */ + public com.google.protobuf.ByteString getArgsBytes(int index) { + return args_.getByteString(index); + } + /** + * + * + *
+     * Optional. The arguments to pass to the driver.  Do not include arguments,
+     * such as `--conf`, that can be set as job properties, since a collision may
+     * occur that causes an incorrect job submission.
+     * 
+ * + * repeated string args = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + * @param index The index to set the value at. + * @param value The args to set. + * @return This builder for chaining. + */ + public Builder setArgs(int index, java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + ensureArgsIsMutable(); + args_.set(index, value); + onChanged(); + return this; + } + /** + * + * + *
+     * Optional. The arguments to pass to the driver.  Do not include arguments,
+     * such as `--conf`, that can be set as job properties, since a collision may
+     * occur that causes an incorrect job submission.
+     * 
+ * + * repeated string args = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + * @param value The args to add. + * @return This builder for chaining. + */ + public Builder addArgs(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + ensureArgsIsMutable(); + args_.add(value); + onChanged(); + return this; + } + /** + * + * + *
+     * Optional. The arguments to pass to the driver.  Do not include arguments,
+     * such as `--conf`, that can be set as job properties, since a collision may
+     * occur that causes an incorrect job submission.
+     * 
+ * + * repeated string args = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + * @param values The args to add. + * @return This builder for chaining. + */ + public Builder addAllArgs(java.lang.Iterable values) { + ensureArgsIsMutable(); + com.google.protobuf.AbstractMessageLite.Builder.addAll(values, args_); + onChanged(); + return this; + } + /** + * + * + *
+     * Optional. The arguments to pass to the driver.  Do not include arguments,
+     * such as `--conf`, that can be set as job properties, since a collision may
+     * occur that causes an incorrect job submission.
+     * 
+ * + * repeated string args = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return This builder for chaining. + */ + public Builder clearArgs() { + args_ = com.google.protobuf.LazyStringArrayList.EMPTY; + bitField0_ = (bitField0_ & ~0x00000001); + onChanged(); + return this; + } + /** + * + * + *
+     * Optional. The arguments to pass to the driver.  Do not include arguments,
+     * such as `--conf`, that can be set as job properties, since a collision may
+     * occur that causes an incorrect job submission.
+     * 
+ * + * repeated string args = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + * @param value The bytes of the args to add. + * @return This builder for chaining. + */ + public Builder addArgsBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + ensureArgsIsMutable(); + args_.add(value); + onChanged(); + return this; + } + + private com.google.protobuf.LazyStringList fileUris_ = + com.google.protobuf.LazyStringArrayList.EMPTY; + + private void ensureFileUrisIsMutable() { + if (!((bitField0_ & 0x00000002) != 0)) { + fileUris_ = new com.google.protobuf.LazyStringArrayList(fileUris_); + bitField0_ |= 0x00000002; + } + } + /** + * + * + *
+     * Optional. HCFS URIs of files to be copied to the working directory of
+     * R drivers and distributed tasks. Useful for naively parallel tasks.
+     * 
+ * + * repeated string file_uris = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return A list containing the fileUris. + */ + public com.google.protobuf.ProtocolStringList getFileUrisList() { + return fileUris_.getUnmodifiableView(); + } + /** + * + * + *
+     * Optional. HCFS URIs of files to be copied to the working directory of
+     * R drivers and distributed tasks. Useful for naively parallel tasks.
+     * 
+ * + * repeated string file_uris = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The count of fileUris. + */ + public int getFileUrisCount() { + return fileUris_.size(); + } + /** + * + * + *
+     * Optional. HCFS URIs of files to be copied to the working directory of
+     * R drivers and distributed tasks. Useful for naively parallel tasks.
+     * 
+ * + * repeated string file_uris = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + * @param index The index of the element to return. + * @return The fileUris at the given index. + */ + public java.lang.String getFileUris(int index) { + return fileUris_.get(index); + } + /** + * + * + *
+     * Optional. HCFS URIs of files to be copied to the working directory of
+     * R drivers and distributed tasks. Useful for naively parallel tasks.
+     * 
+ * + * repeated string file_uris = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + * @param index The index of the value to return. + * @return The bytes of the fileUris at the given index. + */ + public com.google.protobuf.ByteString getFileUrisBytes(int index) { + return fileUris_.getByteString(index); + } + /** + * + * + *
+     * Optional. HCFS URIs of files to be copied to the working directory of
+     * R drivers and distributed tasks. Useful for naively parallel tasks.
+     * 
+ * + * repeated string file_uris = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + * @param index The index to set the value at. + * @param value The fileUris to set. + * @return This builder for chaining. + */ + public Builder setFileUris(int index, java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + ensureFileUrisIsMutable(); + fileUris_.set(index, value); + onChanged(); + return this; + } + /** + * + * + *
+     * Optional. HCFS URIs of files to be copied to the working directory of
+     * R drivers and distributed tasks. Useful for naively parallel tasks.
+     * 
+ * + * repeated string file_uris = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + * @param value The fileUris to add. + * @return This builder for chaining. + */ + public Builder addFileUris(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + ensureFileUrisIsMutable(); + fileUris_.add(value); + onChanged(); + return this; + } + /** + * + * + *
+     * Optional. HCFS URIs of files to be copied to the working directory of
+     * R drivers and distributed tasks. Useful for naively parallel tasks.
+     * 
+ * + * repeated string file_uris = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + * @param values The fileUris to add. + * @return This builder for chaining. + */ + public Builder addAllFileUris(java.lang.Iterable values) { + ensureFileUrisIsMutable(); + com.google.protobuf.AbstractMessageLite.Builder.addAll(values, fileUris_); + onChanged(); + return this; + } + /** + * + * + *
+     * Optional. HCFS URIs of files to be copied to the working directory of
+     * R drivers and distributed tasks. Useful for naively parallel tasks.
+     * 
+ * + * repeated string file_uris = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return This builder for chaining. + */ + public Builder clearFileUris() { + fileUris_ = com.google.protobuf.LazyStringArrayList.EMPTY; + bitField0_ = (bitField0_ & ~0x00000002); + onChanged(); + return this; + } + /** + * + * + *
+     * Optional. HCFS URIs of files to be copied to the working directory of
+     * R drivers and distributed tasks. Useful for naively parallel tasks.
+     * 
+ * + * repeated string file_uris = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + * @param value The bytes of the fileUris to add. + * @return This builder for chaining. + */ + public Builder addFileUrisBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + ensureFileUrisIsMutable(); + fileUris_.add(value); + onChanged(); + return this; + } + + private com.google.protobuf.LazyStringList archiveUris_ = + com.google.protobuf.LazyStringArrayList.EMPTY; + + private void ensureArchiveUrisIsMutable() { + if (!((bitField0_ & 0x00000004) != 0)) { + archiveUris_ = new com.google.protobuf.LazyStringArrayList(archiveUris_); + bitField0_ |= 0x00000004; + } + } + /** + * + * + *
+     * Optional. HCFS URIs of archives to be extracted in the working directory of
+     * Spark drivers and tasks. Supported file types:
+     * .jar, .tar, .tar.gz, .tgz, and .zip.
+     * 
+ * + * repeated string archive_uris = 4 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return A list containing the archiveUris. + */ + public com.google.protobuf.ProtocolStringList getArchiveUrisList() { + return archiveUris_.getUnmodifiableView(); + } + /** + * + * + *
+     * Optional. HCFS URIs of archives to be extracted in the working directory of
+     * Spark drivers and tasks. Supported file types:
+     * .jar, .tar, .tar.gz, .tgz, and .zip.
+     * 
+ * + * repeated string archive_uris = 4 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The count of archiveUris. + */ + public int getArchiveUrisCount() { + return archiveUris_.size(); + } + /** + * + * + *
+     * Optional. HCFS URIs of archives to be extracted in the working directory of
+     * Spark drivers and tasks. Supported file types:
+     * .jar, .tar, .tar.gz, .tgz, and .zip.
+     * 
+ * + * repeated string archive_uris = 4 [(.google.api.field_behavior) = OPTIONAL]; + * + * @param index The index of the element to return. + * @return The archiveUris at the given index. + */ + public java.lang.String getArchiveUris(int index) { + return archiveUris_.get(index); + } + /** + * + * + *
+     * Optional. HCFS URIs of archives to be extracted in the working directory of
+     * Spark drivers and tasks. Supported file types:
+     * .jar, .tar, .tar.gz, .tgz, and .zip.
+     * 
+ * + * repeated string archive_uris = 4 [(.google.api.field_behavior) = OPTIONAL]; + * + * @param index The index of the value to return. + * @return The bytes of the archiveUris at the given index. + */ + public com.google.protobuf.ByteString getArchiveUrisBytes(int index) { + return archiveUris_.getByteString(index); + } + /** + * + * + *
+     * Optional. HCFS URIs of archives to be extracted in the working directory of
+     * Spark drivers and tasks. Supported file types:
+     * .jar, .tar, .tar.gz, .tgz, and .zip.
+     * 
+ * + * repeated string archive_uris = 4 [(.google.api.field_behavior) = OPTIONAL]; + * + * @param index The index to set the value at. + * @param value The archiveUris to set. + * @return This builder for chaining. + */ + public Builder setArchiveUris(int index, java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + ensureArchiveUrisIsMutable(); + archiveUris_.set(index, value); + onChanged(); + return this; + } + /** + * + * + *
+     * Optional. HCFS URIs of archives to be extracted in the working directory of
+     * Spark drivers and tasks. Supported file types:
+     * .jar, .tar, .tar.gz, .tgz, and .zip.
+     * 
+ * + * repeated string archive_uris = 4 [(.google.api.field_behavior) = OPTIONAL]; + * + * @param value The archiveUris to add. + * @return This builder for chaining. + */ + public Builder addArchiveUris(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + ensureArchiveUrisIsMutable(); + archiveUris_.add(value); + onChanged(); + return this; + } + /** + * + * + *
+     * Optional. HCFS URIs of archives to be extracted in the working directory of
+     * Spark drivers and tasks. Supported file types:
+     * .jar, .tar, .tar.gz, .tgz, and .zip.
+     * 
+ * + * repeated string archive_uris = 4 [(.google.api.field_behavior) = OPTIONAL]; + * + * @param values The archiveUris to add. + * @return This builder for chaining. + */ + public Builder addAllArchiveUris(java.lang.Iterable values) { + ensureArchiveUrisIsMutable(); + com.google.protobuf.AbstractMessageLite.Builder.addAll(values, archiveUris_); + onChanged(); + return this; + } + /** + * + * + *
+     * Optional. HCFS URIs of archives to be extracted in the working directory of
+     * Spark drivers and tasks. Supported file types:
+     * .jar, .tar, .tar.gz, .tgz, and .zip.
+     * 
+ * + * repeated string archive_uris = 4 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return This builder for chaining. + */ + public Builder clearArchiveUris() { + archiveUris_ = com.google.protobuf.LazyStringArrayList.EMPTY; + bitField0_ = (bitField0_ & ~0x00000004); + onChanged(); + return this; + } + /** + * + * + *
+     * Optional. HCFS URIs of archives to be extracted in the working directory of
+     * Spark drivers and tasks. Supported file types:
+     * .jar, .tar, .tar.gz, .tgz, and .zip.
+     * 
+ * + * repeated string archive_uris = 4 [(.google.api.field_behavior) = OPTIONAL]; + * + * @param value The bytes of the archiveUris to add. + * @return This builder for chaining. + */ + public Builder addArchiveUrisBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + ensureArchiveUrisIsMutable(); + archiveUris_.add(value); + onChanged(); + return this; + } + + private com.google.protobuf.MapField properties_; + + private com.google.protobuf.MapField + internalGetProperties() { + if (properties_ == null) { + return com.google.protobuf.MapField.emptyMapField( + PropertiesDefaultEntryHolder.defaultEntry); + } + return properties_; + } + + private com.google.protobuf.MapField + internalGetMutableProperties() { + onChanged(); + ; + if (properties_ == null) { + properties_ = + com.google.protobuf.MapField.newMapField(PropertiesDefaultEntryHolder.defaultEntry); + } + if (!properties_.isMutable()) { + properties_ = properties_.copy(); + } + return properties_; + } + + public int getPropertiesCount() { + return internalGetProperties().getMap().size(); + } + /** + * + * + *
+     * Optional. A mapping of property names to values, used to configure SparkR.
+     * Properties that conflict with values set by the Dataproc API may be
+     * overwritten. Can include properties set in
+     * /etc/spark/conf/spark-defaults.conf and classes in user code.
+     * 
+ * + * map<string, string> properties = 5 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public boolean containsProperties(java.lang.String key) { + if (key == null) { + throw new java.lang.NullPointerException(); + } + return internalGetProperties().getMap().containsKey(key); + } + /** Use {@link #getPropertiesMap()} instead. */ + @java.lang.Deprecated + public java.util.Map getProperties() { + return getPropertiesMap(); + } + /** + * + * + *
+     * Optional. A mapping of property names to values, used to configure SparkR.
+     * Properties that conflict with values set by the Dataproc API may be
+     * overwritten. Can include properties set in
+     * /etc/spark/conf/spark-defaults.conf and classes in user code.
+     * 
+ * + * map<string, string> properties = 5 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public java.util.Map getPropertiesMap() { + return internalGetProperties().getMap(); + } + /** + * + * + *
+     * Optional. A mapping of property names to values, used to configure SparkR.
+     * Properties that conflict with values set by the Dataproc API may be
+     * overwritten. Can include properties set in
+     * /etc/spark/conf/spark-defaults.conf and classes in user code.
+     * 
+ * + * map<string, string> properties = 5 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public java.lang.String getPropertiesOrDefault( + java.lang.String key, java.lang.String defaultValue) { + if (key == null) { + throw new java.lang.NullPointerException(); + } + java.util.Map map = internalGetProperties().getMap(); + return map.containsKey(key) ? map.get(key) : defaultValue; + } + /** + * + * + *
+     * Optional. A mapping of property names to values, used to configure SparkR.
+     * Properties that conflict with values set by the Dataproc API may be
+     * overwritten. Can include properties set in
+     * /etc/spark/conf/spark-defaults.conf and classes in user code.
+     * 
+ * + * map<string, string> properties = 5 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public java.lang.String getPropertiesOrThrow(java.lang.String key) { + if (key == null) { + throw new java.lang.NullPointerException(); + } + java.util.Map map = internalGetProperties().getMap(); + if (!map.containsKey(key)) { + throw new java.lang.IllegalArgumentException(); + } + return map.get(key); + } + + public Builder clearProperties() { + internalGetMutableProperties().getMutableMap().clear(); + return this; + } + /** + * + * + *
+     * Optional. A mapping of property names to values, used to configure SparkR.
+     * Properties that conflict with values set by the Dataproc API may be
+     * overwritten. Can include properties set in
+     * /etc/spark/conf/spark-defaults.conf and classes in user code.
+     * 
+ * + * map<string, string> properties = 5 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder removeProperties(java.lang.String key) { + if (key == null) { + throw new java.lang.NullPointerException(); + } + internalGetMutableProperties().getMutableMap().remove(key); + return this; + } + /** Use alternate mutation accessors instead. */ + @java.lang.Deprecated + public java.util.Map getMutableProperties() { + return internalGetMutableProperties().getMutableMap(); + } + /** + * + * + *
+     * Optional. A mapping of property names to values, used to configure SparkR.
+     * Properties that conflict with values set by the Dataproc API may be
+     * overwritten. Can include properties set in
+     * /etc/spark/conf/spark-defaults.conf and classes in user code.
+     * 
+ * + * map<string, string> properties = 5 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder putProperties(java.lang.String key, java.lang.String value) { + if (key == null) { + throw new java.lang.NullPointerException(); + } + if (value == null) { + throw new java.lang.NullPointerException(); + } + internalGetMutableProperties().getMutableMap().put(key, value); + return this; + } + /** + * + * + *
+     * Optional. A mapping of property names to values, used to configure SparkR.
+     * Properties that conflict with values set by the Dataproc API may be
+     * overwritten. Can include properties set in
+     * /etc/spark/conf/spark-defaults.conf and classes in user code.
+     * 
+ * + * map<string, string> properties = 5 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder putAllProperties(java.util.Map values) { + internalGetMutableProperties().getMutableMap().putAll(values); + return this; + } + + private com.google.cloud.dataproc.v1.LoggingConfig loggingConfig_; + private com.google.protobuf.SingleFieldBuilderV3< + com.google.cloud.dataproc.v1.LoggingConfig, + com.google.cloud.dataproc.v1.LoggingConfig.Builder, + com.google.cloud.dataproc.v1.LoggingConfigOrBuilder> + loggingConfigBuilder_; + /** + * + * + *
+     * Optional. The runtime log config for job execution.
+     * 
+ * + * + * .google.cloud.dataproc.v1.LoggingConfig logging_config = 6 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return Whether the loggingConfig field is set. + */ + public boolean hasLoggingConfig() { + return loggingConfigBuilder_ != null || loggingConfig_ != null; + } + /** + * + * + *
+     * Optional. The runtime log config for job execution.
+     * 
+ * + * + * .google.cloud.dataproc.v1.LoggingConfig logging_config = 6 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return The loggingConfig. + */ + public com.google.cloud.dataproc.v1.LoggingConfig getLoggingConfig() { + if (loggingConfigBuilder_ == null) { + return loggingConfig_ == null + ? com.google.cloud.dataproc.v1.LoggingConfig.getDefaultInstance() + : loggingConfig_; + } else { + return loggingConfigBuilder_.getMessage(); + } + } + /** + * + * + *
+     * Optional. The runtime log config for job execution.
+     * 
+ * + * + * .google.cloud.dataproc.v1.LoggingConfig logging_config = 6 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder setLoggingConfig(com.google.cloud.dataproc.v1.LoggingConfig value) { + if (loggingConfigBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + loggingConfig_ = value; + onChanged(); + } else { + loggingConfigBuilder_.setMessage(value); + } + + return this; + } + /** + * + * + *
+     * Optional. The runtime log config for job execution.
+     * 
+ * + * + * .google.cloud.dataproc.v1.LoggingConfig logging_config = 6 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder setLoggingConfig( + com.google.cloud.dataproc.v1.LoggingConfig.Builder builderForValue) { + if (loggingConfigBuilder_ == null) { + loggingConfig_ = builderForValue.build(); + onChanged(); + } else { + loggingConfigBuilder_.setMessage(builderForValue.build()); + } + + return this; + } + /** + * + * + *
+     * Optional. The runtime log config for job execution.
+     * 
+ * + * + * .google.cloud.dataproc.v1.LoggingConfig logging_config = 6 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder mergeLoggingConfig(com.google.cloud.dataproc.v1.LoggingConfig value) { + if (loggingConfigBuilder_ == null) { + if (loggingConfig_ != null) { + loggingConfig_ = + com.google.cloud.dataproc.v1.LoggingConfig.newBuilder(loggingConfig_) + .mergeFrom(value) + .buildPartial(); + } else { + loggingConfig_ = value; + } + onChanged(); + } else { + loggingConfigBuilder_.mergeFrom(value); + } + + return this; + } + /** + * + * + *
+     * Optional. The runtime log config for job execution.
+     * 
+ * + * + * .google.cloud.dataproc.v1.LoggingConfig logging_config = 6 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder clearLoggingConfig() { + if (loggingConfigBuilder_ == null) { + loggingConfig_ = null; + onChanged(); + } else { + loggingConfig_ = null; + loggingConfigBuilder_ = null; + } + + return this; + } + /** + * + * + *
+     * Optional. The runtime log config for job execution.
+     * 
+ * + * + * .google.cloud.dataproc.v1.LoggingConfig logging_config = 6 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public com.google.cloud.dataproc.v1.LoggingConfig.Builder getLoggingConfigBuilder() { + + onChanged(); + return getLoggingConfigFieldBuilder().getBuilder(); + } + /** + * + * + *
+     * Optional. The runtime log config for job execution.
+     * 
+ * + * + * .google.cloud.dataproc.v1.LoggingConfig logging_config = 6 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public com.google.cloud.dataproc.v1.LoggingConfigOrBuilder getLoggingConfigOrBuilder() { + if (loggingConfigBuilder_ != null) { + return loggingConfigBuilder_.getMessageOrBuilder(); + } else { + return loggingConfig_ == null + ? com.google.cloud.dataproc.v1.LoggingConfig.getDefaultInstance() + : loggingConfig_; + } + } + /** + * + * + *
+     * Optional. The runtime log config for job execution.
+     * 
+ * + * + * .google.cloud.dataproc.v1.LoggingConfig logging_config = 6 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + private com.google.protobuf.SingleFieldBuilderV3< + com.google.cloud.dataproc.v1.LoggingConfig, + com.google.cloud.dataproc.v1.LoggingConfig.Builder, + com.google.cloud.dataproc.v1.LoggingConfigOrBuilder> + getLoggingConfigFieldBuilder() { + if (loggingConfigBuilder_ == null) { + loggingConfigBuilder_ = + new com.google.protobuf.SingleFieldBuilderV3< + com.google.cloud.dataproc.v1.LoggingConfig, + com.google.cloud.dataproc.v1.LoggingConfig.Builder, + com.google.cloud.dataproc.v1.LoggingConfigOrBuilder>( + getLoggingConfig(), getParentForChildren(), isClean()); + loggingConfig_ = null; + } + return loggingConfigBuilder_; + } + + @java.lang.Override + public final Builder setUnknownFields(final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.setUnknownFields(unknownFields); + } + + @java.lang.Override + public final Builder mergeUnknownFields( + final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.mergeUnknownFields(unknownFields); + } + + // @@protoc_insertion_point(builder_scope:google.cloud.dataproc.v1.SparkRJob) + } + + // @@protoc_insertion_point(class_scope:google.cloud.dataproc.v1.SparkRJob) + private static final com.google.cloud.dataproc.v1.SparkRJob DEFAULT_INSTANCE; + + static { + DEFAULT_INSTANCE = new com.google.cloud.dataproc.v1.SparkRJob(); + } + + public static com.google.cloud.dataproc.v1.SparkRJob getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + @java.lang.Override + public SparkRJob parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return new SparkRJob(input, extensionRegistry); + } + }; + + public static com.google.protobuf.Parser 
parser() { + return PARSER; + } + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override + public com.google.cloud.dataproc.v1.SparkRJob getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } +} diff --git a/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/SparkRJobOrBuilder.java b/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/SparkRJobOrBuilder.java new file mode 100644 index 00000000..10bdca04 --- /dev/null +++ b/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/SparkRJobOrBuilder.java @@ -0,0 +1,340 @@ +/* + * Copyright 2020 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// source: google/cloud/dataproc/v1/jobs.proto + +package com.google.cloud.dataproc.v1; + +public interface SparkRJobOrBuilder + extends + // @@protoc_insertion_point(interface_extends:google.cloud.dataproc.v1.SparkRJob) + com.google.protobuf.MessageOrBuilder { + + /** + * + * + *
+   * Required. The HCFS URI of the main R file to use as the driver.
+   * Must be a .R file.
+   * 
+ * + * string main_r_file_uri = 1 [(.google.api.field_behavior) = REQUIRED]; + * + * @return The mainRFileUri. + */ + java.lang.String getMainRFileUri(); + /** + * + * + *
+   * Required. The HCFS URI of the main R file to use as the driver.
+   * Must be a .R file.
+   * 
+ * + * string main_r_file_uri = 1 [(.google.api.field_behavior) = REQUIRED]; + * + * @return The bytes for mainRFileUri. + */ + com.google.protobuf.ByteString getMainRFileUriBytes(); + + /** + * + * + *
+   * Optional. The arguments to pass to the driver.  Do not include arguments,
+   * such as `--conf`, that can be set as job properties, since a collision may
+   * occur that causes an incorrect job submission.
+   * 
+ * + * repeated string args = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return A list containing the args. + */ + java.util.List getArgsList(); + /** + * + * + *
+   * Optional. The arguments to pass to the driver.  Do not include arguments,
+   * such as `--conf`, that can be set as job properties, since a collision may
+   * occur that causes an incorrect job submission.
+   * 
+ * + * repeated string args = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The count of args. + */ + int getArgsCount(); + /** + * + * + *
+   * Optional. The arguments to pass to the driver.  Do not include arguments,
+   * such as `--conf`, that can be set as job properties, since a collision may
+   * occur that causes an incorrect job submission.
+   * 
+ * + * repeated string args = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + * @param index The index of the element to return. + * @return The args at the given index. + */ + java.lang.String getArgs(int index); + /** + * + * + *
+   * Optional. The arguments to pass to the driver.  Do not include arguments,
+   * such as `--conf`, that can be set as job properties, since a collision may
+   * occur that causes an incorrect job submission.
+   * 
+ * + * repeated string args = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + * @param index The index of the value to return. + * @return The bytes of the args at the given index. + */ + com.google.protobuf.ByteString getArgsBytes(int index); + + /** + * + * + *
+   * Optional. HCFS URIs of files to be copied to the working directory of
+   * R drivers and distributed tasks. Useful for naively parallel tasks.
+   * 
+ * + * repeated string file_uris = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return A list containing the fileUris. + */ + java.util.List getFileUrisList(); + /** + * + * + *
+   * Optional. HCFS URIs of files to be copied to the working directory of
+   * R drivers and distributed tasks. Useful for naively parallel tasks.
+   * 
+ * + * repeated string file_uris = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The count of fileUris. + */ + int getFileUrisCount(); + /** + * + * + *
+   * Optional. HCFS URIs of files to be copied to the working directory of
+   * R drivers and distributed tasks. Useful for naively parallel tasks.
+   * 
+ * + * repeated string file_uris = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + * @param index The index of the element to return. + * @return The fileUris at the given index. + */ + java.lang.String getFileUris(int index); + /** + * + * + *
+   * Optional. HCFS URIs of files to be copied to the working directory of
+   * R drivers and distributed tasks. Useful for naively parallel tasks.
+   * 
+ * + * repeated string file_uris = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + * @param index The index of the value to return. + * @return The bytes of the fileUris at the given index. + */ + com.google.protobuf.ByteString getFileUrisBytes(int index); + + /** + * + * + *
+   * Optional. HCFS URIs of archives to be extracted in the working directory of
+   * Spark drivers and tasks. Supported file types:
+   * .jar, .tar, .tar.gz, .tgz, and .zip.
+   * 
+ * + * repeated string archive_uris = 4 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return A list containing the archiveUris. + */ + java.util.List getArchiveUrisList(); + /** + * + * + *
+   * Optional. HCFS URIs of archives to be extracted in the working directory of
+   * Spark drivers and tasks. Supported file types:
+   * .jar, .tar, .tar.gz, .tgz, and .zip.
+   * 
+ * + * repeated string archive_uris = 4 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The count of archiveUris. + */ + int getArchiveUrisCount(); + /** + * + * + *
+   * Optional. HCFS URIs of archives to be extracted in the working directory of
+   * Spark drivers and tasks. Supported file types:
+   * .jar, .tar, .tar.gz, .tgz, and .zip.
+   * 
+ * + * repeated string archive_uris = 4 [(.google.api.field_behavior) = OPTIONAL]; + * + * @param index The index of the element to return. + * @return The archiveUris at the given index. + */ + java.lang.String getArchiveUris(int index); + /** + * + * + *
+   * Optional. HCFS URIs of archives to be extracted in the working directory of
+   * Spark drivers and tasks. Supported file types:
+   * .jar, .tar, .tar.gz, .tgz, and .zip.
+   * 
+ * + * repeated string archive_uris = 4 [(.google.api.field_behavior) = OPTIONAL]; + * + * @param index The index of the value to return. + * @return The bytes of the archiveUris at the given index. + */ + com.google.protobuf.ByteString getArchiveUrisBytes(int index); + + /** + * + * + *
+   * Optional. A mapping of property names to values, used to configure SparkR.
+   * Properties that conflict with values set by the Dataproc API may be
+   * overwritten. Can include properties set in
+   * /etc/spark/conf/spark-defaults.conf and classes in user code.
+   * 
+ * + * map<string, string> properties = 5 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + int getPropertiesCount(); + /** + * + * + *
+   * Optional. A mapping of property names to values, used to configure SparkR.
+   * Properties that conflict with values set by the Dataproc API may be
+   * overwritten. Can include properties set in
+   * /etc/spark/conf/spark-defaults.conf and classes in user code.
+   * 
+ * + * map<string, string> properties = 5 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + boolean containsProperties(java.lang.String key); + /** Use {@link #getPropertiesMap()} instead. */ + @java.lang.Deprecated + java.util.Map getProperties(); + /** + * + * + *
+   * Optional. A mapping of property names to values, used to configure SparkR.
+   * Properties that conflict with values set by the Dataproc API may be
+   * overwritten. Can include properties set in
+   * /etc/spark/conf/spark-defaults.conf and classes in user code.
+   * 
+ * + * map<string, string> properties = 5 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + java.util.Map getPropertiesMap(); + /** + * + * + *
+   * Optional. A mapping of property names to values, used to configure SparkR.
+   * Properties that conflict with values set by the Dataproc API may be
+   * overwritten. Can include properties set in
+   * /etc/spark/conf/spark-defaults.conf and classes in user code.
+   * 
+ * + * map<string, string> properties = 5 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + java.lang.String getPropertiesOrDefault(java.lang.String key, java.lang.String defaultValue); + /** + * + * + *
+   * Optional. A mapping of property names to values, used to configure SparkR.
+   * Properties that conflict with values set by the Dataproc API may be
+   * overwritten. Can include properties set in
+   * /etc/spark/conf/spark-defaults.conf and classes in user code.
+   * 
+ * + * map<string, string> properties = 5 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + java.lang.String getPropertiesOrThrow(java.lang.String key); + + /** + * + * + *
+   * Optional. The runtime log config for job execution.
+   * 
+ * + * + * .google.cloud.dataproc.v1.LoggingConfig logging_config = 6 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return Whether the loggingConfig field is set. + */ + boolean hasLoggingConfig(); + /** + * + * + *
+   * Optional. The runtime log config for job execution.
+   * 
+ * + * + * .google.cloud.dataproc.v1.LoggingConfig logging_config = 6 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return The loggingConfig. + */ + com.google.cloud.dataproc.v1.LoggingConfig getLoggingConfig(); + /** + * + * + *
+   * Optional. The runtime log config for job execution.
+   * 
+ * + * + * .google.cloud.dataproc.v1.LoggingConfig logging_config = 6 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + com.google.cloud.dataproc.v1.LoggingConfigOrBuilder getLoggingConfigOrBuilder(); +} diff --git a/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/UpdateClusterRequest.java b/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/UpdateClusterRequest.java index 0e1ba7ba..28681279 100644 --- a/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/UpdateClusterRequest.java +++ b/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/UpdateClusterRequest.java @@ -381,7 +381,8 @@ public com.google.cloud.dataproc.v1.ClusterOrBuilder getClusterOrBuilder() { * interrupting jobs in progress. Timeout specifies how long to wait for jobs * in progress to finish before forcefully removing nodes (and potentially * interrupting jobs). Default timeout is 0 (for forceful decommission), and - * the maximum allowed timeout is 1 day. + * the maximum allowed timeout is 1 day. (see JSON representation of + * [Duration](https://developers.google.com/protocol-buffers/docs/proto3#json)). * Only supported on Dataproc image versions 1.2 and higher. * * @@ -403,7 +404,8 @@ public boolean hasGracefulDecommissionTimeout() { * interrupting jobs in progress. Timeout specifies how long to wait for jobs * in progress to finish before forcefully removing nodes (and potentially * interrupting jobs). Default timeout is 0 (for forceful decommission), and - * the maximum allowed timeout is 1 day. + * the maximum allowed timeout is 1 day. (see JSON representation of + * [Duration](https://developers.google.com/protocol-buffers/docs/proto3#json)). * Only supported on Dataproc image versions 1.2 and higher. * * @@ -427,7 +429,8 @@ public com.google.protobuf.Duration getGracefulDecommissionTimeout() { * interrupting jobs in progress. 
Timeout specifies how long to wait for jobs * in progress to finish before forcefully removing nodes (and potentially * interrupting jobs). Default timeout is 0 (for forceful decommission), and - * the maximum allowed timeout is 1 day. + * the maximum allowed timeout is 1 day. (see JSON representation of + * [Duration](https://developers.google.com/protocol-buffers/docs/proto3#json)). * Only supported on Dataproc image versions 1.2 and higher. * * @@ -1683,7 +1686,8 @@ public com.google.cloud.dataproc.v1.ClusterOrBuilder getClusterOrBuilder() { * interrupting jobs in progress. Timeout specifies how long to wait for jobs * in progress to finish before forcefully removing nodes (and potentially * interrupting jobs). Default timeout is 0 (for forceful decommission), and - * the maximum allowed timeout is 1 day. + * the maximum allowed timeout is 1 day. (see JSON representation of + * [Duration](https://developers.google.com/protocol-buffers/docs/proto3#json)). * Only supported on Dataproc image versions 1.2 and higher. * * @@ -1705,7 +1709,8 @@ public boolean hasGracefulDecommissionTimeout() { * interrupting jobs in progress. Timeout specifies how long to wait for jobs * in progress to finish before forcefully removing nodes (and potentially * interrupting jobs). Default timeout is 0 (for forceful decommission), and - * the maximum allowed timeout is 1 day. + * the maximum allowed timeout is 1 day. (see JSON representation of + * [Duration](https://developers.google.com/protocol-buffers/docs/proto3#json)). * Only supported on Dataproc image versions 1.2 and higher. * * @@ -1733,7 +1738,8 @@ public com.google.protobuf.Duration getGracefulDecommissionTimeout() { * interrupting jobs in progress. Timeout specifies how long to wait for jobs * in progress to finish before forcefully removing nodes (and potentially * interrupting jobs). Default timeout is 0 (for forceful decommission), and - * the maximum allowed timeout is 1 day. + * the maximum allowed timeout is 1 day. 
(see JSON representation of + * [Duration](https://developers.google.com/protocol-buffers/docs/proto3#json)). * Only supported on Dataproc image versions 1.2 and higher. * * @@ -1763,7 +1769,8 @@ public Builder setGracefulDecommissionTimeout(com.google.protobuf.Duration value * interrupting jobs in progress. Timeout specifies how long to wait for jobs * in progress to finish before forcefully removing nodes (and potentially * interrupting jobs). Default timeout is 0 (for forceful decommission), and - * the maximum allowed timeout is 1 day. + * the maximum allowed timeout is 1 day. (see JSON representation of + * [Duration](https://developers.google.com/protocol-buffers/docs/proto3#json)). * Only supported on Dataproc image versions 1.2 and higher. * * @@ -1791,7 +1798,8 @@ public Builder setGracefulDecommissionTimeout( * interrupting jobs in progress. Timeout specifies how long to wait for jobs * in progress to finish before forcefully removing nodes (and potentially * interrupting jobs). Default timeout is 0 (for forceful decommission), and - * the maximum allowed timeout is 1 day. + * the maximum allowed timeout is 1 day. (see JSON representation of + * [Duration](https://developers.google.com/protocol-buffers/docs/proto3#json)). * Only supported on Dataproc image versions 1.2 and higher. * * @@ -1825,7 +1833,8 @@ public Builder mergeGracefulDecommissionTimeout(com.google.protobuf.Duration val * interrupting jobs in progress. Timeout specifies how long to wait for jobs * in progress to finish before forcefully removing nodes (and potentially * interrupting jobs). Default timeout is 0 (for forceful decommission), and - * the maximum allowed timeout is 1 day. + * the maximum allowed timeout is 1 day. (see JSON representation of + * [Duration](https://developers.google.com/protocol-buffers/docs/proto3#json)). * Only supported on Dataproc image versions 1.2 and higher. 
* * @@ -1853,7 +1862,8 @@ public Builder clearGracefulDecommissionTimeout() { * interrupting jobs in progress. Timeout specifies how long to wait for jobs * in progress to finish before forcefully removing nodes (and potentially * interrupting jobs). Default timeout is 0 (for forceful decommission), and - * the maximum allowed timeout is 1 day. + * the maximum allowed timeout is 1 day. (see JSON representation of + * [Duration](https://developers.google.com/protocol-buffers/docs/proto3#json)). * Only supported on Dataproc image versions 1.2 and higher. * * @@ -1875,7 +1885,8 @@ public com.google.protobuf.Duration.Builder getGracefulDecommissionTimeoutBuilde * interrupting jobs in progress. Timeout specifies how long to wait for jobs * in progress to finish before forcefully removing nodes (and potentially * interrupting jobs). Default timeout is 0 (for forceful decommission), and - * the maximum allowed timeout is 1 day. + * the maximum allowed timeout is 1 day. (see JSON representation of + * [Duration](https://developers.google.com/protocol-buffers/docs/proto3#json)). * Only supported on Dataproc image versions 1.2 and higher. * * @@ -1901,7 +1912,8 @@ public com.google.protobuf.DurationOrBuilder getGracefulDecommissionTimeoutOrBui * interrupting jobs in progress. Timeout specifies how long to wait for jobs * in progress to finish before forcefully removing nodes (and potentially * interrupting jobs). Default timeout is 0 (for forceful decommission), and - * the maximum allowed timeout is 1 day. + * the maximum allowed timeout is 1 day. (see JSON representation of + * [Duration](https://developers.google.com/protocol-buffers/docs/proto3#json)). * Only supported on Dataproc image versions 1.2 and higher. 
* * diff --git a/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/UpdateClusterRequestOrBuilder.java b/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/UpdateClusterRequestOrBuilder.java index d86af69a..cf39dac2 100644 --- a/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/UpdateClusterRequestOrBuilder.java +++ b/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/UpdateClusterRequestOrBuilder.java @@ -147,7 +147,8 @@ public interface UpdateClusterRequestOrBuilder * interrupting jobs in progress. Timeout specifies how long to wait for jobs * in progress to finish before forcefully removing nodes (and potentially * interrupting jobs). Default timeout is 0 (for forceful decommission), and - * the maximum allowed timeout is 1 day. + * the maximum allowed timeout is 1 day. (see JSON representation of + * [Duration](https://developers.google.com/protocol-buffers/docs/proto3#json)). * Only supported on Dataproc image versions 1.2 and higher. * * @@ -167,7 +168,8 @@ public interface UpdateClusterRequestOrBuilder * interrupting jobs in progress. Timeout specifies how long to wait for jobs * in progress to finish before forcefully removing nodes (and potentially * interrupting jobs). Default timeout is 0 (for forceful decommission), and - * the maximum allowed timeout is 1 day. + * the maximum allowed timeout is 1 day. (see JSON representation of + * [Duration](https://developers.google.com/protocol-buffers/docs/proto3#json)). * Only supported on Dataproc image versions 1.2 and higher. * * @@ -187,7 +189,8 @@ public interface UpdateClusterRequestOrBuilder * interrupting jobs in progress. Timeout specifies how long to wait for jobs * in progress to finish before forcefully removing nodes (and potentially * interrupting jobs). Default timeout is 0 (for forceful decommission), and - * the maximum allowed timeout is 1 day. + * the maximum allowed timeout is 1 day. 
(see JSON representation of + * [Duration](https://developers.google.com/protocol-buffers/docs/proto3#json)). * Only supported on Dataproc image versions 1.2 and higher. * * diff --git a/proto-google-cloud-dataproc-v1/src/main/proto/google/cloud/dataproc/v1/autoscaling_policies.proto b/proto-google-cloud-dataproc-v1/src/main/proto/google/cloud/dataproc/v1/autoscaling_policies.proto index 51fbc87d..53321d89 100644 --- a/proto-google-cloud-dataproc-v1/src/main/proto/google/cloud/dataproc/v1/autoscaling_policies.proto +++ b/proto-google-cloud-dataproc-v1/src/main/proto/google/cloud/dataproc/v1/autoscaling_policies.proto @@ -1,4 +1,4 @@ -// Copyright 2019 Google LLC. +// Copyright 2020 Google LLC // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -11,7 +11,6 @@ // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. -// syntax = "proto3"; @@ -28,7 +27,6 @@ option go_package = "google.golang.org/genproto/googleapis/cloud/dataproc/v1;dat option java_multiple_files = true; option java_outer_classname = "AutoscalingPoliciesProto"; option java_package = "com.google.cloud.dataproc.v1"; - option (google.api.resource_definition) = { type: "dataproc.googleapis.com/Region" pattern: "projects/{project}/regions/{region}" diff --git a/proto-google-cloud-dataproc-v1/src/main/proto/google/cloud/dataproc/v1/clusters.proto b/proto-google-cloud-dataproc-v1/src/main/proto/google/cloud/dataproc/v1/clusters.proto index bc254589..f8db707e 100644 --- a/proto-google-cloud-dataproc-v1/src/main/proto/google/cloud/dataproc/v1/clusters.proto +++ b/proto-google-cloud-dataproc-v1/src/main/proto/google/cloud/dataproc/v1/clusters.proto @@ -1,4 +1,4 @@ -// Copyright 2019 Google LLC. 
+// Copyright 2020 Google LLC // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -11,7 +11,6 @@ // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. -// syntax = "proto3"; @@ -20,7 +19,6 @@ package google.cloud.dataproc.v1; import "google/api/annotations.proto"; import "google/api/client.proto"; import "google/api/field_behavior.proto"; -import "google/cloud/dataproc/v1/operations.proto"; import "google/cloud/dataproc/v1/shared.proto"; import "google/longrunning/operations.proto"; import "google/protobuf/duration.proto"; @@ -40,7 +38,7 @@ service ClusterController { // Creates a cluster in a project. The returned // [Operation.metadata][google.longrunning.Operation.metadata] will be - // [ClusterOperationMetadata](/dataproc/docs/reference/rpc/google.cloud.dataproc.v1#clusteroperationmetadata). + // [ClusterOperationMetadata](https://cloud.google.com/dataproc/docs/reference/rpc/google.cloud.dataproc.v1#clusteroperationmetadata). rpc CreateCluster(CreateClusterRequest) returns (google.longrunning.Operation) { option (google.api.http) = { post: "/v1/projects/{project_id}/regions/{region}/clusters" @@ -55,7 +53,7 @@ service ClusterController { // Updates a cluster in a project. The returned // [Operation.metadata][google.longrunning.Operation.metadata] will be - // [ClusterOperationMetadata](/dataproc/docs/reference/rpc/google.cloud.dataproc.v1#clusteroperationmetadata). + // [ClusterOperationMetadata](https://cloud.google.com/dataproc/docs/reference/rpc/google.cloud.dataproc.v1#clusteroperationmetadata). 
rpc UpdateCluster(UpdateClusterRequest) returns (google.longrunning.Operation) { option (google.api.http) = { patch: "/v1/projects/{project_id}/regions/{region}/clusters/{cluster_name}" @@ -65,12 +63,11 @@ service ClusterController { response_type: "Cluster" metadata_type: "google.cloud.dataproc.v1.ClusterOperationMetadata" }; - option (google.api.method_signature) = "project_id,region,cluster_name,cluster,update_mask"; } // Deletes a cluster in a project. The returned // [Operation.metadata][google.longrunning.Operation.metadata] will be - // [ClusterOperationMetadata](/dataproc/docs/reference/rpc/google.cloud.dataproc.v1#clusteroperationmetadata). + // [ClusterOperationMetadata](https://cloud.google.com/dataproc/docs/reference/rpc/google.cloud.dataproc.v1#clusteroperationmetadata). rpc DeleteCluster(DeleteClusterRequest) returns (google.longrunning.Operation) { option (google.api.http) = { delete: "/v1/projects/{project_id}/regions/{region}/clusters/{cluster_name}" @@ -101,11 +98,11 @@ service ClusterController { // Gets cluster diagnostic information. The returned // [Operation.metadata][google.longrunning.Operation.metadata] will be - // [ClusterOperationMetadata](/dataproc/docs/reference/rpc/google.cloud.dataproc.v1#clusteroperationmetadata). + // [ClusterOperationMetadata](https://cloud.google.com/dataproc/docs/reference/rpc/google.cloud.dataproc.v1#clusteroperationmetadata). // After the operation completes, // [Operation.response][google.longrunning.Operation.response] // contains - // [DiagnoseClusterResults](/dataproc/docs/reference/rpc/google.cloud.dataproc.v1#diagnoseclusterresults). + // [DiagnoseClusterResults](https://cloud.google.com/dataproc/docs/reference/rpc/google.cloud.dataproc.v1#diagnoseclusterresults). 
rpc DiagnoseCluster(DiagnoseClusterRequest) returns (google.longrunning.Operation) { option (google.api.http) = { post: "/v1/projects/{project_id}/regions/{region}/clusters/{cluster_name}:diagnose" @@ -215,6 +212,9 @@ message ClusterConfig { // Optional. Security settings for the cluster. SecurityConfig security_config = 16 [(google.api.field_behavior) = OPTIONAL]; + + // Optional. Lifecycle setting for the cluster. + LifecycleConfig lifecycle_config = 17 [(google.api.field_behavior) = OPTIONAL]; } // Autoscaling Policy config associated with the cluster. @@ -322,9 +322,12 @@ message GceClusterConfig { // [Project and instance // metadata](https://cloud.google.com/compute/docs/storing-retrieving-metadata#project_and_instance_metadata)). map metadata = 5; + + // Optional. Reservation Affinity for consuming Zonal reservation. + ReservationAffinity reservation_affinity = 11 [(google.api.field_behavior) = OPTIONAL]; } -// Optional. The config settings for Compute Engine resources in +// The config settings for Compute Engine resources in // an instance group, such as a master or worker group. message InstanceGroupConfig { // Optional. The number of VM instances in the instance group. @@ -438,7 +441,10 @@ message NodeInitializationAction { string executable_file = 1 [(google.api.field_behavior) = REQUIRED]; // Optional. Amount of time executable has to complete. Default is - // 10 minutes. Cluster creation fails with an explanatory error message (the + // 10 minutes (see JSON representation of + // [Duration](https://developers.google.com/protocol-buffers/docs/proto3#json)). + // + // Cluster creation fails with an explanatory error message (the // name of the executable that caused the error and the exceeded timeout // period) if the executable is not completed at end of the timeout period. 
google.protobuf.Duration execution_timeout = 2 [(google.api.field_behavior) = OPTIONAL]; @@ -495,7 +501,8 @@ message ClusterStatus { (google.api.field_behavior) = OPTIONAL ]; - // Output only. Time when this state was entered. + // Output only. Time when this state was entered (see JSON representation of + // [Timestamp](https://developers.google.com/protocol-buffers/docs/proto3#json)). google.protobuf.Timestamp state_start_time = 3 [(google.api.field_behavior) = OUTPUT_ONLY]; // Output only. Additional state information that includes @@ -613,6 +620,36 @@ message SoftwareConfig { repeated Component optional_components = 3 [(google.api.field_behavior) = OPTIONAL]; } +// Specifies the cluster auto-delete schedule configuration. +message LifecycleConfig { + // Optional. The duration to keep the cluster alive while idling (when no jobs + // are running). Passing this threshold will cause the cluster to be + // deleted. Minimum value is 10 minutes; maximum value is 14 days (see JSON + // representation of + // [Duration](https://developers.google.com/protocol-buffers/docs/proto3#json)). + google.protobuf.Duration idle_delete_ttl = 1 [(google.api.field_behavior) = OPTIONAL]; + + // Either the exact time the cluster should be deleted at or + // the cluster maximum age. + oneof ttl { + // Optional. The time when cluster will be auto-deleted (see JSON representation of + // [Timestamp](https://developers.google.com/protocol-buffers/docs/proto3#json)). + google.protobuf.Timestamp auto_delete_time = 2 [(google.api.field_behavior) = OPTIONAL]; + + // Optional. The lifetime duration of cluster. The cluster will be + // auto-deleted at the end of this period. Minimum value is 10 minutes; + // maximum value is 14 days (see JSON representation of + // [Duration](https://developers.google.com/protocol-buffers/docs/proto3#json)). + google.protobuf.Duration auto_delete_ttl = 3 [(google.api.field_behavior) = OPTIONAL]; + } + + // Output only. 
The time when cluster became idle (most recent job finished) + // and became eligible for deletion due to idleness (see JSON representation + // of + // [Timestamp](https://developers.google.com/protocol-buffers/docs/proto3#json)). + google.protobuf.Timestamp idle_start_time = 4 [(google.api.field_behavior) = OUTPUT_ONLY]; +} + +// Contains cluster daemon metrics, such as HDFS and YARN stats. // // **Beta Feature**: This report is available for testing purposes only. It may @@ -671,7 +708,8 @@ message UpdateClusterRequest { // interrupting jobs in progress. Timeout specifies how long to wait for jobs // in progress to finish before forcefully removing nodes (and potentially // interrupting jobs). Default timeout is 0 (for forceful decommission), and - // the maximum allowed timeout is 1 day. + // the maximum allowed timeout is 1 day. (see JSON representation of + // [Duration](https://developers.google.com/protocol-buffers/docs/proto3#json)). // // Only supported on Dataproc image versions 1.2 and higher. google.protobuf.Duration graceful_decommission_timeout = 6 [(google.api.field_behavior) = OPTIONAL]; @@ -854,3 +892,30 @@ message DiagnoseClusterResults { // diagnostics. string output_uri = 1 [(google.api.field_behavior) = OUTPUT_ONLY]; } + +// Reservation Affinity for consuming Zonal reservation. +message ReservationAffinity { + // Indicates whether to consume capacity from a reservation or not. + enum Type { + TYPE_UNSPECIFIED = 0; + + // Do not consume from any allocated capacity. + NO_RESERVATION = 1; + + // Consume any reservation available. + ANY_RESERVATION = 2; + + // Must consume from a specific reservation. Must specify key value fields + // for specifying the reservations. + SPECIFIC_RESERVATION = 3; + } + + // Optional. Type of reservation to consume + Type consume_reservation_type = 1 [(google.api.field_behavior) = OPTIONAL]; + + // Optional. Corresponds to the label key of reservation resource. 
+ string key = 2 [(google.api.field_behavior) = OPTIONAL]; + + // Optional. Corresponds to the label values of reservation resource. + repeated string values = 3 [(google.api.field_behavior) = OPTIONAL]; +} diff --git a/proto-google-cloud-dataproc-v1/src/main/proto/google/cloud/dataproc/v1/jobs.proto b/proto-google-cloud-dataproc-v1/src/main/proto/google/cloud/dataproc/v1/jobs.proto index bcb68fed..85921dc4 100644 --- a/proto-google-cloud-dataproc-v1/src/main/proto/google/cloud/dataproc/v1/jobs.proto +++ b/proto-google-cloud-dataproc-v1/src/main/proto/google/cloud/dataproc/v1/jobs.proto @@ -1,4 +1,4 @@ -// Copyright 2019 Google LLC. +// Copyright 2020 Google LLC // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -19,6 +19,7 @@ package google.cloud.dataproc.v1; import "google/api/annotations.proto"; import "google/api/client.proto"; import "google/api/field_behavior.proto"; +import "google/longrunning/operations.proto"; import "google/protobuf/empty.proto"; import "google/protobuf/field_mask.proto"; import "google/protobuf/timestamp.proto"; @@ -69,9 +70,9 @@ service JobController { // Starts a job cancellation request. To access the job resource // after cancellation, call - // [regions/{region}/jobs.list](/dataproc/docs/reference/rest/v1/projects.regions.jobs/list) + // [regions/{region}/jobs.list](https://cloud.google.com/dataproc/docs/reference/rest/v1/projects.regions.jobs/list) // or - // [regions/{region}/jobs.get](/dataproc/docs/reference/rest/v1/projects.regions.jobs/get). + // [regions/{region}/jobs.get](https://cloud.google.com/dataproc/docs/reference/rest/v1/projects.regions.jobs/get). 
rpc CancelJob(CancelJobRequest) returns (Job) { option (google.api.http) = { post: "/v1/projects/{project_id}/regions/{region}/jobs/{job_id}:cancel" @@ -387,6 +388,71 @@ message PigJob { LoggingConfig logging_config = 7 [(google.api.field_behavior) = OPTIONAL]; } +// A Dataproc job for running +// [Apache SparkR](https://spark.apache.org/docs/latest/sparkr.html) +// applications on YARN. +message SparkRJob { + // Required. The HCFS URI of the main R file to use as the driver. + // Must be a .R file. + string main_r_file_uri = 1 [(google.api.field_behavior) = REQUIRED]; + + // Optional. The arguments to pass to the driver. Do not include arguments, + // such as `--conf`, that can be set as job properties, since a collision may + // occur that causes an incorrect job submission. + repeated string args = 2 [(google.api.field_behavior) = OPTIONAL]; + + // Optional. HCFS URIs of files to be copied to the working directory of + // R drivers and distributed tasks. Useful for naively parallel tasks. + repeated string file_uris = 3 [(google.api.field_behavior) = OPTIONAL]; + + // Optional. HCFS URIs of archives to be extracted in the working directory of + // Spark drivers and tasks. Supported file types: + // .jar, .tar, .tar.gz, .tgz, and .zip. + repeated string archive_uris = 4 [(google.api.field_behavior) = OPTIONAL]; + + // Optional. A mapping of property names to values, used to configure SparkR. + // Properties that conflict with values set by the Dataproc API may be + // overwritten. Can include properties set in + // /etc/spark/conf/spark-defaults.conf and classes in user code. + map properties = 5 [(google.api.field_behavior) = OPTIONAL]; + + // Optional. The runtime log config for job execution. + LoggingConfig logging_config = 6 [(google.api.field_behavior) = OPTIONAL]; +} + +// A Dataproc job for running [Presto](https://prestosql.io/) queries +message PrestoJob { + // Required. 
The sequence of Presto queries to execute, specified as + // either an HCFS file URI or as a list of queries. + oneof queries { + // The HCFS URI of the script that contains SQL queries. + string query_file_uri = 1; + + // A list of queries. + QueryList query_list = 2; + } + + // Optional. Whether to continue executing queries if a query fails. + // The default value is `false`. Setting to `true` can be useful when + // executing independent parallel queries. + bool continue_on_failure = 3 [(google.api.field_behavior) = OPTIONAL]; + + // Optional. The format in which query output will be displayed. See the + // Presto documentation for supported output formats + string output_format = 4 [(google.api.field_behavior) = OPTIONAL]; + + // Optional. Presto client tags to attach to this query + repeated string client_tags = 5 [(google.api.field_behavior) = OPTIONAL]; + + // Optional. A mapping of property names to values. Used to set Presto + // [session properties](https://prestodb.io/docs/current/sql/set-session.html) + // Equivalent to using the --session flag in the Presto CLI + map properties = 6 [(google.api.field_behavior) = OPTIONAL]; + + // Optional. The runtime log config for job execution. + LoggingConfig logging_config = 7 [(google.api.field_behavior) = OPTIONAL]; +} + // Dataproc job config. message JobPlacement { // Required. The name of the cluster where the job will be submitted. @@ -562,23 +628,29 @@ message Job { // Required. The application/framework-specific portion of the job. oneof type_job { - // Job is a Hadoop job. - HadoopJob hadoop_job = 3; + // Optional. Job is a Hadoop job. + HadoopJob hadoop_job = 3 [(google.api.field_behavior) = OPTIONAL]; + + // Optional. Job is a Spark job. + SparkJob spark_job = 4 [(google.api.field_behavior) = OPTIONAL]; + + // Optional. Job is a PySpark job. + PySparkJob pyspark_job = 5 [(google.api.field_behavior) = OPTIONAL]; - // Job is a Spark job. - SparkJob spark_job = 4; + // Optional. Job is a Hive job. 
+ HiveJob hive_job = 6 [(google.api.field_behavior) = OPTIONAL]; - // Job is a Pyspark job. - PySparkJob pyspark_job = 5; + // Optional. Job is a Pig job. + PigJob pig_job = 7 [(google.api.field_behavior) = OPTIONAL]; - // Job is a Hive job. - HiveJob hive_job = 6; + // Optional. Job is a SparkR job. + SparkRJob spark_r_job = 21 [(google.api.field_behavior) = OPTIONAL]; - // Job is a Pig job. - PigJob pig_job = 7; + // Optional. Job is a SparkSql job. + SparkSqlJob spark_sql_job = 12 [(google.api.field_behavior) = OPTIONAL]; - // Job is a SparkSql job. - SparkSqlJob spark_sql_job = 12; + // Optional. Job is a Presto job. + PrestoJob presto_job = 23 [(google.api.field_behavior) = OPTIONAL]; } // Output only. The job status. Additional application-specific diff --git a/proto-google-cloud-dataproc-v1/src/main/proto/google/cloud/dataproc/v1/operations.proto b/proto-google-cloud-dataproc-v1/src/main/proto/google/cloud/dataproc/v1/operations.proto index 4af2a5f8..724d2a89 100644 --- a/proto-google-cloud-dataproc-v1/src/main/proto/google/cloud/dataproc/v1/operations.proto +++ b/proto-google-cloud-dataproc-v1/src/main/proto/google/cloud/dataproc/v1/operations.proto @@ -1,4 +1,4 @@ -// Copyright 2019 Google LLC. +// Copyright 2020 Google LLC // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -11,7 +11,6 @@ // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. 
-// syntax = "proto3"; diff --git a/proto-google-cloud-dataproc-v1/src/main/proto/google/cloud/dataproc/v1/shared.proto b/proto-google-cloud-dataproc-v1/src/main/proto/google/cloud/dataproc/v1/shared.proto index 74bd56a8..c6ff8f28 100644 --- a/proto-google-cloud-dataproc-v1/src/main/proto/google/cloud/dataproc/v1/shared.proto +++ b/proto-google-cloud-dataproc-v1/src/main/proto/google/cloud/dataproc/v1/shared.proto @@ -1,4 +1,4 @@ -// Copyright 2019 Google LLC. +// Copyright 2020 Google LLC // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -11,7 +11,6 @@ // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. -// syntax = "proto3"; diff --git a/proto-google-cloud-dataproc-v1/src/main/proto/google/cloud/dataproc/v1/workflow_templates.proto b/proto-google-cloud-dataproc-v1/src/main/proto/google/cloud/dataproc/v1/workflow_templates.proto index 30b5ced4..2db55798 100644 --- a/proto-google-cloud-dataproc-v1/src/main/proto/google/cloud/dataproc/v1/workflow_templates.proto +++ b/proto-google-cloud-dataproc-v1/src/main/proto/google/cloud/dataproc/v1/workflow_templates.proto @@ -1,4 +1,4 @@ -// Copyright 2019 Google LLC. +// Copyright 2020 Google LLC // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -11,7 +11,6 @@ // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. -// syntax = "proto3"; @@ -78,9 +77,9 @@ service WorkflowTemplateService { // clusters to be deleted. // // The [Operation.metadata][google.longrunning.Operation.metadata] will be - // [WorkflowMetadata](/dataproc/docs/reference/rpc/google.cloud.dataproc.v1#workflowmetadata). 
+ // [WorkflowMetadata](https://cloud.google.com/dataproc/docs/reference/rpc/google.cloud.dataproc.v1#workflowmetadata). // Also see [Using - // WorkflowMetadata](/dataproc/docs/concepts/workflows/debugging#using_workflowmetadata). + // WorkflowMetadata](https://cloud.google.com/dataproc/docs/concepts/workflows/debugging#using_workflowmetadata). // // On successful completion, // [Operation.response][google.longrunning.Operation.response] will be @@ -119,9 +118,9 @@ service WorkflowTemplateService { // clusters to be deleted. // // The [Operation.metadata][google.longrunning.Operation.metadata] will be - // [WorkflowMetadata](/dataproc/docs/reference/rpc/google.cloud.dataproc.v1#workflowmetadata). + // [WorkflowMetadata](https://cloud.google.com/dataproc/docs/reference/rpc/google.cloud.dataproc.v1#workflowmetadata). // Also see [Using - // WorkflowMetadata](/dataproc/docs/concepts/workflows/debugging#using_workflowmetadata). + // WorkflowMetadata](https://cloud.google.com/dataproc/docs/concepts/workflows/debugging#using_workflowmetadata). // // On successful completion, // [Operation.response][google.longrunning.Operation.response] will be @@ -320,22 +319,16 @@ message OrderedJob { // Required. The job definition. oneof job_type { - // Job is a Hadoop job. HadoopJob hadoop_job = 2; - // Job is a Spark job. SparkJob spark_job = 3; - // Job is a Pyspark job. PySparkJob pyspark_job = 4; - // Job is a Hive job. HiveJob hive_job = 5; - // Job is a Pig job. PigJob pig_job = 6; - // Job is a SparkSql job. SparkSqlJob spark_sql_job = 7; } @@ -708,9 +701,7 @@ message UpdateWorkflowTemplateRequest { // Required. The updated workflow template. // // The `template.version` field must match the current version. - WorkflowTemplate template = 1 [ - (google.api.field_behavior) = REQUIRED - ]; + WorkflowTemplate template = 1 [(google.api.field_behavior) = REQUIRED]; } // A request to list workflow templates in a project. 
diff --git a/synth.metadata b/synth.metadata index 729b5e21..c2d06bb2 100644 --- a/synth.metadata +++ b/synth.metadata @@ -1,5 +1,5 @@ { - "updateTime": "2020-02-18T23:48:43.590140Z", + "updateTime": "2020-02-21T09:47:48.033325Z", "sources": [ { "generator": { @@ -12,23 +12,23 @@ "git": { "name": ".", "remote": "https://github.com/googleapis/java-dataproc.git", - "sha": "7ffc22b6a0119c9d6302ebc8a60eaad97cc92a37" + "sha": "9f02988efe5f2a603c1e8e016d966dfe2c1e13f5" } }, { "git": { "name": "googleapis", "remote": "https://github.com/googleapis/googleapis.git", - "sha": "ab2685d8d3a0e191dc8aef83df36773c07cb3d06", - "internalRef": "295738415", - "log": "ab2685d8d3a0e191dc8aef83df36773c07cb3d06\nfix: Dataproc v1 - AutoscalingPolicy annotation\n\nThis adds the second resource name pattern to the\nAutoscalingPolicy resource.\n\nCommitter: @lukesneeringer\nPiperOrigin-RevId: 295738415\n\n8a1020bf6828f6e3c84c3014f2c51cb62b739140\nUpdate cloud asset api v1p4beta1.\n\nPiperOrigin-RevId: 295286165\n\n5cfa105206e77670369e4b2225597386aba32985\nAdd service control related proto build rule.\n\nPiperOrigin-RevId: 295262088\n\nee4dddf805072004ab19ac94df2ce669046eec26\nmonitoring v3: Add prefix \"https://cloud.google.com/\" into the link for global access\ncl 295167522, get ride of synth.py hacks\n\nPiperOrigin-RevId: 295238095\n\n" + "sha": "e5bc9566ae057fb4c92f8b7e047f1c8958235b53", + "internalRef": "296357191", + "log": "e5bc9566ae057fb4c92f8b7e047f1c8958235b53\nDeprecate the endpoint_uris field, as it is unused.\n\nPiperOrigin-RevId: 296357191\n\n8c12e2b4dca94e12bff9f538bdac29524ff7ef7a\nUpdate Dataproc v1 client.\n\nPiperOrigin-RevId: 296336662\n\n17567c4a1ef0a9b50faa87024d66f8acbb561089\nRemoving erroneous comment, a la https://github.com/googleapis/java-speech/pull/103\n\nPiperOrigin-RevId: 296332968\n\n3eaaaf8626ce5b0c0bc7eee05e143beffa373b01\nAdd BUILD.bazel for v1 secretmanager.googleapis.com\n\nPiperOrigin-RevId: 296274723\n\ne76149c3d992337f85eeb45643106aacae7ede82\nMove 
securitycenter v1 to use generate from annotations.\n\nPiperOrigin-RevId: 296266862\n\n203740c78ac69ee07c3bf6be7408048751f618f8\nAdd StackdriverLoggingConfig field to Cloud Tasks v2 API.\n\nPiperOrigin-RevId: 296256388\n\ne4117d5e9ed8bbca28da4a60a94947ca51cb2083\nCreate a Bazel BUILD file for the google.actions.type export.\n\nPiperOrigin-RevId: 296212567\n\na9639a0a9854fd6e1be08bba1ac3897f4f16cb2f\nAdd secretmanager.googleapis.com v1 protos\n\nPiperOrigin-RevId: 295983266\n\nce4f4c21d9dd2bfab18873a80449b9d9851efde8\nasset: v1p1beta1 remove SearchResources and SearchIamPolicies\n\nPiperOrigin-RevId: 295861722\n\ncb61d6c2d070b589980c779b68ffca617f789116\nasset: v1p1beta1 remove SearchResources and SearchIamPolicies\n\nPiperOrigin-RevId: 295855449\n\n" } }, { "git": { "name": "synthtool", "remote": "rpc://devrel/cloud/libraries/tools/autosynth", - "sha": "dd7cd93888cbeb1d4c56a1ca814491c7813160e8" + "sha": "706a38c26db42299845396cdae55db635c38794a" } }, {