fix(dataproc): update the API
#### dataproc:v1
The following keys were changed:
- schemas.BasicYarnAutoscalingConfig.properties.scaleDownFactor.description
- schemas.BasicYarnAutoscalingConfig.properties.scaleUpFactor.description
- schemas.PySparkJob.properties.archiveUris.description
- schemas.PySparkJob.properties.fileUris.description
- schemas.SparkJob.properties.archiveUris.description
- schemas.SparkJob.properties.fileUris.description
- schemas.SparkRJob.properties.archiveUris.description
- schemas.SparkRJob.properties.fileUris.description

#### dataproc:v1beta2
The following keys were changed:
- schemas.BasicYarnAutoscalingConfig.properties.scaleDownFactor.description
- schemas.BasicYarnAutoscalingConfig.properties.scaleUpFactor.description
- schemas.PySparkJob.properties.archiveUris.description
- schemas.PySparkJob.properties.fileUris.description
- schemas.SoftwareConfig.properties.optionalComponents.enumDescriptions
- schemas.SoftwareConfig.properties.optionalComponents.items.enum
- schemas.SparkJob.properties.archiveUris.description
- schemas.SparkJob.properties.fileUris.description
- schemas.SparkRJob.properties.archiveUris.description
- schemas.SparkRJob.properties.fileUris.description
- schemas.WorkflowTemplate.description
yoshi-automation authored and JustinBeckwith committed Jun 12, 2020
1 parent b5ab2e1 commit d04d55a
Showing 4 changed files with 36 additions and 40 deletions.
18 changes: 9 additions & 9 deletions discovery/dataproc-v1.json
@@ -2129,7 +2129,7 @@
}
}
},
- "revision": "20200511",
+ "revision": "20200528",
"rootUrl": "https://dataproc.googleapis.com/",
"schemas": {
"AcceleratorConfig": {
@@ -2211,7 +2211,7 @@
"type": "string"
},
"scaleDownFactor": {
- "description": "Required. Fraction of average pending memory in the last cooldown period for which to remove workers. A scale-down factor of 1 will result in scaling down so that there is no available memory remaining after the update (more aggressive scaling). A scale-down factor of 0 disables removing workers, which can be beneficial for autoscaling a single job.Bounds: 0.0, 1.0.",
+ "description": "Required. Fraction of average YARN pending memory in the last cooldown period for which to remove workers. A scale-down factor of 1 will result in scaling down so that there is no available memory remaining after the update (more aggressive scaling). A scale-down factor of 0 disables removing workers, which can be beneficial for autoscaling a single job. See How autoscaling works for more information.Bounds: 0.0, 1.0.",
"format": "double",
"type": "number"
},
@@ -2221,7 +2221,7 @@
"type": "number"
},
"scaleUpFactor": {
- "description": "Required. Fraction of average pending memory in the last cooldown period for which to add workers. A scale-up factor of 1.0 will result in scaling up so that there is no pending memory remaining after the update (more aggressive scaling). A scale-up factor closer to 0 will result in a smaller magnitude of scaling up (less aggressive scaling).Bounds: 0.0, 1.0.",
+ "description": "Required. Fraction of average YARN pending memory in the last cooldown period for which to add workers. A scale-up factor of 1.0 will result in scaling up so that there is no pending memory remaining after the update (more aggressive scaling). A scale-up factor closer to 0 will result in a smaller magnitude of scaling up (less aggressive scaling). See How autoscaling works for more information.Bounds: 0.0, 1.0.",
"format": "double",
"type": "number"
},
@@ -3616,7 +3616,7 @@
"id": "PySparkJob",
"properties": {
"archiveUris": {
- "description": "Optional. HCFS URIs of archives to be extracted in the working directory of .jar, .tar, .tar.gz, .tgz, and .zip.",
+ "description": "Optional. HCFS URIs of archives to be extracted into the working directory of each executor. Supported file types: .jar, .tar, .tar.gz, .tgz, and .zip.",
"items": {
"type": "string"
},
@@ -3630,7 +3630,7 @@
"type": "array"
},
"fileUris": {
- "description": "Optional. HCFS URIs of files to be copied to the working directory of Python drivers and distributed tasks. Useful for naively parallel tasks.",
+ "description": "Optional. HCFS URIs of files to be placed in the working directory of each executor. Useful for naively parallel tasks.",
"items": {
"type": "string"
},
@@ -3800,7 +3800,7 @@
"id": "SparkJob",
"properties": {
"archiveUris": {
- "description": "Optional. HCFS URIs of archives to be extracted in the working directory of Spark drivers and tasks. Supported file types: .jar, .tar, .tar.gz, .tgz, and .zip.",
+ "description": "Optional. HCFS URIs of archives to be extracted into the working directory of each executor. Supported file types: .jar, .tar, .tar.gz, .tgz, and .zip.",
"items": {
"type": "string"
},
@@ -3814,7 +3814,7 @@
"type": "array"
},
"fileUris": {
- "description": "Optional. HCFS URIs of files to be copied to the working directory of Spark drivers and distributed tasks. Useful for naively parallel tasks.",
+ "description": "Optional. HCFS URIs of files to be placed in the working directory of each executor. Useful for naively parallel tasks.",
"items": {
"type": "string"
},
@@ -3854,7 +3854,7 @@
"id": "SparkRJob",
"properties": {
"archiveUris": {
- "description": "Optional. HCFS URIs of archives to be extracted in the working directory of Spark drivers and tasks. Supported file types: .jar, .tar, .tar.gz, .tgz, and .zip.",
+ "description": "Optional. HCFS URIs of archives to be extracted into the working directory of each executor. Supported file types: .jar, .tar, .tar.gz, .tgz, and .zip.",
"items": {
"type": "string"
},
@@ -3868,7 +3868,7 @@
"type": "array"
},
"fileUris": {
- "description": "Optional. HCFS URIs of files to be copied to the working directory of R drivers and distributed tasks. Useful for naively parallel tasks.",
+ "description": "Optional. HCFS URIs of files to be placed in the working directory of each executor. Useful for naively parallel tasks.",
"items": {
"type": "string"
},
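
The two autoscaling descriptions above read most easily with concrete numbers. Below is a minimal sketch of the corresponding config object, typed against this library's generated dataproc_v1 types; the interface name assumes the generator's usual Schema$ naming for the BasicYarnAutoscalingConfig schema, and every value is hypothetical:

```ts
import {dataproc_v1} from 'googleapis';

// Hypothetical values, chosen only to illustrate the reworded semantics.
const yarnScaling: dataproc_v1.Schema$BasicYarnAutoscalingConfig = {
  gracefulDecommissionTimeout: '600s',
  // With, say, 10 GB of average YARN pending memory in the last cooldown
  // period, 0.5 adds enough workers to cover ~5 GB of it; 1.0 would add
  // enough to leave no pending memory (more aggressive scaling).
  scaleUpFactor: 0.5,
  // 1.0 removes workers until no available memory remains after the
  // update; 0 disables scale-down (useful when autoscaling a single job).
  scaleDownFactor: 1.0,
  // In a 20-worker cluster, 0.1 requires a recommended scale-down of at
  // least 2 workers before any scaling occurs.
  scaleDownMinWorkerFraction: 0.1,
};
```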
24 changes: 10 additions & 14 deletions discovery/dataproc-v1beta2.json
@@ -2242,7 +2242,7 @@
}
}
},
- "revision": "20200511",
+ "revision": "20200528",
"rootUrl": "https://dataproc.googleapis.com/",
"schemas": {
"AcceleratorConfig": {
@@ -2324,7 +2324,7 @@
"type": "string"
},
"scaleDownFactor": {
- "description": "Required. Fraction of average pending memory in the last cooldown period for which to remove workers. A scale-down factor of 1 will result in scaling down so that there is no available memory remaining after the update (more aggressive scaling). A scale-down factor of 0 disables removing workers, which can be beneficial for autoscaling a single job.Bounds: 0.0, 1.0.",
+ "description": "Required. Fraction of average YARN pending memory in the last cooldown period for which to remove workers. A scale-down factor of 1 will result in scaling down so that there is no available memory remaining after the update (more aggressive scaling). A scale-down factor of 0 disables removing workers, which can be beneficial for autoscaling a single job. See How autoscaling works for more information.Bounds: 0.0, 1.0.",
"format": "double",
"type": "number"
},
@@ -2334,7 +2334,7 @@
"type": "number"
},
"scaleUpFactor": {
- "description": "Required. Fraction of average pending memory in the last cooldown period for which to add workers. A scale-up factor of 1.0 will result in scaling up so that there is no pending memory remaining after the update (more aggressive scaling). A scale-up factor closer to 0 will result in a smaller magnitude of scaling up (less aggressive scaling).Bounds: 0.0, 1.0.",
+ "description": "Required. Fraction of average YARN pending memory in the last cooldown period for which to add workers. A scale-up factor of 1.0 will result in scaling up so that there is no pending memory remaining after the update (more aggressive scaling). A scale-up factor closer to 0 will result in a smaller magnitude of scaling up (less aggressive scaling). See How autoscaling works for more information.Bounds: 0.0, 1.0.",
"format": "double",
"type": "number"
},
@@ -3795,7 +3795,7 @@
"id": "PySparkJob",
"properties": {
"archiveUris": {
- "description": "Optional. HCFS URIs of archives to be extracted in the working directory of .jar, .tar, .tar.gz, .tgz, and .zip.",
+ "description": "Optional. HCFS URIs of archives to be extracted into the working directory of each executor. Supported file types: .jar, .tar, .tar.gz, .tgz, and .zip.",
"items": {
"type": "string"
},
@@ -3809,7 +3809,7 @@
"type": "array"
},
"fileUris": {
- "description": "Optional. HCFS URIs of files to be copied to the working directory of Python drivers and distributed tasks. Useful for naively parallel tasks.",
+ "description": "Optional. HCFS URIs of files to be placed in the working directory of each executor. Useful for naively parallel tasks.",
"items": {
"type": "string"
},
@@ -3944,9 +3944,7 @@
"enumDescriptions": [
"Unspecified component.",
"The Anaconda python distribution.",
- "Docker",
"The Druid query engine.",
- "Flink",
"HBase.",
"The Hive Web HCatalog (the REST service for accessing HCatalog).",
"The Jupyter Notebook.",
@@ -3961,9 +3959,7 @@
"enum": [
"COMPONENT_UNSPECIFIED",
"ANACONDA",
- "DOCKER",
"DRUID",
- "FLINK",
"HBASE",
"HIVE_WEBHCAT",
"JUPYTER",
@@ -3993,7 +3989,7 @@
"id": "SparkJob",
"properties": {
"archiveUris": {
- "description": "Optional. HCFS URIs of archives to be extracted in the working directory of Spark drivers and tasks. Supported file types: .jar, .tar, .tar.gz, .tgz, and .zip.",
+ "description": "Optional. HCFS URIs of archives to be extracted into the working directory of each executor. Supported file types: .jar, .tar, .tar.gz, .tgz, and .zip.",
"items": {
"type": "string"
},
@@ -4007,7 +4003,7 @@
"type": "array"
},
"fileUris": {
- "description": "Optional. HCFS URIs of files to be copied to the working directory of Spark drivers and distributed tasks. Useful for naively parallel tasks.",
+ "description": "Optional. HCFS URIs of files to be placed in the working directory of each executor. Useful for naively parallel tasks.",
"items": {
"type": "string"
},
@@ -4047,7 +4043,7 @@
"id": "SparkRJob",
"properties": {
"archiveUris": {
- "description": "Optional. HCFS URIs of archives to be extracted in the working directory of Spark drivers and tasks. Supported file types: .jar, .tar, .tar.gz, .tgz, and .zip.",
+ "description": "Optional. HCFS URIs of archives to be extracted into the working directory of each executor. Supported file types: .jar, .tar, .tar.gz, .tgz, and .zip.",
"items": {
"type": "string"
},
@@ -4061,7 +4057,7 @@
"type": "array"
},
"fileUris": {
- "description": "Optional. HCFS URIs of files to be copied to the working directory of R drivers and distributed tasks. Useful for naively parallel tasks.",
+ "description": "Optional. HCFS URIs of files to be placed in the working directory of each executor. Useful for naively parallel tasks.",
"items": {
"type": "string"
},
@@ -4395,7 +4391,7 @@
"type": "object"
},
"WorkflowTemplate": {
- "description": "A Dataproc workflow template resource.",
+ "description": "A Dataproc workflow template resource. Next ID: 11",
"id": "WorkflowTemplate",
"properties": {
"createTime": {
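
The enum hunks above drop DOCKER and FLINK from the v1beta2 optionalComponents list. As a sanity check, here is a minimal sketch of a SoftwareConfig that uses only values the updated discovery document still advertises; the interface name assumes the generator's Schema$ convention, and the image version is illustrative:

```ts
import {dataproc_v1beta2} from 'googleapis';

// ANACONDA and JUPYTER remain in the updated enum; DOCKER and FLINK
// (removed above) no longer appear in the v1beta2 surface.
const softwareConfig: dataproc_v1beta2.Schema$SoftwareConfig = {
  imageVersion: '1.5', // illustrative
  optionalComponents: ['ANACONDA', 'JUPYTER'],
};
```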
16 changes: 8 additions & 8 deletions src/apis/dataproc/v1.ts
@@ -193,15 +193,15 @@ export namespace dataproc_v1 {
*/
gracefulDecommissionTimeout?: string | null;
/**
- * Required. Fraction of average pending memory in the last cooldown period for which to remove workers. A scale-down factor of 1 will result in scaling down so that there is no available memory remaining after the update (more aggressive scaling). A scale-down factor of 0 disables removing workers, which can be beneficial for autoscaling a single job.Bounds: 0.0, 1.0.
+ * Required. Fraction of average YARN pending memory in the last cooldown period for which to remove workers. A scale-down factor of 1 will result in scaling down so that there is no available memory remaining after the update (more aggressive scaling). A scale-down factor of 0 disables removing workers, which can be beneficial for autoscaling a single job. See How autoscaling works for more information.Bounds: 0.0, 1.0.
*/
scaleDownFactor?: number | null;
/**
* Optional. Minimum scale-down threshold as a fraction of total cluster size before scaling occurs. For example, in a 20-worker cluster, a threshold of 0.1 means the autoscaler must recommend at least a 2 worker scale-down for the cluster to scale. A threshold of 0 means the autoscaler will scale down on any recommended change.Bounds: 0.0, 1.0. Default: 0.0.
*/
scaleDownMinWorkerFraction?: number | null;
/**
- * Required. Fraction of average pending memory in the last cooldown period for which to add workers. A scale-up factor of 1.0 will result in scaling up so that there is no pending memory remaining after the update (more aggressive scaling). A scale-up factor closer to 0 will result in a smaller magnitude of scaling up (less aggressive scaling).Bounds: 0.0, 1.0.
+ * Required. Fraction of average YARN pending memory in the last cooldown period for which to add workers. A scale-up factor of 1.0 will result in scaling up so that there is no pending memory remaining after the update (more aggressive scaling). A scale-up factor closer to 0 will result in a smaller magnitude of scaling up (less aggressive scaling). See How autoscaling works for more information.Bounds: 0.0, 1.0.
*/
scaleUpFactor?: number | null;
/**
@@ -1246,15 +1246,15 @@ export namespace dataproc_v1 {
*/
export interface Schema$PySparkJob {
/**
- * Optional. HCFS URIs of archives to be extracted in the working directory of .jar, .tar, .tar.gz, .tgz, and .zip.
+ * Optional. HCFS URIs of archives to be extracted into the working directory of each executor. Supported file types: .jar, .tar, .tar.gz, .tgz, and .zip.
*/
archiveUris?: string[] | null;
/**
* Optional. The arguments to pass to the driver. Do not include arguments, such as --conf, that can be set as job properties, since a collision may occur that causes an incorrect job submission.
*/
args?: string[] | null;
/**
- * Optional. HCFS URIs of files to be copied to the working directory of Python drivers and distributed tasks. Useful for naively parallel tasks.
+ * Optional. HCFS URIs of files to be placed in the working directory of each executor. Useful for naively parallel tasks.
*/
fileUris?: string[] | null;
/**
@@ -1353,15 +1353,15 @@ export namespace dataproc_v1 {
*/
export interface Schema$SparkJob {
/**
- * Optional. HCFS URIs of archives to be extracted in the working directory of Spark drivers and tasks. Supported file types: .jar, .tar, .tar.gz, .tgz, and .zip.
+ * Optional. HCFS URIs of archives to be extracted into the working directory of each executor. Supported file types: .jar, .tar, .tar.gz, .tgz, and .zip.
*/
archiveUris?: string[] | null;
/**
* Optional. The arguments to pass to the driver. Do not include arguments, such as --conf, that can be set as job properties, since a collision may occur that causes an incorrect job submission.
*/
args?: string[] | null;
/**
- * Optional. HCFS URIs of files to be copied to the working directory of Spark drivers and distributed tasks. Useful for naively parallel tasks.
+ * Optional. HCFS URIs of files to be placed in the working directory of each executor. Useful for naively parallel tasks.
*/
fileUris?: string[] | null;
/**
@@ -1390,15 +1390,15 @@ export namespace dataproc_v1 {
*/
export interface Schema$SparkRJob {
/**
- * Optional. HCFS URIs of archives to be extracted in the working directory of Spark drivers and tasks. Supported file types: .jar, .tar, .tar.gz, .tgz, and .zip.
+ * Optional. HCFS URIs of archives to be extracted into the working directory of each executor. Supported file types: .jar, .tar, .tar.gz, .tgz, and .zip.
*/
archiveUris?: string[] | null;
/**
* Optional. The arguments to pass to the driver. Do not include arguments, such as --conf, that can be set as job properties, since a collision may occur that causes an incorrect job submission.
*/
args?: string[] | null;
/**
- * Optional. HCFS URIs of files to be copied to the working directory of R drivers and distributed tasks. Useful for naively parallel tasks.
+ * Optional. HCFS URIs of files to be placed in the working directory of each executor. Useful for naively parallel tasks.
*/
fileUris?: string[] | null;
/**
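
Read together, the reworded fields say that archives are unpacked into each executor's working directory while plain files are copied there as-is. Below is a minimal sketch of submitting a PySpark job that uses both, assuming Application Default Credentials and hypothetical project, region, cluster, and bucket names:

```ts
import {google} from 'googleapis';

async function submitPySparkJob(): Promise<void> {
  // Assumes Application Default Credentials are configured.
  const auth = new google.auth.GoogleAuth({
    scopes: ['https://www.googleapis.com/auth/cloud-platform'],
  });
  const dataproc = google.dataproc({version: 'v1', auth});

  await dataproc.projects.regions.jobs.submit({
    projectId: 'my-project', // hypothetical
    region: 'us-central1',   // hypothetical
    requestBody: {
      job: {
        placement: {clusterName: 'my-cluster'}, // hypothetical
        pysparkJob: {
          mainPythonFileUri: 'gs://my-bucket/main.py',
          // Extracted into the working directory of each executor.
          archiveUris: ['gs://my-bucket/deps.zip'],
          // Placed in the working directory of each executor.
          fileUris: ['gs://my-bucket/lookup.csv'],
        },
      },
    },
  });
}
```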
